arch/alpha/include/asm/mmu_context.h:119: __get_new_mm_context(struct mm_struct *mm, long cpu)
arch/alpha/include/asm/mmu_context.h:121: unsigned long asn = cpu_last_asn(cpu);
arch/alpha/include/asm/mmu_context.h:129: cpu_last_asn(cpu) = next;
arch/alpha/include/asm/mmu_context.h:140: long cpu = smp_processor_id();
arch/alpha/include/asm/mmu_context.h:143: cpu_data[cpu].asn_lock = 1;
arch/alpha/include/asm/mmu_context.h:146: asn = cpu_last_asn(cpu);
arch/alpha/include/asm/mmu_context.h:147: mmc = next_mm->context[cpu];
arch/alpha/include/asm/mmu_context.h:149: mmc = __get_new_mm_context(next_mm, cpu);
arch/alpha/include/asm/mmu_context.h:150: next_mm->context[cpu] = mmc;
arch/alpha/include/asm/mmu_context.h:154: cpu_data[cpu].need_new_asn = 1;
arch/alpha/include/asm/mmu_context.h:170: int cpu = smp_processor_id(); \
arch/alpha/include/asm/mmu_context.h:171: cpu_data[cpu].asn_lock = 0; \
arch/alpha/include/asm/mmu_context.h:173: if (cpu_data[cpu].need_new_asn) { \
arch/alpha/include/asm/mmu_context.h:175: cpu_data[cpu].need_new_asn = 0; \
arch/alpha/include/asm/mmu_context.h:176: if (!mm->context[cpu]) \
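The mmu_context.h hits above are Alpha's per-CPU ASN (address space number) allocator: cpu_last_asn(cpu) carries a version counter in its high bits, and a wrap of the hardware ASN field forces a TLB flush plus a version bump, so every previously handed-out context becomes stale at once. A minimal userspace sketch of that rollover logic, with illustrative constants and a print stub in place of the real tbiap()/imb() flush:

#include <stdio.h>

#define WIDTH_HARDWARE_ASN 8
#define ASN_FIRST_VERSION  (1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK  ((1UL << WIDTH_HARDWARE_ASN) - 1)
#define MAX_ASN            63   /* illustrative; machine dependent on real hardware */
#define NR_CPUS            2

static unsigned long cpu_last_asn[NR_CPUS];

/* Stand-in for tbiap()/imb(): flush the whole TLB on ASN exhaustion. */
static void tbiap(long cpu)
{
	printf("cpu%ld: ASN space exhausted, flush TLB, bump version\n", cpu);
}

static unsigned long get_new_mm_context(long cpu)
{
	unsigned long asn = cpu_last_asn[cpu];
	unsigned long next = asn + 1;

	if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
		tbiap(cpu);
		next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
	}
	cpu_last_asn[cpu] = next;
	return next;
}

int main(void)
{
	cpu_last_asn[0] = ASN_FIRST_VERSION;
	for (int i = 0; i < 70; i++)
		get_new_mm_context(0);
	/* high bits are now version 2: every older context is stale */
	printf("cpu0 last context: %#lx\n", cpu_last_asn[0]);
	return 0;
}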
arch/alpha/include/asm/smp.h:44: #define raw_smp_processor_id() (current_thread_info()->cpu)
arch/alpha/include/asm/smp.h:48: extern void arch_send_call_function_single_ipi(int cpu);
arch/alpha/include/asm/smp.h:54: #define smp_call_function_on_cpu(func,info,wait,cpu) ({ 0; })
arch/alpha/include/asm/thread_info.h:22: unsigned cpu; /* current CPU */
arch/alpha/kernel/bugs.c:11: struct percpu_struct *cpu;
arch/alpha/kernel/bugs.c:14: cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
arch/alpha/kernel/bugs.c:15: cputype = cpu->type & 0xffffffff;
arch/alpha/kernel/core_irongate.c:206: struct percpu_struct *cpu;
arch/alpha/kernel/core_irongate.c:209: cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
arch/alpha/kernel/core_irongate.c:210: pal_rev = cpu->pal_revision & 0xffff;
arch/alpha/kernel/core_irongate.c:211: pal_var = (cpu->pal_revision >> 16) & 0xff;
arch/alpha/kernel/core_mcpcia.c:110: mcheck_expected(cpu) = 1;
arch/alpha/kernel/core_mcpcia.c:111: mcheck_taken(cpu) = 0;
arch/alpha/kernel/core_mcpcia.c:112: mcheck_extra(cpu) = mid;
arch/alpha/kernel/core_mcpcia.c:120: if (mcheck_taken(cpu)) {
arch/alpha/kernel/core_mcpcia.c:121: mcheck_taken(cpu) = 0;
arch/alpha/kernel/core_mcpcia.c:125: mcheck_expected(cpu) = 0;
arch/alpha/kernel/core_mcpcia.c:140: unsigned int stat0, cpu;
arch/alpha/kernel/core_mcpcia.c:142: cpu = smp_processor_id();
arch/alpha/kernel/core_mcpcia.c:153: mcheck_expected(cpu) = 1;
arch/alpha/kernel/core_mcpcia.c:154: mcheck_extra(cpu) = mid;
arch/alpha/kernel/core_mcpcia.c:162: mcheck_expected(cpu) = 0;
arch/alpha/kernel/core_mcpcia.c:256: int cpu = smp_processor_id();
arch/alpha/kernel/core_mcpcia.c:267: mcheck_expected(cpu) = 2; /* indicates probing */
arch/alpha/kernel/core_mcpcia.c:268: mcheck_taken(cpu) = 0;
arch/alpha/kernel/core_mcpcia.c:269: mcheck_extra(cpu) = mid;
arch/alpha/kernel/core_mcpcia.c:277: if (mcheck_taken(cpu)) {
arch/alpha/kernel/core_mcpcia.c:278: mcheck_taken(cpu) = 0;
arch/alpha/kernel/core_mcpcia.c:282: mcheck_expected(cpu) = 0;
arch/alpha/kernel/core_mcpcia.c:579: unsigned int cpu = smp_processor_id();
arch/alpha/kernel/core_mcpcia.c:583: expected = mcheck_expected(cpu);
arch/alpha/kernel/core_mcpcia.c:600: mcpcia_pci_clr_err(mcheck_extra(cpu));
arch/alpha/kernel/core_mcpcia.c:92: unsigned int stat0, value, cpu;
arch/alpha/kernel/core_mcpcia.c:94: cpu = smp_processor_id();
arch/alpha/kernel/core_t2.c:183: unsigned int value, cpu, taken;
arch/alpha/kernel/core_t2.c:186: cpu = smp_processor_id();
arch/alpha/kernel/core_t2.c:199: mcheck_expected(cpu) = 1;
arch/alpha/kernel/core_t2.c:200: mcheck_taken(cpu) = 0;
arch/alpha/kernel/core_t2.c:201: t2_mcheck_any_expected |= (1 << cpu);
arch/alpha/kernel/core_t2.c:215: if ((taken = mcheck_taken(cpu))) {
arch/alpha/kernel/core_t2.c:216: mcheck_taken(cpu) = 0;
arch/alpha/kernel/core_t2.c:217: t2_mcheck_last_taken |= (1 << cpu);
arch/alpha/kernel/core_t2.c:221: mcheck_expected(cpu) = 0;
arch/alpha/kernel/core_t2.c:237: unsigned int cpu, taken;
arch/alpha/kernel/core_t2.c:240: cpu = smp_processor_id();
arch/alpha/kernel/core_t2.c:251: mcheck_expected(cpu) = 1;
arch/alpha/kernel/core_t2.c:252: mcheck_taken(cpu) = 0;
arch/alpha/kernel/core_t2.c:253: t2_mcheck_any_expected |= (1 << cpu);
arch/alpha/kernel/core_t2.c:267: if ((taken = mcheck_taken(cpu))) {
arch/alpha/kernel/core_t2.c:268: mcheck_taken(cpu) = 0;
arch/alpha/kernel/core_t2.c:269: t2_mcheck_last_taken |= (1 << cpu);
arch/alpha/kernel/core_t2.c:272: mcheck_expected(cpu) = 0;
arch/alpha/kernel/core_t2.c:526: t2_clear_errors(int cpu)
arch/alpha/kernel/core_t2.c:530: cpu_regs = (struct sable_cpu_csr *)T2_CPUn_BASE(cpu);
arch/alpha/kernel/core_t2.c:559: int cpu = smp_processor_id();
arch/alpha/kernel/core_t2.c:568: t2_clear_errors(cpu);
arch/alpha/kernel/core_t2.c:576: if (!mcheck_expected(cpu) && t2_mcheck_any_expected) {
arch/alpha/kernel/core_t2.c:587: " code 0x%x\n", cpu, t2_mcheck_any_expected,
arch/alpha/kernel/core_t2.c:594: if (!mcheck_expected(cpu) && !t2_mcheck_any_expected) {
arch/alpha/kernel/core_t2.c:595: if (t2_mcheck_last_taken & (1 << cpu)) {
arch/alpha/kernel/core_t2.c:600: cpu, t2_mcheck_last_taken,
arch/alpha/kernel/core_t2.c:617: (mcheck_expected(cpu) ? "EX" : "UN"), cpu,
arch/alpha/kernel/core_t2.c:623: process_mcheck_info(vector, la_ptr, "T2", mcheck_expected(cpu));
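The mcheck_expected/mcheck_taken traffic above (core_mcpcia.c, core_t2.c, and the handler side in irq_alpha.c further down) implements probing by expected machine check: flag that a fault may fire, touch possibly-absent PCI config space, then read back whether the handler swallowed one. A hedged userspace simulation of just that control flow; the fake bus_read() standing in for a real config-space access is made up for the demo:

#include <stdio.h>

static int mcheck_expected, mcheck_taken;

static void machine_check(void)          /* plays the trap handler's role */
{
	if (mcheck_expected)
		mcheck_taken = 1;        /* swallow it quietly */
	else
		printf("unexpected machine check: panic!\n");
}

static unsigned bus_read(int slot)       /* fake config-space read */
{
	if (slot > 2) {                  /* unpopulated slot: master abort */
		machine_check();
		return ~0u;
	}
	return 0x8086;                   /* some vendor id */
}

static int probe_slot(int slot, unsigned *value)
{
	mcheck_expected = 1;
	mcheck_taken = 0;
	*value = bus_read(slot);
	mcheck_expected = 0;
	if (mcheck_taken) {              /* access faulted: no device there */
		mcheck_taken = 0;
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned v;
	for (int slot = 0; slot < 5; slot++)
		printf("slot %d: %s\n", slot,
		       probe_slot(slot, &v) ? "empty" : "device found");
	return 0;
}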
arch/alpha/kernel/core_tsunami.c:204: int cpu = smp_processor_id();
arch/alpha/kernel/core_tsunami.c:207: mcheck_taken(cpu) = 0;
arch/alpha/kernel/core_tsunami.c:208: mcheck_expected(cpu) = 1;
arch/alpha/kernel/core_tsunami.c:212: mcheck_expected(cpu) = 0;
arch/alpha/kernel/core_tsunami.c:213: probe_result = !mcheck_taken(cpu);
arch/alpha/kernel/core_tsunami.c:214: mcheck_taken(cpu) = 0;
arch/alpha/kernel/err_common.c:242: cdl_process_console_data_log(int cpu, struct percpu_struct *pcpu)
arch/alpha/kernel/err_common.c:250: err_print_prefix, cpu);
arch/alpha/kernel/err_common.c:260: err_print_prefix, err, cpu);
arch/alpha/kernel/err_common.c:267: unsigned long cpu;
arch/alpha/kernel/err_common.c:269: for (cpu = 0; cpu < hwrpb->nr_processors; cpu++) {
arch/alpha/kernel/err_common.c:272: + cpu * hwrpb->processor_size);
arch/alpha/kernel/err_common.c:274: cdl_process_console_data_log(cpu, pcpu);
arch/alpha/kernel/irq.c:50: int cpu = last_cpu + 1;
arch/alpha/kernel/irq.c:59: while (!cpu_possible(cpu) ||
arch/alpha/kernel/irq.c:60: !cpumask_test_cpu(cpu, irq_default_affinity))
arch/alpha/kernel/irq.c:61: cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
arch/alpha/kernel/irq.c:62: last_cpu = cpu;
arch/alpha/kernel/irq.c:64: irq_data_update_affinity(data, cpumask_of(cpu));
arch/alpha/kernel/irq.c:65: chip->irq_set_affinity(data, cpumask_of(cpu), false);
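The arch/alpha/kernel/irq.c lines above are a round-robin IRQ spreader: starting after the last CPU chosen, skip CPUs that are not possible or not in the default affinity mask, wrapping at NR_CPUS. A small standalone sketch of the same selection loop; the two predicate stubs are made up for the demo:

#include <stdio.h>

#define NR_CPUS 8

static int last_cpu;

/* Illustrative stand-ins for cpu_possible() / irq_default_affinity. */
static int cpu_possible(int cpu)    { return cpu < 4; }
static int cpu_in_affinity(int cpu) { return cpu != 2; }

static int pick_next_cpu(void)
{
	int cpu = last_cpu + 1;

	while (!cpu_possible(cpu) || !cpu_in_affinity(cpu))
		cpu = (cpu < NR_CPUS - 1 ? cpu + 1 : 0);
	last_cpu = cpu;
	return cpu;
}

int main(void)
{
	/* With the stubs above this cycles 1, 3, 0, 1, 3, 0, ... */
	for (int i = 0; i < 6; i++)
		printf("irq -> cpu%d\n", pick_next_cpu());
	return 0;
}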
arch/alpha/kernel/irq_alpha.c:147: int cpu = smp_processor_id();
arch/alpha/kernel/irq_alpha.c:148: mcheck_expected(cpu) = 0;
arch/alpha/kernel/irq_alpha.c:149: mcheck_taken(cpu) = 1;
arch/alpha/kernel/osf_sys.c:753: struct percpu_struct *cpu;
arch/alpha/kernel/osf_sys.c:785: cpu = (struct percpu_struct*)
arch/alpha/kernel/osf_sys.c:787: w = cpu->type;
arch/alpha/kernel/perf_event.c:582: struct percpu_struct *cpu;
arch/alpha/kernel/perf_event.c:586: cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
arch/alpha/kernel/perf_event.c:587: cputype = cpu->type & 0xffffffff;
arch/alpha/kernel/perf_event.c:777: int cpu;
arch/alpha/kernel/perf_event.c:784: cpu = smp_processor_id();
arch/alpha/kernel/perf_event.c:790: pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);
arch/alpha/kernel/proto.h:208: #define mcheck_expected(cpu) (cpu_data[cpu].mcheck_expected)
arch/alpha/kernel/proto.h:209: #define mcheck_taken(cpu) (cpu_data[cpu].mcheck_taken)
arch/alpha/kernel/proto.h:210: #define mcheck_extra(cpu) (cpu_data[cpu].mcheck_extra)
arch/alpha/kernel/proto.h:219: #define mcheck_expected(cpu) (*((void)(cpu), &__mcheck_info.expected))
arch/alpha/kernel/proto.h:220: #define mcheck_taken(cpu) (*((void)(cpu), &__mcheck_info.taken))
arch/alpha/kernel/proto.h:221: #define mcheck_extra(cpu) (*((void)(cpu), &__mcheck_info.extra))
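The two proto.h variants above are worth a second look: on SMP the mcheck flags live in cpu_data[cpu], while the UP fallback must still accept (and ignore) a cpu argument and still be assignable. The comma expression evaluates cpu purely to silence unused-variable warnings, and the outer dereference keeps the whole macro an lvalue, so both variants can sit on either side of an assignment. A compilable demonstration of the UP form:

#include <stdio.h>

struct mcheck_info { unsigned char expected, taken; };

static struct mcheck_info __mcheck_info;

/* (void)(cpu) "uses" the argument; *(..., &field) stays an lvalue. */
#define mcheck_expected(cpu) (*((void)(cpu), &__mcheck_info.expected))
#define mcheck_taken(cpu)    (*((void)(cpu), &__mcheck_info.taken))

int main(void)
{
	int cpu = 0;

	mcheck_expected(cpu) = 1;    /* assignable, like the SMP variant */
	mcheck_taken(cpu) = 0;
	printf("expected=%d taken=%d\n",
	       mcheck_expected(cpu), mcheck_taken(cpu));
	return 0;
}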
arch/alpha/kernel/setup.c:1017: struct percpu_struct *cpu;
arch/alpha/kernel/setup.c:1022: cpu = (struct percpu_struct *)
arch/alpha/kernel/setup.c:1024: if ((cpu->flags & 0x1cc) == 0x1cc)
arch/alpha/kernel/setup.c:1056: struct percpu_struct *cpu = slot;
arch/alpha/kernel/setup.c:1064: cpu_index = (unsigned) (cpu->type - 1);
arch/alpha/kernel/setup.c:1070: cpu->type, &systype_name, &sysvariation_name);
arch/alpha/kernel/setup.c:1072: nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
arch/alpha/kernel/setup.c:1099: cpu_name, cpu->variation, cpu->revision,
arch/alpha/kernel/setup.c:1100: (char*)cpu->serial_no,
arch/alpha/kernel/setup.c:395: struct cpu *p = kzalloc_obj(*p);
arch/alpha/kernel/setup.c:425: struct percpu_struct *cpu;
arch/alpha/kernel/setup.c:540: cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
arch/alpha/kernel/setup.c:543: cpu->type, &type_name, &var_name);
arch/alpha/kernel/setup.c:549: cpu->type);
arch/alpha/kernel/setup.c:612: determine_cpu_caches(cpu->type);
arch/alpha/kernel/setup.c:706: get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
arch/alpha/kernel/setup.c:826: cpu &= 0xffffffff; /* make it usable */
arch/alpha/kernel/setup.c:838: if (vec == &eb164_mv && cpu == EV56_CPU)
arch/alpha/kernel/setup.c:909: get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
arch/alpha/kernel/setup.c:938: cpu &= 0xffffffff; /* make it usable */
arch/alpha/kernel/setup.c:949: if (eb164_indices[member] == 0 && cpu == EV56_CPU)
arch/alpha/kernel/smp.c:200: struct percpu_struct *cpu;
arch/alpha/kernel/smp.c:205: cpu = (struct percpu_struct *)
arch/alpha/kernel/smp.c:216: *(unsigned int *)&cpu->ipc_buffer[0] = len;
arch/alpha/kernel/smp.c:217: cp1 = (char *) &cpu->ipc_buffer[1];
arch/alpha/kernel/smp.c:241: struct percpu_struct *cpu;
arch/alpha/kernel/smp.c:254: cpu = (struct percpu_struct *)
arch/alpha/kernel/smp.c:261: mycpu, i, cpu->halt_reason, cpu->flags));
arch/alpha/kernel/smp.c:263: cnt = cpu->ipc_buffer[0] >> 32;
arch/alpha/kernel/smp.c:267: cp1 = (char *) &cpu->ipc_buffer[1];
arch/alpha/kernel/smp.c:292: struct percpu_struct *cpu;
arch/alpha/kernel/smp.c:296: cpu = (struct percpu_struct *)
arch/alpha/kernel/smp.c:300: hwpcb = (struct pcb_struct *) cpu->hwpcb;
arch/alpha/kernel/smp.c:335: cpu->flags |= 0x22; /* turn on Context Valid and Restart Capable */
arch/alpha/kernel/smp.c:336: cpu->flags &= ~1; /* turn off Bootstrap In Progress */
arch/alpha/kernel/smp.c:344: if (cpu->flags & 1)
arch/alpha/kernel/smp.c:403: struct percpu_struct *cpubase, *cpu;
arch/alpha/kernel/smp.c:422: cpu = (struct percpu_struct *)
arch/alpha/kernel/smp.c:424: if ((cpu->flags & 0x1cc) == 0x1cc) {
arch/alpha/kernel/smp.c:428: cpu->pal_revision = boot_cpu_palrev;
arch/alpha/kernel/smp.c:432: i, cpu->flags, cpu->type));
arch/alpha/kernel/smp.c:434: i, cpu->pal_revision));
arch/alpha/kernel/smp.c:453: current_thread_info()->cpu = boot_cpuid;
arch/alpha/kernel/smp.c:472: __cpu_up(unsigned int cpu, struct task_struct *tidle)
arch/alpha/kernel/smp.c:474: smp_boot_one_cpu(cpu, tidle);
arch/alpha/kernel/smp.c:476: return cpu_online(cpu) ? 0 : -ENOSYS;
arch/alpha/kernel/smp.c:482: int cpu;
arch/alpha/kernel/smp.c:485: for(cpu = 0; cpu < NR_CPUS; cpu++)
arch/alpha/kernel/smp.c:486: if (cpu_online(cpu))
arch/alpha/kernel/smp.c:487: bogosum += cpu_data[cpu].loops_per_jiffy;
arch/alpha/kernel/smp.c:561: arch_smp_send_reschedule(int cpu)
arch/alpha/kernel/smp.c:564: if (cpu == hard_smp_processor_id())
arch/alpha/kernel/smp.c:568: send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
arch/alpha/kernel/smp.c:589: void arch_send_call_function_single_ipi(int cpu)
arch/alpha/kernel/smp.c:591: send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
arch/alpha/kernel/smp.c:642: int cpu, this_cpu = smp_processor_id();
arch/alpha/kernel/smp.c:643: for (cpu = 0; cpu < NR_CPUS; cpu++) {
arch/alpha/kernel/smp.c:644: if (!cpu_online(cpu) || cpu == this_cpu)
arch/alpha/kernel/smp.c:646: if (mm->context[cpu])
arch/alpha/kernel/smp.c:647: mm->context[cpu] = 0;
arch/alpha/kernel/smp.c:689: int cpu, this_cpu = smp_processor_id();
arch/alpha/kernel/smp.c:690: for (cpu = 0; cpu < NR_CPUS; cpu++) {
arch/alpha/kernel/smp.c:691: if (!cpu_online(cpu) || cpu == this_cpu)
arch/alpha/kernel/smp.c:693: if (mm->context[cpu])
arch/alpha/kernel/smp.c:694: mm->context[cpu] = 0;
arch/alpha/kernel/smp.c:743: int cpu, this_cpu = smp_processor_id();
arch/alpha/kernel/smp.c:744: for (cpu = 0; cpu < NR_CPUS; cpu++) {
arch/alpha/kernel/smp.c:745: if (!cpu_online(cpu) || cpu == this_cpu)
arch/alpha/kernel/smp.c:747: if (mm->context[cpu])
arch/alpha/kernel/smp.c:748: mm->context[cpu] = 0;
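The three near-identical loops above in alpha's smp.c flush paths share one trick: rather than interrupting every other CPU, they zero the mm's per-CPU context word, so the next context switch on that CPU sees 0 and allocates a fresh ASN via __get_new_mm_context(). A small sketch of the loop, with a made-up NR_CPUS, online array, and mm layout:

#include <stdio.h>

#define NR_CPUS 4

struct mm { unsigned long context[NR_CPUS]; };

static void invalidate_other_cpus(struct mm *mm, int this_cpu,
				  const int *online)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!online[cpu] || cpu == this_cpu)
			continue;
		if (mm->context[cpu])
			mm->context[cpu] = 0;  /* force new ASN at next switch */
	}
}

int main(void)
{
	int online[NR_CPUS] = { 1, 1, 0, 1 };
	struct mm mm = { .context = { 0x101, 0x102, 0x103, 0x104 } };

	invalidate_other_cpus(&mm, 0, online);
	/* cpu0 (self) and cpu2 (offline) keep their values; 1 and 3 are zeroed */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: %#lx\n", cpu, mm.context[cpu]);
	return 0;
}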
arch/alpha/kernel/sys_dp264.c:138: int cpu;
arch/alpha/kernel/sys_dp264.c:140: for (cpu = 0; cpu < 4; cpu++) {
arch/alpha/kernel/sys_dp264.c:141: unsigned long aff = cpu_irq_affinity[cpu];
arch/alpha/kernel/sys_dp264.c:142: if (cpumask_test_cpu(cpu, &affinity))
arch/alpha/kernel/sys_dp264.c:146: cpu_irq_affinity[cpu] = aff;
arch/alpha/kernel/sys_sx164.c:131: struct percpu_struct *cpu = (struct percpu_struct*)
arch/alpha/kernel/sys_sx164.c:136: && (cpu->pal_revision & 0xffff) <= 0x117) {
arch/alpha/kernel/sys_titan.c:137: int cpu;
arch/alpha/kernel/sys_titan.c:139: for (cpu = 0; cpu < 4; cpu++) {
arch/alpha/kernel/sys_titan.c:140: if (cpumask_test_cpu(cpu, &affinity))
arch/alpha/kernel/sys_titan.c:141: titan_cpu_irq_affinity[cpu] |= 1UL << irq;
arch/alpha/kernel/sys_titan.c:143: titan_cpu_irq_affinity[cpu] &= ~(1UL << irq);
arch/alpha/kernel/time.c:118: int cpu = smp_processor_id();
arch/alpha/kernel/time.c:119: struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
arch/alpha/kernel/time.c:125: .cpumask = cpumask_of(cpu),
arch/alpha/kernel/time.c:175: int cpu = smp_processor_id();
arch/alpha/kernel/time.c:176: struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
arch/alpha/kernel/time.c:185: int cpu = smp_processor_id();
arch/alpha/kernel/time.c:186: struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
arch/alpha/kernel/time.c:192: .cpumask = cpumask_of(cpu),
arch/alpha/kernel/time.c:313: struct percpu_struct *cpu;
arch/alpha/kernel/time.c:316: cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset);
arch/alpha/kernel/time.c:317: index = cpu->type & 0xffffffff;
arch/alpha/kernel/time.c:93: int cpu = smp_processor_id();
arch/alpha/kernel/time.c:94: struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
arch/alpha/mm/tlbflush.c:102: for (cpu = 0; cpu < NR_CPUS; cpu++) {
arch/alpha/mm/tlbflush.c:103: if (!cpu_online(cpu) || cpu == this_cpu)
arch/alpha/mm/tlbflush.c:105: if (READ_ONCE(mm->context[cpu]))
arch/alpha/mm/tlbflush.c:106: WRITE_ONCE(mm->context[cpu], 0);
arch/alpha/mm/tlbflush.c:99: int cpu, this_cpu;
arch/arc/include/asm/mmu_context.h:135: const int cpu = smp_processor_id();
arch/arc/include/asm/mmu_context.h:148: cpumask_set_cpu(cpu, mm_cpumask(next));
arch/arc/include/asm/mmu_context.h:49: #define asid_mm(mm, cpu) mm->context.asid[cpu]
arch/arc/include/asm/mmu_context.h:50: #define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
arch/arc/include/asm/mmu_context.h:53: #define asid_cpu(cpu) per_cpu(asid_cache, cpu)
arch/arc/include/asm/mmu_context.h:61: const unsigned int cpu = smp_processor_id();
arch/arc/include/asm/mmu_context.h:76: if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
arch/arc/include/asm/mmu_context.h:80: if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
arch/arc/include/asm/mmu_context.h:89: if (!asid_cpu(cpu))
arch/arc/include/asm/mmu_context.h:90: asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
arch/arc/include/asm/mmu_context.h:94: asid_mm(mm, cpu) = asid_cpu(cpu);
arch/arc/include/asm/mmu_context.h:97: mmu_setup_asid(mm, hw_pid(mm, cpu));
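The ARC allocator above uses a different encoding from Alpha's: asid_mm() caches the ASID together with generation ("cycle") bits, and the validity test is a single XOR against the per-CPU asid_cache masked to the cycle bits. A single-CPU sketch of that test and the rollover path, with illustrative bit widths:

#include <stdio.h>

#define ASID_BITS   8
#define ASID_MASK   ((1U << ASID_BITS) - 1)     /* like MM_CTXT_ASID_MASK  */
#define CYCLE_MASK  (~ASID_MASK)                /* like MM_CTXT_CYCLE_MASK */
#define FIRST_CYCLE (1U << ASID_BITS)           /* like MM_CTXT_FIRST_CYCLE */
#define NO_ASID     0U                          /* like MM_CTXT_NO_ASID    */

static unsigned asid_cache = FIRST_CYCLE;       /* per-CPU in the kernel */

static unsigned get_mmu_context(unsigned mm_asid)
{
	/* Same cycle as the allocator: the cached ASID is still valid. */
	if (!((mm_asid ^ asid_cache) & CYCLE_MASK))
		return mm_asid;

	if (!(++asid_cache & ASID_MASK)) {      /* 8-bit ASID wrapped */
		printf("rollover: flush whole TLB, new cycle\n");
		if (!asid_cache)                /* full 32-bit wrap */
			asid_cache = FIRST_CYCLE;
	}
	return asid_cache;                      /* new ASID for this mm */
}

int main(void)
{
	unsigned a = get_mmu_context(NO_ASID);  /* fresh mm: allocate */
	printf("allocated %#x, revalidated %#x\n", a, get_mmu_context(a));
	return 0;
}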
arch/arc/include/asm/smp.h:15: #define raw_smp_processor_id() (current_thread_info()->cpu)
arch/arc/include/asm/smp.h:23: extern void arch_send_call_function_single_ipi(int cpu);
arch/arc/include/asm/smp.h:41: extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
arch/arc/include/asm/smp.h:59: void (*init_per_cpu)(int cpu);
arch/arc/include/asm/smp.h:60: void (*cpu_kick)(int cpu, unsigned long pc);
arch/arc/include/asm/smp.h:61: void (*ipi_send)(int cpu);
arch/arc/include/asm/thread_info.h:42: int cpu; /* current CPU */
arch/arc/include/asm/thread_info.h:55: .cpu = 0, \
arch/arc/kernel/mcip.c:103: if (unlikely(cpu == raw_smp_processor_id())) {
arch/arc/kernel/mcip.c:116: __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
arch/arc/kernel/mcip.c:119: __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
arch/arc/kernel/mcip.c:126: unsigned int cpu, c;
arch/arc/kernel/mcip.c:139: cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
arch/arc/kernel/mcip.c:147: c = __ffs(cpu); /* 0,1,2,3 */
arch/arc/kernel/mcip.c:149: cpu &= ~(1U << c);
arch/arc/kernel/mcip.c:150: } while (cpu);
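In the mcip.c IPI receive path above, the READBACK register reports all pending senders as a bitmask (1, 2, 4, 8...), and the handler acks them lowest-bit-first. A standalone sketch of that decode loop; ffs() from <strings.h> plays the role of the kernel's __ffs(), and as in the kernel the caller must guarantee at least one bit is set:

#include <stdio.h>
#include <strings.h>

static void ipi_ack_and_handle(unsigned c)
{
	printf("IPI from cpu%u\n", c);
}

static void decode_pending(unsigned pending)   /* pending != 0 */
{
	do {
		unsigned c = ffs(pending) - 1;  /* lowest set bit: 0,1,2,3 */
		ipi_ack_and_handle(c);
		pending &= ~(1U << c);          /* peel it off, go again */
	} while (pending);
}

int main(void)
{
	decode_pending(0x5);   /* CPUs 0 and 2 both signalled */
	return 0;
}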
arch/arc/kernel/mcip.c:26: static void mcip_update_gfrc_halt_mask(int cpu)
arch/arc/kernel/mcip.c:45: gfrc_halt_mask |= BIT(cpu);
arch/arc/kernel/mcip.c:51: static void mcip_update_debug_halt_mask(int cpu)
arch/arc/kernel/mcip.c:66: mcip_mask |= BIT(cpu);
arch/arc/kernel/mcip.c:79: static void mcip_setup_per_cpu(int cpu)
arch/arc/kernel/mcip.c:85: smp_ipi_irq_setup(cpu, IPI_IRQ);
arch/arc/kernel/mcip.c:86: smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
arch/arc/kernel/mcip.c:90: mcip_update_gfrc_halt_mask(cpu);
arch/arc/kernel/mcip.c:94: mcip_update_debug_halt_mask(cpu);
arch/arc/kernel/mcip.c:97: static void mcip_ipi_send(int cpu)
arch/arc/kernel/setup.c:642: static DEFINE_PER_CPU(struct cpu, cpu_topology);
arch/arc/kernel/setup.c:646: int cpu;
arch/arc/kernel/setup.c:648: for_each_present_cpu(cpu)
arch/arc/kernel/setup.c:649: register_cpu(&per_cpu(cpu_topology, cpu), cpu);
arch/arc/kernel/smp.c:131: static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
arch/arc/kernel/smp.c:133: BUG_ON(cpu == 0);
arch/arc/kernel/smp.c:135: __boot_write(wake_flag, cpu);
arch/arc/kernel/smp.c:138: void arc_platform_smp_wait_to_boot(int cpu)
arch/arc/kernel/smp.c:144: while (__boot_read(wake_flag) != cpu)
arch/arc/kernel/smp.c:163: unsigned int cpu = smp_processor_id();
arch/arc/kernel/smp.c:171: cpumask_set_cpu(cpu, mm_cpumask(mm));
arch/arc/kernel/smp.c:175: plat_smp_ops.init_per_cpu(cpu);
arch/arc/kernel/smp.c:178: machine_desc->init_per_cpu(cpu);
arch/arc/kernel/smp.c:180: notify_cpu_starting(cpu);
arch/arc/kernel/smp.c:181: set_cpu_online(cpu, true);
arch/arc/kernel/smp.c:183: pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
arch/arc/kernel/smp.c:199: int __cpu_up(unsigned int cpu, struct task_struct *idle)
arch/arc/kernel/smp.c:205: pr_info("Idle Task [%d] %p", cpu, idle);
arch/arc/kernel/smp.c:206: pr_info("Trying to bring up CPU%u ...\n", cpu);
arch/arc/kernel/smp.c:209: plat_smp_ops.cpu_kick(cpu,
arch/arc/kernel/smp.c:212: arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);
arch/arc/kernel/smp.c:217: if (cpu_online(cpu))
arch/arc/kernel/smp.c:221: if (!cpu_online(cpu)) {
arch/arc/kernel/smp.c:222: pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
arch/arc/kernel/smp.c:250: static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
arch/arc/kernel/smp.c:252: unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
arch/arc/kernel/smp.c:256: pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);
arch/arc/kernel/smp.c:278: plat_smp_ops.ipi_send(cpu);
arch/arc/kernel/smp.c:285: unsigned int cpu;
arch/arc/kernel/smp.c:287: for_each_cpu(cpu, callmap)
arch/arc/kernel/smp.c:288: ipi_send_msg_one(cpu, msg);
arch/arc/kernel/smp.c:291: void arch_smp_send_reschedule(int cpu)
arch/arc/kernel/smp.c:293: ipi_send_msg_one(cpu, IPI_RESCHEDULE);
arch/arc/kernel/smp.c:304: void arch_send_call_function_single_ipi(int cpu)
arch/arc/kernel/smp.c:306: ipi_send_msg_one(cpu, IPI_CALL_FUNC);
arch/arc/kernel/smp.c:389: int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
arch/arc/kernel/smp.c:391: int *dev = per_cpu_ptr(&ipi_dev, cpu);
arch/arc/kernel/smp.c:398: if (!cpu) {
arch/arc/mm/cache.c:1069: unsigned int __maybe_unused cpu = smp_processor_id();
arch/arc/mm/cache.c:1071: if (!cpu)
arch/arc/mm/tlb.c:211: const unsigned int cpu = smp_processor_id();
arch/arc/mm/tlb.c:235: if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
arch/arc/mm/tlb.c:237: tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
arch/arc/mm/tlb.c:280: const unsigned int cpu = smp_processor_id();
arch/arc/mm/tlb.c:288: if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
arch/arc/mm/tlb.c:289: tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
arch/arc/mm/tlb.c:540: unsigned int cpu;
arch/arc/mm/tlb.c:545: cpu = smp_processor_id();
arch/arc/mm/tlb.c:547: if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
arch/arc/mm/tlb.c:548: unsigned int asid = hw_pid(vma->vm_mm, cpu);
arch/arm/common/bL_switcher.c:306: static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
arch/arm/common/bL_switcher.c:311: cpu, "kswitcher_%d");
arch/arm/common/bL_switcher.c:313: pr_err("%s failed for CPU %d\n", __func__, cpu);
arch/arm/common/bL_switcher.c:340: int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
arch/arm/common/bL_switcher.c:346: if (cpu >= ARRAY_SIZE(bL_threads)) {
arch/arm/common/bL_switcher.c:347: pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
arch/arm/common/bL_switcher.c:351: t = &bL_threads[cpu];
arch/arm/common/bL_switcher.c:420: unsigned int cpu, cluster, mask;
arch/arm/common/bL_switcher.c:426: cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
arch/arm/common/bL_switcher.c:432: if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
arch/arm/common/bL_switcher.c:482: cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
arch/arm/common/bL_switcher.c:492: bL_gic_id[cpu][cluster] = gic_id;
arch/arm/common/bL_switcher.c:494: cpu, cluster, gic_id);
arch/arm/common/bL_switcher.c:515: int cpu;
arch/arm/common/bL_switcher.c:521: for_each_online_cpu(cpu) {
arch/arm/common/bL_switcher.c:522: int pairing = bL_switcher_cpu_pairing[cpu];
arch/arm/common/bL_switcher.c:525: if ((mpidr == cpu_logical_map(cpu)) ||
arch/arm/common/bL_switcher.c:527: return cpu;
arch/arm/common/bL_switcher.c:552: int cpu, ret;
arch/arm/common/bL_switcher.c:574: for_each_online_cpu(cpu) {
arch/arm/common/bL_switcher.c:575: struct bL_thread *t = &bL_threads[cpu];
arch/arm/common/bL_switcher.c:580: t->task = bL_switcher_thread_create(cpu, t);
arch/arm/common/bL_switcher.c:602: unsigned int cpu, cluster;
arch/arm/common/bL_switcher.c:627: for_each_online_cpu(cpu) {
arch/arm/common/bL_switcher.c:628: t = &bL_threads[cpu];
arch/arm/common/bL_switcher.c:635: cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
arch/arm/common/bL_switcher.c:636: if (cluster == bL_switcher_cpu_original_cluster[cpu])
arch/arm/common/bL_switcher.c:639: t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
arch/arm/common/bL_switcher.c:640: task = bL_switcher_thread_create(cpu, t);
arch/arm/common/bL_switcher.c:644: cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
arch/arm/common/bL_switcher.c:645: if (cluster == bL_switcher_cpu_original_cluster[cpu])
arch/arm/common/bL_switcher.c:650: __func__, cpu);
arch/arm/common/bL_switcher.c:652: __func__, bL_switcher_cpu_pairing[cpu]);
arch/arm/common/bL_switcher.c:653: cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
arch/arm/common/bL_switcher.c:753: static int bL_switcher_cpu_pre(unsigned int cpu)
arch/arm/common/bL_switcher.c:760: pairing = bL_switcher_cpu_pairing[cpu];
arch/arm/common/bL_switcher_dummy_if.c:22: unsigned int cpu, cluster;
arch/arm/common/bL_switcher_dummy_if.c:39: cpu = val[0] - '0';
arch/arm/common/bL_switcher_dummy_if.c:41: ret = bL_switch_request(cpu, cluster);
arch/arm/common/mcpm_entry.c:107: if (i == cpu)
arch/arm/common/mcpm_entry.c:111: cpustate = c->cpus[i].cpu;
arch/arm/common/mcpm_entry.c:116: sync_cache_r(&c->cpus[i].cpu);
arch/arm/common/mcpm_entry.c:143: void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
arch/arm/common/mcpm_entry.c:146: mcpm_entry_vectors[cluster][cpu] = val;
arch/arm/common/mcpm_entry.c:147: sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
arch/arm/common/mcpm_entry.c:152: void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
arch/arm/common/mcpm_entry.c:155: unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
arch/arm/common/mcpm_entry.c:195: int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
arch/arm/common/mcpm_entry.c:200: pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
arch/arm/common/mcpm_entry.c:212: cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
arch/arm/common/mcpm_entry.c:215: mcpm_cpu_use_count[cluster][cpu]++;
arch/arm/common/mcpm_entry.c:224: BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
arch/arm/common/mcpm_entry.c:225: mcpm_cpu_use_count[cluster][cpu] != 2);
arch/arm/common/mcpm_entry.c:230: ret = platform_ops->cpu_powerup(cpu, cluster);
arch/arm/common/mcpm_entry.c:241: unsigned int mpidr, cpu, cluster;
arch/arm/common/mcpm_entry.c:246: cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/common/mcpm_entry.c:248: pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
arch/arm/common/mcpm_entry.c:255: __mcpm_cpu_going_down(cpu, cluster);
arch/arm/common/mcpm_entry.c:259: mcpm_cpu_use_count[cluster][cpu]--;
arch/arm/common/mcpm_entry.c:260: BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
arch/arm/common/mcpm_entry.c:261: mcpm_cpu_use_count[cluster][cpu] != 1);
arch/arm/common/mcpm_entry.c:262: cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
arch/arm/common/mcpm_entry.c:265: if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
arch/arm/common/mcpm_entry.c:266: platform_ops->cpu_powerdown_prepare(cpu, cluster);
arch/arm/common/mcpm_entry.c:273: platform_ops->cpu_powerdown_prepare(cpu, cluster);
arch/arm/common/mcpm_entry.c:286: __mcpm_cpu_down(cpu, cluster);
arch/arm/common/mcpm_entry.c:308: int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
arch/arm/common/mcpm_entry.c:315: ret = platform_ops->wait_for_powerdown(cpu, cluster);
arch/arm/common/mcpm_entry.c:318: __func__, cpu, cluster, ret);
arch/arm/common/mcpm_entry.c:331: unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/common/mcpm_entry.c:334: platform_ops->cpu_suspend_prepare(cpu, cluster);
arch/arm/common/mcpm_entry.c:34: static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
arch/arm/common/mcpm_entry.c:342: unsigned int mpidr, cpu, cluster;
arch/arm/common/mcpm_entry.c:350: cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/common/mcpm_entry.c:355: cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
arch/arm/common/mcpm_entry.c:36: mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
arch/arm/common/mcpm_entry.c:361: mcpm_cpu_use_count[cluster][cpu] = 1;
arch/arm/common/mcpm_entry.c:363: platform_ops->cpu_is_up(cpu, cluster);
arch/arm/common/mcpm_entry.c:37: sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
arch/arm/common/mcpm_entry.c:377: unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/common/mcpm_entry.c:381: mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp);
arch/arm/common/mcpm_entry.c:384: __mcpm_cpu_going_down(cpu, cluster);
arch/arm/common/mcpm_entry.c:385: BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
arch/arm/common/mcpm_entry.c:388: __mcpm_cpu_down(cpu, cluster);
arch/arm/common/mcpm_entry.c:439: mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
arch/arm/common/mcpm_entry.c:445: mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
arch/arm/common/mcpm_entry.c:47: static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
arch/arm/common/mcpm_entry.c:50: mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
arch/arm/common/mcpm_entry.c:51: sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
arch/arm/common/mcpm_entry.c:82: static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
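The mcpm_entry.c hits above center on mcpm_cpu_use_count[cluster][cpu]: power_up increments it (a power_up racing an unfinished powerdown legitimately sees a count of 2, hence the BUG_ON pairs), and power_down only turns the hardware off for the call that drives it to 0. A single-cluster sketch of just that use counting, with the locking and the cluster-level "last man" handling deliberately omitted:

#include <assert.h>
#include <stdio.h>

#define NR_CPUS 4

static int use_count[NR_CPUS];

static void cpu_power_up(int cpu)
{
	int was_down = !use_count[cpu];

	use_count[cpu]++;
	assert(use_count[cpu] == 1 || use_count[cpu] == 2);
	if (was_down)
		printf("cpu%d: power up hardware\n", cpu);
}

static void cpu_power_down(int cpu)
{
	use_count[cpu]--;
	assert(use_count[cpu] == 0 || use_count[cpu] == 1);
	if (!use_count[cpu])
		printf("cpu%d: power down hardware\n", cpu);
	else
		printf("cpu%d: powerdown skipped, re-up pending\n", cpu);
}

int main(void)
{
	cpu_power_up(1);     /* first boot: really powers up        */
	cpu_power_up(1);     /* racing re-up while going down: 2    */
	cpu_power_down(1);   /* count 2 -> 1: core stays on         */
	cpu_power_down(1);   /* count 1 -> 0: really powers off     */
	return 0;
}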
arch/arm/common/mcpm_platsmp.c:19: static void cpu_to_pcpu(unsigned int cpu,
arch/arm/common/mcpm_platsmp.c:24: mpidr = cpu_logical_map(cpu);
arch/arm/common/mcpm_platsmp.c:29: static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/common/mcpm_platsmp.c:34: cpu_to_pcpu(cpu, &pcpu, &pcluster);
arch/arm/common/mcpm_platsmp.c:37: __func__, cpu, pcpu, pcluster);
arch/arm/common/mcpm_platsmp.c:44: arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/common/mcpm_platsmp.c:49: static void mcpm_secondary_init(unsigned int cpu)
arch/arm/common/mcpm_platsmp.c:56: static int mcpm_cpu_kill(unsigned int cpu)
arch/arm/common/mcpm_platsmp.c:60: cpu_to_pcpu(cpu, &pcpu, &pcluster);
arch/arm/common/mcpm_platsmp.c:65: static bool mcpm_cpu_can_disable(unsigned int cpu)
arch/arm/common/mcpm_platsmp.c:71: static void mcpm_cpu_die(unsigned int cpu)
arch/arm/include/asm/bL_switcher.h:17: int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
arch/arm/include/asm/bL_switcher.h:20: static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
arch/arm/include/asm/bL_switcher.h:22: return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
arch/arm/include/asm/cpuidle.h:37: int (*init)(struct device_node *, int cpu);
arch/arm/include/asm/cpuidle.h:51: extern int arm_cpuidle_init(int cpu);
arch/arm/include/asm/firmware.h:32: int (*set_cpu_boot_addr)(int cpu, unsigned long boot_addr);
arch/arm/include/asm/firmware.h:36: int (*get_cpu_boot_addr)(int cpu, unsigned long *boot_addr);
arch/arm/include/asm/firmware.h:40: int (*cpu_boot)(int cpu);
arch/arm/include/asm/fpstate.h:41: __u32 cpu;
arch/arm/include/asm/mcpm.h:132: int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
arch/arm/include/asm/mcpm.h:219: int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
arch/arm/include/asm/mcpm.h:221: void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
arch/arm/include/asm/mcpm.h:222: void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
arch/arm/include/asm/mcpm.h:226: void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
arch/arm/include/asm/mcpm.h:228: int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
arch/arm/include/asm/mcpm.h:287: s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
arch/arm/include/asm/mcpm.h:44: void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
arch/arm/include/asm/mcpm.h:51: void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
arch/arm/include/asm/mcpm.h:84: int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
arch/arm/include/asm/mmu_context.h:121: unsigned int cpu = smp_processor_id();
arch/arm/include/asm/mmu_context.h:130: !cpumask_test_cpu(cpu, mm_cpumask(next)))
arch/arm/include/asm/mmu_context.h:133: if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
arch/arm/include/asm/mmu_context.h:136: cpumask_clear_cpu(cpu, mm_cpumask(prev));
arch/arm/include/asm/proc-fns.h:118: unsigned int cpu = smp_processor_id();
arch/arm/include/asm/proc-fns.h:119: *cpu_vtable[cpu] = *p;
arch/arm/include/asm/proc-fns.h:120: WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
arch/arm/include/asm/proc-fns.h:122: WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
arch/arm/include/asm/smp.h:18: #define raw_smp_processor_id() (current_thread_info()->cpu)
arch/arm/include/asm/smp.h:67: static inline void __cpu_die(unsigned int cpu) { }
arch/arm/include/asm/smp.h:69: extern void arch_send_call_function_single_ipi(int cpu);
arch/arm/include/asm/smp.h:73: extern int register_ipi_completion(struct completion *completion, int cpu);
arch/arm/include/asm/smp.h:89: void (*smp_secondary_init)(unsigned int cpu);
arch/arm/include/asm/smp.h:94: int (*smp_boot_secondary)(unsigned int cpu, struct task_struct *idle);
arch/arm/include/asm/smp.h:96: int (*cpu_kill)(unsigned int cpu);
arch/arm/include/asm/smp.h:97: void (*cpu_die)(unsigned int cpu);
arch/arm/include/asm/smp.h:98: bool (*cpu_can_disable)(unsigned int cpu);
arch/arm/include/asm/smp.h:99: int (*cpu_disable)(unsigned int cpu);
arch/arm/include/asm/smp_plat.h:112: extern int platform_can_hotplug_cpu(unsigned int cpu);
arch/arm/include/asm/smp_plat.h:114: static inline int platform_can_hotplug_cpu(unsigned int cpu)
arch/arm/include/asm/smp_plat.h:36: static inline unsigned int smp_cpuid_part(int cpu)
arch/arm/include/asm/smp_plat.h:38: struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu);
arch/arm/include/asm/smp_plat.h:73: #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
arch/arm/include/asm/smp_plat.h:82: int cpu;
arch/arm/include/asm/smp_plat.h:83: for (cpu = 0; cpu < nr_cpu_ids; cpu++)
arch/arm/include/asm/smp_plat.h:84: if (cpu_logical_map(cpu) == mpidr)
arch/arm/include/asm/smp_plat.h:85: return cpu;
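smp_plat.h above pairs a forward map (cpu_logical_map(cpu) yields the hardware MPIDR of a logical CPU) with a linear reverse scan for the logical index of a given MPIDR. A compilable sketch with made-up MPIDR values:

#include <stdio.h>

#define NR_CPUS 4

/* Forward map: logical CPU number -> hardware MPIDR (values invented). */
static unsigned __cpu_logical_map[NR_CPUS] = { 0x100, 0x101, 0x000, 0x001 };
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]

static int get_logical_index(unsigned mpidr)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_logical_map(cpu) == mpidr)
			return cpu;
	return -1;   /* the kernel returns -EINVAL here */
}

int main(void)
{
	printf("mpidr 0x001 -> cpu%d\n", get_logical_index(0x001));
	printf("mpidr 0x202 -> cpu%d\n", get_logical_index(0x202));
	return 0;
}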
arch/arm/include/asm/thread_info.h:66: __u32 cpu; /* cpu */
arch/arm/kernel/asm-offsets.c:50: DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
arch/arm/kernel/asm-offsets.c:59: DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu));
arch/arm/kernel/cacheinfo.c:114: int early_cache_level(unsigned int cpu)
arch/arm/kernel/cacheinfo.c:116: struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/arm/kernel/cacheinfo.c:121: int init_cache_level(unsigned int cpu)
arch/arm/kernel/cacheinfo.c:124: struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/arm/kernel/cacheinfo.c:132: fw_level = of_find_last_cache_level(cpu);
arch/arm/kernel/cacheinfo.c:149: int populate_cache_leaves(unsigned int cpu)
arch/arm/kernel/cacheinfo.c:153: struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/arm/kernel/cpuidle.c:108: cpuidle_ops[cpu] = *ops; /* structure copy */
arch/arm/kernel/cpuidle.c:133: int __init arm_cpuidle_init(int cpu)
arch/arm/kernel/cpuidle.c:135: struct device_node *cpu_node = of_cpu_device_node_get(cpu);
arch/arm/kernel/cpuidle.c:141: ret = arm_cpuidle_read_ops(cpu_node, cpu);
arch/arm/kernel/cpuidle.c:143: ret = cpuidle_ops[cpu].init(cpu_node, cpu);
arch/arm/kernel/cpuidle.c:47: int cpu = smp_processor_id();
arch/arm/kernel/cpuidle.c:49: return cpuidle_ops[cpu].suspend(index);
arch/arm/kernel/cpuidle.c:86: static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
arch/arm/kernel/devtree.c:109: of_node_put(cpu);
arch/arm/kernel/devtree.c:133: of_node_put(cpu);
arch/arm/kernel/devtree.c:140: found_method = set_smp_ops_by_method(cpu);
arch/arm/kernel/devtree.c:167: bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
arch/arm/kernel/devtree.c:169: return phys_id == cpu_logical_map(cpu);
arch/arm/kernel/devtree.c:73: struct device_node *cpu, *cpus;
arch/arm/kernel/devtree.c:85: for_each_of_cpu_node(cpu) {
arch/arm/kernel/devtree.c:86: u32 hwid = of_get_cpu_hwid(cpu, 0);
arch/arm/kernel/devtree.c:88: pr_debug(" * %pOF...\n", cpu);
arch/arm/kernel/devtree.c:95: of_node_put(cpu);
arch/arm/kernel/hw_breakpoint.c:1003: int cpu = smp_processor_id();
arch/arm/kernel/hw_breakpoint.c:1006: instr, cpu);
arch/arm/kernel/hw_breakpoint.c:1009: cpumask_set_cpu(cpu, &debug_err_mask);
arch/arm/kernel/hw_breakpoint.c:1047: static void reset_ctrl_regs(unsigned int cpu)
arch/arm/kernel/hw_breakpoint.c:1088: pr_warn_once("CPU %d debug is powered down!\n", cpu);
arch/arm/kernel/hw_breakpoint.c:1089: cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
arch/arm/kernel/hw_breakpoint.c:1108: if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
arch/arm/kernel/hw_breakpoint.c:1109: pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
arch/arm/kernel/hw_breakpoint.c:1128: if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
arch/arm/kernel/hw_breakpoint.c:1129: pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
arch/arm/kernel/hw_breakpoint.c:1139: cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
arch/arm/kernel/hw_breakpoint.c:1142: static int dbg_reset_online(unsigned int cpu)
arch/arm/kernel/hw_breakpoint.c:1145: reset_ctrl_regs(cpu);
arch/arm/kernel/irq.c:57: int cpu;
arch/arm/kernel/irq.c:59: for_each_possible_cpu(cpu) {
arch/arm/kernel/irq.c:70: per_cpu(irq_stack_ptr, cpu) = &stack[THREAD_SIZE];
arch/arm/kernel/machine_kexec.c:105: int cpu, this_cpu = raw_smp_processor_id();
arch/arm/kernel/machine_kexec.c:111: for_each_online_cpu(cpu) {
arch/arm/kernel/machine_kexec.c:112: if (cpu == this_cpu)
arch/arm/kernel/machine_kexec.c:115: csd = &per_cpu(cpu_stop_csd, cpu);
arch/arm/kernel/machine_kexec.c:116: smp_call_function_single_async(cpu, csd);
arch/arm/kernel/psci_smp.c:101: pr_info("CPU%d killed.\n", cpu);
arch/arm/kernel/psci_smp.c:110: cpu, err);
arch/arm/kernel/psci_smp.c:45: static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/kernel/psci_smp.c:49: return psci_ops.cpu_on(cpu_logical_map(cpu),
arch/arm/kernel/psci_smp.c:54: return psci_ops.cpu_on(cpu_logical_map(cpu),
arch/arm/kernel/psci_smp.c:61: static int psci_cpu_disable(unsigned int cpu)
arch/arm/kernel/psci_smp.c:68: if (psci_tos_resident_on(cpu))
arch/arm/kernel/psci_smp.c:74: static void psci_cpu_die(unsigned int cpu)
arch/arm/kernel/psci_smp.c:83: panic("psci: cpu %d failed to shutdown\n", cpu);
arch/arm/kernel/psci_smp.c:86: static int psci_cpu_kill(unsigned int cpu)
arch/arm/kernel/psci_smp.c:99: err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
arch/arm/kernel/setup.c:534: unsigned int cpu = smp_processor_id();
arch/arm/kernel/setup.c:535: struct stack *stk = &stacks[cpu];
arch/arm/kernel/setup.c:537: if (cpu >= NR_CPUS) {
arch/arm/kernel/setup.c:538: pr_crit("CPU%u: bad primary CPU number\n", cpu);
arch/arm/kernel/setup.c:546: set_my_cpu_offset(per_cpu_offset(cpu));
arch/arm/kernel/setup.c:600: u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/kernel/setup.c:602: cpu_logical_map(0) = cpu;
arch/arm/kernel/setup.c:604: cpu_logical_map(i) = i == cpu ? 0 : i;
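The last three setup.c lines above are the boot-CPU remap: whichever hardware core the firmware booted on becomes logical CPU 0, and the identity mapping is patched so no two logical slots alias. A sketch of the remap, using the standard ARM MPIDR layout of 8 bits per affinity level:

#include <stdio.h>

#define NR_CPUS 4
#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1u << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

static unsigned cpu_logical_map[NR_CPUS];

int main(void)
{
	unsigned mpidr = 0x002;   /* pretend firmware booted hardware CPU 2 */
	unsigned cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map[0] = cpu;                /* boot core is logical 0 */
	for (unsigned i = 1; i < NR_CPUS; i++)
		cpu_logical_map[i] = (i == cpu) ? 0 : i;   /* swap, not shift */

	for (unsigned i = 0; i < NR_CPUS; i++)
		printf("logical cpu%u -> hw cpu%u\n", i, cpu_logical_map[i]);
	return 0;
}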
arch/arm/kernel/smp.c:108: static int secondary_biglittle_prepare(unsigned int cpu)
arch/arm/kernel/smp.c:110: if (!cpu_vtable[cpu])
arch/arm/kernel/smp.c:111: cpu_vtable[cpu] = kzalloc_obj(*cpu_vtable[cpu]);
arch/arm/kernel/smp.c:113: return cpu_vtable[cpu] ? 0 : -ENOMEM;
arch/arm/kernel/smp.c:121: static int secondary_biglittle_prepare(unsigned int cpu)
arch/arm/kernel/smp.c:131: int __cpu_up(unsigned int cpu, struct task_struct *idle)
arch/arm/kernel/smp.c:138: ret = secondary_biglittle_prepare(cpu);
arch/arm/kernel/smp.c:161: ret = smp_ops.smp_boot_secondary(cpu, idle);
arch/arm/kernel/smp.c:170: if (!cpu_online(cpu)) {
arch/arm/kernel/smp.c:171: pr_crit("CPU%u: failed to come online\n", cpu);
arch/arm/kernel/smp.c:175: pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
arch/arm/kernel/smp.c:206: static int platform_cpu_kill(unsigned int cpu)
arch/arm/kernel/smp.c:209: return smp_ops.cpu_kill(cpu);
arch/arm/kernel/smp.c:213: static int platform_cpu_disable(unsigned int cpu)
arch/arm/kernel/smp.c:216: return smp_ops.cpu_disable(cpu);
arch/arm/kernel/smp.c:221: int platform_can_hotplug_cpu(unsigned int cpu)
arch/arm/kernel/smp.c:228: return smp_ops.cpu_can_disable(cpu);
arch/arm/kernel/smp.c:235: return cpu != 0;
arch/arm/kernel/smp.c:238: static void ipi_teardown(int cpu)
arch/arm/kernel/smp.c:254: unsigned int cpu = smp_processor_id();
arch/arm/kernel/smp.c:257: ret = platform_cpu_disable(cpu);
arch/arm/kernel/smp.c:262: remove_cpu_topology(cpu);
arch/arm/kernel/smp.c:269: set_cpu_online(cpu, false);
arch/arm/kernel/smp.c:270: ipi_teardown(cpu);
arch/arm/kernel/smp.c:294: void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
arch/arm/kernel/smp.c:296: pr_debug("CPU%u: shutdown\n", cpu);
arch/arm/kernel/smp.c:298: clear_tasks_mm_cpumask(cpu);
arch/arm/kernel/smp.c:306: if (!platform_cpu_kill(cpu))
arch/arm/kernel/smp.c:307: pr_err("CPU%u: unable to kill\n", cpu);
arch/arm/kernel/smp.c:320: unsigned int cpu = smp_processor_id();
arch/arm/kernel/smp.c:362: smp_ops.cpu_die(cpu);
arch/arm/kernel/smp.c:365: cpu);
arch/arm/kernel/smp.c:413: unsigned int cpu;
arch/arm/kernel/smp.c:432: cpu = smp_processor_id();
arch/arm/kernel/smp.c:435: cpumask_set_cpu(cpu, mm_cpumask(mm));
arch/arm/kernel/smp.c:442: pr_debug("CPU%u: Booted secondary processor\n", cpu);
arch/arm/kernel/smp.c:450: smp_ops.smp_secondary_init(cpu);
arch/arm/kernel/smp.c:452: notify_cpu_starting(cpu);
arch/arm/kernel/smp.c:454: ipi_setup(cpu);
arch/arm/kernel/smp.c:458: smp_store_cpu_info(cpu);
arch/arm/kernel/smp.c:465: set_cpu_online(cpu, true);
arch/arm/kernel/smp.c:483: int cpu;
arch/arm/kernel/smp.c:486: for_each_online_cpu(cpu)
arch/arm/kernel/smp.c:487: bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
arch/arm/kernel/smp.c:548: unsigned int cpu, i;
arch/arm/kernel/smp.c:557: for_each_online_cpu(cpu)
arch/arm/kernel/smp.c:558: seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
arch/arm/kernel/smp.c:574: void arch_send_call_function_single_ipi(int cpu)
arch/arm/kernel/smp.c:576: smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
arch/arm/kernel/smp.c:599: static void ipi_cpu_stop(unsigned int cpu)
arch/arm/kernel/smp.c:605: pr_crit("CPU%u: stopping\n", cpu);
arch/arm/kernel/smp.c:610: set_cpu_online(cpu, false);
arch/arm/kernel/smp.c:620: int register_ipi_completion(struct completion *completion, int cpu)
arch/arm/kernel/smp.c:622: per_cpu(cpu_completion, cpu) = completion;
arch/arm/kernel/smp.c:626: static void ipi_complete(unsigned int cpu)
arch/arm/kernel/smp.c:628: complete(per_cpu(cpu_completion, cpu));
arch/arm/kernel/smp.c:636: unsigned int cpu = smp_processor_id();
arch/arm/kernel/smp.c:660: ipi_cpu_stop(cpu);
arch/arm/kernel/smp.c:670: ipi_complete(cpu);
arch/arm/kernel/smp.c:681: cpu, ipinr);
arch/arm/kernel/smp.c:713: static void ipi_setup(int cpu)
arch/arm/kernel/smp.c:748: void arch_smp_send_reschedule(int cpu)
arch/arm/kernel/smp.c:750: smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
arch/arm/kernel/smp.c:799: int cpu, first = cpumask_first(cpus);
arch/arm/kernel/smp.c:806: for_each_cpu(cpu, cpus) {
arch/arm/kernel/smp.c:807: per_cpu(l_p_j_ref, cpu) =
arch/arm/kernel/smp.c:808: per_cpu(cpu_data, cpu).loops_per_jiffy;
arch/arm/kernel/smp.c:809: per_cpu(l_p_j_ref_freq, cpu) = freq->old;
arch/arm/kernel/smp.c:826: for_each_cpu(cpu, cpus)
arch/arm/kernel/smp.c:827: per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
arch/arm/kernel/smp.c:86: static void ipi_setup(int cpu);
arch/arm/kernel/smp_scu.c:107: int scu_cpu_power_enable(void __iomem *scu_base, unsigned int cpu)
arch/arm/kernel/smp_scu.c:109: return scu_set_power_mode_internal(scu_base, cpu, SCU_PM_NORMAL);
arch/arm/kernel/smp_scu.c:115: int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(logical_cpu), 0);
arch/arm/kernel/smp_scu.c:117: if (cpu > 3)
arch/arm/kernel/smp_scu.c:120: val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu);
arch/arm/kernel/smp_scu.c:78: int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(logical_cpu), 0);
arch/arm/kernel/smp_scu.c:80: if (mode > 3 || mode == 1 || cpu > 3)
arch/arm/kernel/smp_scu.c:83: val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu);
arch/arm/kernel/smp_scu.c:86: writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);
arch/arm/kernel/smp_twd.c:221: int cpu = smp_processor_id();
arch/arm/kernel/smp_twd.c:227: if (per_cpu(percpu_setup_called, cpu)) {
arch/arm/kernel/smp_twd.c:233: per_cpu(percpu_setup_called, cpu) = true;
arch/arm/kernel/smp_twd.c:252: clk->cpumask = cpumask_of(cpu);
arch/arm/kernel/smp_twd.c:259: static int twd_timer_starting_cpu(unsigned int cpu)
arch/arm/kernel/smp_twd.c:265: static int twd_timer_dying_cpu(unsigned int cpu)
arch/arm/kernel/topology.c:102: cn = of_get_cpu_node(cpu, NULL);
arch/arm/kernel/topology.c:104: pr_err("missing device node for CPU %d\n", cpu);
arch/arm/kernel/topology.c:108: if (topology_parse_cpu_capacity(cn, cpu)) {
arch/arm/kernel/topology.c:138: cpu_capacity(cpu) = capacity;
arch/arm/kernel/topology.c:164: static void update_cpu_capacity(unsigned int cpu)
arch/arm/kernel/topology.c:166: if (!cpu_capacity(cpu) || cap_from_dt)
arch/arm/kernel/topology.c:169: topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
arch/arm/kernel/topology.c:172: cpu, topology_get_cpu_scale(cpu));
arch/arm/kernel/topology.c:72: #define cpu_capacity(cpu) __cpu_capacity[cpu]
arch/arm/kernel/topology.c:92: int cpu = 0;
arch/arm/kernel/topology.c:97: for_each_possible_cpu(cpu) {
arch/arm/kernel/traps.c:309: int cpu;
arch/arm/kernel/traps.c:316: cpu = smp_processor_id();
arch/arm/kernel/traps.c:318: if (cpu == die_owner)
arch/arm/kernel/traps.c:324: die_owner = cpu;
arch/arm/kernel/traps.c:909: int cpu;
arch/arm/kernel/traps.c:911: for_each_possible_cpu(cpu) {
arch/arm/kernel/traps.c:915: per_cpu(overflow_stack_ptr, cpu) = &stack[OVERFLOW_STACK_SIZE];
arch/arm/mach-actions/platsmp.c:40: static int s500_wakeup_secondary(unsigned int cpu)
arch/arm/mach-actions/platsmp.c:44: if (cpu > 3)
arch/arm/mach-actions/platsmp.c:48: switch (cpu) {
arch/arm/mach-actions/platsmp.c:69: timer_base_addr + OWL_CPU1_ADDR + (cpu - 1) * 4);
arch/arm/mach-actions/platsmp.c:71: timer_base_addr + OWL_CPU1_FLAG + (cpu - 1) * 4);
arch/arm/mach-actions/platsmp.c:79: static int s500_smp_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-actions/platsmp.c:83: ret = s500_wakeup_secondary(cpu);
arch/arm/mach-actions/platsmp.c:89: smp_send_reschedule(cpu);
arch/arm/mach-actions/platsmp.c:91: writel(0, timer_base_addr + OWL_CPU1_ADDR + (cpu - 1) * 4);
arch/arm/mach-actions/platsmp.c:92: writel(0, timer_base_addr + OWL_CPU1_FLAG + (cpu - 1) * 4);
arch/arm/mach-alpine/alpine_cpu_pm.c:18: #define AL_SYSFAB_POWER_CONTROL(cpu) (0x2000 + (cpu)*0x100 + 0x20)
arch/arm/mach-alpine/platsmp.c:17: static int alpine_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-alpine/platsmp.c:28: return alpine_cpu_wakeup(cpu_logical_map(cpu), (uint32_t)addr);
arch/arm/mach-aspeed/platsmp.c:15: static int aspeed_g6_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-aspeed/platsmp.c:27: writel_relaxed((0xABBAAB00 | (cpu & 0xff)), base + BOOT_SIG);
arch/arm/mach-axxia/platsmp.c:31: static int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-axxia/platsmp.c:48: tmp &= ~(1 << cpu);
arch/arm/mach-axxia/platsmp.c:57: int cpu;
arch/arm/mach-axxia/platsmp.c:63: for_each_possible_cpu(cpu) {
arch/arm/mach-axxia/platsmp.c:67: np = of_get_cpu_node(cpu, NULL);
arch/arm/mach-axxia/platsmp.c:74: set_cpu_present(cpu, true);
arch/arm/mach-bcm/bcm63xx_pmb.c:127: unsigned int cpu, addr;
arch/arm/mach-bcm/bcm63xx_pmb.c:132: ret = bcm63xx_pmb_get_resources(dn, &base, &cpu, &addr);
arch/arm/mach-bcm/bcm63xx_pmb.c:137: WARN_ON(cpu > 1);
arch/arm/mach-bcm/bcm63xx_pmb.c:149: if (ctrl & CPU_RESET_N(cpu)) {
arch/arm/mach-bcm/bcm63xx_pmb.c:150: pr_info("PMB: CPU%d is already powered on\n", cpu);
arch/arm/mach-bcm/bcm63xx_pmb.c:156: ret = bpcm_rd(base, addr, ARM_PWR_CONTROL(cpu), &val);
arch/arm/mach-bcm/bcm63xx_pmb.c:162: ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
arch/arm/mach-bcm/bcm63xx_pmb.c:169: ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
arch/arm/mach-bcm/bcm63xx_pmb.c:176: ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val);
arch/arm/mach-bcm/bcm63xx_pmb.c:183: ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val);
arch/arm/mach-bcm/bcm63xx_pmb.c:189: ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
arch/arm/mach-bcm/bcm63xx_pmb.c:196: ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
arch/arm/mach-bcm/bcm63xx_pmb.c:203: ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val);
arch/arm/mach-bcm/bcm63xx_pmb.c:208: ctrl |= CPU_RESET_N(cpu);
arch/arm/mach-bcm/bcm63xx_pmb.c:88: unsigned int *cpu,
arch/arm/mach-bcm/bcm63xx_pmb.c:94: *cpu = of_get_cpu_hwid(dn, 0);
arch/arm/mach-bcm/bcm63xx_pmb.c:95: if (*cpu == ~0U) {
arch/arm/mach-bcm/bcm63xx_smp.c:106: static int bcm63138_smp_boot_secondary(unsigned int cpu,
arch/arm/mach-bcm/bcm63xx_smp.c:129: dn = of_get_cpu_node(cpu, NULL);
arch/arm/mach-bcm/bcm63xx_smp.c:131: pr_err("SMP: failed to locate secondary CPU%d node\n", cpu);
arch/arm/mach-bcm/platsmp-brcmstb.c:101: static int pwr_ctrl_wait_tmout(unsigned int cpu, u32 set, u32 mask)
arch/arm/mach-bcm/platsmp-brcmstb.c:107: tmp = pwr_ctrl_rd(cpu) & mask;
arch/arm/mach-bcm/platsmp-brcmstb.c:112: tmp = pwr_ctrl_rd(cpu) & mask;
arch/arm/mach-bcm/platsmp-brcmstb.c:119: static void cpu_rst_cfg_set(u32 cpu, int set)
arch/arm/mach-bcm/platsmp-brcmstb.c:124: val |= BIT(cpu_logical_map(cpu));
arch/arm/mach-bcm/platsmp-brcmstb.c:126: val &= ~BIT(cpu_logical_map(cpu));
arch/arm/mach-bcm/platsmp-brcmstb.c:130: static void cpu_set_boot_addr(u32 cpu, unsigned long boot_addr)
arch/arm/mach-bcm/platsmp-brcmstb.c:132: const int reg_ofs = cpu_logical_map(cpu) * 8;
arch/arm/mach-bcm/platsmp-brcmstb.c:137: static void brcmstb_cpu_boot(u32 cpu)
arch/arm/mach-bcm/platsmp-brcmstb.c:140: per_cpu_sw_state_wr(cpu, 1);
arch/arm/mach-bcm/platsmp-brcmstb.c:146: cpu_set_boot_addr(cpu, __pa_symbol(secondary_startup));
arch/arm/mach-bcm/platsmp-brcmstb.c:149: cpu_rst_cfg_set(cpu, 0);
arch/arm/mach-bcm/platsmp-brcmstb.c:152: static void brcmstb_cpu_power_on(u32 cpu)
arch/arm/mach-bcm/platsmp-brcmstb.c:158: pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, 0xffffff00);
arch/arm/mach-bcm/platsmp-brcmstb.c:159: pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1);
arch/arm/mach-bcm/platsmp-brcmstb.c:160: pwr_ctrl_set(cpu, ZONE_RESERVED_1_MASK, -1);
arch/arm/mach-bcm/platsmp-brcmstb.c:162: pwr_ctrl_set(cpu, ZONE_MAN_MEM_PWR_MASK, -1);
arch/arm/mach-bcm/platsmp-brcmstb.c:164: if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_MEM_PWR_STATE_MASK))
arch/arm/mach-bcm/platsmp-brcmstb.c:167: pwr_ctrl_set(cpu, ZONE_MAN_CLKEN_MASK, -1);
arch/arm/mach-bcm/platsmp-brcmstb.c:169: if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_DPG_PWR_STATE_MASK))
arch/arm/mach-bcm/platsmp-brcmstb.c:172: pwr_ctrl_clr(cpu, ZONE_MAN_ISO_CNTL_MASK, -1);
arch/arm/mach-bcm/platsmp-brcmstb.c:173: pwr_ctrl_set(cpu, ZONE_MAN_RESET_CNTL_MASK, -1);
arch/arm/mach-bcm/platsmp-brcmstb.c:176: static int brcmstb_cpu_get_power_state(u32 cpu)
arch/arm/mach-bcm/platsmp-brcmstb.c:178: int tmp = pwr_ctrl_rd(cpu);
arch/arm/mach-bcm/platsmp-brcmstb.c:184: static void brcmstb_cpu_die(u32 cpu)
arch/arm/mach-bcm/platsmp-brcmstb.c:188: per_cpu_sw_state_wr(cpu, 0);
arch/arm/mach-bcm/platsmp-brcmstb.c:198: static int brcmstb_cpu_kill(u32 cpu)
arch/arm/mach-bcm/platsmp-brcmstb.c:206: if (cpu == 0) {
arch/arm/mach-bcm/platsmp-brcmstb.c:211: while (per_cpu_sw_state_rd(cpu))
arch/arm/mach-bcm/platsmp-brcmstb.c:214: pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1);
arch/arm/mach-bcm/platsmp-brcmstb.c:215: pwr_ctrl_clr(cpu, ZONE_MAN_RESET_CNTL_MASK, -1);
arch/arm/mach-bcm/platsmp-brcmstb.c:216: pwr_ctrl_clr(cpu, ZONE_MAN_CLKEN_MASK, -1);
arch/arm/mach-bcm/platsmp-brcmstb.c:217: pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, -1);
arch/arm/mach-bcm/platsmp-brcmstb.c:218: pwr_ctrl_clr(cpu, ZONE_MAN_MEM_PWR_MASK, -1);
arch/arm/mach-bcm/platsmp-brcmstb.c:220: if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_MEM_PWR_STATE_MASK))
arch/arm/mach-bcm/platsmp-brcmstb.c:223: pwr_ctrl_clr(cpu, ZONE_RESERVED_1_MASK, -1);
arch/arm/mach-bcm/platsmp-brcmstb.c:225: if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_DPG_PWR_STATE_MASK))
arch/arm/mach-bcm/platsmp-brcmstb.c:232: cpu_rst_cfg_set(cpu, 1);
arch/arm/mach-bcm/platsmp-brcmstb.c:339: static int brcmstb_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-bcm/platsmp-brcmstb.c:346: if (brcmstb_cpu_get_power_state(cpu) == 0)
arch/arm/mach-bcm/platsmp-brcmstb.c:347: brcmstb_cpu_power_on(cpu);
arch/arm/mach-bcm/platsmp-brcmstb.c:349: brcmstb_cpu_boot(cpu);
arch/arm/mach-bcm/platsmp-brcmstb.c:59: static int per_cpu_sw_state_rd(u32 cpu)
arch/arm/mach-bcm/platsmp-brcmstb.c:61: sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
arch/arm/mach-bcm/platsmp-brcmstb.c:62: return per_cpu(per_cpu_sw_state, cpu);
arch/arm/mach-bcm/platsmp-brcmstb.c:65: static void per_cpu_sw_state_wr(u32 cpu, int val)
arch/arm/mach-bcm/platsmp-brcmstb.c:68: per_cpu(per_cpu_sw_state, cpu) = val;
arch/arm/mach-bcm/platsmp-brcmstb.c:69: sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
arch/arm/mach-bcm/platsmp-brcmstb.c:72: static inline void per_cpu_sw_state_wr(u32 cpu, int val) { }
arch/arm/mach-bcm/platsmp-brcmstb.c:75: static void __iomem *pwr_ctrl_get_base(u32 cpu)
arch/arm/mach-bcm/platsmp-brcmstb.c:78: base += (cpu_logical_map(cpu) * 4);
arch/arm/mach-bcm/platsmp-brcmstb.c:82: static u32 pwr_ctrl_rd(u32 cpu)
arch/arm/mach-bcm/platsmp-brcmstb.c:84: void __iomem *base = pwr_ctrl_get_base(cpu);
arch/arm/mach-bcm/platsmp-brcmstb.c:88: static void pwr_ctrl_set(unsigned int cpu, u32 val, u32 mask)
arch/arm/mach-bcm/platsmp-brcmstb.c:90: void __iomem *base = pwr_ctrl_get_base(cpu);
arch/arm/mach-bcm/platsmp-brcmstb.c:94: static void pwr_ctrl_clr(unsigned int cpu, u32 val, u32 mask)
arch/arm/mach-bcm/platsmp-brcmstb.c:96: void __iomem *base = pwr_ctrl_get_base(cpu);
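The per_cpu_sw_state helpers in the platsmp-brcmstb.c listing above wrap a per-CPU liveness flag in explicit cache maintenance (sync_cache_w after the write, sync_cache_r before the read), because the peer core may be running with its cache off during power transitions. As a loose userspace analogue, C11 release/acquire atomics give the same publish/observe ordering between threads, though not, of course, the cache flushing itself:

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static _Atomic int per_cpu_sw_state[NR_CPUS];

static void state_wr(int cpu, int val)
{
	atomic_store_explicit(&per_cpu_sw_state[cpu], val,
			      memory_order_release);     /* ~ sync_cache_w */
}

static int state_rd(int cpu)
{
	return atomic_load_explicit(&per_cpu_sw_state[cpu],
				    memory_order_acquire); /* ~ sync_cache_r */
}

int main(void)
{
	state_wr(1, 1);          /* boot path marks CPU 1 alive           */
	while (!state_rd(1))     /* kill path spins until the flag drops, */
		;                /* here it is already set                */
	printf("cpu1 state: %d\n", state_rd(1));
	return 0;
}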
arch/arm/mach-bcm/platsmp.c
103
const u32 secondary_boot_addr = secondary_boot_addr_for(cpu);
arch/arm/mach-bcm/platsmp.c
111
pr_warn("unable to ioremap SKU-ROM LUT register for cpu %u\n", cpu);
arch/arm/mach-bcm/platsmp.c
158
static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-bcm/platsmp.c
166
const u32 secondary_boot_addr = secondary_boot_addr_for(cpu);
arch/arm/mach-bcm/platsmp.c
168
cpu_id = cpu_logical_map(cpu);
arch/arm/mach-bcm/platsmp.c
216
#define CDC_CMD_REG(cpu) (CDC_CMD_OFFSET + 4*(cpu))
arch/arm/mach-bcm/platsmp.c
223
static int bcm23550_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-bcm/platsmp.c
249
ret = kona_boot_secondary(cpu, idle);
arch/arm/mach-bcm/platsmp.c
256
writel_relaxed(CDC_CMD, cdc_base + CDC_CMD_REG(cpu));
arch/arm/mach-bcm/platsmp.c
264
static int nsp_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-bcm/platsmp.c
272
ret = nsp_write_lut(cpu);
arch/arm/mach-bcm/platsmp.c
279
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-bcm/platsmp.c
285
static int bcm2836_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-bcm/platsmp.c
307
intc_base + LOCAL_MAILBOX3_SET0 + 16 * cpu);
arch/arm/mach-bcm/platsmp.c
78
static u32 secondary_boot_addr_for(unsigned int cpu)
arch/arm/mach-bcm/platsmp.c
81
struct device_node *cpu_node = of_get_cpu_node(cpu, NULL);
arch/arm/mach-bcm/platsmp.c
84
pr_err("Failed to find device tree node for CPU%u\n", cpu);
arch/arm/mach-bcm/platsmp.c
92
cpu);
arch/arm/mach-bcm/platsmp.c
99
static int nsp_write_lut(unsigned int cpu)
arch/arm/mach-berlin/platsmp.c
100
static void berlin_cpu_die(unsigned int cpu)
arch/arm/mach-berlin/platsmp.c
107
static int berlin_cpu_kill(unsigned int cpu)
arch/arm/mach-berlin/platsmp.c
112
val &= ~BIT(cpu_logical_map(cpu));
arch/arm/mach-berlin/platsmp.c
33
static inline void berlin_perform_reset_cpu(unsigned int cpu)
arch/arm/mach-berlin/platsmp.c
38
val &= ~BIT(cpu_logical_map(cpu));
arch/arm/mach-berlin/platsmp.c
40
val |= BIT(cpu_logical_map(cpu));
arch/arm/mach-berlin/platsmp.c
44
static int berlin_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-berlin/platsmp.c
53
berlin_perform_reset_cpu(cpu);
arch/arm/mach-exynos/common.h
123
void exynos_set_boot_flag(unsigned int cpu, unsigned int mode);
arch/arm/mach-exynos/common.h
124
void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode);
arch/arm/mach-exynos/common.h
137
extern void exynos_cpu_power_down(int cpu);
arch/arm/mach-exynos/common.h
138
extern void exynos_cpu_power_up(int cpu);
arch/arm/mach-exynos/common.h
139
extern int exynos_cpu_power_state(int cpu);
arch/arm/mach-exynos/firmware.c
103
static int exynos_get_cpu_boot_addr(int cpu, unsigned long *boot_addr)
arch/arm/mach-exynos/firmware.c
113
boot_reg += 4 * cpu;
arch/arm/mach-exynos/firmware.c
241
void exynos_set_boot_flag(unsigned int cpu, unsigned int mode)
arch/arm/mach-exynos/firmware.c
245
tmp = readl_relaxed(REG_CPU_STATE_ADDR + cpu * 4);
arch/arm/mach-exynos/firmware.c
251
writel_relaxed(tmp, REG_CPU_STATE_ADDR + cpu * 4);
arch/arm/mach-exynos/firmware.c
254
void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode)
arch/arm/mach-exynos/firmware.c
258
tmp = readl_relaxed(REG_CPU_STATE_ADDR + cpu * 4);
arch/arm/mach-exynos/firmware.c
260
writel_relaxed(tmp, REG_CPU_STATE_ADDR + cpu * 4);
arch/arm/mach-exynos/firmware.c
58
static int exynos_cpu_boot(int cpu)
arch/arm/mach-exynos/firmware.c
76
cpu = 0;
arch/arm/mach-exynos/firmware.c
78
exynos_smc(SMC_CMD_CPU1BOOT, cpu, 0, 0);
arch/arm/mach-exynos/firmware.c
82
static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
arch/arm/mach-exynos/firmware.c
97
boot_reg += 4 * cpu;
arch/arm/mach-exynos/mcpm-exynos.c
116
static void exynos_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
arch/arm/mach-exynos/mcpm-exynos.c
118
unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
arch/arm/mach-exynos/mcpm-exynos.c
120
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
arch/arm/mach-exynos/mcpm-exynos.c
121
BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
arch/arm/mach-exynos/mcpm-exynos.c
163
static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
arch/arm/mach-exynos/mcpm-exynos.c
166
unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
arch/arm/mach-exynos/mcpm-exynos.c
168
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
arch/arm/mach-exynos/mcpm-exynos.c
169
BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
arch/arm/mach-exynos/mcpm-exynos.c
184
static void exynos_cpu_is_up(unsigned int cpu, unsigned int cluster)
arch/arm/mach-exynos/mcpm-exynos.c
187
exynos_cpu_powerup(cpu, cluster);
arch/arm/mach-exynos/mcpm-exynos.c
57
static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
arch/arm/mach-exynos/mcpm-exynos.c
59
unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
arch/arm/mach-exynos/mcpm-exynos.c
62
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
arch/arm/mach-exynos/mcpm-exynos.c
63
if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
arch/arm/mach-exynos/mcpm-exynos.c
93
cpu, cluster);
arch/arm/mach-exynos/mcpm-exynos.c
98
pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
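The MCPM callbacks above flatten (cpu, cluster) into a single PMU index, cpunr = cpu + cluster * EXYNOS5420_CPUS_PER_CLUSTER, guarded by a BUG_ON bounds check. A reduced model of that mapping:

#include <stdio.h>

#define CPUS_PER_CLUSTER 4      /* stands in for EXYNOS5420_CPUS_PER_CLUSTER */
#define NR_CLUSTERS      2

/* Linearize (cpu, cluster) with the same bounds check the BUG_ON enforces. */
static int cpunr(unsigned int cpu, unsigned int cluster)
{
        if (cpu >= CPUS_PER_CLUSTER || cluster >= NR_CLUSTERS)
                return -1;      /* the kernel BUG()s instead of returning */
        return (int)(cpu + cluster * CPUS_PER_CLUSTER);
}

int main(void)
{
        printf("cpu 1 / cluster 1 -> cpunr %d\n", cpunr(1, 1));  /* 5 */
        return 0;
}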
arch/arm/mach-exynos/platsmp.c
103
core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
arch/arm/mach-exynos/platsmp.c
105
pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
arch/arm/mach-exynos/platsmp.c
114
void exynos_cpu_power_up(int cpu)
arch/arm/mach-exynos/platsmp.c
122
EXYNOS_ARM_CORE_CONFIGURATION(cpu));
arch/arm/mach-exynos/platsmp.c
129
int exynos_cpu_power_state(int cpu)
arch/arm/mach-exynos/platsmp.c
131
return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
arch/arm/mach-exynos/platsmp.c
192
static inline void __iomem *cpu_boot_reg(int cpu)
arch/arm/mach-exynos/platsmp.c
200
boot_reg += 4*cpu;
arch/arm/mach-exynos/platsmp.c
252
static void exynos_secondary_init(unsigned int cpu)
arch/arm/mach-exynos/platsmp.c
317
static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-exynos/platsmp.c
320
u32 mpidr = cpu_logical_map(cpu);
arch/arm/mach-exynos/platsmp.c
385
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-exynos/platsmp.c
422
static void exynos_cpu_die(unsigned int cpu)
arch/arm/mach-exynos/platsmp.c
425
u32 mpidr = cpu_logical_map(cpu);
arch/arm/mach-exynos/platsmp.c
430
platform_do_lowpower(cpu, &spurious);
arch/arm/mach-exynos/platsmp.c
439
pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
arch/arm/mach-exynos/platsmp.c
49
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
arch/arm/mach-exynos/platsmp.c
51
u32 mpidr = cpu_logical_map(cpu);
arch/arm/mach-exynos/platsmp.c
87
void exynos_cpu_power_down(int cpu)
arch/arm/mach-exynos/platsmp.c
91
if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
arch/arm/mach-exynos/suspend.c
269
unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-exynos/suspend.c
272
mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);
arch/arm/mach-highbank/sysregs.h
29
int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
arch/arm/mach-highbank/sysregs.h
33
writel_relaxed(1, sregs_base + SREG_CPU_PWR_CTRL(cpu));
arch/arm/mach-highbank/sysregs.h
38
int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
arch/arm/mach-highbank/sysregs.h
42
writel_relaxed(0, sregs_base + SREG_CPU_PWR_CTRL(cpu));
arch/arm/mach-hisi/core.h
11
extern void hi3xxx_cpu_die(unsigned int cpu);
arch/arm/mach-hisi/core.h
12
extern int hi3xxx_cpu_kill(unsigned int cpu);
arch/arm/mach-hisi/core.h
13
extern void hi3xxx_set_cpu(int cpu, bool enable);
arch/arm/mach-hisi/core.h
15
extern void hix5hd2_set_cpu(int cpu, bool enable);
arch/arm/mach-hisi/core.h
16
extern void hix5hd2_cpu_die(unsigned int cpu);
arch/arm/mach-hisi/core.h
18
extern void hip01_set_cpu(int cpu, bool enable);
arch/arm/mach-hisi/core.h
7
extern void hi3xxx_set_cpu_jump(int cpu, void *jump_addr);
arch/arm/mach-hisi/core.h
8
extern int hi3xxx_get_cpu_jump(int cpu);
arch/arm/mach-hisi/hotplug.c
105
val &= ~(CPU0_WFI_MASK_CFG << cpu);
arch/arm/mach-hisi/hotplug.c
111
writel_relaxed(val << cpu, ctrl_base + SCCPURSTDIS);
arch/arm/mach-hisi/hotplug.c
115
val |= (CPU0_WFI_MASK_CFG << cpu);
arch/arm/mach-hisi/hotplug.c
119
writel_relaxed(0x01 << cpu, ctrl_base + SCCPUCOREDIS);
arch/arm/mach-hisi/hotplug.c
121
if ((cpu == 2) || (cpu == 3)) {
arch/arm/mach-hisi/hotplug.c
123
writel_relaxed(CPU2_ISO_CTRL << (cpu - 2),
arch/arm/mach-hisi/hotplug.c
131
writel_relaxed(val << cpu, ctrl_base + SCCPURSTEN);
arch/arm/mach-hisi/hotplug.c
133
if ((cpu == 2) || (cpu == 3)) {
arch/arm/mach-hisi/hotplug.c
135
writel_relaxed(CPU2_ISO_CTRL << (cpu - 2),
arch/arm/mach-hisi/hotplug.c
163
void hi3xxx_set_cpu(int cpu, bool enable)
arch/arm/mach-hisi/hotplug.c
171
set_cpu_hi3620(cpu, enable);
arch/arm/mach-hisi/hotplug.c
190
void hix5hd2_set_cpu(int cpu, bool enable)
arch/arm/mach-hisi/hotplug.c
222
void hip01_set_cpu(int cpu, bool enable)
arch/arm/mach-hisi/hotplug.c
272
void hi3xxx_cpu_die(unsigned int cpu)
arch/arm/mach-hisi/hotplug.c
275
hi3xxx_set_cpu_jump(cpu, phys_to_virt(0));
arch/arm/mach-hisi/hotplug.c
279
panic("cpu %d unexpectedly exit from shutdown\n", cpu);
arch/arm/mach-hisi/hotplug.c
282
int hi3xxx_cpu_kill(unsigned int cpu)
arch/arm/mach-hisi/hotplug.c
286
while (hi3xxx_get_cpu_jump(cpu))
arch/arm/mach-hisi/hotplug.c
289
hi3xxx_set_cpu(cpu, false);
arch/arm/mach-hisi/hotplug.c
293
void hix5hd2_cpu_die(unsigned int cpu)
arch/arm/mach-hisi/hotplug.c
296
hix5hd2_set_cpu(cpu, false);
arch/arm/mach-hisi/hotplug.c
75
static void set_cpu_hi3620(int cpu, bool enable)
arch/arm/mach-hisi/hotplug.c
81
if ((cpu == 2) || (cpu == 3))
arch/arm/mach-hisi/hotplug.c
82
writel_relaxed(CPU2_ISO_CTRL << (cpu - 2),
arch/arm/mach-hisi/hotplug.c
87
writel_relaxed(0x01 << cpu, ctrl_base + SCCPUCOREEN);
arch/arm/mach-hisi/hotplug.c
92
writel_relaxed(val << cpu, ctrl_base + SCCPURSTDIS);
arch/arm/mach-hisi/hotplug.c
95
writel_relaxed(val << cpu, ctrl_base + SCCPURSTEN);
arch/arm/mach-hisi/hotplug.c
98
if ((cpu == 2) || (cpu == 3))
arch/arm/mach-hisi/hotplug.c
99
writel_relaxed(CPU2_ISO_CTRL << (cpu - 2),
arch/arm/mach-hisi/platmcpm.c
100
unsigned int mpidr, cpu, cluster;
arch/arm/mach-hisi/platmcpm.c
105
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-hisi/platmcpm.c
110
if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
arch/arm/mach-hisi/platmcpm.c
115
if (hip04_cpu_table[cluster][cpu])
arch/arm/mach-hisi/platmcpm.c
130
data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
arch/arm/mach-hisi/platmcpm.c
131
CORE_DEBUG_RESET_BIT(cpu);
arch/arm/mach-hisi/platmcpm.c
146
hip04_cpu_table[cluster][cpu]++;
arch/arm/mach-hisi/platmcpm.c
155
unsigned int mpidr, cpu, cluster;
arch/arm/mach-hisi/platmcpm.c
159
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-hisi/platmcpm.c
163
hip04_cpu_table[cluster][cpu]--;
arch/arm/mach-hisi/platmcpm.c
164
if (hip04_cpu_table[cluster][cpu] == 1) {
arch/arm/mach-hisi/platmcpm.c
168
} else if (hip04_cpu_table[cluster][cpu] > 1) {
arch/arm/mach-hisi/platmcpm.c
169
pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
arch/arm/mach-hisi/platmcpm.c
193
unsigned int mpidr, cpu, cluster;
arch/arm/mach-hisi/platmcpm.c
197
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-hisi/platmcpm.c
200
cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
arch/arm/mach-hisi/platmcpm.c
205
if (hip04_cpu_table[cluster][cpu])
arch/arm/mach-hisi/platmcpm.c
209
if (data & CORE_WFI_STATUS(cpu))
arch/arm/mach-hisi/platmcpm.c
218
data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
arch/arm/mach-hisi/platmcpm.c
219
CORE_DEBUG_RESET_BIT(cpu);
arch/arm/mach-hisi/platmcpm.c
224
if (data & CORE_RESET_STATUS(cpu))
arch/arm/mach-hisi/platmcpm.c
249
unsigned int mpidr, cpu, cluster;
arch/arm/mach-hisi/platmcpm.c
252
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-hisi/platmcpm.c
256
cpu >= HIP04_MAX_CPUS_PER_CLUSTER) {
arch/arm/mach-hisi/platmcpm.c
261
hip04_cpu_table[cluster][cpu] = 1;
arch/arm/mach-hisi/platsmp.c
117
static int hix5hd2_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-hisi/platsmp.c
123
hix5hd2_set_cpu(cpu, true);
arch/arm/mach-hisi/platsmp.c
124
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-hisi/platsmp.c
152
static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-hisi/platsmp.c
175
hip01_set_cpu(cpu, true);
arch/arm/mach-hisi/platsmp.c
23
void hi3xxx_set_cpu_jump(int cpu, void *jump_addr)
arch/arm/mach-hisi/platsmp.c
25
cpu = cpu_logical_map(cpu);
arch/arm/mach-hisi/platsmp.c
26
if (!cpu || !ctrl_base)
arch/arm/mach-hisi/platsmp.c
28
writel_relaxed(__pa_symbol(jump_addr), ctrl_base + ((cpu - 1) << 2));
arch/arm/mach-hisi/platsmp.c
31
int hi3xxx_get_cpu_jump(int cpu)
arch/arm/mach-hisi/platsmp.c
33
cpu = cpu_logical_map(cpu);
arch/arm/mach-hisi/platsmp.c
34
if (!cpu || !ctrl_base)
arch/arm/mach-hisi/platsmp.c
36
return readl_relaxed(ctrl_base + ((cpu - 1) << 2));
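hi3xxx keeps one jump-address word per secondary at ctrl_base + (cpu - 1) * 4 (CPU 0 has no slot), and hi3xxx_cpu_kill() at line 286 spins until the dying core clears its word. A userspace model of that handshake over plain memory, names hypothetical:

#include <stdint.h>
#include <stdio.h>

static uint32_t jump_slots[3];  /* secondaries 1..3; CPU 0 has no slot */

static void set_cpu_jump(int cpu, uint32_t addr)
{
        if (cpu >= 1 && cpu <= 3)
                jump_slots[cpu - 1] = addr;     /* (cpu - 1) << 2 in MMIO terms */
}

static uint32_t get_cpu_jump(int cpu)
{
        return (cpu >= 1 && cpu <= 3) ? jump_slots[cpu - 1] : 0;
}

int main(void)
{
        set_cpu_jump(2, 0x40008000);    /* hypothetical entry point */
        /* the kill side polls while this word is non-zero ... */
        printf("cpu2 jump word: 0x%x\n", get_cpu_jump(2));
        set_cpu_jump(2, 0);             /* ... and the die side clears it */
        printf("after clear:   0x%x\n", get_cpu_jump(2));
        return 0;
}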
arch/arm/mach-hisi/platsmp.c
84
static int hi3xxx_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-hisi/platsmp.c
86
hi3xxx_set_cpu(cpu, true);
arch/arm/mach-hisi/platsmp.c
87
hi3xxx_set_cpu_jump(cpu, secondary_startup);
arch/arm/mach-hisi/platsmp.c
88
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-imx/common.h
63
void imx_enable_cpu(int cpu, bool enable);
arch/arm/mach-imx/common.h
64
void imx_set_cpu_jump(int cpu, void *jump_addr);
arch/arm/mach-imx/common.h
65
u32 imx_get_cpu_arg(int cpu);
arch/arm/mach-imx/common.h
66
void imx_set_cpu_arg(int cpu, u32 arg);
arch/arm/mach-imx/common.h
92
void imx_cpu_die(unsigned int cpu);
arch/arm/mach-imx/common.h
93
int imx_cpu_kill(unsigned int cpu);
arch/arm/mach-imx/cpu.c
29
void imx_print_silicon_rev(const char *cpu, int srev)
arch/arm/mach-imx/cpu.c
32
pr_info("CPU identified as %s, unknown revision\n", cpu);
arch/arm/mach-imx/cpu.c
35
cpu, (srev >> 4) & 0xf, srev & 0xf);
arch/arm/mach-imx/hotplug.c
21
void imx_cpu_die(unsigned int cpu)
arch/arm/mach-imx/hotplug.c
29
imx_set_cpu_arg(cpu, ~0);
arch/arm/mach-imx/hotplug.c
35
int imx_cpu_kill(unsigned int cpu)
arch/arm/mach-imx/hotplug.c
39
while (imx_get_cpu_arg(cpu) == 0)
arch/arm/mach-imx/hotplug.c
42
imx_enable_cpu(cpu, false);
arch/arm/mach-imx/hotplug.c
43
imx_set_cpu_arg(cpu, 0);
arch/arm/mach-imx/mmdc.c
100
cpumask_t cpu;
arch/arm/mach-imx/mmdc.c
130
return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu);
arch/arm/mach-imx/mmdc.c
217
static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
arch/arm/mach-imx/mmdc.c
222
if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
arch/arm/mach-imx/mmdc.c
225
target = cpumask_any_but(cpu_online_mask, cpu);
arch/arm/mach-imx/mmdc.c
229
perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
arch/arm/mach-imx/mmdc.c
230
cpumask_set_cpu(target, &pmu_mmdc->cpu);
arch/arm/mach-imx/mmdc.c
289
if (event->cpu < 0) {
arch/arm/mach-imx/mmdc.c
303
event->cpu = cpumask_first(&pmu_mmdc->cpu);
arch/arm/mach-imx/mmdc.c
515
cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);
arch/arm/mach-imx/platsmp.c
123
static int ls1021a_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-imx/platsmp.c
125
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-imx/platsmp.c
43
static int imx_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-imx/platsmp.c
45
imx_set_cpu_jump(cpu, v7_secondary_startup);
arch/arm/mach-imx/platsmp.c
46
imx_enable_cpu(cpu, true);
arch/arm/mach-imx/src.c
122
void imx_enable_cpu(int cpu, bool enable)
arch/arm/mach-imx/src.c
126
cpu = cpu_logical_map(cpu);
arch/arm/mach-imx/src.c
132
mask = 1 << (BP_SRC_A7RCR1_A7_CORE1_ENABLE + cpu - 1);
arch/arm/mach-imx/src.c
137
mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
arch/arm/mach-imx/src.c
140
val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1);
arch/arm/mach-imx/src.c
146
void imx_set_cpu_jump(int cpu, void *jump_addr)
arch/arm/mach-imx/src.c
148
cpu = cpu_logical_map(cpu);
arch/arm/mach-imx/src.c
150
src_base + SRC_GPR1(gpr_v2) + cpu * 8);
arch/arm/mach-imx/src.c
153
u32 imx_get_cpu_arg(int cpu)
arch/arm/mach-imx/src.c
155
cpu = cpu_logical_map(cpu);
arch/arm/mach-imx/src.c
156
return readl_relaxed(src_base + SRC_GPR1(gpr_v2) + cpu * 8 + 4);
arch/arm/mach-imx/src.c
159
void imx_set_cpu_arg(int cpu, u32 arg)
arch/arm/mach-imx/src.c
161
cpu = cpu_logical_map(cpu);
arch/arm/mach-imx/src.c
162
writel_relaxed(arg, src_base + SRC_GPR1(gpr_v2) + cpu * 8 + 4);
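The i.MX SRC lines above park each core's entry point and argument in adjacent general-purpose registers: jump address at cpu * 8, argument at cpu * 8 + 4. A sketch of the pairing, offsets relative to a notional GPR base:

#include <stdint.h>
#include <stdio.h>

static uint32_t jump_offset(int cpu) { return (uint32_t)cpu * 8; }
static uint32_t arg_offset(int cpu)  { return (uint32_t)cpu * 8 + 4; }

int main(void)
{
        for (int cpu = 1; cpu < 4; cpu++)
                printf("cpu%d: jump @ +0x%02x, arg @ +0x%02x\n",
                       cpu, jump_offset(cpu), arg_offset(cpu));
        return 0;
}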
arch/arm/mach-mediatek/platsmp.c
69
static int mtk_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-mediatek/platsmp.c
74
if (!mtk_smp_info->core_keys[cpu-1])
arch/arm/mach-mediatek/platsmp.c
77
writel_relaxed(mtk_smp_info->core_keys[cpu-1],
arch/arm/mach-mediatek/platsmp.c
78
mtk_smp_base + mtk_smp_info->core_regs[cpu-1]);
arch/arm/mach-mediatek/platsmp.c
80
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-meson/platsmp.c
116
static void meson_smp_begin_secondary_boot(unsigned int cpu)
arch/arm/mach-meson/platsmp.c
125
sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu));
arch/arm/mach-meson/platsmp.c
131
scu_cpu_power_enable(scu_base, cpu);
arch/arm/mach-meson/platsmp.c
134
static int meson_smp_finalize_secondary_boot(unsigned int cpu)
arch/arm/mach-meson/platsmp.c
139
while (readl(sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu))) {
arch/arm/mach-meson/platsmp.c
142
cpu);
arch/arm/mach-meson/platsmp.c
148
sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu));
arch/arm/mach-meson/platsmp.c
150
meson_smp_set_cpu_ctrl(cpu, true);
arch/arm/mach-meson/platsmp.c
155
static int meson8_smp_boot_secondary(unsigned int cpu,
arch/arm/mach-meson/platsmp.c
161
rstc = meson_smp_get_core_reset(cpu);
arch/arm/mach-meson/platsmp.c
163
pr_err("Couldn't get the reset controller for CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
167
meson_smp_begin_secondary_boot(cpu);
arch/arm/mach-meson/platsmp.c
172
pr_err("Failed to assert CPU%d reset\n", cpu);
arch/arm/mach-meson/platsmp.c
178
MESON_CPU_PWR_A9_CNTL1_M(cpu), 0);
arch/arm/mach-meson/platsmp.c
180
pr_err("Couldn't wake up CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
187
ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu),
arch/arm/mach-meson/platsmp.c
190
pr_err("Error when disabling isolation of CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
197
pr_err("Failed to de-assert CPU%d reset\n", cpu);
arch/arm/mach-meson/platsmp.c
201
ret = meson_smp_finalize_secondary_boot(cpu);
arch/arm/mach-meson/platsmp.c
211
static int meson8b_smp_boot_secondary(unsigned int cpu,
arch/arm/mach-meson/platsmp.c
218
rstc = meson_smp_get_core_reset(cpu);
arch/arm/mach-meson/platsmp.c
220
pr_err("Couldn't get the reset controller for CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
224
meson_smp_begin_secondary_boot(cpu);
arch/arm/mach-meson/platsmp.c
228
MESON_CPU_PWR_A9_CNTL0_M(cpu), 0);
arch/arm/mach-meson/platsmp.c
230
pr_err("Couldn't power up CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
239
pr_err("Failed to assert CPU%d reset\n", cpu);
arch/arm/mach-meson/platsmp.c
245
MESON_CPU_PWR_A9_MEM_PD0_M(cpu), 0);
arch/arm/mach-meson/platsmp.c
247
pr_err("Couldn't power up the memory for CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
253
MESON_CPU_PWR_A9_CNTL1_M(cpu), 0);
arch/arm/mach-meson/platsmp.c
255
pr_err("Couldn't wake up CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
262
val & MESON_CPU_PWR_A9_CNTL1_ST(cpu),
arch/arm/mach-meson/platsmp.c
265
pr_err("Timeout while polling PMU for CPU%d status\n", cpu);
arch/arm/mach-meson/platsmp.c
270
ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu),
arch/arm/mach-meson/platsmp.c
273
pr_err("Error when disabling isolation of CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
280
pr_err("Failed to de-assert CPU%d reset\n", cpu);
arch/arm/mach-meson/platsmp.c
284
ret = meson_smp_finalize_secondary_boot(cpu);
arch/arm/mach-meson/platsmp.c
295
static void meson8_smp_cpu_die(unsigned int cpu)
arch/arm/mach-meson/platsmp.c
297
meson_smp_set_cpu_ctrl(cpu, false);
arch/arm/mach-meson/platsmp.c
310
static int meson8_smp_cpu_kill(unsigned int cpu)
arch/arm/mach-meson/platsmp.c
317
power_mode = scu_get_cpu_power_mode(scu_base, cpu);
arch/arm/mach-meson/platsmp.c
327
cpu);
arch/arm/mach-meson/platsmp.c
334
ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu),
arch/arm/mach-meson/platsmp.c
337
pr_err("Error when enabling isolation for CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
345
MESON_CPU_PWR_A9_CNTL1_M(cpu), 0x3);
arch/arm/mach-meson/platsmp.c
347
pr_err("Couldn't change sleep status of CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
354
static int meson8b_smp_cpu_kill(unsigned int cpu)
arch/arm/mach-meson/platsmp.c
359
power_mode = scu_get_cpu_power_mode(scu_base, cpu);
arch/arm/mach-meson/platsmp.c
369
cpu);
arch/arm/mach-meson/platsmp.c
377
MESON_CPU_PWR_A9_CNTL0_M(cpu), 0x3);
arch/arm/mach-meson/platsmp.c
379
pr_err("Couldn't power down CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
38
static struct reset_control *meson_smp_get_core_reset(int cpu)
arch/arm/mach-meson/platsmp.c
384
ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu),
arch/arm/mach-meson/platsmp.c
387
pr_err("Error when enabling isolation for CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
395
MESON_CPU_PWR_A9_CNTL1_M(cpu), 0x3);
arch/arm/mach-meson/platsmp.c
397
pr_err("Couldn't change sleep status of CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
40
struct device_node *np = of_get_cpu_node(cpu, 0);
arch/arm/mach-meson/platsmp.c
403
MESON_CPU_PWR_A9_MEM_PD0_M(cpu), 0xf);
arch/arm/mach-meson/platsmp.c
405
pr_err("Couldn't power down the memory of CPU%d\n", cpu);
arch/arm/mach-meson/platsmp.c
45
static void meson_smp_set_cpu_ctrl(int cpu, bool on_off)
arch/arm/mach-meson/platsmp.c
50
val |= BIT(cpu);
arch/arm/mach-meson/platsmp.c
52
val &= ~BIT(cpu);
arch/arm/mach-milbeaut/platsmp.c
25
unsigned int mpidr, cpu, cluster;
arch/arm/mach-milbeaut/platsmp.c
31
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-milbeaut/platsmp.c
34
if (cpu >= M10V_MAX_CPU)
arch/arm/mach-milbeaut/platsmp.c
38
__func__, cpu, l_cpu, cluster);
arch/arm/mach-milbeaut/platsmp.c
40
writel(__pa_symbol(secondary_startup), m10v_smp_base + cpu * 4);
arch/arm/mach-milbeaut/platsmp.c
48
unsigned int mpidr, cpu, cluster;
arch/arm/mach-milbeaut/platsmp.c
60
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-milbeaut/platsmp.c
62
pr_info("MCPM boot on cpu_%u cluster_%u\n", cpu, cluster);
arch/arm/mach-milbeaut/platsmp.c
64
for (cpu = 0; cpu < M10V_MAX_CPU; cpu++)
arch/arm/mach-milbeaut/platsmp.c
65
writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
arch/arm/mach-milbeaut/platsmp.c
78
unsigned int mpidr, cpu;
arch/arm/mach-milbeaut/platsmp.c
81
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-milbeaut/platsmp.c
83
writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
arch/arm/mach-mmp/platsmp.c
12
static int mmp3_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-mstar/mstarv7.c
101
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-mstar/mstarv7.c
74
static int mstarv7_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-mstar/mstarv7.c
84
if (cpu != 1)
arch/arm/mach-mvebu/coherency.c
111
static int armada_xp_clear_l2_starting(unsigned int cpu)
arch/arm/mach-mvebu/common.h
18
int mvebu_cpu_reset_deassert(int cpu);
arch/arm/mach-mvebu/cpu-reset.c
21
#define CPU_RESET_OFFSET(cpu) (cpu * 0x8)
arch/arm/mach-mvebu/cpu-reset.c
24
int mvebu_cpu_reset_deassert(int cpu)
arch/arm/mach-mvebu/cpu-reset.c
31
if (CPU_RESET_OFFSET(cpu) >= cpu_reset_size)
arch/arm/mach-mvebu/cpu-reset.c
34
reg = readl(cpu_reset_base + CPU_RESET_OFFSET(cpu));
arch/arm/mach-mvebu/cpu-reset.c
36
writel(reg, cpu_reset_base + CPU_RESET_OFFSET(cpu));
arch/arm/mach-mvebu/platsmp-a9.c
24
static int mvebu_cortex_a9_boot_secondary(unsigned int cpu,
arch/arm/mach-mvebu/platsmp-a9.c
29
pr_info("Booting CPU %d\n", cpu);
arch/arm/mach-mvebu/platsmp-a9.c
37
hw_cpu = cpu_logical_map(cpu);
arch/arm/mach-mvebu/platsmp-a9.c
48
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-mvebu/platsmp-a9.c
66
static void armada_38x_secondary_init(unsigned int cpu)
arch/arm/mach-mvebu/platsmp-a9.c
72
static void armada_38x_cpu_die(unsigned int cpu)
arch/arm/mach-mvebu/platsmp-a9.c
87
static int armada_38x_cpu_kill(unsigned int cpu)
arch/arm/mach-mvebu/platsmp.c
101
struct clk *cpu_clk = get_cpu_clk(cpu);
arch/arm/mach-mvebu/platsmp.c
148
static void armada_xp_cpu_die(unsigned int cpu)
arch/arm/mach-mvebu/platsmp.c
163
static int armada_xp_cpu_kill(unsigned int cpu)
arch/arm/mach-mvebu/platsmp.c
216
static int mv98dx3236_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-mvebu/platsmp.c
220
hw_cpu = cpu_logical_map(cpu);
arch/arm/mach-mvebu/platsmp.c
228
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-mvebu/platsmp.c
37
static struct clk *get_cpu_clk(int cpu)
arch/arm/mach-mvebu/platsmp.c
40
struct device_node *np = of_get_cpu_node(cpu, NULL);
arch/arm/mach-mvebu/platsmp.c
50
static int armada_xp_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-mvebu/platsmp.c
54
pr_info("Booting CPU %d\n", cpu);
arch/arm/mach-mvebu/platsmp.c
56
hw_cpu = cpu_logical_map(cpu);
arch/arm/mach-mvebu/platsmp.c
63
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-mvebu/platsmp.c
86
static void armada_xp_secondary_init(unsigned int cpu)
arch/arm/mach-mvebu/platsmp.c
99
static int armada_xp_sync_secondary_clk(unsigned int cpu)
arch/arm/mach-mvebu/pmsu.c
45
#define PMSU_CONTROL_AND_CONFIG(cpu) ((cpu * 0x100) + 0x104)
arch/arm/mach-mvebu/pmsu.c
50
#define PMSU_CPU_POWER_DOWN_CONTROL(cpu) ((cpu * 0x100) + 0x108)
arch/arm/mach-mvebu/pmsu.c
538
u32 cpu = smp_processor_id();
arch/arm/mach-mvebu/pmsu.c
54
#define PMSU_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x10c)
arch/arm/mach-mvebu/pmsu.c
544
reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
arch/arm/mach-mvebu/pmsu.c
548
writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
arch/arm/mach-mvebu/pmsu.c
551
reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
arch/arm/mach-mvebu/pmsu.c
553
writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
arch/arm/mach-mvebu/pmsu.c
562
reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
arch/arm/mach-mvebu/pmsu.c
564
writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
arch/arm/mach-mvebu/pmsu.c
569
int mvebu_pmsu_dfs_request(int cpu)
arch/arm/mach-mvebu/pmsu.c
572
int hwcpu = cpu_logical_map(cpu);
arch/arm/mach-mvebu/pmsu.c
586
smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local,
arch/arm/mach-mvebu/pmsu.c
63
#define PMSU_EVENT_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x120)
arch/arm/mach-mvebu/pmsu.c
67
#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124)
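The PMSU macros at lines 45-67 share one shape: a 0x100-byte register bank per CPU with fixed offsets inside it (0x104 control/config, 0x108 power-down control, 0x10c status-and-mask, 0x120 event status, 0x124 boot-address redirect). A generic sketch of that bank addressing:

#include <stdint.h>
#include <stdio.h>

#define PMSU_BANK_STRIDE 0x100u         /* one register bank per CPU */

static uint32_t pmsu_reg(unsigned int cpu, uint32_t reg_off)
{
        return cpu * PMSU_BANK_STRIDE + reg_off;
}

int main(void)
{
        printf("cpu1 control/config    : 0x%03x\n", pmsu_reg(1, 0x104));
        printf("cpu1 status-and-mask   : 0x%03x\n", pmsu_reg(1, 0x10c));
        printf("cpu1 boot-addr redirect: 0x%03x\n", pmsu_reg(1, 0x124));
        return 0;
}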
arch/arm/mach-npcm/platsmp.c
21
static int npcm7xx_smp_boot_secondary(unsigned int cpu,
arch/arm/mach-omap1/clock.h
21
u16 cpu;
arch/arm/mach-omap1/clock.h
27
.cpu = cp, \
arch/arm/mach-omap1/clock_data.c
791
if (!(c->cpu & cpu_mask))
arch/arm/mach-omap2/common.h
249
extern void omap4_cpu_die(unsigned int cpu);
arch/arm/mach-omap2/common.h
250
extern int omap4_cpu_kill(unsigned int cpu);
arch/arm/mach-omap2/common.h
259
extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state,
arch/arm/mach-omap2/common.h
261
extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
arch/arm/mach-omap2/common.h
263
static inline int omap4_enter_lowpower(unsigned int cpu,
arch/arm/mach-omap2/common.h
271
static inline int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
arch/arm/mach-omap2/cpuidle44xx.c
108
omap4_enter_lowpower(dev->cpu, cx->cpu_state, true);
arch/arm/mach-omap2/cpuidle44xx.c
133
if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
arch/arm/mach-omap2/cpuidle44xx.c
167
if (dev->cpu == 0) {
arch/arm/mach-omap2/cpuidle44xx.c
187
omap4_enter_lowpower(dev->cpu, cx->cpu_state, true);
arch/arm/mach-omap2/cpuidle44xx.c
188
cpu_done[dev->cpu] = true;
arch/arm/mach-omap2/cpuidle44xx.c
191
if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
arch/arm/mach-omap2/cpuidle44xx.c
215
if (dev->cpu == 0 && mpuss_can_lose_context)
arch/arm/mach-omap2/cpuidle44xx.c
229
cpu_done[dev->cpu] = false;
arch/arm/mach-omap2/omap-hotplug.c
27
void omap4_cpu_die(unsigned int cpu)
arch/arm/mach-omap2/omap-hotplug.c
47
omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
arch/arm/mach-omap2/omap-hotplug.c
61
pr_debug("CPU%u: spurious wakeup call\n", cpu);
arch/arm/mach-omap2/omap-hotplug.c
66
int omap4_cpu_kill(unsigned int cpu)
arch/arm/mach-omap2/omap-mpuss-lowpower.c
227
__cpuidle int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state,
arch/arm/mach-omap2/omap-mpuss-lowpower.c
230
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
arch/arm/mach-omap2/omap-mpuss-lowpower.c
271
cpu_clear_prev_logic_pwrst(cpu);
arch/arm/mach-omap2/omap-mpuss-lowpower.c
278
set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume));
arch/arm/mach-omap2/omap-mpuss-lowpower.c
279
omap_pm_ops.scu_prepare(cpu, power_state);
arch/arm/mach-omap2/omap-mpuss-lowpower.c
280
l2x0_pwrst_prepare(cpu, save_state);
arch/arm/mach-omap2/omap-mpuss-lowpower.c
290
if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
arch/arm/mach-omap2/omap-mpuss-lowpower.c
315
int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
arch/arm/mach-omap2/omap-mpuss-lowpower.c
317
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
arch/arm/mach-omap2/omap-mpuss-lowpower.c
332
set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.hotplug_restart));
arch/arm/mach-omap2/omap-mpuss-lowpower.c
333
omap_pm_ops.scu_prepare(cpu, power_state);
arch/arm/mach-omap2/omap-secure.c
64
int cpu;
arch/arm/mach-omap2/omap-secure.c
67
cpu = get_cpu();
arch/arm/mach-omap2/omap-secure.c
68
param = buf[cpu];
arch/arm/mach-omap2/omap-smp.c
146
static void omap4_secondary_init(unsigned int cpu)
arch/arm/mach-omap2/omap-smp.c
173
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-omap2/omap-smp.c
250
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-omap2/omap-wakeupgen.c
104
static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
arch/arm/mach-omap2/omap-wakeupgen.c
112
val = wakeupgen_readl(i, cpu);
arch/arm/mach-omap2/omap-wakeupgen.c
114
wakeupgen_writel(val, i, cpu);
arch/arm/mach-omap2/omap-wakeupgen.c
117
static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
arch/arm/mach-omap2/omap-wakeupgen.c
125
val = wakeupgen_readl(i, cpu);
arch/arm/mach-omap2/omap-wakeupgen.c
127
wakeupgen_writel(val, i, cpu);
arch/arm/mach-omap2/omap-wakeupgen.c
190
static void _wakeupgen_save_masks(unsigned int cpu)
arch/arm/mach-omap2/omap-wakeupgen.c
195
per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
arch/arm/mach-omap2/omap-wakeupgen.c
198
static void _wakeupgen_restore_masks(unsigned int cpu)
arch/arm/mach-omap2/omap-wakeupgen.c
203
wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
arch/arm/mach-omap2/omap-wakeupgen.c
206
static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
arch/arm/mach-omap2/omap-wakeupgen.c
211
wakeupgen_writel(reg, i, cpu);
arch/arm/mach-omap2/omap-wakeupgen.c
221
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
arch/arm/mach-omap2/omap-wakeupgen.c
227
_wakeupgen_save_masks(cpu);
arch/arm/mach-omap2/omap-wakeupgen.c
228
_wakeupgen_set_all(cpu, WKG_MASK_ALL);
arch/arm/mach-omap2/omap-wakeupgen.c
230
_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
arch/arm/mach-omap2/omap-wakeupgen.c
231
_wakeupgen_restore_masks(cpu);
arch/arm/mach-omap2/omap-wakeupgen.c
407
static int omap_wakeupgen_cpu_online(unsigned int cpu)
arch/arm/mach-omap2/omap-wakeupgen.c
409
wakeupgen_irqmask_all(cpu, 0);
arch/arm/mach-omap2/omap-wakeupgen.c
413
static int omap_wakeupgen_cpu_dead(unsigned int cpu)
arch/arm/mach-omap2/omap-wakeupgen.c
415
wakeupgen_irqmask_all(cpu, 1);
arch/arm/mach-omap2/omap-wakeupgen.c
75
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
arch/arm/mach-omap2/omap-wakeupgen.c
78
(cpu * CPU_ENA_OFFSET) + (idx * 4));
arch/arm/mach-omap2/omap-wakeupgen.c
81
static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
arch/arm/mach-omap2/omap-wakeupgen.c
84
(cpu * CPU_ENA_OFFSET) + (idx * 4));
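wakeupgen_readl()/wakeupgen_writel() index a two-dimensional register file, cpu * CPU_ENA_OFFSET + idx * 4, and the save/restore helpers at lines 190-203 walk idx across a CPU's bank. A reduced model of the save, mask-all, restore sequence used by wakeupgen_irqmask_all():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_REGS 4       /* stands in for the per-CPU bank size */
#define NR_CPUS 2

static uint32_t wkg_regs[NR_CPUS][NR_REGS];     /* the "register file" */
static uint32_t saved_masks[NR_CPUS][NR_REGS];

static void save_masks(unsigned int cpu)
{
        memcpy(saved_masks[cpu], wkg_regs[cpu], sizeof(wkg_regs[cpu]));
}

static void set_all(unsigned int cpu, uint32_t val)
{
        for (int i = 0; i < NR_REGS; i++)
                wkg_regs[cpu][i] = val;
}

static void restore_masks(unsigned int cpu)
{
        memcpy(wkg_regs[cpu], saved_masks[cpu], sizeof(wkg_regs[cpu]));
}

int main(void)
{
        wkg_regs[0][2] = 0xdeadbeef;
        save_masks(0);
        set_all(0, 0xffffffff);         /* WKG_MASK_ALL analogue */
        restore_masks(0);
        printf("restored: 0x%08x\n", wkg_regs[0][2]);
        return 0;
}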
arch/arm/mach-omap2/pm33xx-core.c
379
static int __init amx3_idle_init(struct device_node *cpu_node, int cpu)
arch/arm/mach-qcom/platsmp.c
135
static int kpssv1_release_secondary(unsigned int cpu)
arch/arm/mach-qcom/platsmp.c
142
cpu_node = of_get_cpu_node(cpu, NULL);
arch/arm/mach-qcom/platsmp.c
214
static int kpssv2_release_secondary(unsigned int cpu)
arch/arm/mach-qcom/platsmp.c
222
cpu_node = of_get_cpu_node(cpu, NULL);
arch/arm/mach-qcom/platsmp.c
316
static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
arch/arm/mach-qcom/platsmp.c
320
if (!per_cpu(cold_boot_done, cpu)) {
arch/arm/mach-qcom/platsmp.c
321
ret = func(cpu);
arch/arm/mach-qcom/platsmp.c
323
per_cpu(cold_boot_done, cpu) = true;
arch/arm/mach-qcom/platsmp.c
331
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-qcom/platsmp.c
336
static int msm8660_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-qcom/platsmp.c
338
return qcom_boot_secondary(cpu, scss_release_secondary);
arch/arm/mach-qcom/platsmp.c
341
static int cortex_a7_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-qcom/platsmp.c
343
return qcom_boot_secondary(cpu, cortex_a7_release_secondary);
arch/arm/mach-qcom/platsmp.c
346
static int kpssv1_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-qcom/platsmp.c
348
return qcom_boot_secondary(cpu, kpssv1_release_secondary);
arch/arm/mach-qcom/platsmp.c
351
static int kpssv2_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-qcom/platsmp.c
353
return qcom_boot_secondary(cpu, kpssv2_release_secondary);
arch/arm/mach-qcom/platsmp.c
358
int cpu;
arch/arm/mach-qcom/platsmp.c
361
for_each_present_cpu(cpu) {
arch/arm/mach-qcom/platsmp.c
362
if (cpu == smp_processor_id())
arch/arm/mach-qcom/platsmp.c
364
set_cpu_present(cpu, false);
arch/arm/mach-qcom/platsmp.c
48
static void qcom_cpu_die(unsigned int cpu)
arch/arm/mach-qcom/platsmp.c
54
static int scss_release_secondary(unsigned int cpu)
arch/arm/mach-qcom/platsmp.c
79
static int cortex_a7_release_secondary(unsigned int cpu)
arch/arm/mach-qcom/platsmp.c
86
cpu_node = of_get_cpu_node(cpu, NULL);
arch/arm/mach-rockchip/platsmp.c
116
static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-rockchip/platsmp.c
125
if (cpu >= ncores) {
arch/arm/mach-rockchip/platsmp.c
127
__func__, cpu, ncores);
arch/arm/mach-rockchip/platsmp.c
132
ret = pmu_set_power_domain(0 + cpu, true);
arch/arm/mach-rockchip/platsmp.c
337
static int rockchip_cpu_kill(unsigned int cpu)
arch/arm/mach-rockchip/platsmp.c
346
pmu_set_power_domain(0 + cpu, false);
arch/arm/mach-rockchip/platsmp.c
350
static void rockchip_cpu_die(unsigned int cpu)
arch/arm/mach-rockchip/platsmp.c
50
static struct reset_control *rockchip_get_core_reset(int cpu)
arch/arm/mach-rockchip/platsmp.c
52
struct device *dev = get_cpu_device(cpu);
arch/arm/mach-rockchip/platsmp.c
59
np = of_get_cpu_node(cpu, NULL);
arch/arm/mach-s3c/init.c
115
if (cpu == NULL)
arch/arm/mach-s3c/init.c
118
if (cpu->init_uarts == NULL && IS_ENABLED(CONFIG_SAMSUNG_ATAGS)) {
arch/arm/mach-s3c/init.c
121
(cpu->init_uarts)(cfg, no);
arch/arm/mach-s3c/init.c
135
if (cpu == NULL) {
arch/arm/mach-s3c/init.c
142
ret = (cpu->init)();
arch/arm/mach-s3c/init.c
29
static struct cpu_table *cpu;
arch/arm/mach-s3c/init.c
46
cpu = s3c_lookup_cpu(idcode, cputab, cputab_size);
arch/arm/mach-s3c/init.c
48
if (cpu == NULL) {
arch/arm/mach-s3c/init.c
53
printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
arch/arm/mach-s3c/init.c
55
if (cpu->init == NULL) {
arch/arm/mach-s3c/init.c
56
printk(KERN_ERR "CPU %s support not enabled\n", cpu->name);
arch/arm/mach-s3c/init.c
60
if (cpu->map_io)
arch/arm/mach-s3c/init.c
61
cpu->map_io();
arch/arm/mach-sa1100/generic.c
67
unsigned int sa11x0_getspeed(unsigned int cpu)
arch/arm/mach-sa1100/generic.c
69
if (cpu)
arch/arm/mach-sa1100/generic.h
25
extern unsigned int sa11x0_getspeed(unsigned int cpu);
arch/arm/mach-shmobile/common.h
15
extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
arch/arm/mach-shmobile/common.h
17
extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
arch/arm/mach-shmobile/common.h
22
extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
arch/arm/mach-shmobile/common.h
23
extern int shmobile_smp_scu_cpu_kill(unsigned int cpu);
arch/arm/mach-shmobile/platsmp-apmu.c
118
static void shmobile_smp_apmu_cpu_shutdown(unsigned int cpu)
arch/arm/mach-shmobile/platsmp-apmu.c
122
apmu_wrap(cpu, apmu_power_off);
arch/arm/mach-shmobile/platsmp-apmu.c
130
static void shmobile_smp_apmu_cpu_die(unsigned int cpu)
arch/arm/mach-shmobile/platsmp-apmu.c
133
shmobile_smp_hook(cpu, 0, 0);
arch/arm/mach-shmobile/platsmp-apmu.c
136
shmobile_smp_apmu_cpu_shutdown(cpu);
arch/arm/mach-shmobile/platsmp-apmu.c
142
static int shmobile_smp_apmu_cpu_kill(unsigned int cpu)
arch/arm/mach-shmobile/platsmp-apmu.c
144
return apmu_wrap(cpu, apmu_power_off_poll);
arch/arm/mach-shmobile/platsmp-apmu.c
149
static int shmobile_smp_apmu_do_suspend(unsigned long cpu)
arch/arm/mach-shmobile/platsmp-apmu.c
151
shmobile_smp_hook(cpu, __pa_symbol(cpu_resume), 0);
arch/arm/mach-shmobile/platsmp-apmu.c
152
shmobile_smp_apmu_cpu_shutdown(cpu);
arch/arm/mach-shmobile/platsmp-apmu.c
186
static void apmu_init_cpu(struct resource *res, int cpu, int bit)
arch/arm/mach-shmobile/platsmp-apmu.c
190
if ((cpu >= ARRAY_SIZE(apmu_cpus)) || apmu_cpus[cpu].iomem)
arch/arm/mach-shmobile/platsmp-apmu.c
193
apmu_cpus[cpu].iomem = ioremap(res->start, resource_size(res));
arch/arm/mach-shmobile/platsmp-apmu.c
194
apmu_cpus[cpu].bit = bit;
arch/arm/mach-shmobile/platsmp-apmu.c
196
pr_debug("apmu ioremap %d %d %pr\n", cpu, bit, res);
arch/arm/mach-shmobile/platsmp-apmu.c
199
x = readl(apmu_cpus[cpu].iomem + DBGRCR_OFFS);
arch/arm/mach-shmobile/platsmp-apmu.c
201
writel(x, apmu_cpus[cpu].iomem + DBGRCR_OFFS);
arch/arm/mach-shmobile/platsmp-apmu.c
209
static void apmu_parse_dt(void (*fn)(struct resource *res, int cpu, int bit))
arch/arm/mach-shmobile/platsmp-apmu.c
255
static int shmobile_smp_apmu_boot_secondary(unsigned int cpu,
arch/arm/mach-shmobile/platsmp-apmu.c
259
shmobile_smp_hook(cpu, __pa_symbol(shmobile_boot_apmu), 0);
arch/arm/mach-shmobile/platsmp-apmu.c
261
return apmu_wrap(cpu, apmu_power_on);
arch/arm/mach-shmobile/platsmp-apmu.c
80
static int __maybe_unused apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu))
arch/arm/mach-shmobile/platsmp-apmu.c
82
void __iomem *p = apmu_cpus[cpu].iomem;
arch/arm/mach-shmobile/platsmp-apmu.c
84
return p ? fn(p, apmu_cpus[cpu].bit) : -EINVAL;
arch/arm/mach-shmobile/platsmp-scu.c
21
static int shmobile_scu_cpu_prepare(unsigned int cpu)
arch/arm/mach-shmobile/platsmp-scu.c
24
shmobile_smp_hook(cpu, __pa_symbol(shmobile_boot_scu),
arch/arm/mach-shmobile/platsmp-scu.c
48
void shmobile_smp_scu_cpu_die(unsigned int cpu)
arch/arm/mach-shmobile/platsmp-scu.c
51
shmobile_smp_hook(cpu, 0, 0);
arch/arm/mach-shmobile/platsmp-scu.c
63
static int shmobile_smp_scu_psr_core_disabled(int cpu)
arch/arm/mach-shmobile/platsmp-scu.c
65
unsigned long mask = SCU_PM_POWEROFF << (cpu * 8);
arch/arm/mach-shmobile/platsmp-scu.c
73
int shmobile_smp_scu_cpu_kill(unsigned int cpu)
arch/arm/mach-shmobile/platsmp-scu.c
82
if (shmobile_smp_scu_psr_core_disabled(cpu))
arch/arm/mach-shmobile/platsmp.c
19
void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg)
arch/arm/mach-shmobile/platsmp.c
21
shmobile_smp_fn[cpu] = 0;
arch/arm/mach-shmobile/platsmp.c
24
shmobile_smp_mpidr[cpu] = cpu_logical_map(cpu);
arch/arm/mach-shmobile/platsmp.c
25
shmobile_smp_fn[cpu] = fn;
arch/arm/mach-shmobile/platsmp.c
26
shmobile_smp_arg[cpu] = arg;
arch/arm/mach-shmobile/platsmp.c
31
bool shmobile_smp_cpu_can_disable(unsigned int cpu)
arch/arm/mach-shmobile/smp-emev2.c
24
static int emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-shmobile/smp-emev2.c
26
arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu)));
arch/arm/mach-shmobile/smp-r8a7779.c
28
static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-shmobile/smp-r8a7779.c
32
cpu = cpu_logical_map(cpu);
arch/arm/mach-shmobile/smp-r8a7779.c
33
if (cpu)
arch/arm/mach-shmobile/smp-r8a7779.c
34
ret = rcar_sysc_power_up_cpu(cpu);
arch/arm/mach-shmobile/smp-r8a7779.c
60
static int r8a7779_platform_cpu_kill(unsigned int cpu)
arch/arm/mach-shmobile/smp-r8a7779.c
64
cpu = cpu_logical_map(cpu);
arch/arm/mach-shmobile/smp-r8a7779.c
65
if (cpu)
arch/arm/mach-shmobile/smp-r8a7779.c
66
ret = rcar_sysc_power_down_cpu(cpu);
arch/arm/mach-shmobile/smp-r8a7779.c
71
static int r8a7779_cpu_kill(unsigned int cpu)
arch/arm/mach-shmobile/smp-r8a7779.c
73
if (shmobile_smp_scu_cpu_kill(cpu))
arch/arm/mach-shmobile/smp-r8a7779.c
74
return r8a7779_platform_cpu_kill(cpu);
arch/arm/mach-shmobile/smp-sh73a0.c
32
static int sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-shmobile/smp-sh73a0.c
34
unsigned int lcpu = cpu_logical_map(cpu);
arch/arm/mach-socfpga/platsmp.c
105
static int socfpga_cpu_kill(unsigned int cpu)
arch/arm/mach-socfpga/platsmp.c
21
static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-socfpga/platsmp.c
46
static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-socfpga/platsmp.c
92
static void socfpga_cpu_die(unsigned int cpu)
arch/arm/mach-spear/generic.h
36
void spear13xx_cpu_die(unsigned int cpu);
arch/arm/mach-spear/hotplug.c
54
static inline void spear13xx_do_lowpower(unsigned int cpu, int *spurious)
arch/arm/mach-spear/hotplug.c
59
if (spear_pen_release == cpu) {
arch/arm/mach-spear/hotplug.c
82
void spear13xx_cpu_die(unsigned int cpu)
arch/arm/mach-spear/hotplug.c
90
spear13xx_do_lowpower(cpu, &spurious);
arch/arm/mach-spear/hotplug.c
99
pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
arch/arm/mach-spear/platsmp.c
41
static void spear13xx_secondary_init(unsigned int cpu)
arch/arm/mach-spear/platsmp.c
56
static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-spear/platsmp.c
74
spear_write_pen_release(cpu);
arch/arm/mach-sti/platsmp.c
30
static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-sti/platsmp.c
54
int cpu;
arch/arm/mach-sti/platsmp.c
67
for_each_possible_cpu(cpu) {
arch/arm/mach-sti/platsmp.c
69
np = of_get_cpu_node(cpu, NULL);
arch/arm/mach-sti/platsmp.c
77
"property\n", cpu);
arch/arm/mach-sti/platsmp.c
93
set_cpu_possible(cpu, true);
arch/arm/mach-sunxi/mc_smp.c
115
static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster,
arch/arm/mach-sunxi/mc_smp.c
121
reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
arch/arm/mach-sunxi/mc_smp.c
125
cluster, cpu);
arch/arm/mach-sunxi/mc_smp.c
129
writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
arch/arm/mach-sunxi/mc_smp.c
131
writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
arch/arm/mach-sunxi/mc_smp.c
133
writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
arch/arm/mach-sunxi/mc_smp.c
135
writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
arch/arm/mach-sunxi/mc_smp.c
137
writel(0x00, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
arch/arm/mach-sunxi/mc_smp.c
140
writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
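sunxi_cpu_power_switch_set() steps the clamp register through 0xfe, 0xf8, 0xf0, 0x00, releasing clamp bits a few at a time, while 0xff re-engages all clamps. A sketch of the staged release; the settle delay is an assumption, not taken from the driver:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static const uint8_t clamp_steps[] = { 0xfe, 0xf8, 0xf0, 0x00 };

static void power_switch_on(volatile uint8_t *reg)
{
        for (size_t i = 0; i < sizeof(clamp_steps); i++) {
                *reg = clamp_steps[i];
                usleep(100);    /* settle time between steps (assumed) */
        }
}

int main(void)
{
        uint8_t fake_reg = 0xff;        /* all clamps engaged: powered off */
        power_switch_on(&fake_reg);
        printf("clamp register: 0x%02x\n", fake_reg);
        return 0;
}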
arch/arm/mach-sunxi/mc_smp.c
158
static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
arch/arm/mach-sunxi/mc_smp.c
162
pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
arch/arm/mach-sunxi/mc_smp.c
163
if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
arch/arm/mach-sunxi/mc_smp.c
167
if (cluster == 0 && cpu == 0)
arch/arm/mach-sunxi/mc_smp.c
172
reg &= ~PRCM_CPU_PO_RST_CTRL_CORE(cpu);
arch/arm/mach-sunxi/mc_smp.c
179
reg &= ~(R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu));
arch/arm/mach-sunxi/mc_smp.c
186
if (!sunxi_core_is_cortex_a15(cpu, cluster)) {
arch/arm/mach-sunxi/mc_smp.c
188
reg &= ~CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE(cpu);
arch/arm/mach-sunxi/mc_smp.c
194
reg &= ~CPUCFG_CX_RST_CTRL_DBG_RST(cpu);
arch/arm/mach-sunxi/mc_smp.c
200
if (!sunxi_core_is_cortex_a15(cpu, cluster))
arch/arm/mach-sunxi/mc_smp.c
201
reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST(cpu);
arch/arm/mach-sunxi/mc_smp.c
206
sunxi_cpu_power_switch_set(cpu, cluster, true);
arch/arm/mach-sunxi/mc_smp.c
210
if (cpu == 0)
arch/arm/mach-sunxi/mc_smp.c
211
cpu = 4;
arch/arm/mach-sunxi/mc_smp.c
216
reg &= ~PRCM_PWROFF_GATING_REG_CORE(cpu);
arch/arm/mach-sunxi/mc_smp.c
222
if (cpu == 4)
arch/arm/mach-sunxi/mc_smp.c
223
cpu = 0;
arch/arm/mach-sunxi/mc_smp.c
228
reg |= PRCM_CPU_PO_RST_CTRL_CORE(cpu);
arch/arm/mach-sunxi/mc_smp.c
234
reg |= R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu);
arch/arm/mach-sunxi/mc_smp.c
242
reg |= CPUCFG_CX_RST_CTRL_DBG_RST(cpu);
arch/arm/mach-sunxi/mc_smp.c
243
reg |= CPUCFG_CX_RST_CTRL_CORE_RST(cpu);
arch/arm/mach-sunxi/mc_smp.c
244
if (!sunxi_core_is_cortex_a15(cpu, cluster))
arch/arm/mach-sunxi/mc_smp.c
245
reg |= CPUCFG_CX_RST_CTRL_ETM_RST(cpu);
arch/arm/mach-sunxi/mc_smp.c
247
reg |= CPUCFG_CX_RST_CTRL_CX_RST(cpu); /* NEON */
arch/arm/mach-sunxi/mc_smp.c
384
static void sunxi_mc_smp_secondary_init(unsigned int cpu)
arch/arm/mach-sunxi/mc_smp.c
387
if (cpu == 0)
arch/arm/mach-sunxi/mc_smp.c
393
unsigned int mpidr, cpu, cluster;
arch/arm/mach-sunxi/mc_smp.c
396
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-sunxi/mc_smp.c
401
if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER)
arch/arm/mach-sunxi/mc_smp.c
406
if (sunxi_mc_smp_cpu_table[cluster][cpu])
arch/arm/mach-sunxi/mc_smp.c
418
sunxi_cpu_powerup(cpu, cluster);
arch/arm/mach-sunxi/mc_smp.c
421
sunxi_mc_smp_cpu_table[cluster][cpu]++;
arch/arm/mach-sunxi/mc_smp.c
445
unsigned int mpidr, cpu, cluster;
arch/arm/mach-sunxi/mc_smp.c
449
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-sunxi/mc_smp.c
451
pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
arch/arm/mach-sunxi/mc_smp.c
454
sunxi_mc_smp_cpu_table[cluster][cpu]--;
arch/arm/mach-sunxi/mc_smp.c
455
if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) {
arch/arm/mach-sunxi/mc_smp.c
461
} else if (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) {
arch/arm/mach-sunxi/mc_smp.c
463
cluster, cpu);
arch/arm/mach-sunxi/mc_smp.c
480
static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
arch/arm/mach-sunxi/mc_smp.c
483
int gating_bit = cpu;
arch/arm/mach-sunxi/mc_smp.c
485
pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
arch/arm/mach-sunxi/mc_smp.c
486
if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
arch/arm/mach-sunxi/mc_smp.c
489
if (is_a83t && cpu == 0)
arch/arm/mach-sunxi/mc_smp.c
499
sunxi_cpu_power_switch_set(cpu, cluster, false);
arch/arm/mach-sunxi/mc_smp.c
535
unsigned int mpidr, cpu, cluster;
arch/arm/mach-sunxi/mc_smp.c
541
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-sunxi/mc_smp.c
546
cpu >= SUNXI_CPUS_PER_CLUSTER))
arch/arm/mach-sunxi/mc_smp.c
564
if (sunxi_mc_smp_cpu_table[cluster][cpu])
arch/arm/mach-sunxi/mc_smp.c
568
if (reg & CPUCFG_CX_STATUS_STANDBYWFI(cpu))
arch/arm/mach-sunxi/mc_smp.c
578
sunxi_cpu_powerdown(cpu, cluster);
arch/arm/mach-sunxi/mc_smp.c
604
__func__, cluster, cpu, ret);
arch/arm/mach-sunxi/mc_smp.c
608
static bool sunxi_mc_smp_cpu_can_disable(unsigned int cpu)
arch/arm/mach-sunxi/mc_smp.c
612
if (cpu == 0)
arch/arm/mach-sunxi/mc_smp.c
630
unsigned int mpidr, cpu, cluster;
arch/arm/mach-sunxi/mc_smp.c
633
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-sunxi/mc_smp.c
636
if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) {
arch/arm/mach-sunxi/mc_smp.c
640
sunxi_mc_smp_cpu_table[cluster][cpu] = 1;
arch/arm/mach-sunxi/mc_smp.c
67
#define PRCM_PWR_SWITCH_REG(c, cpu) (0x140 + 0x10 * (c) + 0x4 * (cpu))
arch/arm/mach-sunxi/mc_smp.c
90
int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core;
arch/arm/mach-sunxi/mc_smp.c
93
node = of_cpu_device_node_get(cpu);
arch/arm/mach-sunxi/mc_smp.c
97
node = of_get_cpu_node(cpu, NULL);
arch/arm/mach-sunxi/platsmp.c
104
writel(reg & ~BIT(cpu), prcm_membase + PRCM_CPU_PWROFF_REG);
arch/arm/mach-sunxi/platsmp.c
108
writel(3, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));
arch/arm/mach-sunxi/platsmp.c
112
writel(reg | BIT(cpu), cpucfg_membase + CPUCFG_DBG_CTL1_REG);
arch/arm/mach-sunxi/platsmp.c
156
static int sun8i_smp_boot_secondary(unsigned int cpu,
arch/arm/mach-sunxi/platsmp.c
171
writel(0, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));
arch/arm/mach-sunxi/platsmp.c
175
writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_GEN_CTRL_REG);
arch/arm/mach-sunxi/platsmp.c
179
writel(reg & ~BIT(cpu), prcm_membase + PRCM_CPU_PWROFF_REG);
arch/arm/mach-sunxi/platsmp.c
183
writel(3, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));
arch/arm/mach-sunxi/platsmp.c
22
#define CPUCFG_CPU_PWR_CLAMP_STATUS_REG(cpu) ((cpu) * 0x40 + 0x64)
arch/arm/mach-sunxi/platsmp.c
23
#define CPUCFG_CPU_RST_CTRL_REG(cpu) (((cpu) + 1) * 0x40)
arch/arm/mach-sunxi/platsmp.c
24
#define CPUCFG_CPU_CTRL_REG(cpu) (((cpu) + 1) * 0x40 + 0x04)
arch/arm/mach-sunxi/platsmp.c
25
#define CPUCFG_CPU_STATUS_REG(cpu) (((cpu) + 1) * 0x40 + 0x08)
arch/arm/mach-sunxi/platsmp.c
33
#define PRCM_CPU_PWR_CLAMP_REG(cpu) (((cpu) * 4) + 0x140)
arch/arm/mach-sunxi/platsmp.c
71
static int sun6i_smp_boot_secondary(unsigned int cpu,
arch/arm/mach-sunxi/platsmp.c
87
writel(0, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));
arch/arm/mach-sunxi/platsmp.c
91
writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_GEN_CTRL_REG);
arch/arm/mach-sunxi/platsmp.c
95
writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_DBG_CTL1_REG);
arch/arm/mach-sunxi/platsmp.c
99
writel(0xff >> i, prcm_membase + PRCM_CPU_PWR_CLAMP_REG(cpu));
arch/arm/mach-tegra/common.h
12
extern int tegra_cpu_kill(unsigned int cpu);
arch/arm/mach-tegra/common.h
13
extern void tegra_cpu_die(unsigned int cpu);
arch/arm/mach-tegra/hotplug.c
22
int tegra_cpu_kill(unsigned cpu)
arch/arm/mach-tegra/hotplug.c
24
cpu = cpu_logical_map(cpu);
arch/arm/mach-tegra/hotplug.c
27
tegra_wait_cpu_in_reset(cpu);
arch/arm/mach-tegra/hotplug.c
28
tegra_disable_cpu_clock(cpu);
arch/arm/mach-tegra/hotplug.c
38
void tegra_cpu_die(unsigned int cpu)
arch/arm/mach-tegra/platsmp.c
108
ret = tegra_pmc_cpu_power_on(cpu);
arch/arm/mach-tegra/platsmp.c
114
tegra_enable_cpu_clock(cpu);
arch/arm/mach-tegra/platsmp.c
118
ret = tegra_pmc_cpu_remove_clamping(cpu);
arch/arm/mach-tegra/platsmp.c
124
flowctrl_write_cpu_csr(cpu, 0); /* Clear flow controller CSR. */
arch/arm/mach-tegra/platsmp.c
125
tegra_cpu_out_of_reset(cpu);
arch/arm/mach-tegra/platsmp.c
129
static int tegra114_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-tegra/platsmp.c
133
cpu = cpu_logical_map(cpu);
arch/arm/mach-tegra/platsmp.c
135
if (cpumask_test_cpu(cpu, &tegra_cpu_init_mask)) {
arch/arm/mach-tegra/platsmp.c
142
flowctrl_write_cpu_csr(cpu, 1);
arch/arm/mach-tegra/platsmp.c
143
flowctrl_write_cpu_halt(cpu,
arch/arm/mach-tegra/platsmp.c
152
ret = tegra_pmc_cpu_power_on(cpu);
arch/arm/mach-tegra/platsmp.c
158
static int tegra_boot_secondary(unsigned int cpu,
arch/arm/mach-tegra/platsmp.c
162
return tegra20_boot_secondary(cpu, idle);
arch/arm/mach-tegra/platsmp.c
164
return tegra30_boot_secondary(cpu, idle);
arch/arm/mach-tegra/platsmp.c
166
return tegra114_boot_secondary(cpu, idle);
arch/arm/mach-tegra/platsmp.c
168
return tegra114_boot_secondary(cpu, idle);
arch/arm/mach-tegra/platsmp.c
36
static void tegra_secondary_init(unsigned int cpu)
arch/arm/mach-tegra/platsmp.c
38
cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
arch/arm/mach-tegra/platsmp.c
42
static int tegra20_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-tegra/platsmp.c
44
cpu = cpu_logical_map(cpu);
arch/arm/mach-tegra/platsmp.c
54
tegra_put_cpu_in_reset(cpu);
arch/arm/mach-tegra/platsmp.c
62
flowctrl_write_cpu_halt(cpu, 0);
arch/arm/mach-tegra/platsmp.c
64
tegra_enable_cpu_clock(cpu);
arch/arm/mach-tegra/platsmp.c
65
flowctrl_write_cpu_csr(cpu, 0); /* Clear flow controller CSR. */
arch/arm/mach-tegra/platsmp.c
66
tegra_cpu_out_of_reset(cpu);
arch/arm/mach-tegra/platsmp.c
70
static int tegra30_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-tegra/platsmp.c
75
cpu = cpu_logical_map(cpu);
arch/arm/mach-tegra/platsmp.c
76
tegra_put_cpu_in_reset(cpu);
arch/arm/mach-tegra/platsmp.c
77
flowctrl_write_cpu_halt(cpu, 0);
arch/arm/mach-tegra/platsmp.c
93
if (cpumask_test_cpu(cpu, &tegra_cpu_init_mask)) {
arch/arm/mach-tegra/platsmp.c
96
if (tegra_pmc_cpu_is_powered(cpu))
arch/arm/mach-tegra/pm.c
101
BUG_ON(cpu != 0);
arch/arm/mach-tegra/pm.c
104
cpu = cpu_logical_map(cpu);
arch/arm/mach-tegra/pm.c
110
flowctrl_cpu_suspend_enter(cpu);
arch/arm/mach-tegra/pm.c
440
int tegra_pm_park_secondary_cpu(unsigned long cpu)
arch/arm/mach-tegra/pm.c
442
if (cpu > 0) {
arch/arm/mach-tegra/pm.c
75
int cpu = smp_processor_id();
arch/arm/mach-tegra/pm.c
77
BUG_ON(cpu != 0);
arch/arm/mach-tegra/pm.c
80
cpu = cpu_logical_map(cpu);
arch/arm/mach-tegra/pm.c
86
flowctrl_cpu_suspend_exit(cpu);
arch/arm/mach-tegra/pm.c
99
int cpu = smp_processor_id();
arch/arm/mach-tegra/sleep.h
52
cmp \rcpu, #0
arch/arm/mach-tegra/sleep.h
53
subne \rd, \rcpu, #1
arch/arm/mach-tegra/sleep.h
61
cmp \rcpu, #0
arch/arm/mach-tegra/sleep.h
62
subne \rd, \rcpu, #1
arch/arm/mach-ux500/platsmp.c
67
static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-ux500/platsmp.c
82
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-ux500/platsmp.c
87
static void ux500_cpu_die(unsigned int cpu)
arch/arm/mach-ux500/pm.c
130
bool prcmu_is_cpu_in_wfi(int cpu)
arch/arm/mach-ux500/pm.c
133
(cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
arch/arm/mach-versatile/hotplug.c
101
pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
arch/arm/mach-versatile/hotplug.c
57
static inline void versatile_immitation_do_lowpower(unsigned int cpu, int *spurious)
arch/arm/mach-versatile/hotplug.c
69
if (versatile_cpu_release == cpu_logical_map(cpu)) {
arch/arm/mach-versatile/hotplug.c
92
void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask)
arch/arm/mach-versatile/hotplug.c
97
versatile_immitation_do_lowpower(cpu, &spurious);
arch/arm/mach-versatile/platsmp-realview.c
84
static void realview_cpu_die(unsigned int cpu)
arch/arm/mach-versatile/platsmp-realview.c
86
return versatile_immitation_cpu_die(cpu, 0x20);
arch/arm/mach-versatile/platsmp-vexpress.c
23
int cpu;
arch/arm/mach-versatile/platsmp-vexpress.c
33
for_each_possible_cpu(cpu) {
arch/arm/mach-versatile/platsmp-vexpress.c
36
cpu_node = of_get_cpu_node(cpu, NULL);
arch/arm/mach-versatile/platsmp-vexpress.c
80
static void vexpress_cpu_die(unsigned int cpu)
arch/arm/mach-versatile/platsmp-vexpress.c
82
versatile_immitation_cpu_die(cpu, 0x40);
arch/arm/mach-versatile/platsmp.c
51
void versatile_secondary_init(unsigned int cpu)
arch/arm/mach-versatile/platsmp.c
66
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-versatile/platsmp.c
82
versatile_write_cpu_release(cpu_logical_map(cpu));
arch/arm/mach-versatile/platsmp.c
89
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
arch/arm/mach-versatile/platsmp.h
10
extern int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle);
arch/arm/mach-versatile/platsmp.h
11
void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask);
arch/arm/mach-versatile/platsmp.h
9
extern void versatile_secondary_init(unsigned int cpu);
arch/arm/mach-versatile/spc.c
150
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
arch/arm/mach-versatile/spc.c
157
mask = BIT(cpu);
arch/arm/mach-versatile/spc.c
179
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
arch/arm/mach-versatile/spc.c
187
baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2);
arch/arm/mach-versatile/spc.c
189
baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2);
arch/arm/mach-versatile/spc.c
215
static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
arch/arm/mach-versatile/spc.c
218
STANDBYWFI_STAT_A15_CPU_MASK(cpu)
arch/arm/mach-versatile/spc.c
219
: STANDBYWFI_STAT_A7_CPU_MASK(cpu);
arch/arm/mach-versatile/spc.c
234
int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
arch/arm/mach-versatile/spc.c
237
u32 mask = standbywfi_cpu_mask(cpu, cluster);
arch/arm/mach-versatile/spc.c
52
#define STANDBYWFI_STAT_A15_CPU_MASK(cpu) (1 << (cpu))
arch/arm/mach-versatile/spc.c
53
#define STANDBYWFI_STAT_A7_CPU_MASK(cpu) (1 << (3 + (cpu)))
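The two mask macros above pack A15 cores into the low bits of the STANDBYWFI status word and start the A7 cores at bit 3; standbywfi_cpu_mask() at line 215 selects by cluster. A sketch, treating cluster 0 as the A15 cluster (which is an assumption here):

#include <stdint.h>
#include <stdio.h>

#define A15_MASK(cpu) (1u << (cpu))
#define A7_MASK(cpu)  (1u << (3 + (cpu)))

static uint32_t standbywfi_mask(unsigned int cpu, unsigned int cluster)
{
        return cluster == 0 ? A15_MASK(cpu) : A7_MASK(cpu);
}

int main(void)
{
        printf("A15 cpu1 mask: 0x%x\n", standbywfi_mask(1, 0));  /* bit 1 */
        printf("A7  cpu1 mask: 0x%x\n", standbywfi_mask(1, 1));  /* bit 4 */
        return 0;
}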
arch/arm/mach-versatile/spc.c
547
int cpu, cluster;
arch/arm/mach-versatile/spc.c
559
for_each_possible_cpu(cpu) {
arch/arm/mach-versatile/spc.c
560
struct device *cpu_dev = get_cpu_device(cpu);
arch/arm/mach-versatile/spc.c
562
pr_warn("failed to get cpu%d device\n", cpu);
arch/arm/mach-versatile/spc.c
567
pr_warn("failed to register cpu%d clock\n", cpu);
arch/arm/mach-versatile/spc.c
571
pr_warn("failed to register cpu%d clock lookup\n", cpu);
arch/arm/mach-versatile/spc.c
580
pr_warn("failed to initialise cpu%d opp table\n", cpu);
arch/arm/mach-versatile/spc.c
583
pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
arch/arm/mach-versatile/spc.h
13
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
arch/arm/mach-versatile/spc.h
14
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
arch/arm/mach-versatile/spc.h
16
int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster);
arch/arm/mach-versatile/tc2_pm.c
112
static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
arch/arm/mach-versatile/tc2_pm.c
115
RESET_A7_NCORERESET(cpu)
arch/arm/mach-versatile/tc2_pm.c
116
: RESET_A15_NCORERESET(cpu);
arch/arm/mach-versatile/tc2_pm.c
124
static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
arch/arm/mach-versatile/tc2_pm.c
128
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
arch/arm/mach-versatile/tc2_pm.c
129
BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
arch/arm/mach-versatile/tc2_pm.c
133
__func__, cpu, cluster,
arch/arm/mach-versatile/tc2_pm.c
144
if (tc2_core_in_reset(cpu, cluster) ||
arch/arm/mach-versatile/tc2_pm.c
145
ve_spc_cpu_in_wfi(cpu, cluster))
arch/arm/mach-versatile/tc2_pm.c
155
static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
arch/arm/mach-versatile/tc2_pm.c
157
ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point));
arch/arm/mach-versatile/tc2_pm.c
160
static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
arch/arm/mach-versatile/tc2_pm.c
162
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
arch/arm/mach-versatile/tc2_pm.c
163
BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
arch/arm/mach-versatile/tc2_pm.c
164
ve_spc_cpu_wakeup_irq(cluster, cpu, false);
arch/arm/mach-versatile/tc2_pm.c
165
ve_spc_set_resume_addr(cluster, cpu, 0);
arch/arm/mach-versatile/tc2_pm.c
202
unsigned int mpidr, cpu, cluster;
arch/arm/mach-versatile/tc2_pm.c
243
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
arch/arm/mach-versatile/tc2_pm.c
245
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
arch/arm/mach-versatile/tc2_pm.c
246
if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
arch/arm/mach-versatile/tc2_pm.c
31
#define RESET_A15_NCORERESET(cpu) (1 << (2 + (cpu)))
arch/arm/mach-versatile/tc2_pm.c
32
#define RESET_A7_NCORERESET(cpu) (1 << (16 + (cpu)))
arch/arm/mach-versatile/tc2_pm.c
46
static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
arch/arm/mach-versatile/tc2_pm.c
48
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
arch/arm/mach-versatile/tc2_pm.c
49
if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
arch/arm/mach-versatile/tc2_pm.c
51
ve_spc_set_resume_addr(cluster, cpu,
arch/arm/mach-versatile/tc2_pm.c
53
ve_spc_cpu_wakeup_irq(cluster, cpu, true);
arch/arm/mach-versatile/tc2_pm.c
66
static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
arch/arm/mach-versatile/tc2_pm.c
68
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
arch/arm/mach-versatile/tc2_pm.c
69
BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
arch/arm/mach-versatile/tc2_pm.c
70
ve_spc_cpu_wakeup_irq(cluster, cpu, true);
arch/arm/mach-zynq/common.h
13
extern void zynq_slcr_cpu_stop(int cpu);
arch/arm/mach-zynq/common.h
14
extern void zynq_slcr_cpu_start(int cpu);
arch/arm/mach-zynq/common.h
15
extern bool zynq_slcr_cpu_state_read(int cpu);
arch/arm/mach-zynq/common.h
16
extern void zynq_slcr_cpu_state_write(int cpu, bool die);
arch/arm/mach-zynq/common.h
23
extern int zynq_cpun_start(u32 address, int cpu);
arch/arm/mach-zynq/platsmp.c
115
static void zynq_secondary_init(unsigned int cpu)
arch/arm/mach-zynq/platsmp.c
121
static int zynq_cpu_kill(unsigned cpu)
arch/arm/mach-zynq/platsmp.c
125
while (zynq_slcr_cpu_state_read(cpu))
arch/arm/mach-zynq/platsmp.c
129
zynq_slcr_cpu_stop(cpu);
arch/arm/mach-zynq/platsmp.c
140
static void zynq_cpu_die(unsigned int cpu)
arch/arm/mach-zynq/platsmp.c
142
zynq_slcr_cpu_state_write(cpu, true);
arch/arm/mach-zynq/platsmp.c
30
int zynq_cpun_start(u32 address, int cpu)
arch/arm/mach-zynq/platsmp.c
34
u32 phy_cpuid = cpu_logical_map(cpu);
arch/arm/mach-zynq/platsmp.c
78
pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address);
arch/arm/mach-zynq/platsmp.c
84
static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm/mach-zynq/platsmp.c
86
return zynq_cpun_start(__pa_symbol(secondary_startup_arm), cpu);
arch/arm/mach-zynq/slcr.c
121
void zynq_slcr_cpu_start(int cpu)
arch/arm/mach-zynq/slcr.c
126
reg &= ~(SLCR_A9_CPU_RST << cpu);
arch/arm/mach-zynq/slcr.c
128
reg &= ~(SLCR_A9_CPU_CLKSTOP << cpu);
arch/arm/mach-zynq/slcr.c
131
zynq_slcr_cpu_state_write(cpu, false);
arch/arm/mach-zynq/slcr.c
138
void zynq_slcr_cpu_stop(int cpu)
arch/arm/mach-zynq/slcr.c
143
reg |= (SLCR_A9_CPU_CLKSTOP | SLCR_A9_CPU_RST) << cpu;
arch/arm/mach-zynq/slcr.c
156
bool zynq_slcr_cpu_state_read(int cpu)
arch/arm/mach-zynq/slcr.c
161
state &= 1 << (31 - cpu);
arch/arm/mach-zynq/slcr.c
174
void zynq_slcr_cpu_state_write(int cpu, bool die)
arch/arm/mach-zynq/slcr.c
179
mask = 1 << (31 - cpu);
arch/arm/mm/cache-b15-rac.c
148
unsigned int cpu;
arch/arm/mm/cache-b15-rac.c
151
for_each_possible_cpu(cpu)
arch/arm/mm/cache-b15-rac.c
152
enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT));
arch/arm/mm/cache-b15-rac.c
218
static int b15_rac_dying_cpu(unsigned int cpu)
arch/arm/mm/cache-b15-rac.c
240
static int b15_rac_dead_cpu(unsigned int cpu)
arch/arm/mm/cache-b15-rac.c
297
int ret = 0, cpu;
arch/arm/mm/cache-b15-rac.c
358
for_each_possible_cpu(cpu)
arch/arm/mm/cache-b15-rac.c
359
en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
arch/arm/mm/cache-l2x0-pmu.c
305
if (event->cpu < 0)
arch/arm/mm/cache-l2x0-pmu.c
316
event->cpu = cpumask_first(&pmu_cpu);
arch/arm/mm/cache-l2x0-pmu.c
424
static int l2x0_pmu_offline_cpu(unsigned int cpu)
arch/arm/mm/cache-l2x0-pmu.c
428
if (!cpumask_test_and_clear_cpu(cpu, &pmu_cpu))
arch/arm/mm/cache-l2x0-pmu.c
431
target = cpumask_any_but(cpu_online_mask, cpu);
arch/arm/mm/cache-l2x0-pmu.c
435
perf_pmu_migrate_context(l2x0_pmu, cpu, target);
arch/arm/mm/cache-l2x0.c
596
static int l2c310_starting_cpu(unsigned int cpu)
arch/arm/mm/cache-l2x0.c
602
static int l2c310_dying_cpu(unsigned int cpu)
arch/arm/mm/cache-uniphier.c
232
unsigned int cpu;
arch/arm/mm/cache-uniphier.c
234
for_each_possible_cpu(cpu)
arch/arm/mm/cache-uniphier.c
235
writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
arch/arm/mm/context.c
136
static void flush_context(unsigned int cpu)
arch/arm/mm/context.c
167
int cpu;
arch/arm/mm/context.c
179
for_each_possible_cpu(cpu) {
arch/arm/mm/context.c
180
if (per_cpu(reserved_asids, cpu) == asid) {
arch/arm/mm/context.c
182
per_cpu(reserved_asids, cpu) = newasid;
arch/arm/mm/context.c
189
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
arch/arm/mm/context.c
227
flush_context(cpu);
arch/arm/mm/context.c
240
unsigned int cpu = smp_processor_id();
arch/arm/mm/context.c
254
&& atomic64_xchg(&per_cpu(active_asids, cpu), asid))
arch/arm/mm/context.c
261
asid = new_context(mm, cpu);
arch/arm/mm/context.c
265
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
arch/arm/mm/context.c
270
atomic64_set(&per_cpu(active_asids, cpu), asid);
arch/arm/mm/context.c
271
cpumask_set_cpu(cpu, mm_cpumask(mm));
arch/arm/mm/context.c
54
int cpu;
arch/arm/mm/context.c
60
for_each_online_cpu(cpu) {
arch/arm/mm/context.c
61
if (cpu == this_cpu)
arch/arm/mm/context.c
67
asid = per_cpu(active_asids, cpu).counter;
arch/arm/mm/context.c
69
asid = per_cpu(reserved_asids, cpu);
arch/arm/mm/context.c
71
cpumask_set_cpu(cpu, mask);
arch/arm/mm/proc-v7-bugs.c
69
int cpu = smp_processor_id();
arch/arm/mm/proc-v7-bugs.c
71
if (per_cpu(harden_branch_predictor_fn, cpu))
arch/arm/mm/proc-v7-bugs.c
76
per_cpu(harden_branch_predictor_fn, cpu) =
arch/arm/mm/proc-v7-bugs.c
82
per_cpu(harden_branch_predictor_fn, cpu) =
arch/arm/mm/proc-v7-bugs.c
88
per_cpu(harden_branch_predictor_fn, cpu) =
arch/arm/mm/proc-v7-bugs.c
95
per_cpu(harden_branch_predictor_fn, cpu) =
arch/arm/vfp/vfpmodule.c
104
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
arch/arm/vfp/vfpmodule.c
106
if (vfp_state_in_hw(cpu, thread)) {
arch/arm/vfp/vfpmodule.c
108
vfp_current_hw_state[cpu] = NULL;
arch/arm/vfp/vfpmodule.c
111
thread->vfpstate.hard.cpu = NR_CPUS;
arch/arm/vfp/vfpmodule.c
121
unsigned int cpu;
arch/arm/vfp/vfpmodule.c
131
cpu = get_cpu();
arch/arm/vfp/vfpmodule.c
132
if (vfp_current_hw_state[cpu] == vfp)
arch/arm/vfp/vfpmodule.c
133
vfp_current_hw_state[cpu] = NULL;
arch/arm/vfp/vfpmodule.c
142
vfp->hard.cpu = NR_CPUS;
arch/arm/vfp/vfpmodule.c
150
unsigned int cpu = get_cpu();
arch/arm/vfp/vfpmodule.c
152
if (vfp_current_hw_state[cpu] == vfp)
arch/arm/vfp/vfpmodule.c
153
vfp_current_hw_state[cpu] = NULL;
arch/arm/vfp/vfpmodule.c
164
thread->vfpstate.hard.cpu = NR_CPUS;
arch/arm/vfp/vfpmodule.c
192
unsigned int cpu;
arch/arm/vfp/vfpmodule.c
200
cpu = thread->cpu;
arch/arm/vfp/vfpmodule.c
207
if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
arch/arm/vfp/vfpmodule.c
208
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
arch/arm/vfp/vfpmodule.c
492
} else if (vfp_current_hw_state[ti->cpu]) {
arch/arm/vfp/vfpmodule.c
495
vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
arch/arm/vfp/vfpmodule.c
501
vfp_current_hw_state[ti->cpu] = NULL;
arch/arm/vfp/vfpmodule.c
568
unsigned int cpu = get_cpu();
arch/arm/vfp/vfpmodule.c
570
vfp_force_reload(cpu, thread);
arch/arm/vfp/vfpmodule.c
664
static int vfp_dying_cpu(unsigned int cpu)
arch/arm/vfp/vfpmodule.c
666
vfp_current_hw_state[cpu] = NULL;
arch/arm/vfp/vfpmodule.c
742
if (!vfp_state_in_hw(ti->cpu, ti)) {
arch/arm/vfp/vfpmodule.c
744
vfp_current_hw_state[ti->cpu] != NULL) {
arch/arm/vfp/vfpmodule.c
750
vfp_save_state(vfp_current_hw_state[ti->cpu],
arch/arm/vfp/vfpmodule.c
759
vfp_current_hw_state[ti->cpu] = &ti->vfpstate;
arch/arm/vfp/vfpmodule.c
765
ti->vfpstate.hard.cpu = ti->cpu;
arch/arm/vfp/vfpmodule.c
869
unsigned int cpu;
arch/arm/vfp/vfpmodule.c
881
cpu = __smp_processor_id();
arch/arm/vfp/vfpmodule.c
890
if (vfp_state_in_hw(cpu, thread))
arch/arm/vfp/vfpmodule.c
893
else if (vfp_current_hw_state[cpu] != NULL)
arch/arm/vfp/vfpmodule.c
894
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
arch/arm/vfp/vfpmodule.c
896
vfp_current_hw_state[cpu] = NULL;
arch/arm/vfp/vfpmodule.c
90
static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
arch/arm/vfp/vfpmodule.c
93
if (thread->vfpstate.hard.cpu != cpu)
arch/arm/vfp/vfpmodule.c
96
return vfp_current_hw_state[cpu] == &thread->vfpstate;
arch/arm/xen/enlighten.c
142
static int xen_starting_cpu(unsigned int cpu)
arch/arm/xen/enlighten.c
153
if (per_cpu(xen_vcpu, cpu) != NULL)
arch/arm/xen/enlighten.c
156
pr_info("Xen: initializing cpu%d\n", cpu);
arch/arm/xen/enlighten.c
157
vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
arch/arm/xen/enlighten.c
162
err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
arch/arm/xen/enlighten.c
165
per_cpu(xen_vcpu, cpu) = vcpup;
arch/arm/xen/enlighten.c
172
static int xen_dying_cpu(unsigned int cpu)
arch/arm/xen/enlighten.c
439
int rc, cpu;
arch/arm/xen/enlighten.c
493
for_each_possible_cpu(cpu)
arch/arm/xen/enlighten.c
494
per_cpu(xen_vcpu_id, cpu) = cpu;
arch/arm/xen/enlighten.c
533
static int xen_starting_runstate_cpu(unsigned int cpu)
arch/arm/xen/enlighten.c
535
xen_setup_runstate_info(cpu);
arch/arm64/crypto/ghash-ce-glue.c
529
MODULE_DEVICE_TABLE(cpu, ghash_cpu_feature);
arch/arm64/crypto/sm4-ce-gcm-glue.c
254
MODULE_DEVICE_TABLE(cpu, sm4_ce_gcm_cpu_feature);
arch/arm64/include/asm/acpi.h
102
#define cpu_physical_id(cpu) cpu_logical_map(cpu)
arch/arm64/include/asm/acpi.h
116
struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu);
arch/arm64/include/asm/acpi.h
117
static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
arch/arm64/include/asm/acpi.h
119
return acpi_cpu_get_madt_gicc(cpu)->uid;
arch/arm64/include/asm/acpi.h
124
int cpu;
arch/arm64/include/asm/acpi.h
126
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
arch/arm64/include/asm/acpi.h
127
if (acpi_cpu_get_madt_gicc(cpu) &&
arch/arm64/include/asm/acpi.h
128
uid == get_acpi_id_for_cpu(cpu))
arch/arm64/include/asm/acpi.h
129
return cpu;
arch/arm64/include/asm/acpi.h
143
bool acpi_parking_protocol_valid(int cpu);
arch/arm64/include/asm/acpi.h
145
acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor);
arch/arm64/include/asm/acpi.h
147
static inline bool acpi_parking_protocol_valid(int cpu) { return false; }
arch/arm64/include/asm/acpi.h
149
acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor)
arch/arm64/include/asm/acpi.h
153
static __always_inline const char *acpi_get_enable_method(int cpu)
arch/arm64/include/asm/acpi.h
158
if (acpi_parking_protocol_valid(cpu))
arch/arm64/include/asm/acpi.h
181
int acpi_numa_get_nid(unsigned int cpu);
arch/arm64/include/asm/acpi.h
185
static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
arch/arm64/include/asm/cpu.h
79
void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
arch/arm64/include/asm/cpu_ops.h
42
bool (*cpu_can_disable)(unsigned int cpu);
arch/arm64/include/asm/cpu_ops.h
43
int (*cpu_disable)(unsigned int cpu);
arch/arm64/include/asm/cpu_ops.h
44
void (*cpu_die)(unsigned int cpu);
arch/arm64/include/asm/cpu_ops.h
45
int (*cpu_kill)(unsigned int cpu);
arch/arm64/include/asm/cpu_ops.h
49
int __init init_cpu_ops(int cpu);
arch/arm64/include/asm/cpu_ops.h
50
extern const struct cpu_operations *get_cpu_ops(int cpu);
arch/arm64/include/asm/cpufeature.h
931
extern bool cpu_has_amu_feat(int cpu);
arch/arm64/include/asm/cpufeature.h
933
static inline bool cpu_has_amu_feat(int cpu)
arch/arm64/include/asm/kvm_asm.h
119
#define per_cpu_ptr_nvhe_sym(sym, cpu) \
arch/arm64/include/asm/kvm_asm.h
122
base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]; \
arch/arm64/include/asm/kvm_asm.h
137
#define per_cpu_ptr_hyp_sym(sym, cpu) (&__nvhe_undefined_symbol)
arch/arm64/include/asm/kvm_asm.h
148
#define per_cpu_ptr_hyp_sym(sym, cpu) (&__vhe_undefined_symbol)
arch/arm64/include/asm/kvm_asm.h
171
#define per_cpu_ptr_hyp_sym(sym, cpu) (is_kernel_in_hyp_mode() \
arch/arm64/include/asm/kvm_asm.h
172
? per_cpu_ptr(&sym, cpu) \
arch/arm64/include/asm/kvm_asm.h
173
: per_cpu_ptr_nvhe_sym(sym, cpu))
arch/arm64/include/asm/kvm_asm.h
314
get_host_ctxt \ctxt, \vcpu
arch/arm64/include/asm/kvm_asm.h
315
ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
arch/arm64/include/asm/kvm_asm.h
319
adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
arch/arm64/include/asm/kvm_asm.h
320
ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
arch/arm64/include/asm/kvm_asm.h
325
str \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
arch/arm64/include/asm/percpu.h
261
extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);
arch/arm64/include/asm/percpu.h
263
#define per_cpu_offset(cpu) __hyp_per_cpu_offset((cpu))
arch/arm64/include/asm/preempt.h
25
#define init_idle_preempt_count(p, cpu) do { \
arch/arm64/include/asm/smp.h
103
extern void arch_send_wakeup_ipi(unsigned int cpu);
arch/arm64/include/asm/smp.h
105
static inline void arch_send_wakeup_ipi(unsigned int cpu)
arch/arm64/include/asm/smp.h
113
static inline void __cpu_die(unsigned int cpu) { }
arch/arm64/include/asm/smp.h
32
#define raw_smp_processor_id() (current_thread_info()->cpu)
arch/arm64/include/asm/smp.h
38
extern u64 cpu_logical_map(unsigned int cpu);
arch/arm64/include/asm/smp.h
40
static inline void set_cpu_logical_map(unsigned int cpu, u64 hwid)
arch/arm64/include/asm/smp.h
42
__cpu_logical_map[cpu] = hwid;
arch/arm64/include/asm/smp.h
99
extern void arch_send_call_function_single_ipi(int cpu);
arch/arm64/include/asm/smp_plat.h
37
int cpu;
arch/arm64/include/asm/smp_plat.h
38
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
arch/arm64/include/asm/smp_plat.h
39
if (cpu_logical_map(cpu) == mpidr)
arch/arm64/include/asm/smp_plat.h
40
return cpu;
arch/arm64/include/asm/spinlock.h
22
static inline bool vcpu_is_preempted(int cpu)
arch/arm64/include/asm/thread_info.h
45
u32 cpu;
arch/arm64/kernel/acpi.c
455
int acpi_unmap_cpu(int cpu)
arch/arm64/kernel/acpi_numa.c
32
int __init acpi_numa_get_nid(unsigned int cpu)
arch/arm64/kernel/acpi_numa.c
34
return acpi_early_node_map[cpu];
arch/arm64/kernel/acpi_numa.c
41
int cpu, pxm, node;
arch/arm64/kernel/acpi_numa.c
62
cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid);
arch/arm64/kernel/acpi_numa.c
63
if (cpu < 0)
arch/arm64/kernel/acpi_numa.c
66
acpi_early_node_map[cpu] = node;
arch/arm64/kernel/acpi_numa.c
68
cpu_logical_map(cpu), node);
arch/arm64/kernel/acpi_parking_protocol.c
106
arch_send_wakeup_ipi(cpu);
arch/arm64/kernel/acpi_parking_protocol.c
113
int cpu = smp_processor_id();
arch/arm64/kernel/acpi_parking_protocol.c
114
struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
arch/arm64/kernel/acpi_parking_protocol.c
29
void __init acpi_set_mailbox_entry(int cpu,
arch/arm64/kernel/acpi_parking_protocol.c
32
struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
arch/arm64/kernel/acpi_parking_protocol.c
39
bool acpi_parking_protocol_valid(int cpu)
arch/arm64/kernel/acpi_parking_protocol.c
41
struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
arch/arm64/kernel/acpi_parking_protocol.c
46
static int acpi_parking_protocol_cpu_init(unsigned int cpu)
arch/arm64/kernel/acpi_parking_protocol.c
49
cpu_mailbox_entries[cpu].mailbox_addr);
arch/arm64/kernel/acpi_parking_protocol.c
54
static int acpi_parking_protocol_cpu_prepare(unsigned int cpu)
arch/arm64/kernel/acpi_parking_protocol.c
59
static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
arch/arm64/kernel/acpi_parking_protocol.c
61
struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
arch/arm64/kernel/armv8_deprecated.c
451
static int run_all_insn_set_hw_mode(unsigned int cpu)
arch/arm64/kernel/armv8_deprecated.c
470
cpu, insn->name);
arch/arm64/kernel/asm-offsets.c
29
DEFINE(TSK_TI_CPU, offsetof(struct task_struct, thread_info.cpu));
arch/arm64/kernel/cacheinfo.c
103
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/arm64/kernel/cacheinfo.c
59
int early_cache_level(unsigned int cpu)
arch/arm64/kernel/cacheinfo.c
61
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/arm64/kernel/cacheinfo.c
68
int init_cache_level(unsigned int cpu)
arch/arm64/kernel/cacheinfo.c
72
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/arm64/kernel/cacheinfo.c
77
fw_level = of_find_last_cache_level(cpu);
arch/arm64/kernel/cacheinfo.c
79
ret = acpi_get_cache_info(cpu, &fw_level, NULL);
arch/arm64/kernel/cacheinfo.c
99
int populate_cache_leaves(unsigned int cpu)
arch/arm64/kernel/cpu_ops.c
101
const char *enable_method = cpu_read_enable_method(cpu);
arch/arm64/kernel/cpu_ops.c
106
cpu_ops[cpu] = cpu_get_ops(enable_method);
arch/arm64/kernel/cpu_ops.c
107
if (!cpu_ops[cpu]) {
arch/arm64/kernel/cpu_ops.c
115
const struct cpu_operations *get_cpu_ops(int cpu)
arch/arm64/kernel/cpu_ops.c
117
return cpu_ops[cpu];
arch/arm64/kernel/cpu_ops.c
55
static const char *__init cpu_read_enable_method(int cpu)
arch/arm64/kernel/cpu_ops.c
60
struct device_node *dn = of_get_cpu_node(cpu, NULL);
arch/arm64/kernel/cpu_ops.c
63
if (!cpu)
arch/arm64/kernel/cpu_ops.c
75
if (cpu != 0)
arch/arm64/kernel/cpu_ops.c
81
enable_method = acpi_get_enable_method(cpu);
arch/arm64/kernel/cpu_ops.c
89
if (cpu != 0)
arch/arm64/kernel/cpu_ops.c
99
int __init init_cpu_ops(int cpu)
arch/arm64/kernel/cpufeature.c
1236
static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
arch/arm64/kernel/cpufeature.c
1247
regp->name, boot, cpu, val);
arch/arm64/kernel/cpufeature.c
1286
static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
arch/arm64/kernel/cpufeature.c
1305
taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1307
taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1309
taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1311
taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1313
taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1315
taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1317
taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1319
taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1321
taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1329
taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1331
taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1333
taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1335
taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1337
taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1339
taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1341
taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1343
taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1345
taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1347
taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1349
taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1351
taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1362
void update_cpu_features(int cpu,
arch/arm64/kernel/cpufeature.c
1373
taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
arch/arm64/kernel/cpufeature.c
1381
taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
arch/arm64/kernel/cpufeature.c
1385
taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
arch/arm64/kernel/cpufeature.c
1394
taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1396
taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1402
taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1404
taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1406
taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1408
taint |= check_update_ftr_reg(SYS_ID_AA64ISAR3_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1416
taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1418
taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1420
taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1422
taint |= check_update_ftr_reg(SYS_ID_AA64MMFR3_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1424
taint |= check_update_ftr_reg(SYS_ID_AA64MMFR4_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1427
taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1429
taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1431
taint |= check_update_ftr_reg(SYS_ID_AA64PFR2_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1434
taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1437
taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1440
taint |= check_update_ftr_reg(SYS_ID_AA64FPFR0_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1468
taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1479
taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu,
arch/arm64/kernel/cpufeature.c
1493
taint |= update_32bit_cpu_features(cpu, &info->aarch32,
arch/arm64/kernel/cpufeature.c
2042
bool cpu_has_amu_feat(int cpu)
arch/arm64/kernel/cpufeature.c
2044
return cpumask_test_cpu(cpu, &amu_cpus);
arch/arm64/kernel/cpufeature.c
4008
static int enable_mismatched_32bit_el0(unsigned int cpu)
arch/arm64/kernel/cpufeature.c
4017
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
arch/arm64/kernel/cpufeature.c
4021
if (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN))
arch/arm64/kernel/cpufeature.c
4022
pr_info("Treating domain isolated CPU %u as 64-bit only\n", cpu);
arch/arm64/kernel/cpufeature.c
4028
cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
arch/arm64/kernel/cpufeature.c
4043
lucky_winner = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask,
arch/arm64/kernel/cpufeature.c
4049
cpu, lucky_winner);
arch/arm64/kernel/cpuinfo.c
217
int cpu = m->index;
arch/arm64/kernel/cpuinfo.c
227
seq_printf(m, "processor\t: %d\n", cpu);
arch/arm64/kernel/cpuinfo.c
359
static int cpuid_cpu_online(unsigned int cpu)
arch/arm64/kernel/cpuinfo.c
363
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
arch/arm64/kernel/cpuinfo.c
365
dev = get_cpu_device(cpu);
arch/arm64/kernel/cpuinfo.c
382
static int cpuid_cpu_offline(unsigned int cpu)
arch/arm64/kernel/cpuinfo.c
385
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
arch/arm64/kernel/cpuinfo.c
387
dev = get_cpu_device(cpu);
arch/arm64/kernel/cpuinfo.c
400
int cpu, ret;
arch/arm64/kernel/cpuinfo.c
402
for_each_possible_cpu(cpu) {
arch/arm64/kernel/cpuinfo.c
403
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
arch/arm64/kernel/cpuinfo.c
420
unsigned int cpu = smp_processor_id();
arch/arm64/kernel/cpuinfo.c
433
pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str(l1ip), cpu);
arch/arm64/kernel/debug-monitors.c
129
static int clear_os_lock(unsigned int cpu)
arch/arm64/kernel/fpsimd.c
2010
static int fpsimd_cpu_dead(unsigned int cpu)
arch/arm64/kernel/fpsimd.c
2012
per_cpu(fpsimd_last_state.st, cpu) = NULL;
arch/arm64/kernel/hw_breakpoint.c
936
static int hw_breakpoint_reset(unsigned int cpu)
arch/arm64/kernel/irq.c
44
int cpu;
arch/arm64/kernel/irq.c
49
for_each_possible_cpu(cpu)
arch/arm64/kernel/irq.c
50
per_cpu(irq_shadow_call_stack_ptr, cpu) =
arch/arm64/kernel/irq.c
51
scs_alloc(early_cpu_to_node(cpu));
arch/arm64/kernel/irq.c
56
int cpu;
arch/arm64/kernel/irq.c
59
for_each_possible_cpu(cpu) {
arch/arm64/kernel/irq.c
60
p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, early_cpu_to_node(cpu));
arch/arm64/kernel/irq.c
61
per_cpu(irq_stack_ptr, cpu) = p;
arch/arm64/kernel/mte.c
615
unsigned int cpu;
arch/arm64/kernel/mte.c
620
for_each_possible_cpu(cpu) {
arch/arm64/kernel/mte.c
621
per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
arch/arm64/kernel/mte.c
622
device_create_file(get_cpu_device(cpu),
arch/arm64/kernel/paravirt.c
44
static u64 para_steal_clock(int cpu)
arch/arm64/kernel/paravirt.c
50
reg = per_cpu_ptr(&stolen_time_region, cpu);
arch/arm64/kernel/paravirt.c
69
static int stolen_time_cpu_down_prepare(unsigned int cpu)
arch/arm64/kernel/paravirt.c
85
static int stolen_time_cpu_online(unsigned int cpu)
arch/arm64/kernel/psci.c
107
cpu, err);
arch/arm64/kernel/psci.c
24
static int __init cpu_psci_cpu_init(unsigned int cpu)
arch/arm64/kernel/psci.c
29
static int __init cpu_psci_cpu_prepare(unsigned int cpu)
arch/arm64/kernel/psci.c
32
pr_err("no cpu_on method, not booting CPU%d\n", cpu);
arch/arm64/kernel/psci.c
39
static int cpu_psci_cpu_boot(unsigned int cpu)
arch/arm64/kernel/psci.c
42
int err = psci_ops.cpu_on(cpu_logical_map(cpu), pa_secondary_entry);
arch/arm64/kernel/psci.c
44
pr_err("failed to boot CPU%d (%d)\n", cpu, err);
arch/arm64/kernel/psci.c
50
static bool cpu_psci_cpu_can_disable(unsigned int cpu)
arch/arm64/kernel/psci.c
52
return !psci_tos_resident_on(cpu);
arch/arm64/kernel/psci.c
55
static int cpu_psci_cpu_disable(unsigned int cpu)
arch/arm64/kernel/psci.c
62
if (psci_tos_resident_on(cpu))
arch/arm64/kernel/psci.c
68
static void cpu_psci_cpu_die(unsigned int cpu)
arch/arm64/kernel/psci.c
80
static int cpu_psci_cpu_kill(unsigned int cpu)
arch/arm64/kernel/psci.c
96
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
arch/arm64/kernel/psci.c
98
pr_info("CPU%d killed (polled %d ms)\n", cpu,
arch/arm64/kernel/sdei.c
104
static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu)
arch/arm64/kernel/sdei.c
108
s = per_cpu(*ptr, cpu);
arch/arm64/kernel/sdei.c
110
per_cpu(*ptr, cpu) = NULL;
arch/arm64/kernel/sdei.c
117
int cpu;
arch/arm64/kernel/sdei.c
119
for_each_possible_cpu(cpu) {
arch/arm64/kernel/sdei.c
120
_free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
arch/arm64/kernel/sdei.c
121
_free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
arch/arm64/kernel/sdei.c
125
static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu)
arch/arm64/kernel/sdei.c
129
s = scs_alloc(cpu_to_node(cpu));
arch/arm64/kernel/sdei.c
132
per_cpu(*ptr, cpu) = s;
arch/arm64/kernel/sdei.c
139
int cpu;
arch/arm64/kernel/sdei.c
145
for_each_possible_cpu(cpu) {
arch/arm64/kernel/sdei.c
146
err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
arch/arm64/kernel/sdei.c
149
err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
arch/arm64/kernel/sdei.c
51
static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
arch/arm64/kernel/sdei.c
55
p = per_cpu(*ptr, cpu);
arch/arm64/kernel/sdei.c
57
per_cpu(*ptr, cpu) = NULL;
arch/arm64/kernel/sdei.c
64
int cpu;
arch/arm64/kernel/sdei.c
66
for_each_possible_cpu(cpu) {
arch/arm64/kernel/sdei.c
67
_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
arch/arm64/kernel/sdei.c
68
_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
arch/arm64/kernel/sdei.c
72
static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
arch/arm64/kernel/sdei.c
76
p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
arch/arm64/kernel/sdei.c
79
per_cpu(*ptr, cpu) = p;
arch/arm64/kernel/sdei.c
86
int cpu;
arch/arm64/kernel/sdei.c
89
for_each_possible_cpu(cpu) {
arch/arm64/kernel/sdei.c
90
err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
arch/arm64/kernel/sdei.c
93
err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
arch/arm64/kernel/setup.c
101
return phys_id == cpu_logical_map(cpu);
arch/arm64/kernel/setup.c
276
u64 cpu_logical_map(unsigned int cpu)
arch/arm64/kernel/setup.c
278
return __cpu_logical_map[cpu];
arch/arm64/kernel/setup.c
380
static inline bool cpu_can_disable(unsigned int cpu)
arch/arm64/kernel/setup.c
383
const struct cpu_operations *ops = get_cpu_ops(cpu);
arch/arm64/kernel/setup.c
386
return ops->cpu_can_disable(cpu);
arch/arm64/kernel/setup.c
99
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
arch/arm64/kernel/smp.c
1005
kgdb_nmicallback(cpu, get_irq_regs());
arch/arm64/kernel/smp.c
1009
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
arch/arm64/kernel/smp.c
101
const struct cpu_operations *ops = get_cpu_ops(cpu);
arch/arm64/kernel/smp.c
104
return ops->cpu_boot(cpu);
arch/arm64/kernel/smp.c
1046
static void ipi_setup(int cpu)
arch/arm64/kernel/smp.c
1062
enable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
arch/arm64/kernel/smp.c
1068
static void ipi_teardown(int cpu)
arch/arm64/kernel/smp.c
1084
disable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
arch/arm64/kernel/smp.c
1092
int err, irq, cpu;
arch/arm64/kernel/smp.c
1104
for_each_possible_cpu(cpu)
arch/arm64/kernel/smp.c
1105
get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
arch/arm64/kernel/smp.c
111
int __cpu_up(unsigned int cpu, struct task_struct *idle)
arch/arm64/kernel/smp.c
1112
for (int cpu = 0; cpu < ncpus; cpu++) {
arch/arm64/kernel/smp.c
1115
irq = ipi_irq_base + (cpu * nr_ipi) + ipi;
arch/arm64/kernel/smp.c
1117
err = irq_force_affinity(irq, cpumask_of(cpu));
arch/arm64/kernel/smp.c
1126
get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
arch/arm64/kernel/smp.c
1151
void arch_smp_send_reschedule(int cpu)
arch/arm64/kernel/smp.c
1153
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
arch/arm64/kernel/smp.c
1157
void arch_send_wakeup_ipi(unsigned int cpu)
arch/arm64/kernel/smp.c
1163
smp_send_reschedule(cpu);
arch/arm64/kernel/smp.c
124
ret = boot_secondary(cpu, idle);
arch/arm64/kernel/smp.c
127
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
arch/arm64/kernel/smp.c
137
if (cpu_online(cpu))
arch/arm64/kernel/smp.c
140
pr_crit("CPU%u: failed to come online\n", cpu);
arch/arm64/kernel/smp.c
149
cpu, status);
arch/arm64/kernel/smp.c
153
if (!op_cpu_kill(cpu)) {
arch/arm64/kernel/smp.c
154
pr_crit("CPU%u: died during early boot\n", cpu);
arch/arm64/kernel/smp.c
157
pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
arch/arm64/kernel/smp.c
160
pr_crit("CPU%u: is stuck in kernel\n", cpu);
arch/arm64/kernel/smp.c
162
pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
arch/arm64/kernel/smp.c
165
cpu, PAGE_SIZE / SZ_1K);
arch/arm64/kernel/smp.c
170
panic("CPU%u detected unsupported configuration\n", cpu);
arch/arm64/kernel/smp.c
200
unsigned int cpu = smp_processor_id();
arch/arm64/kernel/smp.c
218
rcutree_report_cpu_starting(cpu);
arch/arm64/kernel/smp.c
228
ops = get_cpu_ops(cpu);
arch/arm64/kernel/smp.c
236
store_cpu_topology(cpu);
arch/arm64/kernel/smp.c
241
notify_cpu_starting(cpu);
arch/arm64/kernel/smp.c
243
ipi_setup(cpu);
arch/arm64/kernel/smp.c
245
numa_add_cpu(cpu);
arch/arm64/kernel/smp.c
253
cpu, (unsigned long)mpidr,
arch/arm64/kernel/smp.c
256
set_cpu_online(cpu, true);
arch/arm64/kernel/smp.c
275
static int op_cpu_disable(unsigned int cpu)
arch/arm64/kernel/smp.c
277
const struct cpu_operations *ops = get_cpu_ops(cpu);
arch/arm64/kernel/smp.c
291
return ops->cpu_disable(cpu);
arch/arm64/kernel/smp.c
301
unsigned int cpu = smp_processor_id();
arch/arm64/kernel/smp.c
304
ret = op_cpu_disable(cpu);
arch/arm64/kernel/smp.c
308
remove_cpu_topology(cpu);
arch/arm64/kernel/smp.c
309
numa_remove_cpu(cpu);
arch/arm64/kernel/smp.c
315
set_cpu_online(cpu, false);
arch/arm64/kernel/smp.c
316
ipi_teardown(cpu);
arch/arm64/kernel/smp.c
326
static int op_cpu_kill(unsigned int cpu)
arch/arm64/kernel/smp.c
328
const struct cpu_operations *ops = get_cpu_ops(cpu);
arch/arm64/kernel/smp.c
338
return ops->cpu_kill(cpu);
arch/arm64/kernel/smp.c
345
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
arch/arm64/kernel/smp.c
349
pr_debug("CPU%u: shutdown\n", cpu);
arch/arm64/kernel/smp.c
357
err = op_cpu_kill(cpu);
arch/arm64/kernel/smp.c
359
pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
arch/arm64/kernel/smp.c
368
unsigned int cpu = smp_processor_id();
arch/arm64/kernel/smp.c
369
const struct cpu_operations *ops = get_cpu_ops(cpu);
arch/arm64/kernel/smp.c
383
ops->cpu_die(cpu);
arch/arm64/kernel/smp.c
389
static void __cpu_try_die(int cpu)
arch/arm64/kernel/smp.c
392
const struct cpu_operations *ops = get_cpu_ops(cpu);
arch/arm64/kernel/smp.c
395
ops->cpu_die(cpu);
arch/arm64/kernel/smp.c
405
int cpu = smp_processor_id();
arch/arm64/kernel/smp.c
407
pr_crit("CPU%d: will not boot\n", cpu);
arch/arm64/kernel/smp.c
410
set_cpu_present(cpu, 0);
arch/arm64/kernel/smp.c
415
__cpu_try_die(cpu);
arch/arm64/kernel/smp.c
474
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
arch/arm64/kernel/smp.c
478
for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
arch/arm64/kernel/smp.c
488
static int __init smp_cpu_setup(int cpu)
arch/arm64/kernel/smp.c
492
if (init_cpu_ops(cpu))
arch/arm64/kernel/smp.c
495
ops = get_cpu_ops(cpu);
arch/arm64/kernel/smp.c
496
if (ops->cpu_init(cpu))
arch/arm64/kernel/smp.c
499
set_cpu_possible(cpu, true);
arch/arm64/kernel/smp.c
507
int arch_register_cpu(int cpu)
arch/arm64/kernel/smp.c
509
acpi_handle acpi_handle = acpi_get_processor_handle(cpu);
arch/arm64/kernel/smp.c
510
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/arm64/kernel/smp.c
518
if (invalid_logical_cpuid(cpu) || !cpu_present(cpu)) {
arch/arm64/kernel/smp.c
528
c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
arch/arm64/kernel/smp.c
530
return register_cpu(c, cpu);
arch/arm64/kernel/smp.c
534
void arch_unregister_cpu(int cpu)
arch/arm64/kernel/smp.c
536
acpi_handle acpi_handle = acpi_get_processor_handle(cpu);
arch/arm64/kernel/smp.c
537
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/arm64/kernel/smp.c
551
if (cpu_present(cpu) && !(sta & ACPI_STA_DEVICE_PRESENT)) {
arch/arm64/kernel/smp.c
563
struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
arch/arm64/kernel/smp.c
565
return &cpu_madt_gicc[cpu];
arch/arm64/kernel/smp.c
776
unsigned int cpu;
arch/arm64/kernel/smp.c
798
for_each_possible_cpu(cpu) {
arch/arm64/kernel/smp.c
800
if (cpu == smp_processor_id())
arch/arm64/kernel/smp.c
803
ops = get_cpu_ops(cpu);
arch/arm64/kernel/smp.c
807
err = ops->cpu_prepare(cpu);
arch/arm64/kernel/smp.c
811
set_cpu_present(cpu, true);
arch/arm64/kernel/smp.c
812
numa_store_cpu_info(cpu);
arch/arm64/kernel/smp.c
82
static void ipi_setup(int cpu);
arch/arm64/kernel/smp.c
833
unsigned int cpu, i;
arch/arm64/kernel/smp.c
838
for_each_online_cpu(cpu)
arch/arm64/kernel/smp.c
839
seq_printf(p, "%10u ", irq_desc_kstat_cpu(get_ipi_desc(cpu, i), cpu));
arch/arm64/kernel/smp.c
85
static void ipi_teardown(int cpu);
arch/arm64/kernel/smp.c
852
void arch_send_call_function_single_ipi(int cpu)
arch/arm64/kernel/smp.c
854
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
arch/arm64/kernel/smp.c
86
static int op_cpu_kill(unsigned int cpu);
arch/arm64/kernel/smp.c
864
static void __noreturn local_cpu_stop(unsigned int cpu)
arch/arm64/kernel/smp.c
866
set_cpu_online(cpu, false);
arch/arm64/kernel/smp.c
88
static inline int op_cpu_kill(unsigned int cpu)
arch/arm64/kernel/smp.c
883
static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
arch/arm64/kernel/smp.c
896
crash_save_cpu(regs, cpu);
arch/arm64/kernel/smp.c
898
set_cpu_online(cpu, false);
arch/arm64/kernel/smp.c
903
__cpu_try_die(cpu);
arch/arm64/kernel/smp.c
914
unsigned int cpu;
arch/arm64/kernel/smp.c
919
for_each_cpu(cpu, mask)
arch/arm64/kernel/smp.c
920
__ipi_send_single(get_ipi_desc(cpu, nr), cpu);
arch/arm64/kernel/smp.c
943
int cpu;
arch/arm64/kernel/smp.c
945
for_each_online_cpu(cpu) {
arch/arm64/kernel/smp.c
947
if (cpu == this_cpu)
arch/arm64/kernel/smp.c
950
__ipi_send_single(get_ipi_desc(cpu, IPI_KGDB_ROUNDUP), cpu);
arch/arm64/kernel/smp.c
960
unsigned int cpu = smp_processor_id();
arch/arm64/kernel/smp.c
977
ipi_cpu_crash_stop(cpu, get_irq_regs());
arch/arm64/kernel/smp.c
980
local_cpu_stop(cpu);
arch/arm64/kernel/smp.c
99
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/arm64/kernel/smp_spin_table.c
107
static int smp_spin_table_cpu_boot(unsigned int cpu)
arch/arm64/kernel/smp_spin_table.c
112
write_pen_release(cpu_logical_map(cpu));
arch/arm64/kernel/smp_spin_table.c
43
static int smp_spin_table_cpu_init(unsigned int cpu)
arch/arm64/kernel/smp_spin_table.c
48
dn = of_get_cpu_node(cpu, NULL);
arch/arm64/kernel/smp_spin_table.c
56
&cpu_release_addr[cpu]);
arch/arm64/kernel/smp_spin_table.c
59
cpu);
arch/arm64/kernel/smp_spin_table.c
66
static int smp_spin_table_cpu_prepare(unsigned int cpu)
arch/arm64/kernel/smp_spin_table.c
71
if (!cpu_release_addr[cpu])
arch/arm64/kernel/smp_spin_table.c
80
release_addr = ioremap_cache(cpu_release_addr[cpu],
arch/arm64/kernel/suspend.c
46
unsigned int cpu = smp_processor_id();
arch/arm64/kernel/suspend.c
75
hw_breakpoint_restore(cpu);
arch/arm64/kernel/topology.c
114
WRITE_ONCE(per_cpu(arch_max_freq_scale, cpu), (unsigned long)ratio);
arch/arm64/kernel/topology.c
163
static __always_inline bool amu_fie_cpu_supported(unsigned int cpu)
arch/arm64/kernel/topology.c
166
cpumask_test_cpu(cpu, amu_fie_cpus);
arch/arm64/kernel/topology.c
171
unsigned int cpu = smp_processor_id();
arch/arm64/kernel/topology.c
173
if (!amu_fie_cpu_supported(cpu))
arch/arm64/kernel/topology.c
177
if (housekeeping_cpu(cpu, HK_TYPE_TICK) &&
arch/arm64/kernel/topology.c
178
time_is_before_jiffies(per_cpu(cpu_amu_samples.last_scale_update, cpu)))
arch/arm64/kernel/topology.c
184
int arch_freq_get_on_cpu(int cpu)
arch/arm64/kernel/topology.c
187
unsigned int start_cpu = cpu;
arch/arm64/kernel/topology.c
192
if (!amu_fie_cpu_supported(cpu) || !arch_scale_freq_ref(cpu))
arch/arm64/kernel/topology.c
197
amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu);
arch/arm64/kernel/topology.c
207
if (!housekeeping_cpu(cpu, HK_TYPE_TICK) ||
arch/arm64/kernel/topology.c
209
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
arch/arm64/kernel/topology.c
221
for_each_cpu_wrap(ref_cpu, policy->cpus, cpu + 1) {
arch/arm64/kernel/topology.c
237
cpu = ref_cpu;
arch/arm64/kernel/topology.c
247
scale = arch_scale_freq_capacity(cpu);
arch/arm64/kernel/topology.c
248
freq = scale * arch_scale_freq_ref(cpu);
arch/arm64/kernel/topology.c
255
int cpu;
arch/arm64/kernel/topology.c
262
for_each_cpu(cpu, cpus)
arch/arm64/kernel/topology.c
263
if (!freq_counters_valid(cpu))
arch/arm64/kernel/topology.c
306
static int cpuhp_topology_online(unsigned int cpu)
arch/arm64/kernel/topology.c
308
struct cpufreq_policy *policy = cpufreq_cpu_policy(cpu);
arch/arm64/kernel/topology.c
319
cpumask_test_cpu(cpu, amu_fie_cpus))
arch/arm64/kernel/topology.c
334
if (!freq_counters_valid(cpu)) {
arch/arm64/kernel/topology.c
341
cpumask_set_cpu(cpu, amu_fie_cpus);
arch/arm64/kernel/topology.c
343
topology_set_scale_freq_source(&amu_sfd, cpumask_of(cpu));
arch/arm64/kernel/topology.c
345
pr_debug("CPU[%u]: counter will be used for FIE.", cpu);
arch/arm64/kernel/topology.c
400
int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
arch/arm64/kernel/topology.c
405
if (!cpu_has_amu_feat(cpu))
arch/arm64/kernel/topology.c
416
if (WARN_ON_ONCE(cpu != smp_processor_id()))
arch/arm64/kernel/topology.c
420
smp_call_function_single(cpu, func, val, 1);
arch/arm64/kernel/topology.c
432
int cpu = get_cpu_with_amu_feat();
arch/arm64/kernel/topology.c
444
if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
arch/arm64/kernel/topology.c
450
int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
arch/arm64/kernel/topology.c
456
ret = counters_read_on_cpu(cpu, cpu_read_corecnt, val);
arch/arm64/kernel/topology.c
459
ret = counters_read_on_cpu(cpu, cpu_read_constcnt, val);
arch/arm64/kernel/topology.c
63
static inline bool freq_counters_valid(int cpu)
arch/arm64/kernel/topology.c
65
struct amu_cntr_sample *amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu);
arch/arm64/kernel/topology.c
67
if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
arch/arm64/kernel/topology.c
70
if (!cpu_has_amu_feat(cpu)) {
arch/arm64/kernel/topology.c
71
pr_debug("CPU%d: counters are not supported.\n", cpu);
arch/arm64/kernel/topology.c
77
pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
arch/arm64/kernel/topology.c
84
void freq_inv_set_max_ratio(int cpu, u64 max_rate)
arch/arm64/kernel/topology.c
90
cpu);
arch/arm64/kernel/watchdog_hld.c
18
unsigned int cpu = smp_processor_id();
arch/arm64/kernel/watchdog_hld.c
21
max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
arch/arm64/kernel/watchdog_hld.c
40
int cpu = smp_processor_id();
arch/arm64/kernel/watchdog_hld.c
43
max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
arch/arm64/kernel/watchdog_hld.c
57
int cpu;
arch/arm64/kernel/watchdog_hld.c
79
for_each_cpu(cpu, policy->cpus)
arch/arm64/kernel/watchdog_hld.c
80
smp_call_on_cpu(cpu, watchdog_perf_update_period, NULL, false);
arch/arm64/kvm/arm.c
1176
run->fail_entry.cpu = smp_processor_id();
arch/arm64/kvm/arm.c
2060
static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
arch/arm64/kvm/arm.c
2062
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
arch/arm64/kvm/arm.c
2071
params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) -
arch/arm64/kvm/arm.c
2317
unsigned int cpu;
arch/arm64/kvm/arm.c
2325
for_each_online_cpu(cpu)
arch/arm64/kvm/arm.c
2326
hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
arch/arm64/kvm/arm.c
2436
int cpu;
arch/arm64/kvm/arm.c
2439
for_each_possible_cpu(cpu) {
arch/arm64/kvm/arm.c
2440
if (per_cpu(kvm_hyp_initialized, cpu))
arch/arm64/kvm/arm.c
2443
free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
arch/arm64/kvm/arm.c
2445
if (!kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu])
arch/arm64/kvm/arm.c
2451
sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
arch/arm64/kvm/arm.c
2455
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
arch/arm64/kvm/arm.c
2561
int cpu;
arch/arm64/kvm/arm.c
2567
for_each_possible_cpu(cpu) {
arch/arm64/kvm/arm.c
2573
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
arch/arm64/kvm/arm.c
2590
int cpu;
arch/arm64/kvm/arm.c
2593
for_each_possible_cpu(cpu) {
arch/arm64/kvm/arm.c
2596
sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
arch/arm64/kvm/arm.c
2597
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
arch/arm64/kvm/arm.c
2606
int cpu;
arch/arm64/kvm/arm.c
2608
for_each_possible_cpu(cpu) {
arch/arm64/kvm/arm.c
2609
hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
arch/arm64/kvm/arm.c
2627
int cpu;
arch/arm64/kvm/arm.c
2647
for_each_possible_cpu(cpu) {
arch/arm64/kvm/arm.c
2656
per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
arch/arm64/kvm/arm.c
2662
for_each_possible_cpu(cpu) {
arch/arm64/kvm/arm.c
2674
kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
arch/arm64/kvm/arm.c
2730
for_each_possible_cpu(cpu) {
arch/arm64/kvm/arm.c
2731
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
arch/arm64/kvm/arm.c
2732
char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
arch/arm64/kvm/arm.c
2749
for_each_possible_cpu(cpu) {
arch/arm64/kvm/arm.c
2750
char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
arch/arm64/kvm/arm.c
2761
cpu_prepare_hyp_mode(cpu, hyp_va_bits);
arch/arm64/kvm/arm.c
630
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/arm64/kvm/arm.c
669
vcpu->cpu = cpu;
arch/arm64/kvm/arm.c
706
if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
arch/arm64/kvm/arm.c
729
vcpu->cpu = -1;
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
19
u64 cpu_logical_map(unsigned int cpu)
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
21
BUG_ON(cpu >= ARRAY_SIZE(hyp_cpu_logical_map));
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
23
return hyp_cpu_logical_map[cpu];
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
28
unsigned long __hyp_per_cpu_offset(unsigned int cpu)
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
34
BUG_ON(cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base));
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
37
this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
arch/arm64/kvm/hyp/nvhe/mm.c
304
static int create_fixmap_slot(u64 addr, u64 cpu)
arch/arm64/kvm/hyp/nvhe/mm.c
309
.arg = per_cpu_ptr(&fixmap_slots, cpu),
arch/arm64/kvm/mmu.c
1000
for_each_possible_cpu(cpu)
arch/arm64/kvm/mmu.c
1001
*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
arch/arm64/kvm/mmu.c
956
int cpu, err;
arch/arm64/kvm/nested.c
1331
vt->cpu = -1;
arch/arm64/kvm/nested.c
1475
vt->cpu = smp_processor_id();
arch/arm64/kvm/nested.c
1491
__set_fixmap(vncr_fixmap(vt->cpu), vt->hpa, prot);
arch/arm64/kvm/nested.c
29
int cpu;
arch/arm64/kvm/nested.c
800
BUG_ON(vcpu->arch.vncr_tlb->cpu != smp_processor_id());
arch/arm64/kvm/nested.c
803
clear_fixmap(vncr_fixmap(vcpu->arch.vncr_tlb->cpu));
arch/arm64/kvm/nested.c
804
vcpu->arch.vncr_tlb->cpu = -1;
arch/arm64/kvm/nested.c
892
if (vt->cpu != -1)
arch/arm64/kvm/nested.c
893
clear_fixmap(vncr_fixmap(vt->cpu));
arch/arm64/kvm/pmu-emul.c
812
int cpu;
arch/arm64/kvm/pmu-emul.c
833
cpu = raw_smp_processor_id();
arch/arm64/kvm/pmu-emul.c
837
if (cpumask_test_cpu(cpu, &pmu->supported_cpus))
arch/arm64/kvm/reset.c
202
loaded = (vcpu->cpu != -1);
arch/arm64/kvm/vmid.c
47
int cpu;
arch/arm64/kvm/vmid.c
52
for_each_possible_cpu(cpu) {
arch/arm64/kvm/vmid.c
53
vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
arch/arm64/kvm/vmid.c
57
vmid = per_cpu(reserved_vmids, cpu);
arch/arm64/kvm/vmid.c
59
per_cpu(reserved_vmids, cpu) = vmid;
arch/arm64/kvm/vmid.c
74
int cpu;
arch/arm64/kvm/vmid.c
82
for_each_possible_cpu(cpu) {
arch/arm64/kvm/vmid.c
83
if (per_cpu(reserved_vmids, cpu) == vmid) {
arch/arm64/kvm/vmid.c
85
per_cpu(reserved_vmids, cpu) = newvmid;
arch/arm64/mm/context.c
136
int cpu;
arch/arm64/mm/context.c
148
for_each_possible_cpu(cpu) {
arch/arm64/mm/context.c
149
if (per_cpu(reserved_asids, cpu) == asid) {
arch/arm64/mm/context.c
151
per_cpu(reserved_asids, cpu) = newasid;
arch/arm64/mm/context.c
218
unsigned int cpu;
arch/arm64/mm/context.c
254
cpu = smp_processor_id();
arch/arm64/mm/context.c
255
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
arch/arm64/mm/mmu.c
1241
int cpu = smp_processor_id();
arch/arm64/mm/mmu.c
1255
if (!cpu) {
arch/arm64/mm/mmu.c
1286
remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA);
arch/arm64/mm/mmu.c
1289
if (!cpu) {
arch/arm64/net/bpf_jit_comp.c
1578
cpu_offset = offsetof(struct thread_info, cpu);
arch/arm64/net/bpf_jit_comp.c
1971
int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
arch/arm64/net/bpf_jit_comp.c
1974
for_each_possible_cpu(cpu) {
arch/arm64/net/bpf_jit_comp.c
1975
stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
arch/arm64/net/bpf_jit_comp.c
1986
int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
arch/arm64/net/bpf_jit_comp.c
1989
for_each_possible_cpu(cpu) {
arch/arm64/net/bpf_jit_comp.c
1990
stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
arch/csky/abiv2/cacheflush.c
41
unsigned int cpu = smp_processor_id();
arch/csky/abiv2/cacheflush.c
44
if (cpumask_test_cpu(cpu, mask)) {
arch/csky/abiv2/cacheflush.c
45
cpumask_clear_cpu(cpu, mask);
arch/csky/abiv2/cacheflush.c
58
unsigned int cpu;
arch/csky/abiv2/cacheflush.c
76
cpu = smp_processor_id();
arch/csky/abiv2/cacheflush.c
77
cpumask_clear_cpu(cpu, mask);
arch/csky/abiv2/cacheflush.c
84
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
arch/csky/include/asm/asid.h
31
#define active_asid(info, cpu) *per_cpu_ptr((info)->active, cpu)
arch/csky/include/asm/asid.h
34
unsigned int cpu, struct mm_struct *mm);
arch/csky/include/asm/asid.h
43
atomic64_t *pasid, unsigned int cpu,
arch/csky/include/asm/asid.h
64
old_active_asid = atomic64_read(&active_asid(info, cpu));
arch/csky/include/asm/asid.h
67
atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
arch/csky/include/asm/asid.h
71
asid_new_context(info, pasid, cpu, mm);
arch/csky/include/asm/mmu_context.h
21
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
arch/csky/include/asm/mmu_context.h
27
unsigned int cpu = smp_processor_id();
arch/csky/include/asm/mmu_context.h
30
check_and_switch_context(next, cpu);
arch/csky/include/asm/smp.h
18
void arch_send_call_function_single_ipi(int cpu);
arch/csky/include/asm/smp.h
22
#define raw_smp_processor_id() (current_thread_info()->cpu)
arch/csky/include/asm/smp.h
26
static inline void __cpu_die(unsigned int cpu) { }
arch/csky/include/asm/thread_info.h
21
unsigned int cpu;
arch/csky/include/asm/thread_info.h
28
.cpu = 0, \
arch/csky/kernel/cpu-probe.c
48
int cpu;
arch/csky/kernel/cpu-probe.c
50
for_each_online_cpu(cpu)
arch/csky/kernel/cpu-probe.c
51
smp_call_function_single(cpu, percpu_print, m, true);
arch/csky/kernel/perf_event.c
1280
static int csky_pmu_starting_cpu(unsigned int cpu)
arch/csky/kernel/perf_event.c
1286
static int csky_pmu_dying_cpu(unsigned int cpu)
arch/csky/kernel/smp.c
109
unsigned int cpu, i;
arch/csky/kernel/smp.c
114
for_each_online_cpu(cpu)
arch/csky/kernel/smp.c
116
per_cpu_ptr(&ipi_data, cpu)->stats[i]);
arch/csky/kernel/smp.c
128
void arch_send_call_function_single_ipi(int cpu)
arch/csky/kernel/smp.c
130
send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
arch/csky/kernel/smp.c
143
void arch_smp_send_reschedule(int cpu)
arch/csky/kernel/smp.c
145
send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
arch/csky/kernel/smp.c
179
unsigned int cpu;
arch/csky/kernel/smp.c
185
cpu = of_get_cpu_hwid(node, 0);
arch/csky/kernel/smp.c
186
if (cpu >= NR_CPUS)
arch/csky/kernel/smp.c
189
set_cpu_possible(cpu, true);
arch/csky/kernel/smp.c
190
set_cpu_present(cpu, true);
arch/csky/kernel/smp.c
203
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
arch/csky/kernel/smp.c
205
unsigned long mask = 1 << cpu;
arch/csky/kernel/smp.c
223
send_arch_ipi(cpumask_of(cpu));
arch/csky/kernel/smp.c
231
while (!cpu_online(cpu));
arch/csky/kernel/smp.c
245
unsigned int cpu = smp_processor_id();
arch/csky/kernel/smp.c
265
cpumask_set_cpu(cpu, mm_cpumask(mm));
arch/csky/kernel/smp.c
267
notify_cpu_starting(cpu);
arch/csky/kernel/smp.c
268
set_cpu_online(cpu, true);
arch/csky/kernel/smp.c
270
pr_info("CPU%u Online: %s...\n", cpu, __func__);
arch/csky/kernel/smp.c
279
unsigned int cpu = smp_processor_id();
arch/csky/kernel/smp.c
281
set_cpu_online(cpu, false);
arch/csky/kernel/smp.c
285
clear_tasks_mm_cpumask(cpu);
arch/csky/kernel/smp.c
290
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
arch/csky/kernel/smp.c
292
pr_notice("CPU%u: shutdown\n", cpu);
arch/csky/mm/asid.c
138
unsigned int cpu, struct mm_struct *mm)
arch/csky/mm/asid.c
151
if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
arch/csky/mm/asid.c
154
atomic64_set(&active_asid(info, cpu), asid);
arch/csky/mm/asid.c
155
cpumask_set_cpu(cpu, mm_cpumask(mm));
arch/csky/mm/asid.c
16
#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)
arch/csky/mm/asid.c
57
int cpu;
arch/csky/mm/asid.c
69
for_each_possible_cpu(cpu) {
arch/csky/mm/asid.c
70
if (reserved_asid(info, cpu) == asid) {
arch/csky/mm/asid.c
72
reserved_asid(info, cpu) = newasid;
arch/csky/mm/context.c
19
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
arch/csky/mm/context.c
21
asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
arch/hexagon/include/asm/hexagon_vm.h
149
static inline long __vmintop_affinity(long i, long cpu)
arch/hexagon/include/asm/hexagon_vm.h
151
return __vmintop(hvmi_affinity, i, cpu, 0, 0);
arch/hexagon/include/asm/smp.h
13
#define raw_smp_processor_id() (current_thread_info()->cpu)
arch/hexagon/include/asm/smp.h
25
extern void arch_send_call_function_single_ipi(int cpu);
arch/hexagon/include/asm/thread_info.h
34
__u32 cpu; /* current cpu */
arch/hexagon/include/asm/thread_info.h
62
.cpu = 0, \
arch/hexagon/kernel/setup.c
116
int cpu = (unsigned long) v - 1;
arch/hexagon/kernel/setup.c
119
if (!cpu_online(cpu))
arch/hexagon/kernel/setup.c
123
seq_printf(m, "processor\t: %d\n", cpu);
arch/hexagon/kernel/smp.c
101
for_each_cpu(cpu, cpumask) {
arch/hexagon/kernel/smp.c
102
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
arch/hexagon/kernel/smp.c
106
retval = __vmintop_post(BASE_IPI_IRQ+cpu);
arch/hexagon/kernel/smp.c
110
BASE_IPI_IRQ+cpu);
arch/hexagon/kernel/smp.c
126
unsigned int cpu, irq;
arch/hexagon/kernel/smp.c
146
cpu = smp_processor_id();
arch/hexagon/kernel/smp.c
148
irq = BASE_IPI_IRQ + cpu;
arch/hexagon/kernel/smp.c
156
printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);
arch/hexagon/kernel/smp.c
158
notify_cpu_starting(cpu);
arch/hexagon/kernel/smp.c
160
set_cpu_online(cpu, true);
arch/hexagon/kernel/smp.c
174
int __cpu_up(unsigned int cpu, struct task_struct *idle)
arch/hexagon/kernel/smp.c
179
thread->cpu = cpu;
arch/hexagon/kernel/smp.c
185
while (!cpu_online(cpu))
arch/hexagon/kernel/smp.c
216
void arch_smp_send_reschedule(int cpu)
arch/hexagon/kernel/smp.c
218
send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
arch/hexagon/kernel/smp.c
229
void arch_send_call_function_single_ipi(int cpu)
arch/hexagon/kernel/smp.c
231
send_ipi(cpumask_of(cpu), IPI_CALL_FUNC);
arch/hexagon/kernel/smp.c
39
int cpu)
arch/hexagon/kernel/smp.c
84
int cpu = smp_processor_id();
arch/hexagon/kernel/smp.c
85
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
arch/hexagon/kernel/smp.c
89
__handle_ipi(&ops, ipi, cpu);
arch/hexagon/kernel/smp.c
96
unsigned long cpu;
arch/hexagon/kernel/time.c
122
int cpu = smp_processor_id();
arch/hexagon/kernel/time.c
125
&per_cpu(clock_events, cpu);
arch/hexagon/kernel/time.c
131
dummy_clock_dev->cpumask = cpumask_of(cpu);
arch/hexagon/kernel/time.c
139
int cpu = smp_processor_id();
arch/hexagon/kernel/time.c
140
struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu);
arch/loongarch/include/asm/acpi.h
43
static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
arch/loongarch/include/asm/acpi.h
45
return acpi_core_pic[cpu_logical_map(cpu)].processor_id;
arch/loongarch/include/asm/bootinfo.h
49
static inline bool io_master(int cpu)
arch/loongarch/include/asm/bootinfo.h
51
return test_bit(cpu, loongson_sysconf.cores_io_master);
arch/loongarch/include/asm/irq.h
30
static inline bool on_irq_stack(int cpu, unsigned long sp)
arch/loongarch/include/asm/irq.h
32
unsigned long low = per_cpu(irq_stack, cpu);
arch/loongarch/include/asm/loongarch.h
1300
unsigned int data_mask, unsigned int cpu)
arch/loongarch/include/asm/loongarch.h
1305
val |= (cpu << IOCSR_ANY_SEND_CPU_SHIFT);
arch/loongarch/include/asm/mmu_context.h
101
atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)invalid_pg_dir);
arch/loongarch/include/asm/mmu_context.h
110
cpumask_set_cpu(cpu, mm_cpumask(next));
arch/loongarch/include/asm/mmu_context.h
141
drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
arch/loongarch/include/asm/mmu_context.h
150
if (asid == cpu_asid(cpu, mm)) {
arch/loongarch/include/asm/mmu_context.h
154
get_new_mmu_context(mm, cpu, &need_flush);
arch/loongarch/include/asm/mmu_context.h
156
write_csr_asid(cpu_asid(cpu, mm));
arch/loongarch/include/asm/mmu_context.h
165
cpu_context(cpu, mm) = 0;
arch/loongarch/include/asm/mmu_context.h
166
cpumask_clear_cpu(cpu, mm_cpumask(mm));
arch/loongarch/include/asm/mmu_context.h
24
static inline u64 asid_version_mask(unsigned int cpu)
arch/loongarch/include/asm/mmu_context.h
26
return ~(u64)(cpu_asid_mask(&cpu_data[cpu]));
arch/loongarch/include/asm/mmu_context.h
29
static inline u64 asid_first_version(unsigned int cpu)
arch/loongarch/include/asm/mmu_context.h
31
return cpu_asid_mask(&cpu_data[cpu]) + 1;
arch/loongarch/include/asm/mmu_context.h
34
#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
arch/loongarch/include/asm/mmu_context.h
35
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
arch/loongarch/include/asm/mmu_context.h
36
#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
arch/loongarch/include/asm/mmu_context.h
38
static inline int asid_valid(struct mm_struct *mm, unsigned int cpu)
arch/loongarch/include/asm/mmu_context.h
40
if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
arch/loongarch/include/asm/mmu_context.h
52
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush)
arch/loongarch/include/asm/mmu_context.h
54
u64 asid = asid_cache(cpu);
arch/loongarch/include/asm/mmu_context.h
56
if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
arch/loongarch/include/asm/mmu_context.h
59
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
arch/loongarch/include/asm/mmu_context.h
92
unsigned int cpu = smp_processor_id();
arch/loongarch/include/asm/mmu_context.h
95
if (!asid_valid(next, cpu))
arch/loongarch/include/asm/mmu_context.h
96
get_new_mmu_context(next, cpu, &need_flush);
arch/loongarch/include/asm/mmu_context.h
99
atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd);
arch/loongarch/include/asm/numa.h
26
extern void numa_add_cpu(unsigned int cpu);
arch/loongarch/include/asm/numa.h
27
extern void numa_remove_cpu(unsigned int cpu);
arch/loongarch/include/asm/numa.h
29
static inline void numa_clear_node(int cpu)
arch/loongarch/include/asm/numa.h
38
extern int early_cpu_to_node(int cpu);
arch/loongarch/include/asm/numa.h
43
static inline void numa_add_cpu(unsigned int cpu) { }
arch/loongarch/include/asm/numa.h
44
static inline void numa_remove_cpu(unsigned int cpu) { }
arch/loongarch/include/asm/numa.h
47
static inline int early_cpu_to_node(int cpu)
arch/loongarch/include/asm/qspinlock.h
39
bool vcpu_is_preempted(int cpu);
arch/loongarch/include/asm/setup.h
21
extern void tlb_init(int cpu);
arch/loongarch/include/asm/setup.h
24
extern void per_cpu_trap_init(int cpu);
arch/loongarch/include/asm/smp.h
101
mp_ops.send_ipi_single(cpu, ACTION_CALL_FUNCTION);
arch/loongarch/include/asm/smp.h
115
static inline void __cpu_die(unsigned int cpu)
arch/loongarch/include/asm/smp.h
117
loongson_cpu_die(cpu);
arch/loongarch/include/asm/smp.h
122
#define cpu_logical_map(cpu) 0
arch/loongarch/include/asm/smp.h
19
void (*send_ipi_single)(int cpu, unsigned int action);
arch/loongarch/include/asm/smp.h
34
void loongson_boot_secondary(int cpu, struct task_struct *idle);
arch/loongarch/include/asm/smp.h
39
void loongson_cpu_die(unsigned int cpu);
arch/loongarch/include/asm/smp.h
54
return current_thread_info()->cpu;
arch/loongarch/include/asm/smp.h
62
#define cpu_number_map(cpu) __cpu_number_map[cpu]
arch/loongarch/include/asm/smp.h
66
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
arch/loongarch/include/asm/smp.h
68
#define cpu_physical_id(cpu) cpu_logical_map(cpu)
arch/loongarch/include/asm/smp.h
99
static inline void arch_send_call_function_single_ipi(int cpu)
arch/loongarch/include/asm/thread_info.h
28
__u32 cpu; /* current CPU */
arch/loongarch/include/asm/thread_info.h
42
.cpu = 0, \
arch/loongarch/include/asm/topology.h
33
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
arch/loongarch/include/asm/topology.h
35
return &cpu_llc_shared_map[cpu];
arch/loongarch/include/asm/topology.h
38
#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
arch/loongarch/include/asm/topology.h
39
#define topology_core_id(cpu) (cpu_data[cpu].core)
arch/loongarch/include/asm/topology.h
40
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
arch/loongarch/include/asm/topology.h
41
#define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu])
arch/loongarch/kernel/acpi.c
179
int cpu, topology_id;
arch/loongarch/kernel/acpi.c
181
for_each_possible_cpu(cpu) {
arch/loongarch/kernel/acpi.c
182
topology_id = find_acpi_cpu_topology(cpu, 0);
arch/loongarch/kernel/acpi.c
188
if (acpi_pptt_cpu_is_thread(cpu) <= 0)
arch/loongarch/kernel/acpi.c
189
cpu_data[cpu].core = topology_id;
arch/loongarch/kernel/acpi.c
191
topology_id = find_acpi_cpu_topology(cpu, 1);
arch/loongarch/kernel/acpi.c
195
cpu_data[cpu].core = topology_id;
arch/loongarch/kernel/acpi.c
333
static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
arch/loongarch/kernel/acpi.c
341
nid = early_cpu_to_node(cpu);
arch/loongarch/kernel/acpi.c
346
set_cpu_numa_node(cpu, nid);
arch/loongarch/kernel/acpi.c
347
cpumask_set_cpu(cpu, cpumask_of_node(nid));
arch/loongarch/kernel/acpi.c
355
int cpu;
arch/loongarch/kernel/acpi.c
357
cpu = cpu_number_map(physid);
arch/loongarch/kernel/acpi.c
358
if (cpu < 0 || cpu >= nr_cpu_ids) {
arch/loongarch/kernel/acpi.c
364
set_cpu_present(cpu, true);
arch/loongarch/kernel/acpi.c
365
acpi_map_cpu2node(handle, cpu, physid);
arch/loongarch/kernel/acpi.c
367
*pcpu = cpu;
arch/loongarch/kernel/acpi.c
373
int acpi_unmap_cpu(int cpu)
arch/loongarch/kernel/acpi.c
376
set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
arch/loongarch/kernel/acpi.c
378
set_cpu_present(cpu, false);
arch/loongarch/kernel/acpi.c
381
pr_info("cpu%d hot remove!\n", cpu);
arch/loongarch/kernel/acpi.c
65
int cpu = -1, cpuid = id;
arch/loongarch/kernel/acpi.c
76
cpu = 0;
arch/loongarch/kernel/acpi.c
80
if (cpu < 0)
arch/loongarch/kernel/acpi.c
81
cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
arch/loongarch/kernel/acpi.c
83
set_cpu_present(cpu, true);
arch/loongarch/kernel/acpi.c
86
if (cpu < 0)
arch/loongarch/kernel/acpi.c
87
cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
arch/loongarch/kernel/acpi.c
91
return cpu;
arch/loongarch/kernel/acpi.c
94
set_cpu_possible(cpu, true);
arch/loongarch/kernel/acpi.c
95
__cpu_number_map[cpuid] = cpu;
arch/loongarch/kernel/acpi.c
96
__cpu_logical_map[cpu] = cpuid;
arch/loongarch/kernel/acpi.c
98
return cpu;
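Editorial note: the acpi.c fragments above keep __cpu_number_map (physical id to logical cpu) and __cpu_logical_map (logical cpu to physical id) in sync, assigning a hotplugged physical id the first free logical slot. A minimal userspace sketch of that pairing follows; NR_CPUS, the table sizes and the sample ids are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static bool present[NR_CPUS];
static int cpu_number_map_tbl[256];      /* physical id -> logical cpu */
static int cpu_logical_map_tbl[NR_CPUS]; /* logical cpu -> physical id */

static int map_physid(int physid)
{
    int cpu = 0;

    while (cpu < NR_CPUS && present[cpu]) /* first free logical slot */
        cpu++;
    if (cpu == NR_CPUS)
        return -1;

    present[cpu] = true;
    cpu_number_map_tbl[physid] = cpu;     /* both directions stay in sync */
    cpu_logical_map_tbl[cpu] = physid;
    return cpu;
}

int main(void)
{
    present[0] = true;                    /* boot CPU is logical 0 */
    cpu_logical_map_tbl[0] = 0;

    int cpu = map_physid(0x10);           /* sparse physical id, illustrative */
    printf("physid 0x10 -> logical cpu%d\n", cpu);
    printf("logical cpu%d -> physid %#x\n", cpu, cpu_logical_map_tbl[cpu]);
    return 0;
}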
arch/loongarch/kernel/asm-offsets.c
90
OFFSET(TI_CPU, thread_info, cpu);
arch/loongarch/kernel/cacheinfo.c
12
int init_cache_level(unsigned int cpu)
arch/loongarch/kernel/cacheinfo.c
15
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/loongarch/kernel/cacheinfo.c
31
static void cache_cpumap_setup(unsigned int cpu)
arch/loongarch/kernel/cacheinfo.c
35
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/loongarch/kernel/cacheinfo.c
45
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
arch/loongarch/kernel/cacheinfo.c
49
if (i == cpu || !sib_cpu_ci->info_list ||
arch/loongarch/kernel/cacheinfo.c
50
(cpu_to_node(i) != cpu_to_node(cpu)))
arch/loongarch/kernel/cacheinfo.c
55
if (cpus_are_siblings(i, cpu)) {
arch/loongarch/kernel/cacheinfo.c
56
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
arch/loongarch/kernel/cacheinfo.c
61
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
arch/loongarch/kernel/cacheinfo.c
68
int populate_cache_leaves(unsigned int cpu)
arch/loongarch/kernel/cacheinfo.c
71
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/loongarch/kernel/cacheinfo.c
88
cache_cpumap_setup(cpu);
arch/loongarch/kernel/cpu-probe.c
283
static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu)
arch/loongarch/kernel/cpu-probe.c
294
__cpu_family[cpu] = "Loongson-32bit";
arch/loongarch/kernel/cpu-probe.c
298
__cpu_family[cpu] = "Loongson-64bit";
arch/loongarch/kernel/cpu-probe.c
302
pr_info("%s Processor probed (%s Core)\n", __cpu_family[cpu], core_name);
arch/loongarch/kernel/cpu-probe.c
305
__cpu_full_name[cpu] = "Unknown";
arch/loongarch/kernel/cpu-probe.c
319
if (!__cpu_full_name[cpu]) {
arch/loongarch/kernel/cpu-probe.c
321
__cpu_full_name[cpu] = "Unknown";
arch/loongarch/kernel/cpu-probe.c
323
__cpu_full_name[cpu] = cpu_full_name;
arch/loongarch/kernel/cpu-probe.c
367
unsigned int cpu = smp_processor_id();
arch/loongarch/kernel/cpu-probe.c
374
set_elf_platform(cpu, "loongarch");
arch/loongarch/kernel/cpu-probe.c
385
per_cpu_trap_init(cpu);
arch/loongarch/kernel/cpu-probe.c
389
cpu_probe_loongson(c, cpu);
arch/loongarch/kernel/cpu-probe.c
393
BUG_ON(!__cpu_family[cpu]);
arch/loongarch/kernel/cpu-probe.c
399
if (cpu == 0)
arch/loongarch/kernel/cpu-probe.c
97
static inline void set_elf_platform(int cpu, const char *plat)
arch/loongarch/kernel/cpu-probe.c
99
if (cpu == 0)
arch/loongarch/kernel/env.c
42
int cpu, ret;
arch/loongarch/kernel/env.c
56
for (cpu = 0; cpu < NR_CPUS; cpu++)
arch/loongarch/kernel/env.c
57
__cpu_full_name[cpu] = loongson_sysconf.cpuname;
arch/loongarch/kernel/hw_breakpoint.c
528
int cpu;
arch/loongarch/kernel/hw_breakpoint.c
536
for (cpu = 1; cpu < NR_CPUS; cpu++) {
arch/loongarch/kernel/hw_breakpoint.c
537
cpu_data[cpu].watch_ireg_count = boot_cpu_data.watch_ireg_count;
arch/loongarch/kernel/hw_breakpoint.c
538
cpu_data[cpu].watch_dreg_count = boot_cpu_data.watch_dreg_count;
arch/loongarch/kernel/inst.c
239
unsigned int cpu;
arch/loongarch/kernel/inst.c
247
if (smp_processor_id() == copy->cpu) {
arch/loongarch/kernel/inst.c
269
.cpu = raw_smp_processor_id(),
arch/loongarch/kernel/kgdb.c
465
int cpu, cnt = 0;
arch/loongarch/kernel/kgdb.c
468
for_each_online_cpu(cpu) {
arch/loongarch/kernel/kgdb.c
470
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
arch/loongarch/kernel/kgdb.c
478
for_each_online_cpu(cpu) {
arch/loongarch/kernel/kgdb.c
482
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
arch/loongarch/kernel/kgdb.c
491
int cpu;
arch/loongarch/kernel/kgdb.c
497
for_each_online_cpu(cpu) {
arch/loongarch/kernel/kgdb.c
498
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
arch/loongarch/kernel/kgdb.c
588
int cpu = raw_smp_processor_id();
arch/loongarch/kernel/kgdb.c
595
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
arch/loongarch/kernel/kgdb.c
610
int cpu = raw_smp_processor_id();
arch/loongarch/kernel/kgdb.c
617
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
arch/loongarch/kernel/kgdb.c
640
int cpu = raw_smp_processor_id();
arch/loongarch/kernel/kgdb.c
645
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
arch/loongarch/kernel/kgdb.c
684
int i, cpu;
arch/loongarch/kernel/kgdb.c
706
for_each_online_cpu(cpu) {
arch/loongarch/kernel/kgdb.c
707
pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
arch/loongarch/kernel/machine_kexec.c
131
int cpu = smp_processor_id();
arch/loongarch/kernel/machine_kexec.c
133
if (!cpu_online(cpu))
arch/loongarch/kernel/machine_kexec.c
137
set_cpu_online(cpu, false);
arch/loongarch/kernel/machine_kexec.c
148
int cpu = smp_processor_id();
arch/loongarch/kernel/machine_kexec.c
163
if (!cpu_online(cpu))
arch/loongarch/kernel/machine_kexec.c
167
set_cpu_online(cpu, false);
arch/loongarch/kernel/machine_kexec.c
170
if (!cpumask_test_cpu(cpu, &cpus_in_crash))
arch/loongarch/kernel/machine_kexec.c
171
crash_save_cpu(regs, cpu);
arch/loongarch/kernel/machine_kexec.c
172
cpumask_set_cpu(cpu, &cpus_in_crash);
arch/loongarch/kernel/machine_kexec.c
217
int cpu;
arch/loongarch/kernel/machine_kexec.c
220
for_each_possible_cpu(cpu)
arch/loongarch/kernel/machine_kexec.c
221
if (!cpu_online(cpu))
arch/loongarch/kernel/machine_kexec.c
222
cpu_device_up(get_cpu_device(cpu));
arch/loongarch/kernel/numa.c
110
int early_cpu_to_node(int cpu)
arch/loongarch/kernel/numa.c
112
int physid = cpu_logical_map(cpu);
arch/loongarch/kernel/numa.c
122
int cpu = __cpu_number_map[cpuid];
arch/loongarch/kernel/numa.c
124
if (cpu < 0)
arch/loongarch/kernel/numa.c
127
cpumask_set_cpu(cpu, &cpus_on_node[node]);
arch/loongarch/kernel/numa.c
131
void numa_add_cpu(unsigned int cpu)
arch/loongarch/kernel/numa.c
133
int nid = cpu_to_node(cpu);
arch/loongarch/kernel/numa.c
134
cpumask_set_cpu(cpu, &cpus_on_node[nid]);
arch/loongarch/kernel/numa.c
137
void numa_remove_cpu(unsigned int cpu)
arch/loongarch/kernel/numa.c
139
int nid = cpu_to_node(cpu);
arch/loongarch/kernel/numa.c
140
cpumask_clear_cpu(cpu, &cpus_on_node[nid]);
arch/loongarch/kernel/numa.c
47
static int __init pcpu_cpu_to_node(int cpu)
arch/loongarch/kernel/numa.c
49
return early_cpu_to_node(cpu);
arch/loongarch/kernel/numa.c
68
unsigned int cpu;
arch/loongarch/kernel/numa.c
96
for_each_possible_cpu(cpu)
arch/loongarch/kernel/numa.c
97
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
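Editorial note: the numa.c fragment above records one offset per CPU (__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]), which is how a single per-CPU variable layout becomes NR_CPUS copies. A hedged userspace model of that addressing scheme follows; the unit size and layout are illustrative, not the allocator's.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4
#define UNIT_SIZE 64 /* bytes reserved per CPU, illustrative */

static unsigned char pcpu_area[NR_CPUS * UNIT_SIZE];
static uintptr_t per_cpu_offset[NR_CPUS];

/* A "per-CPU variable" address is the base copy plus the CPU's offset. */
#define per_cpu_ptr(base, cpu) \
    ((int *)((unsigned char *)(base) + per_cpu_offset[cpu]))

int main(void)
{
    int *counter = (int *)pcpu_area; /* CPU0's copy doubles as the base */

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        per_cpu_offset[cpu] = (uintptr_t)cpu * UNIT_SIZE;

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        *per_cpu_ptr(counter, cpu) = cpu * 10;

    printf("cpu2 copy = %d\n", *per_cpu_ptr(counter, 2)); /* prints 20 */
    return 0;
}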
arch/loongarch/kernel/paravirt.c
106
min = max = cpu;
arch/loongarch/kernel/paravirt.c
109
__set_bit(cpu - min, (unsigned long *)&bitmap);
arch/loongarch/kernel/paravirt.c
213
int cpu = smp_processor_id();
arch/loongarch/kernel/paravirt.c
220
st = &per_cpu(steal_time, cpu);
arch/loongarch/kernel/paravirt.c
242
static int pv_time_cpu_online(unsigned int cpu)
arch/loongarch/kernel/paravirt.c
253
static int pv_time_cpu_down_prepare(unsigned int cpu)
arch/loongarch/kernel/paravirt.c
26
static u64 paravt_steal_clock(int cpu)
arch/loongarch/kernel/paravirt.c
264
bool vcpu_is_preempted(int cpu)
arch/loongarch/kernel/paravirt.c
271
src = &per_cpu(steal_time, cpu);
arch/loongarch/kernel/paravirt.c
32
src = &per_cpu(steal_time, cpu);
arch/loongarch/kernel/paravirt.c
48
static void pv_send_ipi_single(int cpu, unsigned int action)
arch/loongarch/kernel/paravirt.c
51
irq_cpustat_t *info = &per_cpu(irq_stat, cpu);
arch/loongarch/kernel/paravirt.c
54
native_ops.send_ipi_single(cpu, action);
arch/loongarch/kernel/paravirt.c
62
min = cpu_logical_map(cpu);
arch/loongarch/kernel/paravirt.c
70
int i, cpu, min = 0, max = 0, old;
arch/loongarch/kernel/paravirt.c
89
cpu = cpu_logical_map(i);
arch/loongarch/kernel/paravirt.c
91
min = max = cpu;
arch/loongarch/kernel/paravirt.c
92
} else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) {
arch/loongarch/kernel/paravirt.c
94
bitmap <<= min - cpu;
arch/loongarch/kernel/paravirt.c
95
min = cpu;
arch/loongarch/kernel/paravirt.c
96
} else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) {
arch/loongarch/kernel/paravirt.c
98
max = cpu > max ? cpu : max;
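Editorial note: the paravirt.c fragments above pack IPI targets into a single bitmap anchored at the smallest physical id seen so far, re-anchoring (bitmap <<= min - cpu) when a smaller id still fits inside the cluster window. This is a self-contained userspace sketch of that sliding-window packing; CLUSTER_SIZE and the target list are illustrative.

#include <stdint.h>
#include <stdio.h>

#define CLUSTER_SIZE 64

int main(void)
{
    int targets[] = { 12, 9, 40 }; /* physical CPU ids, illustrative */
    uint64_t bitmap = 0;
    int min = 0, max = 0, started = 0;

    for (unsigned i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
        int cpu = targets[i];

        if (!started) {
            min = max = cpu;
            started = 1;
        } else if (cpu < min && cpu > max - CLUSTER_SIZE) {
            bitmap <<= min - cpu; /* re-anchor window at the new minimum */
            min = cpu;
        } else if (cpu > min && cpu < min + CLUSTER_SIZE) {
            max = cpu > max ? cpu : max;
        } else {
            printf("cpu %d does not fit the window\n", cpu);
            continue;
        }
        bitmap |= 1ULL << (cpu - min);
    }
    /* prints min=9 bitmap=0x80000009 for the sample ids above */
    printf("min=%d bitmap=%#llx\n", min, (unsigned long long)bitmap);
    return 0;
}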
arch/loongarch/kernel/perf_event.c
267
unsigned int cpu;
arch/loongarch/kernel/perf_event.c
277
cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
arch/loongarch/kernel/perf_event.c
282
pr_debug("Enabling perf counter for CPU%d\n", cpu);
arch/loongarch/kernel/perf_event.c
551
if (event->cpu >= 0 && !cpu_online(event->cpu))
arch/loongarch/kernel/process.c
359
int cpu;
arch/loongarch/kernel/process.c
361
for_each_cpu(cpu, mask) {
arch/loongarch/kernel/process.c
368
if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
arch/loongarch/kernel/process.c
370
cpu);
arch/loongarch/kernel/process.c
374
csd = &per_cpu(backtrace_csd, cpu);
arch/loongarch/kernel/process.c
376
smp_call_function_single_async(cpu, csd);
arch/loongarch/kernel/smp.c
102
cpumask_set_cpu(cpu, &cpu_core_setup_map);
arch/loongarch/kernel/smp.c
105
if (cpu_data[cpu].package == cpu_data[i].package) {
arch/loongarch/kernel/smp.c
106
cpumask_set_cpu(i, &cpu_core_map[cpu]);
arch/loongarch/kernel/smp.c
107
cpumask_set_cpu(cpu, &cpu_core_map[i]);
arch/loongarch/kernel/smp.c
112
static inline void set_cpu_llc_shared_map(int cpu)
arch/loongarch/kernel/smp.c
116
cpumask_set_cpu(cpu, &cpu_llc_shared_setup_map);
arch/loongarch/kernel/smp.c
119
if (cpu_to_node(cpu) == cpu_to_node(i)) {
arch/loongarch/kernel/smp.c
120
cpumask_set_cpu(i, &cpu_llc_shared_map[cpu]);
arch/loongarch/kernel/smp.c
121
cpumask_set_cpu(cpu, &cpu_llc_shared_map[i]);
arch/loongarch/kernel/smp.c
126
static inline void clear_cpu_llc_shared_map(int cpu)
arch/loongarch/kernel/smp.c
131
if (cpu_to_node(cpu) == cpu_to_node(i)) {
arch/loongarch/kernel/smp.c
132
cpumask_clear_cpu(i, &cpu_llc_shared_map[cpu]);
arch/loongarch/kernel/smp.c
133
cpumask_clear_cpu(cpu, &cpu_llc_shared_map[i]);
arch/loongarch/kernel/smp.c
137
cpumask_clear_cpu(cpu, &cpu_llc_shared_setup_map);
arch/loongarch/kernel/smp.c
140
static inline void set_cpu_sibling_map(int cpu)
arch/loongarch/kernel/smp.c
144
cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
arch/loongarch/kernel/smp.c
147
if (cpus_are_siblings(cpu, i)) {
arch/loongarch/kernel/smp.c
148
cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
arch/loongarch/kernel/smp.c
149
cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
arch/loongarch/kernel/smp.c
154
static inline void clear_cpu_sibling_map(int cpu)
arch/loongarch/kernel/smp.c
159
if (cpus_are_siblings(cpu, i)) {
arch/loongarch/kernel/smp.c
160
cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
arch/loongarch/kernel/smp.c
161
cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
arch/loongarch/kernel/smp.c
165
cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
arch/loongarch/kernel/smp.c
194
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
arch/loongarch/kernel/smp.c
201
val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
arch/loongarch/kernel/smp.c
208
val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
arch/loongarch/kernel/smp.c
213
static u32 ipi_read_clear(int cpu)
arch/loongarch/kernel/smp.c
226
static void ipi_write_action(int cpu, u32 action)
arch/loongarch/kernel/smp.c
231
val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
arch/loongarch/kernel/smp.c
235
static void loongson_send_ipi_single(int cpu, unsigned int action)
arch/loongarch/kernel/smp.c
237
ipi_write_action(cpu_logical_map(cpu), (u32)action);
arch/loongarch/kernel/smp.c
253
void arch_smp_send_reschedule(int cpu)
arch/loongarch/kernel/smp.c
255
mp_ops.send_ipi_single(cpu, ACTION_RESCHEDULE);
arch/loongarch/kernel/smp.c
269
unsigned int cpu = smp_processor_id();
arch/loongarch/kernel/smp.c
271
action = ipi_read_clear(cpu_logical_map(cpu));
arch/loongarch/kernel/smp.c
275
per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
arch/loongarch/kernel/smp.c
280
per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
arch/loongarch/kernel/smp.c
285
per_cpu(irq_stat, cpu).ipi_irqs[IPI_IRQ_WORK]++;
arch/loongarch/kernel/smp.c
290
per_cpu(irq_stat, cpu).ipi_irqs[IPI_CLEAR_VECTOR]++;
arch/loongarch/kernel/smp.c
319
unsigned int cpu, cpuid;
arch/loongarch/kernel/smp.c
331
cpu = 0;
arch/loongarch/kernel/smp.c
333
cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
arch/loongarch/kernel/smp.c
336
set_cpu_possible(cpu, true);
arch/loongarch/kernel/smp.c
337
set_cpu_present(cpu, true);
arch/loongarch/kernel/smp.c
338
__cpu_number_map[cpuid] = cpu;
arch/loongarch/kernel/smp.c
339
__cpu_logical_map[cpu] = cpuid;
arch/loongarch/kernel/smp.c
396
void loongson_boot_secondary(int cpu, struct task_struct *idle)
arch/loongarch/kernel/smp.c
400
pr_info("Booting CPU#%d...\n", cpu);
arch/loongarch/kernel/smp.c
406
csr_mail_send(entry, cpu_logical_map(cpu), 0);
arch/loongarch/kernel/smp.c
408
loongson_send_ipi_single(cpu, ACTION_BOOT_CPU);
arch/loongarch/kernel/smp.c
416
unsigned int cpu = smp_processor_id();
arch/loongarch/kernel/smp.c
425
numa_add_cpu(cpu);
arch/loongarch/kernel/smp.c
427
per_cpu(cpu_state, cpu) = CPU_ONLINE;
arch/loongarch/kernel/smp.c
428
cpu_data[cpu].package =
arch/loongarch/kernel/smp.c
429
cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
arch/loongarch/kernel/smp.c
430
cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
arch/loongarch/kernel/smp.c
431
cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
arch/loongarch/kernel/smp.c
432
cpu_data[cpu].global_id = cpu_logical_map(cpu);
arch/loongarch/kernel/smp.c
447
unsigned int cpu = smp_processor_id();
arch/loongarch/kernel/smp.c
449
if (io_master(cpu))
arch/loongarch/kernel/smp.c
453
numa_remove_cpu(cpu);
arch/loongarch/kernel/smp.c
455
set_cpu_online(cpu, false);
arch/loongarch/kernel/smp.c
456
clear_cpu_sibling_map(cpu);
arch/loongarch/kernel/smp.c
457
clear_cpu_llc_shared_map(cpu);
arch/loongarch/kernel/smp.c
468
void loongson_cpu_die(unsigned int cpu)
arch/loongarch/kernel/smp.c
470
while (per_cpu(cpu_state, cpu) != CPU_DEAD)
arch/loongarch/kernel/smp.c
586
unsigned int cpu, node, rr_node;
arch/loongarch/kernel/smp.c
594
for_each_possible_cpu(cpu) {
arch/loongarch/kernel/smp.c
595
node = early_cpu_to_node(cpu);
arch/loongarch/kernel/smp.c
611
set_cpu_numa_node(cpu, node);
arch/loongarch/kernel/smp.c
613
set_cpu_numa_node(cpu, rr_node);
arch/loongarch/kernel/smp.c
625
current_thread_info()->cpu = 0;
arch/loongarch/kernel/smp.c
636
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
arch/loongarch/kernel/smp.c
638
loongson_boot_secondary(cpu, tidle);
arch/loongarch/kernel/smp.c
643
pr_crit("CPU%u: failed to start\n", cpu);
arch/loongarch/kernel/smp.c
659
unsigned int cpu;
arch/loongarch/kernel/smp.c
662
cpu = raw_smp_processor_id();
arch/loongarch/kernel/smp.c
663
set_my_cpu_offset(per_cpu_offset(cpu));
arch/loongarch/kernel/smp.c
669
set_cpu_sibling_map(cpu);
arch/loongarch/kernel/smp.c
670
set_cpu_llc_shared_map(cpu);
arch/loongarch/kernel/smp.c
671
set_cpu_core_map(cpu);
arch/loongarch/kernel/smp.c
673
notify_cpu_starting(cpu);
arch/loongarch/kernel/smp.c
679
set_cpu_online(cpu, true);
arch/loongarch/kernel/smp.c
748
unsigned int cpu;
arch/loongarch/kernel/smp.c
750
for_each_online_cpu(cpu) {
arch/loongarch/kernel/smp.c
751
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
arch/loongarch/kernel/smp.c
752
cpu_context(cpu, mm) = 0;
arch/loongarch/kernel/smp.c
787
unsigned int cpu;
arch/loongarch/kernel/smp.c
789
for_each_online_cpu(cpu) {
arch/loongarch/kernel/smp.c
790
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
arch/loongarch/kernel/smp.c
791
cpu_context(cpu, mm) = 0;
arch/loongarch/kernel/smp.c
833
unsigned int cpu;
arch/loongarch/kernel/smp.c
835
for_each_online_cpu(cpu) {
arch/loongarch/kernel/smp.c
836
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
arch/loongarch/kernel/smp.c
837
cpu_context(cpu, vma->vm_mm) = 0;
arch/loongarch/kernel/smp.c
88
unsigned int cpu, i;
arch/loongarch/kernel/smp.c
92
for_each_online_cpu(cpu)
arch/loongarch/kernel/smp.c
93
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, cpu).ipi_irqs[i], 10);
arch/loongarch/kernel/smp.c
98
static inline void set_cpu_core_map(int cpu)
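Editorial note: the smp.c fragments above build the core/LLC/sibling masks symmetrically: whenever two CPUs satisfy the sharing predicate (same package, same node, or siblings), each is added to the other's mask. A minimal userspace model of that pattern follows, using the package predicate; the layout and 32-bit masks are illustrative.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8
static int package_of[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };
static uint32_t core_map[NR_CPUS]; /* bit i set: cpu i is in my package */

static void set_cpu_core_map(int cpu)
{
    for (int i = 0; i < NR_CPUS; i++)
        if (package_of[i] == package_of[cpu]) {
            core_map[cpu] |= 1u << i;   /* add i to cpu's mask ... */
            core_map[i]  |= 1u << cpu;  /* ... and cpu to i's mask */
        }
}

int main(void)
{
    for (int c = 0; c < NR_CPUS; c++)
        set_cpu_core_map(c);
    printf("cpu5 core map: %#x\n", core_map[5]); /* prints 0xf0 */
    return 0;
}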
arch/loongarch/kernel/time.c
107
static int arch_timer_starting(unsigned int cpu)
arch/loongarch/kernel/time.c
114
static int arch_timer_dying(unsigned int cpu)
arch/loongarch/kernel/time.c
146
unsigned int cpu = smp_processor_id();
arch/loongarch/kernel/time.c
162
cd = &per_cpu(constant_clockevent_device, cpu);
arch/loongarch/kernel/time.c
169
cd->cpumask = cpumask_of(cpu);
arch/loongarch/kernel/time.c
37
int cpu = smp_processor_id();
arch/loongarch/kernel/time.c
42
cd = &per_cpu(constant_clockevent_device, cpu);
arch/loongarch/kernel/topology.c
14
bool arch_cpu_is_hotpluggable(int cpu)
arch/loongarch/kernel/topology.c
16
return !io_master(cpu);
arch/loongarch/kernel/traps.c
1101
register int cpu;
arch/loongarch/kernel/traps.c
1105
cpu = smp_processor_id();
arch/loongarch/kernel/traps.c
1107
if (on_irq_stack(cpu, sp))
arch/loongarch/kernel/traps.c
1110
stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;
arch/loongarch/kernel/traps.c
1146
void per_cpu_trap_init(int cpu)
arch/loongarch/kernel/traps.c
1154
if (!cpu_data[cpu].asid_cache)
arch/loongarch/kernel/traps.c
1155
cpu_data[cpu].asid_cache = asid_first_version(cpu);
arch/loongarch/kernel/traps.c
1163
if (cpu == 0)
arch/loongarch/kernel/traps.c
1167
tlb_init(cpu);
arch/loongarch/kernel/unwind_orc.c
354
int cpu;
arch/loongarch/kernel/unwind_orc.c
357
for_each_possible_cpu(cpu) {
arch/loongarch/kernel/unwind_orc.c
358
if (!pcpu_handlers[cpu])
arch/loongarch/kernel/unwind_orc.c
361
if (ra >= pcpu_handlers[cpu] &&
arch/loongarch/kernel/unwind_orc.c
362
ra < pcpu_handlers[cpu] + vec_sz) {
arch/loongarch/kernel/unwind_orc.c
363
ra = ra + eentry - pcpu_handlers[cpu];
arch/loongarch/kernel/unwind_prologue.c
65
int cpu;
arch/loongarch/kernel/unwind_prologue.c
67
for_each_possible_cpu(cpu) {
arch/loongarch/kernel/unwind_prologue.c
68
if (!pcpu_handlers[cpu])
arch/loongarch/kernel/unwind_prologue.c
70
if (scan_handlers(pc - pcpu_handlers[cpu]))
arch/loongarch/kernel/vdso.c
46
unsigned long i, cpu, pfn;
arch/loongarch/kernel/vdso.c
50
for_each_possible_cpu(cpu)
arch/loongarch/kernel/vdso.c
51
vdso_k_arch_data->pdata[cpu].node = cpu_to_node(cpu);
arch/loongarch/kvm/exit.c
840
unsigned int min, cpu;
arch/loongarch/kvm/exit.c
848
for_each_set_bit(cpu, ipi_bitmap, BITS_PER_LONG * 2) {
arch/loongarch/kvm/exit.c
849
dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
arch/loongarch/kvm/intc/eiointc.c
100
s->sw_coremap[irq + i] = cpu;
arch/loongarch/kvm/intc/eiointc.c
103
s->sw_coremap[irq + i] = cpu;
arch/loongarch/kvm/intc/eiointc.c
12
int ipnum, cpu, cpuid, irq;
arch/loongarch/kvm/intc/eiointc.c
208
u8 cpu;
arch/loongarch/kvm/intc/eiointc.c
267
cpu = vcpu->vcpu_id;
arch/loongarch/kvm/intc/eiointc.c
268
old = s->coreisr[cpu][index];
arch/loongarch/kvm/intc/eiointc.c
27
cpu = vcpu->vcpu_id;
arch/loongarch/kvm/intc/eiointc.c
270
s->coreisr[cpu][index] = old & ~data;
arch/loongarch/kvm/intc/eiointc.c
28
if (test_bit(irq, (unsigned long *)s->coreisr[cpu]))
arch/loongarch/kvm/intc/eiointc.c
29
__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
arch/loongarch/kvm/intc/eiointc.c
31
__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
arch/loongarch/kvm/intc/eiointc.c
37
int ipnum, cpu, found;
arch/loongarch/kvm/intc/eiointc.c
463
int addr, cpu, offset, ret = 0;
arch/loongarch/kvm/intc/eiointc.c
47
cpu = s->sw_coremap[irq];
arch/loongarch/kvm/intc/eiointc.c
470
cpu = addr >> 16;
arch/loongarch/kvm/intc/eiointc.c
48
vcpu = kvm_get_vcpu_by_id(s->kvm, cpu);
arch/loongarch/kvm/intc/eiointc.c
494
if (cpu >= s->num_cpu)
arch/loongarch/kvm/intc/eiointc.c
498
p = (void *)s->coreisr[cpu] + offset * 4;
arch/loongarch/kvm/intc/eiointc.c
50
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
arch/loongarch/kvm/intc/eiointc.c
58
__set_bit(irq, (unsigned long *)s->coreisr[cpu]);
arch/loongarch/kvm/intc/eiointc.c
59
found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
arch/loongarch/kvm/intc/eiointc.c
60
__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
arch/loongarch/kvm/intc/eiointc.c
62
__clear_bit(irq, (unsigned long *)s->coreisr[cpu]);
arch/loongarch/kvm/intc/eiointc.c
63
__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
arch/loongarch/kvm/intc/eiointc.c
64
found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
arch/loongarch/kvm/intc/eiointc.c
77
int i, cpu, cpuid;
arch/loongarch/kvm/intc/eiointc.c
93
cpu = vcpu->vcpu_id;
arch/loongarch/kvm/intc/eiointc.c
94
if (s->sw_coremap[irq + i] == cpu)
arch/loongarch/kvm/intc/ipi.c
106
int i, cpu, mailbox, offset;
arch/loongarch/kvm/intc/ipi.c
110
cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
arch/loongarch/kvm/intc/ipi.c
111
vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
arch/loongarch/kvm/intc/ipi.c
113
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
arch/loongarch/kvm/intc/ipi.c
170
int cpu, offset;
arch/loongarch/kvm/intc/ipi.c
173
cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
arch/loongarch/kvm/intc/ipi.c
174
vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
arch/loongarch/kvm/intc/ipi.c
176
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
arch/loongarch/kvm/intc/ipi.c
27
int cpu;
arch/loongarch/kvm/intc/ipi.c
299
int cpu, addr;
arch/loongarch/kvm/intc/ipi.c
30
cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
arch/loongarch/kvm/intc/ipi.c
304
cpu = (attr->attr >> 16) & 0x3ff;
arch/loongarch/kvm/intc/ipi.c
307
vcpu = kvm_get_vcpu_by_id(dev->kvm, cpu);
arch/loongarch/kvm/intc/ipi.c
309
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
arch/loongarch/kvm/intc/ipi.c
31
vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
arch/loongarch/kvm/intc/ipi.c
33
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
arch/loongarch/kvm/main.c
205
static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu)
arch/loongarch/kvm/main.c
210
context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
arch/loongarch/kvm/main.c
229
int cpu;
arch/loongarch/kvm/main.c
234
cpu = smp_processor_id();
arch/loongarch/kvm/main.c
239
context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
arch/loongarch/kvm/main.c
240
migrated = (vcpu->cpu != cpu);
arch/loongarch/kvm/main.c
252
kvm_update_vpid(vcpu, cpu);
arch/loongarch/kvm/main.c
254
vcpu->cpu = cpu;
arch/loongarch/kvm/main.c
351
int cpu, order, ret;
arch/loongarch/kvm/main.c
398
for_each_possible_cpu(cpu) {
arch/loongarch/kvm/main.c
399
context = per_cpu_ptr(vmcs, cpu);
arch/loongarch/kvm/main.c
470
MODULE_DEVICE_TABLE(cpu, kvm_feature);
arch/loongarch/kvm/vcpu.c
1595
int cpu;
arch/loongarch/kvm/vcpu.c
1607
for_each_possible_cpu(cpu) {
arch/loongarch/kvm/vcpu.c
1608
context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
arch/loongarch/kvm/vcpu.c
1614
static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/loongarch/kvm/vcpu.c
1624
migrated = (vcpu->arch.last_sched_cpu != cpu);
arch/loongarch/kvm/vcpu.c
1630
context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
arch/loongarch/kvm/vcpu.c
1715
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/loongarch/kvm/vcpu.c
1721
_kvm_vcpu_load(vcpu, cpu);
arch/loongarch/kvm/vcpu.c
1725
static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
arch/loongarch/kvm/vcpu.c
1844
int cpu, idx;
arch/loongarch/kvm/vcpu.c
1858
cpu = smp_processor_id();
arch/loongarch/kvm/vcpu.c
1859
vcpu->arch.last_sched_cpu = cpu;
arch/loongarch/kvm/vcpu.c
1862
_kvm_vcpu_put(vcpu, cpu);
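Editorial note: the kvm/vcpu.c fragments above detect migration by comparing the host CPU a vCPU last ran on against the current one (migrated = (vcpu->arch.last_sched_cpu != cpu)). This is a hedged userspace sketch of that check; the vpid field is a stand-in for whatever per-CPU guest state must be refreshed after a move.

#include <stdbool.h>
#include <stdio.h>

struct vcpu {
    int last_sched_cpu;
    int vpid; /* stand-in for the per-CPU guest TLB tag, illustrative */
};

static bool vcpu_load(struct vcpu *v, int cpu)
{
    bool migrated = (v->last_sched_cpu != cpu);

    if (migrated)
        v->vpid = 0; /* stale on this CPU: force reallocation */
    v->last_sched_cpu = cpu;
    return migrated;
}

int main(void)
{
    struct vcpu v = { .last_sched_cpu = 0, .vpid = 7 };

    printf("load on cpu0: migrated=%d\n", vcpu_load(&v, 0)); /* 0 */
    printf("load on cpu2: migrated=%d\n", vcpu_load(&v, 2)); /* 1 */
    return 0;
}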
arch/loongarch/mm/tlb.c
115
int cpu = smp_processor_id();
arch/loongarch/mm/tlb.c
117
if (asid_valid(vma->vm_mm, cpu)) {
arch/loongarch/mm/tlb.c
120
newpid = cpu_asid(cpu, vma->vm_mm);
arch/loongarch/mm/tlb.c
124
cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm));
arch/loongarch/mm/tlb.c
266
static void setup_tlb_handler(int cpu)
arch/loongarch/mm/tlb.c
279
if (cpu == 0) {
arch/loongarch/mm/tlb.c
291
rcutree_report_cpu_starting(cpu);
arch/loongarch/mm/tlb.c
296
if (pcpu_handlers[cpu])
arch/loongarch/mm/tlb.c
299
page = alloc_pages_node(cpu_to_node(cpu), GFP_ATOMIC, get_order(vec_sz));
arch/loongarch/mm/tlb.c
304
pcpu_handlers[cpu] = (unsigned long)addr;
arch/loongarch/mm/tlb.c
307
csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
arch/loongarch/mm/tlb.c
308
csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
arch/loongarch/mm/tlb.c
309
csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
arch/loongarch/mm/tlb.c
314
void tlb_init(int cpu)
arch/loongarch/mm/tlb.c
320
setup_tlb_handler(cpu);
arch/loongarch/mm/tlb.c
43
int cpu;
arch/loongarch/mm/tlb.c
47
cpu = smp_processor_id();
arch/loongarch/mm/tlb.c
49
if (asid_valid(mm, cpu))
arch/loongarch/mm/tlb.c
50
drop_mmu_context(mm, cpu);
arch/loongarch/mm/tlb.c
52
cpumask_clear_cpu(cpu, mm_cpumask(mm));
arch/loongarch/mm/tlb.c
61
int cpu = smp_processor_id();
arch/loongarch/mm/tlb.c
63
if (asid_valid(mm, cpu)) {
arch/loongarch/mm/tlb.c
73
int asid = cpu_asid(cpu, mm);
arch/loongarch/mm/tlb.c
80
drop_mmu_context(mm, cpu);
arch/loongarch/mm/tlb.c
84
cpumask_clear_cpu(cpu, mm_cpumask(mm));
arch/loongarch/vdso/vgetcpu.c
30
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, void *unused);
arch/loongarch/vdso/vgetcpu.c
31
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, void *unused)
arch/loongarch/vdso/vgetcpu.c
37
if (cpu)
arch/loongarch/vdso/vgetcpu.c
38
*cpu = cpu_id;
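Editorial note: the vgetcpu.c fragments above show __vdso_getcpu tolerating NULL out-parameters, storing the id only when a pointer is supplied. A small userspace analogue of that calling convention follows, using glibc's sched_getcpu() as the id source; the node lookup is omitted here as an assumption-free simplification.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int my_getcpu(unsigned int *cpu, unsigned int *node)
{
    unsigned int cpu_id = (unsigned int)sched_getcpu();

    if (cpu)
        *cpu = cpu_id; /* callers may pass NULL for either out-param */
    if (node)
        *node = 0;     /* node lookup omitted in this sketch */
    return 0;
}

int main(void)
{
    unsigned int c;

    my_getcpu(&c, NULL);
    printf("running on cpu %u\n", c);
    return 0;
}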
arch/m68k/include/asm/thread_info.h
31
__u32 cpu; /* should always be 0 on m68k */
arch/m68k/kernel/setup_mm.c
378
const char *cpu, *mmu, *fpu;
arch/m68k/kernel/setup_mm.c
388
cpu = "68020";
arch/m68k/kernel/setup_mm.c
391
cpu = "68030";
arch/m68k/kernel/setup_mm.c
394
cpu = "68040";
arch/m68k/kernel/setup_mm.c
397
cpu = "68060";
arch/m68k/kernel/setup_mm.c
400
cpu = "ColdFire";
arch/m68k/kernel/setup_mm.c
403
cpu = "680x0";
arch/m68k/kernel/setup_mm.c
451
cpu, mmu, fpu,
arch/m68k/kernel/setup_no.c
174
char *cpu, *mmu, *fpu;
arch/m68k/kernel/setup_no.c
177
cpu = CPU_NAME;
arch/m68k/kernel/setup_no.c
188
cpu, mmu, fpu,
arch/microblaze/include/asm/cpuinfo.h
100
of_property_read_u32(cpu, n, &val);
arch/microblaze/include/asm/cpuinfo.h
93
void set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu);
arch/microblaze/include/asm/cpuinfo.h
94
void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu);
arch/microblaze/include/asm/cpuinfo.h
96
static inline unsigned int fcpu(struct device_node *cpu, char *n)
arch/microblaze/include/asm/thread_info.h
63
__u32 cpu; /* current CPU */
arch/microblaze/include/asm/thread_info.h
76
.cpu = 0, \
arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
28
void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
arch/microblaze/kernel/cpu/cpuinfo-static.c
101
ci->use_dopb = fcpu(cpu, "xlnx,d-opb");
arch/microblaze/kernel/cpu/cpuinfo-static.c
102
ci->use_iopb = fcpu(cpu, "xlnx,i-opb");
arch/microblaze/kernel/cpu/cpuinfo-static.c
103
ci->use_dlmb = fcpu(cpu, "xlnx,d-lmb");
arch/microblaze/kernel/cpu/cpuinfo-static.c
104
ci->use_ilmb = fcpu(cpu, "xlnx,i-lmb");
arch/microblaze/kernel/cpu/cpuinfo-static.c
106
ci->num_fsl = fcpu(cpu, "xlnx,fsl-links");
arch/microblaze/kernel/cpu/cpuinfo-static.c
107
ci->irq_edge = fcpu(cpu, "xlnx,interrupt-is-edge");
arch/microblaze/kernel/cpu/cpuinfo-static.c
108
ci->irq_positive = fcpu(cpu, "xlnx,edge-is-positive");
arch/microblaze/kernel/cpu/cpuinfo-static.c
111
ci->hw_debug = fcpu(cpu, "xlnx,debug-enabled");
arch/microblaze/kernel/cpu/cpuinfo-static.c
112
ci->num_pc_brk = fcpu(cpu, "xlnx,number-of-pc-brk");
arch/microblaze/kernel/cpu/cpuinfo-static.c
113
ci->num_rd_brk = fcpu(cpu, "xlnx,number-of-rd-addr-brk");
arch/microblaze/kernel/cpu/cpuinfo-static.c
114
ci->num_wr_brk = fcpu(cpu, "xlnx,number-of-wr-addr-brk");
arch/microblaze/kernel/cpu/cpuinfo-static.c
116
ci->pvr_user1 = fcpu(cpu, "xlnx,pvr-user1");
arch/microblaze/kernel/cpu/cpuinfo-static.c
117
ci->pvr_user2 = fcpu(cpu, "xlnx,pvr-user2");
arch/microblaze/kernel/cpu/cpuinfo-static.c
119
ci->mmu = fcpu(cpu, "xlnx,use-mmu");
arch/microblaze/kernel/cpu/cpuinfo-static.c
120
ci->mmu_privins = fcpu(cpu, "xlnx,mmu-privileged-instr");
arch/microblaze/kernel/cpu/cpuinfo-static.c
121
ci->endian = fcpu(cpu, "xlnx,endianness");
arch/microblaze/kernel/cpu/cpuinfo-static.c
23
void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
arch/microblaze/kernel/cpu/cpuinfo-static.c
28
(fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) |
arch/microblaze/kernel/cpu/cpuinfo-static.c
29
(fcpu(cpu, "xlnx,use-msr-instr") ? PVR2_USE_MSR_INSTR : 0) |
arch/microblaze/kernel/cpu/cpuinfo-static.c
30
(fcpu(cpu, "xlnx,use-pcmp-instr") ? PVR2_USE_PCMP_INSTR : 0) |
arch/microblaze/kernel/cpu/cpuinfo-static.c
31
(fcpu(cpu, "xlnx,use-div") ? PVR0_USE_DIV_MASK : 0);
arch/microblaze/kernel/cpu/cpuinfo-static.c
43
ci->use_mult = fcpu(cpu, "xlnx,use-hw-mul");
arch/microblaze/kernel/cpu/cpuinfo-static.c
51
ci->use_fpu = fcpu(cpu, "xlnx,use-fpu");
arch/microblaze/kernel/cpu/cpuinfo-static.c
59
(fcpu(cpu, "xlnx,unaligned-exceptions") ?
arch/microblaze/kernel/cpu/cpuinfo-static.c
61
(fcpu(cpu, "xlnx,ill-opcode-exception") ?
arch/microblaze/kernel/cpu/cpuinfo-static.c
63
(fcpu(cpu, "xlnx,iopb-bus-exception") ?
arch/microblaze/kernel/cpu/cpuinfo-static.c
65
(fcpu(cpu, "xlnx,dopb-bus-exception") ?
arch/microblaze/kernel/cpu/cpuinfo-static.c
67
(fcpu(cpu, "xlnx,div-zero-exception") ?
arch/microblaze/kernel/cpu/cpuinfo-static.c
69
(fcpu(cpu, "xlnx,fpu-exception") ? PVR2_FPU_EXC_MASK : 0) |
arch/microblaze/kernel/cpu/cpuinfo-static.c
70
(fcpu(cpu, "xlnx,fsl-exception") ? PVR2_USE_EXTEND_FSL : 0);
arch/microblaze/kernel/cpu/cpuinfo-static.c
72
ci->use_icache = fcpu(cpu, "xlnx,use-icache");
arch/microblaze/kernel/cpu/cpuinfo-static.c
73
ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits");
arch/microblaze/kernel/cpu/cpuinfo-static.c
74
ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr");
arch/microblaze/kernel/cpu/cpuinfo-static.c
75
ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2;
arch/microblaze/kernel/cpu/cpuinfo-static.c
77
if (fcpu(cpu, "xlnx,icache-use-fsl"))
arch/microblaze/kernel/cpu/cpuinfo-static.c
82
ci->icache_size = fcpu(cpu, "i-cache-size");
arch/microblaze/kernel/cpu/cpuinfo-static.c
83
ci->icache_base = fcpu(cpu, "i-cache-baseaddr");
arch/microblaze/kernel/cpu/cpuinfo-static.c
84
ci->icache_high = fcpu(cpu, "i-cache-highaddr");
arch/microblaze/kernel/cpu/cpuinfo-static.c
86
ci->use_dcache = fcpu(cpu, "xlnx,use-dcache");
arch/microblaze/kernel/cpu/cpuinfo-static.c
87
ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag");
arch/microblaze/kernel/cpu/cpuinfo-static.c
88
ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr");
arch/microblaze/kernel/cpu/cpuinfo-static.c
89
ci->dcache_line_length = fcpu(cpu, "xlnx,dcache-line-len") << 2;
arch/microblaze/kernel/cpu/cpuinfo-static.c
91
if (fcpu(cpu, "xlnx,dcache-use-fsl"))
arch/microblaze/kernel/cpu/cpuinfo-static.c
96
ci->dcache_size = fcpu(cpu, "d-cache-size");
arch/microblaze/kernel/cpu/cpuinfo-static.c
97
ci->dcache_base = fcpu(cpu, "d-cache-baseaddr");
arch/microblaze/kernel/cpu/cpuinfo-static.c
98
ci->dcache_high = fcpu(cpu, "d-cache-highaddr");
arch/microblaze/kernel/cpu/cpuinfo-static.c
99
ci->dcache_wb = fcpu(cpu, "xlnx,dcache-use-writeback");
arch/microblaze/kernel/cpu/cpuinfo.c
103
set_cpuinfo_static(&cpuinfo, cpu);
arch/microblaze/kernel/cpu/cpuinfo.c
110
set_cpuinfo_static(&cpuinfo, cpu);
arch/microblaze/kernel/cpu/cpuinfo.c
111
set_cpuinfo_pvr_full(&cpuinfo, cpu);
arch/microblaze/kernel/cpu/cpuinfo.c
115
set_cpuinfo_static(&cpuinfo, cpu);
arch/microblaze/kernel/cpu/cpuinfo.c
122
of_node_put(cpu);
arch/microblaze/kernel/cpu/cpuinfo.c
129
clk = of_clk_get(cpu, 0);
arch/microblaze/kernel/cpu/cpuinfo.c
133
cpuinfo.cpu_clock_freq = fcpu(cpu, "timebase-frequency");
arch/microblaze/kernel/cpu/cpuinfo.c
89
static struct device_node *cpu;
arch/microblaze/kernel/cpu/cpuinfo.c
93
cpu = of_get_cpu_node(0, NULL);
arch/microblaze/kernel/cpu/cpuinfo.c
94
if (!cpu)
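Editorial note: across the microblaze cpuinfo-static.c fragments above, one helper, fcpu(), funnels every "read a u32 property from the CPU device-tree node" call so each field assignment stays a single line. This is a hedged userspace model of that pattern with a flat property table standing in for the device tree; the property values are illustrative.

#include <stdio.h>
#include <string.h>

struct prop { const char *name; unsigned int val; };

static const struct prop cpu_node[] = {
    { "xlnx,use-icache", 1 },
    { "xlnx,icache-line-len", 8 },
};

static unsigned int fcpu(const struct prop *node, size_t n, const char *name)
{
    for (size_t i = 0; i < n; i++)
        if (!strcmp(node[i].name, name))
            return node[i].val;
    return 0; /* an absent property reads as 0 */
}

int main(void)
{
    size_t n = sizeof(cpu_node) / sizeof(cpu_node[0]);
    /* mirrors: ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2; */
    unsigned int line_len = fcpu(cpu_node, n, "xlnx,icache-line-len") << 2;

    printf("icache line length: %u bytes\n", line_len); /* 8 << 2 = 32 */
    return 0;
}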
arch/mips/bcm63xx/cpu.c
297
unsigned int cpu = smp_processor_id();
arch/mips/bcm63xx/cpu.c
306
__cpu_name[cpu] = "Broadcom BCM6338";
arch/mips/bcm63xx/irq.c
119
int cpu; \
arch/mips/bcm63xx/irq.c
122
for_each_present_cpu(cpu) { \
arch/mips/bcm63xx/irq.c
123
if (!irq_mask_addr[cpu]) \
arch/mips/bcm63xx/irq.c
126
val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
arch/mips/bcm63xx/irq.c
128
bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
arch/mips/bcm63xx/irq.c
141
int cpu; \
arch/mips/bcm63xx/irq.c
144
for_each_present_cpu(cpu) { \
arch/mips/bcm63xx/irq.c
145
if (!irq_mask_addr[cpu]) \
arch/mips/bcm63xx/irq.c
148
val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
arch/mips/bcm63xx/irq.c
149
if (enable_irq_for_cpu(cpu, d, m)) \
arch/mips/bcm63xx/irq.c
153
bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
arch/mips/bcm63xx/irq.c
28
static void (*dispatch_internal)(int cpu);
arch/mips/bcm63xx/irq.c
53
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
arch/mips/bcm63xx/irq.c
56
bool enable = cpu_online(cpu);
arch/mips/bcm63xx/irq.c
60
enable &= cpumask_test_cpu(cpu, m);
arch/mips/bcm63xx/irq.c
62
enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
arch/mips/bcm63xx/irq.c
75
static void __dispatch_internal_##width(int cpu) \
arch/mips/bcm63xx/irq.c
81
unsigned int *next = &i[cpu]; \
arch/mips/bcm63xx/irq.c
89
val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
arch/mips/bcm63xx/irq.c
90
val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
arch/mips/cavium-octeon/octeon-irq.c
1081
int cpu = octeon_cpu_for_coreid(coreid);
arch/mips/cavium-octeon/octeon-irq.c
1082
raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
arch/mips/cavium-octeon/octeon-irq.c
1085
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
1103
int cpu = octeon_cpu_for_coreid(coreid);
arch/mips/cavium-octeon/octeon-irq.c
1105
set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
143
static int octeon_coreid_for_cpu(int cpu)
arch/mips/cavium-octeon/octeon-irq.c
146
return cpu_logical_map(cpu);
arch/mips/cavium-octeon/octeon-irq.c
1679
int cpu = next_cpu_for_irq(data);
arch/mips/cavium-octeon/octeon-irq.c
1680
int coreid = octeon_coreid_for_cpu(cpu);
arch/mips/cavium-octeon/octeon-irq.c
1740
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
1747
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
1749
octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
arch/mips/cavium-octeon/octeon-irq.c
1756
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
1761
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
1763
octeon_coreid_for_cpu(cpu));
arch/mips/cavium-octeon/octeon-irq.c
1770
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
1775
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
1777
octeon_coreid_for_cpu(cpu));
arch/mips/cavium-octeon/octeon-irq.c
1808
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
1819
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
1821
if (cpumask_test_cpu(cpu, dest) && enable_one) {
arch/mips/cavium-octeon/octeon-irq.c
1824
octeon_coreid_for_cpu(cpu)) +
arch/mips/cavium-octeon/octeon-irq.c
1828
octeon_coreid_for_cpu(cpu)) +
arch/mips/cavium-octeon/octeon-irq.c
2407
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
2414
cpu = next_cpu_for_irq(data);
arch/mips/cavium-octeon/octeon-irq.c
2425
isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
arch/mips/cavium-octeon/octeon-irq.c
2519
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
2530
cpu = cpumask_first(dest);
arch/mips/cavium-octeon/octeon-irq.c
2531
if (cpu >= nr_cpu_ids)
arch/mips/cavium-octeon/octeon-irq.c
2532
cpu = smp_processor_id();
arch/mips/cavium-octeon/octeon-irq.c
2533
cd->current_cpu = cpu;
arch/mips/cavium-octeon/octeon-irq.c
2542
isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
arch/mips/cavium-octeon/octeon-irq.c
2655
static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
arch/mips/cavium-octeon/octeon-irq.c
2657
int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;
arch/mips/cavium-octeon/octeon-irq.c
2695
void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
arch/mips/cavium-octeon/octeon-irq.c
2705
intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
arch/mips/cavium-octeon/octeon-irq.c
2706
ciu3_info = per_cpu(octeon_ciu3_info, cpu);
arch/mips/cavium-octeon/octeon-irq.c
2716
static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
arch/mips/cavium-octeon/octeon-irq.c
2724
intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
arch/mips/cavium-octeon/octeon-irq.c
2725
ciu3_info = per_cpu(octeon_ciu3_info, cpu);
arch/mips/cavium-octeon/octeon-irq.c
2735
unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);
arch/mips/cavium-octeon/octeon-irq.c
2747
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
275
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
2752
for_each_online_cpu(cpu)
arch/mips/cavium-octeon/octeon-irq.c
2753
octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
arch/mips/cavium-octeon/octeon-irq.c
2758
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
2763
for_each_online_cpu(cpu)
arch/mips/cavium-octeon/octeon-irq.c
2764
octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
arch/mips/cavium-octeon/octeon-irq.c
281
cpu = cd->current_cpu;
arch/mips/cavium-octeon/octeon-irq.c
283
cpu = cpumask_next(cpu, mask);
arch/mips/cavium-octeon/octeon-irq.c
284
if (cpu >= nr_cpu_ids) {
arch/mips/cavium-octeon/octeon-irq.c
285
cpu = -1;
arch/mips/cavium-octeon/octeon-irq.c
287
} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
arch/mips/cavium-octeon/octeon-irq.c
292
cpu = cpumask_first(mask);
arch/mips/cavium-octeon/octeon-irq.c
294
cpu = smp_processor_id();
arch/mips/cavium-octeon/octeon-irq.c
296
cd->current_cpu = cpu;
arch/mips/cavium-octeon/octeon-irq.c
297
return cpu;
arch/mips/cavium-octeon/octeon-irq.c
305
int cpu = next_cpu_for_irq(data);
arch/mips/cavium-octeon/octeon-irq.c
306
int coreid = octeon_coreid_for_cpu(cpu);
arch/mips/cavium-octeon/octeon-irq.c
310
raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
arch/mips/cavium-octeon/octeon-irq.c
316
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
325
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
405
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
411
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
412
int coreid = octeon_coreid_for_cpu(cpu);
arch/mips/cavium-octeon/octeon-irq.c
413
lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
arch/mips/cavium-octeon/octeon-irq.c
415
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
417
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
438
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
444
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
445
int coreid = octeon_coreid_for_cpu(cpu);
arch/mips/cavium-octeon/octeon-irq.c
446
lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
arch/mips/cavium-octeon/octeon-irq.c
448
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
450
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
474
int cpu = next_cpu_for_irq(data);
arch/mips/cavium-octeon/octeon-irq.c
485
int index = octeon_coreid_for_cpu(cpu) * 2;
arch/mips/cavium-octeon/octeon-irq.c
486
set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
489
int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
arch/mips/cavium-octeon/octeon-irq.c
490
set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
501
int cpu = next_cpu_for_irq(data);
arch/mips/cavium-octeon/octeon-irq.c
502
int index = octeon_coreid_for_cpu(cpu);
arch/mips/cavium-octeon/octeon-irq.c
517
int cpu = next_cpu_for_irq(data);
arch/mips/cavium-octeon/octeon-irq.c
518
int index = octeon_coreid_for_cpu(cpu);
arch/mips/cavium-octeon/octeon-irq.c
530
int cpu = next_cpu_for_irq(data);
arch/mips/cavium-octeon/octeon-irq.c
531
int index = octeon_coreid_for_cpu(cpu);
arch/mips/cavium-octeon/octeon-irq.c
542
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
549
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
550
int coreid = octeon_coreid_for_cpu(cpu);
arch/mips/cavium-octeon/octeon-irq.c
623
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
631
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
632
int index = octeon_coreid_for_cpu(cpu) * 2;
arch/mips/cavium-octeon/octeon-irq.c
634
&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
638
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
639
int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
arch/mips/cavium-octeon/octeon-irq.c
641
&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
653
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
661
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
662
int index = octeon_coreid_for_cpu(cpu) * 2;
arch/mips/cavium-octeon/octeon-irq.c
664
&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
668
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
669
int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
arch/mips/cavium-octeon/octeon-irq.c
671
&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
769
int cpu = smp_processor_id();
arch/mips/cavium-octeon/octeon-irq.c
773
if (!cpumask_test_cpu(cpu, mask))
arch/mips/cavium-octeon/octeon-irq.c
782
cpumask_clear_cpu(cpu, &new_affinity);
arch/mips/cavium-octeon/octeon-irq.c
794
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
815
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
816
int coreid = octeon_coreid_for_cpu(cpu);
arch/mips/cavium-octeon/octeon-irq.c
818
lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
arch/mips/cavium-octeon/octeon-irq.c
822
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
824
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
826
if (cpumask_test_cpu(cpu, dest) && enable_one) {
arch/mips/cavium-octeon/octeon-irq.c
856
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
868
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
869
unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
870
int index = octeon_coreid_for_cpu(cpu) * 2;
arch/mips/cavium-octeon/octeon-irq.c
871
if (cpumask_test_cpu(cpu, dest) && enable_one) {
arch/mips/cavium-octeon/octeon-irq.c
881
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
882
unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
883
int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
arch/mips/cavium-octeon/octeon-irq.c
884
if (cpumask_test_cpu(cpu, dest) && enable_one) {
arch/mips/cavium-octeon/octeon-irq.c
901
int cpu;
arch/mips/cavium-octeon/octeon-irq.c
912
for_each_online_cpu(cpu) {
arch/mips/cavium-octeon/octeon-irq.c
913
int index = octeon_coreid_for_cpu(cpu);
arch/mips/cavium-octeon/octeon-irq.c
915
if (cpumask_test_cpu(cpu, dest) && enable_one) {
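Editorial note: the octeon-irq.c fragments above (lines 281-297 of the source) rotate interrupt delivery across the affinity mask: continue from the CPU used last time, wrap to the first set bit at the end, and fall back to the current CPU when the mask is empty. A self-contained userspace sketch of that round-robin selection follows; the 32-bit mask and its contents are illustrative.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

static int mask_next(uint32_t mask, int after)
{
    for (int c = after + 1; c < NR_CPUS; c++)
        if (mask & (1u << c))
            return c;
    return -1; /* past the end: caller wraps around */
}

static int next_cpu_for_irq(uint32_t affinity, int *current_cpu, int this_cpu)
{
    int cpu = mask_next(affinity, *current_cpu);

    if (cpu < 0)                       /* wrapped */
        cpu = mask_next(affinity, -1); /* restart at the first set bit */
    if (cpu < 0)                       /* empty mask */
        cpu = this_cpu;
    *current_cpu = cpu;
    return cpu;
}

int main(void)
{
    uint32_t affinity = (1u << 1) | (1u << 3) | (1u << 6);
    int current = 0;

    for (int i = 0; i < 5; i++)
        printf("irq -> cpu%d\n", next_cpu_for_irq(affinity, &current, 0));
    return 0; /* cycles 1, 3, 6, 1, 3 */
}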
arch/mips/cavium-octeon/setup.c
199
int cpu;
arch/mips/cavium-octeon/setup.c
210
for_each_online_cpu(cpu)
arch/mips/cavium-octeon/setup.c
211
cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
arch/mips/cavium-octeon/setup.c
277
int cpu;
arch/mips/cavium-octeon/setup.c
280
for_each_online_cpu(cpu)
arch/mips/cavium-octeon/setup.c
281
cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
arch/mips/cavium-octeon/setup.c
434
int cpu;
arch/mips/cavium-octeon/setup.c
435
for_each_online_cpu(cpu)
arch/mips/cavium-octeon/setup.c
436
cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
arch/mips/cavium-octeon/setup.c
91
int cpu = smp_processor_id();
arch/mips/cavium-octeon/setup.c
94
set_cpu_online(cpu, false);
arch/mips/cavium-octeon/smp.c
100
void octeon_send_ipi_single(int cpu, unsigned int action)
arch/mips/cavium-octeon/smp.c
102
int coreid = cpu_logical_map(cpu);
arch/mips/cavium-octeon/smp.c
209
static int octeon_boot_secondary(int cpu, struct task_struct *idle)
arch/mips/cavium-octeon/smp.c
213
pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
arch/mips/cavium-octeon/smp.c
214
cpu_logical_map(cpu));
arch/mips/cavium-octeon/smp.c
218
octeon_processor_boot = cpu_logical_map(cpu);
arch/mips/cavium-octeon/smp.c
290
unsigned int cpu = smp_processor_id();
arch/mips/cavium-octeon/smp.c
295
set_cpu_online(cpu, false);
arch/mips/cavium-octeon/smp.c
305
static void octeon_cpu_die(unsigned int cpu)
arch/mips/cavium-octeon/smp.c
307
int coreid = cpu_logical_map(cpu);
arch/mips/cavium-octeon/smp.c
311
while (per_cpu(cpu_state, cpu) != CPU_DEAD)
arch/mips/cavium-octeon/smp.c
345
int cpu = cpu_number_map(cvmx_get_core_num());
arch/mips/cavium-octeon/smp.c
350
per_cpu(cpu_state, cpu) = CPU_DEAD;
arch/mips/cavium-octeon/smp.c
363
static int octeon_update_boot_vector(unsigned int cpu)
arch/mips/cavium-octeon/smp.c
366
int coreid = cpu_logical_map(cpu);
arch/mips/cavium-octeon/smp.c
473
static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
arch/mips/cavium-octeon/smp.c
479
octeon_ciu3_mbox_send(cpu, i);
arch/mips/cavium-octeon/smp.c
487
unsigned int cpu;
arch/mips/cavium-octeon/smp.c
489
for_each_cpu(cpu, mask)
arch/mips/cavium-octeon/smp.c
490
octeon_78xx_send_ipi_single(cpu, action);
arch/mips/fw/cfe/cfe_api.c
103
xiocb.plist.xiocb_cpuctl.cpu_number = cpu;
arch/mips/fw/cfe/cfe_api.c
73
int cfe_cpu_start(int cpu, void (*fn) (void), long sp, long gp, long a1)
arch/mips/fw/cfe/cfe_api.c
82
xiocb.plist.xiocb_cpuctl.cpu_number = cpu;
arch/mips/fw/cfe/cfe_api.c
94
int cfe_cpu_stop(int cpu)
arch/mips/include/asm/amon.h
10
int amon_cpu_avail(int cpu);
arch/mips/include/asm/amon.h
11
int amon_cpu_start(int cpu, unsigned long pc, unsigned long sp,
arch/mips/include/asm/cdmm.h
24
unsigned int cpu;
arch/mips/include/asm/fw/cfe/cfe_api.h
82
int cfe_cpu_start(int cpu, void (*fn) (void), long sp, long gp, long a1);
arch/mips/include/asm/fw/cfe/cfe_api.h
83
int cfe_cpu_stop(int cpu);
arch/mips/include/asm/irq.h
38
static inline bool on_irq_stack(int cpu, unsigned long sp)
arch/mips/include/asm/irq.h
40
unsigned long low = (unsigned long)irq_stack[cpu];
arch/mips/include/asm/kvm_host.h
755
int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
arch/mips/include/asm/kvm_host.h
756
int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
arch/mips/include/asm/mach-ip27/topology.h
16
#define cpu_to_node(cpu) (cputonasid(cpu))
arch/mips/include/asm/mach-loongson64/topology.h
7
#define cpu_to_node(cpu) (cpu_logical_map(cpu) >> 2)
arch/mips/include/asm/mips-cm.h
455
static inline unsigned int mips_cm_vp_id(unsigned int cpu)
arch/mips/include/asm/mips-cm.h
457
unsigned int core = cpu_core(&cpu_data[cpu]);
arch/mips/include/asm/mips-cm.h
458
unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);
arch/mips/include/asm/mips-cm.h
511
static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block)
arch/mips/include/asm/mips-cm.h
513
struct cpuinfo_mips *d = &cpu_data[cpu];
arch/mips/include/asm/mmu_context.h
101
static inline u64 asid_first_version(unsigned int cpu)
arch/mips/include/asm/mmu_context.h
103
return ~asid_version_mask(cpu) + 1;
arch/mips/include/asm/mmu_context.h
106
static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
arch/mips/include/asm/mmu_context.h
111
return mm->context.asid[cpu];
arch/mips/include/asm/mmu_context.h
114
static inline void set_cpu_context(unsigned int cpu,
arch/mips/include/asm/mmu_context.h
120
mm->context.asid[cpu] = ctx;
arch/mips/include/asm/mmu_context.h
123
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
arch/mips/include/asm/mmu_context.h
124
#define cpu_asid(cpu, mm) \
arch/mips/include/asm/mmu_context.h
125
(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
arch/mips/include/asm/mmu_context.h
158
unsigned int cpu = smp_processor_id();
arch/mips/include/asm/mmu_context.h
169
cpumask_clear_cpu(cpu, mm_cpumask(prev));
arch/mips/include/asm/mmu_context.h
170
cpumask_set_cpu(cpu, mm_cpumask(next));
arch/mips/include/asm/mmu_context.h
190
unsigned int cpu;
arch/mips/include/asm/mmu_context.h
196
cpu = smp_processor_id();
arch/mips/include/asm/mmu_context.h
197
ctx = cpu_context(cpu, mm);
arch/mips/include/asm/mmu_context.h
211
write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
arch/mips/include/asm/mmu_context.h
218
} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
arch/mips/include/asm/mmu_context.h
225
write_c0_entryhi(cpu_asid(cpu, mm));
arch/mips/include/asm/mmu_context.h
229
set_cpu_context(cpu, mm, 0);
arch/mips/include/asm/mmu_context.h
94
static inline u64 asid_version_mask(unsigned int cpu)
arch/mips/include/asm/mmu_context.h
96
unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
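Editorial note: the MIPS mmu_context.h fragments above split the context value into a hardware ASID (low bits, cpu_asid_mask) and a generation counter (high bits, asid_version_mask), with asid_first_version returning ~asid_version_mask(cpu) + 1, i.e. the lowest bit of the version field. The arithmetic below demonstrates only that identity with an assumed 8-bit ASID width; the kernel's actual mask derivation may differ.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t asid_mask = 0xff;                  /* illustrative ASID width */
    uint64_t version_mask = ~asid_mask;         /* high bits: generation */
    uint64_t first_version = ~version_mask + 1; /* == asid_mask + 1 == 0x100 */

    printf("version_mask  = %#llx\n", (unsigned long long)version_mask);
    printf("first_version = %#llx\n", (unsigned long long)first_version);
    return 0;
}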
arch/mips/include/asm/octeon/octeon.h
309
void octeon_ciu3_mbox_send(int cpu, unsigned int mbox);
arch/mips/include/asm/r4k-timer.h
15
extern void synchronise_count_slave(int cpu);
arch/mips/include/asm/r4k-timer.h
19
static inline void synchronise_count_slave(int cpu)
arch/mips/include/asm/sibyte/bcm1480_regs.h
361
#define A_BCM1480_IMR_MAPPER(cpu) (A_BCM1480_IMR_CPU0_BASE+(cpu)*BCM1480_IMR_REGISTER_SPACING)
arch/mips/include/asm/sibyte/bcm1480_regs.h
362
#define A_BCM1480_IMR_REGISTER(cpu, reg) (A_BCM1480_IMR_MAPPER(cpu)+(reg))
arch/mips/include/asm/sibyte/bcm1480_regs.h
401
#define A_BCM1480_IMR_ALIAS_MAILBOX(cpu) (A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE + \
arch/mips/include/asm/sibyte/bcm1480_regs.h
402
(cpu)*BCM1480_IMR_ALIAS_MAILBOX_SPACING)
arch/mips/include/asm/sibyte/bcm1480_regs.h
403
#define A_BCM1480_IMR_ALIAS_MAILBOX_REGISTER(cpu, reg) (A_BCM1480_IMR_ALIAS_MAILBOX(cpu)+(reg))
arch/mips/include/asm/sibyte/bcm1480_regs.h
417
#define A_BCM1480_MAILBOX_REGISTER(num, reg, cpu) \
arch/mips/include/asm/sibyte/bcm1480_regs.h
420
(cpu * BCM1480_IMR_REGISTER_SPACING) + \
arch/mips/include/asm/sibyte/sb1250.h
35
extern void sb1250_mask_irq(int cpu, int irq);
arch/mips/include/asm/sibyte/sb1250.h
36
extern void sb1250_unmask_irq(int cpu, int irq);
arch/mips/include/asm/sibyte/sb1250.h
39
extern void bcm1480_mask_irq(int cpu, int irq);
arch/mips/include/asm/sibyte/sb1250.h
40
extern void bcm1480_unmask_irq(int cpu, int irq);
arch/mips/include/asm/sibyte/sb1250_regs.h
703
#define A_IMR_MAPPER(cpu) (A_IMR_CPU0_BASE+(cpu)*IMR_REGISTER_SPACING)
arch/mips/include/asm/sibyte/sb1250_regs.h
704
#define A_IMR_REGISTER(cpu, reg) (A_IMR_MAPPER(cpu)+(reg))
arch/mips/include/asm/sibyte/sb1250_regs.h
729
#define A_MAILBOX_REGISTER(reg,cpu) \
arch/mips/include/asm/sibyte/sb1250_regs.h
730
(A_IMR_CPU0_BASE + (cpu * IMR_REGISTER_SPACING) + reg)
arch/mips/include/asm/smp-ops.h
23
void (*send_ipi_single)(int cpu, unsigned int action);
arch/mips/include/asm/smp-ops.h
27
int (*boot_secondary)(int cpu, struct task_struct *idle);
arch/mips/include/asm/smp-ops.h
33
void (*cpu_die)(unsigned int cpu);
arch/mips/include/asm/smp-ops.h
34
void (*cleanup_dead_cpu)(unsigned cpu);
arch/mips/include/asm/smp-ops.h
50
extern void mips_smp_send_ipi_single(int cpu, unsigned int action);
arch/mips/include/asm/smp.h
125
static inline void arch_send_call_function_single_ipi(int cpu)
arch/mips/include/asm/smp.h
129
mp_ops->send_ipi_single(cpu, SMP_CALL_FUNCTION);
arch/mips/include/asm/smp.h
33
return current_thread_info()->cpu;
arch/mips/include/asm/smp.h
41
#define cpu_number_map(cpu) __cpu_number_map[cpu]
arch/mips/include/asm/smp.h
45
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
arch/mips/include/asm/smp.h
70
static inline void arch_smp_send_reschedule(int cpu)
arch/mips/include/asm/smp.h
74
mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
arch/mips/include/asm/smp.h
85
static inline void __cpu_die(unsigned int cpu)
arch/mips/include/asm/smp.h
89
mp_ops->cpu_die(cpu);
arch/mips/include/asm/sn/arch.h
20
#define cputonasid(cpu) (sn_cpu_info[(cpu)].p_nasid)
arch/mips/include/asm/sn/arch.h
21
#define cputoslice(cpu) (sn_cpu_info[(cpu)].p_slice)
arch/mips/include/asm/sn/launch.h
85
#define LAUNCH_SLAVE (*(void (*)(int nasid, int cpu, \
arch/mips/include/asm/sn/launch.h
92
#define LAUNCH_WAIT (*(void (*)(int nasid, int cpu, int timeout_msec)) \
arch/mips/include/asm/sn/launch.h
95
#define LAUNCH_POLL (*(launch_state_t (*)(int nasid, int cpu)) \
arch/mips/include/asm/sn/sn0/hubio.h
130
#define IIO_IGFX_INIT(widget, node, cpu, valid) (\
arch/mips/include/asm/sn/sn0/hubio.h
133
(((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT) | \
arch/mips/include/asm/thread_info.h
29
__u32 cpu; /* current CPU */
arch/mips/include/asm/thread_info.h
42
.cpu = 0, \
arch/mips/include/asm/topology.h
15
#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
arch/mips/include/asm/topology.h
16
#define topology_core_id(cpu) (cpu_core(&cpu_data[cpu]))
arch/mips/include/asm/topology.h
17
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
arch/mips/include/asm/topology.h
18
#define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu])
arch/mips/include/uapi/asm/kvm.h
221
__u32 cpu;
arch/mips/jazz/irq.c
131
unsigned int cpu = smp_processor_id();
arch/mips/jazz/irq.c
135
cd->cpumask = cpumask_of(cpu);
arch/mips/kernel/asm-offsets.c
109
OFFSET(TI_CPU, thread_info, cpu);
arch/mips/kernel/cacheinfo.c
105
fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
arch/mips/kernel/cacheinfo.c
20
int init_cache_level(unsigned int cpu)
arch/mips/kernel/cacheinfo.c
23
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/mips/kernel/cacheinfo.c
58
static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
arch/mips/kernel/cacheinfo.c
63
if (cpus_are_siblings(cpu, cpu1))
arch/mips/kernel/cacheinfo.c
67
static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
arch/mips/kernel/cacheinfo.c
70
int cluster = cpu_cluster(&cpu_data[cpu]);
arch/mips/kernel/cacheinfo.c
77
int populate_cache_leaves(unsigned int cpu)
arch/mips/kernel/cacheinfo.c
80
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/mips/kernel/cacheinfo.c
86
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
arch/mips/kernel/cacheinfo.c
88
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
arch/mips/kernel/cacheinfo.c
98
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
arch/mips/kernel/cevt-bcm1480.c
100
struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
arch/mips/kernel/cevt-bcm1480.c
101
unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
arch/mips/kernel/cevt-bcm1480.c
104
BUG_ON(cpu > 3); /* Only have 4 general purpose timers */
arch/mips/kernel/cevt-bcm1480.c
106
sprintf(name, "bcm1480-counter-%u", cpu);
arch/mips/kernel/cevt-bcm1480.c
117
cd->cpumask = cpumask_of(cpu);
arch/mips/kernel/cevt-bcm1480.c
124
bcm1480_mask_irq(cpu, irq);
arch/mips/kernel/cevt-bcm1480.c
130
IOADDR(A_BCM1480_IMR_REGISTER(cpu,
arch/mips/kernel/cevt-bcm1480.c
133
bcm1480_unmask_irq(cpu, irq);
arch/mips/kernel/cevt-bcm1480.c
135
irq_set_affinity(irq, cpumask_of(cpu));
arch/mips/kernel/cevt-bcm1480.c
33
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cevt-bcm1480.c
36
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
arch/mips/kernel/cevt-bcm1480.c
37
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
arch/mips/kernel/cevt-bcm1480.c
47
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cevt-bcm1480.c
50
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
arch/mips/kernel/cevt-bcm1480.c
59
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cevt-bcm1480.c
62
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
arch/mips/kernel/cevt-bcm1480.c
63
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
arch/mips/kernel/cevt-bcm1480.c
74
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cevt-bcm1480.c
85
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
arch/mips/kernel/cevt-bcm1480.c
98
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cevt-bcm1480.c
99
unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
arch/mips/kernel/cevt-r4k.c
134
int cpu = smp_processor_id();
arch/mips/kernel/cevt-r4k.c
153
cd = &per_cpu(mips_clockevent_device, cpu);
arch/mips/kernel/cevt-r4k.c
249
int cpu;
arch/mips/kernel/cevt-r4k.c
258
for_each_cpu(cpu, freq->policy->cpus) {
arch/mips/kernel/cevt-r4k.c
259
cd = &per_cpu(mips_clockevent_device, cpu);
arch/mips/kernel/cevt-r4k.c
285
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cevt-r4k.c
295
cd = &per_cpu(mips_clockevent_device, cpu);
arch/mips/kernel/cevt-r4k.c
305
cd->cpumask = cpumask_of(cpu);
arch/mips/kernel/cevt-sb1250.c
100
unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
arch/mips/kernel/cevt-sb1250.c
104
BUG_ON(cpu > 2);
arch/mips/kernel/cevt-sb1250.c
106
sprintf(name, "sb1250-counter-%d", cpu);
arch/mips/kernel/cevt-sb1250.c
117
cd->cpumask = cpumask_of(cpu);
arch/mips/kernel/cevt-sb1250.c
124
sb1250_mask_irq(cpu, irq);
arch/mips/kernel/cevt-sb1250.c
130
IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) +
arch/mips/kernel/cevt-sb1250.c
133
sb1250_unmask_irq(cpu, irq);
arch/mips/kernel/cevt-sb1250.c
135
irq_set_affinity(irq, cpumask_of(cpu));
arch/mips/kernel/cevt-sb1250.c
43
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cevt-sb1250.c
46
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
arch/mips/kernel/cevt-sb1250.c
47
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
arch/mips/kernel/cevt-sb1250.c
58
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cevt-sb1250.c
61
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
arch/mips/kernel/cevt-sb1250.c
62
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
arch/mips/kernel/cevt-sb1250.c
73
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cevt-sb1250.c
84
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
arch/mips/kernel/cevt-sb1250.c
97
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cevt-sb1250.c
98
unsigned int irq = K_INT_TIMER_0 + cpu;
arch/mips/kernel/cevt-sb1250.c
99
struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
arch/mips/kernel/cpu-probe.c
1041
static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
arch/mips/kernel/cpu-probe.c
1046
__cpu_name[cpu] = "R2000";
arch/mips/kernel/cpu-probe.c
1058
__cpu_name[cpu] = "R3081";
arch/mips/kernel/cpu-probe.c
1061
__cpu_name[cpu] = "R3000A";
arch/mips/kernel/cpu-probe.c
1065
__cpu_name[cpu] = "R3000";
arch/mips/kernel/cpu-probe.c
1079
__cpu_name[cpu] = "R4400PC";
arch/mips/kernel/cpu-probe.c
1082
__cpu_name[cpu] = "R4000PC";
arch/mips/kernel/cpu-probe.c
1108
__cpu_name[cpu] = mc ? "R4400MC" : "R4400SC";
arch/mips/kernel/cpu-probe.c
1111
__cpu_name[cpu] = mc ? "R4000MC" : "R4000SC";
arch/mips/kernel/cpu-probe.c
1124
__cpu_name[cpu] = "R4300";
arch/mips/kernel/cpu-probe.c
1133
__cpu_name[cpu] = "R4600";
arch/mips/kernel/cpu-probe.c
1149
__cpu_name[cpu] = "R4650";
arch/mips/kernel/cpu-probe.c
1158
__cpu_name[cpu] = "R4700";
arch/mips/kernel/cpu-probe.c
1167
__cpu_name[cpu] = "R49XX";
arch/mips/kernel/cpu-probe.c
1177
__cpu_name[cpu] = "R5000";
arch/mips/kernel/cpu-probe.c
1185
__cpu_name[cpu] = "R5500";
arch/mips/kernel/cpu-probe.c
1193
__cpu_name[cpu] = "Nevada";
arch/mips/kernel/cpu-probe.c
1201
__cpu_name[cpu] = "RM7000";
arch/mips/kernel/cpu-probe.c
1217
__cpu_name[cpu] = "R10000";
arch/mips/kernel/cpu-probe.c
1227
__cpu_name[cpu] = "R12000";
arch/mips/kernel/cpu-probe.c
1239
__cpu_name[cpu] = "R16000";
arch/mips/kernel/cpu-probe.c
1242
__cpu_name[cpu] = "R14000";
arch/mips/kernel/cpu-probe.c
1256
__cpu_name[cpu] = "ICT Loongson-2";
arch/mips/kernel/cpu-probe.c
1257
set_elf_platform(cpu, "loongson2e");
arch/mips/kernel/cpu-probe.c
1263
__cpu_name[cpu] = "ICT Loongson-2";
arch/mips/kernel/cpu-probe.c
1264
set_elf_platform(cpu, "loongson2f");
arch/mips/kernel/cpu-probe.c
1270
__cpu_name[cpu] = "ICT Loongson-3";
arch/mips/kernel/cpu-probe.c
1271
set_elf_platform(cpu, "loongson3a");
arch/mips/kernel/cpu-probe.c
1279
__cpu_name[cpu] = "ICT Loongson-3";
arch/mips/kernel/cpu-probe.c
1280
set_elf_platform(cpu, "loongson3b");
arch/mips/kernel/cpu-probe.c
1301
__cpu_name[cpu] = "ICT Loongson-1";
arch/mips/kernel/cpu-probe.c
1309
static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
arch/mips/kernel/cpu-probe.c
1316
__cpu_name[cpu] = "MIPS GENERIC QEMU";
arch/mips/kernel/cpu-probe.c
1321
__cpu_name[cpu] = "MIPS 4Kc";
arch/mips/kernel/cpu-probe.c
1327
__cpu_name[cpu] = "MIPS 4KEc";
arch/mips/kernel/cpu-probe.c
1333
__cpu_name[cpu] = "MIPS 4KSc";
arch/mips/kernel/cpu-probe.c
1338
__cpu_name[cpu] = "MIPS 5Kc";
arch/mips/kernel/cpu-probe.c
1343
__cpu_name[cpu] = "MIPS 5KE";
arch/mips/kernel/cpu-probe.c
1348
__cpu_name[cpu] = "MIPS 20Kc";
arch/mips/kernel/cpu-probe.c
1353
__cpu_name[cpu] = "MIPS 24Kc";
arch/mips/kernel/cpu-probe.c
1358
__cpu_name[cpu] = "MIPS 24KEc";
arch/mips/kernel/cpu-probe.c
1363
__cpu_name[cpu] = "MIPS 25Kc";
arch/mips/kernel/cpu-probe.c
1368
__cpu_name[cpu] = "MIPS 34Kc";
arch/mips/kernel/cpu-probe.c
1374
__cpu_name[cpu] = "MIPS 74Kc";
arch/mips/kernel/cpu-probe.c
1379
__cpu_name[cpu] = "MIPS M14Kc";
arch/mips/kernel/cpu-probe.c
1384
__cpu_name[cpu] = "MIPS M14KEc";
arch/mips/kernel/cpu-probe.c
1389
__cpu_name[cpu] = "MIPS 1004Kc";
arch/mips/kernel/cpu-probe.c
1395
__cpu_name[cpu] = "MIPS 1074Kc";
arch/mips/kernel/cpu-probe.c
1399
__cpu_name[cpu] = "MIPS interAptiv";
arch/mips/kernel/cpu-probe.c
1404
__cpu_name[cpu] = "MIPS interAptiv (multi)";
arch/mips/kernel/cpu-probe.c
1409
__cpu_name[cpu] = "MIPS proAptiv";
arch/mips/kernel/cpu-probe.c
1413
__cpu_name[cpu] = "MIPS proAptiv (multi)";
arch/mips/kernel/cpu-probe.c
1417
__cpu_name[cpu] = "MIPS P5600";
arch/mips/kernel/cpu-probe.c
1421
__cpu_name[cpu] = "MIPS P6600";
arch/mips/kernel/cpu-probe.c
1425
__cpu_name[cpu] = "MIPS I6400";
arch/mips/kernel/cpu-probe.c
1429
__cpu_name[cpu] = "MIPS I6500";
arch/mips/kernel/cpu-probe.c
1433
__cpu_name[cpu] = "MIPS M5150";
arch/mips/kernel/cpu-probe.c
1437
__cpu_name[cpu] = "MIPS M6250";
arch/mips/kernel/cpu-probe.c
1476
static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
arch/mips/kernel/cpu-probe.c
1485
__cpu_name[cpu] = "Au1000";
arch/mips/kernel/cpu-probe.c
1488
__cpu_name[cpu] = "Au1500";
arch/mips/kernel/cpu-probe.c
1491
__cpu_name[cpu] = "Au1100";
arch/mips/kernel/cpu-probe.c
1494
__cpu_name[cpu] = "Au1550";
arch/mips/kernel/cpu-probe.c
1497
__cpu_name[cpu] = "Au1200";
arch/mips/kernel/cpu-probe.c
1499
__cpu_name[cpu] = "Au1250";
arch/mips/kernel/cpu-probe.c
1502
__cpu_name[cpu] = "Au1210";
arch/mips/kernel/cpu-probe.c
1505
__cpu_name[cpu] = "Au1xxx";
arch/mips/kernel/cpu-probe.c
1511
__cpu_name[cpu] = "Au1300";
arch/mips/kernel/cpu-probe.c
1516
static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu)
arch/mips/kernel/cpu-probe.c
1524
__cpu_name[cpu] = "SiByte SB1";
arch/mips/kernel/cpu-probe.c
1531
__cpu_name[cpu] = "SiByte SB1A";
arch/mips/kernel/cpu-probe.c
1536
static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu)
arch/mips/kernel/cpu-probe.c
1542
__cpu_name[cpu] = "Sandcraft SR71000";
arch/mips/kernel/cpu-probe.c
1549
static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
arch/mips/kernel/cpu-probe.c
1555
__cpu_name[cpu] = "Philips PR4450";
arch/mips/kernel/cpu-probe.c
1561
static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
arch/mips/kernel/cpu-probe.c
1568
__cpu_name[cpu] = "Broadcom BMIPS32";
arch/mips/kernel/cpu-probe.c
1569
set_elf_platform(cpu, "bmips32");
arch/mips/kernel/cpu-probe.c
1575
__cpu_name[cpu] = "Broadcom BMIPS3300";
arch/mips/kernel/cpu-probe.c
1576
set_elf_platform(cpu, "bmips3300");
arch/mips/kernel/cpu-probe.c
1585
__cpu_name[cpu] = "Broadcom BMIPS4380";
arch/mips/kernel/cpu-probe.c
1586
set_elf_platform(cpu, "bmips4380");
arch/mips/kernel/cpu-probe.c
1591
__cpu_name[cpu] = "Broadcom BMIPS4350";
arch/mips/kernel/cpu-probe.c
1592
set_elf_platform(cpu, "bmips4350");
arch/mips/kernel/cpu-probe.c
1600
__cpu_name[cpu] = "Broadcom BMIPS5200";
arch/mips/kernel/cpu-probe.c
1602
__cpu_name[cpu] = "Broadcom BMIPS5000";
arch/mips/kernel/cpu-probe.c
1603
set_elf_platform(cpu, "bmips5000");
arch/mips/kernel/cpu-probe.c
1610
static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
arch/mips/kernel/cpu-probe.c
1620
__cpu_name[cpu] = "Cavium Octeon";
arch/mips/kernel/cpu-probe.c
1627
__cpu_name[cpu] = "Cavium Octeon+";
arch/mips/kernel/cpu-probe.c
1629
set_elf_platform(cpu, "octeon");
arch/mips/kernel/cpu-probe.c
1637
__cpu_name[cpu] = "Cavium Octeon II";
arch/mips/kernel/cpu-probe.c
1638
set_elf_platform(cpu, "octeon2");
arch/mips/kernel/cpu-probe.c
1645
__cpu_name[cpu] = "Cavium Octeon III";
arch/mips/kernel/cpu-probe.c
1646
set_elf_platform(cpu, "octeon3");
arch/mips/kernel/cpu-probe.c
1682
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
arch/mips/kernel/cpu-probe.c
1697
__cpu_name[cpu] = "Loongson-2K";
arch/mips/kernel/cpu-probe.c
1698
set_elf_platform(cpu, "gs264e");
arch/mips/kernel/cpu-probe.c
1709
__cpu_name[cpu] = "ICT Loongson-3";
arch/mips/kernel/cpu-probe.c
1710
set_elf_platform(cpu, "loongson3a");
arch/mips/kernel/cpu-probe.c
1715
__cpu_name[cpu] = "ICT Loongson-3";
arch/mips/kernel/cpu-probe.c
1716
set_elf_platform(cpu, "loongson3a");
arch/mips/kernel/cpu-probe.c
1736
__cpu_name[cpu] = "ICT Loongson-3";
arch/mips/kernel/cpu-probe.c
1737
set_elf_platform(cpu, "loongson3a");
arch/mips/kernel/cpu-probe.c
1749
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
arch/mips/kernel/cpu-probe.c
1752
static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
arch/mips/kernel/cpu-probe.c
1822
__cpu_name[cpu] = "Ingenic XBurst";
arch/mips/kernel/cpu-probe.c
1828
__cpu_name[cpu] = "Ingenic XBurst II";
arch/mips/kernel/cpu-probe.c
1850
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cpu-probe.c
1856
set_elf_platform(cpu, "mips");
arch/mips/kernel/cpu-probe.c
1869
cpu_probe_legacy(c, cpu);
arch/mips/kernel/cpu-probe.c
1872
cpu_probe_mips(c, cpu);
arch/mips/kernel/cpu-probe.c
1876
cpu_probe_alchemy(c, cpu);
arch/mips/kernel/cpu-probe.c
1879
cpu_probe_sibyte(c, cpu);
arch/mips/kernel/cpu-probe.c
1882
cpu_probe_broadcom(c, cpu);
arch/mips/kernel/cpu-probe.c
1885
cpu_probe_sandcraft(c, cpu);
arch/mips/kernel/cpu-probe.c
1888
cpu_probe_nxp(c, cpu);
arch/mips/kernel/cpu-probe.c
1891
cpu_probe_cavium(c, cpu);
arch/mips/kernel/cpu-probe.c
1894
cpu_probe_loongson(c, cpu);
arch/mips/kernel/cpu-probe.c
1900
cpu_probe_ingenic(c, cpu);
arch/mips/kernel/cpu-probe.c
1904
BUG_ON(!__cpu_name[cpu]);
arch/mips/kernel/cpu-probe.c
198
static inline void set_elf_platform(int cpu, const char *plat)
arch/mips/kernel/cpu-probe.c
200
if (cpu == 0)
arch/mips/kernel/cpu-probe.c
2004
if (cpu == 0)
arch/mips/kernel/cpu-probe.c
2059
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cpu-probe.c
2079
cpu_data[cpu].asid_cache = asid_first_version(cpu);
arch/mips/kernel/cpu-r3k-probe.c
105
__cpu_name[cpu] = "R3081";
arch/mips/kernel/cpu-r3k-probe.c
108
__cpu_name[cpu] = "R3000A";
arch/mips/kernel/cpu-r3k-probe.c
112
__cpu_name[cpu] = "R3000";
arch/mips/kernel/cpu-r3k-probe.c
122
BUG_ON(!__cpu_name[cpu]);
arch/mips/kernel/cpu-r3k-probe.c
58
static inline void set_elf_platform(int cpu, const char *plat)
arch/mips/kernel/cpu-r3k-probe.c
60
if (cpu == 0)
arch/mips/kernel/cpu-r3k-probe.c
71
unsigned int cpu = smp_processor_id();
arch/mips/kernel/cpu-r3k-probe.c
77
set_elf_platform(cpu, "mips");
arch/mips/kernel/cpu-r3k-probe.c
94
__cpu_name[cpu] = "R2000";
arch/mips/kernel/crash.c
22
int cpu = smp_processor_id();
arch/mips/kernel/crash.c
36
if (!cpu_online(cpu))
arch/mips/kernel/crash.c
40
set_cpu_online(cpu, false);
arch/mips/kernel/crash.c
43
if (!cpumask_test_cpu(cpu, &cpus_in_crash))
arch/mips/kernel/crash.c
44
crash_save_cpu(regs, cpu);
arch/mips/kernel/crash.c
45
cpumask_set_cpu(cpu, &cpus_in_crash);
arch/mips/kernel/machine_kexec.c
118
int cpu = smp_processor_id();
arch/mips/kernel/machine_kexec.c
120
if (!cpu_online(cpu))
arch/mips/kernel/machine_kexec.c
124
set_cpu_online(cpu, false);
arch/mips/kernel/mips-cm.c
264
unsigned cpu;
arch/mips/kernel/mips-cm.c
311
for_each_possible_cpu(cpu)
arch/mips/kernel/mips-cm.c
312
spin_lock_init(&per_cpu(cm_core_lock, cpu));
arch/mips/kernel/mips-cpc.c
74
unsigned int cpu;
arch/mips/kernel/mips-cpc.c
76
for_each_possible_cpu(cpu)
arch/mips/kernel/mips-cpc.c
77
spin_lock_init(&per_cpu(cpc_core_lock, cpu));
arch/mips/kernel/perf_event_mipsxx.c
376
unsigned int cpu, ctrl;
arch/mips/kernel/perf_event_mipsxx.c
383
cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
arch/mips/kernel/perf_event_mipsxx.c
385
ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
arch/mips/kernel/perf_event_mipsxx.c
388
pr_debug("Enabling perf counter for CPU%d\n", cpu);
arch/mips/kernel/perf_event_mipsxx.c
670
if (event->cpu >= 0 && !cpu_online(event->cpu))
arch/mips/kernel/pm-cps.c
103
unsigned cpu = smp_processor_id();
arch/mips/kernel/pm-cps.c
117
entry = per_cpu(nc_asm_enter, cpu)[state];
arch/mips/kernel/pm-cps.c
123
if (cpu_online(cpu)) {
arch/mips/kernel/pm-cps.c
125
&cpu_sibling_map[cpu]);
arch/mips/kernel/pm-cps.c
127
cpumask_clear_cpu(cpu, coupled_mask);
arch/mips/kernel/pm-cps.c
150
cpumask_clear_cpu(cpu, &cpu_coherent_mask);
arch/mips/kernel/pm-cps.c
154
core_ready_count = per_cpu(ready_count, cpu);
arch/mips/kernel/pm-cps.c
162
barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu]));
arch/mips/kernel/pm-cps.c
172
cpumask_set_cpu(cpu, &cpu_coherent_mask);
arch/mips/kernel/pm-cps.c
338
static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
arch/mips/kernel/pm-cps.c
443
vpe_id = cpu_vpe_id(&cpu_data[cpu]);
arch/mips/kernel/pm-cps.c
464
cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
arch/mips/kernel/pm-cps.c
468
cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
arch/mips/kernel/pm-cps.c
481
uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << cpu_core(&cpu_data[cpu]));
arch/mips/kernel/pm-cps.c
495
err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
arch/mips/kernel/pm-cps.c
632
static int cps_pm_online_cpu(unsigned int cpu)
arch/mips/kernel/pm-cps.c
638
core = cpu_core(&cpu_data[cpu]);
arch/mips/kernel/pm-cps.c
641
if (per_cpu(nc_asm_enter, cpu)[state])
arch/mips/kernel/pm-cps.c
646
entry_fn = cps_gen_entry_code(cpu, state);
arch/mips/kernel/pm-cps.c
653
for_each_cpu(sibling, &cpu_sibling_map[cpu])
arch/mips/kernel/pm-cps.c
657
if (!per_cpu(ready_count, cpu)) {
arch/mips/kernel/pm-cps.c
664
for_each_cpu(sibling, &cpu_sibling_map[cpu])
arch/mips/kernel/pm.c
41
unsigned int cpu = smp_processor_id();
arch/mips/kernel/pm.c
45
write_c0_entryhi(cpu_asid(cpu, current->mm));
arch/mips/kernel/process.c
640
int cpu;
arch/mips/kernel/process.c
642
for_each_possible_cpu(cpu) {
arch/mips/kernel/process.c
643
if (on_irq_stack(cpu, *sp)) {
arch/mips/kernel/process.c
644
stack_page = (unsigned long)irq_stack[cpu];
arch/mips/kernel/process.c
736
int cpu;
arch/mips/kernel/process.c
738
for_each_cpu(cpu, mask) {
arch/mips/kernel/process.c
745
if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
arch/mips/kernel/process.c
747
cpu);
arch/mips/kernel/process.c
751
csd = &per_cpu(backtrace_csd, cpu);
arch/mips/kernel/process.c
752
smp_call_function_single_async(cpu, csd);
arch/mips/kernel/process.c
790
int cpu;
arch/mips/kernel/process.c
865
for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
arch/mips/kernel/process.c
866
work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
arch/mips/kernel/setup.c
822
unsigned int cpu = smp_processor_id();
arch/mips/kernel/setup.c
824
cpu_data[cpu].udelay_val = loops_per_jiffy;
arch/mips/kernel/smp-bmips.c
146
__cpu_number_map[i] = cpu;
arch/mips/kernel/smp-bmips.c
147
__cpu_logical_map[cpu] = i;
arch/mips/kernel/smp-bmips.c
148
cpu++;
arch/mips/kernel/smp-bmips.c
191
static int bmips_boot_secondary(int cpu, struct task_struct *idle)
arch/mips/kernel/smp-bmips.c
212
pr_info("SMP: Booting CPU%d...\n", cpu);
arch/mips/kernel/smp-bmips.c
214
if (cpumask_test_cpu(cpu, &bmips_booted_mask)) {
arch/mips/kernel/smp-bmips.c
216
bmips_set_reset_vec(cpu, RESET_FROM_KSEG0);
arch/mips/kernel/smp-bmips.c
221
bmips43xx_send_ipi_single(cpu, 0);
arch/mips/kernel/smp-bmips.c
224
bmips5000_send_ipi_single(cpu, 0);
arch/mips/kernel/smp-bmips.c
228
bmips_set_reset_vec(cpu, RESET_FROM_KSEG1);
arch/mips/kernel/smp-bmips.c
234
if (cpu_logical_map(cpu) == 1)
arch/mips/kernel/smp-bmips.c
238
write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
arch/mips/kernel/smp-bmips.c
241
cpumask_set_cpu(cpu, &bmips_booted_mask);
arch/mips/kernel/smp-bmips.c
289
static void bmips5000_send_ipi_single(int cpu, unsigned int action)
arch/mips/kernel/smp-bmips.c
291
write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
arch/mips/kernel/smp-bmips.c
330
static void bmips43xx_send_ipi_single(int cpu, unsigned int action)
arch/mips/kernel/smp-bmips.c
335
set_c0_cause(cpu ? C_SW1 : C_SW0);
arch/mips/kernel/smp-bmips.c
336
per_cpu(ipi_action_mask, cpu) |= action;
arch/mips/kernel/smp-bmips.c
344
int action, cpu = irq - IPI0_IRQ;
arch/mips/kernel/smp-bmips.c
348
per_cpu(ipi_action_mask, cpu) = 0;
arch/mips/kernel/smp-bmips.c
349
clear_c0_cause(cpu ? C_SW1 : C_SW0);
arch/mips/kernel/smp-bmips.c
373
unsigned int cpu = smp_processor_id();
arch/mips/kernel/smp-bmips.c
375
pr_info("SMP: CPU%d is offline\n", cpu);
arch/mips/kernel/smp-bmips.c
377
set_cpu_online(cpu, false);
arch/mips/kernel/smp-bmips.c
388
static void bmips_cpu_die(unsigned int cpu)
arch/mips/kernel/smp-bmips.c
484
int cpu;
arch/mips/kernel/smp-bmips.c
491
int shift = info->cpu & 0x01 ? 16 : 0;
arch/mips/kernel/smp-bmips.c
499
if (info->cpu & 0x02) {
arch/mips/kernel/smp-bmips.c
511
static void bmips_set_reset_vec(int cpu, u32 val)
arch/mips/kernel/smp-bmips.c
517
info.cpu = cpu;
arch/mips/kernel/smp-bmips.c
523
if (cpu == 0)
arch/mips/kernel/smp-bmips.c
53
static void bmips_set_reset_vec(int cpu, u32 val);
arch/mips/kernel/smp-bmips.c
63
static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
arch/mips/kernel/smp-bmips.c
64
static void bmips5000_send_ipi_single(int cpu, unsigned int action);
arch/mips/kernel/smp-bmips.c
72
#define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift))
arch/mips/kernel/smp-bmips.c
73
#define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
arch/mips/kernel/smp-bmips.c
74
#define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
arch/mips/kernel/smp-bmips.c
75
#define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0))
arch/mips/kernel/smp-bmips.c
79
int i, cpu = 1, boot_cpu = 0;
arch/mips/kernel/smp-cps.c
617
static int cps_boot_secondary(int cpu, struct task_struct *idle)
arch/mips/kernel/smp-cps.c
619
unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
arch/mips/kernel/smp-cps.c
620
unsigned core = cpu_core(&cpu_data[cpu]);
arch/mips/kernel/smp-cps.c
621
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
arch/mips/kernel/smp-cps.c
633
atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
arch/mips/kernel/smp-cps.c
653
if (!cpus_are_siblings(cpu, smp_processor_id())) {
arch/mips/kernel/smp-cps.c
656
if (!cpus_are_siblings(cpu, remote))
arch/mips/kernel/smp-cps.c
663
core, cpu);
arch/mips/kernel/smp-cps.c
735
unsigned int cpu, core, vpe_id;
arch/mips/kernel/smp-cps.c
737
cpu = smp_processor_id();
arch/mips/kernel/smp-cps.c
738
core = cpu_core(&cpu_data[cpu]);
arch/mips/kernel/smp-cps.c
741
vpe_id = cpu_vpe_id(&cpu_data[cpu]);
arch/mips/kernel/smp-cps.c
781
unsigned cpu = smp_processor_id();
arch/mips/kernel/smp-cps.c
792
set_cpu_online(cpu, false);
arch/mips/kernel/smp-cps.c
804
unsigned int cpu;
arch/mips/kernel/smp-cps.c
808
cpu = smp_processor_id();
arch/mips/kernel/smp-cps.c
811
pr_debug("CPU%d going offline\n", cpu);
arch/mips/kernel/smp-cps.c
816
if (!cpus_are_siblings(cpu, cpu_death_sibling))
arch/mips/kernel/smp-cps.c
833
panic("Failed to offline CPU %u", cpu);
arch/mips/kernel/smp-cps.c
838
unsigned cpu = (unsigned long)ptr_cpu;
arch/mips/kernel/smp-cps.c
839
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
arch/mips/kernel/smp-cps.c
851
static void cps_cpu_die(unsigned int cpu) { }
arch/mips/kernel/smp-cps.c
853
static void cps_cleanup_dead_cpu(unsigned cpu)
arch/mips/kernel/smp-cps.c
855
unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
arch/mips/kernel/smp-cps.c
856
unsigned core = cpu_core(&cpu_data[cpu]);
arch/mips/kernel/smp-cps.c
857
unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
arch/mips/kernel/smp-cps.c
910
cpu, stat))
arch/mips/kernel/smp-cps.c
923
(void *)(unsigned long)cpu, 1);
arch/mips/kernel/smp-mt.c
143
static int vsmp_boot_secondary(int cpu, struct task_struct *idle)
arch/mips/kernel/smp-mt.c
149
settc(cpu);
arch/mips/kernel/smp-up.c
16
static void up_send_ipi_single(int cpu, unsigned int action)
arch/mips/kernel/smp-up.c
42
static int up_boot_secondary(int cpu, struct task_struct *idle)
arch/mips/kernel/smp-up.c
61
static void up_cpu_die(unsigned int cpu)
arch/mips/kernel/smp.c
104
static inline void set_cpu_sibling_map(int cpu)
arch/mips/kernel/smp.c
108
cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
arch/mips/kernel/smp.c
112
if (cpus_are_siblings(cpu, i)) {
arch/mips/kernel/smp.c
113
cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
arch/mips/kernel/smp.c
114
cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
arch/mips/kernel/smp.c
118
cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
arch/mips/kernel/smp.c
121
static inline void set_cpu_core_map(int cpu)
arch/mips/kernel/smp.c
125
cpumask_set_cpu(cpu, &cpu_core_setup_map);
arch/mips/kernel/smp.c
128
if (cpu_data[cpu].package == cpu_data[i].package) {
arch/mips/kernel/smp.c
129
cpumask_set_cpu(i, &cpu_core_map[cpu]);
arch/mips/kernel/smp.c
130
cpumask_set_cpu(cpu, &cpu_core_map[i]);
arch/mips/kernel/smp.c
172
void mips_smp_send_ipi_single(int cpu, unsigned int action)
arch/mips/kernel/smp.c
174
mips_smp_send_ipi_mask(cpumask_of(cpu), action);
arch/mips/kernel/smp.c
181
int cpu;
arch/mips/kernel/smp.c
199
for_each_cpu(cpu, mask) {
arch/mips/kernel/smp.c
200
if (cpus_are_siblings(cpu, smp_processor_id()))
arch/mips/kernel/smp.c
203
core = cpu_core(&cpu_data[cpu]);
arch/mips/kernel/smp.c
205
while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
arch/mips/kernel/smp.c
206
mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
arch/mips/kernel/smp.c
289
int cpu;
arch/mips/kernel/smp.c
291
for_each_cpu(cpu, mask) {
arch/mips/kernel/smp.c
292
smp_ipi_init_one(call_virq + cpu, "IPI call",
arch/mips/kernel/smp.c
294
smp_ipi_init_one(sched_virq + cpu, "IPI resched",
arch/mips/kernel/smp.c
325
int cpu;
arch/mips/kernel/smp.c
327
for_each_cpu(cpu, mask) {
arch/mips/kernel/smp.c
328
free_irq(call_virq + cpu, NULL);
arch/mips/kernel/smp.c
329
free_irq(sched_virq + cpu, NULL);
arch/mips/kernel/smp.c
359
unsigned int cpu = raw_smp_processor_id();
arch/mips/kernel/smp.c
363
rcutree_report_cpu_starting(cpu);
arch/mips/kernel/smp.c
378
cpu_data[cpu].udelay_val = loops_per_jiffy;
arch/mips/kernel/smp.c
380
set_cpu_sibling_map(cpu);
arch/mips/kernel/smp.c
381
set_cpu_core_map(cpu);
arch/mips/kernel/smp.c
383
cpumask_set_cpu(cpu, &cpu_coherent_mask);
arch/mips/kernel/smp.c
384
notify_cpu_starting(cpu);
arch/mips/kernel/smp.c
391
synchronise_count_slave(cpu);
arch/mips/kernel/smp.c
394
set_cpu_online(cpu, true);
arch/mips/kernel/smp.c
441
current_thread_info()->cpu = 0;
arch/mips/kernel/smp.c
462
int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
arch/mips/kernel/smp.c
464
return mp_ops->boot_secondary(cpu, tidle);
arch/mips/kernel/smp.c
467
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
arch/mips/kernel/smp.c
471
err = mp_ops->boot_secondary(cpu, tidle);
arch/mips/kernel/smp.c
478
pr_crit("CPU%u: failed to start\n", cpu);
arch/mips/kernel/smp.c
574
unsigned int cpu;
arch/mips/kernel/smp.c
576
for_each_online_cpu(cpu) {
arch/mips/kernel/smp.c
577
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
arch/mips/kernel/smp.c
578
set_cpu_context(cpu, mm, 0);
arch/mips/kernel/smp.c
631
unsigned int cpu;
arch/mips/kernel/smp.c
634
for_each_online_cpu(cpu) {
arch/mips/kernel/smp.c
641
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
arch/mips/kernel/smp.c
642
set_cpu_context(cpu, mm, !exec);
arch/mips/kernel/smp.c
698
unsigned int cpu;
arch/mips/kernel/smp.c
700
for_each_online_cpu(cpu) {
arch/mips/kernel/smp.c
707
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
arch/mips/kernel/smp.c
708
set_cpu_context(cpu, vma->vm_mm, 1);
arch/mips/kernel/smp.c
731
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
arch/mips/kernel/smp.c
734
mp_ops->cleanup_dead_cpu(cpu);
arch/mips/kernel/smp.c
751
int cpu;
arch/mips/kernel/smp.c
753
for_each_cpu(cpu, mask) {
arch/mips/kernel/smp.c
754
csd = &per_cpu(tick_broadcast_csd, cpu);
arch/mips/kernel/smp.c
755
smp_call_function_single_async(cpu, csd);
arch/mips/kernel/sync-r4k.c
110
unsigned int cpu = (unsigned long)__cpu;
arch/mips/kernel/sync-r4k.c
138
smp_processor_id(), cpu);
arch/mips/kernel/sync-r4k.c
144
smp_processor_id(), cpu);
arch/mips/kernel/sync-r4k.c
174
void synchronise_count_slave(int cpu)
arch/mips/kernel/sync-r4k.c
185
(unsigned long *)(unsigned long)cpu, 0);
arch/mips/kernel/sync-r4k.c
241
pr_debug("Counter compensate: CPU%u observed %d warp\n", cpu, cur_max_warp);
arch/mips/kernel/time.c
42
int cpu;
arch/mips/kernel/time.c
56
for_each_online_cpu(cpu) {
arch/mips/kernel/time.c
57
per_cpu(pcp_lpj_ref, cpu) =
arch/mips/kernel/time.c
58
cpu_data[cpu].udelay_val;
arch/mips/kernel/time.c
59
per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
arch/mips/kernel/time.c
73
for_each_cpu(cpu, cpus) {
arch/mips/kernel/time.c
74
lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
arch/mips/kernel/time.c
75
per_cpu(pcp_lpj_ref_freq, cpu),
arch/mips/kernel/time.c
77
cpu_data[cpu].udelay_val = (unsigned int)lpj;
arch/mips/kernel/topology.c
16
struct cpu *c = &per_cpu(cpu_devices, i);
arch/mips/kernel/topology.c
9
static DEFINE_PER_CPU(struct cpu, cpu_devices);
arch/mips/kernel/traps.c
2235
unsigned int cpu = smp_processor_id();
arch/mips/kernel/traps.c
2265
cpu_data[cpu].asid_cache = 0;
arch/mips/kernel/traps.c
2266
else if (!cpu_data[cpu].asid_cache)
arch/mips/kernel/traps.c
2267
cpu_data[cpu].asid_cache = asid_first_version(cpu);
arch/mips/kvm/entry.c
313
uasm_i_lw(&p, GPR_T2, offsetof(struct thread_info, cpu), GPR_GP);
arch/mips/kvm/loongson_ipi.c
123
irq.cpu = id;
arch/mips/kvm/loongson_ipi.c
131
irq.cpu = id;
arch/mips/kvm/mips.c
489
kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
arch/mips/kvm/mips.c
492
if (irq->cpu == -1)
arch/mips/kvm/mips.c
495
dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu);
arch/mips/kvm/mips.c
504
irq->cpu, irq->irq);
arch/mips/kvm/mmu.c
675
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/mips/kvm/mmu.c
679
kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
arch/mips/kvm/mmu.c
683
vcpu->cpu = cpu;
arch/mips/kvm/mmu.c
684
if (vcpu->arch.last_sched_cpu != cpu) {
arch/mips/kvm/mmu.c
686
vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
arch/mips/kvm/mmu.c
696
kvm_mips_callbacks->vcpu_load(vcpu, cpu);
arch/mips/kvm/mmu.c
705
int cpu;
arch/mips/kvm/mmu.c
709
cpu = smp_processor_id();
arch/mips/kvm/mmu.c
710
vcpu->arch.last_sched_cpu = cpu;
arch/mips/kvm/mmu.c
711
vcpu->cpu = -1;
arch/mips/kvm/mmu.c
714
kvm_mips_callbacks->vcpu_put(vcpu, cpu);
arch/mips/kvm/vz.c
2411
#define guestid_cache(cpu) (cpu_data[cpu].guestid_cache)
arch/mips/kvm/vz.c
2412
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
arch/mips/kvm/vz.c
2414
unsigned long guestid = guestid_cache(cpu);
arch/mips/kvm/vz.c
2430
guestid_cache(cpu) = guestid;
arch/mips/kvm/vz.c
2434
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
arch/mips/kvm/vz.c
2502
static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
arch/mips/kvm/vz.c
2512
migrated = (vcpu->arch.last_exec_cpu != cpu);
arch/mips/kvm/vz.c
2513
vcpu->arch.last_exec_cpu = cpu;
arch/mips/kvm/vz.c
2530
(vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
arch/mips/kvm/vz.c
2532
kvm_vz_get_new_guestid(cpu, vcpu);
arch/mips/kvm/vz.c
2533
vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
arch/mips/kvm/vz.c
2535
vcpu->arch.vzguestid[cpu]);
arch/mips/kvm/vz.c
2539
change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
arch/mips/kvm/vz.c
2548
if (migrated || last_exec_vcpu[cpu] != vcpu)
arch/mips/kvm/vz.c
2550
last_exec_vcpu[cpu] = vcpu;
arch/mips/kvm/vz.c
2556
if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
arch/mips/kvm/vz.c
2563
static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/mips/kvm/vz.c
2572
migrated = (vcpu->arch.last_sched_cpu != cpu);
arch/mips/kvm/vz.c
2578
all = migrated || (last_vcpu[cpu] != vcpu);
arch/mips/kvm/vz.c
2579
last_vcpu[cpu] = vcpu;
arch/mips/kvm/vz.c
2588
kvm_vz_vcpu_load_tlb(vcpu, cpu);
arch/mips/kvm/vz.c
2705
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
arch/mips/kvm/vz.c
3062
int cpu;
arch/mips/kvm/vz.c
3069
for_each_possible_cpu(cpu) {
arch/mips/kvm/vz.c
3070
if (last_vcpu[cpu] == vcpu)
arch/mips/kvm/vz.c
3071
last_vcpu[cpu] = NULL;
arch/mips/kvm/vz.c
3072
if (last_exec_vcpu[cpu] == vcpu)
arch/mips/kvm/vz.c
3073
last_exec_vcpu[cpu] = NULL;
arch/mips/kvm/vz.c
3236
int cpu = smp_processor_id();
arch/mips/kvm/vz.c
3239
preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
arch/mips/kvm/vz.c
3244
kvm_vz_vcpu_load_tlb(vcpu, cpu);
arch/mips/kvm/vz.c
3252
int cpu = smp_processor_id();
arch/mips/kvm/vz.c
3259
kvm_vz_check_requests(vcpu, cpu);
arch/mips/kvm/vz.c
3260
kvm_vz_vcpu_load_tlb(vcpu, cpu);
arch/mips/lantiq/clk.c
29
void clkdev_add_static(unsigned long cpu, unsigned long fpi,
arch/mips/lantiq/clk.c
32
cpu_clk_generic[0].rate = cpu;
arch/mips/lantiq/clk.h
72
extern void clkdev_add_static(unsigned long cpu, unsigned long fpi,
arch/mips/loongson2ef/common/cs5536/cs5536_mfgpt.c
111
unsigned int cpu = smp_processor_id();
arch/mips/loongson2ef/common/cs5536/cs5536_mfgpt.c
113
cd->cpumask = cpumask_of(cpu);
arch/mips/loongson64/hpet.c
177
unsigned int cpu = smp_processor_id();
arch/mips/loongson64/hpet.c
183
cd = &per_cpu(hpet_clockevent_device, cpu);
arch/mips/loongson64/hpet.c
222
unsigned int cpu = smp_processor_id();
arch/mips/loongson64/hpet.c
227
cd = &per_cpu(hpet_clockevent_device, cpu);
arch/mips/loongson64/hpet.c
237
cd->cpumask = cpumask_of(cpu);
arch/mips/loongson64/numa.c
128
unsigned int node, cpu, active_cpu = 0;
arch/mips/loongson64/numa.c
142
for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
arch/mips/loongson64/numa.c
143
node = cpu / loongson_sysconf.cores_per_node;
arch/mips/loongson64/numa.c
147
if (loongson_sysconf.reserved_cpus_mask & (1<<cpu))
arch/mips/loongson64/reset.c
110
int cpu;
arch/mips/loongson64/reset.c
113
for_each_possible_cpu(cpu)
arch/mips/loongson64/reset.c
114
if (!cpu_online(cpu))
arch/mips/loongson64/reset.c
115
cpu_device_up(get_cpu_device(cpu));
arch/mips/loongson64/smp.c
109
cpu, startargs[0], startargs[1], startargs[2]);
arch/mips/loongson64/smp.c
111
csr_mail_send(startargs[3], cpu_logical_map(cpu), 3);
arch/mips/loongson64/smp.c
112
csr_mail_send(startargs[2], cpu_logical_map(cpu), 2);
arch/mips/loongson64/smp.c
113
csr_mail_send(startargs[1], cpu_logical_map(cpu), 1);
arch/mips/loongson64/smp.c
114
csr_mail_send(startargs[0], cpu_logical_map(cpu), 0);
arch/mips/loongson64/smp.c
117
static u32 legacy_ipi_read_clear(int cpu)
arch/mips/loongson64/smp.c
122
action = readl_relaxed(ipi_status0_regs[cpu_logical_map(cpu)]);
arch/mips/loongson64/smp.c
124
writel_relaxed(action, ipi_clear0_regs[cpu_logical_map(cpu)]);
arch/mips/loongson64/smp.c
130
static void legacy_ipi_write_action(int cpu, u32 action)
arch/mips/loongson64/smp.c
132
writel_relaxed((u32)action, ipi_set0_regs[cpu]);
arch/mips/loongson64/smp.c
136
static void legacy_ipi_write_enable(int cpu)
arch/mips/loongson64/smp.c
138
writel_relaxed(0xffffffff, ipi_en0_regs[cpu_logical_map(cpu)]);
arch/mips/loongson64/smp.c
141
static void legacy_ipi_clear_buf(int cpu)
arch/mips/loongson64/smp.c
143
writeq_relaxed(0, ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
arch/mips/loongson64/smp.c
146
static void legacy_ipi_write_buf(int cpu, struct task_struct *idle)
arch/mips/loongson64/smp.c
157
cpu, startargs[0], startargs[1], startargs[2]);
arch/mips/loongson64/smp.c
160
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x18);
arch/mips/loongson64/smp.c
162
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x10);
arch/mips/loongson64/smp.c
164
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x8);
arch/mips/loongson64/smp.c
166
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
arch/mips/loongson64/smp.c
37
static u32 (*ipi_read_clear)(int cpu);
arch/mips/loongson64/smp.c
370
static void loongson3_send_ipi_single(int cpu, unsigned int action)
arch/mips/loongson64/smp.c
372
ipi_write_action(cpu_logical_map(cpu), (u32)action);
arch/mips/loongson64/smp.c
38
static void (*ipi_write_action)(int cpu, u32 action);
arch/mips/loongson64/smp.c
386
int cpu = smp_processor_id();
arch/mips/loongson64/smp.c
389
action = ipi_read_clear(cpu);
arch/mips/loongson64/smp.c
39
static void (*ipi_write_enable)(int cpu);
arch/mips/loongson64/smp.c
40
static void (*ipi_clear_buf)(int cpu);
arch/mips/loongson64/smp.c
408
unsigned int cpu = smp_processor_id();
arch/mips/loongson64/smp.c
41
static void (*ipi_write_buf)(int cpu, struct task_struct *idle);
arch/mips/loongson64/smp.c
414
ipi_write_enable(cpu);
arch/mips/loongson64/smp.c
416
per_cpu(cpu_state, cpu) = CPU_ONLINE;
arch/mips/loongson64/smp.c
417
cpu_set_core(&cpu_data[cpu],
arch/mips/loongson64/smp.c
418
cpu_logical_map(cpu) % loongson_sysconf.cores_per_package);
arch/mips/loongson64/smp.c
419
cpu_data[cpu].package =
arch/mips/loongson64/smp.c
420
cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
arch/mips/loongson64/smp.c
425
int cpu = smp_processor_id();
arch/mips/loongson64/smp.c
429
ipi_clear_buf(cpu);
arch/mips/loongson64/smp.c
44
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
arch/mips/loongson64/smp.c
503
static int loongson3_boot_secondary(int cpu, struct task_struct *idle)
arch/mips/loongson64/smp.c
505
pr_info("Booting CPU#%d...\n", cpu);
arch/mips/loongson64/smp.c
507
ipi_write_buf(cpu, idle);
arch/mips/loongson64/smp.c
51
val |= (cpu << CSR_MAIL_SEND_CPU_SHIFT);
arch/mips/loongson64/smp.c
517
unsigned int cpu = smp_processor_id();
arch/mips/loongson64/smp.c
519
set_cpu_online(cpu, false);
arch/mips/loongson64/smp.c
530
static void loongson3_cpu_die(unsigned int cpu)
arch/mips/loongson64/smp.c
532
while (per_cpu(cpu_state, cpu) != CPU_DEAD)
arch/mips/loongson64/smp.c
58
val |= (cpu << CSR_MAIL_SEND_CPU_SHIFT);
arch/mips/loongson64/smp.c
63
static u32 csr_ipi_read_clear(int cpu)
arch/mips/loongson64/smp.c
75
static void csr_ipi_write_action(int cpu, u32 action)
arch/mips/loongson64/smp.c
756
unsigned int cpu = smp_processor_id();
arch/mips/loongson64/smp.c
792
state_addr = &per_cpu(cpu_state, cpu);
arch/mips/loongson64/smp.c
798
static int loongson3_disable_clock(unsigned int cpu)
arch/mips/loongson64/smp.c
800
uint64_t core_id = cpu_core(&cpu_data[cpu]);
arch/mips/loongson64/smp.c
801
uint64_t package_id = cpu_data[cpu].package;
arch/mips/loongson64/smp.c
815
static int loongson3_enable_clock(unsigned int cpu)
arch/mips/loongson64/smp.c
817
uint64_t core_id = cpu_core(&cpu_data[cpu]);
arch/mips/loongson64/smp.c
818
uint64_t package_id = cpu_data[cpu].package;
arch/mips/loongson64/smp.c
82
val |= (cpu << CSR_IPI_SEND_CPU_SHIFT);
arch/mips/loongson64/smp.c
88
static void csr_ipi_write_enable(int cpu)
arch/mips/loongson64/smp.c
93
static void csr_ipi_clear_buf(int cpu)
arch/mips/loongson64/smp.c
98
static void csr_ipi_write_buf(int cpu, struct task_struct *idle)
arch/mips/math-emu/me-debugfs.c
16
int cpu;
arch/mips/math-emu/me-debugfs.c
19
for_each_online_cpu(cpu) {
arch/mips/math-emu/me-debugfs.c
23
ps = &per_cpu(fpuemustats, cpu);
arch/mips/mm/c-octeon.c
65
extern void octeon_send_ipi_single(int cpu, unsigned int action);
arch/mips/mm/c-octeon.c
67
int cpu;
arch/mips/mm/c-octeon.c
75
cpu = smp_processor_id();
arch/mips/mm/c-octeon.c
85
cpumask_clear_cpu(cpu, &mask);
arch/mips/mm/c-octeon.c
87
for_each_cpu(cpu, &mask)
arch/mips/mm/c-octeon.c
88
octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
arch/mips/mm/context.c
102
int cpu;
arch/mips/mm/context.c
114
for_each_possible_cpu(cpu) {
arch/mips/mm/context.c
115
if (per_cpu(reserved_mmids, cpu) == mmid) {
arch/mips/mm/context.c
117
per_cpu(reserved_mmids, cpu) = newmmid;
arch/mips/mm/context.c
16
static bool asid_versions_eq(int cpu, u64 a, u64 b)
arch/mips/mm/context.c
18
return ((a ^ b) & asid_version_mask(cpu)) == 0;
arch/mips/mm/context.c
181
unsigned int cpu = smp_processor_id();
arch/mips/mm/context.c
187
write_c0_entryhi(cpu_asid(cpu, mm));
arch/mips/mm/context.c
209
ctx = cpu_context(cpu, mm);
arch/mips/mm/context.c
210
old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
arch/mips/mm/context.c
212
!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
arch/mips/mm/context.c
213
!cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
arch/mips/mm/context.c
216
ctx = cpu_context(cpu, mm);
arch/mips/mm/context.c
217
if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
arch/mips/mm/context.c
220
WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
arch/mips/mm/context.c
229
if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
arch/mips/mm/context.c
23
unsigned int cpu;
arch/mips/mm/context.c
233
cpumask_clear_cpu(cpu, &tlb_flush_pending);
arch/mips/mm/context.c
249
cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
arch/mips/mm/context.c
33
cpu = smp_processor_id();
arch/mips/mm/context.c
34
asid = asid_cache(cpu);
arch/mips/mm/context.c
36
if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
arch/mips/mm/context.c
42
set_cpu_context(cpu, mm, asid);
arch/mips/mm/context.c
43
asid_cache(cpu) = asid;
arch/mips/mm/context.c
49
unsigned int cpu = smp_processor_id();
arch/mips/mm/context.c
59
if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
arch/mips/mm/context.c
67
int cpu;
arch/mips/mm/context.c
75
for_each_possible_cpu(cpu) {
arch/mips/mm/context.c
76
mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);
arch/mips/mm/context.c
86
mmid = per_cpu(reserved_mmids, cpu);
arch/mips/mm/context.c
88
__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
arch/mips/mm/context.c
89
per_cpu(reserved_mmids, cpu) = mmid;
arch/mips/mm/init.c
515
static int __init pcpu_cpu_to_node(int cpu)
arch/mips/mm/init.c
517
return cpu_to_node(cpu);
arch/mips/mm/init.c
523
unsigned int cpu;
arch/mips/mm/init.c
538
for_each_possible_cpu(cpu)
arch/mips/mm/init.c
539
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
arch/mips/mm/page.c
621
unsigned int cpu = smp_processor_id();
arch/mips/mm/page.c
627
page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
arch/mips/mm/page.c
629
page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
arch/mips/mm/page.c
630
__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
arch/mips/mm/page.c
636
while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
arch/mips/mm/page.c
639
__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
arch/mips/mm/page.c
647
unsigned int cpu = smp_processor_id();
arch/mips/mm/page.c
654
page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
arch/mips/mm/page.c
656
page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
arch/mips/mm/page.c
657
__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
arch/mips/mm/page.c
663
while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
arch/mips/mm/page.c
666
__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
arch/mips/mm/tlb-r3k.c
150
int cpu = smp_processor_id();
arch/mips/mm/tlb-r3k.c
152
if (cpu_context(cpu, vma->vm_mm) != 0) {
arch/mips/mm/tlb-r3k.c
157
printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
arch/mips/mm/tlb-r3k.c
159
newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
arch/mips/mm/tlb-r3k.c
194
if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
arch/mips/mm/tlb-r3k.c
196
(cpu_context(cpu, vma->vm_mm)), pid);
arch/mips/mm/tlb-r3k.c
72
int cpu = smp_processor_id();
arch/mips/mm/tlb-r3k.c
74
if (cpu_context(cpu, mm) != 0) {
arch/mips/mm/tlb-r3k.c
79
cpu_context(cpu, mm) & asid_mask, start, end);
arch/mips/mm/tlb-r3k.c
85
int newpid = cpu_context(cpu, mm) & asid_mask;
arch/mips/mm/tlb-r4k.c
114
int cpu = smp_processor_id();
arch/mips/mm/tlb-r4k.c
116
if (cpu_context(cpu, mm) != 0) {
arch/mips/mm/tlb-r4k.c
127
int newpid = cpu_asid(cpu, mm);
arch/mips/mm/tlb-r4k.c
217
int cpu = smp_processor_id();
arch/mips/mm/tlb-r4k.c
219
if (cpu_context(cpu, vma->vm_mm) != 0) {
arch/mips/mm/tlb-r4k.c
231
write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
arch/mips/mm/tlb-r4k.c
233
write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
arch/mips/pci/pci-xtalk-bridge.c
310
int ret, cpu;
arch/mips/pci/pci-xtalk-bridge.c
314
cpu = cpumask_first_and(mask, cpu_online_mask);
arch/mips/pci/pci-xtalk-bridge.c
315
data->nasid = cpu_to_node(cpu);
arch/mips/sgi-ip27/ip27-berr.c
66
int cpu = LOCAL_HUB_L(PI_CPU_NUM);
arch/mips/sgi-ip27/ip27-berr.c
71
printk("Slice %c got %cbe at 0x%lx\n", 'A' + cpu, data ? 'd' : 'i',
arch/mips/sgi-ip27/ip27-berr.c
75
errst0 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS0_B : PI_ERR_STATUS0_A);
arch/mips/sgi-ip27/ip27-berr.c
76
errst1 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS1_B : PI_ERR_STATUS1_A);
arch/mips/sgi-ip27/ip27-berr.c
87
int cpu = LOCAL_HUB_L(PI_CPU_NUM);
arch/mips/sgi-ip27/ip27-berr.c
88
int cpuoff = cpu << 8;
arch/mips/sgi-ip27/ip27-berr.c
93
cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
arch/mips/sgi-ip27/ip27-init.c
73
int cpu = smp_processor_id();
arch/mips/sgi-ip27/ip27-init.c
80
pr_info("CPU %d clock is %dMHz.\n", cpu, sn_cpu_info[cpu].p_speed);
arch/mips/sgi-ip27/ip27-init.c
85
install_cpu_nmi_handler(cputoslice(cpu));
arch/mips/sgi-ip27/ip27-irq.c
108
irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
arch/mips/sgi-ip27/ip27-irq.c
147
info->nasid = cpu_to_node(hd->cpu);
arch/mips/sgi-ip27/ip27-irq.c
191
cpuid_t cpu = smp_processor_id();
arch/mips/sgi-ip27/ip27-irq.c
192
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
arch/mips/sgi-ip27/ip27-irq.c
231
cpuid_t cpu = smp_processor_id();
arch/mips/sgi-ip27/ip27-irq.c
232
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
arch/mips/sgi-ip27/ip27-irq.c
254
int cpu = smp_processor_id();
arch/mips/sgi-ip27/ip27-irq.c
255
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
arch/mips/sgi-ip27/ip27-irq.c
30
cpuid_t cpu;
arch/mips/sgi-ip27/ip27-irq.c
55
unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
arch/mips/sgi-ip27/ip27-irq.c
65
unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
arch/mips/sgi-ip27/ip27-irq.c
75
int cpu;
arch/mips/sgi-ip27/ip27-irq.c
77
cpu = cpumask_first_and(mask, cpu_online_mask);
arch/mips/sgi-ip27/ip27-irq.c
78
if (cpu >= nr_cpu_ids)
arch/mips/sgi-ip27/ip27-irq.c
79
cpu = cpumask_any(cpu_online_mask);
arch/mips/sgi-ip27/ip27-irq.c
81
nasid = cpu_to_node(cpu);
arch/mips/sgi-ip27/ip27-irq.c
82
hd->cpu = cpu;
arch/mips/sgi-ip27/ip27-irq.c
83
if (!cputoslice(cpu)) {
arch/mips/sgi-ip27/ip27-nmi.c
208
cpu = cpumask_first(cpumask_of_node(node));
arch/mips/sgi-ip27/ip27-nmi.c
209
for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
arch/mips/sgi-ip27/ip27-nmi.c
210
CPUMASK_SETB(nmied_cpus, cpu);
arch/mips/sgi-ip27/ip27-nmi.c
215
SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
arch/mips/sgi-ip27/ip27-smp.c
147
static int ip27_boot_secondary(int cpu, struct task_struct *idle)
arch/mips/sgi-ip27/ip27-smp.c
152
LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu),
arch/mips/sgi-ip27/ip27-timer.c
32
unsigned int cpu = smp_processor_id();
arch/mips/sgi-ip27/ip27-timer.c
33
int slice = cputoslice(cpu);
arch/mips/sgi-ip27/ip27-timer.c
48
unsigned int cpu = smp_processor_id();
arch/mips/sgi-ip27/ip27-timer.c
49
struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
arch/mips/sgi-ip27/ip27-timer.c
50
int slice = cputoslice(cpu);
arch/mips/sgi-ip27/ip27-timer.c
74
unsigned int cpu = smp_processor_id();
arch/mips/sgi-ip27/ip27-timer.c
75
struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
arch/mips/sgi-ip27/ip27-timer.c
76
unsigned char *name = per_cpu(hub_rt_name, cpu);
arch/mips/sgi-ip27/ip27-timer.c
78
sprintf(name, "hub-rt %d", cpu);
arch/mips/sgi-ip27/ip27-timer.c
88
cd->cpumask = cpumask_of(cpu);
arch/mips/sgi-ip30/ip30-irq.c
105
mask = (heart_read(&heart_regs->imr[cpu]) &
arch/mips/sgi-ip30/ip30-irq.c
147
unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
arch/mips/sgi-ip30/ip30-irq.c
150
heart_write(*mask, &heart_regs->imr[hd->cpu]);
arch/mips/sgi-ip30/ip30-irq.c
156
unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
arch/mips/sgi-ip30/ip30-irq.c
159
heart_write(*mask, &heart_regs->imr[hd->cpu]);
arch/mips/sgi-ip30/ip30-irq.c
166
unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
arch/mips/sgi-ip30/ip30-irq.c
169
heart_write(*mask, &heart_regs->imr[hd->cpu]);
arch/mips/sgi-ip30/ip30-irq.c
183
hd->cpu = cpumask_first_and(mask, cpu_online_mask);
arch/mips/sgi-ip30/ip30-irq.c
188
irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
arch/mips/sgi-ip30/ip30-irq.c
22
int cpu;
arch/mips/sgi-ip30/ip30-irq.c
249
int cpu = smp_processor_id();
arch/mips/sgi-ip30/ip30-irq.c
250
unsigned long *mask = &per_cpu(irq_enable_mask, cpu);
arch/mips/sgi-ip30/ip30-irq.c
252
set_bit(HEART_L2_INT_RESCHED_CPU_0 + cpu, mask);
arch/mips/sgi-ip30/ip30-irq.c
253
heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0 + cpu),
arch/mips/sgi-ip30/ip30-irq.c
255
set_bit(HEART_L2_INT_CALL_CPU_0 + cpu, mask);
arch/mips/sgi-ip30/ip30-irq.c
256
heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0 + cpu),
arch/mips/sgi-ip30/ip30-irq.c
259
heart_write(*mask, &heart_regs->imr[cpu]);
arch/mips/sgi-ip30/ip30-irq.c
47
int cpu = smp_processor_id();
arch/mips/sgi-ip30/ip30-irq.c
51
mask = heart_read(&heart_regs->imr[cpu]);
arch/mips/sgi-ip30/ip30-irq.c
60
heart_write(mask & ~(pending), &heart_regs->imr[cpu]);
arch/mips/sgi-ip30/ip30-irq.c
77
cpu, pending, mask, cause);
arch/mips/sgi-ip30/ip30-irq.c
94
heart_write(mask, &heart_regs->imr[cpu]);
arch/mips/sgi-ip30/ip30-irq.c
99
int cpu = smp_processor_id();
arch/mips/sgi-ip30/ip30-setup.c
100
pr_info("IP30: CPU%d: %d MHz CPU detected.\n", cpu,
arch/mips/sgi-ip30/ip30-setup.c
86
int cpu = smp_processor_id();
arch/mips/sgi-ip30/ip30-smp.c
110
static int __init ip30_smp_boot_secondary(int cpu, struct task_struct *idle)
arch/mips/sgi-ip30/ip30-smp.c
112
struct mpconf *mpc = (struct mpconf *)MPCONF(cpu);
arch/mips/sgi-ip30/ip30-smp.c
46
static void ip30_smp_send_ipi_single(int cpu, u32 action)
arch/mips/sgi-ip30/ip30-smp.c
61
irq += cpu;
arch/mips/sibyte/bcm1480/irq.c
108
bcm1480_irq_owner[irq] = cpu;
arch/mips/sibyte/bcm1480/irq.c
111
cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
arch/mips/sibyte/bcm1480/irq.c
113
____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
arch/mips/sibyte/bcm1480/irq.c
239
unsigned int i, cpu;
arch/mips/sibyte/bcm1480/irq.c
247
for (cpu = 0; cpu < 4; cpu++) {
arch/mips/sibyte/bcm1480/irq.c
249
IOADDR(A_BCM1480_IMR_REGISTER(cpu,
arch/mips/sibyte/bcm1480/irq.c
256
for (cpu = 0; cpu < 4; cpu++) {
arch/mips/sibyte/bcm1480/irq.c
258
IOADDR(A_BCM1480_IMR_REGISTER(cpu,
arch/mips/sibyte/bcm1480/irq.c
270
for (cpu = 0; cpu < 4; cpu++) {
arch/mips/sibyte/bcm1480/irq.c
271
__raw_writeq(IMR_IP3_VAL, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) +
arch/mips/sibyte/bcm1480/irq.c
277
for (cpu = 0; cpu < 4; cpu++) {
arch/mips/sibyte/bcm1480/irq.c
279
IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_0_CLR_CPU)));
arch/mips/sibyte/bcm1480/irq.c
281
IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_1_CLR_CPU)));
arch/mips/sibyte/bcm1480/irq.c
287
for (cpu = 0; cpu < 4; cpu++) {
arch/mips/sibyte/bcm1480/irq.c
288
__raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_H)));
arch/mips/sibyte/bcm1480/irq.c
291
for (cpu = 0; cpu < 4; cpu++) {
arch/mips/sibyte/bcm1480/irq.c
292
__raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_L)));
arch/mips/sibyte/bcm1480/irq.c
310
unsigned int cpu = smp_processor_id();
arch/mips/sibyte/bcm1480/irq.c
318
base = A_BCM1480_IMR_MAPPER(cpu);
arch/mips/sibyte/bcm1480/irq.c
334
unsigned int cpu = smp_processor_id();
arch/mips/sibyte/bcm1480/irq.c
340
do_IRQ(K_BCM1480_INT_TIMER_0 + cpu);
arch/mips/sibyte/bcm1480/irq.c
42
void bcm1480_mask_irq(int cpu, int irq)
arch/mips/sibyte/bcm1480/irq.c
53
cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
arch/mips/sibyte/bcm1480/irq.c
55
____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
arch/mips/sibyte/bcm1480/irq.c
59
void bcm1480_unmask_irq(int cpu, int irq)
arch/mips/sibyte/bcm1480/irq.c
70
cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
arch/mips/sibyte/bcm1480/irq.c
72
____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
arch/mips/sibyte/bcm1480/irq.c
81
int i = 0, old_cpu, cpu, int_on, k;
arch/mips/sibyte/bcm1480/irq.c
88
cpu = cpu_logical_map(i);
arch/mips/sibyte/bcm1480/smp.c
107
static int bcm1480_boot_secondary(int cpu, struct task_struct *idle)
arch/mips/sibyte/bcm1480/smp.c
111
retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
arch/mips/sibyte/bcm1480/smp.c
115
printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
arch/mips/sibyte/bcm1480/smp.c
160
int cpu = smp_processor_id();
arch/mips/sibyte/bcm1480/smp.c
166
action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
arch/mips/sibyte/bcm1480/smp.c
169
__raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]);
arch/mips/sibyte/bcm1480/smp.c
67
static void bcm1480_send_ipi_single(int cpu, unsigned int action)
arch/mips/sibyte/bcm1480/smp.c
69
__raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
arch/mips/sibyte/sb1250/irq.c
100
cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
arch/mips/sibyte/sb1250/irq.c
103
____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
arch/mips/sibyte/sb1250/irq.c
145
int cpu;
arch/mips/sibyte/sb1250/irq.c
147
cpu = cpu_logical_map(i);
arch/mips/sibyte/sb1250/irq.c
149
cpu = i;
arch/mips/sibyte/sb1250/irq.c
156
IOADDR(A_IMR_REGISTER(cpu,
arch/mips/sibyte/sb1250/irq.c
273
unsigned int cpu = smp_processor_id();
arch/mips/sibyte/sb1250/irq.c
281
mask = __raw_readq(IOADDR(A_IMR_REGISTER(cpu,
arch/mips/sibyte/sb1250/irq.c
289
unsigned int cpu = smp_processor_id();
arch/mips/sibyte/sb1250/irq.c
307
do_IRQ(K_INT_TIMER_0 + cpu); /* sb1250_timer_interrupt() */
arch/mips/sibyte/sb1250/irq.c
41
void sb1250_mask_irq(int cpu, int irq)
arch/mips/sibyte/sb1250/irq.c
47
cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
arch/mips/sibyte/sb1250/irq.c
50
____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
arch/mips/sibyte/sb1250/irq.c
55
void sb1250_unmask_irq(int cpu, int irq)
arch/mips/sibyte/sb1250/irq.c
61
cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
arch/mips/sibyte/sb1250/irq.c
64
____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
arch/mips/sibyte/sb1250/irq.c
73
int i = 0, old_cpu, cpu, int_on;
arch/mips/sibyte/sb1250/irq.c
81
cpu = cpu_logical_map(i);
arch/mips/sibyte/sb1250/irq.c
97
sb1250_irq_owner[irq] = cpu;
arch/mips/sibyte/sb1250/smp.c
100
retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
arch/mips/sibyte/sb1250/smp.c
104
printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
arch/mips/sibyte/sb1250/smp.c
149
int cpu = smp_processor_id();
arch/mips/sibyte/sb1250/smp.c
155
action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
arch/mips/sibyte/sb1250/smp.c
158
____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]);
arch/mips/sibyte/sb1250/smp.c
56
static void sb1250_send_ipi_single(int cpu, unsigned int action)
arch/mips/sibyte/sb1250/smp.c
58
__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
arch/mips/sibyte/sb1250/smp.c
96
static int sb1250_boot_secondary(int cpu, struct task_struct *idle)
arch/mips/sni/time.c
65
unsigned int cpu = smp_processor_id();
arch/mips/sni/time.c
67
cd->cpumask = cpumask_of(cpu);
arch/nios2/include/asm/thread_info.h
39
__u32 cpu; /* current CPU */
arch/nios2/include/asm/thread_info.h
53
.cpu = 0, \
arch/nios2/kernel/cpuinfo.c
100
cpuinfo.tlb_num_entries = fcpu(cpu, "altr,tlb-num-entries");
arch/nios2/kernel/cpuinfo.c
102
cpuinfo.tlb_ptr_sz = fcpu(cpu, "altr,tlb-ptr-sz");
arch/nios2/kernel/cpuinfo.c
104
cpuinfo.reset_addr = fcpu(cpu, "altr,reset-addr");
arch/nios2/kernel/cpuinfo.c
105
cpuinfo.exception_addr = fcpu(cpu, "altr,exception-addr");
arch/nios2/kernel/cpuinfo.c
106
cpuinfo.fast_tlb_miss_exc_addr = fcpu(cpu, "altr,fast-tlb-miss-addr");
arch/nios2/kernel/cpuinfo.c
108
of_node_put(cpu);
arch/nios2/kernel/cpuinfo.c
22
static inline u32 fcpu(struct device_node *cpu, const char *n)
arch/nios2/kernel/cpuinfo.c
26
of_property_read_u32(cpu, n, &val);
arch/nios2/kernel/cpuinfo.c
33
struct device_node *cpu;
arch/nios2/kernel/cpuinfo.c
37
cpu = of_get_cpu_node(0, NULL);
arch/nios2/kernel/cpuinfo.c
38
if (!cpu)
arch/nios2/kernel/cpuinfo.c
41
if (!of_property_read_bool(cpu, "altr,has-initda"))
arch/nios2/kernel/cpuinfo.c
46
cpuinfo.cpu_clock_freq = fcpu(cpu, "clock-frequency");
arch/nios2/kernel/cpuinfo.c
48
str = of_get_property(cpu, "altr,implementation", &len);
arch/nios2/kernel/cpuinfo.c
51
cpuinfo.has_div = of_property_read_bool(cpu, "altr,has-div");
arch/nios2/kernel/cpuinfo.c
52
cpuinfo.has_mul = of_property_read_bool(cpu, "altr,has-mul");
arch/nios2/kernel/cpuinfo.c
53
cpuinfo.has_mulx = of_property_read_bool(cpu, "altr,has-mulx");
arch/nios2/kernel/cpuinfo.c
54
cpuinfo.has_bmx = of_property_read_bool(cpu, "altr,has-bmx");
arch/nios2/kernel/cpuinfo.c
55
cpuinfo.has_cdx = of_property_read_bool(cpu, "altr,has-cdx");
arch/nios2/kernel/cpuinfo.c
56
cpuinfo.mmu = of_property_read_bool(cpu, "altr,has-mmu");
arch/nios2/kernel/cpuinfo.c
73
cpuinfo.tlb_num_ways = fcpu(cpu, "altr,tlb-num-ways");
arch/nios2/kernel/cpuinfo.c
77
cpuinfo.icache_line_size = fcpu(cpu, "icache-line-size");
arch/nios2/kernel/cpuinfo.c
78
cpuinfo.icache_size = fcpu(cpu, "icache-size");
arch/nios2/kernel/cpuinfo.c
85
cpuinfo.dcache_line_size = fcpu(cpu, "dcache-line-size");
arch/nios2/kernel/cpuinfo.c
91
cpuinfo.dcache_size = fcpu(cpu, "dcache-size");
arch/nios2/kernel/cpuinfo.c
98
cpuinfo.tlb_pid_num_bits = fcpu(cpu, "altr,pid-num-bits");
arch/openrisc/include/asm/smp.h
15
#define raw_smp_processor_id() (current_thread_info()->cpu)
arch/openrisc/include/asm/smp.h
20
extern void arch_send_call_function_single_ipi(int cpu);
arch/openrisc/include/asm/thread_info.h
46
__u32 cpu; /* current CPU */
arch/openrisc/include/asm/thread_info.h
66
.cpu = 0, \
arch/openrisc/include/asm/time.h
19
extern void synchronise_count_master(int cpu);
arch/openrisc/include/asm/time.h
20
extern void synchronise_count_slave(int cpu);
arch/openrisc/kernel/cacheinfo.c
18
unsigned int level, struct cache_desc *cache, int cpu)
arch/openrisc/kernel/cacheinfo.c
26
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
arch/openrisc/kernel/cacheinfo.c
29
int init_cache_level(unsigned int cpu)
arch/openrisc/kernel/cacheinfo.c
32
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/openrisc/kernel/cacheinfo.c
84
int populate_cache_leaves(unsigned int cpu)
arch/openrisc/kernel/cacheinfo.c
87
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/openrisc/kernel/cacheinfo.c
92
ci_leaf_init(this_leaf, CACHE_TYPE_DATA, level, &cpuinfo->dcache, cpu);
arch/openrisc/kernel/cacheinfo.c
99
ci_leaf_init(this_leaf, CACHE_TYPE_INST, level, &cpuinfo->icache, cpu);
arch/openrisc/kernel/setup.c
142
struct device_node *cpu;
arch/openrisc/kernel/setup.c
146
cpu = of_get_cpu_node(cpu_id, NULL);
arch/openrisc/kernel/setup.c
147
if (!cpu)
arch/openrisc/kernel/setup.c
150
if (of_property_read_u32(cpu, "clock-frequency",
arch/openrisc/kernel/setup.c
160
of_node_put(cpu);
arch/openrisc/kernel/setup.c
212
struct device_node *cpu = of_get_cpu_node(smp_processor_id(), NULL);
arch/openrisc/kernel/setup.c
214
val = of_get_property(cpu, "clock-frequency", NULL);
arch/openrisc/kernel/setup.c
222
of_node_put(cpu);
arch/openrisc/kernel/smp.c
102
int __cpu_up(unsigned int cpu, struct task_struct *idle)
arch/openrisc/kernel/smp.c
106
cpu);
arch/openrisc/kernel/smp.c
111
current_pgd[cpu] = init_mm.pgd;
arch/openrisc/kernel/smp.c
113
boot_secondary(cpu, idle);
arch/openrisc/kernel/smp.c
116
pr_crit("CPU%u: failed to start\n", cpu);
arch/openrisc/kernel/smp.c
119
synchronise_count_master(cpu);
arch/openrisc/kernel/smp.c
127
unsigned int cpu = smp_processor_id();
arch/openrisc/kernel/smp.c
134
cpumask_set_cpu(cpu, mm_cpumask(mm));
arch/openrisc/kernel/smp.c
136
pr_info("CPU%u: Booted secondary processor\n", cpu);
arch/openrisc/kernel/smp.c
141
notify_cpu_starting(cpu);
arch/openrisc/kernel/smp.c
148
synchronise_count_slave(cpu);
arch/openrisc/kernel/smp.c
150
set_cpu_online(cpu, true);
arch/openrisc/kernel/smp.c
161
unsigned int cpu = smp_processor_id();
arch/openrisc/kernel/smp.c
180
WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
arch/openrisc/kernel/smp.c
185
void arch_smp_send_reschedule(int cpu)
arch/openrisc/kernel/smp.c
187
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
arch/openrisc/kernel/smp.c
223
void arch_send_call_function_single_ipi(int cpu)
arch/openrisc/kernel/smp.c
225
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
arch/openrisc/kernel/smp.c
52
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
arch/openrisc/kernel/smp.c
60
secondary_release = cpu;
arch/openrisc/kernel/smp.c
61
smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);
arch/openrisc/kernel/smp.c
72
struct device_node *cpu;
arch/openrisc/kernel/smp.c
75
for_each_of_cpu_node(cpu) {
arch/openrisc/kernel/smp.c
76
cpu_id = of_get_cpu_hwid(cpu, 0);
arch/openrisc/kernel/smp.c
84
unsigned int cpu;
arch/openrisc/kernel/smp.c
90
for_each_possible_cpu(cpu) {
arch/openrisc/kernel/smp.c
91
if (cpu < max_cpus)
arch/openrisc/kernel/smp.c
92
set_cpu_present(cpu, true);
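
The openrisc smp.c lines above outline a release-flag boot handshake: __cpu_up() publishes the target id in secondary_release and sends IPI_WAKEUP, while the secondary spins until it sees its own id. A minimal userspace analogue, assuming pthreads and an atomic int in place of the kernel's flag and IPI path (secondary_release here is a stand-in, not the kernel symbol):

    /* Userspace sketch; pthreads and an atomic int replace the kernel's
     * secondary_release flag and IPI_WAKEUP cross-call. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int secondary_release = -1;

    static void *secondary_main(void *arg)
    {
        int cpu = *(int *)arg;

        /* Spin until the boot CPU publishes our id (the wakeup IPI). */
        while (atomic_load_explicit(&secondary_release,
                                    memory_order_acquire) != cpu)
            ;
        printf("CPU%d: Booted secondary processor\n", cpu);
        return NULL;
    }

    int main(void)
    {
        int cpu = 1;
        pthread_t t;

        pthread_create(&t, NULL, secondary_main, &cpu);
        atomic_store_explicit(&secondary_release, cpu, memory_order_release);
        pthread_join(t, NULL);
        return 0;
    }
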
arch/openrisc/kernel/sync-timer.c
31
void synchronise_count_master(int cpu)
arch/openrisc/kernel/sync-timer.c
36
pr_info("Synchronize counters for CPU %u: ", cpu);
arch/openrisc/kernel/sync-timer.c
93
void synchronise_count_slave(int cpu)
arch/openrisc/kernel/time.c
116
unsigned int cpu = smp_processor_id();
arch/openrisc/kernel/time.c
118
&per_cpu(clockevent_openrisc_timer, cpu);
arch/openrisc/kernel/time.c
71
unsigned int cpu = smp_processor_id();
arch/openrisc/kernel/time.c
73
&per_cpu(clockevent_openrisc_timer, cpu);
arch/openrisc/kernel/time.c
74
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu];
arch/openrisc/kernel/time.c
86
evt->cpumask = cpumask_of(cpu);
arch/openrisc/mm/tlb.c
140
unsigned int cpu;
arch/openrisc/mm/tlb.c
145
cpu = smp_processor_id();
arch/openrisc/mm/tlb.c
147
cpumask_clear_cpu(cpu, mm_cpumask(prev));
arch/openrisc/mm/tlb.c
148
cpumask_set_cpu(cpu, mm_cpumask(next));
arch/openrisc/mm/tlb.c
156
current_pgd[cpu] = next->pgd;
arch/parisc/include/asm/irq.h
45
extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
arch/parisc/include/asm/smp.h
27
#define cpu_number_map(cpu) (cpu)
arch/parisc/include/asm/smp.h
28
#define cpu_logical_map(cpu) (cpu)
arch/parisc/include/asm/smp.h
32
extern void arch_send_call_function_single_ipi(int cpu);
arch/parisc/include/asm/smp.h
35
#define raw_smp_processor_id() (current_thread_info()->cpu)
arch/parisc/include/asm/smp.h
48
void __cpu_die(unsigned int cpu);
arch/parisc/include/asm/thread_info.h
13
unsigned int cpu;
arch/parisc/kernel/asm-offsets.c
45
DEFINE(TASK_TI_CPU, offsetof(struct task_struct, thread_info.cpu));
arch/parisc/kernel/hardware.c
1224
enum cpu_type cpu;
arch/parisc/kernel/hardware.c
1368
return ptr->cpu;
arch/parisc/kernel/irq.c
311
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
arch/parisc/kernel/irq.c
315
irq_data_update_affinity(d, cpumask_of(cpu));
arch/parisc/kernel/irq.c
318
return per_cpu(cpu_data, cpu).txn_addr;
arch/parisc/kernel/irq.c
385
int cpu = smp_processor_id();
arch/parisc/kernel/irq.c
403
stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
arch/parisc/kernel/irq.c
406
last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
arch/parisc/kernel/irq.c
422
last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
arch/parisc/kernel/irq.c
493
int irq, cpu = smp_processor_id();
arch/parisc/kernel/irq.c
503
eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
arch/parisc/kernel/irq.c
518
int cpu = cpumask_first(&dest);
arch/parisc/kernel/irq.c
521
irq, smp_processor_id(), cpu);
arch/parisc/kernel/irq.c
523
per_cpu(cpu_data, cpu).hpa);
arch/parisc/kernel/irq.c
541
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
arch/parisc/kernel/irq.c
72
int cpu = smp_processor_id();
arch/parisc/kernel/irq.c
75
per_cpu(local_ack_eiem, cpu) &= ~mask;
arch/parisc/kernel/irq.c
78
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
arch/parisc/kernel/irq.c
87
int cpu = smp_processor_id();
arch/parisc/kernel/irq.c
90
per_cpu(local_ack_eiem, cpu) |= mask;
arch/parisc/kernel/irq.c
93
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
arch/parisc/kernel/processor.c
381
unsigned long cpu;
arch/parisc/kernel/processor.c
390
for_each_online_cpu(cpu) {
arch/parisc/kernel/processor.c
392
const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
arch/parisc/kernel/processor.c
399
cpu, boot_cpu_data.family_name);
arch/parisc/kernel/processor.c
410
topology_physical_package_id(cpu));
arch/parisc/kernel/processor.c
412
cpumask_weight(topology_core_cpumask(cpu)));
arch/parisc/kernel/processor.c
413
seq_printf(m, "core id\t\t: %d\n", topology_core_id(cpu));
arch/parisc/kernel/smp.c
197
ipi_send(int cpu, enum ipi_message_type op)
arch/parisc/kernel/smp.c
199
struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
arch/parisc/kernel/smp.c
200
spinlock_t *lock = &per_cpu(ipi_lock, cpu);
arch/parisc/kernel/smp.c
212
int cpu;
arch/parisc/kernel/smp.c
214
for_each_cpu(cpu, mask)
arch/parisc/kernel/smp.c
215
ipi_send(cpu, op);
arch/parisc/kernel/smp.c
250
arch_smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
arch/parisc/kernel/smp.c
263
void arch_send_call_function_single_ipi(int cpu)
arch/parisc/kernel/smp.c
265
send_IPI_single(cpu, IPI_CALL_FUNC);
arch/parisc/kernel/smp.c
418
int cpu;
arch/parisc/kernel/smp.c
420
for_each_possible_cpu(cpu)
arch/parisc/kernel/smp.c
421
spin_lock_init(&per_cpu(ipi_lock, cpu));
arch/parisc/kernel/smp.c
432
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
arch/parisc/kernel/smp.c
434
if (cpu_online(cpu))
arch/parisc/kernel/smp.c
439
smp_boot_one_cpu(cpu, tidle))
arch/parisc/kernel/smp.c
442
return cpu_online(cpu) ? 0 : -EIO;
arch/parisc/kernel/smp.c
451
unsigned int cpu = smp_processor_id();
arch/parisc/kernel/smp.c
453
remove_cpu_topology(cpu);
arch/parisc/kernel/smp.c
459
set_cpu_online(cpu, false);
arch/parisc/kernel/smp.c
462
if (cpu == time_keeper_id) {
arch/parisc/kernel/smp.c
493
void __cpu_die(unsigned int cpu)
arch/parisc/kernel/smp.c
498
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
arch/parisc/kernel/smp.c
500
pr_info("CPU%u: is shutting down\n", cpu);
arch/parisc/kernel/time.c
45
int cpu = smp_processor_id();
arch/parisc/kernel/time.c
47
cd = &per_cpu(parisc_clockevent_device, cpu);
arch/parisc/kernel/time.c
79
unsigned int cpu = smp_processor_id();
arch/parisc/kernel/time.c
84
cd = &per_cpu(parisc_clockevent_device, cpu);
arch/parisc/kernel/time.c
92
cd->cpumask = cpumask_of(cpu);
arch/parisc/kernel/topology.c
21
static DEFINE_PER_CPU(struct cpu, cpu_devices);
arch/parisc/kernel/topology.c
33
unsigned long cpu;
arch/parisc/kernel/topology.c
50
for_each_online_cpu(cpu) {
arch/parisc/kernel/topology.c
51
const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
arch/parisc/kernel/topology.c
53
if (cpu == cpuid) /* ignore current cpu */
arch/parisc/kernel/topology.c
57
cpuid_topo->core_id = cpu_topology[cpu].core_id;
arch/parisc/kernel/topology.c
60
cpuid_topo->package_id = cpu_topology[cpu].package_id;
arch/parisc/kernel/topology.c
66
max_socket = max(max_socket, cpu_topology[cpu].package_id);
arch/parisc/lib/delay.c
33
int cpu;
arch/parisc/lib/delay.c
36
cpu = smp_processor_id();
arch/parisc/lib/delay.c
58
if (unlikely(cpu != smp_processor_id())) {
arch/parisc/lib/delay.c
60
cpu = smp_processor_id();
arch/powerpc/boot/4xx.c
322
u32 cpu, plb, opb, ebc, tb, uart0, uart1, m;
arch/powerpc/boot/4xx.c
328
cpu = plb = sys_clk;
arch/powerpc/boot/4xx.c
336
cpu = sys_clk * m / CPC0_SYS0_FWDVA(sys0);
arch/powerpc/boot/4xx.c
349
tb = cpu;
arch/powerpc/boot/4xx.c
368
dt_fixup_cpu_clocks(cpu, tb, 0);
arch/powerpc/boot/4xx.c
406
u32 cpu, plb, opb, ebc, vco;
arch/powerpc/boot/4xx.c
443
cpu = clk_a / pradv0;
arch/powerpc/boot/4xx.c
457
tb = cpu;
arch/powerpc/boot/4xx.c
459
dt_fixup_cpu_clocks(cpu, tb, 0);
arch/powerpc/boot/devtree.c
63
void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus)
arch/powerpc/boot/devtree.c
67
printf("CPU clock-frequency <- 0x%x (%dMHz)\n\r", cpu, MHZ(cpu));
arch/powerpc/boot/devtree.c
73
setprop_val(devp, "clock-frequency", cpu_to_be32(cpu));
arch/powerpc/include/asm/book3s/64/mmu.h
258
#define arch_clear_mm_cpumask_cpu(cpu, mm) \
arch/powerpc/include/asm/book3s/64/mmu.h
260
if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { \
arch/powerpc/include/asm/book3s/64/mmu.h
262
cpumask_clear_cpu(cpu, mm_cpumask(mm)); \
arch/powerpc/include/asm/cpuidle.h
87
unsigned long pnv_cpu_offline(unsigned int cpu);
arch/powerpc/include/asm/cputhreads.h
41
int cpu_core_index_of_thread(int cpu);
arch/powerpc/include/asm/cputhreads.h
44
static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
arch/powerpc/include/asm/cputhreads.h
48
static inline int cpu_thread_in_core(int cpu)
arch/powerpc/include/asm/cputhreads.h
50
return cpu & (threads_per_core - 1);
arch/powerpc/include/asm/cputhreads.h
53
static inline int cpu_thread_in_subcore(int cpu)
arch/powerpc/include/asm/cputhreads.h
55
return cpu & (threads_per_subcore - 1);
arch/powerpc/include/asm/cputhreads.h
58
static inline int cpu_first_thread_sibling(int cpu)
arch/powerpc/include/asm/cputhreads.h
60
return cpu & ~(threads_per_core - 1);
arch/powerpc/include/asm/cputhreads.h
63
static inline int cpu_last_thread_sibling(int cpu)
arch/powerpc/include/asm/cputhreads.h
65
return cpu | (threads_per_core - 1);
arch/powerpc/include/asm/cputhreads.h
74
static inline int cpu_first_tlb_thread_sibling(int cpu)
arch/powerpc/include/asm/cputhreads.h
77
return cpu & ~0x6; /* Big Core */
arch/powerpc/include/asm/cputhreads.h
79
return cpu_first_thread_sibling(cpu);
arch/powerpc/include/asm/cputhreads.h
82
static inline int cpu_last_tlb_thread_sibling(int cpu)
arch/powerpc/include/asm/cputhreads.h
85
return cpu | 0x6; /* Big Core */
arch/powerpc/include/asm/cputhreads.h
87
return cpu_last_thread_sibling(cpu);
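
The cputhreads.h helpers above are pure mask arithmetic on the logical CPU number and rely on threads_per_core being a power of two. A standalone sketch, assuming a hypothetical threads_per_core of 8:

    /* Sibling-mask arithmetic from the excerpt above; threads_per_core
     * is a hypothetical stand-in for the kernel variable and must be a
     * power of two for the bit tricks to hold. */
    #include <assert.h>
    #include <stdio.h>

    static const int threads_per_core = 8;

    static int cpu_thread_in_core(int cpu)       { return cpu & (threads_per_core - 1); }
    static int cpu_first_thread_sibling(int cpu) { return cpu & ~(threads_per_core - 1); }
    static int cpu_last_thread_sibling(int cpu)  { return cpu | (threads_per_core - 1); }

    int main(void)
    {
        /* CPU 21 with 8 threads per core: thread 5 of core 2, siblings 16..23. */
        assert(cpu_thread_in_core(21) == 5);
        assert(cpu_first_thread_sibling(21) == 16);
        assert(cpu_last_thread_sibling(21) == 23);
        printf("cpu 21 -> thread %d, siblings %d..%d\n",
               cpu_thread_in_core(21),
               cpu_first_thread_sibling(21),
               cpu_last_thread_sibling(21));
        return 0;
    }
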
arch/powerpc/include/asm/dbell.h
109
static inline void doorbell_global_ipi(int cpu)
arch/powerpc/include/asm/dbell.h
111
u32 tag = get_hard_smp_processor_id(cpu);
arch/powerpc/include/asm/dbell.h
113
kvmppc_set_host_ipi(cpu);
arch/powerpc/include/asm/dbell.h
124
static inline void doorbell_core_ipi(int cpu)
arch/powerpc/include/asm/dbell.h
126
u32 tag = cpu_thread_in_core(cpu);
arch/powerpc/include/asm/dbell.h
128
kvmppc_set_host_ipi(cpu);
arch/powerpc/include/asm/dbell.h
138
static inline int doorbell_try_core_ipi(int cpu)
arch/powerpc/include/asm/dbell.h
143
if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu))) {
arch/powerpc/include/asm/dbell.h
144
doorbell_core_ipi(cpu);
arch/powerpc/include/asm/dtl.h
40
extern void register_dtl_buffer(int cpu);
arch/powerpc/include/asm/fsl_pamu_stash.h
19
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu);
arch/powerpc/include/asm/fsl_pm.h
25
void (*irq_mask)(int cpu);
arch/powerpc/include/asm/fsl_pm.h
28
void (*irq_unmask)(int cpu);
arch/powerpc/include/asm/fsl_pm.h
29
void (*cpu_enter_state)(int cpu, int state);
arch/powerpc/include/asm/fsl_pm.h
30
void (*cpu_exit_state)(int cpu, int state);
arch/powerpc/include/asm/fsl_pm.h
31
void (*cpu_up_prepare)(int cpu);
arch/powerpc/include/asm/fsl_pm.h
32
void (*cpu_die)(int cpu);
arch/powerpc/include/asm/hardirq.h
35
extern u64 arch_irq_stat_cpu(unsigned int cpu);
arch/powerpc/include/asm/kvm_book3s_64.h
546
extern void kvmhv_rm_send_ipi(int cpu);
arch/powerpc/include/asm/kvm_book3s_64.h
690
void kvmhv_set_l2_counters_status(int cpu, bool status);
arch/powerpc/include/asm/kvm_ppc.h
1116
extern void xics_wake_cpu(int cpu);
arch/powerpc/include/asm/kvm_ppc.h
126
extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
arch/powerpc/include/asm/kvm_ppc.h
269
void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
arch/powerpc/include/asm/kvm_ppc.h
446
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
arch/powerpc/include/asm/kvm_ppc.h
448
paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
arch/powerpc/include/asm/kvm_ppc.h
451
static inline void kvmppc_set_xive_tima(int cpu,
arch/powerpc/include/asm/kvm_ppc.h
455
paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
arch/powerpc/include/asm/kvm_ppc.h
456
paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
arch/powerpc/include/asm/kvm_ppc.h
542
static inline void kvmppc_set_host_ipi(int cpu)
arch/powerpc/include/asm/kvm_ppc.h
550
WRITE_ONCE(paca_ptrs[cpu]->kvm_hstate.host_ipi, 1);
arch/powerpc/include/asm/kvm_ppc.h
553
static inline void kvmppc_clear_host_ipi(int cpu)
arch/powerpc/include/asm/kvm_ppc.h
555
WRITE_ONCE(paca_ptrs[cpu]->kvm_hstate.host_ipi, 0);
arch/powerpc/include/asm/kvm_ppc.h
579
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
arch/powerpc/include/asm/kvm_ppc.h
582
static inline void kvmppc_set_xive_tima(int cpu,
arch/powerpc/include/asm/kvm_ppc.h
592
static inline void kvmppc_set_host_ipi(int cpu)
arch/powerpc/include/asm/kvm_ppc.h
595
static inline void kvmppc_clear_host_ipi(int cpu)
arch/powerpc/include/asm/kvm_ppc.h
679
struct kvm_vcpu *vcpu, u32 cpu);
arch/powerpc/include/asm/kvm_ppc.h
729
struct kvm_vcpu *vcpu, u32 cpu);
arch/powerpc/include/asm/kvm_ppc.h
750
struct kvm_vcpu *vcpu, u32 cpu);
arch/powerpc/include/asm/kvm_ppc.h
767
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
arch/powerpc/include/asm/kvm_ppc.h
785
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
arch/powerpc/include/asm/kvm_ppc.h
899
u32 cpu);
arch/powerpc/include/asm/kvm_ppc.h
909
struct kvm_vcpu *vcpu, u32 cpu)
arch/powerpc/include/asm/lppaca.h
105
#define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr)
arch/powerpc/include/asm/machdep.h
46
unsigned long (*get_proc_freq)(unsigned int cpu);
arch/powerpc/include/asm/mce.h
123
u16 cpu;
arch/powerpc/include/asm/opal.h
233
int64_t opal_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
arch/powerpc/include/asm/opal.h
306
s64 opal_signal_system_reset(s32 cpu);
arch/powerpc/include/asm/opal.h
307
s64 opal_quiesce(u64 shutdown_type, s32 cpu);
arch/powerpc/include/asm/paca.h
285
extern void initialise_paca(struct paca_struct *new_paca, int cpu);
arch/powerpc/include/asm/paca.h
288
extern void allocate_paca(int cpu);
arch/powerpc/include/asm/paca.h
293
static inline void allocate_paca(int cpu) { }
arch/powerpc/include/asm/paravirt.h
111
static inline void prod_cpu(int cpu)
arch/powerpc/include/asm/paravirt.h
127
static inline bool vcpu_is_preempted(int cpu)
arch/powerpc/include/asm/paravirt.h
149
if (vcpu_is_dispatched(cpu))
arch/powerpc/include/asm/paravirt.h
156
if (!is_vcpu_idle(cpu))
arch/powerpc/include/asm/paravirt.h
185
if (cpu_first_thread_sibling(cpu) == first_cpu)
arch/powerpc/include/asm/paravirt.h
195
first_cpu = cpu_first_thread_sibling(cpu);
arch/powerpc/include/asm/paravirt.h
197
if (i == cpu)
arch/powerpc/include/asm/paravirt.h
26
u64 pseries_paravirt_steal_clock(int cpu);
arch/powerpc/include/asm/paravirt.h
28
static inline u64 paravirt_steal_clock(int cpu)
arch/powerpc/include/asm/paravirt.h
30
return pseries_paravirt_steal_clock(cpu);
arch/powerpc/include/asm/paravirt.h
35
static inline u32 yield_count_of(int cpu)
arch/powerpc/include/asm/paravirt.h
37
__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
arch/powerpc/include/asm/paravirt.h
57
static inline void yield_to_preempted(int cpu, u32 yield_count)
arch/powerpc/include/asm/paravirt.h
59
plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
arch/powerpc/include/asm/paravirt.h
62
static inline void prod_cpu(int cpu)
arch/powerpc/include/asm/paravirt.h
64
plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
arch/powerpc/include/asm/paravirt.h
93
static inline u32 yield_count_of(int cpu)
arch/powerpc/include/asm/paravirt.h
99
static inline void yield_to_preempted(int cpu, u32 yield_count)
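
The paravirt.h excerpt shows the shared-processor yield protocol: yield_count_of() samples the lock holder's dispatch count (an odd value means that vCPU is not currently running), and yield_to_preempted() confers the caller's timeslice via H_CONFER, passing the sampled count so a stale snapshot turns the confer into a no-op. A userspace simulation; read_yield_count() and hcall_confer() are hypothetical stand-ins for the lppaca field and the hypercall:

    /* Simulation only, not the kernel implementation. read_yield_count()
     * stands in for lppaca_of(cpu).yield_count and hcall_confer() for
     * plpar_hcall_norets(H_CONFER, ...). */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_yield_count = 3;   /* odd: holder vCPU preempted */

    static uint32_t read_yield_count(int cpu)
    {
        (void)cpu;
        return fake_yield_count;
    }

    static void hcall_confer(int cpu, uint32_t yield_count)
    {
        printf("confer timeslice to vCPU %d (yield_count %u)\n",
               cpu, yield_count);
    }

    /* Called while spinning on a lock owned by holder_cpu. */
    static void maybe_yield_to(int holder_cpu)
    {
        uint32_t yc = read_yield_count(holder_cpu);

        if ((yc & 1) == 0)
            return;                 /* holder is running; keep spinning */
        hcall_confer(holder_cpu, yc);
    }

    int main(void)
    {
        maybe_yield_to(2);
        return 0;
    }
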
arch/powerpc/include/asm/plpar_wrappers.h
30
static inline long vpa_call(unsigned long flags, unsigned long cpu,
arch/powerpc/include/asm/plpar_wrappers.h
329
static inline long plpar_signal_sys_reset(long cpu)
arch/powerpc/include/asm/plpar_wrappers.h
331
return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
arch/powerpc/include/asm/plpar_wrappers.h
35
return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
arch/powerpc/include/asm/plpar_wrappers.h
38
static inline long unregister_vpa(unsigned long cpu)
arch/powerpc/include/asm/plpar_wrappers.h
40
return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
arch/powerpc/include/asm/plpar_wrappers.h
43
static inline long register_vpa(unsigned long cpu, unsigned long vpa)
arch/powerpc/include/asm/plpar_wrappers.h
45
return vpa_call(H_VPA_REG_VPA, cpu, vpa);
arch/powerpc/include/asm/plpar_wrappers.h
48
static inline long unregister_slb_shadow(unsigned long cpu)
arch/powerpc/include/asm/plpar_wrappers.h
50
return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
arch/powerpc/include/asm/plpar_wrappers.h
53
static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
arch/powerpc/include/asm/plpar_wrappers.h
55
return vpa_call(H_VPA_REG_SLB, cpu, vpa);
arch/powerpc/include/asm/plpar_wrappers.h
58
static inline long unregister_dtl(unsigned long cpu)
arch/powerpc/include/asm/plpar_wrappers.h
60
return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
arch/powerpc/include/asm/plpar_wrappers.h
63
static inline long register_dtl(unsigned long cpu, unsigned long vpa)
arch/powerpc/include/asm/plpar_wrappers.h
65
return vpa_call(H_VPA_REG_DTL, cpu, vpa);
arch/powerpc/include/asm/plpar_wrappers.h
97
extern void vpa_init(int cpu);
arch/powerpc/include/asm/powernv.h
12
void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val);
arch/powerpc/include/asm/ps3.h
180
int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
arch/powerpc/include/asm/ps3.h
183
int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq);
arch/powerpc/include/asm/ps3.h
187
int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
arch/powerpc/include/asm/ps3.h
190
int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
arch/powerpc/include/asm/ps3.h
193
int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
arch/powerpc/include/asm/ps3.h
198
enum ps3_cpu_binding cpu, unsigned int *virq);
arch/powerpc/include/asm/ps3.h
488
u32 ps3_read_phys_ctr(u32 cpu, u32 phys_ctr);
arch/powerpc/include/asm/ps3.h
489
void ps3_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
arch/powerpc/include/asm/ps3.h
490
u32 ps3_read_ctr(u32 cpu, u32 ctr);
arch/powerpc/include/asm/ps3.h
491
void ps3_write_ctr(u32 cpu, u32 ctr, u32 val);
arch/powerpc/include/asm/ps3.h
493
u32 ps3_read_pm07_control(u32 cpu, u32 ctr);
arch/powerpc/include/asm/ps3.h
494
void ps3_write_pm07_control(u32 cpu, u32 ctr, u32 val);
arch/powerpc/include/asm/ps3.h
495
u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg);
arch/powerpc/include/asm/ps3.h
496
void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
arch/powerpc/include/asm/ps3.h
498
u32 ps3_get_ctr_size(u32 cpu, u32 phys_ctr);
arch/powerpc/include/asm/ps3.h
499
void ps3_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
arch/powerpc/include/asm/ps3.h
501
void ps3_enable_pm(u32 cpu);
arch/powerpc/include/asm/ps3.h
502
void ps3_disable_pm(u32 cpu);
arch/powerpc/include/asm/ps3.h
503
void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
arch/powerpc/include/asm/ps3.h
504
void ps3_disable_pm_interrupts(u32 cpu);
arch/powerpc/include/asm/ps3.h
506
u32 ps3_get_and_clear_pm_interrupts(u32 cpu);
arch/powerpc/include/asm/ps3.h
508
u32 ps3_get_hw_thread_id(int cpu);
arch/powerpc/include/asm/smp.h
100
return smp_hw_index[cpu];
arch/powerpc/include/asm/smp.h
103
static inline void set_hard_smp_processor_id(int cpu, int phys)
arch/powerpc/include/asm/smp.h
105
smp_hw_index[cpu] = phys;
arch/powerpc/include/asm/smp.h
114
static inline struct cpumask *cpu_sibling_mask(int cpu)
arch/powerpc/include/asm/smp.h
116
return per_cpu(cpu_sibling_map, cpu);
arch/powerpc/include/asm/smp.h
119
static inline struct cpumask *cpu_core_mask(int cpu)
arch/powerpc/include/asm/smp.h
121
return per_cpu(cpu_core_map, cpu);
arch/powerpc/include/asm/smp.h
124
static inline struct cpumask *cpu_l2_cache_mask(int cpu)
arch/powerpc/include/asm/smp.h
126
return per_cpu(cpu_l2_cache_map, cpu);
arch/powerpc/include/asm/smp.h
129
static inline struct cpumask *cpu_smallcore_mask(int cpu)
arch/powerpc/include/asm/smp.h
131
return per_cpu(cpu_smallcore_map, cpu);
arch/powerpc/include/asm/smp.h
134
extern int cpu_to_core_id(int cpu);
arch/powerpc/include/asm/smp.h
142
static inline const struct cpumask *cpu_smt_mask(int cpu)
arch/powerpc/include/asm/smp.h
145
return per_cpu(cpu_smallcore_map, cpu);
arch/powerpc/include/asm/smp.h
147
return per_cpu(cpu_sibling_map, cpu);
arch/powerpc/include/asm/smp.h
176
extern void smp_muxed_ipi_message_pass(int cpu, int msg);
arch/powerpc/include/asm/smp.h
177
extern void smp_muxed_ipi_set_message(int cpu, int msg);
arch/powerpc/include/asm/smp.h
186
extern void __cpu_die(unsigned int cpu);
arch/powerpc/include/asm/smp.h
194
static inline const struct cpumask *cpu_sibling_mask(int cpu)
arch/powerpc/include/asm/smp.h
196
return cpumask_of(cpu);
arch/powerpc/include/asm/smp.h
199
static inline const struct cpumask *cpu_smallcore_mask(int cpu)
arch/powerpc/include/asm/smp.h
201
return cpumask_of(cpu);
arch/powerpc/include/asm/smp.h
204
static inline const struct cpumask *cpu_l2_cache_mask(int cpu)
arch/powerpc/include/asm/smp.h
206
return cpumask_of(cpu);
arch/powerpc/include/asm/smp.h
211
static inline int get_hard_smp_processor_id(int cpu)
arch/powerpc/include/asm/smp.h
213
return paca_ptrs[cpu]->hw_cpu_id;
arch/powerpc/include/asm/smp.h
216
static inline void set_hard_smp_processor_id(int cpu, int phys)
arch/powerpc/include/asm/smp.h
218
paca_ptrs[cpu]->hw_cpu_id = phys;
arch/powerpc/include/asm/smp.h
224
static inline int get_hard_smp_processor_id(int cpu)
arch/powerpc/include/asm/smp.h
229
static inline void set_hard_smp_processor_id(int cpu, int phys)
arch/powerpc/include/asm/smp.h
245
extern void smp_mpic_setup_cpu(int cpu);
arch/powerpc/include/asm/smp.h
255
extern void arch_send_call_function_single_ipi(int cpu);
arch/powerpc/include/asm/smp.h
35
extern int cpu_to_chip_id(int cpu);
arch/powerpc/include/asm/smp.h
45
void (*message_pass)(int cpu, int msg);
arch/powerpc/include/asm/smp.h
47
void (*cause_ipi)(int cpu);
arch/powerpc/include/asm/smp.h
49
int (*cause_nmi_ipi)(int cpu);
arch/powerpc/include/asm/smp.h
68
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
arch/powerpc/include/asm/smp.h
69
extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
arch/powerpc/include/asm/smp.h
79
void generic_cpu_die(unsigned int cpu);
arch/powerpc/include/asm/smp.h
80
void generic_set_cpu_dead(unsigned int cpu);
arch/powerpc/include/asm/smp.h
81
void generic_set_cpu_up(unsigned int cpu);
arch/powerpc/include/asm/smp.h
82
int generic_check_cpu_restart(unsigned int cpu);
arch/powerpc/include/asm/smp.h
83
int is_cpu_dead(unsigned int cpu);
arch/powerpc/include/asm/smp.h
95
#define raw_smp_processor_id() (current_thread_info()->cpu)
arch/powerpc/include/asm/smp.h
98
static inline int get_hard_smp_processor_id(int cpu)
arch/powerpc/include/asm/spu.h
191
void spu_irq_setaffinity(struct spu *spu, int cpu);
arch/powerpc/include/asm/spu_priv1.h
26
void (*cpu_affinity_set) (struct spu *spu, int cpu);
arch/powerpc/include/asm/spu_priv1.h
81
spu_cpu_affinity_set (struct spu *spu, int cpu)
arch/powerpc/include/asm/spu_priv1.h
83
spu_priv1_ops->cpu_affinity_set(spu, cpu);
arch/powerpc/include/asm/thread_info.h
58
unsigned int cpu;
arch/powerpc/include/asm/topology.h
105
static inline void map_cpu_to_node(int cpu, int node) {}
arch/powerpc/include/asm/topology.h
107
static inline void unmap_cpu_from_node(unsigned long cpu) {}
arch/powerpc/include/asm/topology.h
114
void find_and_update_cpu_nid(int cpu);
arch/powerpc/include/asm/topology.h
115
extern int cpu_to_coregroup_id(int cpu);
arch/powerpc/include/asm/topology.h
117
static inline void find_and_update_cpu_nid(int cpu) {}
arch/powerpc/include/asm/topology.h
118
static inline int cpu_to_coregroup_id(int cpu)
arch/powerpc/include/asm/topology.h
121
return cpu_to_core_id(cpu);
arch/powerpc/include/asm/topology.h
134
struct cpumask *cpu_coregroup_mask(int cpu);
arch/powerpc/include/asm/topology.h
135
const struct cpumask *cpu_die_mask(int cpu);
arch/powerpc/include/asm/topology.h
136
int cpu_die_id(int cpu);
arch/powerpc/include/asm/topology.h
141
#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
arch/powerpc/include/asm/topology.h
142
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
arch/powerpc/include/asm/topology.h
143
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
arch/powerpc/include/asm/topology.h
144
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
arch/powerpc/include/asm/topology.h
145
#define topology_die_id(cpu) (cpu_die_id(cpu))
arch/powerpc/include/asm/topology.h
146
#define topology_die_cpumask(cpu) (cpu_die_mask(cpu))
arch/powerpc/include/asm/topology.h
156
static inline bool topology_is_primary_thread(unsigned int cpu)
arch/powerpc/include/asm/topology.h
158
return cpu == cpu_first_thread_sibling(cpu);
arch/powerpc/include/asm/topology.h
162
static inline bool topology_smt_thread_allowed(unsigned int cpu)
arch/powerpc/include/asm/topology.h
164
return cpu_thread_in_core(cpu) < cpu_smt_num_threads;
arch/powerpc/include/asm/topology.h
168
static inline bool topology_is_core_online(unsigned int cpu)
arch/powerpc/include/asm/topology.h
170
int i, first_cpu = cpu_first_thread_sibling(cpu);
arch/powerpc/include/asm/topology.h
48
static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
arch/powerpc/include/asm/topology.h
50
numa_cpu_lookup_table[cpu] = node;
arch/powerpc/include/asm/topology.h
53
static inline int early_cpu_to_node(int cpu)
arch/powerpc/include/asm/topology.h
57
nid = numa_cpu_lookup_table[cpu];
arch/powerpc/include/asm/topology.h
69
extern void map_cpu_to_node(int cpu, int node);
arch/powerpc/include/asm/topology.h
71
extern void unmap_cpu_from_node(unsigned long cpu);
arch/powerpc/include/asm/topology.h
76
static inline int early_cpu_to_node(int cpu) { return 0; }
arch/powerpc/include/asm/topology.h
90
static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
arch/powerpc/include/asm/vphn.h
22
long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
arch/powerpc/include/asm/xics.h
60
void (*cause_ipi)(int cpu);
arch/powerpc/include/asm/xive.h
103
void xmon_xive_do_dump(int cpu);
arch/powerpc/include/asm/xive.h
157
static inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
arch/powerpc/include/asm/xive.h
95
int xive_smp_prepare_cpu(unsigned int cpu);
arch/powerpc/kernel/asm-offsets.c
432
OFFSET(VCPU_CPU, kvm_vcpu, cpu);
arch/powerpc/kernel/asm-offsets.c
95
OFFSET(TASK_CPU, task_struct, thread_info.cpu);
arch/powerpc/kernel/cacheinfo.c
194
static void cache_cpu_set(struct cache *cache, int cpu)
arch/powerpc/kernel/cacheinfo.c
199
WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
arch/powerpc/kernel/cacheinfo.c
201
cpu, next->ofnode,
arch/powerpc/kernel/cacheinfo.c
203
cpumask_set_cpu(cpu, &next->shared_cpu_map);
arch/powerpc/kernel/cacheinfo.c
890
static void cache_cpu_clear(struct cache *cache, int cpu)
arch/powerpc/kernel/cacheinfo.c
895
WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
arch/powerpc/kernel/cacheinfo.c
897
cpu, cache->ofnode,
arch/powerpc/kernel/cacheinfo.c
900
cpumask_clear_cpu(cpu, &cache->shared_cpu_map);
arch/powerpc/kernel/cacheinfo.c
935
unsigned int cpu;
arch/powerpc/kernel/cacheinfo.c
939
for_each_online_cpu(cpu)
arch/powerpc/kernel/cacheinfo.c
940
cacheinfo_cpu_offline(cpu);
arch/powerpc/kernel/cacheinfo.c
945
unsigned int cpu;
arch/powerpc/kernel/cacheinfo.c
949
for_each_online_cpu(cpu)
arch/powerpc/kernel/cacheinfo.c
950
cacheinfo_cpu_online(cpu);
arch/powerpc/kernel/irq.c
165
u64 arch_irq_stat_cpu(unsigned int cpu)
arch/powerpc/kernel/irq.c
167
u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
arch/powerpc/kernel/irq.c
169
sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
arch/powerpc/kernel/irq.c
170
sum += per_cpu(irq_stat, cpu).pmu_irqs;
arch/powerpc/kernel/irq.c
171
sum += per_cpu(irq_stat, cpu).mce_exceptions;
arch/powerpc/kernel/irq.c
172
sum += per_cpu(irq_stat, cpu).spurious_irqs;
arch/powerpc/kernel/irq.c
173
sum += per_cpu(irq_stat, cpu).timer_irqs_others;
arch/powerpc/kernel/irq.c
175
sum += paca_ptrs[cpu]->hmi_irqs;
arch/powerpc/kernel/irq.c
177
sum += per_cpu(irq_stat, cpu).sreset_irqs;
arch/powerpc/kernel/irq.c
179
sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
arch/powerpc/kernel/irq.c
182
sum += per_cpu(irq_stat, cpu).doorbell_irqs;
arch/powerpc/kernel/irq.c
82
u32 tau_interrupts(unsigned long cpu);
arch/powerpc/kernel/mce.c
114
mce->cpu = get_paca()->paca_index;
arch/powerpc/kernel/mce.c
576
level, evt->cpu, sevstr, in_guest ? "Guest" : "",
arch/powerpc/kernel/mce.c
583
level, evt->cpu, current->pid, current->comm,
arch/powerpc/kernel/mce.c
587
level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
arch/powerpc/kernel/mce.c
590
printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);
arch/powerpc/kernel/mce.c
594
printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);
arch/powerpc/kernel/paca.c
121
static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
arch/powerpc/kernel/paca.c
131
lp = alloc_shared_lppaca(LPPACA_SIZE, limit, cpu);
arch/powerpc/kernel/paca.c
133
lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);
arch/powerpc/kernel/paca.c
149
static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
arch/powerpc/kernel/paca.c
153
if (cpu != boot_cpuid) {
arch/powerpc/kernel/paca.c
163
s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);
arch/powerpc/kernel/paca.c
184
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
arch/powerpc/kernel/paca.c
193
new_paca->paca_index = cpu;
arch/powerpc/kernel/paca.c
256
void __init allocate_paca(int cpu)
arch/powerpc/kernel/paca.c
261
BUG_ON(cpu >= paca_nr_cpu_ids);
arch/powerpc/kernel/paca.c
27
unsigned long limit, int cpu)
arch/powerpc/kernel/paca.c
274
limit, cpu);
arch/powerpc/kernel/paca.c
275
paca_ptrs[cpu] = paca;
arch/powerpc/kernel/paca.c
277
initialise_paca(paca, cpu);
arch/powerpc/kernel/paca.c
279
paca->lppaca_ptr = new_lppaca(cpu, limit);
arch/powerpc/kernel/paca.c
282
paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
arch/powerpc/kernel/paca.c
37
if (cpu == boot_cpuid) {
arch/powerpc/kernel/paca.c
41
nid = early_cpu_to_node(cpu);
arch/powerpc/kernel/paca.c
49
if (cpu == boot_cpuid)
arch/powerpc/kernel/paca.c
60
int cpu)
arch/powerpc/kernel/process.c
2132
unsigned long cpu = task_cpu(p);
arch/powerpc/kernel/process.c
2134
if (!hardirq_ctx[cpu] || !softirq_ctx[cpu])
arch/powerpc/kernel/process.c
2137
stack_page = (unsigned long)hardirq_ctx[cpu];
arch/powerpc/kernel/process.c
2141
stack_page = (unsigned long)softirq_ctx[cpu];
arch/powerpc/kernel/process.c
2153
unsigned long cpu = task_cpu(p);
arch/powerpc/kernel/process.c
2158
if (!paca_ptrs[cpu]->emergency_sp)
arch/powerpc/kernel/process.c
2162
if (!paca_ptrs[cpu]->nmi_emergency_sp || !paca_ptrs[cpu]->mc_emergency_sp)
arch/powerpc/kernel/process.c
2166
stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
arch/powerpc/kernel/process.c
2171
stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
arch/powerpc/kernel/process.c
2175
stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
arch/powerpc/kernel/process.c
2187
unsigned long cpu = task_cpu(p);
arch/powerpc/kernel/process.c
2192
stack_page = (unsigned long)emergency_ctx[cpu] - THREAD_SIZE;
arch/powerpc/kernel/prom.c
1013
int cpu_to_chip_id(int cpu)
arch/powerpc/kernel/prom.c
1018
idx = cpu / threads_per_core;
arch/powerpc/kernel/prom.c
1022
np = of_get_cpu_node(cpu, NULL);
arch/powerpc/kernel/prom.c
1035
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
arch/powerpc/kernel/prom.c
1044
return (int)phys_id == cpu_to_phys_id[cpu];
arch/powerpc/kernel/prom.c
1047
return (int)phys_id == get_hard_smp_processor_id(cpu);
arch/powerpc/kernel/prom_init.c
118
int cpu;
arch/powerpc/kernel/prom_init.c
2194
if (cpu_no != prom.cpu) {
arch/powerpc/kernel/prom_init.c
2760
hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
arch/powerpc/kernel/prom_init.c
3150
prom.cpu = be32_to_cpu(rval);
arch/powerpc/kernel/prom_init.c
3152
prom_debug("Booting CPU hw index = %d\n", prom.cpu);
arch/powerpc/kernel/rtasd.c
430
unsigned int cpu;
arch/powerpc/kernel/rtasd.c
437
cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
arch/powerpc/kernel/rtasd.c
438
if (cpu >= nr_cpu_ids) {
arch/powerpc/kernel/rtasd.c
439
cpu = cpumask_first(cpu_online_mask);
arch/powerpc/kernel/rtasd.c
453
schedule_delayed_work_on(cpu, &event_scan_work,
arch/powerpc/kernel/rtasd.c
454
__round_jiffies_relative(event_scan_delay, cpu));
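
The rtasd.c lines above round-robin the event scan across CPUs: cpumask_next() yields the CPU after the current one, and a result of nr_cpu_ids or more wraps back to cpumask_first(). The same wrap-around on a toy bitmask (the mask and NR_CPUS here are illustrative, not the kernel's):

    /* Toy round-robin over an "online" bitmask, mimicking the
     * cpumask_next()/cpumask_first() wrap in the excerpt above. */
    #include <stdio.h>

    #define NR_CPUS 8
    static const unsigned online_mask = 0x5b;   /* CPUs 0,1,3,4,6 online */

    static int next_online(int cpu)
    {
        for (int i = cpu + 1; i < NR_CPUS; i++)
            if (online_mask & (1u << i))
                return i;
        return NR_CPUS;                         /* nr_cpu_ids sentinel */
    }

    static int first_online(void)
    {
        return next_online(-1);
    }

    int main(void)
    {
        int cpu = 6;                            /* last online CPU */

        cpu = next_online(cpu);
        if (cpu >= NR_CPUS)                     /* fell off the end: wrap */
            cpu = first_online();
        printf("next event scan runs on CPU %d\n", cpu);
        return 0;
    }
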
arch/powerpc/kernel/security.c
648
int cpu;
arch/powerpc/kernel/security.c
681
for_each_possible_cpu(cpu) {
arch/powerpc/kernel/security.c
682
struct paca_struct *paca = paca_ptrs[cpu];
arch/powerpc/kernel/setup-common.c
416
static int assign_threads(unsigned int cpu, unsigned int nthreads, bool present,
arch/powerpc/kernel/setup-common.c
419
for (int i = 0; i < nthreads && cpu < nr_cpu_ids; i++) {
arch/powerpc/kernel/setup-common.c
424
DBG(" thread %d -> cpu %d (hard id %d)\n", i, cpu, hwid);
arch/powerpc/kernel/setup-common.c
426
set_cpu_present(cpu, present);
arch/powerpc/kernel/setup-common.c
427
set_cpu_possible(cpu, true);
arch/powerpc/kernel/setup-common.c
428
cpu_to_phys_id[cpu] = hwid;
arch/powerpc/kernel/setup-common.c
429
cpu++;
arch/powerpc/kernel/setup-common.c
432
return cpu;
arch/powerpc/kernel/setup-common.c
456
int cpu = 0;
arch/powerpc/kernel/setup-common.c
480
cpu_be = cpu_to_be32(cpu);
arch/powerpc/kernel/setup-common.c
495
if (cpu == 0) {
arch/powerpc/kernel/setup-common.c
497
cpu = nthreads;
arch/powerpc/kernel/setup-common.c
507
} else if (cpu >= nr_cpu_ids) {
arch/powerpc/kernel/setup-common.c
512
if (cpu < nr_cpu_ids)
arch/powerpc/kernel/setup-common.c
513
cpu = assign_threads(cpu, nthreads, avail, intserv);
arch/powerpc/kernel/setup-common.c
556
for (cpu = 0; cpu < maxcpus; cpu++)
arch/powerpc/kernel/setup-common.c
557
set_cpu_possible(cpu, true);
arch/powerpc/kernel/setup-common.c
889
int cpu;
arch/powerpc/kernel/setup-common.c
891
for_each_possible_cpu(cpu) {
arch/powerpc/kernel/setup-common.c
892
if (cpu == smp_processor_id())
arch/powerpc/kernel/setup-common.c
894
allocate_paca(cpu);
arch/powerpc/kernel/setup-common.c
895
set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]);
arch/powerpc/kernel/setup.h
62
u32 cpu_temp(unsigned long cpu);
arch/powerpc/kernel/setup.h
63
u32 cpu_temp_both(unsigned long cpu);
arch/powerpc/kernel/setup.h
64
u32 tau_interrupts(unsigned long cpu);
arch/powerpc/kernel/setup_64.c
108
paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;
arch/powerpc/kernel/setup_64.c
398
task_thread_info(current)->cpu = boot_cpuid; // fix task_cpu(current)
arch/powerpc/kernel/setup_64.c
627
struct device_node *cpu = NULL, *l2, *l3 = NULL;
arch/powerpc/kernel/setup_64.c
645
cpu = of_find_node_by_type(NULL, "cpu");
arch/powerpc/kernel/setup_64.c
651
if (cpu) {
arch/powerpc/kernel/setup_64.c
652
if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
arch/powerpc/kernel/setup_64.c
655
if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
arch/powerpc/kernel/setup_64.c
662
l2 = of_find_next_cache_node(cpu);
arch/powerpc/kernel/setup_64.c
663
of_node_put(cpu);
arch/powerpc/kernel/setup_64.c
709
static void *__init alloc_stack(unsigned long limit, int cpu)
arch/powerpc/kernel/setup_64.c
717
early_cpu_to_node(cpu));
arch/powerpc/kernel/setup_64.c
823
static __init int pcpu_cpu_to_node(int cpu)
arch/powerpc/kernel/setup_64.c
825
return early_cpu_to_node(cpu);
arch/powerpc/kernel/setup_64.c
837
unsigned int cpu;
arch/powerpc/kernel/setup_64.c
875
for_each_possible_cpu(cpu) {
arch/powerpc/kernel/setup_64.c
876
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
arch/powerpc/kernel/setup_64.c
877
paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
arch/powerpc/kernel/setup_64.c
93
int cpu;
arch/powerpc/kernel/setup_64.c
97
for_each_possible_cpu(cpu) {
arch/powerpc/kernel/setup_64.c
98
int first = cpu_first_thread_sibling(cpu);
arch/powerpc/kernel/smp.c
1033
static const struct cpumask *tl_cache_mask(struct sched_domain_topology_level *tl, int cpu)
arch/powerpc/kernel/smp.c
1035
return per_cpu(cpu_l2_cache_map, cpu);
arch/powerpc/kernel/smp.c
1039
static const struct cpumask *tl_smallcore_smt_mask(struct sched_domain_topology_level *tl, int cpu)
arch/powerpc/kernel/smp.c
1041
return cpu_smallcore_mask(cpu);
arch/powerpc/kernel/smp.c
1045
struct cpumask *cpu_coregroup_mask(int cpu)
arch/powerpc/kernel/smp.c
1047
return per_cpu(cpu_coregroup_map, cpu);
arch/powerpc/kernel/smp.c
1061
int cpu;
arch/powerpc/kernel/smp.c
1063
for_each_possible_cpu(cpu) {
arch/powerpc/kernel/smp.c
1064
int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
arch/powerpc/kernel/smp.c
1069
zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
arch/powerpc/kernel/smp.c
1071
cpu_to_node(cpu));
arch/powerpc/kernel/smp.c
1076
for_each_possible_cpu(cpu) {
arch/powerpc/kernel/smp.c
1077
int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);
arch/powerpc/kernel/smp.c
1095
const struct cpumask *cpu_die_mask(int cpu)
arch/powerpc/kernel/smp.c
1098
return per_cpu(cpu_coregroup_map, cpu);
arch/powerpc/kernel/smp.c
1100
return cpu_node_mask(cpu);
arch/powerpc/kernel/smp.c
1104
int cpu_die_id(int cpu)
arch/powerpc/kernel/smp.c
1107
return cpu_to_coregroup_id(cpu);
arch/powerpc/kernel/smp.c
1115
unsigned int cpu, num_threads;
arch/powerpc/kernel/smp.c
1129
for_each_possible_cpu(cpu) {
arch/powerpc/kernel/smp.c
1130
zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
arch/powerpc/kernel/smp.c
1131
GFP_KERNEL, cpu_to_node(cpu));
arch/powerpc/kernel/smp.c
1132
zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
arch/powerpc/kernel/smp.c
1133
GFP_KERNEL, cpu_to_node(cpu));
arch/powerpc/kernel/smp.c
1134
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
arch/powerpc/kernel/smp.c
1135
GFP_KERNEL, cpu_to_node(cpu));
arch/powerpc/kernel/smp.c
1137
zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
arch/powerpc/kernel/smp.c
1138
GFP_KERNEL, cpu_to_node(cpu));
arch/powerpc/kernel/smp.c
1144
if (cpu_present(cpu)) {
arch/powerpc/kernel/smp.c
1145
set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
arch/powerpc/kernel/smp.c
1146
set_cpu_numa_mem(cpu,
arch/powerpc/kernel/smp.c
1147
local_memory_node(numa_cpu_lookup_table[cpu]));
arch/powerpc/kernel/smp.c
1204
unsigned int cpu = smp_processor_id();
arch/powerpc/kernel/smp.c
1206
if (cpu == boot_cpuid)
arch/powerpc/kernel/smp.c
1209
set_cpu_online(cpu, false);
arch/powerpc/kernel/smp.c
1231
void generic_cpu_die(unsigned int cpu)
arch/powerpc/kernel/smp.c
1237
if (is_cpu_dead(cpu))
arch/powerpc/kernel/smp.c
1241
printk(KERN_ERR "CPU%d didn't die...\n", cpu);
arch/powerpc/kernel/smp.c
1244
void generic_set_cpu_dead(unsigned int cpu)
arch/powerpc/kernel/smp.c
1246
per_cpu(cpu_state, cpu) = CPU_DEAD;
arch/powerpc/kernel/smp.c
1254
void generic_set_cpu_up(unsigned int cpu)
arch/powerpc/kernel/smp.c
1256
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
arch/powerpc/kernel/smp.c
1259
int generic_check_cpu_restart(unsigned int cpu)
arch/powerpc/kernel/smp.c
1261
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
arch/powerpc/kernel/smp.c
1264
int is_cpu_dead(unsigned int cpu)
arch/powerpc/kernel/smp.c
1266
return per_cpu(cpu_state, cpu) == CPU_DEAD;
arch/powerpc/kernel/smp.c
1280
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
arch/powerpc/kernel/smp.c
1283
paca_ptrs[cpu]->__current = idle;
arch/powerpc/kernel/smp.c
1284
paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
arch/powerpc/kernel/smp.c
1287
task_thread_info(idle)->cpu = cpu;
arch/powerpc/kernel/smp.c
1288
secondary_current = current_set[cpu] = idle;
arch/powerpc/kernel/smp.c
1291
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
arch/powerpc/kernel/smp.c
1304
cpu_thread_in_subcore(cpu))
arch/powerpc/kernel/smp.c
1308
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
arch/powerpc/kernel/smp.c
1311
cpu_idle_thread_init(cpu, tidle);
arch/powerpc/kernel/smp.c
1318
rc = smp_ops->prepare_cpu(cpu);
arch/powerpc/kernel/smp.c
1326
cpu_callin_map[cpu] = 0;
arch/powerpc/kernel/smp.c
1335
DBG("smp: kicking cpu %d\n", cpu);
arch/powerpc/kernel/smp.c
1336
rc = smp_ops->kick_cpu(cpu);
arch/powerpc/kernel/smp.c
1338
pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
arch/powerpc/kernel/smp.c
1350
spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline));
arch/powerpc/kernel/smp.c
1352
if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) {
arch/powerpc/kernel/smp.c
1357
while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline))
arch/powerpc/kernel/smp.c
1361
if (!cpu_callin_map[cpu]) {
arch/powerpc/kernel/smp.c
1362
printk(KERN_ERR "Processor %u is stuck.\n", cpu);
arch/powerpc/kernel/smp.c
1366
DBG("Processor %u found.\n", cpu);
arch/powerpc/kernel/smp.c
1372
spin_until_cond(cpu_online(cpu));
arch/powerpc/kernel/smp.c
1380
int cpu_to_core_id(int cpu)
arch/powerpc/kernel/smp.c
1385
np = of_get_cpu_node(cpu, NULL);
arch/powerpc/kernel/smp.c
1397
int cpu_core_index_of_thread(int cpu)
arch/powerpc/kernel/smp.c
1399
return cpu >> threads_shift;
arch/powerpc/kernel/smp.c
1412
static struct device_node *cpu_to_l2cache(int cpu)
arch/powerpc/kernel/smp.c
1417
if (!cpu_present(cpu))
arch/powerpc/kernel/smp.c
1420
np = of_get_cpu_node(cpu, NULL);
arch/powerpc/kernel/smp.c
1431
static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
arch/powerpc/kernel/smp.c
1445
cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
arch/powerpc/kernel/smp.c
1447
for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
arch/powerpc/kernel/smp.c
1449
set_cpus_related(i, cpu, cpu_l2_cache_mask);
arch/powerpc/kernel/smp.c
1453
if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
arch/powerpc/kernel/smp.c
1454
!cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
arch/powerpc/kernel/smp.c
1456
cpu);
arch/powerpc/kernel/smp.c
1462
l2_cache = cpu_to_l2cache(cpu);
arch/powerpc/kernel/smp.c
1465
for_each_cpu(i, cpu_sibling_mask(cpu))
arch/powerpc/kernel/smp.c
1466
set_cpus_related(cpu, i, cpu_l2_cache_mask);
arch/powerpc/kernel/smp.c
1471
cpumask_and(*mask, cpu_online_mask, cpu_node_mask(cpu));
arch/powerpc/kernel/smp.c
1474
or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
arch/powerpc/kernel/smp.c
1477
cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
arch/powerpc/kernel/smp.c
1488
or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
arch/powerpc/kernel/smp.c
1502
static void remove_cpu_from_masks(int cpu)
arch/powerpc/kernel/smp.c
1507
unmap_cpu_from_node(cpu);
arch/powerpc/kernel/smp.c
1512
for_each_cpu(i, mask_fn(cpu)) {
arch/powerpc/kernel/smp.c
1513
set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
arch/powerpc/kernel/smp.c
1514
set_cpus_unrelated(cpu, i, cpu_sibling_mask);
arch/powerpc/kernel/smp.c
1516
set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
arch/powerpc/kernel/smp.c
1519
for_each_cpu(i, cpu_core_mask(cpu))
arch/powerpc/kernel/smp.c
1520
set_cpus_unrelated(cpu, i, cpu_core_mask);
arch/powerpc/kernel/smp.c
1523
for_each_cpu(i, cpu_coregroup_mask(cpu))
arch/powerpc/kernel/smp.c
1524
set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
arch/powerpc/kernel/smp.c
1529
static inline void add_cpu_to_smallcore_masks(int cpu)
arch/powerpc/kernel/smp.c
1536
cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
arch/powerpc/kernel/smp.c
1538
for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
arch/powerpc/kernel/smp.c
1540
set_cpus_related(i, cpu, cpu_smallcore_mask);
arch/powerpc/kernel/smp.c
1544
static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
arch/powerpc/kernel/smp.c
1547
int coregroup_id = cpu_to_coregroup_id(cpu);
arch/powerpc/kernel/smp.c
1555
for_each_cpu(i, submask_fn(cpu))
arch/powerpc/kernel/smp.c
1556
set_cpus_related(cpu, i, cpu_coregroup_mask);
arch/powerpc/kernel/smp.c
1561
cpumask_and(*mask, cpu_online_mask, cpu_node_mask(cpu));
arch/powerpc/kernel/smp.c
1564
or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
arch/powerpc/kernel/smp.c
1567
cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
arch/powerpc/kernel/smp.c
1572
or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
arch/powerpc/kernel/smp.c
1580
static void add_cpu_to_masks(int cpu)
arch/powerpc/kernel/smp.c
1583
int first_thread = cpu_first_thread_sibling(cpu);
arch/powerpc/kernel/smp.c
1593
map_cpu_to_node(cpu, cpu_to_node(cpu));
arch/powerpc/kernel/smp.c
1594
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
arch/powerpc/kernel/smp.c
1595
cpumask_set_cpu(cpu, cpu_core_mask(cpu));
arch/powerpc/kernel/smp.c
1599
set_cpus_related(i, cpu, cpu_sibling_mask);
arch/powerpc/kernel/smp.c
1601
add_cpu_to_smallcore_masks(cpu);
arch/powerpc/kernel/smp.c
1604
ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
arch/powerpc/kernel/smp.c
1605
update_mask_by_l2(cpu, &mask);
arch/powerpc/kernel/smp.c
1608
update_coregroup_mask(cpu, &mask);
arch/powerpc/kernel/smp.c
1611
chip_id = cpu_to_chip_id(cpu);
arch/powerpc/kernel/smp.c
1617
or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
arch/powerpc/kernel/smp.c
1620
cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
arch/powerpc/kernel/smp.c
1624
cpumask_and(mask, mask, cpu_node_mask(cpu));
arch/powerpc/kernel/smp.c
1628
or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
arch/powerpc/kernel/smp.c
1642
unsigned int cpu = raw_smp_processor_id();
arch/powerpc/kernel/smp.c
1651
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
arch/powerpc/kernel/smp.c
1654
smp_store_cpu_info(cpu);
arch/powerpc/kernel/smp.c
1656
rcutree_report_cpu_starting(cpu);
arch/powerpc/kernel/smp.c
1657
cpu_callin_map[cpu] = 1;
arch/powerpc/kernel/smp.c
1660
smp_ops->setup_cpu(cpu);
arch/powerpc/kernel/smp.c
1674
set_numa_node(numa_cpu_lookup_table[cpu]);
arch/powerpc/kernel/smp.c
1675
set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
arch/powerpc/kernel/smp.c
1678
add_cpu_to_masks(cpu);
arch/powerpc/kernel/smp.c
1686
struct cpumask *mask = cpu_l2_cache_mask(cpu);
arch/powerpc/kernel/smp.c
1691
if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
arch/powerpc/kernel/smp.c
1696
notify_cpu_starting(cpu);
arch/powerpc/kernel/smp.c
1697
set_cpu_online(cpu, true);
arch/powerpc/kernel/smp.c
1767
int arch_asym_cpu_priority(int cpu)
arch/powerpc/kernel/smp.c
1770
return -cpu / threads_per_core;
arch/powerpc/kernel/smp.c
1772
return -cpu;
arch/powerpc/kernel/smp.c
1778
int cpu = smp_processor_id();
arch/powerpc/kernel/smp.c
1791
remove_cpu_from_masks(cpu);
arch/powerpc/kernel/smp.c
1796
void __cpu_die(unsigned int cpu)
arch/powerpc/kernel/smp.c
1802
VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(&init_mm)));
arch/powerpc/kernel/smp.c
1804
cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
arch/powerpc/kernel/smp.c
1807
smp_ops->cpu_die(cpu);
arch/powerpc/kernel/smp.c
278
void smp_muxed_ipi_set_message(int cpu, int msg)
arch/powerpc/kernel/smp.c
280
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
arch/powerpc/kernel/smp.c
290
void smp_muxed_ipi_message_pass(int cpu, int msg)
arch/powerpc/kernel/smp.c
292
smp_muxed_ipi_set_message(cpu, msg);
arch/powerpc/kernel/smp.c
298
smp_ops->cause_ipi(cpu);
arch/powerpc/kernel/smp.c
352
static inline void do_message_pass(int cpu, int msg)
arch/powerpc/kernel/smp.c
355
smp_ops->message_pass(cpu, msg);
arch/powerpc/kernel/smp.c
358
smp_muxed_ipi_message_pass(cpu, msg);
arch/powerpc/kernel/smp.c
362
void arch_smp_send_reschedule(int cpu)
arch/powerpc/kernel/smp.c
365
do_message_pass(cpu, PPC_MSG_RESCHEDULE);
arch/powerpc/kernel/smp.c
369
void arch_send_call_function_single_ipi(int cpu)
arch/powerpc/kernel/smp.c
371
do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
arch/powerpc/kernel/smp.c
376
unsigned int cpu;
arch/powerpc/kernel/smp.c
378
for_each_cpu(cpu, mask)
arch/powerpc/kernel/smp.c
379
do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
arch/powerpc/kernel/smp.c
470
static void do_smp_send_nmi_ipi(int cpu, bool safe)
arch/powerpc/kernel/smp.c
472
if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
arch/powerpc/kernel/smp.c
475
if (cpu >= 0) {
arch/powerpc/kernel/smp.c
476
do_message_pass(cpu, PPC_MSG_NMI_IPI);
arch/powerpc/kernel/smp.c
494
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
arch/powerpc/kernel/smp.c
501
BUG_ON(cpu == me);
arch/powerpc/kernel/smp.c
502
BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
arch/powerpc/kernel/smp.c
518
if (cpu < 0) {
arch/powerpc/kernel/smp.c
523
cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
arch/powerpc/kernel/smp.c
530
do_smp_send_nmi_ipi(cpu, safe);
arch/powerpc/kernel/smp.c
559
int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
arch/powerpc/kernel/smp.c
561
return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
arch/powerpc/kernel/smp.c
564
int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
arch/powerpc/kernel/smp.c
566
return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
arch/powerpc/kernel/smp.c
573
unsigned int cpu;
arch/powerpc/kernel/smp.c
575
for_each_cpu(cpu, mask)
arch/powerpc/kernel/smp.c
576
do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
arch/powerpc/kernel/smp.c
595
int cpu;
arch/powerpc/kernel/smp.c
599
for_each_present_cpu(cpu) {
arch/powerpc/kernel/smp.c
600
if (cpu_online(cpu))
arch/powerpc/kernel/smp.c
611
do_smp_send_nmi_ipi(cpu, false);
arch/powerpc/kernel/smp.c
869
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
arch/powerpc/kernel/smp.c
871
int hw_cpu_id = get_hard_smp_processor_id(cpu);
arch/powerpc/kernel/smp.c
888
static struct thread_groups *__init get_thread_groups(int cpu,
arch/powerpc/kernel/smp.c
892
struct device_node *dn = of_get_cpu_node(cpu, NULL);
arch/powerpc/kernel/smp.c
893
struct thread_groups_list *cpu_tgl = &tgl[cpu];
arch/powerpc/kernel/smp.c
924
int cpu, int cpu_group_start)
arch/powerpc/kernel/smp.c
926
int first_thread = cpu_first_thread_sibling(cpu);
arch/powerpc/kernel/smp.c
929
zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
arch/powerpc/kernel/smp.c
946
static int __init init_thread_group_cache_map(int cpu, int cache_property)
arch/powerpc/kernel/smp.c
957
tg = get_thread_groups(cpu, cache_property, &err);
arch/powerpc/kernel/smp.c
962
cpu_group_start = get_cpu_thread_group_start(cpu, tg);
arch/powerpc/kernel/smp.c
970
mask = &per_cpu(thread_group_l1_cache_map, cpu);
arch/powerpc/kernel/smp.c
971
update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
arch/powerpc/kernel/smp.c
974
mask = &per_cpu(thread_group_l2_cache_map, cpu);
arch/powerpc/kernel/smp.c
975
update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
arch/powerpc/kernel/smp.c
976
mask = &per_cpu(thread_group_l3_cache_map, cpu);
arch/powerpc/kernel/smp.c
977
update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
arch/powerpc/kernel/stacktrace.c
162
unsigned int cpu;
arch/powerpc/kernel/stacktrace.c
165
for_each_cpu(cpu, mask) {
arch/powerpc/kernel/stacktrace.c
166
if (cpu == smp_processor_id()) {
arch/powerpc/kernel/stacktrace.c
173
if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
arch/powerpc/kernel/stacktrace.c
175
while (cpumask_test_cpu(cpu, mask) && delay_us) {
arch/powerpc/kernel/stacktrace.c
185
p = paca_ptrs[cpu];
arch/powerpc/kernel/stacktrace.c
187
cpumask_clear_cpu(cpu, mask);
arch/powerpc/kernel/stacktrace.c
189
pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
arch/powerpc/kernel/sysfs.c
100
struct cpu *cpu = container_of(dev, struct cpu, dev); \
arch/powerpc/kernel/sysfs.c
102
smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
arch/powerpc/kernel/sysfs.c
1026
cacheinfo_cpu_offline(cpu);
arch/powerpc/kernel/sysfs.c
1057
int cpu;
arch/powerpc/kernel/sysfs.c
1061
for_each_possible_cpu(cpu) {
arch/powerpc/kernel/sysfs.c
1062
device_create_file(get_cpu_device(cpu), attr);
arch/powerpc/kernel/sysfs.c
1072
int cpu;
arch/powerpc/kernel/sysfs.c
1078
for_each_possible_cpu(cpu) {
arch/powerpc/kernel/sysfs.c
1079
dev = get_cpu_device(cpu);
arch/powerpc/kernel/sysfs.c
109
struct cpu *cpu = container_of(dev, struct cpu, dev); \
arch/powerpc/kernel/sysfs.c
1092
int cpu;
arch/powerpc/kernel/sysfs.c
1096
for_each_possible_cpu(cpu) {
arch/powerpc/kernel/sysfs.c
1097
device_remove_file(get_cpu_device(cpu), attr);
arch/powerpc/kernel/sysfs.c
1106
int cpu;
arch/powerpc/kernel/sysfs.c
1111
for_each_possible_cpu(cpu) {
arch/powerpc/kernel/sysfs.c
1112
dev = get_cpu_device(cpu);
arch/powerpc/kernel/sysfs.c
114
smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
arch/powerpc/kernel/sysfs.c
1144
struct cpu *cpu = container_of(dev, struct cpu, dev);
arch/powerpc/kernel/sysfs.c
1146
return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
arch/powerpc/kernel/sysfs.c
1152
int cpu, r;
arch/powerpc/kernel/sysfs.c
1154
for_each_possible_cpu(cpu) {
arch/powerpc/kernel/sysfs.c
1155
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/powerpc/kernel/sysfs.c
1169
if (cpu_online(cpu) || c->hotpluggable) {
arch/powerpc/kernel/sysfs.c
1170
register_cpu(c, cpu);
arch/powerpc/kernel/sysfs.c
222
int cpu;
arch/powerpc/kernel/sysfs.c
225
for_each_possible_cpu(cpu)
arch/powerpc/kernel/sysfs.c
226
paca_ptrs[cpu]->dscr_default = dscr_default;
arch/powerpc/kernel/sysfs.c
269
unsigned int cpu = dev->id;
arch/powerpc/kernel/sysfs.c
271
smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
arch/powerpc/kernel/sysfs.c
298
unsigned int cpu = dev->id;
arch/powerpc/kernel/sysfs.c
306
smp_call_function_single(cpu, do_store_pw20_state, &value, 1);
arch/powerpc/kernel/sysfs.c
318
unsigned int cpu = dev->id;
arch/powerpc/kernel/sysfs.c
321
smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
arch/powerpc/kernel/sysfs.c
34
static DEFINE_PER_CPU(struct cpu, cpu_devices);
arch/powerpc/kernel/sysfs.c
367
unsigned int cpu = dev->id;
arch/powerpc/kernel/sysfs.c
381
smp_call_function_single(cpu, set_pw20_wait_entry_bit,
arch/powerpc/kernel/sysfs.c
391
unsigned int cpu = dev->id;
arch/powerpc/kernel/sysfs.c
393
smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
arch/powerpc/kernel/sysfs.c
420
unsigned int cpu = dev->id;
arch/powerpc/kernel/sysfs.c
428
smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);
arch/powerpc/kernel/sysfs.c
440
unsigned int cpu = dev->id;
arch/powerpc/kernel/sysfs.c
443
smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
arch/powerpc/kernel/sysfs.c
489
unsigned int cpu = dev->id;
arch/powerpc/kernel/sysfs.c
503
smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
arch/powerpc/kernel/sysfs.c
779
struct cpu *cpu = container_of(dev, struct cpu, dev);
arch/powerpc/kernel/sysfs.c
782
smp_call_function_single(cpu->dev.id, read_idle_purr, &val, 1);
arch/powerpc/kernel/sysfs.c
809
struct cpu *cpu = container_of(dev, struct cpu, dev);
arch/powerpc/kernel/sysfs.c
812
smp_call_function_single(cpu->dev.id, read_idle_spurr, &val, 1);
arch/powerpc/kernel/sysfs.c
836
static int register_cpu_online(unsigned int cpu)
arch/powerpc/kernel/sysfs.c
838
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/powerpc/kernel/sysfs.c
845
s->of_node = of_get_cpu_node(cpu, NULL);
arch/powerpc/kernel/sysfs.c
930
cacheinfo_cpu_online(cpu);
arch/powerpc/kernel/sysfs.c
935
static int unregister_cpu_online(unsigned int cpu)
arch/powerpc/kernel/sysfs.c
937
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/powerpc/kernel/sysfs.c
942
if (WARN_RATELIMIT(!c->hotpluggable, "cpu %d can't be offlined\n", cpu))
arch/powerpc/kernel/tau_6xx.c
105
int cpu = smp_processor_id();
arch/powerpc/kernel/tau_6xx.c
107
tau[cpu].interrupts++;
arch/powerpc/kernel/tau_6xx.c
109
TAUupdate(cpu);
arch/powerpc/kernel/tau_6xx.c
115
int cpu;
arch/powerpc/kernel/tau_6xx.c
119
cpu = smp_processor_id();
arch/powerpc/kernel/tau_6xx.c
122
TAUupdate(cpu);
arch/powerpc/kernel/tau_6xx.c
127
size = tau[cpu].high - tau[cpu].low;
arch/powerpc/kernel/tau_6xx.c
128
if (size > min_window && ! tau[cpu].grew) {
arch/powerpc/kernel/tau_6xx.c
132
tau[cpu].low += shrink;
arch/powerpc/kernel/tau_6xx.c
133
tau[cpu].high -= shrink;
arch/powerpc/kernel/tau_6xx.c
135
tau[cpu].low += 1;
arch/powerpc/kernel/tau_6xx.c
137
if ((tau[cpu].high - tau[cpu].low) != min_window){
arch/powerpc/kernel/tau_6xx.c
144
tau[cpu].grew = 0;
arch/powerpc/kernel/tau_6xx.c
146
set_thresholds(cpu);
arch/powerpc/kernel/tau_6xx.c
179
unsigned long cpu = smp_processor_id();
arch/powerpc/kernel/tau_6xx.c
183
tau[cpu].low = 5;
arch/powerpc/kernel/tau_6xx.c
184
tau[cpu].high = 120;
arch/powerpc/kernel/tau_6xx.c
186
set_thresholds(cpu);
arch/powerpc/kernel/tau_6xx.c
224
u32 cpu_temp_both(unsigned long cpu)
arch/powerpc/kernel/tau_6xx.c
226
return ((tau[cpu].high << 16) | tau[cpu].low);
arch/powerpc/kernel/tau_6xx.c
229
u32 cpu_temp(unsigned long cpu)
arch/powerpc/kernel/tau_6xx.c
231
return ((tau[cpu].high + tau[cpu].low) / 2);
arch/powerpc/kernel/tau_6xx.c
234
u32 tau_interrupts(unsigned long cpu)
arch/powerpc/kernel/tau_6xx.c
236
return (tau[cpu].interrupts);
arch/powerpc/kernel/tau_6xx.c
55
static void set_thresholds(unsigned long cpu)
arch/powerpc/kernel/tau_6xx.c
60
mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
arch/powerpc/kernel/tau_6xx.c
63
mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
arch/powerpc/kernel/tau_6xx.c
66
static void TAUupdate(int cpu)
arch/powerpc/kernel/tau_6xx.c
77
if (tau[cpu].low >= step_size) {
arch/powerpc/kernel/tau_6xx.c
78
tau[cpu].low -= step_size;
arch/powerpc/kernel/tau_6xx.c
79
tau[cpu].high -= (step_size - window_expand);
arch/powerpc/kernel/tau_6xx.c
81
tau[cpu].grew = 1;
arch/powerpc/kernel/tau_6xx.c
88
if (tau[cpu].high <= 127 - step_size) {
arch/powerpc/kernel/tau_6xx.c
89
tau[cpu].low += (step_size - window_expand);
arch/powerpc/kernel/tau_6xx.c
90
tau[cpu].high += step_size;
arch/powerpc/kernel/tau_6xx.c
92
tau[cpu].grew = 1;
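A minimal sketch of the tau_6xx.c shape: the thermal-assist interrupt keys a flat per-CPU array by smp_processor_id(), which is stable there because interrupt context cannot migrate. The struct and names are illustrative, not the real TAU code.

#include <linux/smp.h>
#include <linux/threads.h>

struct demo_tau {
	int low, high;		/* current temperature window */
	unsigned long interrupts;
};

static struct demo_tau demo_tau_state[NR_CPUS];

static void demo_tau_interrupt(void)
{
	int cpu = smp_processor_id();	/* safe: we are in irq context */

	demo_tau_state[cpu].interrupts++;
	/* ...then widen or shrink [low, high] for this CPU only... */
}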
arch/powerpc/kernel/time.c
682
struct device_node *cpu;
arch/powerpc/kernel/time.c
687
cpu = of_find_node_by_type(NULL, "cpu");
arch/powerpc/kernel/time.c
689
if (cpu) {
arch/powerpc/kernel/time.c
690
fp = of_get_property(cpu, name, NULL);
arch/powerpc/kernel/time.c
696
of_node_put(cpu);
arch/powerpc/kernel/time.c
830
static void register_decrementer_clockevent(int cpu)
arch/powerpc/kernel/time.c
832
struct clock_event_device *dec = &per_cpu(decrementers, cpu);
arch/powerpc/kernel/time.c
835
dec->cpumask = cpumask_of(cpu);
arch/powerpc/kernel/time.c
840
dec->name, dec->mult, dec->shift, cpu);
arch/powerpc/kernel/time.c
865
struct device_node *cpu;
arch/powerpc/kernel/time.c
872
cpu = of_find_node_by_type(NULL, "cpu");
arch/powerpc/kernel/time.c
874
if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
arch/powerpc/kernel/time.c
884
of_node_put(cpu);
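Sketch of what register_decrementer_clockevent() is doing in the time.c lines: each CPU registers its own per-CPU clock_event_device with the cpumask pinned to itself, so events from that decrementer are delivered locally. Frequency and delta values below are placeholders, not powerpc's real ones.

#include <linux/clockchips.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct clock_event_device, demo_decrementers);

static void demo_register_decrementer(int cpu)
{
	struct clock_event_device *dec = &per_cpu(demo_decrementers, cpu);

	dec->name = "demo_decrementer";
	dec->cpumask = cpumask_of(cpu);	/* fires on this CPU only */
	clockevents_config_and_register(dec, 512000000, 2, 0x7fffffff);
}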
arch/powerpc/kernel/traps.c
183
int cpu;
arch/powerpc/kernel/traps.c
190
cpu = smp_processor_id();
arch/powerpc/kernel/traps.c
192
if (cpu == die_owner)
arch/powerpc/kernel/traps.c
198
die_owner = cpu;
arch/powerpc/kernel/vdso.c
218
unsigned long cpu, node, val;
arch/powerpc/kernel/vdso.c
224
cpu = get_cpu();
arch/powerpc/kernel/vdso.c
225
WARN_ON_ONCE(cpu > 0xffff);
arch/powerpc/kernel/vdso.c
227
node = cpu_to_node(cpu);
arch/powerpc/kernel/vdso.c
230
val = (cpu & 0xffff) | ((node & 0xffff) << 16);
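The vdso.c lines pack the CPU and NUMA node into a single word, 16 bits each, so a vDSO getcpu() can later unpack both with shifts. A round-trip sketch of that encoding (the real code goes on to make the word visible to userspace):

#include <linux/smp.h>
#include <linux/topology.h>

static unsigned long demo_pack_cpu_node(void)
{
	unsigned long cpu, node, val;

	cpu = get_cpu();		/* pins us to this CPU */
	node = cpu_to_node(cpu);
	val = (cpu & 0xffff) | ((node & 0xffff) << 16);
	put_cpu();

	return val;	/* cpu = val & 0xffff; node = (val >> 16) & 0xffff */
}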
arch/powerpc/kernel/watchdog.c
149
int cpu = raw_smp_processor_id();
arch/powerpc/kernel/watchdog.c
152
pr_emerg("CPU %d Hard LOCKUP\n", cpu);
arch/powerpc/kernel/watchdog.c
154
cpu, tb, per_cpu(wd_timer_tb, cpu),
arch/powerpc/kernel/watchdog.c
155
tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
arch/powerpc/kernel/watchdog.c
183
static bool set_cpu_stuck(int cpu)
arch/powerpc/kernel/watchdog.c
185
cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
arch/powerpc/kernel/watchdog.c
186
cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
arch/powerpc/kernel/watchdog.c
201
static void watchdog_smp_panic(int cpu)
arch/powerpc/kernel/watchdog.c
214
if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
arch/powerpc/kernel/watchdog.c
221
if (c == cpu)
arch/powerpc/kernel/watchdog.c
235
cpu, cpumask_pr_args(&wd_smp_cpus_ipi));
arch/powerpc/kernel/watchdog.c
237
cpu, tb, last_reset, tb_to_ns(tb - last_reset) / 1000000);
arch/powerpc/kernel/watchdog.c
241
trigger_allbutcpu_cpu_backtrace(cpu);
arch/powerpc/kernel/watchdog.c
266
static void wd_smp_clear_cpu_pending(int cpu)
arch/powerpc/kernel/watchdog.c
268
if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
arch/powerpc/kernel/watchdog.c
269
if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
arch/powerpc/kernel/watchdog.c
274
cpu, get_tb());
arch/powerpc/kernel/watchdog.c
282
cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
arch/powerpc/kernel/watchdog.c
318
cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
arch/powerpc/kernel/watchdog.c
349
static void watchdog_timer_interrupt(int cpu)
arch/powerpc/kernel/watchdog.c
353
per_cpu(wd_timer_tb, cpu) = tb;
arch/powerpc/kernel/watchdog.c
355
wd_smp_clear_cpu_pending(cpu);
arch/powerpc/kernel/watchdog.c
358
watchdog_smp_panic(cpu);
arch/powerpc/kernel/watchdog.c
375
int cpu = raw_smp_processor_id();
arch/powerpc/kernel/watchdog.c
381
if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
arch/powerpc/kernel/watchdog.c
387
if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
arch/powerpc/kernel/watchdog.c
395
if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
arch/powerpc/kernel/watchdog.c
406
set_cpu_stuck(cpu);
arch/powerpc/kernel/watchdog.c
411
cpu, (void *)regs->nip);
arch/powerpc/kernel/watchdog.c
413
cpu, tb, per_cpu(wd_timer_tb, cpu),
arch/powerpc/kernel/watchdog.c
414
tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
arch/powerpc/kernel/watchdog.c
423
trigger_allbutcpu_cpu_backtrace(cpu);
arch/powerpc/kernel/watchdog.c
444
int cpu = smp_processor_id();
arch/powerpc/kernel/watchdog.c
449
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
arch/powerpc/kernel/watchdog.c
452
watchdog_timer_interrupt(cpu);
arch/powerpc/kernel/watchdog.c
462
int cpu = smp_processor_id();
arch/powerpc/kernel/watchdog.c
465
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
arch/powerpc/kernel/watchdog.c
469
if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
arch/powerpc/kernel/watchdog.c
470
per_cpu(wd_timer_tb, cpu) = tb;
arch/powerpc/kernel/watchdog.c
471
wd_smp_clear_cpu_pending(cpu);
arch/powerpc/kernel/watchdog.c
479
int cpu = smp_processor_id();
arch/powerpc/kernel/watchdog.c
482
if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
arch/powerpc/kernel/watchdog.c
490
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
arch/powerpc/kernel/watchdog.c
494
cpumask_set_cpu(cpu, &wd_cpus_enabled);
arch/powerpc/kernel/watchdog.c
496
cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
arch/powerpc/kernel/watchdog.c
508
static int start_watchdog_on_cpu(unsigned int cpu)
arch/powerpc/kernel/watchdog.c
510
return smp_call_function_single(cpu, start_watchdog, NULL, true);
arch/powerpc/kernel/watchdog.c
516
int cpu = smp_processor_id();
arch/powerpc/kernel/watchdog.c
519
if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
arch/powerpc/kernel/watchdog.c
525
cpumask_clear_cpu(cpu, &wd_cpus_enabled);
arch/powerpc/kernel/watchdog.c
528
wd_smp_clear_cpu_pending(cpu);
arch/powerpc/kernel/watchdog.c
531
static int stop_watchdog_on_cpu(unsigned int cpu)
arch/powerpc/kernel/watchdog.c
533
return smp_call_function_single(cpu, stop_watchdog, NULL, true);
arch/powerpc/kernel/watchdog.c
555
int cpu;
arch/powerpc/kernel/watchdog.c
557
for_each_cpu(cpu, &wd_cpus_enabled)
arch/powerpc/kernel/watchdog.c
558
stop_watchdog_on_cpu(cpu);
arch/powerpc/kernel/watchdog.c
563
int cpu;
arch/powerpc/kernel/watchdog.c
566
for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
arch/powerpc/kernel/watchdog.c
567
start_watchdog_on_cpu(cpu);
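Condensed sketch of the watchdog.c bookkeeping above: enabled CPUs live in one cpumask, CPUs that have not checked in recently in another, and enabling is executed on the target CPU itself via smp_call_function_single(). Mask names mirror the listing; the logic is deliberately abbreviated.

#include <linux/cpumask.h>
#include <linux/smp.h>

static cpumask_t demo_wd_cpus_enabled;
static cpumask_t demo_wd_cpus_pending;

static void demo_start_watchdog(void *arg)
{
	int cpu = smp_processor_id();

	cpumask_set_cpu(cpu, &demo_wd_cpus_enabled);
	cpumask_set_cpu(cpu, &demo_wd_cpus_pending);
}

static int demo_start_watchdog_on_cpu(unsigned int cpu)
{
	/* Run the enable path on @cpu and wait for completion. */
	return smp_call_function_single(cpu, demo_start_watchdog, NULL, true);
}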
arch/powerpc/kexec/core_64.c
234
int cpu = 0;
arch/powerpc/kexec/core_64.c
238
for_each_present_cpu(cpu) {
arch/powerpc/kexec/core_64.c
239
if (!cpu_online(cpu)) {
arch/powerpc/kexec/core_64.c
241
cpu);
arch/powerpc/kexec/core_64.c
242
WARN_ON(add_cpu(cpu));
arch/powerpc/kexec/crash.c
246
noinstr static void __maybe_unused crash_kexec_wait_realmode(int cpu)
arch/powerpc/kexec/crash.c
253
if (i == cpu)
arch/powerpc/kexec/crash.c
267
static inline void crash_kexec_wait_realmode(int cpu) {}
arch/powerpc/kexec/crash.c
85
int cpu = smp_processor_id();
arch/powerpc/kexec/crash.c
88
if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
arch/powerpc/kexec/crash.c
89
crash_save_cpu(regs, cpu);
arch/powerpc/kexec/crash.c
90
cpumask_set_cpu(cpu, &cpus_state_saved);
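The crash.c hits guard crash_save_cpu() with a cpumask so a CPU that re-enters the crash path does not dump its registers twice. A sketch of that test-then-mark idiom, under the assumption that it runs with other CPUs already halted (the real code relies on that, so no atomicity beyond the cpumask ops is needed):

#include <linux/cpumask.h>
#include <linux/kexec.h>
#include <linux/smp.h>

static cpumask_t demo_cpus_state_saved;

static void demo_crash_save_this_cpu(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (!cpumask_test_cpu(cpu, &demo_cpus_state_saved)) {
		crash_save_cpu(regs, cpu);	/* generic kexec helper */
		cpumask_set_cpu(cpu, &demo_cpus_state_saved);
	}
}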
arch/powerpc/kvm/book3s.c
791
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/powerpc/kvm/book3s.c
793
vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
arch/powerpc/kvm/book3s_hv.c
1504
int thr, cpu, pcpu, nthreads;
arch/powerpc/kvm/book3s_hv.c
1510
cpu = vcpu->vcpu_id & ~(nthreads - 1);
arch/powerpc/kvm/book3s_hv.c
1511
for (thr = 0; thr < nthreads; ++thr, ++cpu) {
arch/powerpc/kvm/book3s_hv.c
1512
v = kvmppc_find_vcpu(vcpu->kvm, cpu);
arch/powerpc/kvm/book3s_hv.c
1520
pcpu = READ_ONCE(v->cpu);
arch/powerpc/kvm/book3s_hv.c
183
static bool kvmppc_ipi_thread(int cpu)
arch/powerpc/kvm/book3s_hv.c
193
msg |= get_hard_smp_processor_id(cpu);
arch/powerpc/kvm/book3s_hv.c
202
if (cpu_first_thread_sibling(cpu) ==
arch/powerpc/kvm/book3s_hv.c
204
msg |= cpu_thread_in_core(cpu);
arch/powerpc/kvm/book3s_hv.c
214
if (cpu >= 0 && cpu < nr_cpu_ids) {
arch/powerpc/kvm/book3s_hv.c
215
if (paca_ptrs[cpu]->kvm_hstate.xics_phys) {
arch/powerpc/kvm/book3s_hv.c
216
xics_wake_cpu(cpu);
arch/powerpc/kvm/book3s_hv.c
219
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
arch/powerpc/kvm/book3s_hv.c
229
int cpu;
arch/powerpc/kvm/book3s_hv.c
243
cpu = READ_ONCE(vcpu->arch.thread_cpu);
arch/powerpc/kvm/book3s_hv.c
244
if (cpu >= 0 && kvmppc_ipi_thread(cpu))
arch/powerpc/kvm/book3s_hv.c
248
cpu = vcpu->cpu;
arch/powerpc/kvm/book3s_hv.c
249
if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
arch/powerpc/kvm/book3s_hv.c
250
smp_send_reschedule(cpu);
arch/powerpc/kvm/book3s_hv.c
318
static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
arch/powerpc/kvm/book3s_hv.c
3193
static int kvmppc_grab_hwthread(int cpu)
arch/powerpc/kvm/book3s_hv.c
3198
tpaca = paca_ptrs[cpu];
arch/powerpc/kvm/book3s_hv.c
3219
pr_err("KVM: couldn't grab cpu %d\n", cpu);
arch/powerpc/kvm/book3s_hv.c
3227
static void kvmppc_release_hwthread(int cpu)
arch/powerpc/kvm/book3s_hv.c
3231
tpaca = paca_ptrs[cpu];
arch/powerpc/kvm/book3s_hv.c
3240
static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
arch/powerpc/kvm/book3s_hv.c
3251
cpu = cpu_first_tlb_thread_sibling(cpu);
arch/powerpc/kvm/book3s_hv.c
3252
for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
arch/powerpc/kvm/book3s_hv.c
3265
for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
arch/powerpc/kvm/book3s_hv.c
3336
int cpu;
arch/powerpc/kvm/book3s_hv.c
3339
cpu = vc->pcpu;
arch/powerpc/kvm/book3s_hv.c
3345
cpu += vcpu->arch.ptid;
arch/powerpc/kvm/book3s_hv.c
3346
vcpu->cpu = vc->pcpu;
arch/powerpc/kvm/book3s_hv.c
3347
vcpu->arch.thread_cpu = cpu;
arch/powerpc/kvm/book3s_hv.c
3349
tpaca = paca_ptrs[cpu];
arch/powerpc/kvm/book3s_hv.c
3351
tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
arch/powerpc/kvm/book3s_hv.c
3356
if (cpu != smp_processor_id())
arch/powerpc/kvm/book3s_hv.c
3357
kvmppc_ipi_thread(cpu);
arch/powerpc/kvm/book3s_hv.c
3362
int cpu = smp_processor_id();
arch/powerpc/kvm/book3s_hv.c
3375
if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
arch/powerpc/kvm/book3s_hv.c
3385
if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
arch/powerpc/kvm/book3s_hv.c
3386
pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
arch/powerpc/kvm/book3s_hv.c
3396
int cpu = smp_processor_id();
arch/powerpc/kvm/book3s_hv.c
3400
if (cpu_thread_in_subcore(cpu))
arch/powerpc/kvm/book3s_hv.c
3405
if (cpu_online(cpu + thr))
arch/powerpc/kvm/book3s_hv.c
3410
if (kvmppc_grab_hwthread(cpu + thr)) {
arch/powerpc/kvm/book3s_hv.c
3413
kvmppc_release_hwthread(cpu + thr);
arch/powerpc/kvm/book3s_hv.c
3435
int cpu;
arch/powerpc/kvm/book3s_hv.c
3437
for_each_possible_cpu(cpu) {
arch/powerpc/kvm/book3s_hv.c
3438
struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
arch/powerpc/kvm/book3s_hv.c
3718
static inline int kvmppc_clear_host_core(unsigned int cpu)
arch/powerpc/kvm/book3s_hv.c
3722
if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
arch/powerpc/kvm/book3s_hv.c
3729
core = cpu >> threads_shift;
arch/powerpc/kvm/book3s_hv.c
3739
static inline int kvmppc_set_host_core(unsigned int cpu)
arch/powerpc/kvm/book3s_hv.c
3743
if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
arch/powerpc/kvm/book3s_hv.c
3750
core = cpu >> threads_shift;
arch/powerpc/kvm/book3s_hv.c
4123
void kvmhv_set_l2_counters_status(int cpu, bool status)
arch/powerpc/kvm/book3s_hv.c
4128
lppaca_of(cpu).l2_counters_enable = 1;
arch/powerpc/kvm/book3s_hv.c
4130
lppaca_of(cpu).l2_counters_enable = 0;
arch/powerpc/kvm/book3s_hv.c
4136
int cpu;
arch/powerpc/kvm/book3s_hv.c
4138
for_each_present_cpu(cpu) {
arch/powerpc/kvm/book3s_hv.c
4139
kvmhv_set_l2_counters_status(cpu, true);
arch/powerpc/kvm/book3s_hv.c
4146
int cpu;
arch/powerpc/kvm/book3s_hv.c
4148
for_each_present_cpu(cpu) {
arch/powerpc/kvm/book3s_hv.c
4149
kvmhv_set_l2_counters_status(cpu, false);
arch/powerpc/kvm/book3s_hv.c
4923
vcpu->cpu = pcpu;
arch/powerpc/kvm/book3s_hv.c
5000
vcpu->cpu = -1;
arch/powerpc/kvm/book3s_hv.c
5077
vcpu->cpu = -1;
arch/powerpc/kvm/book3s_hv.c
5630
int cpu, core;
arch/powerpc/kvm/book3s_hv.c
5654
for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
arch/powerpc/kvm/book3s_hv.c
5655
if (!cpu_online(cpu))
arch/powerpc/kvm/book3s_hv.c
5658
core = cpu >> threads_shift;
arch/powerpc/kvm/book3s_hv.c
6580
int cpu = first_cpu + j;
arch/powerpc/kvm/book3s_hv.c
6582
paca_ptrs[cpu]->sibling_subcore_state =
arch/powerpc/kvm/book3s_hv_builtin.c
202
void kvmhv_rm_send_ipi(int cpu)
arch/powerpc/kvm/book3s_hv_builtin.c
209
msg |= get_hard_smp_processor_id(cpu);
arch/powerpc/kvm/book3s_hv_builtin.c
216
cpu_first_thread_sibling(cpu) ==
arch/powerpc/kvm/book3s_hv_builtin.c
218
msg |= cpu_thread_in_core(cpu);
arch/powerpc/kvm/book3s_hv_builtin.c
228
xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
arch/powerpc/kvm/book3s_hv_builtin.c
232
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
arch/powerpc/kvm/book3s_hv_builtin.c
241
int cpu = vc->pcpu;
arch/powerpc/kvm/book3s_hv_builtin.c
245
for (; active; active >>= 1, ++cpu)
arch/powerpc/kvm/book3s_hv_builtin.c
247
kvmhv_rm_send_ipi(cpu);
arch/powerpc/kvm/book3s_hv_rm_mmu.c
35
int cpu;
arch/powerpc/kvm/book3s_hv_rm_mmu.c
57
cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
arch/powerpc/kvm/book3s_hv_rm_mmu.c
58
cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
arch/powerpc/kvm/book3s_hv_rm_xics.c
131
int cpu;
arch/powerpc/kvm/book3s_hv_rm_xics.c
149
cpu = vcpu->arch.thread_cpu;
arch/powerpc/kvm/book3s_hv_rm_xics.c
150
if (cpu < 0 || cpu >= nr_cpu_ids) {
arch/powerpc/kvm/book3s_hv_rm_xics.c
164
kvmhv_rm_send_ipi(cpu);
arch/powerpc/kvm/book3s_hv_rm_xics.c
808
int cpu = smp_processor_id();
arch/powerpc/kvm/book3s_hv_rm_xics.c
810
raddr = per_cpu_ptr(addr, cpu);
arch/powerpc/kvm/book3s_hv_rm_xics.c
910
unsigned int cpu = smp_processor_id();
arch/powerpc/kvm/book3s_hv_rm_xics.c
913
core = cpu >> threads_shift;
arch/powerpc/kvm/book3s_pr.c
132
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
arch/powerpc/kvm/book3s_pr.c
150
vcpu->cpu = smp_processor_id();
arch/powerpc/kvm/book3s_pr.c
188
vcpu->cpu = -1;
arch/powerpc/kvm/book3s_xive.c
1865
static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
arch/powerpc/kvm/book3s_xive.c
1870
return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
arch/powerpc/kvm/book3s_xive.c
1873
int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
arch/powerpc/kvm/book3s_xive.c
1877
if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
arch/powerpc/kvm/book3s_xive.c
1890
vp_id = kvmppc_xive_vp(xive, cpu);
arch/powerpc/kvm/book3s_xive.c
1902
struct kvm_vcpu *vcpu, u32 cpu)
arch/powerpc/kvm/book3s_xive.c
1909
pr_devel("connect_vcpu(cpu=%d)\n", cpu);
arch/powerpc/kvm/book3s_xive.c
1923
r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
arch/powerpc/kvm/book3s_xive.c
1936
xc->server_num = cpu;
arch/powerpc/kvm/book3s_xive.c
1946
pr_err("inconsistent save-restore setup for VCPU %d\n", cpu);
arch/powerpc/kvm/book3s_xive.c
2005
i, cpu);
arch/powerpc/kvm/book3s_xive.h
303
int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp);
arch/powerpc/kvm/booke.c
2109
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/powerpc/kvm/booke.c
2111
vcpu->cpu = smp_processor_id();
arch/powerpc/kvm/booke.c
2118
vcpu->cpu = -1;
arch/powerpc/kvm/booke.c
2175
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/powerpc/kvm/booke.c
2177
vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
arch/powerpc/kvm/booke.h
85
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
arch/powerpc/kvm/e500.c
299
static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
arch/powerpc/kvm/e500.c
301
kvmppc_booke_vcpu_load(vcpu, cpu);
arch/powerpc/kvm/e500mc.c
117
static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
arch/powerpc/kvm/e500mc.c
121
kvmppc_booke_vcpu_load(vcpu, cpu);
arch/powerpc/kvm/mpic.c
1127
int cpu)
arch/powerpc/kvm/mpic.c
1163
src->destmask &= ~(1 << cpu);
arch/powerpc/kvm/mpic.c
1179
int cpu = vcpu->arch.irq_cpu_id;
arch/powerpc/kvm/mpic.c
1185
kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));
arch/powerpc/kvm/mpic.c
1733
u32 cpu)
arch/powerpc/kvm/mpic.c
1742
if (cpu < 0 || cpu >= MAX_CPU)
arch/powerpc/kvm/mpic.c
1747
if (opp->dst[cpu].vcpu) {
arch/powerpc/kvm/mpic.c
1756
opp->dst[cpu].vcpu = vcpu;
arch/powerpc/kvm/mpic.c
1757
opp->nb_cpus = max(opp->nb_cpus, cpu + 1);
arch/powerpc/kvm/mpic.c
1760
vcpu->arch.irq_cpu_id = cpu;
arch/powerpc/kvm/powerpc.c
817
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/powerpc/kvm/powerpc.c
829
kvmppc_core_vcpu_load(vcpu, cpu);
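All of the KVM vcpu_load lines funnel through one indirection: generic kvm_arch_vcpu_load() calls kvmppc_core_vcpu_load(), which dispatches via an ops table to whichever flavour (HV, PR, booke, e500) backs the VM. The shape of that dispatch, reduced to a self-contained sketch with hypothetical types:

struct demo_vcpu;

struct demo_kvm_ops {
	/* @cpu is the physical CPU the vcpu is being loaded onto. */
	void (*vcpu_load)(struct demo_vcpu *vcpu, int cpu);
};

static void demo_core_vcpu_load(const struct demo_kvm_ops *ops,
				struct demo_vcpu *vcpu, int cpu)
{
	ops->vcpu_load(vcpu, cpu);
}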
arch/powerpc/lib/code-patching.c
105
static int text_area_cpu_up(unsigned int cpu)
arch/powerpc/lib/code-patching.c
114
cpu);
arch/powerpc/lib/code-patching.c
133
static int text_area_cpu_down(unsigned int cpu)
arch/powerpc/lib/code-patching.c
151
static int text_area_cpu_up_mm(unsigned int cpu)
arch/powerpc/lib/code-patching.c
193
static int text_area_cpu_down_mm(unsigned int cpu)
arch/powerpc/lib/qspinlock.c
101
static inline u32 encode_tail_cpu(int cpu)
arch/powerpc/lib/qspinlock.c
103
return (cpu + 1) << _Q_TAIL_CPU_OFFSET;
arch/powerpc/lib/qspinlock.c
19
int cpu;
arch/powerpc/lib/qspinlock.c
450
int cpu = get_owner_cpu(val);
arch/powerpc/lib/qspinlock.c
451
if (numa_node_id() != cpu_to_node(cpu))
arch/powerpc/lib/qspinlock.c
558
node->cpu = smp_processor_id();
arch/powerpc/lib/qspinlock.c
562
tail = encode_tail_cpu(node->cpu);
arch/powerpc/lib/qspinlock.c
688
int next_cpu = next->cpu;
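qspinlock.c stores the queue tail as (cpu + 1) shifted into the lock word; the +1 keeps an all-zero tail meaning "no waiter". A sketch of the encode/decode pair, with the bit offset chosen arbitrarily here rather than taken from the real _Q_TAIL_CPU_OFFSET:

#define DEMO_TAIL_CPU_OFFSET	16

static inline unsigned int demo_encode_tail_cpu(int cpu)
{
	return (cpu + 1) << DEMO_TAIL_CPU_OFFSET;
}

static inline int demo_decode_tail_cpu(unsigned int val)
{
	return (int)(val >> DEMO_TAIL_CPU_OFFSET) - 1;	/* -1 == no waiter */
}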
arch/powerpc/mm/book3s64/hash_utils.c
2329
int cpu = raw_smp_processor_id();
arch/powerpc/mm/book3s64/hash_utils.c
2334
last_group = stress_hpt_struct[cpu].last_group[g];
arch/powerpc/mm/book3s64/hash_utils.c
2342
stress_hpt_struct[cpu].last_group[g] = -1;
arch/powerpc/mm/book3s64/hash_utils.c
2350
int cpu = raw_smp_processor_id();
arch/powerpc/mm/book3s64/hash_utils.c
2352
last_group = stress_hpt_struct[cpu].last_group[stress_nr_groups() - 1];
arch/powerpc/mm/book3s64/hash_utils.c
2367
stress_hpt_struct[cpu].last_group[stress_nr_groups() - 1] = -1;
arch/powerpc/mm/book3s64/hash_utils.c
2383
memmove(&stress_hpt_struct[cpu].last_group[1],
arch/powerpc/mm/book3s64/hash_utils.c
2384
&stress_hpt_struct[cpu].last_group[0],
arch/powerpc/mm/book3s64/hash_utils.c
2386
stress_hpt_struct[cpu].last_group[0] = hpte_group;
arch/powerpc/mm/book3s64/mmu_context.c
340
int cpu = smp_processor_id();
arch/powerpc/mm/book3s64/mmu_context.c
342
clear_tasks_mm_cpumask(cpu);
arch/powerpc/mm/book3s64/radix_tlb.c
666
int cpu = smp_processor_id();
arch/powerpc/mm/book3s64/radix_tlb.c
703
if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
arch/powerpc/mm/book3s64/radix_tlb.c
705
cpumask_clear_cpu(cpu, mm_cpumask(mm));
arch/powerpc/mm/book3s64/radix_tlb.c
768
int cpu = smp_processor_id();
arch/powerpc/mm/book3s64/radix_tlb.c
772
if (active_cpus == 1 && cpumask_test_cpu(cpu, mm_cpumask(mm))) {
arch/powerpc/mm/book3s64/radix_tlb.c
825
if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
arch/powerpc/mm/mmu_context.c
106
VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(prev)));
arch/powerpc/mm/mmu_context.c
46
int cpu = smp_processor_id();
arch/powerpc/mm/mmu_context.c
50
if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
arch/powerpc/mm/mmu_context.c
52
cpumask_set_cpu(cpu, mm_cpumask(next));
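The mm/mmu_context.c lines show the mm_cpumask protocol: on switch-in, a CPU marks itself in the incoming mm's cpumask so later TLB shootdowns know where the mm has been active. A minimal sketch of the switch-in side:

#include <linux/mm_types.h>
#include <linux/smp.h>

static void demo_note_mm_use(struct mm_struct *next)
{
	int cpu = smp_processor_id();

	/* Test first: the test is the hot path, the set is the rare path. */
	if (!cpumask_test_cpu(cpu, mm_cpumask(next)))
		cpumask_set_cpu(cpu, mm_cpumask(next));
}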
arch/powerpc/mm/mmu_decl.h
113
void mmu_init_secondary(int cpu);
arch/powerpc/mm/nohash/44x.c
220
void __init mmu_init_secondary(int cpu)
arch/powerpc/mm/nohash/mmu_context.c
100
unsigned int cpu, max, i;
arch/powerpc/mm/nohash/mmu_context.c
127
for_each_cpu(cpu, mm_cpumask(mm)) {
arch/powerpc/mm/nohash/mmu_context.c
128
for (i = cpu_first_thread_sibling(cpu);
arch/powerpc/mm/nohash/mmu_context.c
129
i <= cpu_last_thread_sibling(cpu); i++) {
arch/powerpc/mm/nohash/mmu_context.c
133
cpu = i - 1;
arch/powerpc/mm/nohash/mmu_context.c
152
int cpu = smp_processor_id();
arch/powerpc/mm/nohash/mmu_context.c
166
__clear_bit(id, stale_map[cpu]);
arch/powerpc/mm/nohash/mmu_context.c
185
int cpu = smp_processor_id();
arch/powerpc/mm/nohash/mmu_context.c
198
__clear_bit(id, stale_map[cpu]);
arch/powerpc/mm/nohash/mmu_context.c
223
unsigned int i, cpu = smp_processor_id();
arch/powerpc/mm/nohash/mmu_context.c
283
if (IS_ENABLED(CONFIG_SMP) && test_bit(id, stale_map[cpu])) {
arch/powerpc/mm/nohash/mmu_context.c
287
for (i = cpu_first_thread_sibling(cpu);
arch/powerpc/mm/nohash/mmu_context.c
288
i <= cpu_last_thread_sibling(cpu); i++) {
arch/powerpc/mm/nohash/mmu_context.c
339
static int mmu_ctx_cpu_prepare(unsigned int cpu)
arch/powerpc/mm/nohash/mmu_context.c
344
if (cpu == boot_cpuid)
arch/powerpc/mm/nohash/mmu_context.c
347
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
arch/powerpc/mm/nohash/mmu_context.c
351
static int mmu_ctx_cpu_dead(unsigned int cpu)
arch/powerpc/mm/nohash/mmu_context.c
354
if (cpu == boot_cpuid)
arch/powerpc/mm/nohash/mmu_context.c
357
kfree(stale_map[cpu]);
arch/powerpc/mm/nohash/mmu_context.c
358
stale_map[cpu] = NULL;
arch/powerpc/mm/nohash/mmu_context.c
361
clear_tasks_mm_cpumask(cpu);
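The nohash/mmu_context.c hotplug hooks pair an allocation with a teardown: a per-CPU stale-context bitmap is allocated when the CPU is prepared and freed when it dies, with the boot CPU skipped in the real code. A sketch of that pairing wired through the cpuhp state machine (state name is made up):

#include <linux/cpuhotplug.h>
#include <linux/slab.h>
#include <linux/threads.h>

#define DEMO_CTX_MAP_SIZE	64

static void *demo_stale_map[NR_CPUS];

static int demo_ctx_cpu_prepare(unsigned int cpu)
{
	demo_stale_map[cpu] = kzalloc(DEMO_CTX_MAP_SIZE, GFP_KERNEL);
	return demo_stale_map[cpu] ? 0 : -ENOMEM;
}

static int demo_ctx_cpu_dead(unsigned int cpu)
{
	kfree(demo_stale_map[cpu]);
	demo_stale_map[cpu] = NULL;
	return 0;
}

static int __init demo_ctx_init(void)
{
	int ret;

	/* prepare/dead callbacks run on a control CPU, not the target CPU */
	ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
					"demo/mmuctx:prepare",
					demo_ctx_cpu_prepare,
					demo_ctx_cpu_dead);
	return ret < 0 ? ret : 0;
}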
arch/powerpc/mm/numa.c
1060
unsigned int cpu, count;
arch/powerpc/mm/numa.c
1073
for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
arch/powerpc/mm/numa.c
1074
if (cpumask_test_cpu(cpu,
arch/powerpc/mm/numa.c
1077
pr_cont(" %u", cpu);
arch/powerpc/mm/numa.c
1081
pr_cont("-%u", cpu - 1);
arch/powerpc/mm/numa.c
1158
int cpu;
arch/powerpc/mm/numa.c
1190
for_each_possible_cpu(cpu) {
arch/powerpc/mm/numa.c
1199
numa_setup_cpu(cpu);
arch/powerpc/mm/numa.c
1379
static long vphn_get_associativity(unsigned long cpu,
arch/powerpc/mm/numa.c
138
unsigned int cpu;
arch/powerpc/mm/numa.c
1384
rc = hcall_vphn(get_hard_smp_processor_id(cpu),
arch/powerpc/mm/numa.c
140
for_each_possible_cpu(cpu)
arch/powerpc/mm/numa.c
141
numa_cpu_lookup_table[cpu] = -1;
arch/powerpc/mm/numa.c
1412
void find_and_update_cpu_nid(int cpu)
arch/powerpc/mm/numa.c
1418
if (vphn_get_associativity(cpu, associativity))
arch/powerpc/mm/numa.c
1429
set_cpu_numa_node(cpu, new_nid);
arch/powerpc/mm/numa.c
1431
pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid);
arch/powerpc/mm/numa.c
1434
int cpu_to_coregroup_id(int cpu)
arch/powerpc/mm/numa.c
1439
if (cpu < 0 || cpu > nr_cpu_ids)
arch/powerpc/mm/numa.c
144
void map_cpu_to_node(int cpu, int node)
arch/powerpc/mm/numa.c
1448
if (vphn_get_associativity(cpu, associativity))
arch/powerpc/mm/numa.c
1456
return cpu_to_core_id(cpu);
arch/powerpc/mm/numa.c
146
update_numa_cpu_lookup_table(cpu, node);
arch/powerpc/mm/numa.c
148
if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) {
arch/powerpc/mm/numa.c
149
pr_debug("adding cpu %d to node %d\n", cpu, node);
arch/powerpc/mm/numa.c
150
cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
arch/powerpc/mm/numa.c
155
void unmap_cpu_from_node(unsigned long cpu)
arch/powerpc/mm/numa.c
157
int node = numa_cpu_lookup_table[cpu];
arch/powerpc/mm/numa.c
159
if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
arch/powerpc/mm/numa.c
160
cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
arch/powerpc/mm/numa.c
161
pr_debug("removing cpu %lu from node %d\n", cpu, node);
arch/powerpc/mm/numa.c
163
pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node);
arch/powerpc/mm/numa.c
704
struct device_node *cpu;
arch/powerpc/mm/numa.c
731
cpu = of_get_cpu_node(lcpu, NULL);
arch/powerpc/mm/numa.c
733
if (!cpu) {
arch/powerpc/mm/numa.c
741
nid = of_node_to_nid_single(cpu);
arch/powerpc/mm/numa.c
742
of_node_put(cpu);
arch/powerpc/mm/numa.c
766
static void verify_cpu_node_mapping(int cpu, int node)
arch/powerpc/mm/numa.c
771
base = cpu_first_thread_sibling(cpu);
arch/powerpc/mm/numa.c
776
if (sibling == cpu || cpu_is_offline(sibling))
arch/powerpc/mm/numa.c
781
" to the same node!\n", cpu, sibling);
arch/powerpc/mm/numa.c
788
static int ppc_numa_cpu_prepare(unsigned int cpu)
arch/powerpc/mm/numa.c
792
nid = numa_setup_cpu(cpu);
arch/powerpc/mm/numa.c
793
verify_cpu_node_mapping(cpu, nid);
arch/powerpc/mm/numa.c
797
static int ppc_numa_cpu_dead(unsigned int cpu)
arch/powerpc/mm/numa.c
933
struct device_node *cpu;
arch/powerpc/mm/numa.c
948
cpu = of_get_cpu_node(i, NULL);
arch/powerpc/mm/numa.c
949
BUG_ON(!cpu);
arch/powerpc/mm/numa.c
951
associativity = of_get_associativity(cpu);
arch/powerpc/mm/numa.c
956
of_node_put(cpu);
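numa.c maintains two views of the CPU/node mapping that must stay in sync: a flat lookup table (cpu to node) and a per-node cpumask (node to cpus). A sketch of map/unmap keeping both sides consistent, with sizes chosen for illustration:

#include <linux/cpumask.h>
#include <linux/threads.h>

#define DEMO_MAX_NODES	4

static int demo_cpu_to_node[NR_CPUS];
static cpumask_t demo_node_to_cpus[DEMO_MAX_NODES];

static void demo_map_cpu_to_node(int cpu, int node)
{
	demo_cpu_to_node[cpu] = node;
	if (!cpumask_test_cpu(cpu, &demo_node_to_cpus[node]))
		cpumask_set_cpu(cpu, &demo_node_to_cpus[node]);
}

static void demo_unmap_cpu_from_node(int cpu)
{
	int node = demo_cpu_to_node[cpu];

	cpumask_clear_cpu(cpu, &demo_node_to_cpus[node]);
	demo_cpu_to_node[cpu] = -1;	/* -1 == unmapped, as in the listing */
}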
arch/powerpc/perf/core-book3s.c
2510
static int power_pmu_prepare_cpu(unsigned int cpu)
arch/powerpc/perf/core-book3s.c
2512
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
arch/powerpc/perf/core-fsl-emb.c
673
static int fsl_emb_pmu_prepare_cpu(unsigned int cpu)
arch/powerpc/perf/core-fsl-emb.c
675
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
arch/powerpc/perf/generic-compat-pmu.c
103
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
arch/powerpc/perf/hv-24x7.c
1653
static int ppc_hv_24x7_cpu_online(unsigned int cpu)
arch/powerpc/perf/hv-24x7.c
1656
cpumask_set_cpu(cpu, &hv_24x7_cpumask);
arch/powerpc/perf/hv-24x7.c
1661
static int ppc_hv_24x7_cpu_offline(unsigned int cpu)
arch/powerpc/perf/hv-24x7.c
1666
if (!cpumask_test_and_clear_cpu(cpu, &hv_24x7_cpumask))
arch/powerpc/perf/hv-24x7.c
1679
perf_pmu_migrate_context(&h_24x7_pmu, cpu, target);
arch/powerpc/perf/hv-gpci.c
845
static int ppc_hv_gpci_cpu_online(unsigned int cpu)
arch/powerpc/perf/hv-gpci.c
848
cpumask_set_cpu(cpu, &hv_gpci_cpumask);
arch/powerpc/perf/hv-gpci.c
853
static int ppc_hv_gpci_cpu_offline(unsigned int cpu)
arch/powerpc/perf/hv-gpci.c
858
if (!cpumask_test_and_clear_cpu(cpu, &hv_gpci_cpumask))
arch/powerpc/perf/hv-gpci.c
871
perf_pmu_migrate_context(&h_gpci_pmu, cpu, target);
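The hv-24x7 and hv-gpci hotplug callbacks both follow the standard uncore-PMU dance: exactly one CPU in a cpumask owns the PMU's events, and when that CPU goes offline, any other online CPU is picked and the perf context migrated to it. A sketch, assuming a PMU instance named demo_pmu:

#include <linux/cpumask.h>
#include <linux/perf_event.h>

static cpumask_t demo_pmu_cpumask;
static struct pmu demo_pmu;

static int demo_pmu_cpu_offline(unsigned int cpu)
{
	unsigned int target;

	/* Nothing to do unless @cpu was the designated event owner. */
	if (!cpumask_test_and_clear_cpu(cpu, &demo_pmu_cpumask))
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;	/* last CPU going down, nowhere to move */

	cpumask_set_cpu(target, &demo_pmu_cpumask);
	perf_pmu_migrate_context(&demo_pmu, cpu, target);
	return 0;
}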
arch/powerpc/perf/imc-pmu.c
1238
static int ppc_trace_imc_cpu_online(unsigned int cpu)
arch/powerpc/perf/imc-pmu.c
1240
return trace_imc_mem_alloc(cpu, trace_imc_mem_size);
arch/powerpc/perf/imc-pmu.c
1243
static int ppc_trace_imc_cpu_offline(unsigned int cpu)
arch/powerpc/perf/imc-pmu.c
1528
int nid, i, cpu;
arch/powerpc/perf/imc-pmu.c
1556
for_each_possible_cpu(cpu) {
arch/powerpc/perf/imc-pmu.c
1557
nid = cpu_to_node(cpu);
arch/powerpc/perf/imc-pmu.c
1560
per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
arch/powerpc/perf/imc-pmu.c
1687
int nr_cores, cpu, res = -ENOMEM;
arch/powerpc/perf/imc-pmu.c
1736
for_each_online_cpu(cpu) {
arch/powerpc/perf/imc-pmu.c
1737
res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
arch/powerpc/perf/imc-pmu.c
1758
for_each_online_cpu(cpu) {
arch/powerpc/perf/imc-pmu.c
1759
res = trace_imc_mem_alloc(cpu, trace_imc_mem_size);
arch/powerpc/perf/imc-pmu.c
339
static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
arch/powerpc/perf/imc-pmu.c
341
return per_cpu(local_nest_imc_refc, cpu);
arch/powerpc/perf/imc-pmu.c
357
static int ppc_nest_imc_cpu_offline(unsigned int cpu)
arch/powerpc/perf/imc-pmu.c
367
if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
arch/powerpc/perf/imc-pmu.c
387
nid = cpu_to_node(cpu);
arch/powerpc/perf/imc-pmu.c
395
if (unlikely(target == cpu))
arch/powerpc/perf/imc-pmu.c
396
target = cpumask_any_but(l_cpumask, cpu);
arch/powerpc/perf/imc-pmu.c
404
nest_change_cpu_context(cpu, target);
arch/powerpc/perf/imc-pmu.c
407
get_hard_smp_processor_id(cpu));
arch/powerpc/perf/imc-pmu.c
412
ref = get_nest_pmu_ref(cpu);
arch/powerpc/perf/imc-pmu.c
421
static int ppc_nest_imc_cpu_online(unsigned int cpu)
arch/powerpc/perf/imc-pmu.c
428
l_cpumask = cpumask_of_node(cpu_to_node(cpu));
arch/powerpc/perf/imc-pmu.c
442
get_hard_smp_processor_id(cpu));
arch/powerpc/perf/imc-pmu.c
447
cpumask_set_cpu(cpu, &nest_imc_cpumask);
arch/powerpc/perf/imc-pmu.c
464
if (event->cpu < 0)
arch/powerpc/perf/imc-pmu.c
467
node_id = cpu_to_node(event->cpu);
arch/powerpc/perf/imc-pmu.c
475
ref = get_nest_pmu_ref(event->cpu);
arch/powerpc/perf/imc-pmu.c
498
get_hard_smp_processor_id(event->cpu));
arch/powerpc/perf/imc-pmu.c
527
if (event->cpu < 0)
arch/powerpc/perf/imc-pmu.c
540
chip_id = cpu_to_chip_id(event->cpu);
arch/powerpc/perf/imc-pmu.c
563
node_id = cpu_to_node(event->cpu);
arch/powerpc/perf/imc-pmu.c
569
ref = get_nest_pmu_ref(event->cpu);
arch/powerpc/perf/imc-pmu.c
576
get_hard_smp_processor_id(event->cpu));
arch/powerpc/perf/imc-pmu.c
599
static int core_imc_mem_init(int cpu, int size)
arch/powerpc/perf/imc-pmu.c
601
int nid, rc = 0, core_id = (cpu / threads_per_core);
arch/powerpc/perf/imc-pmu.c
609
nid = cpu_to_node(cpu);
arch/powerpc/perf/imc-pmu.c
626
get_hard_smp_processor_id(cpu));
arch/powerpc/perf/imc-pmu.c
635
static bool is_core_imc_mem_inited(int cpu)
arch/powerpc/perf/imc-pmu.c
638
int core_id = (cpu / threads_per_core);
arch/powerpc/perf/imc-pmu.c
647
static int ppc_core_imc_cpu_online(unsigned int cpu)
arch/powerpc/perf/imc-pmu.c
654
l_cpumask = cpu_sibling_mask(cpu);
arch/powerpc/perf/imc-pmu.c
660
if (!is_core_imc_mem_inited(cpu)) {
arch/powerpc/perf/imc-pmu.c
661
ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);
arch/powerpc/perf/imc-pmu.c
663
pr_info("core_imc memory allocation for cpu %d failed\n", cpu);
arch/powerpc/perf/imc-pmu.c
669
cpumask_set_cpu(cpu, &core_imc_cpumask);
arch/powerpc/perf/imc-pmu.c
673
static int ppc_core_imc_cpu_offline(unsigned int cpu)
arch/powerpc/perf/imc-pmu.c
683
if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
arch/powerpc/perf/imc-pmu.c
701
ncpu = cpumask_last(cpu_sibling_mask(cpu));
arch/powerpc/perf/imc-pmu.c
703
if (unlikely(ncpu == cpu))
arch/powerpc/perf/imc-pmu.c
704
ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);
arch/powerpc/perf/imc-pmu.c
708
perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
arch/powerpc/perf/imc-pmu.c
715
get_hard_smp_processor_id(cpu));
arch/powerpc/perf/imc-pmu.c
716
core_id = cpu / threads_per_core;
arch/powerpc/perf/imc-pmu.c
766
if (event->cpu < 0)
arch/powerpc/perf/imc-pmu.c
774
core_id = event->cpu / threads_per_core;
arch/powerpc/perf/imc-pmu.c
799
get_hard_smp_processor_id(event->cpu));
arch/powerpc/perf/imc-pmu.c
829
if (event->cpu < 0)
arch/powerpc/perf/imc-pmu.c
839
if (!is_core_imc_mem_inited(event->cpu))
arch/powerpc/perf/imc-pmu.c
842
core_id = event->cpu / threads_per_core;
arch/powerpc/perf/imc-pmu.c
860
get_hard_smp_processor_id(event->cpu));
arch/powerpc/perf/imc-pmu.c
945
static int ppc_thread_imc_cpu_online(unsigned int cpu)
arch/powerpc/perf/imc-pmu.c
947
return thread_imc_mem_alloc(cpu, thread_imc_mem_size);
arch/powerpc/perf/imc-pmu.c
950
static int ppc_thread_imc_cpu_offline(unsigned int cpu)
arch/powerpc/perf/kvm-hv-pmu.c
180
__func__, event, event->id, event->cpu,
arch/powerpc/perf/power10-pmu.c
121
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
arch/powerpc/perf/power7-pmu.c
378
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
arch/powerpc/perf/power8-pmu.c
123
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
arch/powerpc/perf/power9-pmu.c
166
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
arch/powerpc/perf/vpa-dtl.c
203
struct vpa_dtl *dtl = &per_cpu(vpa_dtl_cpu, event->cpu);
arch/powerpc/perf/vpa-dtl.c
206
cur_idx = be64_to_cpu(lppaca_of(event->cpu).dtl_idx);
arch/powerpc/perf/vpa-dtl.c
332
static int vpa_dtl_mem_alloc(int cpu)
arch/powerpc/perf/vpa-dtl.c
334
struct vpa_dtl *dtl = &per_cpu(vpa_dtl_cpu, cpu);
arch/powerpc/perf/vpa-dtl.c
341
buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL | GFP_ATOMIC, cpu_to_node(cpu));
arch/powerpc/perf/vpa-dtl.c
343
pr_warn("buffer allocation failed for cpu %d\n", cpu);
arch/powerpc/perf/vpa-dtl.c
398
if (vpa_dtl_mem_alloc(event->cpu)) {
arch/powerpc/perf/vpa-dtl.c
436
struct vpa_dtl *dtl = &per_cpu(vpa_dtl_cpu, event->cpu);
arch/powerpc/perf/vpa-dtl.c
447
hwcpu = get_hard_smp_processor_id(event->cpu);
arch/powerpc/perf/vpa-dtl.c
453
event->cpu, hwcpu, ret);
arch/powerpc/perf/vpa-dtl.c
458
lppaca_of(event->cpu).dtl_idx = 0;
arch/powerpc/perf/vpa-dtl.c
467
lppaca_of(event->cpu).dtl_enable_mask = event->attr.config;
arch/powerpc/perf/vpa-dtl.c
476
int hwcpu = get_hard_smp_processor_id(event->cpu);
arch/powerpc/perf/vpa-dtl.c
477
struct vpa_dtl *dtl = &per_cpu(vpa_dtl_cpu, event->cpu);
arch/powerpc/perf/vpa-dtl.c
483
lppaca_of(event->cpu).dtl_enable_mask = 0x0;
arch/powerpc/perf/vpa-dtl.c
511
int i, cpu = event->cpu;
arch/powerpc/perf/vpa-dtl.c
519
if (cpu == -1)
arch/powerpc/perf/vpa-dtl.c
520
cpu = raw_smp_processor_id();
arch/powerpc/perf/vpa-dtl.c
522
buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
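vpa-dtl.c, like the pseries dtl.c entries further down, allocates each CPU's dispatch-trace buffer on that CPU's own NUMA node so the hypervisor's writes land in local memory. The core idiom is just the node-aware allocator:

#include <linux/slab.h>
#include <linux/topology.h>

static void *demo_alloc_cpu_local(int cpu, size_t size)
{
	/* Place the buffer on @cpu's home node rather than the caller's. */
	return kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
}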
arch/powerpc/platforms/44x/iss4xx.c
101
pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu);
arch/powerpc/platforms/44x/iss4xx.c
109
pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
arch/powerpc/platforms/44x/iss4xx.c
111
spin_table[3] = cpu;
arch/powerpc/platforms/44x/iss4xx.c
80
static void smp_iss4xx_setup_cpu(int cpu)
arch/powerpc/platforms/44x/iss4xx.c
85
static int smp_iss4xx_kick_cpu(int cpu)
arch/powerpc/platforms/44x/iss4xx.c
87
struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
arch/powerpc/platforms/44x/ppc476.c
149
static void smp_ppc47x_setup_cpu(int cpu)
arch/powerpc/platforms/44x/ppc476.c
154
static int smp_ppc47x_kick_cpu(int cpu)
arch/powerpc/platforms/44x/ppc476.c
156
struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
arch/powerpc/platforms/44x/ppc476.c
172
cpu);
arch/powerpc/platforms/44x/ppc476.c
182
pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
arch/powerpc/platforms/44x/ppc476.c
184
spin_table[3] = cpu;
arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
23
static void mpc85xx_irq_mask(int cpu)
arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
28
static void mpc85xx_irq_unmask(int cpu)
arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
33
static void mpc85xx_cpu_die(int cpu)
arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
51
static void mpc85xx_cpu_up_prepare(int cpu)
arch/powerpc/platforms/85xx/smp.c
117
unsigned int cpu = smp_processor_id();
arch/powerpc/platforms/85xx/smp.c
122
qoriq_pm_ops->irq_mask(cpu);
arch/powerpc/platforms/85xx/smp.c
129
generic_set_cpu_dead(cpu);
arch/powerpc/platforms/85xx/smp.c
133
qoriq_pm_ops->cpu_die(cpu);
arch/powerpc/platforms/85xx/smp.c
139
static void qoriq_cpu_kill(unsigned int cpu)
arch/powerpc/platforms/85xx/smp.c
144
if (is_cpu_dead(cpu)) {
arch/powerpc/platforms/85xx/smp.c
146
paca_ptrs[cpu]->cpu_start = 0;
arch/powerpc/platforms/85xx/smp.c
152
pr_err("CPU%d didn't die...\n", cpu);
arch/powerpc/platforms/85xx/smp.c
181
int cpu = *(const int *)info;
arch/powerpc/platforms/85xx/smp.c
184
book3e_start_thread(cpu_thread_in_core(cpu), inia);
arch/powerpc/platforms/85xx/smp.c
188
static int smp_85xx_start_cpu(int cpu)
arch/powerpc/platforms/85xx/smp.c
195
int hw_cpu = get_hard_smp_processor_id(cpu);
arch/powerpc/platforms/85xx/smp.c
198
np = of_get_cpu_node(cpu, NULL);
arch/powerpc/platforms/85xx/smp.c
201
pr_err("No cpu-release-addr for cpu %d\n", cpu);
arch/powerpc/platforms/85xx/smp.c
224
qoriq_pm_ops->cpu_up_prepare(cpu);
arch/powerpc/platforms/85xx/smp.c
232
mpic_reset_core(cpu);
arch/powerpc/platforms/85xx/smp.c
389
int cpu = smp_processor_id();
arch/powerpc/platforms/85xx/smp.c
390
int sibling = cpu_last_thread_sibling(cpu);
arch/powerpc/platforms/85xx/smp.c
402
if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
arch/powerpc/platforms/85xx/smp.c
409
disable_cpu = cpu_first_thread_sibling(cpu);
arch/powerpc/platforms/85xx/smp.c
414
if (cpu_thread_in_core(cpu) == 0 && cpu_thread_in_core(sibling) != 0) {
arch/powerpc/platforms/8xx/m8xx_setup.c
120
cpu = of_get_cpu_node(0, NULL);
arch/powerpc/platforms/8xx/m8xx_setup.c
121
virq= irq_of_parse_and_map(cpu, 0);
arch/powerpc/platforms/8xx/m8xx_setup.c
122
of_node_put(cpu);
arch/powerpc/platforms/8xx/m8xx_setup.c
41
struct device_node *cpu;
arch/powerpc/platforms/8xx/m8xx_setup.c
46
cpu = of_get_cpu_node(0, NULL);
arch/powerpc/platforms/8xx/m8xx_setup.c
48
if (cpu) {
arch/powerpc/platforms/8xx/m8xx_setup.c
49
fp = of_get_property(cpu, name, NULL);
arch/powerpc/platforms/8xx/m8xx_setup.c
55
of_node_put(cpu);
arch/powerpc/platforms/8xx/m8xx_setup.c
67
struct device_node *cpu;
arch/powerpc/platforms/microwatt/smp.c
24
static void microwatt_smp_setup_cpu(int cpu)
arch/powerpc/platforms/microwatt/smp.c
26
if (cpu != 0)
arch/powerpc/platforms/pasemi/pasemi.h
21
extern void restore_astate(int cpu);
arch/powerpc/platforms/pasemi/pasemi.h
28
static inline void restore_astate(int cpu)
arch/powerpc/platforms/pasemi/setup.c
315
int cpu = smp_processor_id();
arch/powerpc/platforms/pasemi/setup.c
331
pr_err("Machine Check on CPU %d\n", cpu);
arch/powerpc/platforms/powermac/setup.c
251
struct device_node *cpu, *ic;
arch/powerpc/platforms/powermac/setup.c
261
for_each_of_cpu_node(cpu) {
arch/powerpc/platforms/powermac/setup.c
262
fp = of_get_property(cpu, "clock-frequency", NULL);
arch/powerpc/platforms/powermac/setup.c
273
of_node_put(cpu);
arch/powerpc/platforms/powermac/smp.c
1014
int cpu;
arch/powerpc/platforms/powermac/smp.c
1016
for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
arch/powerpc/platforms/powermac/smp.c
1017
set_cpu_possible(cpu, true);
arch/powerpc/platforms/powermac/smp.c
131
static inline void psurge_set_ipi(int cpu)
arch/powerpc/platforms/powermac/smp.c
135
if (cpu == 0)
arch/powerpc/platforms/powermac/smp.c
140
PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
arch/powerpc/platforms/powermac/smp.c
143
static inline void psurge_clr_ipi(int cpu)
arch/powerpc/platforms/powermac/smp.c
145
if (cpu > 0) {
arch/powerpc/platforms/powermac/smp.c
153
PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
arch/powerpc/platforms/powermac/smp.c
172
static void smp_psurge_cause_ipi(int cpu)
arch/powerpc/platforms/powermac/smp.c
174
psurge_set_ipi(cpu);
arch/powerpc/platforms/powermac/smp.c
661
static void core99_init_caches(int cpu)
arch/powerpc/platforms/powermac/smp.c
671
if (cpu == 0) {
arch/powerpc/platforms/powermac/smp.c
675
printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
arch/powerpc/platforms/powermac/smp.c
678
printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
arch/powerpc/platforms/powermac/smp.c
684
if (cpu == 0){
arch/powerpc/platforms/powermac/smp.c
688
printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
arch/powerpc/platforms/powermac/smp.c
691
printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
arch/powerpc/platforms/powermac/smp.c
723
struct device_node *cpu;
arch/powerpc/platforms/powermac/smp.c
727
cpu = of_find_node_by_type(NULL, "cpu");
arch/powerpc/platforms/powermac/smp.c
728
if (cpu != NULL) {
arch/powerpc/platforms/powermac/smp.c
729
tbprop = of_get_property(cpu, "timebase-enable", NULL);
arch/powerpc/platforms/powermac/smp.c
732
of_node_put(cpu);
arch/powerpc/platforms/powermac/smp.c
852
static int smp_core99_cpu_prepare(unsigned int cpu)
arch/powerpc/platforms/powermac/smp.c
868
static int smp_core99_cpu_online(unsigned int cpu)
arch/powerpc/platforms/powermac/smp.c
925
int cpu = smp_processor_id();
arch/powerpc/platforms/powermac/smp.c
929
pr_debug("CPU%d offline\n", cpu);
arch/powerpc/platforms/powermac/smp.c
930
generic_set_cpu_dead(cpu);
arch/powerpc/platforms/powermac/smp.c
940
int cpu = smp_processor_id();
arch/powerpc/platforms/powermac/smp.c
951
printk(KERN_INFO "CPU#%d offline\n", cpu);
arch/powerpc/platforms/powermac/smp.c
952
generic_set_cpu_dead(cpu);
arch/powerpc/platforms/powernv/idle.c
1040
void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
arch/powerpc/platforms/powernv/idle.c
1042
u64 pir = get_hard_smp_processor_id(cpu);
arch/powerpc/platforms/powernv/idle.c
105
if (cpu_thread_in_core(cpu) == 0) {
arch/powerpc/platforms/powernv/idle.c
1059
unsigned long pnv_cpu_offline(unsigned int cpu)
arch/powerpc/platforms/powernv/idle.c
1076
while (!generic_check_cpu_restart(cpu)) {
arch/powerpc/platforms/powernv/idle.c
1430
int cpu;
arch/powerpc/platforms/powernv/idle.c
1434
for_each_present_cpu(cpu) {
arch/powerpc/platforms/powernv/idle.c
1435
struct paca_struct *p = paca_ptrs[cpu];
arch/powerpc/platforms/powernv/idle.c
1438
if (cpu == cpu_first_thread_sibling(cpu))
arch/powerpc/platforms/powernv/idle.c
148
int cpu = smp_processor_id();
arch/powerpc/platforms/powernv/idle.c
152
if (cpu_first_thread_sibling(cpu) != cpu)
arch/powerpc/platforms/powernv/idle.c
227
int cpu = raw_smp_processor_id();
arch/powerpc/platforms/powernv/idle.c
228
int first = cpu_first_thread_sibling(cpu);
arch/powerpc/platforms/powernv/idle.c
229
int thread_nr = cpu_thread_in_core(cpu);
arch/powerpc/platforms/powernv/idle.c
237
int cpu = raw_smp_processor_id();
arch/powerpc/platforms/powernv/idle.c
238
int first = cpu_first_thread_sibling(cpu);
arch/powerpc/platforms/powernv/idle.c
239
int thread_nr = cpu_thread_in_core(cpu);
arch/powerpc/platforms/powernv/idle.c
247
int cpu = raw_smp_processor_id();
arch/powerpc/platforms/powernv/idle.c
248
int first = cpu_first_thread_sibling(cpu);
arch/powerpc/platforms/powernv/idle.c
257
int cpu = raw_smp_processor_id();
arch/powerpc/platforms/powernv/idle.c
258
int first = cpu_first_thread_sibling(cpu);
arch/powerpc/platforms/powernv/idle.c
259
unsigned long thread = 1UL << cpu_thread_in_core(cpu);
arch/powerpc/platforms/powernv/idle.c
280
int cpu = raw_smp_processor_id();
arch/powerpc/platforms/powernv/idle.c
281
int first = cpu_first_thread_sibling(cpu);
arch/powerpc/platforms/powernv/idle.c
316
int cpu = raw_smp_processor_id();
arch/powerpc/platforms/powernv/idle.c
317
int first = cpu_first_thread_sibling(cpu);
arch/powerpc/platforms/powernv/idle.c
319
unsigned long thread = 1UL << cpu_thread_in_core(cpu);
arch/powerpc/platforms/powernv/idle.c
610
int cpu = raw_smp_processor_id();
arch/powerpc/platforms/powernv/idle.c
611
int first = cpu_first_thread_sibling(cpu);
arch/powerpc/platforms/powernv/idle.c
66
int cpu;
arch/powerpc/platforms/powernv/idle.c
80
for_each_present_cpu(cpu) {
arch/powerpc/platforms/powernv/idle.c
81
uint64_t pir = get_hard_smp_processor_id(cpu);
arch/powerpc/platforms/powernv/idle.c
819
int cpu, cpu0, thr;
arch/powerpc/platforms/powernv/idle.c
82
uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];
arch/powerpc/platforms/powernv/idle.c
824
cpu = smp_processor_id();
arch/powerpc/platforms/powernv/idle.c
825
cpu0 = cpu & ~(threads_per_core - 1);
arch/powerpc/platforms/powernv/idle.c
827
if (cpu != cpu0 + thr)
arch/powerpc/platforms/powernv/idle.c
865
int cpu, cpu0, thr;
arch/powerpc/platforms/powernv/idle.c
867
cpu = smp_processor_id();
arch/powerpc/platforms/powernv/idle.c
868
cpu0 = cpu & ~(threads_per_core - 1);
arch/powerpc/platforms/powernv/idle.c
872
if (cpu != cpu0 + thr)
arch/powerpc/platforms/powernv/idle.c
895
int cpu = raw_smp_processor_id();
arch/powerpc/platforms/powernv/idle.c
896
int first = cpu_first_thread_sibling(cpu);
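The powernv/idle.c lines repeat one computation over and over: derive the first thread of a CPU's core and this thread's bit position, so per-core idle state can be kept in a bitmask stored once per core. A sketch of that arithmetic using the powerpc cputhreads helpers:

#include <asm/cputhreads.h>

static void demo_core_position(int cpu)
{
	int first = cpu_first_thread_sibling(cpu);		/* core's thread 0 */
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);	/* my bit */

	(void)first;
	(void)thread;
	/* e.g. atomically set/clear @thread in a mask indexed by @first */
}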
arch/powerpc/platforms/powernv/opal-imc.c
184
int nid, cpu;
arch/powerpc/platforms/powernv/opal-imc.c
190
cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
arch/powerpc/platforms/powernv/opal-imc.c
191
if (cpu >= nr_cpu_ids)
arch/powerpc/platforms/powernv/opal-imc.c
194
get_hard_smp_processor_id(cpu));
arch/powerpc/platforms/powernv/opal-imc.c
201
int cpu, rc;
arch/powerpc/platforms/powernv/opal-imc.c
205
for_each_online_cpu(cpu) {
arch/powerpc/platforms/powernv/opal-imc.c
206
if (cpu_first_thread_sibling(cpu) != cpu)
arch/powerpc/platforms/powernv/opal-imc.c
209
get_hard_smp_processor_id(cpu));
arch/powerpc/platforms/powernv/opal-imc.c
212
__func__, cpu);
arch/powerpc/platforms/powernv/rng.c
103
int chip_id, cpu;
arch/powerpc/platforms/powernv/rng.c
109
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/powernv/rng.c
110
if (per_cpu(pnv_rng, cpu) == NULL ||
arch/powerpc/platforms/powernv/rng.c
111
cpu_to_chip_id(cpu) == chip_id) {
arch/powerpc/platforms/powernv/rng.c
112
per_cpu(pnv_rng, cpu) = rng;
arch/powerpc/platforms/powernv/setup.c
543
static unsigned long pnv_get_proc_freq(unsigned int cpu)
arch/powerpc/platforms/powernv/setup.c
547
ret_freq = cpufreq_get(cpu) * 1000ul;
arch/powerpc/platforms/powernv/smp.c
133
int cpu = smp_processor_id();
arch/powerpc/platforms/powernv/smp.c
139
set_cpu_online(cpu, false);
arch/powerpc/platforms/powernv/smp.c
143
if (cpu == boot_cpuid)
arch/powerpc/platforms/powernv/smp.c
170
unsigned int cpu;
arch/powerpc/platforms/powernv/smp.c
176
cpu = smp_processor_id();
arch/powerpc/platforms/powernv/smp.c
177
DBG("CPU%d offline\n", cpu);
arch/powerpc/platforms/powernv/smp.c
178
generic_set_cpu_dead(cpu);
arch/powerpc/platforms/powernv/smp.c
194
if (generic_check_cpu_restart(cpu))
arch/powerpc/platforms/powernv/smp.c
202
cpu, local_paca->irq_happened);
arch/powerpc/platforms/powernv/smp.c
217
pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
arch/powerpc/platforms/powernv/smp.c
219
while (!generic_check_cpu_restart(cpu)) {
arch/powerpc/platforms/powernv/smp.c
227
kvmppc_clear_host_ipi(cpu);
arch/powerpc/platforms/powernv/smp.c
229
srr1 = pnv_cpu_offline(cpu);
arch/powerpc/platforms/powernv/smp.c
280
if (srr1 && !generic_check_cpu_restart(cpu))
arch/powerpc/platforms/powernv/smp.c
282
cpu, srr1);
arch/powerpc/platforms/powernv/smp.c
294
pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
arch/powerpc/platforms/powernv/smp.c
296
DBG("CPU%d coming online...\n", cpu);
arch/powerpc/platforms/powernv/smp.c
315
static int pnv_smp_prepare_cpu(int cpu)
arch/powerpc/platforms/powernv/smp.c
318
return xive_smp_prepare_cpu(cpu);
arch/powerpc/platforms/powernv/smp.c
323
static void (*ic_cause_ipi)(int cpu);
arch/powerpc/platforms/powernv/smp.c
325
static void pnv_cause_ipi(int cpu)
arch/powerpc/platforms/powernv/smp.c
327
if (doorbell_try_core_ipi(cpu))
arch/powerpc/platforms/powernv/smp.c
330
ic_cause_ipi(cpu);
arch/powerpc/platforms/powernv/smp.c
358
static int pnv_cause_nmi_ipi(int cpu)
arch/powerpc/platforms/powernv/smp.c
362
if (cpu >= 0) {
arch/powerpc/platforms/powernv/smp.c
363
int h = get_hard_smp_processor_id(cpu);
arch/powerpc/platforms/powernv/smp.c
377
} else if (cpu == NMI_IPI_ALL_OTHERS) {
arch/powerpc/platforms/powernv/smp.c
50
static void pnv_smp_setup_cpu(int cpu)
arch/powerpc/platforms/powernv/smp.c
61
else if (cpu != boot_cpuid)
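pnv_cause_ipi() above layers a cheap fast path over a generic one: a msgsnd doorbell works only between threads of the same core, so it is tried first and the interrupt controller used as the fallback for remote cores. Sketch of that fallback shape (the function pointer stands in for whatever IC backend was saved at setup time):

#include <asm/dbell.h>

static void (*demo_ic_cause_ipi)(int cpu);

static void demo_cause_ipi(int cpu)
{
	if (doorbell_try_core_ipi(cpu))	/* same core: doorbell suffices */
		return;

	demo_ic_cause_ipi(cpu);		/* remote core: go through the IC */
}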
arch/powerpc/platforms/powernv/subcore.c
152
int i, cpu = smp_processor_id();
arch/powerpc/platforms/powernv/subcore.c
154
for (i = cpu + 1; i < cpu + threads_per_core; i++)
arch/powerpc/platforms/powernv/subcore.c
187
int i, cpu;
arch/powerpc/platforms/powernv/subcore.c
191
cpu = smp_processor_id();
arch/powerpc/platforms/powernv/subcore.c
192
if (cpu_thread_in_core(cpu) != 0) {
arch/powerpc/platforms/powernv/subcore.c
196
per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT;
arch/powerpc/platforms/powernv/subcore.c
209
for (i = cpu + 1; i < cpu + threads_per_core; i++)
arch/powerpc/platforms/powernv/subcore.c
221
int i, cpu;
arch/powerpc/platforms/powernv/subcore.c
228
cpu = smp_processor_id();
arch/powerpc/platforms/powernv/subcore.c
229
if (cpu_thread_in_core(cpu) != 0) {
arch/powerpc/platforms/powernv/subcore.c
230
split_core_secondary_loop(&per_cpu(split_state, cpu).step);
arch/powerpc/platforms/powernv/subcore.c
279
int cpu;
arch/powerpc/platforms/powernv/subcore.c
286
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/powernv/subcore.c
287
int tid = cpu_thread_in_core(cpu);
arch/powerpc/platforms/powernv/subcore.c
291
paca_ptrs[cpu]->subcore_sibling_mask = mask;
arch/powerpc/platforms/powernv/subcore.c
298
int cpu, new_mode = *(int *)data;
arch/powerpc/platforms/powernv/subcore.c
308
for_each_cpu(cpu, cpu_offline_mask)
arch/powerpc/platforms/powernv/subcore.c
309
smp_send_reschedule(cpu);
arch/powerpc/platforms/powernv/subcore.c
316
for_each_present_cpu(cpu) {
arch/powerpc/platforms/powernv/subcore.c
317
if (cpu >= setup_max_cpus)
arch/powerpc/platforms/powernv/subcore.c
320
while(per_cpu(split_state, cpu).step < SYNC_STEP_FINISHED)
arch/powerpc/platforms/powernv/subcore.c
341
int cpu;
arch/powerpc/platforms/powernv/subcore.c
354
for_each_present_cpu(cpu) {
arch/powerpc/platforms/powernv/subcore.c
355
state = &per_cpu(split_state, cpu);
arch/powerpc/platforms/powernv/vas.c
137
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/powernv/vas.c
138
if (cpu_to_chip_id(cpu) == of_get_ibm_chip_id(dn))
arch/powerpc/platforms/powernv/vas.c
139
per_cpu(cpu_vas_id, cpu) = vasid;
arch/powerpc/platforms/powernv/vas.c
204
int cpu;
arch/powerpc/platforms/powernv/vas.c
206
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/powernv/vas.c
207
if (cpu_to_chip_id(cpu) == chipid)
arch/powerpc/platforms/powernv/vas.c
208
return per_cpu(cpu_vas_id, cpu);
arch/powerpc/platforms/powernv/vas.c
57
int rc, cpu, vasid;
arch/powerpc/platforms/ps3/interrupt.c
169
static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
arch/powerpc/platforms/ps3/interrupt.c
177
if (cpu == PS3_BINDING_CPU_ANY)
arch/powerpc/platforms/ps3/interrupt.c
178
cpu = 0;
arch/powerpc/platforms/ps3/interrupt.c
180
pd = &per_cpu(ps3_private, cpu);
arch/powerpc/platforms/ps3/interrupt.c
192
outlet, cpu, *virq);
arch/powerpc/platforms/ps3/interrupt.c
243
int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
arch/powerpc/platforms/ps3/interrupt.c
249
result = ps3_virq_setup(cpu, outlet, virq);
arch/powerpc/platforms/ps3/interrupt.c
321
int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq)
arch/powerpc/platforms/ps3/interrupt.c
335
result = ps3_irq_plug_setup(cpu, outlet, virq);
arch/powerpc/platforms/ps3/interrupt.c
391
enum ps3_cpu_binding cpu, unsigned int *virq)
arch/powerpc/platforms/ps3/interrupt.c
397
result = ps3_event_receive_port_setup(cpu, virq);
arch/powerpc/platforms/ps3/interrupt.c
466
int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
arch/powerpc/platforms/ps3/interrupt.c
480
result = ps3_irq_plug_setup(cpu, outlet, virq);
arch/powerpc/platforms/ps3/interrupt.c
523
int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
arch/powerpc/platforms/ps3/interrupt.c
542
result = ps3_irq_plug_setup(cpu, outlet, virq);
arch/powerpc/platforms/ps3/interrupt.c
579
int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
arch/powerpc/platforms/ps3/interrupt.c
595
result = ps3_irq_plug_setup(cpu, outlet, virq);
arch/powerpc/platforms/ps3/interrupt.c
618
static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu,
arch/powerpc/platforms/ps3/interrupt.c
622
func, line, header, cpu,
arch/powerpc/platforms/ps3/interrupt.c
628
const u64 *p, unsigned cpu, const char* func, int line)
arch/powerpc/platforms/ps3/interrupt.c
631
func, line, header, cpu, p[0], p[1], p[2], p[3]);
arch/powerpc/platforms/ps3/interrupt.c
682
void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
arch/powerpc/platforms/ps3/interrupt.c
684
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
arch/powerpc/platforms/ps3/interrupt.c
689
cpu, virq, pd->ipi_debug_brk_mask);
arch/powerpc/platforms/ps3/interrupt.c
692
void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
arch/powerpc/platforms/ps3/interrupt.c
694
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
arch/powerpc/platforms/ps3/interrupt.c
699
cpu, virq, pd->ipi_mask);
arch/powerpc/platforms/ps3/interrupt.c
743
unsigned cpu;
arch/powerpc/platforms/ps3/interrupt.c
749
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/ps3/interrupt.c
750
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
arch/powerpc/platforms/ps3/interrupt.c
753
pd->thread_id = get_hard_smp_processor_id(cpu);
arch/powerpc/platforms/ps3/interrupt.c
772
void ps3_shutdown_IRQ(int cpu)
arch/powerpc/platforms/ps3/interrupt.c
776
u64 thread_id = get_hard_smp_processor_id(cpu);
arch/powerpc/platforms/ps3/interrupt.c
782
__LINE__, ppe_id, thread_id, cpu, ps3_result(result));
arch/powerpc/platforms/ps3/platform.h
32
void ps3_shutdown_IRQ(int cpu);
arch/powerpc/platforms/ps3/platform.h
33
void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
arch/powerpc/platforms/ps3/platform.h
34
void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq);
arch/powerpc/platforms/ps3/platform.h
40
void ps3_smp_cleanup_cpu(int cpu);
arch/powerpc/platforms/ps3/platform.h
42
static inline void ps3_smp_cleanup_cpu(int cpu) { }
arch/powerpc/platforms/ps3/setup.c
275
int cpu = smp_processor_id();
arch/powerpc/platforms/ps3/setup.c
277
DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
arch/powerpc/platforms/ps3/setup.c
279
ps3_smp_cleanup_cpu(cpu);
arch/powerpc/platforms/ps3/setup.c
280
ps3_shutdown_IRQ(cpu);
arch/powerpc/platforms/ps3/smp.c
106
DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
arch/powerpc/platforms/ps3/smp.c
30
static void ps3_smp_message_pass(int cpu, int msg)
arch/powerpc/platforms/ps3/smp.c
40
virq = per_cpu(ps3_ipi_virqs, cpu)[msg];
arch/powerpc/platforms/ps3/smp.c
45
" (%d)\n", __func__, __LINE__, cpu, msg, result);
arch/powerpc/platforms/ps3/smp.c
50
int cpu;
arch/powerpc/platforms/ps3/smp.c
52
for (cpu = 0; cpu < 2; cpu++) {
arch/powerpc/platforms/ps3/smp.c
54
unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
arch/powerpc/platforms/ps3/smp.c
57
DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
arch/powerpc/platforms/ps3/smp.c
71
result = ps3_event_receive_port_setup(cpu, &virqs[i]);
arch/powerpc/platforms/ps3/smp.c
77
__func__, __LINE__, cpu, i, virqs[i]);
arch/powerpc/platforms/ps3/smp.c
84
ps3_register_ipi_irq(cpu, virqs[i]);
arch/powerpc/platforms/ps3/smp.c
87
ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_NMI_IPI]);
arch/powerpc/platforms/ps3/smp.c
89
DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
arch/powerpc/platforms/ps3/smp.c
93
void ps3_smp_cleanup_cpu(int cpu)
arch/powerpc/platforms/ps3/smp.c
95
unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
arch/powerpc/platforms/ps3/smp.c
98
DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
arch/powerpc/platforms/ps3/spu.c
506
static void cpu_affinity_set(struct spu *spu, int cpu)
arch/powerpc/platforms/pseries/dtl.c
104
lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;
arch/powerpc/platforms/pseries/dtl.c
113
struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
arch/powerpc/platforms/pseries/dtl.c
121
lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
arch/powerpc/platforms/pseries/dtl.c
129
return per_cpu(dtl_rings, dtl->cpu).write_index;
arch/powerpc/platforms/pseries/dtl.c
143
hwcpu = get_hard_smp_processor_id(dtl->cpu);
arch/powerpc/platforms/pseries/dtl.c
148
"failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
arch/powerpc/platforms/pseries/dtl.c
153
lppaca_of(dtl->cpu).dtl_idx = 0;
arch/powerpc/platforms/pseries/dtl.c
160
lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;
arch/powerpc/platforms/pseries/dtl.c
167
int hwcpu = get_hard_smp_processor_id(dtl->cpu);
arch/powerpc/platforms/pseries/dtl.c
169
lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;
arch/powerpc/platforms/pseries/dtl.c
176
return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
arch/powerpc/platforms/pseries/dtl.c
198
buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
arch/powerpc/platforms/pseries/dtl.c
201
__func__, dtl->cpu);
arch/powerpc/platforms/pseries/dtl.c
24
int cpu;
arch/powerpc/platforms/pseries/dtl.c
336
sprintf(name, "cpu-%d", dtl->cpu);
arch/powerpc/platforms/pseries/dtl.c
359
dtl->cpu = i;
arch/powerpc/platforms/pseries/dtl.c
93
struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
arch/powerpc/platforms/pseries/hotplug-cpu.c
118
static void pseries_cpu_die(unsigned int cpu)
arch/powerpc/platforms/pseries/hotplug-cpu.c
121
unsigned int pcpu = get_hard_smp_processor_id(cpu);
arch/powerpc/platforms/pseries/hotplug-cpu.c
132
cpu, pcpu);
arch/powerpc/platforms/pseries/hotplug-cpu.c
141
cpu, pcpu);
arch/powerpc/platforms/pseries/hotplug-cpu.c
144
paca_ptrs[cpu]->cpu_start = 0;
arch/powerpc/platforms/pseries/hotplug-cpu.c
160
unsigned int cpu, node;
arch/powerpc/platforms/pseries/hotplug-cpu.c
167
for (cpu = 0; cpu < nthreads; cpu++)
arch/powerpc/platforms/pseries/hotplug-cpu.c
168
cpumask_set_cpu(cpu, *cpu_mask);
arch/powerpc/platforms/pseries/hotplug-cpu.c
216
int len, nthreads, node, cpu, assigned_node;
arch/powerpc/platforms/pseries/hotplug-cpu.c
258
for_each_cpu(cpu, cpu_mask) {
arch/powerpc/platforms/pseries/hotplug-cpu.c
259
BUG_ON(cpu_present(cpu));
arch/powerpc/platforms/pseries/hotplug-cpu.c
260
set_cpu_present(cpu, true);
arch/powerpc/platforms/pseries/hotplug-cpu.c
261
set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
arch/powerpc/platforms/pseries/hotplug-cpu.c
273
cpu = cpumask_first(cpu_mask);
arch/powerpc/platforms/pseries/hotplug-cpu.c
275
cpu, cpu + nthreads - 1);
arch/powerpc/platforms/pseries/hotplug-cpu.c
298
unsigned int cpu;
arch/powerpc/platforms/pseries/hotplug-cpu.c
312
for_each_present_cpu(cpu) {
arch/powerpc/platforms/pseries/hotplug-cpu.c
313
if (get_hard_smp_processor_id(cpu) != thread)
arch/powerpc/platforms/pseries/hotplug-cpu.c
315
BUG_ON(cpu_online(cpu));
arch/powerpc/platforms/pseries/hotplug-cpu.c
316
set_cpu_present(cpu, false);
arch/powerpc/platforms/pseries/hotplug-cpu.c
317
set_hard_smp_processor_id(cpu, -1);
arch/powerpc/platforms/pseries/hotplug-cpu.c
318
update_numa_cpu_lookup_table(cpu, -1);
arch/powerpc/platforms/pseries/hotplug-cpu.c
321
if (cpu >= nr_cpu_ids)
arch/powerpc/platforms/pseries/hotplug-cpu.c
331
unsigned int cpu;
arch/powerpc/platforms/pseries/hotplug-cpu.c
345
for_each_present_cpu(cpu) {
arch/powerpc/platforms/pseries/hotplug-cpu.c
346
if (get_hard_smp_processor_id(cpu) != thread)
arch/powerpc/platforms/pseries/hotplug-cpu.c
349
if (!cpu_online(cpu))
arch/powerpc/platforms/pseries/hotplug-cpu.c
366
rc = device_offline(get_cpu_device(cpu));
arch/powerpc/platforms/pseries/hotplug-cpu.c
372
if (cpu == num_possible_cpus()) {
arch/powerpc/platforms/pseries/hotplug-cpu.c
387
unsigned int cpu;
arch/powerpc/platforms/pseries/hotplug-cpu.c
401
for_each_present_cpu(cpu) {
arch/powerpc/platforms/pseries/hotplug-cpu.c
402
if (get_hard_smp_processor_id(cpu) != thread)
arch/powerpc/platforms/pseries/hotplug-cpu.c
405
if (!topology_is_primary_thread(cpu)) {
arch/powerpc/platforms/pseries/hotplug-cpu.c
408
if (!topology_smt_thread_allowed(cpu))
arch/powerpc/platforms/pseries/hotplug-cpu.c
413
find_and_update_cpu_nid(cpu);
arch/powerpc/platforms/pseries/hotplug-cpu.c
414
rc = device_online(get_cpu_device(cpu));
arch/powerpc/platforms/pseries/hotplug-cpu.c
423
if (cpu == num_possible_cpus())
arch/powerpc/platforms/pseries/hotplug-cpu.c
84
int cpu = smp_processor_id();
arch/powerpc/platforms/pseries/hotplug-cpu.c
86
set_cpu_online(cpu, false);
arch/powerpc/platforms/pseries/hotplug-cpu.c
92
if (cpu == boot_cpuid)
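
Several hotplug-cpu.c fragments above repeat one lookup: walk the present CPUs until the one whose hardware thread id matches is found. Pulled out as a standalone helper, a hedged sketch; the helper name is invented, while get_hard_smp_processor_id() is the real powerpc accessor:

#include <linux/cpumask.h>
#include <asm/smp.h>

static int demo_find_cpu_by_thread(unsigned int thread)
{
	unsigned int cpu;

	for_each_present_cpu(cpu) {
		if (get_hard_smp_processor_id(cpu) == thread)
			return cpu;	/* logical CPU backing this hw thread */
	}
	return -1;	/* no present CPU maps to this thread id */
}
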
arch/powerpc/platforms/pseries/hvCall_inst.c
116
int cpu;
arch/powerpc/platforms/pseries/hvCall_inst.c
131
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/pseries/hvCall_inst.c
132
snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu);
arch/powerpc/platforms/pseries/hvCall_inst.c
134
per_cpu(hcall_stats, cpu),
arch/powerpc/platforms/pseries/kexec.c
30
int cpu = smp_processor_id();
arch/powerpc/platforms/pseries/kexec.c
38
cpu, hwcpu, ret);
arch/powerpc/platforms/pseries/kexec.c
46
cpu, hwcpu, ret);
arch/powerpc/platforms/pseries/kexec.c
52
"(hw %d) failed with %d\n", cpu, hwcpu, ret);
arch/powerpc/platforms/pseries/lpar.c
118
void register_dtl_buffer(int cpu)
arch/powerpc/platforms/pseries/lpar.c
123
int hwcpu = get_hard_smp_processor_id(cpu);
arch/powerpc/platforms/pseries/lpar.c
125
pp = paca_ptrs[cpu];
arch/powerpc/platforms/pseries/lpar.c
130
lppaca_of(cpu).dtl_idx = 0;
arch/powerpc/platforms/pseries/lpar.c
137
cpu, hwcpu, ret);
arch/powerpc/platforms/pseries/lpar.c
139
lppaca_of(cpu).dtl_enable_mask = dtl_mask;
arch/powerpc/platforms/pseries/lpar.c
146
int cpu;
arch/powerpc/platforms/pseries/lpar.c
187
int cpu;
arch/powerpc/platforms/pseries/lpar.c
190
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/pseries/lpar.c
191
pp = paca_ptrs[cpu];
arch/powerpc/platforms/pseries/lpar.c
1998
int cpu = (long)filp->private_data;
arch/powerpc/platforms/pseries/lpar.c
1999
struct lppaca *lppaca = &lppaca_of(cpu);
arch/powerpc/platforms/pseries/lpar.c
230
static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
arch/powerpc/platforms/pseries/lpar.c
235
assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE];
arch/powerpc/platforms/pseries/lpar.c
237
rc = hcall_vphn(cpu, flag, &assoc[0]);
arch/powerpc/platforms/pseries/lpar.c
245
static __be32 *get_pcpu_associativity(int cpu)
arch/powerpc/platforms/pseries/lpar.c
247
return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU);
arch/powerpc/platforms/pseries/lpar.c
250
static __be32 *get_vcpu_associativity(int cpu)
arch/powerpc/platforms/pseries/lpar.c
252
return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU);
arch/powerpc/platforms/pseries/lpar.c
374
if (d->cpu != smp_processor_id()) {
arch/powerpc/platforms/pseries/lpar.c
389
d->cpu,
arch/powerpc/platforms/pseries/lpar.c
405
schedule_delayed_work_on(d->cpu, to_delayed_work(work),
arch/powerpc/platforms/pseries/lpar.c
409
static int dtl_worker_online(unsigned int cpu)
arch/powerpc/platforms/pseries/lpar.c
411
struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
arch/powerpc/platforms/pseries/lpar.c
415
d->cpu = cpu;
arch/powerpc/platforms/pseries/lpar.c
418
per_cpu(dtl_entry_ridx, cpu) = 0;
arch/powerpc/platforms/pseries/lpar.c
419
register_dtl_buffer(cpu);
arch/powerpc/platforms/pseries/lpar.c
421
per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
arch/powerpc/platforms/pseries/lpar.c
424
schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
arch/powerpc/platforms/pseries/lpar.c
428
static int dtl_worker_offline(unsigned int cpu)
arch/powerpc/platforms/pseries/lpar.c
430
struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
arch/powerpc/platforms/pseries/lpar.c
435
unregister_dtl(get_hard_smp_processor_id(cpu));
arch/powerpc/platforms/pseries/lpar.c
443
int cpu;
arch/powerpc/platforms/pseries/lpar.c
446
for_each_present_cpu(cpu)
arch/powerpc/platforms/pseries/lpar.c
447
lppaca_of(cpu).dtl_enable_mask = dtl_mask;
arch/powerpc/platforms/pseries/lpar.c
452
int cpu;
arch/powerpc/platforms/pseries/lpar.c
459
for_each_present_cpu(cpu)
arch/powerpc/platforms/pseries/lpar.c
460
lppaca_of(cpu).dtl_enable_mask = dtl_mask;
arch/powerpc/platforms/pseries/lpar.c
506
int rc, cmd, cpu;
arch/powerpc/platforms/pseries/lpar.c
535
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/pseries/lpar.c
536
disp = per_cpu_ptr(&vcpu_disp_data, cpu);
arch/powerpc/platforms/pseries/lpar.c
562
int cpu;
arch/powerpc/platforms/pseries/lpar.c
570
for_each_online_cpu(cpu) {
arch/powerpc/platforms/pseries/lpar.c
571
disp = per_cpu_ptr(&vcpu_disp_data, cpu);
arch/powerpc/platforms/pseries/lpar.c
572
seq_printf(p, "cpu%d", cpu);
arch/powerpc/platforms/pseries/lpar.c
662
u64 pseries_paravirt_steal_clock(int cpu)
arch/powerpc/platforms/pseries/lpar.c
664
struct lppaca *lppaca = &lppaca_of(cpu);
arch/powerpc/platforms/pseries/lpar.c
677
void vpa_init(int cpu)
arch/powerpc/platforms/pseries/lpar.c
679
int hwcpu = get_hard_smp_processor_id(cpu);
arch/powerpc/platforms/pseries/lpar.c
687
WARN_ON(cpu != smp_processor_id());
arch/powerpc/platforms/pseries/lpar.c
690
lppaca_of(cpu).vmxregs_in_use = 1;
arch/powerpc/platforms/pseries/lpar.c
693
lppaca_of(cpu).ebb_regs_in_use = 1;
arch/powerpc/platforms/pseries/lpar.c
695
addr = __pa(&lppaca_of(cpu));
arch/powerpc/platforms/pseries/lpar.c
700
"%lx failed with %ld\n", cpu, hwcpu, addr, ret);
arch/powerpc/platforms/pseries/lpar.c
710
addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
arch/powerpc/platforms/pseries/lpar.c
715
cpu, hwcpu, addr, ret);
arch/powerpc/platforms/pseries/lpar.c
722
register_dtl_buffer(cpu);
arch/powerpc/platforms/pseries/lpar.c
88
int cpu;
arch/powerpc/platforms/pseries/lpar.c
92
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/pseries/lpar.c
93
pp = paca_ptrs[cpu];
arch/powerpc/platforms/pseries/lpar.c
99
cpu);
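
The dtl.c and lpar.c entries above toggle the dispatch trace log by sweeping every present CPU's lppaca. A minimal sketch of that sweep, assuming pseries kernel context; only the function name is invented:

#include <linux/cpumask.h>
#include <asm/lppaca.h>

static void demo_set_dtl_mask(u8 mask)
{
	int cpu;

	/* Each CPU's lppaca carries its own DTL enable mask. */
	for_each_present_cpu(cpu)
		lppaca_of(cpu).dtl_enable_mask = mask;
}
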
arch/powerpc/platforms/pseries/lparcfg.c
474
int cpu;
arch/powerpc/platforms/pseries/lparcfg.c
483
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/pseries/lparcfg.c
484
cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);
arch/powerpc/platforms/pseries/lparcfg.c
485
cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);
arch/powerpc/platforms/pseries/lparcfg.c
498
int cpu;
arch/powerpc/platforms/pseries/lparcfg.c
502
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/pseries/lparcfg.c
503
dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);
arch/powerpc/platforms/pseries/lparcfg.c
505
be32_to_cpu(lppaca_of(cpu).dispersion_count);
arch/powerpc/platforms/pseries/mobility.c
508
unsigned int cpu;
arch/powerpc/platforms/pseries/mobility.c
510
for_each_online_cpu(cpu) {
arch/powerpc/platforms/pseries/mobility.c
511
if (cpu != smp_processor_id())
arch/powerpc/platforms/pseries/mobility.c
512
prod_single(cpu);
arch/powerpc/platforms/pseries/pseries_energy.c
105
printk(KERN_WARNING "cpu_to_drc_index(%d) failed", cpu);
arch/powerpc/platforms/pseries/pseries_energy.c
114
int thread_index = 0, cpu = 0;
arch/powerpc/platforms/pseries/pseries_energy.c
140
cpu += drc.num_sequential_elems;
arch/powerpc/platforms/pseries/pseries_energy.c
143
cpu += ((drc_index - drc.drc_index_start) /
arch/powerpc/platforms/pseries/pseries_energy.c
146
thread_index = cpu_first_thread_of_core(cpu);
arch/powerpc/platforms/pseries/pseries_energy.c
190
int rc, cnt, i, cpu;
arch/powerpc/platforms/pseries/pseries_energy.c
213
cpu = drc_index_to_cpu(buf_page[2*i+1]);
arch/powerpc/platforms/pseries/pseries_energy.c
214
if ((cpu_online(cpu) && !activate) ||
arch/powerpc/platforms/pseries/pseries_energy.c
215
(!cpu_online(cpu) && activate))
arch/powerpc/platforms/pseries/pseries_energy.c
216
s += sprintf(s, "%d,", cpu);
arch/powerpc/platforms/pseries/pseries_energy.c
302
int cpu, err;
arch/powerpc/platforms/pseries/pseries_energy.c
319
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/pseries/pseries_energy.c
320
cpu_dev = get_cpu_device(cpu);
arch/powerpc/platforms/pseries/pseries_energy.c
341
int cpu;
arch/powerpc/platforms/pseries/pseries_energy.c
355
for_each_possible_cpu(cpu) {
arch/powerpc/platforms/pseries/pseries_energy.c
356
cpu_dev = get_cpu_device(cpu);
arch/powerpc/platforms/pseries/pseries_energy.c
36
static u32 cpu_to_drc_index(int cpu)
arch/powerpc/platforms/pseries/pseries_energy.c
49
thread_index = cpu_core_index_of_thread(cpu);
arch/powerpc/platforms/pseries/rtas-fadump.c
367
int i, rc = 0, cpu = 0;
arch/powerpc/platforms/pseries/rtas-fadump.c
407
cpu = (be64_to_cpu(reg_entry->reg_value) &
arch/powerpc/platforms/pseries/rtas-fadump.c
409
if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_mask)) {
arch/powerpc/platforms/pseries/rtas-fadump.c
413
pr_debug("Reading register data for cpu %d...\n", cpu);
arch/powerpc/platforms/pseries/rtas-fadump.c
414
if (fdh && fdh->crashing_cpu == cpu) {
arch/powerpc/platforms/pseries/smp.c
124
static void smp_setup_cpu(int cpu)
arch/powerpc/platforms/pseries/smp.c
128
else if (cpu != boot_cpuid)
arch/powerpc/platforms/pseries/smp.c
132
vpa_init(cpu);
arch/powerpc/platforms/pseries/smp.c
134
cpumask_clear_cpu(cpu, of_spin_mask);
arch/powerpc/platforms/pseries/smp.c
155
static int pseries_smp_prepare_cpu(int cpu)
arch/powerpc/platforms/pseries/smp.c
158
return xive_smp_prepare_cpu(cpu);
arch/powerpc/platforms/pseries/smp.c
163
static void (*ic_cause_ipi)(int cpu) __ro_after_init;
arch/powerpc/platforms/pseries/smp.c
166
static void dbell_or_ic_cause_ipi(int cpu)
arch/powerpc/platforms/pseries/smp.c
168
if (doorbell_try_core_ipi(cpu))
arch/powerpc/platforms/pseries/smp.c
171
ic_cause_ipi(cpu);
arch/powerpc/platforms/pseries/smp.c
174
static int pseries_cause_nmi_ipi(int cpu)
arch/powerpc/platforms/pseries/smp.c
178
if (cpu == NMI_IPI_ALL_OTHERS) {
arch/powerpc/platforms/pseries/smp.c
181
if (cpu < 0) {
arch/powerpc/platforms/pseries/smp.c
182
WARN_ONCE(true, "incorrect cpu parameter %d", cpu);
arch/powerpc/platforms/pseries/smp.c
186
hwcpu = get_hard_smp_processor_id(cpu);
arch/powerpc/platforms/pseries/vphn.c
79
long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity)
arch/powerpc/platforms/pseries/vphn.c
84
rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, cpu);
arch/powerpc/sysdev/fsl_rcpm.c
105
static void rcpm_v2_cpu_enter_state(int cpu, int state)
arch/powerpc/sysdev/fsl_rcpm.c
107
int hw_cpu = get_hard_smp_processor_id(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
108
u32 mask = 1 << cpu_core_index_of_thread(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
129
static void rcpm_v1_cpu_die(int cpu)
arch/powerpc/sysdev/fsl_rcpm.c
131
rcpm_v1_cpu_enter_state(cpu, E500_PM_PH15);
arch/powerpc/sysdev/fsl_rcpm.c
135
static void qoriq_disable_thread(int cpu)
arch/powerpc/sysdev/fsl_rcpm.c
137
int thread = cpu_thread_in_core(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
143
static void rcpm_v2_cpu_die(int cpu)
arch/powerpc/sysdev/fsl_rcpm.c
149
primary = cpu_first_thread_sibling(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
152
rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
arch/powerpc/sysdev/fsl_rcpm.c
155
qoriq_disable_thread(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
161
rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
arch/powerpc/sysdev/fsl_rcpm.c
164
static void rcpm_v1_cpu_exit_state(int cpu, int state)
arch/powerpc/sysdev/fsl_rcpm.c
166
int hw_cpu = get_hard_smp_processor_id(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
182
static void rcpm_v1_cpu_up_prepare(int cpu)
arch/powerpc/sysdev/fsl_rcpm.c
184
rcpm_v1_cpu_exit_state(cpu, E500_PM_PH15);
arch/powerpc/sysdev/fsl_rcpm.c
185
rcpm_v1_irq_unmask(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
188
static void rcpm_v2_cpu_exit_state(int cpu, int state)
arch/powerpc/sysdev/fsl_rcpm.c
190
int hw_cpu = get_hard_smp_processor_id(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
191
u32 mask = 1 << cpu_core_index_of_thread(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
211
static void rcpm_v2_cpu_up_prepare(int cpu)
arch/powerpc/sysdev/fsl_rcpm.c
213
rcpm_v2_cpu_exit_state(cpu, E500_PM_PH20);
arch/powerpc/sysdev/fsl_rcpm.c
214
rcpm_v2_irq_unmask(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
27
static void rcpm_v1_irq_mask(int cpu)
arch/powerpc/sysdev/fsl_rcpm.c
29
int hw_cpu = get_hard_smp_processor_id(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
38
static void rcpm_v2_irq_mask(int cpu)
arch/powerpc/sysdev/fsl_rcpm.c
40
int hw_cpu = get_hard_smp_processor_id(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
49
static void rcpm_v1_irq_unmask(int cpu)
arch/powerpc/sysdev/fsl_rcpm.c
51
int hw_cpu = get_hard_smp_processor_id(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
60
static void rcpm_v2_irq_unmask(int cpu)
arch/powerpc/sysdev/fsl_rcpm.c
62
int hw_cpu = get_hard_smp_processor_id(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
87
static void rcpm_v1_cpu_enter_state(int cpu, int state)
arch/powerpc/sysdev/fsl_rcpm.c
89
int hw_cpu = get_hard_smp_processor_id(cpu);
arch/powerpc/sysdev/ge/ge_pic.c
43
#define GEF_PIC_INTR_MASK(cpu) (0x0010 + (0x4 * cpu))
arch/powerpc/sysdev/ge/ge_pic.c
47
#define GEF_PIC_MCP_MASK(cpu) (0x0018 + (0x4 * cpu))
arch/powerpc/sysdev/mpic.c
1074
int cpu;
arch/powerpc/sysdev/mpic.c
1077
cpu = mpic_processor_id(mpic);
arch/powerpc/sysdev/mpic.c
1081
mpic_set_destination(virq, cpu);
arch/powerpc/sysdev/mpic.c
1447
unsigned int cpu = get_hard_smp_processor_id(i);
arch/powerpc/sysdev/mpic.c
1449
mpic_map(mpic, mpic->paddr, &mpic->cpuregs[cpu],
arch/powerpc/sysdev/mpic.c
1450
MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE),
arch/powerpc/sysdev/mpic.c
1551
int i, cpu;
arch/powerpc/sysdev/mpic.c
1606
cpu = mpic_processor_id(mpic);
arch/powerpc/sysdev/mpic.c
1619
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu);
arch/powerpc/sysdev/mpic.c
163
unsigned int cpu = 0;
arch/powerpc/sysdev/mpic.c
166
cpu = hard_smp_processor_id();
arch/powerpc/sysdev/mpic.c
168
return cpu;
arch/powerpc/sysdev/mpic.c
1860
void smp_mpic_message_pass(int cpu, int msg)
arch/powerpc/sysdev/mpic.c
1878
physmask = 1 << get_hard_smp_processor_id(cpu);
arch/powerpc/sysdev/mpic.c
1898
void smp_mpic_setup_cpu(int cpu)
arch/powerpc/sysdev/mpic.c
1903
void mpic_reset_core(int cpu)
arch/powerpc/sysdev/mpic.c
1907
int cpuid = get_hard_smp_processor_id(cpu);
arch/powerpc/sysdev/mpic.c
256
unsigned int cpu = mpic_processor_id(mpic);
arch/powerpc/sysdev/mpic.c
258
return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg);
arch/powerpc/sysdev/mpic.c
263
unsigned int cpu = mpic_processor_id(mpic);
arch/powerpc/sysdev/mpic.c
265
_mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value);
arch/powerpc/sysdev/mpic.h
36
extern void mpic_reset_core(int cpu);
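
smp_mpic_message_pass above builds physmask as 1 << get_hard_smp_processor_id(cpu) because MPIC destination registers are indexed by hardware CPU number, not Linux's logical one. The translation step, isolated as a hypothetical helper:

#include <linux/types.h>
#include <asm/smp.h>

static u32 demo_mpic_dest_mask(int cpu)
{
	/* Logical CPU -> hardware CPU, then one bit per hardware CPU. */
	return 1u << get_hard_smp_processor_id(cpu);
}
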
arch/powerpc/sysdev/xics/icp-hv.c
137
static void icp_hv_cause_ipi(int cpu)
arch/powerpc/sysdev/xics/icp-hv.c
139
icp_hv_set_qirr(cpu, IPI_PRIORITY);
arch/powerpc/sysdev/xics/icp-hv.c
144
int cpu = smp_processor_id();
arch/powerpc/sysdev/xics/icp-hv.c
146
icp_hv_set_qirr(cpu, 0xff);
arch/powerpc/sysdev/xics/icp-hv.c
85
int cpu = smp_processor_id();
arch/powerpc/sysdev/xics/icp-hv.c
88
icp_hv_set_qirr(cpu, 0xff);
arch/powerpc/sysdev/xics/icp-native.c
100
icp_native_set_qirr(cpu, 0xff);
arch/powerpc/sysdev/xics/icp-native.c
142
static void icp_native_cause_ipi(int cpu)
arch/powerpc/sysdev/xics/icp-native.c
144
kvmppc_set_host_ipi(cpu);
arch/powerpc/sysdev/xics/icp-native.c
145
icp_native_set_qirr(cpu, IPI_PRIORITY);
arch/powerpc/sysdev/xics/icp-native.c
161
int cpu = smp_processor_id();
arch/powerpc/sysdev/xics/icp-native.c
162
kvmppc_clear_host_ipi(cpu);
arch/powerpc/sysdev/xics/icp-native.c
163
icp_native_set_qirr(cpu, 0xff);
arch/powerpc/sysdev/xics/icp-native.c
173
void xics_wake_cpu(int cpu)
arch/powerpc/sysdev/xics/icp-native.c
175
icp_native_set_qirr(cpu, IPI_PRIORITY);
arch/powerpc/sysdev/xics/icp-native.c
181
int cpu = smp_processor_id();
arch/powerpc/sysdev/xics/icp-native.c
183
kvmppc_clear_host_ipi(cpu);
arch/powerpc/sysdev/xics/icp-native.c
184
icp_native_set_qirr(cpu, 0xff);
arch/powerpc/sysdev/xics/icp-native.c
195
int i, cpu = -1;
arch/powerpc/sysdev/xics/icp-native.c
204
cpu = i;
arch/powerpc/sysdev/xics/icp-native.c
212
if (cpu == -1)
arch/powerpc/sysdev/xics/icp-native.c
216
cpu, hw_id);
arch/powerpc/sysdev/xics/icp-native.c
222
cpu, hw_id);
arch/powerpc/sysdev/xics/icp-native.c
226
icp_native_regs[cpu] = ioremap(addr, size);
arch/powerpc/sysdev/xics/icp-native.c
227
kvmppc_set_xics_phys(cpu, addr);
arch/powerpc/sysdev/xics/icp-native.c
228
if (!icp_native_regs[cpu]) {
arch/powerpc/sysdev/xics/icp-native.c
230
cpu, hw_id, addr);
arch/powerpc/sysdev/xics/icp-native.c
50
int cpu = smp_processor_id();
arch/powerpc/sysdev/xics/icp-native.c
58
return in_be32(&icp_native_regs[cpu]->xirr.word);
arch/powerpc/sysdev/xics/icp-native.c
63
int cpu = smp_processor_id();
arch/powerpc/sysdev/xics/icp-native.c
65
out_be32(&icp_native_regs[cpu]->xirr.word, value);
arch/powerpc/sysdev/xics/icp-native.c
70
int cpu = smp_processor_id();
arch/powerpc/sysdev/xics/icp-native.c
72
out_8(&icp_native_regs[cpu]->xirr.bytes[0], value);
arch/powerpc/sysdev/xics/icp-native.c
97
int cpu = smp_processor_id();
arch/powerpc/sysdev/xics/icp-opal.c
126
static void icp_opal_cause_ipi(int cpu)
arch/powerpc/sysdev/xics/icp-opal.c
128
int hw_cpu = get_hard_smp_processor_id(cpu);
arch/powerpc/sysdev/xics/icp-opal.c
130
kvmppc_set_host_ipi(cpu);
arch/powerpc/sysdev/xics/icp-opal.c
136
int cpu = smp_processor_id();
arch/powerpc/sysdev/xics/icp-opal.c
138
kvmppc_clear_host_ipi(cpu);
arch/powerpc/sysdev/xics/icp-opal.c
139
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
arch/powerpc/sysdev/xics/icp-opal.c
160
int cpu = smp_processor_id();
arch/powerpc/sysdev/xics/icp-opal.c
161
kvmppc_clear_host_ipi(cpu);
arch/powerpc/sysdev/xics/icp-opal.c
162
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
arch/powerpc/sysdev/xics/xics-common.c
182
int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
arch/powerpc/sysdev/xics/xics-common.c
186
pr_debug("%s: CPU %u\n", __func__, cpu);
arch/powerpc/sysdev/xics/xics-common.c
240
if (cpu_online(cpu))
arch/powerpc/sysdev/xics/xics-common.c
242
virq, cpu);
arch/powerpc/sysdev/xive/common.c
1054
static void xive_cause_ipi(int cpu)
arch/powerpc/sysdev/xive/common.c
1059
xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
1062
smp_processor_id(), cpu, xc->hw_ipi);
arch/powerpc/sysdev/xive/common.c
1181
static int xive_request_ipi(unsigned int cpu)
arch/powerpc/sysdev/xive/common.c
1183
struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
arch/powerpc/sysdev/xive/common.c
1197
static int xive_setup_cpu_ipi(unsigned int cpu)
arch/powerpc/sysdev/xive/common.c
1199
unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
arch/powerpc/sysdev/xive/common.c
1203
pr_debug("Setting up IPI for CPU %d\n", cpu);
arch/powerpc/sysdev/xive/common.c
1205
xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
1212
xive_request_ipi(cpu);
arch/powerpc/sysdev/xive/common.c
1215
if (xive_ops->get_ipi(cpu, xc))
arch/powerpc/sysdev/xive/common.c
1224
pr_err("Failed to populate IPI data on CPU %d\n", cpu);
arch/powerpc/sysdev/xive/common.c
1228
get_hard_smp_processor_id(cpu),
arch/powerpc/sysdev/xive/common.c
1231
pr_err("Failed to map IPI CPU %d\n", cpu);
arch/powerpc/sysdev/xive/common.c
1234
pr_debug("CPU %d HW IPI 0x%x, virq %d, trig_mmio=%p\n", cpu,
arch/powerpc/sysdev/xive/common.c
1243
noinstr static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/common.c
1245
unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
arch/powerpc/sysdev/xive/common.c
1269
xive_ops->put_ipi(cpu, xc);
arch/powerpc/sysdev/xive/common.c
1473
static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/common.c
1476
xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
arch/powerpc/sysdev/xive/common.c
1479
static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/common.c
1485
rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);
arch/powerpc/sysdev/xive/common.c
1490
static int xive_prepare_cpu(unsigned int cpu)
arch/powerpc/sysdev/xive/common.c
1494
xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
1497
GFP_KERNEL, cpu_to_node(cpu));
arch/powerpc/sysdev/xive/common.c
1503
xive_ops->prepare_cpu(cpu, xc);
arch/powerpc/sysdev/xive/common.c
1505
per_cpu(xive_cpu, cpu) = xc;
arch/powerpc/sysdev/xive/common.c
1509
return xive_setup_cpu_queues(cpu, xc);
arch/powerpc/sysdev/xive/common.c
1536
int xive_smp_prepare_cpu(unsigned int cpu)
arch/powerpc/sysdev/xive/common.c
1541
rc = xive_prepare_cpu(cpu);
arch/powerpc/sysdev/xive/common.c
1546
return xive_setup_cpu_ipi(cpu);
arch/powerpc/sysdev/xive/common.c
1550
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/common.c
1581
cpu, irq);
arch/powerpc/sysdev/xive/common.c
1607
unsigned int cpu = smp_processor_id();
arch/powerpc/sysdev/xive/common.c
1617
xive_flush_cpu_queue(cpu, xc);
arch/powerpc/sysdev/xive/common.c
1627
unsigned int cpu = smp_processor_id();
arch/powerpc/sysdev/xive/common.c
1630
xive_flush_cpu_queue(cpu, xc);
arch/powerpc/sysdev/xive/common.c
1640
unsigned int cpu = smp_processor_id();
arch/powerpc/sysdev/xive/common.c
1647
xive_ops->teardown_cpu(cpu, xc);
arch/powerpc/sysdev/xive/common.c
1651
xive_cleanup_cpu_ipi(cpu, xc);
arch/powerpc/sysdev/xive/common.c
1655
xive_cleanup_cpu_queues(cpu, xc);
arch/powerpc/sysdev/xive/common.c
1692
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
arch/powerpc/sysdev/xive/common.c
1699
pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
arch/powerpc/sysdev/xive/common.c
1729
static void xive_debug_show_ipi(struct seq_file *m, int cpu)
arch/powerpc/sysdev/xive/common.c
1731
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
1733
seq_printf(m, "CPU %d: ", cpu);
arch/powerpc/sysdev/xive/common.c
1789
int cpu;
arch/powerpc/sysdev/xive/common.c
1794
for_each_online_cpu(cpu)
arch/powerpc/sysdev/xive/common.c
1795
xive_debug_show_ipi(m, cpu);
arch/powerpc/sysdev/xive/common.c
1818
int cpu = (long)m->private;
arch/powerpc/sysdev/xive/common.c
1819
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
1832
long cpu;
arch/powerpc/sysdev/xive/common.c
1844
for_each_possible_cpu(cpu) {
arch/powerpc/sysdev/xive/common.c
1845
snprintf(name, sizeof(name), "cpu%ld", cpu);
arch/powerpc/sysdev/xive/common.c
1846
debugfs_create_file(name, 0400, xive_eq_dir, (void *)cpu,
arch/powerpc/sysdev/xive/common.c
270
notrace void xmon_xive_do_dump(int cpu)
arch/powerpc/sysdev/xive/common.c
272
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
274
xmon_printf("CPU %d:", cpu);
arch/powerpc/sysdev/xive/common.c
503
static bool xive_try_pick_target(int cpu)
arch/powerpc/sysdev/xive/common.c
505
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
527
static void xive_dec_target_count(int cpu)
arch/powerpc/sysdev/xive/common.c
529
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
532
if (WARN_ON(cpu < 0 || !xc)) {
arch/powerpc/sysdev/xive/common.c
533
pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
arch/powerpc/sysdev/xive/common.c
551
int cpu, first, num, i;
arch/powerpc/sysdev/xive/common.c
558
cpu = cpumask_first(mask);
arch/powerpc/sysdev/xive/common.c
559
for (i = 0; i < first && cpu < nr_cpu_ids; i++)
arch/powerpc/sysdev/xive/common.c
560
cpu = cpumask_next(cpu, mask);
arch/powerpc/sysdev/xive/common.c
563
if (WARN_ON(cpu >= nr_cpu_ids))
arch/powerpc/sysdev/xive/common.c
564
cpu = cpumask_first(cpu_online_mask);
arch/powerpc/sysdev/xive/common.c
567
first = cpu;
arch/powerpc/sysdev/xive/common.c
578
if (cpu_online(cpu) && xive_try_pick_target(cpu))
arch/powerpc/sysdev/xive/common.c
579
return cpu;
arch/powerpc/sysdev/xive/common.c
580
cpu = cpumask_next(cpu, mask);
arch/powerpc/sysdev/xive/common.c
582
if (cpu >= nr_cpu_ids)
arch/powerpc/sysdev/xive/common.c
583
cpu = cpumask_first(mask);
arch/powerpc/sysdev/xive/common.c
584
} while (cpu != first);
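
The common.c lines 551-584 outline xive_find_target_in_mask(): skip a few entries into the mask to spread interrupts across CPUs, then walk the mask circularly until an online CPU accepts the target. A hedged reconstruction from just those fragments; demo_try_pick() stands in for the driver's real per-CPU admission test:

#include <linux/cpumask.h>
#include <linux/bug.h>

static bool demo_try_pick(int cpu) { return true; }	/* hypothetical test */

static int demo_find_target_in_mask(const struct cpumask *mask, int first)
{
	int cpu, i;

	/* Advance "first" entries into the mask to rotate the start point. */
	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Circular walk: stop once we come back to where we started. */
	first = cpu;
	do {
		if (cpu_online(cpu) && demo_try_pick(cpu))
			return cpu;
		cpu = cpumask_next(cpu, mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
	} while (cpu != first);

	return -1;
}
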
arch/powerpc/sysdev/xive/common.c
600
int cpu = -1;
arch/powerpc/sysdev/xive/common.c
609
for_each_cpu_and(cpu, affinity, cpu_online_mask) {
arch/powerpc/sysdev/xive/common.c
610
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
612
cpumask_set_cpu(cpu, mask);
arch/powerpc/sysdev/xive/common.c
616
cpu = -1;
arch/powerpc/sysdev/xive/common.c
618
cpu = xive_find_target_in_mask(mask, fuzz++);
arch/powerpc/sysdev/xive/common.c
620
if (cpu >= 0)
arch/powerpc/sysdev/xive/common.c
621
return cpu;
arch/powerpc/sysdev/xive/common.c
75
static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu)
arch/powerpc/sysdev/xive/common.c
77
return xive_ipis[early_cpu_to_node(cpu)].irq;
arch/powerpc/sysdev/xive/native.c
216
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
arch/powerpc/sysdev/xive/native.c
221
qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
arch/powerpc/sysdev/xive/native.c
225
return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
arch/powerpc/sysdev/xive/native.c
229
static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
arch/powerpc/sysdev/xive/native.c
238
__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
arch/powerpc/sysdev/xive/native.c
261
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/native.c
273
pr_err("Failed to allocate IPI on CPU %d\n", cpu);
arch/powerpc/sysdev/xive/native.c
311
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/native.c
386
static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/native.c
388
xc->chip_id = cpu_to_chip_id(cpu);
arch/powerpc/sysdev/xive/native.c
391
static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/native.c
406
vp = xive_pool_vps + cpu;
arch/powerpc/sysdev/xive/native.c
414
pr_err("Failed to enable pool VP on CPU %d\n", cpu);
arch/powerpc/sysdev/xive/native.c
421
pr_err("Failed to get pool VP info CPU %d\n", cpu);
arch/powerpc/sysdev/xive/native.c
431
static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/native.c
443
vp = xive_pool_vps + cpu;
arch/powerpc/sysdev/xive/native.c
563
u32 val, cpu;
arch/powerpc/sysdev/xive/native.c
605
for_each_possible_cpu(cpu)
arch/powerpc/sysdev/xive/native.c
606
kvmppc_set_xive_tima(cpu, r.start, tima);
arch/powerpc/sysdev/xive/spapr.c
531
static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
arch/powerpc/sysdev/xive/spapr.c
537
qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
arch/powerpc/sysdev/xive/spapr.c
541
return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
arch/powerpc/sysdev/xive/spapr.c
545
static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
arch/powerpc/sysdev/xive/spapr.c
551
int hw_cpu = get_hard_smp_processor_id(cpu);
arch/powerpc/sysdev/xive/spapr.c
572
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/spapr.c
577
pr_err("Failed to allocate IPI on CPU %d\n", cpu);
arch/powerpc/sysdev/xive/spapr.c
585
static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/spapr.c
647
static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/spapr.c
656
static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
arch/powerpc/sysdev/xive/xive-internal.h
45
int (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
arch/powerpc/sysdev/xive/xive-internal.h
46
void (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
arch/powerpc/sysdev/xive/xive-internal.h
47
void (*prepare_cpu)(unsigned int cpu, struct xive_cpu *xc);
arch/powerpc/sysdev/xive/xive-internal.h
48
void (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
arch/powerpc/sysdev/xive/xive-internal.h
49
void (*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
arch/powerpc/sysdev/xive/xive-internal.h
57
int (*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
arch/powerpc/sysdev/xive/xive-internal.h
58
void (*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
arch/powerpc/sysdev/xive/xive-internal.h
67
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
arch/powerpc/xmon/xmon.c
1234
static int xmon_switch_cpu(unsigned long cpu)
arch/powerpc/xmon/xmon.c
1240
xmon_owner = cpu;
arch/powerpc/xmon/xmon.c
1249
printf("cpu 0x%lx didn't take control\n", cpu);
arch/powerpc/xmon/xmon.c
1259
unsigned long cpu;
arch/powerpc/xmon/xmon.c
1261
for_each_cpu_wrap(cpu, &xmon_batch_cpus, xmon_batch_start_cpu) {
arch/powerpc/xmon/xmon.c
1263
xmon_batch_start_cpu = cpu;
arch/powerpc/xmon/xmon.c
1264
if (xmon_switch_cpu(cpu))
arch/powerpc/xmon/xmon.c
1266
cpumask_clear_cpu(cpu, &xmon_batch_cpus);
arch/powerpc/xmon/xmon.c
1308
unsigned long cpu, first_cpu, last_cpu;
arch/powerpc/xmon/xmon.c
1310
cpu = skipbl();
arch/powerpc/xmon/xmon.c
1311
if (cpu == '#') {
arch/powerpc/xmon/xmon.c
1334
termch = cpu;
arch/powerpc/xmon/xmon.c
1336
if (!scanhex(&cpu) || cpu >= num_possible_cpus()) {
arch/powerpc/xmon/xmon.c
1340
for_each_possible_cpu(cpu) {
arch/powerpc/xmon/xmon.c
1341
if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
arch/powerpc/xmon/xmon.c
1342
if (cpu == last_cpu + 1) {
arch/powerpc/xmon/xmon.c
1343
last_cpu = cpu;
arch/powerpc/xmon/xmon.c
1347
last_cpu = first_cpu = cpu;
arch/powerpc/xmon/xmon.c
1348
printf(" 0x%lx", cpu);
arch/powerpc/xmon/xmon.c
1358
if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) {
arch/powerpc/xmon/xmon.c
1359
printf("cpu 0x%lx isn't in xmon\n", cpu);
arch/powerpc/xmon/xmon.c
1361
printf("backtrace of paca[0x%lx].saved_r1 (possibly stale):\n", cpu);
arch/powerpc/xmon/xmon.c
1362
xmon_show_stack(paca_ptrs[cpu]->saved_r1, 0, 0);
arch/powerpc/xmon/xmon.c
1367
return xmon_switch_cpu(cpu);
arch/powerpc/xmon/xmon.c
2590
static void dump_one_paca(int cpu)
arch/powerpc/xmon/xmon.c
2598
printf("*** Error dumping paca for cpu 0x%x!\n", cpu);
arch/powerpc/xmon/xmon.c
2605
p = paca_ptrs[cpu];
arch/powerpc/xmon/xmon.c
2607
printf("paca for cpu 0x%x @ %px:\n", cpu, p);
arch/powerpc/xmon/xmon.c
2609
printf(" %-*s = %s\n", 25, "possible", str_yes_no(cpu_possible(cpu)));
arch/powerpc/xmon/xmon.c
2610
printf(" %-*s = %s\n", 25, "present", str_yes_no(cpu_present(cpu)));
arch/powerpc/xmon/xmon.c
2611
printf(" %-*s = %s\n", 25, "online", str_yes_no(cpu_online(cpu)));
arch/powerpc/xmon/xmon.c
2734
int cpu;
arch/powerpc/xmon/xmon.c
2741
for_each_possible_cpu(cpu)
arch/powerpc/xmon/xmon.c
2742
dump_one_paca(cpu);
arch/powerpc/xmon/xmon.c
2766
static void dump_one_xive(int cpu)
arch/powerpc/xmon/xmon.c
2768
unsigned int hwid = get_hard_smp_processor_id(cpu);
arch/powerpc/xmon/xmon.c
2782
printf("*** Error dumping xive on cpu %d\n", cpu);
arch/powerpc/xmon/xmon.c
2788
xmon_xive_do_dump(cpu);
arch/powerpc/xmon/xmon.c
2796
int cpu;
arch/powerpc/xmon/xmon.c
2803
for_each_online_cpu(cpu)
arch/powerpc/xmon/xmon.c
2804
dump_one_xive(cpu);
arch/powerpc/xmon/xmon.c
3646
unsigned long addr, cpu;
arch/powerpc/xmon/xmon.c
3684
if (scanhex(&cpu) && cpu < num_possible_cpus()) {
arch/powerpc/xmon/xmon.c
3685
addr = (unsigned long)per_cpu_ptr(ptr, cpu);
arch/powerpc/xmon/xmon.c
3687
cpu = raw_smp_processor_id();
arch/powerpc/xmon/xmon.c
3691
printf("%s for cpu 0x%lx: %lx\n", tmp, cpu, addr);
arch/powerpc/xmon/xmon.c
488
int cpu;
arch/powerpc/xmon/xmon.c
511
cpu = smp_processor_id();
arch/powerpc/xmon/xmon.c
512
if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
arch/powerpc/xmon/xmon.c
523
cpu, regs->trap, getvecname(TRAP(regs)));
arch/powerpc/xmon/xmon.c
525
longjmp(xmon_fault_jmp[cpu], 1);
arch/powerpc/xmon/xmon.c
532
"on cpu 0x%x\n", cpu);
arch/powerpc/xmon/xmon.c
536
secondary = !(xmon_taken && cpu == xmon_owner);
arch/powerpc/xmon/xmon.c
540
xmon_fault_jmp[cpu] = recurse_jmp;
arch/powerpc/xmon/xmon.c
554
cpu, BP_NUM(bp));
arch/powerpc/xmon/xmon.c
563
cpumask_set_cpu(cpu, &cpus_in_xmon);
arch/powerpc/xmon/xmon.c
586
xmon_owner = cpu;
arch/powerpc/xmon/xmon.c
621
if (cpu == xmon_owner) {
arch/powerpc/xmon/xmon.c
628
while (cpu == xmon_owner)
arch/powerpc/xmon/xmon.c
652
cpumask_clear_cpu(cpu, &cpus_in_xmon);
arch/powerpc/xmon/xmon.c
653
xmon_fault_jmp[cpu] = NULL;
arch/riscv/include/asm/acpi.h
49
#define cpu_physical_id(cpu) cpuid_to_hartid_map(cpu)
arch/riscv/include/asm/acpi.h
63
struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu);
arch/riscv/include/asm/acpi.h
64
static inline u32 get_acpi_id_for_cpu(int cpu)
arch/riscv/include/asm/acpi.h
66
return acpi_cpu_get_madt_rintc(cpu)->uid;
arch/riscv/include/asm/acpi.h
70
unsigned int cpu, const char **isa);
arch/riscv/include/asm/acpi.h
76
static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
arch/riscv/include/asm/acpi.h
82
unsigned int cpu, const char **isa)
arch/riscv/include/asm/cpu_ops.h
23
int (*cpu_start)(unsigned int cpu,
arch/riscv/include/asm/cpu_ops.h
27
int (*cpu_is_stopped)(unsigned int cpu);
arch/riscv/include/asm/cpufeature.h
133
static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext)
arch/riscv/include/asm/cpufeature.h
141
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
arch/riscv/include/asm/cpufeature.h
144
static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext)
arch/riscv/include/asm/cpufeature.h
152
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
arch/riscv/include/asm/cpufeature.h
73
int cpu_online_unaligned_access_init(unsigned int cpu);
arch/riscv/include/asm/kvm_aia.h
130
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu);
arch/riscv/include/asm/kvm_aia.h
164
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
arch/riscv/include/asm/kvm_aia.h
166
void kvm_riscv_aia_free_hgei(int cpu, int hgei);
arch/riscv/include/asm/kvm_aia.h
91
void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu);
arch/riscv/include/asm/sbi.h
587
void sbi_send_ipi(unsigned int cpu);
arch/riscv/include/asm/smp.h
109
bool cpu_has_hotplug(unsigned int cpu);
arch/riscv/include/asm/smp.h
111
static inline bool cpu_has_hotplug(unsigned int cpu)
arch/riscv/include/asm/smp.h
26
#define cpuid_to_hartid_map(cpu) __cpuid_to_hartid_map[cpu]
arch/riscv/include/asm/smp.h
38
void arch_send_call_function_single_ipi(int cpu);
arch/riscv/include/asm/smp.h
64
#define raw_smp_processor_id() (current_thread_info()->cpu)
arch/riscv/include/asm/smp.h
68
static inline void __cpu_die(unsigned int cpu) { }
arch/riscv/include/asm/smp.h
84
static inline unsigned long cpuid_to_hartid_map(int cpu)
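
cpuid_to_hartid_map above is a plain array lookup from the logical CPU number to the RISC-V hart id; going the other way, as riscv_hartid_to_cpuid in cpu.c (indexed below) does, is a linear scan. A self-contained sketch with a hypothetical map array:

#include <linux/cpumask.h>
#include <linux/threads.h>
#include <linux/errno.h>

static unsigned long demo_cpuid_to_hartid[NR_CPUS];	/* hypothetical map */

static int demo_hartid_to_cpuid(unsigned long hartid)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (demo_cpuid_to_hartid[cpu] == hartid)
			return cpu;
	return -ENOENT;	/* no logical CPU owns this hart */
}
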
arch/riscv/include/asm/thread_info.h
63
int cpu;
arch/riscv/include/asm/vendor_extensions.h
101
return __riscv_isa_vendor_extension_available(cpu, vendor, ext);
arch/riscv/include/asm/vendor_extensions.h
44
bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit);
arch/riscv/include/asm/vendor_extensions.h
45
#define riscv_cpu_isa_vendor_extension_available(cpu, vendor, ext) \
arch/riscv/include/asm/vendor_extensions.h
46
__riscv_isa_vendor_extension_available(cpu, vendor, RISCV_ISA_VENDOR_EXT_##ext)
arch/riscv/include/asm/vendor_extensions.h
78
int cpu, const unsigned long ext)
arch/riscv/include/asm/vendor_extensions.h
87
return __riscv_isa_vendor_extension_available(cpu, vendor, ext);
arch/riscv/include/asm/vendor_extensions.h
91
int cpu,
arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h
28
int cpu; \
arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h
30
for_each_cpu(cpu, (cpus)) { \
arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h
31
struct riscv_isavendorinfo *isainfo = &(per_hart_vendor_bitmap)[cpu]; \
arch/riscv/kernel/acpi.c
202
struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
arch/riscv/kernel/acpi.c
204
return &cpu_madt_rintc[cpu];
arch/riscv/kernel/acpi_numa.c
33
static int __init acpi_numa_get_nid(unsigned int cpu)
arch/riscv/kernel/acpi_numa.c
35
return acpi_early_node_map[cpu];
arch/riscv/kernel/acpi_numa.c
40
int cpu;
arch/riscv/kernel/acpi_numa.c
42
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
arch/riscv/kernel/acpi_numa.c
43
if (uid == get_acpi_id_for_cpu(cpu))
arch/riscv/kernel/acpi_numa.c
44
return cpu;
arch/riscv/kernel/acpi_numa.c
53
int cpu, pxm, node;
arch/riscv/kernel/acpi_numa.c
74
cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid);
arch/riscv/kernel/acpi_numa.c
75
if (cpu < 0)
arch/riscv/kernel/acpi_numa.c
78
acpi_early_node_map[cpu] = node;
arch/riscv/kernel/acpi_numa.c
80
cpuid_to_hartid_map(cpu), node);
arch/riscv/kernel/asm-offsets.c
40
OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
arch/riscv/kernel/asm-offsets.c
53
OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
arch/riscv/kernel/cacheinfo.c
107
np = of_cpu_device_node_get(cpu);
arch/riscv/kernel/cacheinfo.c
74
int init_cache_level(unsigned int cpu)
arch/riscv/kernel/cacheinfo.c
76
return init_of_cache_level(cpu);
arch/riscv/kernel/cacheinfo.c
79
int populate_cache_leaves(unsigned int cpu)
arch/riscv/kernel/cacheinfo.c
81
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/riscv/kernel/cacheinfo.c
89
ret = acpi_get_cache_info(cpu, &fw_levels, &split_levels);
arch/riscv/kernel/cpu-hotplug.c
19
bool cpu_has_hotplug(unsigned int cpu)
arch/riscv/kernel/cpu-hotplug.c
32
unsigned int cpu = smp_processor_id();
arch/riscv/kernel/cpu-hotplug.c
37
remove_cpu_topology(cpu);
arch/riscv/kernel/cpu-hotplug.c
38
numa_remove_cpu(cpu);
arch/riscv/kernel/cpu-hotplug.c
39
set_cpu_online(cpu, false);
arch/riscv/kernel/cpu-hotplug.c
51
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
arch/riscv/kernel/cpu-hotplug.c
55
pr_notice("CPU%u: off\n", cpu);
arch/riscv/kernel/cpu-hotplug.c
57
clear_tasks_mm_cpumask(cpu);
arch/riscv/kernel/cpu-hotplug.c
60
ret = cpu_ops->cpu_is_stopped(cpu);
arch/riscv/kernel/cpu-hotplug.c
62
pr_warn("CPU%u may not have stopped: %d\n", cpu, ret);
arch/riscv/kernel/cpu.c
195
static int riscv_cpuinfo_starting(unsigned int cpu)
arch/riscv/kernel/cpu.c
21
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
arch/riscv/kernel/cpu.c
23
return phys_id == cpuid_to_hartid_map(cpu);
arch/riscv/kernel/cpu.c
239
static void print_vendor_isa(struct seq_file *f, int cpu)
arch/riscv/kernel/cpu.c
249
if (cpu == ALL_CPUS)
arch/riscv/kernel/cpu.c
252
vendor_bitmap = &ext_list->per_hart_isa_bitmap[cpu];
arch/riscv/kernel/cpu.c
263
static void print_isa(struct seq_file *f, const unsigned long *isa_bitmap, int cpu)
arch/riscv/kernel/cpu.c
282
print_vendor_isa(f, cpu);
arch/riscv/kernel/cpu.c
32
int cpu;
arch/riscv/kernel/cpu.c
40
cpu = riscv_hartid_to_cpuid(*hart);
arch/riscv/kernel/cpu.c
41
if (cpu < 0)
arch/riscv/kernel/cpu.c
42
return cpu;
arch/riscv/kernel/cpu.c
44
if (!cpu_possible(cpu))
arch/riscv/kernel/cpufeature.c
1023
unsigned int cpu;
arch/riscv/kernel/cpufeature.c
1026
for_each_possible_cpu(cpu) {
arch/riscv/kernel/cpufeature.c
1029
struct riscv_isainfo *isainfo = &hart_isa[cpu];
arch/riscv/kernel/cpufeature.c
1032
cpu_node = of_cpu_device_node_get(cpu);
arch/riscv/kernel/cpufeature.c
1054
riscv_fill_cpu_vendor_ext(cpu_node, cpu);
arch/riscv/kernel/cpufeature.c
1072
riscv_fill_vendor_ext_list(cpu);
arch/riscv/kernel/cpufeature.c
838
unsigned int cpu;
arch/riscv/kernel/cpufeature.c
851
for_each_possible_cpu(cpu) {
arch/riscv/kernel/cpufeature.c
852
struct riscv_isainfo *isainfo = &hart_isa[cpu];
arch/riscv/kernel/cpufeature.c
857
node = of_cpu_device_node_get(cpu);
arch/riscv/kernel/cpufeature.c
870
rc = acpi_get_riscv_isa(rhct, cpu, &isa);
arch/riscv/kernel/cpufeature.c
872
pr_warn("Unable to get ISA for the hart - %d\n", cpu);
arch/riscv/kernel/cpufeature.c
926
static void __init riscv_fill_cpu_vendor_ext(struct device_node *cpu_node, int cpu)
arch/riscv/kernel/cpufeature.c
936
struct riscv_isavendorinfo *isavendorinfo = &ext_list->per_hart_isa_bitmap[cpu];
arch/riscv/kernel/cpufeature.c
959
static void __init riscv_fill_vendor_ext_list(int cpu)
arch/riscv/kernel/cpufeature.c
969
ext_list->per_hart_isa_bitmap[cpu].isa,
arch/riscv/kernel/cpufeature.c
975
ext_list->per_hart_isa_bitmap[cpu].isa,
arch/riscv/kernel/cpufeature.c
983
int cpu;
arch/riscv/kernel/cpufeature.c
991
for_each_possible_cpu(cpu) {
arch/riscv/kernel/cpufeature.c
994
cpu_node = of_cpu_device_node_get(cpu);
arch/riscv/kernel/irq.c
100
per_cpu(irq_stack_ptr, cpu) = p;
arch/riscv/kernel/irq.c
109
int cpu;
arch/riscv/kernel/irq.c
111
for_each_possible_cpu(cpu)
arch/riscv/kernel/irq.c
112
per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
arch/riscv/kernel/irq.c
80
int cpu;
arch/riscv/kernel/irq.c
85
for_each_possible_cpu(cpu)
arch/riscv/kernel/irq.c
86
per_cpu(irq_shadow_call_stack_ptr, cpu) =
arch/riscv/kernel/irq.c
87
scs_alloc(cpu_to_node(cpu));
arch/riscv/kernel/irq.c
95
int cpu;
arch/riscv/kernel/irq.c
98
for_each_possible_cpu(cpu) {
arch/riscv/kernel/irq.c
99
p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
arch/riscv/kernel/paravirt.c
66
static int pv_time_cpu_online(unsigned int cpu)
arch/riscv/kernel/paravirt.c
76
static int pv_time_cpu_down_prepare(unsigned int cpu)
arch/riscv/kernel/paravirt.c
82
static u64 pv_time_steal_clock(int cpu)
arch/riscv/kernel/paravirt.c
84
struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu);
arch/riscv/kernel/sbi-ipi.c
33
static int sbi_ipi_starting_cpu(unsigned int cpu)
arch/riscv/kernel/sbi.c
106
static void __sbi_send_ipi_v01(unsigned int cpu)
arch/riscv/kernel/sbi.c
109
__sbi_v01_cpumask_to_hartmask(cpumask_of(cpu));
arch/riscv/kernel/sbi.c
160
static void __sbi_send_ipi_v01(unsigned int cpu)
arch/riscv/kernel/sbi.c
190
static void __sbi_send_ipi_v02(unsigned int cpu)
arch/riscv/kernel/sbi.c
196
1UL, cpuid_to_hartid_map(cpu), 0, 0, 0, 0);
arch/riscv/kernel/sbi.c
200
__func__, cpuid_to_hartid_map(cpu), result);
arch/riscv/kernel/sbi.c
22
static void (*__sbi_send_ipi)(unsigned int cpu) __ro_after_init;
arch/riscv/kernel/sbi.c
387
void sbi_send_ipi(unsigned int cpu)
arch/riscv/kernel/sbi.c
389
__sbi_send_ipi(cpu);
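
The sbi.c entries show a probe-once dispatch idiom: pick a backend for the detected SBI version, latch it into an __ro_after_init function pointer, and call through it from then on. A condensed sketch; all demo_* names are invented:

#include <linux/init.h>
#include <linux/cache.h>	/* __ro_after_init */

static void demo_send_v01(unsigned int cpu) { /* legacy path */ }
static void demo_send_v02(unsigned int cpu) { /* IPI extension path */ }

static void (*demo_send_ipi)(unsigned int cpu) __ro_after_init;

static void __init demo_ipi_probe(bool have_ipi_ext)
{
	/* Chosen once at boot; the pointer is effectively read-only after. */
	demo_send_ipi = have_ipi_ext ? demo_send_v02 : demo_send_v01;
}

void demo_sbi_send_ipi(unsigned int cpu)
{
	demo_send_ipi(cpu);
}
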
arch/riscv/kernel/setup.c
376
bool arch_cpu_is_hotpluggable(int cpu)
arch/riscv/kernel/setup.c
378
return cpu_has_hotplug(cpu);
arch/riscv/kernel/smp.c
101
if (cpu_has_hotplug(cpu))
arch/riscv/kernel/smp.c
109
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
arch/riscv/kernel/smp.c
120
static void send_ipi_single(int cpu, enum ipi_message_type op)
arch/riscv/kernel/smp.c
122
__ipi_send_mask(ipi_desc[op], cpumask_of(cpu));
arch/riscv/kernel/smp.c
134
unsigned int cpu = smp_processor_id();
arch/riscv/kernel/smp.c
148
ipi_cpu_crash_stop(cpu, get_irq_regs());
arch/riscv/kernel/smp.c
162
kgdb_nmicallback(cpu, get_irq_regs());
arch/riscv/kernel/smp.c
165
pr_warn("CPU%d: unhandled IPI%d\n", cpu, ipi);
arch/riscv/kernel/smp.c
226
unsigned int cpu, i;
arch/riscv/kernel/smp.c
231
for_each_online_cpu(cpu)
arch/riscv/kernel/smp.c
232
seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
arch/riscv/kernel/smp.c
242
void arch_send_call_function_single_ipi(int cpu)
arch/riscv/kernel/smp.c
244
send_ipi_single(cpu, IPI_CALL_FUNC);
arch/riscv/kernel/smp.c
337
void arch_smp_send_reschedule(int cpu)
arch/riscv/kernel/smp.c
339
send_ipi_single(cpu, IPI_RESCHEDULE);
arch/riscv/kernel/smp.c
357
int cpu;
arch/riscv/kernel/smp.c
359
for_each_online_cpu(cpu) {
arch/riscv/kernel/smp.c
361
if (cpu == this_cpu)
arch/riscv/kernel/smp.c
364
send_ipi_single(cpu, IPI_KGDB_ROUNDUP);
arch/riscv/kernel/smp.c
92
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
arch/riscv/kernel/smp.c
94
crash_save_cpu(regs, cpu);
arch/riscv/kernel/smpboot.c
176
static int start_secondary_cpu(int cpu, struct task_struct *tidle)
arch/riscv/kernel/smpboot.c
179
return cpu_ops->cpu_start(cpu, tidle);
arch/riscv/kernel/smpboot.c
185
int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
arch/riscv/kernel/smpboot.c
187
return start_secondary_cpu(cpu, tidle);
arch/riscv/kernel/smpboot.c
190
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
arch/riscv/kernel/smpboot.c
193
tidle->thread_info.cpu = cpu;
arch/riscv/kernel/smpboot.c
195
ret = start_secondary_cpu(cpu, tidle);
arch/riscv/kernel/smpboot.c
200
if (!cpu_online(cpu)) {
arch/riscv/kernel/smpboot.c
201
pr_crit("CPU%u: failed to come online\n", cpu);
arch/riscv/kernel/smpboot.c
205
pr_crit("CPU%u: failed to start\n", cpu);
arch/riscv/kernel/sys_hwprobe.c
101
for_each_cpu(cpu, cpus) {
arch/riscv/kernel/sys_hwprobe.c
102
struct riscv_isainfo *isainfo = &hart_isa[cpu];
arch/riscv/kernel/sys_hwprobe.c
189
int cpu;
arch/riscv/kernel/sys_hwprobe.c
198
for_each_cpu(cpu, cpus) {
arch/riscv/kernel/sys_hwprobe.c
199
struct riscv_isainfo *isainfo = &hart_isa[cpu];
arch/riscv/kernel/sys_hwprobe.c
225
int cpu;
arch/riscv/kernel/sys_hwprobe.c
228
for_each_cpu(cpu, cpus) {
arch/riscv/kernel/sys_hwprobe.c
229
int this_perf = per_cpu(misaligned_access_speed, cpu);
arch/riscv/kernel/sys_hwprobe.c
261
int cpu;
arch/riscv/kernel/sys_hwprobe.c
265
for_each_cpu(cpu, cpus) {
arch/riscv/kernel/sys_hwprobe.c
266
int this_perf = per_cpu(vector_misaligned_access, cpu);
arch/riscv/kernel/sys_hwprobe.c
40
int cpu;
arch/riscv/kernel/sys_hwprobe.c
466
int cpu;
arch/riscv/kernel/sys_hwprobe.c
47
for_each_cpu(cpu, cpus) {
arch/riscv/kernel/sys_hwprobe.c
485
for_each_cpu(cpu, &cpus) {
arch/riscv/kernel/sys_hwprobe.c
486
cpumask_set_cpu(cpu, &one_cpu);
arch/riscv/kernel/sys_hwprobe.c
491
cpumask_clear_cpu(cpu, &cpus);
arch/riscv/kernel/sys_hwprobe.c
493
cpumask_clear_cpu(cpu, &one_cpu);
arch/riscv/kernel/sys_hwprobe.c
52
cpu_id = riscv_cached_mvendorid(cpu);
arch/riscv/kernel/sys_hwprobe.c
55
cpu_id = riscv_cached_mimpid(cpu);
arch/riscv/kernel/sys_hwprobe.c
58
cpu_id = riscv_cached_marchid(cpu);
arch/riscv/kernel/sys_hwprobe.c
84
int cpu;
arch/riscv/kernel/time.c
22
struct device_node *cpu;
arch/riscv/kernel/time.c
28
cpu = of_find_node_by_path("/cpus");
arch/riscv/kernel/time.c
29
if (!cpu || of_property_read_u32(cpu, "timebase-frequency", &prop))
arch/riscv/kernel/time.c
32
of_node_put(cpu);
arch/riscv/kernel/traps_misaligned.c
484
int cpu;
arch/riscv/kernel/traps_misaligned.c
492
for_each_online_cpu(cpu)
arch/riscv/kernel/traps_misaligned.c
493
if (per_cpu(vector_misaligned_access, cpu)
arch/riscv/kernel/traps_misaligned.c
508
int cpu;
arch/riscv/kernel/traps_misaligned.c
510
for_each_online_cpu(cpu)
arch/riscv/kernel/traps_misaligned.c
511
if (per_cpu(misaligned_access_speed, cpu) !=
arch/riscv/kernel/traps_misaligned.c
524
int cpu = smp_processor_id();
arch/riscv/kernel/traps_misaligned.c
525
long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
arch/riscv/kernel/traps_misaligned.c
535
static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
arch/riscv/kernel/traps_misaligned.c
537
long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
arch/riscv/kernel/traps_misaligned.c
579
static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
arch/riscv/kernel/traps_misaligned.c
589
static int cpu_online_sbi_unaligned_setup(unsigned int cpu)
arch/riscv/kernel/traps_misaligned.c
620
static int cpu_online_sbi_unaligned_setup(unsigned int cpu __always_unused)
arch/riscv/kernel/traps_misaligned.c
627
int cpu_online_unaligned_access_init(unsigned int cpu)
arch/riscv/kernel/traps_misaligned.c
631
ret = cpu_online_sbi_unaligned_setup(cpu);
arch/riscv/kernel/traps_misaligned.c
635
return cpu_online_check_unaligned_access_emulated(cpu);
arch/riscv/kernel/unaligned_access_speed.c
109
cpu,
arch/riscv/kernel/unaligned_access_speed.c
114
per_cpu(misaligned_access_speed, cpu) = speed;
arch/riscv/kernel/unaligned_access_speed.c
121
cpumask_set_cpu(cpu, &fast_misaligned_access);
arch/riscv/kernel/unaligned_access_speed.c
123
cpumask_clear_cpu(cpu, &fast_misaligned_access);
arch/riscv/kernel/unaligned_access_speed.c
130
unsigned int cpu = smp_processor_id();
arch/riscv/kernel/unaligned_access_speed.c
134
check_unaligned_access(pages[cpu]);
arch/riscv/kernel/unaligned_access_speed.c
140
unsigned int cpu;
arch/riscv/kernel/unaligned_access_speed.c
153
for_each_cpu(cpu, cpu_online_mask) {
arch/riscv/kernel/unaligned_access_speed.c
154
bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
arch/riscv/kernel/unaligned_access_speed.c
155
if (!bufs[cpu]) {
arch/riscv/kernel/unaligned_access_speed.c
168
for_each_cpu(cpu, cpu_online_mask) {
arch/riscv/kernel/unaligned_access_speed.c
169
if (bufs[cpu])
arch/riscv/kernel/unaligned_access_speed.c
170
__free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
arch/riscv/kernel/unaligned_access_speed.c
191
static void set_unaligned_access_static_branches_except_cpu(int cpu)
arch/riscv/kernel/unaligned_access_speed.c
203
cpumask_clear_cpu(cpu, &fast_except_me);
arch/riscv/kernel/unaligned_access_speed.c
237
static int riscv_online_cpu(unsigned int cpu)
arch/riscv/kernel/unaligned_access_speed.c
239
int ret = cpu_online_unaligned_access_init(cpu);
arch/riscv/kernel/unaligned_access_speed.c
245
if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
arch/riscv/kernel/unaligned_access_speed.c
248
per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param;
arch/riscv/kernel/unaligned_access_speed.c
273
static int riscv_offline_cpu(unsigned int cpu)
arch/riscv/kernel/unaligned_access_speed.c
275
set_unaligned_access_static_branches_except_cpu(cpu);
arch/riscv/kernel/unaligned_access_speed.c
283
int cpu = smp_processor_id();
arch/riscv/kernel/unaligned_access_speed.c
294
if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
arch/riscv/kernel/unaligned_access_speed.c
35
int cpu = smp_processor_id();
arch/riscv/kernel/unaligned_access_speed.c
357
cpu);
arch/riscv/kernel/unaligned_access_speed.c
367
cpu,
arch/riscv/kernel/unaligned_access_speed.c
372
per_cpu(vector_misaligned_access, cpu) = speed;
arch/riscv/kernel/unaligned_access_speed.c
393
static int riscv_online_cpu_vec(unsigned int cpu)
arch/riscv/kernel/unaligned_access_speed.c
396
per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
arch/riscv/kernel/unaligned_access_speed.c
401
if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
arch/riscv/kernel/unaligned_access_speed.c
445
int cpu;
arch/riscv/kernel/unaligned_access_speed.c
452
for_each_online_cpu(cpu)
arch/riscv/kernel/unaligned_access_speed.c
453
per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param;
arch/riscv/kernel/unaligned_access_speed.c
46
if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
arch/riscv/kernel/unaligned_access_speed.c
473
for_each_online_cpu(cpu)
arch/riscv/kernel/unaligned_access_speed.c
474
per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
arch/riscv/kernel/unaligned_access_speed.c
99
cpu);
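
riscv_online_cpu()/riscv_offline_cpu() above are paired CPU-hotplug callbacks. Registering such a pair on the dynamic application-processor state looks roughly like this; the callback bodies and names are placeholders:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int demo_online(unsigned int cpu)
{
	return 0;	/* e.g. re-apply per-CPU tuning for the new CPU */
}

static int demo_offline(unsigned int cpu)
{
	return 0;	/* e.g. drop the CPU from derived cpumasks */
}

static int __init demo_hotplug_init(void)
{
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				    demo_online, demo_offline);
	return ret < 0 ? ret : 0;	/* dynamic states return a state id */
}
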
arch/riscv/kernel/vendor_extensions.c
44
bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit)
arch/riscv/kernel/vendor_extensions.c
78
if (cpu != -1)
arch/riscv/kernel/vendor_extensions.c
79
bmap = &cpu_bmap[cpu];
arch/riscv/kernel/vendor_extensions/thead.c
23
int cpu;
arch/riscv/kernel/vendor_extensions/thead.c
25
for_each_possible_cpu(cpu)
arch/riscv/kernel/vendor_extensions/thead.c
26
clear_bit(RISCV_ISA_VENDOR_EXT_XTHEADVECTOR, riscv_isa_vendor_ext_list_thead.per_hart_isa_bitmap[cpu].isa);
arch/riscv/kvm/aia.c
111
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
arch/riscv/kvm/aia.c
143
kvm_riscv_vcpu_aia_imsic_load(vcpu, cpu);
arch/riscv/kvm/aia.c
411
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
arch/riscv/kvm/aia.c
418
struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
arch/riscv/kvm/aia.c
434
lc = (gc) ? per_cpu_ptr(gc->local, cpu) : NULL;
arch/riscv/kvm/aia.c
445
void kvm_riscv_aia_free_hgei(int cpu, int hgei)
arch/riscv/kvm/aia.c
448
struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
arch/riscv/kvm/aia.c
489
int cpu, rc;
arch/riscv/kvm/aia.c
494
for_each_possible_cpu(cpu) {
arch/riscv/kvm/aia.c
495
hgctrl = per_cpu_ptr(&aia_hgei, cpu);
arch/riscv/kvm/aia_imsic.c
701
if (imsic->vsfile_cpu != vcpu->cpu)
arch/riscv/kvm/aia_imsic.c
711
void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu)
arch/riscv/kvm/aia_imsic.c
811
if (old_vsfile_cpu == vcpu->cpu)
arch/riscv/kvm/aia_imsic.c
815
ret = kvm_riscv_aia_alloc_hgei(vcpu->cpu, vcpu,
arch/riscv/kvm/aia_imsic.c
822
run->fail_entry.cpu = vcpu->cpu;
arch/riscv/kvm/aia_imsic.c
860
imsic->vsfile_cpu = vcpu->cpu;
arch/riscv/kvm/aia_imsic.c
898
kvm_riscv_aia_free_hgei(vcpu->cpu, new_vsfile_hgei);
arch/riscv/kvm/nacl.c
116
int cpu;
arch/riscv/kvm/nacl.c
138
for_each_possible_cpu(cpu) {
arch/riscv/kvm/nacl.c
139
nacl = per_cpu_ptr(&kvm_riscv_nacl, cpu);
arch/riscv/kvm/nacl.c
83
int cpu;
arch/riscv/kvm/nacl.c
90
for_each_possible_cpu(cpu) {
arch/riscv/kvm/nacl.c
91
nacl = per_cpu_ptr(&kvm_riscv_nacl, cpu);
arch/riscv/kvm/tlb.c
166
vcpu->arch.last_exit_cpu == vcpu->cpu)
arch/riscv/kvm/vcpu.c
578
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/riscv/kvm/vcpu.c
636
kvm_riscv_vcpu_aia_load(vcpu, cpu);
arch/riscv/kvm/vcpu.c
640
vcpu->cpu = cpu;
arch/riscv/kvm/vcpu.c
648
vcpu->cpu = -1;
arch/riscv/kvm/vcpu.c
859
vcpu->arch.last_exit_cpu = vcpu->cpu;
arch/riscv/kvm/vcpu.c
89
loaded = (vcpu->cpu != -1);
arch/riscv/mm/cacheflush.c
182
int cpu = get_cpu();
arch/riscv/mm/cacheflush.c
193
stale_cpu = cpumask_test_cpu(cpu, mask);
arch/riscv/mm/cacheflush.c
196
__assign_cpu(cpu, mask, stale_cpu);
arch/riscv/mm/cacheflush.c
59
unsigned int cpu;
arch/riscv/mm/cacheflush.c
68
cpu = smp_processor_id();
arch/riscv/mm/cacheflush.c
69
cpumask_clear_cpu(cpu, mask);
arch/riscv/mm/cacheflush.c
76
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
arch/riscv/mm/context.c
144
static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
arch/riscv/mm/context.c
168
old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
arch/riscv/mm/context.c
171
atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
arch/riscv/mm/context.c
184
if (cpumask_test_and_clear_cpu(cpu, &context_tlb_flush_pending))
arch/riscv/mm/context.c
187
atomic_long_set(&per_cpu(active_context, cpu), cntx);
arch/riscv/mm/context.c
208
struct mm_struct *next, unsigned int cpu)
arch/riscv/mm/context.c
218
cpumask_set_cpu(cpu, mm_cpumask(next));
arch/riscv/mm/context.c
220
set_mm_asid(next, cpu);
arch/riscv/mm/context.c
222
cpumask_clear_cpu(cpu, mm_cpumask(prev));
arch/riscv/mm/context.c
278
struct mm_struct *next, unsigned int cpu)
arch/riscv/mm/context.c
298
static inline void flush_icache_deferred(struct mm_struct *mm, unsigned int cpu,
arch/riscv/mm/context.c
302
if (cpumask_test_and_clear_cpu(cpu, &mm->context.icache_stale_mask)) {
arch/riscv/mm/context.c
321
unsigned int cpu;
arch/riscv/mm/context.c
333
cpu = smp_processor_id();
arch/riscv/mm/context.c
335
set_mm(prev, next, cpu);
arch/riscv/mm/context.c
337
flush_icache_deferred(next, cpu, task);
arch/riscv/mm/context.c
38
int cpu;
arch/riscv/mm/context.c
50
for_each_possible_cpu(cpu) {
arch/riscv/mm/context.c
51
if (per_cpu(reserved_context, cpu) == cntx) {
arch/riscv/mm/context.c
53
per_cpu(reserved_context, cpu) = newcntx;
arch/riscv/mm/tlbflush.c
124
unsigned int cpu;
arch/riscv/mm/tlbflush.c
129
cpu = get_cpu();
arch/riscv/mm/tlbflush.c
132
if (cpumask_any_but(cmask, cpu) >= nr_cpu_ids) {
arch/riscv/net/bpf_jit_comp64.c
1399
emit_lw(RV_REG_T1, offsetof(struct thread_info, cpu),
arch/riscv/net/bpf_jit_comp64.c
1806
emit_lw(bpf_to_rv_reg(BPF_REG_0, ctx), offsetof(struct thread_info, cpu),
arch/s390/boot/startup.c
87
unsigned int cpu;
arch/s390/boot/startup.c
90
cpu = stap();
arch/s390/boot/startup.c
97
: [cpu] "d" (cpu)
arch/s390/hypfs/hypfs_diag0c.c
33
unsigned int cpu_count, cpu, i;
arch/s390/hypfs/hypfs_diag0c.c
48
for_each_online_cpu(cpu) {
arch/s390/hypfs/hypfs_diag0c.c
49
diag0c_data->entry[i].cpu = cpu;
arch/s390/hypfs/hypfs_diag0c.c
50
cpu_vec[cpu] = &diag0c_data->entry[i++];
arch/s390/include/asm/abs_lowcore.h
12
int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc);
arch/s390/include/asm/abs_lowcore.h
13
void abs_lowcore_unmap(int cpu);
arch/s390/include/asm/abs_lowcore.h
17
int cpu;
arch/s390/include/asm/abs_lowcore.h
19
cpu = get_cpu();
arch/s390/include/asm/abs_lowcore.h
20
return ((struct lowcore *)__abs_lowcore) + cpu;
arch/s390/include/asm/debug.h
37
unsigned short cpu;
arch/s390/include/asm/hiperdispatch.h
10
void hd_add_core(int cpu);
arch/s390/include/asm/kvm_host_types.h
76
struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
arch/s390/include/asm/kvm_host_types.h
86
struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
arch/s390/include/asm/mmu_context.h
70
int cpu = smp_processor_id();
arch/s390/include/asm/mmu_context.h
76
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
arch/s390/include/asm/mmu_context.h
81
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
arch/s390/include/asm/preempt.h
144
#define init_idle_preempt_count(p, cpu) do { } while (0)
arch/s390/include/asm/processor.h
94
static __always_inline bool test_cpu_flag_of(int flag, int cpu)
arch/s390/include/asm/processor.h
96
return test_bit(flag, &per_cpu(pcpu_devices, cpu).flags);
arch/s390/include/asm/smp.h
18
unsigned int cpu;
arch/s390/include/asm/smp.h
20
BUILD_BUG_ON(sizeof_field(struct lowcore, cpu_nr) != sizeof(cpu));
arch/s390/include/asm/smp.h
26
: [cpu] "=d" (cpu)
arch/s390/include/asm/smp.h
30
return cpu;
arch/s390/include/asm/smp.h
41
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
arch/s390/include/asm/smp.h
43
extern void arch_send_call_function_single_ipi(int cpu);
arch/s390/include/asm/smp.h
50
extern int smp_store_status(int cpu);
arch/s390/include/asm/smp.h
53
extern void smp_yield_cpu(int cpu);
arch/s390/include/asm/smp.h
54
extern void smp_cpu_set_polarization(int cpu, int val);
arch/s390/include/asm/smp.h
55
extern int smp_cpu_get_polarization(int cpu);
arch/s390/include/asm/smp.h
56
extern void smp_cpu_set_capacity(int cpu, unsigned long val);
arch/s390/include/asm/smp.h
57
extern void smp_set_core_capacity(int cpu, unsigned long val);
arch/s390/include/asm/smp.h
58
extern unsigned long smp_cpu_get_capacity(int cpu);
arch/s390/include/asm/smp.h
59
extern int smp_cpu_get_cpu_address(int cpu);
arch/s390/include/asm/smp.h
74
static inline int smp_get_base_cpu(int cpu)
arch/s390/include/asm/smp.h
76
return cpu - (cpu % (smp_cpu_mtid + 1));
arch/s390/include/asm/smp.h
85
extern void __cpu_die(unsigned int cpu);
arch/s390/include/asm/smp.h
88
void notrace smp_yield_cpu(int cpu);
arch/s390/include/asm/spinlock.h
39
bool arch_vcpu_is_preempted(int cpu);
arch/s390/include/asm/spinlock.h
57
void arch_spin_lock_setup(int cpu);
arch/s390/include/asm/spinlock.h
59
static inline u32 arch_spin_lockval(int cpu)
arch/s390/include/asm/spinlock.h
61
return cpu + 1;
arch/s390/include/asm/sysinfo.h
203
struct topology_core cpu;
arch/s390/include/asm/thread_info.h
38
unsigned int cpu; /* current CPU */
arch/s390/include/asm/topology.h
29
#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
arch/s390/include/asm/topology.h
30
#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
arch/s390/include/asm/topology.h
31
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
arch/s390/include/asm/topology.h
32
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
arch/s390/include/asm/topology.h
33
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
arch/s390/include/asm/topology.h
34
#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
arch/s390/include/asm/topology.h
35
#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
arch/s390/include/asm/topology.h
36
#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
arch/s390/include/asm/topology.h
37
#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
arch/s390/include/asm/topology.h
38
#define topology_cpu_dedicated(cpu) (cpu_topology[cpu].dedicated)
arch/s390/include/asm/topology.h
39
#define topology_booted_cores(cpu) (cpu_topology[cpu].booted_cores)
arch/s390/include/asm/topology.h
44
int topology_cpu_init(struct cpu *);
arch/s390/include/asm/topology.h
50
const struct cpumask *cpu_coregroup_mask(int cpu);
arch/s390/include/asm/topology.h
56
static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
arch/s390/include/asm/topology.h
64
static inline bool topology_is_primary_thread(unsigned int cpu)
arch/s390/include/asm/topology.h
66
return smp_get_base_cpu(cpu) == cpu;
arch/s390/include/asm/topology.h
84
static inline int cpu_to_node(int cpu)
arch/s390/include/asm/topology.h
9
struct cpu;
arch/s390/include/uapi/asm/hypfs.h
46
__u32 cpu; /* Linux logical CPU number */
arch/s390/kernel/abs_lowcore.c
11
unsigned long addr = __abs_lowcore + (cpu * sizeof(struct lowcore));
arch/s390/kernel/abs_lowcore.c
38
void abs_lowcore_unmap(int cpu)
arch/s390/kernel/abs_lowcore.c
40
unsigned long addr = __abs_lowcore + (cpu * sizeof(struct lowcore));
arch/s390/kernel/abs_lowcore.c
9
int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc)
arch/s390/kernel/cache.c
104
enum cache_type type, unsigned int level, int cpu)
arch/s390/kernel/cache.c
120
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
arch/s390/kernel/cache.c
125
int init_cache_level(unsigned int cpu)
arch/s390/kernel/cache.c
127
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/s390/kernel/cache.c
147
int populate_cache_leaves(unsigned int cpu)
arch/s390/kernel/cache.c
149
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/s390/kernel/cache.c
163
ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
arch/s390/kernel/cache.c
164
ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
arch/s390/kernel/cache.c
166
ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
arch/s390/kernel/crash_dump.c
296
static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
arch/s390/kernel/crash_dump.c
306
nt_prstatus.common.pr_pid = cpu;
arch/s390/kernel/crash_dump.c
572
int cpu;
arch/s390/kernel/crash_dump.c
576
cpu = 1;
arch/s390/kernel/crash_dump.c
579
ptr = fill_cpu_elf_notes(ptr, cpu++, sa);
arch/s390/kernel/debug.c
1094
active->cpu = smp_processor_id();
arch/s390/kernel/debug.c
1665
entry->cpu, (void *)caller);
arch/s390/kernel/diag/diag.c
81
int cpu, prec, tmp;
arch/s390/kernel/diag/diag.c
87
for_each_online_cpu(cpu) {
arch/s390/kernel/diag/diag.c
89
for (tmp = 10; cpu >= tmp; tmp *= 10)
arch/s390/kernel/diag/diag.c
91
seq_printf(m, "%*s%d", prec, "CPU", cpu);
arch/s390/kernel/diag/diag.c
96
for_each_online_cpu(cpu) {
arch/s390/kernel/diag/diag.c
97
stat = &per_cpu(diag_stat, cpu);
arch/s390/kernel/hiperdispatch.c
106
void hd_add_core(int cpu)
arch/s390/kernel/hiperdispatch.c
112
polarization = smp_cpu_get_polarization(cpu);
arch/s390/kernel/hiperdispatch.c
113
siblings = topology_sibling_cpumask(cpu);
arch/s390/kernel/hiperdispatch.c
123
cpumask_set_cpu(cpu, &hd_vl_coremask);
arch/s390/kernel/hiperdispatch.c
158
int cpu, upscaling_cores;
arch/s390/kernel/hiperdispatch.c
164
for_each_cpu(cpu, &hd_vl_coremask) {
arch/s390/kernel/hiperdispatch.c
165
smp_set_core_capacity(cpu, capacity);
arch/s390/kernel/hiperdispatch.c
210
int cpus, cpu;
arch/s390/kernel/hiperdispatch.c
216
for_each_cpu(cpu, &hd_vmvl_cpumask) {
arch/s390/kernel/hiperdispatch.c
217
steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
arch/s390/kernel/irq.c
221
int cpu;
arch/s390/kernel/irq.c
230
for_each_online_cpu(cpu)
arch/s390/kernel/irq.c
231
seq_printf(p, "%10u ", irq_desc_kstat_cpu(desc, cpu));
arch/s390/kernel/irq.c
251
int cpu, irq;
arch/s390/kernel/irq.c
256
for_each_online_cpu(cpu)
arch/s390/kernel/irq.c
257
seq_printf(p, "CPU%-8d", cpu);
arch/s390/kernel/irq.c
263
for_each_online_cpu(cpu)
arch/s390/kernel/irq.c
264
seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
arch/s390/kernel/irq.c
275
for_each_online_cpu(cpu)
arch/s390/kernel/irq.c
277
per_cpu(irq_stat, cpu).irqs[irq]);
arch/s390/kernel/machine_kexec.c
84
int this_cpu, cpu;
arch/s390/kernel/machine_kexec.c
89
for_each_online_cpu(cpu) {
arch/s390/kernel/machine_kexec.c
90
if (cpu == this_cpu)
arch/s390/kernel/machine_kexec.c
92
if (smp_store_status(cpu))
arch/s390/kernel/perf_cpum_cf.c
1099
static int cfset_online_cpu(unsigned int cpu);
arch/s390/kernel/perf_cpum_cf.c
1101
static int cpum_cf_online_cpu(unsigned int cpu)
arch/s390/kernel/perf_cpum_cf.c
1111
rc = cpum_cf_alloc_cpu(cpu);
arch/s390/kernel/perf_cpum_cf.c
1113
cfset_online_cpu(cpu);
arch/s390/kernel/perf_cpum_cf.c
1119
static int cfset_offline_cpu(unsigned int cpu);
arch/s390/kernel/perf_cpum_cf.c
1121
static int cpum_cf_offline_cpu(unsigned int cpu)
arch/s390/kernel/perf_cpum_cf.c
1134
cfset_offline_cpu(cpu);
arch/s390/kernel/perf_cpum_cf.c
1135
cpum_cf_free_cpu(cpu);
arch/s390/kernel/perf_cpum_cf.c
145
static struct cpu_cf_events *get_cpu_cfhw(int cpu)
arch/s390/kernel/perf_cpum_cf.c
1477
unsigned int cpu, cpus, rc = 0;
arch/s390/kernel/perf_cpum_cf.c
1482
for_each_cpu(cpu, mask) {
arch/s390/kernel/perf_cpum_cf.c
1483
struct cpu_cf_events *cpuhw = get_cpu_cfhw(cpu);
arch/s390/kernel/perf_cpum_cf.c
1487
rc = put_user(cpu, &ctrset_cpudata->cpu_nr);
arch/s390/kernel/perf_cpum_cf.c
150
struct cpu_cf_ptr *q = per_cpu_ptr(p, cpu);
arch/s390/kernel/perf_cpum_cf.c
1703
static int cfset_online_cpu(unsigned int cpu)
arch/s390/kernel/perf_cpum_cf.c
1712
cpumask_set_cpu(cpu, &rp->mask);
arch/s390/kernel/perf_cpum_cf.c
1722
static int cfset_offline_cpu(unsigned int cpu)
arch/s390/kernel/perf_cpum_cf.c
1731
cpumask_clear_cpu(cpu, &rp->mask);
arch/s390/kernel/perf_cpum_cf.c
1807
if (cpum_cf_alloc(event->cpu))
arch/s390/kernel/perf_cpum_cf.c
209
static void cpum_cf_free_cpu(int cpu)
arch/s390/kernel/perf_cpum_cf.c
222
p = per_cpu_ptr(cpu_cf_root.cfptr, cpu);
arch/s390/kernel/perf_cpum_cf.c
241
static int cpum_cf_alloc_cpu(int cpu)
arch/s390/kernel/perf_cpum_cf.c
251
p = per_cpu_ptr(cpu_cf_root.cfptr, cpu);
arch/s390/kernel/perf_cpum_cf.c
287
static int cpum_cf_alloc(int cpu)
arch/s390/kernel/perf_cpum_cf.c
292
if (cpu == -1) {
arch/s390/kernel/perf_cpum_cf.c
295
for_each_online_cpu(cpu) {
arch/s390/kernel/perf_cpum_cf.c
296
rc = cpum_cf_alloc_cpu(cpu);
arch/s390/kernel/perf_cpum_cf.c
298
for_each_cpu(cpu, mask)
arch/s390/kernel/perf_cpum_cf.c
299
cpum_cf_free_cpu(cpu);
arch/s390/kernel/perf_cpum_cf.c
302
cpumask_set_cpu(cpu, mask);
arch/s390/kernel/perf_cpum_cf.c
306
rc = cpum_cf_alloc_cpu(cpu);
arch/s390/kernel/perf_cpum_cf.c
311
static void cpum_cf_free(int cpu)
arch/s390/kernel/perf_cpum_cf.c
313
if (cpu == -1) {
arch/s390/kernel/perf_cpum_cf.c
314
for_each_online_cpu(cpu)
arch/s390/kernel/perf_cpum_cf.c
315
cpum_cf_free_cpu(cpu);
arch/s390/kernel/perf_cpum_cf.c
317
cpum_cf_free_cpu(cpu);
arch/s390/kernel/perf_cpum_cf.c
714
cpum_cf_free(event->cpu);
arch/s390/kernel/perf_cpum_cf.c
819
if (cpum_cf_alloc(event->cpu))
arch/s390/kernel/perf_cpum_cf.c
973
data.cpu_entry.cpu = event->cpu;
arch/s390/kernel/perf_cpum_sf.c
1703
if (event->cpu == -1) {
arch/s390/kernel/perf_cpum_sf.c
1709
struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
arch/s390/kernel/perf_cpum_sf.c
1970
static int cpusf_pmu_setup(unsigned int cpu, int flags)
arch/s390/kernel/perf_cpum_sf.c
1984
static int s390_pmu_sf_online_cpu(unsigned int cpu)
arch/s390/kernel/perf_cpum_sf.c
1986
return cpusf_pmu_setup(cpu, PMC_INIT);
arch/s390/kernel/perf_cpum_sf.c
1989
static int s390_pmu_sf_offline_cpu(unsigned int cpu)
arch/s390/kernel/perf_cpum_sf.c
1991
return cpusf_pmu_setup(cpu, PMC_RELEASE);
arch/s390/kernel/perf_cpum_sf.c
739
int cpu, err = 0;
arch/s390/kernel/perf_cpum_sf.c
760
if (event->cpu == -1) {
arch/s390/kernel/perf_cpum_sf.c
766
cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
arch/s390/kernel/perf_cpum_sf.c
819
for_each_online_cpu(cpu) {
arch/s390/kernel/perf_cpum_sf.c
820
cpuhw = &per_cpu(cpu_hw_sf, cpu);
arch/s390/kernel/perf_event.c
104
int cpu = smp_processor_id();
arch/s390/kernel/perf_event.c
109
cpu, cf_info.cfvn, cf_info.csvn,
arch/s390/kernel/perf_event.c
116
int cpu = smp_processor_id();
arch/s390/kernel/perf_event.c
123
cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate,
arch/s390/kernel/perf_event.c
128
" bsdes=%i tear=%016lx dear=%016lx\n", cpu,
arch/s390/kernel/perf_event.c
132
" dsdes=%i tear=%016lx dear=%016lx\n", cpu,
arch/s390/kernel/perf_pai.c
143
static void pai_event_destroy_cpu(struct perf_event *event, int cpu)
arch/s390/kernel/perf_pai.c
146
struct pai_mapptr *mp = per_cpu_ptr(pai_root[idx].mapptr, cpu);
arch/s390/kernel/perf_pai.c
152
event->cpu, cpump->active_events,
arch/s390/kernel/perf_pai.c
162
int cpu;
arch/s390/kernel/perf_pai.c
165
if (event->cpu == -1) {
arch/s390/kernel/perf_pai.c
168
for_each_cpu(cpu, mask)
arch/s390/kernel/perf_pai.c
169
pai_event_destroy_cpu(event, cpu);
arch/s390/kernel/perf_pai.c
172
pai_event_destroy_cpu(event, event->cpu);
arch/s390/kernel/perf_pai.c
237
static int pai_alloc_cpu(struct perf_event *event, int cpu)
arch/s390/kernel/perf_pai.c
251
mp = per_cpu_ptr(pai_root[idx].mapptr, cpu);
arch/s390/kernel/perf_pai.c
315
int cpu, rc = -ENOMEM;
arch/s390/kernel/perf_pai.c
321
for_each_online_cpu(cpu) {
arch/s390/kernel/perf_pai.c
322
rc = pai_alloc_cpu(event, cpu);
arch/s390/kernel/perf_pai.c
324
for_each_cpu(cpu, maskptr)
arch/s390/kernel/perf_pai.c
325
pai_event_destroy_cpu(event, cpu);
arch/s390/kernel/perf_pai.c
329
cpumask_set_cpu(cpu, maskptr);
arch/s390/kernel/perf_pai.c
387
if (event->cpu >= 0)
arch/s390/kernel/perf_pai.c
388
rc = pai_alloc_cpu(event, event->cpu);
arch/s390/kernel/perf_pai.c
626
data.cpu_entry.cpu = smp_processor_id();
arch/s390/kernel/processor.c
154
int i, cpu;
arch/s390/kernel/processor.c
170
for_each_online_cpu(cpu) {
arch/s390/kernel/processor.c
171
struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);
arch/s390/kernel/processor.c
177
cpu, id->version, id->ident, id->machine);
arch/s390/kernel/processor.c
71
int cpu, this_cpu;
arch/s390/kernel/processor.c
76
cpu = cpumask_next_wrap(this_cpu, cpumask);
arch/s390/kernel/processor.c
77
if (cpu >= nr_cpu_ids)
arch/s390/kernel/processor.c
79
if (arch_vcpu_is_preempted(cpu))
arch/s390/kernel/processor.c
80
smp_yield_cpu(cpu);
arch/s390/kernel/smp.c
1012
int cpu, val, rc, i;
arch/s390/kernel/smp.c
1023
cpu = dev->id;
arch/s390/kernel/smp.c
1024
cpu = smp_get_base_cpu(cpu);
arch/s390/kernel/smp.c
1026
if (cpu_online(cpu + i))
arch/s390/kernel/smp.c
1028
pcpu = per_cpu_ptr(&pcpu_devices, cpu);
arch/s390/kernel/smp.c
1038
if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
arch/s390/kernel/smp.c
1040
per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_STANDBY;
arch/s390/kernel/smp.c
1041
smp_cpu_set_polarization(cpu + i,
arch/s390/kernel/smp.c
1053
if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
arch/s390/kernel/smp.c
1055
per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_CONFIGURED;
arch/s390/kernel/smp.c
1056
smp_cpu_set_polarization(cpu + i,
arch/s390/kernel/smp.c
1098
static int smp_cpu_online(unsigned int cpu)
arch/s390/kernel/smp.c
1100
struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);
arch/s390/kernel/smp.c
1105
static int smp_cpu_pre_down(unsigned int cpu)
arch/s390/kernel/smp.c
1107
struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);
arch/s390/kernel/smp.c
1113
bool arch_cpu_is_hotpluggable(int cpu)
arch/s390/kernel/smp.c
1115
return !!cpu;
arch/s390/kernel/smp.c
1118
int arch_register_cpu(int cpu)
arch/s390/kernel/smp.c
1120
struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);
arch/s390/kernel/smp.c
1123
c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
arch/s390/kernel/smp.c
1124
rc = register_cpu(c, cpu);
arch/s390/kernel/smp.c
167
int cpu;
arch/s390/kernel/smp.c
169
for_each_cpu(cpu, mask)
arch/s390/kernel/smp.c
170
if (per_cpu(pcpu_devices, cpu).address == address)
arch/s390/kernel/smp.c
171
return &per_cpu(pcpu_devices, cpu);
arch/s390/kernel/smp.c
183
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
arch/s390/kernel/smp.c
199
lc->cpu_nr = cpu;
arch/s390/kernel/smp.c
200
lc->spinlock_lockval = arch_spin_lockval(cpu);
arch/s390/kernel/smp.c
207
if (abs_lowcore_map(cpu, lc, true))
arch/s390/kernel/smp.c
209
lowcore_ptr[cpu] = lc;
arch/s390/kernel/smp.c
223
static void pcpu_free_lowcore(struct pcpu *pcpu, int cpu)
arch/s390/kernel/smp.c
228
lc = lowcore_ptr[cpu];
arch/s390/kernel/smp.c
233
lowcore_ptr[cpu] = NULL;
arch/s390/kernel/smp.c
234
abs_lowcore_unmap(cpu);
arch/s390/kernel/smp.c
242
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
arch/s390/kernel/smp.c
246
lc = lowcore_ptr[cpu];
arch/s390/kernel/smp.c
247
cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
arch/s390/kernel/smp.c
248
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
arch/s390/kernel/smp.c
249
lc->cpu_nr = cpu;
arch/s390/kernel/smp.c
252
lc->spinlock_lockval = arch_spin_lockval(cpu);
arch/s390/kernel/smp.c
254
lc->percpu_offset = __per_cpu_offset[cpu];
arch/s390/kernel/smp.c
265
arch_spin_lock_setup(cpu);
arch/s390/kernel/smp.c
268
static void pcpu_attach_task(int cpu, struct task_struct *tsk)
arch/s390/kernel/smp.c
272
lc = lowcore_ptr[cpu];
arch/s390/kernel/smp.c
288
static void pcpu_start_fn(int cpu, void (*func)(void *), void *data)
arch/s390/kernel/smp.c
292
lc = lowcore_ptr[cpu];
arch/s390/kernel/smp.c
297
pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu), SIGP_RESTART, 0);
arch/s390/kernel/smp.c
310
static void __noreturn pcpu_delegate(struct pcpu *pcpu, int cpu,
arch/s390/kernel/smp.c
317
lc = lowcore_ptr[cpu];
arch/s390/kernel/smp.c
387
int cpu;
arch/s390/kernel/smp.c
389
for_each_present_cpu(cpu)
arch/s390/kernel/smp.c
390
if (per_cpu(pcpu_devices, cpu).address == address)
arch/s390/kernel/smp.c
391
return cpu;
arch/s390/kernel/smp.c
400
bool notrace arch_vcpu_is_preempted(int cpu)
arch/s390/kernel/smp.c
402
if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
arch/s390/kernel/smp.c
404
if (pcpu_running(per_cpu_ptr(&pcpu_devices, cpu)))
arch/s390/kernel/smp.c
410
void notrace smp_yield_cpu(int cpu)
arch/s390/kernel/smp.c
416
: : "d" (per_cpu(pcpu_devices, cpu).address));
arch/s390/kernel/smp.c
429
int cpu;
arch/s390/kernel/smp.c
436
for_each_cpu(cpu, &cpumask) {
arch/s390/kernel/smp.c
437
struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);
arch/s390/kernel/smp.c
445
for_each_cpu(cpu, &cpumask)
arch/s390/kernel/smp.c
446
if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu)))
arch/s390/kernel/smp.c
447
cpumask_clear_cpu(cpu, &cpumask);
arch/s390/kernel/smp.c
462
int cpu;
arch/s390/kernel/smp.c
474
for_each_online_cpu(cpu) {
arch/s390/kernel/smp.c
475
if (cpu == smp_processor_id())
arch/s390/kernel/smp.c
477
pcpu = per_cpu_ptr(&pcpu_devices, cpu);
arch/s390/kernel/smp.c
515
int cpu;
arch/s390/kernel/smp.c
517
for_each_cpu(cpu, mask)
arch/s390/kernel/smp.c
518
pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);
arch/s390/kernel/smp.c
521
void arch_send_call_function_single_ipi(int cpu)
arch/s390/kernel/smp.c
523
pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);
arch/s390/kernel/smp.c
531
void arch_smp_send_reschedule(int cpu)
arch/s390/kernel/smp.c
533
pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_schedule);
arch/s390/kernel/smp.c
545
int smp_store_status(int cpu)
arch/s390/kernel/smp.c
551
pcpu = per_cpu_ptr(&pcpu_devices, cpu);
arch/s390/kernel/smp.c
552
lc = lowcore_ptr[cpu];
arch/s390/kernel/smp.c
653
void smp_cpu_set_polarization(int cpu, int val)
arch/s390/kernel/smp.c
655
per_cpu(pcpu_devices, cpu).polarization = val;
arch/s390/kernel/smp.c
658
int smp_cpu_get_polarization(int cpu)
arch/s390/kernel/smp.c
660
return per_cpu(pcpu_devices, cpu).polarization;
arch/s390/kernel/smp.c
663
void smp_cpu_set_capacity(int cpu, unsigned long val)
arch/s390/kernel/smp.c
665
per_cpu(pcpu_devices, cpu).capacity = val;
arch/s390/kernel/smp.c
668
unsigned long smp_cpu_get_capacity(int cpu)
arch/s390/kernel/smp.c
670
return per_cpu(pcpu_devices, cpu).capacity;
arch/s390/kernel/smp.c
673
void smp_set_core_capacity(int cpu, unsigned long val)
arch/s390/kernel/smp.c
677
cpu = smp_get_base_cpu(cpu);
arch/s390/kernel/smp.c
678
for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++)
arch/s390/kernel/smp.c
682
int smp_cpu_get_cpu_address(int cpu)
arch/s390/kernel/smp.c
684
return per_cpu(pcpu_devices, cpu).address;
arch/s390/kernel/smp.c
713
int cpu, nr, i;
arch/s390/kernel/smp.c
719
cpu = cpumask_first(avail);
arch/s390/kernel/smp.c
721
for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
arch/s390/kernel/smp.c
724
pcpu = per_cpu_ptr(&pcpu_devices, cpu);
arch/s390/kernel/smp.c
730
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
arch/s390/kernel/smp.c
731
smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
arch/s390/kernel/smp.c
732
set_cpu_present(cpu, true);
arch/s390/kernel/smp.c
733
if (!early && arch_register_cpu(cpu))
arch/s390/kernel/smp.c
734
set_cpu_present(cpu, false);
arch/s390/kernel/smp.c
737
cpumask_clear_cpu(cpu, avail);
arch/s390/kernel/smp.c
738
cpu = cpumask_next(cpu, avail);
arch/s390/kernel/smp.c
780
unsigned int cpu, mtid, c_cpus, s_cpus;
arch/s390/kernel/smp.c
790
for (cpu = 0; cpu < info->combined; cpu++)
arch/s390/kernel/smp.c
791
if (info->core[cpu].core_id == address) {
arch/s390/kernel/smp.c
793
boot_core_type = info->core[cpu].type;
arch/s390/kernel/smp.c
796
if (cpu >= info->combined)
arch/s390/kernel/smp.c
808
for (cpu = 0; cpu < info->combined; cpu++) {
arch/s390/kernel/smp.c
810
info->core[cpu].type != boot_core_type)
arch/s390/kernel/smp.c
812
if (cpu < info->configured)
arch/s390/kernel/smp.c
827
int cpu = raw_smp_processor_id();
arch/s390/kernel/smp.c
837
rcutree_report_cpu_starting(cpu);
arch/s390/kernel/smp.c
842
cpumask_set_cpu(cpu, &cpu_setup_mask);
arch/s390/kernel/smp.c
844
notify_cpu_starting(cpu);
arch/s390/kernel/smp.c
845
if (topology_cpu_dedicated(cpu))
arch/s390/kernel/smp.c
849
set_cpu_online(cpu, true);
arch/s390/kernel/smp.c
856
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
arch/s390/kernel/smp.c
858
struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);
arch/s390/kernel/smp.c
867
rc = pcpu_alloc_lowcore(pcpu, cpu);
arch/s390/kernel/smp.c
875
pcpu_prepare_secondary(pcpu, cpu);
arch/s390/kernel/smp.c
876
pcpu_attach_task(cpu, tidle);
arch/s390/kernel/smp.c
877
pcpu_start_fn(cpu, smp_start_secondary, NULL);
arch/s390/kernel/smp.c
879
while (!cpu_online(cpu))
arch/s390/kernel/smp.c
897
int cpu;
arch/s390/kernel/smp.c
901
cpu = smp_processor_id();
arch/s390/kernel/smp.c
902
set_cpu_online(cpu, false);
arch/s390/kernel/smp.c
903
cpumask_clear_cpu(cpu, &cpu_setup_mask);
arch/s390/kernel/smp.c
917
void __cpu_die(unsigned int cpu)
arch/s390/kernel/smp.c
922
pcpu = per_cpu_ptr(&pcpu_devices, cpu);
arch/s390/kernel/smp.c
925
pcpu_free_lowcore(pcpu, cpu);
arch/s390/kernel/smp.c
926
cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
arch/s390/kernel/smp.c
927
cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
arch/s390/kernel/smp.c
940
unsigned int possible, sclp_max, cpu;
arch/s390/kernel/smp.c
947
for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
arch/s390/kernel/smp.c
948
set_cpu_possible(cpu, true);
arch/s390/kernel/time.c
146
int cpu;
arch/s390/kernel/time.c
151
cpu = smp_processor_id();
arch/s390/kernel/time.c
152
cd = &per_cpu(comparators, cpu);
arch/s390/kernel/time.c
162
cd->cpumask = cpumask_of(cpu);
arch/s390/kernel/topology.c
104
if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
arch/s390/kernel/topology.c
106
cpumask_set_cpu(cpu, &mask);
arch/s390/kernel/topology.c
109
cpu -= cpu % (smp_cpu_mtid + 1);
arch/s390/kernel/topology.c
110
max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
arch/s390/kernel/topology.c
111
for (; cpu <= max_cpu; cpu++) {
arch/s390/kernel/topology.c
112
if (cpumask_test_cpu(cpu, &cpu_setup_mask))
arch/s390/kernel/topology.c
113
cpumask_set_cpu(cpu, &mask);
arch/s390/kernel/topology.c
131
int cpu;
arch/s390/kernel/topology.c
134
cpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
arch/s390/kernel/topology.c
135
if (cpu < 0)
arch/s390/kernel/topology.c
137
max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
arch/s390/kernel/topology.c
138
for (; cpu <= max_cpu; cpu++) {
arch/s390/kernel/topology.c
139
topo = &cpu_topology[cpu];
arch/s390/kernel/topology.c
144
topo->thread_id = cpu;
arch/s390/kernel/topology.c
146
cpumask_set_cpu(cpu, &drawer->mask);
arch/s390/kernel/topology.c
147
cpumask_set_cpu(cpu, &book->mask);
arch/s390/kernel/topology.c
148
cpumask_set_cpu(cpu, &socket->mask);
arch/s390/kernel/topology.c
149
smp_cpu_set_polarization(cpu, tl_core->pp);
arch/s390/kernel/topology.c
150
smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
arch/s390/kernel/topology.c
208
add_cpus_to_mask(&tle->cpu, drawer, book, socket);
arch/s390/kernel/topology.c
220
int cpu;
arch/s390/kernel/topology.c
222
for_each_possible_cpu(cpu)
arch/s390/kernel/topology.c
223
smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
arch/s390/kernel/topology.c
241
int cpu, rc;
arch/s390/kernel/topology.c
251
for_each_possible_cpu(cpu)
arch/s390/kernel/topology.c
252
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
arch/s390/kernel/topology.c
259
int cpu, sibling, pkg_first, smt_first, id;
arch/s390/kernel/topology.c
261
for_each_possible_cpu(cpu) {
arch/s390/kernel/topology.c
262
topo = &cpu_topology[cpu];
arch/s390/kernel/topology.c
263
cpu_thread_map(&topo->thread_mask, cpu);
arch/s390/kernel/topology.c
264
cpu_group_map(&topo->core_mask, &socket_info, cpu);
arch/s390/kernel/topology.c
265
cpu_group_map(&topo->book_mask, &book_info, cpu);
arch/s390/kernel/topology.c
266
cpu_group_map(&topo->drawer_mask, &drawer_info, cpu);
arch/s390/kernel/topology.c
269
id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
arch/s390/kernel/topology.c
270
topo->thread_id = cpu;
arch/s390/kernel/topology.c
271
topo->core_id = cpu;
arch/s390/kernel/topology.c
278
for_each_online_cpu(cpu) {
arch/s390/kernel/topology.c
279
topo = &cpu_topology[cpu];
arch/s390/kernel/topology.c
282
if (cpu == pkg_first) {
arch/s390/kernel/topology.c
441
int cpu = dev->id;
arch/s390/kernel/topology.c
445
switch (smp_cpu_get_polarization(cpu)) {
arch/s390/kernel/topology.c
479
int cpu = dev->id;
arch/s390/kernel/topology.c
483
count = sysfs_emit(buf, "%d\n", topology_cpu_dedicated(cpu));
arch/s390/kernel/topology.c
498
int topology_cpu_init(struct cpu *cpu)
arch/s390/kernel/topology.c
502
rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
arch/s390/kernel/topology.c
505
rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
arch/s390/kernel/topology.c
507
sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
arch/s390/kernel/topology.c
511
const struct cpumask *cpu_coregroup_mask(int cpu)
arch/s390/kernel/topology.c
513
return &cpu_topology[cpu].core_mask;
arch/s390/kernel/topology.c
516
static const struct cpumask *tl_book_mask(struct sched_domain_topology_level *tl, int cpu)
arch/s390/kernel/topology.c
518
return &cpu_topology[cpu].book_mask;
arch/s390/kernel/topology.c
521
static const struct cpumask *tl_drawer_mask(struct sched_domain_topology_level *tl, int cpu)
arch/s390/kernel/topology.c
523
return &cpu_topology[cpu].drawer_mask;
arch/s390/kernel/topology.c
67
static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int cpu)
arch/s390/kernel/topology.c
72
if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
arch/s390/kernel/topology.c
74
cpumask_set_cpu(cpu, &mask);
arch/s390/kernel/topology.c
78
if (cpumask_test_cpu(cpu, &info->mask)) {
arch/s390/kernel/topology.c
98
static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
arch/s390/kernel/vdso/getcpu.c
14
if (cpu)
arch/s390/kernel/vdso/getcpu.c
15
*cpu = clk.pf;
arch/s390/kernel/vdso/getcpu.c
8
int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, void *unused)
arch/s390/kernel/vdso/vdso.h
7
int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, void *unused);
arch/s390/kernel/wti.c
102
static int wti_pending(unsigned int cpu)
arch/s390/kernel/wti.c
104
struct wti_state *st = per_cpu_ptr(&wti_state, cpu);
arch/s390/kernel/wti.c
125
int cpu;
arch/s390/kernel/wti.c
129
for_each_online_cpu(cpu)
arch/s390/kernel/wti.c
130
seq_printf(seq, "CPU%-8d", cpu);
arch/s390/kernel/wti.c
132
for_each_online_cpu(cpu) {
arch/s390/kernel/wti.c
133
st = per_cpu_ptr(&wti_state, cpu);
arch/s390/kernel/wti.c
142
static void wti_thread_fn(unsigned int cpu)
arch/s390/kernel/wti.c
144
struct wti_state *st = per_cpu_ptr(&wti_state, cpu);
arch/s390/kernel/wti.c
170
int cpu, rc;
arch/s390/kernel/wti.c
178
for_each_online_cpu(cpu) {
arch/s390/kernel/wti.c
179
st = per_cpu_ptr(&wti_state, cpu);
arch/s390/kvm/diag.c
205
tcpu_cpu = READ_ONCE(tcpu->cpu);
arch/s390/kvm/interrupt.c
48
union esca_sigp_ctrl sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;
arch/s390/kvm/interrupt.c
64
union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
arch/s390/kvm/interrupt.c
87
union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
arch/s390/kvm/kvm-s390.c
3373
sca->cpu[vcpu->vcpu_id].sda = 0;
arch/s390/kvm/kvm-s390.c
3390
sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
arch/s390/kvm/kvm-s390.c
3478
WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
arch/s390/kvm/kvm-s390.c
3488
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/s390/kvm/kvm-s390.c
3494
vcpu->cpu = cpu;
arch/s390/kvm/kvm-s390.c
3499
vcpu->cpu = -1;
arch/s390/kvm/vsie.c
759
((gpa + offsetof(struct bsca_block, cpu[0]) - 1) & PAGE_MASK))
arch/s390/lib/spinlock.c
138
int ix, cpu;
arch/s390/lib/spinlock.c
141
cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
arch/s390/lib/spinlock.c
142
return per_cpu_ptr(&spin_wait[ix], cpu - 1);
arch/s390/lib/spinlock.c
293
int cpu = spinlock_lockval();
arch/s390/lib/spinlock.c
300
if (arch_try_cmpxchg(&lp->lock, &owner, cpu))
arch/s390/lib/spinlock.c
354
int cpu;
arch/s390/lib/spinlock.c
356
cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
arch/s390/lib/spinlock.c
357
if (!cpu)
arch/s390/lib/spinlock.c
359
if (machine_is_lpar() && !arch_vcpu_is_preempted(cpu - 1))
arch/s390/lib/spinlock.c
361
smp_yield_cpu(cpu - 1);
arch/s390/lib/spinlock.c
80
void arch_spin_lock_setup(int cpu)
arch/s390/lib/spinlock.c
85
node = per_cpu_ptr(&spin_wait[0], cpu);
arch/s390/lib/spinlock.c
88
node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
arch/s390/mm/init.c
183
static int __init pcpu_cpu_to_node(int cpu)
arch/s390/mm/init.c
191
unsigned int cpu;
arch/s390/mm/init.c
206
for_each_possible_cpu(cpu)
arch/s390/mm/init.c
207
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
arch/s390/mm/maccess.c
134
int cpu;
arch/s390/mm/maccess.c
136
for_each_online_cpu(cpu) {
arch/s390/mm/maccess.c
137
lc = virt_to_phys(lowcore_ptr[cpu]);
arch/s390/mm/maccess.c
140
return cpu;
arch/s390/mm/maccess.c
157
int this_cpu, cpu;
arch/s390/mm/maccess.c
162
cpu = get_swapped_owner(addr);
arch/s390/mm/maccess.c
163
if (cpu < 0)
arch/s390/mm/maccess.c
175
} else if (cpu == this_cpu) {
arch/s390/mm/maccess.c
176
ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu]));
arch/s390/mm/pfault.c
210
static int pfault_cpu_dead(unsigned int cpu)
arch/s390/pci/pci_irq.c
154
int cpu = cpumask_first(irq_data_get_affinity_mask(data));
arch/s390/pci/pci_irq.c
157
msg->address_lo |= (smp_cpu_get_cpu_address(cpu) << 8);
arch/s390/pci/pci_irq.c
218
unsigned long cpu;
arch/s390/pci/pci_irq.c
221
for (cpu = 0;;) {
arch/s390/pci/pci_irq.c
222
cpu = airq_iv_scan(zpci_sbv, cpu, airq_iv_end(zpci_sbv));
arch/s390/pci/pci_irq.c
223
if (cpu == -1UL) {
arch/s390/pci/pci_irq.c
230
cpu = 0;
arch/s390/pci/pci_irq.c
233
cpu_data = &per_cpu(irq_data, cpu);
arch/s390/pci/pci_irq.c
238
smp_call_function_single_async(cpu, &cpu_data->csd);
arch/s390/pci/pci_irq.c
411
unsigned int cpu, hwirq;
arch/s390/pci/pci_irq.c
427
for_each_possible_cpu(cpu) {
arch/s390/pci/pci_irq.c
428
airq_iv_set_ptr(zpci_ibv[cpu], bit + i,
arch/s390/pci/pci_irq.c
430
airq_iv_set_data(zpci_ibv[cpu], bit + i, hwirq + i);
arch/s390/pci/pci_irq.c
447
unsigned int cpu;
arch/s390/pci/pci_irq.c
454
for_each_possible_cpu(cpu) {
arch/s390/pci/pci_irq.c
455
airq_iv_set_ptr(zpci_ibv[cpu], bit + i, 0);
arch/s390/pci/pci_irq.c
456
airq_iv_set_data(zpci_ibv[cpu], bit + i, 0);
arch/s390/pci/pci_irq.c
555
unsigned int cpu;
arch/s390/pci/pci_irq.c
570
for_each_possible_cpu(cpu) {
arch/s390/pci/pci_irq.c
575
zpci_ibv[cpu] = airq_iv_create(cache_line_size() * BITS_PER_BYTE,
arch/s390/pci/pci_irq.c
579
(!cpu ? AIRQ_IV_ALLOC : 0), NULL);
arch/s390/pci/pci_irq.c
580
if (!zpci_ibv[cpu])
arch/s390/pci/pci_irq.c
652
unsigned int cpu;
arch/s390/pci/pci_irq.c
655
for_each_possible_cpu(cpu) {
arch/s390/pci/pci_irq.c
656
airq_iv_release(zpci_ibv[cpu]);
arch/s390/pci/pci_report.c
71
entry->cpu);
arch/sh/boards/of-generic.c
28
static void dummy_start_cpu(unsigned int cpu, unsigned long entry_point)
arch/sh/boards/of-generic.c
37
static void dummy_send_ipi(unsigned int cpu, unsigned int message)
arch/sh/include/asm/irq.h
34
extern void irq_ctx_init(int cpu);
arch/sh/include/asm/irq.h
35
extern void irq_ctx_exit(int cpu);
arch/sh/include/asm/irq.h
37
# define irq_ctx_init(cpu) do { } while (0)
arch/sh/include/asm/irq.h
38
# define irq_ctx_exit(cpu) do { } while (0)
arch/sh/include/asm/mmu_context.h
103
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
arch/sh/include/asm/mmu_context.h
105
get_mmu_context(mm, cpu);
arch/sh/include/asm/mmu_context.h
106
set_asid(cpu_asid(cpu, mm));
arch/sh/include/asm/mmu_context.h
113
unsigned int cpu = smp_processor_id();
arch/sh/include/asm/mmu_context.h
116
cpumask_set_cpu(cpu, mm_cpumask(next));
arch/sh/include/asm/mmu_context.h
118
activate_context(next, cpu);
arch/sh/include/asm/mmu_context.h
120
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
arch/sh/include/asm/mmu_context.h
121
activate_context(next, cpu);
arch/sh/include/asm/mmu_context.h
130
#define cpu_asid(cpu, mm) ({ (void)cpu; NO_CONTEXT; })
arch/sh/include/asm/mmu_context.h
147
unsigned int cpu = smp_processor_id();
arch/sh/include/asm/mmu_context.h
153
if (asid_cache(cpu) == NO_CONTEXT)
arch/sh/include/asm/mmu_context.h
154
asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;
arch/sh/include/asm/mmu_context.h
156
set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
arch/sh/include/asm/mmu_context.h
37
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
arch/sh/include/asm/mmu_context.h
40
#define cpu_context(cpu, mm) ((mm)->context.id[cpu])
arch/sh/include/asm/mmu_context.h
42
#define cpu_asid(cpu, mm) \
arch/sh/include/asm/mmu_context.h
43
(cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
arch/sh/include/asm/mmu_context.h
55
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
arch/sh/include/asm/mmu_context.h
57
unsigned long asid = asid_cache(cpu);
arch/sh/include/asm/mmu_context.h
60
if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
arch/sh/include/asm/mmu_context.h
80
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
arch/sh/include/asm/smp-ops.h
10
void (*send_ipi)(unsigned int cpu, unsigned int message);
arch/sh/include/asm/smp-ops.h
11
int (*cpu_disable)(unsigned int cpu);
arch/sh/include/asm/smp-ops.h
12
void (*cpu_die)(unsigned int cpu);
arch/sh/include/asm/smp-ops.h
9
void (*start_cpu)(unsigned int cpu, unsigned long entry_point);
arch/sh/include/asm/smp.h
15
#define raw_smp_processor_id() (current_thread_info()->cpu)
arch/sh/include/asm/smp.h
19
#define cpu_number_map(cpu) __cpu_number_map[cpu]
arch/sh/include/asm/smp.h
23
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
arch/sh/include/asm/smp.h
38
void arch_send_call_function_single_ipi(int cpu);
arch/sh/include/asm/smp.h
42
void native_cpu_die(unsigned int cpu);
arch/sh/include/asm/smp.h
43
int native_cpu_disable(unsigned int cpu);
arch/sh/include/asm/smp.h
49
static inline void __cpu_die(unsigned int cpu)
arch/sh/include/asm/smp.h
53
mp_ops->cpu_die(cpu);
arch/sh/include/asm/thread_info.h
31
__u32 cpu;
arch/sh/include/asm/thread_info.h
58
.cpu = 0, \
arch/sh/include/asm/topology.h
20
const struct cpumask *cpu_coregroup_mask(int cpu);
arch/sh/include/asm/topology.h
24
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
arch/sh/include/asm/topology.h
7
#define cpu_to_node(cpu) ((void)(cpu),0)
arch/sh/kernel/asm-offsets.c
27
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
arch/sh/kernel/cpu/init.c
297
current_thread_info()->cpu = hard_smp_processor_id();
arch/sh/kernel/cpu/proc.c
82
unsigned int cpu = c - cpu_data;
arch/sh/kernel/cpu/proc.c
84
if (!cpu_online(cpu))
arch/sh/kernel/cpu/proc.c
87
if (cpu == 0)
arch/sh/kernel/cpu/proc.c
92
seq_printf(m, "processor\t: %d\n", cpu);
arch/sh/kernel/cpu/sh2/probe.c
44
unsigned cpu = hard_smp_processor_id();
arch/sh/kernel/cpu/sh2/probe.c
46
unsigned cpu = 0;
arch/sh/kernel/cpu/sh2/probe.c
48
if (cpu == 0) of_scan_flat_dt(scan_cache, NULL);
arch/sh/kernel/cpu/sh2/probe.c
49
if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu);
arch/sh/kernel/cpu/sh2/probe.c
50
if (cpu != 0) return;
arch/sh/kernel/cpu/sh2/smp-j2.c
100
pr_info("J2 SMP: requested start of cpu %u\n", cpu);
arch/sh/kernel/cpu/sh2/smp-j2.c
108
static void j2_send_ipi(unsigned int cpu, unsigned int message)
arch/sh/kernel/cpu/sh2/smp-j2.c
116
pmsg = &per_cpu(j2_ipi_messages, cpu);
arch/sh/kernel/cpu/sh2/smp-j2.c
121
val = __raw_readl(j2_ipi_trigger + cpu);
arch/sh/kernel/cpu/sh2/smp-j2.c
122
__raw_writel(val | (1U<<28), j2_ipi_trigger + cpu);
arch/sh/kernel/cpu/sh2/smp-j2.c
23
unsigned cpu = hard_smp_processor_id();
arch/sh/kernel/cpu/sh2/smp-j2.c
24
volatile unsigned *pmsg = &per_cpu(j2_ipi_messages, cpu);
arch/sh/kernel/cpu/sh2/smp-j2.c
79
static void j2_start_cpu(unsigned int cpu, unsigned long entry_point)
arch/sh/kernel/cpu/sh2/smp-j2.c
85
if (!cpu) return;
arch/sh/kernel/cpu/sh2/smp-j2.c
87
np = of_get_cpu_node(cpu, NULL);
arch/sh/kernel/cpu/sh4/sq.c
341
unsigned int cpu = dev->id;
arch/sh/kernel/cpu/sh4/sq.c
345
sq_kobject[cpu] = kzalloc_obj(struct kobject);
arch/sh/kernel/cpu/sh4/sq.c
346
if (unlikely(!sq_kobject[cpu]))
arch/sh/kernel/cpu/sh4/sq.c
349
kobj = sq_kobject[cpu];
arch/sh/kernel/cpu/sh4/sq.c
359
unsigned int cpu = dev->id;
arch/sh/kernel/cpu/sh4/sq.c
360
struct kobject *kobj = sq_kobject[cpu];
arch/sh/kernel/cpu/sh4a/smp-shx3.c
106
static void shx3_send_ipi(unsigned int cpu, unsigned int message)
arch/sh/kernel/cpu/sh4a/smp-shx3.c
108
unsigned long addr = 0xfe410070 + (cpu * 4);
arch/sh/kernel/cpu/sh4a/smp-shx3.c
110
BUG_ON(cpu >= 4);
arch/sh/kernel/cpu/sh4a/smp-shx3.c
115
static void shx3_update_boot_vector(unsigned int cpu)
arch/sh/kernel/cpu/sh4a/smp-shx3.c
117
__raw_writel(STBCR_MSTP, STBCR_REG(cpu));
arch/sh/kernel/cpu/sh4a/smp-shx3.c
118
while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
arch/sh/kernel/cpu/sh4a/smp-shx3.c
120
__raw_writel(STBCR_RESET, STBCR_REG(cpu));
arch/sh/kernel/cpu/sh4a/smp-shx3.c
123
static int shx3_cpu_prepare(unsigned int cpu)
arch/sh/kernel/cpu/sh4a/smp-shx3.c
125
shx3_update_boot_vector(cpu);
arch/sh/kernel/cpu/sh4a/smp-shx3.c
30
unsigned int cpu = hard_smp_processor_id();
arch/sh/kernel/cpu/sh4a/smp-shx3.c
31
unsigned int offs = 4 * cpu;
arch/sh/kernel/cpu/sh4a/smp-shx3.c
45
unsigned int cpu = 0;
arch/sh/kernel/cpu/sh4a/smp-shx3.c
48
init_cpu_possible(cpumask_of(cpu));
arch/sh/kernel/cpu/sh4a/smp-shx3.c
51
__raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu));
arch/sh/kernel/cpu/sh4a/smp-shx3.c
84
static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point)
arch/sh/kernel/cpu/sh4a/smp-shx3.c
87
__raw_writel(entry_point, RESET_REG(cpu));
arch/sh/kernel/cpu/sh4a/smp-shx3.c
89
__raw_writel(virt_to_phys(entry_point), RESET_REG(cpu));
arch/sh/kernel/cpu/sh4a/smp-shx3.c
91
if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
arch/sh/kernel/cpu/sh4a/smp-shx3.c
92
__raw_writel(STBCR_MSTP, STBCR_REG(cpu));
arch/sh/kernel/cpu/sh4a/smp-shx3.c
94
while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
arch/sh/kernel/cpu/sh4a/smp-shx3.c
98
__raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
arch/sh/kernel/hw_breakpoint.c
275
int cpu, i, rc = NOTIFY_STOP;
arch/sh/kernel/hw_breakpoint.c
296
cpu = get_cpu();
arch/sh/kernel/hw_breakpoint.c
311
bp = per_cpu(bp_per_reg[i], cpu);
arch/sh/kernel/irq.c
120
void irq_ctx_init(int cpu)
arch/sh/kernel/irq.c
124
if (hardirq_ctx[cpu])
arch/sh/kernel/irq.c
127
irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
arch/sh/kernel/irq.c
129
irqctx->tinfo.cpu = cpu;
arch/sh/kernel/irq.c
133
hardirq_ctx[cpu] = irqctx;
arch/sh/kernel/irq.c
135
irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
arch/sh/kernel/irq.c
137
irqctx->tinfo.cpu = cpu;
arch/sh/kernel/irq.c
141
softirq_ctx[cpu] = irqctx;
arch/sh/kernel/irq.c
144
cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
arch/sh/kernel/irq.c
147
void irq_ctx_exit(int cpu)
arch/sh/kernel/irq.c
149
hardirq_ctx[cpu] = NULL;
arch/sh/kernel/irq.c
229
unsigned int irq, cpu = smp_processor_id();
arch/sh/kernel/irq.c
234
if (irq_data_get_node(data) == cpu) {
arch/sh/kernel/irq.c
240
irq, cpu);
arch/sh/kernel/perf_event.c
334
static int sh_pmu_prepare_cpu(unsigned int cpu)
arch/sh/kernel/perf_event.c
336
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
arch/sh/kernel/smp.c
101
pr_err("CPU %u didn't die...\n", cpu);
arch/sh/kernel/smp.c
104
int native_cpu_disable(unsigned int cpu)
arch/sh/kernel/smp.c
106
return cpu == 0 ? -EPERM : 0;
arch/sh/kernel/smp.c
126
unsigned int cpu = smp_processor_id();
arch/sh/kernel/smp.c
129
ret = mp_ops->cpu_disable(cpu);
arch/sh/kernel/smp.c
137
set_cpu_online(cpu, false);
arch/sh/kernel/smp.c
153
clear_tasks_mm_cpumask(cpu);
arch/sh/kernel/smp.c
158
int native_cpu_disable(unsigned int cpu)
arch/sh/kernel/smp.c
163
void native_cpu_die(unsigned int cpu)
arch/sh/kernel/smp.c
177
unsigned int cpu = smp_processor_id();
arch/sh/kernel/smp.c
191
notify_cpu_starting(cpu);
arch/sh/kernel/smp.c
197
smp_store_cpu_info(cpu);
arch/sh/kernel/smp.c
199
set_cpu_online(cpu, true);
arch/sh/kernel/smp.c
200
per_cpu(cpu_state, cpu) = CPU_ONLINE;
arch/sh/kernel/smp.c
214
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
arch/sh/kernel/smp.c
218
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
arch/sh/kernel/smp.c
230
mp_ops->start_cpu(cpu, (unsigned long)_stext);
arch/sh/kernel/smp.c
234
if (cpu_online(cpu))
arch/sh/kernel/smp.c
241
if (cpu_online(cpu))
arch/sh/kernel/smp.c
250
int cpu;
arch/sh/kernel/smp.c
252
for_each_online_cpu(cpu)
arch/sh/kernel/smp.c
253
bogosum += cpu_data[cpu].loops_per_jiffy;
arch/sh/kernel/smp.c
261
void arch_smp_send_reschedule(int cpu)
arch/sh/kernel/smp.c
263
mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
arch/sh/kernel/smp.c
273
int cpu;
arch/sh/kernel/smp.c
275
for_each_cpu(cpu, mask)
arch/sh/kernel/smp.c
276
mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
arch/sh/kernel/smp.c
279
void arch_send_call_function_single_ipi(int cpu)
arch/sh/kernel/smp.c
281
mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
arch/sh/kernel/smp.c
287
int cpu;
arch/sh/kernel/smp.c
289
for_each_cpu(cpu, mask)
arch/sh/kernel/smp.c
290
mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
arch/sh/kernel/smp.c
49
static inline void smp_store_cpu_info(unsigned int cpu)
arch/sh/kernel/smp.c
51
struct sh_cpuinfo *c = cpu_data + cpu;
arch/sh/kernel/smp.c
60
unsigned int cpu = smp_processor_id();
arch/sh/kernel/smp.c
63
current_thread_info()->cpu = cpu;
arch/sh/kernel/smp.c
73
unsigned int cpu = smp_processor_id();
arch/sh/kernel/smp.c
75
__cpu_number_map[0] = cpu;
arch/sh/kernel/smp.c
76
__cpu_logical_map[0] = cpu;
arch/sh/kernel/smp.c
78
set_cpu_online(cpu, true);
arch/sh/kernel/smp.c
79
set_cpu_possible(cpu, true);
arch/sh/kernel/smp.c
81
per_cpu(cpu_state, cpu) = CPU_ONLINE;
arch/sh/kernel/smp.c
85
void native_cpu_die(unsigned int cpu)
arch/sh/kernel/smp.c
91
if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
arch/sh/kernel/smp.c
93
pr_info("CPU %u is now offline\n", cpu);
arch/sh/kernel/topology.c
16
static DEFINE_PER_CPU(struct cpu, cpu_devices);
arch/sh/kernel/topology.c
21
static cpumask_t cpu_coregroup_map(int cpu)
arch/sh/kernel/topology.c
30
const struct cpumask *cpu_coregroup_mask(int cpu)
arch/sh/kernel/topology.c
32
return &cpu_core_map[cpu];
arch/sh/kernel/topology.c
37
unsigned int cpu;
arch/sh/kernel/topology.c
39
for_each_possible_cpu(cpu)
arch/sh/kernel/topology.c
40
cpu_core_map[cpu] = cpu_coregroup_map(cpu);
arch/sh/kernel/topology.c
50
struct cpu *c = &per_cpu(cpu_devices, i);
arch/sh/mm/cache-j2.c
29
unsigned cpu;
arch/sh/mm/cache-j2.c
30
for_each_possible_cpu(cpu)
arch/sh/mm/cache-j2.c
31
__raw_writel(CACHE_ENABLE | ICACHE_FLUSH, j2_ccr_base + cpu);
arch/sh/mm/cache-j2.c
36
unsigned cpu;
arch/sh/mm/cache-j2.c
37
for_each_possible_cpu(cpu)
arch/sh/mm/cache-j2.c
38
__raw_writel(CACHE_ENABLE | DCACHE_FLUSH, j2_ccr_base + cpu);
arch/sh/mm/cache-j2.c
43
unsigned cpu;
arch/sh/mm/cache-j2.c
44
for_each_possible_cpu(cpu)
arch/sh/mm/cache-j2.c
45
__raw_writel(CACHE_ENABLE | CACHE_FLUSH, j2_ccr_base + cpu);
arch/sh/mm/tlbflush_32.c
108
unsigned int cpu = smp_processor_id();
arch/sh/mm/tlbflush_32.c
112
if (cpu_context(cpu, mm) != NO_CONTEXT) {
arch/sh/mm/tlbflush_32.c
116
cpu_context(cpu, mm) = NO_CONTEXT;
arch/sh/mm/tlbflush_32.c
118
activate_context(mm, cpu);
arch/sh/mm/tlbflush_32.c
17
unsigned int cpu = smp_processor_id();
arch/sh/mm/tlbflush_32.c
19
if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
arch/sh/mm/tlbflush_32.c
24
asid = cpu_asid(cpu, vma->vm_mm);
arch/sh/mm/tlbflush_32.c
43
unsigned int cpu = smp_processor_id();
arch/sh/mm/tlbflush_32.c
45
if (cpu_context(cpu, mm) != NO_CONTEXT) {
arch/sh/mm/tlbflush_32.c
52
cpu_context(cpu, mm) = NO_CONTEXT;
arch/sh/mm/tlbflush_32.c
54
activate_context(mm, cpu);
arch/sh/mm/tlbflush_32.c
59
asid = cpu_asid(cpu, mm);
arch/sh/mm/tlbflush_32.c
80
unsigned int cpu = smp_processor_id();
arch/sh/mm/tlbflush_32.c
92
asid = cpu_asid(cpu, &init_mm);
arch/sparc/include/asm/cacheflush_64.h
40
void smp_flush_dcache_folio_impl(struct folio *folio, int cpu);
arch/sparc/include/asm/cacheflush_64.h
43
#define smp_flush_dcache_folio_impl(folio, cpu) flush_dcache_folio_impl(folio)
arch/sparc/include/asm/hvtramp.h
15
__u32 cpu;
arch/sparc/include/asm/leon.h
236
void leon_clear_profile_irq(int cpu);
arch/sparc/include/asm/leon.h
241
void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
arch/sparc/include/asm/mmu_context_64.h
130
if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
arch/sparc/include/asm/mmu_context_64.h
131
cpumask_set_cpu(cpu, mm_cpumask(mm));
arch/sparc/include/asm/mmu_context_64.h
83
int cpu = smp_processor_id();
arch/sparc/include/asm/mmu_context_64.h
85
per_cpu(per_cpu_secondary_mm, cpu) = mm;
arch/sparc/include/asm/obio.h
121
static inline unsigned int bw_get_prof_limit(int cpu)
arch/sparc/include/asm/obio.h
127
"r" (CSR_BASE(cpu) + BW_PTIMER_LIMIT),
arch/sparc/include/asm/obio.h
132
static inline void bw_set_prof_limit(int cpu, unsigned int limit)
arch/sparc/include/asm/obio.h
136
"r" (CSR_BASE(cpu) + BW_PTIMER_LIMIT),
arch/sparc/include/asm/obio.h
140
static inline unsigned int bw_get_ctrl(int cpu)
arch/sparc/include/asm/obio.h
146
"r" (CSR_BASE(cpu) + BW_CTRL),
arch/sparc/include/asm/obio.h
151
static inline void bw_set_ctrl(int cpu, unsigned int ctrl)
arch/sparc/include/asm/obio.h
155
"r" (CSR_BASE(cpu) + BW_CTRL),
arch/sparc/include/asm/obio.h
27
#define CSR_BASE(cpu) (((CSR_BASE_ADDR >> CSR_CPU_SHIFT) + cpu) << CSR_CPU_SHIFT)
arch/sparc/include/asm/obio.h
40
#define ECSR_BASE(cpu) ((cpu) << ECSR_CPU_SHIFT)
arch/sparc/include/asm/setup.h
65
void sunhv_migrate_hvcons_irq(int cpu);
arch/sparc/include/asm/smp_32.h
104
#define raw_smp_processor_id() (current_thread_info()->cpu)
arch/sparc/include/asm/smp_32.h
60
void (*resched)(int cpu);
arch/sparc/include/asm/smp_32.h
61
void (*single)(int cpu);
arch/sparc/include/asm/smp_32.h
62
void (*mask_one)(int cpu);
arch/sparc/include/asm/smp_32.h
94
void arch_send_call_function_single_ipi(int cpu);
arch/sparc/include/asm/smp_32.h
97
static inline int cpu_logical_map(int cpu)
arch/sparc/include/asm/smp_32.h
99
return cpu;
arch/sparc/include/asm/smp_64.h
40
void arch_send_call_function_single_ipi(int cpu);
arch/sparc/include/asm/smp_64.h
48
#define raw_smp_processor_id() (current_thread_info()->cpu)
arch/sparc/include/asm/smp_64.h
68
void __cpu_die(unsigned int cpu);
arch/sparc/include/asm/switch_to_64.h
25
trap_block[current_thread_info()->cpu].thread = \
arch/sparc/include/asm/thread_info_32.h
32
int cpu; /* cpu we're on */
arch/sparc/include/asm/thread_info_32.h
62
.cpu = 0, \
arch/sparc/include/asm/thread_info_64.h
50
__u16 cpu;
arch/sparc/include/asm/timer_32.h
42
void register_percpu_ce(int cpu);
arch/sparc/include/asm/timer_64.h
35
unsigned long sparc64_get_clock_tick(unsigned int cpu);
arch/sparc/include/asm/topology_64.h
11
return numa_cpu_lookup_table[cpu];
arch/sparc/include/asm/topology_64.h
46
#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
arch/sparc/include/asm/topology_64.h
47
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
arch/sparc/include/asm/topology_64.h
48
#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
arch/sparc/include/asm/topology_64.h
49
#define topology_core_cache_cpumask(cpu) (&cpu_core_sib_cache_map[cpu])
arch/sparc/include/asm/topology_64.h
50
#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
arch/sparc/include/asm/topology_64.h
60
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
arch/sparc/include/asm/topology_64.h
62
return &cpu_core_sib_cache_map[cpu];
arch/sparc/include/asm/topology_64.h
9
static inline int cpu_to_node(int cpu)
arch/sparc/kernel/cpu.c
282
const struct cpu_info *cpu;
arch/sparc/kernel/cpu.c
285
cpu = &manuf->cpu_info[0];
arch/sparc/kernel/cpu.c
286
while (cpu->psr_vers != -1)
arch/sparc/kernel/cpu.c
288
if (cpu->psr_vers == psr_vers) {
arch/sparc/kernel/cpu.c
289
sparc_cpu_type = cpu->name;
arch/sparc/kernel/cpu.c
290
sparc_pmu_type = cpu->pmu_name;
arch/sparc/kernel/cpu.c
294
cpu++;
arch/sparc/kernel/cpumap.c
105
id = cpu_to_node(cpu);
arch/sparc/kernel/cpumap.c
108
id = cpu_data(cpu).core_id;
arch/sparc/kernel/cpumap.c
111
id = cpu_data(cpu).proc_id;
arch/sparc/kernel/cpumap.c
193
int n, id, cpu, prev_cpu, last_cpu, level;
arch/sparc/kernel/cpumap.c
204
prev_cpu = cpu = cpumask_first(cpu_online_mask);
arch/sparc/kernel/cpumap.c
213
id = cpuinfo_id(cpu, level);
arch/sparc/kernel/cpumap.c
227
? cpu : new_tree->level[level + 1].start_index;
arch/sparc/kernel/cpumap.c
238
while (++cpu <= last_cpu) {
arch/sparc/kernel/cpumap.c
239
if (!cpu_online(cpu))
arch/sparc/kernel/cpumap.c
244
id = cpuinfo_id(cpu, level);
arch/sparc/kernel/cpumap.c
250
if ((id != prev_id[level]) || (cpu == last_cpu)) {
arch/sparc/kernel/cpumap.c
256
if (cpu == last_cpu)
arch/sparc/kernel/cpumap.c
268
(cpu == last_cpu) ? cpu : prev_cpu;
arch/sparc/kernel/cpumap.c
285
? cpu : level_rover[level + 1];
arch/sparc/kernel/cpumap.c
290
prev_cpu = cpu;
arch/sparc/kernel/cpumap.c
96
static int cpuinfo_id(int cpu, int level)
arch/sparc/kernel/ds.c
406
__u32 cpu;
arch/sparc/kernel/ds.c
466
u32 cpu = list[i];
arch/sparc/kernel/ds.c
469
if (cpu == CPU_SENTINEL)
arch/sparc/kernel/ds.c
473
if (list[j] == cpu)
arch/sparc/kernel/ds.c
492
int i, cpu;
arch/sparc/kernel/ds.c
505
for_each_cpu(cpu, mask) {
arch/sparc/kernel/ds.c
506
ent[i].cpu = cpu;
arch/sparc/kernel/ds.c
514
static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
arch/sparc/kernel/ds.c
525
if (ent[i].cpu != cpu)
arch/sparc/kernel/ds.c
537
int resp_len, ncpus, cpu;
arch/sparc/kernel/ds.c
553
for_each_cpu(cpu, mask) {
arch/sparc/kernel/ds.c
557
dp->id, cpu);
arch/sparc/kernel/ds.c
558
err = add_cpu(cpu);
arch/sparc/kernel/ds.c
563
if (!cpu_present(cpu)) {
arch/sparc/kernel/ds.c
574
dr_cpu_mark(resp, cpu, ncpus, res, stat);
arch/sparc/kernel/ds.c
596
int resp_len, ncpus, cpu;
arch/sparc/kernel/ds.c
609
for_each_cpu(cpu, mask) {
arch/sparc/kernel/ds.c
613
dp->id, cpu);
arch/sparc/kernel/ds.c
614
err = remove_cpu(cpu);
arch/sparc/kernel/ds.c
616
dr_cpu_mark(resp, cpu, ncpus,
arch/sparc/kernel/iommu.c
247
void *cpu, dma_addr_t dvma,
arch/sparc/kernel/iommu.c
260
free_pages((unsigned long)cpu, order);
arch/sparc/kernel/irq.h
75
void (*load_profile_irq)(int cpu, unsigned int limit);
arch/sparc/kernel/irq_64.c
1061
int cpu;
arch/sparc/kernel/irq_64.c
1063
for_each_possible_cpu(cpu) {
arch/sparc/kernel/irq_64.c
1064
struct trap_per_cpu *tb = &trap_block[cpu];
arch/sparc/kernel/irq_64.c
1078
int cpu;
arch/sparc/kernel/irq_64.c
1080
for_each_possible_cpu(cpu) {
arch/sparc/kernel/irq_64.c
1081
struct trap_per_cpu *tb = &trap_block[cpu];
arch/sparc/kernel/irq_64.c
968
int cpu = hard_smp_processor_id();
arch/sparc/kernel/irq_64.c
970
trap_block[cpu].irq_worklist_pa = 0UL;
arch/sparc/kernel/kernel.h
103
void sun4m_clear_profile_irq(int cpu);
arch/sparc/kernel/kstack.h
23
if (hardirq_stack[tp->cpu]) {
arch/sparc/kernel/kstack.h
24
base = (unsigned long) hardirq_stack[tp->cpu];
arch/sparc/kernel/kstack.h
28
base = (unsigned long) softirq_stack[tp->cpu];
arch/sparc/kernel/kstack.h
46
if (hardirq_stack[tp->cpu]) {
arch/sparc/kernel/kstack.h
47
base = (unsigned long) hardirq_stack[tp->cpu];
arch/sparc/kernel/kstack.h
51
base = (unsigned long) softirq_stack[tp->cpu];
arch/sparc/kernel/leon_kernel.c
109
unsigned int cpu = cpumask_first_and(affinity, cpu_online_mask);
arch/sparc/kernel/leon_kernel.c
111
if (cpumask_subset(cpu_online_mask, affinity) || cpu >= nr_cpu_ids)
arch/sparc/kernel/leon_kernel.c
114
return cpu;
arch/sparc/kernel/leon_kernel.c
147
int cpu;
arch/sparc/kernel/leon_kernel.c
150
cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
arch/sparc/kernel/leon_kernel.c
152
oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
arch/sparc/kernel/leon_kernel.c
153
LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
arch/sparc/kernel/leon_kernel.c
160
int cpu;
arch/sparc/kernel/leon_kernel.c
163
cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
arch/sparc/kernel/leon_kernel.c
165
oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
arch/sparc/kernel/leon_kernel.c
166
LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
arch/sparc/kernel/leon_kernel.c
283
int cpu = smp_processor_id();
arch/sparc/kernel/leon_kernel.c
285
leon_clear_profile_irq(cpu);
arch/sparc/kernel/leon_kernel.c
287
if (cpu == boot_cpu_id)
arch/sparc/kernel/leon_kernel.c
290
ce = &per_cpu(sparc32_clockevent, cpu);
arch/sparc/kernel/leon_kernel.c
42
#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
arch/sparc/kernel/leon_kernel.c
480
static void leon_load_profile_irq(int cpu, unsigned int limit)
arch/sparc/kernel/leon_kernel.c
485
void leon_clear_profile_irq(int cpu)
arch/sparc/kernel/leon_kernel.c
489
void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
arch/sparc/kernel/leon_kernel.c
49
static inline unsigned int leon_eirq_get(int cpu)
arch/sparc/kernel/leon_kernel.c
494
addr = (unsigned long *)LEON_IMASK(cpu);
arch/sparc/kernel/leon_kernel.c
51
return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
arch/sparc/kernel/leon_kernel.c
59
int cpu = sparc_leon3_cpuid();
arch/sparc/kernel/leon_kernel.c
61
eirq = leon_eirq_get(cpu);
arch/sparc/kernel/leon_smp.c
274
int cpu, len;
arch/sparc/kernel/leon_smp.c
297
for_each_possible_cpu(cpu) {
arch/sparc/kernel/leon_smp.c
298
work = &per_cpu(leon_ipi_work, cpu);
arch/sparc/kernel/leon_smp.c
303
static void leon_send_ipi(int cpu, int level)
arch/sparc/kernel/leon_smp.c
307
LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask);
arch/sparc/kernel/leon_smp.c
310
static void leon_ipi_single(int cpu)
arch/sparc/kernel/leon_smp.c
312
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
arch/sparc/kernel/leon_smp.c
318
leon_send_ipi(cpu, leon_ipi_irq);
arch/sparc/kernel/leon_smp.c
321
static void leon_ipi_mask_one(int cpu)
arch/sparc/kernel/leon_smp.c
323
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
arch/sparc/kernel/leon_smp.c
329
leon_send_ipi(cpu, leon_ipi_irq);
arch/sparc/kernel/leon_smp.c
332
static void leon_ipi_resched(int cpu)
arch/sparc/kernel/leon_smp.c
334
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
arch/sparc/kernel/leon_smp.c
340
leon_send_ipi(cpu, leon_ipi_irq);
arch/sparc/kernel/nmi.c
130
static inline unsigned int get_nmi_count(int cpu)
arch/sparc/kernel/nmi.c
132
return cpu_data(cpu).__nmi_count;
arch/sparc/kernel/nmi.c
141
static void report_broken_nmi(int cpu, int *prev_nmi_count)
arch/sparc/kernel/nmi.c
147
cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
arch/sparc/kernel/nmi.c
154
per_cpu(wd_enabled, cpu) = 0;
arch/sparc/kernel/nmi.c
170
int cpu, err;
arch/sparc/kernel/nmi.c
186
for_each_possible_cpu(cpu)
arch/sparc/kernel/nmi.c
187
prev_nmi_count[cpu] = get_nmi_count(cpu);
arch/sparc/kernel/nmi.c
191
for_each_online_cpu(cpu) {
arch/sparc/kernel/nmi.c
192
if (!per_cpu(wd_enabled, cpu))
arch/sparc/kernel/nmi.c
194
if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
arch/sparc/kernel/nmi.c
195
report_broken_nmi(cpu, prev_nmi_count);
arch/sparc/kernel/nmi.c
290
void watchdog_hardlockup_enable(unsigned int cpu)
arch/sparc/kernel/nmi.c
305
smp_call_function_single(cpu, start_nmi_watchdog, NULL, 1);
arch/sparc/kernel/nmi.c
311
void watchdog_hardlockup_disable(unsigned int cpu)
arch/sparc/kernel/nmi.c
316
smp_call_function_single(cpu, stop_nmi_watchdog, NULL, 1);
arch/sparc/kernel/nmi.c
58
int cpu;
arch/sparc/kernel/nmi.c
60
for_each_present_cpu(cpu) {
arch/sparc/kernel/nmi.c
61
if (per_cpu(nmi_touch, cpu) != 1)
arch/sparc/kernel/nmi.c
62
per_cpu(nmi_touch, cpu) = 1;
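The nmi.c matches at source lines 186-195 are the classic NMI-watchdog sanity check: snapshot every CPU's NMI count, let the watchdog tick for a while, then flag any enabled CPU whose count advanced by five or fewer. A sketch of that shape using the names from the listing; the settle delay is an assumption, not taken from the source:

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = get_nmi_count(cpu);

	mdelay((20 * 1000) / nmi_hz);	/* assumed settle delay, roughly 20 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(wd_enabled, cpu))
			continue;
		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
			report_broken_nmi(cpu, prev_nmi_count);	/* also clears wd_enabled */
	}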
arch/sparc/kernel/pci_sun4v.c
323
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
arch/sparc/kernel/pci_sun4v.c
352
free_pages((unsigned long)cpu, order);
arch/sparc/kernel/pcic.c
802
static void pcic_load_profile_irq(int cpu, unsigned int limit)
arch/sparc/kernel/perf_event.c
1593
int cpu, i;
arch/sparc/kernel/perf_event.c
1600
cpu = smp_processor_id();
arch/sparc/kernel/perf_event.c
1605
cpu, i, pcr_ops->read_pcr(i));
arch/sparc/kernel/perf_event.c
1608
cpu, i, pcr_ops->read_pic(i));
arch/sparc/kernel/process_64.c
244
int this_cpu, cpu;
arch/sparc/kernel/process_64.c
260
for_each_cpu(cpu, mask) {
arch/sparc/kernel/process_64.c
263
if (cpu == exclude_cpu)
arch/sparc/kernel/process_64.c
266
gp = &global_cpu_snapshot[cpu].reg;
arch/sparc/kernel/process_64.c
272
(cpu == this_cpu ? '*' : ' '), cpu,
arch/sparc/kernel/process_64.c
343
int this_cpu, cpu;
arch/sparc/kernel/process_64.c
355
for_each_online_cpu(cpu) {
arch/sparc/kernel/process_64.c
356
struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;
arch/sparc/kernel/process_64.c
361
(cpu == this_cpu ? '*' : ' '), cpu,
arch/sparc/kernel/prom_64.c
384
int cpu, unsigned int *thread)
arch/sparc/kernel/prom_64.c
416
if (this_cpu_id == cpu) {
arch/sparc/kernel/prom_64.c
418
int proc_id = cpu_data(cpu).proc_id;
arch/sparc/kernel/setup_32.c
391
struct cpu *p = kzalloc_obj(*p);
arch/sparc/kernel/setup_64.c
354
int cpu;
arch/sparc/kernel/setup_64.c
361
cpu = hard_smp_processor_id();
arch/sparc/kernel/setup_64.c
362
if (cpu >= NR_CPUS) {
arch/sparc/kernel/setup_64.c
364
cpu, NR_CPUS);
arch/sparc/kernel/setup_64.c
367
current_thread_info()->cpu = cpu;
arch/sparc/kernel/smp_32.c
123
void arch_smp_send_reschedule(int cpu)
arch/sparc/kernel/smp_32.c
130
sparc32_ipi_ops->resched(cpu);
arch/sparc/kernel/smp_32.c
137
void arch_send_call_function_single_ipi(int cpu)
arch/sparc/kernel/smp_32.c
140
sparc32_ipi_ops->single(cpu);
arch/sparc/kernel/smp_32.c
145
int cpu;
arch/sparc/kernel/smp_32.c
148
for_each_cpu(cpu, mask)
arch/sparc/kernel/smp_32.c
149
sparc32_ipi_ops->mask_one(cpu);
arch/sparc/kernel/smp_32.c
248
current_thread_info()->cpu = cpuid;
arch/sparc/kernel/smp_32.c
253
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
arch/sparc/kernel/smp_32.c
259
ret = smp4m_boot_one_cpu(cpu, tidle);
arch/sparc/kernel/smp_32.c
262
ret = smp4d_boot_one_cpu(cpu, tidle);
arch/sparc/kernel/smp_32.c
265
ret = leon_boot_one_cpu(cpu, tidle);
arch/sparc/kernel/smp_32.c
282
cpumask_set_cpu(cpu, &smp_commenced_mask);
arch/sparc/kernel/smp_32.c
283
while (!cpu_online(cpu))
arch/sparc/kernel/smp_32.c
338
unsigned int cpu;
arch/sparc/kernel/smp_32.c
346
cpu = smp_processor_id();
arch/sparc/kernel/smp_32.c
348
notify_cpu_starting(cpu);
arch/sparc/kernel/smp_32.c
352
set_cpu_online(cpu, true);
arch/sparc/kernel/smp_32.c
79
int cpu, num = 0;
arch/sparc/kernel/smp_32.c
81
for_each_online_cpu(cpu) {
arch/sparc/kernel/smp_32.c
83
bogosum += cpu_data(cpu).udelay_val;
arch/sparc/kernel/smp_64.c
1266
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
arch/sparc/kernel/smp_64.c
1268
int ret = smp_boot_one_cpu(cpu, tidle);
arch/sparc/kernel/smp_64.c
1271
cpumask_set_cpu(cpu, &smp_commenced_mask);
arch/sparc/kernel/smp_64.c
1272
while (!cpu_online(cpu))
arch/sparc/kernel/smp_64.c
1274
if (!cpu_online(cpu)) {
arch/sparc/kernel/smp_64.c
1281
smp_synchronize_one_tick(cpu);
arch/sparc/kernel/smp_64.c
1290
int cpu = smp_processor_id();
arch/sparc/kernel/smp_64.c
1296
struct trap_per_cpu *tb = &trap_block[cpu];
arch/sparc/kernel/smp_64.c
1308
cpumask_clear_cpu(cpu, &smp_commenced_mask);
arch/sparc/kernel/smp_64.c
1325
int cpu = smp_processor_id();
arch/sparc/kernel/smp_64.c
1329
for_each_cpu(i, &cpu_core_map[cpu])
arch/sparc/kernel/smp_64.c
1330
cpumask_clear_cpu(cpu, &cpu_core_map[i]);
arch/sparc/kernel/smp_64.c
1331
cpumask_clear(&cpu_core_map[cpu]);
arch/sparc/kernel/smp_64.c
1333
for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
arch/sparc/kernel/smp_64.c
1334
cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
arch/sparc/kernel/smp_64.c
1335
cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
arch/sparc/kernel/smp_64.c
1337
c = &cpu_data(cpu);
arch/sparc/kernel/smp_64.c
1351
set_cpu_online(cpu, false);
arch/sparc/kernel/smp_64.c
1358
void __cpu_die(unsigned int cpu)
arch/sparc/kernel/smp_64.c
1364
if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
arch/sparc/kernel/smp_64.c
1368
if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
arch/sparc/kernel/smp_64.c
1369
printk(KERN_ERR "CPU %u didn't die...\n", cpu);
arch/sparc/kernel/smp_64.c
1376
hv_err = sun4v_cpu_stop(cpu);
arch/sparc/kernel/smp_64.c
1378
set_cpu_present(cpu, false);
arch/sparc/kernel/smp_64.c
1395
static void send_cpu_ipi(int cpu)
arch/sparc/kernel/smp_64.c
1398
0, 0, cpumask_of(cpu));
arch/sparc/kernel/smp_64.c
1413
static unsigned long send_cpu_poke(int cpu)
arch/sparc/kernel/smp_64.c
1417
per_cpu(poke, cpu) = true;
arch/sparc/kernel/smp_64.c
1418
hv_err = sun4v_cpu_poke(cpu);
arch/sparc/kernel/smp_64.c
1420
per_cpu(poke, cpu) = false;
arch/sparc/kernel/smp_64.c
1428
void arch_smp_send_reschedule(int cpu)
arch/sparc/kernel/smp_64.c
1430
if (cpu == smp_processor_id()) {
arch/sparc/kernel/smp_64.c
1437
if (cpu_poke && idle_cpu(cpu)) {
arch/sparc/kernel/smp_64.c
1440
ret = send_cpu_poke(cpu);
arch/sparc/kernel/smp_64.c
1450
send_cpu_ipi(cpu);
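The smp_64.c matches at source lines 1428-1450 show the tiered reschedule path: a request for the local CPU just raises the soft interrupt, an idle remote CPU is first poked through the cheaper sun4v hypervisor call, and a full cross-call IPI is the fallback. A sketch reconstructed from the listed fragments (PIL_SMP_RECEIVE_SIGNAL and HV_EOK are the usual sparc64 constants, assumed here rather than quoted):

	void arch_smp_send_reschedule_sketch(int cpu)
	{
		if (cpu == smp_processor_id()) {
			set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);	/* local: no IPI */
			return;
		}

		/* a hypervisor poke is enough to wake an idle CPU */
		if (cpu_poke && idle_cpu(cpu)) {
			if (send_cpu_poke(cpu) == HV_EOK)
				return;
		}

		send_cpu_ipi(cpu);	/* fall back to a real cross-call IPI */
	}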
arch/sparc/kernel/smp_64.c
1491
int cpu;
arch/sparc/kernel/smp_64.c
1498
for_each_online_cpu(cpu) {
arch/sparc/kernel/smp_64.c
1499
if (cpu == this_cpu)
arch/sparc/kernel/smp_64.c
1502
set_cpu_online(cpu, false);
arch/sparc/kernel/smp_64.c
1506
hv_err = sun4v_cpu_stop(cpu);
arch/sparc/kernel/smp_64.c
1512
prom_stopcpu_cpuid(cpu);
arch/sparc/kernel/smp_64.c
1526
static int __init pcpu_cpu_to_node(int cpu)
arch/sparc/kernel/smp_64.c
1528
return cpu_to_node(cpu);
arch/sparc/kernel/smp_64.c
1534
unsigned int cpu;
arch/sparc/kernel/smp_64.c
1554
for_each_possible_cpu(cpu)
arch/sparc/kernel/smp_64.c
1555
__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
arch/sparc/kernel/smp_64.c
255
static void smp_start_sync_tick_client(int cpu);
arch/sparc/kernel/smp_64.c
257
static void smp_synchronize_one_tick(int cpu)
arch/sparc/kernel/smp_64.c
263
smp_start_sync_tick_client(cpu);
arch/sparc/kernel/smp_64.c
288
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
arch/sparc/kernel/smp_64.c
308
hdesc->cpu = cpu;
arch/sparc/kernel/smp_64.c
311
tb = &trap_block[cpu];
arch/sparc/kernel/smp_64.c
330
hv_err = sun4v_cpu_start(cpu, trampoline_ra,
arch/sparc/kernel/smp_64.c
347
static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
arch/sparc/kernel/smp_64.c
362
ldom_startcpu_cpuid(cpu,
arch/sparc/kernel/smp_64.c
367
prom_startcpu_cpuid(cpu, entry, cookie);
arch/sparc/kernel/smp_64.c
369
struct device_node *dp = of_find_node_by_cpuid(cpu);
arch/sparc/kernel/smp_64.c
383
printk("Processor %d is stuck.\n", cpu);
arch/sparc/kernel/smp_64.c
393
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
arch/sparc/kernel/smp_64.c
400
cpu = (((cpu & 0x3c) << 1) |
arch/sparc/kernel/smp_64.c
401
((cpu & 0x40) >> 4) |
arch/sparc/kernel/smp_64.c
402
(cpu & 0x3));
arch/sparc/kernel/smp_64.c
405
target = (cpu << 14) | 0x70;
arch/sparc/kernel/smp_64.c
652
u16 cpu;
arch/sparc/kernel/smp_64.c
703
cpu = cpu_list[i];
arch/sparc/kernel/smp_64.c
704
if (likely(cpu == 0xffff)) {
arch/sparc/kernel/smp_64.c
707
(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
arch/sparc/kernel/smp_64.c
708
ecpuerror_id = cpu + 1;
arch/sparc/kernel/smp_64.c
709
} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
arch/sparc/kernel/smp_64.c
710
enocpu_id = cpu + 1;
arch/sparc/kernel/smp_64.c
712
cpu_list[rem++] = cpu;
arch/sparc/kernel/smp_64.c
847
static void smp_start_sync_tick_client(int cpu)
arch/sparc/kernel/smp_64.c
850
cpumask_of(cpu));
arch/sparc/kernel/smp_64.c
862
void arch_send_call_function_single_ipi(int cpu)
arch/sparc/kernel/smp_64.c
865
cpumask_of(cpu));
arch/sparc/kernel/smp_64.c
940
void smp_flush_dcache_folio_impl(struct folio *folio, int cpu)
arch/sparc/kernel/smp_64.c
953
if (cpu == this_cpu) {
arch/sparc/kernel/smp_64.c
955
} else if (cpu_online(cpu)) {
arch/sparc/kernel/smp_64.c
973
(u64) pg_addr, cpumask_of(cpu));
arch/sparc/kernel/sun4d_irq.c
273
static void sun4d_load_profile_irq(int cpu, unsigned int limit)
arch/sparc/kernel/sun4d_irq.c
276
bw_set_prof_limit(cpu, value);
arch/sparc/kernel/sun4d_irq.c
281
int cpu = 0, mid;
arch/sparc/kernel/sun4d_irq.c
283
while (!cpu_find_by_instance(cpu, NULL, &mid)) {
arch/sparc/kernel/sun4d_irq.c
285
cpu++;
arch/sparc/kernel/sun4d_smp.c
195
int cpu;
arch/sparc/kernel/sun4d_smp.c
200
for_each_possible_cpu(cpu) {
arch/sparc/kernel/sun4d_smp.c
201
work = &per_cpu(sun4d_ipi_work, cpu);
arch/sparc/kernel/sun4d_smp.c
232
static void sun4d_send_ipi(int cpu, int level)
arch/sparc/kernel/sun4d_smp.c
234
cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
arch/sparc/kernel/sun4d_smp.c
237
static void sun4d_ipi_single(int cpu)
arch/sparc/kernel/sun4d_smp.c
239
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
arch/sparc/kernel/sun4d_smp.c
245
sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
arch/sparc/kernel/sun4d_smp.c
248
static void sun4d_ipi_mask_one(int cpu)
arch/sparc/kernel/sun4d_smp.c
250
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
arch/sparc/kernel/sun4d_smp.c
256
sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
arch/sparc/kernel/sun4d_smp.c
259
static void sun4d_ipi_resched(int cpu)
arch/sparc/kernel/sun4d_smp.c
261
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
arch/sparc/kernel/sun4d_smp.c
267
sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
arch/sparc/kernel/sun4d_smp.c
369
int cpu = hard_smp_processor_id();
arch/sparc/kernel/sun4d_smp.c
375
bw_get_prof_limit(cpu);
arch/sparc/kernel/sun4d_smp.c
378
cpu_tick[cpu]++;
arch/sparc/kernel/sun4d_smp.c
379
if (!(cpu_tick[cpu] & 15)) {
arch/sparc/kernel/sun4d_smp.c
380
if (cpu_tick[cpu] == 0x60)
arch/sparc/kernel/sun4d_smp.c
381
cpu_tick[cpu] = 0;
arch/sparc/kernel/sun4d_smp.c
382
cpu_leds[cpu] = led_mask[cpu_tick[cpu] >> 4];
arch/sparc/kernel/sun4d_smp.c
383
show_leds(cpu);
arch/sparc/kernel/sun4d_smp.c
386
ce = &per_cpu(sparc32_clockevent, cpu);
arch/sparc/kernel/sun4d_smp.c
85
while (current_set[cpuid]->cpu != cpuid)
arch/sparc/kernel/sun4m_irq.c
193
int cpu = smp_processor_id();
arch/sparc/kernel/sun4m_irq.c
201
sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->set);
arch/sparc/kernel/sun4m_irq.c
212
int cpu = smp_processor_id();
arch/sparc/kernel/sun4m_irq.c
220
sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->clear);
arch/sparc/kernel/sun4m_irq.c
347
void sun4m_clear_profile_irq(int cpu)
arch/sparc/kernel/sun4m_irq.c
349
sbus_readl(&timers_percpu[cpu]->l14_limit);
arch/sparc/kernel/sun4m_irq.c
352
static void sun4m_load_profile_irq(int cpu, unsigned int limit)
arch/sparc/kernel/sun4m_irq.c
355
sbus_writel(value, &timers_percpu[cpu]->l14_limit);
arch/sparc/kernel/sun4m_smp.c
139
static void sun4m_send_ipi(int cpu, int level)
arch/sparc/kernel/sun4m_smp.c
141
sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
arch/sparc/kernel/sun4m_smp.c
144
static void sun4m_ipi_resched(int cpu)
arch/sparc/kernel/sun4m_smp.c
146
sun4m_send_ipi(cpu, IRQ_IPI_RESCHED);
arch/sparc/kernel/sun4m_smp.c
149
static void sun4m_ipi_single(int cpu)
arch/sparc/kernel/sun4m_smp.c
151
sun4m_send_ipi(cpu, IRQ_IPI_SINGLE);
arch/sparc/kernel/sun4m_smp.c
154
static void sun4m_ipi_mask_one(int cpu)
arch/sparc/kernel/sun4m_smp.c
156
sun4m_send_ipi(cpu, IRQ_IPI_MASK);
arch/sparc/kernel/sun4m_smp.c
247
int cpu = smp_processor_id();
arch/sparc/kernel/sun4m_smp.c
251
ce = &per_cpu(sparc32_clockevent, cpu);
arch/sparc/kernel/sun4m_smp.c
254
sun4m_clear_profile_irq(cpu);
arch/sparc/kernel/sun4m_smp.c
256
sparc_config.load_profile_irq(cpu, 0); /* Is this needless? */
arch/sparc/kernel/sysfs.c
206
static DEFINE_PER_CPU(struct cpu, cpu_devices);
arch/sparc/kernel/sysfs.c
208
static int register_cpu_online(unsigned int cpu)
arch/sparc/kernel/sysfs.c
210
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/sparc/kernel/sysfs.c
221
static int unregister_cpu_online(unsigned int cpu)
arch/sparc/kernel/sysfs.c
224
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/sparc/kernel/sysfs.c
249
int cpu, ret;
arch/sparc/kernel/sysfs.c
253
for_each_possible_cpu(cpu) {
arch/sparc/kernel/sysfs.c
254
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/sparc/kernel/sysfs.c
256
register_cpu(c, cpu);
arch/sparc/kernel/time_32.c
187
int cpu = cpumask_first(evt->cpumask);
arch/sparc/kernel/time_32.c
189
sparc_config.load_profile_irq(cpu, 0);
arch/sparc/kernel/time_32.c
195
int cpu = cpumask_first(evt->cpumask);
arch/sparc/kernel/time_32.c
197
sparc_config.load_profile_irq(cpu, SBUS_CLOCK_RATE / HZ);
arch/sparc/kernel/time_32.c
204
int cpu = cpumask_first(evt->cpumask);
arch/sparc/kernel/time_32.c
207
sparc_config.load_profile_irq(cpu, next);
arch/sparc/kernel/time_32.c
211
void register_percpu_ce(int cpu)
arch/sparc/kernel/time_32.c
213
struct clock_event_device *ce = &per_cpu(sparc32_clockevent, cpu);
arch/sparc/kernel/time_32.c
226
ce->cpumask = cpumask_of(cpu);
arch/sparc/kernel/time_64.c
642
unsigned long sparc64_get_clock_tick(unsigned int cpu)
arch/sparc/kernel/time_64.c
644
struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
arch/sparc/kernel/time_64.c
648
return cpu_data(cpu).clock_tick;
arch/sparc/kernel/time_64.c
658
unsigned int cpu;
arch/sparc/kernel/time_64.c
661
for_each_cpu(cpu, freq->policy->cpus) {
arch/sparc/kernel/time_64.c
662
ft = &per_cpu(sparc64_freq_table, cpu);
arch/sparc/kernel/time_64.c
666
ft->clock_tick_ref = cpu_data(cpu).clock_tick;
arch/sparc/kernel/time_64.c
671
cpu_data(cpu).clock_tick =
arch/sparc/kernel/time_64.c
722
int cpu = smp_processor_id();
arch/sparc/kernel/time_64.c
723
struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
arch/sparc/kernel/time_64.c
734
"Spurious SPARC64 timer interrupt on cpu %d\n", cpu);
arch/sparc/kernel/traps_32.c
375
TI_CPU != offsetof(struct thread_info, cpu) ||
arch/sparc/kernel/traps_64.c
1966
int cpu, const char *pfx, atomic_t *ocnt)
arch/sparc/kernel/traps_64.c
1972
printk("%s: Reporting on cpu %d\n", pfx, cpu);
arch/sparc/kernel/traps_64.c
2084
int cpu;
arch/sparc/kernel/traps_64.c
2086
cpu = get_cpu();
arch/sparc/kernel/traps_64.c
2088
tb = &trap_block[cpu];
arch/sparc/kernel/traps_64.c
2120
sun4v_log_error(regs, &local_copy, cpu,
arch/sparc/kernel/traps_64.c
2202
int cpu;
arch/sparc/kernel/traps_64.c
2204
cpu = get_cpu();
arch/sparc/kernel/traps_64.c
2206
tb = &trap_block[cpu];
arch/sparc/kernel/traps_64.c
2226
if (pci_poke_in_progress && pci_poke_cpu == cpu) {
arch/sparc/kernel/traps_64.c
2234
sun4v_log_error(regs, &local_copy, cpu,
arch/sparc/kernel/traps_64.c
2828
int cpu = hard_smp_processor_id();
arch/sparc/kernel/traps_64.c
2829
struct trap_per_cpu *p = &trap_block[cpu];
arch/sparc/kernel/traps_64.c
2845
TI_CPU != offsetof(struct thread_info, cpu) ||
arch/sparc/kernel/traps_64.c
829
int cpu = smp_processor_id();
arch/sparc/kernel/traps_64.c
834
p = cheetah_error_log + (cpu * 2);
arch/sparc/mm/init_64.c
1429
int cpu;
arch/sparc/mm/init_64.c
1433
for_each_cpu(cpu, &mask)
arch/sparc/mm/init_64.c
1434
numa_cpu_lookup_table[cpu] = index;
arch/sparc/mm/init_64.c
1439
for_each_cpu(cpu, &mask)
arch/sparc/mm/init_64.c
1440
printk("%d ", cpu);
arch/sparc/mm/init_64.c
1507
unsigned long cpu, index;
arch/sparc/mm/init_64.c
1513
for_each_present_cpu(cpu) {
arch/sparc/mm/init_64.c
1514
numa_cpu_lookup_table[cpu] = index;
arch/sparc/mm/init_64.c
1515
cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
arch/sparc/mm/init_64.c
1517
node_masks[index].match = cpu << 36UL;
arch/sparc/mm/init_64.c
250
static inline void clear_dcache_dirty_cpu(struct folio *folio, unsigned long cpu)
arch/sparc/mm/init_64.c
268
: "r" (cpu), "r" (mask), "r" (&folio->flags.f),
arch/sparc/mm/init_64.c
297
int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
arch/sparc/mm/init_64.c
304
if (cpu == this_cpu)
arch/sparc/mm/init_64.c
307
smp_flush_dcache_folio_impl(folio, cpu);
arch/sparc/mm/init_64.c
309
clear_dcache_dirty_cpu(folio, cpu);
arch/sparc/mm/init_64.c
765
int cpu;
arch/sparc/mm/init_64.c
787
for_each_online_cpu(cpu) {
arch/sparc/mm/init_64.c
793
mm = per_cpu(per_cpu_secondary_mm, cpu);
arch/sparc/mm/srmmu.c
1022
int cpu = 0;
arch/sparc/mm/srmmu.c
1049
cpu++;
arch/sparc/mm/srmmu.c
1050
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
arch/sparc/net/bpf_jit_comp_32.c
221
emit_load32(G6, struct thread_info, cpu, REG)
arch/um/include/asm/smp.h
12
void arch_smp_send_reschedule(int cpu);
arch/um/include/asm/smp.h
14
void arch_send_call_function_single_ipi(int cpu);
arch/um/include/asm/thread_info.h
21
__u32 cpu; /* current CPU */
arch/um/include/asm/thread_info.h
29
.cpu = 0, \
arch/um/include/shared/os.h
276
extern int os_timer_set_interval(int cpu, unsigned long long nsecs);
arch/um/include/shared/os.h
277
extern int os_timer_one_shot(int cpu, unsigned long long nsecs);
arch/um/include/shared/os.h
278
extern void os_timer_disable(int cpu);
arch/um/include/shared/os.h
349
int os_start_cpu_thread(int cpu);
arch/um/include/shared/os.h
351
int os_send_ipi(int cpu, int vector);
arch/um/kernel/irq.c
714
int cpu;
arch/um/kernel/irq.c
717
for_each_online_cpu(cpu)
arch/um/kernel/irq.c
718
seq_printf(p, "%10u ", irq_stats(cpu)->irq_resched_count);
arch/um/kernel/irq.c
722
for_each_online_cpu(cpu)
arch/um/kernel/irq.c
723
seq_printf(p, "%10u ", irq_stats(cpu)->irq_call_count);
arch/um/kernel/process.c
70
cpu_tasks[task_thread_info(task)->cpu] = task;
arch/um/kernel/smp.c
121
int err, cpu = raw_smp_processor_id();
arch/um/kernel/smp.c
123
notify_cpu_starting(cpu);
arch/um/kernel/smp.c
124
set_cpu_online(cpu, true);
arch/um/kernel/smp.c
128
panic("CPU#%d failed to setup timer, err = %d", cpu, err);
arch/um/kernel/smp.c
139
int cpu = raw_smp_processor_id();
arch/um/kernel/smp.c
143
stack_protections((unsigned long) &cpu_irqstacks[cpu]);
arch/um/kernel/smp.c
144
set_sigstack(&cpu_irqstacks[cpu], THREAD_SIZE);
arch/um/kernel/smp.c
146
set_cpu_present(cpu, true);
arch/um/kernel/smp.c
147
os_futex_wait(&cpu_states[cpu], UML_CPU_PAUSED);
arch/um/kernel/smp.c
151
idle = cpu_tasks[cpu];
arch/um/kernel/smp.c
152
idle->thread_info.cpu = cpu;
arch/um/kernel/smp.c
167
int err, cpu, me = smp_processor_id();
arch/um/kernel/smp.c
172
for_each_possible_cpu(cpu) {
arch/um/kernel/smp.c
173
if (cpu == me)
arch/um/kernel/smp.c
176
pr_debug("Booting processor %d...\n", cpu);
arch/um/kernel/smp.c
177
err = os_start_cpu_thread(cpu);
arch/um/kernel/smp.c
180
cpu, err);
arch/um/kernel/smp.c
185
spin_until_cond(cpu_present(cpu) ||
arch/um/kernel/smp.c
188
if (!cpu_present(cpu))
arch/um/kernel/smp.c
189
pr_crit("CPU#%d failed to boot\n", cpu);
arch/um/kernel/smp.c
193
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
arch/um/kernel/smp.c
195
cpu_tasks[cpu] = tidle;
arch/um/kernel/smp.c
197
cpu_states[cpu] = UML_CPU_RUNNING;
arch/um/kernel/smp.c
198
os_futex_wake(&cpu_states[cpu]);
arch/um/kernel/smp.c
199
spin_until_cond(cpu_online(cpu));
arch/um/kernel/smp.c
213
int cpu;
arch/um/kernel/smp.c
215
for (cpu = 0; cpu < uml_ncpus; cpu++)
arch/um/kernel/smp.c
216
set_cpu_possible(cpu, true);
arch/um/kernel/smp.c
217
for (; cpu < NR_CPUS; cpu++)
arch/um/kernel/smp.c
218
set_cpu_possible(cpu, false);
arch/um/kernel/smp.c
32
void arch_smp_send_reschedule(int cpu)
arch/um/kernel/smp.c
34
os_send_ipi(cpu, UML_IPI_RES);
arch/um/kernel/smp.c
37
void arch_send_call_function_single_ipi(int cpu)
arch/um/kernel/smp.c
39
os_send_ipi(cpu, UML_IPI_CALL_SINGLE);
arch/um/kernel/smp.c
44
int cpu;
arch/um/kernel/smp.c
46
for_each_cpu(cpu, mask)
arch/um/kernel/smp.c
47
os_send_ipi(cpu, UML_IPI_CALL);
arch/um/kernel/smp.c
52
int cpu, me = smp_processor_id();
arch/um/kernel/smp.c
54
for_each_online_cpu(cpu) {
arch/um/kernel/smp.c
55
if (cpu == me)
arch/um/kernel/smp.c
57
os_send_ipi(cpu, UML_IPI_STOP);
arch/um/kernel/smp.c
64
int cpu = raw_smp_processor_id();
arch/um/kernel/smp.c
88
set_cpu_online(cpu, false);
arch/um/kernel/smp.c
94
pr_err("CPU#%d received unknown IPI (vector=%d)!\n", cpu, vector);
arch/um/kernel/time.c
628
int cpu = raw_smp_processor_id();
arch/um/kernel/time.c
631
os_timer_disable(cpu);
arch/um/kernel/time.c
642
os_timer_set_interval(cpu, time_travel_timer_interval);
arch/um/kernel/time.c
644
os_timer_one_shot(cpu, time_travel_timer_event.time - next);
arch/um/kernel/time.c
786
int cpu = evt - &timer_clockevent[0];
arch/um/kernel/time.c
793
os_timer_disable(cpu);
arch/um/kernel/time.c
801
int cpu = evt - &timer_clockevent[0];
arch/um/kernel/time.c
814
os_timer_set_interval(cpu, interval);
arch/um/kernel/time.c
864
int cpu = raw_smp_processor_id();
arch/um/kernel/time.c
865
struct clock_event_device *evt = &timer_clockevent[cpu];
arch/um/kernel/time.c
917
int cpu = raw_smp_processor_id();
arch/um/kernel/time.c
918
struct clock_event_device *evt = &timer_clockevent[cpu];
arch/um/kernel/time.c
926
evt->cpumask = cpumask_of(cpu);
arch/um/os-Linux/smp.c
18
int cpu;
arch/um/os-Linux/smp.c
35
__curr_cpu = data->cpu;
arch/um/os-Linux/smp.c
42
int os_start_cpu_thread(int cpu)
arch/um/os-Linux/smp.c
58
data->cpu = cpu;
arch/um/os-Linux/smp.c
61
err = pthread_create(&cpu_threads[cpu], NULL, cpu_thread, data);
arch/um/os-Linux/smp.c
92
int os_send_ipi(int cpu, int vector)
arch/um/os-Linux/smp.c
96
return pthread_sigqueue(cpu_threads[cpu], IPI_SIGNAL, value);
arch/um/os-Linux/time.c
100
timer_settime(event_high_res_timer[cpu], 0, &its, NULL);
arch/um/os-Linux/time.c
46
int cpu = uml_curr_cpu();
arch/um/os-Linux/time.c
47
timer_t *t = &event_high_res_timer[cpu];
arch/um/os-Linux/time.c
61
int os_timer_set_interval(int cpu, unsigned long long nsecs)
arch/um/os-Linux/time.c
71
if (timer_settime(event_high_res_timer[cpu], 0, &its, NULL) == -1)
arch/um/os-Linux/time.c
77
int os_timer_one_shot(int cpu, unsigned long long nsecs)
arch/um/os-Linux/time.c
87
timer_settime(event_high_res_timer[cpu], 0, &its, NULL);
arch/um/os-Linux/time.c
95
void os_timer_disable(int cpu)
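The os-Linux/time.c matches above (os_timer_set_interval, os_timer_one_shot, os_timer_disable at source lines 61-95) all funnel into timer_settime() on a per-cpu POSIX timer. The mode is carried entirely by struct itimerspec: zero it_interval arms a one-shot, non-zero re-arms periodically, and an all-zero spec disarms. A runnable sketch of that convention, with the per-cpu timer array reduced to a single timer (link with -lrt on older glibc):

	#define _POSIX_C_SOURCE 199309L
	#include <signal.h>
	#include <string.h>
	#include <time.h>

	static timer_t t;

	/* one-shot when interval_ns == 0, periodic otherwise;
	 * value_ns == 0 && interval_ns == 0 disarms the timer */
	static int timer_arm(long long value_ns, long long interval_ns)
	{
		struct itimerspec its;

		memset(&its, 0, sizeof(its));
		its.it_value.tv_sec = value_ns / 1000000000LL;
		its.it_value.tv_nsec = value_ns % 1000000000LL;
		its.it_interval.tv_sec = interval_ns / 1000000000LL;
		its.it_interval.tv_nsec = interval_ns % 1000000000LL;

		return timer_settime(t, 0, &its, NULL);
	}

	int main(void)
	{
		timer_create(CLOCK_MONOTONIC, NULL, &t);
		timer_arm(1000000, 0);		/* one-shot in 1 ms   */
		timer_arm(1000000, 1000000);	/* periodic every 1 ms */
		timer_arm(0, 0);		/* disable             */
		return 0;
	}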
arch/x86/boot/compressed/cpuflags.c
8
return test_bit(flag, cpu.flags);
arch/x86/boot/cpucheck.c
116
memset(&cpu.flags, 0, sizeof(cpu.flags));
arch/x86/boot/cpucheck.c
117
cpu.level = 3;
arch/x86/boot/cpucheck.c
120
cpu.level = 4;
arch/x86/boot/cpucheck.c
125
if (test_bit(X86_FEATURE_LM, cpu.flags))
arch/x86/boot/cpucheck.c
126
cpu.level = 64;
arch/x86/boot/cpucheck.c
145
is_centaur() && cpu.model >= 6) {
arch/x86/boot/cpucheck.c
155
set_bit(X86_FEATURE_CX8, cpu.flags);
arch/x86/boot/cpucheck.c
168
: "+a" (level), "=d" (cpu.flags[0])
arch/x86/boot/cpucheck.c
175
is_intel() && cpu.level == 6 &&
arch/x86/boot/cpucheck.c
176
(cpu.model == 9 || cpu.model == 13)) {
arch/x86/boot/cpucheck.c
180
set_bit(X86_FEATURE_PAE, cpu.flags);
arch/x86/boot/cpucheck.c
193
*cpu_level_ptr = cpu.level;
arch/x86/boot/cpucheck.c
197
return (cpu.level < req_level || err) ? -1 : 0;
arch/x86/boot/cpucheck.c
206
cpu.family != 6 ||
arch/x86/boot/cpucheck.c
207
cpu.model != 0x57 /*INTEL_XEON_PHI_KNL*/)
arch/x86/boot/cpucheck.c
96
err_flags[i] = req_flags[i] & ~cpu.flags[i];
arch/x86/boot/cpuflags.c
106
cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
arch/x86/boot/cpuflags.c
107
&cpu.flags[1]);
arch/x86/boot/cpuflags.c
79
set_bit(X86_FEATURE_FPU, cpu.flags);
arch/x86/boot/cpuflags.c
87
cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
arch/x86/boot/cpuflags.c
88
&cpu.flags[0]);
arch/x86/boot/cpuflags.c
89
cpu.level = (tfms >> 8) & 15;
arch/x86/boot/cpuflags.c
9
struct cpu_features cpu;
arch/x86/boot/cpuflags.c
90
cpu.family = cpu.level;
arch/x86/boot/cpuflags.c
91
cpu.model = (tfms >> 4) & 15;
arch/x86/boot/cpuflags.c
92
if (cpu.level >= 6)
arch/x86/boot/cpuflags.c
93
cpu.model += ((tfms >> 16) & 0xf) << 4;
arch/x86/boot/cpuflags.c
98
&cpu.flags[16], &ignored);
arch/x86/boot/cpuflags.h
15
extern struct cpu_features cpu;
arch/x86/boot/main.c
89
if (cpu.level < 6)
arch/x86/coco/sev/core.c
1190
static void __init alloc_runtime_data(int cpu)
arch/x86/coco/sev/core.c
1194
data = memblock_alloc_node(sizeof(*data), PAGE_SIZE, cpu_to_node(cpu));
arch/x86/coco/sev/core.c
1198
per_cpu(runtime_data, cpu) = data;
arch/x86/coco/sev/core.c
1204
caa = cpu ? memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE)
arch/x86/coco/sev/core.c
1207
per_cpu(svsm_caa, cpu) = caa;
arch/x86/coco/sev/core.c
1208
per_cpu(svsm_caa_pa, cpu) = __pa(caa);
arch/x86/coco/sev/core.c
1212
static void __init init_ghcb(int cpu)
arch/x86/coco/sev/core.c
1217
data = per_cpu(runtime_data, cpu);
arch/x86/coco/sev/core.c
1232
int cpu;
arch/x86/coco/sev/core.c
1254
for_each_possible_cpu(cpu) {
arch/x86/coco/sev/core.c
1255
alloc_runtime_data(cpu);
arch/x86/coco/sev/core.c
1256
init_ghcb(cpu);
arch/x86/coco/sev/core.c
555
int cpu;
arch/x86/coco/sev/core.c
577
for_each_possible_cpu(cpu) {
arch/x86/coco/sev/core.c
578
data = per_cpu(runtime_data, cpu);
arch/x86/coco/sev/core.c
640
int apic_id, this_cpu, cpu;
arch/x86/coco/sev/core.c
648
for_each_present_cpu(cpu) {
arch/x86/coco/sev/core.c
649
vmsa = per_cpu(sev_vmsa, cpu);
arch/x86/coco/sev/core.c
661
if (this_cpu == cpu) {
arch/x86/coco/sev/core.c
677
apic_id = cpuid_to_apicid[cpu];
arch/x86/coco/sev/core.c
695
unsigned int level, cpu;
arch/x86/coco/sev/core.c
718
for_each_possible_cpu(cpu) {
arch/x86/coco/sev/core.c
719
data = per_cpu(runtime_data, cpu);
arch/x86/coco/sev/core.c
737
static void *snp_alloc_vmsa_page(int cpu)
arch/x86/coco/sev/core.c
749
p = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
arch/x86/coco/sev/core.c
761
static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip, unsigned int cpu)
arch/x86/coco/sev/core.c
787
cur_vmsa = per_cpu(sev_vmsa, cpu);
arch/x86/coco/sev/core.c
796
vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page(cpu);
arch/x86/coco/sev/core.c
801
caa = per_cpu(svsm_caa, cpu);
arch/x86/coco/sev/core.c
883
per_cpu(sev_vmsa, cpu) = vmsa;
arch/x86/coco/sev/core.c
947
int cpu;
arch/x86/coco/sev/core.c
956
for_each_possible_cpu(cpu) {
arch/x86/coco/sev/core.c
957
data = per_cpu(runtime_data, cpu);
arch/x86/coco/sev/core.c
966
address = per_cpu(svsm_caa_pa, cpu);
arch/x86/entry/vdso/common/vgetcpu.c
13
__vdso_getcpu(unsigned *cpu, unsigned *node, void *unused)
arch/x86/entry/vdso/common/vgetcpu.c
15
vdso_read_cpunode(cpu, node);
arch/x86/entry/vdso/common/vgetcpu.c
20
long getcpu(unsigned *cpu, unsigned *node, void *tcache)
arch/x86/events/amd/core.c
535
static struct amd_nb *amd_alloc_nb(int cpu)
arch/x86/events/amd/core.c
540
nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
arch/x86/events/amd/core.c
559
static void amd_pmu_cpu_reset(int cpu)
arch/x86/events/amd/core.c
578
static int amd_pmu_cpu_prepare(int cpu)
arch/x86/events/amd/core.c
580
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/amd/core.c
583
cpu_to_node(cpu));
arch/x86/events/amd/core.c
592
cpuc->amd_nb = amd_alloc_nb(cpu);
arch/x86/events/amd/core.c
602
static void amd_pmu_cpu_starting(int cpu)
arch/x86/events/amd/core.c
604
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/amd/core.c
610
amd_pmu_cpu_reset(cpu);
arch/x86/events/amd/core.c
615
nb_id = topology_amd_node_id(cpu);
arch/x86/events/amd/core.c
634
static void amd_pmu_cpu_dead(int cpu)
arch/x86/events/amd/core.c
636
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/amd/ibs.c
1713
static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
arch/x86/events/amd/ibs.c
1753
static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
arch/x86/events/amd/iommu.c
224
if (event->cpu < 0)
arch/x86/events/amd/power.c
220
static int power_cpu_exit(unsigned int cpu)
arch/x86/events/amd/power.c
224
if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))
arch/x86/events/amd/power.c
232
target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
arch/x86/events/amd/power.c
235
perf_pmu_migrate_context(&pmu_class, cpu, target);
arch/x86/events/amd/power.c
240
static int power_cpu_init(unsigned int cpu)
arch/x86/events/amd/power.c
253
target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
arch/x86/events/amd/power.c
255
cpumask_set_cpu(cpu, &cpu_mask);
arch/x86/events/amd/uncore.c
1002
info.split.gid = topology_logical_package_id(cpu);
arch/x86/events/amd/uncore.c
1003
info.split.cid = topology_logical_package_id(cpu);
arch/x86/events/amd/uncore.c
1004
*per_cpu_ptr(uncore->info, cpu) = info;
arch/x86/events/amd/uncore.c
1008
int amd_uncore_umc_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
arch/x86/events/amd/uncore.c
1023
return amd_uncore_ctx_init(uncore, cpu);
arch/x86/events/amd/uncore.c
107
if (!ctx->nr_active || ctx->cpu != smp_processor_id())
arch/x86/events/amd/uncore.c
1089
return amd_uncore_ctx_init(uncore, cpu);
arch/x86/events/amd/uncore.c
166
struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
arch/x86/events/amd/uncore.c
184
struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
arch/x86/events/amd/uncore.c
205
struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
arch/x86/events/amd/uncore.c
252
struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
arch/x86/events/amd/uncore.c
276
if (event->cpu < 0)
arch/x86/events/amd/uncore.c
280
ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
arch/x86/events/amd/uncore.c
299
event->cpu = ctx->cpu;
arch/x86/events/amd/uncore.c
40
int cpu;
arch/x86/events/amd/uncore.c
452
int amd_uncore_ctx_cid(struct amd_uncore *uncore, unsigned int cpu)
arch/x86/events/amd/uncore.c
454
union amd_uncore_info *info = per_cpu_ptr(uncore->info, cpu);
arch/x86/events/amd/uncore.c
459
int amd_uncore_ctx_gid(struct amd_uncore *uncore, unsigned int cpu)
arch/x86/events/amd/uncore.c
461
union amd_uncore_info *info = per_cpu_ptr(uncore->info, cpu);
arch/x86/events/amd/uncore.c
466
int amd_uncore_ctx_num_pmcs(struct amd_uncore *uncore, unsigned int cpu)
arch/x86/events/amd/uncore.c
468
union amd_uncore_info *info = per_cpu_ptr(uncore->info, cpu);
arch/x86/events/amd/uncore.c
472
static void amd_uncore_ctx_free(struct amd_uncore *uncore, unsigned int cpu)
arch/x86/events/amd/uncore.c
483
ctx = *per_cpu_ptr(pmu->ctx, cpu);
arch/x86/events/amd/uncore.c
487
if (cpu == ctx->cpu)
arch/x86/events/amd/uncore.c
488
cpumask_clear_cpu(cpu, &pmu->active_mask);
arch/x86/events/amd/uncore.c
495
*per_cpu_ptr(pmu->ctx, cpu) = NULL;
arch/x86/events/amd/uncore.c
499
static int amd_uncore_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
arch/x86/events/amd/uncore.c
508
cid = amd_uncore_ctx_cid(uncore, cpu);
arch/x86/events/amd/uncore.c
509
gid = amd_uncore_ctx_gid(uncore, cpu);
arch/x86/events/amd/uncore.c
513
*per_cpu_ptr(pmu->ctx, cpu) = NULL;
arch/x86/events/amd/uncore.c
522
if (cpu == j)
arch/x86/events/amd/uncore.c
537
node = cpu_to_node(cpu);
arch/x86/events/amd/uncore.c
542
curr->cpu = cpu;
arch/x86/events/amd/uncore.c
554
cpumask_set_cpu(cpu, &pmu->active_mask);
arch/x86/events/amd/uncore.c
558
*per_cpu_ptr(pmu->ctx, cpu) = curr;
arch/x86/events/amd/uncore.c
564
amd_uncore_ctx_free(uncore, cpu);
arch/x86/events/amd/uncore.c
569
static void amd_uncore_ctx_move(struct amd_uncore *uncore, unsigned int cpu)
arch/x86/events/amd/uncore.c
580
curr = *per_cpu_ptr(pmu->ctx, cpu);
arch/x86/events/amd/uncore.c
587
if (!next || cpu == j)
arch/x86/events/amd/uncore.c
591
perf_pmu_migrate_context(&pmu->pmu, cpu, j);
arch/x86/events/amd/uncore.c
592
cpumask_clear_cpu(cpu, &pmu->active_mask);
arch/x86/events/amd/uncore.c
594
next->cpu = j;
arch/x86/events/amd/uncore.c
601
static int amd_uncore_cpu_starting(unsigned int cpu)
arch/x86/events/amd/uncore.c
608
uncore->scan(uncore, cpu);
arch/x86/events/amd/uncore.c
614
static int amd_uncore_cpu_online(unsigned int cpu)
arch/x86/events/amd/uncore.c
621
if (uncore->init(uncore, cpu))
arch/x86/events/amd/uncore.c
628
static int amd_uncore_cpu_down_prepare(unsigned int cpu)
arch/x86/events/amd/uncore.c
635
uncore->move(uncore, cpu);
arch/x86/events/amd/uncore.c
641
static int amd_uncore_cpu_dead(unsigned int cpu)
arch/x86/events/amd/uncore.c
648
uncore->free(uncore, cpu);
arch/x86/events/amd/uncore.c
692
void amd_uncore_df_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
arch/x86/events/amd/uncore.c
703
info.split.cid = topology_logical_package_id(cpu);
arch/x86/events/amd/uncore.c
710
*per_cpu_ptr(uncore->info, cpu) = info;
arch/x86/events/amd/uncore.c
714
int amd_uncore_df_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
arch/x86/events/amd/uncore.c
722
return amd_uncore_ctx_init(uncore, cpu);
arch/x86/events/amd/uncore.c
724
num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
arch/x86/events/amd/uncore.c
744
pmu->group = amd_uncore_ctx_gid(uncore, cpu);
arch/x86/events/amd/uncore.c
786
return amd_uncore_ctx_init(uncore, cpu);
arch/x86/events/amd/uncore.c
82
void (*scan)(struct amd_uncore *uncore, unsigned int cpu);
arch/x86/events/amd/uncore.c
829
void amd_uncore_l3_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
arch/x86/events/amd/uncore.c
83
int (*init)(struct amd_uncore *uncore, unsigned int cpu);
arch/x86/events/amd/uncore.c
839
info.split.cid = per_cpu_llc_id(cpu);
arch/x86/events/amd/uncore.c
84
void (*move)(struct amd_uncore *uncore, unsigned int cpu);
arch/x86/events/amd/uncore.c
844
*per_cpu_ptr(uncore->info, cpu) = info;
arch/x86/events/amd/uncore.c
848
int amd_uncore_l3_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
arch/x86/events/amd/uncore.c
85
void (*free)(struct amd_uncore *uncore, unsigned int cpu);
arch/x86/events/amd/uncore.c
856
return amd_uncore_ctx_init(uncore, cpu);
arch/x86/events/amd/uncore.c
858
num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
arch/x86/events/amd/uncore.c
878
pmu->group = amd_uncore_ctx_gid(uncore, cpu);
arch/x86/events/amd/uncore.c
922
return amd_uncore_ctx_init(uncore, cpu);
arch/x86/events/amd/uncore.c
941
struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
arch/x86/events/amd/uncore.c
990
void amd_uncore_umc_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
arch/x86/events/core.c
1564
int cpu, idx;
arch/x86/events/core.c
1568
cpu = smp_processor_id();
arch/x86/events/core.c
1569
cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/core.c
1584
pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
arch/x86/events/core.c
1585
pr_info("CPU#%d: status: %016llx\n", cpu, status);
arch/x86/events/core.c
1586
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
arch/x86/events/core.c
1587
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
arch/x86/events/core.c
1590
pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
arch/x86/events/core.c
1594
pr_info("CPU#%d: debugctl: %016llx\n", cpu, debugctl);
arch/x86/events/core.c
1597
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
arch/x86/events/core.c
1603
prev_left = per_cpu(pmc_prev_left[idx], cpu);
arch/x86/events/core.c
1606
cpu, idx, pmc_ctrl);
arch/x86/events/core.c
1608
cpu, idx, pmc_count);
arch/x86/events/core.c
1610
cpu, idx, prev_left);
arch/x86/events/core.c
1618
cpu, idx, pmc_count);
arch/x86/events/core.c
1826
static int x86_pmu_prepare_cpu(unsigned int cpu)
arch/x86/events/core.c
1828
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/core.c
1834
return x86_pmu.cpu_prepare(cpu);
arch/x86/events/core.c
1838
static int x86_pmu_dead_cpu(unsigned int cpu)
arch/x86/events/core.c
1841
x86_pmu.cpu_dead(cpu);
arch/x86/events/core.c
1845
static int x86_pmu_online_cpu(unsigned int cpu)
arch/x86/events/core.c
1847
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/core.c
1857
static int x86_pmu_starting_cpu(unsigned int cpu)
arch/x86/events/core.c
1860
x86_pmu.cpu_starting(cpu);
arch/x86/events/core.c
1864
static int x86_pmu_dying_cpu(unsigned int cpu)
arch/x86/events/core.c
1867
x86_pmu.cpu_dying(cpu);
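The events/core.c matches at source lines 1826-1867 are the per-stage hotplug callbacks that fan out to the vendor hooks (cpu_prepare/cpu_starting/cpu_dying/cpu_dead, declared in the perf_event.h entries further down at source lines 915-918). They are attached through the cpuhp state machine; a sketch of that registration as done in the mainline init path:

	static int __init pmu_hotplug_sketch(void)
	{
		int err;

		err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "perf/x86:prepare",
					x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
		if (err)
			return err;

		/* STARTING-section callbacks run on the hotplugged CPU itself,
		 * with interrupts disabled */
		err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING, "perf/x86:starting",
					x86_pmu_starting_cpu, x86_pmu_dying_cpu);
		if (err)
			return err;

		return cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "perf/x86:online",
					 x86_pmu_online_cpu, NULL);
	}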
arch/x86/events/core.c
1978
EVENT_ATTR(cpu-cycles, CPU_CYCLES );
arch/x86/events/core.c
2393
int cpu;
arch/x86/events/core.c
2406
cpu = cpumask_first(&h_pmu->supported_cpus);
arch/x86/events/core.c
2408
cpu = raw_smp_processor_id();
arch/x86/events/core.c
2411
if (intel_cpuc_prepare(cpuc, cpu))
arch/x86/events/core.c
2519
if (is_hybrid() && (event->cpu != -1)) {
arch/x86/events/core.c
2521
if (!cpumask_test_cpu(event->cpu, &pmu->supported_cpus))
arch/x86/events/core.c
2764
static bool x86_pmu_filter(struct pmu *pmu, int cpu)
arch/x86/events/core.c
2768
static_call_cond(x86_pmu_filter)(pmu, cpu, &ret);
arch/x86/events/core.c
793
struct pmu *x86_get_pmu(unsigned int cpu)
arch/x86/events/core.c
795
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/intel/bts.c
147
int cpu = raw_smp_processor_id();
arch/x86/events/intel/bts.c
148
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
arch/x86/events/intel/bts.c
186
int cpu = raw_smp_processor_id();
arch/x86/events/intel/bts.c
187
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
arch/x86/events/intel/bts.c
86
int cpu = event->cpu;
arch/x86/events/intel/bts.c
87
int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
arch/x86/events/intel/core.c
5572
static struct intel_shared_regs *allocate_shared_regs(int cpu)
arch/x86/events/intel/core.c
5578
GFP_KERNEL, cpu_to_node(cpu));
arch/x86/events/intel/core.c
5591
static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
arch/x86/events/intel/core.c
5596
GFP_KERNEL, cpu_to_node(cpu));
arch/x86/events/intel/core.c
5605
int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
arch/x86/events/intel/core.c
5610
cpuc->shared_regs = allocate_shared_regs(cpu);
arch/x86/events/intel/core.c
5618
cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
arch/x86/events/intel/core.c
5624
cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
arch/x86/events/intel/core.c
5645
static int intel_pmu_cpu_prepare(int cpu)
arch/x86/events/intel/core.c
5649
ret = intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
arch/x86/events/intel/core.c
5653
return alloc_arch_pebs_buf_on_cpu(cpu);
arch/x86/events/intel/core.c
6027
static bool init_hybrid_pmu(int cpu)
arch/x86/events/intel/core.c
6029
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/intel/core.c
6056
cpumask_set_cpu(cpu, &pmu->supported_cpus);
arch/x86/events/intel/core.c
6062
static void intel_pmu_cpu_starting(int cpu)
arch/x86/events/intel/core.c
6064
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/intel/core.c
6065
int core_id = topology_core_id(cpu);
arch/x86/events/intel/core.c
6068
if (is_hybrid() && !init_hybrid_pmu(cpu))
arch/x86/events/intel/core.c
6071
init_debug_store_on_cpu(cpu);
arch/x86/events/intel/core.c
6072
init_arch_pebs_on_cpu(cpu);
arch/x86/events/intel/core.c
6117
for_each_cpu(i, topology_sibling_cpumask(cpu)) {
arch/x86/events/intel/core.c
6135
for_each_cpu(i, topology_sibling_cpumask(cpu)) {
arch/x86/events/intel/core.c
6169
static void intel_pmu_cpu_dying(int cpu)
arch/x86/events/intel/core.c
6171
fini_debug_store_on_cpu(cpu);
arch/x86/events/intel/core.c
6172
fini_arch_pebs_on_cpu(cpu);
arch/x86/events/intel/core.c
6189
static void intel_pmu_cpu_dead(int cpu)
arch/x86/events/intel/core.c
6191
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/intel/core.c
6193
release_arch_pebs_buf_on_cpu(cpu);
arch/x86/events/intel/core.c
6197
cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
arch/x86/events/intel/core.c
6228
static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
arch/x86/events/intel/core.c
6232
*ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus);
arch/x86/events/intel/core.c
7207
int cpu = cpumask_first(&pmu->supported_cpus);
arch/x86/events/intel/core.c
7209
return (cpu >= nr_cpu_ids) ? -1 : cpu;
arch/x86/events/intel/core.c
7218
int cpu = hybrid_find_supported_cpu(pmu);
arch/x86/events/intel/core.c
7220
return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
arch/x86/events/intel/core.c
7231
int cpu = hybrid_find_supported_cpu(pmu);
arch/x86/events/intel/core.c
7233
return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0;
arch/x86/events/intel/cstate.c
290
if (event->cpu < 0)
arch/x86/events/intel/ds.c
1014
for_each_possible_cpu(cpu) {
arch/x86/events/intel/ds.c
1015
if (alloc_ds_buffer(cpu)) {
arch/x86/events/intel/ds.c
1020
if (!bts_err && alloc_bts_buffer(cpu))
arch/x86/events/intel/ds.c
1024
alloc_pebs_buffer(cpu))
arch/x86/events/intel/ds.c
1032
for_each_possible_cpu(cpu)
arch/x86/events/intel/ds.c
1033
release_bts_buffer(cpu);
arch/x86/events/intel/ds.c
1037
for_each_possible_cpu(cpu)
arch/x86/events/intel/ds.c
1038
release_pebs_buffer(cpu);
arch/x86/events/intel/ds.c
1042
for_each_possible_cpu(cpu)
arch/x86/events/intel/ds.c
1043
release_ds_buffer(cpu);
arch/x86/events/intel/ds.c
1051
for_each_possible_cpu(cpu) {
arch/x86/events/intel/ds.c
1056
init_debug_store_on_cpu(cpu);
arch/x86/events/intel/ds.c
1061
inline int alloc_arch_pebs_buf_on_cpu(int cpu)
arch/x86/events/intel/ds.c
1066
return alloc_pebs_buffer(cpu);
arch/x86/events/intel/ds.c
1069
inline void release_arch_pebs_buf_on_cpu(int cpu)
arch/x86/events/intel/ds.c
1074
release_pebs_buffer(cpu);
arch/x86/events/intel/ds.c
1077
void init_arch_pebs_on_cpu(int cpu)
arch/x86/events/intel/ds.c
1079
struct cpu_hw_events *cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
arch/x86/events/intel/ds.c
1086
WARN(1, "Fail to allocate PEBS buffer on CPU %d\n", cpu);
arch/x86/events/intel/ds.c
1098
wrmsr_on_cpu(cpu, MSR_IA32_PEBS_BASE, (u32)arch_pebs_base,
arch/x86/events/intel/ds.c
1103
inline void fini_arch_pebs_on_cpu(int cpu)
arch/x86/events/intel/ds.c
1108
wrmsr_on_cpu(cpu, MSR_IA32_PEBS_BASE, 0, 0);
arch/x86/events/intel/ds.c
776
void init_debug_store_on_cpu(int cpu)
arch/x86/events/intel/ds.c
778
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
arch/x86/events/intel/ds.c
783
wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
arch/x86/events/intel/ds.c
788
void fini_debug_store_on_cpu(int cpu)
arch/x86/events/intel/ds.c
790
if (!per_cpu(cpu_hw_events, cpu).ds)
arch/x86/events/intel/ds.c
793
wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
arch/x86/events/intel/ds.c
831
static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
arch/x86/events/intel/ds.c
834
int node = cpu_to_node(cpu);
arch/x86/events/intel/ds.c
847
static int alloc_pebs_buffer(int cpu)
arch/x86/events/intel/ds.c
849
struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
arch/x86/events/intel/ds.c
852
int max, node = cpu_to_node(cpu);
arch/x86/events/intel/ds.c
858
buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
arch/x86/events/intel/ds.c
877
per_cpu(insn_buffer, cpu) = insn_buff;
arch/x86/events/intel/ds.c
881
cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
arch/x86/events/intel/ds.c
890
static void release_pebs_buffer(int cpu)
arch/x86/events/intel/ds.c
892
struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
arch/x86/events/intel/ds.c
899
kfree(per_cpu(insn_buffer, cpu));
arch/x86/events/intel/ds.c
900
per_cpu(insn_buffer, cpu) = NULL;
arch/x86/events/intel/ds.c
903
cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
arch/x86/events/intel/ds.c
911
static int alloc_bts_buffer(int cpu)
arch/x86/events/intel/ds.c
913
struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
arch/x86/events/intel/ds.c
921
buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
arch/x86/events/intel/ds.c
928
cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
arch/x86/events/intel/ds.c
940
static void release_bts_buffer(int cpu)
arch/x86/events/intel/ds.c
942
struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
arch/x86/events/intel/ds.c
949
cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
arch/x86/events/intel/ds.c
955
static int alloc_ds_buffer(int cpu)
arch/x86/events/intel/ds.c
957
struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;
arch/x86/events/intel/ds.c
960
per_cpu(cpu_hw_events, cpu).ds = ds;
arch/x86/events/intel/ds.c
964
static void release_ds_buffer(int cpu)
arch/x86/events/intel/ds.c
966
per_cpu(cpu_hw_events, cpu).ds = NULL;
arch/x86/events/intel/ds.c
971
int cpu;
arch/x86/events/intel/ds.c
976
for_each_possible_cpu(cpu)
arch/x86/events/intel/ds.c
977
release_ds_buffer(cpu);
arch/x86/events/intel/ds.c
979
for_each_possible_cpu(cpu) {
arch/x86/events/intel/ds.c
985
fini_debug_store_on_cpu(cpu);
arch/x86/events/intel/ds.c
988
for_each_possible_cpu(cpu) {
arch/x86/events/intel/ds.c
990
release_pebs_buffer(cpu);
arch/x86/events/intel/ds.c
991
release_bts_buffer(cpu);
arch/x86/events/intel/ds.c
998
int cpu;
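The ds.c matches at source lines 1014-1043 follow a common per-cpu allocation pattern: walk every possible CPU, record rather than abort on an optional-buffer (BTS/PEBS) failure so the hard DS allocation still completes, and on any error release what was already set up across all CPUs. A condensed sketch of that shape, with the separate error flags collapsed into one for brevity:

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {	/* hard failure: stop everything */
			err = -ENOMEM;
			break;
		}
		if (!err && alloc_pebs_buffer(cpu))
			err = -ENOMEM;		/* soft failure: remember, keep going */
	}

	if (err) {
		/* unwind across *all* possible CPUs, not just the ones reached */
		for_each_possible_cpu(cpu) {
			release_pebs_buffer(cpu);
			release_ds_buffer(cpu);
		}
	}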
arch/x86/events/intel/lbr.c
624
int cpu;
arch/x86/events/intel/lbr.c
629
for_each_possible_cpu(cpu) {
arch/x86/events/intel/lbr.c
630
cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
arch/x86/events/intel/lbr.c
631
kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
arch/x86/events/intel/lbr.c
643
int cpu;
arch/x86/events/intel/lbr.c
648
for_each_possible_cpu(cpu) {
arch/x86/events/intel/lbr.c
649
cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
arch/x86/events/intel/lbr.c
650
kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
arch/x86/events/intel/lbr.c
656
cpu_to_node(cpu));
arch/x86/events/intel/p4.c
1101
static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
arch/x86/events/intel/p4.c
1108
if (!p4_should_swap_ts(hwc->config, cpu))
arch/x86/events/intel/p4.c
1119
if (p4_ht_thread(cpu)) {
arch/x86/events/intel/p4.c
1245
int cpu = smp_processor_id();
arch/x86/events/intel/p4.c
1259
thread = p4_ht_thread(cpu);
arch/x86/events/intel/p4.c
1276
if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
arch/x86/events/intel/p4.c
1311
if (p4_should_swap_ts(hwc->config, cpu))
arch/x86/events/intel/p4.c
1313
p4_pmu_swap_config_ts(hwc, cpu);
arch/x86/events/intel/p4.c
806
int cpu = get_cpu();
arch/x86/events/intel/p4.c
816
cccr = p4_default_cccr_conf(cpu);
arch/x86/events/intel/p4.c
817
escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
arch/x86/events/intel/p4.c
822
if (p4_ht_active() && p4_ht_thread(cpu))
arch/x86/events/intel/pt.c
1245
static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu,
arch/x86/events/intel/pt.c
1251
topa = topa_alloc(cpu, gfp);
arch/x86/events/intel/pt.c
1258
err = topa_insert_pages(buf, cpu, gfp);
arch/x86/events/intel/pt.c
1331
int node, ret, cpu = event->cpu;
arch/x86/events/intel/pt.c
1343
if (cpu == -1)
arch/x86/events/intel/pt.c
1344
cpu = raw_smp_processor_id();
arch/x86/events/intel/pt.c
1345
node = cpu_to_node(cpu);
arch/x86/events/intel/pt.c
1362
ret = pt_buffer_init_topa(buf, cpu, nr_pages, GFP_KERNEL);
arch/x86/events/intel/pt.c
1386
int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
arch/x86/events/intel/pt.c
1833
int ret, cpu, prior_warn = 0;
arch/x86/events/intel/pt.c
1841
for_each_online_cpu(cpu) {
arch/x86/events/intel/pt.c
1844
ret = rdmsrq_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
arch/x86/events/intel/pt.c
680
static struct topa *topa_alloc(int cpu, gfp_t gfp)
arch/x86/events/intel/pt.c
682
int node = cpu_to_node(cpu);
arch/x86/events/intel/pt.c
770
static int topa_insert_pages(struct pt_buffer *buf, int cpu, gfp_t gfp)
arch/x86/events/intel/pt.c
781
topa = topa_alloc(cpu, gfp);
arch/x86/events/intel/uncore.c
139
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
arch/x86/events/intel/uncore.c
141
unsigned int dieid = topology_logical_die_id(cpu);
arch/x86/events/intel/uncore.c
1468
WARN_ON_ONCE(box->cpu != -1);
arch/x86/events/intel/uncore.c
1470
box->cpu = new_cpu;
arch/x86/events/intel/uncore.c
1476
WARN_ON_ONCE(box->cpu != -1 && box->cpu != old_cpu);
arch/x86/events/intel/uncore.c
1477
box->cpu = -1;
arch/x86/events/intel/uncore.c
1486
box->cpu = new_cpu;
arch/x86/events/intel/uncore.c
1510
if (box && box->cpu >= 0 && atomic_dec_return(&box->refcnt) == 0)
arch/x86/events/intel/uncore.c
1516
static int uncore_event_cpu_offline(unsigned int cpu)
arch/x86/events/intel/uncore.c
1521
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
arch/x86/events/intel/uncore.c
1524
target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
arch/x86/events/intel/uncore.c
1532
uncore_change_context(uncore_msr_uncores, cpu, target);
arch/x86/events/intel/uncore.c
1533
uncore_change_context(uncore_mmio_uncores, cpu, target);
arch/x86/events/intel/uncore.c
1534
uncore_change_context(uncore_pci_uncores, cpu, target);
arch/x86/events/intel/uncore.c
1538
die = topology_logical_die_id(cpu);
arch/x86/events/intel/uncore.c
1545
unsigned int die, unsigned int cpu)
arch/x86/events/intel/uncore.c
1560
box = uncore_alloc_box(type, cpu_to_node(cpu));
arch/x86/events/intel/uncore.c
1584
int id, unsigned int cpu)
arch/x86/events/intel/uncore.c
1591
ret = allocate_boxes(types, id, cpu);
arch/x86/events/intel/uncore.c
1600
if (box && box->cpu >= 0 && atomic_inc_return(&box->refcnt) == 1)
arch/x86/events/intel/uncore.c
1607
static int uncore_event_cpu_online(unsigned int cpu)
arch/x86/events/intel/uncore.c
1611
die = topology_logical_die_id(cpu);
arch/x86/events/intel/uncore.c
1612
msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
arch/x86/events/intel/uncore.c
1613
mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
arch/x86/events/intel/uncore.c
1621
target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
arch/x86/events/intel/uncore.c
1625
cpumask_set_cpu(cpu, &uncore_cpu_mask);
arch/x86/events/intel/uncore.c
1628
uncore_change_context(uncore_msr_uncores, -1, cpu);
arch/x86/events/intel/uncore.c
1630
uncore_change_context(uncore_mmio_uncores, -1, cpu);
arch/x86/events/intel/uncore.c
1631
uncore_change_context(uncore_pci_uncores, -1, cpu);
arch/x86/events/intel/uncore.c
312
if (!box->n_active || box->cpu != smp_processor_id())
arch/x86/events/intel/uncore.c
362
box->cpu = -1;
arch/x86/events/intel/uncore.c
73
int cpu;
arch/x86/events/intel/uncore.c
75
for_each_cpu(cpu, cpumask_of_pcibus(dev->bus)) {
arch/x86/events/intel/uncore.c
751
if (event->cpu < 0)
arch/x86/events/intel/uncore.c
753
box = uncore_pmu_to_box(pmu, event->cpu);
arch/x86/events/intel/uncore.c
754
if (!box || box->cpu < 0)
arch/x86/events/intel/uncore.c
756
event->cpu = box->cpu;
arch/x86/events/intel/uncore.c
76
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/events/intel/uncore.c
78
if (c->initialized && cpu_to_node(cpu) == node)
arch/x86/events/intel/uncore.h
166
int cpu; /* cpu to collect events */
arch/x86/events/intel/uncore.h
590
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
arch/x86/events/intel/uncore_discovery.c
389
int cpu, die;
arch/x86/events/intel/uncore_discovery.c
398
for_each_online_cpu(cpu) {
arch/x86/events/intel/uncore_discovery.c
399
die = topology_logical_die_id(cpu);
arch/x86/events/intel/uncore_discovery.c
403
if (rdmsrq_safe_on_cpu(cpu, domain->discovery_base, &base))
arch/x86/events/intel/uncore_snb.c
953
if (event->cpu < 0)
arch/x86/events/intel/uncore_snb.c
960
box = uncore_pmu_to_box(pmu, event->cpu);
arch/x86/events/intel/uncore_snb.c
961
if (!box || box->cpu < 0)
arch/x86/events/intel/uncore_snb.c
964
event->cpu = box->cpu;
arch/x86/events/intel/uncore_snbep.c
3700
static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
arch/x86/events/intel/uncore_snbep.c
3704
if (rdmsrq_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
arch/x86/events/intel/uncore_snbep.c
3715
int res = 0, cpu, current_die;
arch/x86/events/intel/uncore_snbep.c
3721
for_each_online_cpu(cpu) {
arch/x86/events/intel/uncore_snbep.c
3722
current_die = topology_logical_die_id(cpu);
arch/x86/events/intel/uncore_snbep.c
3724
res = cpu;
arch/x86/events/perf_event.h
1035
void (*filter)(struct pmu *pmu, int cpu, bool *ret);
arch/x86/events/perf_event.h
1156
struct pmu *x86_get_pmu(unsigned int cpu);
arch/x86/events/perf_event.h
1645
extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
arch/x86/events/perf_event.h
1650
int alloc_arch_pebs_buf_on_cpu(int cpu);
arch/x86/events/perf_event.h
1652
void release_arch_pebs_buf_on_cpu(int cpu);
arch/x86/events/perf_event.h
1654
void init_arch_pebs_on_cpu(int cpu);
arch/x86/events/perf_event.h
1656
void fini_arch_pebs_on_cpu(int cpu);
arch/x86/events/perf_event.h
1658
void init_debug_store_on_cpu(int cpu);
arch/x86/events/perf_event.h
1660
void fini_debug_store_on_cpu(int cpu);
arch/x86/events/perf_event.h
1884
static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
arch/x86/events/perf_event.h
915
int (*cpu_prepare)(int cpu);
arch/x86/events/perf_event.h
916
void (*cpu_starting)(int cpu);
arch/x86/events/perf_event.h
917
void (*cpu_dying)(int cpu);
arch/x86/events/perf_event.h
918
void (*cpu_dead)(int cpu);
arch/x86/events/rapl.c
133
int cpu;
arch/x86/events/rapl.c
174
static inline unsigned int get_rapl_pmu_idx(int cpu, int scope)
arch/x86/events/rapl.c
183
return topology_logical_package_id(cpu);
arch/x86/events/rapl.c
185
return topology_logical_die_id(cpu);
arch/x86/events/rapl.c
187
return topology_logical_core_id(cpu);
arch/x86/events/rapl.c
385
if (event->cpu < 0)
arch/x86/events/rapl.c
414
rapl_pmu_idx = get_rapl_pmu_idx(event->cpu, rapl_pmus_scope);
arch/x86/hyperv/hv_apic.c
104
static bool cpu_is_self(int cpu)
arch/x86/hyperv/hv_apic.c
106
return cpu == smp_processor_id();
arch/x86/hyperv/hv_apic.c
238
static bool __send_ipi_one(int cpu, int vector)
arch/x86/hyperv/hv_apic.c
240
int vp = hv_cpu_number_to_vp_number(cpu);
arch/x86/hyperv/hv_apic.c
243
trace_hyperv_send_ipi_one(cpu, vector);
arch/x86/hyperv/hv_apic.c
258
return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);
arch/x86/hyperv/hv_apic.c
264
static void hv_send_ipi(int cpu, int vector)
arch/x86/hyperv/hv_apic.c
266
if (!__send_ipi_one(cpu, vector))
arch/x86/hyperv/hv_apic.c
267
orig_apic.send_IPI(cpu, vector);
arch/x86/hyperv/hv_apic.c
56
void hv_enable_coco_interrupt(unsigned int cpu, unsigned int vector, bool set)
arch/x86/hyperv/hv_apic.c
58
apic_update_vector(cpu, vector, set);
arch/x86/hyperv/hv_init.c
118
static int hv_cpu_init(unsigned int cpu)
arch/x86/hyperv/hv_init.c
124
ret = hv_common_cpu_init(cpu);
arch/x86/hyperv/hv_init.c
131
hvp = &hv_vp_assist_page[cpu];
arch/x86/hyperv/hv_init.c
175
apic_update_vector(cpu, HYPERV_STIMER0_VECTOR, true);
arch/x86/hyperv/hv_init.c
271
static int hv_cpu_die(unsigned int cpu)
arch/x86/hyperv/hv_init.c
285
apic_update_vector(cpu, HYPERV_STIMER0_VECTOR, false);
arch/x86/hyperv/hv_init.c
287
hv_common_cpu_die(cpu);
arch/x86/hyperv/hv_init.c
289
if (hv_vp_assist_page && hv_vp_assist_page[cpu]) {
arch/x86/hyperv/hv_init.c
298
memunmap(hv_vp_assist_page[cpu]);
arch/x86/hyperv/hv_init.c
299
hv_vp_assist_page[cpu] = NULL;
arch/x86/hyperv/hv_init.c
310
if (re_ctrl.target_vp == hv_vp_index[cpu]) {
arch/x86/hyperv/hv_init.c
316
new_cpu = cpumask_any_but(cpu_online_mask, cpu);
arch/x86/hyperv/hv_spinlock.c
21
static void hv_qlock_kick(int cpu)
arch/x86/hyperv/hv_spinlock.c
23
__apic_send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
arch/x86/hyperv/hv_vtl.c
111
static int hv_vtl_bringup_vcpu(u32 target_vp_index, int cpu, u64 eip_ignored)
arch/x86/hyperv/hv_vtl.c
125
struct task_struct *idle = idle_thread_get(cpu);
arch/x86/hyperv/hv_vtl.c
222
static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip, unsigned int cpu)
arch/x86/hyperv/hv_vtl.c
238
return hv_vtl_bringup_vcpu(vp_index, cpu, start_eip);
arch/x86/hyperv/irqdomain.c
18
int cpu, int vector, struct hv_interrupt_entry *entry)
arch/x86/hyperv/irqdomain.c
194
int cpu;
arch/x86/hyperv/irqdomain.c
199
cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
arch/x86/hyperv/irqdomain.c
201
return hv_map_interrupt(device_id, false, cpu, cfg->vector,
arch/x86/hyperv/irqdomain.c
407
int hv_map_ioapic_interrupt(int ioapic_id, bool level, int cpu, int vector,
arch/x86/hyperv/irqdomain.c
416
return hv_map_interrupt(device_id, level, cpu, vector, entry);
arch/x86/hyperv/irqdomain.c
47
nr_bank = cpumask_to_vpset(&(intr_desc->target.vp_set), cpumask_of(cpu));
arch/x86/hyperv/ivm.c
295
int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu)
arch/x86/hyperv/ivm.c
378
cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
arch/x86/hyperv/ivm.c
384
per_cpu(hv_sev_vmsa, cpu) = vmsa;
arch/x86/hyperv/mmu.c
110
cpu = cpumask_last(cpus);
arch/x86/hyperv/mmu.c
112
if (cpu < nr_cpumask_bits && hv_cpu_number_to_vp_number(cpu) >= 64)
arch/x86/hyperv/mmu.c
115
for_each_cpu(cpu, cpus) {
arch/x86/hyperv/mmu.c
116
if (do_lazy && cpu_is_lazy(cpu))
arch/x86/hyperv/mmu.c
118
vcpu = hv_cpu_number_to_vp_number(cpu);
arch/x86/hyperv/mmu.c
54
static bool cpu_is_lazy(int cpu)
arch/x86/hyperv/mmu.c
56
return per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
arch/x86/hyperv/mmu.c
62
int cpu, vcpu, gva_n, max_gvas;
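The mmu.c loop above skips lazy-TLB CPUs when it may: a CPU in lazy mode will revalidate its mappings at its next context switch, so it needs no flush hypercall. A sketch of that shape, with demo_add_vp() as a hypothetical accumulator:

    static void demo_collect_flush_targets(const struct cpumask *cpus, bool do_lazy)
    {
            int cpu;

            for_each_cpu(cpu, cpus) {
                    if (do_lazy && cpu_is_lazy(cpu))
                            continue;       /* revalidates on next switch */
                    demo_add_vp(hv_cpu_number_to_vp_number(cpu));
            }
    }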
arch/x86/include/asm/apic.h
178
extern void topology_hotunplug_apic(unsigned int cpu);
arch/x86/include/asm/apic.h
281
void (*send_IPI)(int cpu, int vector);
arch/x86/include/asm/apic.h
293
u32 (*calc_dest_apicid)(unsigned int cpu);
arch/x86/include/asm/apic.h
314
int (*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip, unsigned int cpu);
arch/x86/include/asm/apic.h
316
int (*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip, unsigned int cpu);
arch/x86/include/asm/apic.h
318
void (*update_vector)(unsigned int cpu, unsigned int vector, bool set);
arch/x86/include/asm/apic.h
328
void (*send_IPI)(int cpu, int vector);
arch/x86/include/asm/apic.h
336
int (*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip, unsigned int cpu);
arch/x86/include/asm/apic.h
337
int (*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip, unsigned int cpu);
arch/x86/include/asm/apic.h
428
static __always_inline void __apic_send_IPI(int cpu, int vector)
arch/x86/include/asm/apic.h
430
static_call(apic_call_send_IPI)(cpu, vector);
arch/x86/include/asm/apic.h
473
static __always_inline void apic_update_vector(unsigned int cpu, unsigned int vector, bool set)
arch/x86/include/asm/apic.h
476
apic->update_vector(cpu, vector, set);
arch/x86/include/asm/apic.h
490
static inline void apic_update_vector(unsigned int cpu, unsigned int vector, bool set) { }
arch/x86/include/asm/apic.h
600
extern u32 apic_default_calc_apicid(unsigned int cpu);
arch/x86/include/asm/apic.h
601
extern u32 apic_flat_calc_apicid(unsigned int cpu);
arch/x86/include/asm/apic.h
605
void apic_send_nmi_to_offline_cpu(unsigned int cpu);
arch/x86/include/asm/cpu.h
13
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
arch/x86/include/asm/cpu.h
14
#define cpu_acpi_id(cpu) 0
arch/x86/include/asm/cpu_entry_area.h
140
extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
arch/x86/include/asm/cpu_entry_area.h
142
static __always_inline struct entry_stack *cpu_entry_stack(int cpu)
arch/x86/include/asm/cpu_entry_area.h
144
return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
arch/x86/include/asm/cpuidle_haltpoll.h
5
void arch_haltpoll_enable(unsigned int cpu);
arch/x86/include/asm/cpuidle_haltpoll.h
6
void arch_haltpoll_disable(unsigned int cpu);
arch/x86/include/asm/cpumask.h
16
static __always_inline bool arch_cpu_online(int cpu)
arch/x86/include/asm/cpumask.h
18
return arch_test_bit(cpu, cpumask_bits(cpu_online_mask));
arch/x86/include/asm/cpumask.h
21
static __always_inline void arch_cpumask_clear_cpu(int cpu, struct cpumask *dstp)
arch/x86/include/asm/cpumask.h
23
arch_clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
arch/x86/include/asm/cpumask.h
26
static __always_inline bool arch_cpu_online(int cpu)
arch/x86/include/asm/cpumask.h
28
return cpu == 0;
arch/x86/include/asm/cpumask.h
31
static __always_inline void arch_cpumask_clear_cpu(int cpu, struct cpumask *dstp)
arch/x86/include/asm/cpumask.h
37
#define arch_cpu_is_offline(cpu) unlikely(!arch_cpu_online(cpu))
arch/x86/include/asm/desc.h
119
#define load_TLS(t, cpu) native_load_tls(t, cpu)
arch/x86/include/asm/desc.h
180
static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr)
arch/x86/include/asm/desc.h
182
struct desc_struct *d = get_cpu_gdt_rw(cpu);
arch/x86/include/asm/desc.h
190
#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
arch/x86/include/asm/desc.h
197
unsigned cpu = smp_processor_id();
arch/x86/include/asm/desc.h
202
write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_LDT,
arch/x86/include/asm/desc.h
257
int cpu = raw_smp_processor_id();
arch/x86/include/asm/desc.h
262
fixmap_gdt = get_cpu_gdt_ro(cpu);
arch/x86/include/asm/desc.h
269
load_direct_gdt(cpu);
arch/x86/include/asm/desc.h
274
load_fixmap_gdt(cpu);
arch/x86/include/asm/desc.h
292
static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
arch/x86/include/asm/desc.h
294
struct desc_struct *gdt = get_cpu_gdt_rw(cpu);
arch/x86/include/asm/desc.h
51
static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)
arch/x86/include/asm/desc.h
53
return per_cpu(gdt_page, cpu).gdt;
arch/x86/include/asm/desc.h
63
static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
arch/x86/include/asm/desc.h
65
return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt;
arch/x86/include/asm/desc.h
75
static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu)
arch/x86/include/asm/desc.h
77
return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu));
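desc.h keeps two aliases of each CPU's GDT: a writable per-CPU mapping (get_cpu_gdt_rw) and a read-only alias in the cpu_entry_area (get_cpu_gdt_ro), which is what load_fixmap_gdt() later in this listing actually loads. A sketch of the relationship, assuming GDT_ENTRY_TSS as the slot; both pointers name the same descriptors:

    static void demo_inspect_gdt(int cpu)
    {
            struct desc_struct *rw = get_cpu_gdt_rw(cpu);   /* writable alias */
            struct desc_struct *ro = get_cpu_gdt_ro(cpu);   /* read-only alias */

            /* One GDT, two mappings: the contents agree. */
            WARN_ON(memcmp(&rw[GDT_ENTRY_TSS], &ro[GDT_ENTRY_TSS],
                           sizeof(*rw)));
    }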
arch/x86/include/asm/espfix.h
13
extern void init_espfix_ap(int cpu);
arch/x86/include/asm/espfix.h
15
static inline void init_espfix_ap(int cpu) { }
arch/x86/include/asm/fpu/sched.h
32
static inline void switch_fpu(struct task_struct *old, int cpu)
arch/x86/include/asm/fpu/sched.h
49
old_fpu->last_cpu = cpu;
arch/x86/include/asm/gsseg.h
48
pv_ops.cpu.load_gs_index = native_lkgs;
arch/x86/include/asm/hardirq.h
65
extern u64 arch_irq_stat_cpu(unsigned int cpu);
arch/x86/include/asm/irq.h
26
extern int irq_init_percpu_irqstack(unsigned int cpu);
arch/x86/include/asm/irq_remapping.h
38
int cpu;
arch/x86/include/asm/kmsan.h
35
int cpu;
arch/x86/include/asm/kmsan.h
40
cpu = (addr64 - CPU_ENTRY_AREA_BASE) / CPU_ENTRY_AREA_SIZE;
arch/x86/include/asm/kmsan.h
41
off = addr64 - (unsigned long)get_cpu_entry_area(cpu);
arch/x86/include/asm/kmsan.h
46
return &per_cpu(metadata_array[off], cpu);
arch/x86/include/asm/kvm_host.h
1756
void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
arch/x86/include/asm/mce.h
390
enum smca_bank_types smca_get_bank_type(unsigned int cpu, unsigned int bank);
arch/x86/include/asm/mshyperv.h
160
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
arch/x86/include/asm/mshyperv.h
165
return hv_vp_assist_page[cpu];
arch/x86/include/asm/mshyperv.h
201
int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu);
arch/x86/include/asm/mshyperv.h
206
unsigned int cpu) { return 0; }
arch/x86/include/asm/mshyperv.h
257
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
arch/x86/include/asm/msr.h
259
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
arch/x86/include/asm/msr.h
260
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
arch/x86/include/asm/msr.h
261
int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
arch/x86/include/asm/msr.h
262
int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
arch/x86/include/asm/msr.h
265
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
arch/x86/include/asm/msr.h
266
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
arch/x86/include/asm/msr.h
267
int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
arch/x86/include/asm/msr.h
268
int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
arch/x86/include/asm/msr.h
269
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
arch/x86/include/asm/msr.h
270
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
arch/x86/include/asm/msr.h
272
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
arch/x86/include/asm/msr.h
277
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
arch/x86/include/asm/msr.h
282
static inline int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
arch/x86/include/asm/msr.h
287
static inline int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
arch/x86/include/asm/msr.h
302
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
arch/x86/include/asm/msr.h
307
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
arch/x86/include/asm/msr.h
311
static inline int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
arch/x86/include/asm/msr.h
315
static inline int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
arch/x86/include/asm/msr.h
319
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
arch/x86/include/asm/msr.h
323
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
arch/x86/include/asm/msr.h
332
#define rdmsrl_on_cpu(cpu, msr, q) rdmsrq_on_cpu(cpu, msr, q)
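The *_on_cpu() family above runs the MSR access on the named CPU (on SMP via a cross-call; the UP stubs that follow collapse to a local access). Typical use, borrowing MSR_IA32_ENERGY_PERF_BIAS from the intel_epb.c entries later in this listing:

    static int demo_read_epb(unsigned int cpu, u64 *val)
    {
            /* _safe: returns an error instead of faulting if the MSR is absent. */
            return rdmsrq_safe_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, val);
    }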
arch/x86/include/asm/numa.h
32
extern int numa_cpu_node(int cpu);
arch/x86/include/asm/numa.h
39
static inline int numa_cpu_node(int cpu)
arch/x86/include/asm/numa.h
46
extern void numa_set_node(int cpu, int node);
arch/x86/include/asm/numa.h
47
extern void numa_clear_node(int cpu);
arch/x86/include/asm/numa.h
49
extern void numa_add_cpu(unsigned int cpu);
arch/x86/include/asm/numa.h
50
extern void numa_remove_cpu(unsigned int cpu);
arch/x86/include/asm/numa.h
54
static inline void numa_set_node(int cpu, int node) { }
arch/x86/include/asm/numa.h
55
static inline void numa_clear_node(int cpu) { }
arch/x86/include/asm/numa.h
57
static inline void numa_add_cpu(unsigned int cpu) { }
arch/x86/include/asm/numa.h
58
static inline void numa_remove_cpu(unsigned int cpu) { }
arch/x86/include/asm/numa.h
67
void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable);
arch/x86/include/asm/paravirt-spinlock.h
18
void (*kick)(int cpu);
arch/x86/include/asm/paravirt-spinlock.h
45
static __always_inline bool pv_vcpu_is_preempted(long cpu)
arch/x86/include/asm/paravirt-spinlock.h
47
return PVOP_ALT_CALLEE1(bool, pv_ops_lock, vcpu_is_preempted, cpu,
arch/x86/include/asm/paravirt-spinlock.h
76
static inline bool vcpu_is_preempted(long cpu)
arch/x86/include/asm/paravirt-spinlock.h
78
return pv_vcpu_is_preempted(cpu);
arch/x86/include/asm/paravirt-spinlock.h
86
static __always_inline void pv_kick(int cpu)
arch/x86/include/asm/paravirt-spinlock.h
88
PVOP_VCALL1(pv_ops_lock, kick, cpu);
arch/x86/include/asm/paravirt-spinlock.h
92
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
arch/x86/include/asm/paravirt-spinlock.h
98
__visible bool __native_vcpu_is_preempted(long cpu);
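vcpu_is_preempted() above lets lock code stop spinning once the holder's vCPU has been scheduled out by the hypervisor, and pv_kick() wakes a halted waiter. A sketch of the first half, assuming the caller knows the owner's CPU:

    static void demo_spin_on_owner(atomic_t *lock, long owner_cpu)
    {
            while (atomic_read(lock)) {
                    if (vcpu_is_preempted(owner_cpu))
                            break;   /* owner not running: spinning is wasted work */
                    cpu_relax();
            }
    }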
arch/x86/include/asm/paravirt.h
104
PVOP_VCALL2(pv_ops, cpu.set_debugreg, reg, val);
arch/x86/include/asm/paravirt.h
109
return PVOP_CALL0(unsigned long, pv_ops, cpu.read_cr0);
arch/x86/include/asm/paravirt.h
114
PVOP_VCALL1(pv_ops, cpu.write_cr0, x);
arch/x86/include/asm/paravirt.h
141
PVOP_VCALL1(pv_ops, cpu.write_cr4, x);
arch/x86/include/asm/paravirt.h
146
return PVOP_CALL1(u64, pv_ops, cpu.read_msr, msr);
arch/x86/include/asm/paravirt.h
151
PVOP_VCALL2(pv_ops, cpu.write_msr, msr, val);
arch/x86/include/asm/paravirt.h
156
return PVOP_CALL2(int, pv_ops, cpu.read_msr_safe, msr, val);
arch/x86/include/asm/paravirt.h
161
return PVOP_CALL2(int, pv_ops, cpu.write_msr_safe, msr, val);
arch/x86/include/asm/paravirt.h
208
return PVOP_CALL1(u64, pv_ops, cpu.read_pmc, counter);
arch/x86/include/asm/paravirt.h
213
PVOP_VCALL2(pv_ops, cpu.alloc_ldt, ldt, entries);
arch/x86/include/asm/paravirt.h
218
PVOP_VCALL2(pv_ops, cpu.free_ldt, ldt, entries);
arch/x86/include/asm/paravirt.h
223
PVOP_VCALL0(pv_ops, cpu.load_tr_desc);
arch/x86/include/asm/paravirt.h
227
PVOP_VCALL1(pv_ops, cpu.load_gdt, dtr);
arch/x86/include/asm/paravirt.h
231
PVOP_VCALL1(pv_ops, cpu.load_idt, dtr);
arch/x86/include/asm/paravirt.h
235
PVOP_VCALL2(pv_ops, cpu.set_ldt, addr, entries);
arch/x86/include/asm/paravirt.h
239
return PVOP_CALL0(unsigned long, pv_ops, cpu.store_tr);
arch/x86/include/asm/paravirt.h
243
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
arch/x86/include/asm/paravirt.h
245
PVOP_VCALL2(pv_ops, cpu.load_tls, t, cpu);
arch/x86/include/asm/paravirt.h
25
PVOP_VCALL0(pv_ops, cpu.io_delay);
arch/x86/include/asm/paravirt.h
250
PVOP_VCALL1(pv_ops, cpu.load_gs_index, gs);
arch/x86/include/asm/paravirt.h
256
PVOP_VCALL3(pv_ops, cpu.write_ldt_entry, dt, entry, desc);
arch/x86/include/asm/paravirt.h
262
PVOP_VCALL4(pv_ops, cpu.write_gdt_entry, dt, entry, desc, type);
arch/x86/include/asm/paravirt.h
267
PVOP_VCALL3(pv_ops, cpu.write_idt_entry, dt, entry, g);
arch/x86/include/asm/paravirt.h
27
PVOP_VCALL0(pv_ops, cpu.io_delay);
arch/x86/include/asm/paravirt.h
273
PVOP_VCALL0(pv_ops, cpu.invalidate_io_bitmap);
arch/x86/include/asm/paravirt.h
278
PVOP_VCALL0(pv_ops, cpu.update_io_bitmap);
arch/x86/include/asm/paravirt.h
28
PVOP_VCALL0(pv_ops, cpu.io_delay);
arch/x86/include/asm/paravirt.h
29
PVOP_VCALL0(pv_ops, cpu.io_delay);
arch/x86/include/asm/paravirt.h
487
PVOP_VCALL1(pv_ops, cpu.start_context_switch, prev);
arch/x86/include/asm/paravirt.h
492
PVOP_VCALL1(pv_ops, cpu.end_context_switch, next);
arch/x86/include/asm/paravirt.h
84
PVOP_VCALL1(pv_ops, cpu.load_sp0, sp0);
arch/x86/include/asm/paravirt.h
91
PVOP_VCALL4(pv_ops, cpu.cpuid, eax, ebx, ecx, edx);
arch/x86/include/asm/paravirt.h
99
return PVOP_CALL1(unsigned long, pv_ops, cpu.get_debugreg, reg);
arch/x86/include/asm/paravirt_types.h
191
struct pv_cpu_ops cpu;
arch/x86/include/asm/paravirt_types.h
50
void (*load_tls)(struct thread_struct *t, unsigned int cpu);
arch/x86/include/asm/perf_event_p4.h
189
static inline int p4_ht_thread(int cpu)
arch/x86/include/asm/perf_event_p4.h
193
return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
arch/x86/include/asm/perf_event_p4.h
198
static inline int p4_should_swap_ts(u64 config, int cpu)
arch/x86/include/asm/perf_event_p4.h
200
return p4_ht_config_thread(config) ^ p4_ht_thread(cpu);
arch/x86/include/asm/perf_event_p4.h
203
static inline u32 p4_default_cccr_conf(int cpu)
arch/x86/include/asm/perf_event_p4.h
212
if (!p4_ht_thread(cpu))
arch/x86/include/asm/perf_event_p4.h
220
static inline u32 p4_default_escr_conf(int cpu, int exclude_os, int exclude_usr)
arch/x86/include/asm/perf_event_p4.h
224
if (!p4_ht_thread(cpu)) {
arch/x86/include/asm/preempt.h
46
#define init_idle_preempt_count(p, cpu) do { \
arch/x86/include/asm/preempt.h
47
per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
arch/x86/include/asm/processor.h
216
#define cpu_data(cpu) per_cpu(cpu_info, cpu)
arch/x86/include/asm/processor.h
232
extern void identify_secondary_cpu(unsigned int cpu);
arch/x86/include/asm/processor.h
431
static inline unsigned long cpu_kernelmode_gs_base(int cpu)
arch/x86/include/asm/processor.h
434
return per_cpu_offset(cpu);
arch/x86/include/asm/processor.h
692
static inline u32 per_cpu_llc_id(unsigned int cpu)
arch/x86/include/asm/processor.h
694
return per_cpu(cpu_info.topo.llc_id, cpu);
arch/x86/include/asm/processor.h
697
static inline u32 per_cpu_l2c_id(unsigned int cpu)
arch/x86/include/asm/processor.h
699
return per_cpu(cpu_info.topo.l2c_id, cpu);
arch/x86/include/asm/resctrl.h
139
static inline void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid,
arch/x86/include/asm/resctrl.h
142
WRITE_ONCE(per_cpu(pqr_state.default_closid, cpu), closid);
arch/x86/include/asm/resctrl.h
143
WRITE_ONCE(per_cpu(pqr_state.default_rmid, cpu), rmid);
arch/x86/include/asm/segment.h
240
static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node)
arch/x86/include/asm/segment.h
242
return (node << VDSO_CPUNODE_BITS) | cpu;
arch/x86/include/asm/segment.h
245
static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
arch/x86/include/asm/segment.h
262
if (cpu)
arch/x86/include/asm/segment.h
263
*cpu = (p & VDSO_CPUNODE_MASK);
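The segment.h encoding above packs node and CPU into one word: the low VDSO_CPUNODE_BITS carry the CPU, the remaining bits the node. A round trip (the CPU mask decode is shown above; the node decode by shift is implied by the encode):

    static void demo_cpunode_roundtrip(void)
    {
            unsigned long p = vdso_encode_cpunode(3, 1);
            unsigned cpu   = p & VDSO_CPUNODE_MASK;
            unsigned node  = p >> VDSO_CPUNODE_BITS;
            /* cpu == 3, node == 1 */
    }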
arch/x86/include/asm/smp.h
109
int native_kick_ap(unsigned int cpu, struct task_struct *tidle);
arch/x86/include/asm/smp.h
114
void wbinvd_on_cpu(int cpu);
arch/x86/include/asm/smp.h
123
void native_smp_send_reschedule(int cpu);
arch/x86/include/asm/smp.h
125
void native_send_call_func_single_ipi(int cpu);
arch/x86/include/asm/smp.h
132
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
arch/x86/include/asm/smp.h
133
#define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu)
arch/x86/include/asm/smp.h
142
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
arch/x86/include/asm/smp.h
144
return per_cpu(cpu_llc_shared_map, cpu);
arch/x86/include/asm/smp.h
147
static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
arch/x86/include/asm/smp.h
149
return per_cpu(cpu_l2c_shared_map, cpu);
arch/x86/include/asm/smp.h
153
#define wbinvd_on_cpu(cpu) wbinvd()
arch/x86/include/asm/smp.h
174
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
arch/x86/include/asm/smp.h
31
void (*smp_send_reschedule)(int cpu);
arch/x86/include/asm/smp.h
33
void (*cleanup_dead_cpu)(unsigned cpu);
arch/x86/include/asm/smp.h
35
int (*kick_ap_alive)(unsigned cpu, struct task_struct *tidle);
arch/x86/include/asm/smp.h
37
void (*cpu_die)(unsigned int cpu);
arch/x86/include/asm/smp.h
42
void (*send_call_func_single_ipi)(int cpu);
arch/x86/include/asm/smp.h
46
extern void set_cpu_sibling_map(int cpu);
arch/x86/include/asm/smp.h
76
static inline void __cpu_die(unsigned int cpu)
arch/x86/include/asm/smp.h
79
smp_ops.cpu_die(cpu);
arch/x86/include/asm/smp.h
88
static inline void arch_smp_send_reschedule(int cpu)
arch/x86/include/asm/smp.h
90
smp_ops.smp_send_reschedule(cpu);
arch/x86/include/asm/smp.h
93
static inline void arch_send_call_function_single_ipi(int cpu)
arch/x86/include/asm/smp.h
95
smp_ops.send_call_func_single_ipi(cpu);
arch/x86/include/asm/stackprotector.h
44
static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
arch/x86/include/asm/stackprotector.h
46
per_cpu(__stack_chk_guard, cpu) = idle->stack_canary;
arch/x86/include/asm/stackprotector.h
53
static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
arch/x86/include/asm/thread_info.h
67
u32 cpu; /* current CPU */
arch/x86/include/asm/topology.h
140
extern const struct cpumask *cpu_coregroup_mask(int cpu);
arch/x86/include/asm/topology.h
141
extern const struct cpumask *cpu_clustergroup_mask(int cpu);
arch/x86/include/asm/topology.h
143
#define topology_logical_package_id(cpu) (cpu_data(cpu).topo.logical_pkg_id)
arch/x86/include/asm/topology.h
144
#define topology_physical_package_id(cpu) (cpu_data(cpu).topo.pkg_id)
arch/x86/include/asm/topology.h
145
#define topology_logical_die_id(cpu) (cpu_data(cpu).topo.logical_die_id)
arch/x86/include/asm/topology.h
146
#define topology_logical_core_id(cpu) (cpu_data(cpu).topo.logical_core_id)
arch/x86/include/asm/topology.h
147
#define topology_die_id(cpu) (cpu_data(cpu).topo.die_id)
arch/x86/include/asm/topology.h
148
#define topology_core_id(cpu) (cpu_data(cpu).topo.core_id)
arch/x86/include/asm/topology.h
149
#define topology_ppin(cpu) (cpu_data(cpu).ppin)
arch/x86/include/asm/topology.h
151
#define topology_amd_node_id(cpu) (cpu_data(cpu).topo.amd_node_id)
arch/x86/include/asm/topology.h
198
#define topology_cluster_id(cpu) (cpu_data(cpu).topo.l2c_id)
arch/x86/include/asm/topology.h
199
#define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
arch/x86/include/asm/topology.h
200
#define topology_cluster_cpumask(cpu) (cpu_clustergroup_mask(cpu))
arch/x86/include/asm/topology.h
201
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
arch/x86/include/asm/topology.h
202
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
arch/x86/include/asm/topology.h
240
static inline bool topology_is_primary_thread(unsigned int cpu)
arch/x86/include/asm/topology.h
242
return cpumask_test_cpu(cpu, cpu_primary_thread_mask);
arch/x86/include/asm/topology.h
246
int topology_get_primary_thread(unsigned int cpu);
arch/x86/include/asm/topology.h
248
static inline bool topology_is_core_online(unsigned int cpu)
arch/x86/include/asm/topology.h
250
int pcpu = topology_get_primary_thread(cpu);
arch/x86/include/asm/topology.h
305
static inline long arch_scale_freq_capacity(int cpu)
arch/x86/include/asm/topology.h
307
return per_cpu(arch_freq_scale, cpu);
arch/x86/include/asm/topology.h
312
void arch_set_cpu_capacity(int cpu, unsigned long cap, unsigned long max_cap,
arch/x86/include/asm/topology.h
315
unsigned long arch_scale_cpu_capacity(int cpu);
arch/x86/include/asm/topology.h
322
static inline void arch_set_cpu_capacity(int cpu, unsigned long cap,
arch/x86/include/asm/topology.h
48
extern int __cpu_to_node(int cpu);
arch/x86/include/asm/topology.h
51
extern int early_cpu_to_node(int cpu);
arch/x86/include/asm/topology.h
56
static inline int early_cpu_to_node(int cpu)
arch/x86/include/asm/topology.h
58
return early_per_cpu(x86_cpu_to_node_map, cpu);
arch/x86/include/asm/topology.h
94
static inline int early_cpu_to_node(int cpu)
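With the topology maps above in hand, walking a CPU's SMT siblings is a one-liner; purely illustrative:

    static void demo_print_siblings(unsigned int cpu)
    {
            unsigned int sib;

            for_each_cpu(sib, topology_sibling_cpumask(cpu))
                    pr_info("cpu%u: sibling cpu%u (core %u)\n",
                            cpu, sib, topology_core_id(sib));
    }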
arch/x86/include/asm/trace/hyperv.h
75
TP_PROTO(int cpu,
arch/x86/include/asm/trace/hyperv.h
77
TP_ARGS(cpu, vector),
arch/x86/include/asm/trace/hyperv.h
79
__field(int, cpu)
arch/x86/include/asm/trace/hyperv.h
82
TP_fast_assign(__entry->cpu = cpu;
arch/x86/include/asm/trace/hyperv.h
86
__entry->cpu, __entry->vector)
arch/x86/include/asm/trace/irq_vectors.h
128
unsigned int cpu, unsigned int apicdest),
arch/x86/include/asm/trace/irq_vectors.h
130
TP_ARGS(irq, vector, cpu, apicdest),
arch/x86/include/asm/trace/irq_vectors.h
135
__field( unsigned int, cpu )
arch/x86/include/asm/trace/irq_vectors.h
142
__entry->cpu = cpu;
arch/x86/include/asm/trace/irq_vectors.h
147
__entry->irq, __entry->vector, __entry->cpu,
arch/x86/include/asm/trace/irq_vectors.h
154
unsigned int cpu, unsigned int prev_vector,
arch/x86/include/asm/trace/irq_vectors.h
157
TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu),
arch/x86/include/asm/trace/irq_vectors.h
162
__field( unsigned int, cpu )
arch/x86/include/asm/trace/irq_vectors.h
170
__entry->cpu = cpu;
arch/x86/include/asm/trace/irq_vectors.h
177
__entry->irq, __entry->vector, __entry->cpu,
arch/x86/include/asm/trace/irq_vectors.h
184
unsigned int cpu, unsigned int prev_vector, \
arch/x86/include/asm/trace/irq_vectors.h
186
TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu), NULL, NULL); \
arch/x86/include/asm/trace/irq_vectors.h
348
TP_PROTO(unsigned int irq, unsigned int cpu, unsigned int vector,
arch/x86/include/asm/trace/irq_vectors.h
351
TP_ARGS(irq, cpu, vector, is_managed),
arch/x86/include/asm/trace/irq_vectors.h
355
__field( unsigned int, cpu )
arch/x86/include/asm/trace/irq_vectors.h
362
__entry->cpu = cpu;
arch/x86/include/asm/trace/irq_vectors.h
368
__entry->irq, __entry->cpu, __entry->vector,
arch/x86/include/asm/uv/uv_geo.h
66
struct geo_cpu_s cpu;
arch/x86/include/asm/uv/uv_hub.h
192
#define uv_cpu_info_per(cpu) (&per_cpu(__uv_cpu_info, cpu))
arch/x86/include/asm/uv/uv_hub.h
207
static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
arch/x86/include/asm/uv/uv_hub.h
209
return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info;
arch/x86/include/asm/uv/uv_hub.h
645
static inline int uv_cpu_blade_processor_id(int cpu)
arch/x86/include/asm/uv/uv_hub.h
647
return uv_cpu_info_per(cpu)->blade_cpu_id;
arch/x86/include/asm/uv/uv_hub.h
675
static inline int uv_cpu_to_blade_id(int cpu)
arch/x86/include/asm/uv/uv_hub.h
677
return uv_cpu_hub_info(cpu)->numa_blade_id;
arch/x86/include/asm/uv/uv_hub.h
707
static inline int uv_cpu_to_pnode(int cpu)
arch/x86/include/asm/uv/uv_hub.h
709
return uv_cpu_hub_info(cpu)->pnode;
arch/x86/include/asm/uv/uv_hub.h
770
#define uv_cpu_nmi_per(cpu) (per_cpu(uv_cpu_nmi, cpu))
arch/x86/include/asm/uv/uv_hub.h
771
#define uv_hub_nmi_per(cpu) (uv_cpu_nmi_per(cpu).hub)
arch/x86/include/asm/vdso/processor.h
21
notrace long __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused);
arch/x86/include/asm/x86_init.h
283
void (*pin_vcpu)(int cpu);
arch/x86/include/asm/x86_init.h
350
extern void x86_op_int_noop(int cpu);
arch/x86/include/uapi/asm/mce.h
29
__u8 cpu; /* CPU number; obsoleted by extcpu */
arch/x86/kernel/acpi/boot.c
771
static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
arch/x86/kernel/acpi/boot.c
779
numa_set_node(cpu, nid);
arch/x86/kernel/acpi/boot.c
787
int cpu = topology_hotplug_apic(physid, acpi_id);
arch/x86/kernel/acpi/boot.c
789
if (cpu < 0) {
arch/x86/kernel/acpi/boot.c
791
return cpu;
arch/x86/kernel/acpi/boot.c
795
acpi_map_cpu2node(handle, cpu, physid);
arch/x86/kernel/acpi/boot.c
797
*pcpu = cpu;
arch/x86/kernel/acpi/boot.c
802
int acpi_unmap_cpu(int cpu)
arch/x86/kernel/acpi/boot.c
805
set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
arch/x86/kernel/acpi/boot.c
807
topology_hotunplug_apic(cpu);
arch/x86/kernel/acpi/cppc.c
144
int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
arch/x86/kernel/acpi/cppc.c
150
ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
arch/x86/kernel/acpi/cppc.c
156
ret = cppc_get_highest_perf(cpu, &val);
arch/x86/kernel/acpi/cppc.c
182
int cpu, count = 0;
arch/x86/kernel/acpi/cppc.c
199
for_each_online_cpu(cpu) {
arch/x86/kernel/acpi/cppc.c
203
ret = amd_get_highest_perf(cpu, &tmp);
arch/x86/kernel/acpi/cppc.c
242
int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
arch/x86/kernel/acpi/cppc.c
244
enum x86_topology_cpu_type core_type = get_topology_cpu_type(&cpu_data(cpu));
arch/x86/kernel/acpi/cppc.c
278
pr_warn("Undefined core type found for cpu %d\n", cpu);
arch/x86/kernel/acpi/cppc.c
286
ret = amd_get_highest_perf(cpu, &tmp);
arch/x86/kernel/acpi/cstate.c
170
int acpi_processor_ffh_cstate_probe(unsigned int cpu,
arch/x86/kernel/acpi/cstate.c
174
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/kernel/acpi/cstate.c
183
percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
arch/x86/kernel/acpi/cstate.c
189
retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx,
arch/x86/kernel/acpi/cstate.c
211
unsigned int cpu = smp_processor_id();
arch/x86/kernel/acpi/cstate.c
214
percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
arch/x86/kernel/acpi/cstate.c
221
unsigned int cpu = smp_processor_id();
arch/x86/kernel/acpi/cstate.c
224
percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
arch/x86/kernel/acpi/cstate.c
33
unsigned int cpu)
arch/x86/kernel/acpi/cstate.c
35
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/kernel/acpi/madt_wakeup.c
129
static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip, unsigned int cpu)
arch/x86/kernel/acpi/madt_wakeup.c
38
static void acpi_mp_cpu_die(unsigned int cpu)
arch/x86/kernel/acpi/madt_wakeup.c
40
u32 apicid = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/acpi/madt_wakeup.c
59
pr_err("Failed to hand over CPU %d to BIOS\n", cpu);
arch/x86/kernel/amd_nb.c
171
int amd_get_subcaches(int cpu)
arch/x86/kernel/amd_nb.c
173
struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
arch/x86/kernel/amd_nb.c
181
return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
arch/x86/kernel/amd_nb.c
184
int amd_set_subcaches(int cpu, unsigned long mask)
arch/x86/kernel/amd_nb.c
187
struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
arch/x86/kernel/amd_nb.c
207
cuid = cpu_data(cpu).topo.core_id;
arch/x86/kernel/apic/apic.c
1505
int cpu = smp_processor_id();
arch/x86/kernel/apic/apic.c
1613
if (!cpu && (pic_mode || !value || ioapic_is_disabled)) {
arch/x86/kernel/apic/apic.c
1615
apic_pr_verbose("Enabled ExtINT on CPU#%d\n", cpu);
arch/x86/kernel/apic/apic.c
1618
apic_pr_verbose("Masked ExtINT on CPU#%d\n", cpu);
arch/x86/kernel/apic/apic.c
1626
if ((!cpu && apic_extnmi != APIC_EXTNMI_NONE) ||
arch/x86/kernel/apic/apic.c
1639
if (!cpu)
arch/x86/kernel/apic/apic_common.c
12
u32 apic_default_calc_apicid(unsigned int cpu)
arch/x86/kernel/apic/apic_common.c
14
return per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/apic/apic_common.c
17
u32 apic_flat_calc_apicid(unsigned int cpu)
arch/x86/kernel/apic/apic_common.c
19
return 1U << cpu;
arch/x86/kernel/apic/apic_noop.c
23
static void noop_send_IPI(int cpu, int vector) { }
arch/x86/kernel/apic/apic_noop.c
32
unsigned int cpu)
arch/x86/kernel/apic/apic_numachip.c
106
unsigned int cpu;
arch/x86/kernel/apic/apic_numachip.c
108
for_each_cpu(cpu, mask) {
arch/x86/kernel/apic/apic_numachip.c
109
if (cpu != this_cpu)
arch/x86/kernel/apic/apic_numachip.c
110
numachip_send_IPI_one(cpu, vector);
arch/x86/kernel/apic/apic_numachip.c
117
unsigned int cpu;
arch/x86/kernel/apic/apic_numachip.c
119
for_each_online_cpu(cpu) {
arch/x86/kernel/apic/apic_numachip.c
120
if (cpu != this_cpu)
arch/x86/kernel/apic/apic_numachip.c
121
numachip_send_IPI_one(cpu, vector);
arch/x86/kernel/apic/apic_numachip.c
60
static int numachip_wakeup_secondary(u32 phys_apicid, unsigned long start_rip, unsigned int cpu)
arch/x86/kernel/apic/apic_numachip.c
69
static void numachip_send_IPI_one(int cpu, int vector)
arch/x86/kernel/apic/apic_numachip.c
71
int local_apicid, apicid = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/apic/apic_numachip.c
96
unsigned int cpu;
arch/x86/kernel/apic/apic_numachip.c
98
for_each_cpu(cpu, mask)
arch/x86/kernel/apic/apic_numachip.c
99
numachip_send_IPI_one(cpu, vector);
arch/x86/kernel/apic/ipi.c
101
void apic_send_nmi_to_offline_cpu(unsigned int cpu)
arch/x86/kernel/apic/ipi.c
105
if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, &cpus_booted_once_mask)))
arch/x86/kernel/apic/ipi.c
107
apic->send_IPI(cpu, NMI_VECTOR);
arch/x86/kernel/apic/ipi.c
189
void default_send_IPI_single_phys(int cpu, int vector)
arch/x86/kernel/apic/ipi.c
194
__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
arch/x86/kernel/apic/ipi.c
202
unsigned long cpu;
arch/x86/kernel/apic/ipi.c
205
for_each_cpu(cpu, mask) {
arch/x86/kernel/apic/ipi.c
207
cpu), vector, APIC_DEST_PHYSICAL);
arch/x86/kernel/apic/ipi.c
215
unsigned int cpu, this_cpu = smp_processor_id();
arch/x86/kernel/apic/ipi.c
219
for_each_cpu(cpu, mask) {
arch/x86/kernel/apic/ipi.c
220
if (cpu == this_cpu)
arch/x86/kernel/apic/ipi.c
223
cpu), vector, APIC_DEST_PHYSICAL);
arch/x86/kernel/apic/ipi.c
231
void default_send_IPI_single(int cpu, int vector)
arch/x86/kernel/apic/ipi.c
233
__apic_send_IPI_mask(cpumask_of(cpu), vector);
arch/x86/kernel/apic/ipi.c
255
unsigned int cpu;
arch/x86/kernel/apic/ipi.c
258
for_each_cpu(cpu, mask)
arch/x86/kernel/apic/ipi.c
259
__default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL);
arch/x86/kernel/apic/ipi.c
266
unsigned int cpu, this_cpu = smp_processor_id();
arch/x86/kernel/apic/ipi.c
270
for_each_cpu(cpu, mask) {
arch/x86/kernel/apic/ipi.c
271
if (cpu == this_cpu)
arch/x86/kernel/apic/ipi.c
273
__default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL);
arch/x86/kernel/apic/ipi.c
68
void native_smp_send_reschedule(int cpu)
arch/x86/kernel/apic/ipi.c
70
if (unlikely(cpu_is_offline(cpu))) {
arch/x86/kernel/apic/ipi.c
71
WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
arch/x86/kernel/apic/ipi.c
74
__apic_send_IPI(cpu, RESCHEDULE_VECTOR);
arch/x86/kernel/apic/ipi.c
77
void native_send_call_func_single_ipi(int cpu)
arch/x86/kernel/apic/ipi.c
79
__apic_send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
arch/x86/kernel/apic/ipi.c
85
unsigned int cpu = smp_processor_id();
arch/x86/kernel/apic/ipi.c
87
if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
arch/x86/kernel/apic/ipi.c
90
if (cpumask_test_cpu(cpu, mask))
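The cpumask_or_equal() test above is the shorthand check: if the requested mask plus the sender covers exactly the online set, one broadcast beats N unicasts. Restated:

    static bool demo_can_use_allbutself(const struct cpumask *mask)
    {
            unsigned int cpu = smp_processor_id();

            /* mask | {self} == online set? */
            return cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask);
    }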
arch/x86/kernel/apic/local.h
64
void default_send_IPI_single(int cpu, int vector);
arch/x86/kernel/apic/local.h
65
void default_send_IPI_single_phys(int cpu, int vector);
arch/x86/kernel/apic/msi.c
39
unsigned int cpu;
arch/x86/kernel/apic/msi.c
43
cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
arch/x86/kernel/apic/msi.c
78
if (WARN_ON_ONCE(cpu != smp_processor_id())) {
arch/x86/kernel/apic/vector.c
1008
__apic_send_IPI(apicd->cpu, apicd->vector);
arch/x86/kernel/apic/vector.c
1094
unsigned int cpu = apicd->prev_cpu;
arch/x86/kernel/apic/vector.c
1098
if (cpu_online(cpu)) {
arch/x86/kernel/apic/vector.c
1099
struct vector_cleanup *cl = per_cpu_ptr(&vector_cleanup, cpu);
arch/x86/kernel/apic/vector.c
1119
add_timer_on(&cl->timer, cpu);
arch/x86/kernel/apic/vector.c
1122
pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu);
arch/x86/kernel/apic/vector.c
1151
if (apicd->cpu == smp_processor_id())
arch/x86/kernel/apic/vector.c
1162
unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
arch/x86/kernel/apic/vector.c
1170
cpu, tomove, avl);
arch/x86/kernel/apic/vector.c
129
unsigned int cpu)
arch/x86/kernel/apic/vector.c
1307
int cpu;
arch/x86/kernel/apic/vector.c
1313
for_each_online_cpu(cpu) {
arch/x86/kernel/apic/vector.c
1314
if (cpu >= maxcpu)
arch/x86/kernel/apic/vector.c
1316
smp_call_function_single(cpu, print_local_APIC, NULL, 1);
arch/x86/kernel/apic/vector.c
136
apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
arch/x86/kernel/apic/vector.c
138
apic_update_vector(cpu, vector, true);
arch/x86/kernel/apic/vector.c
140
irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
arch/x86/kernel/apic/vector.c
141
trace_vector_config(irqd->irq, vector, cpu, apicd->hw_irq_cfg.dest_apicid);
arch/x86/kernel/apic/vector.c
144
static void apic_free_vector(unsigned int cpu, unsigned int vector, bool managed)
arch/x86/kernel/apic/vector.c
146
apic_update_vector(cpu, vector, false);
arch/x86/kernel/apic/vector.c
147
irq_matrix_free(vector_matrix, cpu, vector, managed);
arch/x86/kernel/apic/vector.c
159
apicd->cpu);
arch/x86/kernel/apic/vector.c
178
if (cpu_online(apicd->cpu)) {
arch/x86/kernel/apic/vector.c
181
apicd->prev_cpu = apicd->cpu;
arch/x86/kernel/apic/vector.c
182
WARN_ON_ONCE(apicd->cpu == newcpu);
arch/x86/kernel/apic/vector.c
184
apic_free_vector(apicd->cpu, apicd->vector, managed);
arch/x86/kernel/apic/vector.c
189
apicd->cpu = newcpu;
arch/x86/kernel/apic/vector.c
197
unsigned int cpu = cpumask_first(cpu_online_mask);
arch/x86/kernel/apic/vector.c
199
apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
arch/x86/kernel/apic/vector.c
244
unsigned int cpu = apicd->cpu;
arch/x86/kernel/apic/vector.c
254
if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
arch/x86/kernel/apic/vector.c
266
vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
arch/x86/kernel/apic/vector.c
270
chip_data_update(irqd, vector, cpu);
arch/x86/kernel/apic/vector.c
30
unsigned int cpu;
arch/x86/kernel/apic/vector.c
334
int vector, cpu;
arch/x86/kernel/apic/vector.c
339
if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
arch/x86/kernel/apic/vector.c
342
&cpu);
arch/x86/kernel/apic/vector.c
346
chip_data_update(irqd, vector, cpu);
arch/x86/kernel/apic/vector.c
362
trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
arch/x86/kernel/apic/vector.c
365
per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
arch/x86/kernel/apic/vector.c
366
apic_free_vector(apicd->cpu, vector, managed);
arch/x86/kernel/apic/vector.c
527
apicd->cpu = 0;
arch/x86/kernel/apic/vector.c
536
apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
arch/x86/kernel/apic/vector.c
651
seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
arch/x86/kernel/apic/vector.c
900
unsigned int cpu = apicd->prev_cpu;
arch/x86/kernel/apic/vector.c
913
trace_vector_free_moved(apicd->irq, cpu, vector, managed);
arch/x86/kernel/apic/vector.c
914
apic_free_vector(cpu, vector, managed);
arch/x86/kernel/apic/vector.c
915
per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
arch/x86/kernel/apic/vector.c
926
unsigned int cpu = smp_processor_id();
arch/x86/kernel/apic/vector.c
940
if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
arch/x86/kernel/apic/x2apic_cluster.c
105
static void prefill_clustermask(struct cpumask *cmsk, unsigned int cpu, u32 cluster)
arch/x86/kernel/apic/x2apic_cluster.c
113
if (apicid == BAD_APICID || cpu_i == cpu || apic_cluster(apicid) != cluster)
arch/x86/kernel/apic/x2apic_cluster.c
124
static int alloc_clustermask(unsigned int cpu, u32 cluster, int node)
arch/x86/kernel/apic/x2apic_cluster.c
135
if (per_cpu(cluster_masks, cpu))
arch/x86/kernel/apic/x2apic_cluster.c
156
per_cpu(cluster_masks, cpu) = cmsk;
arch/x86/kernel/apic/x2apic_cluster.c
170
per_cpu(cluster_masks, cpu) = cmsk;
arch/x86/kernel/apic/x2apic_cluster.c
171
prefill_clustermask(cmsk, cpu, cluster);
arch/x86/kernel/apic/x2apic_cluster.c
176
static int x2apic_prepare_cpu(unsigned int cpu)
arch/x86/kernel/apic/x2apic_cluster.c
178
u32 phys_apicid = apic->cpu_present_to_apicid(cpu);
arch/x86/kernel/apic/x2apic_cluster.c
181
int node = cpu_to_node(cpu);
arch/x86/kernel/apic/x2apic_cluster.c
183
x86_cpu_to_logical_apicid[cpu] = logical_apicid;
arch/x86/kernel/apic/x2apic_cluster.c
185
if (alloc_clustermask(cpu, cluster, node) < 0)
arch/x86/kernel/apic/x2apic_cluster.c
188
if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node))
arch/x86/kernel/apic/x2apic_cluster.c
29
static void x2apic_send_IPI(int cpu, int vector)
arch/x86/kernel/apic/x2apic_cluster.c
31
u32 dest = x86_cpu_to_logical_apicid[cpu];
arch/x86/kernel/apic/x2apic_cluster.c
41
unsigned int cpu, clustercpu;
arch/x86/kernel/apic/x2apic_cluster.c
57
for_each_cpu(cpu, tmpmsk) {
arch/x86/kernel/apic/x2apic_cluster.c
58
struct cpumask *cmsk = per_cpu(cluster_masks, cpu);
arch/x86/kernel/apic/x2apic_cluster.c
86
static u32 x2apic_calc_apicid(unsigned int cpu)
arch/x86/kernel/apic/x2apic_cluster.c
88
return x86_cpu_to_logical_apicid[cpu];
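x2apic_prepare_cpu() above allocates its per-CPU masks node-locally during the CPUHP prepare phase, where GFP_KERNEL is allowed. The same pattern with a hypothetical demo_mask (the real variables are cluster_masks and ipi_mask):

    static DEFINE_PER_CPU(cpumask_var_t, demo_mask);

    static int demo_prepare_cpu(unsigned int cpu)
    {
            int node = cpu_to_node(cpu);

            if (!zalloc_cpumask_var_node(&per_cpu(demo_mask, cpu),
                                         GFP_KERNEL, node))
                    return -ENOMEM;
            return 0;
    }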
arch/x86/kernel/apic/x2apic_phys.c
44
static void x2apic_send_IPI(int cpu, int vector)
arch/x86/kernel/apic/x2apic_phys.c
46
u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/apic/x2apic_savic.c
136
static void send_ipi_dest(unsigned int cpu, unsigned int vector, bool nmi)
arch/x86/kernel/apic/x2apic_savic.c
139
apic_set_reg(per_cpu_ptr(savic_page, cpu), SAVIC_NMI_REQ, 1);
arch/x86/kernel/apic/x2apic_savic.c
141
update_vector(cpu, APIC_IRR, vector, true);
arch/x86/kernel/apic/x2apic_savic.c
146
unsigned int cpu, src_cpu;
arch/x86/kernel/apic/x2apic_savic.c
152
for_each_cpu(cpu, cpu_online_mask) {
arch/x86/kernel/apic/x2apic_savic.c
153
if (cpu == src_cpu)
arch/x86/kernel/apic/x2apic_savic.c
155
send_ipi_dest(cpu, vector, nmi);
arch/x86/kernel/apic/x2apic_savic.c
252
static void savic_send_ipi(int cpu, int vector)
arch/x86/kernel/apic/x2apic_savic.c
254
u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/apic/x2apic_savic.c
261
unsigned int cpu, this_cpu;
arch/x86/kernel/apic/x2apic_savic.c
267
for_each_cpu(cpu, mask) {
arch/x86/kernel/apic/x2apic_savic.c
268
if (excl_self && cpu == this_cpu)
arch/x86/kernel/apic/x2apic_savic.c
270
send_ipi(per_cpu(x86_cpu_to_apicid, cpu), vector, 0);
arch/x86/kernel/apic/x2apic_savic.c
299
static void savic_update_vector(unsigned int cpu, unsigned int vector, bool set)
arch/x86/kernel/apic/x2apic_savic.c
301
update_vector(cpu, SAVIC_ALLOWED_IRR, vector, set);
arch/x86/kernel/apic/x2apic_savic.c
306
unsigned int cpu;
arch/x86/kernel/apic/x2apic_savic.c
309
cpu = raw_smp_processor_id();
arch/x86/kernel/apic/x2apic_savic.c
31
static inline void *get_reg_bitmap(unsigned int cpu, unsigned int offset)
arch/x86/kernel/apic/x2apic_savic.c
310
vec = apic_find_highest_vector(get_reg_bitmap(cpu, APIC_ISR));
arch/x86/kernel/apic/x2apic_savic.c
315
if (apic_test_vector(vec, get_reg_bitmap(cpu, APIC_TMR))) {
arch/x86/kernel/apic/x2apic_savic.c
316
update_vector(cpu, APIC_ISR, vec, false);
arch/x86/kernel/apic/x2apic_savic.c
33
return &per_cpu_ptr(savic_page, cpu)->regs[offset];
arch/x86/kernel/apic/x2apic_savic.c
36
static inline void update_vector(unsigned int cpu, unsigned int offset,
arch/x86/kernel/apic/x2apic_savic.c
39
void *bitmap = get_reg_bitmap(cpu, offset);
arch/x86/kernel/apic/x2apic_uv_x.c
1650
int bytes, cpu, nodeid, bid;
arch/x86/kernel/apic/x2apic_uv_x.c
1770
for_each_possible_cpu(cpu) {
arch/x86/kernel/apic/x2apic_uv_x.c
1771
int apicid = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/apic/x2apic_uv_x.c
1778
uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list_blade[bid];
arch/x86/kernel/apic/x2apic_uv_x.c
1779
uv_cpu_info_per(cpu)->blade_cpu_id = uv_cpu_hub_info(cpu)->nr_possible_cpus++;
arch/x86/kernel/apic/x2apic_uv_x.c
1780
if (uv_cpu_hub_info(cpu)->memory_nid == NUMA_NO_NODE)
arch/x86/kernel/apic/x2apic_uv_x.c
1781
uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu);
arch/x86/kernel/apic/x2apic_uv_x.c
1783
if (uv_cpu_hub_info(cpu)->pnode == 0xffff)
arch/x86/kernel/apic/x2apic_uv_x.c
1784
uv_cpu_hub_info(cpu)->pnode = pnode;
arch/x86/kernel/apic/x2apic_uv_x.c
670
static int uv_wakeup_secondary(u32 phys_apicid, unsigned long start_rip, unsigned int cpu)
arch/x86/kernel/apic/x2apic_uv_x.c
694
static void uv_send_IPI_one(int cpu, int vector)
arch/x86/kernel/apic/x2apic_uv_x.c
696
unsigned long apicid = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/apic/x2apic_uv_x.c
715
unsigned int cpu;
arch/x86/kernel/apic/x2apic_uv_x.c
717
for_each_cpu(cpu, mask)
arch/x86/kernel/apic/x2apic_uv_x.c
718
uv_send_IPI_one(cpu, vector);
arch/x86/kernel/apic/x2apic_uv_x.c
724
unsigned int cpu;
arch/x86/kernel/apic/x2apic_uv_x.c
726
for_each_cpu(cpu, mask) {
arch/x86/kernel/apic/x2apic_uv_x.c
727
if (cpu != this_cpu)
arch/x86/kernel/apic/x2apic_uv_x.c
728
uv_send_IPI_one(cpu, vector);
arch/x86/kernel/apic/x2apic_uv_x.c
735
unsigned int cpu;
arch/x86/kernel/apic/x2apic_uv_x.c
737
for_each_online_cpu(cpu) {
arch/x86/kernel/apic/x2apic_uv_x.c
738
if (cpu != this_cpu)
arch/x86/kernel/apic/x2apic_uv_x.c
739
uv_send_IPI_one(cpu, vector);
arch/x86/kernel/apm_32.c
591
int cpu;
arch/x86/kernel/apm_32.c
597
cpu = get_cpu();
arch/x86/kernel/apm_32.c
598
BUG_ON(cpu != 0);
arch/x86/kernel/apm_32.c
599
gdt = get_cpu_gdt_rw(cpu);
arch/x86/kernel/apm_32.c
672
int cpu;
arch/x86/kernel/apm_32.c
678
cpu = get_cpu();
arch/x86/kernel/apm_32.c
679
BUG_ON(cpu != 0);
arch/x86/kernel/apm_32.c
680
gdt = get_cpu_gdt_rw(cpu);
arch/x86/kernel/callthunks.c
344
unsigned long cpu = (unsigned long)m->private;
arch/x86/kernel/callthunks.c
347
per_cpu(__x86_call_count, cpu),
arch/x86/kernel/callthunks.c
348
per_cpu(__x86_ret_count, cpu),
arch/x86/kernel/callthunks.c
349
per_cpu(__x86_stuffs_count, cpu),
arch/x86/kernel/callthunks.c
350
per_cpu(__x86_ctxsw_count, cpu));
arch/x86/kernel/callthunks.c
369
unsigned long cpu;
arch/x86/kernel/callthunks.c
372
for_each_possible_cpu(cpu) {
arch/x86/kernel/callthunks.c
373
void *arg = (void *)cpu;
arch/x86/kernel/callthunks.c
376
sprintf(name, "cpu%lu", cpu);
arch/x86/kernel/cpu/amd.c
1272
int cpu = smp_processor_id();
arch/x86/kernel/cpu/amd.c
1280
if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
arch/x86/kernel/cpu/amd.c
1284
per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
arch/x86/kernel/cpu/amd.c
305
int cpu = smp_processor_id();
arch/x86/kernel/cpu/amd.c
309
node = numa_cpu_node(cpu);
arch/x86/kernel/cpu/amd.c
311
node = per_cpu_llc_id(cpu);
arch/x86/kernel/cpu/amd.c
349
numa_set_node(cpu, node);
arch/x86/kernel/cpu/amd_cache_disable.c
109
wbinvd_on_cpu(cpu);
arch/x86/kernel/cpu/amd_cache_disable.c
126
static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
arch/x86/kernel/cpu/amd_cache_disable.c
143
amd_l3_disable_index(nb, cpu, slot, index);
arch/x86/kernel/cpu/amd_cache_disable.c
153
int cpu, err = 0;
arch/x86/kernel/cpu/amd_cache_disable.c
158
cpu = cpumask_first(&ci->shared_cpu_map);
arch/x86/kernel/cpu/amd_cache_disable.c
163
err = amd_set_l3_disable_slot(nb, cpu, slot, val);
arch/x86/kernel/cpu/amd_cache_disable.c
190
int cpu = cpumask_first(&ci->shared_cpu_map);
arch/x86/kernel/cpu/amd_cache_disable.c
192
return sysfs_emit(buf, "%x\n", amd_get_subcaches(cpu));
arch/x86/kernel/cpu/amd_cache_disable.c
200
int cpu = cpumask_first(&ci->shared_cpu_map);
arch/x86/kernel/cpu/amd_cache_disable.c
209
if (amd_set_subcaches(cpu, val))
arch/x86/kernel/cpu/amd_cache_disable.c
86
static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
arch/x86/kernel/cpu/aperfmperf.c
339
int cpu;
arch/x86/kernel/cpu/aperfmperf.c
347
for_each_possible_cpu(cpu)
arch/x86/kernel/cpu/aperfmperf.c
348
per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE;
arch/x86/kernel/cpu/aperfmperf.c
376
int cpu;
arch/x86/kernel/cpu/aperfmperf.c
387
for_each_possible_cpu(cpu) {
arch/x86/kernel/cpu/aperfmperf.c
388
per_cpu_ptr(arch_cpu_scale, cpu)->capacity = SCHED_CAPACITY_SCALE;
arch/x86/kernel/cpu/aperfmperf.c
389
per_cpu_ptr(arch_cpu_scale, cpu)->freq_ratio = arch_max_freq_ratio;
arch/x86/kernel/cpu/aperfmperf.c
415
void arch_set_cpu_capacity(int cpu, unsigned long cap, unsigned long max_cap,
arch/x86/kernel/cpu/aperfmperf.c
419
WRITE_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->capacity,
arch/x86/kernel/cpu/aperfmperf.c
421
WRITE_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->freq_ratio,
arch/x86/kernel/cpu/aperfmperf.c
428
unsigned long arch_scale_cpu_capacity(int cpu)
arch/x86/kernel/cpu/aperfmperf.c
431
return READ_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->capacity);
arch/x86/kernel/cpu/aperfmperf.c
506
int arch_freq_get_on_cpu(int cpu)
arch/x86/kernel/cpu/aperfmperf.c
508
struct aperfmperf *s = per_cpu_ptr(&cpu_samples, cpu);
arch/x86/kernel/cpu/aperfmperf.c
533
freq = cpufreq_quick_get(cpu);
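arch_set_cpu_capacity()/arch_scale_cpu_capacity() above pair WRITE_ONCE with READ_ONCE so the scheduler can sample capacities locklessly. The shape in miniature, with a hypothetical demo_scale in place of arch_cpu_scale:

    struct demo_scale { unsigned long capacity; };
    static DEFINE_PER_CPU(struct demo_scale, demo_scale);

    static void demo_set_capacity(int cpu, unsigned long cap)
    {
            WRITE_ONCE(per_cpu_ptr(&demo_scale, cpu)->capacity, cap);
    }

    static unsigned long demo_get_capacity(int cpu)
    {
            return READ_ONCE(per_cpu_ptr(&demo_scale, cpu)->capacity);
    }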
arch/x86/kernel/cpu/bus_lock.c
213
unsigned int cpu;
arch/x86/kernel/cpu/bus_lock.c
215
for_each_possible_cpu(cpu) {
arch/x86/kernel/cpu/bus_lock.c
216
struct delayed_work *work = per_cpu_ptr(&sl_reenable, cpu);
arch/x86/kernel/cpu/bus_lock.c
235
static int splitlock_cpu_offline(unsigned int cpu)
arch/x86/kernel/cpu/bus_lock.c
245
int cpu;
arch/x86/kernel/cpu/bus_lock.c
268
cpu = get_cpu();
arch/x86/kernel/cpu/bus_lock.c
269
work = saved_sld_mitigate ? &sl_reenable_unlock : per_cpu_ptr(&sl_reenable, cpu);
arch/x86/kernel/cpu/bus_lock.c
270
schedule_delayed_work_on(cpu, work, 2);
arch/x86/kernel/cpu/cacheinfo.c
488
static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
arch/x86/kernel/cpu/cacheinfo.c
500
for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
arch/x86/kernel/cpu/cacheinfo.c
506
for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
arch/x86/kernel/cpu/cacheinfo.c
516
apicid = cpu_data(cpu).topo.apicid;
arch/x86/kernel/cpu/cacheinfo.c
547
static void __cache_cpumap_setup(unsigned int cpu, int index,
arch/x86/kernel/cpu/cacheinfo.c
550
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/x86/kernel/cpu/cacheinfo.c
551
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/kernel/cpu/cacheinfo.c
557
if (__cache_amd_cpumap_setup(cpu, index, id4))
arch/x86/kernel/cpu/cacheinfo.c
564
cpumask_set_cpu(cpu, &ci->shared_cpu_map);
arch/x86/kernel/cpu/cacheinfo.c
575
if (i == cpu || !sib_cpu_ci->info_list)
arch/x86/kernel/cpu/cacheinfo.c
580
cpumask_set_cpu(cpu, &sibling_ci->shared_cpu_map);
arch/x86/kernel/cpu/cacheinfo.c
599
int init_cache_level(unsigned int cpu)
arch/x86/kernel/cpu/cacheinfo.c
601
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
arch/x86/kernel/cpu/cacheinfo.c
610
int populate_cache_leaves(unsigned int cpu)
arch/x86/kernel/cpu/cacheinfo.c
612
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
arch/x86/kernel/cpu/cacheinfo.c
615
u32 apicid = cpu_data(cpu).topo.apicid;
arch/x86/kernel/cpu/cacheinfo.c
631
__cache_cpumap_setup(cpu, idx, &id4);
arch/x86/kernel/cpu/cacheinfo.c
766
static int cache_ap_online(unsigned int cpu)
arch/x86/kernel/cpu/cacheinfo.c
768
cpumask_set_cpu(cpu, cpu_cacheinfo_mask);
arch/x86/kernel/cpu/cacheinfo.c
792
static int cache_ap_offline(unsigned int cpu)
arch/x86/kernel/cpu/cacheinfo.c
794
cpumask_clear_cpu(cpu, cpu_cacheinfo_mask);
arch/x86/kernel/cpu/common.c
2131
int cpu;
arch/x86/kernel/cpu/common.c
2136
cpu = get_cpu();
arch/x86/kernel/cpu/common.c
2137
tss = &per_cpu(cpu_tss_rw, cpu);
arch/x86/kernel/cpu/common.c
2146
wrmsrq(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
arch/x86/kernel/cpu/common.c
2169
void identify_secondary_cpu(unsigned int cpu)
arch/x86/kernel/cpu/common.c
2171
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/kernel/cpu/common.c
2176
c->cpu_index = cpu;
arch/x86/kernel/cpu/common.c
2351
static inline void setup_getcpu(int cpu)
arch/x86/kernel/cpu/common.c
2353
unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
arch/x86/kernel/cpu/common.c
2369
write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
arch/x86/kernel/cpu/common.c
2410
int cpu = raw_smp_processor_id();
arch/x86/kernel/cpu/common.c
2413
setup_getcpu(cpu);
arch/x86/kernel/cpu/common.c
2419
set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
arch/x86/kernel/cpu/common.c
2466
int cpu = raw_smp_processor_id();
arch/x86/kernel/cpu/common.c
2470
early_cpu_to_node(cpu) != NUMA_NO_NODE)
arch/x86/kernel/cpu/common.c
2471
set_numa_node(early_cpu_to_node(cpu));
arch/x86/kernel/cpu/common.c
2473
pr_debug("Initializing CPU#%d\n", cpu);
arch/x86/kernel/cpu/common.c
2503
load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
arch/x86/kernel/cpu/common.c
2515
load_fixmap_gdt(cpu);
arch/x86/kernel/cpu/common.c
760
void load_direct_gdt(int cpu)
arch/x86/kernel/cpu/common.c
764
gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
arch/x86/kernel/cpu/common.c
771
void load_fixmap_gdt(int cpu)
arch/x86/kernel/cpu/common.c
775
gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
arch/x86/kernel/cpu/common.c
790
void __init switch_gdt_and_percpu_base(int cpu)
arch/x86/kernel/cpu/common.c
792
load_direct_gdt(cpu);
arch/x86/kernel/cpu/common.c
809
wrmsrq(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
arch/x86/kernel/cpu/cpu.h
78
unsigned int aperfmperf_get_khz(int cpu);
arch/x86/kernel/cpu/debugfs.c
12
unsigned long cpu = (unsigned long)m->private;
arch/x86/kernel/cpu/debugfs.c
13
struct cpuinfo_x86 *c = per_cpu_ptr(&cpu_info, cpu);
arch/x86/kernel/cpu/debugfs.c
15
seq_printf(m, "online: %d\n", cpu_online(cpu));
arch/x86/kernel/cpu/hygon.c
49
int cpu = smp_processor_id();
arch/x86/kernel/cpu/hygon.c
53
node = numa_cpu_node(cpu);
arch/x86/kernel/cpu/hygon.c
92
numa_set_node(cpu, node);
arch/x86/kernel/cpu/intel.c
474
int cpu = smp_processor_id();
arch/x86/kernel/cpu/intel.c
478
node = numa_cpu_node(cpu);
arch/x86/kernel/cpu/intel.c
481
node = cpu_to_node(cpu);
arch/x86/kernel/cpu/intel.c
483
numa_set_node(cpu, node);
arch/x86/kernel/cpu/intel_epb.c
138
unsigned int cpu = dev->id;
arch/x86/kernel/cpu/intel_epb.c
142
ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
arch/x86/kernel/cpu/intel_epb.c
153
unsigned int cpu = dev->id;
arch/x86/kernel/cpu/intel_epb.c
164
ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
arch/x86/kernel/cpu/intel_epb.c
168
ret = wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS,
arch/x86/kernel/cpu/intel_epb.c
188
static int intel_epb_online(unsigned int cpu)
arch/x86/kernel/cpu/intel_epb.c
190
struct device *cpu_dev = get_cpu_device(cpu);
arch/x86/kernel/cpu/intel_epb.c
199
static int intel_epb_offline(unsigned int cpu)
arch/x86/kernel/cpu/intel_epb.c
201
struct device *cpu_dev = get_cpu_device(cpu);
arch/x86/kernel/cpu/mce/amd.c
1034
static const char *get_name(unsigned int cpu, unsigned int bank, struct threshold_block *b)
arch/x86/kernel/cpu/mce/amd.c
1045
bank_type = smca_get_bank_type(cpu, bank);
arch/x86/kernel/cpu/mce/amd.c
1062
if (per_cpu(smca_bank_counts, cpu)[bank_type] == 1)
arch/x86/kernel/cpu/mce/amd.c
1067
per_cpu(smca_banks, cpu)[bank].sysfs_id);
arch/x86/kernel/cpu/mce/amd.c
1071
static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
arch/x86/kernel/cpu/mce/amd.c
1102
b->cpu = cpu;
arch/x86/kernel/cpu/mce/amd.c
1119
err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(cpu, bank, b));
arch/x86/kernel/cpu/mce/amd.c
1123
address = get_block_address(address, low, high, bank, ++block, cpu);
arch/x86/kernel/cpu/mce/amd.c
1127
err = allocate_threshold_blocks(cpu, tb, bank, block, address);
arch/x86/kernel/cpu/mce/amd.c
1144
static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
arch/x86/kernel/cpu/mce/amd.c
1149
const char *name = get_name(cpu, bank, NULL);
arch/x86/kernel/cpu/mce/amd.c
1170
err = allocate_threshold_blocks(cpu, b, bank, 0, mca_msr_reg(bank, MCA_MISC));
arch/x86/kernel/cpu/mce/amd.c
1217
void mce_threshold_remove_device(unsigned int cpu)
arch/x86/kernel/cpu/mce/amd.c
1245
void mce_threshold_create_device(unsigned int cpu)
arch/x86/kernel/cpu/mce/amd.c
1265
if (threshold_create_bank(bp, cpu, bank)) {
arch/x86/kernel/cpu/mce/amd.c
141
enum smca_bank_types smca_get_bank_type(unsigned int cpu, unsigned int bank)
arch/x86/kernel/cpu/mce/amd.c
148
b = &per_cpu(smca_banks, cpu)[bank];
arch/x86/kernel/cpu/mce/amd.c
237
unsigned int cpu;
arch/x86/kernel/cpu/mce/amd.c
275
static void smca_configure(unsigned int bank, unsigned int cpu)
arch/x86/kernel/cpu/mce/amd.c
414
"for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
arch/x86/kernel/cpu/mce/amd.c
422
b->cpu, apic, b->bank, b->block, b->address, hi, lo);
arch/x86/kernel/cpu/mce/amd.c
532
unsigned int cpu)
arch/x86/kernel/cpu/mce/amd.c
536
if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
arch/x86/kernel/cpu/mce/amd.c
568
unsigned int cpu = smp_processor_id();
arch/x86/kernel/cpu/mce/amd.c
573
per_cpu(bank_map, cpu) |= BIT_ULL(bank);
arch/x86/kernel/cpu/mce/amd.c
576
b.cpu = cpu;
arch/x86/kernel/cpu/mce/amd.c
718
unsigned int bank, block, cpu = smp_processor_id();
arch/x86/kernel/cpu/mce/amd.c
730
smca_configure(bank, cpu);
arch/x86/kernel/cpu/mce/amd.c
739
address = get_block_address(address, low, high, bank, block, cpu);
arch/x86/kernel/cpu/mce/amd.c
929
if (smp_call_function_single(b->cpu, threshold_restart_block, &tr, 1))
arch/x86/kernel/cpu/mce/amd.c
954
if (smp_call_function_single(b->cpu, threshold_restart_block, &tr, 1))
arch/x86/kernel/cpu/mce/amd.c
965
if (rdmsr_on_cpu(b->cpu, b->address, &lo, &hi))
arch/x86/kernel/cpu/mce/apei.c
100
for_each_possible_cpu(cpu) {
arch/x86/kernel/cpu/mce/apei.c
101
if (cpu_data(cpu).topo.initial_apicid == lapic_id) {
arch/x86/kernel/cpu/mce/apei.c
113
mce_prep_record_per_cpu(cpu, m);
arch/x86/kernel/cpu/mce/apei.c
71
unsigned int cpu, num_regs;
arch/x86/kernel/cpu/mce/core.c
1106
int cpu;
arch/x86/kernel/cpu/mce/core.c
1113
for_each_possible_cpu(cpu) {
arch/x86/kernel/cpu/mce/core.c
1114
struct mce_hw_err *etmp = &per_cpu(hw_errs_seen, cpu);
arch/x86/kernel/cpu/mce/core.c
1119
err = &per_cpu(hw_errs_seen, cpu);
arch/x86/kernel/cpu/mce/core.c
1152
for_each_possible_cpu(cpu)
arch/x86/kernel/cpu/mce/core.c
1153
memset(&per_cpu(hw_errs_seen, cpu), 0, sizeof(struct mce_hw_err));
arch/x86/kernel/cpu/mce/core.c
130
void mce_prep_record_per_cpu(unsigned int cpu, struct mce *m)
arch/x86/kernel/cpu/mce/core.c
132
m->cpu = cpu;
arch/x86/kernel/cpu/mce/core.c
133
m->extcpu = cpu;
arch/x86/kernel/cpu/mce/core.c
1335
unsigned int cpu = smp_processor_id();
arch/x86/kernel/cpu/mce/core.c
1337
if (arch_cpu_is_offline(cpu) ||
arch/x86/kernel/cpu/mce/core.c
1338
(crashing_cpu != -1 && crashing_cpu != cpu)) {
arch/x86/kernel/cpu/mce/core.c
134
m->apicid = cpu_data(cpu).topo.initial_apicid;
arch/x86/kernel/cpu/mce/core.c
135
m->microcode = cpu_data(cpu).microcode;
arch/x86/kernel/cpu/mce/core.c
136
m->ppin = topology_ppin(cpu);
arch/x86/kernel/cpu/mce/core.c
137
m->socketid = topology_physical_package_id(cpu);
arch/x86/kernel/cpu/mce/core.c
1840
int cpu;
arch/x86/kernel/cpu/mce/core.c
1842
for_each_online_cpu(cpu)
arch/x86/kernel/cpu/mce/core.c
1843
timer_delete_sync(&per_cpu(mce_timer, cpu));
arch/x86/kernel/cpu/mce/core.c
2685
static int mce_device_create(unsigned int cpu)
arch/x86/kernel/cpu/mce/core.c
2691
dev = per_cpu(mce_device, cpu);
arch/x86/kernel/cpu/mce/core.c
2698
dev->id = cpu;
arch/x86/kernel/cpu/mce/core.c
2713
for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
arch/x86/kernel/cpu/mce/core.c
2718
cpumask_set_cpu(cpu, mce_device_initialized);
arch/x86/kernel/cpu/mce/core.c
2719
per_cpu(mce_device, cpu) = dev;
arch/x86/kernel/cpu/mce/core.c
2734
static void mce_device_remove(unsigned int cpu)
arch/x86/kernel/cpu/mce/core.c
2736
struct device *dev = per_cpu(mce_device, cpu);
arch/x86/kernel/cpu/mce/core.c
2739
if (!cpumask_test_cpu(cpu, mce_device_initialized))
arch/x86/kernel/cpu/mce/core.c
2745
for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
arch/x86/kernel/cpu/mce/core.c
2749
cpumask_clear_cpu(cpu, mce_device_initialized);
arch/x86/kernel/cpu/mce/core.c
2750
per_cpu(mce_device, cpu) = NULL;
arch/x86/kernel/cpu/mce/core.c
2783
static int mce_cpu_dead(unsigned int cpu)
arch/x86/kernel/cpu/mce/core.c
2791
static int mce_cpu_online(unsigned int cpu)
arch/x86/kernel/cpu/mce/core.c
2795
mce_device_create(cpu);
arch/x86/kernel/cpu/mce/core.c
2796
mce_threshold_create_device(cpu);
arch/x86/kernel/cpu/mce/core.c
2802
static int mce_cpu_pre_down(unsigned int cpu)
arch/x86/kernel/cpu/mce/core.c
2808
mce_threshold_remove_device(cpu);
arch/x86/kernel/cpu/mce/core.c
2809
mce_device_remove(cpu);
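The mce/core.c entries above pair mce_cpu_online() with mce_cpu_pre_down() through the CPU hotplug state machine. A minimal sketch of that pairing, assuming hypothetical example_* callbacks (cpuhp_setup_state() and CPUHP_AP_ONLINE_DYN are the real kernel API; everything else here is invented for illustration):

    #include <linux/cpuhotplug.h>

    /* Hypothetical callbacks mirroring mce_cpu_online()/mce_cpu_pre_down():
     * the online hook builds per-CPU devices, the teardown hook removes
     * them before the CPU goes away. */
    static int example_cpu_online(unsigned int cpu)
    {
            /* create per-CPU state for 'cpu' here */
            return 0;
    }

    static int example_cpu_pre_down(unsigned int cpu)
    {
            /* tear down per-CPU state before 'cpu' is unplugged */
            return 0;
    }

    static enum cpuhp_state example_hp_state;

    static int __init example_init(void)
    {
            int ret;

            /* CPUHP_AP_ONLINE_DYN allocates a dynamic state; the positive
             * state number returned on success is kept for later removal. */
            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/example:online",
                                    example_cpu_online, example_cpu_pre_down);
            if (ret < 0)
                    return ret;
            example_hp_state = ret;
            return 0;
    }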
arch/x86/kernel/cpu/mce/inject.c
180
int cpu = smp_processor_id();
arch/x86/kernel/cpu/mce/inject.c
182
if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
arch/x86/kernel/cpu/mce/inject.c
184
cpumask_clear_cpu(cpu, mce_inject_cpumask);
arch/x86/kernel/cpu/mce/inject.c
194
int cpu = smp_processor_id();
arch/x86/kernel/cpu/mce/inject.c
197
if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
arch/x86/kernel/cpu/mce/inject.c
199
cpumask_clear_cpu(cpu, mce_inject_cpumask);
arch/x86/kernel/cpu/mce/inject.c
210
int cpu = m->extcpu;
arch/x86/kernel/cpu/mce/inject.c
213
pr_info("Triggering MCE exception on CPU %d\n", cpu);
arch/x86/kernel/cpu/mce/inject.c
229
pr_info("MCE exception done on CPU %d\n", cpu);
arch/x86/kernel/cpu/mce/inject.c
231
pr_info("Starting machine check poll CPU %d\n", cpu);
arch/x86/kernel/cpu/mce/inject.c
233
pr_info("Machine check poll done on CPU %d\n", cpu);
arch/x86/kernel/cpu/mce/inject.c
251
int cpu;
arch/x86/kernel/cpu/mce/inject.c
256
for_each_online_cpu(cpu) {
arch/x86/kernel/cpu/mce/inject.c
257
struct mce *mcpu = &per_cpu(injectm, cpu);
arch/x86/kernel/cpu/mce/inject.c
260
cpumask_clear_cpu(cpu, mce_inject_cpumask);
arch/x86/kernel/cpu/mce/inject.c
317
static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
arch/x86/kernel/cpu/mce/inject.c
322
err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h);
arch/x86/kernel/cpu/mce/inject.c
330
err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h);
arch/x86/kernel/cpu/mce/inject.c
505
unsigned int cpu = i_mce.extcpu;
arch/x86/kernel/cpu/mce/inject.c
550
toggle_nb_mca_mst_cpu(topology_amd_node_id(cpu));
arch/x86/kernel/cpu/mce/inject.c
551
cpu = get_nbc_for_node(topology_amd_node_id(cpu));
arch/x86/kernel/cpu/mce/inject.c
555
if (!cpu_online(cpu))
arch/x86/kernel/cpu/mce/inject.c
558
toggle_hw_mce_inject(cpu, true);
arch/x86/kernel/cpu/mce/inject.c
562
smp_call_function_single(cpu, prepare_msrs, &i_mce, 0);
arch/x86/kernel/cpu/mce/inject.c
564
toggle_hw_mce_inject(cpu, false);
arch/x86/kernel/cpu/mce/inject.c
568
smp_call_function_single(cpu, trigger_dfr_int, NULL, 0);
arch/x86/kernel/cpu/mce/inject.c
571
smp_call_function_single(cpu, trigger_thr_int, NULL, 0);
arch/x86/kernel/cpu/mce/inject.c
574
smp_call_function_single(cpu, trigger_mce, NULL, 0);
arch/x86/kernel/cpu/mce/inject.c
729
int cpu;
arch/x86/kernel/cpu/mce/inject.c
739
cpu = get_cpu();
arch/x86/kernel/cpu/mce/inject.c
749
toggle_hw_mce_inject(cpu, true);
arch/x86/kernel/cpu/mce/inject.c
761
toggle_hw_mce_inject(cpu, false);
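mce/inject.c above repeatedly uses smp_call_function_single() to run MSR setup and trigger functions on the CPU selected for injection. A minimal sketch of that call shape (run_on_cpu and example_inject are made-up names):

    #include <linux/printk.h>
    #include <linux/smp.h>

    /* Runs on the target CPU in IPI context. */
    static void run_on_cpu(void *info)
    {
            pr_info("running on CPU %d\n", smp_processor_id());
    }

    static void example_inject(int cpu)
    {
            /* The last argument selects whether to spin until the remote
             * function completes (1) or return immediately (0). */
            smp_call_function_single(cpu, run_on_cpu, NULL, 1);
    }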
arch/x86/kernel/cpu/mce/internal.h
267
void mce_prep_record_per_cpu(unsigned int cpu, struct mce *m);
arch/x86/kernel/cpu/mce/internal.h
270
void mce_threshold_create_device(unsigned int cpu);
arch/x86/kernel/cpu/mce/internal.h
271
void mce_threshold_remove_device(unsigned int cpu);
arch/x86/kernel/cpu/mce/internal.h
303
static inline void mce_threshold_create_device(unsigned int cpu) { }
arch/x86/kernel/cpu/mce/internal.h
304
static inline void mce_threshold_remove_device(unsigned int cpu) { }
arch/x86/kernel/cpu/microcode/amd.c
1000
cpu, mc_amd->hdr.patch_id);
arch/x86/kernel/cpu/microcode/amd.c
1020
unsigned int cpu = smp_processor_id();
arch/x86/kernel/cpu/microcode/amd.c
1022
ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
arch/x86/kernel/cpu/microcode/amd.c
1023
apply_microcode_amd(cpu);
arch/x86/kernel/cpu/microcode/amd.c
1170
unsigned int nid, cpu;
arch/x86/kernel/cpu/microcode/amd.c
1179
cpu = cpumask_first(cpumask_of_node(nid));
arch/x86/kernel/cpu/microcode/amd.c
1180
c = &cpu_data(cpu);
arch/x86/kernel/cpu/microcode/amd.c
1182
p = find_patch(cpu);
arch/x86/kernel/cpu/microcode/amd.c
1239
static enum ucode_state request_microcode_amd(int cpu, struct device *device)
arch/x86/kernel/cpu/microcode/amd.c
1242
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/kernel/cpu/microcode/amd.c
1270
static void microcode_fini_cpu_amd(int cpu)
arch/x86/kernel/cpu/microcode/amd.c
1272
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
arch/x86/kernel/cpu/microcode/amd.c
326
int cpu = smp_processor_id();
arch/x86/kernel/cpu/microcode/amd.c
328
if (!microcode_rev[cpu]) {
arch/x86/kernel/cpu/microcode/amd.c
332
microcode_rev[cpu] = base_rev;
arch/x86/kernel/cpu/microcode/amd.c
334
ucode_dbg("CPU%d, base_rev: 0x%x\n", cpu, base_rev);
arch/x86/kernel/cpu/microcode/amd.c
337
return microcode_rev[cpu];
arch/x86/kernel/cpu/microcode/amd.c
916
static struct ucode_patch *find_patch(unsigned int cpu)
arch/x86/kernel/cpu/microcode/amd.c
918
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
arch/x86/kernel/cpu/microcode/amd.c
932
void reload_ucode_amd(unsigned int cpu)
arch/x86/kernel/cpu/microcode/amd.c
938
p = find_patch(cpu);
arch/x86/kernel/cpu/microcode/amd.c
951
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
arch/x86/kernel/cpu/microcode/amd.c
953
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
arch/x86/kernel/cpu/microcode/amd.c
963
p = find_patch(cpu);
arch/x86/kernel/cpu/microcode/amd.c
970
static enum ucode_state apply_microcode_amd(int cpu)
arch/x86/kernel/cpu/microcode/amd.c
972
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/kernel/cpu/microcode/amd.c
979
BUG_ON(raw_smp_processor_id() != cpu);
arch/x86/kernel/cpu/microcode/amd.c
981
uci = ucode_cpu_info + cpu;
arch/x86/kernel/cpu/microcode/amd.c
983
p = find_patch(cpu);
arch/x86/kernel/cpu/microcode/core.c
275
static void reload_early_microcode(unsigned int cpu)
arch/x86/kernel/cpu/microcode/core.c
289
reload_ucode_amd(cpu);
arch/x86/kernel/cpu/microcode/core.c
410
static noinstr void load_secondary(unsigned int cpu)
arch/x86/kernel/cpu/microcode/core.c
430
ret = microcode_ops->apply_microcode(cpu);
arch/x86/kernel/cpu/microcode/core.c
439
static void __load_primary(unsigned int cpu)
arch/x86/kernel/cpu/microcode/core.c
441
struct cpumask *secondaries = topology_sibling_cpumask(cpu);
arch/x86/kernel/cpu/microcode/core.c
453
ret = microcode_ops->apply_microcode(cpu);
arch/x86/kernel/cpu/microcode/core.c
469
if (sibling != cpu)
arch/x86/kernel/cpu/microcode/core.c
476
unsigned int cpu, timeout;
arch/x86/kernel/cpu/microcode/core.c
478
for_each_cpu(cpu, &cpu_offline_mask) {
arch/x86/kernel/cpu/microcode/core.c
480
per_cpu(ucode_ctrl.nmi_enabled, cpu) = true;
arch/x86/kernel/cpu/microcode/core.c
481
apic_send_nmi_to_offline_cpu(cpu);
arch/x86/kernel/cpu/microcode/core.c
496
unsigned int cpu;
arch/x86/kernel/cpu/microcode/core.c
498
for_each_cpu(cpu, &cpu_offline_mask)
arch/x86/kernel/cpu/microcode/core.c
499
per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE;
arch/x86/kernel/cpu/microcode/core.c
502
static void load_primary(unsigned int cpu)
arch/x86/kernel/cpu/microcode/core.c
508
if (!cpu && nr_offl)
arch/x86/kernel/cpu/microcode/core.c
513
__load_primary(cpu);
arch/x86/kernel/cpu/microcode/core.c
516
if (!cpu && nr_offl)
arch/x86/kernel/cpu/microcode/core.c
537
unsigned int cpu = raw_smp_processor_id();
arch/x86/kernel/cpu/microcode/core.c
539
if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) {
arch/x86/kernel/cpu/microcode/core.c
541
load_primary(cpu);
arch/x86/kernel/cpu/microcode/core.c
544
load_secondary(cpu);
arch/x86/kernel/cpu/microcode/core.c
590
unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0;
arch/x86/kernel/cpu/microcode/core.c
630
for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
arch/x86/kernel/cpu/microcode/core.c
631
switch (per_cpu(ucode_ctrl.result, cpu)) {
arch/x86/kernel/cpu/microcode/core.c
703
unsigned int cpu;
arch/x86/kernel/cpu/microcode/core.c
710
for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
arch/x86/kernel/cpu/microcode/core.c
724
if (!cpu_online(cpu)) {
arch/x86/kernel/cpu/microcode/core.c
725
if (topology_is_primary_thread(cpu) || !allow_smt_offline) {
arch/x86/kernel/cpu/microcode/core.c
726
pr_err("CPU %u not online, loading aborted\n", cpu);
arch/x86/kernel/cpu/microcode/core.c
729
cpumask_set_cpu(cpu, &cpu_offline_mask);
arch/x86/kernel/cpu/microcode/core.c
730
per_cpu(ucode_ctrl, cpu) = ctrl;
arch/x86/kernel/cpu/microcode/core.c
738
ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu));
arch/x86/kernel/cpu/microcode/core.c
739
per_cpu(ucode_ctrl, cpu) = ctrl;
arch/x86/kernel/cpu/microcode/core.c
814
static void microcode_fini_cpu(int cpu)
arch/x86/kernel/cpu/microcode/core.c
817
microcode_ops->microcode_fini_cpu(cpu);
arch/x86/kernel/cpu/microcode/core.c
825
int cpu = smp_processor_id();
arch/x86/kernel/cpu/microcode/core.c
826
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
arch/x86/kernel/cpu/microcode/core.c
829
microcode_ops->apply_microcode(cpu);
arch/x86/kernel/cpu/microcode/core.c
831
reload_early_microcode(cpu);
arch/x86/kernel/cpu/microcode/core.c
847
static int mc_cpu_online(unsigned int cpu)
arch/x86/kernel/cpu/microcode/core.c
849
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
arch/x86/kernel/cpu/microcode/core.c
850
struct device *dev = get_cpu_device(cpu);
arch/x86/kernel/cpu/microcode/core.c
854
microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
arch/x86/kernel/cpu/microcode/core.c
855
cpu_data(cpu).microcode = uci->cpu_sig.rev;
arch/x86/kernel/cpu/microcode/core.c
856
if (!cpu)
arch/x86/kernel/cpu/microcode/core.c
860
pr_err("Failed to create group for CPU%d\n", cpu);
arch/x86/kernel/cpu/microcode/core.c
864
static int mc_cpu_down_prep(unsigned int cpu)
arch/x86/kernel/cpu/microcode/core.c
866
struct device *dev = get_cpu_device(cpu);
arch/x86/kernel/cpu/microcode/core.c
868
microcode_fini_cpu(cpu);
arch/x86/kernel/cpu/microcode/intel.c
609
int cpu, err;
arch/x86/kernel/cpu/microcode/intel.c
625
for_each_cpu(cpu, cpu_primary_thread_mask) {
arch/x86/kernel/cpu/microcode/intel.c
626
if (topology_logical_package_id(cpu) == pkg_id)
arch/x86/kernel/cpu/microcode/intel.c
629
pkg_id = topology_logical_package_id(cpu);
arch/x86/kernel/cpu/microcode/intel.c
631
err = rdmsrq_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &mmio_pa);
arch/x86/kernel/cpu/microcode/intel.c
638
err, cpu, pkg_id);
arch/x86/kernel/cpu/microcode/intel.c
784
static enum ucode_state apply_microcode_late(int cpu)
arch/x86/kernel/cpu/microcode/intel.c
786
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
arch/x86/kernel/cpu/microcode/intel.c
791
if (WARN_ON_ONCE(smp_processor_id() != cpu))
arch/x86/kernel/cpu/microcode/intel.c
798
cpu_data(cpu).microcode = uci->cpu_sig.rev;
arch/x86/kernel/cpu/microcode/intel.c
799
if (!cpu)
arch/x86/kernel/cpu/microcode/intel.c
831
static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
arch/x86/kernel/cpu/microcode/intel.c
833
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
arch/x86/kernel/cpu/microcode/intel.c
908
static bool is_blacklisted(unsigned int cpu)
arch/x86/kernel/cpu/microcode/intel.c
910
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/kernel/cpu/microcode/intel.c
930
static enum ucode_state request_microcode_fw(int cpu, struct device *device)
arch/x86/kernel/cpu/microcode/intel.c
932
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/kernel/cpu/microcode/intel.c
939
if (is_blacklisted(cpu))
arch/x86/kernel/cpu/microcode/intel.c
953
ret = parse_microcode_blobs(cpu, &iter);
arch/x86/kernel/cpu/microcode/internal.h
108
void reload_ucode_amd(unsigned int cpu);
arch/x86/kernel/cpu/microcode/internal.h
114
static inline void reload_ucode_amd(unsigned int cpu) { }
arch/x86/kernel/cpu/microcode/internal.h
25
enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);
arch/x86/kernel/cpu/microcode/internal.h
26
void (*microcode_fini_cpu)(int cpu);
arch/x86/kernel/cpu/microcode/internal.h
33
enum ucode_state (*apply_microcode)(int cpu);
arch/x86/kernel/cpu/microcode/internal.h
35
int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
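microcode/internal.h above declares a table of per-CPU callbacks (apply_microcode, collect_cpu_info, microcode_fini_cpu) that each vendor driver fills in. A minimal sketch of that dispatch pattern, with example_ucode_ops as an invented stand-in for the real ops structure, reusing enum ucode_state and struct cpu_signature as declared in internal.h above:

    /* Reduced vendor-ops table: every hook takes the target CPU number,
     * and the core dispatches through a single ops pointer. */
    struct example_ucode_ops {
            enum ucode_state (*apply_microcode)(int cpu);
            int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
            void (*microcode_fini_cpu)(int cpu);
    };

    static const struct example_ucode_ops *example_ops;

    static void example_fini_cpu(int cpu)
    {
            if (example_ops && example_ops->microcode_fini_cpu)
                    example_ops->microcode_fini_cpu(cpu);
    }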
arch/x86/kernel/cpu/proc.c
101
show_cpuinfo_core(m, c, cpu);
arch/x86/kernel/cpu/proc.c
20
unsigned int cpu)
arch/x86/kernel/cpu/proc.c
25
cpumask_weight(topology_core_cpumask(cpu)));
arch/x86/kernel/cpu/proc.c
66
unsigned int cpu;
arch/x86/kernel/cpu/proc.c
69
cpu = c->cpu_index;
arch/x86/kernel/cpu/proc.c
75
cpu,
arch/x86/kernel/cpu/proc.c
89
int freq = arch_freq_get_on_cpu(cpu);
arch/x86/kernel/cpu/resctrl/core.c
454
static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
arch/x86/kernel/cpu/resctrl/core.c
459
return get_cpu_cacheinfo_id(cpu, scope);
arch/x86/kernel/cpu/resctrl/core.c
461
return cpu_to_node(cpu);
arch/x86/kernel/cpu/resctrl/core.c
463
return topology_physical_package_id(cpu);
arch/x86/kernel/cpu/resctrl/core.c
471
static void domain_add_cpu_ctrl(int cpu, struct rdt_resource *r)
arch/x86/kernel/cpu/resctrl/core.c
473
int id = get_domain_id_from_scope(cpu, r->ctrl_scope);
arch/x86/kernel/cpu/resctrl/core.c
484
cpu, r->ctrl_scope, r->name);
arch/x86/kernel/cpu/resctrl/core.c
494
cpumask_set_cpu(cpu, &d->hdr.cpu_mask);
arch/x86/kernel/cpu/resctrl/core.c
500
hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
arch/x86/kernel/cpu/resctrl/core.c
508
cpumask_set_cpu(cpu, &d->hdr.cpu_mask);
arch/x86/kernel/cpu/resctrl/core.c
527
static void l3_mon_domain_setup(int cpu, int id, struct rdt_resource *r, struct list_head *add_pos)
arch/x86/kernel/cpu/resctrl/core.c
534
hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
arch/x86/kernel/cpu/resctrl/core.c
542
ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);
arch/x86/kernel/cpu/resctrl/core.c
544
pr_warn_once("Can't find L3 cache for CPU:%d resource %s\n", cpu, r->name);
arch/x86/kernel/cpu/resctrl/core.c
549
cpumask_set_cpu(cpu, &d->hdr.cpu_mask);
arch/x86/kernel/cpu/resctrl/core.c
568
static void domain_add_cpu_mon(int cpu, struct rdt_resource *r)
arch/x86/kernel/cpu/resctrl/core.c
570
int id = get_domain_id_from_scope(cpu, r->mon_scope);
arch/x86/kernel/cpu/resctrl/core.c
578
cpu, r->mon_scope, r->name);
arch/x86/kernel/cpu/resctrl/core.c
584
cpumask_set_cpu(cpu, &hdr->cpu_mask);
arch/x86/kernel/cpu/resctrl/core.c
592
l3_mon_domain_setup(cpu, id, r, add_pos);
arch/x86/kernel/cpu/resctrl/core.c
596
intel_aet_mon_domain_setup(cpu, id, r, add_pos);
arch/x86/kernel/cpu/resctrl/core.c
604
static void domain_add_cpu(int cpu, struct rdt_resource *r)
arch/x86/kernel/cpu/resctrl/core.c
607
domain_add_cpu_ctrl(cpu, r);
arch/x86/kernel/cpu/resctrl/core.c
609
domain_add_cpu_mon(cpu, r);
arch/x86/kernel/cpu/resctrl/core.c
612
static void domain_remove_cpu_ctrl(int cpu, struct rdt_resource *r)
arch/x86/kernel/cpu/resctrl/core.c
614
int id = get_domain_id_from_scope(cpu, r->ctrl_scope);
arch/x86/kernel/cpu/resctrl/core.c
623
cpu, r->ctrl_scope, r->name);
arch/x86/kernel/cpu/resctrl/core.c
630
id, cpu, r->name);
arch/x86/kernel/cpu/resctrl/core.c
634
cpumask_clear_cpu(cpu, &hdr->cpu_mask);
arch/x86/kernel/cpu/resctrl/core.c
657
static void domain_remove_cpu_mon(int cpu, struct rdt_resource *r)
arch/x86/kernel/cpu/resctrl/core.c
659
int id = get_domain_id_from_scope(cpu, r->mon_scope);
arch/x86/kernel/cpu/resctrl/core.c
666
cpu, r->mon_scope, r->name);
arch/x86/kernel/cpu/resctrl/core.c
673
id, cpu, r->name);
arch/x86/kernel/cpu/resctrl/core.c
677
cpumask_clear_cpu(cpu, &hdr->cpu_mask);
arch/x86/kernel/cpu/resctrl/core.c
716
static void domain_remove_cpu(int cpu, struct rdt_resource *r)
arch/x86/kernel/cpu/resctrl/core.c
719
domain_remove_cpu_ctrl(cpu, r);
arch/x86/kernel/cpu/resctrl/core.c
721
domain_remove_cpu_mon(cpu, r);
arch/x86/kernel/cpu/resctrl/core.c
724
static void clear_closid_rmid(int cpu)
arch/x86/kernel/cpu/resctrl/core.c
736
static int resctrl_arch_online_cpu(unsigned int cpu)
arch/x86/kernel/cpu/resctrl/core.c
742
domain_add_cpu(cpu, r);
arch/x86/kernel/cpu/resctrl/core.c
745
clear_closid_rmid(cpu);
arch/x86/kernel/cpu/resctrl/core.c
746
resctrl_online_cpu(cpu);
arch/x86/kernel/cpu/resctrl/core.c
751
static int resctrl_arch_offline_cpu(unsigned int cpu)
arch/x86/kernel/cpu/resctrl/core.c
755
resctrl_offline_cpu(cpu);
arch/x86/kernel/cpu/resctrl/core.c
759
domain_remove_cpu(cpu, r);
arch/x86/kernel/cpu/resctrl/core.c
762
clear_closid_rmid(cpu);
arch/x86/kernel/cpu/resctrl/core.c
770
int cpu;
arch/x86/kernel/cpu/resctrl/core.c
783
for_each_online_cpu(cpu)
arch/x86/kernel/cpu/resctrl/core.c
784
domain_add_cpu_mon(cpu, r);
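resctrl's domain_add_cpu_ctrl() and l3_mon_domain_setup() above allocate domain structures NUMA-locally with kzalloc_node(..., cpu_to_node(cpu)) and then record the CPU in the domain's mask. A minimal sketch, with example_dom standing in for the real rdt domain types:

    #include <linux/cpumask.h>
    #include <linux/slab.h>
    #include <linux/topology.h>

    struct example_dom {
            struct cpumask cpu_mask;
    };

    /* Allocate the domain on the discovering CPU's NUMA node, then add
     * that CPU to the domain's mask, as domain_add_cpu_ctrl() does. */
    static struct example_dom *example_domain_add(int cpu)
    {
            struct example_dom *d;

            d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
            if (d)
                    cpumask_set_cpu(cpu, &d->cpu_mask);
            return d;
    }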
arch/x86/kernel/cpu/resctrl/intel_aet.c
387
void intel_aet_mon_domain_setup(int cpu, int id, struct rdt_resource *r,
arch/x86/kernel/cpu/resctrl/intel_aet.c
393
d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
arch/x86/kernel/cpu/resctrl/intel_aet.c
400
cpumask_set_cpu(cpu, &d->hdr.cpu_mask);
arch/x86/kernel/cpu/resctrl/internal.h
240
void intel_aet_mon_domain_setup(int cpu, int id, struct rdt_resource *r,
arch/x86/kernel/cpu/resctrl/internal.h
251
static inline void intel_aet_mon_domain_setup(int cpu, int id, struct rdt_resource *r,
arch/x86/kernel/cpu/resctrl/monitor.c
126
static int logical_rmid_to_physical_rmid(int cpu, int lrmid)
arch/x86/kernel/cpu/resctrl/monitor.c
133
return lrmid + (cpu_to_node(cpu) % snc_nodes_per_l3_cache) * r->mon.num_rmid;
arch/x86/kernel/cpu/resctrl/monitor.c
179
int cpu = cpumask_any(&d->hdr.cpu_mask);
arch/x86/kernel/cpu/resctrl/monitor.c
187
prmid = logical_rmid_to_physical_rmid(cpu, rmid);
arch/x86/kernel/cpu/resctrl/monitor.c
250
int cpu;
arch/x86/kernel/cpu/resctrl/monitor.c
263
cpu = cpumask_any(&hdr->cpu_mask);
arch/x86/kernel/cpu/resctrl/monitor.c
264
prmid = logical_rmid_to_physical_rmid(cpu, rmid);
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
322
miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
327
hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
arch/x86/kernel/cpu/resctrl/rdtgroup.c
138
int cpu;
arch/x86/kernel/cpu/resctrl/rdtgroup.c
157
for_each_cpu(cpu, &d->hdr.cpu_mask)
arch/x86/kernel/cpu/resctrl/rdtgroup.c
158
cpumask_set_cpu(cpu, cpu_mask);
arch/x86/kernel/cpu/topology.c
109
int cpu = topo_lookup_cpuid(apic_id);
arch/x86/kernel/cpu/topology.c
111
if (cpu >= 0)
arch/x86/kernel/cpu/topology.c
112
return cpu;
arch/x86/kernel/cpu/topology.c
117
static void topo_set_cpuids(unsigned int cpu, u32 apic_id, u32 acpi_id)
arch/x86/kernel/cpu/topology.c
120
early_per_cpu(x86_cpu_to_apicid, cpu) = apic_id;
arch/x86/kernel/cpu/topology.c
121
early_per_cpu(x86_cpu_to_acpiid, cpu) = acpi_id;
arch/x86/kernel/cpu/topology.c
123
set_cpu_present(cpu, true);
arch/x86/kernel/cpu/topology.c
220
int cpu, dom;
arch/x86/kernel/cpu/topology.c
232
cpu = 0;
arch/x86/kernel/cpu/topology.c
234
cpu = topo_get_cpunr(apic_id);
arch/x86/kernel/cpu/topology.c
236
cpuid_to_apicid[cpu] = apic_id;
arch/x86/kernel/cpu/topology.c
237
topo_set_cpuids(cpu, apic_id, acpi_id);
arch/x86/kernel/cpu/topology.c
358
int topology_get_primary_thread(unsigned int cpu)
arch/x86/kernel/cpu/topology.c
360
u32 apic_id = cpuid_to_apicid[cpu];
arch/x86/kernel/cpu/topology.c
378
int cpu;
arch/x86/kernel/cpu/topology.c
387
cpu = topo_lookup_cpuid(apic_id);
arch/x86/kernel/cpu/topology.c
388
if (cpu < 0)
arch/x86/kernel/cpu/topology.c
392
topo_set_cpuids(cpu, apic_id, acpi_id);
arch/x86/kernel/cpu/topology.c
393
cpu_mark_primary_thread(cpu, apic_id);
arch/x86/kernel/cpu/topology.c
394
return cpu;
arch/x86/kernel/cpu/topology.c
401
void topology_hotunplug_apic(unsigned int cpu)
arch/x86/kernel/cpu/topology.c
403
u32 apic_id = cpuid_to_apicid[cpu];
arch/x86/kernel/cpu/topology.c
408
per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
arch/x86/kernel/cpu/topology.c
410
set_cpu_present(cpu, false);
arch/x86/kernel/cpu/topology.c
460
unsigned int cnta, cntb, cpu, allowed = 1;
arch/x86/kernel/cpu/topology.c
543
for (cpu = 0; cpu < allowed; cpu++) {
arch/x86/kernel/cpu/topology.c
544
apicid = cpuid_to_apicid[cpu];
arch/x86/kernel/cpu/topology.c
546
set_cpu_possible(cpu, true);
arch/x86/kernel/cpu/topology.c
551
cpu_mark_primary_thread(cpu, apicid);
arch/x86/kernel/cpu/topology.c
552
set_cpu_present(cpu, test_bit(apicid, phys_cpu_present_map));
arch/x86/kernel/cpu/topology.c
73
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
arch/x86/kernel/cpu/topology.c
75
return phys_id == (u64)cpuid_to_apicid[cpu];
arch/x86/kernel/cpu/topology.c
78
static void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid)
arch/x86/kernel/cpu/topology.c
81
cpumask_set_cpu(cpu, &__cpu_primary_thread_mask);
arch/x86/kernel/cpu/topology_common.c
206
unsigned int dom, cpu = smp_processor_id();
arch/x86/kernel/cpu/topology_common.c
214
cpu, c->topo.initial_apicid, c->topo.apicid);
arch/x86/kernel/cpu/topology_common.c
217
if (c->topo.apicid != cpuid_to_apicid[cpu]) {
arch/x86/kernel/cpu/topology_common.c
219
cpu, cpuid_to_apicid[cpu], c->topo.apicid);
arch/x86/kernel/cpu/topology_common.c
226
pr_err(FW_BUG "CPU%d: Topology domain %u shift %u != %u\n", cpu, dom,
arch/x86/kernel/cpu/umwait.c
54
static int umwait_cpu_online(unsigned int cpu)
arch/x86/kernel/cpu/umwait.c
66
static int umwait_cpu_offline(unsigned int cpu)
arch/x86/kernel/cpu/vmware.c
228
static u64 vmware_steal_clock(int cpu)
arch/x86/kernel/cpu/vmware.c
230
struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu);
arch/x86/kernel/cpu/vmware.c
257
int cpu = smp_processor_id();
arch/x86/kernel/cpu/vmware.c
258
struct vmware_steal_time *st = &per_cpu(vmw_steal_time, cpu);
arch/x86/kernel/cpu/vmware.c
269
cpu, (unsigned long long) slow_virt_to_phys(st));
arch/x86/kernel/cpu/vmware.c
310
static int vmware_cpu_online(unsigned int cpu)
arch/x86/kernel/cpu/vmware.c
318
static int vmware_cpu_down_prepare(unsigned int cpu)
arch/x86/kernel/cpu/vmware.c
342
pv_ops.cpu.io_delay = paravirt_nop;
arch/x86/kernel/cpuid.c
102
unsigned int cpu;
arch/x86/kernel/cpuid.c
105
cpu = iminor(file_inode(file));
arch/x86/kernel/cpuid.c
106
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
arch/x86/kernel/cpuid.c
109
c = &cpu_data(cpu);
arch/x86/kernel/cpuid.c
136
static int cpuid_device_create(unsigned int cpu)
arch/x86/kernel/cpuid.c
140
dev = device_create(&cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL,
arch/x86/kernel/cpuid.c
141
"cpu%d", cpu);
arch/x86/kernel/cpuid.c
145
static int cpuid_device_destroy(unsigned int cpu)
arch/x86/kernel/cpuid.c
147
device_destroy(&cpuid_class, MKDEV(CPUID_MAJOR, cpu));
arch/x86/kernel/cpuid.c
66
int cpu = iminor(file_inode(file));
arch/x86/kernel/cpuid.c
83
err = smp_call_function_single_async(cpu, &csd);
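cpuid.c above issues the remote CPUID read asynchronously with smp_call_function_single_async() and an on-stack csd. A minimal sketch of that shape; because the csd must stay valid until the remote function has run, the caller waits on a completion before returning, as cpuid.c itself does (example_* names are invented):

    #include <linux/completion.h>
    #include <linux/smp.h>

    static DECLARE_COMPLETION(example_done);

    /* Runs on the target CPU in IPI context. */
    static void example_remote_fn(void *info)
    {
            complete(&example_done);
    }

    static int example_async_call(int cpu)
    {
            call_single_data_t csd;
            int err;

            INIT_CSD(&csd, example_remote_fn, NULL);
            err = smp_call_function_single_async(cpu, &csd);
            if (!err)
                    wait_for_completion(&example_done);
            return err;
    }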
arch/x86/kernel/crash.c
55
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
arch/x86/kernel/crash.c
57
crash_save_cpu(regs, cpu);
arch/x86/kernel/doublefault_32.c
107
static void set_df_gdt_entry(unsigned int cpu)
arch/x86/kernel/doublefault_32.c
110
__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS,
arch/x86/kernel/doublefault_32.c
111
&get_cpu_entry_area(cpu)->doublefault_stack.tss);
arch/x86/kernel/doublefault_32.c
117
unsigned int cpu = smp_processor_id();
arch/x86/kernel/doublefault_32.c
118
struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
arch/x86/kernel/doublefault_32.c
128
set_df_gdt_entry(cpu);
arch/x86/kernel/doublefault_32.c
18
static void set_df_gdt_entry(unsigned int cpu);
arch/x86/kernel/dumpstack.c
351
int cpu;
arch/x86/kernel/dumpstack.c
358
cpu = smp_processor_id();
arch/x86/kernel/dumpstack.c
360
if (cpu == die_owner)
arch/x86/kernel/dumpstack.c
366
die_owner = cpu;
arch/x86/kernel/espfix_64.c
125
void init_espfix_ap(int cpu)
arch/x86/kernel/espfix_64.c
141
if (likely(per_cpu(espfix_stack, cpu)))
arch/x86/kernel/espfix_64.c
144
addr = espfix_base_addr(cpu);
arch/x86/kernel/espfix_64.c
145
page = cpu/ESPFIX_STACKS_PER_PAGE;
arch/x86/kernel/espfix_64.c
159
node = cpu_to_node(cpu);
arch/x86/kernel/espfix_64.c
202
per_cpu(espfix_stack, cpu) = addr;
arch/x86/kernel/espfix_64.c
203
per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
arch/x86/kernel/espfix_64.c
75
static inline unsigned long espfix_base_addr(unsigned int cpu)
arch/x86/kernel/espfix_64.c
80
page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
arch/x86/kernel/espfix_64.c
81
slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
arch/x86/kernel/fpu/context.h
36
static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
arch/x86/kernel/fpu/context.h
38
return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
arch/x86/kernel/fpu/context.h
57
int cpu = smp_processor_id();
arch/x86/kernel/fpu/context.h
62
if (!fpregs_state_valid(fpu, cpu)) {
arch/x86/kernel/fpu/context.h
77
fpu->last_cpu = cpu;
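fpu/context.h above trusts the cached FPU registers only when this CPU still owns the context and the context last ran on this CPU. A minimal sketch of that two-part check, with all example_* names invented:

    #include <linux/percpu.h>
    #include <linux/types.h>

    struct example_ctx {
            unsigned int last_cpu;
    };

    static DEFINE_PER_CPU(struct example_ctx *, example_owner);

    /* Mirrors fpregs_state_valid(): the cached state is usable only if
     * this CPU's owner pointer still names 'ctx' AND 'ctx' last ran on
     * this CPU; either condition alone is insufficient. */
    static bool example_state_valid(struct example_ctx *ctx, unsigned int cpu)
    {
            return ctx == this_cpu_read(example_owner) && cpu == ctx->last_cpu;
    }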
arch/x86/kernel/hpet.c
30
unsigned int cpu;
arch/x86/kernel/hpet.c
407
evt->cpumask = cpumask_of(hc->cpu);
arch/x86/kernel/hpet.c
425
hc->cpu = boot_cpu_data.cpu_index;
arch/x86/kernel/hpet.c
638
irq_set_affinity(hc->irq, cpumask_of(hc->cpu));
arch/x86/kernel/hpet.c
647
static void init_one_hpet_msi_clockevent(struct hpet_channel *hc, int cpu)
arch/x86/kernel/hpet.c
651
hc->cpu = cpu;
arch/x86/kernel/hpet.c
652
per_cpu(cpu_hpet_channel, cpu) = hc;
arch/x86/kernel/hpet.c
677
static int hpet_cpuhp_online(unsigned int cpu)
arch/x86/kernel/hpet.c
682
init_one_hpet_msi_clockevent(hc, cpu);
arch/x86/kernel/hpet.c
686
static int hpet_cpuhp_dead(unsigned int cpu)
arch/x86/kernel/hpet.c
688
struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu);
arch/x86/kernel/hpet.c
694
per_cpu(cpu_hpet_channel, cpu) = NULL;
arch/x86/kernel/hw_breakpoint.c
266
int cpu;
arch/x86/kernel/hw_breakpoint.c
287
for_each_possible_cpu(cpu) {
arch/x86/kernel/hw_breakpoint.c
289
if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
arch/x86/kernel/hw_breakpoint.c
298
(unsigned long)&per_cpu(cpu_tss_rw, cpu),
arch/x86/kernel/hw_breakpoint.c
308
(unsigned long)&per_cpu(cpu_tlbstate, cpu),
arch/x86/kernel/hw_breakpoint.c
316
if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu),
arch/x86/kernel/irq.c
215
u64 arch_irq_stat_cpu(unsigned int cpu)
arch/x86/kernel/irq.c
217
u64 sum = irq_stats(cpu)->__nmi_count;
arch/x86/kernel/irq.c
220
sum += irq_stats(cpu)->apic_timer_irqs;
arch/x86/kernel/irq.c
221
sum += irq_stats(cpu)->irq_spurious_count;
arch/x86/kernel/irq.c
222
sum += irq_stats(cpu)->apic_perf_irqs;
arch/x86/kernel/irq.c
223
sum += irq_stats(cpu)->apic_irq_work_irqs;
arch/x86/kernel/irq.c
224
sum += irq_stats(cpu)->icr_read_retry_count;
arch/x86/kernel/irq.c
226
sum += irq_stats(cpu)->x86_platform_ipis;
arch/x86/kernel/irq.c
229
sum += irq_stats(cpu)->irq_resched_count;
arch/x86/kernel/irq.c
230
sum += irq_stats(cpu)->irq_call_count;
arch/x86/kernel/irq.c
233
sum += irq_stats(cpu)->irq_thermal_count;
arch/x86/kernel/irq.c
236
sum += irq_stats(cpu)->irq_threshold_count;
arch/x86/kernel/irq.c
239
sum += irq_stats(cpu)->irq_hv_callback_count;
arch/x86/kernel/irq.c
242
sum += irq_stats(cpu)->irq_hv_reenlightenment_count;
arch/x86/kernel/irq.c
243
sum += irq_stats(cpu)->hyperv_stimer0_count;
arch/x86/kernel/irq.c
246
sum += per_cpu(mce_exception_count, cpu);
arch/x86/kernel/irq.c
247
sum += per_cpu(mce_poll_count, cpu);
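arch_irq_stat_cpu() above folds a long list of per-CPU interrupt counters, including the MCE counts, into a single sum. A minimal sketch of the per_cpu() summation idiom it relies on (example_irq_count is invented):

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(u64, example_irq_count);

    /* Sum one per-CPU counter across all possible CPUs, the same shape
     * as arch_irq_stat_cpu() reading per_cpu(mce_poll_count, cpu). */
    static u64 example_total(void)
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu(example_irq_count, cpu);
            return sum;
    }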
arch/x86/kernel/irq_32.c
107
int irq_init_percpu_irqstack(unsigned int cpu)
arch/x86/kernel/irq_32.c
109
int node = cpu_to_node(cpu);
arch/x86/kernel/irq_32.c
112
if (per_cpu(hardirq_stack_ptr, cpu))
arch/x86/kernel/irq_32.c
124
per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
arch/x86/kernel/irq_32.c
125
per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
arch/x86/kernel/irq_64.c
36
static int map_irq_stack(unsigned int cpu)
arch/x86/kernel/irq_64.c
38
char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu);
arch/x86/kernel/irq_64.c
54
per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
arch/x86/kernel/irq_64.c
62
static int map_irq_stack(unsigned int cpu)
arch/x86/kernel/irq_64.c
64
void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);
arch/x86/kernel/irq_64.c
67
per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
arch/x86/kernel/irq_64.c
72
int irq_init_percpu_irqstack(unsigned int cpu)
arch/x86/kernel/irq_64.c
74
if (per_cpu(hardirq_stack_ptr, cpu))
arch/x86/kernel/irq_64.c
76
return map_irq_stack(cpu);
arch/x86/kernel/itmt.c
168
int arch_asym_cpu_priority(int cpu)
arch/x86/kernel/itmt.c
170
return per_cpu(sched_core_priority, cpu);
arch/x86/kernel/itmt.c
187
void sched_set_itmt_core_prio(int prio, int cpu)
arch/x86/kernel/itmt.c
189
per_cpu(sched_core_priority, cpu) = prio;
arch/x86/kernel/itmt.c
64
int cpu;
arch/x86/kernel/itmt.c
67
for_each_possible_cpu(cpu)
arch/x86/kernel/itmt.c
68
seq_printf(s, "%d\t%d\n", cpu, arch_asym_cpu_priority(cpu));
arch/x86/kernel/jailhouse.c
105
for (cpu = 0; cpu < setup_data.v1.num_cpus; cpu++)
arch/x86/kernel/jailhouse.c
106
topology_register_apic(setup_data.v1.cpu_ids[cpu], CPU_ACPIID_INVALID, true);
arch/x86/kernel/jailhouse.c
99
unsigned int cpu;
arch/x86/kernel/kgdb.c
197
int cpu = raw_smp_processor_id();
arch/x86/kernel/kgdb.c
208
bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
arch/x86/kernel/kgdb.c
228
int cpu;
arch/x86/kernel/kgdb.c
235
for_each_online_cpu(cpu) {
arch/x86/kernel/kgdb.c
237
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
arch/x86/kernel/kgdb.c
245
for_each_online_cpu(cpu) {
arch/x86/kernel/kgdb.c
249
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
arch/x86/kernel/kgdb.c
258
int cpu;
arch/x86/kernel/kgdb.c
263
for_each_online_cpu(cpu) {
arch/x86/kernel/kgdb.c
264
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
arch/x86/kernel/kgdb.c
298
int cpu = raw_smp_processor_id();
arch/x86/kernel/kgdb.c
304
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
arch/x86/kernel/kgdb.c
384
int cpu = raw_smp_processor_id();
arch/x86/kernel/kgdb.c
397
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
arch/x86/kernel/kgdb.c
498
int cpu;
arch/x86/kernel/kgdb.c
504
cpu = raw_smp_processor_id();
arch/x86/kernel/kgdb.c
505
kgdb_nmicallback(cpu, regs);
arch/x86/kernel/kgdb.c
506
set_bit(cpu, was_in_debug_nmi);
arch/x86/kernel/kgdb.c
514
cpu = raw_smp_processor_id();
arch/x86/kernel/kgdb.c
516
if (__test_and_clear_bit(cpu, was_in_debug_nmi))
arch/x86/kernel/kgdb.c
640
int i, cpu;
arch/x86/kernel/kgdb.c
665
for_each_online_cpu(cpu) {
arch/x86/kernel/kgdb.c
666
pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
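kgdb.c above walks per_cpu_ptr(breakinfo[...].pev, cpu) for every online CPU, i.e. dynamically allocated per-CPU data. A minimal sketch of the alloc_percpu()/per_cpu_ptr() pairing, with example_state invented:

    #include <linux/errno.h>
    #include <linux/percpu.h>

    struct example_state {
            int value;
    };

    static struct example_state __percpu *example_states;

    /* alloc_percpu() returns a per-CPU cookie; per_cpu_ptr() resolves it
     * to the instance belonging to one particular CPU. */
    static int example_init(void)
    {
            int cpu;

            example_states = alloc_percpu(struct example_state);
            if (!example_states)
                    return -ENOMEM;

            for_each_online_cpu(cpu)
                    per_cpu_ptr(example_states, cpu)->value = cpu;
            return 0;
    }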
arch/x86/kernel/kvm.c
1072
static void kvm_kick_cpu(int cpu)
arch/x86/kernel/kvm.c
1077
apicid = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/kvm.c
1173
void arch_haltpoll_enable(unsigned int cpu)
arch/x86/kernel/kvm.c
1182
smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
arch/x86/kernel/kvm.c
1186
void arch_haltpoll_disable(unsigned int cpu)
arch/x86/kernel/kvm.c
1192
smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
arch/x86/kernel/kvm.c
143
n->cpu = smp_processor_id();
arch/x86/kernel/kvm.c
200
if (n->cpu == smp_processor_id())
arch/x86/kernel/kvm.c
246
dummy->cpu = smp_processor_id();
arch/x86/kernel/kvm.c
330
pv_ops.cpu.io_delay = kvm_io_delay;
arch/x86/kernel/kvm.c
339
int cpu = smp_processor_id();
arch/x86/kernel/kvm.c
340
struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
arch/x86/kernel/kvm.c
346
pr_debug("stealtime: cpu %d, msr %llx\n", cpu,
arch/x86/kernel/kvm.c
420
static u64 kvm_steal_clock(int cpu)
arch/x86/kernel/kvm.c
426
src = &per_cpu(steal_time, cpu);
arch/x86/kernel/kvm.c
452
int cpu;
arch/x86/kernel/kvm.c
458
for_each_possible_cpu(cpu) {
arch/x86/kernel/kvm.c
459
__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
arch/x86/kernel/kvm.c
460
__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
arch/x86/kernel/kvm.c
461
__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
arch/x86/kernel/kvm.c
478
static int kvm_cpu_online(unsigned int cpu)
arch/x86/kernel/kvm.c
521
int cpu, min = 0, max = 0;
arch/x86/kernel/kvm.c
544
for_each_cpu(cpu, mask) {
arch/x86/kernel/kvm.c
545
apic_id = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/kvm.c
652
int cpu;
arch/x86/kernel/kvm.c
657
for_each_cpu(cpu, mask) {
arch/x86/kernel/kvm.c
658
if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {
arch/x86/kernel/kvm.c
659
kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
arch/x86/kernel/kvm.c
669
int cpu;
arch/x86/kernel/kvm.c
678
for_each_cpu(cpu, flushmask) {
arch/x86/kernel/kvm.c
684
src = &per_cpu(steal_time, cpu);
arch/x86/kernel/kvm.c
689
__cpumask_clear_cpu(cpu, flushmask);
arch/x86/kernel/kvm.c
698
int cpu;
arch/x86/kernel/kvm.c
704
for_each_possible_cpu(cpu) {
arch/x86/kernel/kvm.c
705
zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
arch/x86/kernel/kvm.c
706
GFP_KERNEL, cpu_to_node(cpu));
arch/x86/kernel/kvm.c
726
static int kvm_cpu_down_prepare(unsigned int cpu)
arch/x86/kernel/kvm.c
803
bool __kvm_vcpu_is_preempted(long cpu);
arch/x86/kernel/kvm.c
805
__visible bool __kvm_vcpu_is_preempted(long cpu)
arch/x86/kernel/kvm.c
807
struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
arch/x86/kernel/kvm.c
92
int cpu;
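kvm.c's kvm_steal_clock() above looks up the per-CPU steal-time record of a possibly remote CPU. A much-simplified sketch of that lookup; the real code loops on the record's version field for a consistent snapshot, and example_steal_time is an invented stand-in for kvm.c's static per-CPU variable:

    #include <linux/percpu.h>
    #include <asm/kvm_para.h>

    static DEFINE_PER_CPU(struct kvm_steal_time, example_steal_time);

    /* Read the accumulated steal time recorded for 'cpu'. */
    static u64 example_steal_clock(int cpu)
    {
            struct kvm_steal_time *src = &per_cpu(example_steal_time, cpu);

            return READ_ONCE(src->steal);
    }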
arch/x86/kernel/kvmclock.c
265
static int kvmclock_setup_percpu(unsigned int cpu)
arch/x86/kernel/kvmclock.c
267
struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);
arch/x86/kernel/kvmclock.c
274
if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
arch/x86/kernel/kvmclock.c
278
if (cpu < HVC_BOOT_ARRAY_SIZE)
arch/x86/kernel/kvmclock.c
279
p = &hv_clock_boot[cpu];
arch/x86/kernel/kvmclock.c
281
p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
arch/x86/kernel/kvmclock.c
285
per_cpu(hv_clock_per_cpu, cpu) = p;
arch/x86/kernel/msr.c
114
int cpu = iminor(file_inode(file));
arch/x86/kernel/msr.c
137
err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
arch/x86/kernel/msr.c
152
int cpu = iminor(file_inode(file));
arch/x86/kernel/msr.c
165
err = rdmsr_safe_regs_on_cpu(cpu, regs);
arch/x86/kernel/msr.c
191
err = wrmsr_safe_regs_on_cpu(cpu, regs);
arch/x86/kernel/msr.c
208
unsigned int cpu = iminor(file_inode(file));
arch/x86/kernel/msr.c
214
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
arch/x86/kernel/msr.c
217
c = &cpu_data(cpu);
arch/x86/kernel/msr.c
247
static int msr_device_create(unsigned int cpu)
arch/x86/kernel/msr.c
251
dev = device_create(&msr_class, NULL, MKDEV(MSR_MAJOR, cpu), NULL,
arch/x86/kernel/msr.c
252
"msr%d", cpu);
arch/x86/kernel/msr.c
256
static int msr_device_destroy(unsigned int cpu)
arch/x86/kernel/msr.c
258
device_destroy(&msr_class, MKDEV(MSR_MAJOR, cpu));
arch/x86/kernel/msr.c
58
int cpu = iminor(file_inode(file));
arch/x86/kernel/msr.c
66
err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
arch/x86/kernel/nmi.c
642
int cpu;
arch/x86/kernel/nmi.c
645
for_each_cpu(cpu, btp) {
arch/x86/kernel/nmi.c
646
nsp = per_cpu_ptr(&nmi_stats, cpu);
arch/x86/kernel/nmi.c
656
int cpu;
arch/x86/kernel/nmi.c
665
for_each_cpu(cpu, btp) {
arch/x86/kernel/nmi.c
666
nsp = per_cpu_ptr(&nmi_stats, cpu);
arch/x86/kernel/nmi.c
675
(cpu_is_offline(cpu) << 1) |
arch/x86/kernel/nmi.c
689
pr_alert("%s: CPU %d: %s%s%s\n", __func__, cpu, msgp, modp, msghp);
arch/x86/kernel/nmi_selftest.c
54
int cpu = raw_smp_processor_id();
arch/x86/kernel/nmi_selftest.c
56
if (cpumask_test_and_clear_cpu(cpu, to_cpumask(nmi_ipi_mask)))
arch/x86/kernel/paravirt-spinlocks.c
34
__visible bool __native_vcpu_is_preempted(long cpu)
arch/x86/kernel/paravirt.c
104
.cpu.io_delay = native_io_delay,
arch/x86/kernel/paravirt.c
107
.cpu.cpuid = native_cpuid,
arch/x86/kernel/paravirt.c
108
.cpu.get_debugreg = pv_native_get_debugreg,
arch/x86/kernel/paravirt.c
109
.cpu.set_debugreg = pv_native_set_debugreg,
arch/x86/kernel/paravirt.c
110
.cpu.read_cr0 = native_read_cr0,
arch/x86/kernel/paravirt.c
111
.cpu.write_cr0 = native_write_cr0,
arch/x86/kernel/paravirt.c
112
.cpu.write_cr4 = native_write_cr4,
arch/x86/kernel/paravirt.c
113
.cpu.read_msr = native_read_msr,
arch/x86/kernel/paravirt.c
114
.cpu.write_msr = native_write_msr,
arch/x86/kernel/paravirt.c
115
.cpu.read_msr_safe = native_read_msr_safe,
arch/x86/kernel/paravirt.c
116
.cpu.write_msr_safe = native_write_msr_safe,
arch/x86/kernel/paravirt.c
117
.cpu.read_pmc = native_read_pmc,
arch/x86/kernel/paravirt.c
118
.cpu.load_tr_desc = native_load_tr_desc,
arch/x86/kernel/paravirt.c
119
.cpu.set_ldt = native_set_ldt,
arch/x86/kernel/paravirt.c
120
.cpu.load_gdt = native_load_gdt,
arch/x86/kernel/paravirt.c
121
.cpu.load_idt = native_load_idt,
arch/x86/kernel/paravirt.c
122
.cpu.store_tr = native_store_tr,
arch/x86/kernel/paravirt.c
123
.cpu.load_tls = native_load_tls,
arch/x86/kernel/paravirt.c
124
.cpu.load_gs_index = native_load_gs_index,
arch/x86/kernel/paravirt.c
125
.cpu.write_ldt_entry = native_write_ldt_entry,
arch/x86/kernel/paravirt.c
126
.cpu.write_gdt_entry = native_write_gdt_entry,
arch/x86/kernel/paravirt.c
127
.cpu.write_idt_entry = native_write_idt_entry,
arch/x86/kernel/paravirt.c
129
.cpu.alloc_ldt = paravirt_nop,
arch/x86/kernel/paravirt.c
130
.cpu.free_ldt = paravirt_nop,
arch/x86/kernel/paravirt.c
132
.cpu.load_sp0 = native_load_sp0,
arch/x86/kernel/paravirt.c
135
.cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap,
arch/x86/kernel/paravirt.c
136
.cpu.update_io_bitmap = native_tss_update_io_bitmap,
arch/x86/kernel/paravirt.c
139
.cpu.start_context_switch = paravirt_nop,
arch/x86/kernel/paravirt.c
140
.cpu.end_context_switch = paravirt_nop,
arch/x86/kernel/process.c
531
unsigned int cpu;
arch/x86/kernel/process.c
548
for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
arch/x86/kernel/process.c
549
if (cpu == this_cpu)
arch/x86/kernel/process.c
552
if (!per_cpu(ssb_state, cpu).shared_state)
arch/x86/kernel/process.c
556
st->shared_state = per_cpu(ssb_state, cpu).shared_state;
arch/x86/kernel/process.c
824
unsigned int cpu = smp_processor_id();
arch/x86/kernel/process.c
836
set_cpu_online(cpu, false);
arch/x86/kernel/process.c
849
cpumask_clear_cpu(cpu, &cpus_stop_mask);
arch/x86/kernel/process_32.c
159
int cpu = smp_processor_id();
arch/x86/kernel/process_32.c
163
switch_fpu(prev_p, cpu);
arch/x86/kernel/process_32.c
180
load_TLS(next, cpu);
arch/x86/kernel/process_64.c
614
int cpu = smp_processor_id();
arch/x86/kernel/process_64.c
619
switch_fpu(prev_p, cpu);
arch/x86/kernel/process_64.c
632
load_TLS(next, cpu);
arch/x86/kernel/reboot.c
861
int cpu;
arch/x86/kernel/reboot.c
863
cpu = raw_smp_processor_id();
arch/x86/kernel/reboot.c
870
if (cpu == crashing_cpu)
arch/x86/kernel/reboot.c
875
shootdown_callback(cpu, regs);
arch/x86/kernel/setup.c
1311
bool arch_cpu_is_hotpluggable(int cpu)
arch/x86/kernel/setup.c
1313
return cpu > 0;
arch/x86/kernel/setup_percpu.c
101
static inline void setup_percpu_segment(int cpu)
arch/x86/kernel/setup_percpu.c
105
per_cpu_offset(cpu), 0xFFFFF);
arch/x86/kernel/setup_percpu.c
107
write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
arch/x86/kernel/setup_percpu.c
113
unsigned int cpu;
arch/x86/kernel/setup_percpu.c
164
for_each_possible_cpu(cpu) {
arch/x86/kernel/setup_percpu.c
165
per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
arch/x86/kernel/setup_percpu.c
166
per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
arch/x86/kernel/setup_percpu.c
167
per_cpu(cpu_number, cpu) = cpu;
arch/x86/kernel/setup_percpu.c
168
setup_percpu_segment(cpu);
arch/x86/kernel/setup_percpu.c
177
per_cpu(x86_cpu_to_apicid, cpu) =
arch/x86/kernel/setup_percpu.c
178
early_per_cpu_map(x86_cpu_to_apicid, cpu);
arch/x86/kernel/setup_percpu.c
179
per_cpu(x86_cpu_to_acpiid, cpu) =
arch/x86/kernel/setup_percpu.c
180
early_per_cpu_map(x86_cpu_to_acpiid, cpu);
arch/x86/kernel/setup_percpu.c
183
per_cpu(x86_cpu_to_node_map, cpu) =
arch/x86/kernel/setup_percpu.c
184
early_per_cpu_map(x86_cpu_to_node_map, cpu);
arch/x86/kernel/setup_percpu.c
193
set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
arch/x86/kernel/setup_percpu.c
199
if (!cpu)
arch/x86/kernel/setup_percpu.c
200
switch_gdt_and_percpu_base(cpu);
arch/x86/kernel/setup_percpu.c
63
unsigned int cpu;
arch/x86/kernel/setup_percpu.c
65
for_each_possible_cpu(cpu) {
arch/x86/kernel/setup_percpu.c
66
int node = early_cpu_to_node(cpu);
arch/x86/kernel/setup_percpu.c
91
static int __init pcpu_cpu_to_node(int cpu)
arch/x86/kernel/setup_percpu.c
93
return early_cpu_to_node(cpu);
arch/x86/kernel/smp.c
215
unsigned int cpu;
arch/x86/kernel/smp.c
219
for_each_cpu(cpu, &cpus_stop_mask)
arch/x86/kernel/smp.c
220
__apic_send_IPI(cpu, NMI_VECTOR);
arch/x86/kernel/smpboot.c
1012
early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
arch/x86/kernel/smpboot.c
1015
smpboot_control = cpu;
arch/x86/kernel/smpboot.c
1019
init_espfix_ap(cpu);
arch/x86/kernel/smpboot.c
1022
announce_cpu(cpu, apicid);
arch/x86/kernel/smpboot.c
1052
ret = apic->wakeup_secondary_cpu_64(apicid, start_ip, cpu);
arch/x86/kernel/smpboot.c
1054
ret = apic->wakeup_secondary_cpu(apicid, start_ip, cpu);
arch/x86/kernel/smpboot.c
1056
ret = wakeup_secondary_cpu_via_init(apicid, start_ip, cpu);
arch/x86/kernel/smpboot.c
1060
arch_cpuhp_cleanup_kick_cpu(cpu);
arch/x86/kernel/smpboot.c
1064
int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
arch/x86/kernel/smpboot.c
1066
u32 apicid = apic->cpu_present_to_apicid(cpu);
arch/x86/kernel/smpboot.c
1071
pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);
arch/x86/kernel/smpboot.c
1074
pr_err("CPU %u has invalid APIC ID %x. Aborting bringup\n", cpu, apicid);
arch/x86/kernel/smpboot.c
1079
pr_err("CPU %u APIC ID %x is not present. Aborting bringup\n", cpu, apicid);
arch/x86/kernel/smpboot.c
1090
per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
arch/x86/kernel/smpboot.c
1092
err = common_cpu_up(cpu, tidle);
arch/x86/kernel/smpboot.c
1096
err = do_boot_cpu(apicid, cpu, tidle);
arch/x86/kernel/smpboot.c
1098
pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
arch/x86/kernel/smpboot.c
1103
int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
arch/x86/kernel/smpboot.c
1105
return smp_ops.kick_ap_alive(cpu, tidle);
arch/x86/kernel/smpboot.c
1108
void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu)
arch/x86/kernel/smpboot.c
1115
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
arch/x86/kernel/smpboot.c
1118
smp_ops.cleanup_dead_cpu(cpu);
arch/x86/kernel/smpboot.c
1121
pr_info("CPU %u is now offline\n", cpu);
arch/x86/kernel/smpboot.c
1157
unsigned int cpu, node;
arch/x86/kernel/smpboot.c
1160
for_each_possible_cpu(cpu) {
arch/x86/kernel/smpboot.c
1161
if (cpu)
arch/x86/kernel/smpboot.c
1162
per_cpu(cpu_info.cpu_index, cpu) = nr_cpu_ids;
arch/x86/kernel/smpboot.c
1165
for_each_possible_cpu(cpu) {
arch/x86/kernel/smpboot.c
1166
node = cpu_to_node(cpu);
arch/x86/kernel/smpboot.c
1168
zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1169
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1170
zalloc_cpumask_var_node(&per_cpu(cpu_die_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1171
zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1172
zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1282
int max_threads, cpu;
arch/x86/kernel/smpboot.c
1285
for_each_online_cpu (cpu) {
arch/x86/kernel/smpboot.c
1286
int threads = cpumask_weight(topology_sibling_cpumask(cpu));
arch/x86/kernel/smpboot.c
1294
static void remove_siblinginfo(int cpu)
arch/x86/kernel/smpboot.c
1297
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/kernel/smpboot.c
1299
for_each_cpu(sibling, topology_core_cpumask(cpu)) {
arch/x86/kernel/smpboot.c
1300
cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
arch/x86/kernel/smpboot.c
1304
if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
arch/x86/kernel/smpboot.c
1308
for_each_cpu(sibling, topology_die_cpumask(cpu))
arch/x86/kernel/smpboot.c
1309
cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));
arch/x86/kernel/smpboot.c
1311
for_each_cpu(sibling, topology_sibling_cpumask(cpu)) {
arch/x86/kernel/smpboot.c
1312
cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
arch/x86/kernel/smpboot.c
1317
for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
arch/x86/kernel/smpboot.c
1318
cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
arch/x86/kernel/smpboot.c
1319
for_each_cpu(sibling, cpu_l2c_shared_mask(cpu))
arch/x86/kernel/smpboot.c
1320
cpumask_clear_cpu(cpu, cpu_l2c_shared_mask(sibling));
arch/x86/kernel/smpboot.c
1321
cpumask_clear(cpu_llc_shared_mask(cpu));
arch/x86/kernel/smpboot.c
1322
cpumask_clear(cpu_l2c_shared_mask(cpu));
arch/x86/kernel/smpboot.c
1323
cpumask_clear(topology_sibling_cpumask(cpu));
arch/x86/kernel/smpboot.c
1324
cpumask_clear(topology_core_cpumask(cpu));
arch/x86/kernel/smpboot.c
1325
cpumask_clear(topology_die_cpumask(cpu));
arch/x86/kernel/smpboot.c
1328
cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
arch/x86/kernel/smpboot.c
1332
static void remove_cpu_from_maps(int cpu)
arch/x86/kernel/smpboot.c
1334
set_cpu_online(cpu, false);
arch/x86/kernel/smpboot.c
1335
numa_remove_cpu(cpu);
arch/x86/kernel/smpboot.c
1340
int cpu = smp_processor_id();
arch/x86/kernel/smpboot.c
1342
remove_siblinginfo(cpu);
arch/x86/kernel/smpboot.c
1352
remove_cpu_from_maps(cpu);
arch/x86/kernel/smpboot.c
1457
unsigned int cpu, i;
arch/x86/kernel/smpboot.c
1459
for_each_cpu_andnot(cpu, cpu_present_mask, cpu_online_mask) {
arch/x86/kernel/smpboot.c
1460
md = per_cpu_ptr(&mwait_cpu_dead, cpu);
arch/x86/kernel/smpboot.c
1474
pr_err_once("CPU%u is stuck in mwait_play_dead()\n", cpu);
arch/x86/kernel/smpboot.c
539
int cpu;
arch/x86/kernel/smpboot.c
541
for_each_cpu(cpu, cpus) {
arch/x86/kernel/smpboot.c
542
u32 id = topology_logical_package_id(cpu);
arch/x86/kernel/smpboot.c
674
void set_cpu_sibling_map(int cpu)
arch/x86/kernel/smpboot.c
678
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/kernel/smpboot.c
682
cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
arch/x86/kernel/smpboot.c
685
cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
arch/x86/kernel/smpboot.c
686
cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
arch/x86/kernel/smpboot.c
687
cpumask_set_cpu(cpu, cpu_l2c_shared_mask(cpu));
arch/x86/kernel/smpboot.c
688
cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
arch/x86/kernel/smpboot.c
689
cpumask_set_cpu(cpu, topology_die_cpumask(cpu));
arch/x86/kernel/smpboot.c
700
if ((i == cpu) || (has_smt && match_smt(c, o)))
arch/x86/kernel/smpboot.c
701
link_mask(topology_sibling_cpumask, cpu, i);
arch/x86/kernel/smpboot.c
703
if ((i == cpu) || (has_mp && match_llc(c, o)))
arch/x86/kernel/smpboot.c
704
link_mask(cpu_llc_shared_mask, cpu, i);
arch/x86/kernel/smpboot.c
706
if ((i == cpu) || (has_mp && match_l2c(c, o)))
arch/x86/kernel/smpboot.c
707
link_mask(cpu_l2c_shared_mask, cpu, i);
arch/x86/kernel/smpboot.c
709
if ((i == cpu) || (has_mp && match_die(c, o)))
arch/x86/kernel/smpboot.c
710
link_mask(topology_die_cpumask, cpu, i);
arch/x86/kernel/smpboot.c
713
threads = cpumask_weight(topology_sibling_cpumask(cpu));
arch/x86/kernel/smpboot.c
717
for_each_cpu(i, topology_sibling_cpumask(cpu))
arch/x86/kernel/smpboot.c
727
if ((i == cpu) || (has_mp && match_pkg(c, o))) {
arch/x86/kernel/smpboot.c
728
link_mask(topology_core_cpumask, cpu, i);
arch/x86/kernel/smpboot.c
745
if (i != cpu)
arch/x86/kernel/smpboot.c
747
} else if (i != cpu && !c->booted_cores)
arch/x86/kernel/smpboot.c
754
const struct cpumask *cpu_coregroup_mask(int cpu)
arch/x86/kernel/smpboot.c
756
return cpu_llc_shared_mask(cpu);
arch/x86/kernel/smpboot.c
759
const struct cpumask *cpu_clustergroup_mask(int cpu)
arch/x86/kernel/smpboot.c
761
return cpu_l2c_shared_mask(cpu);
arch/x86/kernel/smpboot.c
767
int cpu;
arch/x86/kernel/smpboot.c
773
for_each_online_cpu(cpu)
arch/x86/kernel/smpboot.c
774
bogosum += cpu_data(cpu).loops_per_jiffy;
arch/x86/kernel/smpboot.c
851
static int wakeup_secondary_cpu_via_init(u32 phys_apicid, unsigned long start_eip, unsigned int cpu)
arch/x86/kernel/smpboot.c
934
static void announce_cpu(int cpu, int apicid)
arch/x86/kernel/smpboot.c
938
int node = early_cpu_to_node(cpu);
arch/x86/kernel/smpboot.c
964
pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);
arch/x86/kernel/smpboot.c
967
node, cpu, apicid);
arch/x86/kernel/smpboot.c
970
int common_cpu_up(unsigned int cpu, struct task_struct *idle)
arch/x86/kernel/smpboot.c
977
per_cpu(current_task, cpu) = idle;
arch/x86/kernel/smpboot.c
978
cpu_init_stack_canary(cpu, idle);
arch/x86/kernel/smpboot.c
981
ret = irq_init_percpu_irqstack(cpu);
arch/x86/kernel/smpboot.c
987
per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
arch/x86/kernel/smpboot.c
998
static int do_boot_cpu(u32 apicid, unsigned int cpu, struct task_struct *idle)
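set_cpu_sibling_map() above records topology relationships in both CPUs' masks via link_mask(), and remove_siblinginfo() clears both directions again on unplug. A minimal sketch of that symmetric link (example_link_siblings is invented; the mask accessor is the real one used at smpboot.c:685 above):

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    /* Record cpu1 and cpu2 as SMT siblings of each other; teardown must
     * clear both directions again, as remove_siblinginfo() does. */
    static void example_link_siblings(int cpu1, int cpu2)
    {
            cpumask_set_cpu(cpu1, topology_sibling_cpumask(cpu2));
            cpumask_set_cpu(cpu2, topology_sibling_cpumask(cpu1));
    }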
arch/x86/kernel/tboot.c
336
static int tboot_dying_cpu(unsigned int cpu)
arch/x86/kernel/tls.c
107
load_TLS(t, cpu);
arch/x86/kernel/tls.c
90
int cpu;
arch/x86/kernel/tls.c
95
cpu = get_cpu();
arch/x86/kernel/tsc.c
1008
for_each_possible_cpu(cpu) {
arch/x86/kernel/tsc.c
1009
per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
arch/x86/kernel/tsc.c
1010
per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
arch/x86/kernel/tsc.c
1057
set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
arch/x86/kernel/tsc.c
1329
int cpu;
arch/x86/kernel/tsc.c
1401
for_each_possible_cpu(cpu)
arch/x86/kernel/tsc.c
1402
set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
arch/x86/kernel/tsc.c
149
static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
arch/x86/kernel/tsc.c
1585
int sibling, cpu = smp_processor_id();
arch/x86/kernel/tsc.c
1586
int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
arch/x86/kernel/tsc.c
1587
const struct cpumask *mask = topology_core_cpumask(cpu);
arch/x86/kernel/tsc.c
1607
sibling = cpumask_any_but(mask, cpu);
arch/x86/kernel/tsc.c
179
c2n = per_cpu_ptr(&cyc2ns, cpu);
arch/x86/kernel/tsc.c
188
static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
arch/x86/kernel/tsc.c
196
__set_cyc2ns_scale(khz, cpu, tsc_now);
arch/x86/kernel/tsc.c
220
unsigned int cpu, this_cpu = smp_processor_id();
arch/x86/kernel/tsc.c
224
for_each_possible_cpu(cpu) {
arch/x86/kernel/tsc.c
225
if (cpu != this_cpu) {
arch/x86/kernel/tsc.c
227
c2n = per_cpu_ptr(&cyc2ns, cpu);
arch/x86/kernel/tsc.c
990
int cpu;
arch/x86/kernel/tsc_sync.c
124
unsigned int cpu, bool bootcpu)
arch/x86/kernel/tsc_sync.c
145
cpu, bootval);
arch/x86/kernel/tsc_sync.c
150
cpu, bootval);
arch/x86/kernel/tsc_sync.c
184
unsigned int refcpu, cpu = smp_processor_id();
arch/x86/kernel/tsc_sync.c
208
mask = topology_core_cpumask(cpu);
arch/x86/kernel/tsc_sync.c
209
refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;
arch/x86/kernel/tsc_sync.c
340
static inline unsigned int loop_timeout(int cpu)
arch/x86/kernel/tsc_sync.c
342
return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
arch/x86/kernel/tsc_sync.c
357
unsigned int cpu = (unsigned long)__cpu;
arch/x86/kernel/tsc_sync.c
379
check_tsc_warp(loop_timeout(cpu));
arch/x86/kernel/tsc_sync.c
393
smp_processor_id(), cpu);
arch/x86/kernel/tsc_sync.c
400
smp_processor_id(), cpu);
arch/x86/kernel/tsc_sync.c
435
unsigned int cpu = smp_processor_id();
arch/x86/kernel/tsc_sync.c
457
(unsigned long *)(unsigned long)cpu, 0);
arch/x86/kernel/tsc_sync.c
467
cur_max_warp = check_tsc_warp(loop_timeout(cpu));
arch/x86/kernel/tsc_sync.c
520
cpu, cur_max_warp, cur->adjusted);
arch/x86/kernel/x86_init.c
37
void x86_op_int_noop(int cpu) { }
arch/x86/kvm/svm/avic.c
1001
static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu,
arch/x86/kvm/svm/avic.c
1005
int h_physical_id = kvm_cpu_get_apicid(cpu);
arch/x86/kvm/svm/avic.c
1054
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/x86/kvm/svm/avic.c
1066
__avic_vcpu_load(vcpu, cpu, AVIC_START_RUNNING);
arch/x86/kvm/svm/avic.c
1185
__avic_vcpu_load(vcpu, vcpu->cpu, AVIC_ACTIVATE);
arch/x86/kvm/svm/avic.c
1221
avic_vcpu_load(vcpu, vcpu->cpu);
arch/x86/kvm/svm/avic.c
449
int cpu = READ_ONCE(vcpu->cpu);
arch/x86/kvm/svm/avic.c
451
if (cpu != get_cpu()) {
arch/x86/kvm/svm/avic.c
452
wrmsrq(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
arch/x86/kvm/svm/avic.c
453
trace_kvm_avic_doorbell(vcpu->vcpu_id, kvm_cpu_get_apicid(cpu));
arch/x86/kvm/svm/avic.c
918
pi_data.cpu = entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
arch/x86/kvm/svm/avic.c
920
pi_data.cpu = -1;
arch/x86/kvm/svm/avic.c
973
static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu,
arch/x86/kvm/svm/avic.c
993
WARN_ON_ONCE(amd_iommu_update_ga(data, cpu, ga_log_intr));
arch/x86/kvm/svm/avic.c
994
else if (cpu >= 0)
arch/x86/kvm/svm/avic.c
995
WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, cpu, ga_log_intr));
arch/x86/kvm/svm/sev.c
276
int cpu;
arch/x86/kvm/svm/sev.c
282
for_each_possible_cpu(cpu) {
arch/x86/kvm/svm/sev.c
283
sd = per_cpu_ptr(&svm_data, cpu);
arch/x86/kvm/svm/sev.c
3559
int pre_sev_run(struct vcpu_svm *svm, int cpu)
arch/x86/kvm/svm/sev.c
3561
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
arch/x86/kvm/svm/sev.c
3579
if (!cpumask_test_cpu(cpu, to_kvm_sev_info(kvm)->have_run_cpus))
arch/x86/kvm/svm/sev.c
3580
cpumask_set_cpu(cpu, to_kvm_sev_info(kvm)->have_run_cpus);
arch/x86/kvm/svm/sev.c
3592
svm->vcpu.arch.last_vmentry_cpu == cpu)
arch/x86/kvm/svm/svm.c
1381
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
arch/x86/kvm/svm/svm.c
1423
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/x86/kvm/svm/svm.c
1429
avic_vcpu_load(vcpu, cpu);
arch/x86/kvm/svm/svm.c
3630
kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
arch/x86/kvm/svm/svm.c
3643
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
arch/x86/kvm/svm/svm.c
3651
if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
arch/x86/kvm/svm/svm.c
3654
svm->current_vmcb->cpu = vcpu->cpu;
arch/x86/kvm/svm/svm.c
3658
return pre_sev_run(svm, vcpu->cpu);
arch/x86/kvm/svm/svm.c
425
int cpu = smp_processor_id();
arch/x86/kvm/svm/svm.c
4258
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
arch/x86/kvm/svm/svm.c
426
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/kvm/svm/svm.c
430
pr_err("CPU %d isn't AMD or Hygon\n", cpu);
arch/x86/kvm/svm/svm.c
4317
smp_send_reschedule(vcpu->cpu);
arch/x86/kvm/svm/svm.c
4322
vcpu->run->fail_entry.cpu = vcpu->cpu;
arch/x86/kvm/svm/svm.c
435
pr_err("SVM not supported by CPU %d\n", cpu);
arch/x86/kvm/svm/svm.c
5403
int cpu, r;
arch/x86/kvm/svm/svm.c
5559
for_each_possible_cpu(cpu) {
arch/x86/kvm/svm/svm.c
5560
r = svm_cpu_init(cpu);
arch/x86/kvm/svm/svm.c
579
static void svm_cpu_uninit(int cpu)
arch/x86/kvm/svm/svm.c
581
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
arch/x86/kvm/svm/svm.c
592
static int svm_cpu_init(int cpu)
arch/x86/kvm/svm/svm.c
594
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
arch/x86/kvm/svm/svm.c
599
save_area_page = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
arch/x86/kvm/svm/svm.c
951
int cpu;
arch/x86/kvm/svm/svm.c
957
for_each_possible_cpu(cpu)
arch/x86/kvm/svm/svm.c
958
svm_cpu_uninit(cpu);
arch/x86/kvm/svm/svm.h
138
int cpu;
arch/x86/kvm/svm/svm.h
843
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
arch/x86/kvm/svm/svm.h
859
int pre_sev_run(struct vcpu_svm *svm, int cpu);
arch/x86/kvm/svm/svm_onhyperv.c
48
int cpu;
arch/x86/kvm/svm/svm_onhyperv.c
51
for_each_online_cpu(cpu) {
arch/x86/kvm/svm/svm_onhyperv.c
53
hv_get_vp_assist_page(cpu);
arch/x86/kvm/vmx/common.h
143
__apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
arch/x86/kvm/vmx/main.c
103
vmx_vcpu_load(vcpu, cpu);
arch/x86/kvm/vmx/main.c
96
static void vt_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/x86/kvm/vmx/main.c
99
tdx_vcpu_load(vcpu, cpu);
arch/x86/kvm/vmx/nested.c
301
int cpu;
arch/x86/kvm/vmx/nested.c
306
cpu = get_cpu();
arch/x86/kvm/vmx/nested.c
309
vmx_vcpu_load_vmcs(vcpu, cpu);
arch/x86/kvm/vmx/nested.c
4603
int cpu;
arch/x86/kvm/vmx/nested.c
4611
cpu = get_cpu();
arch/x86/kvm/vmx/nested.c
4613
vmx_vcpu_load_vmcs(vcpu, cpu);
arch/x86/kvm/vmx/nested.c
4618
vmx_vcpu_load_vmcs(vcpu, cpu);
arch/x86/kvm/vmx/posted_intr.c
109
dest = cpu_physical_id(cpu);
arch/x86/kvm/vmx/posted_intr.c
182
raw_spin_lock_nested(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu),
arch/x86/kvm/vmx/posted_intr.c
185
&per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu));
arch/x86/kvm/vmx/posted_intr.c
186
raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
arch/x86/kvm/vmx/posted_intr.c
255
int cpu = smp_processor_id();
arch/x86/kvm/vmx/posted_intr.c
256
struct list_head *wakeup_list = &per_cpu(wakeup_vcpus_on_cpu, cpu);
arch/x86/kvm/vmx/posted_intr.c
257
raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, cpu);
arch/x86/kvm/vmx/posted_intr.c
269
void __init pi_init_cpu(int cpu)
arch/x86/kvm/vmx/posted_intr.c
271
INIT_LIST_HEAD(&per_cpu(wakeup_vcpus_on_cpu, cpu));
arch/x86/kvm/vmx/posted_intr.c
272
raw_spin_lock_init(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));
arch/x86/kvm/vmx/posted_intr.c
57
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
arch/x86/kvm/vmx/posted_intr.c
79
if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR && vcpu->cpu == cpu) {
arch/x86/kvm/vmx/posted_intr.c
93
raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu);
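posted_intr.c's pi_init_cpu() above gives every CPU its own wakeup list guarded by its own raw spinlock. A minimal sketch of that per-CPU list-plus-lock initialization (example_* names invented):

    #include <linux/list.h>
    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    static DEFINE_PER_CPU(struct list_head, example_list);
    static DEFINE_PER_CPU(raw_spinlock_t, example_lock);

    /* Called once per CPU at init, like pi_init_cpu(): each CPU gets an
     * empty list and an unlocked raw spinlock of its own. */
    static void __init example_init_cpu(int cpu)
    {
            INIT_LIST_HEAD(&per_cpu(example_list, cpu));
            raw_spin_lock_init(&per_cpu(example_lock, cpu));
    }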
arch/x86/kvm/vmx/posted_intr.h
11
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
arch/x86/kvm/vmx/posted_intr.h
14
void __init pi_init_cpu(int cpu);
arch/x86/kvm/vmx/tdx.c
2010
vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
arch/x86/kvm/vmx/tdx.c
294
vcpu->cpu = -1;
arch/x86/kvm/vmx/tdx.c
3293
static int tdx_online_cpu(unsigned int cpu)
arch/x86/kvm/vmx/tdx.c
3308
static int tdx_offline_cpu(unsigned int cpu)
arch/x86/kvm/vmx/tdx.c
3327
if (i != cpu && topology_physical_package_id(i) ==
arch/x86/kvm/vmx/tdx.c
3328
topology_physical_package_id(cpu))
arch/x86/kvm/vmx/tdx.c
387
if (unlikely(vcpu->cpu != raw_smp_processor_id()))
arch/x86/kvm/vmx/tdx.c
422
int cpu = vcpu->cpu;
arch/x86/kvm/vmx/tdx.c
424
if (unlikely(cpu == -1))
arch/x86/kvm/vmx/tdx.c
427
smp_call_function_single(cpu, tdx_flush_vp, &arg, 1);
arch/x86/kvm/vmx/tdx.c
434
int cpu = raw_smp_processor_id();
arch/x86/kvm/vmx/tdx.c
435
struct list_head *tdvcpus = &per_cpu(associated_tdvcpus, cpu);
arch/x86/kvm/vmx/tdx.c
723
void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/x86/kvm/vmx/tdx.c
727
vmx_vcpu_pi_load(vcpu, cpu);
arch/x86/kvm/vmx/tdx.c
728
if (vcpu->cpu == cpu || !is_hkid_assigned(to_kvm_tdx(vcpu->kvm)))
arch/x86/kvm/vmx/tdx.c
733
KVM_BUG_ON(cpu != raw_smp_processor_id(), vcpu->kvm);
arch/x86/kvm/vmx/tdx.c
741
list_add(&tdx->cpu_list, &per_cpu(associated_tdvcpus, cpu));
arch/x86/kvm/vmx/tdx.c
877
if (vcpu->cpu != -1) {
arch/x86/kvm/vmx/vmcs.h
73
int cpu;
arch/x86/kvm/vmx/vmx.c
1355
int cpu = raw_smp_processor_id();
arch/x86/kvm/vmx/vmx.c
1396
gs_base = cpu_kernelmode_gs_base(cpu);
arch/x86/kvm/vmx/vmx.c
1526
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
arch/x86/kvm/vmx/vmx.c
1529
bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
arch/x86/kvm/vmx/vmx.c
1545
&per_cpu(loaded_vmcss_on_cpu, cpu));
arch/x86/kvm/vmx/vmx.c
1549
prev = per_cpu(current_vmcs, cpu);
arch/x86/kvm/vmx/vmx.c
1551
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
arch/x86/kvm/vmx/vmx.c
1569
(unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
arch/x86/kvm/vmx/vmx.c
1575
(unsigned long)(cpu_entry_stack(cpu) + 1));
arch/x86/kvm/vmx/vmx.c
1578
vmx->loaded_vmcs->cpu = cpu;
arch/x86/kvm/vmx/vmx.c
1586
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/x86/kvm/vmx/vmx.c
1591
vmx_vcpu_load_vmcs(vcpu, cpu);
arch/x86/kvm/vmx/vmx.c
1593
vmx_vcpu_pi_load(vcpu, cpu);
arch/x86/kvm/vmx/vmx.c
1607
int cpu;
arch/x86/kvm/vmx/vmx.c
1609
cpu = get_cpu();
arch/x86/kvm/vmx/vmx.c
1611
vmx_vcpu_load_vmcs(vcpu, cpu);
arch/x86/kvm/vmx/vmx.c
2923
int cpu = smp_processor_id();
arch/x86/kvm/vmx/vmx.c
2926
pr_err("VMX not supported by CPU %d\n", cpu);
arch/x86/kvm/vmx/vmx.c
2932
pr_err("VMX not enabled (by BIOS) in MSR_IA32_FEAT_CTL on CPU %d\n", cpu);
arch/x86/kvm/vmx/vmx.c
2952
int cpu = raw_smp_processor_id();
arch/x86/kvm/vmx/vmx.c
2960
pr_err("Failed to setup VMCS config on CPU %d\n", cpu);
arch/x86/kvm/vmx/vmx.c
2973
pr_err("VMCS config on CPU %d doesn't match reference config:", cpu);
arch/x86/kvm/vmx/vmx.c
2979
i * (int)sizeof(u32), gold[i], cpu, mine[i], gold[i] ^ mine[i]);
arch/x86/kvm/vmx/vmx.c
3009
int cpu = raw_smp_processor_id();
arch/x86/kvm/vmx/vmx.c
3010
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
arch/x86/kvm/vmx/vmx.c
3020
if (kvm_is_using_evmcs() && !hv_get_vp_assist_page(cpu))
arch/x86/kvm/vmx/vmx.c
3036
int cpu = raw_smp_processor_id();
arch/x86/kvm/vmx/vmx.c
3039
list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
arch/x86/kvm/vmx/vmx.c
3056
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
arch/x86/kvm/vmx/vmx.c
3058
int node = cpu_to_node(cpu);
arch/x86/kvm/vmx/vmx.c
3109
loaded_vmcs->cpu = -1;
arch/x86/kvm/vmx/vmx.c
3133
int cpu;
arch/x86/kvm/vmx/vmx.c
3135
for_each_possible_cpu(cpu) {
arch/x86/kvm/vmx/vmx.c
3136
free_vmcs(per_cpu(vmxarea, cpu));
arch/x86/kvm/vmx/vmx.c
3137
per_cpu(vmxarea, cpu) = NULL;
arch/x86/kvm/vmx/vmx.c
3143
int cpu;
arch/x86/kvm/vmx/vmx.c
3145
for_each_possible_cpu(cpu) {
arch/x86/kvm/vmx/vmx.c
3148
vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
arch/x86/kvm/vmx/vmx.c
3167
per_cpu(vmxarea, cpu) = vmcs;
arch/x86/kvm/vmx/vmx.c
646
int cpu;
arch/x86/kvm/vmx/vmx.c
660
for_each_online_cpu(cpu) {
arch/x86/kvm/vmx/vmx.c
661
if (!hv_get_vp_assist_page(cpu)) {
arch/x86/kvm/vmx/vmx.c
6858
vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
arch/x86/kvm/vmx/vmx.c
6867
vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
arch/x86/kvm/vmx/vmx.c
7694
smp_send_reschedule(vcpu->cpu);
arch/x86/kvm/vmx/vmx.c
814
int cpu = raw_smp_processor_id();
arch/x86/kvm/vmx/vmx.c
828
list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
arch/x86/kvm/vmx/vmx.c
841
int cpu = raw_smp_processor_id();
arch/x86/kvm/vmx/vmx.c
843
if (loaded_vmcs->cpu != cpu)
arch/x86/kvm/vmx/vmx.c
845
if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
arch/x86/kvm/vmx/vmx.c
846
per_cpu(current_vmcs, cpu) = NULL;
arch/x86/kvm/vmx/vmx.c
863
loaded_vmcs->cpu = -1;
arch/x86/kvm/vmx/vmx.c
869
int cpu = loaded_vmcs->cpu;
arch/x86/kvm/vmx/vmx.c
871
if (cpu != -1)
arch/x86/kvm/vmx/vmx.c
872
smp_call_function_single(cpu,
arch/x86/kvm/vmx/vmx.c
8924
int r, cpu;
arch/x86/kvm/vmx/vmx.c
8953
for_each_possible_cpu(cpu) {
arch/x86/kvm/vmx/vmx.c
8954
INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
arch/x86/kvm/vmx/vmx.c
8956
pi_init_cpu(cpu);
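
alloc_vmcs_cpu() above maps the CPU to its NUMA node with cpu_to_node() before allocating. A hedged sketch of that node-local allocation idiom; the function name is illustrative:

#include <linux/gfp.h>
#include <linux/topology.h>

static struct page *demo_alloc_page_for_cpu(int cpu)
{
	int node = cpu_to_node(cpu);

	/* prefer memory on the CPU's node; the allocator may still
	 * fall back to another node under pressure */
	return alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
}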
arch/x86/kvm/vmx/vmx.h
341
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
arch/x86/kvm/vmx/vmx.h
669
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
arch/x86/kvm/vmx/x86_ops.h
133
void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
arch/x86/kvm/vmx/x86_ops.h
27
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
arch/x86/kvm/x86.c
10021
int cpu = smp_processor_id();
arch/x86/kvm/x86.c
10022
struct cpuinfo_x86 *c = &cpu_data(cpu);
arch/x86/kvm/x86.c
10030
WARN_ON(!cpu_online(cpu));
arch/x86/kvm/x86.c
10047
int r, cpu;
arch/x86/kvm/x86.c
10142
for_each_online_cpu(cpu) {
arch/x86/kvm/x86.c
10143
smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &r, 1);
arch/x86/kvm/x86.c
11439
vcpu->arch.last_vmentry_cpu = vcpu->cpu;
arch/x86/kvm/x86.c
12849
int idx, cpu;
arch/x86/kvm/x86.c
12856
for_each_possible_cpu(cpu)
arch/x86/kvm/x86.c
12857
cmpxchg(per_cpu_ptr(&last_vcpu, cpu), vcpu, NULL);
arch/x86/kvm/x86.c
13108
if (!stable && vcpu->cpu == smp_processor_id())
arch/x86/kvm/x86.c
5163
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/x86/kvm/x86.c
5177
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
arch/x86/kvm/x86.c
5178
else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
arch/x86/kvm/x86.c
5179
wbinvd_on_cpu(vcpu->cpu);
arch/x86/kvm/x86.c
5182
kvm_x86_call(vcpu_load)(vcpu, cpu);
arch/x86/kvm/x86.c
5184
if (vcpu != per_cpu(last_vcpu, cpu)) {
arch/x86/kvm/x86.c
5194
per_cpu(last_vcpu, cpu) = vcpu;
arch/x86/kvm/x86.c
5207
if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
arch/x86/kvm/x86.c
5228
if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
arch/x86/kvm/x86.c
5230
if (vcpu->cpu != cpu)
arch/x86/kvm/x86.c
5232
vcpu->cpu = cpu;
arch/x86/kvm/x86.c
583
int cpu;
arch/x86/kvm/x86.c
585
for_each_possible_cpu(cpu)
arch/x86/kvm/x86.c
586
WARN_ON_ONCE(per_cpu(user_return_msrs, cpu).registered);
arch/x86/kvm/x86.c
8514
int cpu = get_cpu();
arch/x86/kvm/x86.c
8516
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
arch/x86/kvm/x86.c
9746
static int kvmclock_cpu_down_prep(unsigned int cpu)
arch/x86/kvm/x86.c
9772
int cpu;
arch/x86/kvm/x86.c
9783
for_each_present_cpu(cpu)
arch/x86/kvm/x86.c
9784
per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
arch/x86/kvm/x86.c
9798
static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
arch/x86/kvm/x86.c
9844
smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
arch/x86/kvm/x86.c
9849
if (vcpu->cpu != cpu)
arch/x86/kvm/x86.c
9852
if (vcpu->cpu != raw_smp_processor_id())
arch/x86/kvm/x86.c
9871
smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
arch/x86/kvm/x86.c
9879
int cpu;
arch/x86/kvm/x86.c
9886
for_each_cpu(cpu, freq->policy->cpus)
arch/x86/kvm/x86.c
9887
__kvmclock_cpufreq_notifier(freq, cpu);
arch/x86/kvm/x86.c
9896
static int kvmclock_cpu_online(unsigned int cpu)
arch/x86/kvm/x86.c
9909
int cpu;
arch/x86/kvm/x86.c
9911
cpu = get_cpu();
arch/x86/kvm/x86.c
9912
policy = cpufreq_cpu_get(cpu);
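
Several x86.c entries above pair get_cpu() with later put_cpu(). get_cpu() disables preemption and returns the current CPU id, so the id stays meaningful until the matching put_cpu(); a minimal sketch, with the body elided:

#include <linux/smp.h>

static void demo_pin_to_cpu(void)
{
	int cpu = get_cpu();	/* preemption off from here */

	/* ... use 'cpu' knowing the task cannot migrate ... */
	(void)cpu;

	put_cpu();		/* preemption re-enabled */
}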
arch/x86/lib/cache-smp.c
11
void wbinvd_on_cpu(int cpu)
arch/x86/lib/cache-smp.c
13
smp_call_function_single(cpu, __wbinvd, NULL, 1);
arch/x86/lib/delay.c
66
int cpu;
arch/x86/lib/delay.c
69
cpu = smp_processor_id();
arch/x86/lib/delay.c
90
if (unlikely(cpu != smp_processor_id())) {
arch/x86/lib/delay.c
92
cpu = smp_processor_id();
arch/x86/lib/msr-smp.c
167
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
arch/x86/lib/msr-smp.c
179
err = smp_call_function_single_async(cpu, &csd);
arch/x86/lib/msr-smp.c
191
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
arch/x86/lib/msr-smp.c
201
err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
arch/x86/lib/msr-smp.c
207
int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
arch/x86/lib/msr-smp.c
217
err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
arch/x86/lib/msr-smp.c
223
int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
arch/x86/lib/msr-smp.c
228
err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high);
arch/x86/lib/msr-smp.c
253
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
arch/x86/lib/msr-smp.c
260
err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
arch/x86/lib/msr-smp.c
266
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
arch/x86/lib/msr-smp.c
273
err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
arch/x86/lib/msr-smp.c
34
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
arch/x86/lib/msr-smp.c
42
err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
arch/x86/lib/msr-smp.c
50
int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
arch/x86/lib/msr-smp.c
58
err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
arch/x86/lib/msr-smp.c
65
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
arch/x86/lib/msr-smp.c
75
err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
arch/x86/lib/msr-smp.c
81
int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
arch/x86/lib/msr-smp.c
91
err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
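
The msr-smp.c helpers indexed above run the MSR access on the target CPU via smp_call_function_single(); the _safe variants additionally report a faulting RDMSR/WRMSR as an error instead of oopsing. A usage sketch — the wrapper name is invented, MSR_IA32_TSC chosen only for illustration:

#include <asm/msr.h>

static int demo_read_tsc_on(unsigned int cpu, u64 *val)
{
	u32 lo, hi;
	int err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TSC, &lo, &hi);

	if (!err)
		*val = ((u64)hi << 32) | lo;
	return err;	/* nonzero if the IPI or the RDMSR failed */
}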
arch/x86/mm/cpu_entry_area.c
105
static void __init percpu_setup_debug_store(unsigned int cpu)
arch/x86/mm/cpu_entry_area.c
114
cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
arch/x86/mm/cpu_entry_area.c
117
cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
arch/x86/mm/cpu_entry_area.c
120
cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
arch/x86/mm/cpu_entry_area.c
139
static void __init percpu_setup_exception_stacks(unsigned int cpu)
arch/x86/mm/cpu_entry_area.c
141
struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
arch/x86/mm/cpu_entry_area.c
142
struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
arch/x86/mm/cpu_entry_area.c
147
per_cpu(cea_exception_stacks, cpu) = &cea->estacks;
arch/x86/mm/cpu_entry_area.c
167
static void __init percpu_setup_exception_stacks(unsigned int cpu)
arch/x86/mm/cpu_entry_area.c
169
struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
arch/x86/mm/cpu_entry_area.c
172
&per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
arch/x86/mm/cpu_entry_area.c
177
static void __init setup_cpu_entry_area(unsigned int cpu)
arch/x86/mm/cpu_entry_area.c
179
struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
arch/x86/mm/cpu_entry_area.c
197
early_cpu_to_node(cpu));
arch/x86/mm/cpu_entry_area.c
199
cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);
arch/x86/mm/cpu_entry_area.c
202
per_cpu_ptr(&entry_stack_storage, cpu), 1,
arch/x86/mm/cpu_entry_area.c
23
static __always_inline unsigned int cea_offset(unsigned int cpu)
arch/x86/mm/cpu_entry_area.c
233
cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
arch/x86/mm/cpu_entry_area.c
237
per_cpu(cpu_entry_area, cpu) = cea;
arch/x86/mm/cpu_entry_area.c
240
percpu_setup_exception_stacks(cpu);
arch/x86/mm/cpu_entry_area.c
242
percpu_setup_debug_store(cpu);
arch/x86/mm/cpu_entry_area.c
25
return per_cpu(_cea_offset, cpu);
arch/x86/mm/cpu_entry_area.c
265
unsigned int cpu;
arch/x86/mm/cpu_entry_area.c
271
for_each_possible_cpu(cpu)
arch/x86/mm/cpu_entry_area.c
272
setup_cpu_entry_area(cpu);
arch/x86/mm/cpu_entry_area.c
62
static __always_inline unsigned int cea_offset(unsigned int cpu)
arch/x86/mm/cpu_entry_area.c
64
return cpu;
arch/x86/mm/cpu_entry_area.c
70
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
arch/x86/mm/cpu_entry_area.c
72
unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
arch/x86/mm/fault.c
749
int cpu = raw_smp_processor_id();
arch/x86/mm/fault.c
767
printk(KERN_CONT " likely on CPU %d (core %d, socket %d)", cpu,
arch/x86/mm/fault.c
768
topology_core_id(cpu), topology_physical_package_id(cpu));
arch/x86/mm/mmio-mod.c
370
int cpu;
arch/x86/mm/mmio-mod.c
386
for_each_cpu(cpu, downed_cpus) {
arch/x86/mm/mmio-mod.c
387
err = remove_cpu(cpu);
arch/x86/mm/mmio-mod.c
389
pr_info("CPU%d is down.\n", cpu);
arch/x86/mm/mmio-mod.c
391
pr_err("Error taking CPU%d down: %d\n", cpu, err);
arch/x86/mm/mmio-mod.c
400
int cpu;
arch/x86/mm/mmio-mod.c
406
for_each_cpu(cpu, downed_cpus) {
arch/x86/mm/mmio-mod.c
407
err = add_cpu(cpu);
arch/x86/mm/mmio-mod.c
409
pr_info("enabled CPU%d.\n", cpu);
arch/x86/mm/mmio-mod.c
411
pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
arch/x86/mm/numa.c
100
numa_set_node(cpu, NUMA_NO_NODE);
arch/x86/mm/numa.c
298
int cpu;
arch/x86/mm/numa.c
303
for_each_possible_cpu(cpu) {
arch/x86/mm/numa.c
304
int node = numa_cpu_node(cpu);
arch/x86/mm/numa.c
321
numa_set_node(cpu, node);
arch/x86/mm/numa.c
328
void numa_add_cpu(unsigned int cpu)
arch/x86/mm/numa.c
330
cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
arch/x86/mm/numa.c
333
void numa_remove_cpu(unsigned int cpu)
arch/x86/mm/numa.c
335
cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
arch/x86/mm/numa.c
341
int __cpu_to_node(int cpu)
arch/x86/mm/numa.c
345
"cpu_to_node(%d): usage too early!\n", cpu);
arch/x86/mm/numa.c
347
return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
arch/x86/mm/numa.c
349
return per_cpu(x86_cpu_to_node_map, cpu);
arch/x86/mm/numa.c
357
int early_cpu_to_node(int cpu)
arch/x86/mm/numa.c
360
return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
arch/x86/mm/numa.c
362
if (!cpu_possible(cpu)) {
arch/x86/mm/numa.c
364
"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
arch/x86/mm/numa.c
368
return per_cpu(x86_cpu_to_node_map, cpu);
arch/x86/mm/numa.c
371
void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
arch/x86/mm/numa.c
387
cpumask_set_cpu(cpu, mask);
arch/x86/mm/numa.c
389
cpumask_clear_cpu(cpu, mask);
arch/x86/mm/numa.c
393
cpu, node, cpumask_pr_args(mask));
arch/x86/mm/numa.c
398
static void numa_set_cpumask(int cpu, bool enable)
arch/x86/mm/numa.c
400
debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
arch/x86/mm/numa.c
403
void numa_add_cpu(unsigned int cpu)
arch/x86/mm/numa.c
405
numa_set_cpumask(cpu, true);
arch/x86/mm/numa.c
408
void numa_remove_cpu(unsigned int cpu)
arch/x86/mm/numa.c
410
numa_set_cpumask(cpu, false);
arch/x86/mm/numa.c
53
int numa_cpu_node(int cpu)
arch/x86/mm/numa.c
55
u32 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/mm/numa.c
76
void numa_set_node(int cpu, int node)
arch/x86/mm/numa.c
82
cpu_to_node_map[cpu] = node;
arch/x86/mm/numa.c
87
if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
arch/x86/mm/numa.c
88
printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
arch/x86/mm/numa.c
93
per_cpu(x86_cpu_to_node_map, cpu) = node;
arch/x86/mm/numa.c
95
set_cpu_numa_node(cpu, node);
arch/x86/mm/numa.c
98
void numa_clear_node(int cpu)
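
The numa.c entries above maintain the cpu-to-node map that numa_set_node() writes and cpu_to_node() reads. A one-liner sketch of a consumer, allocating near a given CPU; purely illustrative:

#include <linux/slab.h>
#include <linux/topology.h>

static void *demo_alloc_near_cpu(int cpu, size_t size)
{
	/* node-aware kmalloc keyed off the map maintained above */
	return kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
}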
arch/x86/mm/pti.c
449
unsigned int cpu;
arch/x86/mm/pti.c
453
for_each_possible_cpu(cpu) {
arch/x86/mm/pti.c
463
unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
arch/x86/mm/tlb.c
1297
static bool should_flush_tlb(int cpu, void *data)
arch/x86/mm/tlb.c
1299
struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu);
arch/x86/mm/tlb.c
1310
if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
arch/x86/mm/tlb.c
1454
int cpu = get_cpu();
arch/x86/mm/tlb.c
1470
} else if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
arch/x86/mm/tlb.c
1733
int cpu = get_cpu();
arch/x86/mm/tlb.c
1745
} else if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
arch/x86/mm/tlb.c
1747
} else if (cpumask_test_cpu(cpu, &batch->cpumask)) {
arch/x86/mm/tlb.c
351
int cpu;
arch/x86/mm/tlb.c
358
for_each_cpu(cpu, mm_cpumask(mm)) {
arch/x86/mm/tlb.c
360
if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
arch/x86/mm/tlb.c
363
if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
arch/x86/mm/tlb.c
467
int cpu;
arch/x86/mm/tlb.c
472
for_each_cpu(cpu, mm_cpumask(mm)) {
arch/x86/mm/tlb.c
478
while (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) == LOADED_MM_SWITCHING)
arch/x86/mm/tlb.c
481
if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) != mm)
arch/x86/mm/tlb.c
492
if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm_asid, cpu)) != bc_asid) {
arch/x86/mm/tlb.c
789
unsigned cpu = smp_processor_id();
arch/x86/mm/tlb.c
859
!cpumask_test_cpu(cpu, mm_cpumask(next))))
arch/x86/mm/tlb.c
860
cpumask_set_cpu(cpu, mm_cpumask(next));
arch/x86/mm/tlb.c
935
if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
arch/x86/mm/tlb.c
936
cpumask_set_cpu(cpu, mm_cpumask(next));
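
The switch_mm() lines above only write mm_cpumask when the bit is not already set, keeping the shared cache line clean on the common path. A minimal sketch of that test-before-set idiom:

#include <linux/cpumask.h>
#include <linux/mm_types.h>

static void demo_mark_cpu(struct mm_struct *mm, int cpu)
{
	/* cheap read first; only dirty the mask on first use */
	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
		cpumask_set_cpu(cpu, mm_cpumask(mm));
}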
arch/x86/net/bpf_jit_comp.c
3677
int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
arch/x86/net/bpf_jit_comp.c
3680
for_each_possible_cpu(cpu) {
arch/x86/net/bpf_jit_comp.c
3681
stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
arch/x86/net/bpf_jit_comp.c
3690
int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
arch/x86/net/bpf_jit_comp.c
3693
for_each_possible_cpu(cpu) {
arch/x86/net/bpf_jit_comp.c
3694
stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
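
The bpf_jit_comp.c entries above walk a dynamic per-CPU allocation with per_cpu_ptr(). A sketch of the full lifecycle of such an object — allocate, touch each CPU's copy, free — with invented names:

#include <linux/errno.h>
#include <linux/percpu.h>

static int demo_percpu_lifecycle(void)
{
	int cpu;
	u64 __percpu *ctr = alloc_percpu(u64);

	if (!ctr)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ctr, cpu) = 0;	/* this CPU's copy */

	free_percpu(ctr);
	return 0;
}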
arch/x86/pci/amd_bus.c
340
static int amd_bus_cpu_online(unsigned int cpu)
arch/x86/pci/mmconfig_32.c
42
int cpu = smp_processor_id();
arch/x86/pci/mmconfig_32.c
44
cpu != mmcfg_last_accessed_cpu) {
arch/x86/pci/mmconfig_32.c
46
mmcfg_last_accessed_cpu = cpu;
arch/x86/platform/uv/uv_irq.c
181
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
arch/x86/platform/uv/uv_irq.c
190
init_irq_alloc_info(&info, cpumask_of(cpu));
arch/x86/platform/uv/uv_nmi.c
1055
int cpu;
arch/x86/platform/uv/uv_nmi.c
1061
for_each_present_cpu(cpu) {
arch/x86/platform/uv/uv_nmi.c
1062
int nid = cpu_to_node(cpu);
arch/x86/platform/uv/uv_nmi.c
1072
uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
arch/x86/platform/uv/uv_nmi.c
499
static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
arch/x86/platform/uv/uv_nmi.c
504
atomic_set(&hub_nmi->cpu_owner, cpu);
arch/x86/platform/uv/uv_nmi.c
506
atomic_set(&uv_nmi_cpu, cpu);
arch/x86/platform/uv/uv_nmi.c
516
int cpu = smp_processor_id();
arch/x86/platform/uv/uv_nmi.c
533
uv_set_in_nmi(cpu, hub_nmi);
arch/x86/platform/uv/uv_nmi.c
564
uv_set_in_nmi(cpu, hub_nmi);
arch/x86/platform/uv/uv_nmi.c
580
static inline void uv_clear_nmi(int cpu)
arch/x86/platform/uv/uv_nmi.c
584
if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
arch/x86/platform/uv/uv_nmi.c
598
int cpu;
arch/x86/platform/uv/uv_nmi.c
600
for_each_cpu(cpu, uv_nmi_cpu_mask)
arch/x86/platform/uv/uv_nmi.c
601
uv_cpu_nmi_per(cpu).pinging = 1;
arch/x86/platform/uv/uv_nmi.c
609
int cpu;
arch/x86/platform/uv/uv_nmi.c
611
for_each_cpu(cpu, uv_nmi_cpu_mask) {
arch/x86/platform/uv/uv_nmi.c
612
uv_cpu_nmi_per(cpu).pinging = 0;
arch/x86/platform/uv/uv_nmi.c
613
uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
arch/x86/platform/uv/uv_nmi.c
614
cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
arch/x86/platform/uv/uv_nmi.c
623
int cpu = smp_processor_id();
arch/x86/platform/uv/uv_nmi.c
634
cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
arch/x86/platform/uv/uv_nmi.c
713
static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
arch/x86/platform/uv/uv_nmi.c
716
cpu, current->pid, current->comm, (void *)regs->ip);
arch/x86/platform/uv/uv_nmi.c
726
static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
arch/x86/platform/uv/uv_nmi.c
730
if (cpu == 0)
arch/x86/platform/uv/uv_nmi.c
734
uv_nmi_dump_cpu_ip(cpu, regs);
arch/x86/platform/uv/uv_nmi.c
737
pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
arch/x86/platform/uv/uv_nmi.c
745
static void uv_nmi_trigger_dump(int cpu)
arch/x86/platform/uv/uv_nmi.c
749
if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
arch/x86/platform/uv/uv_nmi.c
752
uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
arch/x86/platform/uv/uv_nmi.c
756
if (uv_cpu_nmi_per(cpu).state
arch/x86/platform/uv/uv_nmi.c
761
pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
arch/x86/platform/uv/uv_nmi.c
762
uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
arch/x86/platform/uv/uv_nmi.c
780
static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
arch/x86/platform/uv/uv_nmi.c
796
static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
arch/x86/platform/uv/uv_nmi.c
805
atomic_read(&uv_nmi_cpus_in_nmi), cpu);
arch/x86/platform/uv/uv_nmi.c
812
else if (tcpu == cpu)
arch/x86/platform/uv/uv_nmi.c
827
uv_nmi_dump_state_cpu(cpu, regs);
arch/x86/platform/uv/uv_nmi.c
840
static void uv_nmi_kdump(int cpu, int main, struct pt_regs *regs)
arch/x86/platform/uv/uv_nmi.c
851
pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
arch/x86/platform/uv/uv_nmi.c
895
static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
arch/x86/platform/uv/uv_nmi.c
905
ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
arch/x86/platform/uv/uv_nmi.c
922
kgdb_nmicallback(cpu, regs);
arch/x86/platform/uv/uv_nmi.c
928
static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
arch/x86/platform/uv/uv_nmi.c
940
int cpu = smp_processor_id();
arch/x86/platform/uv/uv_nmi.c
953
master = (atomic_read(&uv_nmi_cpu) == cpu);
arch/x86/platform/uv/uv_nmi.c
957
uv_nmi_kdump(cpu, master, regs);
arch/x86/platform/uv/uv_nmi.c
970
uv_nmi_action_health(cpu, regs, master);
arch/x86/platform/uv/uv_nmi.c
974
uv_nmi_dump_state(cpu, regs, master);
arch/x86/platform/uv/uv_nmi.c
978
uv_call_kgdb_kdb(cpu, regs, master);
arch/x86/platform/uv/uv_nmi.c
991
uv_clear_nmi(cpu);
arch/x86/platform/uv/uv_time.c
137
int cpu;
arch/x86/platform/uv/uv_time.c
143
for_each_present_cpu(cpu) {
arch/x86/platform/uv/uv_time.c
144
int nid = cpu_to_node(cpu);
arch/x86/platform/uv/uv_time.c
145
int bid = uv_cpu_to_blade_id(cpu);
arch/x86/platform/uv/uv_time.c
146
int bcpu = uv_cpu_blade_processor_id(cpu);
arch/x86/platform/uv/uv_time.c
150
head = kmalloc_node(struct_size(head, cpu,
arch/x86/platform/uv/uv_time.c
163
head->cpu[bcpu].lcpu = cpu;
arch/x86/platform/uv/uv_time.c
164
head->cpu[bcpu].expires = ULLONG_MAX;
arch/x86/platform/uv/uv_time.c
178
u64 exp = head->cpu[c].expires;
arch/x86/platform/uv/uv_time.c
186
c = head->cpu[bcpu].lcpu;
arch/x86/platform/uv/uv_time.c
201
static int uv_rtc_set_timer(int cpu, u64 expires)
arch/x86/platform/uv/uv_time.c
203
int pnode = uv_cpu_to_pnode(cpu);
arch/x86/platform/uv/uv_time.c
204
int bid = uv_cpu_to_blade_id(cpu);
arch/x86/platform/uv/uv_time.c
206
int bcpu = uv_cpu_blade_processor_id(cpu);
arch/x86/platform/uv/uv_time.c
207
u64 *t = &head->cpu[bcpu].expires;
arch/x86/platform/uv/uv_time.c
218
expires < head->cpu[next_cpu].expires) {
arch/x86/platform/uv/uv_time.c
220
if (uv_setup_intr(cpu, expires)) {
arch/x86/platform/uv/uv_time.c
237
static int uv_rtc_unset_timer(int cpu, int force)
arch/x86/platform/uv/uv_time.c
239
int pnode = uv_cpu_to_pnode(cpu);
arch/x86/platform/uv/uv_time.c
240
int bid = uv_cpu_to_blade_id(cpu);
arch/x86/platform/uv/uv_time.c
242
int bcpu = uv_cpu_blade_processor_id(cpu);
arch/x86/platform/uv/uv_time.c
243
u64 *t = &head->cpu[bcpu].expires;
arch/x86/platform/uv/uv_time.c
312
int cpu = smp_processor_id();
arch/x86/platform/uv/uv_time.c
313
struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);
arch/x86/platform/uv/uv_time.c
318
if (uv_rtc_unset_timer(cpu, 0) != 1)
arch/x86/platform/uv/uv_time.c
56
} cpu[] __counted_by(ncpus);
arch/x86/platform/uv/uv_time.c
71
static void uv_rtc_send_IPI(int cpu)
arch/x86/platform/uv/uv_time.c
76
apicid = cpu_physical_id(cpu);
arch/x86/platform/uv/uv_time.c
93
static int uv_setup_intr(int cpu, u64 expires)
arch/x86/platform/uv/uv_time.c
96
unsigned long apicid = cpu_physical_id(cpu);
arch/x86/platform/uv/uv_time.c
97
int pnode = uv_cpu_to_pnode(cpu);
arch/x86/power/cpu.c
154
int cpu = smp_processor_id();
arch/x86/power/cpu.c
156
struct desc_struct *desc = get_cpu_gdt_rw(cpu);
arch/x86/power/cpu.c
167
set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
arch/x86/power/cpu.c
186
load_fixmap_gdt(cpu);
arch/x86/virt/svm/sev.c
133
static int __mfd_enable(unsigned int cpu)
arch/x86/virt/svm/sev.c
154
static int __snp_enable(unsigned int cpu)
arch/x86/virt/vmx/tdx/tdx.c
1006
for_each_online_cpu(cpu) {
arch/x86/virt/vmx/tdx/tdx.c
1012
if (cpumask_test_and_set_cpu(topology_physical_package_id(cpu),
arch/x86/virt/vmx/tdx/tdx.c
1020
ret = smp_call_on_cpu(cpu, do_global_key_config, NULL, true);
arch/x86/virt/vmx/tdx/tdx.c
992
int cpu, ret = -EINVAL;
arch/x86/xen/apic.c
105
static u32 xen_cpu_present_to_apicid(int cpu)
arch/x86/xen/apic.c
107
if (cpu_present(cpu))
arch/x86/xen/apic.c
108
return cpu_data(cpu).topo.apicid;
arch/x86/xen/apic.c
45
int ret, cpu;
arch/x86/xen/apic.c
52
cpu = smp_processor_id();
arch/x86/xen/apic.c
54
return cpu ? cpuid_to_apicid[cpu] << 24 : 0;
arch/x86/xen/apic.c
56
op.u.pcpu_info.xen_cpuid = cpu;
arch/x86/xen/enlighten.c
132
static int xen_cpu_up_online(unsigned int cpu)
arch/x86/xen/enlighten.c
134
xen_init_lock_cpu(cpu);
arch/x86/xen/enlighten.c
157
static void xen_vcpu_setup_restore(int cpu)
arch/x86/xen/enlighten.c
160
xen_vcpu_info_reset(cpu);
arch/x86/xen/enlighten.c
167
(xen_hvm_domain() && cpu_online(cpu)))
arch/x86/xen/enlighten.c
168
xen_vcpu_setup(cpu);
arch/x86/xen/enlighten.c
178
int cpu;
arch/x86/xen/enlighten.c
180
for_each_possible_cpu(cpu) {
arch/x86/xen/enlighten.c
181
bool other_cpu = (cpu != smp_processor_id());
arch/x86/xen/enlighten.c
184
if (xen_vcpu_nr(cpu) == XEN_VCPU_ID_INVALID)
arch/x86/xen/enlighten.c
189
xen_vcpu_nr(cpu), NULL) > 0;
arch/x86/xen/enlighten.c
192
HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
arch/x86/xen/enlighten.c
196
xen_setup_runstate_info(cpu);
arch/x86/xen/enlighten.c
198
xen_vcpu_setup_restore(cpu);
arch/x86/xen/enlighten.c
201
HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
arch/x86/xen/enlighten.c
206
void xen_vcpu_info_reset(int cpu)
arch/x86/xen/enlighten.c
208
if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS) {
arch/x86/xen/enlighten.c
209
per_cpu(xen_vcpu, cpu) =
arch/x86/xen/enlighten.c
210
&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
arch/x86/xen/enlighten.c
213
per_cpu(xen_vcpu, cpu) = NULL;
arch/x86/xen/enlighten.c
217
void xen_vcpu_setup(int cpu)
arch/x86/xen/enlighten.c
238
if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
arch/x86/xen/enlighten.c
242
vcpup = &per_cpu(xen_vcpu_info, cpu);
arch/x86/xen/enlighten.c
253
err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
arch/x86/xen/enlighten.c
256
panic("register_vcpu_info failed: cpu=%d err=%d\n", cpu, err);
arch/x86/xen/enlighten.c
258
per_cpu(xen_vcpu, cpu) = vcpup;
arch/x86/xen/enlighten.c
303
int cpu;
arch/x86/xen/enlighten.c
305
for_each_online_cpu(cpu)
arch/x86/xen/enlighten.c
306
xen_pmu_finish(cpu);
arch/x86/xen/enlighten.c
358
void xen_pin_vcpu(int cpu)
arch/x86/xen/enlighten.c
367
pin_override.pcpu = cpu;
arch/x86/xen/enlighten.c
371
if (cpu < 0)
arch/x86/xen/enlighten.c
377
cpu);
arch/x86/xen/enlighten.c
387
cpu);
arch/x86/xen/enlighten_hvm.c
152
static int xen_cpu_up_prepare_hvm(unsigned int cpu)
arch/x86/xen/enlighten_hvm.c
162
xen_uninit_lock_cpu(cpu);
arch/x86/xen/enlighten_hvm.c
164
if (cpu_acpi_id(cpu) != CPU_ACPIID_INVALID)
arch/x86/xen/enlighten_hvm.c
165
per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
arch/x86/xen/enlighten_hvm.c
167
per_cpu(xen_vcpu_id, cpu) = cpu;
arch/x86/xen/enlighten_hvm.c
168
xen_vcpu_setup(cpu);
arch/x86/xen/enlighten_hvm.c
173
rc = xen_set_upcall_vector(cpu);
arch/x86/xen/enlighten_hvm.c
176
" for CPU %d failed: %d\n", cpu, rc);
arch/x86/xen/enlighten_hvm.c
182
xen_setup_timer(cpu);
arch/x86/xen/enlighten_hvm.c
184
rc = xen_smp_intr_init(cpu);
arch/x86/xen/enlighten_hvm.c
187
cpu, rc);
arch/x86/xen/enlighten_hvm.c
192
static int xen_cpu_dead_hvm(unsigned int cpu)
arch/x86/xen/enlighten_hvm.c
194
xen_smp_intr_free(cpu);
arch/x86/xen/enlighten_hvm.c
197
xen_teardown_timer(cpu);
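
xen_cpu_up_prepare_hvm()/xen_cpu_dead_hvm() above are hotplug callbacks registered through the cpuhp state machine. A hedged sketch of such a registration — state name and callbacks here are hypothetical:

#include <linux/cpuhotplug.h>

static int demo_cpu_online(unsigned int cpu)  { return 0; }
static int demo_cpu_offline(unsigned int cpu) { return 0; }

static int __init demo_hotplug_init(void)
{
	/* CPUHP_AP_ONLINE_DYN allocates a dynamic state slot; the
	 * online callback also runs for already-online CPUs */
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				    demo_cpu_online, demo_cpu_offline);
	return ret < 0 ? ret : 0;
}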
arch/x86/xen/enlighten_pv.c
1195
int cpu;
arch/x86/xen/enlighten_pv.c
1197
for_each_possible_cpu(cpu) {
arch/x86/xen/enlighten_pv.c
1199
per_cpu(xen_vcpu_id, cpu) = cpu;
arch/x86/xen/enlighten_pv.c
1200
xen_vcpu_setup(cpu);
arch/x86/xen/enlighten_pv.c
1313
static void __init xen_setup_gdt(int cpu)
arch/x86/xen/enlighten_pv.c
1315
pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot;
arch/x86/xen/enlighten_pv.c
1316
pv_ops.cpu.load_gdt = xen_load_gdt_boot;
arch/x86/xen/enlighten_pv.c
1318
switch_gdt_and_percpu_base(cpu);
arch/x86/xen/enlighten_pv.c
1320
pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;
arch/x86/xen/enlighten_pv.c
1321
pv_ops.cpu.load_gdt = xen_load_gdt;
arch/x86/xen/enlighten_pv.c
1366
pv_ops.cpu.cpuid = xen_cpuid;
arch/x86/xen/enlighten_pv.c
1367
pv_ops.cpu.set_debugreg = xen_set_debugreg;
arch/x86/xen/enlighten_pv.c
1368
pv_ops.cpu.get_debugreg = xen_get_debugreg;
arch/x86/xen/enlighten_pv.c
1369
pv_ops.cpu.read_cr0 = xen_read_cr0;
arch/x86/xen/enlighten_pv.c
1370
pv_ops.cpu.write_cr0 = xen_write_cr0;
arch/x86/xen/enlighten_pv.c
1371
pv_ops.cpu.write_cr4 = xen_write_cr4;
arch/x86/xen/enlighten_pv.c
1372
pv_ops.cpu.read_msr = xen_read_msr;
arch/x86/xen/enlighten_pv.c
1373
pv_ops.cpu.write_msr = xen_write_msr;
arch/x86/xen/enlighten_pv.c
1374
pv_ops.cpu.read_msr_safe = xen_read_msr_safe;
arch/x86/xen/enlighten_pv.c
1375
pv_ops.cpu.write_msr_safe = xen_write_msr_safe;
arch/x86/xen/enlighten_pv.c
1376
pv_ops.cpu.read_pmc = xen_read_pmc;
arch/x86/xen/enlighten_pv.c
1377
pv_ops.cpu.load_tr_desc = paravirt_nop;
arch/x86/xen/enlighten_pv.c
1378
pv_ops.cpu.set_ldt = xen_set_ldt;
arch/x86/xen/enlighten_pv.c
1379
pv_ops.cpu.load_gdt = xen_load_gdt;
arch/x86/xen/enlighten_pv.c
1380
pv_ops.cpu.load_idt = xen_load_idt;
arch/x86/xen/enlighten_pv.c
1381
pv_ops.cpu.load_tls = xen_load_tls;
arch/x86/xen/enlighten_pv.c
1382
pv_ops.cpu.load_gs_index = xen_load_gs_index;
arch/x86/xen/enlighten_pv.c
1383
pv_ops.cpu.alloc_ldt = xen_alloc_ldt;
arch/x86/xen/enlighten_pv.c
1384
pv_ops.cpu.free_ldt = xen_free_ldt;
arch/x86/xen/enlighten_pv.c
1385
pv_ops.cpu.store_tr = xen_store_tr;
arch/x86/xen/enlighten_pv.c
1386
pv_ops.cpu.write_ldt_entry = xen_write_ldt_entry;
arch/x86/xen/enlighten_pv.c
1387
pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;
arch/x86/xen/enlighten_pv.c
1388
pv_ops.cpu.write_idt_entry = xen_write_idt_entry;
arch/x86/xen/enlighten_pv.c
1389
pv_ops.cpu.load_sp0 = xen_load_sp0;
arch/x86/xen/enlighten_pv.c
1391
pv_ops.cpu.invalidate_io_bitmap = xen_invalidate_io_bitmap;
arch/x86/xen/enlighten_pv.c
1392
pv_ops.cpu.update_io_bitmap = xen_update_io_bitmap;
arch/x86/xen/enlighten_pv.c
1394
pv_ops.cpu.io_delay = xen_io_delay;
arch/x86/xen/enlighten_pv.c
1395
pv_ops.cpu.start_context_switch = xen_start_context_switch;
arch/x86/xen/enlighten_pv.c
1396
pv_ops.cpu.end_context_switch = xen_end_context_switch;
arch/x86/xen/enlighten_pv.c
1573
static int xen_cpu_up_prepare_pv(unsigned int cpu)
arch/x86/xen/enlighten_pv.c
1577
if (per_cpu(xen_vcpu, cpu) == NULL)
arch/x86/xen/enlighten_pv.c
1580
xen_setup_timer(cpu);
arch/x86/xen/enlighten_pv.c
1582
rc = xen_smp_intr_init(cpu);
arch/x86/xen/enlighten_pv.c
1585
cpu, rc);
arch/x86/xen/enlighten_pv.c
1589
rc = xen_smp_intr_init_pv(cpu);
arch/x86/xen/enlighten_pv.c
1592
cpu, rc);
arch/x86/xen/enlighten_pv.c
1599
static int xen_cpu_dead_pv(unsigned int cpu)
arch/x86/xen/enlighten_pv.c
1601
xen_smp_intr_free(cpu);
arch/x86/xen/enlighten_pv.c
1602
xen_smp_intr_free_pv(cpu);
arch/x86/xen/enlighten_pv.c
1604
xen_teardown_timer(cpu);
arch/x86/xen/enlighten_pv.c
617
unsigned int cpu, unsigned int i)
arch/x86/xen/enlighten_pv.c
619
struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
arch/x86/xen/enlighten_pv.c
629
gdt = get_cpu_gdt_rw(cpu);
arch/x86/xen/enlighten_pv.c
636
static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
arch/x86/xen/enlighten_pv.c
648
load_TLS_descriptor(t, cpu, 0);
arch/x86/xen/enlighten_pv.c
649
load_TLS_descriptor(t, cpu, 1);
arch/x86/xen/enlighten_pv.c
650
load_TLS_descriptor(t, cpu, 2);
arch/x86/xen/enlighten_pv.c
96
static int xen_cpu_up_prepare_pv(unsigned int cpu);
arch/x86/xen/enlighten_pv.c
97
static int xen_cpu_dead_pv(unsigned int cpu);
arch/x86/xen/mmu_pv.c
1011
for_each_online_cpu(cpu) {
arch/x86/xen/mmu_pv.c
1012
if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
arch/x86/xen/mmu_pv.c
1013
cpumask_set_cpu(cpu, mask);
arch/x86/xen/mmu_pv.c
989
unsigned cpu;
arch/x86/xen/mmu_pv.c
995
for_each_online_cpu(cpu) {
arch/x86/xen/mmu_pv.c
996
if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
arch/x86/xen/mmu_pv.c
998
smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
arch/x86/xen/pmu.c
490
void xen_pmu_init(int cpu)
arch/x86/xen/pmu.c
499
if (xen_hvm_domain() || (cpu != 0 && !is_xen_pmu))
arch/x86/xen/pmu.c
510
xp.vcpu = cpu;
arch/x86/xen/pmu.c
517
per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data;
arch/x86/xen/pmu.c
518
per_cpu(xenpmu_shared, cpu).flags = 0;
arch/x86/xen/pmu.c
533
cpu, err);
arch/x86/xen/pmu.c
537
void xen_pmu_finish(int cpu)
arch/x86/xen/pmu.c
544
xp.vcpu = cpu;
arch/x86/xen/pmu.c
550
free_pages((unsigned long)per_cpu(xenpmu_shared, cpu).xenpmu_data, 0);
arch/x86/xen/pmu.c
551
per_cpu(xenpmu_shared, cpu).xenpmu_data = NULL;
arch/x86/xen/smp.c
100
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
arch/x86/xen/smp.c
106
per_cpu(xen_debug_irq, cpu).irq = rc;
arch/x86/xen/smp.c
109
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
arch/x86/xen/smp.c
113
per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
arch/x86/xen/smp.c
115
cpu,
arch/x86/xen/smp.c
122
per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
arch/x86/xen/smp.c
129
xen_smp_intr_free(cpu);
arch/x86/xen/smp.c
139
void xen_smp_send_reschedule(int cpu)
arch/x86/xen/smp.c
141
xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
arch/x86/xen/smp.c
147
unsigned cpu;
arch/x86/xen/smp.c
149
for_each_cpu_and(cpu, mask, cpu_online_mask)
arch/x86/xen/smp.c
150
xen_send_IPI_one(cpu, vector);
arch/x86/xen/smp.c
155
int cpu;
arch/x86/xen/smp.c
160
for_each_cpu(cpu, mask) {
arch/x86/xen/smp.c
161
if (xen_vcpu_stolen(cpu)) {
arch/x86/xen/smp.c
168
void xen_smp_send_call_function_single_ipi(int cpu)
arch/x86/xen/smp.c
170
__xen_send_IPI_mask(cpumask_of(cpu),
arch/x86/xen/smp.c
234
unsigned cpu;
arch/x86/xen/smp.c
241
for_each_cpu_and(cpu, mask, cpu_online_mask) {
arch/x86/xen/smp.c
242
if (this_cpu == cpu)
arch/x86/xen/smp.c
245
xen_send_IPI_one(cpu, xen_vector);
arch/x86/xen/smp.c
32
void xen_smp_intr_free(unsigned int cpu)
arch/x86/xen/smp.c
34
kfree(per_cpu(xen_resched_irq, cpu).name);
arch/x86/xen/smp.c
35
per_cpu(xen_resched_irq, cpu).name = NULL;
arch/x86/xen/smp.c
36
if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
arch/x86/xen/smp.c
37
unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
arch/x86/xen/smp.c
38
per_cpu(xen_resched_irq, cpu).irq = -1;
arch/x86/xen/smp.c
40
kfree(per_cpu(xen_callfunc_irq, cpu).name);
arch/x86/xen/smp.c
41
per_cpu(xen_callfunc_irq, cpu).name = NULL;
arch/x86/xen/smp.c
42
if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
arch/x86/xen/smp.c
43
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
arch/x86/xen/smp.c
44
per_cpu(xen_callfunc_irq, cpu).irq = -1;
arch/x86/xen/smp.c
46
kfree(per_cpu(xen_debug_irq, cpu).name);
arch/x86/xen/smp.c
47
per_cpu(xen_debug_irq, cpu).name = NULL;
arch/x86/xen/smp.c
48
if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
arch/x86/xen/smp.c
49
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
arch/x86/xen/smp.c
50
per_cpu(xen_debug_irq, cpu).irq = -1;
arch/x86/xen/smp.c
52
kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
arch/x86/xen/smp.c
53
per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
arch/x86/xen/smp.c
54
if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
arch/x86/xen/smp.c
55
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
arch/x86/xen/smp.c
57
per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
arch/x86/xen/smp.c
61
int xen_smp_intr_init(unsigned int cpu)
arch/x86/xen/smp.c
66
resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
arch/x86/xen/smp.c
69
per_cpu(xen_resched_irq, cpu).name = resched_name;
arch/x86/xen/smp.c
71
cpu,
arch/x86/xen/smp.c
78
per_cpu(xen_resched_irq, cpu).irq = rc;
arch/x86/xen/smp.c
80
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
arch/x86/xen/smp.c
83
per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
arch/x86/xen/smp.c
85
cpu,
arch/x86/xen/smp.c
92
per_cpu(xen_callfunc_irq, cpu).irq = rc;
arch/x86/xen/smp.c
95
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
arch/x86/xen/smp.c
99
per_cpu(xen_debug_irq, cpu).name = debug_name;
arch/x86/xen/smp_hvm.c
37
int cpu;
arch/x86/xen/smp_hvm.c
46
for_each_possible_cpu(cpu) {
arch/x86/xen/smp_hvm.c
47
if (cpu == 0)
arch/x86/xen/smp_hvm.c
51
per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
arch/x86/xen/smp_hvm.c
56
static void xen_hvm_cleanup_dead_cpu(unsigned int cpu)
arch/x86/xen/smp_hvm.c
59
xen_smp_intr_free(cpu);
arch/x86/xen/smp_hvm.c
60
xen_uninit_lock_cpu(cpu);
arch/x86/xen/smp_hvm.c
61
xen_teardown_timer(cpu);
arch/x86/xen/smp_hvm.c
65
static void xen_hvm_cleanup_dead_cpu(unsigned int cpu)
arch/x86/xen/smp_pv.c
100
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
arch/x86/xen/smp_pv.c
101
per_cpu(xen_irq_work, cpu).irq = -1;
arch/x86/xen/smp_pv.c
104
kfree(per_cpu(xen_pmu_irq, cpu).name);
arch/x86/xen/smp_pv.c
105
per_cpu(xen_pmu_irq, cpu).name = NULL;
arch/x86/xen/smp_pv.c
106
if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
arch/x86/xen/smp_pv.c
107
unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
arch/x86/xen/smp_pv.c
108
per_cpu(xen_pmu_irq, cpu).irq = -1;
arch/x86/xen/smp_pv.c
112
int xen_smp_intr_init_pv(unsigned int cpu)
arch/x86/xen/smp_pv.c
117
callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
arch/x86/xen/smp_pv.c
118
per_cpu(xen_irq_work, cpu).name = callfunc_name;
arch/x86/xen/smp_pv.c
120
cpu,
arch/x86/xen/smp_pv.c
127
per_cpu(xen_irq_work, cpu).irq = rc;
arch/x86/xen/smp_pv.c
130
pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
arch/x86/xen/smp_pv.c
131
per_cpu(xen_pmu_irq, cpu).name = pmu_name;
arch/x86/xen/smp_pv.c
132
rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
arch/x86/xen/smp_pv.c
138
per_cpu(xen_pmu_irq, cpu).irq = rc;
arch/x86/xen/smp_pv.c
144
xen_smp_intr_free_pv(cpu);
arch/x86/xen/smp_pv.c
185
unsigned cpu;
arch/x86/xen/smp_pv.c
214
for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
arch/x86/xen/smp_pv.c
216
set_cpu_possible(cpu, false);
arch/x86/xen/smp_pv.c
219
for_each_possible_cpu(cpu)
arch/x86/xen/smp_pv.c
220
set_cpu_present(cpu, true);
arch/x86/xen/smp_pv.c
224
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
arch/x86/xen/smp_pv.c
230
if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
arch/x86/xen/smp_pv.c
235
cpumask_clear_cpu(cpu, xen_cpu_initialized_map);
arch/x86/xen/smp_pv.c
239
gdt = get_cpu_gdt_rw(cpu);
arch/x86/xen/smp_pv.c
274
ctxt->gs_base_kernel = per_cpu_offset(cpu);
arch/x86/xen/smp_pv.c
279
per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
arch/x86/xen/smp_pv.c
282
if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
arch/x86/xen/smp_pv.c
289
static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
arch/x86/xen/smp_pv.c
293
rc = common_cpu_up(cpu, idle);
arch/x86/xen/smp_pv.c
297
xen_setup_runstate_info(cpu);
arch/x86/xen/smp_pv.c
300
per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
arch/x86/xen/smp_pv.c
302
rc = cpu_initialize_context(cpu, idle);
arch/x86/xen/smp_pv.c
306
xen_pmu_init(cpu);
arch/x86/xen/smp_pv.c
312
BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL));
arch/x86/xen/smp_pv.c
325
unsigned int cpu = smp_processor_id();
arch/x86/xen/smp_pv.c
326
if (cpu == 0)
arch/x86/xen/smp_pv.c
335
static void xen_pv_cpu_die(unsigned int cpu)
arch/x86/xen/smp_pv.c
337
while (HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu), NULL)) {
arch/x86/xen/smp_pv.c
343
static void xen_pv_cleanup_dead_cpu(unsigned int cpu)
arch/x86/xen/smp_pv.c
345
xen_smp_intr_free(cpu);
arch/x86/xen/smp_pv.c
346
xen_uninit_lock_cpu(cpu);
arch/x86/xen/smp_pv.c
347
xen_teardown_timer(cpu);
arch/x86/xen/smp_pv.c
348
xen_pmu_finish(cpu);
arch/x86/xen/smp_pv.c
365
static void xen_pv_cpu_die(unsigned int cpu)
arch/x86/xen/smp_pv.c
370
static void xen_pv_cleanup_dead_cpu(unsigned int cpu)
arch/x86/xen/smp_pv.c
383
int cpu = smp_processor_id();
arch/x86/xen/smp_pv.c
389
set_cpu_online(cpu, false);
arch/x86/xen/smp_pv.c
391
HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
arch/x86/xen/smp_pv.c
59
int cpu;
arch/x86/xen/smp_pv.c
71
cpu = smp_processor_id();
arch/x86/xen/smp_pv.c
72
identify_secondary_cpu(cpu);
arch/x86/xen/smp_pv.c
73
set_cpu_sibling_map(cpu);
arch/x86/xen/smp_pv.c
79
notify_cpu_starting(cpu);
arch/x86/xen/smp_pv.c
81
set_cpu_online(cpu, true);
arch/x86/xen/smp_pv.c
95
void xen_smp_intr_free_pv(unsigned int cpu)
arch/x86/xen/smp_pv.c
97
kfree(per_cpu(xen_irq_work, cpu).name);
arch/x86/xen/smp_pv.c
98
per_cpu(xen_irq_work, cpu).name = NULL;
arch/x86/xen/smp_pv.c
99
if (per_cpu(xen_irq_work, cpu).irq >= 0) {
arch/x86/xen/spinlock.c
100
per_cpu(irq_name, cpu) = NULL;
arch/x86/xen/spinlock.c
105
irq = per_cpu(lock_kicker_irq, cpu);
arch/x86/xen/spinlock.c
110
per_cpu(lock_kicker_irq, cpu) = -1;
arch/x86/xen/spinlock.c
21
static void xen_qlock_kick(int cpu)
arch/x86/xen/spinlock.c
23
int irq = per_cpu(lock_kicker_irq, cpu);
arch/x86/xen/spinlock.c
29
xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
arch/x86/xen/spinlock.c
64
void xen_init_lock_cpu(int cpu)
arch/x86/xen/spinlock.c
72
WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
arch/x86/xen/spinlock.c
73
cpu, per_cpu(lock_kicker_irq, cpu));
arch/x86/xen/spinlock.c
75
name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
arch/x86/xen/spinlock.c
76
per_cpu(irq_name, cpu) = name;
arch/x86/xen/spinlock.c
78
cpu,
arch/x86/xen/spinlock.c
86
per_cpu(lock_kicker_irq, cpu) = irq;
arch/x86/xen/spinlock.c
89
printk("cpu %d spinlock event irq %d\n", cpu, irq);
arch/x86/xen/spinlock.c
92
void xen_uninit_lock_cpu(int cpu)
arch/x86/xen/spinlock.c
99
kfree(per_cpu(irq_name, cpu));
arch/x86/xen/suspend.c
67
int cpu;
arch/x86/xen/suspend.c
71
for_each_online_cpu(cpu)
arch/x86/xen/suspend.c
72
xen_pmu_init(cpu);
arch/x86/xen/suspend.c
77
int cpu;
arch/x86/xen/suspend.c
79
for_each_online_cpu(cpu)
arch/x86/xen/suspend.c
80
xen_pmu_finish(cpu);
arch/x86/xen/suspend_hvm.c
19
unsigned int cpu;
arch/x86/xen/suspend_hvm.c
21
for_each_online_cpu(cpu)
arch/x86/xen/suspend_hvm.c
22
BUG_ON(xen_set_upcall_vector(cpu));
arch/x86/xen/time.c
252
int cpu = smp_processor_id();
arch/x86/xen/time.c
254
if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, xen_vcpu_nr(cpu),
arch/x86/xen/time.c
256
HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
arch/x86/xen/time.c
265
int cpu = smp_processor_id();
arch/x86/xen/time.c
267
if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
arch/x86/xen/time.c
277
int cpu = smp_processor_id();
arch/x86/xen/time.c
287
ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, xen_vcpu_nr(cpu),
arch/x86/xen/time.c
335
void xen_teardown_timer(int cpu)
arch/x86/xen/time.c
338
evt = &per_cpu(xen_clock_events, cpu).evt;
arch/x86/xen/time.c
346
void xen_setup_timer(int cpu)
arch/x86/xen/time.c
348
struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
arch/x86/xen/time.c
352
WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
arch/x86/xen/time.c
354
xen_teardown_timer(cpu);
arch/x86/xen/time.c
356
printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
arch/x86/xen/time.c
358
snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
arch/x86/xen/time.c
360
irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
arch/x86/xen/time.c
368
evt->cpumask = cpumask_of(cpu);
arch/x86/xen/time.c
380
int cpu;
arch/x86/xen/time.c
385
for_each_online_cpu(cpu) {
arch/x86/xen/time.c
387
xen_vcpu_nr(cpu), NULL))
arch/x86/xen/time.c
514
int cpu = smp_processor_id();
arch/x86/xen/time.c
531
if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
arch/x86/xen/time.c
555
xen_setup_runstate_info(cpu);
arch/x86/xen/time.c
556
xen_setup_timer(cpu);
arch/x86/xen/time.c
591
int cpu = smp_processor_id();
arch/x86/xen/time.c
592
xen_setup_runstate_info(cpu);
arch/x86/xen/xen-ops.h
100
void xen_init_lock_cpu(int cpu);
arch/x86/xen/xen-ops.h
101
void xen_uninit_lock_cpu(int cpu);
arch/x86/xen/xen-ops.h
106
static inline void xen_init_lock_cpu(int cpu)
arch/x86/xen/xen-ops.h
109
static inline void xen_uninit_lock_cpu(int cpu)
arch/x86/xen/xen-ops.h
153
void xen_pin_vcpu(int cpu);
arch/x86/xen/xen-ops.h
267
void xen_pmu_init(int cpu);
arch/x86/xen/xen-ops.h
268
void xen_pmu_finish(int cpu);
arch/x86/xen/xen-ops.h
270
static inline void xen_pmu_init(int cpu) {}
arch/x86/xen/xen-ops.h
271
static inline void xen_pmu_finish(int cpu) {}
arch/x86/xen/xen-ops.h
290
extern int xen_smp_intr_init(unsigned int cpu);
arch/x86/xen/xen-ops.h
291
extern void xen_smp_intr_free(unsigned int cpu);
arch/x86/xen/xen-ops.h
292
int xen_smp_intr_init_pv(unsigned int cpu);
arch/x86/xen/xen-ops.h
293
void xen_smp_intr_free_pv(unsigned int cpu);
arch/x86/xen/xen-ops.h
298
void xen_smp_send_reschedule(int cpu);
arch/x86/xen/xen-ops.h
300
void xen_smp_send_call_function_single_ipi(int cpu);
arch/x86/xen/xen-ops.h
310
static inline int xen_smp_intr_init(unsigned int cpu)
arch/x86/xen/xen-ops.h
314
static inline void xen_smp_intr_free(unsigned int cpu) {}
arch/x86/xen/xen-ops.h
316
static inline int xen_smp_intr_init_pv(unsigned int cpu)
arch/x86/xen/xen-ops.h
320
static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
arch/x86/xen/xen-ops.h
73
void xen_setup_timer(int cpu);
arch/x86/xen/xen-ops.h
74
void xen_setup_runstate_info(int cpu);
arch/x86/xen/xen-ops.h
75
void xen_teardown_timer(int cpu);
arch/x86/xen/xen-ops.h
84
void xen_vcpu_setup(int cpu);
arch/x86/xen/xen-ops.h
85
void xen_vcpu_info_reset(int cpu);
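
xen-ops.h above pairs real declarations with empty static inline stubs (e.g. xen_pmu_init/xen_smp_intr_init_pv) so callers compile unchanged when a feature is configured out. The idiom, with a placeholder config symbol:

#include <linux/types.h>

#ifdef CONFIG_DEMO_FEATURE
void demo_pmu_init(int cpu);
void demo_pmu_finish(int cpu);
#else
/* stubs: calls vanish at compile time when the feature is off */
static inline void demo_pmu_init(int cpu) {}
static inline void demo_pmu_finish(int cpu) {}
#endif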
arch/xtensa/include/asm/mmu.h
18
unsigned int cpu;
arch/xtensa/include/asm/mmu_context.h
101
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
arch/xtensa/include/asm/mmu_context.h
103
get_mmu_context(mm, cpu);
arch/xtensa/include/asm/mmu_context.h
104
set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
arch/xtensa/include/asm/mmu_context.h
118
int cpu;
arch/xtensa/include/asm/mmu_context.h
119
for_each_possible_cpu(cpu) {
arch/xtensa/include/asm/mmu_context.h
120
mm->context.asid[cpu] = NO_CONTEXT;
arch/xtensa/include/asm/mmu_context.h
122
mm->context.cpu = -1;
arch/xtensa/include/asm/mmu_context.h
129
unsigned int cpu = smp_processor_id();
arch/xtensa/include/asm/mmu_context.h
130
int migrated = next->context.cpu != cpu;
arch/xtensa/include/asm/mmu_context.h
134
next->context.cpu = cpu;
arch/xtensa/include/asm/mmu_context.h
137
activate_context(next, cpu);
arch/xtensa/include/asm/mmu_context.h
35
#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
arch/xtensa/include/asm/mmu_context.h
70
static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
arch/xtensa/include/asm/mmu_context.h
72
unsigned long asid = cpu_asid_cache(cpu);
arch/xtensa/include/asm/mmu_context.h
81
cpu_asid_cache(cpu) = asid;
arch/xtensa/include/asm/mmu_context.h
82
mm->context.asid[cpu] = asid;
arch/xtensa/include/asm/mmu_context.h
83
mm->context.cpu = cpu;
arch/xtensa/include/asm/mmu_context.h
86
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
arch/xtensa/include/asm/mmu_context.h
93
unsigned long asid = mm->context.asid[cpu];
arch/xtensa/include/asm/mmu_context.h
96
((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
arch/xtensa/include/asm/mmu_context.h
97
get_new_mmu_context(mm, cpu);
arch/xtensa/include/asm/mxregs.h
35
#define MIPICAUSE(cpu) (0x100 + (cpu))
arch/xtensa/include/asm/smp.h
14
#define raw_smp_processor_id() (current_thread_info()->cpu)
arch/xtensa/include/asm/smp.h
15
#define cpu_logical_map(cpu) (cpu)
arch/xtensa/include/asm/smp.h
24
void arch_send_call_function_single_ipi(int cpu);
arch/xtensa/include/asm/smp.h
35
void __cpu_die(unsigned int cpu);
arch/xtensa/include/asm/thread_info.h
52
__u32 cpu; /* current CPU */
arch/xtensa/include/asm/thread_info.h
89
.cpu = 0, \
arch/xtensa/include/asm/timex.h
32
void local_timer_setup(unsigned cpu);
arch/xtensa/kernel/asm-offsets.c
103
DEFINE(THREAD_CPU, offsetof(struct thread_info, cpu));
arch/xtensa/kernel/asm-offsets.c
90
OFFSET(TI_CPU, thread_info, cpu);
arch/xtensa/kernel/irq.c
169
unsigned int i, cpu = smp_processor_id();
arch/xtensa/kernel/irq.c
180
if (!cpumask_test_cpu(cpu, mask))
arch/xtensa/kernel/irq.c
187
i, cpu);
arch/xtensa/kernel/irq.c
54
unsigned cpu __maybe_unused;
arch/xtensa/kernel/irq.c
60
for_each_online_cpu(cpu)
arch/xtensa/kernel/irq.c
61
seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
arch/xtensa/kernel/perf_event.c
410
static int xtensa_pmu_setup(unsigned int cpu)
arch/xtensa/kernel/setup.c
374
static DEFINE_PER_CPU(struct cpu, cpu_data);
arch/xtensa/kernel/setup.c
381
struct cpu *cpu = &per_cpu(cpu_data, i);
arch/xtensa/kernel/setup.c
382
cpu->hotpluggable = !!i;
arch/xtensa/kernel/setup.c
383
register_cpu(cpu, i);
arch/xtensa/kernel/smp.c
108
unsigned int cpu = smp_processor_id();
arch/xtensa/kernel/smp.c
109
BUG_ON(cpu != 0);
arch/xtensa/kernel/smp.c
110
cpu_asid_cache(cpu) = ASID_USER_FIRST;
arch/xtensa/kernel/smp.c
123
unsigned int cpu = smp_processor_id();
arch/xtensa/kernel/smp.c
130
__func__, boot_secondary_processors, cpu);
arch/xtensa/kernel/smp.c
136
__func__, boot_secondary_processors, cpu);
arch/xtensa/kernel/smp.c
147
cpumask_set_cpu(cpu, mm_cpumask(mm));
arch/xtensa/kernel/smp.c
154
notify_cpu_starting(cpu);
arch/xtensa/kernel/smp.c
157
local_timer_setup(cpu);
arch/xtensa/kernel/smp.c
159
set_cpu_online(cpu, true);
arch/xtensa/kernel/smp.c
170
unsigned cpu = (unsigned)p;
arch/xtensa/kernel/smp.c
173
set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
arch/xtensa/kernel/smp.c
175
__func__, cpu, run_stall_mask, get_er(MPSCORE));
arch/xtensa/kernel/smp.c
180
unsigned cpu = (unsigned)p;
arch/xtensa/kernel/smp.c
183
set_er(run_stall_mask | (1u << cpu), MPSCORE);
arch/xtensa/kernel/smp.c
185
__func__, cpu, run_stall_mask, get_er(MPSCORE));
arch/xtensa/kernel/smp.c
193
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
arch/xtensa/kernel/smp.c
200
WRITE_ONCE(cpu_start_id, cpu);
arch/xtensa/kernel/smp.c
206
smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
arch/xtensa/kernel/smp.c
226
(void *)cpu, 1);
arch/xtensa/kernel/smp.c
234
int __cpu_up(unsigned int cpu, struct task_struct *idle)
arch/xtensa/kernel/smp.c
238
if (cpu_asid_cache(cpu) == 0)
arch/xtensa/kernel/smp.c
239
cpu_asid_cache(cpu) = ASID_USER_FIRST;
arch/xtensa/kernel/smp.c
245
__func__, cpu, idle, start_info.stack);
arch/xtensa/kernel/smp.c
248
ret = boot_secondary(cpu, idle);
arch/xtensa/kernel/smp.c
252
if (!cpu_online(cpu))
arch/xtensa/kernel/smp.c
257
pr_err("CPU %u failed to boot\n", cpu);
arch/xtensa/kernel/smp.c
269
unsigned int cpu = smp_processor_id();
arch/xtensa/kernel/smp.c
275
set_cpu_online(cpu, false);
arch/xtensa/kernel/smp.c
296
clear_tasks_mm_cpumask(cpu);
arch/xtensa/kernel/smp.c
301
static void platform_cpu_kill(unsigned int cpu)
arch/xtensa/kernel/smp.c
303
smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
arch/xtensa/kernel/smp.c
310
void __cpu_die(unsigned int cpu)
arch/xtensa/kernel/smp.c
318
if (READ_ONCE(cpu_start_id) == -cpu) {
arch/xtensa/kernel/smp.c
319
platform_cpu_kill(cpu);
arch/xtensa/kernel/smp.c
323
pr_err("CPU%u: unable to kill\n", cpu);
arch/xtensa/kernel/smp.c
390
void arch_send_call_function_single_ipi(int cpu)
arch/xtensa/kernel/smp.c
392
send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
arch/xtensa/kernel/smp.c
395
void arch_smp_send_reschedule(int cpu)
arch/xtensa/kernel/smp.c
397
send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
arch/xtensa/kernel/smp.c
409
static void ipi_cpu_stop(unsigned int cpu)
arch/xtensa/kernel/smp.c
411
set_cpu_online(cpu, false);
arch/xtensa/kernel/smp.c
417
unsigned int cpu = smp_processor_id();
arch/xtensa/kernel/smp.c
418
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
arch/xtensa/kernel/smp.c
423
msg = get_er(MIPICAUSE(cpu));
arch/xtensa/kernel/smp.c
424
set_er(msg, MIPICAUSE(cpu));
arch/xtensa/kernel/smp.c
441
ipi_cpu_stop(cpu);
arch/xtensa/kernel/smp.c
450
unsigned int cpu;
arch/xtensa/kernel/smp.c
455
for_each_online_cpu(cpu)
arch/xtensa/kernel/smp.c
457
per_cpu(ipi_data, cpu).ipi_count[i]);
arch/xtensa/kernel/time.c
127
void local_timer_setup(unsigned cpu)
arch/xtensa/kernel/time.c
129
struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
arch/xtensa/kernel/time.c
133
snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
arch/xtensa/kernel/time.c
135
clockevent->cpumask = cpumask_of(cpu);
arch/xtensa/kernel/time.c
147
struct device_node *cpu;
arch/xtensa/kernel/time.c
150
cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
arch/xtensa/kernel/time.c
151
if (cpu) {
arch/xtensa/kernel/time.c
152
clk = of_clk_get(cpu, 0);
arch/xtensa/kernel/time.c
153
of_node_put(cpu);
arch/xtensa/kernel/traps.c
440
unsigned int cpu; \
arch/xtensa/kernel/traps.c
442
for_each_possible_cpu(cpu) \
arch/xtensa/kernel/traps.c
443
per_cpu(exc_table, cpu).type[cause] = (handler);\
arch/xtensa/mm/tlb.c
100
(unsigned long)mm->context.asid[cpu], start, end);
arch/xtensa/mm/tlb.c
106
set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
arch/xtensa/mm/tlb.c
129
int cpu = smp_processor_id();
arch/xtensa/mm/tlb.c
134
if (mm->context.asid[cpu] == NO_CONTEXT)
arch/xtensa/mm/tlb.c
140
set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
arch/xtensa/mm/tlb.c
66
int cpu = smp_processor_id();
arch/xtensa/mm/tlb.c
71
mm->context.asid[cpu] = NO_CONTEXT;
arch/xtensa/mm/tlb.c
72
activate_context(mm, cpu);
arch/xtensa/mm/tlb.c
75
mm->context.asid[cpu] = NO_CONTEXT;
arch/xtensa/mm/tlb.c
76
mm->context.cpu = -1;
arch/xtensa/mm/tlb.c
92
int cpu = smp_processor_id();
arch/xtensa/mm/tlb.c
96
if (mm->context.asid[cpu] == NO_CONTEXT)
block/bio.c
785
static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
block/bio.c
791
struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);
block/bio.c
800
int cpu;
block/bio.c
806
for_each_possible_cpu(cpu) {
block/bio.c
809
cache = per_cpu_ptr(bs->cache, cpu);
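
bio_cpu_dead() above is a multi-instance hotplug callback: one handler serves many registered objects, each identified by an embedded hlist_node. A sketch of the callback side with invented names; registration would go through cpuhp_setup_state_multi() plus cpuhp_state_add_instance():

#include <linux/cpuhotplug.h>
#include <linux/kernel.h>
#include <linux/list.h>

struct demo_set {
	struct hlist_node cpuhp_dead;
	/* ... per-set state ... */
};

static int demo_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct demo_set *s = container_of(node, struct demo_set, cpuhp_dead);

	/* drain whatever the dead CPU cached for this instance */
	(void)s;
	return 0;
}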
block/blk-cgroup.c
1043
static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
block/blk-cgroup.c
1045
struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
block/blk-cgroup.c
1108
plhead = per_cpu_ptr(parent->blkcg->lhead, cpu);
block/blk-cgroup.c
1119
static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
block/blk-cgroup.c
1123
__blkcg_rstat_flush(css_to_blkcg(css), cpu);
block/blk-cgroup.c
1148
int cpu;
block/blk-cgroup.c
1152
for_each_possible_cpu(cpu) {
block/blk-cgroup.c
1155
cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
block/blk-cgroup.c
167
int cpu;
block/blk-cgroup.c
178
for_each_possible_cpu(cpu)
block/blk-cgroup.c
179
__blkcg_rstat_flush(blkcg, cpu);
block/blk-cgroup.c
2191
int rwd = blk_cgroup_io_type(bio), cpu;
block/blk-cgroup.c
2202
cpu = get_cpu();
block/blk-cgroup.c
2203
bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
block/blk-cgroup.c
2228
css_rstat_updated(&blkcg->css, cpu);
block/blk-cgroup.c
302
int i, cpu;
block/blk-cgroup.c
327
for_each_possible_cpu(cpu) {
block/blk-cgroup.c
328
u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
block/blk-cgroup.c
329
per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
block/blk-cgroup.c
37
static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu);
block/blk-cgroup.c
639
int cpu;
block/blk-cgroup.c
641
for_each_possible_cpu(cpu) {
block/blk-cgroup.c
642
struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);
block/blk-cgroup.c
85
int cpu;
block/blk-cgroup.c
91
for_each_possible_cpu(cpu)
block/blk-cgroup.c
92
init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
block/blk-core.c
1122
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
block/blk-core.c
1125
return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
block/blk-iocost.c
1604
int cpu, rw;
block/blk-iocost.c
1606
for_each_online_cpu(cpu) {
block/blk-iocost.c
1607
struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
block/blk-iocost.c
1708
int cpu;
block/blk-iocost.c
1713
for_each_possible_cpu(cpu) {
block/blk-iocost.c
1715
per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
block/blk-iocost.c
2883
int i, cpu, ret;
block/blk-iocost.c
2895
for_each_possible_cpu(cpu) {
block/blk-iocost.c
2896
struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
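
The iocost loops above accumulate per-CPU statistics: the hot path bumps a local counter with no locking, and readers sum every CPU's copy. A sketch with illustrative names (tearing of the 64-bit read on 32-bit hosts is ignored here; the block layer uses u64_stats_sync for that):

#include <linux/percpu.h>

static DEFINE_PER_CPU(u64, demo_events);

static inline void demo_count_event(void)
{
	this_cpu_inc(demo_events);	/* lock-free, preempt-safe */
}

static u64 demo_read_events(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_events, cpu);
	return sum;
}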
block/blk-iolatency.c
522
int cpu;
block/blk-iolatency.c
526
for_each_online_cpu(cpu) {
block/blk-iolatency.c
528
s = per_cpu_ptr(iolat->stats, cpu);
block/blk-iolatency.c
921
int cpu;
block/blk-iolatency.c
925
for_each_online_cpu(cpu) {
block/blk-iolatency.c
927
s = per_cpu_ptr(iolat->stats, cpu);
block/blk-iolatency.c
989
int cpu;
block/blk-iolatency.c
993
for_each_possible_cpu(cpu) {
block/blk-iolatency.c
995
stat = per_cpu_ptr(iolat->stats, cpu);
block/blk-mq-cpumap.c
113
unsigned int queue, cpu;
block/blk-mq-cpumap.c
123
for_each_cpu(cpu, mask)
block/blk-mq-cpumap.c
124
qmap->mq_map[cpu] = qmap->queue_offset + queue;
block/blk-mq-cpumap.c
62
unsigned int queue, cpu, nr_masks;
block/blk-mq-cpumap.c
66
for_each_possible_cpu(cpu)
block/blk-mq-cpumap.c
67
qmap->mq_map[cpu] = qmap->queue_offset;
block/blk-mq-cpumap.c
72
for_each_cpu(cpu, &masks[queue % nr_masks])
block/blk-mq-cpumap.c
73
qmap->mq_map[cpu] = qmap->queue_offset + queue;
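The block/blk-mq-cpumap.c matches build qmap->mq_map in two passes: default every possible CPU to the first queue, then overwrite the CPUs of each mask with that queue's index. A condensed sketch; struct my_qmap is hypothetical, only the mq_map and queue_offset fields are taken from the lines above:

/* Two-pass CPU-to-queue mapping, simplified from the entries above. */
struct my_qmap {
	unsigned int *mq_map;			/* indexed by CPU id */
	unsigned int queue_offset;
};

static void my_map_queues(struct my_qmap *qmap, const struct cpumask *masks,
			  unsigned int nr_masks)
{
	unsigned int queue, cpu;

	for_each_possible_cpu(cpu)		/* pass 1: default mapping */
		qmap->mq_map[cpu] = qmap->queue_offset;

	for (queue = 0; queue < nr_masks; queue++)
		for_each_cpu(cpu, &masks[queue])	/* pass 2: per-queue CPUs */
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
}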
block/blk-mq-debugfs.c
652
snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
block/blk-mq-sysfs.c
173
ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
block/blk-mq-sysfs.c
196
int cpu;
block/blk-mq-sysfs.c
198
for_each_possible_cpu(cpu) {
block/blk-mq-sysfs.c
199
ctx = per_cpu_ptr(q->queue_ctx, cpu);
block/blk-mq-sysfs.c
208
int cpu;
block/blk-mq-sysfs.c
212
for_each_possible_cpu(cpu) {
block/blk-mq-sysfs.c
213
ctx = per_cpu_ptr(q->queue_ctx, cpu);
block/blk-mq-tag.c
235
sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
block/blk-mq-tag.c
237
sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
block/blk-mq.c
1261
static int blk_softirq_cpu_dead(unsigned int cpu)
block/blk-mq.c
1263
blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
block/blk-mq.c
1274
int cpu = raw_smp_processor_id();
block/blk-mq.c
1289
if (cpu == rq->mq_ctx->cpu ||
block/blk-mq.c
1291
cpus_share_cache(cpu, rq->mq_ctx->cpu) &&
block/blk-mq.c
1292
cpus_equal_capacity(cpu, rq->mq_ctx->cpu)))
block/blk-mq.c
1296
return cpu_online(rq->mq_ctx->cpu);
block/blk-mq.c
1301
unsigned int cpu;
block/blk-mq.c
1303
cpu = rq->mq_ctx->cpu;
block/blk-mq.c
1304
if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
block/blk-mq.c
1305
smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
block/blk-mq.c
1329
rq->mq_ctx->cpu == raw_smp_processor_id()) ||
block/blk-mq.c
2246
int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
block/blk-mq.c
2248
if (cpu >= nr_cpu_ids)
block/blk-mq.c
2249
cpu = cpumask_first(hctx->cpumask);
block/blk-mq.c
2250
return cpu;
block/blk-mq.c
3706
int cpu;
block/blk-mq.c
3713
for_each_online_cpu(cpu) {
block/blk-mq.c
3715
type, cpu);
block/blk-mq.c
3721
if (this_cpu != cpu)
block/blk-mq.c
3728
static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
block/blk-mq.c
3734
if (!hctx->nr_ctx || blk_mq_hctx_has_online_cpu(hctx, cpu))
block/blk-mq.c
3780
static bool blk_mq_cpu_mapped_to_hctx(unsigned int cpu,
block/blk-mq.c
3784
hctx->type, cpu);
block/blk-mq.c
3789
static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
block/blk-mq.c
3794
if (blk_mq_cpu_mapped_to_hctx(cpu, hctx))
block/blk-mq.c
3804
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
block/blk-mq.c
3812
if (!blk_mq_cpu_mapped_to_hctx(cpu, hctx))
block/blk-mq.c
3815
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
block/blk-mq.c
4098
__ctx->cpu = i;
block/blk-mq.c
4240
int cpu;
block/blk-mq.c
4275
for_each_cpu(cpu, hctx->cpumask) {
block/blk-mq.c
4276
if (cpu_is_isolated(cpu))
block/blk-mq.c
4277
cpumask_clear_cpu(cpu, hctx->cpumask);
block/blk-mq.c
4363
int cpu;
block/blk-mq.c
4373
for_each_possible_cpu(cpu) {
block/blk-mq.c
4374
struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
block/blk-mq.c
5273
return rq->mq_ctx->cpu;
block/blk-mq.c
716
unsigned int cpu;
block/blk-mq.c
749
cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
block/blk-mq.c
750
if (cpu >= nr_cpu_ids)
block/blk-mq.c
752
data.ctx = __blk_mq_get_ctx(q, cpu);
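blk_mq_hctx_notify_online/offline/dead above have the (unsigned int cpu, struct hlist_node *node) shape used by the multi-instance CPU-hotplug API, where one registered state serves many objects. A registration sketch; the callbacks and the embedding object are hypothetical:

#include <linux/cpuhotplug.h>

struct my_obj {
	struct hlist_node cpuhp_node;	/* links this instance into the state */
};

static int my_online(unsigned int cpu, struct hlist_node *node)
{
	struct my_obj *obj = hlist_entry(node, struct my_obj, cpuhp_node);

	(void)obj;			/* bring per-object state up for @cpu */
	return 0;
}

static int my_offline(unsigned int cpu, struct hlist_node *node)
{
	return 0;			/* quiesce per-object state for @cpu */
}

/* One-time setup returns the dynamic state number; then one instance
 * is added per object:
 *
 *	int st = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *					 "block/my:online",
 *					 my_online, my_offline);
 *	cpuhp_state_add_instance_nocalls(st, &obj->cpuhp_node);
 */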
block/blk-mq.h
144
unsigned int cpu)
block/blk-mq.h
146
return per_cpu_ptr(q->queue_ctx, cpu);
block/blk-mq.h
25
unsigned int cpu;
block/blk-mq.h
382
int cpu;
block/blk-mq.h
384
for_each_possible_cpu(cpu)
block/blk-mq.h
385
qmap->mq_map[cpu] = 0;
block/blk-mq.h
85
unsigned int cpu)
block/blk-mq.h
87
return queue_hctx((q), (q->tag_set->map[type].mq_map[cpu]));
block/blk-stat.c
137
int cpu;
block/blk-stat.c
139
for_each_possible_cpu(cpu) {
block/blk-stat.c
142
cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
block/blk-stat.c
55
int bucket, cpu;
block/blk-stat.c
61
cpu = get_cpu();
block/blk-stat.c
70
stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
block/blk-stat.c
81
int cpu;
block/blk-stat.c
86
for_each_online_cpu(cpu) {
block/blk-stat.c
89
cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
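block/blk-stat.c (the line-55 region above) pins the task with get_cpu() before touching its own per-CPU bucket. get_cpu() disables preemption and returns the current CPU id, so the pointer stays valid until put_cpu(); a sketch with hypothetical names:

/* Preemption-safe update of the current CPU's counter. */
static void my_account(u64 __percpu *counters, u64 value)
{
	int cpu = get_cpu();			/* disables preemption */

	*per_cpu_ptr(counters, cpu) += value;	/* safe: we cannot migrate */
	put_cpu();				/* re-enables preemption */
}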
block/genhd.c
110
int cpu;
block/genhd.c
113
for_each_possible_cpu(cpu) {
block/genhd.c
114
struct disk_stats *ptr = per_cpu_ptr(part->bd_stats, cpu);
block/genhd.c
133
int cpu;
block/genhd.c
140
for_each_possible_cpu(cpu) {
block/genhd.c
141
read += part_stat_local_read_cpu(part, in_flight[READ], cpu);
block/genhd.c
142
write += part_stat_local_read_cpu(part, in_flight[WRITE], cpu);
block/kyber-iosched.c
274
int cpu;
block/kyber-iosched.c
278
for_each_online_cpu(cpu) {
block/kyber-iosched.c
281
cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
block/kyber-iosched.c
543
rq->mq_ctx->cpu);
crypto/acompress.c
362
int cpu;
crypto/acompress.c
364
for_each_cpu(cpu, &s->stream_want) {
crypto/acompress.c
368
ps = per_cpu_ptr(streams, cpu);
crypto/acompress.c
380
cpumask_clear_cpu(cpu, &s->stream_want);
crypto/acompress.c
450
int cpu = raw_smp_processor_id();
crypto/acompress.c
453
ps = per_cpu_ptr(streams, cpu);
crypto/acompress.c
459
cpumask_set_cpu(cpu, &s->stream_want);
crypto/cryptd.c
104
int cpu;
crypto/cryptd.c
110
for_each_possible_cpu(cpu) {
crypto/cryptd.c
111
cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
crypto/cryptd.c
122
int cpu;
crypto/cryptd.c
125
for_each_possible_cpu(cpu) {
crypto/cryptd.c
126
cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
crypto/scompress.c
102
scratch = per_cpu_ptr(&scomp_scratch, cpu);
crypto/scompress.c
105
if (scomp_alloc_scratch(scratch, cpu))
crypto/scompress.c
108
cpumask_clear_cpu(cpu, &scomp_scratch_want);
crypto/scompress.c
145
int cpu = raw_smp_processor_id();
crypto/scompress.c
148
scratch = per_cpu_ptr(&scomp_scratch, cpu);
crypto/scompress.c
154
cpumask_set_cpu(cpu, &scomp_scratch_want);
crypto/scompress.c
81
static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu)
crypto/scompress.c
83
int node = cpu_to_node(cpu);
crypto/scompress.c
97
int cpu;
crypto/scompress.c
99
for_each_cpu(cpu, &scomp_scratch_want) {
drivers/accel/ivpu/ivpu_mmu_context.c
51
void *cpu;
drivers/accel/ivpu/ivpu_mmu_context.c
63
cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
drivers/accel/ivpu/ivpu_mmu_context.c
64
if (!cpu)
drivers/accel/ivpu/ivpu_mmu_context.c
69
return cpu;
drivers/acpi/acpi_extlog.c
111
printk("%s""Hardware error detected on CPU%d\n", pfx_seq, cpu);
drivers/acpi/acpi_extlog.c
116
struct acpi_hest_generic_status *estatus, int cpu)
drivers/acpi/acpi_extlog.c
129
__print_extlog_rcd(pfx, estatus, cpu);
drivers/acpi/acpi_extlog.c
188
int cpu = mce->extcpu;
drivers/acpi/acpi_extlog.c
196
estatus = extlog_elog_entry_check(cpu, bank);
drivers/acpi/acpi_extlog.c
212
print_extlog_rcd(NULL, tmp, cpu);
drivers/acpi/acpi_extlog.c
65
#define ELOG_IDX(cpu, bank) \
drivers/acpi/acpi_extlog.c
66
(cpu_physical_id(cpu) * l1_percpu_entry + (bank))
drivers/acpi/acpi_extlog.c
74
static struct acpi_hest_generic_status *extlog_elog_entry_check(int cpu, int bank)
drivers/acpi/acpi_extlog.c
80
WARN_ON(cpu < 0);
drivers/acpi/acpi_extlog.c
81
idx = ELOG_IDX(cpu, bank);
drivers/acpi/acpi_extlog.c
97
struct acpi_hest_generic_status *estatus, int cpu)
drivers/acpi/acpi_pad.c
104
for_each_cpu(cpu, pad_busy_cpus)
drivers/acpi/acpi_pad.c
105
cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
drivers/acpi/acpi_pad.c
115
for_each_cpu(cpu, tmp) {
drivers/acpi/acpi_pad.c
116
if (cpu_weight[cpu] < min_weight) {
drivers/acpi/acpi_pad.c
117
min_weight = cpu_weight[cpu];
drivers/acpi/acpi_pad.c
118
preferred_cpu = cpu;
drivers/acpi/acpi_pad.c
95
int cpu;
drivers/acpi/acpi_processor.c
38
acpi_handle acpi_get_processor_handle(int cpu)
drivers/acpi/acpi_processor.c
42
pr = per_cpu(processors, cpu);
drivers/acpi/acpi_processor.c
837
int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
drivers/acpi/acpi_processor.c
933
if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
drivers/acpi/arm64/cpuidle.c
17
static int psci_acpi_cpu_init_idle(unsigned int cpu)
drivers/acpi/arm64/cpuidle.c
21
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/arm64/cpuidle.c
55
int acpi_processor_ffh_lpi_probe(unsigned int cpu)
drivers/acpi/arm64/cpuidle.c
57
return psci_acpi_cpu_init_idle(cpu);
drivers/acpi/cppc_acpi.c
1002
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
drivers/acpi/cppc_acpi.c
1006
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
drivers/acpi/cppc_acpi.c
1044
return cpc_read_ffh(cpu, reg, val);
drivers/acpi/cppc_acpi.c
1079
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
drivers/acpi/cppc_acpi.c
1085
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
drivers/acpi/cppc_acpi.c
1117
return cpc_write_ffh(cpu, reg, val);
drivers/acpi/cppc_acpi.c
1123
cpc_desc = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
1125
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
drivers/acpi/cppc_acpi.c
1181
static int cppc_get_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 *val)
drivers/acpi/cppc_acpi.c
1183
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
drivers/acpi/cppc_acpi.c
1197
ret = cpc_read(cpu, reg, val);
drivers/acpi/cppc_acpi.c
1206
static int cppc_get_reg_val(int cpu, enum cppc_regs reg_idx, u64 *val)
drivers/acpi/cppc_acpi.c
1208
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
1215
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
drivers/acpi/cppc_acpi.c
1229
return cppc_get_reg_val_in_pcc(cpu, reg, val);
drivers/acpi/cppc_acpi.c
1231
return cpc_read(cpu, reg, val);
drivers/acpi/cppc_acpi.c
1234
static int cppc_set_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 val)
drivers/acpi/cppc_acpi.c
1236
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
drivers/acpi/cppc_acpi.c
1245
ret = cpc_write(cpu, reg, val);
drivers/acpi/cppc_acpi.c
1259
static int cppc_set_reg_val(int cpu, enum cppc_regs reg_idx, u64 val)
drivers/acpi/cppc_acpi.c
1261
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
1265
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
drivers/acpi/cppc_acpi.c
1278
return cppc_set_reg_val_in_pcc(cpu, reg, val);
drivers/acpi/cppc_acpi.c
1280
return cpc_write(cpu, reg, val);
drivers/acpi/cppc_acpi.c
1431
bool cppc_perf_ctrs_in_pcc_cpu(unsigned int cpu)
drivers/acpi/cppc_acpi.c
1433
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
1462
int cpu;
drivers/acpi/cppc_acpi.c
1464
for_each_online_cpu(cpu) {
drivers/acpi/cppc_acpi.c
1465
if (cppc_perf_ctrs_in_pcc_cpu(cpu))
drivers/acpi/cppc_acpi.c
1557
int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
drivers/acpi/cppc_acpi.c
1559
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
drivers/acpi/cppc_acpi.c
1562
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
1567
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
drivers/acpi/cppc_acpi.c
1576
pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
drivers/acpi/cppc_acpi.c
1581
ret = cpc_write(cpu, auto_sel_reg, enable);
drivers/acpi/cppc_acpi.c
1587
ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
drivers/acpi/cppc_acpi.c
1600
ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
drivers/acpi/cppc_acpi.c
1615
int cppc_set_epp(int cpu, u64 epp_val)
drivers/acpi/cppc_acpi.c
1620
return cppc_set_reg_val(cpu, ENERGY_PERF, epp_val);
drivers/acpi/cppc_acpi.c
1633
int cppc_get_auto_act_window(int cpu, u64 *auto_act_window)
drivers/acpi/cppc_acpi.c
1642
ret = cppc_get_reg_val(cpu, AUTO_ACT_WINDOW, &val);
drivers/acpi/cppc_acpi.c
1663
int cppc_set_auto_act_window(int cpu, u64 auto_act_window)
drivers/acpi/cppc_acpi.c
1689
return cppc_set_reg_val(cpu, AUTO_ACT_WINDOW, val);
drivers/acpi/cppc_acpi.c
1698
int cppc_get_auto_sel(int cpu, bool *enable)
drivers/acpi/cppc_acpi.c
1706
ret = cppc_get_reg_val(cpu, AUTO_SEL_ENABLE, &auto_sel);
drivers/acpi/cppc_acpi.c
1721
int cppc_set_auto_sel(int cpu, bool enable)
drivers/acpi/cppc_acpi.c
1723
return cppc_set_reg_val(cpu, AUTO_SEL_ENABLE, enable);
drivers/acpi/cppc_acpi.c
1735
int cppc_set_enable(int cpu, bool enable)
drivers/acpi/cppc_acpi.c
1737
return cppc_set_reg_val(cpu, ENABLE, enable);
drivers/acpi/cppc_acpi.c
1748
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
drivers/acpi/cppc_acpi.c
1750
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
1752
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
drivers/acpi/cppc_acpi.c
1757
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
drivers/acpi/cppc_acpi.c
1795
cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
drivers/acpi/cppc_acpi.c
1803
cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
drivers/acpi/cppc_acpi.c
1805
cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);
drivers/acpi/cppc_acpi.c
458
int cpu;
drivers/acpi/cppc_acpi.c
463
for_each_online_cpu(cpu) {
drivers/acpi/cppc_acpi.c
464
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
477
int cpu;
drivers/acpi/cppc_acpi.c
479
for_each_online_cpu(cpu) {
drivers/acpi/cppc_acpi.c
480
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
498
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
drivers/acpi/cppc_acpi.c
509
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
514
cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
drivers/acpi/cppc_acpi.c
528
if (i == cpu)
drivers/acpi/cppc_acpi.c
554
cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
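The drivers/acpi/cppc_acpi.c matches reach their descriptors through per_cpu(cpc_desc_ptr, cpu), the accessor for statically declared per-CPU variables, and tolerate a NULL slot. A minimal sketch with a hypothetical descriptor type:

/* Static per-CPU slot addressed with per_cpu(var, cpu); names hypothetical. */
struct my_desc {
	int id;
};

static DEFINE_PER_CPU(struct my_desc *, my_desc_ptr);

static struct my_desc *my_lookup(int cpu)
{
	struct my_desc *d = per_cpu(my_desc_ptr, cpu);

	if (!d)
		pr_debug("No descriptor for CPU:%d\n", cpu);
	return d;	/* may be NULL, like the cpc_desc_ptr checks above */
}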
drivers/acpi/pptt.c
1018
int cpu;
drivers/acpi/pptt.c
1030
for_each_possible_cpu(cpu) {
drivers/acpi/pptt.c
1033
u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
drivers/acpi/pptt.c
1058
cpumask_set_cpu(cpu, cpus);
drivers/acpi/pptt.c
458
unsigned int cpu)
drivers/acpi/pptt.c
461
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
drivers/acpi/pptt.c
462
u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
drivers/acpi/pptt.c
467
while (index < get_cpu_cacheinfo(cpu)->num_leaves) {
drivers/acpi/pptt.c
483
struct acpi_pptt_processor *cpu)
drivers/acpi/pptt.c
492
if (cpu->flags & ACPI_PPTT_ACPI_IDENTICAL) {
drivers/acpi/pptt.c
493
next = fetch_pptt_node(table_hdr, cpu->parent);
drivers/acpi/pptt.c
505
struct acpi_pptt_processor *cpu,
drivers/acpi/pptt.c
510
while (cpu && level) {
drivers/acpi/pptt.c
513
if (flag_identical(table_hdr, cpu))
drivers/acpi/pptt.c
515
} else if (cpu->flags & flag)
drivers/acpi/pptt.c
518
prev_node = fetch_pptt_node(table_hdr, cpu->parent);
drivers/acpi/pptt.c
521
cpu = prev_node;
drivers/acpi/pptt.c
524
return cpu;
drivers/acpi/pptt.c
546
unsigned int cpu, int level, int flag)
drivers/acpi/pptt.c
549
u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
drivers/acpi/pptt.c
567
cpu, acpi_cpu_id);
drivers/acpi/pptt.c
593
static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
drivers/acpi/pptt.c
602
retval = topology_get_acpi_cpu_tag(table, cpu, level, flag);
drivers/acpi/pptt.c
604
cpu, level, retval);
drivers/acpi/pptt.c
622
static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
drivers/acpi/pptt.c
625
u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
drivers/acpi/pptt.c
657
int acpi_get_cache_info(unsigned int cpu, unsigned int *levels,
drivers/acpi/pptt.c
672
pr_debug("Cache Setup: find cache levels for CPU=%d\n", cpu);
drivers/acpi/pptt.c
674
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
drivers/acpi/pptt.c
700
int cache_setup_acpi(unsigned int cpu)
drivers/acpi/pptt.c
708
pr_debug("Cache Setup ACPI CPU %d\n", cpu);
drivers/acpi/pptt.c
710
cache_setup_acpi_cpu(table, cpu);
drivers/acpi/pptt.c
724
int acpi_pptt_cpu_is_thread(unsigned int cpu)
drivers/acpi/pptt.c
726
return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD);
drivers/acpi/pptt.c
747
int find_acpi_cpu_topology(unsigned int cpu, int level)
drivers/acpi/pptt.c
749
return find_acpi_cpu_topology_tag(cpu, level, 0);
drivers/acpi/pptt.c
765
int find_acpi_cpu_topology_package(unsigned int cpu)
drivers/acpi/pptt.c
767
return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
drivers/acpi/pptt.c
788
int find_acpi_cpu_topology_cluster(unsigned int cpu)
drivers/acpi/pptt.c
800
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
drivers/acpi/pptt.c
846
int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
drivers/acpi/pptt.c
848
return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
drivers/acpi/pptt.c
870
int cpu;
drivers/acpi/pptt.c
874
for_each_possible_cpu(cpu) {
drivers/acpi/pptt.c
875
acpi_id = get_acpi_id_for_cpu(cpu);
drivers/acpi/pptt.c
880
cpumask_set_cpu(cpu, cpus);
drivers/acpi/pptt.c
956
int cpu;
drivers/acpi/pptt.c
966
for_each_possible_cpu(cpu) {
drivers/acpi/pptt.c
969
u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
drivers/acpi/processor_driver.c
100
static int acpi_soft_cpu_online(unsigned int cpu)
drivers/acpi/processor_driver.c
102
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/processor_driver.c
131
static int acpi_soft_cpu_dead(unsigned int cpu)
drivers/acpi/processor_driver.c
133
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/processor_idle.c
1079
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
drivers/acpi/processor_idle.c
1252
dev->cpu = pr->id;
drivers/acpi/processor_idle.c
1300
int cpu;
drivers/acpi/processor_idle.c
1323
for_each_online_cpu(cpu) {
drivers/acpi/processor_idle.c
1324
_pr = per_cpu(processors, cpu);
drivers/acpi/processor_idle.c
1327
dev = per_cpu(acpi_cpuidle_device, cpu);
drivers/acpi/processor_idle.c
1336
for_each_online_cpu(cpu) {
drivers/acpi/processor_idle.c
1337
_pr = per_cpu(processors, cpu);
drivers/acpi/processor_idle.c
1342
dev = per_cpu(acpi_cpuidle_device, cpu);
drivers/acpi/processor_idle.c
1358
int cpu;
drivers/acpi/processor_idle.c
1366
for_each_possible_cpu(cpu) {
drivers/acpi/processor_idle.c
1367
pr = per_cpu(processors, cpu);
drivers/acpi/processor_idle.c
583
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
drivers/acpi/processor_idle.c
680
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
drivers/acpi/processor_idle.c
694
cx = per_cpu(acpi_cstate[index], dev->cpu);
drivers/acpi/processor_idle.c
709
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
drivers/acpi/processor_idle.c
750
per_cpu(acpi_cstate[count], dev->cpu) = cx;
drivers/acpi/processor_perflib.c
152
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
drivers/acpi/processor_perflib.c
156
pr = per_cpu(processors, cpu);
drivers/acpi/processor_perflib.c
174
unsigned int cpu;
drivers/acpi/processor_perflib.c
179
for_each_cpu(cpu, policy->related_cpus) {
drivers/acpi/processor_perflib.c
180
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/processor_perflib.c
198
cpu, ret);
drivers/acpi/processor_perflib.c
206
cpu, ret);
drivers/acpi/processor_perflib.c
212
unsigned int cpu;
drivers/acpi/processor_perflib.c
214
for_each_cpu(cpu, policy->related_cpus) {
drivers/acpi/processor_perflib.c
215
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/processor_perflib.c
743
*performance, unsigned int cpu)
drivers/acpi/processor_perflib.c
752
pr = per_cpu(processors, cpu);
drivers/acpi/processor_perflib.c
778
void acpi_processor_unregister_performance(unsigned int cpu)
drivers/acpi/processor_perflib.c
784
pr = per_cpu(processors, cpu);
drivers/acpi/processor_thermal.c
101
(100 - reduction_step(cpu) * cpufreq_thermal_reduction_pctg)) / 100;
drivers/acpi/processor_thermal.c
112
static int cpufreq_set_cur_state(unsigned int cpu, int state)
drivers/acpi/processor_thermal.c
117
if (!cpu_has_cpufreq(cpu))
drivers/acpi/processor_thermal.c
120
reduction_step(cpu) = state;
drivers/acpi/processor_thermal.c
129
topology_physical_package_id(cpu))
drivers/acpi/processor_thermal.c
162
unsigned int cpu;
drivers/acpi/processor_thermal.c
166
for_each_cpu(cpu, policy->related_cpus) {
drivers/acpi/processor_thermal.c
167
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/processor_thermal.c
178
cpu, ret);
drivers/acpi/processor_thermal.c
188
unsigned int cpu;
drivers/acpi/processor_thermal.c
190
for_each_cpu(cpu, policy->related_cpus) {
drivers/acpi/processor_thermal.c
191
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/processor_thermal.c
202
static int cpufreq_get_max_state(unsigned int cpu)
drivers/acpi/processor_thermal.c
207
static int cpufreq_get_cur_state(unsigned int cpu)
drivers/acpi/processor_thermal.c
212
static int cpufreq_set_cur_state(unsigned int cpu, int state)
drivers/acpi/processor_thermal.c
44
#define reduction_step(cpu) \
drivers/acpi/processor_thermal.c
45
per_cpu(cpufreq_thermal_reduction_step, phys_package_first_cpu(cpu))
drivers/acpi/processor_thermal.c
54
static int phys_package_first_cpu(int cpu)
drivers/acpi/processor_thermal.c
57
int id = topology_physical_package_id(cpu);
drivers/acpi/processor_thermal.c
65
static bool cpu_has_cpufreq(unsigned int cpu)
drivers/acpi/processor_thermal.c
70
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/acpi/processor_thermal.c
75
static int cpufreq_get_max_state(unsigned int cpu)
drivers/acpi/processor_thermal.c
77
if (!cpu_has_cpufreq(cpu))
drivers/acpi/processor_thermal.c
83
static int cpufreq_get_cur_state(unsigned int cpu)
drivers/acpi/processor_thermal.c
85
if (!cpu_has_cpufreq(cpu))
drivers/acpi/processor_thermal.c
88
return reduction_step(cpu);
drivers/acpi/processor_thermal.c
91
static bool cpufreq_update_thermal_limit(unsigned int cpu, struct acpi_processor *pr)
drivers/acpi/processor_thermal.c
96
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/acpi/processor_throttling.c
1088
t_state.cpu = i;
drivers/acpi/processor_throttling.c
1146
t_state.cpu = i;
drivers/acpi/processor_throttling.c
205
unsigned int cpu;
drivers/acpi/processor_throttling.c
210
cpu = p_tstate->cpu;
drivers/acpi/processor_throttling.c
211
pr = per_cpu(processors, cpu);
drivers/acpi/processor_throttling.c
219
cpu);
drivers/acpi/processor_throttling.c
244
cpu, target_state);
drivers/acpi/processor_throttling.c
254
cpu, target_state);
drivers/acpi/processor_throttling.c
37
unsigned int cpu; /* cpu nr */
drivers/acpi/riscv/cppc.c
110
smp_call_function_single(cpu, sbi_cppc_read, &data, 1);
drivers/acpi/riscv/cppc.c
118
smp_call_function_single(cpu, cppc_ffh_csr_read, &data, 1);
drivers/acpi/riscv/cppc.c
128
int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val)
drivers/acpi/riscv/cppc.c
142
smp_call_function_single(cpu, sbi_cppc_write, &data, 1);
drivers/acpi/riscv/cppc.c
149
smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1);
drivers/acpi/riscv/cppc.c
97
int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
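The drivers/acpi/riscv/cppc.c matches perform register accesses on the target CPU via smp_call_function_single(), since the underlying CSRs are CPU-local. A minimal sketch; the payload struct and the stand-in read are hypothetical:

#include <linux/smp.h>

struct my_read_data {
	u64 val;			/* filled in on the remote CPU */
};

static void my_remote_read(void *info)
{
	struct my_read_data *d = info;

	d->val = 42;			/* stand-in for a CPU-local register read */
}

static u64 my_read_on(int cpu)
{
	struct my_read_data data = { 0 };

	/* wait=1: block until my_remote_read() has finished on @cpu */
	smp_call_function_single(cpu, my_remote_read, &data, 1);
	return data.val;
}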
drivers/acpi/riscv/cpuidle.c
22
static int acpi_cpu_init_idle(unsigned int cpu)
drivers/acpi/riscv/cpuidle.c
26
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/riscv/cpuidle.c
64
int acpi_processor_ffh_lpi_probe(unsigned int cpu)
drivers/acpi/riscv/cpuidle.c
66
return acpi_cpu_init_idle(cpu);
drivers/acpi/riscv/rhct.c
38
int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const char **isa)
drivers/acpi/riscv/rhct.c
47
u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
drivers/base/arch_numa.c
121
void numa_store_cpu_info(unsigned int cpu)
drivers/base/arch_numa.c
123
set_cpu_numa_node(cpu, cpu_to_node_map[cpu]);
drivers/base/arch_numa.c
126
void __init early_map_cpu_to_node(unsigned int cpu, int nid)
drivers/base/arch_numa.c
132
cpu_to_node_map[cpu] = nid;
drivers/base/arch_numa.c
139
if (!cpu)
drivers/base/arch_numa.c
140
set_cpu_numa_node(cpu, nid);
drivers/base/arch_numa.c
147
int early_cpu_to_node(int cpu)
drivers/base/arch_numa.c
149
return cpu_to_node_map[cpu];
drivers/base/arch_numa.c
160
unsigned int cpu;
drivers/base/arch_numa.c
187
for_each_possible_cpu(cpu)
drivers/base/arch_numa.c
188
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
drivers/base/arch_numa.c
356
void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
drivers/base/arch_numa.c
371
cpumask_set_cpu(cpu, mask);
drivers/base/arch_numa.c
373
cpumask_clear_cpu(cpu, mask);
drivers/base/arch_numa.c
377
cpu, node, cpumask_pr_args(mask));
drivers/base/arch_numa.c
63
static void numa_update_cpu(unsigned int cpu, bool remove)
drivers/base/arch_numa.c
65
int nid = cpu_to_node(cpu);
drivers/base/arch_numa.c
71
cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
drivers/base/arch_numa.c
73
cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
drivers/base/arch_numa.c
76
void numa_add_cpu(unsigned int cpu)
drivers/base/arch_numa.c
78
numa_update_cpu(cpu, false);
drivers/base/arch_numa.c
81
void numa_remove_cpu(unsigned int cpu)
drivers/base/arch_numa.c
83
numa_update_cpu(cpu, true);
drivers/base/arch_numa.c
87
void numa_clear_node(unsigned int cpu)
drivers/base/arch_numa.c
89
numa_remove_cpu(cpu);
drivers/base/arch_numa.c
90
set_cpu_numa_node(cpu, NUMA_NO_NODE);
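numa_update_cpu() above toggles one CPU's bit in its node's cpumask, with numa_add_cpu()/numa_remove_cpu() as thin wrappers over the same helper. A compact sketch of that symmetric set/clear; the map name is hypothetical:

/* Hypothetical node-to-CPUs bookkeeping, mirroring numa_update_cpu(). */
static struct cpumask my_node_cpus[MAX_NUMNODES];

static void my_numa_update_cpu(unsigned int cpu, bool remove)
{
	int nid = cpu_to_node(cpu);

	if (nid == NUMA_NO_NODE)
		return;

	if (remove)
		cpumask_clear_cpu(cpu, &my_node_cpus[nid]);
	else
		cpumask_set_cpu(cpu, &my_node_cpus[nid]);
}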
drivers/base/arch_topology.c
105
int cpu;
drivers/base/arch_topology.c
109
for_each_cpu(cpu, cpus) {
drivers/base/arch_topology.c
110
sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
drivers/base/arch_topology.c
113
rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
drivers/base/arch_topology.c
114
cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
drivers/base/arch_topology.c
185
int cpu;
drivers/base/arch_topology.c
187
cpu = cpumask_first(cpus);
drivers/base/arch_topology.c
188
max_capacity = arch_scale_cpu_capacity(cpu);
drivers/base/arch_topology.c
189
max_freq = arch_scale_freq_ref(cpu);
drivers/base/arch_topology.c
202
trace_hw_pressure_update(cpu, pressure);
drivers/base/arch_topology.c
204
for_each_cpu(cpu, cpus)
drivers/base/arch_topology.c
205
WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
drivers/base/arch_topology.c
245
int cpu;
drivers/base/arch_topology.c
251
for_each_possible_cpu(cpu) {
drivers/base/arch_topology.c
252
capacity = raw_capacity[cpu] *
drivers/base/arch_topology.c
253
(per_cpu(capacity_freq_ref, cpu) ?: 1);
drivers/base/arch_topology.c
258
for_each_possible_cpu(cpu) {
drivers/base/arch_topology.c
259
capacity = raw_capacity[cpu] *
drivers/base/arch_topology.c
260
(per_cpu(capacity_freq_ref, cpu) ?: 1);
drivers/base/arch_topology.c
263
topology_set_cpu_scale(cpu, capacity);
drivers/base/arch_topology.c
265
cpu, topology_get_cpu_scale(cpu));
drivers/base/arch_topology.c
269
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
drivers/base/arch_topology.c
291
raw_capacity[cpu] = cpu_capacity;
drivers/base/arch_topology.c
293
cpu_node, raw_capacity[cpu]);
drivers/base/arch_topology.c
303
per_cpu(capacity_freq_ref, cpu) =
drivers/base/arch_topology.c
320
void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
drivers/base/arch_topology.c
331
int cpu;
drivers/base/arch_topology.c
341
for_each_possible_cpu(cpu) {
drivers/base/arch_topology.c
342
if (!cppc_get_perf_caps(cpu, &perf_caps) &&
drivers/base/arch_topology.c
345
raw_capacity[cpu] = perf_caps.highest_perf;
drivers/base/arch_topology.c
346
capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);
drivers/base/arch_topology.c
348
per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);
drivers/base/arch_topology.c
351
cpu, raw_capacity[cpu]);
drivers/base/arch_topology.c
355
pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
drivers/base/arch_topology.c
360
for_each_possible_cpu(cpu) {
drivers/base/arch_topology.c
361
freq_inv_set_max_ratio(cpu,
drivers/base/arch_topology.c
362
per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
drivers/base/arch_topology.c
364
capacity = raw_capacity[cpu];
drivers/base/arch_topology.c
367
topology_set_cpu_scale(cpu, capacity);
drivers/base/arch_topology.c
369
cpu, topology_get_cpu_scale(cpu));
drivers/base/arch_topology.c
395
int cpu;
drivers/base/arch_topology.c
406
for_each_cpu(cpu, policy->related_cpus) {
drivers/base/arch_topology.c
407
per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
drivers/base/arch_topology.c
408
freq_inv_set_max_ratio(cpu,
drivers/base/arch_topology.c
409
per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
drivers/base/arch_topology.c
483
int cpu;
drivers/base/arch_topology.c
490
cpu = of_cpu_node_to_id(cpu_node);
drivers/base/arch_topology.c
491
if (cpu >= 0)
drivers/base/arch_topology.c
492
topology_parse_cpu_capacity(cpu_node, cpu);
drivers/base/arch_topology.c
497
return cpu;
drivers/base/arch_topology.c
506
int cpu;
drivers/base/arch_topology.c
517
cpu = get_cpu_for_node(t);
drivers/base/arch_topology.c
518
if (cpu >= 0) {
drivers/base/arch_topology.c
519
cpu_topology[cpu].package_id = package_id;
drivers/base/arch_topology.c
520
cpu_topology[cpu].cluster_id = cluster_id;
drivers/base/arch_topology.c
521
cpu_topology[cpu].core_id = core_id;
drivers/base/arch_topology.c
522
cpu_topology[cpu].thread_id = i;
drivers/base/arch_topology.c
523
} else if (cpu != -ENODEV) {
drivers/base/arch_topology.c
532
cpu = get_cpu_for_node(core);
drivers/base/arch_topology.c
533
if (cpu >= 0) {
drivers/base/arch_topology.c
540
cpu_topology[cpu].package_id = package_id;
drivers/base/arch_topology.c
541
cpu_topology[cpu].cluster_id = cluster_id;
drivers/base/arch_topology.c
542
cpu_topology[cpu].core_id = core_id;
drivers/base/arch_topology.c
543
} else if (leaf && cpu != -ENODEV) {
drivers/base/arch_topology.c
661
int cpu;
drivers/base/arch_topology.c
690
for_each_possible_cpu(cpu)
drivers/base/arch_topology.c
691
if (cpu_topology[cpu].package_id < 0) {
drivers/base/arch_topology.c
705
const struct cpumask *cpu_coregroup_mask(int cpu)
drivers/base/arch_topology.c
707
const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
drivers/base/arch_topology.c
710
if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
drivers/base/arch_topology.c
712
core_mask = &cpu_topology[cpu].core_sibling;
drivers/base/arch_topology.c
715
if (last_level_cache_is_valid(cpu)) {
drivers/base/arch_topology.c
716
if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
drivers/base/arch_topology.c
717
core_mask = &cpu_topology[cpu].llc_sibling;
drivers/base/arch_topology.c
726
cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
drivers/base/arch_topology.c
727
core_mask = &cpu_topology[cpu].cluster_sibling;
drivers/base/arch_topology.c
732
const struct cpumask *cpu_clustergroup_mask(int cpu)
drivers/base/arch_topology.c
738
if (cpumask_subset(cpu_coregroup_mask(cpu),
drivers/base/arch_topology.c
739
&cpu_topology[cpu].cluster_sibling))
drivers/base/arch_topology.c
74
int cpu;
drivers/base/arch_topology.c
740
return topology_sibling_cpumask(cpu);
drivers/base/arch_topology.c
742
return &cpu_topology[cpu].cluster_sibling;
drivers/base/arch_topology.c
748
int cpu, ret;
drivers/base/arch_topology.c
755
for_each_online_cpu(cpu) {
drivers/base/arch_topology.c
756
cpu_topo = &cpu_topology[cpu];
drivers/base/arch_topology.c
758
if (last_level_cache_is_shared(cpu, cpuid)) {
drivers/base/arch_topology.c
759
cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
drivers/base/arch_topology.c
767
cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
drivers/base/arch_topology.c
773
cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
drivers/base/arch_topology.c
781
cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
drivers/base/arch_topology.c
785
static void clear_cpu_topology(int cpu)
drivers/base/arch_topology.c
787
struct cpu_topology *cpu_topo = &cpu_topology[cpu];
drivers/base/arch_topology.c
790
cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
drivers/base/arch_topology.c
793
cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);
drivers/base/arch_topology.c
796
cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
drivers/base/arch_topology.c
798
cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
drivers/base/arch_topology.c
803
unsigned int cpu;
drivers/base/arch_topology.c
805
for_each_possible_cpu(cpu) {
drivers/base/arch_topology.c
806
struct cpu_topology *cpu_topo = &cpu_topology[cpu];
drivers/base/arch_topology.c
813
clear_cpu_topology(cpu);
drivers/base/arch_topology.c
817
void remove_cpu_topology(unsigned int cpu)
drivers/base/arch_topology.c
821
for_each_cpu(sibling, topology_core_cpumask(cpu))
drivers/base/arch_topology.c
822
cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
drivers/base/arch_topology.c
823
for_each_cpu(sibling, topology_sibling_cpumask(cpu))
drivers/base/arch_topology.c
824
cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
drivers/base/arch_topology.c
825
for_each_cpu(sibling, topology_cluster_cpumask(cpu))
drivers/base/arch_topology.c
826
cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
drivers/base/arch_topology.c
827
for_each_cpu(sibling, topology_llc_cpumask(cpu))
drivers/base/arch_topology.c
828
cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
drivers/base/arch_topology.c
830
clear_cpu_topology(cpu);
drivers/base/arch_topology.c
839
static bool __init acpi_cpu_is_threaded(int cpu)
drivers/base/arch_topology.c
841
int is_threaded = acpi_pptt_cpu_is_thread(cpu);
drivers/base/arch_topology.c
85
for_each_cpu(cpu, cpus) {
drivers/base/arch_topology.c
86
sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
drivers/base/arch_topology.c
863
int cpu, topology_id;
drivers/base/arch_topology.c
870
for_each_possible_cpu(cpu) {
drivers/base/arch_topology.c
871
topology_id = find_acpi_cpu_topology(cpu, 0);
drivers/base/arch_topology.c
875
if (acpi_cpu_is_threaded(cpu)) {
drivers/base/arch_topology.c
876
cpu_topology[cpu].thread_id = topology_id;
drivers/base/arch_topology.c
877
topology_id = find_acpi_cpu_topology(cpu, 1);
drivers/base/arch_topology.c
878
cpu_topology[cpu].core_id = topology_id;
drivers/base/arch_topology.c
892
hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
drivers/base/arch_topology.c
90
rcu_assign_pointer(per_cpu(sft_data, cpu), data);
drivers/base/arch_topology.c
908
cpu_topology[cpu].thread_id = -1;
drivers/base/arch_topology.c
909
cpu_topology[cpu].core_id = topology_id;
drivers/base/arch_topology.c
91
cpumask_set_cpu(cpu, &scale_freq_counters_mask);
drivers/base/arch_topology.c
911
topology_id = find_acpi_cpu_topology_cluster(cpu);
drivers/base/arch_topology.c
912
cpu_topology[cpu].cluster_id = topology_id;
drivers/base/arch_topology.c
913
topology_id = find_acpi_cpu_topology_package(cpu);
drivers/base/arch_topology.c
914
cpu_topology[cpu].package_id = topology_id;
drivers/base/arch_topology.c
935
int cpu, ret;
drivers/base/arch_topology.c
951
for_each_possible_cpu(cpu) {
drivers/base/arch_topology.c
952
ret = fetch_cache_info(cpu);
drivers/base/cacheinfo.c
1002
if (!cpu_online && icpu == cpu)
drivers/base/cacheinfo.c
1009
static int cacheinfo_cpu_online(unsigned int cpu)
drivers/base/cacheinfo.c
1011
int rc = detect_cache_attributes(cpu);
drivers/base/cacheinfo.c
1016
rc = cache_add_dev(cpu);
drivers/base/cacheinfo.c
1019
if (cpu_map_shared_cache(true, cpu, &cpu_map))
drivers/base/cacheinfo.c
1020
update_per_cpu_data_slice_size(true, cpu, cpu_map);
drivers/base/cacheinfo.c
1023
free_cache_attributes(cpu);
drivers/base/cacheinfo.c
1027
static int cacheinfo_cpu_pre_down(unsigned int cpu)
drivers/base/cacheinfo.c
1032
nr_shared = cpu_map_shared_cache(false, cpu, &cpu_map);
drivers/base/cacheinfo.c
1033
if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
drivers/base/cacheinfo.c
1034
cpu_cache_sysfs_exit(cpu);
drivers/base/cacheinfo.c
1036
free_cache_attributes(cpu);
drivers/base/cacheinfo.c
1038
update_per_cpu_data_slice_size(false, cpu, cpu_map);
drivers/base/cacheinfo.c
187
static bool match_cache_node(struct device_node *cpu,
drivers/base/cacheinfo.c
190
struct device_node *prev, *cache = of_find_next_cache_node(cpu);
drivers/base/cacheinfo.c
213
struct device_node *cpu;
drivers/base/cacheinfo.c
216
for_each_of_cpu_node(cpu) {
drivers/base/cacheinfo.c
217
u64 id = of_get_cpu_hwid(cpu, 0);
drivers/base/cacheinfo.c
221
of_node_put(cpu);
drivers/base/cacheinfo.c
225
if (match_cache_node(cpu, cache_node))
drivers/base/cacheinfo.c
253
static int cache_setup_of_node(unsigned int cpu)
drivers/base/cacheinfo.c
258
struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
drivers/base/cacheinfo.c
26
#define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
drivers/base/cacheinfo.c
260
pr_err("Failed to find cpu%d device node\n", cpu);
drivers/base/cacheinfo.c
268
while (index < cache_leaves(cpu)) {
drivers/base/cacheinfo.c
269
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
drivers/base/cacheinfo.c
27
#define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
drivers/base/cacheinfo.c
28
#define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
drivers/base/cacheinfo.c
281
if (index != cache_leaves(cpu)) /* not all OF nodes populated */
drivers/base/cacheinfo.c
29
#define per_cpu_cacheinfo_idx(cpu, idx) \
drivers/base/cacheinfo.c
30
(per_cpu_cacheinfo(cpu) + (idx))
drivers/base/cacheinfo.c
327
int init_of_cache_level(unsigned int cpu)
drivers/base/cacheinfo.c
329
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
drivers/base/cacheinfo.c
330
struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
drivers/base/cacheinfo.c
35
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
drivers/base/cacheinfo.c
365
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
drivers/base/cacheinfo.c
366
int init_of_cache_level(unsigned int cpu) { return 0; }
drivers/base/cacheinfo.c
369
int __weak cache_setup_acpi(unsigned int cpu)
drivers/base/cacheinfo.c
37
return ci_cacheinfo(cpu);
drivers/base/cacheinfo.c
376
static int cache_setup_properties(unsigned int cpu)
drivers/base/cacheinfo.c
381
ret = cache_setup_of_node(cpu);
drivers/base/cacheinfo.c
383
ret = cache_setup_acpi(cpu);
drivers/base/cacheinfo.c
392
static int cache_shared_cpu_map_setup(unsigned int cpu)
drivers/base/cacheinfo.c
394
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
drivers/base/cacheinfo.c
407
if (!last_level_cache_is_valid(cpu) && !use_arch_info) {
drivers/base/cacheinfo.c
408
ret = cache_setup_properties(cpu);
drivers/base/cacheinfo.c
413
for (index = 0; index < cache_leaves(cpu); index++) {
drivers/base/cacheinfo.c
416
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
drivers/base/cacheinfo.c
418
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
drivers/base/cacheinfo.c
420
if (i == cpu || !per_cpu_cacheinfo(i))
drivers/base/cacheinfo.c
435
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
drivers/base/cacheinfo.c
451
static void cache_shared_cpu_map_remove(unsigned int cpu)
drivers/base/cacheinfo.c
453
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
drivers/base/cacheinfo.c
457
for (index = 0; index < cache_leaves(cpu); index++) {
drivers/base/cacheinfo.c
458
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
drivers/base/cacheinfo.c
460
if (sibling == cpu || !per_cpu_cacheinfo(sibling))
drivers/base/cacheinfo.c
476
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
drivers/base/cacheinfo.c
488
static void free_cache_attributes(unsigned int cpu)
drivers/base/cacheinfo.c
490
if (!per_cpu_cacheinfo(cpu))
drivers/base/cacheinfo.c
493
cache_shared_cpu_map_remove(cpu);
drivers/base/cacheinfo.c
496
int __weak early_cache_level(unsigned int cpu)
drivers/base/cacheinfo.c
501
int __weak init_cache_level(unsigned int cpu)
drivers/base/cacheinfo.c
506
int __weak populate_cache_leaves(unsigned int cpu)
drivers/base/cacheinfo.c
511
static inline int allocate_cache_info(int cpu)
drivers/base/cacheinfo.c
513
per_cpu_cacheinfo(cpu) = kzalloc_objs(struct cacheinfo,
drivers/base/cacheinfo.c
514
cache_leaves(cpu), GFP_ATOMIC);
drivers/base/cacheinfo.c
515
if (!per_cpu_cacheinfo(cpu)) {
drivers/base/cacheinfo.c
516
cache_leaves(cpu) = 0;
drivers/base/cacheinfo.c
523
int fetch_cache_info(unsigned int cpu)
drivers/base/cacheinfo.c
525
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
drivers/base/cacheinfo.c
530
ret = init_of_cache_level(cpu);
drivers/base/cacheinfo.c
532
ret = acpi_get_cache_info(cpu, &levels, &split_levels);
drivers/base/cacheinfo.c
545
if (ret || !cache_leaves(cpu)) {
drivers/base/cacheinfo.c
546
ret = early_cache_level(cpu);
drivers/base/cacheinfo.c
550
if (!cache_leaves(cpu))
drivers/base/cacheinfo.c
556
return allocate_cache_info(cpu);
drivers/base/cacheinfo.c
559
static inline int init_level_allocate_ci(unsigned int cpu)
drivers/base/cacheinfo.c
561
unsigned int early_leaves = cache_leaves(cpu);
drivers/base/cacheinfo.c
573
if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
drivers/base/cacheinfo.c
576
if (init_cache_level(cpu) || !cache_leaves(cpu))
drivers/base/cacheinfo.c
58
bool last_level_cache_is_valid(unsigned int cpu)
drivers/base/cacheinfo.c
584
ci_cacheinfo(cpu)->early_ci_levels = false;
drivers/base/cacheinfo.c
590
if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
drivers/base/cacheinfo.c
593
kfree(per_cpu_cacheinfo(cpu));
drivers/base/cacheinfo.c
594
return allocate_cache_info(cpu);
drivers/base/cacheinfo.c
597
int detect_cache_attributes(unsigned int cpu)
drivers/base/cacheinfo.c
601
ret = init_level_allocate_ci(cpu);
drivers/base/cacheinfo.c
609
if (!last_level_cache_is_valid(cpu)) {
drivers/base/cacheinfo.c
614
ret = populate_cache_leaves(cpu);
drivers/base/cacheinfo.c
62
if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
drivers/base/cacheinfo.c
624
ret = cache_shared_cpu_map_setup(cpu);
drivers/base/cacheinfo.c
626
pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
drivers/base/cacheinfo.c
633
free_cache_attributes(cpu);
drivers/base/cacheinfo.c
639
#define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))
drivers/base/cacheinfo.c
645
#define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
drivers/base/cacheinfo.c
646
#define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx])
drivers/base/cacheinfo.c
65
llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
drivers/base/cacheinfo.c
855
static void cpu_cache_sysfs_exit(unsigned int cpu)
drivers/base/cacheinfo.c
860
if (per_cpu_index_dev(cpu)) {
drivers/base/cacheinfo.c
861
for (i = 0; i < cache_leaves(cpu); i++) {
drivers/base/cacheinfo.c
862
ci_dev = per_cache_index_dev(cpu, i);
drivers/base/cacheinfo.c
867
kfree(per_cpu_index_dev(cpu));
drivers/base/cacheinfo.c
868
per_cpu_index_dev(cpu) = NULL;
drivers/base/cacheinfo.c
870
device_unregister(per_cpu_cache_dev(cpu));
drivers/base/cacheinfo.c
871
per_cpu_cache_dev(cpu) = NULL;
drivers/base/cacheinfo.c
874
static int cpu_cache_sysfs_init(unsigned int cpu)
drivers/base/cacheinfo.c
876
struct device *dev = get_cpu_device(cpu);
drivers/base/cacheinfo.c
878
if (per_cpu_cacheinfo(cpu) == NULL)
drivers/base/cacheinfo.c
881
per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
drivers/base/cacheinfo.c
882
if (IS_ERR(per_cpu_cache_dev(cpu)))
drivers/base/cacheinfo.c
883
return PTR_ERR(per_cpu_cache_dev(cpu));
drivers/base/cacheinfo.c
886
per_cpu_index_dev(cpu) = kzalloc_objs(struct device *,
drivers/base/cacheinfo.c
887
cache_leaves(cpu));
drivers/base/cacheinfo.c
888
if (unlikely(per_cpu_index_dev(cpu) == NULL))
drivers/base/cacheinfo.c
894
cpu_cache_sysfs_exit(cpu);
drivers/base/cacheinfo.c
898
static int cache_add_dev(unsigned int cpu)
drivers/base/cacheinfo.c
906
rc = cpu_cache_sysfs_init(cpu);
drivers/base/cacheinfo.c
910
parent = per_cpu_cache_dev(cpu);
drivers/base/cacheinfo.c
911
for (i = 0; i < cache_leaves(cpu); i++) {
drivers/base/cacheinfo.c
912
this_leaf = per_cpu_cacheinfo_idx(cpu, i);
drivers/base/cacheinfo.c
924
per_cache_index_dev(cpu, i) = ci_dev;
drivers/base/cacheinfo.c
926
cpumask_set_cpu(cpu, &cache_dev_map);
drivers/base/cacheinfo.c
930
cpu_cache_sysfs_exit(cpu);
drivers/base/cacheinfo.c
934
static unsigned int cpu_map_shared_cache(bool online, unsigned int cpu,
drivers/base/cacheinfo.c
940
if (!last_level_cache_is_valid(cpu))
drivers/base/cacheinfo.c
943
llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
drivers/base/cacheinfo.c
955
if (sibling == cpu || !last_level_cache_is_valid(sibling))
drivers/base/cacheinfo.c
976
static void update_per_cpu_data_slice_size_cpu(unsigned int cpu)
drivers/base/cacheinfo.c
982
if (!last_level_cache_is_valid(cpu))
drivers/base/cacheinfo.c
985
ci = ci_cacheinfo(cpu);
drivers/base/cacheinfo.c
986
llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
drivers/base/cacheinfo.c
996
static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu,
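cacheinfo_cpu_online()/cacheinfo_cpu_pre_down() above are the startup/teardown pair that cpuhp_setup_state() takes for single-instance hotplug states. A registration sketch with hypothetical callbacks:

#include <linux/cpuhotplug.h>

static int my_cpu_online(unsigned int cpu)
{
	return 0;		/* allocate/publish per-CPU state for @cpu */
}

static int my_cpu_pre_down(unsigned int cpu)
{
	return 0;		/* unpublish/free before @cpu goes offline */
}

static int __init my_init(void)
{
	/* Dynamic AP state; the online callback also runs for CPUs that
	 * are already up at registration time. */
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/my:online",
				    my_cpu_online, my_cpu_pre_down);

	return ret < 0 ? ret : 0;	/* DYN setup returns the state number */
}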
drivers/base/cpu.c
101
device_unregister(&cpu->dev);
drivers/base/cpu.c
155
struct cpu *cpu = container_of(dev, struct cpu, dev);
drivers/base/cpu.c
159
cpunum = cpu->dev.id;
drivers/base/cpu.c
39
static void change_cpu_under_node(struct cpu *cpu,
drivers/base/cpu.c
416
int register_cpu(struct cpu *cpu, int num)
drivers/base/cpu.c
42
int cpuid = cpu->dev.id;
drivers/base/cpu.c
420
cpu->node_id = cpu_to_node(num);
drivers/base/cpu.c
421
memset(&cpu->dev, 0x00, sizeof(struct device));
drivers/base/cpu.c
422
cpu->dev.id = num;
drivers/base/cpu.c
423
cpu->dev.bus = &cpu_subsys;
drivers/base/cpu.c
424
cpu->dev.release = cpu_device_release;
drivers/base/cpu.c
425
cpu->dev.offline_disabled = !cpu->hotpluggable;
drivers/base/cpu.c
426
cpu->dev.offline = !cpu_online(num);
drivers/base/cpu.c
427
cpu->dev.of_node = of_get_cpu_node(num, NULL);
drivers/base/cpu.c
428
cpu->dev.groups = common_cpu_attr_groups;
drivers/base/cpu.c
429
if (cpu->hotpluggable)
drivers/base/cpu.c
430
cpu->dev.groups = hotplugable_cpu_attr_groups;
drivers/base/cpu.c
431
error = device_register(&cpu->dev);
drivers/base/cpu.c
433
put_device(&cpu->dev);
drivers/base/cpu.c
437
per_cpu(cpu_sys_devices, num) = &cpu->dev;
drivers/base/cpu.c
439
dev_pm_qos_expose_latency_limit(&cpu->dev,
drivers/base/cpu.c
446
struct device *get_cpu_device(unsigned int cpu)
drivers/base/cpu.c
448
if (cpu < nr_cpu_ids && cpu_possible(cpu))
drivers/base/cpu.c
449
return per_cpu(cpu_sys_devices, cpu);
drivers/base/cpu.c
45
cpu->node_id = to_nid;
drivers/base/cpu.c
50
struct cpu *cpu = container_of(dev, struct cpu, dev);
drivers/base/cpu.c
547
bool cpu_is_hotpluggable(unsigned int cpu)
drivers/base/cpu.c
549
struct device *dev = get_cpu_device(cpu);
drivers/base/cpu.c
550
return dev && container_of(dev, struct cpu, dev)->hotpluggable
drivers/base/cpu.c
551
&& tick_nohz_cpu_hotpluggable(cpu);
drivers/base/cpu.c
556
DEFINE_PER_CPU(struct cpu, cpu_devices);
drivers/base/cpu.c
558
bool __weak arch_cpu_is_hotpluggable(int cpu)
drivers/base/cpu.c
563
int __weak arch_register_cpu(int cpu)
drivers/base/cpu.c
565
struct cpu *c = &per_cpu(cpu_devices, cpu);
drivers/base/cpu.c
567
c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
drivers/base/cpu.c
569
return register_cpu(c, cpu);
drivers/base/cpu.c
84
change_cpu_under_node(cpu, from_nid, to_nid);
drivers/base/cpu.c
94
void unregister_cpu(struct cpu *cpu)
drivers/base/cpu.c
96
int logical_cpu = cpu->dev.id;
drivers/base/node.c
684
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
drivers/base/node.c
692
obj = get_cpu_device(cpu);
drivers/base/node.c
757
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
drivers/base/node.c
764
obj = get_cpu_device(cpu);
drivers/base/node.c
875
int cpu;
drivers/base/node.c
901
for_each_present_cpu(cpu) {
drivers/base/node.c
902
if (cpu_to_node(cpu) == nid)
drivers/base/node.c
903
register_cpu_under_node(cpu, nid);
drivers/base/test/test_async_driver_probe.c
108
int err, nid, cpu;
drivers/base/test/test_async_driver_probe.c
112
for_each_online_cpu(cpu) {
drivers/base/test/test_async_driver_probe.c
113
nid = cpu_to_node(cpu);
drivers/base/test/test_async_driver_probe.c
146
for_each_online_cpu(cpu) {
drivers/base/test/test_async_driver_probe.c
147
nid = cpu_to_node(cpu);
drivers/base/test/test_async_driver_probe.c
175
nid = cpu_to_node(cpu);
drivers/base/topology.c
188
static int topology_add_dev(unsigned int cpu)
drivers/base/topology.c
190
struct device *dev = get_cpu_device(cpu);
drivers/base/topology.c
195
static int topology_remove_dev(unsigned int cpu)
drivers/base/topology.c
197
struct device *dev = get_cpu_device(cpu);
drivers/base/topology.c
215
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
drivers/base/topology.c
217
per_cpu(cpu_scale, cpu) = capacity;
drivers/base/topology.c
224
struct cpu *cpu = container_of(dev, struct cpu, dev);
drivers/base/topology.c
226
return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
drivers/base/topology.c
231
static int cpu_capacity_sysctl_add(unsigned int cpu)
drivers/base/topology.c
233
struct device *cpu_dev = get_cpu_device(cpu);
drivers/base/topology.c
243
static int cpu_capacity_sysctl_remove(unsigned int cpu)
drivers/base/topology.c
245
struct device *cpu_dev = get_cpu_device(cpu);
drivers/bcma/driver_mips.c
269
struct bcma_device *cpu, *pcie, *i2s;
drivers/bcma/driver_mips.c
278
cpu = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
drivers/bcma/driver_mips.c
281
if (cpu && pcie && i2s &&
drivers/bcma/driver_mips.c
282
bcma_aread32(cpu, BCMA_MIPS_OOBSELINA74) == 0x08060504 &&
drivers/bcma/driver_mips.c
285
bcma_awrite32(cpu, BCMA_MIPS_OOBSELINA74, 0x07060504);
drivers/block/drbd/drbd_main.c
480
unsigned int cpu, min = ~0;
drivers/block/drbd/drbd_main.c
484
for_each_cpu(cpu, resource->cpu_mask)
drivers/block/drbd/drbd_main.c
485
resources_per_cpu[cpu]++;
drivers/block/drbd/drbd_main.c
488
for_each_online_cpu(cpu) {
drivers/block/drbd/drbd_main.c
489
if (resources_per_cpu[cpu] < min) {
drivers/block/drbd/drbd_main.c
490
min = resources_per_cpu[cpu];
drivers/block/drbd/drbd_main.c
491
min_index = cpu;
drivers/block/mtip32xx/mtip32xx.c
3583
static void drop_cpu(int cpu)
drivers/block/mtip32xx/mtip32xx.c
3585
cpu_use[cpu]--;
drivers/block/mtip32xx/mtip32xx.c
3590
int cpu, least_used_cpu, least_cnt;
drivers/block/mtip32xx/mtip32xx.c
3596
cpu = least_used_cpu;
drivers/block/mtip32xx/mtip32xx.c
3598
for_each_cpu(cpu, node_mask) {
drivers/block/mtip32xx/mtip32xx.c
3599
if (cpu_use[cpu] < least_cnt) {
drivers/block/mtip32xx/mtip32xx.c
3600
least_used_cpu = cpu;
drivers/block/mtip32xx/mtip32xx.c
3601
least_cnt = cpu_use[cpu];
drivers/block/mtip32xx/mtip32xx.c
3695
int cpu, i = 0, j = 0;
drivers/block/mtip32xx/mtip32xx.c
3750
for_each_cpu(cpu, node_mask)
drivers/block/mtip32xx/mtip32xx.c
3752
snprintf(&cpu_list[j], 256 - j, "%d ", cpu);
drivers/block/mtip32xx/mtip32xx.c
3779
for_each_present_cpu(cpu) {
drivers/block/mtip32xx/mtip32xx.c
3782
if (dd->work[i].cpu_binding == cpu) {
drivers/block/mtip32xx/mtip32xx.c
3788
dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list);
drivers/block/rnbd/rnbd-clt.c
1069
need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm);
drivers/block/rnbd/rnbd-clt.c
1071
set_bit(cpu_q->cpu, sess->cpu_queues_bm);
drivers/block/rnbd/rnbd-clt.c
1085
clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
drivers/block/rnbd/rnbd-clt.c
163
rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
drivers/block/rnbd/rnbd-clt.c
168
bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu);
drivers/block/rnbd/rnbd-clt.c
171
} else if (cpu != 0) {
drivers/block/rnbd/rnbd-clt.c
173
bit = find_first_bit(sess->cpu_queues_bm, cpu);
drivers/block/rnbd/rnbd-clt.c
174
if (bit < cpu)
drivers/block/rnbd/rnbd-clt.c
181
static inline int nxt_cpu(int cpu)
drivers/block/rnbd/rnbd-clt.c
183
return (cpu + 1) % nr_cpu_ids;
drivers/block/rnbd/rnbd-clt.c
215
cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) {
drivers/block/rnbd/rnbd-clt.c
218
if (!test_bit(cpu_q->cpu, sess->cpu_queues_bm))
drivers/block/rnbd/rnbd-clt.c
230
clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
drivers/block/rnbd/rnbd-clt.c
247
*cpup = cpu_q->cpu;
drivers/block/rnbd/rnbd-clt.c
725
unsigned int cpu;
drivers/block/rnbd/rnbd-clt.c
728
for_each_possible_cpu(cpu) {
drivers/block/rnbd/rnbd-clt.c
729
cpu_q = per_cpu_ptr(cpu_queues, cpu);
drivers/block/rnbd/rnbd-clt.c
731
cpu_q->cpu = cpu;
drivers/block/rnbd/rnbd-clt.c
782
int err, cpu;
drivers/block/rnbd/rnbd-clt.c
813
for_each_possible_cpu(cpu)
drivers/block/rnbd/rnbd-clt.c
814
* per_cpu_ptr(sess->cpu_rr, cpu) = cpu;
drivers/block/rnbd/rnbd-clt.h
73
unsigned int cpu;
drivers/block/ublk_drv.c
4081
unsigned int cpu;
drivers/block/ublk_drv.c
4084
for_each_possible_cpu(cpu) {
drivers/block/ublk_drv.c
4085
if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[cpu] == q_id)
drivers/block/ublk_drv.c
4086
return cpu_to_node(cpu);
drivers/block/zram/zcomp.c
169
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
drivers/block/zram/zcomp.c
172
struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
drivers/block/zram/zcomp.c
181
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
drivers/block/zram/zcomp.c
184
struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
drivers/block/zram/zcomp.c
194
int ret, cpu;
drivers/block/zram/zcomp.c
205
for_each_possible_cpu(cpu)
drivers/block/zram/zcomp.c
206
mutex_init(&per_cpu_ptr(comp->stream, cpu)->lock);
drivers/block/zram/zcomp.h
80
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
drivers/block/zram/zcomp.h
81
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
drivers/bus/arm-cci.c
184
int port, cpu;
drivers/bus/arm-cci.c
194
for_each_possible_cpu(cpu) {
drivers/bus/arm-cci.c
196
cpun = of_get_cpu_node(cpu, NULL);
drivers/bus/arm-cci.c
205
init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
drivers/bus/arm-cci.c
208
for_each_possible_cpu(cpu) {
drivers/bus/arm-cci.c
209
WARN(!cpu_port_is_valid(&cpu_port[cpu]),
drivers/bus/arm-cci.c
211
cpu);
drivers/bus/arm-cci.c
267
int cpu;
drivers/bus/arm-cci.c
269
for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
drivers/bus/arm-cci.c
270
is_valid = cpu_port_is_valid(&cpu_port[cpu]);
drivers/bus/arm-cci.c
271
if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
drivers/bus/arm-cci.c
272
cci_port_control(cpu_port[cpu].port, false);
drivers/bus/mips_cdmm.c
102
CDMM_ATTR(cpu, "%u\n", dev->cpu);
drivers/bus/mips_cdmm.c
202
_BUILD_RET_##_ret work_on_cpu(cdev->cpu, \
drivers/bus/mips_cdmm.c
293
unsigned int cpu;
drivers/bus/mips_cdmm.c
298
cpu = smp_processor_id();
drivers/bus/mips_cdmm.c
300
if (cpu == 0)
drivers/bus/mips_cdmm.c
304
bus_p = per_cpu_ptr(&mips_cdmm_buses, cpu);
drivers/bus/mips_cdmm.c
519
unsigned int cpu = smp_processor_id();
drivers/bus/mips_cdmm.c
529
pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
drivers/bus/mips_cdmm.c
540
cpu, id, drb, drb * CDMM_DRB_SIZE,
drivers/bus/mips_cdmm.c
548
dev->cpu = cpu;
drivers/bus/mips_cdmm.c
555
dev->dev.parent = get_cpu_device(cpu);
drivers/bus/mips_cdmm.c
560
dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
drivers/bus/mips_cdmm.c
597
unsigned int cpu = *(unsigned int *)data; \
drivers/bus/mips_cdmm.c
599
if (cdev->cpu != cpu || !dev->driver) \
drivers/bus/mips_cdmm.c
620
static int mips_cdmm_cpu_down_prep(unsigned int cpu)
drivers/bus/mips_cdmm.c
626
ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
drivers/bus/mips_cdmm.c
652
static int mips_cdmm_cpu_online(unsigned int cpu)
drivers/bus/mips_cdmm.c
669
ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
drivers/bus/mips_cdmm.c
75
retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu);
drivers/char/random.c
1049
int __cold random_online_cpu(unsigned int cpu)
drivers/char/random.c
1062
per_cpu_ptr(&irq_randomness, cpu)->count = 0;
drivers/char/random.c
1289
int cpu = -1;
drivers/char/random.c
1331
cpu = cpumask_next(cpu, timer_cpus);
drivers/char/random.c
1332
if (cpu >= nr_cpu_ids)
drivers/char/random.c
1333
cpu = cpumask_first(timer_cpus);
drivers/char/random.c
1334
} while (cpu == smp_processor_id() && num_cpus > 1);
drivers/char/random.c
1339
add_timer_on(&stack->timer, cpu);
drivers/char/random.c
587
int __cold random_prepare_cpu(unsigned int cpu)
drivers/char/random.c
594
per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
drivers/char/random.c
595
per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
drivers/char/random.c
596
per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
drivers/char/random.c
597
per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
drivers/char/random.c
598
per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
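drivers/char/random.c (the line-1331 region above) advances through a cpumask with cpumask_next() and wraps to cpumask_first() when it runs off the end. That round-robin idiom, sketched; only the function name is hypothetical:

/* Round-robin CPU selection over a mask, wrapping at the end.
 * Passing prev = -1 makes the first pick cpumask_first(mask). */
static int my_pick_next_cpu(int prev, const struct cpumask *mask)
{
	int cpu = cpumask_next(prev, mask);

	if (cpu >= nr_cpu_ids)		/* ran off the end: wrap around */
		cpu = cpumask_first(mask);
	return cpu;
}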
drivers/clk/imx/clk-cpu.c
100
hw = &cpu->hw;
drivers/clk/imx/clk-cpu.c
104
kfree(cpu);
drivers/clk/imx/clk-cpu.c
28
struct clk_cpu *cpu = to_clk_cpu(hw);
drivers/clk/imx/clk-cpu.c
30
return clk_get_rate(cpu->div);
drivers/clk/imx/clk-cpu.c
36
struct clk_cpu *cpu = to_clk_cpu(hw);
drivers/clk/imx/clk-cpu.c
38
req->rate = clk_round_rate(cpu->pll, req->rate);
drivers/clk/imx/clk-cpu.c
46
struct clk_cpu *cpu = to_clk_cpu(hw);
drivers/clk/imx/clk-cpu.c
50
ret = clk_set_parent(cpu->mux, cpu->step);
drivers/clk/imx/clk-cpu.c
55
ret = clk_set_rate(cpu->pll, rate);
drivers/clk/imx/clk-cpu.c
57
clk_set_parent(cpu->mux, cpu->pll);
drivers/clk/imx/clk-cpu.c
61
clk_set_parent(cpu->mux, cpu->pll);
drivers/clk/imx/clk-cpu.c
64
clk_set_rate(cpu->div, rate);
drivers/clk/imx/clk-cpu.c
79
struct clk_cpu *cpu;
drivers/clk/imx/clk-cpu.c
84
cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
drivers/clk/imx/clk-cpu.c
85
if (!cpu)
drivers/clk/imx/clk-cpu.c
88
cpu->div = div;
drivers/clk/imx/clk-cpu.c
89
cpu->mux = mux;
drivers/clk/imx/clk-cpu.c
90
cpu->pll = pll;
drivers/clk/imx/clk-cpu.c
91
cpu->step = step;
drivers/clk/imx/clk-cpu.c
99
cpu->hw.init = &init;
drivers/clk/imx/clk-imx25.c
86
clk[cpu] = imx_clk_divider("cpu", "cpu_sel", ccm(CCM_CCTL), 30, 2);
drivers/clk/mvebu/ap-cpu-clk.c
257
u64 cpu;
drivers/clk/mvebu/ap-cpu-clk.c
259
cpu = of_get_cpu_hwid(dn, 0);
drivers/clk/mvebu/ap-cpu-clk.c
260
if (WARN_ON(cpu == OF_BAD_ADDR)) {
drivers/clk/mvebu/ap-cpu-clk.c
266
if (cpu & APN806_CLUSTER_NUM_MASK) {
drivers/clk/mvebu/ap-cpu-clk.c
292
u64 cpu;
drivers/clk/mvebu/ap-cpu-clk.c
294
cpu = of_get_cpu_hwid(dn, 0);
drivers/clk/mvebu/ap-cpu-clk.c
295
if (WARN_ON(cpu == OF_BAD_ADDR)) {
drivers/clk/mvebu/ap-cpu-clk.c
300
cluster_index = cpu & APN806_CLUSTER_NUM_MASK;
drivers/clk/mvebu/armada-37xx-periph.c
270
static PERIPH_PM_CPU(cpu, 22, DIV_SEL0, 28);
drivers/clk/mvebu/armada-37xx-periph.c
289
REF_CLK_PM_CPU(cpu),
drivers/clk/mvebu/clk-cpu.c
150
return mvebu_pmsu_dfs_request(cpuclk->cpu);
drivers/clk/mvebu/clk-cpu.c
174
int cpu;
drivers/clk/mvebu/clk-cpu.c
194
for_each_possible_cpu(cpu) {
drivers/clk/mvebu/clk-cpu.c
202
sprintf(clk_name, "cpu%d", cpu);
drivers/clk/mvebu/clk-cpu.c
204
cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
drivers/clk/mvebu/clk-cpu.c
205
cpuclk[cpu].clk_name = clk_name;
drivers/clk/mvebu/clk-cpu.c
206
cpuclk[cpu].cpu = cpu;
drivers/clk/mvebu/clk-cpu.c
207
cpuclk[cpu].reg_base = clock_complex_base;
drivers/clk/mvebu/clk-cpu.c
209
cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
drivers/clk/mvebu/clk-cpu.c
210
cpuclk[cpu].hw.init = &init;
drivers/clk/mvebu/clk-cpu.c
212
init.name = cpuclk[cpu].clk_name;
drivers/clk/mvebu/clk-cpu.c
215
init.parent_names = &cpuclk[cpu].parent_name;
drivers/clk/mvebu/clk-cpu.c
218
clk = clk_register(NULL, &cpuclk[cpu].hw);
drivers/clk/mvebu/clk-cpu.c
221
clks[cpu] = clk;
drivers/clk/mvebu/clk-cpu.c
35
int cpu;
drivers/clk/mvebu/clk-cpu.c
55
div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
drivers/clk/mvebu/clk-cpu.c
86
& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
drivers/clk/mvebu/clk-cpu.c
87
| (div << (cpuclk->cpu * 8));
drivers/clk/mvebu/clk-cpu.c
90
reload_mask = 1 << (20 + cpuclk->cpu);
drivers/clk/mxs/clk-imx23.c
123
clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 7, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
drivers/clk/mxs/clk-imx23.c
94
cpu, hbus, xbus, emi, uart,
drivers/clk/mxs/clk-imx28.c
149
cpu, hbus, xbus, emi, uart,
drivers/clk/mxs/clk-imx28.c
188
clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 18, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
drivers/clk/qcom/krait-cc.c
129
for_each_online_cpu(cpu)
drivers/clk/qcom/krait-cc.c
146
int cpu, ret;
drivers/clk/qcom/krait-cc.c
213
for_each_online_cpu(cpu)
drivers/clk/qcom/krait-cc.c
350
int cpu;
drivers/clk/qcom/krait-cc.c
372
for_each_possible_cpu(cpu) {
drivers/clk/qcom/krait-cc.c
373
mux = krait_add_clks(dev, cpu, unique_aux);
drivers/clk/qcom/krait-cc.c
376
clks[cpu] = mux->clk;
drivers/clk/qcom/krait-cc.c
391
for_each_online_cpu(cpu) {
drivers/clk/qcom/krait-cc.c
393
WARN(clk_prepare_enable(clks[cpu]),
drivers/clk/qcom/krait-cc.c
394
"Unable to turn on CPU%d clock", cpu);
drivers/clk/qcom/krait-cc.c
418
for_each_possible_cpu(cpu) {
drivers/clk/qcom/krait-cc.c
419
clk = clks[cpu];
drivers/clk/qcom/krait-cc.c
422
pr_info("CPU%d @ Undefined rate. Forcing new rate.\n", cpu);
drivers/clk/qcom/krait-cc.c
429
pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000);
drivers/clk/qcom/krait-cc.c
93
int cpu, ret;
drivers/clk/tegra/clk-tegra114.c
1081
static void tegra114_wait_cpu_in_reset(u32 cpu)
drivers/clk/tegra/clk-tegra114.c
1088
} while (!(reg & (1 << cpu))); /* check CPU been reset or not */
drivers/clk/tegra/clk-tegra114.c
1091
static void tegra114_disable_cpu_clock(u32 cpu)
drivers/clk/tegra/clk-tegra124.c
1235
static void tegra124_wait_cpu_in_reset(u32 cpu)
drivers/clk/tegra/clk-tegra124.c
1242
} while (!(reg & (1 << cpu))); /* check CPU been reset or not */
drivers/clk/tegra/clk-tegra124.c
1245
static void tegra124_disable_cpu_clock(u32 cpu)
drivers/clk/tegra/clk-tegra20.c
118
#define CPU_CLOCK(cpu) (0x1 << (8 + cpu))
drivers/clk/tegra/clk-tegra20.c
119
#define CPU_RESET(cpu) (0x1111ul << (cpu))
drivers/clk/tegra/clk-tegra20.c
881
static void tegra20_wait_cpu_in_reset(u32 cpu)
drivers/clk/tegra/clk-tegra20.c
889
} while (!(reg & (1 << cpu))); /* check CPU been reset or not */
drivers/clk/tegra/clk-tegra20.c
894
static void tegra20_put_cpu_in_reset(u32 cpu)
drivers/clk/tegra/clk-tegra20.c
896
writel(CPU_RESET(cpu),
drivers/clk/tegra/clk-tegra20.c
901
static void tegra20_cpu_out_of_reset(u32 cpu)
drivers/clk/tegra/clk-tegra20.c
903
writel(CPU_RESET(cpu),
drivers/clk/tegra/clk-tegra20.c
908
static void tegra20_enable_cpu_clock(u32 cpu)
drivers/clk/tegra/clk-tegra20.c
913
writel(reg & ~CPU_CLOCK(cpu),
drivers/clk/tegra/clk-tegra20.c
919
static void tegra20_disable_cpu_clock(u32 cpu)
drivers/clk/tegra/clk-tegra20.c
924
writel(reg | CPU_CLOCK(cpu),
drivers/clk/tegra/clk-tegra210.c
3424
static void tegra210_wait_cpu_in_reset(u32 cpu)
drivers/clk/tegra/clk-tegra210.c
3431
} while (!(reg & (1 << cpu))); /* check CPU been reset or not */
drivers/clk/tegra/clk-tegra210.c
3434
static void tegra210_disable_cpu_clock(u32 cpu)
drivers/clk/tegra/clk-tegra30.c
1076
static void tegra30_wait_cpu_in_reset(u32 cpu)
drivers/clk/tegra/clk-tegra30.c
1084
} while (!(reg & (1 << cpu))); /* check CPU been reset or not */
drivers/clk/tegra/clk-tegra30.c
1089
static void tegra30_put_cpu_in_reset(u32 cpu)
drivers/clk/tegra/clk-tegra30.c
1091
writel(CPU_RESET(cpu),
drivers/clk/tegra/clk-tegra30.c
1096
static void tegra30_cpu_out_of_reset(u32 cpu)
drivers/clk/tegra/clk-tegra30.c
1098
writel(CPU_RESET(cpu),
drivers/clk/tegra/clk-tegra30.c
1103
static void tegra30_enable_cpu_clock(u32 cpu)
drivers/clk/tegra/clk-tegra30.c
1105
writel(CPU_CLOCK(cpu),
drivers/clk/tegra/clk-tegra30.c
1110
static void tegra30_disable_cpu_clock(u32 cpu)
drivers/clk/tegra/clk-tegra30.c
1115
writel(reg | CPU_CLOCK(cpu),
drivers/clk/tegra/clk-tegra30.c
120
#define CPU_CLOCK(cpu) (0x1 << (8 + cpu))
drivers/clk/tegra/clk-tegra30.c
121
#define CPU_RESET(cpu) (0x1111ul << (cpu))
drivers/clocksource/arc_timer.c
305
static int arc_timer_starting_cpu(unsigned int cpu)
drivers/clocksource/arc_timer.c
316
static int arc_timer_dying_cpu(unsigned int cpu)
drivers/clocksource/arm_arch_timer.c
758
static int arch_timer_evtstrm_starting_cpu(unsigned int cpu)
drivers/clocksource/arm_arch_timer.c
764
static int arch_timer_evtstrm_dying_cpu(unsigned int cpu)
drivers/clocksource/arm_arch_timer.c
826
static int arch_timer_starting_cpu(unsigned int cpu)
drivers/clocksource/arm_arch_timer.c
961
static int arch_timer_dying_cpu(unsigned int cpu)
drivers/clocksource/arm_global_timer.c
173
static int gt_starting_cpu(unsigned int cpu)
drivers/clocksource/arm_global_timer.c
185
clk->cpumask = cpumask_of(cpu);
drivers/clocksource/arm_global_timer.c
194
static int gt_dying_cpu(unsigned int cpu)
drivers/clocksource/dummy_timer.c
16
static int dummy_timer_starting_cpu(unsigned int cpu)
drivers/clocksource/dummy_timer.c
18
struct clock_event_device *evt = per_cpu_ptr(&dummy_timer_evt, cpu);
drivers/clocksource/dummy_timer.c
25
evt->cpumask = cpumask_of(cpu);
drivers/clocksource/dw_apb_timer.c
222
dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
drivers/clocksource/dw_apb_timer.c
241
dw_ced->ced.cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu);
drivers/clocksource/exynos_mct.c
455
static int exynos4_mct_starting_cpu(unsigned int cpu)
drivers/clocksource/exynos_mct.c
458
per_cpu_ptr(&percpu_mct_tick, cpu);
drivers/clocksource/exynos_mct.c
461
snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
drivers/clocksource/exynos_mct.c
464
evt->cpumask = cpumask_of(cpu);
drivers/clocksource/exynos_mct.c
482
irq_force_affinity(evt->irq, cpumask_of(cpu));
drivers/clocksource/exynos_mct.c
493
static int exynos4_mct_dying_cpu(unsigned int cpu)
drivers/clocksource/exynos_mct.c
496
per_cpu_ptr(&percpu_mct_tick, cpu);
drivers/clocksource/exynos_mct.c
542
int nr_irqs, i, err, cpu;
drivers/clocksource/exynos_mct.c
571
for_each_possible_cpu(cpu) {
drivers/clocksource/exynos_mct.c
575
per_cpu_ptr(&percpu_mct_tick, cpu);
drivers/clocksource/exynos_mct.c
577
if (cpu >= nr_local) {
drivers/clocksource/exynos_mct.c
582
irq_idx = MCT_L0_IRQ + local_idx[cpu];
drivers/clocksource/exynos_mct.c
595
cpu);
drivers/clocksource/exynos_mct.c
603
for_each_possible_cpu(cpu) {
drivers/clocksource/exynos_mct.c
604
struct mct_clock_event_device *mevt = per_cpu_ptr(&percpu_mct_tick, cpu);
drivers/clocksource/exynos_mct.c
606
if (cpu >= nr_local) {
drivers/clocksource/exynos_mct.c
611
mevt->base = EXYNOS4_MCT_L_BASE(local_idx[cpu]);
drivers/clocksource/exynos_mct.c
628
for_each_possible_cpu(cpu) {
drivers/clocksource/exynos_mct.c
630
per_cpu_ptr(&percpu_mct_tick, cpu);
drivers/clocksource/hyperv_timer.c
131
static int hv_stimer_init(unsigned int cpu)
drivers/clocksource/hyperv_timer.c
138
ce = per_cpu_ptr(hv_clock_event, cpu);
drivers/clocksource/hyperv_timer.c
141
ce->cpumask = cpumask_of(cpu);
drivers/clocksource/hyperv_timer.c
171
int hv_stimer_cleanup(unsigned int cpu)
drivers/clocksource/hyperv_timer.c
192
ce = per_cpu_ptr(hv_clock_event, cpu);
drivers/clocksource/hyperv_timer.c
196
clockevents_unbind_device(ce, cpu);
drivers/clocksource/hyperv_timer.c
323
void hv_stimer_legacy_init(unsigned int cpu, int sint)
drivers/clocksource/hyperv_timer.c
336
(void)hv_stimer_init(cpu);
drivers/clocksource/hyperv_timer.c
346
void hv_stimer_legacy_cleanup(unsigned int cpu)
drivers/clocksource/hyperv_timer.c
350
(void)hv_stimer_cleanup(cpu);
drivers/clocksource/hyperv_timer.c
360
int cpu;
drivers/clocksource/hyperv_timer.c
366
for_each_present_cpu(cpu) {
drivers/clocksource/hyperv_timer.c
367
hv_stimer_legacy_cleanup(cpu);
drivers/clocksource/ingenic-timer.c
120
csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
drivers/clocksource/ingenic-timer.c
123
smp_call_function_single_async(timer->cpu, csd);
drivers/clocksource/ingenic-timer.c
140
static int ingenic_tcu_setup_cevt(unsigned int cpu)
drivers/clocksource/ingenic-timer.c
143
struct ingenic_tcu_timer *timer = &tcu->timers[cpu];
drivers/clocksource/ingenic-timer.c
182
timer->cpu = smp_processor_id();
drivers/clocksource/ingenic-timer.c
279
unsigned int cpu;
drivers/clocksource/ingenic-timer.c
315
for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
drivers/clocksource/ingenic-timer.c
316
timer = &tcu->timers[cpu];
drivers/clocksource/ingenic-timer.c
318
timer->cpu = cpu;
drivers/clocksource/ingenic-timer.c
32
unsigned int cpu;
drivers/clocksource/ingenic-timer.c
374
unsigned int cpu;
drivers/clocksource/ingenic-timer.c
378
for (cpu = 0; cpu < num_online_cpus(); cpu++)
drivers/clocksource/ingenic-timer.c
379
clk_disable(tcu->timers[cpu].clk);
drivers/clocksource/ingenic-timer.c
387
unsigned int cpu;
drivers/clocksource/ingenic-timer.c
390
for (cpu = 0; cpu < num_online_cpus(); cpu++) {
drivers/clocksource/ingenic-timer.c
391
ret = clk_enable(tcu->timers[cpu].clk);
drivers/clocksource/ingenic-timer.c
403
for (; cpu > 0; cpu--)
drivers/clocksource/ingenic-timer.c
404
clk_disable(tcu->timers[cpu - 1].clk);
drivers/clocksource/ingenic-timer.c
69
return container_of(timer, struct ingenic_tcu, timers[timer->cpu]);
drivers/clocksource/jcore-pit.c
105
static int jcore_pit_local_init(unsigned cpu)
drivers/clocksource/jcore-pit.c
110
pr_info("Local J-Core PIT init on cpu %u\n", cpu);
drivers/clocksource/jcore-pit.c
122
static int jcore_pit_local_teardown(unsigned cpu)
drivers/clocksource/jcore-pit.c
126
pr_info("Local J-Core PIT teardown on cpu %u\n", cpu);
drivers/clocksource/jcore-pit.c
148
unsigned pit_irq, cpu;
drivers/clocksource/jcore-pit.c
227
for_each_present_cpu(cpu) {
drivers/clocksource/jcore-pit.c
228
struct jcore_pit *pit = per_cpu_ptr(jcore_pit_percpu, cpu);
drivers/clocksource/jcore-pit.c
230
pit->base = of_iomap(node, cpu);
drivers/clocksource/jcore-pit.c
232
pr_err("Unable to map PIT for cpu %u\n", cpu);
drivers/clocksource/jcore-pit.c
240
pit->ced.cpumask = cpumask_of(cpu);
drivers/clocksource/mips-gic-timer.c
109
static int gic_starting_cpu(unsigned int cpu)
drivers/clocksource/mips-gic-timer.c
114
gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
drivers/clocksource/mips-gic-timer.c
131
static int gic_dying_cpu(unsigned int cpu)
drivers/clocksource/mips-gic-timer.c
55
int cpu = cpumask_first(evt->cpumask);
drivers/clocksource/mips-gic-timer.c
61
if (cpu == raw_smp_processor_id()) {
drivers/clocksource/mips-gic-timer.c
64
write_gic_vl_other(mips_cm_vp_id(cpu));
drivers/clocksource/mips-gic-timer.c
80
static void gic_clockevent_cpu_init(unsigned int cpu,
drivers/clocksource/mips-gic-timer.c
89
cd->cpumask = cpumask_of(cpu);
drivers/clocksource/timer-armada-370-xp.c
170
static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
drivers/clocksource/timer-armada-370-xp.c
172
struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
drivers/clocksource/timer-armada-370-xp.c
192
evt->cpumask = cpumask_of(cpu);
drivers/clocksource/timer-armada-370-xp.c
200
static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
drivers/clocksource/timer-armada-370-xp.c
202
struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
drivers/clocksource/timer-clint.c
129
static int clint_timer_starting_cpu(unsigned int cpu)
drivers/clocksource/timer-clint.c
131
struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);
drivers/clocksource/timer-clint.c
133
ce->cpumask = cpumask_of(cpu);
drivers/clocksource/timer-clint.c
143
static int clint_timer_dying_cpu(unsigned int cpu)
drivers/clocksource/timer-clint.c
49
static void clint_send_ipi(unsigned int cpu)
drivers/clocksource/timer-clint.c
51
writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
drivers/clocksource/timer-econet-en751221.c
101
pr_debug("%s: Setting up clockevent for CPU %d\n", cd->name, cpu);
drivers/clocksource/timer-econet-en751221.c
103
reg = ioread32(reg_ctl(cpu)) | ctl_bit_enabled(cpu);
drivers/clocksource/timer-econet-en751221.c
104
iowrite32(reg, reg_ctl(cpu));
drivers/clocksource/timer-econet-en751221.c
123
static void __init cevt_dev_init(uint cpu)
drivers/clocksource/timer-econet-en751221.c
125
iowrite32(0, reg_count(cpu));
drivers/clocksource/timer-econet-en751221.c
126
iowrite32(U32_MAX, reg_compare(cpu));
drivers/clocksource/timer-econet-en751221.c
68
int cpu = cpumask_first(dev->cpumask);
drivers/clocksource/timer-econet-en751221.c
73
if (!cevt_is_pending(cpu))
drivers/clocksource/timer-econet-en751221.c
76
iowrite32(ioread32(reg_count(cpu)), reg_compare(cpu));
drivers/clocksource/timer-econet-en751221.c
84
int cpu;
drivers/clocksource/timer-econet-en751221.c
86
cpu = cpumask_first(dev->cpumask);
drivers/clocksource/timer-econet-en751221.c
87
next = ioread32(reg_count(cpu)) + delta;
drivers/clocksource/timer-econet-en751221.c
88
iowrite32(next, reg_compare(cpu));
drivers/clocksource/timer-econet-en751221.c
90
if ((s32)(next - ioread32(reg_count(cpu))) < ECONET_MIN_DELTA / 2)
drivers/clocksource/timer-econet-en751221.c
96
static int cevt_init_cpu(uint cpu)
drivers/clocksource/timer-econet-en751221.c
98
struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, cpu);
drivers/clocksource/timer-mp-csky.c
120
int ret, cpu, cpu_rollback;
drivers/clocksource/timer-mp-csky.c
144
for_each_possible_cpu(cpu) {
drivers/clocksource/timer-mp-csky.c
145
to = per_cpu_ptr(&csky_to, cpu);
drivers/clocksource/timer-mp-csky.c
165
if (cpu_rollback == cpu)
drivers/clocksource/timer-mp-csky.c
76
static int csky_mptimer_starting_cpu(unsigned int cpu)
drivers/clocksource/timer-mp-csky.c
78
struct timer_of *to = per_cpu_ptr(&csky_to, cpu);
drivers/clocksource/timer-mp-csky.c
80
to->clkevt.cpumask = cpumask_of(cpu);
drivers/clocksource/timer-mp-csky.c
90
static int csky_mptimer_dying_cpu(unsigned int cpu)
drivers/clocksource/timer-nxp-pit.c
200
int irq, unsigned int cpu)
drivers/clocksource/timer-nxp-pit.c
221
pit->ced.cpumask = cpumask_of(cpu);
drivers/clocksource/timer-nxp-pit.c
231
per_cpu(pit_timers, cpu) = pit;
drivers/clocksource/timer-nxp-pit.c
236
static void pit_clockevent_per_cpu_exit(struct pit_timer *pit, unsigned int cpu)
drivers/clocksource/timer-nxp-pit.c
240
per_cpu(pit_timers, cpu) = NULL;
drivers/clocksource/timer-nxp-pit.c
243
static int pit_clockevent_starting_cpu(unsigned int cpu)
drivers/clocksource/timer-nxp-pit.c
245
struct pit_timer *pit = per_cpu(pit_timers, cpu);
drivers/clocksource/timer-nxp-pit.c
251
ret = irq_force_affinity(pit->ced.irq, cpumask_of(cpu));
drivers/clocksource/timer-nxp-pit.c
253
pit_clockevent_per_cpu_exit(pit, cpu);
drivers/clocksource/timer-nxp-stm.c
301
struct clk *clk, int cpu)
drivers/clocksource/timer-nxp-stm.c
313
stm_timer->ced.cpumask = cpumask_of(cpu);
drivers/clocksource/timer-nxp-stm.c
318
per_cpu(stm_timers, cpu) = stm_timer;
drivers/clocksource/timer-nxp-stm.c
322
dev_dbg(dev, "Initialized per cpu clockevent name=%s, irq=%d, cpu=%d\n", name, irq, cpu);
drivers/clocksource/timer-nxp-stm.c
327
static int nxp_stm_clockevent_starting_cpu(unsigned int cpu)
drivers/clocksource/timer-nxp-stm.c
329
struct stm_timer *stm_timer = per_cpu(stm_timers, cpu);
drivers/clocksource/timer-nxp-stm.c
335
ret = irq_force_affinity(stm_timer->ced.irq, cpumask_of(cpu));
drivers/clocksource/timer-qcom.c
101
struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
drivers/clocksource/timer-qcom.c
112
evt->cpumask = cpumask_of(cpu);
drivers/clocksource/timer-qcom.c
129
static int msm_local_timer_dying_cpu(unsigned int cpu)
drivers/clocksource/timer-qcom.c
131
struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
drivers/clocksource/timer-qcom.c
99
static int msm_local_timer_starting_cpu(unsigned int cpu)
drivers/clocksource/timer-riscv.c
108
static int riscv_timer_starting_cpu(unsigned int cpu)
drivers/clocksource/timer-riscv.c
110
struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
drivers/clocksource/timer-riscv.c
115
ce->cpumask = cpumask_of(cpu);
drivers/clocksource/timer-riscv.c
128
static int riscv_timer_dying_cpu(unsigned int cpu)
drivers/clocksource/timer-rtl-otto.c
246
static int rttm_cpu_starting(unsigned int cpu)
drivers/clocksource/timer-rtl-otto.c
248
struct timer_of *to = per_cpu_ptr(&rttm_to, cpu);
drivers/clocksource/timer-rtl-otto.c
251
to->clkevt.cpumask = cpumask_of(cpu);
drivers/clocksource/timer-rtl-otto.c
262
unsigned int cpu, cpu_rollback;
drivers/clocksource/timer-rtl-otto.c
267
for_each_possible_cpu(cpu) {
drivers/clocksource/timer-rtl-otto.c
268
to = per_cpu_ptr(&rttm_to, cpu);
drivers/clocksource/timer-rtl-otto.c
269
to->of_irq.index = to->of_base.index = cpu;
drivers/clocksource/timer-rtl-otto.c
271
pr_err("setup of timer %d failed\n", cpu);
drivers/clocksource/timer-rtl-otto.c
294
if (cpu_rollback == cpu)
drivers/clocksource/timer-tegra.c
132
static int tegra_timer_setup(unsigned int cpu)
drivers/clocksource/timer-tegra.c
134
struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
drivers/clocksource/timer-tegra.c
139
irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
drivers/clocksource/timer-tegra.c
157
static int tegra_timer_stop(unsigned int cpu)
drivers/clocksource/timer-tegra.c
159
struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
drivers/clocksource/timer-tegra.c
211
static inline unsigned int tegra_base_for_cpu(int cpu, bool tegra20)
drivers/clocksource/timer-tegra.c
214
switch (cpu) {
drivers/clocksource/timer-tegra.c
226
return TIMER10_BASE + cpu * 8;
drivers/clocksource/timer-tegra.c
229
static inline unsigned int tegra_irq_idx_for_cpu(int cpu, bool tegra20)
drivers/clocksource/timer-tegra.c
232
return TIMER1_IRQ_IDX + cpu;
drivers/clocksource/timer-tegra.c
234
return TIMER10_IRQ_IDX + cpu;
drivers/clocksource/timer-tegra.c
254
int cpu, ret;
drivers/clocksource/timer-tegra.c
300
for_each_possible_cpu(cpu) {
drivers/clocksource/timer-tegra.c
301
struct timer_of *cpu_to = per_cpu_ptr(&tegra_to, cpu);
drivers/clocksource/timer-tegra.c
304
unsigned int base = tegra_base_for_cpu(cpu, tegra20);
drivers/clocksource/timer-tegra.c
305
unsigned int idx = tegra_irq_idx_for_cpu(cpu, tegra20);
drivers/clocksource/timer-tegra.c
309
pr_err("failed to map irq for cpu%d\n", cpu);
drivers/clocksource/timer-tegra.c
316
cpu_to->clkevt.cpumask = cpumask_of(cpu);
drivers/clocksource/timer-tegra.c
327
cpu, ret);
drivers/clocksource/timer-tegra.c
355
for_each_possible_cpu(cpu) {
drivers/clocksource/timer-tegra.c
358
cpu_to = per_cpu_ptr(&tegra_to, cpu);
drivers/clocksource/timer-ti-dm-systimer.c
636
static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
drivers/clocksource/timer-ti-dm-systimer.c
641
if (!cpu_possible(cpu))
drivers/clocksource/timer-ti-dm-systimer.c
648
clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
drivers/clocksource/timer-ti-dm-systimer.c
651
cpumask_of(cpu), "percpu-dmtimer",
drivers/clocksource/timer-ti-dm-systimer.c
660
static int omap_dmtimer_starting_cpu(unsigned int cpu)
drivers/clocksource/timer-ti-dm-systimer.c
662
struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
drivers/clocksource/timer-ti-dm-systimer.c
667
irq_force_affinity(dev->irq, cpumask_of(cpu));
drivers/connector/cn_proc.c
380
ev->cpu = -1;
drivers/connector/cn_proc.c
95
((struct proc_event *)msg->data)->cpu = smp_processor_id();
drivers/cpufreq/acpi-cpufreq.c
184
struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
drivers/cpufreq/acpi-cpufreq.c
186
return cpu_has(cpu, X86_FEATURE_EST);
drivers/cpufreq/acpi-cpufreq.c
191
struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
drivers/cpufreq/acpi-cpufreq.c
193
return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
drivers/cpufreq/acpi-cpufreq.c
355
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
drivers/cpufreq/acpi-cpufreq.c
362
pr_debug("%s (%d)\n", __func__, cpu);
drivers/cpufreq/acpi-cpufreq.c
364
policy = cpufreq_cpu_get_raw(cpu);
drivers/cpufreq/acpi-cpufreq.c
373
freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
drivers/cpufreq/acpi-cpufreq.c
435
cpumask_of(policy->cpu) : policy->cpus;
drivers/cpufreq/acpi-cpufreq.c
442
pr_debug("%s (%d)\n", __func__, policy->cpu);
drivers/cpufreq/acpi-cpufreq.c
489
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
drivers/cpufreq/acpi-cpufreq.c
528
static int cpufreq_boost_down_prep(unsigned int cpu)
drivers/cpufreq/acpi-cpufreq.c
626
static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
drivers/cpufreq/acpi-cpufreq.c
635
ret = cppc_get_perf_caps(cpu, &perf_caps);
drivers/cpufreq/acpi-cpufreq.c
638
cpu, ret);
drivers/cpufreq/acpi-cpufreq.c
643
ret = amd_get_boost_ratio_numerator(cpu, &highest_perf);
drivers/cpufreq/acpi-cpufreq.c
646
cpu, ret);
drivers/cpufreq/acpi-cpufreq.c
659
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
drivers/cpufreq/acpi-cpufreq.c
664
pr_debug("CPU%d: nominal performance above highest\n", cpu);
drivers/cpufreq/acpi-cpufreq.c
672
static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
drivers/cpufreq/acpi-cpufreq.c
683
unsigned int cpu = policy->cpu;
drivers/cpufreq/acpi-cpufreq.c
684
struct cpuinfo_x86 *c = &cpu_data(cpu);
drivers/cpufreq/acpi-cpufreq.c
712
perf = per_cpu_ptr(acpi_perf_data, cpu);
drivers/cpufreq/acpi-cpufreq.c
713
data->acpi_perf_cpu = cpu;
drivers/cpufreq/acpi-cpufreq.c
719
result = acpi_processor_register_performance(perf, cpu);
drivers/cpufreq/acpi-cpufreq.c
739
cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
drivers/cpufreq/acpi-cpufreq.c
74
static bool boost_state(unsigned int cpu)
drivers/cpufreq/acpi-cpufreq.c
742
if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
drivers/cpufreq/acpi-cpufreq.c
745
cpumask_set_cpu(cpu, policy->cpus);
drivers/cpufreq/acpi-cpufreq.c
747
topology_sibling_cpumask(cpu));
drivers/cpufreq/acpi-cpufreq.c
780
if (check_est_cpu(cpu)) {
drivers/cpufreq/acpi-cpufreq.c
786
if (check_amd_hwpstate_cpu(cpu)) {
drivers/cpufreq/acpi-cpufreq.c
82
rdmsrq_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
drivers/cpufreq/acpi-cpufreq.c
836
max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq);
drivers/cpufreq/acpi-cpufreq.c
86
rdmsrq_on_cpu(cpu, MSR_K7_HWCR, &msr);
drivers/cpufreq/acpi-cpufreq.c
872
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
drivers/cpufreq/acpi-cpufreq.c
884
pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
drivers/cpufreq/acpi-cpufreq.c
911
if (policy->boost_enabled != boost_state(cpu))
drivers/cpufreq/acpi-cpufreq.c
921
acpi_processor_unregister_performance(cpu);
drivers/cpufreq/acpi-cpufreq.c
937
cpufreq_boost_down_prep(policy->cpu);
drivers/cpufreq/amd-pstate-ut.c
115
int cpu = 0, ret = 0;
drivers/cpufreq/amd-pstate-ut.c
121
for_each_online_cpu(cpu) {
drivers/cpufreq/amd-pstate-ut.c
125
policy = cpufreq_cpu_get(cpu);
drivers/cpufreq/amd-pstate-ut.c
131
ret = cppc_get_perf_caps(cpu, &cppc_perf);
drivers/cpufreq/amd-pstate-ut.c
142
ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
drivers/cpufreq/amd-pstate-ut.c
157
__func__, cpu, highest_perf, cur_perf.highest_perf);
drivers/cpufreq/amd-pstate-ut.c
164
__func__, cpu, nominal_perf, cur_perf.nominal_perf,
drivers/cpufreq/amd-pstate-ut.c
175
__func__, cpu, highest_perf, nominal_perf,
drivers/cpufreq/amd-pstate-ut.c
191
int cpu = 0;
drivers/cpufreq/amd-pstate-ut.c
193
for_each_online_cpu(cpu) {
drivers/cpufreq/amd-pstate-ut.c
197
policy = cpufreq_cpu_get(cpu);
drivers/cpufreq/amd-pstate-ut.c
207
__func__, cpu, policy->cpuinfo.max_freq, cpudata->nominal_freq,
drivers/cpufreq/amd-pstate-ut.c
214
__func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
drivers/cpufreq/amd-pstate-ut.c
222
__func__, cpu, policy->max, policy->cpuinfo.max_freq,
drivers/cpufreq/amd-pstate-ut.c
227
pr_err("%s cpu%d must support boost!\n", __func__, cpu);
drivers/cpufreq/amd-pstate.c
1000
cpudata->cpu = policy->cpu;
drivers/cpufreq/amd-pstate.c
1016
policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
drivers/cpufreq/amd-pstate.c
1017
policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
drivers/cpufreq/amd-pstate.c
1064
pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
drivers/cpufreq/amd-pstate.c
1287
int cpu = 0;
drivers/cpufreq/amd-pstate.c
1294
for_each_online_cpu(cpu) {
drivers/cpufreq/amd-pstate.c
1295
cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
drivers/cpufreq/amd-pstate.c
1476
amd_perf_ctl_reset(policy->cpu);
drivers/cpufreq/amd-pstate.c
1477
dev = get_cpu_device(policy->cpu);
drivers/cpufreq/amd-pstate.c
1485
cpudata->cpu = policy->cpu;
drivers/cpufreq/amd-pstate.c
1543
pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
drivers/cpufreq/amd-pstate.c
1562
pr_debug("CPU %d exiting\n", policy->cpu);
drivers/cpufreq/amd-pstate.c
202
ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
drivers/cpufreq/amd-pstate.c
223
ret = cppc_get_epp_perf(cpudata->cpu, &epp);
drivers/cpufreq/amd-pstate.c
250
trace_amd_pstate_epp_perf(cpudata->cpu,
drivers/cpufreq/amd-pstate.c
266
int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
drivers/cpufreq/amd-pstate.c
301
trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
drivers/cpufreq/amd-pstate.c
314
ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
drivers/cpufreq/amd-pstate.c
346
trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
drivers/cpufreq/amd-pstate.c
360
ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
drivers/cpufreq/amd-pstate.c
376
return wrmsrq_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
drivers/cpufreq/amd-pstate.c
381
return cppc_set_enable(policy->cpu, 1);
drivers/cpufreq/amd-pstate.c
397
int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
drivers/cpufreq/amd-pstate.c
402
ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
drivers/cpufreq/amd-pstate.c
406
ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
drivers/cpufreq/amd-pstate.c
441
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
drivers/cpufreq/amd-pstate.c
445
ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
drivers/cpufreq/amd-pstate.c
461
ret = cppc_get_auto_sel(cpudata->cpu, &auto_sel);
drivers/cpufreq/amd-pstate.c
467
ret = cppc_set_auto_sel(cpudata->cpu,
drivers/cpufreq/amd-pstate.c
510
trace_amd_pstate_epp_perf(cpudata->cpu,
drivers/cpufreq/amd-pstate.c
526
ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
drivers/cpufreq/amd-pstate.c
571
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
drivers/cpufreq/amd-pstate.c
593
cpudata->cpu, fast_switch);
drivers/cpufreq/amd-pstate.c
608
cpufreq_cpu_get(policy_data->cpu);
drivers/cpufreq/amd-pstate.c
716
static void amd_pstate_adjust_perf(unsigned int cpu,
drivers/cpufreq/amd-pstate.c
722
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/amd-pstate.c
777
pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu);
drivers/cpufreq/amd-pstate.c
814
ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
drivers/cpufreq/amd-pstate.c
831
static void amd_perf_ctl_reset(unsigned int cpu)
drivers/cpufreq/amd-pstate.c
833
wrmsrq_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
drivers/cpufreq/amd-pstate.c
854
sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
drivers/cpufreq/amd-pstate.c
862
unsigned int cpu = policy->cpu;
drivers/cpufreq/amd-pstate.c
867
if (amd_get_highest_perf(cpu, &cur_high))
drivers/cpufreq/amd-pstate.c
878
sched_set_itmt_core_prio((int)cur_high, cpu);
drivers/cpufreq/amd-pstate.c
879
sched_update_asym_prefer_cpu(cpu, prev_high, cur_high);
drivers/cpufreq/amd-pstate.c
888
static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
drivers/cpufreq/amd-pstate.c
892
transition_delay_ns = cppc_get_transition_latency(cpu);
drivers/cpufreq/amd-pstate.c
907
static u32 amd_pstate_get_transition_latency(unsigned int cpu)
drivers/cpufreq/amd-pstate.c
911
transition_latency = cppc_get_transition_latency(cpu);
drivers/cpufreq/amd-pstate.c
933
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
drivers/cpufreq/amd-pstate.c
991
amd_perf_ctl_reset(policy->cpu);
drivers/cpufreq/amd-pstate.c
992
dev = get_cpu_device(policy->cpu);
drivers/cpufreq/amd-pstate.h
86
int cpu;
drivers/cpufreq/amd_freq_sensitivity.c
46
struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
drivers/cpufreq/amd_freq_sensitivity.c
54
rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
drivers/cpufreq/amd_freq_sensitivity.c
56
rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE,
drivers/cpufreq/apple-soc-cpufreq.c
135
static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
drivers/cpufreq/apple-soc-cpufreq.c
142
policy = cpufreq_cpu_get_raw(cpu);
drivers/cpufreq/apple-soc-cpufreq.c
218
ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
drivers/cpufreq/apple-soc-cpufreq.c
248
cpu_dev = get_cpu_device(policy->cpu);
drivers/cpufreq/apple-soc-cpufreq.c
250
pr_err("failed to get cpu%d device\n", policy->cpu);
drivers/cpufreq/armada-8k-cpufreq.c
132
int ret = 0, opps_index = 0, cpu, nb_cpus;
drivers/cpufreq/armada-8k-cpufreq.c
156
for_each_cpu(cpu, &cpus) {
drivers/cpufreq/armada-8k-cpufreq.c
160
cpu_dev = get_cpu_device(cpu);
drivers/cpufreq/armada-8k-cpufreq.c
163
pr_err("Cannot get CPU %d\n", cpu);
drivers/cpufreq/armada-8k-cpufreq.c
170
pr_err("Cannot get clock for CPU %d\n", cpu);
drivers/cpufreq/armada-8k-cpufreq.c
48
int cpu;
drivers/cpufreq/armada-8k-cpufreq.c
50
for_each_present_cpu(cpu) {
drivers/cpufreq/armada-8k-cpufreq.c
54
cpu_dev = get_cpu_device(cpu);
drivers/cpufreq/armada-8k-cpufreq.c
56
pr_warn("Failed to get cpu%d device\n", cpu);
drivers/cpufreq/armada-8k-cpufreq.c
62
pr_warn("Cannot get clock for CPU %d\n", cpu);
drivers/cpufreq/armada-8k-cpufreq.c
65
cpumask_set_cpu(cpu, cpumask);
drivers/cpufreq/bmips-cpufreq.c
87
static unsigned int bmips_cpufreq_get(unsigned int cpu)
drivers/cpufreq/brcmstb-avs-cpufreq.c
481
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
drivers/cpufreq/brcmstb-avs-cpufreq.c
483
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/cppc_cpufreq.c
151
int cpu, ret;
drivers/cpufreq/cppc_cpufreq.c
156
for_each_cpu(cpu, policy->cpus) {
drivers/cpufreq/cppc_cpufreq.c
157
cppc_fi = &per_cpu(cppc_freq_inv, cpu);
drivers/cpufreq/cppc_cpufreq.c
158
cppc_fi->cpu = cpu;
drivers/cpufreq/cppc_cpufreq.c
160
if (cppc_perf_ctrs_in_pcc_cpu(cpu)) {
drivers/cpufreq/cppc_cpufreq.c
166
ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
drivers/cpufreq/cppc_cpufreq.c
172
if (ret && cpu_online(cpu)) {
drivers/cpufreq/cppc_cpufreq.c
174
__func__, cpu, ret);
drivers/cpufreq/cppc_cpufreq.c
194
int cpu;
drivers/cpufreq/cppc_cpufreq.c
202
for_each_cpu(cpu, policy->related_cpus) {
drivers/cpufreq/cppc_cpufreq.c
203
if (!cppc_perf_ctrs_in_pcc_cpu(cpu))
drivers/cpufreq/cppc_cpufreq.c
205
cppc_fi = &per_cpu(cppc_freq_inv, cpu);
drivers/cpufreq/cppc_cpufreq.c
295
unsigned int cpu = policy->cpu;
drivers/cpufreq/cppc_cpufreq.c
305
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
drivers/cpufreq/cppc_cpufreq.c
310
cpu, ret);
drivers/cpufreq/cppc_cpufreq.c
319
unsigned int cpu = policy->cpu;
drivers/cpufreq/cppc_cpufreq.c
325
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
drivers/cpufreq/cppc_cpufreq.c
329
cpu, ret);
drivers/cpufreq/cppc_cpufreq.c
342
static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
drivers/cpufreq/cppc_cpufreq.c
344
int transition_latency_ns = cppc_get_transition_latency(cpu);
drivers/cpufreq/cppc_cpufreq.c
361
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
drivers/cpufreq/cppc_cpufreq.c
374
return __cppc_cpufreq_get_transition_delay_us(cpu);
drivers/cpufreq/cppc_cpufreq.c
377
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
drivers/cpufreq/cppc_cpufreq.c
379
return __cppc_cpufreq_get_transition_delay_us(cpu);
drivers/cpufreq/cppc_cpufreq.c
400
int cpu = policy->cpu;
drivers/cpufreq/cppc_cpufreq.c
404
max_cap = arch_scale_cpu_capacity(cpu);
drivers/cpufreq/cppc_cpufreq.c
416
static inline unsigned long compute_cost(int cpu, int step)
drivers/cpufreq/cppc_cpufreq.c
418
return CPPC_EM_COST_GAP * per_cpu(efficiency_class, cpu) +
drivers/cpufreq/cppc_cpufreq.c
43
int cpu;
drivers/cpufreq/cppc_cpufreq.c
528
em_dev_register_perf_domain(get_cpu_device(policy->cpu),
drivers/cpufreq/cppc_cpufreq.c
537
int class, cpu, index;
drivers/cpufreq/cppc_cpufreq.c
539
for_each_possible_cpu(cpu) {
drivers/cpufreq/cppc_cpufreq.c
540
gicc = acpi_cpu_get_madt_gicc(cpu);
drivers/cpufreq/cppc_cpufreq.c
557
for_each_possible_cpu(cpu) {
drivers/cpufreq/cppc_cpufreq.c
558
gicc = acpi_cpu_get_madt_gicc(cpu);
drivers/cpufreq/cppc_cpufreq.c
560
per_cpu(efficiency_class, cpu) = index;
drivers/cpufreq/cppc_cpufreq.c
573
static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
drivers/cpufreq/cppc_cpufreq.c
585
ret = acpi_get_psd_map(cpu, cpu_data);
drivers/cpufreq/cppc_cpufreq.c
587
pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
drivers/cpufreq/cppc_cpufreq.c
591
ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
drivers/cpufreq/cppc_cpufreq.c
593
pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
drivers/cpufreq/cppc_cpufreq.c
618
unsigned int cpu = policy->cpu;
drivers/cpufreq/cppc_cpufreq.c
623
cpu_data = cppc_cpufreq_get_cpu_data(cpu);
drivers/cpufreq/cppc_cpufreq.c
625
pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
drivers/cpufreq/cppc_cpufreq.c
647
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
drivers/cpufreq/cppc_cpufreq.c
684
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
drivers/cpufreq/cppc_cpufreq.c
687
caps->highest_perf, cpu, ret);
drivers/cpufreq/cppc_cpufreq.c
703
unsigned int cpu = policy->cpu;
drivers/cpufreq/cppc_cpufreq.c
710
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
drivers/cpufreq/cppc_cpufreq.c
713
caps->lowest_perf, cpu, ret);
drivers/cpufreq/cppc_cpufreq.c
749
static int cppc_get_perf_ctrs_sample(int cpu,
drivers/cpufreq/cppc_cpufreq.c
755
ret = cppc_get_perf_ctrs(cpu, fb_ctrs_t0);
drivers/cpufreq/cppc_cpufreq.c
761
return cppc_get_perf_ctrs(cpu, fb_ctrs_t1);
drivers/cpufreq/cppc_cpufreq.c
764
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
drivers/cpufreq/cppc_cpufreq.c
766
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/cppc_cpufreq.c
77
if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
drivers/cpufreq/cppc_cpufreq.c
777
ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
drivers/cpufreq/cppc_cpufreq.c
800
if (cppc_get_desired_perf(cpu, &delivered_perf))
drivers/cpufreq/cppc_cpufreq.c
837
ret = cppc_get_auto_sel(policy->cpu, &val);
drivers/cpufreq/cppc_cpufreq.c
859
ret = cppc_set_auto_sel(policy->cpu, val);
drivers/cpufreq/cppc_cpufreq.c
866
static ssize_t cppc_cpufreq_sysfs_show_u64(unsigned int cpu,
drivers/cpufreq/cppc_cpufreq.c
871
int ret = get_func((int)cpu, &val);
drivers/cpufreq/cppc_cpufreq.c
882
static ssize_t cppc_cpufreq_sysfs_store_u64(unsigned int cpu,
drivers/cpufreq/cppc_cpufreq.c
893
ret = set_func((int)cpu, val);
drivers/cpufreq/cppc_cpufreq.c
901
return cppc_cpufreq_sysfs_show_u64(policy->cpu, _get_func, buf);\
drivers/cpufreq/cppc_cpufreq.c
906
return cppc_cpufreq_sysfs_store_u64(policy->cpu, _set_func, \
drivers/cpufreq/cppc_cpufreq.c
95
per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
drivers/cpufreq/cpufreq-dt.c
156
static int dt_cpufreq_early_init(struct device *dev, int cpu)
drivers/cpufreq/cpufreq-dt.c
165
if (cpufreq_dt_find_data(cpu))
drivers/cpufreq/cpufreq-dt.c
168
cpu_dev = get_cpu_device(cpu);
drivers/cpufreq/cpufreq-dt.c
179
cpumask_set_cpu(cpu, priv->cpus);
drivers/cpufreq/cpufreq-dt.c
283
int ret, cpu;
drivers/cpufreq/cpufreq-dt.c
286
for_each_present_cpu(cpu) {
drivers/cpufreq/cpufreq-dt.c
287
ret = dt_cpufreq_early_init(&pdev->dev, cpu);
drivers/cpufreq/cpufreq-dt.c
39
static struct private_data *cpufreq_dt_find_data(int cpu)
drivers/cpufreq/cpufreq-dt.c
44
if (cpumask_test_cpu(cpu, priv->cpus))
drivers/cpufreq/cpufreq-dt.c
66
int cpu = dev->id;
drivers/cpufreq/cpufreq-dt.c
73
if (!cpu && of_property_present(np, "cpu0-supply"))
drivers/cpufreq/cpufreq-dt.c
79
dev_dbg(dev, "no regulator for cpu%d\n", cpu);
drivers/cpufreq/cpufreq-dt.c
91
priv = cpufreq_dt_find_data(policy->cpu);
drivers/cpufreq/cpufreq-dt.c
93
pr_err("failed to find data for cpu%d\n", policy->cpu);
drivers/cpufreq/cpufreq-nforce2.c
236
static unsigned int nforce2_get(unsigned int cpu)
drivers/cpufreq/cpufreq-nforce2.c
238
if (cpu)
drivers/cpufreq/cpufreq-nforce2.c
264
freqs.old = nforce2_get(policy->cpu);
drivers/cpufreq/cpufreq-nforce2.c
315
if (policy->cpu != 0)
drivers/cpufreq/cpufreq.c
1045
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
drivers/cpufreq/cpufreq.c
1051
if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
drivers/cpufreq/cpufreq.c
1059
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
drivers/cpufreq/cpufreq.c
1064
cpumask_clear_cpu(cpu, policy->real_cpus);
drivers/cpufreq/cpufreq.c
1134
gov->name, policy->cpu);
drivers/cpufreq/cpufreq.c
1171
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
drivers/cpufreq/cpufreq.c
1176
if (cpumask_test_cpu(cpu, policy->cpus))
drivers/cpufreq/cpufreq.c
1184
cpumask_set_cpu(cpu, policy->cpus);
drivers/cpufreq/cpufreq.c
1198
pr_debug("updating policy for CPU %u\n", policy->cpu);
drivers/cpufreq/cpufreq.c
1210
pr_debug("handle_update for cpu %u called\n", policy->cpu);
drivers/cpufreq/cpufreq.c
1257
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
drivers/cpufreq/cpufreq.c
1260
struct device *dev = get_cpu_device(cpu);
drivers/cpufreq/cpufreq.c
1281
cpufreq_global_kobject, "policy%u", cpu);
drivers/cpufreq/cpufreq.c
1304
ret, cpu);
drivers/cpufreq/cpufreq.c
1312
ret, cpu);
drivers/cpufreq/cpufreq.c
133
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
drivers/cpufreq/cpufreq.c
1343
int cpu;
drivers/cpufreq/cpufreq.c
1356
for_each_cpu(cpu, policy->related_cpus)
drivers/cpufreq/cpufreq.c
1357
per_cpu(cpufreq_cpu_data, cpu) = NULL;
drivers/cpufreq/cpufreq.c
1390
unsigned int cpu, bool new_policy)
drivers/cpufreq/cpufreq.c
1398
policy->cpu = cpu;
drivers/cpufreq/cpufreq.c
1412
cpumask_copy(policy->cpus, cpumask_of(cpu));
drivers/cpufreq/cpufreq.c
142
kcpustat_cpu_fetch(&kcpustat, cpu);
drivers/cpufreq/cpufreq.c
1494
policy->cur = cpufreq_driver->get(policy->cpu);
drivers/cpufreq/cpufreq.c
1537
__func__, policy->cpu, old_freq, policy->cur);
drivers/cpufreq/cpufreq.c
1569
__func__, cpu, ret);
drivers/cpufreq/cpufreq.c
158
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
drivers/cpufreq/cpufreq.c
1593
static int cpufreq_online(unsigned int cpu)
drivers/cpufreq/cpufreq.c
1599
pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
drivers/cpufreq/cpufreq.c
160
u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
drivers/cpufreq/cpufreq.c
1602
policy = per_cpu(cpufreq_cpu_data, cpu);
drivers/cpufreq/cpufreq.c
1604
WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
drivers/cpufreq/cpufreq.c
1606
return cpufreq_add_policy_cpu(policy, cpu);
drivers/cpufreq/cpufreq.c
1612
policy = cpufreq_policy_alloc(cpu);
drivers/cpufreq/cpufreq.c
1617
ret = cpufreq_policy_online(policy, cpu, new_policy);
drivers/cpufreq/cpufreq.c
163
return get_cpu_idle_time_jiffy(cpu, wall);
drivers/cpufreq/cpufreq.c
1643
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
drivers/cpufreq/cpufreq.c
165
idle_time += get_cpu_iowait_time_us(cpu, wall);
drivers/cpufreq/cpufreq.c
1661
unsigned cpu = dev->id;
drivers/cpufreq/cpufreq.c
1664
dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
drivers/cpufreq/cpufreq.c
1666
if (cpu_online(cpu)) {
drivers/cpufreq/cpufreq.c
1667
ret = cpufreq_online(cpu);
drivers/cpufreq/cpufreq.c
1673
policy = per_cpu(cpufreq_cpu_data, cpu);
drivers/cpufreq/cpufreq.c
1675
add_cpu_dev_symlink(policy, cpu, dev);
drivers/cpufreq/cpufreq.c
1680
static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
drivers/cpufreq/cpufreq.c
1687
cpumask_clear_cpu(cpu, policy->cpus);
drivers/cpufreq/cpufreq.c
1691
if (cpu == policy->cpu)
drivers/cpufreq/cpufreq.c
1692
policy->cpu = cpumask_any(policy->cpus);
drivers/cpufreq/cpufreq.c
1727
static int cpufreq_offline(unsigned int cpu)
drivers/cpufreq/cpufreq.c
1731
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
drivers/cpufreq/cpufreq.c
1733
policy = cpufreq_cpu_get_raw(cpu);
drivers/cpufreq/cpufreq.c
1741
__cpufreq_offline(cpu, policy);
drivers/cpufreq/cpufreq.c
1753
unsigned int cpu = dev->id;
drivers/cpufreq/cpufreq.c
1754
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
drivers/cpufreq/cpufreq.c
1760
if (cpu_online(cpu))
drivers/cpufreq/cpufreq.c
1761
__cpufreq_offline(cpu, policy);
drivers/cpufreq/cpufreq.c
1763
remove_cpu_dev_symlink(policy, cpu, dev);
drivers/cpufreq/cpufreq.c
1815
new_freq = cpufreq_driver->get(policy->cpu);
drivers/cpufreq/cpufreq.c
1852
unsigned int cpufreq_quick_get(unsigned int cpu)
drivers/cpufreq/cpufreq.c
1859
unsigned int ret_freq = cpufreq_driver->get(cpu);
drivers/cpufreq/cpufreq.c
1868
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/cpufreq.c
1882
unsigned int cpufreq_quick_get_max(unsigned int cpu)
drivers/cpufreq/cpufreq.c
1884
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/cpufreq.c
1898
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
drivers/cpufreq/cpufreq.c
1900
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/cpufreq.c
1922
unsigned int cpufreq_get(unsigned int cpu)
drivers/cpufreq/cpufreq.c
1924
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/cpufreq.c
193
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
drivers/cpufreq/cpufreq.c
195
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
drivers/cpufreq/cpufreq.c
197
return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
drivers/cpufreq/cpufreq.c
201
struct cpufreq_policy *cpufreq_cpu_policy(unsigned int cpu)
drivers/cpufreq/cpufreq.c
203
return per_cpu(cpufreq_cpu_data, cpu);
drivers/cpufreq/cpufreq.c
2038
__func__, policy->cpu);
drivers/cpufreq/cpufreq.c
207
unsigned int cpufreq_generic_get(unsigned int cpu)
drivers/cpufreq/cpufreq.c
209
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
drivers/cpufreq/cpufreq.c
213
__func__, policy ? "clk" : "policy", cpu);
drivers/cpufreq/cpufreq.c
2207
int cpu;
drivers/cpufreq/cpufreq.c
2217
arch_scale_freq_ref(policy->cpu));
drivers/cpufreq/cpufreq.c
2221
for_each_cpu(cpu, policy->cpus)
drivers/cpufreq/cpufreq.c
2222
trace_cpu_frequency(freq, cpu);
drivers/cpufreq/cpufreq.c
2250
void cpufreq_driver_adjust_perf(unsigned int cpu,
drivers/cpufreq/cpufreq.c
2255
cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
drivers/cpufreq/cpufreq.c
2282
__func__, policy->cpu, freqs->old, freqs->new);
drivers/cpufreq/cpufreq.c
232
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
drivers/cpufreq/cpufreq.c
2325
__func__, policy->cpu, freqs.old, freqs.new);
drivers/cpufreq/cpufreq.c
2368
policy->cpu, target_freq, relation, old_target_freq);
drivers/cpufreq/cpufreq.c
237
if (WARN_ON(cpu >= nr_cpu_ids))
drivers/cpufreq/cpufreq.c
2444
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
drivers/cpufreq/cpufreq.c
245
policy = cpufreq_cpu_get_raw(cpu);
drivers/cpufreq/cpufreq.c
2464
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
drivers/cpufreq/cpufreq.c
2482
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
drivers/cpufreq/cpufreq.c
2503
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
drivers/cpufreq/cpufreq.c
2514
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
drivers/cpufreq/cpufreq.c
2587
int cpu;
drivers/cpufreq/cpufreq.c
2589
cpu = cpumask_first(policy->related_cpus);
drivers/cpufreq/cpufreq.c
2590
max_freq = arch_scale_freq_ref(cpu);
drivers/cpufreq/cpufreq.c
2600
max_capacity = arch_scale_cpu_capacity(cpu);
drivers/cpufreq/cpufreq.c
2605
for_each_cpu(cpu, policy->related_cpus)
drivers/cpufreq/cpufreq.c
2606
WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
drivers/cpufreq/cpufreq.c
2634
new_data.cpu = policy->cpu;
drivers/cpufreq/cpufreq.c
2643
new_data.cpu, new_data.min, new_data.max);
drivers/cpufreq/cpufreq.c
2751
void cpufreq_update_policy(unsigned int cpu)
drivers/cpufreq/cpufreq.c
2753
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/cpufreq.c
2768
void cpufreq_update_limits(unsigned int cpu)
drivers/cpufreq/cpufreq.c
2770
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/cpufreq.c
2879
static int cpuhp_cpufreq_online(unsigned int cpu)
drivers/cpufreq/cpufreq.c
2881
cpufreq_online(cpu);
drivers/cpufreq/cpufreq.c
2886
static int cpuhp_cpufreq_offline(unsigned int cpu)
drivers/cpufreq/cpufreq.c
2888
cpufreq_offline(cpu);
drivers/cpufreq/cpufreq.c
3052
static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
drivers/cpufreq/cpufreq.c
3054
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/cpufreq.c
3056
pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
drivers/cpufreq/cpufreq.c
3065
unsigned int cpu;
drivers/cpufreq/cpufreq.c
3068
for_each_cpu(cpu, cpu_mask) {
drivers/cpufreq/cpufreq.c
3069
if (!cpufreq_policy_is_good_for_eas(cpu)) {
drivers/cpufreq/cpufreq.c
318
int cpu;
drivers/cpufreq/cpufreq.c
354
for_each_cpu(cpu, policy->cpus)
drivers/cpufreq/cpufreq.c
355
trace_cpu_frequency(freqs->new, cpu);
drivers/cpufreq/cpufreq.c
422
arch_scale_freq_ref(policy->cpu));
drivers/cpufreq/cpufreq.c
478
policy->cpu);
drivers/cpufreq/cpufreq.c
721
__weak int arch_freq_get_on_cpu(int cpu)
drivers/cpufreq/cpufreq.c
728
return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP;
drivers/cpufreq/cpufreq.c
737
? arch_freq_get_on_cpu(policy->cpu)
drivers/cpufreq/cpufreq.c
743
ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
drivers/cpufreq/cpufreq.c
790
int avg_freq = arch_freq_get_on_cpu(policy->cpu);
drivers/cpufreq/cpufreq.c
886
unsigned int cpu;
drivers/cpufreq/cpufreq.c
888
for_each_cpu(cpu, mask) {
drivers/cpufreq/cpufreq.c
889
i += sysfs_emit_at(buf, i, "%u ", cpu);
drivers/cpufreq/cpufreq.c
952
ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
drivers/cpufreq/cpufreq_governor.c
327
int cpu;
drivers/cpufreq/cpufreq_governor.c
332
for_each_cpu(cpu, policy->cpus) {
drivers/cpufreq/cpufreq_governor.c
333
struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
drivers/cpufreq/cpufreq_governor.c
335
cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
drivers/cpufreq/cpufreq_ondemand.c
390
unsigned int cpu;
drivers/cpufreq/cpufreq_ondemand.c
400
for_each_online_cpu(cpu) {
drivers/cpufreq/cpufreq_ondemand.c
406
if (cpumask_test_cpu(cpu, done))
drivers/cpufreq/cpufreq_ondemand.c
409
policy = cpufreq_cpu_get_raw(cpu);
drivers/cpufreq/cpufreq_userspace.c
114
policy->cpu, policy->min, policy->max, policy->cur, userspace->setspeed);
drivers/cpufreq/cpufreq_userspace.c
36
pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
drivers/cpufreq/cpufreq_userspace.c
86
pr_debug("started managing cpu %u\n", policy->cpu);
drivers/cpufreq/cpufreq_userspace.c
99
pr_debug("managing cpu %u stopped\n", policy->cpu);
drivers/cpufreq/davinci-cpufreq.c
75
if (policy->cpu != 0)
drivers/cpufreq/e_powersaver.c
155
unsigned int cpu = policy->cpu;
drivers/cpufreq/e_powersaver.c
159
if (unlikely(eps_cpu[cpu] == NULL))
drivers/cpufreq/e_powersaver.c
161
centaur = eps_cpu[cpu];
drivers/cpufreq/e_powersaver.c
190
if (policy->cpu != 0)
drivers/cpufreq/e_powersaver.c
288
if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) {
drivers/cpufreq/e_powersaver.c
363
unsigned int cpu = policy->cpu;
drivers/cpufreq/e_powersaver.c
366
kfree(eps_cpu[cpu]);
drivers/cpufreq/e_powersaver.c
367
eps_cpu[cpu] = NULL;
drivers/cpufreq/e_powersaver.c
90
static unsigned int eps_get(unsigned int cpu)
drivers/cpufreq/e_powersaver.c
95
if (cpu)
drivers/cpufreq/e_powersaver.c
97
centaur = eps_cpu[cpu];
drivers/cpufreq/elanfreq.c
76
static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
drivers/cpufreq/freq_table.c
133
target_freq, relation, policy->cpu);
drivers/cpufreq/freq_table.c
194
WARN(1, "Invalid frequency table: %u\n", policy->cpu);
drivers/cpufreq/freq_table.c
74
policy->min, policy->max, policy->cpu);
drivers/cpufreq/freq_table.c
96
policy->min, policy->max, policy->cpu);
drivers/cpufreq/gx-suspmod.c
202
static unsigned int gx_get_cpuspeed(unsigned int cpu)
drivers/cpufreq/gx-suspmod.c
339
policy->cpu = 0;
drivers/cpufreq/gx-suspmod.c
381
policy->cpu = 0;
drivers/cpufreq/gx-suspmod.c
402
if (!policy || policy->cpu != 0)
drivers/cpufreq/gx-suspmod.c
418
policy->cpu = 0;
drivers/cpufreq/intel_pstate.c
1007
cpu_dev = get_cpu_device(cpu);
drivers/cpufreq/intel_pstate.c
1012
cpumask_of(cpu), false))
drivers/cpufreq/intel_pstate.c
1022
unsigned int cpu;
drivers/cpufreq/intel_pstate.c
1024
for_each_online_cpu(cpu)
drivers/cpufreq/intel_pstate.c
1025
hybrid_register_perf_domain(cpu);
drivers/cpufreq/intel_pstate.c
1028
static void hybrid_update_perf_domain(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
1030
if (cpu->pd_registered)
drivers/cpufreq/intel_pstate.c
1031
em_adjust_cpu_capacity(cpu->cpu);
drivers/cpufreq/intel_pstate.c
1034
static inline bool hybrid_register_perf_domain(unsigned int cpu) { return false; }
drivers/cpufreq/intel_pstate.c
1036
static inline void hybrid_update_perf_domain(struct cpudata *cpu) {}
drivers/cpufreq/intel_pstate.c
1039
static void hybrid_set_cpu_capacity(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
1041
arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,
drivers/cpufreq/intel_pstate.c
1043
cpu->capacity_perf,
drivers/cpufreq/intel_pstate.c
1044
cpu->pstate.max_pstate_physical);
drivers/cpufreq/intel_pstate.c
1045
hybrid_update_perf_domain(cpu);
drivers/cpufreq/intel_pstate.c
1047
topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu));
drivers/cpufreq/intel_pstate.c
1050
cpu->cpu, cpu->capacity_perf, cpu->pstate.max_pstate_physical,
drivers/cpufreq/intel_pstate.c
1059
static void hybrid_get_capacity_perf(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
1062
cpu->capacity_perf = cpu->pstate.max_pstate_physical;
drivers/cpufreq/intel_pstate.c
1066
cpu->capacity_perf = HWP_HIGHEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
drivers/cpufreq/intel_pstate.c
1074
struct cpudata *cpu = all_cpu_data[cpunum];
drivers/cpufreq/intel_pstate.c
1076
if (cpu)
drivers/cpufreq/intel_pstate.c
1077
hybrid_set_cpu_capacity(cpu);
drivers/cpufreq/intel_pstate.c
1088
struct cpudata *cpu = all_cpu_data[cpunum];
drivers/cpufreq/intel_pstate.c
1090
if (!cpu)
drivers/cpufreq/intel_pstate.c
1098
hybrid_get_capacity_perf(cpu);
drivers/cpufreq/intel_pstate.c
1105
if (cpu == hybrid_max_perf_cpu)
drivers/cpufreq/intel_pstate.c
1108
if (cpu->capacity_perf > max_cap_perf) {
drivers/cpufreq/intel_pstate.c
1109
max_cap_perf = cpu->capacity_perf;
drivers/cpufreq/intel_pstate.c
1110
max_perf_cpu = cpu;
drivers/cpufreq/intel_pstate.c
1186
static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
1190
rdmsrq_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
drivers/cpufreq/intel_pstate.c
1191
WRITE_ONCE(cpu->hwp_cap_cached, cap);
drivers/cpufreq/intel_pstate.c
1192
cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
drivers/cpufreq/intel_pstate.c
1193
cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
drivers/cpufreq/intel_pstate.c
1196
static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
1198
int scaling = cpu->pstate.scaling;
drivers/cpufreq/intel_pstate.c
1200
__intel_pstate_get_hwp_cap(cpu);
drivers/cpufreq/intel_pstate.c
1202
cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
drivers/cpufreq/intel_pstate.c
1203
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
drivers/cpufreq/intel_pstate.c
1204
if (scaling != cpu->pstate.perf_ctl_scaling) {
drivers/cpufreq/intel_pstate.c
1205
int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
drivers/cpufreq/intel_pstate.c
1207
cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
drivers/cpufreq/intel_pstate.c
1209
cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
drivers/cpufreq/intel_pstate.c
1214
static void hybrid_update_capacity(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
1229
intel_pstate_get_hwp_cap(cpu);
drivers/cpufreq/intel_pstate.c
1231
hybrid_get_capacity_perf(cpu);
drivers/cpufreq/intel_pstate.c
1233
if (cpu->capacity_perf > max_cap_perf) {
drivers/cpufreq/intel_pstate.c
1234
hybrid_max_perf_cpu = cpu;
drivers/cpufreq/intel_pstate.c
1240
if (cpu == hybrid_max_perf_cpu && cpu->capacity_perf < max_cap_perf) {
drivers/cpufreq/intel_pstate.c
1245
hybrid_set_cpu_capacity(cpu);
drivers/cpufreq/intel_pstate.c
1252
if (hybrid_register_perf_domain(cpu->cpu))
drivers/cpufreq/intel_pstate.c
1259
static void intel_pstate_hwp_set(unsigned int cpu)
drivers/cpufreq/intel_pstate.c
1261
struct cpudata *cpu_data = all_cpu_data[cpu];
drivers/cpufreq/intel_pstate.c
1272
rdmsrq_on_cpu(cpu, MSR_HWP_REQUEST, &value);
drivers/cpufreq/intel_pstate.c
1318
wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
drivers/cpufreq/intel_pstate.c
1323
static void intel_pstate_hwp_offline(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
1325
u64 value = READ_ONCE(cpu->hwp_req_cached);
drivers/cpufreq/intel_pstate.c
1328
intel_pstate_disable_hwp_interrupt(cpu);
drivers/cpufreq/intel_pstate.c
1337
value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
drivers/cpufreq/intel_pstate.c
1343
cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
drivers/cpufreq/intel_pstate.c
1352
WRITE_ONCE(cpu->hwp_req_cached, value);
drivers/cpufreq/intel_pstate.c
1355
min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
drivers/cpufreq/intel_pstate.c
1365
wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
drivers/cpufreq/intel_pstate.c
1375
if (hybrid_max_perf_cpu == cpu)
drivers/cpufreq/intel_pstate.c
1381
hybrid_clear_cpu_capacity(cpu->cpu);
drivers/cpufreq/intel_pstate.c
1411
static void intel_pstate_hwp_reenable(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
1413
intel_pstate_hwp_enable(cpu);
drivers/cpufreq/intel_pstate.c
1414
wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
drivers/cpufreq/intel_pstate.c
1419
struct cpudata *cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
1421
pr_debug("CPU %d suspending\n", cpu->cpu);
drivers/cpufreq/intel_pstate.c
1423
cpu->suspended = true;
drivers/cpufreq/intel_pstate.c
1426
intel_pstate_disable_hwp_interrupt(cpu);
drivers/cpufreq/intel_pstate.c
1433
struct cpudata *cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
1435
pr_debug("CPU %d resuming\n", cpu->cpu);
drivers/cpufreq/intel_pstate.c
1443
if (cpu->suspended && hwp_active) {
drivers/cpufreq/intel_pstate.c
1447
intel_pstate_hwp_reenable(cpu);
drivers/cpufreq/intel_pstate.c
1452
cpu->suspended = false;
drivers/cpufreq/intel_pstate.c
1459
int cpu;
drivers/cpufreq/intel_pstate.c
1461
for_each_possible_cpu(cpu)
drivers/cpufreq/intel_pstate.c
1462
cpufreq_update_policy(cpu);
drivers/cpufreq/intel_pstate.c
1479
static bool intel_pstate_update_max_freq(int cpu)
drivers/cpufreq/intel_pstate.c
1481
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/intel_pstate.c
1485
__intel_pstate_update_max_freq(policy, all_cpu_data[cpu]);
drivers/cpufreq/intel_pstate.c
1492
struct cpudata *cpudata = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
1501
int cpu;
drivers/cpufreq/intel_pstate.c
1503
for_each_possible_cpu(cpu)
drivers/cpufreq/intel_pstate.c
1504
intel_pstate_update_max_freq(cpu);
drivers/cpufreq/intel_pstate.c
1551
struct cpudata *cpu;
drivers/cpufreq/intel_pstate.c
1560
cpu = all_cpu_data[0];
drivers/cpufreq/intel_pstate.c
1562
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
drivers/cpufreq/intel_pstate.c
1563
no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
drivers/cpufreq/intel_pstate.c
1573
struct cpudata *cpu;
drivers/cpufreq/intel_pstate.c
1581
cpu = all_cpu_data[0];
drivers/cpufreq/intel_pstate.c
1582
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
drivers/cpufreq/intel_pstate.c
1631
struct cpudata *cpu = all_cpu_data[0];
drivers/cpufreq/intel_pstate.c
1632
int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
drivers/cpufreq/intel_pstate.c
1647
static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type)
drivers/cpufreq/intel_pstate.c
1649
struct cpudata *cpudata = all_cpu_data[cpu];
drivers/cpufreq/intel_pstate.c
1653
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/intel_pstate.c
1674
pr_warn("Failed to update freq constraint: CPU%d\n", cpu);
drivers/cpufreq/intel_pstate.c
1913
if (intel_pstate_update_max_freq(cpudata->cpu)) {
drivers/cpufreq/intel_pstate.c
1922
wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
drivers/cpufreq/intel_pstate.c
1973
wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
drivers/cpufreq/intel_pstate.c
1976
cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
drivers/cpufreq/intel_pstate.c
1994
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
drivers/cpufreq/intel_pstate.c
2001
wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
drivers/cpufreq/intel_pstate.c
2002
wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
drivers/cpufreq/intel_pstate.c
2041
wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
drivers/cpufreq/intel_pstate.c
2043
wrmsrq_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
drivers/cpufreq/intel_pstate.c
2155
static int core_get_min_pstate(int cpu)
drivers/cpufreq/intel_pstate.c
2159
rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
drivers/cpufreq/intel_pstate.c
2163
static int core_get_max_pstate_physical(int cpu)
drivers/cpufreq/intel_pstate.c
2167
rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
drivers/cpufreq/intel_pstate.c
2171
static int core_get_tdp_ratio(int cpu, u64 plat_info)
drivers/cpufreq/intel_pstate.c
2181
err = rdmsrq_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
drivers/cpufreq/intel_pstate.c
2187
err = rdmsrq_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
drivers/cpufreq/intel_pstate.c
2204
static int core_get_max_pstate(int cpu)
drivers/cpufreq/intel_pstate.c
2212
rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
drivers/cpufreq/intel_pstate.c
2215
tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
drivers/cpufreq/intel_pstate.c
2224
err = rdmsrq_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
drivers/cpufreq/intel_pstate.c
2239
static int core_get_turbo_pstate(int cpu)
drivers/cpufreq/intel_pstate.c
2244
rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
drivers/cpufreq/intel_pstate.c
2245
nont = core_get_max_pstate(cpu);
drivers/cpufreq/intel_pstate.c
2262
static int knl_get_turbo_pstate(int cpu)
drivers/cpufreq/intel_pstate.c
2267
rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
drivers/cpufreq/intel_pstate.c
2268
nont = core_get_max_pstate(cpu);
drivers/cpufreq/intel_pstate.c
2275
static int hwp_get_cpu_scaling(int cpu)
drivers/cpufreq/intel_pstate.c
2282
if (hybrid_get_cpu_type(cpu) == INTEL_CPU_TYPE_CORE)
drivers/cpufreq/intel_pstate.c
2297
return intel_pstate_cppc_get_scaling(cpu);
drivers/cpufreq/intel_pstate.c
230
int cpu;
drivers/cpufreq/intel_pstate.c
2300
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
drivers/cpufreq/intel_pstate.c
2302
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
drivers/cpufreq/intel_pstate.c
2303
cpu->pstate.current_pstate = pstate;
drivers/cpufreq/intel_pstate.c
2309
wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
drivers/cpufreq/intel_pstate.c
2310
pstate_funcs.get_val(cpu, pstate));
drivers/cpufreq/intel_pstate.c
2313
static void intel_pstate_set_min_pstate(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
2315
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
drivers/cpufreq/intel_pstate.c
2318
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
2322
cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(cpu->cpu);
drivers/cpufreq/intel_pstate.c
2323
cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
drivers/cpufreq/intel_pstate.c
2324
cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
drivers/cpufreq/intel_pstate.c
2327
__intel_pstate_get_hwp_cap(cpu);
drivers/cpufreq/intel_pstate.c
2330
cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
drivers/cpufreq/intel_pstate.c
2331
intel_pstate_hybrid_hwp_adjust(cpu);
drivers/cpufreq/intel_pstate.c
2333
cpu->pstate.scaling = perf_ctl_scaling;
drivers/cpufreq/intel_pstate.c
2339
hybrid_update_capacity(cpu);
drivers/cpufreq/intel_pstate.c
2341
cpu->pstate.scaling = perf_ctl_scaling;
drivers/cpufreq/intel_pstate.c
2342
cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
drivers/cpufreq/intel_pstate.c
2343
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu);
drivers/cpufreq/intel_pstate.c
2346
if (cpu->pstate.scaling == perf_ctl_scaling) {
drivers/cpufreq/intel_pstate.c
2347
cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
drivers/cpufreq/intel_pstate.c
2348
cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
drivers/cpufreq/intel_pstate.c
2349
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
drivers/cpufreq/intel_pstate.c
2353
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
drivers/cpufreq/intel_pstate.c
2356
pstate_funcs.get_vid(cpu);
drivers/cpufreq/intel_pstate.c
2358
intel_pstate_set_min_pstate(cpu);
drivers/cpufreq/intel_pstate.c
2369
static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
2371
u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
drivers/cpufreq/intel_pstate.c
2372
u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
drivers/cpufreq/intel_pstate.c
2392
if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
drivers/cpufreq/intel_pstate.c
2395
if (!cpu->hwp_boost_min)
drivers/cpufreq/intel_pstate.c
2396
cpu->hwp_boost_min = min_limit;
drivers/cpufreq/intel_pstate.c
2401
if (cpu->hwp_boost_min < boost_level1)
drivers/cpufreq/intel_pstate.c
2402
cpu->hwp_boost_min = boost_level1;
drivers/cpufreq/intel_pstate.c
2403
else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
drivers/cpufreq/intel_pstate.c
2404
cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
drivers/cpufreq/intel_pstate.c
2405
else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
drivers/cpufreq/intel_pstate.c
2407
cpu->hwp_boost_min = max_limit;
drivers/cpufreq/intel_pstate.c
2411
hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
drivers/cpufreq/intel_pstate.c
2413
cpu->last_update = cpu->sample.time;
drivers/cpufreq/intel_pstate.c
2416
static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
2418
if (cpu->hwp_boost_min) {
drivers/cpufreq/intel_pstate.c
2422
expired = time_after64(cpu->sample.time, cpu->last_update +
drivers/cpufreq/intel_pstate.c
2425
wrmsrq(MSR_HWP_REQUEST, cpu->hwp_req_cached);
drivers/cpufreq/intel_pstate.c
2426
cpu->hwp_boost_min = 0;
drivers/cpufreq/intel_pstate.c
2429
cpu->last_update = cpu->sample.time;
drivers/cpufreq/intel_pstate.c
2432
static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
drivers/cpufreq/intel_pstate.c
2435
cpu->sample.time = time;
drivers/cpufreq/intel_pstate.c
2437
if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
drivers/cpufreq/intel_pstate.c
2440
cpu->sched_flags = 0;
drivers/cpufreq/intel_pstate.c
2448
if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
drivers/cpufreq/intel_pstate.c
2451
cpu->last_io_update = time;
drivers/cpufreq/intel_pstate.c
2454
intel_pstate_hwp_boost_up(cpu);
drivers/cpufreq/intel_pstate.c
2457
intel_pstate_hwp_boost_down(cpu);
drivers/cpufreq/intel_pstate.c
2464
struct cpudata *cpu = container_of(data, struct cpudata, update_util);
drivers/cpufreq/intel_pstate.c
2466
cpu->sched_flags |= flags;
drivers/cpufreq/intel_pstate.c
2468
if (smp_processor_id() == cpu->cpu)
drivers/cpufreq/intel_pstate.c
2469
intel_pstate_update_util_hwp_local(cpu, time);
drivers/cpufreq/intel_pstate.c
2472
static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
2474
struct sample *sample = &cpu->sample;
drivers/cpufreq/intel_pstate.c
2479
static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
drivers/cpufreq/intel_pstate.c
2489
if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
drivers/cpufreq/intel_pstate.c
2495
cpu->last_sample_time = cpu->sample.time;
drivers/cpufreq/intel_pstate.c
2496
cpu->sample.time = time;
drivers/cpufreq/intel_pstate.c
2497
cpu->sample.aperf = aperf;
drivers/cpufreq/intel_pstate.c
2498
cpu->sample.mperf = mperf;
drivers/cpufreq/intel_pstate.c
2499
cpu->sample.tsc = tsc;
drivers/cpufreq/intel_pstate.c
2500
cpu->sample.aperf -= cpu->prev_aperf;
drivers/cpufreq/intel_pstate.c
2501
cpu->sample.mperf -= cpu->prev_mperf;
drivers/cpufreq/intel_pstate.c
2502
cpu->sample.tsc -= cpu->prev_tsc;
drivers/cpufreq/intel_pstate.c
2504
cpu->prev_aperf = aperf;
drivers/cpufreq/intel_pstate.c
2505
cpu->prev_mperf = mperf;
drivers/cpufreq/intel_pstate.c
2506
cpu->prev_tsc = tsc;
drivers/cpufreq/intel_pstate.c
2514
if (likely(cpu->last_sample_time)) {
drivers/cpufreq/intel_pstate.c
2515
intel_pstate_calc_avg_perf(cpu);
drivers/cpufreq/intel_pstate.c
2521
static inline int32_t get_avg_frequency(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
2523
return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
drivers/cpufreq/intel_pstate.c
2526
static inline int32_t get_avg_pstate(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
2528
return mul_ext_fp(cpu->pstate.max_pstate_physical,
drivers/cpufreq/intel_pstate.c
2529
cpu->sample.core_avg_perf);
drivers/cpufreq/intel_pstate.c
2532
static inline int32_t get_target_pstate(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
2534
struct sample *sample = &cpu->sample;
drivers/cpufreq/intel_pstate.c
2538
busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
drivers/cpufreq/intel_pstate.c
2541
if (busy_frac < cpu->iowait_boost)
drivers/cpufreq/intel_pstate.c
2542
busy_frac = cpu->iowait_boost;
drivers/cpufreq/intel_pstate.c
2547
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
drivers/cpufreq/intel_pstate.c
2550
if (target < cpu->pstate.min_pstate)
drivers/cpufreq/intel_pstate.c
2551
target = cpu->pstate.min_pstate;
drivers/cpufreq/intel_pstate.c
2560
avg_pstate = get_avg_pstate(cpu);
drivers/cpufreq/intel_pstate.c
2567
static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
drivers/cpufreq/intel_pstate.c
2569
int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
drivers/cpufreq/intel_pstate.c
2570
int max_pstate = max(min_pstate, cpu->max_perf_ratio);
drivers/cpufreq/intel_pstate.c
2575
static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
drivers/cpufreq/intel_pstate.c
2577
if (pstate == cpu->pstate.current_pstate)
drivers/cpufreq/intel_pstate.c
2580
cpu->pstate.current_pstate = pstate;
drivers/cpufreq/intel_pstate.c
2581
wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
drivers/cpufreq/intel_pstate.c
2584
static void intel_pstate_adjust_pstate(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
2586
int from = cpu->pstate.current_pstate;
drivers/cpufreq/intel_pstate.c
2590
target_pstate = get_target_pstate(cpu);
drivers/cpufreq/intel_pstate.c
2591
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
drivers/cpufreq/intel_pstate.c
2592
trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
drivers/cpufreq/intel_pstate.c
2593
intel_pstate_update_pstate(cpu, target_pstate);
drivers/cpufreq/intel_pstate.c
2595
sample = &cpu->sample;
drivers/cpufreq/intel_pstate.c
2599
cpu->pstate.current_pstate,
drivers/cpufreq/intel_pstate.c
2603
get_avg_frequency(cpu),
drivers/cpufreq/intel_pstate.c
2604
fp_toint(cpu->iowait_boost * 100));
drivers/cpufreq/intel_pstate.c
2610
struct cpudata *cpu = container_of(data, struct cpudata, update_util);
drivers/cpufreq/intel_pstate.c
2614
if (smp_processor_id() != cpu->cpu)
drivers/cpufreq/intel_pstate.c
2617
delta_ns = time - cpu->last_update;
drivers/cpufreq/intel_pstate.c
2621
cpu->iowait_boost = ONE_EIGHTH_FP;
drivers/cpufreq/intel_pstate.c
2622
} else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
drivers/cpufreq/intel_pstate.c
2623
cpu->iowait_boost <<= 1;
drivers/cpufreq/intel_pstate.c
2624
if (cpu->iowait_boost > int_tofp(1))
drivers/cpufreq/intel_pstate.c
2625
cpu->iowait_boost = int_tofp(1);
drivers/cpufreq/intel_pstate.c
2627
cpu->iowait_boost = ONE_EIGHTH_FP;
drivers/cpufreq/intel_pstate.c
2629
} else if (cpu->iowait_boost) {
drivers/cpufreq/intel_pstate.c
2632
cpu->iowait_boost = 0;
drivers/cpufreq/intel_pstate.c
2634
cpu->iowait_boost >>= 1;
drivers/cpufreq/intel_pstate.c
2636
cpu->last_update = time;
drivers/cpufreq/intel_pstate.c
2637
delta_ns = time - cpu->sample.time;
drivers/cpufreq/intel_pstate.c
2641
if (intel_pstate_sample(cpu, time))
drivers/cpufreq/intel_pstate.c
2642
intel_pstate_adjust_pstate(cpu);
drivers/cpufreq/intel_pstate.c
2745
struct cpudata *cpu;
drivers/cpufreq/intel_pstate.c
2747
cpu = all_cpu_data[cpunum];
drivers/cpufreq/intel_pstate.c
2749
if (!cpu) {
drivers/cpufreq/intel_pstate.c
2750
cpu = kzalloc_obj(*cpu);
drivers/cpufreq/intel_pstate.c
2751
if (!cpu)
drivers/cpufreq/intel_pstate.c
2754
WRITE_ONCE(all_cpu_data[cpunum], cpu);
drivers/cpufreq/intel_pstate.c
2756
cpu->cpu = cpunum;
drivers/cpufreq/intel_pstate.c
2758
cpu->epp_default = -EINVAL;
drivers/cpufreq/intel_pstate.c
2761
intel_pstate_hwp_enable(cpu);
drivers/cpufreq/intel_pstate.c
2772
intel_pstate_hwp_reenable(cpu);
drivers/cpufreq/intel_pstate.c
2775
cpu->epp_powersave = -EINVAL;
drivers/cpufreq/intel_pstate.c
2776
cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
drivers/cpufreq/intel_pstate.c
2778
intel_pstate_get_cpu_pstates(cpu);
drivers/cpufreq/intel_pstate.c
2787
struct cpudata *cpu = all_cpu_data[cpu_num];
drivers/cpufreq/intel_pstate.c
2792
if (cpu->update_util_set)
drivers/cpufreq/intel_pstate.c
2796
cpu->sample.time = 0;
drivers/cpufreq/intel_pstate.c
2797
cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
drivers/cpufreq/intel_pstate.c
2801
cpu->update_util_set = true;
drivers/cpufreq/intel_pstate.c
2804
static void intel_pstate_clear_update_util_hook(unsigned int cpu)
drivers/cpufreq/intel_pstate.c
2806
struct cpudata *cpu_data = all_cpu_data[cpu];
drivers/cpufreq/intel_pstate.c
2811
cpufreq_remove_update_util_hook(cpu);
drivers/cpufreq/intel_pstate.c
2816
static int intel_pstate_get_max_freq(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
2819
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
drivers/cpufreq/intel_pstate.c
2822
static void intel_pstate_update_perf_limits(struct cpudata *cpu,
drivers/cpufreq/intel_pstate.c
2826
int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
drivers/cpufreq/intel_pstate.c
2842
if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
drivers/cpufreq/intel_pstate.c
2846
max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
drivers/cpufreq/intel_pstate.c
2848
min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
drivers/cpufreq/intel_pstate.c
2852
cpu->cpu, min_policy_perf, max_policy_perf);
drivers/cpufreq/intel_pstate.c
2856
cpu->min_perf_ratio = min_policy_perf;
drivers/cpufreq/intel_pstate.c
2857
cpu->max_perf_ratio = max_policy_perf;
drivers/cpufreq/intel_pstate.c
2859
int turbo_max = cpu->pstate.turbo_pstate;
drivers/cpufreq/intel_pstate.c
2867
pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
drivers/cpufreq/intel_pstate.c
2870
cpu->min_perf_ratio = max(min_policy_perf, global_min);
drivers/cpufreq/intel_pstate.c
2871
cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
drivers/cpufreq/intel_pstate.c
2872
cpu->max_perf_ratio = min(max_policy_perf, global_max);
drivers/cpufreq/intel_pstate.c
2873
cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
drivers/cpufreq/intel_pstate.c
2876
cpu->min_perf_ratio = min(cpu->min_perf_ratio,
drivers/cpufreq/intel_pstate.c
2877
cpu->max_perf_ratio);
drivers/cpufreq/intel_pstate.c
288
int (*get_max)(int cpu);
drivers/cpufreq/intel_pstate.c
2880
pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
drivers/cpufreq/intel_pstate.c
2881
cpu->max_perf_ratio,
drivers/cpufreq/intel_pstate.c
2882
cpu->min_perf_ratio);
drivers/cpufreq/intel_pstate.c
2887
struct cpudata *cpu;
drivers/cpufreq/intel_pstate.c
289
int (*get_max_physical)(int cpu);
drivers/cpufreq/intel_pstate.c
2895
cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
2896
cpu->policy = policy->policy;
drivers/cpufreq/intel_pstate.c
290
int (*get_min)(int cpu);
drivers/cpufreq/intel_pstate.c
2900
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
drivers/cpufreq/intel_pstate.c
2902
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
drivers/cpufreq/intel_pstate.c
2903
int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
drivers/cpufreq/intel_pstate.c
2909
intel_pstate_clear_update_util_hook(policy->cpu);
drivers/cpufreq/intel_pstate.c
291
int (*get_turbo)(int cpu);
drivers/cpufreq/intel_pstate.c
2910
intel_pstate_set_pstate(cpu, pstate);
drivers/cpufreq/intel_pstate.c
2912
intel_pstate_set_update_util_hook(policy->cpu);
drivers/cpufreq/intel_pstate.c
2922
intel_pstate_clear_update_util_hook(policy->cpu);
drivers/cpufreq/intel_pstate.c
2923
intel_pstate_hwp_set(policy->cpu);
drivers/cpufreq/intel_pstate.c
293
int (*get_cpu_scaling)(int cpu);
drivers/cpufreq/intel_pstate.c
2936
static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
drivers/cpufreq/intel_pstate.c
2940
cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
drivers/cpufreq/intel_pstate.c
2942
policy->max > cpu->pstate.max_freq) {
drivers/cpufreq/intel_pstate.c
2948
static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
drivers/cpufreq/intel_pstate.c
2954
intel_pstate_get_hwp_cap(cpu);
drivers/cpufreq/intel_pstate.c
2956
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
drivers/cpufreq/intel_pstate.c
2958
max_freq = intel_pstate_get_max_freq(cpu);
drivers/cpufreq/intel_pstate.c
2962
intel_pstate_adjust_policy_max(cpu, policy);
drivers/cpufreq/intel_pstate.c
2967
intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);
drivers/cpufreq/intel_pstate.c
2974
struct cpudata *cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
2976
pr_debug("CPU %d going offline\n", cpu->cpu);
drivers/cpufreq/intel_pstate.c
2978
if (cpu->suspended)
drivers/cpufreq/intel_pstate.c
2988
intel_pstate_hwp_offline(cpu);
drivers/cpufreq/intel_pstate.c
2990
intel_pstate_set_min_pstate(cpu);
drivers/cpufreq/intel_pstate.c
2999
struct cpudata *cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
3001
pr_debug("CPU %d going online\n", cpu->cpu);
drivers/cpufreq/intel_pstate.c
3010
intel_pstate_hwp_reenable(cpu);
drivers/cpufreq/intel_pstate.c
3011
cpu->suspended = false;
drivers/cpufreq/intel_pstate.c
3013
hybrid_update_capacity(cpu);
drivers/cpufreq/intel_pstate.c
3021
intel_pstate_clear_update_util_hook(policy->cpu);
drivers/cpufreq/intel_pstate.c
3028
pr_debug("CPU %d exiting\n", policy->cpu);
drivers/cpufreq/intel_pstate.c
3035
struct cpudata *cpu;
drivers/cpufreq/intel_pstate.c
3038
rc = intel_pstate_init_cpu(policy->cpu);
drivers/cpufreq/intel_pstate.c
3042
cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
3044
cpu->max_perf_ratio = 0xFF;
drivers/cpufreq/intel_pstate.c
3045
cpu->min_perf_ratio = 0;
drivers/cpufreq/intel_pstate.c
3048
policy->cpuinfo.min_freq = cpu->pstate.min_freq;
drivers/cpufreq/intel_pstate.c
3050
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
drivers/cpufreq/intel_pstate.c
3076
struct cpudata *cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
3078
cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
drivers/cpufreq/intel_pstate.c
3100
struct cpudata *cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
3102
intel_pstate_verify_cpu_policy(cpu, policy);
drivers/cpufreq/intel_pstate.c
3103
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
drivers/cpufreq/intel_pstate.c
3124
static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
drivers/cpufreq/intel_pstate.c
3131
if (!intel_pstate_sample(cpu, ktime_get()))
drivers/cpufreq/intel_pstate.c
3134
sample = &cpu->sample;
drivers/cpufreq/intel_pstate.c
3138
cpu->pstate.current_pstate,
drivers/cpufreq/intel_pstate.c
3142
get_avg_frequency(cpu),
drivers/cpufreq/intel_pstate.c
3143
fp_toint(cpu->iowait_boost * 100));
drivers/cpufreq/intel_pstate.c
3146
static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
drivers/cpufreq/intel_pstate.c
3149
u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
drivers/cpufreq/intel_pstate.c
3163
WRITE_ONCE(cpu->hwp_req_cached, value);
drivers/cpufreq/intel_pstate.c
3167
wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
drivers/cpufreq/intel_pstate.c
3170
static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
drivers/cpufreq/intel_pstate.c
3175
pstate_funcs.get_val(cpu, target_pstate));
drivers/cpufreq/intel_pstate.c
3177
wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
drivers/cpufreq/intel_pstate.c
3178
pstate_funcs.get_val(cpu, target_pstate));
drivers/cpufreq/intel_pstate.c
3184
struct cpudata *cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
3185
int old_pstate = cpu->pstate.current_pstate;
drivers/cpufreq/intel_pstate.c
3187
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
drivers/cpufreq/intel_pstate.c
3190
target_pstate : cpu->max_perf_ratio;
drivers/cpufreq/intel_pstate.c
3192
intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate,
drivers/cpufreq/intel_pstate.c
3195
intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
drivers/cpufreq/intel_pstate.c
3198
cpu->pstate.current_pstate = target_pstate;
drivers/cpufreq/intel_pstate.c
3200
intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
drivers/cpufreq/intel_pstate.c
3210
struct cpudata *cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
3219
target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
drivers/cpufreq/intel_pstate.c
3222
freqs.new = target_pstate * cpu->pstate.scaling;
drivers/cpufreq/intel_pstate.c
3232
struct cpudata *cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
3235
target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
drivers/cpufreq/intel_pstate.c
3239
return target_pstate * cpu->pstate.scaling;
drivers/cpufreq/intel_pstate.c
3247
struct cpudata *cpu = all_cpu_data[cpunum];
drivers/cpufreq/intel_pstate.c
3248
u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
drivers/cpufreq/intel_pstate.c
3249
int old_pstate = cpu->pstate.current_pstate;
drivers/cpufreq/intel_pstate.c
3266
if (min_pstate < cpu->pstate.min_pstate)
drivers/cpufreq/intel_pstate.c
3267
min_pstate = cpu->pstate.min_pstate;
drivers/cpufreq/intel_pstate.c
3269
if (min_pstate < cpu->min_perf_ratio)
drivers/cpufreq/intel_pstate.c
3270
min_pstate = cpu->min_perf_ratio;
drivers/cpufreq/intel_pstate.c
3272
if (min_pstate > cpu->max_perf_ratio)
drivers/cpufreq/intel_pstate.c
3273
min_pstate = cpu->max_perf_ratio;
drivers/cpufreq/intel_pstate.c
3275
max_pstate = min(cap_pstate, cpu->max_perf_ratio);
drivers/cpufreq/intel_pstate.c
3281
intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);
drivers/cpufreq/intel_pstate.c
3283
cpu->pstate.current_pstate = target_pstate;
drivers/cpufreq/intel_pstate.c
3284
intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
drivers/cpufreq/intel_pstate.c
3290
struct cpudata *cpu;
drivers/cpufreq/intel_pstate.c
3294
dev = get_cpu_device(policy->cpu);
drivers/cpufreq/intel_pstate.c
3312
cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
3319
intel_pstate_get_hwp_cap(cpu);
drivers/cpufreq/intel_pstate.c
3321
rdmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
drivers/cpufreq/intel_pstate.c
3322
WRITE_ONCE(cpu->hwp_req_cached, value);
drivers/cpufreq/intel_pstate.c
3324
cpu->epp_cached = intel_pstate_get_epp(cpu, value);
drivers/cpufreq/intel_pstate.c
3329
freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100);
drivers/cpufreq/intel_pstate.c
3338
freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100);
drivers/cpufreq/intel_pstate.c
3379
struct cpudata *cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
3380
u64 value = READ_ONCE(cpu->hwp_req_cached);
drivers/cpufreq/intel_pstate.c
3388
wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
drivers/cpufreq/intel_pstate.c
3389
WRITE_ONCE(cpu->hwp_req_cached, value);
drivers/cpufreq/intel_pstate.c
3414
unsigned int cpu;
drivers/cpufreq/intel_pstate.c
3417
for_each_online_cpu(cpu) {
drivers/cpufreq/intel_pstate.c
3418
if (all_cpu_data[cpu]) {
drivers/cpufreq/intel_pstate.c
3420
intel_pstate_clear_update_util_hook(cpu);
drivers/cpufreq/intel_pstate.c
3422
kfree(all_cpu_data[cpu]);
drivers/cpufreq/intel_pstate.c
3423
WRITE_ONCE(all_cpu_data[cpu], NULL);
drivers/cpufreq/intel_pstate.c
362
static void intel_pstate_set_itmt_prio(int cpu)
drivers/cpufreq/intel_pstate.c
368
ret = cppc_get_perf_caps(cpu, &cppc_perf);
drivers/cpufreq/intel_pstate.c
377
cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
drivers/cpufreq/intel_pstate.c
384
sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
drivers/cpufreq/intel_pstate.c
405
static int intel_pstate_get_cppc_guaranteed(int cpu)
drivers/cpufreq/intel_pstate.c
410
ret = cppc_get_perf_caps(cpu, &cppc_perf);
drivers/cpufreq/intel_pstate.c
420
static int intel_pstate_cppc_get_scaling(int cpu)
drivers/cpufreq/intel_pstate.c
428
if (!cppc_get_perf_caps(cpu, &cppc_perf) &&
drivers/cpufreq/intel_pstate.c
437
static inline void intel_pstate_set_itmt_prio(int cpu)
drivers/cpufreq/intel_pstate.c
444
struct cpudata *cpu;
drivers/cpufreq/intel_pstate.c
449
intel_pstate_set_itmt_prio(policy->cpu);
drivers/cpufreq/intel_pstate.c
456
cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
458
ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
drivers/cpufreq/intel_pstate.c
459
policy->cpu);
drivers/cpufreq/intel_pstate.c
468
if (cpu->acpi_perf_data.control_register.space_id !=
drivers/cpufreq/intel_pstate.c
476
if (cpu->acpi_perf_data.state_count < 2)
drivers/cpufreq/intel_pstate.c
479
pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
drivers/cpufreq/intel_pstate.c
480
for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
drivers/cpufreq/intel_pstate.c
482
(i == cpu->acpi_perf_data.state ? '*' : ' '), i,
drivers/cpufreq/intel_pstate.c
483
(u32) cpu->acpi_perf_data.states[i].core_frequency,
drivers/cpufreq/intel_pstate.c
484
(u32) cpu->acpi_perf_data.states[i].power,
drivers/cpufreq/intel_pstate.c
485
(u32) cpu->acpi_perf_data.states[i].control);
drivers/cpufreq/intel_pstate.c
488
cpu->valid_pss_table = true;
drivers/cpufreq/intel_pstate.c
494
cpu->valid_pss_table = false;
drivers/cpufreq/intel_pstate.c
495
acpi_processor_unregister_performance(policy->cpu);
drivers/cpufreq/intel_pstate.c
500
struct cpudata *cpu;
drivers/cpufreq/intel_pstate.c
502
cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
503
if (!cpu->valid_pss_table)
drivers/cpufreq/intel_pstate.c
506
acpi_processor_unregister_performance(policy->cpu);
drivers/cpufreq/intel_pstate.c
524
static inline int intel_pstate_get_cppc_guaranteed(int cpu)
drivers/cpufreq/intel_pstate.c
529
static int intel_pstate_cppc_get_scaling(int cpu)
drivers/cpufreq/intel_pstate.c
535
static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
drivers/cpufreq/intel_pstate.c
538
if (freq == cpu->pstate.turbo_freq)
drivers/cpufreq/intel_pstate.c
539
return cpu->pstate.turbo_pstate;
drivers/cpufreq/intel_pstate.c
541
if (freq == cpu->pstate.max_freq)
drivers/cpufreq/intel_pstate.c
542
return cpu->pstate.max_pstate;
drivers/cpufreq/intel_pstate.c
546
return freq / cpu->pstate.scaling;
drivers/cpufreq/intel_pstate.c
548
return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
drivers/cpufreq/intel_pstate.c
551
return DIV_ROUND_UP(freq, cpu->pstate.scaling);
drivers/cpufreq/intel_pstate.c
554
static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
drivers/cpufreq/intel_pstate.c
556
return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
drivers/cpufreq/intel_pstate.c
570
static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
drivers/cpufreq/intel_pstate.c
572
int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
drivers/cpufreq/intel_pstate.c
573
int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
drivers/cpufreq/intel_pstate.c
574
int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
drivers/cpufreq/intel_pstate.c
575
int scaling = cpu->pstate.scaling;
drivers/cpufreq/intel_pstate.c
578
pr_debug("CPU%d: PERF_CTL max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
drivers/cpufreq/intel_pstate.c
579
pr_debug("CPU%d: PERF_CTL turbo = %d\n", cpu->cpu, perf_ctl_turbo);
drivers/cpufreq/intel_pstate.c
580
pr_debug("CPU%d: PERF_CTL scaling = %d\n", cpu->cpu, perf_ctl_scaling);
drivers/cpufreq/intel_pstate.c
581
pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
drivers/cpufreq/intel_pstate.c
582
pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
drivers/cpufreq/intel_pstate.c
583
pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
drivers/cpufreq/intel_pstate.c
590
cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
drivers/cpufreq/intel_pstate.c
592
cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
drivers/cpufreq/intel_pstate.c
596
cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);
drivers/cpufreq/intel_pstate.c
598
freq = cpu->pstate.min_pstate * perf_ctl_scaling;
drivers/cpufreq/intel_pstate.c
599
cpu->pstate.min_freq = freq;
drivers/cpufreq/intel_pstate.c
604
cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
drivers/cpufreq/intel_pstate.c
618
struct cpudata *cpu = all_cpu_data[0];
drivers/cpufreq/intel_pstate.c
619
int turbo_pstate = cpu->pstate.turbo_pstate;
drivers/cpufreq/intel_pstate.c
622
(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
drivers/cpufreq/intel_pstate.c
635
epp = rdmsrq_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
drivers/cpufreq/intel_pstate.c
720
static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
drivers/cpufreq/intel_pstate.c
729
u64 value = READ_ONCE(cpu->hwp_req_cached);
drivers/cpufreq/intel_pstate.c
738
WRITE_ONCE(cpu->hwp_req_cached, value);
drivers/cpufreq/intel_pstate.c
739
ret = wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
drivers/cpufreq/intel_pstate.c
741
cpu->epp_cached = epp;
drivers/cpufreq/intel_pstate.c
797
struct cpudata *cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
833
ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
drivers/cpufreq/intel_pstate.c
842
epp = ret ? epp_values[ret] : cpu->epp_default;
drivers/cpufreq/intel_pstate.c
844
if (cpu->epp_cached != epp) {
drivers/cpufreq/intel_pstate.c
848
ret = intel_pstate_set_epp(cpu, epp);
drivers/cpufreq/intel_pstate.c
865
struct cpudata *cpu_data = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
882
struct cpudata *cpu = all_cpu_data[policy->cpu];
drivers/cpufreq/intel_pstate.c
885
ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
drivers/cpufreq/intel_pstate.c
889
rdmsrq_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
drivers/cpufreq/intel_pstate.c
893
freq = ratio * cpu->pstate.scaling;
drivers/cpufreq/intel_pstate.c
894
if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
drivers/cpufreq/intel_pstate.c
895
freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);
drivers/cpufreq/intel_pstate.c
917
static u8 hybrid_get_cpu_type(unsigned int cpu)
drivers/cpufreq/intel_pstate.c
919
return cpu_data(cpu).topo.intel_type;
drivers/cpufreq/intel_pstate.c
953
static bool hybrid_has_l3(unsigned int cpu)
drivers/cpufreq/intel_pstate.c
955
struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(cpu);
drivers/cpufreq/intel_pstate.c
992
static bool hybrid_register_perf_domain(unsigned int cpu)
drivers/cpufreq/intel_pstate.c
996
struct cpudata *cpudata = all_cpu_data[cpu];
drivers/cpufreq/kirkwood-cpufreq.c
45
static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
drivers/cpufreq/longhaul.c
660
static unsigned int longhaul_get(unsigned int cpu)
drivers/cpufreq/longhaul.c
662
if (cpu)
drivers/cpufreq/longrun.c
130
policy->cpu = 0;
drivers/cpufreq/longrun.c
136
static unsigned int longrun_get(unsigned int cpu)
drivers/cpufreq/longrun.c
140
if (cpu)
drivers/cpufreq/longrun.c
257
if (policy->cpu != 0)
drivers/cpufreq/longrun.c
60
policy->cpu = 0;
drivers/cpufreq/loongson3_cpufreq.c
179
unsigned int cpu = raw_smp_processor_id();
drivers/cpufreq/loongson3_cpufreq.c
180
unsigned int package = cpu_data[cpu].package;
drivers/cpufreq/loongson3_cpufreq.c
220
static unsigned int loongson3_cpufreq_get(unsigned int cpu)
drivers/cpufreq/loongson3_cpufreq.c
224
ret = do_service_request(cpu, FREQ_INFO_TYPE_FREQ, CMD_GET_FREQ_INFO, 0, 0);
drivers/cpufreq/loongson3_cpufreq.c
233
ret = do_service_request(cpu_data[policy->cpu].core,
drivers/cpufreq/loongson3_cpufreq.c
239
static int configure_freq_table(int cpu)
drivers/cpufreq/loongson3_cpufreq.c
245
if (per_cpu(freq_data, cpu))
drivers/cpufreq/loongson3_cpufreq.c
248
ret = do_service_request(cpu, 0, CMD_GET_FREQ_LEVEL_NUM, 0, 0);
drivers/cpufreq/loongson3_cpufreq.c
253
ret = do_service_request(cpu, 0, CMD_GET_FREQ_BOOST_LEVEL, 0, 0);
drivers/cpufreq/loongson3_cpufreq.c
266
ret = do_service_request(cpu, FREQ_INFO_TYPE_FREQ, CMD_GET_FREQ_LEVEL_INFO, i, 0);
drivers/cpufreq/loongson3_cpufreq.c
279
per_cpu(freq_data, cpu) = data;
drivers/cpufreq/loongson3_cpufreq.c
286
int i, ret, cpu = policy->cpu;
drivers/cpufreq/loongson3_cpufreq.c
288
ret = configure_freq_table(cpu);
drivers/cpufreq/loongson3_cpufreq.c
293
policy->freq_table = per_cpu(freq_data, cpu)->table;
drivers/cpufreq/loongson3_cpufreq.c
294
policy->suspend_freq = policy->freq_table[per_cpu(freq_data, cpu)->def_freq_level].frequency;
drivers/cpufreq/loongson3_cpufreq.c
295
cpumask_copy(policy->cpus, topology_sibling_cpumask(cpu));
drivers/cpufreq/loongson3_cpufreq.c
298
if (i != cpu)
drivers/cpufreq/loongson3_cpufreq.c
299
per_cpu(freq_data, i) = per_cpu(freq_data, cpu);
drivers/cpufreq/loongson3_cpufreq.c
307
int cpu = policy->cpu;
drivers/cpufreq/loongson3_cpufreq.c
309
loongson3_cpufreq_target(policy, per_cpu(freq_data, cpu)->def_freq_level);
drivers/cpufreq/mediatek-cpufreq-hw.c
128
unsigned int cpu;
drivers/cpufreq/mediatek-cpufreq-hw.c
131
for_each_cpu(cpu, policy->real_cpus) {
drivers/cpufreq/mediatek-cpufreq-hw.c
132
writel_relaxed(target_freq, priv->fdvfs + cpu * 4);
drivers/cpufreq/mediatek-cpufreq-hw.c
152
static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
drivers/cpufreq/mediatek-cpufreq-hw.c
158
policy = cpufreq_cpu_get_raw(cpu);
drivers/cpufreq/mediatek-cpufreq-hw.c
236
ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
drivers/cpufreq/mediatek-cpufreq-hw.c
324
policy->cpu);
drivers/cpufreq/mediatek-cpufreq-hw.c
328
pr_info("SVS of CPU%d is not enabled\n", policy->cpu);
drivers/cpufreq/mediatek-cpufreq-hw.c
351
em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
drivers/cpufreq/mediatek-cpufreq-hw.c
373
int ret, cpu;
drivers/cpufreq/mediatek-cpufreq-hw.c
378
for_each_present_cpu(cpu) {
drivers/cpufreq/mediatek-cpufreq-hw.c
379
cpu_dev = get_cpu_device(cpu);
drivers/cpufreq/mediatek-cpufreq-hw.c
382
"Failed to get cpu%d device\n", cpu);
drivers/cpufreq/mediatek-cpufreq-hw.c
387
"CPU%d regulator get failed\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
234
policy->cpu, freq_hz);
drivers/cpufreq/mediatek-cpufreq.c
258
"cpu%d: failed to scale up voltage!\n", policy->cpu);
drivers/cpufreq/mediatek-cpufreq.c
268
"cpu%d: failed to re-parent cpu clock!\n", policy->cpu);
drivers/cpufreq/mediatek-cpufreq.c
277
"cpu%d: failed to scale cpu clock rate!\n", policy->cpu);
drivers/cpufreq/mediatek-cpufreq.c
287
"cpu%d: failed to re-parent cpu clock!\n", policy->cpu);
drivers/cpufreq/mediatek-cpufreq.c
300
"cpu%d: failed to scale down voltage!\n", policy->cpu);
drivers/cpufreq/mediatek-cpufreq.c
384
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
drivers/cpufreq/mediatek-cpufreq.c
391
cpu_dev = get_cpu_device(cpu);
drivers/cpufreq/mediatek-cpufreq.c
393
return dev_err_probe(cpu_dev, -ENODEV, "failed to get cpu%d device\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
402
cpu);
drivers/cpufreq/mediatek-cpufreq.c
408
dev_err_probe(cpu_dev, ret, "cpu%d: failed to get cpu clk\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
416
"cpu%d: failed to get intermediate clk\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
424
"cpu%d: failed to get proc regulator\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
430
dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable vproc\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
440
"cpu%d: Failed to get sram regulator\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
448
dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable vsram\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
457
"cpu%d: failed to get OPP-sharing information\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
463
dev_err_probe(cpu_dev, ret, "cpu%d: no OPP table\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
469
dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable cpu clk\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
475
dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable inter clk\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
493
"cpu%d: failed to get intermediate opp\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
502
info->opp_cpu = cpu;
drivers/cpufreq/mediatek-cpufreq.c
506
dev_err_probe(cpu_dev, ret, "cpu%d: failed to register opp notifier\n", cpu);
drivers/cpufreq/mediatek-cpufreq.c
587
info = mtk_cpu_dvfs_info_lookup(policy->cpu);
drivers/cpufreq/mediatek-cpufreq.c
590
policy->cpu);
drivers/cpufreq/mediatek-cpufreq.c
598
policy->cpu, ret);
drivers/cpufreq/mediatek-cpufreq.c
634
int cpu, ret;
drivers/cpufreq/mediatek-cpufreq.c
641
for_each_present_cpu(cpu) {
drivers/cpufreq/mediatek-cpufreq.c
642
info = mtk_cpu_dvfs_info_lookup(cpu);
drivers/cpufreq/mediatek-cpufreq.c
654
ret = mtk_cpu_dvfs_info_init(info, cpu);
drivers/cpufreq/mediatek-cpufreq.c
66
static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
drivers/cpufreq/mediatek-cpufreq.c
71
if (cpumask_test_cpu(cpu, &info->cpus))
drivers/cpufreq/mvebu-cpufreq.c
27
int ret, cpu;
drivers/cpufreq/mvebu-cpufreq.c
59
for_each_present_cpu(cpu) {
drivers/cpufreq/mvebu-cpufreq.c
64
cpu_dev = get_cpu_device(cpu);
drivers/cpufreq/mvebu-cpufreq.c
66
pr_err("Cannot get CPU %d\n", cpu);
drivers/cpufreq/mvebu-cpufreq.c
72
pr_err("Cannot get clock for CPU %d\n", cpu);
drivers/cpufreq/p4-clockmod.c
157
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
drivers/cpufreq/p4-clockmod.c
162
cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
drivers/cpufreq/p4-clockmod.c
172
has_N44_O17_errata[policy->cpu] = 1;
drivers/cpufreq/p4-clockmod.c
179
cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
drivers/cpufreq/p4-clockmod.c
189
if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
drivers/cpufreq/p4-clockmod.c
206
static unsigned int cpufreq_p4_get(unsigned int cpu)
drivers/cpufreq/p4-clockmod.c
210
rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
drivers/cpufreq/p4-clockmod.c
50
static unsigned int cpufreq_p4_get(unsigned int cpu);
drivers/cpufreq/p4-clockmod.c
52
static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
drivers/cpufreq/p4-clockmod.c
59
rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
drivers/cpufreq/p4-clockmod.c
62
pr_debug("CPU#%d currently thermal throttled\n", cpu);
drivers/cpufreq/p4-clockmod.c
64
if (has_N44_O17_errata[cpu] &&
drivers/cpufreq/p4-clockmod.c
68
rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
drivers/cpufreq/p4-clockmod.c
70
pr_debug("CPU#%d disabling modulation\n", cpu);
drivers/cpufreq/p4-clockmod.c
71
wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
drivers/cpufreq/p4-clockmod.c
74
cpu, ((125 * newstate) / 10));
drivers/cpufreq/p4-clockmod.c
82
wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
drivers/cpufreq/pasemi-cpufreq.c
105
out_le32(sdcasr_mapbase + SDCASR_REG + SDCASR_REG_STRIDE*cpu, astate);
drivers/cpufreq/pasemi-cpufreq.c
115
void restore_astate(int cpu)
drivers/cpufreq/pasemi-cpufreq.c
117
set_astate(cpu, current_astate);
drivers/cpufreq/pasemi-cpufreq.c
131
struct device_node *cpu, *dn;
drivers/cpufreq/pasemi-cpufreq.c
134
cpu = of_get_cpu_node(policy->cpu, NULL);
drivers/cpufreq/pasemi-cpufreq.c
135
if (!cpu)
drivers/cpufreq/pasemi-cpufreq.c
138
max_freqp = of_get_property(cpu, "clock-frequency", NULL);
drivers/cpufreq/pasemi-cpufreq.c
139
of_node_put(cpu);
drivers/cpufreq/pasemi-cpufreq.c
182
pr_debug("init cpufreq on CPU %d\n", policy->cpu);
drivers/cpufreq/pasemi-cpufreq.c
192
cur_astate = get_cur_astate(policy->cpu);
drivers/cpufreq/pasemi-cpufreq.c
228
policy->cpu,
drivers/cpufreq/pasemi-cpufreq.c
70
static int get_cur_astate(int cpu)
drivers/cpufreq/pasemi-cpufreq.c
75
ret = (ret >> (cpu * 4)) & 0x7;
drivers/cpufreq/pasemi-cpufreq.c
95
static void set_astate(int cpu, unsigned int astate)
drivers/cpufreq/pcc-cpufreq.c
141
static unsigned int pcc_get_freq(unsigned int cpu)
drivers/cpufreq/pcc-cpufreq.c
152
pr_debug("get: get_freq for CPU %d\n", cpu);
drivers/cpufreq/pcc-cpufreq.c
153
pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
drivers/cpufreq/pcc-cpufreq.c
171
cpu, status);
drivers/cpufreq/pcc-cpufreq.c
180
cpu, (pcch_virt_addr + pcc_cpu_data->output_offset),
drivers/cpufreq/pcc-cpufreq.c
186
" capped at %d\n", cpu, curr_freq);
drivers/cpufreq/pcc-cpufreq.c
206
int cpu;
drivers/cpufreq/pcc-cpufreq.c
208
cpu = policy->cpu;
drivers/cpufreq/pcc-cpufreq.c
209
pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
drivers/cpufreq/pcc-cpufreq.c
213
cpu, target_freq,
drivers/cpufreq/pcc-cpufreq.c
240
cpu, status);
drivers/cpufreq/pcc-cpufreq.c
244
pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
drivers/cpufreq/pcc-cpufreq.c
249
static int pcc_get_offset(int cpu)
drivers/cpufreq/pcc-cpufreq.c
258
pr = per_cpu(processors, cpu);
drivers/cpufreq/pcc-cpufreq.c
259
pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
drivers/cpufreq/pcc-cpufreq.c
295
cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset);
drivers/cpufreq/pcc-cpufreq.c
540
unsigned int cpu = policy->cpu;
drivers/cpufreq/pcc-cpufreq.c
548
result = pcc_get_offset(cpu);
drivers/cpufreq/pmac32-cpufreq.c
358
static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
drivers/cpufreq/pmac64-cpufreq.c
317
static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
drivers/cpufreq/powernow-k6.c
159
if (policy->cpu != 0)
drivers/cpufreq/powernow-k6.c
242
static unsigned int powernow_k6_get(unsigned int cpu)
drivers/cpufreq/powernow-k7.c
553
static unsigned int powernow_get(unsigned int cpu)
drivers/cpufreq/powernow-k7.c
558
if (cpu)
drivers/cpufreq/powernow-k7.c
598
if (policy->cpu != 0)
drivers/cpufreq/powernow-k8.c
1026
int rc, cpu;
drivers/cpufreq/powernow-k8.c
1028
smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
drivers/cpufreq/powernow-k8.c
1036
data->cpu = pol->cpu;
drivers/cpufreq/powernow-k8.c
1047
if (pol->cpu != 0) {
drivers/cpufreq/powernow-k8.c
1065
smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
drivers/cpufreq/powernow-k8.c
1071
cpumask_copy(pol->cpus, topology_core_cpumask(pol->cpu));
drivers/cpufreq/powernow-k8.c
1079
for_each_cpu(cpu, pol->cpus)
drivers/cpufreq/powernow-k8.c
1080
per_cpu(powernow_data, cpu) = data;
drivers/cpufreq/powernow-k8.c
1094
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
drivers/cpufreq/powernow-k8.c
1095
int cpu;
drivers/cpufreq/powernow-k8.c
1105
for_each_cpu(cpu, pol->related_cpus)
drivers/cpufreq/powernow-k8.c
1106
per_cpu(powernow_data, cpu) = NULL;
drivers/cpufreq/powernow-k8.c
1117
static unsigned int powernowk8_get(unsigned int cpu)
drivers/cpufreq/powernow-k8.c
1119
struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
drivers/cpufreq/powernow-k8.c
1126
smp_call_function_single(cpu, query_values_on_cpu, &err, true);
drivers/cpufreq/powernow-k8.c
609
if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
drivers/cpufreq/powernow-k8.c
732
if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
drivers/cpufreq/powernow-k8.c
771
if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
drivers/cpufreq/powernow-k8.c
789
acpi_processor_unregister_performance(data->cpu);
drivers/cpufreq/powernow-k8.c
857
acpi_processor_unregister_performance(data->cpu);
drivers/cpufreq/powernow-k8.c
932
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
drivers/cpufreq/powernow-k8.c
949
pol->cpu, data->powernow_table[newstate].frequency, pol->min,
drivers/cpufreq/powernow-k8.c
988
return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
drivers/cpufreq/powernow-k8.h
7
unsigned int cpu;
drivers/cpufreq/powernv-cpufreq.c
1046
unsigned int cpu, i;
drivers/cpufreq/powernv-cpufreq.c
1062
for_each_possible_cpu(cpu) {
drivers/cpufreq/powernv-cpufreq.c
1063
unsigned int id = cpu_to_chip_id(cpu);
drivers/cpufreq/powernv-cpufreq.c
1069
cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
drivers/cpufreq/powernv-cpufreq.c
1082
for_each_cpu(cpu, &chips[i].mask)
drivers/cpufreq/powernv-cpufreq.c
1083
per_cpu(chip_info, cpu) = &chips[i];
drivers/cpufreq/powernv-cpufreq.c
399
struct chip *chip = per_cpu(chip_info, policy->cpu); \
drivers/cpufreq/powernv-cpufreq.c
505
static unsigned int powernv_cpufreq_get(unsigned int cpu)
drivers/cpufreq/powernv-cpufreq.c
509
smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
drivers/cpufreq/powernv-cpufreq.c
557
unsigned int cpu = smp_processor_id();
drivers/cpufreq/powernv-cpufreq.c
574
cpu, chip->id, pmsr_pmax,
drivers/cpufreq/powernv-cpufreq.c
834
base = cpu_first_thread_sibling(policy->cpu);
drivers/cpufreq/powernv-cpufreq.c
846
policy->cpu);
drivers/cpufreq/powernv-cpufreq.c
884
smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
drivers/cpufreq/powernv-cpufreq.c
894
int cpu;
drivers/cpufreq/powernv-cpufreq.c
898
for_each_online_cpu(cpu) {
drivers/cpufreq/powernv-cpufreq.c
899
cpu_policy = cpufreq_cpu_get(cpu);
drivers/cpufreq/powernv-cpufreq.c
917
unsigned int cpu;
drivers/cpufreq/powernv-cpufreq.c
929
for_each_cpu(cpu, &mask) {
drivers/cpufreq/powernv-cpufreq.c
932
policy = cpufreq_cpu_get(cpu);
drivers/cpufreq/pxa2xx-cpufreq.c
177
static unsigned int pxa_cpufreq_get(unsigned int cpu)
drivers/cpufreq/pxa3xx-cpufreq.c
160
static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
drivers/cpufreq/pxa3xx-cpufreq.c
170
if (policy->cpu != 0)
drivers/cpufreq/qcom-cpufreq-hw.c
179
static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
drivers/cpufreq/qcom-cpufreq-hw.c
181
return __qcom_cpufreq_hw_get(cpufreq_cpu_get_raw(cpu));
drivers/cpufreq/qcom-cpufreq-hw.c
307
int cpu, ret;
drivers/cpufreq/qcom-cpufreq-hw.c
309
for_each_present_cpu(cpu) {
drivers/cpufreq/qcom-cpufreq-hw.c
310
cpu_np = of_cpu_device_node_get(cpu);
drivers/cpufreq/qcom-cpufreq-hw.c
322
cpumask_set_cpu(cpu, m);
drivers/cpufreq/qcom-cpufreq-hw.c
329
int cpu = cpumask_first(policy->related_cpus);
drivers/cpufreq/qcom-cpufreq-hw.c
330
struct device *dev = get_cpu_device(cpu);
drivers/cpufreq/qcom-cpufreq-hw.c
367
if (throttled_freq >= qcom_cpufreq_get_freq(cpufreq_cpu_get_raw(cpu)))
drivers/cpufreq/qcom-cpufreq-hw.c
449
snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
drivers/cpufreq/qcom-cpufreq-hw.c
522
cpu_dev = get_cpu_device(policy->cpu);
drivers/cpufreq/qcom-cpufreq-hw.c
525
policy->cpu);
drivers/cpufreq/qcom-cpufreq-hw.c
529
cpu_np = of_cpu_device_node_get(policy->cpu);
drivers/cpufreq/qcom-cpufreq-hw.c
574
struct device *cpu_dev = get_cpu_device(policy->cpu);
drivers/cpufreq/qcom-cpufreq-hw.c
80
dev = get_cpu_device(policy->cpu);
drivers/cpufreq/qcom-cpufreq-nvmem.c
466
static void qcom_cpufreq_suspend_pd_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
drivers/cpufreq/qcom-cpufreq-nvmem.c
468
struct dev_pm_domain_list *pd_list = drv->cpus[cpu].pd_list;
drivers/cpufreq/qcom-cpufreq-nvmem.c
485
unsigned cpu;
drivers/cpufreq/qcom-cpufreq-nvmem.c
528
for_each_present_cpu(cpu) {
drivers/cpufreq/qcom-cpufreq-nvmem.c
533
cpu_dev = get_cpu_device(cpu);
drivers/cpufreq/qcom-cpufreq-nvmem.c
548
drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config);
drivers/cpufreq/qcom-cpufreq-nvmem.c
549
if (drv->cpus[cpu].opp_token < 0) {
drivers/cpufreq/qcom-cpufreq-nvmem.c
550
ret = drv->cpus[cpu].opp_token;
drivers/cpufreq/qcom-cpufreq-nvmem.c
565
&drv->cpus[cpu].pd_list);
drivers/cpufreq/qcom-cpufreq-nvmem.c
582
for_each_present_cpu(cpu) {
drivers/cpufreq/qcom-cpufreq-nvmem.c
583
dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
drivers/cpufreq/qcom-cpufreq-nvmem.c
584
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
drivers/cpufreq/qcom-cpufreq-nvmem.c
592
unsigned int cpu;
drivers/cpufreq/qcom-cpufreq-nvmem.c
596
for_each_present_cpu(cpu) {
drivers/cpufreq/qcom-cpufreq-nvmem.c
597
dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
drivers/cpufreq/qcom-cpufreq-nvmem.c
598
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
drivers/cpufreq/qcom-cpufreq-nvmem.c
605
unsigned int cpu;
drivers/cpufreq/qcom-cpufreq-nvmem.c
607
for_each_present_cpu(cpu)
drivers/cpufreq/qcom-cpufreq-nvmem.c
608
qcom_cpufreq_suspend_pd_devs(drv, cpu);
drivers/cpufreq/qoriq-cpufreq.c
164
unsigned int cpu = policy->cpu;
drivers/cpufreq/qoriq-cpufreq.c
167
np = of_get_cpu_node(cpu, NULL);
drivers/cpufreq/qoriq-cpufreq.c
68
static struct clk *cpu_to_clk(int cpu)
drivers/cpufreq/qoriq-cpufreq.c
73
if (!cpu_present(cpu))
drivers/cpufreq/qoriq-cpufreq.c
76
np = of_get_cpu_node(cpu, NULL);
drivers/cpufreq/s3c64xx-cpufreq.c
150
if (policy->cpu != 0)
drivers/cpufreq/s5pv210-cpufreq.c
519
if (policy->cpu != 0) {
drivers/cpufreq/sc520_freq.c
36
static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
drivers/cpufreq/scmi-cpufreq.c
112
int cpu, tdomain;
drivers/cpufreq/scmi-cpufreq.c
115
for_each_present_cpu(cpu) {
drivers/cpufreq/scmi-cpufreq.c
116
if (cpu == cpu_dev->id)
drivers/cpufreq/scmi-cpufreq.c
119
tcpu_dev = get_cpu_device(cpu);
drivers/cpufreq/scmi-cpufreq.c
125
cpumask_set_cpu(cpu, cpumask);
drivers/cpufreq/scmi-cpufreq.c
207
cpu_dev = get_cpu_device(policy->cpu);
drivers/cpufreq/scmi-cpufreq.c
209
pr_err("failed to get cpu%d device\n", policy->cpu);
drivers/cpufreq/scmi-cpufreq.c
378
em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
drivers/cpufreq/scmi-cpufreq.c
39
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
drivers/cpufreq/scmi-cpufreq.c
403
int cpu, idx;
drivers/cpufreq/scmi-cpufreq.c
408
for_each_possible_cpu(cpu) {
drivers/cpufreq/scmi-cpufreq.c
409
cpu_dev = get_cpu_device(cpu);
drivers/cpufreq/scmi-cpufreq.c
46
policy = cpufreq_cpu_get_raw(cpu);
drivers/cpufreq/scpi-cpufreq.c
101
pr_err("failed to get cpu%d device\n", policy->cpu);
drivers/cpufreq/scpi-cpufreq.c
30
static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
drivers/cpufreq/scpi-cpufreq.c
36
policy = cpufreq_cpu_get_raw(cpu);
drivers/cpufreq/scpi-cpufreq.c
68
int cpu, domain, tdomain;
drivers/cpufreq/scpi-cpufreq.c
75
for_each_present_cpu(cpu) {
drivers/cpufreq/scpi-cpufreq.c
76
if (cpu == cpu_dev->id)
drivers/cpufreq/scpi-cpufreq.c
79
tcpu_dev = get_cpu_device(cpu);
drivers/cpufreq/scpi-cpufreq.c
85
cpumask_set_cpu(cpu, cpumask);
drivers/cpufreq/scpi-cpufreq.c
99
cpu_dev = get_cpu_device(policy->cpu);
drivers/cpufreq/sh-cpufreq.c
107
unsigned int cpu = policy->cpu;
drivers/cpufreq/sh-cpufreq.c
108
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
drivers/cpufreq/sh-cpufreq.c
112
dev = get_cpu_device(cpu);
drivers/cpufreq/sh-cpufreq.c
138
unsigned int cpu = policy->cpu;
drivers/cpufreq/sh-cpufreq.c
139
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
drivers/cpufreq/sh-cpufreq.c
37
static unsigned int sh_cpufreq_get(unsigned int cpu)
drivers/cpufreq/sh-cpufreq.c
39
return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
drivers/cpufreq/sh-cpufreq.c
46
int cpu = policy->cpu;
drivers/cpufreq/sh-cpufreq.c
47
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
drivers/cpufreq/sh-cpufreq.c
52
if (smp_processor_id() != cpu)
drivers/cpufreq/sh-cpufreq.c
55
dev = get_cpu_device(cpu);
drivers/cpufreq/sh-cpufreq.c
65
freqs.old = sh_cpufreq_get(cpu);
drivers/cpufreq/sh-cpufreq.c
86
return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
drivers/cpufreq/sh-cpufreq.c
91
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
drivers/cpufreq/sparc-us2e-cpufreq.c
232
static unsigned int us2e_freq_get(unsigned int cpu)
drivers/cpufreq/sparc-us2e-cpufreq.c
236
clock_tick = sparc64_get_clock_tick(cpu) / 1000;
drivers/cpufreq/sparc-us2e-cpufreq.c
237
if (smp_call_function_single(cpu, __us2e_freq_get, &estar, 1))
drivers/cpufreq/sparc-us2e-cpufreq.c
245
unsigned int cpu = smp_processor_id();
drivers/cpufreq/sparc-us2e-cpufreq.c
250
new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
drivers/cpufreq/sparc-us2e-cpufreq.c
267
unsigned int cpu = policy->cpu;
drivers/cpufreq/sparc-us2e-cpufreq.c
269
return smp_call_function_single(cpu, __us2e_freq_target, &index, 1);
drivers/cpufreq/sparc-us2e-cpufreq.c
274
unsigned int cpu = policy->cpu;
drivers/cpufreq/sparc-us2e-cpufreq.c
275
unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
drivers/cpufreq/sparc-us2e-cpufreq.c
277
&us2e_freq_table[cpu].table[0];
drivers/cpufreq/sparc-us3-cpufreq.c
117
return smp_call_function_single(cpu, update_safari_cfg, &new_bits, 1);
drivers/cpufreq/sparc-us3-cpufreq.c
122
unsigned int cpu = policy->cpu;
drivers/cpufreq/sparc-us3-cpufreq.c
123
unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
drivers/cpufreq/sparc-us3-cpufreq.c
125
&us3_freq_table[cpu].table[0];
drivers/cpufreq/sparc-us3-cpufreq.c
62
static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
drivers/cpufreq/sparc-us3-cpufreq.c
64
unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
drivers/cpufreq/sparc-us3-cpufreq.c
84
static unsigned int us3_freq_get(unsigned int cpu)
drivers/cpufreq/sparc-us3-cpufreq.c
88
if (smp_call_function_single(cpu, read_safari_cfg, &reg, 1))
drivers/cpufreq/sparc-us3-cpufreq.c
90
return get_current_freq(cpu, reg);
drivers/cpufreq/sparc-us3-cpufreq.c
95
unsigned int cpu = policy->cpu;
drivers/cpufreq/sparc-us3-cpufreq.c
98
new_freq = sparc64_get_clock_tick(cpu) / 1000;
drivers/cpufreq/speedstep-centrino.c
236
struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
drivers/cpufreq/speedstep-centrino.c
240
if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
drivers/cpufreq/speedstep-centrino.c
242
strcmp(cpu->x86_model_id, model->model_name) == 0))
drivers/cpufreq/speedstep-centrino.c
249
cpu->x86_model_id);
drivers/cpufreq/speedstep-centrino.c
256
cpu->x86_model_id);
drivers/cpufreq/speedstep-centrino.c
261
per_cpu(centrino_model, policy->cpu) = model;
drivers/cpufreq/speedstep-centrino.c
287
static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
drivers/cpufreq/speedstep-centrino.c
296
if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
drivers/cpufreq/speedstep-centrino.c
297
(per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
drivers/cpufreq/speedstep-centrino.c
298
(per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
drivers/cpufreq/speedstep-centrino.c
303
if ((!per_cpu(centrino_model, cpu)) ||
drivers/cpufreq/speedstep-centrino.c
304
(!per_cpu(centrino_model, cpu)->op_points))
drivers/cpufreq/speedstep-centrino.c
309
per_cpu(centrino_model, cpu)->op_points[i].frequency
drivers/cpufreq/speedstep-centrino.c
312
if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data)
drivers/cpufreq/speedstep-centrino.c
313
return per_cpu(centrino_model, cpu)->
drivers/cpufreq/speedstep-centrino.c
317
return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
drivers/cpufreq/speedstep-centrino.c
323
static unsigned int get_cur_freq(unsigned int cpu)
drivers/cpufreq/speedstep-centrino.c
328
rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
drivers/cpufreq/speedstep-centrino.c
329
clock_freq = extract_clock(l, cpu, 0);
drivers/cpufreq/speedstep-centrino.c
338
rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
drivers/cpufreq/speedstep-centrino.c
339
clock_freq = extract_clock(l, cpu, 1);
drivers/cpufreq/speedstep-centrino.c
347
struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
drivers/cpufreq/speedstep-centrino.c
352
if (cpu->x86_vendor != X86_VENDOR_INTEL ||
drivers/cpufreq/speedstep-centrino.c
353
!cpu_has(cpu, X86_FEATURE_EST))
drivers/cpufreq/speedstep-centrino.c
356
if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
drivers/cpufreq/speedstep-centrino.c
359
if (policy->cpu != 0)
drivers/cpufreq/speedstep-centrino.c
363
if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
drivers/cpufreq/speedstep-centrino.c
367
per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
drivers/cpufreq/speedstep-centrino.c
369
if (!per_cpu(centrino_cpu, policy->cpu)) {
drivers/cpufreq/speedstep-centrino.c
398
policy->freq_table = per_cpu(centrino_model, policy->cpu)->op_points;
drivers/cpufreq/speedstep-centrino.c
405
unsigned int cpu = policy->cpu;
drivers/cpufreq/speedstep-centrino.c
407
if (per_cpu(centrino_model, cpu))
drivers/cpufreq/speedstep-centrino.c
408
per_cpu(centrino_model, cpu) = NULL;
drivers/cpufreq/speedstep-centrino.c
420
unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
drivers/cpufreq/speedstep-centrino.c
429
if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
drivers/cpufreq/speedstep-centrino.c
435
op_points = &per_cpu(centrino_model, cpu)->op_points[index];
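
speedstep-centrino keeps a per-CPU pointer to the detected CPU model and validates it before every transition. A minimal sketch of the DEFINE_PER_CPU()/per_cpu() idiom it relies on; struct my_model and my_model_ptr are illustrative names:

#include <linux/percpu.h>
#include <linux/errno.h>

struct my_model {
	unsigned int *op_points;	/* illustrative operating-point table */
};

static DEFINE_PER_CPU(struct my_model *, my_model_ptr);

static int my_check_model(unsigned int cpu)
{
	/* per_cpu(var, cpu) addresses any CPU's slot, not just our own. */
	if (!per_cpu(my_model_ptr, cpu) ||
	    !per_cpu(my_model_ptr, cpu)->op_points)
		return -ENODEV;

	return 0;
}
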
drivers/cpufreq/speedstep-ich.c
241
static unsigned int speedstep_get(unsigned int cpu)
drivers/cpufreq/speedstep-ich.c
246
BUG_ON(smp_call_function_single(cpu, get_freq_data, &speed, 1));
drivers/cpufreq/speedstep-ich.c
296
cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
drivers/cpufreq/speedstep-smi.c
235
if (policy->cpu != 0)
drivers/cpufreq/speedstep-smi.c
272
static unsigned int speedstep_get(unsigned int cpu)
drivers/cpufreq/speedstep-smi.c
274
if (cpu)
drivers/cpufreq/sti-cpufreq.c
113
struct device *dev = ddata.cpu;
drivers/cpufreq/sti-cpufreq.c
153
struct device *dev = ddata.cpu;
drivers/cpufreq/sti-cpufreq.c
237
struct device *dev = ddata.cpu;
drivers/cpufreq/sti-cpufreq.c
264
ddata.cpu = get_cpu_device(0);
drivers/cpufreq/sti-cpufreq.c
265
if (!ddata.cpu) {
drivers/cpufreq/sti-cpufreq.c
266
dev_err(ddata.cpu, "Failed to get device for CPU0\n");
drivers/cpufreq/sti-cpufreq.c
270
if (!of_property_present(ddata.cpu->of_node, "operating-points-v2")) {
drivers/cpufreq/sti-cpufreq.c
271
dev_err(ddata.cpu, "OPP-v2 not supported\n");
drivers/cpufreq/sti-cpufreq.c
284
dev_err(ddata.cpu, "Not doing voltage scaling\n");
drivers/cpufreq/sti-cpufreq.c
50
struct device *cpu;
drivers/cpufreq/sti-cpufreq.c
56
struct device_node *np = ddata.cpu->of_node;
drivers/cpufreq/sti-cpufreq.c
57
struct device *dev = ddata.cpu;
drivers/cpufreq/sti-cpufreq.c
82
struct device *dev = ddata.cpu;
drivers/cpufreq/sun50i-cpufreq-nvmem.c
242
unsigned int cpu, supported_hw;
drivers/cpufreq/sun50i-cpufreq-nvmem.c
270
for_each_present_cpu(cpu) {
drivers/cpufreq/sun50i-cpufreq-nvmem.c
271
struct device *cpu_dev = get_cpu_device(cpu);
drivers/cpufreq/sun50i-cpufreq-nvmem.c
282
opp_tokens[cpu] = ret;
drivers/cpufreq/sun50i-cpufreq-nvmem.c
296
for_each_present_cpu(cpu)
drivers/cpufreq/sun50i-cpufreq-nvmem.c
297
dev_pm_opp_clear_config(opp_tokens[cpu]);
drivers/cpufreq/sun50i-cpufreq-nvmem.c
306
unsigned int cpu;
drivers/cpufreq/sun50i-cpufreq-nvmem.c
310
for_each_present_cpu(cpu)
drivers/cpufreq/sun50i-cpufreq-nvmem.c
311
dev_pm_opp_clear_config(opp_tokens[cpu]);
drivers/cpufreq/tegra186-cpufreq.c
108
cpu_dev = get_cpu_device(policy->cpu);
drivers/cpufreq/tegra186-cpufreq.c
110
pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu);
drivers/cpufreq/tegra186-cpufreq.c
177
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
drivers/cpufreq/tegra186-cpufreq.c
180
u32 cpu;
drivers/cpufreq/tegra186-cpufreq.c
187
for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
drivers/cpufreq/tegra186-cpufreq.c
188
if (data->cpus[cpu].bpmp_cluster_id == cluster)
drivers/cpufreq/tegra186-cpufreq.c
189
cpumask_set_cpu(cpu, policy->cpus);
drivers/cpufreq/tegra186-cpufreq.c
216
u32 cpu;
drivers/cpufreq/tegra186-cpufreq.c
218
for_each_cpu(cpu, policy->cpus) {
drivers/cpufreq/tegra186-cpufreq.c
219
edvd_offset = data->cpus[cpu].edvd_offset;
drivers/cpufreq/tegra186-cpufreq.c
230
static unsigned int tegra186_cpufreq_get(unsigned int cpu)
drivers/cpufreq/tegra186-cpufreq.c
232
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
drivers/cpufreq/tegra186-cpufreq.c
241
edvd_offset = data->cpus[policy->cpu].edvd_offset;
drivers/cpufreq/tegra186-cpufreq.c
243
cluster_id = data->cpus[policy->cpu].bpmp_cluster_id;
drivers/cpufreq/tegra186-cpufreq.c
357
u32 edvd_val, cpu;
drivers/cpufreq/tegra186-cpufreq.c
389
for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
drivers/cpufreq/tegra186-cpufreq.c
390
if (data->cpus[cpu].bpmp_cluster_id == i) {
drivers/cpufreq/tegra186-cpufreq.c
392
edvd_offset = data->cpus[cpu].edvd_offset;
drivers/cpufreq/tegra186-cpufreq.c
80
dev = get_cpu_device(policy->cpu);
drivers/cpufreq/tegra194-cpufreq.c
112
static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
drivers/cpufreq/tegra194-cpufreq.c
116
smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);
drivers/cpufreq/tegra194-cpufreq.c
124
static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
drivers/cpufreq/tegra194-cpufreq.c
128
*ndiv = readl(data->cpu_data[cpu].freq_core_reg) & NDIV_MASK;
drivers/cpufreq/tegra194-cpufreq.c
136
u32 cpu;
drivers/cpufreq/tegra194-cpufreq.c
138
for_each_cpu(cpu, policy->cpus)
drivers/cpufreq/tegra194-cpufreq.c
139
writel(ndiv, data->cpu_data[cpu].freq_core_reg);
drivers/cpufreq/tegra194-cpufreq.c
157
actmon_reg = CORE_ACTMON_CNTR_REG(data, data->cpu_data[c->cpu].clusterid,
drivers/cpufreq/tegra194-cpufreq.c
158
data->cpu_data[c->cpu].cpuid);
drivers/cpufreq/tegra194-cpufreq.c
178
c->cpu, delta_refcnt, cnt);
drivers/cpufreq/tegra194-cpufreq.c
207
static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
drivers/cpufreq/tegra194-cpufreq.c
211
smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);
drivers/cpufreq/tegra194-cpufreq.c
268
c->cpu, delta_refcnt, cnt);
drivers/cpufreq/tegra194-cpufreq.c
30
#define CORE_OFFSET(cpu) (cpu * 8)
drivers/cpufreq/tegra194-cpufreq.c
32
#define SCRATCH_FREQ_CORE_REG(data, cpu) (data->regs + CMU_CLKS_BASE + CORE_OFFSET(cpu))
drivers/cpufreq/tegra194-cpufreq.c
320
static unsigned int tegra194_calculate_speed(u32 cpu)
drivers/cpufreq/tegra194-cpufreq.c
332
read_counters_work.c.cpu = cpu;
drivers/cpufreq/tegra194-cpufreq.c
334
queue_work_on(cpu, read_counters_wq, &read_counters_work.work);
drivers/cpufreq/tegra194-cpufreq.c
351
pr_debug("cpufreq: %d is idle, delta_refcnt: 0\n", cpu);
drivers/cpufreq/tegra194-cpufreq.c
368
static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
drivers/cpufreq/tegra194-cpufreq.c
37
#define CORE_ACTMON_CNTR_REG(data, cl, cpu) (CLUSTER_ACTMON_BASE(data, cl) + CORE_OFFSET(cpu))
drivers/cpufreq/tegra194-cpufreq.c
370
return smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true);
drivers/cpufreq/tegra194-cpufreq.c
385
static unsigned int tegra194_get_speed(u32 cpu)
drivers/cpufreq/tegra194-cpufreq.c
388
u32 clusterid = data->cpu_data[cpu].clusterid;
drivers/cpufreq/tegra194-cpufreq.c
395
rate = tegra194_calculate_speed(cpu);
drivers/cpufreq/tegra194-cpufreq.c
398
ret = data->soc->ops->get_cpu_ndiv(cpu, data->cpu_data[cpu].cpuid, clusterid, &ndiv);
drivers/cpufreq/tegra194-cpufreq.c
414
cpu, rate, pos->frequency, abs(rate - pos->frequency), ndiv);
drivers/cpufreq/tegra194-cpufreq.c
436
cpu_dev = get_cpu_device(policy->cpu);
drivers/cpufreq/tegra194-cpufreq.c
438
pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu);
drivers/cpufreq/tegra194-cpufreq.c
49
u32 cpu;
drivers/cpufreq/tegra194-cpufreq.c
504
u32 clusterid = data->cpu_data[policy->cpu].clusterid;
drivers/cpufreq/tegra194-cpufreq.c
507
u32 start_cpu, cpu;
drivers/cpufreq/tegra194-cpufreq.c
513
start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
drivers/cpufreq/tegra194-cpufreq.c
515
for (cpu = start_cpu; cpu < (start_cpu + maxcpus_per_cluster); cpu++) {
drivers/cpufreq/tegra194-cpufreq.c
516
if (cpu_possible(cpu))
drivers/cpufreq/tegra194-cpufreq.c
517
cpumask_set_cpu(cpu, policy->cpus);
drivers/cpufreq/tegra194-cpufreq.c
556
struct device *cpu_dev = get_cpu_device(policy->cpu);
drivers/cpufreq/tegra194-cpufreq.c
62
void (*get_cpu_cluster_id)(u32 cpu, u32 *cpuid, u32 *clusterid);
drivers/cpufreq/tegra194-cpufreq.c
63
int (*get_cpu_ndiv)(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv);
drivers/cpufreq/tegra194-cpufreq.c
685
static int tegra194_cpufreq_store_physids(unsigned int cpu, struct tegra194_cpufreq_data *data)
drivers/cpufreq/tegra194-cpufreq.c
691
if (cpu > (num_cpus - 1)) {
drivers/cpufreq/tegra194-cpufreq.c
696
data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
drivers/cpufreq/tegra194-cpufreq.c
700
data->cpu_data[cpu].cpuid = cpuid;
drivers/cpufreq/tegra194-cpufreq.c
701
data->cpu_data[cpu].clusterid = clusterid;
drivers/cpufreq/tegra194-cpufreq.c
702
data->cpu_data[cpu].freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
drivers/cpufreq/tegra194-cpufreq.c
714
u32 cpu;
drivers/cpufreq/tegra194-cpufreq.c
769
for_each_possible_cpu(cpu) {
drivers/cpufreq/tegra194-cpufreq.c
770
err = tegra194_cpufreq_store_physids(cpu, data);
drivers/cpufreq/tegra194-cpufreq.c
91
dev = get_cpu_device(policy->cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
102
return per_cpu(cpu_last_req_freq, cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
104
return clk_get_cpu_rate(cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
108
ve_spc_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
drivers/cpufreq/vexpress-spc-cpufreq.c
117
prev_rate = per_cpu(cpu_last_req_freq, cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
118
per_cpu(cpu_last_req_freq, cpu) = rate;
drivers/cpufreq/vexpress-spc-cpufreq.c
119
per_cpu(physical_cluster, cpu) = new_cluster;
drivers/cpufreq/vexpress-spc-cpufreq.c
143
per_cpu(cpu_last_req_freq, cpu) = prev_rate;
drivers/cpufreq/vexpress-spc-cpufreq.c
144
per_cpu(physical_cluster, cpu) = old_cluster;
drivers/cpufreq/vexpress-spc-cpufreq.c
157
bL_switch_request(cpu, new_cluster);
drivers/cpufreq/vexpress-spc-cpufreq.c
180
u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
drivers/cpufreq/vexpress-spc-cpufreq.c
183
cur_cluster = cpu_to_cluster(cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
184
new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
196
return ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
drivers/cpufreq/vexpress-spc-cpufreq.c
412
u32 cur_cluster = cpu_to_cluster(policy->cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
416
cpu_dev = get_cpu_device(policy->cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
419
policy->cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
424
int cpu;
drivers/cpufreq/vexpress-spc-cpufreq.c
428
for_each_cpu(cpu, policy->cpus)
drivers/cpufreq/vexpress-spc-cpufreq.c
429
per_cpu(physical_cluster, cpu) = cur_cluster;
drivers/cpufreq/vexpress-spc-cpufreq.c
432
per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
drivers/cpufreq/vexpress-spc-cpufreq.c
443
per_cpu(cpu_last_req_freq, policy->cpu) =
drivers/cpufreq/vexpress-spc-cpufreq.c
444
clk_get_cpu_rate(policy->cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
446
dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
454
cpu_dev = get_cpu_device(policy->cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
457
policy->cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
60
static inline int raw_cpu_to_cluster(int cpu)
drivers/cpufreq/vexpress-spc-cpufreq.c
62
return topology_physical_package_id(cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
65
static inline int cpu_to_cluster(int cpu)
drivers/cpufreq/vexpress-spc-cpufreq.c
68
MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
87
static unsigned int clk_get_cpu_rate(unsigned int cpu)
drivers/cpufreq/vexpress-spc-cpufreq.c
89
u32 cur_cluster = per_cpu(physical_cluster, cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
99
static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
drivers/cpufreq/virtual-cpufreq.c
107
static u32 virt_cpufreq_get_perftbl_entry(int cpu, u32 idx)
drivers/cpufreq/virtual-cpufreq.c
109
writel_relaxed(idx, base + cpu * PER_CPU_OFFSET +
drivers/cpufreq/virtual-cpufreq.c
111
return readl_relaxed(base + cpu * PER_CPU_OFFSET +
drivers/cpufreq/virtual-cpufreq.c
136
int cpu;
drivers/cpufreq/virtual-cpufreq.c
138
cur_perf_domain = readl_relaxed(base + policy->cpu *
drivers/cpufreq/virtual-cpufreq.c
141
for_each_present_cpu(cpu) {
drivers/cpufreq/virtual-cpufreq.c
142
cpu_dev = get_cpu_device(cpu);
drivers/cpufreq/virtual-cpufreq.c
146
perf_domain = readl_relaxed(base + cpu *
drivers/cpufreq/virtual-cpufreq.c
150
cpumask_set_cpu(cpu, policy->cpus);
drivers/cpufreq/virtual-cpufreq.c
161
num_perftbl_entries = per_cpu(perftbl_num_entries, policy->cpu);
drivers/cpufreq/virtual-cpufreq.c
165
policy->cpuinfo.max_freq = virt_cpufreq_get_perftbl_entry(policy->cpu, 0);
drivers/cpufreq/virtual-cpufreq.c
179
table[idx].frequency = virt_cpufreq_get_perftbl_entry(policy->cpu, idx);
drivers/cpufreq/virtual-cpufreq.c
192
cpu_dev = get_cpu_device(policy->cpu);
drivers/cpufreq/virtual-cpufreq.c
273
int ret, cpu;
drivers/cpufreq/virtual-cpufreq.c
279
for_each_possible_cpu(cpu) {
drivers/cpufreq/virtual-cpufreq.c
280
num_perftbl_entries = readl_relaxed(base + cpu * PER_CPU_OFFSET +
drivers/cpufreq/virtual-cpufreq.c
286
per_cpu(perftbl_num_entries, cpu) = num_perftbl_entries;
drivers/cpufreq/virtual-cpufreq.c
72
int cpu = smp_processor_id();
drivers/cpufreq/virtual-cpufreq.c
73
u32 max_freq = (u32)cpufreq_get_hw_max_freq(cpu);
drivers/cpufreq/virtual-cpufreq.c
77
cur_freq = (u64)readl_relaxed(base + cpu * PER_CPU_OFFSET
drivers/cpufreq/virtual-cpufreq.c
96
base + policy->cpu * PER_CPU_OFFSET + REG_SET_PERF_STATE_OFFSET);
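
virtual-cpufreq addresses one register window per CPU at a fixed stride from a shared base. A minimal sketch of that layout with assumed offsets (MY_PER_CPU_STRIDE and MY_REG_FREQ are illustrative):

#include <linux/io.h>

#define MY_PER_CPU_STRIDE	0x1000	/* assumed per-CPU window size */
#define MY_REG_FREQ		0x0	/* assumed register offset */

static u32 my_read_cpu_reg(void __iomem *base, int cpu)
{
	/* CPU n's registers live at base + n * stride. */
	return readl_relaxed(base + cpu * MY_PER_CPU_STRIDE + MY_REG_FREQ);
}
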
drivers/cpuidle/coupled.c
315
int cpu = (unsigned long)info;
drivers/cpuidle/coupled.c
316
cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
drivers/cpuidle/coupled.c
317
cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
drivers/cpuidle/coupled.c
332
static void cpuidle_coupled_poke(int cpu)
drivers/cpuidle/coupled.c
334
call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
drivers/cpuidle/coupled.c
336
if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
drivers/cpuidle/coupled.c
337
smp_call_function_single_async(cpu, csd);
drivers/cpuidle/coupled.c
350
int cpu;
drivers/cpuidle/coupled.c
352
for_each_cpu(cpu, &coupled->coupled_cpus)
drivers/cpuidle/coupled.c
353
if (cpu != this_cpu && cpu_online(cpu))
drivers/cpuidle/coupled.c
354
cpuidle_coupled_poke(cpu);
drivers/cpuidle/coupled.c
366
static int cpuidle_coupled_set_waiting(int cpu,
drivers/cpuidle/coupled.c
369
coupled->requested_state[cpu] = next_state;
drivers/cpuidle/coupled.c
385
static void cpuidle_coupled_set_not_waiting(int cpu,
drivers/cpuidle/coupled.c
396
coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
drivers/cpuidle/coupled.c
408
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
drivers/cpuidle/coupled.c
410
cpuidle_coupled_set_not_waiting(cpu, coupled);
drivers/cpuidle/coupled.c
427
static int cpuidle_coupled_clear_pokes(int cpu)
drivers/cpuidle/coupled.c
429
if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
drivers/cpuidle/coupled.c
433
while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
drivers/cpuidle/coupled.c
476
cpuidle_coupled_clear_pokes(dev->cpu);
drivers/cpuidle/coupled.c
490
cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);
drivers/cpuidle/coupled.c
492
w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
drivers/cpuidle/coupled.c
501
cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
drivers/cpuidle/coupled.c
502
cpuidle_coupled_poke_others(dev->cpu, coupled);
drivers/cpuidle/coupled.c
515
!cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
drivers/cpuidle/coupled.c
516
if (cpuidle_coupled_clear_pokes(dev->cpu))
drivers/cpuidle/coupled.c
520
cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
drivers/cpuidle/coupled.c
525
cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
drivers/cpuidle/coupled.c
534
cpuidle_coupled_clear_pokes(dev->cpu);
drivers/cpuidle/coupled.c
536
cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
drivers/cpuidle/coupled.c
581
cpuidle_coupled_set_done(dev->cpu, coupled);
drivers/cpuidle/coupled.c
592
cpuidle_coupled_set_done(dev->cpu, coupled);
drivers/cpuidle/coupled.c
637
int cpu;
drivers/cpuidle/coupled.c
645
for_each_cpu(cpu, &dev->coupled_cpus) {
drivers/cpuidle/coupled.c
646
other_dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/coupled.c
669
csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
drivers/cpuidle/coupled.c
670
INIT_CSD(csd, cpuidle_coupled_handle_poke, (void *)(unsigned long)dev->cpu);
drivers/cpuidle/coupled.c
704
int cpu = get_cpu();
drivers/cpuidle/coupled.c
708
cpuidle_coupled_poke_others(cpu, coupled);
drivers/cpuidle/coupled.c
723
int cpu = get_cpu();
drivers/cpuidle/coupled.c
732
cpuidle_coupled_poke_others(cpu, coupled);
drivers/cpuidle/coupled.c
736
static int coupled_cpu_online(unsigned int cpu)
drivers/cpuidle/coupled.c
742
dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/coupled.c
752
static int coupled_cpu_up_prepare(unsigned int cpu)
drivers/cpuidle/coupled.c
758
dev = per_cpu(cpuidle_devices, cpu);
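
coupled.c wakes partner CPUs with per-CPU call_single_data and an asynchronous IPI, using a pending mask so each CPU has at most one poke in flight. A minimal sketch of the same pattern; the my_* names are illustrative:

#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(call_single_data_t, my_poke_csd);
static cpumask_t my_poke_pending;

static void my_handle_poke(void *info)
{
	int cpu = (unsigned long)info;

	cpumask_clear_cpu(cpu, &my_poke_pending);
}

static void my_poke(int cpu)
{
	call_single_data_t *csd = &per_cpu(my_poke_csd, cpu);

	/* Only send a new IPI if none is already pending for @cpu. */
	if (!cpumask_test_and_set_cpu(cpu, &my_poke_pending))
		smp_call_function_single_async(cpu, csd);
}

/* Per-CPU setup, e.g. at device registration:
 * INIT_CSD(&per_cpu(my_poke_csd, cpu), my_handle_poke,
 *	    (void *)(unsigned long)cpu);
 */
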
drivers/cpuidle/cpuidle-arm.c
106
ret = arm_cpuidle_init(cpu);
drivers/cpuidle/cpuidle-arm.c
119
pr_err("CPU %d failed to init idle CPU ops\n", cpu);
drivers/cpuidle/cpuidle-arm.c
146
int cpu, ret;
drivers/cpuidle/cpuidle-arm.c
150
for_each_present_cpu(cpu) {
drivers/cpuidle/cpuidle-arm.c
151
ret = arm_idle_init_cpu(cpu);
drivers/cpuidle/cpuidle-arm.c
159
while (--cpu >= 0) {
drivers/cpuidle/cpuidle-arm.c
160
dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-arm.c
78
static int __init arm_idle_init_cpu(int cpu)
drivers/cpuidle/cpuidle-arm.c
87
drv->cpumask = (struct cpumask *)cpumask_of(cpu);
drivers/cpuidle/cpuidle-big_little.c
107
unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
drivers/cpuidle/cpuidle-big_little.c
109
mcpm_set_entry_vector(cpu, cluster, cpu_resume);
drivers/cpuidle/cpuidle-big_little.c
145
int cpu;
drivers/cpuidle/cpuidle-big_little.c
151
for_each_present_cpu(cpu)
drivers/cpuidle/cpuidle-big_little.c
152
if (smp_cpuid_part(cpu) == part_id)
drivers/cpuidle/cpuidle-big_little.c
153
cpumask_set_cpu(cpu, cpumask);
drivers/cpuidle/cpuidle-cps.c
104
int cpu;
drivers/cpuidle/cpuidle-cps.c
107
for_each_possible_cpu(cpu) {
drivers/cpuidle/cpuidle-cps.c
108
device = &per_cpu(cpuidle_dev, cpu);
drivers/cpuidle/cpuidle-cps.c
117
int err, cpu, i;
drivers/cpuidle/cpuidle-cps.c
158
for_each_possible_cpu(cpu) {
drivers/cpuidle/cpuidle-cps.c
159
device = &per_cpu(cpuidle_dev, cpu);
drivers/cpuidle/cpuidle-cps.c
160
device->cpu = cpu;
drivers/cpuidle/cpuidle-cps.c
162
cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
drivers/cpuidle/cpuidle-cps.c
168
cpu);
drivers/cpuidle/cpuidle-cps.c
36
if (cpus_are_siblings(0, dev->cpu) && (index > STATE_NC_WAIT))
drivers/cpuidle/cpuidle-exynos.c
43
ret = dev->cpu ? exynos_cpuidle_pdata->cpu1_powerdown()
drivers/cpuidle/cpuidle-exynos.c
65
if (num_online_cpus() > 1 || dev->cpu != 0)
drivers/cpuidle/cpuidle-haltpoll.c
56
static int haltpoll_cpu_online(unsigned int cpu)
drivers/cpuidle/cpuidle-haltpoll.c
60
dev = per_cpu_ptr(haltpoll_cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-haltpoll.c
62
dev->cpu = cpu;
drivers/cpuidle/cpuidle-haltpoll.c
64
pr_notice("cpuidle_register_device %d failed!\n", cpu);
drivers/cpuidle/cpuidle-haltpoll.c
67
arch_haltpoll_enable(cpu);
drivers/cpuidle/cpuidle-haltpoll.c
73
static int haltpoll_cpu_offline(unsigned int cpu)
drivers/cpuidle/cpuidle-haltpoll.c
77
dev = per_cpu_ptr(haltpoll_cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-haltpoll.c
79
arch_haltpoll_disable(cpu);
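
cpuidle-haltpoll and several drivers below hook CPU hotplug so each CPU's idle device is brought up when the CPU comes online and torn down when it goes away. A minimal sketch using cpuhp_setup_state(); the my_* names are illustrative:

#include <linux/cpuhotplug.h>

static int my_cpu_online(unsigned int cpu)
{
	/* register/enable this CPU's per-CPU idle device */
	return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
	/* quiesce and tear down this CPU's idle device */
	return 0;
}

static int __init my_hp_init(void)
{
	/* Dynamic state: the online callback also runs once for every
	 * CPU that is already online at registration time. */
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				    my_cpu_online, my_cpu_offline);

	return ret < 0 ? ret : 0;
}
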
drivers/cpuidle/cpuidle-powernv.c
164
static int powernv_cpuidle_cpu_online(unsigned int cpu)
drivers/cpuidle/cpuidle-powernv.c
166
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-powernv.c
176
static int powernv_cpuidle_cpu_dead(unsigned int cpu)
drivers/cpuidle/cpuidle-powernv.c
178
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-psci.c
122
static int psci_idle_cpuhp_up(unsigned int cpu)
drivers/cpuidle/cpuidle-psci.c
136
static int psci_idle_cpuhp_down(unsigned int cpu)
drivers/cpuidle/cpuidle-psci.c
157
int cpu;
drivers/cpuidle/cpuidle-psci.c
159
for_each_possible_cpu(cpu) {
drivers/cpuidle/cpuidle-psci.c
160
dev = per_cpu_ptr(&psci_cpuidle_data, cpu)->dev;
drivers/cpuidle/cpuidle-psci.c
251
unsigned int state_count, int cpu)
drivers/cpuidle/cpuidle-psci.c
257
data->dev = dt_idle_attach_cpu(cpu, "psci");
drivers/cpuidle/cpuidle-psci.c
278
unsigned int state_count, int cpu)
drivers/cpuidle/cpuidle-psci.c
283
struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
drivers/cpuidle/cpuidle-psci.c
309
ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
drivers/cpuidle/cpuidle-psci.c
319
unsigned int cpu, unsigned int state_count)
drivers/cpuidle/cpuidle-psci.c
331
cpu_node = of_cpu_device_node_get(cpu);
drivers/cpuidle/cpuidle-psci.c
335
ret = psci_dt_cpu_init_idle(dev, drv, cpu_node, state_count, cpu);
drivers/cpuidle/cpuidle-psci.c
342
static void psci_cpu_deinit_idle(int cpu)
drivers/cpuidle/cpuidle-psci.c
344
struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
drivers/cpuidle/cpuidle-psci.c
350
static int psci_idle_init_cpu(struct device *dev, int cpu)
drivers/cpuidle/cpuidle-psci.c
357
cpu_node = of_cpu_device_node_get(cpu);
drivers/cpuidle/cpuidle-psci.c
379
drv->cpumask = (struct cpumask *)cpumask_of(cpu);
drivers/cpuidle/cpuidle-psci.c
406
ret = psci_cpu_init_idle(dev, drv, cpu, ret);
drivers/cpuidle/cpuidle-psci.c
408
pr_err("CPU %d failed to PSCI idle\n", cpu);
drivers/cpuidle/cpuidle-psci.c
420
psci_cpu_deinit_idle(cpu);
drivers/cpuidle/cpuidle-psci.c
433
int cpu, ret;
drivers/cpuidle/cpuidle-psci.c
437
for_each_present_cpu(cpu) {
drivers/cpuidle/cpuidle-psci.c
438
ret = psci_idle_init_cpu(&fdev->dev, cpu);
drivers/cpuidle/cpuidle-psci.c
448
while (--cpu >= 0) {
drivers/cpuidle/cpuidle-psci.c
449
dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-psci.c
452
psci_cpu_deinit_idle(cpu);
drivers/cpuidle/cpuidle-psci.c
89
trace_psci_domain_idle_enter(dev->cpu, state, s2idle);
drivers/cpuidle/cpuidle-psci.c
91
trace_psci_domain_idle_exit(dev->cpu, state, s2idle);
drivers/cpuidle/cpuidle-pseries.c
303
static int pseries_cpuidle_cpu_online(unsigned int cpu)
drivers/cpuidle/cpuidle-pseries.c
305
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-pseries.c
315
static int pseries_cpuidle_cpu_dead(unsigned int cpu)
drivers/cpuidle/cpuidle-pseries.c
317
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-qcom-spm.c
120
data->cpuidle_driver.cpumask = (struct cpumask *)cpumask_of(cpu);
drivers/cpuidle/cpuidle-qcom-spm.c
132
int cpu, ret;
drivers/cpuidle/cpuidle-qcom-spm.c
141
for_each_present_cpu(cpu) {
drivers/cpuidle/cpuidle-qcom-spm.c
142
ret = spm_cpuidle_register(&pdev->dev, cpu);
drivers/cpuidle/cpuidle-qcom-spm.c
145
"Cannot register for CPU%d: %d\n", cpu, ret);
drivers/cpuidle/cpuidle-qcom-spm.c
87
static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
drivers/cpuidle/cpuidle-qcom-spm.c
94
cpu_node = of_cpu_device_node_get(cpu);
drivers/cpuidle/cpuidle-riscv-sbi.c
147
static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
drivers/cpuidle/cpuidle-riscv-sbi.c
157
static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
drivers/cpuidle/cpuidle-riscv-sbi.c
210
unsigned int state_count, int cpu)
drivers/cpuidle/cpuidle-riscv-sbi.c
216
data->dev = dt_idle_attach_cpu(cpu, "sbi");
drivers/cpuidle/cpuidle-riscv-sbi.c
236
unsigned int cpu,
drivers/cpuidle/cpuidle-riscv-sbi.c
239
struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
drivers/cpuidle/cpuidle-riscv-sbi.c
244
struct device_node *cpu_node __free(device_node) = of_cpu_device_node_get(cpu);
drivers/cpuidle/cpuidle-riscv-sbi.c
270
ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
drivers/cpuidle/cpuidle-riscv-sbi.c
280
static void sbi_cpuidle_deinit_cpu(int cpu)
drivers/cpuidle/cpuidle-riscv-sbi.c
282
struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
drivers/cpuidle/cpuidle-riscv-sbi.c
288
static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
drivers/cpuidle/cpuidle-riscv-sbi.c
300
drv->cpumask = (struct cpumask *)cpumask_of(cpu);
drivers/cpuidle/cpuidle-riscv-sbi.c
320
cpuid_to_hartid_map(cpu));
drivers/cpuidle/cpuidle-riscv-sbi.c
326
ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
drivers/cpuidle/cpuidle-riscv-sbi.c
329
cpuid_to_hartid_map(cpu));
drivers/cpuidle/cpuidle-riscv-sbi.c
344
sbi_cpuidle_deinit_cpu(cpu);
drivers/cpuidle/cpuidle-riscv-sbi.c
492
int cpu, ret;
drivers/cpuidle/cpuidle-riscv-sbi.c
499
for_each_possible_cpu(cpu) {
drivers/cpuidle/cpuidle-riscv-sbi.c
500
struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
drivers/cpuidle/cpuidle-riscv-sbi.c
521
for_each_present_cpu(cpu) {
drivers/cpuidle/cpuidle-riscv-sbi.c
522
ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
drivers/cpuidle/cpuidle-riscv-sbi.c
525
cpuid_to_hartid_map(cpu));
drivers/cpuidle/cpuidle-riscv-sbi.c
541
while (--cpu >= 0) {
drivers/cpuidle/cpuidle-riscv-sbi.c
542
dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-riscv-sbi.c
545
sbi_cpuidle_deinit_cpu(cpu);
drivers/cpuidle/cpuidle-tegra.c
102
cpu = cpu_logical_map(lcpu);
drivers/cpuidle/cpuidle-tegra.c
104
if (cpu > 0) {
drivers/cpuidle/cpuidle-tegra.c
105
tegra_enable_cpu_clock(cpu);
drivers/cpuidle/cpuidle-tegra.c
106
tegra_cpu_out_of_reset(cpu);
drivers/cpuidle/cpuidle-tegra.c
107
flowctrl_write_cpu_halt(cpu, 0);
drivers/cpuidle/cpuidle-tegra.c
112
static int tegra_cpuidle_cc6_enter(unsigned int cpu)
drivers/cpuidle/cpuidle-tegra.c
116
if (cpu > 0) {
drivers/cpuidle/cpuidle-tegra.c
117
ret = cpu_suspend(cpu, tegra_pm_park_secondary_cpu);
drivers/cpuidle/cpuidle-tegra.c
164
int index, unsigned int cpu)
drivers/cpuidle/cpuidle-tegra.c
194
err = tegra_cpuidle_cc6_enter(cpu);
drivers/cpuidle/cpuidle-tegra.c
211
static int tegra_cpuidle_adjust_state_index(int index, unsigned int cpu)
drivers/cpuidle/cpuidle-tegra.c
217
if (cpu > 0 || index != TEGRA_C7 || tegra_get_chip_id() != TEGRA30)
drivers/cpuidle/cpuidle-tegra.c
234
unsigned int cpu = cpu_logical_map(dev->cpu);
drivers/cpuidle/cpuidle-tegra.c
237
index = tegra_cpuidle_adjust_state_index(index, cpu);
drivers/cpuidle/cpuidle-tegra.c
248
ret = tegra_cpuidle_state_enter(dev, index, cpu);
drivers/cpuidle/cpuidle-tegra.c
53
unsigned long cpu, lcpu, csr;
drivers/cpuidle/cpuidle-tegra.c
56
cpu = cpu_logical_map(lcpu);
drivers/cpuidle/cpuidle-tegra.c
57
csr = flowctrl_read_cpu_csr(cpu);
drivers/cpuidle/cpuidle-tegra.c
60
cpu, cpu_online(lcpu), csr);
drivers/cpuidle/cpuidle-tegra.c
99
unsigned int cpu, lcpu;
drivers/cpuidle/cpuidle.c
248
trace_cpu_idle(index, dev->cpu);
drivers/cpuidle/cpuidle.c
283
trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
drivers/cpuidle/cpuidle.c
316
trace_cpu_idle_miss(dev->cpu, entered_state, false);
drivers/cpuidle/cpuidle.c
330
trace_cpu_idle_miss(dev->cpu, entered_state, true);
drivers/cpuidle/cpuidle.c
617
per_cpu(cpuidle_devices, dev->cpu) = NULL;
drivers/cpuidle/cpuidle.c
640
unsigned int cpu = dev->cpu;
drivers/cpuidle/cpuidle.c
643
if (per_cpu(cpuidle_devices, cpu)) {
drivers/cpuidle/cpuidle.c
644
pr_info("CPU%d: cpuidle device already registered\n", cpu);
drivers/cpuidle/cpuidle.c
659
per_cpu(cpuidle_devices, cpu) = dev;
drivers/cpuidle/cpuidle.c
750
int cpu;
drivers/cpuidle/cpuidle.c
753
for_each_cpu(cpu, drv->cpumask) {
drivers/cpuidle/cpuidle.c
754
device = &per_cpu(cpuidle_dev, cpu);
drivers/cpuidle/cpuidle.c
776
int ret, cpu;
drivers/cpuidle/cpuidle.c
785
for_each_cpu(cpu, drv->cpumask) {
drivers/cpuidle/cpuidle.c
786
device = &per_cpu(cpuidle_dev, cpu);
drivers/cpuidle/cpuidle.c
787
device->cpu = cpu;
drivers/cpuidle/cpuidle.c
802
pr_err("Failed to register cpuidle device for cpu%d\n", cpu);
drivers/cpuidle/driver.c
100
static inline struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)
drivers/cpuidle/driver.c
340
int cpu;
drivers/cpuidle/driver.c
342
cpu = get_cpu();
drivers/cpuidle/driver.c
343
drv = __cpuidle_get_cpu_driver(cpu);
drivers/cpuidle/driver.c
362
return __cpuidle_get_cpu_driver(dev->cpu);
drivers/cpuidle/driver.c
375
unsigned int cpu;
drivers/cpuidle/driver.c
38
static struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)
drivers/cpuidle/driver.c
386
for_each_cpu(cpu, drv->cpumask) {
drivers/cpuidle/driver.c
387
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/driver.c
40
return per_cpu(cpuidle_drivers, cpu);
drivers/cpuidle/driver.c
53
int cpu;
drivers/cpuidle/driver.c
55
for_each_cpu(cpu, drv->cpumask) {
drivers/cpuidle/driver.c
57
if (drv != __cpuidle_get_cpu_driver(cpu))
drivers/cpuidle/driver.c
60
per_cpu(cpuidle_drivers, cpu) = NULL;
drivers/cpuidle/driver.c
73
int cpu;
drivers/cpuidle/driver.c
75
for_each_cpu(cpu, drv->cpumask) {
drivers/cpuidle/driver.c
78
old_drv = __cpuidle_get_cpu_driver(cpu);
drivers/cpuidle/driver.c
83
for_each_cpu(cpu, drv->cpumask)
drivers/cpuidle/driver.c
84
per_cpu(cpuidle_drivers, cpu) = drv;
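
driver.c resolves the current CPU's registered driver under get_cpu()/put_cpu(), so the task cannot migrate between reading the CPU id and using it. A minimal sketch; the my_* names are illustrative:

#include <linux/smp.h>
#include <linux/percpu.h>

struct my_driver;

static DEFINE_PER_CPU(struct my_driver *, my_drivers);

static struct my_driver *my_get_driver(void)
{
	struct my_driver *drv;
	int cpu = get_cpu();		/* disables preemption */

	drv = per_cpu(my_drivers, cpu);
	put_cpu();			/* re-enables preemption */

	return drv;
}
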
drivers/cpuidle/dt_idle_genpd.c
173
struct device *dt_idle_attach_cpu(int cpu, const char *name)
drivers/cpuidle/dt_idle_genpd.c
177
dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), name);
drivers/cpuidle/dt_idle_genpd.c
182
if (cpu_online(cpu))
drivers/cpuidle/dt_idle_genpd.h
19
struct device *dt_idle_attach_cpu(int cpu, const char *name);
drivers/cpuidle/dt_idle_genpd.h
46
static inline struct device *dt_idle_attach_cpu(int cpu, const char *name)
drivers/cpuidle/dt_idle_states.c
109
cpu = cpumask_first(cpumask) + 1;
drivers/cpuidle/dt_idle_states.c
110
for_each_cpu_from(cpu, cpumask) {
drivers/cpuidle/dt_idle_states.c
111
cpu_node = of_cpu_device_node_get(cpu);
drivers/cpuidle/dt_idle_states.c
99
int cpu;
drivers/cpuidle/governor.c
109
s64 cpuidle_governor_latency_req(unsigned int cpu)
drivers/cpuidle/governor.c
111
struct device *device = get_cpu_device(cpu);
drivers/cpuidle/governors/haltpoll.c
53
if (cpuidle_governor_latency_req(dev->cpu) == 0) {
drivers/cpuidle/governors/ladder.c
138
struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
drivers/cpuidle/governors/ladder.c
74
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
drivers/cpuidle/governors/menu.c
222
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
drivers/cpuidle/governors/menu.c
510
struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
drivers/cpuidle/governors/teo.c
312
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
drivers/cpuidle/governors/teo.c
570
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
drivers/cpuidle/sysfs.c
700
struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
drivers/crypto/caam/caamalg_qi.c
851
int cpu;
drivers/crypto/caam/caamalg_qi.c
858
cpu = smp_processor_id();
drivers/crypto/caam/caamalg_qi.c
859
drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
drivers/crypto/caam/caamalg_qi2.c
4689
int err, i = 0, cpu;
drivers/crypto/caam/caamalg_qi2.c
4691
for_each_online_cpu(cpu) {
drivers/crypto/caam/caamalg_qi2.c
4692
ppriv = per_cpu_ptr(priv->ppriv, cpu);
drivers/crypto/caam/caamalg_qi2.c
4697
nctx->desired_cpu = cpu;
drivers/crypto/caam/caamalg_qi2.c
4701
ppriv->dpio = dpaa2_io_service_select(cpu);
drivers/crypto/caam/caamalg_qi2.c
4704
dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
drivers/crypto/caam/caamalg_qi2.c
4731
for_each_online_cpu(cpu) {
drivers/crypto/caam/caamalg_qi2.c
4732
ppriv = per_cpu_ptr(priv->ppriv, cpu);
drivers/crypto/caam/caamalg_qi2.c
4738
for_each_online_cpu(cpu) {
drivers/crypto/caam/caamalg_qi2.c
4739
ppriv = per_cpu_ptr(priv->ppriv, cpu);
drivers/crypto/caam/caamalg_qi2.c
4751
int i = 0, cpu;
drivers/crypto/caam/caamalg_qi2.c
4753
for_each_online_cpu(cpu) {
drivers/crypto/caam/caamalg_qi2.c
4754
ppriv = per_cpu_ptr(priv->ppriv, cpu);
drivers/crypto/caam/caamalg_qi2.c
4770
int err = 0, i = 0, cpu;
drivers/crypto/caam/caamalg_qi2.c
4773
for_each_online_cpu(cpu) {
drivers/crypto/caam/caamalg_qi2.c
4774
ppriv = per_cpu_ptr(priv->ppriv, cpu);
drivers/crypto/caam/caamalg_qi2.c
5018
int err, cpu;
drivers/crypto/caam/caamalg_qi2.c
5101
for_each_online_cpu(cpu) {
drivers/crypto/caam/caamalg_qi2.c
5106
ppriv = per_cpu_ptr(priv->ppriv, cpu);
drivers/crypto/caam/caamalg_qi2.c
5128
cpumask_set_cpu(cpu, priv->clean_mask);
drivers/crypto/caam/qi.c
403
int *cpu,
drivers/crypto/caam/qi.c
442
if (!cpumask_test_cpu(*cpu, cpus)) {
drivers/crypto/caam/qi.c
446
*cpu = *pcpu;
drivers/crypto/caam/qi.c
449
drv_ctx->cpu = *cpu;
drivers/crypto/caam/qi.c
452
drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
drivers/crypto/caam/qi.c
616
static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
drivers/crypto/caam/qi.c
642
qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
drivers/crypto/caam/qi.c
655
per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
drivers/crypto/caam/qi.c
657
dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
drivers/crypto/caam/qi.h
113
struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
drivers/crypto/caam/qi.h
71
int cpu;
drivers/crypto/cavium/cpt/cptvf_main.c
615
int cpu;
drivers/crypto/cavium/cpt/cptvf_main.c
624
cpu = cptvf->vfid % num_online_cpus();
drivers/crypto/cavium/cpt/cptvf_main.c
625
cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
drivers/crypto/cavium/nitrox/nitrox_isr.c
294
int nr_vecs, vec, cpu;
drivers/crypto/cavium/nitrox/nitrox_isr.c
347
cpu = qvec->ring % num_online_cpus();
drivers/crypto/cavium/nitrox/nitrox_isr.c
348
irq_set_affinity_hint(vec, get_cpu_mask(cpu));
drivers/crypto/cavium/nitrox/nitrox_isr.c
368
cpu = num_online_cpus();
drivers/crypto/cavium/nitrox/nitrox_isr.c
369
irq_set_affinity_hint(vec, get_cpu_mask(cpu));
drivers/crypto/cavium/nitrox/nitrox_isr.c
412
int vec, cpu;
drivers/crypto/cavium/nitrox/nitrox_isr.c
446
cpu = num_online_cpus();
drivers/crypto/cavium/nitrox/nitrox_isr.c
447
irq_set_affinity_hint(vec, get_cpu_mask(cpu));
drivers/crypto/chelsio/chcr_algo.c
1342
unsigned int cpu;
drivers/crypto/chelsio/chcr_algo.c
1344
cpu = get_cpu();
drivers/crypto/chelsio/chcr_algo.c
1345
reqctx->txqidx = cpu % ctx->ntxq;
drivers/crypto/chelsio/chcr_algo.c
1346
reqctx->rxqidx = cpu % ctx->nrxq;
drivers/crypto/chelsio/chcr_algo.c
1387
unsigned int cpu;
drivers/crypto/chelsio/chcr_algo.c
1389
cpu = get_cpu();
drivers/crypto/chelsio/chcr_algo.c
1390
reqctx->txqidx = cpu % ctx->ntxq;
drivers/crypto/chelsio/chcr_algo.c
1391
reqctx->rxqidx = cpu % ctx->nrxq;
drivers/crypto/chelsio/chcr_algo.c
1634
unsigned int cpu;
drivers/crypto/chelsio/chcr_algo.c
1636
cpu = get_cpu();
drivers/crypto/chelsio/chcr_algo.c
1637
req_ctx->txqidx = cpu % ctx->ntxq;
drivers/crypto/chelsio/chcr_algo.c
1638
req_ctx->rxqidx = cpu % ctx->nrxq;
drivers/crypto/chelsio/chcr_algo.c
1735
unsigned int cpu;
drivers/crypto/chelsio/chcr_algo.c
1737
cpu = get_cpu();
drivers/crypto/chelsio/chcr_algo.c
1738
req_ctx->txqidx = cpu % ctx->ntxq;
drivers/crypto/chelsio/chcr_algo.c
1739
req_ctx->rxqidx = cpu % ctx->nrxq;
drivers/crypto/chelsio/chcr_algo.c
1805
unsigned int cpu;
drivers/crypto/chelsio/chcr_algo.c
1807
cpu = get_cpu();
drivers/crypto/chelsio/chcr_algo.c
1808
req_ctx->txqidx = cpu % ctx->ntxq;
drivers/crypto/chelsio/chcr_algo.c
1809
req_ctx->rxqidx = cpu % ctx->nrxq;
drivers/crypto/chelsio/chcr_algo.c
1903
unsigned int cpu;
drivers/crypto/chelsio/chcr_algo.c
1905
cpu = get_cpu();
drivers/crypto/chelsio/chcr_algo.c
1906
req_ctx->txqidx = cpu % ctx->ntxq;
drivers/crypto/chelsio/chcr_algo.c
1907
req_ctx->rxqidx = cpu % ctx->nrxq;
drivers/crypto/chelsio/chcr_algo.c
2001
unsigned int cpu;
drivers/crypto/chelsio/chcr_algo.c
2003
cpu = get_cpu();
drivers/crypto/chelsio/chcr_algo.c
2004
reqctx->txqidx = cpu % ctx->ntxq;
drivers/crypto/chelsio/chcr_algo.c
2005
reqctx->rxqidx = cpu % ctx->nrxq;
drivers/crypto/chelsio/chcr_algo.c
3661
unsigned int cpu;
drivers/crypto/chelsio/chcr_algo.c
3663
cpu = get_cpu();
drivers/crypto/chelsio/chcr_algo.c
3664
reqctx->txqidx = cpu % ctx->ntxq;
drivers/crypto/chelsio/chcr_algo.c
3665
reqctx->rxqidx = cpu % ctx->nrxq;
drivers/crypto/chelsio/chcr_algo.c
3692
unsigned int cpu;
drivers/crypto/chelsio/chcr_algo.c
3694
cpu = get_cpu();
drivers/crypto/chelsio/chcr_algo.c
3695
reqctx->txqidx = cpu % ctx->ntxq;
drivers/crypto/chelsio/chcr_algo.c
3696
reqctx->rxqidx = cpu % ctx->nrxq;
drivers/crypto/inside-secure/safexcel.c
1145
int ret, irq, cpu;
drivers/crypto/inside-secure/safexcel.c
1181
cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
drivers/crypto/inside-secure/safexcel.c
1182
irq_set_affinity_hint(irq, get_cpu_mask(cpu));
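
safexcel, cesa, qat and the other crypto drivers above spread per-ring interrupts across CPUs with cpumask_local_spread() and record the choice as an affinity hint. A minimal sketch; my_spread_ring_irq is an illustrative name:

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/numa.h>

static void my_spread_ring_irq(unsigned int irq, int ring_id)
{
	/* Pick the ring_id-th CPU, preferring the local NUMA node. */
	int cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);

	irq_set_affinity_hint(irq, get_cpu_mask(cpu));
}
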
drivers/crypto/intel/iaa/iaa_crypto_main.c
1475
int nr_sgs, cpu, ret = 0;
drivers/crypto/intel/iaa/iaa_crypto_main.c
1492
cpu = get_cpu();
drivers/crypto/intel/iaa/iaa_crypto_main.c
1493
wq = wq_table_next_wq(cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
1496
pr_debug("no wq configured for cpu=%d\n", cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
1502
pr_debug("no wq available for cpu=%d\n", cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
1575
int nr_sgs, cpu, ret = 0;
drivers/crypto/intel/iaa/iaa_crypto_main.c
1590
cpu = get_cpu();
drivers/crypto/intel/iaa/iaa_crypto_main.c
1591
wq = wq_table_next_wq(cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
1594
pr_debug("no wq configured for cpu=%d\n", cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
1600
pr_debug("no wq available for cpu=%d\n", cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
40
static struct idxd_wq *wq_table_next_wq(int cpu)
drivers/crypto/intel/iaa/iaa_crypto_main.c
42
struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
52
entry->wqs[entry->cur_wq]->id, cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
57
static void wq_table_add(int cpu, struct idxd_wq *wq)
drivers/crypto/intel/iaa/iaa_crypto_main.c
59
struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
610
int cpu;
drivers/crypto/intel/iaa/iaa_crypto_main.c
612
for (cpu = 0; cpu < nr_cpus; cpu++)
drivers/crypto/intel/iaa/iaa_crypto_main.c
613
wq_table_clear_entry(cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
68
entry->wqs[entry->n_wqs - 1]->id, entry->n_wqs - 1, cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
700
int cpu;
drivers/crypto/intel/iaa/iaa_crypto_main.c
702
for (cpu = 0; cpu < nr_cpus; cpu++)
drivers/crypto/intel/iaa/iaa_crypto_main.c
703
wq_table_free_entry(cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
71
static void wq_table_free_entry(int cpu)
drivers/crypto/intel/iaa/iaa_crypto_main.c
713
int cpu;
drivers/crypto/intel/iaa/iaa_crypto_main.c
719
for (cpu = 0; cpu < nr_cpus; cpu++) {
drivers/crypto/intel/iaa/iaa_crypto_main.c
720
entry = per_cpu_ptr(wq_table, cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
73
struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
79
static void wq_table_clear_entry(int cpu)
drivers/crypto/intel/iaa/iaa_crypto_main.c
81
struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
drivers/crypto/intel/iaa/iaa_crypto_main.c
823
static int wq_table_add_wqs(int iaa, int cpu)
drivers/crypto/intel/iaa/iaa_crypto_main.c
866
wq_table_add(cpu, iaa_wq->wq);
drivers/crypto/intel/iaa/iaa_crypto_main.c
868
cpu, iaa_wq->wq->idxd->id, iaa_wq->wq->id);
drivers/crypto/intel/iaa/iaa_crypto_main.c
890
int node_cpu, node, cpu, iaa = 0;
drivers/crypto/intel/iaa/iaa_crypto_main.c
901
for_each_possible_cpu(cpu) {
drivers/crypto/intel/iaa/iaa_crypto_main.c
902
if (WARN_ON(wq_table_add_wqs(0, cpu)))
drivers/crypto/intel/iaa/iaa_crypto_main.c
910
cpu = 0;
drivers/crypto/intel/iaa/iaa_crypto_main.c
914
iaa = cpu / cpus_per_iaa;
drivers/crypto/intel/iaa/iaa_crypto_main.c
917
cpu++;
drivers/crypto/intel/iaa/iaa_crypto_main.c
923
pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
drivers/crypto/intel/qat/qat_common/adf_isr.c
216
unsigned int cpu, cpus = num_online_cpus();
drivers/crypto/intel/qat/qat_common/adf_isr.c
238
cpu = ((accel_dev->accel_id * hw_data->num_banks) +
drivers/crypto/intel/qat/qat_common/adf_isr.c
240
irq_set_affinity_hint(irq, get_cpu_mask(cpu));
drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
183
unsigned int cpu;
drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
196
cpu = accel_dev->accel_id % num_online_cpus();
drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
197
irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
drivers/crypto/marvell/cesa/cesa.c
420
int irq, ret, i, cpu;
drivers/crypto/marvell/cesa/cesa.c
526
cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE);
drivers/crypto/marvell/cesa/cesa.c
527
irq_set_affinity_hint(irq, get_cpu_mask(cpu));
drivers/crypto/marvell/octeontx/otx_cptvf_main.c
598
int cpu;
drivers/crypto/marvell/octeontx/otx_cptvf_main.c
608
cpu = cptvf->vfid % num_online_cpus();
drivers/crypto/marvell/octeontx/otx_cptvf_main.c
609
cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
drivers/crypto/padlock-aes.c
113
int cpu;
drivers/crypto/padlock-aes.c
154
for_each_online_cpu(cpu)
drivers/crypto/padlock-aes.c
155
if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
drivers/crypto/padlock-aes.c
156
&ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
drivers/crypto/padlock-aes.c
157
per_cpu(paes_last_cword, cpu) = NULL;
drivers/crypto/padlock-aes.c
173
int cpu = raw_smp_processor_id();
drivers/crypto/padlock-aes.c
175
if (cword != per_cpu(paes_last_cword, cpu))
drivers/crypto/virtio/virtio_crypto_common.h
135
int cpu, node;
drivers/crypto/virtio/virtio_crypto_common.h
137
cpu = get_cpu();
drivers/crypto/virtio/virtio_crypto_common.h
138
node = cpu_to_node(cpu);
drivers/crypto/virtio/virtio_crypto_core.c
195
int cpu;
drivers/crypto/virtio/virtio_crypto_core.c
212
for_each_online_cpu(cpu) {
drivers/crypto/virtio/virtio_crypto_core.c
213
virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
drivers/dca/dca-core.c
258
static u8 dca_common_get_tag(struct device *dev, int cpu)
drivers/dca/dca-core.c
271
tag = dca->ops->get_tag(dca, dev, cpu);
drivers/dca/dca-core.c
283
u8 dca3_get_tag(struct device *dev, int cpu)
drivers/dca/dca-core.c
288
return dca_common_get_tag(dev, cpu);
drivers/dca/dca-core.c
296
u8 dca_get_tag(int cpu)
drivers/dca/dca-core.c
298
return dca_common_get_tag(NULL, cpu);
drivers/devfreq/event/exynos-ppmu.c
62
PPMU_EVENT(cpu),
drivers/devfreq/event/exynos-ppmu.c
90
PPMU_EVENT(d0-cpu),
drivers/devfreq/event/exynos-ppmu.c
93
PPMU_EVENT(d1-cpu),
drivers/devfreq/event/rockchip-dfi.c
111
unsigned int cpu;
drivers/devfreq/event/rockchip-dfi.c
357
return cpumap_print_to_pagebuf(true, buf, cpumask_of(dfi->cpu));
drivers/devfreq/event/rockchip-dfi.c
451
if (event->cpu < 0) {
drivers/devfreq/event/rockchip-dfi.c
595
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/devfreq/event/rockchip-dfi.c
600
if (cpu != dfi->cpu)
drivers/devfreq/event/rockchip-dfi.c
603
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/devfreq/event/rockchip-dfi.c
607
perf_pmu_migrate_context(&dfi->pmu, cpu, target);
drivers/devfreq/event/rockchip-dfi.c
608
dfi->cpu = target;
drivers/devfreq/event/rockchip-dfi.c
654
dfi->cpu = raw_smp_processor_id();
drivers/devfreq/governor_passive.c
109
unsigned long cpu, cpu_cur, cpu_min, cpu_max, cpu_percent;
drivers/devfreq/governor_passive.c
114
for_each_online_cpu(cpu) {
drivers/devfreq/governor_passive.c
115
policy = cpufreq_cpu_get(cpu);
drivers/devfreq/governor_passive.c
286
unsigned int cpu;
drivers/devfreq/governor_passive.c
300
for_each_possible_cpu(cpu) {
drivers/devfreq/governor_passive.c
301
policy = cpufreq_cpu_get(cpu);
drivers/devfreq/governor_passive.c
319
cpu_dev = get_cpu_device(cpu);
drivers/devfreq/governor_passive.c
328
dev_err(dev, "failed to get opp_table of cpu%d\n", cpu);
drivers/devfreq/hisi_uncore_freq.c
464
char *property, int (*get_topo_id)(int cpu),
drivers/devfreq/hisi_uncore_freq.c
465
const struct cpumask *(*get_cpumask)(int cpu))
drivers/devfreq/hisi_uncore_freq.c
467
unsigned int i, cpu;
drivers/devfreq/hisi_uncore_freq.c
487
for_each_possible_cpu(cpu) {
drivers/devfreq/hisi_uncore_freq.c
488
if (get_topo_id(cpu) != num[i])
drivers/devfreq/hisi_uncore_freq.c
492
&uncore->related_cpus, get_cpumask(cpu));
drivers/devfreq/hisi_uncore_freq.c
500
static int get_package_id(int cpu)
drivers/devfreq/hisi_uncore_freq.c
502
return topology_physical_package_id(cpu);
drivers/devfreq/hisi_uncore_freq.c
505
static const struct cpumask *get_package_cpumask(int cpu)
drivers/devfreq/hisi_uncore_freq.c
507
return topology_core_cpumask(cpu);
drivers/devfreq/hisi_uncore_freq.c
510
static int get_cluster_id(int cpu)
drivers/devfreq/hisi_uncore_freq.c
512
return topology_cluster_id(cpu);
drivers/devfreq/hisi_uncore_freq.c
515
static const struct cpumask *get_cluster_cpumask(int cpu)
drivers/devfreq/hisi_uncore_freq.c
517
return topology_cluster_cpumask(cpu);
drivers/devfreq/imx8m-ddrc.c
105
int cpu;
drivers/devfreq/imx8m-ddrc.c
109
for_each_online_cpu(cpu)
drivers/devfreq/imx8m-ddrc.c
110
online_cpus |= (1 << (cpu * 8));
drivers/dma/cv1800b-dmamux.c
136
if (map->peripheral == devid && map->cpu == cpuid)
drivers/dma/cv1800b-dmamux.c
155
map->cpu = cpuid;
drivers/dma/cv1800b-dmamux.c
73
unsigned int cpu;
drivers/dma/cv1800b-dmamux.c
89
DMAMUX_INT_CH_MASK(map->channel, map->cpu),
drivers/dma/cv1800b-dmamux.c
90
DMAMUX_INTEN_BIT(map->cpu));
drivers/dma/cv1800b-dmamux.c
93
map->channel, map->peripheral, map->cpu);
drivers/dma/dmaengine.c
304
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
drivers/dma/dmaengine.c
308
cpumask_test_cpu(cpu, cpumask_of_node(node));
drivers/dma/dmaengine.c
322
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
drivers/dma/dmaengine.c
339
if (dma_chan_is_local(chan, cpu))
drivers/dma/dmaengine.c
367
int cpu;
drivers/dma/dmaengine.c
372
for_each_possible_cpu(cpu)
drivers/dma/dmaengine.c
373
per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
drivers/dma/dmaengine.c
388
for_each_online_cpu(cpu) {
drivers/dma/dmaengine.c
389
chan = min_chan(cap, cpu);
drivers/dma/dmaengine.c
390
per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
drivers/dma/fsl-qdma.c
809
int cpu;
drivers/dma/fsl-qdma.c
846
cpu = i % num_online_cpus();
drivers/dma/fsl-qdma.c
848
get_cpu_mask(cpu));
drivers/dma/fsl-qdma.c
852
cpu,
drivers/dma/idxd/device.c
152
desc->cpu = -1;
drivers/dma/idxd/idxd.h
431
int cpu;
drivers/dma/idxd/perfmon.c
183
if (event->cpu < 0)
drivers/dma/idxd/submit.c
11
static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
drivers/dma/idxd/submit.c
19
desc->cpu = cpu;
drivers/dma/idxd/submit.c
29
int cpu, idx;
drivers/dma/idxd/submit.c
39
idx = sbitmap_queue_get(sbq, &cpu);
drivers/dma/idxd/submit.c
44
return __get_desc(wq, idx, cpu);
drivers/dma/idxd/submit.c
52
idx = sbitmap_queue_get(sbq, &cpu);
drivers/dma/idxd/submit.c
62
return __get_desc(wq, idx, cpu);
drivers/dma/idxd/submit.c
68
int cpu = desc->cpu;
drivers/dma/idxd/submit.c
70
desc->cpu = -1;
drivers/dma/idxd/submit.c
71
sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
drivers/dma/ioat/dca.c
178
int cpu)
drivers/dma/ioat/dca.c
187
apic_id = cpu_physical_id(cpu);
drivers/dma/ioat/dca.c
19
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
drivers/edac/a72_edac.c
121
int cpu;
drivers/edac/a72_edac.c
124
for_each_cpu_and(cpu, cpu_online_mask, &compat_mask) {
drivers/edac/a72_edac.c
125
smp_call_function_single(cpu, read_errors, &mesr, true);
drivers/edac/a72_edac.c
126
report_errors(edac_ctl, cpu, &mesr);
drivers/edac/a72_edac.c
188
int cpu;
drivers/edac/a72_edac.c
190
for_each_possible_cpu(cpu) {
drivers/edac/a72_edac.c
191
struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
drivers/edac/a72_edac.c
195
cpumask_set_cpu(cpu, &compat_mask);
drivers/edac/a72_edac.c
198
pr_warn("failed to find device node for CPU %d\n", cpu);
drivers/edac/a72_edac.c
48
static void report_errors(struct edac_device_ctl_info *edac_ctl, int cpu,
drivers/edac/a72_edac.c
81
str, fatal ? "fatal" : "correctable", cpu);
drivers/edac/a72_edac.c
84
edac_device_handle_ue(edac_ctl, cpu, 0, msg);
drivers/edac/a72_edac.c
86
edac_device_handle_ce(edac_ctl, cpu, 0, msg);
drivers/edac/a72_edac.c
93
fatal ? "fatal" : "correctable", cpu,
drivers/edac/a72_edac.c
96
edac_device_handle_ue(edac_ctl, cpu, 1, msg);
drivers/edac/a72_edac.c
98
edac_device_handle_ce(edac_ctl, cpu, 1, msg);
drivers/edac/amd64_edac.c
2848
a_err.cpu = m->extcpu;
drivers/edac/amd64_edac.c
3197
int cpu;
drivers/edac/amd64_edac.c
3199
for_each_online_cpu(cpu)
drivers/edac/amd64_edac.c
3200
if (topology_amd_node_id(cpu) == nid)
drivers/edac/amd64_edac.c
3201
cpumask_set_cpu(cpu, mask);
drivers/edac/amd64_edac.c
3208
int cpu, nbe;
drivers/edac/amd64_edac.c
3220
for_each_cpu(cpu, mask) {
drivers/edac/amd64_edac.c
3221
struct msr *reg = per_cpu_ptr(msrs, cpu);
drivers/edac/amd64_edac.c
3225
cpu, reg->q, str_enabled_disabled(nbe));
drivers/edac/amd64_edac.c
3240
int cpu;
drivers/edac/amd64_edac.c
3251
for_each_cpu(cpu, cmask) {
drivers/edac/amd64_edac.c
3253
struct msr *reg = per_cpu_ptr(msrs, cpu);
drivers/edac/imh_base.c
101
int cpu;
drivers/edac/imh_base.c
104
for_each_online_cpu(cpu) {
drivers/edac/imh_base.c
105
if (reg->pkg == topology_physical_package_id(cpu))
drivers/edac/imh_base.c
109
if (cpu >= nr_cpu_ids)
drivers/edac/imh_base.c
119
smp_call_function_single(cpu, __read_local_reg, reg, 1);
drivers/edac/octeon_edac-pc.c
45
unsigned int cpu = smp_processor_id();
drivers/edac/octeon_edac-pc.c
59
(unsigned long long)icache_err, core, cpu,
drivers/edac/octeon_edac-pc.c
62
edac_device_handle_ce(p->ed, cpu, 1, "icache");
drivers/edac/octeon_edac-pc.c
67
(unsigned long long)dcache_err, core, cpu,
drivers/edac/octeon_edac-pc.c
70
edac_device_handle_ue(p->ed, cpu, 0, "dcache");
drivers/edac/octeon_edac-pc.c
72
edac_device_handle_ce(p->ed, cpu, 0, "dcache");
drivers/edac/skx_common.c
275
int cpu;
drivers/edac/skx_common.c
279
for_each_cpu(cpu, cpumask_of_pcibus(d->util_all->bus)) {
drivers/edac/skx_common.c
280
struct cpuinfo_x86 *c = &cpu_data(cpu);
drivers/edac/skx_common.c
282
if (c->initialized && cpu_to_node(cpu) == node) {
drivers/edac/skx_common.c
283
*id = topology_physical_package_id(cpu);
drivers/edac/xgene_edac.c
754
int cpu)
drivers/edac/xgene_edac.c
757
void __iomem *pg_f = ctx->pmd_csr + cpu * CPU_CSR_STRIDE +
drivers/firmware/arm_ffa/driver.c
1880
static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
drivers/firmware/arm_ffa/driver.c
1889
static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
drivers/firmware/arm_ffa/driver.c
1925
int ret, cpu;
drivers/firmware/arm_ffa/driver.c
1931
for_each_present_cpu(cpu)
drivers/firmware/arm_ffa/driver.c
1932
per_cpu_ptr(irq_pcpu, cpu)->info = drv_info;
drivers/firmware/arm_sdei.c
243
int cpu;
drivers/firmware/arm_sdei.c
252
for_each_possible_cpu(cpu) {
drivers/firmware/arm_sdei.c
253
reg = per_cpu_ptr(regs, cpu);
drivers/firmware/arm_sdei.c
656
static int sdei_cpuhp_down(unsigned int cpu)
drivers/firmware/arm_sdei.c
678
static int sdei_cpuhp_up(unsigned int cpu)
drivers/firmware/psci/psci.c
54
bool psci_tos_resident_on(int cpu)
drivers/firmware/psci/psci.c
56
return cpu == resident_cpu;
drivers/firmware/psci/psci.c
600
int type, cpu = -1;
drivers/firmware/psci/psci.c
627
cpu = get_logical_index(cpuid);
drivers/firmware/psci/psci.c
628
resident_cpu = cpu >= 0 ? cpu : -1;
drivers/firmware/psci/psci_checker.c
100
} else if (cpu == tos_resident_cpu) {
drivers/firmware/psci/psci_checker.c
104
ret, cpu);
drivers/firmware/psci/psci_checker.c
109
"to power down CPU %d\n", ret, cpu);
drivers/firmware/psci/psci_checker.c
114
cpumask_set_cpu(cpu, offlined_cpus);
drivers/firmware/psci/psci_checker.c
118
for_each_cpu(cpu, offlined_cpus) {
drivers/firmware/psci/psci_checker.c
119
int ret = add_cpu(cpu);
drivers/firmware/psci/psci_checker.c
123
"to power up CPU %d\n", ret, cpu);
drivers/firmware/psci/psci_checker.c
126
cpumask_clear_cpu(cpu, offlined_cpus);
drivers/firmware/psci/psci_checker.c
274
int cpu = (long)arg;
drivers/firmware/psci/psci_checker.c
291
cpu, drv->state_count - 1);
drivers/firmware/psci/psci_checker.c
334
cpu, ret, index, i);
drivers/firmware/psci/psci_checker.c
359
cpu, nb_suspend, nb_shallow_sleep, nb_err);
drivers/firmware/psci/psci_checker.c
368
int i, cpu, err = 0;
drivers/firmware/psci/psci_checker.c
385
for_each_online_cpu(cpu) {
drivers/firmware/psci/psci_checker.c
388
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
drivers/firmware/psci/psci_checker.c
393
cpu);
drivers/firmware/psci/psci_checker.c
398
(void *)(long)cpu, cpu,
drivers/firmware/psci/psci_checker.c
401
pr_err("Failed to create kthread on CPU %d\n", cpu);
drivers/firmware/psci/psci_checker.c
48
int cpu;
drivers/firmware/psci/psci_checker.c
61
for_each_online_cpu(cpu)
drivers/firmware/psci/psci_checker.c
62
if (psci_tos_resident_on(cpu)) {
drivers/firmware/psci/psci_checker.c
63
tos_resident_cpu = cpu;
drivers/firmware/psci/psci_checker.c
80
int cpu;
drivers/firmware/psci/psci_checker.c
86
for_each_cpu(cpu, cpus) {
drivers/firmware/psci/psci_checker.c
87
int ret = remove_cpu(cpu);
drivers/firmware/psci/psci_checker.c
97
ret, cpu);
drivers/firmware/qcom/qcom_scm.c
389
int cpu;
drivers/firmware/qcom/qcom_scm.c
398
for_each_present_cpu(cpu) {
drivers/firmware/qcom/qcom_scm.c
399
if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
drivers/firmware/qcom/qcom_scm.c
401
flags |= cpu_bits[cpu];
drivers/firmware/stratix10-svc.c
1712
unsigned int cpu = 0;
drivers/firmware/stratix10-svc.c
1724
cpu, "svc_smc_hvc_thread");
drivers/firmware/stratix10-svc.c
880
unsigned int cpu = 0;
drivers/firmware/stratix10-svc.c
887
cpu_to_node(cpu),
drivers/firmware/trusted_foundations.c
57
static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
drivers/fpga/dfl-fme-perf.c
157
unsigned int cpu;
drivers/fpga/dfl-fme-perf.c
186
return cpumap_print_to_pagebuf(true, buf, cpumask_of(priv->cpu));
drivers/fpga/dfl-fme-perf.c
814
if (event->cpu < 0)
drivers/fpga/dfl-fme-perf.c
817
if (event->cpu != priv->cpu)
drivers/fpga/dfl-fme-perf.c
941
static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/fpga/dfl-fme-perf.c
948
if (cpu != priv->cpu)
drivers/fpga/dfl-fme-perf.c
951
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/fpga/dfl-fme-perf.c
955
priv->cpu = target;
drivers/fpga/dfl-fme-perf.c
956
perf_pmu_migrate_context(&priv->pmu, cpu, target);
drivers/fpga/dfl-fme-perf.c
974
priv->cpu = raw_smp_processor_id();
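
rockchip-dfi and dfl-fme-perf both service their PMU from one nominated CPU and migrate the perf context when that CPU goes offline. A minimal sketch of the hotplug callback; struct my_pmu and my_pmu_offline_cpu are illustrative names:

#include <linux/perf_event.h>
#include <linux/cpumask.h>
#include <linux/list.h>

struct my_pmu {
	struct pmu pmu;
	unsigned int cpu;	/* CPU currently servicing this PMU */
	struct hlist_node node;
};

static int my_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct my_pmu *priv = hlist_entry_safe(node, struct my_pmu, node);
	unsigned int target;

	if (cpu != priv->cpu)		/* some other CPU went away */
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)	/* no online CPU left to take over */
		return 0;

	perf_pmu_migrate_context(&priv->pmu, cpu, target);
	priv->cpu = target;

	return 0;
}
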
drivers/gpio/gpio-mvebu.c
1125
int i, cpu, id;
drivers/gpio/gpio-mvebu.c
1200
for (cpu = 0; cpu < 2; cpu++) {
drivers/gpio/gpio-mvebu.c
1202
GPIO_EDGE_MASK_MV78200_OFF(cpu), 0);
drivers/gpio/gpio-mvebu.c
1204
GPIO_LEVEL_MASK_MV78200_OFF(cpu), 0);
drivers/gpio/gpio-mvebu.c
1211
for (cpu = 0; cpu < 4; cpu++) {
drivers/gpio/gpio-mvebu.c
1213
GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu), 0);
drivers/gpio/gpio-mvebu.c
1215
GPIO_EDGE_MASK_ARMADAXP_OFF(cpu), 0);
drivers/gpio/gpio-mvebu.c
1217
GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu), 0);
drivers/gpio/gpio-mvebu.c
142
int cpu;
drivers/gpio/gpio-mvebu.c
152
cpu = smp_processor_id();
drivers/gpio/gpio-mvebu.c
154
*offset = GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu);
drivers/gpio/gpio-mvebu.c
188
int cpu;
drivers/gpio/gpio-mvebu.c
197
cpu = smp_processor_id();
drivers/gpio/gpio-mvebu.c
199
*offset = GPIO_EDGE_MASK_MV78200_OFF(cpu);
drivers/gpio/gpio-mvebu.c
202
cpu = smp_processor_id();
drivers/gpio/gpio-mvebu.c
204
*offset = GPIO_EDGE_MASK_ARMADAXP_OFF(cpu);
drivers/gpio/gpio-mvebu.c
238
int cpu;
drivers/gpio/gpio-mvebu.c
247
cpu = smp_processor_id();
drivers/gpio/gpio-mvebu.c
249
*offset = GPIO_LEVEL_MASK_MV78200_OFF(cpu);
drivers/gpio/gpio-mvebu.c
252
cpu = smp_processor_id();
drivers/gpio/gpio-mvebu.c
254
*offset = GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu);
drivers/gpio/gpio-mvebu.c
80
#define GPIO_EDGE_MASK_MV78200_OFF(cpu) ((cpu) ? 0x30 : 0x18)
drivers/gpio/gpio-mvebu.c
81
#define GPIO_LEVEL_MASK_MV78200_OFF(cpu) ((cpu) ? 0x34 : 0x1C)
drivers/gpio/gpio-mvebu.c
87
#define GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu) ((cpu) * 0x4)
drivers/gpio/gpio-mvebu.c
88
#define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu) (0x10 + (cpu) * 0x4)
drivers/gpio/gpio-mvebu.c
89
#define GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu) (0x20 + (cpu) * 0x4)
drivers/gpio/gpio-realtek-otto.c
268
static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl, int cpu)
drivers/gpio/gpio-realtek-otto.c
270
return ctrl->cpumask_base + REALTEK_GPIO_PORTS_PER_BANK * cpu;
drivers/gpio/gpio-realtek-otto.c
280
int cpu;
drivers/gpio/gpio-realtek-otto.c
288
for_each_cpu(cpu, &ctrl->cpu_irq_maskable) {
drivers/gpio/gpio-realtek-otto.c
289
irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, cpu);
drivers/gpio/gpio-realtek-otto.c
292
if (cpumask_test_cpu(cpu, dest))
drivers/gpio/gpio-realtek-otto.c
312
int cpu;
drivers/gpio/gpio-realtek-otto.c
319
for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
drivers/gpio/gpio-realtek-otto.c
320
ctrl->bank_write(realtek_gpio_irq_cpu_mask(ctrl, cpu), mask_all);
drivers/gpio/gpio-realtek-otto.c
368
int cpu, err, irq;
drivers/gpio/gpio-realtek-otto.c
447
for (cpu = 0; cpu < nr_cpus; cpu++)
drivers/gpio/gpio-realtek-otto.c
448
cpumask_set_cpu(cpu, &ctrl->cpu_irq_maskable);
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
2022
struct acpi_srat_cpu_affinity *cpu;
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
2059
cpu = (struct acpi_srat_cpu_affinity *)sub_header;
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
2060
pxm = *((u32 *)cpu->proximity_domain_hi) << 8 |
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
2061
cpu->proximity_domain_lo;
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
27
u32 *cpu;
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
36
cpu = kmap_local_page(page) + offset_in_page(offset);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
39
drm_clflush_virt_range(cpu, sizeof(*cpu));
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
41
*cpu = v;
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
44
drm_clflush_virt_range(cpu, sizeof(*cpu));
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
46
kunmap_local(cpu);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
58
u32 *cpu;
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
67
cpu = kmap_local_page(page) + offset_in_page(offset);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
70
drm_clflush_virt_range(cpu, sizeof(*cpu));
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
72
*v = *cpu;
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
74
kunmap_local(cpu);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
107
u32 *cpu;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
160
cpu = kmap(p) + offset_in_page(offset);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
161
drm_clflush_virt_range(cpu, sizeof(*cpu));
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
162
if (*cpu != (u32)page) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
173
(u32)page, *cpu);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
176
*cpu = 0;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
177
drm_clflush_virt_range(cpu, sizeof(*cpu));
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
223
u32 *cpu;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
256
cpu = kmap(p) + offset_in_page(offset);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
257
drm_clflush_virt_range(cpu, sizeof(*cpu));
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
258
if (*cpu != (u32)page) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
269
(u32)page, *cpu);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
272
*cpu = 0;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
273
drm_clflush_virt_range(cpu, sizeof(*cpu));
drivers/gpu/drm/i915/i915_pmu.c
641
if (event->cpu < 0)
drivers/gpu/drm/i915/i915_request.c
1869
static unsigned long local_clock_ns(unsigned int *cpu)
drivers/gpu/drm/i915/i915_request.c
1885
*cpu = get_cpu();
drivers/gpu/drm/i915/i915_request.c
1892
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
drivers/gpu/drm/i915/i915_request.c
1899
return this_cpu != cpu;
drivers/gpu/drm/i915/i915_request.c
1905
unsigned int cpu;
drivers/gpu/drm/i915/i915_request.c
1933
timeout_ns += local_clock_ns(&cpu);
drivers/gpu/drm/i915/i915_request.c
1941
if (busywait_stop(timeout_ns, cpu))
drivers/gpu/drm/i915/i915_wait_util.h
64
int cpu, ret, timeout = (US) * 1000; \
drivers/gpu/drm/i915/i915_wait_util.h
69
cpu = smp_processor_id(); \
drivers/gpu/drm/i915/i915_wait_util.h
89
if (unlikely(cpu != smp_processor_id())) { \
drivers/gpu/drm/i915/i915_wait_util.h
91
cpu = smp_processor_id(); \
drivers/gpu/drm/lima/lima_vm.c
212
vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
drivers/gpu/drm/lima/lima_vm.c
214
if (!vm->pd.cpu)
drivers/gpu/drm/lima/lima_vm.c
229
dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
drivers/gpu/drm/lima/lima_vm.c
243
if (vm->bts[i].cpu)
drivers/gpu/drm/lima/lima_vm.c
245
vm->bts[i].cpu, vm->bts[i].dma);
drivers/gpu/drm/lima/lima_vm.c
248
if (vm->pd.cpu)
drivers/gpu/drm/lima/lima_vm.c
249
dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
drivers/gpu/drm/lima/lima_vm.c
259
if (!vm->pd.cpu)
drivers/gpu/drm/lima/lima_vm.c
262
pd = vm->pd.cpu;
drivers/gpu/drm/lima/lima_vm.c
264
if (!vm->bts[i].cpu)
drivers/gpu/drm/lima/lima_vm.c
267
pt = vm->bts[i].cpu;
drivers/gpu/drm/lima/lima_vm.c
43
vm->bts[pbe].cpu[bte] = 0;
drivers/gpu/drm/lima/lima_vm.c
52
if (!vm->bts[pbe].cpu) {
drivers/gpu/drm/lima/lima_vm.c
57
vm->bts[pbe].cpu = dma_alloc_wc(
drivers/gpu/drm/lima/lima_vm.c
60
if (!vm->bts[pbe].cpu)
drivers/gpu/drm/lima/lima_vm.c
64
pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
drivers/gpu/drm/lima/lima_vm.c
71
vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;
drivers/gpu/drm/lima/lima_vm.h
25
u32 *cpu;
drivers/gpu/drm/vc4/vc4_hdmi.c
2394
dai_link->cpus = &vc4_hdmi->audio.cpu;
drivers/gpu/drm/vc4/vc4_hdmi.h
104
struct snd_soc_dai_link_component cpu;
drivers/gpu/drm/xe/xe_pmu.c
248
if (event->cpu < 0)
drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
401
link->cpus = &audio->components[i].cpu;
drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
47
struct snd_soc_dai_link_component cpu;
drivers/hv/channel_mgmt.c
707
static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
drivers/hv/channel_mgmt.c
717
if (primary->target_cpu == cpu)
drivers/hv/channel_mgmt.c
721
if (sc != chn && sc->target_cpu == cpu)
drivers/hv/channel_mgmt.c
815
int cpu;
drivers/hv/channel_mgmt.c
842
for_each_present_cpu(cpu) {
drivers/hv/channel_mgmt.c
844
= per_cpu_ptr(hv_context.cpu_context, cpu);
drivers/hv/channel_mgmt.c
891
for_each_present_cpu(cpu) {
drivers/hv/channel_mgmt.c
893
= per_cpu_ptr(hv_context.cpu_context, cpu);
drivers/hv/hv.c
172
int cpu, ret = -ENOMEM;
drivers/hv/hv.c
181
for_each_present_cpu(cpu) {
drivers/hv/hv.c
182
hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
drivers/hv/hv.c
192
for_each_present_cpu(cpu) {
drivers/hv/hv.c
193
hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
drivers/hv/hv.c
245
int cpu;
drivers/hv/hv.c
248
for_each_present_cpu(cpu) {
drivers/hv/hv.c
250
per_cpu_ptr(hv_context.cpu_context, cpu);
drivers/hv/hv.c
276
void hv_hyp_synic_enable_regs(unsigned int cpu)
drivers/hv/hv.c
279
per_cpu_ptr(hv_context.cpu_context, cpu);
drivers/hv/hv.c
321
hv_enable_coco_interrupt(cpu, vmbus_interrupt, true);
drivers/hv/hv.c
345
static void hv_para_synic_enable_regs(unsigned int cpu)
drivers/hv/hv.c
350
= per_cpu_ptr(hv_context.cpu_context, cpu);
drivers/hv/hv.c
377
int hv_synic_init(unsigned int cpu)
drivers/hv/hv.c
380
hv_para_synic_enable_regs(cpu);
drivers/hv/hv.c
393
hv_hyp_synic_enable_regs(cpu);
drivers/hv/hv.c
399
hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);
drivers/hv/hv.c
404
void hv_hyp_synic_disable_regs(unsigned int cpu)
drivers/hv/hv.c
407
per_cpu_ptr(hv_context.cpu_context, cpu);
drivers/hv/hv.c
419
hv_enable_coco_interrupt(cpu, vmbus_interrupt, false);
drivers/hv/hv.c
465
static void hv_para_synic_disable_regs(unsigned int cpu)
drivers/hv/hv.c
544
int cpu;
drivers/hv/hv.c
558
for_each_cpu_wrap(cpu, cpu_online_mask, start) {
drivers/hv/hv.c
559
if (channel->target_cpu == cpu ||
drivers/hv/hv.c
563
ret = vmbus_channel_set_cpu(channel, cpu);
drivers/hv/hv.c
577
int hv_synic_cleanup(unsigned int cpu)
drivers/hv/hv.c
592
if (cpu == VMBUS_CONNECT_CPU)
drivers/hv/hv.c
601
if (channel->target_cpu == cpu) {
drivers/hv/hv.c
609
if (sc->target_cpu == cpu) {
drivers/hv/hv.c
630
hv_stimer_legacy_cleanup(cpu);
drivers/hv/hv.c
638
hv_hyp_synic_disable_regs(cpu);
drivers/hv/hv.c
648
hv_para_synic_disable_regs(cpu);
drivers/hv/hv_common.c
472
int hv_common_cpu_init(unsigned int cpu)
drivers/hv/hv_common.c
531
hv_vp_index[cpu] = msr_vp_index;
drivers/hv/hv_common.c
548
int hv_common_cpu_die(unsigned int cpu)
drivers/hv/hv_common.c
721
void __weak hv_enable_coco_interrupt(unsigned int cpu, unsigned int vector, bool set)
drivers/hv/hyperv_vmbus.h
194
extern void hv_hyp_synic_enable_regs(unsigned int cpu);
drivers/hv/hyperv_vmbus.h
195
extern int hv_synic_init(unsigned int cpu);
drivers/hv/hyperv_vmbus.h
197
extern void hv_hyp_synic_disable_regs(unsigned int cpu);
drivers/hv/hyperv_vmbus.h
198
extern int hv_synic_cleanup(unsigned int cpu);
drivers/hv/hyperv_vmbus.h
479
static inline bool hv_is_allocated_cpu(unsigned int cpu)
drivers/hv/hyperv_vmbus.h
491
if (channel->target_cpu == cpu)
drivers/hv/hyperv_vmbus.h
494
if (sc->target_cpu == cpu)
drivers/hv/hyperv_vmbus.h
501
static inline void hv_set_allocated_cpu(unsigned int cpu)
drivers/hv/hyperv_vmbus.h
503
cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
drivers/hv/hyperv_vmbus.h
506
static inline void hv_clear_allocated_cpu(unsigned int cpu)
drivers/hv/hyperv_vmbus.h
508
if (hv_is_allocated_cpu(cpu))
drivers/hv/hyperv_vmbus.h
510
cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
drivers/hv/mshv_root_main.c
2176
static int mshv_root_scheduler_init(unsigned int cpu)
drivers/hv/mshv_root_main.c
2194
static int mshv_root_scheduler_cleanup(unsigned int cpu)
drivers/hv/mshv_synic.c
456
static int mshv_synic_cpu_init(unsigned int cpu)
drivers/hv/mshv_synic.c
551
static int mshv_synic_cpu_exit(unsigned int cpu)
drivers/hv/mshv_vtl_main.c
196
static struct mshv_vtl_run *mshv_vtl_cpu_run(int cpu)
drivers/hv/mshv_vtl_main.c
198
return *per_cpu_ptr(&mshv_vtl_per_cpu.run, cpu);
drivers/hv/mshv_vtl_main.c
201
static struct page *mshv_vtl_cpu_reg_page(int cpu)
drivers/hv/mshv_vtl_main.c
203
return *per_cpu_ptr(&mshv_vtl_per_cpu.reg_page, cpu);
drivers/hv/mshv_vtl_main.c
234
static void mshv_vtl_synic_enable_regs(unsigned int cpu)
drivers/hv/mshv_vtl_main.c
326
static int mshv_vtl_alloc_context(unsigned int cpu)
drivers/hv/mshv_vtl_main.c
337
mshv_vtl_synic_enable_regs(cpu);
drivers/hv/mshv_vtl_main.c
428
static void mshv_vtl_cancel(int cpu)
drivers/hv/mshv_vtl_main.c
432
if (here != cpu) {
drivers/hv/mshv_vtl_main.c
433
if (!xchg_relaxed(&mshv_vtl_cpu_run(cpu)->cancel, 1))
drivers/hv/mshv_vtl_main.c
434
smp_send_reschedule(cpu);
drivers/hv/mshv_vtl_main.c
445
mshv_vtl_cancel(poll_file->cpu);
drivers/hv/mshv_vtl_main.c
468
if (input.cpu >= num_possible_cpus() || !cpu_online(input.cpu))
drivers/hv/mshv_vtl_main.c
480
poll_file = per_cpu_ptr(&mshv_vtl_poll_file, READ_ONCE(input.cpu));
drivers/hv/mshv_vtl_main.c
492
poll_file->cpu = input.cpu;
drivers/hv/mshv_vtl_main.c
75
int cpu;
drivers/hv/mshv_vtl_main.c
795
int cpu = vmf->pgoff & MSHV_PG_OFF_CPU_MASK;
drivers/hv/mshv_vtl_main.c
798
if (!cpu_online(cpu))
drivers/hv/mshv_vtl_main.c
806
page = virt_to_page(mshv_vtl_cpu_run(cpu));
drivers/hv/mshv_vtl_main.c
810
page = mshv_vtl_cpu_reg_page(cpu);
drivers/hv/vmbus_drv.c
1379
static void vmbus_irqd_setup(unsigned int cpu)
drivers/hv/vmbus_drv.c
1384
static int vmbus_irqd_should_run(unsigned int cpu)
drivers/hv/vmbus_drv.c
1389
static void run_vmbus_irqd(unsigned int cpu)
drivers/hv/vmbus_drv.c
1424
unsigned int cpu = smp_processor_id();
drivers/hv/vmbus_drv.c
1426
hv_synic_init(cpu);
drivers/hv/vmbus_drv.c
1431
int ret, cpu;
drivers/hv/vmbus_drv.c
1450
for_each_online_cpu(cpu) {
drivers/hv/vmbus_drv.c
1451
struct work_struct *work = per_cpu_ptr(works, cpu);
drivers/hv/vmbus_drv.c
1454
schedule_work_on(cpu, work);
drivers/hv/vmbus_drv.c
1457
for_each_online_cpu(cpu)
drivers/hv/vmbus_drv.c
1458
flush_work(per_cpu_ptr(works, cpu));
drivers/hv/vmbus_drv.c
1870
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
drivers/hv/vmbus_drv.c
2905
int cpu;
drivers/hv/vmbus_drv.c
2913
cpu = smp_processor_id();
drivers/hv/vmbus_drv.c
2914
hv_stimer_cleanup(cpu);
drivers/hv/vmbus_drv.c
2915
hv_hyp_synic_disable_regs(cpu);
drivers/hv/vmbus_drv.c
3020
int cpu;
drivers/hv/vmbus_drv.c
3037
for_each_online_cpu(cpu) {
drivers/hv/vmbus_drv.c
3039
= per_cpu_ptr(hv_context.cpu_context, cpu);
drivers/hwmon/coretemp.c
279
struct cpuinfo_x86 *c = &cpu_data(tdata->cpu);
drivers/hwmon/coretemp.c
292
err = rdmsr_safe_on_cpu(tdata->cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
drivers/hwmon/coretemp.c
294
dev_warn_once(dev, "Unable to read TjMax from CPU %u\n", tdata->cpu);
drivers/hwmon/coretemp.c
310
tdata->tjmax = adjust_tjmax(c, tdata->cpu, dev);
drivers/hwmon/coretemp.c
327
ret = rdmsr_safe_on_cpu(tdata->cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
drivers/hwmon/coretemp.c
364
rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
drivers/hwmon/coretemp.c
410
rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
drivers/hwmon/coretemp.c
457
static int chk_ucode_version(unsigned int cpu)
drivers/hwmon/coretemp.c
459
struct cpuinfo_x86 *c = &cpu_data(cpu);
drivers/hwmon/coretemp.c
473
static struct platform_device *coretemp_get_pdev(unsigned int cpu)
drivers/hwmon/coretemp.c
475
int id = topology_logical_die_id(cpu);
drivers/hwmon/coretemp.c
483
init_temp_data(struct platform_data *pdata, unsigned int cpu, int pkg_flag)
drivers/hwmon/coretemp.c
520
tdata->cpu = cpu;
drivers/hwmon/coretemp.c
521
tdata->cpu_core_id = topology_core_id(cpu);
drivers/hwmon/coretemp.c
541
static struct temp_data *get_temp_data(struct platform_data *pdata, int cpu)
drivers/hwmon/coretemp.c
546
if (cpu < 0)
drivers/hwmon/coretemp.c
551
pdata->core_data[i]->cpu_core_id == topology_core_id(cpu))
drivers/hwmon/coretemp.c
557
static int create_core_data(struct platform_device *pdev, unsigned int cpu,
drivers/hwmon/coretemp.c
56
#define for_each_sibling(i, cpu) \
drivers/hwmon/coretemp.c
562
struct cpuinfo_x86 *c = &cpu_data(cpu);
drivers/hwmon/coretemp.c
566
if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
drivers/hwmon/coretemp.c
569
tdata = init_temp_data(pdata, cpu, pkg_flag);
drivers/hwmon/coretemp.c
57
for_each_cpu(i, topology_sibling_cpumask(cpu))
drivers/hwmon/coretemp.c
574
err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
drivers/hwmon/coretemp.c
59
#define for_each_sibling(i, cpu) for (i = 0; false; )
drivers/hwmon/coretemp.c
603
coretemp_add_core(struct platform_device *pdev, unsigned int cpu, int pkg_flag)
drivers/hwmon/coretemp.c
605
if (create_core_data(pdev, cpu, pkg_flag))
drivers/hwmon/coretemp.c
606
dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
drivers/hwmon/coretemp.c
666
static int coretemp_cpu_online(unsigned int cpu)
drivers/hwmon/coretemp.c
668
struct platform_device *pdev = coretemp_get_pdev(cpu);
drivers/hwmon/coretemp.c
669
struct cpuinfo_x86 *c = &cpu_data(cpu);
drivers/hwmon/coretemp.c
692
if (chk_ucode_version(cpu))
drivers/hwmon/coretemp.c
712
coretemp_add_core(pdev, cpu, 1);
drivers/hwmon/coretemp.c
719
if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu)))
drivers/hwmon/coretemp.c
720
coretemp_add_core(pdev, cpu, 0);
drivers/hwmon/coretemp.c
722
cpumask_set_cpu(cpu, &pdata->cpumask);
drivers/hwmon/coretemp.c
726
static int coretemp_cpu_offline(unsigned int cpu)
drivers/hwmon/coretemp.c
728
struct platform_device *pdev = coretemp_get_pdev(cpu);
drivers/hwmon/coretemp.c
742
tdata = get_temp_data(pd, cpu);
drivers/hwmon/coretemp.c
744
cpumask_clear_cpu(cpu, &pd->cpumask);
drivers/hwmon/coretemp.c
751
target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
drivers/hwmon/coretemp.c
754
} else if (tdata && tdata->cpu == cpu) {
drivers/hwmon/coretemp.c
756
tdata->cpu = target;
drivers/hwmon/coretemp.c
776
if (tdata && tdata->cpu == cpu) {
drivers/hwmon/coretemp.c
779
tdata->cpu = target;
drivers/hwmon/coretemp.c
78
unsigned int cpu;
drivers/hwmon/fam15h_power.c
160
int ret, cpu;
drivers/hwmon/fam15h_power.c
178
for_each_online_cpu(cpu) {
drivers/hwmon/fam15h_power.c
179
this_core = topology_core_id(cpu);
drivers/hwmon/fam15h_power.c
187
cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
drivers/hwmon/ibmpowernv.c
174
int cpu;
drivers/hwmon/ibmpowernv.c
176
for_each_possible_cpu(cpu)
drivers/hwmon/ibmpowernv.c
177
if (get_hard_smp_processor_id(cpu) == hwcpu)
drivers/hwmon/ibmpowernv.c
178
return cpu;
drivers/hwmon/via-cputemp.c
206
unsigned int cpu;
drivers/hwmon/via-cputemp.c
212
static int via_cputemp_online(unsigned int cpu)
drivers/hwmon/via-cputemp.c
218
pdev = platform_device_alloc(DRVNAME, cpu);
drivers/hwmon/via-cputemp.c
238
pdev_entry->cpu = cpu;
drivers/hwmon/via-cputemp.c
253
static int via_cputemp_down_prep(unsigned int cpu)
drivers/hwmon/via-cputemp.c
259
if (p->cpu == cpu) {
drivers/hwtracing/coresight/coresight-core.c
1679
int cpu, trace_id;
drivers/hwtracing/coresight/coresight-core.c
1684
cpu = source_ops(csdev)->cpu_id(csdev);
drivers/hwtracing/coresight/coresight-core.c
1687
trace_id = coresight_trace_id_get_cpu_id(cpu);
drivers/hwtracing/coresight/coresight-core.c
1693
trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map);
drivers/hwtracing/coresight/coresight-core.c
1702
"Failed to allocate trace ID on CPU%d\n", cpu);
drivers/hwtracing/coresight/coresight-core.c
70
void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev)
drivers/hwtracing/coresight/coresight-core.c
72
per_cpu(csdev_sink, cpu) = csdev;
drivers/hwtracing/coresight/coresight-core.c
76
struct coresight_device *coresight_get_percpu_sink(int cpu)
drivers/hwtracing/coresight/coresight-core.c
78
return per_cpu(csdev_sink, cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
175
__func__, drvdata->cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
383
int cpu;
drivers/hwtracing/coresight/coresight-cpu-debug.c
395
for_each_possible_cpu(cpu) {
drivers/hwtracing/coresight/coresight-cpu-debug.c
396
drvdata = per_cpu(debug_drvdata, cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
400
dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
418
int cpu, ret = 0;
drivers/hwtracing/coresight/coresight-cpu-debug.c
427
for_each_possible_cpu(cpu) {
drivers/hwtracing/coresight/coresight-cpu-debug.c
428
drvdata = per_cpu(debug_drvdata, cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
436
cpumask_set_cpu(cpu, &mask);
drivers/hwtracing/coresight/coresight-cpu-debug.c
446
for_each_cpu(cpu, &mask) {
drivers/hwtracing/coresight/coresight-cpu-debug.c
447
drvdata = per_cpu(debug_drvdata, cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
457
int cpu;
drivers/hwtracing/coresight/coresight-cpu-debug.c
464
for_each_possible_cpu(cpu) {
drivers/hwtracing/coresight/coresight-cpu-debug.c
465
drvdata = per_cpu(debug_drvdata, cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
575
drvdata->cpu = coresight_get_cpu(dev);
drivers/hwtracing/coresight/coresight-cpu-debug.c
576
if (drvdata->cpu < 0)
drivers/hwtracing/coresight/coresight-cpu-debug.c
577
return drvdata->cpu;
drivers/hwtracing/coresight/coresight-cpu-debug.c
579
if (per_cpu(debug_drvdata, drvdata->cpu)) {
drivers/hwtracing/coresight/coresight-cpu-debug.c
581
drvdata->cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
593
per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
drivers/hwtracing/coresight/coresight-cpu-debug.c
594
ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
drivers/hwtracing/coresight/coresight-cpu-debug.c
599
dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
605
drvdata->cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
622
dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
628
per_cpu(debug_drvdata, drvdata->cpu) = NULL;
drivers/hwtracing/coresight/coresight-cpu-debug.c
641
per_cpu(debug_drvdata, drvdata->cpu) = NULL;
drivers/hwtracing/coresight/coresight-cpu-debug.c
92
int cpu;
drivers/hwtracing/coresight/coresight-cti-core.c
667
unsigned int cpu = smp_processor_id();
drivers/hwtracing/coresight/coresight-cti-core.c
670
if (!cti_cpu_drvdata[cpu])
drivers/hwtracing/coresight/coresight-cti-core.c
673
drvdata = cti_cpu_drvdata[cpu];
drivers/hwtracing/coresight/coresight-cti-core.c
676
if (WARN_ON_ONCE(drvdata->ctidev.cpu != cpu))
drivers/hwtracing/coresight/coresight-cti-core.c
728
static int cti_starting_cpu(unsigned int cpu)
drivers/hwtracing/coresight/coresight-cti-core.c
730
struct cti_drvdata *drvdata = cti_cpu_drvdata[cpu];
drivers/hwtracing/coresight/coresight-cti-core.c
739
static int cti_dying_cpu(unsigned int cpu)
drivers/hwtracing/coresight/coresight-cti-core.c
741
struct cti_drvdata *drvdata = cti_cpu_drvdata[cpu];
drivers/hwtracing/coresight/coresight-cti-core.c
758
if (drvdata->ctidev.cpu == -1)
drivers/hwtracing/coresight/coresight-cti-core.c
783
cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata;
drivers/hwtracing/coresight/coresight-cti-core.c
791
if (drvdata->ctidev.cpu == -1)
drivers/hwtracing/coresight/coresight-cti-core.c
794
cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL;
drivers/hwtracing/coresight/coresight-cti-core.c
887
drvdata->ctidev.cpu = -1;
drivers/hwtracing/coresight/coresight-cti-core.c
907
if (drvdata->ctidev.cpu >= 0)
drivers/hwtracing/coresight/coresight-cti-core.c
909
drvdata->ctidev.cpu);
drivers/hwtracing/coresight/coresight-cti-platform.c
189
cti_dev->cpu = cpuid;
drivers/hwtracing/coresight/coresight-cti-platform.c
394
drvdata->ctidev.cpu = cpuid;
drivers/hwtracing/coresight/coresight-cti-platform.c
44
int cpu;
drivers/hwtracing/coresight/coresight-cti-platform.c
54
cpu = of_cpu_node_to_id(dn);
drivers/hwtracing/coresight/coresight-cti-platform.c
58
return (cpu < 0) ? -1 : cpu;
drivers/hwtracing/coresight/coresight-cti.h
112
int cpu;
drivers/hwtracing/coresight/coresight-etb10.c
382
node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
143
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
drivers/hwtracing/coresight/coresight-etm-perf.c
145
return per_cpu_ptr(data->path, cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
149
etm_event_cpu_path(struct etm_event_data *data, int cpu)
drivers/hwtracing/coresight/coresight-etm-perf.c
151
return *etm_event_cpu_path_ptr(data, cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
159
int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
200
int cpu;
drivers/hwtracing/coresight/coresight-etm-perf.c
210
cpu = cpumask_first(mask);
drivers/hwtracing/coresight/coresight-etm-perf.c
211
sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
drivers/hwtracing/coresight/coresight-etm-perf.c
217
int cpu;
drivers/hwtracing/coresight/coresight-etm-perf.c
231
for_each_cpu(cpu, mask) {
drivers/hwtracing/coresight/coresight-etm-perf.c
234
ppath = etm_event_cpu_path_ptr(event_data, cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
257
static void *alloc_event_data(int cpu)
drivers/hwtracing/coresight/coresight-etm-perf.c
269
if (cpu != -1)
drivers/hwtracing/coresight/coresight-etm-perf.c
270
cpumask_set_cpu(cpu, mask);
drivers/hwtracing/coresight/coresight-etm-perf.c
322
int cpu = event->cpu;
drivers/hwtracing/coresight/coresight-etm-perf.c
328
event_data = alloc_event_data(cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
355
for_each_cpu(cpu, mask) {
drivers/hwtracing/coresight/coresight-etm-perf.c
359
csdev = per_cpu(csdev_src, cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
366
cpumask_clear_cpu(cpu, mask);
drivers/hwtracing/coresight/coresight-etm-perf.c
378
cpumask_clear_cpu(cpu, mask);
drivers/hwtracing/coresight/coresight-etm-perf.c
399
cpumask_clear_cpu(cpu, mask);
drivers/hwtracing/coresight/coresight-etm-perf.c
405
cpumask_clear_cpu(cpu, mask);
drivers/hwtracing/coresight/coresight-etm-perf.c
418
cpumask_clear_cpu(cpu, mask);
drivers/hwtracing/coresight/coresight-etm-perf.c
425
cpumask_clear_cpu(cpu, mask);
drivers/hwtracing/coresight/coresight-etm-perf.c
431
*etm_event_cpu_path_ptr(event_data, cpu) = path;
drivers/hwtracing/coresight/coresight-etm-perf.c
439
cpu = cpumask_first(mask);
drivers/hwtracing/coresight/coresight-etm-perf.c
440
if (cpu >= nr_cpu_ids)
drivers/hwtracing/coresight/coresight-etm-perf.c
478
int cpu = smp_processor_id();
drivers/hwtracing/coresight/coresight-etm-perf.c
482
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
521
if (!cpumask_test_cpu(cpu, &event_data->mask))
drivers/hwtracing/coresight/coresight-etm-perf.c
524
path = etm_event_cpu_path(event_data, cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
543
if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) {
drivers/hwtracing/coresight/coresight-etm-perf.c
544
cpumask_set_cpu(cpu, &event_data->aux_hwid_done);
drivers/hwtracing/coresight/coresight-etm-perf.c
584
int cpu = smp_processor_id();
drivers/hwtracing/coresight/coresight-etm-perf.c
596
path = etm_event_cpu_path(ctxt->event_data, cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
631
int cpu = smp_processor_id();
drivers/hwtracing/coresight/coresight-etm-perf.c
633
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
668
!cpumask_test_cpu(cpu, &event_data->mask)) {
drivers/hwtracing/coresight/coresight-etm-perf.c
677
path = etm_event_cpu_path(event_data, cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
827
int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
drivers/hwtracing/coresight/coresight-etm-perf.c
831
sprintf(entry, "cpu%d", cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
840
per_cpu(csdev_src, cpu) = csdev;
drivers/hwtracing/coresight/coresight-etm-perf.c
843
per_cpu(csdev_src, cpu) = NULL;
drivers/hwtracing/coresight/coresight-etm.h
236
int cpu;
drivers/hwtracing/coresight/coresight-etm3x-core.c
438
drvdata->cpu, rc);
drivers/hwtracing/coresight/coresight-etm3x-core.c
473
return drvdata->cpu;
drivers/hwtracing/coresight/coresight-etm3x-core.c
478
coresight_trace_id_put_cpu_id(drvdata->cpu);
drivers/hwtracing/coresight/coresight-etm3x-core.c
488
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
drivers/hwtracing/coresight/coresight-etm3x-core.c
522
if (cpu_online(drvdata->cpu)) {
drivers/hwtracing/coresight/coresight-etm3x-core.c
524
ret = smp_call_function_single(drvdata->cpu,
drivers/hwtracing/coresight/coresight-etm3x-core.c
584
"cpu: %d disable smp call done\n", drvdata->cpu);
drivers/hwtracing/coresight/coresight-etm3x-core.c
600
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
drivers/hwtracing/coresight/coresight-etm3x-core.c
643
smp_call_function_single(drvdata->cpu, etm_disable_sysfs_smp_call,
drivers/hwtracing/coresight/coresight-etm3x-core.c
697
static int etm_online_cpu(unsigned int cpu)
drivers/hwtracing/coresight/coresight-etm3x-core.c
699
if (!etmdrvdata[cpu])
drivers/hwtracing/coresight/coresight-etm3x-core.c
702
if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
drivers/hwtracing/coresight/coresight-etm3x-core.c
703
coresight_enable_sysfs(etmdrvdata[cpu]->csdev);
drivers/hwtracing/coresight/coresight-etm3x-core.c
707
static int etm_starting_cpu(unsigned int cpu)
drivers/hwtracing/coresight/coresight-etm3x-core.c
709
if (!etmdrvdata[cpu])
drivers/hwtracing/coresight/coresight-etm3x-core.c
712
spin_lock(&etmdrvdata[cpu]->spinlock);
drivers/hwtracing/coresight/coresight-etm3x-core.c
713
if (!etmdrvdata[cpu]->os_unlock) {
drivers/hwtracing/coresight/coresight-etm3x-core.c
714
etm_os_unlock(etmdrvdata[cpu]);
drivers/hwtracing/coresight/coresight-etm3x-core.c
715
etmdrvdata[cpu]->os_unlock = true;
drivers/hwtracing/coresight/coresight-etm3x-core.c
718
if (coresight_get_mode(etmdrvdata[cpu]->csdev))
drivers/hwtracing/coresight/coresight-etm3x-core.c
719
etm_enable_hw(etmdrvdata[cpu]);
drivers/hwtracing/coresight/coresight-etm3x-core.c
720
spin_unlock(&etmdrvdata[cpu]->spinlock);
drivers/hwtracing/coresight/coresight-etm3x-core.c
724
static int etm_dying_cpu(unsigned int cpu)
drivers/hwtracing/coresight/coresight-etm3x-core.c
726
if (!etmdrvdata[cpu])
drivers/hwtracing/coresight/coresight-etm3x-core.c
729
spin_lock(&etmdrvdata[cpu]->spinlock);
drivers/hwtracing/coresight/coresight-etm3x-core.c
730
if (coresight_get_mode(etmdrvdata[cpu]->csdev))
drivers/hwtracing/coresight/coresight-etm3x-core.c
731
etm_disable_hw(etmdrvdata[cpu]);
drivers/hwtracing/coresight/coresight-etm3x-core.c
732
spin_unlock(&etmdrvdata[cpu]->spinlock);
drivers/hwtracing/coresight/coresight-etm3x-core.c
865
drvdata->cpu = coresight_get_cpu(dev);
drivers/hwtracing/coresight/coresight-etm3x-core.c
866
if (drvdata->cpu < 0)
drivers/hwtracing/coresight/coresight-etm3x-core.c
867
return drvdata->cpu;
drivers/hwtracing/coresight/coresight-etm3x-core.c
869
desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
drivers/hwtracing/coresight/coresight-etm3x-core.c
873
if (smp_call_function_single(drvdata->cpu,
drivers/hwtracing/coresight/coresight-etm3x-core.c
904
etmdrvdata[drvdata->cpu] = drvdata;
drivers/hwtracing/coresight/coresight-etm3x-core.c
919
int cpu = *(int *)info;
drivers/hwtracing/coresight/coresight-etm3x-core.c
921
etmdrvdata[cpu] = NULL;
drivers/hwtracing/coresight/coresight-etm3x-core.c
941
if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
drivers/hwtracing/coresight/coresight-etm3x-core.c
942
etmdrvdata[drvdata->cpu] = NULL;
drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
1184
val = drvdata->cpu;
drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
1188
static DEVICE_ATTR_RO(cpu);
drivers/hwtracing/coresight/coresight-etm4x-core.c
1064
"cpu: %d disable smp call done\n", drvdata->cpu);
drivers/hwtracing/coresight/coresight-etm4x-core.c
1084
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
drivers/hwtracing/coresight/coresight-etm4x-core.c
1129
smp_call_function_single(drvdata->cpu, etm4_disable_sysfs_smp_call,
drivers/hwtracing/coresight/coresight-etm4x-core.c
1832
static int etm4_online_cpu(unsigned int cpu)
drivers/hwtracing/coresight/coresight-etm4x-core.c
1834
if (!etmdrvdata[cpu])
drivers/hwtracing/coresight/coresight-etm4x-core.c
1835
return etm4_probe_cpu(cpu);
drivers/hwtracing/coresight/coresight-etm4x-core.c
1837
if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
drivers/hwtracing/coresight/coresight-etm4x-core.c
1838
coresight_enable_sysfs(etmdrvdata[cpu]->csdev);
drivers/hwtracing/coresight/coresight-etm4x-core.c
1842
static int etm4_starting_cpu(unsigned int cpu)
drivers/hwtracing/coresight/coresight-etm4x-core.c
1844
if (!etmdrvdata[cpu])
drivers/hwtracing/coresight/coresight-etm4x-core.c
1847
raw_spin_lock(&etmdrvdata[cpu]->spinlock);
drivers/hwtracing/coresight/coresight-etm4x-core.c
1848
if (!etmdrvdata[cpu]->os_unlock)
drivers/hwtracing/coresight/coresight-etm4x-core.c
1849
etm4_os_unlock(etmdrvdata[cpu]);
drivers/hwtracing/coresight/coresight-etm4x-core.c
1851
if (coresight_get_mode(etmdrvdata[cpu]->csdev))
drivers/hwtracing/coresight/coresight-etm4x-core.c
1852
etm4_enable_hw(etmdrvdata[cpu]);
drivers/hwtracing/coresight/coresight-etm4x-core.c
1853
raw_spin_unlock(&etmdrvdata[cpu]->spinlock);
drivers/hwtracing/coresight/coresight-etm4x-core.c
1857
static int etm4_dying_cpu(unsigned int cpu)
drivers/hwtracing/coresight/coresight-etm4x-core.c
1859
if (!etmdrvdata[cpu])
drivers/hwtracing/coresight/coresight-etm4x-core.c
1862
raw_spin_lock(&etmdrvdata[cpu]->spinlock);
drivers/hwtracing/coresight/coresight-etm4x-core.c
1863
if (coresight_get_mode(etmdrvdata[cpu]->csdev))
drivers/hwtracing/coresight/coresight-etm4x-core.c
1864
etm4_disable_hw(etmdrvdata[cpu]);
drivers/hwtracing/coresight/coresight-etm4x-core.c
1865
raw_spin_unlock(&etmdrvdata[cpu]->spinlock);
drivers/hwtracing/coresight/coresight-etm4x-core.c
193
WARN_ON(drvdata->cpu != smp_processor_id());
drivers/hwtracing/coresight/coresight-etm4x-core.c
2141
unsigned int cpu = smp_processor_id();
drivers/hwtracing/coresight/coresight-etm4x-core.c
2143
if (!etmdrvdata[cpu])
drivers/hwtracing/coresight/coresight-etm4x-core.c
2146
drvdata = etmdrvdata[cpu];
drivers/hwtracing/coresight/coresight-etm4x-core.c
2148
if (WARN_ON_ONCE(drvdata->cpu != cpu))
drivers/hwtracing/coresight/coresight-etm4x-core.c
2245
"%s%d", type_name, drvdata->cpu);
drivers/hwtracing/coresight/coresight-etm4x-core.c
2280
etmdrvdata[drvdata->cpu] = drvdata;
drivers/hwtracing/coresight/coresight-etm4x-core.c
2283
drvdata->cpu, type_name, major, minor);
drivers/hwtracing/coresight/coresight-etm4x-core.c
2321
drvdata->cpu = coresight_get_cpu(dev);
drivers/hwtracing/coresight/coresight-etm4x-core.c
2322
if (drvdata->cpu < 0)
drivers/hwtracing/coresight/coresight-etm4x-core.c
2323
return drvdata->cpu;
drivers/hwtracing/coresight/coresight-etm4x-core.c
2333
if (smp_call_function_single(drvdata->cpu,
drivers/hwtracing/coresight/coresight-etm4x-core.c
234
return drvdata->cpu;
drivers/hwtracing/coresight/coresight-etm4x-core.c
2344
per_cpu(delayed_probe, drvdata->cpu) = delayed;
drivers/hwtracing/coresight/coresight-etm4x-core.c
239
coresight_trace_id_put_cpu_id(drvdata->cpu);
drivers/hwtracing/coresight/coresight-etm4x-core.c
2410
static int etm4_probe_cpu(unsigned int cpu)
drivers/hwtracing/coresight/coresight-etm4x-core.c
2450
int cpu = *(int *)info;
drivers/hwtracing/coresight/coresight-etm4x-core.c
2452
etmdrvdata[cpu] = NULL;
drivers/hwtracing/coresight/coresight-etm4x-core.c
2453
per_cpu(delayed_probe, cpu) = NULL;
drivers/hwtracing/coresight/coresight-etm4x-core.c
2465
had_delayed_probe = per_cpu(delayed_probe, drvdata->cpu);
drivers/hwtracing/coresight/coresight-etm4x-core.c
2473
if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
drivers/hwtracing/coresight/coresight-etm4x-core.c
2474
clear_etmdrvdata(&drvdata->cpu);
drivers/hwtracing/coresight/coresight-etm4x-core.c
609
drvdata->cpu, rc);
drivers/hwtracing/coresight/coresight-etm4x-core.c
78
static int etm4_probe_cpu(unsigned int cpu);
drivers/hwtracing/coresight/coresight-etm4x-core.c
879
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
drivers/hwtracing/coresight/coresight-etm4x-core.c
932
ret = smp_call_function_single(drvdata->cpu,
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
2306
val = drvdata->cpu;
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
2310
static DEVICE_ATTR_RO(cpu);
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
2440
smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
drivers/hwtracing/coresight/coresight-etm4x.h
1038
int cpu;
drivers/hwtracing/coresight/coresight-platform.c
170
int cpu;
drivers/hwtracing/coresight/coresight-platform.c
180
cpu = of_cpu_node_to_id(dn);
drivers/hwtracing/coresight/coresight-platform.c
183
return cpu;
drivers/hwtracing/coresight/coresight-platform.c
765
int cpu;
drivers/hwtracing/coresight/coresight-platform.c
776
cpu = acpi_handle_to_logical_cpuid(cpu_handle);
drivers/hwtracing/coresight/coresight-platform.c
777
if (cpu >= nr_cpu_ids)
drivers/hwtracing/coresight/coresight-platform.c
779
return cpu;
drivers/hwtracing/coresight/coresight-priv.h
250
void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev);
drivers/hwtracing/coresight/coresight-priv.h
251
struct coresight_device *coresight_get_percpu_sink(int cpu);
drivers/hwtracing/coresight/coresight-sysfs.c
170
int cpu, ret = 0;
drivers/hwtracing/coresight/coresight-sysfs.c
235
cpu = source_ops(csdev)->cpu_id(csdev);
drivers/hwtracing/coresight/coresight-sysfs.c
236
per_cpu(tracer_path, cpu) = path;
drivers/hwtracing/coresight/coresight-sysfs.c
270
int cpu, ret;
drivers/hwtracing/coresight/coresight-sysfs.c
285
cpu = source_ops(csdev)->cpu_id(csdev);
drivers/hwtracing/coresight/coresight-sysfs.c
286
path = per_cpu(tracer_path, cpu);
drivers/hwtracing/coresight/coresight-sysfs.c
287
per_cpu(tracer_path, cpu) = NULL;
drivers/hwtracing/coresight/coresight-tmc-etf.c
428
node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
drivers/hwtracing/coresight/coresight-tmc-etr.c
1379
node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
drivers/hwtracing/coresight/coresight-tmc-etr.c
1479
if (event->cpu == -1)
drivers/hwtracing/coresight/coresight-tmc-etr.c
1495
node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
drivers/hwtracing/coresight/coresight-trace-id.c
132
int cpu;
drivers/hwtracing/coresight/coresight-trace-id.c
136
for_each_possible_cpu(cpu)
drivers/hwtracing/coresight/coresight-trace-id.c
137
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
drivers/hwtracing/coresight/coresight-trace-id.c
142
static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
drivers/hwtracing/coresight/coresight-trace-id.c
150
id = _coresight_trace_id_read_cpu_id(cpu, id_map);
drivers/hwtracing/coresight/coresight-trace-id.c
165
CORESIGHT_LEGACY_CPU_TRACE_ID(cpu),
drivers/hwtracing/coresight/coresight-trace-id.c
171
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
drivers/hwtracing/coresight/coresight-trace-id.c
176
DUMP_ID_CPU(cpu, id);
drivers/hwtracing/coresight/coresight-trace-id.c
181
static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
drivers/hwtracing/coresight/coresight-trace-id.c
187
id = _coresight_trace_id_read_cpu_id(cpu, id_map);
drivers/hwtracing/coresight/coresight-trace-id.c
194
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
drivers/hwtracing/coresight/coresight-trace-id.c
197
DUMP_ID_CPU(cpu, id);
drivers/hwtracing/coresight/coresight-trace-id.c
230
int coresight_trace_id_get_cpu_id(int cpu)
drivers/hwtracing/coresight/coresight-trace-id.c
232
return _coresight_trace_id_get_cpu_id(cpu, &id_map_default);
drivers/hwtracing/coresight/coresight-trace-id.c
236
int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
drivers/hwtracing/coresight/coresight-trace-id.c
238
return _coresight_trace_id_get_cpu_id(cpu, id_map);
drivers/hwtracing/coresight/coresight-trace-id.c
242
void coresight_trace_id_put_cpu_id(int cpu)
drivers/hwtracing/coresight/coresight-trace-id.c
244
_coresight_trace_id_put_cpu_id(cpu, &id_map_default);
drivers/hwtracing/coresight/coresight-trace-id.c
248
void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
drivers/hwtracing/coresight/coresight-trace-id.c
250
_coresight_trace_id_put_cpu_id(cpu, id_map);
drivers/hwtracing/coresight/coresight-trace-id.c
254
int coresight_trace_id_read_cpu_id(int cpu)
drivers/hwtracing/coresight/coresight-trace-id.c
256
return _coresight_trace_id_read_cpu_id(cpu, &id_map_default);
drivers/hwtracing/coresight/coresight-trace-id.c
260
int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
drivers/hwtracing/coresight/coresight-trace-id.c
262
return _coresight_trace_id_read_cpu_id(cpu, id_map);
drivers/hwtracing/coresight/coresight-trace-id.c
38
#define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id)
drivers/hwtracing/coresight/coresight-trace-id.c
44
#define DUMP_ID_CPU(cpu, id)
drivers/hwtracing/coresight/coresight-trace-id.c
49
static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
drivers/hwtracing/coresight/coresight-trace-id.c
51
return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu));
drivers/hwtracing/coresight/coresight-trace-id.h
100
int coresight_trace_id_read_cpu_id(int cpu);
drivers/hwtracing/coresight/coresight-trace-id.h
106
int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
drivers/hwtracing/coresight/coresight-trace-id.h
59
int coresight_trace_id_get_cpu_id(int cpu);
drivers/hwtracing/coresight/coresight-trace-id.h
65
int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
drivers/hwtracing/coresight/coresight-trace-id.h
74
void coresight_trace_id_put_cpu_id(int cpu);
drivers/hwtracing/coresight/coresight-trace-id.h
80
void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
drivers/hwtracing/coresight/coresight-trbe.c
1023
WARN_ON(cpudata->cpu != smp_processor_id());
drivers/hwtracing/coresight/coresight-trbe.c
1042
WARN_ON(cpudata->cpu != smp_processor_id());
drivers/hwtracing/coresight/coresight-trbe.c
1101
int cpu = smp_processor_id();
drivers/hwtracing/coresight/coresight-trbe.c
1109
if (cpudata->cpu != cpu)
drivers/hwtracing/coresight/coresight-trbe.c
1112
if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus))
drivers/hwtracing/coresight/coresight-trbe.c
1254
static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
drivers/hwtracing/coresight/coresight-trbe.c
1256
struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
drivers/hwtracing/coresight/coresight-trbe.c
1257
struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
drivers/hwtracing/coresight/coresight-trbe.c
1269
desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
drivers/hwtracing/coresight/coresight-trbe.c
1297
coresight_set_percpu_sink(cpu, trbe_csdev);
drivers/hwtracing/coresight/coresight-trbe.c
1300
cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
drivers/hwtracing/coresight/coresight-trbe.c
1309
int cpu = smp_processor_id();
drivers/hwtracing/coresight/coresight-trbe.c
1310
struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
drivers/hwtracing/coresight/coresight-trbe.c
1317
pr_err("TRBE is not implemented on cpu %d\n", cpu);
drivers/hwtracing/coresight/coresight-trbe.c
1323
pr_err("TRBE is owned in higher exception level on cpu %d\n", cpu);
drivers/hwtracing/coresight/coresight-trbe.c
1329
pr_err("Unsupported alignment on cpu %d\n", cpu);
drivers/hwtracing/coresight/coresight-trbe.c
133
int cpu;
drivers/hwtracing/coresight/coresight-trbe.c
1340
pr_err("Disabling TRBE on cpu%d due to erratum\n", cpu);
drivers/hwtracing/coresight/coresight-trbe.c
1361
cpudata->cpu = cpu;
drivers/hwtracing/coresight/coresight-trbe.c
1365
cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
drivers/hwtracing/coresight/coresight-trbe.c
1368
static void arm_trbe_remove_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
drivers/hwtracing/coresight/coresight-trbe.c
1370
struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
drivers/hwtracing/coresight/coresight-trbe.c
1374
coresight_set_percpu_sink(cpu, NULL);
drivers/hwtracing/coresight/coresight-trbe.c
1380
int cpu;
drivers/hwtracing/coresight/coresight-trbe.c
1386
for_each_cpu(cpu, &drvdata->supported_cpus) {
drivers/hwtracing/coresight/coresight-trbe.c
1388
if (smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1))
drivers/hwtracing/coresight/coresight-trbe.c
1390
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
drivers/hwtracing/coresight/coresight-trbe.c
1391
arm_trbe_register_coresight_cpu(drvdata, cpu);
drivers/hwtracing/coresight/coresight-trbe.c
1392
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
drivers/hwtracing/coresight/coresight-trbe.c
1393
smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
drivers/hwtracing/coresight/coresight-trbe.c
1400
int cpu;
drivers/hwtracing/coresight/coresight-trbe.c
1402
for_each_cpu(cpu, &drvdata->supported_cpus) {
drivers/hwtracing/coresight/coresight-trbe.c
1403
smp_call_function_single(cpu, arm_trbe_disable_cpu, drvdata, 1);
drivers/hwtracing/coresight/coresight-trbe.c
1404
arm_trbe_remove_coresight_cpu(drvdata, cpu);
drivers/hwtracing/coresight/coresight-trbe.c
1417
static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
drivers/hwtracing/coresight/coresight-trbe.c
1421
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
drivers/hwtracing/coresight/coresight-trbe.c
1427
if (!coresight_get_percpu_sink(cpu)) {
drivers/hwtracing/coresight/coresight-trbe.c
1429
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
drivers/hwtracing/coresight/coresight-trbe.c
1430
arm_trbe_register_coresight_cpu(drvdata, cpu);
drivers/hwtracing/coresight/coresight-trbe.c
1431
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
drivers/hwtracing/coresight/coresight-trbe.c
1440
static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
drivers/hwtracing/coresight/coresight-trbe.c
1444
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
drivers/hwtracing/coresight/coresight-trbe.c
207
if (event->cpu == -1)
drivers/hwtracing/coresight/coresight-trbe.c
209
return cpu_to_node(event->cpu);
drivers/hwtracing/coresight/coresight-trbe.c
799
WARN_ON(cpudata->cpu != smp_processor_id());
drivers/hwtracing/coresight/ultrasoc-smb.c
313
node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
drivers/hwtracing/ptt/hisi_ptt.c
1004
if (event->cpu < 0) {
drivers/hwtracing/ptt/hisi_ptt.c
1089
int cpu = event->cpu;
drivers/hwtracing/ptt/hisi_ptt.c
1107
ret = irq_set_affinity(hisi_ptt->trace_irq, cpumask_of(cpu));
drivers/hwtracing/ptt/hisi_ptt.c
1111
hisi_ptt->trace_ctrl.on_cpu = cpu;
drivers/hwtracing/ptt/hisi_ptt.c
1165
int cpu = event->cpu;
drivers/hwtracing/ptt/hisi_ptt.c
1168
if (!cpumask_test_cpu(cpu, cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev))))
drivers/hwtracing/ptt/hisi_ptt.c
1383
static int hisi_ptt_cpu_teardown(unsigned int cpu, struct hlist_node *node)
drivers/hwtracing/ptt/hisi_ptt.c
1393
if (!hisi_ptt->trace_ctrl.started || src != cpu)
drivers/hwtracing/ptt/hisi_ptt.c
1396
target = cpumask_any_but(cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev)), cpu);
drivers/hwtracing/stm/ftrace.c
42
unsigned int cpu = smp_processor_id();
drivers/hwtracing/stm/ftrace.c
44
stm_source_write(&stm->data, STM_FTRACE_CHAN + cpu, buf, len);
drivers/idle/intel_idle.c
1761
unsigned int cpu;
drivers/idle/intel_idle.c
1768
for_each_possible_cpu(cpu) {
drivers/idle/intel_idle.c
1769
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/idle/intel_idle.c
1774
if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table))
drivers/idle/intel_idle.c
1903
int cpu, package_num, num_sockets = 1;
drivers/idle/intel_idle.c
1905
for_each_online_cpu(cpu) {
drivers/idle/intel_idle.c
1906
package_num = topology_physical_package_id(cpu);
drivers/idle/intel_idle.c
2274
static int intel_idle_cpu_init(unsigned int cpu)
drivers/idle/intel_idle.c
2278
dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
drivers/idle/intel_idle.c
2279
dev->cpu = cpu;
drivers/idle/intel_idle.c
2282
pr_debug("cpuidle_register_device %d failed!\n", cpu);
drivers/idle/intel_idle.c
2297
static int intel_idle_cpu_online(unsigned int cpu)
drivers/idle/intel_idle.c
2309
dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
drivers/idle/intel_idle.c
2311
return intel_idle_cpu_init(cpu);
drivers/infiniband/hw/efa/efa_main.c
146
u32 cpu;
drivers/infiniband/hw/efa/efa_main.c
148
cpu = vector - EFA_COMP_EQS_VEC_BASE;
drivers/infiniband/hw/efa/efa_main.c
149
snprintf(eq->irq.name, EFA_IRQNAME_SIZE, "efa-comp%d@pci:%s", cpu,
drivers/infiniband/hw/efa/efa_main.c
155
cpumask_set_cpu(cpu, &eq->irq.affinity_hint_mask);
drivers/infiniband/hw/efa/efa_main.c
166
u32 cpu;
drivers/infiniband/hw/efa/efa_main.c
175
cpu = cpumask_first(cpu_online_mask);
drivers/infiniband/hw/efa/efa_main.c
176
cpumask_set_cpu(cpu,
drivers/infiniband/hw/hfi1/affinity.c
1137
cpu = cpumask_first(available_mask);
drivers/infiniband/hw/hfi1/affinity.c
1138
if (cpu >= nr_cpu_ids) /* empty */
drivers/infiniband/hw/hfi1/affinity.c
1139
cpu = -1;
drivers/infiniband/hw/hfi1/affinity.c
1141
cpumask_set_cpu(cpu, &set->used);
drivers/infiniband/hw/hfi1/affinity.c
1144
hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);
drivers/infiniband/hw/hfi1/affinity.c
1154
return cpu;
drivers/infiniband/hw/hfi1/affinity.c
1157
void hfi1_put_proc_affinity(int cpu)
drivers/infiniband/hw/hfi1/affinity.c
1162
if (cpu < 0)
drivers/infiniband/hw/hfi1/affinity.c
1166
cpu_mask_set_put(set, cpu);
drivers/infiniband/hw/hfi1/affinity.c
1167
hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
drivers/infiniband/hw/hfi1/affinity.c
317
int cpu;
drivers/infiniband/hw/hfi1/affinity.c
322
cpu = -1;
drivers/infiniband/hw/hfi1/affinity.c
327
cpu = -1;
drivers/infiniband/hw/hfi1/affinity.c
340
cpu = cpumask_first(non_intr_cpus);
drivers/infiniband/hw/hfi1/affinity.c
343
if (cpu >= nr_cpu_ids)
drivers/infiniband/hw/hfi1/affinity.c
344
cpu = cpumask_first(available_cpus);
drivers/infiniband/hw/hfi1/affinity.c
346
if (cpu >= nr_cpu_ids) { /* empty */
drivers/infiniband/hw/hfi1/affinity.c
347
cpu = -1;
drivers/infiniband/hw/hfi1/affinity.c
350
cpumask_set_cpu(cpu, &set->used);
drivers/infiniband/hw/hfi1/affinity.c
353
return cpu;
drivers/infiniband/hw/hfi1/affinity.c
356
static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
drivers/infiniband/hw/hfi1/affinity.c
360
if (cpu < 0)
drivers/infiniband/hw/hfi1/affinity.c
363
cpu_mask_set_put(set, cpu);
drivers/infiniband/hw/hfi1/affinity.c
369
int i, cpu;
drivers/infiniband/hw/hfi1/affinity.c
375
cpu = dd->comp_vect_mappings[i];
drivers/infiniband/hw/hfi1/affinity.c
376
_dev_comp_vect_cpu_put(dd, cpu);
drivers/infiniband/hw/hfi1/affinity.c
380
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i);
drivers/infiniband/hw/hfi1/affinity.c
395
int i, cpu, ret;
drivers/infiniband/hw/hfi1/affinity.c
419
cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus,
drivers/infiniband/hw/hfi1/affinity.c
421
if (cpu < 0) {
drivers/infiniband/hw/hfi1/affinity.c
426
dd->comp_vect_mappings[i] = cpu;
drivers/infiniband/hw/hfi1/affinity.c
429
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
drivers/infiniband/hw/hfi1/affinity.c
554
int i, cpu;
drivers/infiniband/hw/hfi1/affinity.c
561
cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask,
drivers/infiniband/hw/hfi1/affinity.c
564
if (cpu >= 0)
drivers/infiniband/hw/hfi1/affinity.c
565
cpumask_clear_cpu(cpu, &dd->comp_vect->mask);
drivers/infiniband/hw/hfi1/affinity.c
64
int cpu;
drivers/infiniband/hw/hfi1/affinity.c
730
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
drivers/infiniband/hw/hfi1/affinity.c
738
if (cpu > num_online_cpus() || cpu == sde->cpu)
drivers/infiniband/hw/hfi1/affinity.c
74
cpu = cpumask_first(diff);
drivers/infiniband/hw/hfi1/affinity.c
746
old_cpu = sde->cpu;
drivers/infiniband/hw/hfi1/affinity.c
747
sde->cpu = cpu;
drivers/infiniband/hw/hfi1/affinity.c
749
cpumask_set_cpu(cpu, &msix->mask);
drivers/infiniband/hw/hfi1/affinity.c
75
if (cpu >= nr_cpu_ids) /* empty */
drivers/infiniband/hw/hfi1/affinity.c
752
sde->this_idx, cpu);
drivers/infiniband/hw/hfi1/affinity.c
76
cpu = -EINVAL;
drivers/infiniband/hw/hfi1/affinity.c
760
cpumask_set_cpu(cpu, &set->mask);
drivers/infiniband/hw/hfi1/affinity.c
761
cpumask_set_cpu(cpu, &set->used);
drivers/infiniband/hw/hfi1/affinity.c
78
cpumask_set_cpu(cpu, &set->used);
drivers/infiniband/hw/hfi1/affinity.c
781
int cpu = cpumask_first(mask);
drivers/infiniband/hw/hfi1/affinity.c
787
hfi1_update_sdma_affinity(msix, cpu);
drivers/infiniband/hw/hfi1/affinity.c
80
return cpu;
drivers/infiniband/hw/hfi1/affinity.c
83
static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
drivers/infiniband/hw/hfi1/affinity.c
833
int cpu = -1;
drivers/infiniband/hw/hfi1/affinity.c
847
cpu = cpumask_first(&entry->general_intr_mask);
drivers/infiniband/hw/hfi1/affinity.c
852
cpu = cpumask_first(&entry->general_intr_mask);
drivers/infiniband/hw/hfi1/affinity.c
872
if (cpu == -1 && set) {
drivers/infiniband/hw/hfi1/affinity.c
876
cpu = cpu_mask_set_get_first(set, diff);
drivers/infiniband/hw/hfi1/affinity.c
877
if (cpu < 0) {
drivers/infiniband/hw/hfi1/affinity.c
88
cpumask_clear_cpu(cpu, &set->used);
drivers/infiniband/hw/hfi1/affinity.c
880
return cpu;
drivers/infiniband/hw/hfi1/affinity.c
886
cpumask_set_cpu(cpu, &msix->mask);
drivers/infiniband/hw/hfi1/affinity.c
889
extra, cpu);
drivers/infiniband/hw/hfi1/affinity.c
893
sde->cpu = cpu;
drivers/infiniband/hw/hfi1/affinity.c
978
int cpu = -1, ret, i;
drivers/infiniband/hw/hfi1/affinity.c
998
cpu = cpumask_first(proc_mask);
drivers/infiniband/hw/hfi1/affinity.c
999
cpumask_set_cpu(cpu, &set->used);
drivers/infiniband/hw/hfi1/affinity.h
57
void hfi1_put_proc_affinity(int cpu);
drivers/infiniband/hw/hfi1/chip.c
1599
int cpu;
drivers/infiniband/hw/hfi1/chip.c
1602
for_each_possible_cpu(cpu)
drivers/infiniband/hw/hfi1/chip.c
1603
counter += *per_cpu_ptr(cntr, cpu);
drivers/infiniband/hw/hfi1/iowait.h
149
struct workqueue_struct *wq, int cpu)
drivers/infiniband/hw/hfi1/iowait.h
151
return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_IB_SE].iowork);
drivers/infiniband/hw/hfi1/iowait.h
161
struct workqueue_struct *wq, int cpu)
drivers/infiniband/hw/hfi1/iowait.h
163
return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_TID_SE].iowork);
drivers/infiniband/hw/hfi1/pio.c
633
int cpu;
drivers/infiniband/hw/hfi1/pio.c
636
for_each_possible_cpu(cpu)
drivers/infiniband/hw/hfi1/pio.c
637
ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
drivers/infiniband/hw/hfi1/pio.c
643
int cpu;
drivers/infiniband/hw/hfi1/pio.c
645
for_each_possible_cpu(cpu)
drivers/infiniband/hw/hfi1/pio.c
646
(*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
drivers/infiniband/hw/hfi1/qp.c
336
priv->s_sde->cpu :
drivers/infiniband/hw/hfi1/ruc.c
436
workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
drivers/infiniband/hw/hfi1/ruc.c
547
ps.cpu = priv->s_sde ? priv->s_sde->cpu :
drivers/infiniband/hw/hfi1/sdma.c
1000
rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
drivers/infiniband/hw/hfi1/sdma.c
2164
sde->cpu,
drivers/infiniband/hw/hfi1/sdma.c
2374
queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
drivers/infiniband/hw/hfi1/sdma.c
2472
queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
drivers/infiniband/hw/hfi1/sdma.c
898
unsigned long cpu;
drivers/infiniband/hw/hfi1/sdma.c
930
for_each_cpu(cpu, mask) {
drivers/infiniband/hw/hfi1/sdma.c
932
if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
drivers/infiniband/hw/hfi1/sdma.c
933
cpumask_set_cpu(cpu, new_mask);
drivers/infiniband/hw/hfi1/sdma.c
937
rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
drivers/infiniband/hw/hfi1/sdma.c
952
rht_node->cpu_id = cpu;
drivers/infiniband/hw/hfi1/sdma.c
964
cpu);
drivers/infiniband/hw/hfi1/sdma.c
989
cpumask_set_cpu(cpu, new_mask);
drivers/infiniband/hw/hfi1/sdma.c
993
for_each_online_cpu(cpu) {
drivers/infiniband/hw/hfi1/sdma.c
997
if (cpumask_test_cpu(cpu, mask))
drivers/infiniband/hw/hfi1/sdma.h
296
int cpu;
drivers/infiniband/hw/hfi1/tid_rdma.c
5376
ps.cpu = priv->s_sde ? priv->s_sde->cpu :
drivers/infiniband/hw/hfi1/tid_rdma.c
5430
priv->s_sde->cpu :
drivers/infiniband/hw/hfi1/tid_rdma.c
628
priv->s_sde->cpu :
drivers/infiniband/hw/hfi1/trace_tx.h
935
__entry->sde ? __entry->sde->cpu : 0,
drivers/infiniband/hw/hfi1/verbs.h
199
int cpu;
drivers/infiniband/sw/siw/siw.h
537
void siw_put_tx_cpu(int cpu);
drivers/infiniband/sw/siw/siw_main.c
163
int i, num_cpus, cpu, min_use, node = sdev->numa_node, tx_cpu = -1;
drivers/infiniband/sw/siw/siw_main.c
179
cpu = cpumask_first(tx_cpumask);
drivers/infiniband/sw/siw/siw_main.c
182
i++, cpu = cpumask_next(cpu, tx_cpumask)) {
drivers/infiniband/sw/siw/siw_main.c
186
if (!siw_tx_thread[cpu])
drivers/infiniband/sw/siw/siw_main.c
189
usage = atomic_read(&per_cpu(siw_use_cnt, cpu));
drivers/infiniband/sw/siw/siw_main.c
191
tx_cpu = cpu;
drivers/infiniband/sw/siw/siw_main.c
207
void siw_put_tx_cpu(int cpu)
drivers/infiniband/sw/siw/siw_main.c
209
atomic_dec(&per_cpu(siw_use_cnt, cpu));
drivers/infiniband/sw/siw/siw_qp_tx.c
1205
int cpu, assigned = 0;
drivers/infiniband/sw/siw/siw_qp_tx.c
1207
for_each_online_cpu(cpu) {
drivers/infiniband/sw/siw/siw_qp_tx.c
1211
if (cpu % cpumask_weight(topology_sibling_cpumask(cpu)))
drivers/infiniband/sw/siw/siw_qp_tx.c
1214
tx_task = &per_cpu(siw_tx_task_g, cpu);
drivers/infiniband/sw/siw/siw_qp_tx.c
1218
siw_tx_thread[cpu] =
drivers/infiniband/sw/siw/siw_qp_tx.c
1220
(unsigned long *)(long)cpu,
drivers/infiniband/sw/siw/siw_qp_tx.c
1221
cpu, "siw_tx/%u");
drivers/infiniband/sw/siw/siw_qp_tx.c
1222
if (IS_ERR(siw_tx_thread[cpu])) {
drivers/infiniband/sw/siw/siw_qp_tx.c
1223
siw_tx_thread[cpu] = NULL;
drivers/infiniband/sw/siw/siw_qp_tx.c
1233
int cpu;
drivers/infiniband/sw/siw/siw_qp_tx.c
1235
for_each_possible_cpu(cpu) {
drivers/infiniband/sw/siw/siw_qp_tx.c
1236
if (siw_tx_thread[cpu]) {
drivers/infiniband/sw/siw/siw_qp_tx.c
1237
kthread_stop(siw_tx_thread[cpu]);
drivers/infiniband/sw/siw/siw_qp_tx.c
1238
wake_up(&per_cpu(siw_tx_task_g, cpu).waiting);
drivers/infiniband/sw/siw/siw_qp_tx.c
1239
siw_tx_thread[cpu] = NULL;
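[annotation] The siw entries above create one TX kthread per online CPU and tear the pool down with kthread_stop(). A sketch of that shape, under the assumption of a simple sleep/wake loop; names are illustrative, not the siw symbols:

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/cpumask.h>
	#include <linux/err.h>
	#include <linux/errno.h>

	static struct task_struct *demo_tx_thread[NR_CPUS];

	static int demo_tx_fn(void *data)
	{
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (kthread_should_stop())
				break;
			schedule();	/* real code would drain a per-CPU queue */
		}
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	static int demo_tx_start(void)
	{
		int cpu, started = 0;

		for_each_online_cpu(cpu) {
			struct task_struct *t;

			t = kthread_create_on_cpu(demo_tx_fn, NULL, cpu,
						  "demo_tx/%u");
			if (IS_ERR(t))
				continue;	/* degrade gracefully, as siw does */
			demo_tx_thread[cpu] = t;
			wake_up_process(t);
			started++;
		}
		return started ? 0 : -ENODEV;
	}

	static void demo_tx_stop(void)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			if (demo_tx_thread[cpu]) {
				kthread_stop(demo_tx_thread[cpu]);
				demo_tx_thread[cpu] = NULL;
			}
		}
	}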
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
113
int cpu;
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
118
for_each_possible_cpu(cpu) {
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
119
s = per_cpu_ptr(stats->pcpu_stats, cpu);
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
129
int cpu;
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
134
for_each_possible_cpu(cpu) {
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
135
s = per_cpu_ptr(stats->pcpu_stats, cpu);
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
19
int cpu;
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
21
cpu = raw_smp_processor_id();
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
23
if (con->cpu != cpu) {
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
27
s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
43
int cpu;
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
46
for_each_possible_cpu(cpu) {
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
47
s = per_cpu_ptr(stats->pcpu_stats, cpu);
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
62
int cpu;
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
65
for_each_possible_cpu(cpu) {
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
66
s = per_cpu_ptr(stats->pcpu_stats, cpu);
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
85
int cpu;
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
89
for_each_possible_cpu(cpu) {
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
90
r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;
drivers/infiniband/ulp/rtrs/rtrs-clt.c
1536
int cpu;
drivers/infiniband/ulp/rtrs/rtrs-clt.c
1588
for_each_possible_cpu(cpu)
drivers/infiniband/ulp/rtrs/rtrs-clt.c
1589
INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu));
drivers/infiniband/ulp/rtrs/rtrs-clt.c
1627
con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
drivers/infiniband/ulp/rtrs/rtrs-clt.c
1721
cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
drivers/infiniband/ulp/rtrs/rtrs-clt.c
2245
int cpu;
drivers/infiniband/ulp/rtrs/rtrs-clt.c
2296
for_each_possible_cpu(cpu) {
drivers/infiniband/ulp/rtrs/rtrs-clt.c
2299
ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
drivers/infiniband/ulp/rtrs/rtrs-clt.h
75
unsigned int cpu;
drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
17
int cpu;
drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
20
for_each_possible_cpu(cpu) {
drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
21
r = per_cpu_ptr(stats->rdma_stats, cpu);
drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
33
int cpu;
drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
39
for_each_possible_cpu(cpu) {
drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
40
r = per_cpu_ptr(stats->rdma_stats, cpu);
drivers/infiniband/ulp/srpt/ib_srpt.c
1286
int tag, cpu;
drivers/infiniband/ulp/srpt/ib_srpt.c
1290
tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
drivers/infiniband/ulp/srpt/ib_srpt.c
1308
ioctx->cmd.map_cpu = cpu;
drivers/interconnect/icc-kunit.c
188
struct icc_node *cpu = get_node(priv, NODE_CPU);
drivers/interconnect/icc-kunit.c
191
KUNIT_EXPECT_EQ(test, cpu->num_links, 1);
drivers/interconnect/icc-kunit.c
192
KUNIT_EXPECT_PTR_EQ(test, cpu->links[0], bus);
drivers/interconnect/icc-kunit.c
193
KUNIT_EXPECT_PTR_EQ(test, cpu->provider, &priv->provider);
drivers/iommu/amd/amd_iommu.h
40
int amd_iommu_enable_faulting(unsigned int cpu);
drivers/iommu/amd/init.c
3546
int amd_iommu_enable_faulting(unsigned int cpu)
drivers/iommu/amd/iommu.c
3934
static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu,
drivers/iommu/amd/iommu.c
3937
if (cpu >= 0) {
drivers/iommu/amd/iommu.c
3939
APICID_TO_IRTE_DEST_LO(cpu);
drivers/iommu/amd/iommu.c
3941
APICID_TO_IRTE_DEST_HI(cpu);
drivers/iommu/amd/iommu.c
3967
int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
drivers/iommu/amd/iommu.c
3981
__amd_iommu_update_ga(entry, cpu, ga_log_intr);
drivers/iommu/amd/iommu.c
3988
int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
drivers/iommu/amd/iommu.c
4011
__amd_iommu_update_ga(entry, cpu, ga_log_intr);
drivers/iommu/amd/iommu.c
4082
ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu,
drivers/iommu/dma-iommu.c
184
int cpu;
drivers/iommu/dma-iommu.c
192
for_each_possible_cpu(cpu)
drivers/iommu/dma-iommu.c
193
fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu));
drivers/iommu/dma-iommu.c
260
int cpu, idx;
drivers/iommu/dma-iommu.c
263
for_each_possible_cpu(cpu) {
drivers/iommu/dma-iommu.c
264
struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);
drivers/iommu/dma-iommu.c
318
int cpu;
drivers/iommu/dma-iommu.c
325
for_each_possible_cpu(cpu)
drivers/iommu/dma-iommu.c
326
iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu), fq_size);
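[annotation] The dma-iommu entries above show the allocate/initialize/free lifecycle of a per-CPU structure (here, flush queues). A minimal sketch of that lifecycle with an illustrative struct, not the iova_fq layout:

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	struct demo_fq {
		unsigned int head, tail;
	};

	static struct demo_fq __percpu *demo_fq_init(void)
	{
		struct demo_fq __percpu *queue;
		int cpu;

		queue = alloc_percpu(struct demo_fq);
		if (!queue)
			return NULL;

		/* Initialize every possible CPU's instance up front. */
		for_each_possible_cpu(cpu) {
			struct demo_fq *fq = per_cpu_ptr(queue, cpu);

			fq->head = fq->tail = 0;
		}
		return queue;
	}

	static void demo_fq_destroy(struct demo_fq __percpu *queue)
	{
		free_percpu(queue);
	}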
drivers/iommu/fsl_pamu_domain.c
353
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
drivers/iommu/fsl_pamu_domain.c
360
dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
drivers/iommu/hyperv-iommu.c
201
int cpu, ioapic_id;
drivers/iommu/hyperv-iommu.c
206
cpu = cpumask_first_and(affinity, cpu_online_mask);
drivers/iommu/hyperv-iommu.c
222
if (hv_map_ioapic_interrupt(ioapic_id, data->is_level, cpu,
drivers/iommu/intel/dmar.c
2038
int enable_drhd_fault_handling(unsigned int cpu)
drivers/iommu/intel/dmar.c
2051
if (iommu->irq || iommu->node != cpu_to_node(cpu))
drivers/iommu/intel/irq_remapping.c
1125
int cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
drivers/iommu/intel/irq_remapping.c
1127
if (WARN_ON(cpu >= nr_cpu_ids))
drivers/iommu/intel/irq_remapping.c
1130
return __pa(per_cpu_ptr(&posted_msi_pi_desc, cpu));
drivers/iommu/intel/perfmon.c
290
if (event->cpu < 0)
drivers/iommu/iova.c
29
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
drivers/iommu/iova.c
399
unsigned int cpu;
drivers/iommu/iova.c
406
for_each_online_cpu(cpu)
drivers/iommu/iova.c
407
free_cpu_cached_iovas(cpu, iovad);
drivers/iommu/iova.c
715
unsigned int cpu;
drivers/iommu/iova.c
737
for_each_possible_cpu(cpu) {
drivers/iommu/iova.c
738
cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
drivers/iommu/iova.c
880
unsigned int cpu;
drivers/iommu/iova.c
886
for_each_possible_cpu(cpu) {
drivers/iommu/iova.c
887
cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
drivers/iommu/iova.c
904
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
drivers/iommu/iova.c
913
cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
drivers/iommu/iova.c
942
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
drivers/iommu/iova.c
948
free_cpu_cached_iovas(cpu, iovad);
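[annotation] iova_cpuhp_dead() above is a multi-instance CPU-hotplug "dead" callback: one handler serves many registered domains via the hlist_node cookie, draining the departed CPU's cache for each. A sketch under that assumption; the types and helper are illustrative stand-ins:

	#include <linux/cpuhotplug.h>
	#include <linux/list.h>

	struct demo_domain {
		struct hlist_node cpuhp;	/* registration cookie */
		/* per-CPU caches would hang off here */
	};

	static void demo_free_cpu_cached(unsigned int cpu, struct demo_domain *dom)
	{
		/* real code would flush the dead CPU's cached entries */
	}

	static int demo_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
	{
		struct demo_domain *dom =
			hlist_entry(node, struct demo_domain, cpuhp);

		demo_free_cpu_cached(cpu, dom);
		return 0;
	}

	/* Registered once with cpuhp_setup_state_multi(..., NULL,
	 * demo_cpuhp_dead), then one instance per domain via
	 * cpuhp_state_add_instance_nocalls(). */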
drivers/irqchip/irq-aclint-sswi.c
22
static void aclint_sswi_ipi_send(unsigned int cpu)
drivers/irqchip/irq-aclint-sswi.c
24
writel(0x1, per_cpu(sswi_cpu_regs, cpu));
drivers/irqchip/irq-aclint-sswi.c
46
static int aclint_sswi_starting_cpu(unsigned int cpu)
drivers/irqchip/irq-aclint-sswi.c
53
static int aclint_sswi_dying_cpu(unsigned int cpu)
drivers/irqchip/irq-aclint-sswi.c
75
int rc, cpu;
drivers/irqchip/irq-aclint-sswi.c
88
cpu = riscv_hartid_to_cpuid(hartid);
drivers/irqchip/irq-aclint-sswi.c
95
per_cpu(sswi_cpu_regs, cpu) = reg + hart_index * 4;
drivers/irqchip/irq-apple-aic.c
100
#define AIC_CPU_IPI_SET(cpu) (0x5008 + ((cpu) << 7))
drivers/irqchip/irq-apple-aic.c
101
#define AIC_CPU_IPI_CLR(cpu) (0x500c + ((cpu) << 7))
drivers/irqchip/irq-apple-aic.c
102
#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
drivers/irqchip/irq-apple-aic.c
103
#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))
drivers/irqchip/irq-apple-aic.c
433
int cpu;
drivers/irqchip/irq-apple-aic.c
438
cpu = cpumask_first(mask_val);
drivers/irqchip/irq-apple-aic.c
440
cpu = cpumask_any_and(mask_val, cpu_online_mask);
drivers/irqchip/irq-apple-aic.c
442
aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
drivers/irqchip/irq-apple-aic.c
443
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-apple-aic.c
779
static void aic_ipi_send_fast(int cpu)
drivers/irqchip/irq-apple-aic.c
781
u64 mpidr = cpu_logical_map(cpu);
drivers/irqchip/irq-apple-aic.c
821
static void aic_ipi_send_single(unsigned int cpu)
drivers/irqchip/irq-apple-aic.c
824
aic_ipi_send_fast(cpu);
drivers/irqchip/irq-apple-aic.c
826
aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu));
drivers/irqchip/irq-apple-aic.c
842
static int aic_init_cpu(unsigned int cpu)
drivers/irqchip/irq-apple-aic.c
93
#define AIC_IPI_SEND_CPU(cpu) BIT(cpu)
drivers/irqchip/irq-apple-aic.c
931
int cpu;
drivers/irqchip/irq-apple-aic.c
940
cpu = of_cpu_node_to_id(cpu_node);
drivers/irqchip/irq-apple-aic.c
942
if (WARN_ON(cpu < 0))
drivers/irqchip/irq-apple-aic.c
945
cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
drivers/irqchip/irq-armada-370-xp.c
135
#define MPIC_INT_CAUSE_PERF(cpu) BIT(cpu)
drivers/irqchip/irq-armada-370-xp.c
238
unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
drivers/irqchip/irq-armada-370-xp.c
243
msg->data = BIT(cpu + 8) | (d->hwirq + mpic->msi_doorbell_start);
drivers/irqchip/irq-armada-370-xp.c
248
unsigned int cpu;
drivers/irqchip/irq-armada-370-xp.c
251
cpu = cpumask_any_and(mask, cpu_online_mask);
drivers/irqchip/irq-armada-370-xp.c
253
cpu = cpumask_first(mask);
drivers/irqchip/irq-armada-370-xp.c
255
if (cpu >= nr_cpu_ids)
drivers/irqchip/irq-armada-370-xp.c
258
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-armada-370-xp.c
424
unsigned int cpu;
drivers/irqchip/irq-armada-370-xp.c
428
for_each_cpu(cpu, mask)
drivers/irqchip/irq-armada-370-xp.c
429
map |= BIT(cpu_logical_map(cpu));
drivers/irqchip/irq-armada-370-xp.c
516
unsigned int cpu;
drivers/irqchip/irq-armada-370-xp.c
519
cpu = cpumask_any_and(mask_val, cpu_online_mask);
drivers/irqchip/irq-armada-370-xp.c
522
MPIC_INT_SOURCE_CPU_MASK, BIT(cpu_logical_map(cpu)));
drivers/irqchip/irq-armada-370-xp.c
524
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-armada-370-xp.c
567
static int mpic_starting_cpu(unsigned int cpu)
drivers/irqchip/irq-armada-370-xp.c
578
static int mpic_cascaded_starting_cpu(unsigned int cpu)
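[annotation] The irqchip entries around here (armada-370-xp, and later csky, gic, hip04, ls-scfg, plic) repeat one set_affinity shape: pick a single online CPU from the requested mask, program the hardware route, then record the effective affinity. A sketch of that shape with the register write stubbed out:

	#include <linux/irq.h>
	#include <linux/cpumask.h>
	#include <linux/errno.h>

	static int demo_set_affinity(struct irq_data *d,
				     const struct cpumask *mask_val, bool force)
	{
		unsigned int cpu;

		if (force)
			cpu = cpumask_first(mask_val);	/* trust the caller */
		else
			cpu = cpumask_any_and(mask_val, cpu_online_mask);

		if (cpu >= nr_cpu_ids)
			return -EINVAL;

		/* program the route register for this hwirq/CPU here */

		irq_data_update_effective_affinity(d, cpumask_of(cpu));
		return IRQ_SET_MASK_OK;
	}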
drivers/irqchip/irq-bcm2836.c
142
int cpu = smp_processor_id();
drivers/irqchip/irq-bcm2836.c
145
stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu);
drivers/irqchip/irq-bcm2836.c
159
int cpu = smp_processor_id();
drivers/irqchip/irq-bcm2836.c
164
mbox_val = readl_relaxed(intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu);
drivers/irqchip/irq-bcm2836.c
175
int cpu = smp_processor_id();
drivers/irqchip/irq-bcm2836.c
178
intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu);
drivers/irqchip/irq-bcm2836.c
184
int cpu;
drivers/irqchip/irq-bcm2836.c
193
for_each_cpu(cpu, mask)
drivers/irqchip/irq-bcm2836.c
194
writel_relaxed(BIT(d->hwirq), mailbox0_base + 16 * cpu);
drivers/irqchip/irq-bcm2836.c
234
static int bcm2836_cpu_starting(unsigned int cpu)
drivers/irqchip/irq-bcm2836.c
237
cpu);
drivers/irqchip/irq-bcm2836.c
241
static int bcm2836_cpu_dying(unsigned int cpu)
drivers/irqchip/irq-bcm2836.c
244
cpu);
drivers/irqchip/irq-bcm2836.c
27
int cpu)
drivers/irqchip/irq-bcm2836.c
29
void __iomem *reg = intc.base + reg_offset + 4 * cpu;
drivers/irqchip/irq-bcm2836.c
36
int cpu)
drivers/irqchip/irq-bcm2836.c
38
void __iomem *reg = intc.base + reg_offset + 4 * cpu;
drivers/irqchip/irq-bcm6345-l1.c
118
struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc);
drivers/irqchip/irq-bcm6345-l1.c
119
struct bcm6345_l1_chip *intc = cpu->intc;
drivers/irqchip/irq-bcm6345-l1.c
130
pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
drivers/irqchip/irq-bcm6345-l1.c
131
pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));
drivers/irqchip/irq-bcm6345-l1.c
228
struct bcm6345_l1_cpu *cpu;
drivers/irqchip/irq-bcm6345-l1.c
241
cpu = intc->cpus[idx] = kzalloc_flex(*cpu, enable_cache, n_words);
drivers/irqchip/irq-bcm6345-l1.c
242
if (!cpu)
drivers/irqchip/irq-bcm6345-l1.c
245
cpu->intc = intc;
drivers/irqchip/irq-bcm6345-l1.c
246
cpu->map_base = ioremap(res.start, sz);
drivers/irqchip/irq-bcm6345-l1.c
247
if (!cpu->map_base)
drivers/irqchip/irq-bcm6345-l1.c
254
cpu->enable_cache[i] = 0;
drivers/irqchip/irq-bcm6345-l1.c
255
__raw_writel(0, cpu->map_base + reg_enable(intc, i));
drivers/irqchip/irq-bcm6345-l1.c
258
cpu->parent_irq = irq_of_parse_and_map(dn, idx);
drivers/irqchip/irq-bcm6345-l1.c
259
if (!cpu->parent_irq) {
drivers/irqchip/irq-bcm6345-l1.c
260
pr_err("failed to map parent interrupt %d\n", cpu->parent_irq);
drivers/irqchip/irq-bcm6345-l1.c
263
irq_set_chained_handler_and_data(cpu->parent_irq,
drivers/irqchip/irq-bcm6345-l1.c
264
bcm6345_l1_irq_handle, cpu);
drivers/irqchip/irq-bcm6345-l1.c
329
struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
drivers/irqchip/irq-bcm6345-l1.c
331
pr_info(" CPU%u (irq = %d)\n", idx, cpu->parent_irq);
drivers/irqchip/irq-bcm6345-l1.c
338
struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
drivers/irqchip/irq-bcm6345-l1.c
340
if (cpu) {
drivers/irqchip/irq-bcm6345-l1.c
341
if (cpu->map_base)
drivers/irqchip/irq-bcm6345-l1.c
342
iounmap(cpu->map_base);
drivers/irqchip/irq-bcm6345-l1.c
343
kfree(cpu);
drivers/irqchip/irq-bcm7038-l1.c
116
struct bcm7038_l1_cpu *cpu;
drivers/irqchip/irq-bcm7038-l1.c
121
cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
drivers/irqchip/irq-bcm7038-l1.c
123
cpu = intc->cpus[0];
drivers/irqchip/irq-bcm7038-l1.c
134
pending = l1_readl(cpu->map_base + reg_status(intc, idx)) &
drivers/irqchip/irq-bcm7038-l1.c
135
~cpu->mask_cache[idx];
drivers/irqchip/irq-bcm7038-l1.c
221
struct bcm7038_l1_cpu *cpu;
drivers/irqchip/irq-bcm7038-l1.c
245
cpu = intc->cpus[idx] = kzalloc_flex(*cpu, mask_cache, n_words);
drivers/irqchip/irq-bcm7038-l1.c
246
if (!cpu)
drivers/irqchip/irq-bcm7038-l1.c
249
cpu->map_base = ioremap(res.start, sz);
drivers/irqchip/irq-bcm7038-l1.c
250
if (!cpu->map_base)
drivers/irqchip/irq-bcm7038-l1.c
255
cpu->map_base + reg_mask_set(intc, i));
drivers/irqchip/irq-bcm7038-l1.c
257
cpu->map_base + reg_mask_clr(intc, i));
drivers/irqchip/irq-bcm7038-l1.c
258
cpu->mask_cache[i] = ~intc->irq_fwd_mask[i];
drivers/irqchip/irq-bcm7038-l1.c
440
struct bcm7038_l1_cpu *cpu = intc->cpus[idx];
drivers/irqchip/irq-bcm7038-l1.c
442
if (cpu) {
drivers/irqchip/irq-bcm7038-l1.c
443
if (cpu->map_base)
drivers/irqchip/irq-bcm7038-l1.c
444
iounmap(cpu->map_base);
drivers/irqchip/irq-bcm7038-l1.c
445
kfree(cpu);
drivers/irqchip/irq-csky-mpintc.c
131
unsigned int cpu;
drivers/irqchip/irq-csky-mpintc.c
135
cpu = cpumask_any_and(mask_val, cpu_online_mask);
drivers/irqchip/irq-csky-mpintc.c
137
cpu = cpumask_first(mask_val);
drivers/irqchip/irq-csky-mpintc.c
139
if (cpu >= nr_cpu_ids)
drivers/irqchip/irq-csky-mpintc.c
152
cpu = 0;
drivers/irqchip/irq-csky-mpintc.c
154
cpu |= BIT(31);
drivers/irqchip/irq-csky-mpintc.c
156
writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset);
drivers/irqchip/irq-csky-mpintc.c
158
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-csky-mpintc.c
231
unsigned int cpu, nr_irq;
drivers/irqchip/irq-csky-mpintc.c
264
for_each_present_cpu(cpu) {
drivers/irqchip/irq-csky-mpintc.c
265
per_cpu(intcl_reg, cpu) = INTCL_base + (INTCL_SIZE * cpu);
drivers/irqchip/irq-csky-mpintc.c
266
writel_relaxed(BIT(0), per_cpu(intcl_reg, cpu) + INTCL_PICTLR);
drivers/irqchip/irq-gic-v3-its.c
1553
int cpu;
drivers/irqchip/irq-gic-v3-its.c
1556
cpu = irq_to_cpuid_lock(d, &flags);
drivers/irqchip/irq-gic-v3-its.c
1557
raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
drivers/irqchip/irq-gic-v3-its.c
1559
rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
drivers/irqchip/irq-gic-v3-its.c
1563
raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
drivers/irqchip/irq-gic-v3-its.c
1650
static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
drivers/irqchip/irq-gic-v3-its.c
1653
return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
drivers/irqchip/irq-gic-v3-its.c
1655
return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
drivers/irqchip/irq-gic-v3-its.c
1658
static void its_inc_lpi_count(struct irq_data *d, int cpu)
drivers/irqchip/irq-gic-v3-its.c
1661
atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
drivers/irqchip/irq-gic-v3-its.c
1663
atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
drivers/irqchip/irq-gic-v3-its.c
1666
static void its_dec_lpi_count(struct irq_data *d, int cpu)
drivers/irqchip/irq-gic-v3-its.c
1669
atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
drivers/irqchip/irq-gic-v3-its.c
1671
atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
drivers/irqchip/irq-gic-v3-its.c
1677
unsigned int cpu = nr_cpu_ids, tmp;
drivers/irqchip/irq-gic-v3-its.c
1683
cpu = tmp;
drivers/irqchip/irq-gic-v3-its.c
1688
return cpu;
drivers/irqchip/irq-gic-v3-its.c
1703
int cpu, node;
drivers/irqchip/irq-gic-v3-its.c
1731
cpu = cpumask_pick_least_loaded(d, tmpmask);
drivers/irqchip/irq-gic-v3-its.c
1732
if (cpu < nr_cpu_ids)
drivers/irqchip/irq-gic-v3-its.c
1749
cpu = cpumask_pick_least_loaded(d, tmpmask);
drivers/irqchip/irq-gic-v3-its.c
1758
cpu = cpumask_pick_least_loaded(d, tmpmask);
drivers/irqchip/irq-gic-v3-its.c
1763
pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
drivers/irqchip/irq-gic-v3-its.c
1764
return cpu;
drivers/irqchip/irq-gic-v3-its.c
1773
int cpu, prev_cpu;
drivers/irqchip/irq-gic-v3-its.c
1783
cpu = its_select_cpu(d, mask_val);
drivers/irqchip/irq-gic-v3-its.c
1785
cpu = cpumask_pick_least_loaded(d, mask_val);
drivers/irqchip/irq-gic-v3-its.c
1787
if (cpu < 0 || cpu >= nr_cpu_ids)
drivers/irqchip/irq-gic-v3-its.c
1791
if (cpu != prev_cpu) {
drivers/irqchip/irq-gic-v3-its.c
1792
target_col = &its_dev->its->collections[cpu];
drivers/irqchip/irq-gic-v3-its.c
1794
its_dev->event_map.col_map[id] = cpu;
drivers/irqchip/irq-gic-v3-its.c
1795
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-gic-v3-its.c
1798
its_inc_lpi_count(d, cpu);
drivers/irqchip/irq-gic-v3-its.c
207
#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
drivers/irqchip/irq-gic-v3-its.c
2795
int cpu;
drivers/irqchip/irq-gic-v3-its.c
2800
for_each_possible_cpu(cpu) {
drivers/irqchip/irq-gic-v3-its.c
2801
void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
drivers/irqchip/irq-gic-v3-its.c
2803
if (!base || cpu == smp_processor_id())
drivers/irqchip/irq-gic-v3-its.c
2819
gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
drivers/irqchip/irq-gic-v3-its.c
2820
*mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
drivers/irqchip/irq-gic-v3-its.c
2822
*this_cpu_ptr(&local_4_1_its) = *per_cpu_ptr(&local_4_1_its, cpu);
drivers/irqchip/irq-gic-v3-its.c
2829
static bool allocate_vpe_l2_table(int cpu, u32 id)
drivers/irqchip/irq-gic-v3-its.c
2831
void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
drivers/irqchip/irq-gic-v3-its.c
2874
table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
drivers/irqchip/irq-gic-v3-its.c
3076
int err, cpu;
drivers/irqchip/irq-gic-v3-its.c
3098
for_each_possible_cpu(cpu) {
drivers/irqchip/irq-gic-v3-its.c
3103
pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
drivers/irqchip/irq-gic-v3-its.c
3107
gic_data_rdist_cpu(cpu)->pend_page = pend_page;
drivers/irqchip/irq-gic-v3-its.c
3288
int cpu = smp_processor_id();
drivers/irqchip/irq-gic-v3-its.c
3295
cpu_node = of_get_cpu_node(cpu, NULL);
drivers/irqchip/irq-gic-v3-its.c
3318
its->collections[cpu].target_address = target;
drivers/irqchip/irq-gic-v3-its.c
3319
its->collections[cpu].col_id = cpu;
drivers/irqchip/irq-gic-v3-its.c
3321
its_send_mapc(its, &its->collections[cpu], 1);
drivers/irqchip/irq-gic-v3-its.c
3322
its_send_invall(its, &its->collections[cpu]);
drivers/irqchip/irq-gic-v3-its.c
3427
int cpu;
drivers/irqchip/irq-gic-v3-its.c
3458
for_each_possible_cpu(cpu) {
drivers/irqchip/irq-gic-v3-its.c
3459
if (!allocate_vpe_l2_table(cpu, vpe_id))
drivers/irqchip/irq-gic-v3-its.c
3727
int cpu;
drivers/irqchip/irq-gic-v3-its.c
3729
cpu = its_select_cpu(d, cpu_online_mask);
drivers/irqchip/irq-gic-v3-its.c
3730
if (cpu < 0 || cpu >= nr_cpu_ids)
drivers/irqchip/irq-gic-v3-its.c
3733
its_inc_lpi_count(d, cpu);
drivers/irqchip/irq-gic-v3-its.c
3734
its_dev->event_map.col_map[event] = cpu;
drivers/irqchip/irq-gic-v3-its.c
3735
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-gic-v3-its.c
375
int cpu;
drivers/irqchip/irq-gic-v3-its.c
386
cpu = vpe_to_cpuid_lock(vpe, flags);
drivers/irqchip/irq-gic-v3-its.c
3895
static void its_vpe_4_1_invall_locked(int cpu, struct its_vpe *vpe)
drivers/irqchip/irq-gic-v3-its.c
390
cpu = its_dev->event_map.col_map[its_get_event_id(d)];
drivers/irqchip/irq-gic-v3-its.c
3903
guard(raw_spinlock)(&gic_data_rdist_cpu(cpu)->rd_lock);
drivers/irqchip/irq-gic-v3-its.c
3904
rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
drivers/irqchip/irq-gic-v3-its.c
3914
unsigned int from, cpu = nr_cpu_ids;
drivers/irqchip/irq-gic-v3-its.c
3931
cpu = cpumask_first(mask_val);
drivers/irqchip/irq-gic-v3-its.c
3932
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-gic-v3-its.c
395
return cpu;
drivers/irqchip/irq-gic-v3-its.c
3964
cpu = cpumask_any_and(mask_val, table_mask);
drivers/irqchip/irq-gic-v3-its.c
3965
if (cpu < nr_cpu_ids) {
drivers/irqchip/irq-gic-v3-its.c
3968
cpu = from;
drivers/irqchip/irq-gic-v3-its.c
3970
cpu = cpumask_first(mask_val);
drivers/irqchip/irq-gic-v3-its.c
3973
if (from == cpu)
drivers/irqchip/irq-gic-v3-its.c
3976
vpe->col_idx = cpu;
drivers/irqchip/irq-gic-v3-its.c
3982
its_vpe_4_1_invall_locked(cpu, vpe);
drivers/irqchip/irq-gic-v3-its.c
3984
its_vpe_db_proxy_move(vpe, from, cpu);
drivers/irqchip/irq-gic-v3-its.c
3987
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-gic-v3-its.c
4292
int cpu;
drivers/irqchip/irq-gic-v3-its.c
4295
cpu = vpe_to_cpuid_lock(vpe, &flags);
drivers/irqchip/irq-gic-v3-its.c
4296
its_vpe_4_1_invall_locked(cpu, vpe);
drivers/irqchip/irq-gic-v3-its.c
4415
int cpu;
drivers/irqchip/irq-gic-v3-its.c
4430
cpu = vpe_to_cpuid_lock(vpe, &flags);
drivers/irqchip/irq-gic-v3-its.c
4431
raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
drivers/irqchip/irq-gic-v3-its.c
4432
base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
drivers/irqchip/irq-gic-v3-its.c
4449
raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
drivers/irqchip/irq-gic-v3-its.c
5447
static int its_cpu_memreserve_lpi(unsigned int cpu)
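[annotation] The ITS entries above track per-CPU LPI load with per-CPU atomics (its_inc_lpi_count / its_read_lpi_count) and pick the least-loaded CPU from a mask. A sketch of both halves with illustrative names, collapsing the ITS's managed/unmanaged split into one counter:

	#include <linux/percpu.h>
	#include <linux/atomic.h>
	#include <linux/cpumask.h>
	#include <linux/kernel.h>

	static DEFINE_PER_CPU(atomic_t, demo_lpi_count);

	static void demo_inc_lpi_count(int cpu)
	{
		atomic_inc(per_cpu_ptr(&demo_lpi_count, cpu));
	}

	static int demo_read_lpi_count(int cpu)
	{
		return atomic_read(per_cpu_ptr(&demo_lpi_count, cpu));
	}

	static unsigned int demo_pick_least_loaded(const struct cpumask *mask)
	{
		unsigned int cpu = nr_cpu_ids, tmp;
		int count = INT_MAX;

		for_each_cpu(tmp, mask) {
			int this = demo_read_lpi_count(tmp);

			if (this < count) {
				cpu = tmp;
				count = this;
			}
		}
		return cpu;	/* nr_cpu_ids if the mask was empty */
	}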
drivers/irqchip/irq-gic-v3.c
1155
int i, cpu = smp_processor_id();
drivers/irqchip/irq-gic-v3.c
1156
u64 mpidr = gic_cpu_to_affinity(cpu);
drivers/irqchip/irq-gic-v3.c
1235
per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
drivers/irqchip/irq-gic-v3.c
1239
bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
drivers/irqchip/irq-gic-v3.c
1244
cpu, (unsigned long)mpidr,
drivers/irqchip/irq-gic-v3.c
1312
static int gic_check_rdist(unsigned int cpu)
drivers/irqchip/irq-gic-v3.c
1314
if (cpumask_test_cpu(cpu, &broken_rdists))
drivers/irqchip/irq-gic-v3.c
1320
static int gic_starting_cpu(unsigned int cpu)
drivers/irqchip/irq-gic-v3.c
1334
int next_cpu, cpu = *base_cpu;
drivers/irqchip/irq-gic-v3.c
1338
mpidr = gic_cpu_to_affinity(cpu);
drivers/irqchip/irq-gic-v3.c
1340
while (cpu < nr_cpu_ids) {
drivers/irqchip/irq-gic-v3.c
1343
next_cpu = cpumask_next(cpu, mask);
drivers/irqchip/irq-gic-v3.c
1346
cpu = next_cpu;
drivers/irqchip/irq-gic-v3.c
1348
mpidr = gic_cpu_to_affinity(cpu);
drivers/irqchip/irq-gic-v3.c
1351
cpu--;
drivers/irqchip/irq-gic-v3.c
1356
*base_cpu = cpu;
drivers/irqchip/irq-gic-v3.c
1381
int cpu;
drivers/irqchip/irq-gic-v3.c
1392
for_each_cpu(cpu, mask) {
drivers/irqchip/irq-gic-v3.c
1393
u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
drivers/irqchip/irq-gic-v3.c
1396
tlist = gic_compute_target_list(&cpu, mask, cluster_id);
drivers/irqchip/irq-gic-v3.c
1431
unsigned int cpu;
drivers/irqchip/irq-gic-v3.c
1438
cpu = cpumask_first(mask_val);
drivers/irqchip/irq-gic-v3.c
1440
cpu = cpumask_any_and(mask_val, cpu_online_mask);
drivers/irqchip/irq-gic-v3.c
1442
if (cpu >= nr_cpu_ids)
drivers/irqchip/irq-gic-v3.c
1455
val = gic_cpu_to_affinity(cpu);
drivers/irqchip/irq-gic-v3.c
1466
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-gic-v3.c
2113
int err, cpu;
drivers/irqchip/irq-gic-v3.c
2126
cpu = of_cpu_node_to_id(cpu_node);
drivers/irqchip/irq-gic-v3.c
2127
if (WARN_ON(cpu < 0)) {
drivers/irqchip/irq-gic-v3.c
2132
pr_cont("%pOF[%d] ", cpu_node, cpu);
drivers/irqchip/irq-gic-v3.c
2134
cpumask_set_cpu(cpu, &part->mask);
drivers/irqchip/irq-gic-v3.c
2334
int cpu = get_cpu_for_acpi_id(gicc->uid);
drivers/irqchip/irq-gic-v3.c
2336
pr_warn("CPU %u's redistributor is inaccessible: this CPU can't be brought online\n", cpu);
drivers/irqchip/irq-gic-v3.c
2337
if (cpu >= 0)
drivers/irqchip/irq-gic-v3.c
2338
cpumask_set_cpu(cpu, &broken_rdists);
drivers/irqchip/irq-gic-v3.c
749
static u64 gic_cpu_to_affinity(int cpu)
drivers/irqchip/irq-gic-v3.c
751
u64 mpidr = cpu_logical_map(cpu);
drivers/irqchip/irq-gic-v5-irs.c
625
int cpu;
drivers/irqchip/irq-gic-v5-irs.c
633
cpu = of_cpu_node_to_id(cpu_node);
drivers/irqchip/irq-gic-v5-irs.c
635
if (cpu < 0)
drivers/irqchip/irq-gic-v5-irs.c
640
cpu, iaffids[i]);
drivers/irqchip/irq-gic-v5-irs.c
644
per_cpu(cpu_iaffid, cpu).iaffid = iaffids[i];
drivers/irqchip/irq-gic-v5-irs.c
645
per_cpu(cpu_iaffid, cpu).valid = true;
drivers/irqchip/irq-gic-v5-irs.c
648
per_cpu(per_cpu_irs_data, cpu) = irs_data;
drivers/irqchip/irq-gic-v5-irs.c
854
int cpu;
drivers/irqchip/irq-gic-v5-irs.c
862
cpu = get_logical_index(gicc->arm_mpidr);
drivers/irqchip/irq-gic-v5-irs.c
865
pr_warn("CPU %d iaffid 0x%x exceeds IRS iaffid bits\n", cpu, gicc->iaffid);
drivers/irqchip/irq-gic-v5-irs.c
870
per_cpu(cpu_iaffid, cpu).iaffid = gicc->iaffid;
drivers/irqchip/irq-gic-v5-irs.c
871
per_cpu(cpu_iaffid, cpu).valid = true;
drivers/irqchip/irq-gic-v5-irs.c
872
pr_debug("Processed IAFFID %u for CPU%d", per_cpu(cpu_iaffid, cpu).iaffid, cpu);
drivers/irqchip/irq-gic-v5-irs.c
875
per_cpu(per_cpu_irs_data, cpu) = current_irs_data;
drivers/irqchip/irq-gic-v5.c
501
static void gicv5_ipi_send_single(struct irq_data *d, unsigned int cpu)
drivers/irqchip/irq-gic-v5.c
979
static int gicv5_starting_cpu(unsigned int cpu)
drivers/irqchip/irq-gic-v5.c
987
return gicv5_irs_register_cpu(cpu);
drivers/irqchip/irq-gic.c
1170
unsigned int cpu;
drivers/irqchip/irq-gic.c
1180
for_each_possible_cpu(cpu) {
drivers/irqchip/irq-gic.c
1181
u32 mpidr = cpu_logical_map(cpu);
drivers/irqchip/irq-gic.c
1184
*per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
drivers/irqchip/irq-gic.c
1186
*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
drivers/irqchip/irq-gic.c
491
unsigned int cpu_mask, cpu = smp_processor_id();
drivers/irqchip/irq-gic.c
503
if (WARN_ON(cpu >= NR_GIC_CPU_IF))
drivers/irqchip/irq-gic.c
508
gic_cpu_map[cpu] = cpu_mask;
drivers/irqchip/irq-gic.c
515
if (i != cpu)
drivers/irqchip/irq-gic.c
798
unsigned int cpu;
drivers/irqchip/irq-gic.c
804
cpu = cpumask_any_and(mask_val, cpu_online_mask);
drivers/irqchip/irq-gic.c
806
cpu = cpumask_first(mask_val);
drivers/irqchip/irq-gic.c
808
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
drivers/irqchip/irq-gic.c
812
rmw_writeb(gic_cpu_map[cpu], reg);
drivers/irqchip/irq-gic.c
814
writeb_relaxed(gic_cpu_map[cpu], reg);
drivers/irqchip/irq-gic.c
815
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-gic.c
822
int cpu;
drivers/irqchip/irq-gic.c
835
for_each_cpu(cpu, mask)
drivers/irqchip/irq-gic.c
836
map |= gic_cpu_map[cpu];
drivers/irqchip/irq-gic.c
850
static int gic_starting_cpu(unsigned int cpu)
drivers/irqchip/irq-gic.c
937
int gic_get_cpu_id(unsigned int cpu)
drivers/irqchip/irq-gic.c
941
if (cpu >= NR_GIC_CPU_IF)
drivers/irqchip/irq-gic.c
943
cpu_bit = gic_cpu_map[cpu];
drivers/irqchip/irq-gic.c
963
int i, ror_val, cpu = smp_processor_id();
drivers/irqchip/irq-gic.c
973
cur_cpu_id = __ffs(gic_cpu_map[cpu]);
drivers/irqchip/irq-gic.c
980
gic_cpu_map[cpu] = 1 << new_cpu_id;
drivers/irqchip/irq-hip04.c
151
unsigned int cpu, shift = (hip04_irq(d) % 2) * 16;
drivers/irqchip/irq-hip04.c
155
cpu = cpumask_any_and(mask_val, cpu_online_mask);
drivers/irqchip/irq-hip04.c
157
cpu = cpumask_first(mask_val);
drivers/irqchip/irq-hip04.c
159
if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids)
drivers/irqchip/irq-hip04.c
165
bit = hip04_cpu_map[cpu] << shift;
drivers/irqchip/irq-hip04.c
170
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-hip04.c
177
int cpu;
drivers/irqchip/irq-hip04.c
183
for_each_cpu(cpu, mask)
drivers/irqchip/irq-hip04.c
184
map |= hip04_cpu_map[cpu];
drivers/irqchip/irq-hip04.c
272
unsigned int cpu_mask, cpu = smp_processor_id();
drivers/irqchip/irq-hip04.c
278
BUG_ON(cpu >= NR_HIP04_CPU_IF);
drivers/irqchip/irq-hip04.c
280
hip04_cpu_map[cpu] = cpu_mask;
drivers/irqchip/irq-hip04.c
287
if (i != cpu)
drivers/irqchip/irq-hip04.c
341
static int hip04_irq_starting_cpu(unsigned int cpu)
drivers/irqchip/irq-jcore-aic.c
77
unsigned cpu;
drivers/irqchip/irq-jcore-aic.c
79
for_each_present_cpu(cpu) {
drivers/irqchip/irq-jcore-aic.c
80
void __iomem *base = of_iomap(node, cpu);
drivers/irqchip/irq-jcore-aic.c
83
pr_err("Unable to map AIC for cpu %u\n", cpu);
drivers/irqchip/irq-loongarch-avec.c
104
int cpu, ret, vector;
drivers/irqchip/irq-loongarch-avec.c
113
if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest))
drivers/irqchip/irq-loongarch-avec.c
118
ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu);
drivers/irqchip/irq-loongarch-avec.c
123
adata->cpu = cpu;
drivers/irqchip/irq-loongarch-avec.c
125
per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data);
drivers/irqchip/irq-loongarch-avec.c
129
irq_data_update_effective_affinity(data, cpumask_of(cpu));
drivers/irqchip/irq-loongarch-avec.c
134
static int avecintc_cpu_online(unsigned int cpu)
drivers/irqchip/irq-loongarch-avec.c
145
pending_list_init(cpu);
drivers/irqchip/irq-loongarch-avec.c
150
static int avecintc_cpu_offline(unsigned int cpu)
drivers/irqchip/irq-loongarch-avec.c
152
struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
drivers/irqchip/irq-loongarch-avec.c
160
pr_warn("CPU#%d vector is busy\n", cpu);
drivers/irqchip/irq-loongarch-avec.c
171
int cpu, vector, bias;
drivers/irqchip/irq-loongarch-avec.c
177
cpu = adata->prev_cpu;
drivers/irqchip/irq-loongarch-avec.c
196
mp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR);
drivers/irqchip/irq-loongarch-avec.c
200
irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false);
drivers/irqchip/irq-loongarch-avec.c
203
adata->prev_cpu = adata->cpu;
drivers/irqchip/irq-loongarch-avec.c
216
((cpu_logical_map(adata->cpu & AVEC_CPU_MASK)) << AVEC_CPU_SHIFT);
drivers/irqchip/irq-loongarch-avec.c
259
int cpu, ret;
drivers/irqchip/irq-loongarch-avec.c
263
ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu);
drivers/irqchip/irq-loongarch-avec.c
267
adata->prev_cpu = adata->cpu = cpu;
drivers/irqchip/irq-loongarch-avec.c
269
per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd);
drivers/irqchip/irq-loongarch-avec.c
304
per_cpu(irq_map, adata->cpu)[adata->vec] = NULL;
drivers/irqchip/irq-loongarch-avec.c
305
irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false);
drivers/irqchip/irq-loongarch-avec.c
52
unsigned int cpu;
drivers/irqchip/irq-loongarch-avec.c
83
static inline void pending_list_init(int cpu)
drivers/irqchip/irq-loongarch-avec.c
85
struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
drivers/irqchip/irq-loongson-eiointc.c
100
return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
drivers/irqchip/irq-loongson-eiointc.c
102
return cpu_logical_map(cpu) / CORES_PER_VEIO_NODE;
drivers/irqchip/irq-loongson-eiointc.c
106
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
drivers/irqchip/irq-loongson-eiointc.c
117
cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
drivers/irqchip/irq-loongson-eiointc.c
118
coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);
drivers/irqchip/irq-loongson-eiointc.c
132
static void veiointc_set_irq_route(unsigned int vector, unsigned int cpu)
drivers/irqchip/irq-loongson-eiointc.c
139
data |= cpu_logical_map(cpu) << EIOINTC_REG_ROUTE_VEC_SHIFT(vector);
drivers/irqchip/irq-loongson-eiointc.c
147
unsigned int cpu;
drivers/irqchip/irq-loongson-eiointc.c
154
cpu = cpumask_first_and_and(&priv->cpuspan_map, affinity, cpu_online_mask);
drivers/irqchip/irq-loongson-eiointc.c
155
if (cpu >= nr_cpu_ids) {
drivers/irqchip/irq-loongson-eiointc.c
165
veiointc_set_irq_route(vector, cpu);
drivers/irqchip/irq-loongson-eiointc.c
173
eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
drivers/irqchip/irq-loongson-eiointc.c
180
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-loongson-eiointc.c
200
static int eiointc_router_init(unsigned int cpu)
drivers/irqchip/irq-loongson-eiointc.c
206
node = cpu_to_eio_node(cpu);
drivers/irqchip/irq-loongson-eiointc.c
226
if ((cpu_logical_map(cpu) % cores) == 0) {
drivers/irqchip/irq-loongson-eiointc.c
97
static int cpu_to_eio_node(int cpu)
drivers/irqchip/irq-ls-scfg-msi.c
108
u32 cpu;
drivers/irqchip/irq-ls-scfg-msi.c
114
cpu = cpumask_any_and(mask, cpu_online_mask);
drivers/irqchip/irq-ls-scfg-msi.c
116
cpu = cpumask_first(mask);
drivers/irqchip/irq-ls-scfg-msi.c
118
if (cpu >= msi_data->msir_num)
drivers/irqchip/irq-ls-scfg-msi.c
121
if (msi_data->msir[cpu].gic_irq <= 0) {
drivers/irqchip/irq-ls-scfg-msi.c
122
pr_warn("cannot bind the irq to cpu%d\n", cpu);
drivers/irqchip/irq-ls-scfg-msi.c
126
irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
drivers/irqchip/irq-mips-cpu.c
100
WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu));
drivers/irqchip/irq-mips-cpu.c
103
settc(cpu_vpe_id(&cpu_data[cpu]));
drivers/irqchip/irq-mips-cpu.c
91
static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu)
drivers/irqchip/irq-mips-gic.c
107
#define for_each_online_cpu_gic(cpu, gic_lock) \
drivers/irqchip/irq-mips-gic.c
109
for ((cpu) = __gic_with_next_online_cpu(-1); \
drivers/irqchip/irq-mips-gic.c
110
(cpu) < nr_cpu_ids; \
drivers/irqchip/irq-mips-gic.c
112
(cpu) = __gic_with_next_online_cpu(cpu))
drivers/irqchip/irq-mips-gic.c
136
unsigned int cpu, cl;
drivers/irqchip/irq-mips-gic.c
138
cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
drivers/irqchip/irq-mips-gic.c
139
BUG_ON(cpu >= NR_CPUS);
drivers/irqchip/irq-mips-gic.c
141
cl = cpu_cluster(&cpu_data[cpu]);
drivers/irqchip/irq-mips-gic.c
192
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
drivers/irqchip/irq-mips-gic.c
282
unsigned int cpu;
drivers/irqchip/irq-mips-gic.c
292
cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
drivers/irqchip/irq-mips-gic.c
293
set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
drivers/irqchip/irq-mips-gic.c
372
unsigned int cpu, cl, old_cpu, old_cl;
drivers/irqchip/irq-mips-gic.c
381
cpu = cpumask_first(cpumask);
drivers/irqchip/irq-mips-gic.c
383
cpu = cpumask_first_and(cpumask, cpu_online_mask);
drivers/irqchip/irq-mips-gic.c
385
if (cpu >= NR_CPUS)
drivers/irqchip/irq-mips-gic.c
390
cl = cpu_cluster(&cpu_data[cpu]);
drivers/irqchip/irq-mips-gic.c
411
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-mips-gic.c
424
write_gic_redir_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
drivers/irqchip/irq-mips-gic.c
429
set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
drivers/irqchip/irq-mips-gic.c
434
write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
drivers/irqchip/irq-mips-gic.c
439
set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
drivers/irqchip/irq-mips-gic.c
513
int intr, cpu;
drivers/irqchip/irq-mips-gic.c
522
for_each_online_cpu_gic(cpu, &gic_lock)
drivers/irqchip/irq-mips-gic.c
529
int intr, cpu;
drivers/irqchip/irq-mips-gic.c
538
for_each_online_cpu_gic(cpu, &gic_lock)
drivers/irqchip/irq-mips-gic.c
588
irq_hw_number_t hw, unsigned int cpu)
drivers/irqchip/irq-mips-gic.c
595
irq_data_update_effective_affinity(data, cpumask_of(cpu));
drivers/irqchip/irq-mips-gic.c
603
write_gic_redir_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
drivers/irqchip/irq-mips-gic.c
607
write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
drivers/irqchip/irq-mips-gic.c
639
int err, cpu;
drivers/irqchip/irq-mips-gic.c
702
for_each_online_cpu_gic(cpu, &gic_lock)
drivers/irqchip/irq-mips-gic.c
71
unsigned int cpu;
drivers/irqchip/irq-mips-gic.c
74
cpu = cpumask_next(prev, cpu_online_mask);
drivers/irqchip/irq-mips-gic.c
757
int cpu, ret, i;
drivers/irqchip/irq-mips-gic.c
77
if (cpu >= nr_cpu_ids)
drivers/irqchip/irq-mips-gic.c
772
for_each_cpu(cpu, ipimask) {
drivers/irqchip/irq-mips-gic.c
78
return cpu;
drivers/irqchip/irq-mips-gic.c
789
cpumask_of(cpu));
drivers/irqchip/irq-mips-gic.c
794
ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
drivers/irqchip/irq-mips-gic.c
86
write_gic_vl_other(mips_cm_vp_id(cpu));
drivers/irqchip/irq-mips-gic.c
88
return cpu;
drivers/irqchip/irq-mips-gic.c
885
static int gic_cpu_startup(unsigned int cpu)
drivers/irqchip/irq-ompic.c
123
unsigned int cpu = smp_processor_id();
drivers/irqchip/irq-ompic.c
124
unsigned long *pending_ops = &per_cpu(ops, cpu);
drivers/irqchip/irq-ompic.c
127
ompic_writereg(ompic_base, OMPIC_CTRL(cpu), OMPIC_CTRL_IRQ_ACK);
drivers/irqchip/irq-ompic.c
72
#define OMPIC_CTRL(cpu) (0x0 + (cpu * OMPIC_CPUBYTES))
drivers/irqchip/irq-ompic.c
73
#define OMPIC_STAT(cpu) (0x4 + (cpu * OMPIC_CPUBYTES))
drivers/irqchip/irq-ompic.c
77
#define OMPIC_CTRL_DST(cpu) (((cpu) & 0x3fff) << 16)
drivers/irqchip/irq-riscv-aplic-direct.c
178
int cpu;
drivers/irqchip/irq-riscv-aplic-direct.c
180
for_each_cpu(cpu, &direct->lmask)
drivers/irqchip/irq-riscv-aplic-direct.c
181
aplic_idc_set_delivery(per_cpu_ptr(&aplic_idcs, cpu), true);
drivers/irqchip/irq-riscv-aplic-direct.c
184
static int aplic_direct_dying_cpu(unsigned int cpu)
drivers/irqchip/irq-riscv-aplic-direct.c
192
static int aplic_direct_starting_cpu(unsigned int cpu)
drivers/irqchip/irq-riscv-aplic-direct.c
234
int i, j, rc, cpu, current_cpu, setup_count = 0;
drivers/irqchip/irq-riscv-aplic-direct.c
269
cpu = riscv_hartid_to_cpuid(hartid);
drivers/irqchip/irq-riscv-aplic-direct.c
270
if (cpu < 0) {
drivers/irqchip/irq-riscv-aplic-direct.c
275
cpumask_set_cpu(cpu, &direct->lmask);
drivers/irqchip/irq-riscv-aplic-direct.c
277
idc = per_cpu_ptr(&aplic_idcs, cpu);
drivers/irqchip/irq-riscv-aplic-direct.c
292
if (cpu == current_cpu && idc->hart_index) {
drivers/irqchip/irq-riscv-aplic-direct.c
58
unsigned int cpu, val;
drivers/irqchip/irq-riscv-aplic-direct.c
62
cpu = cpumask_first_and(&direct->lmask, mask_val);
drivers/irqchip/irq-riscv-aplic-direct.c
64
cpu = cpumask_first_and_and(&direct->lmask, mask_val, cpu_online_mask);
drivers/irqchip/irq-riscv-aplic-direct.c
66
if (cpu >= nr_cpu_ids)
drivers/irqchip/irq-riscv-aplic-direct.c
69
idc = per_cpu_ptr(&aplic_idcs, cpu);
drivers/irqchip/irq-riscv-aplic-direct.c
75
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-riscv-imsic-early.c
142
static int imsic_starting_cpu(unsigned int cpu)
drivers/irqchip/irq-riscv-imsic-early.c
156
static int imsic_dying_cpu(unsigned int cpu)
drivers/irqchip/irq-riscv-imsic-early.c
37
static void imsic_ipi_send(unsigned int cpu)
drivers/irqchip/irq-riscv-imsic-early.c
39
struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu);
drivers/irqchip/irq-riscv-imsic-platform.c
126
if (cpumask_test_cpu(old_vec->cpu, mask_val))
drivers/irqchip/irq-riscv-imsic-platform.c
157
tmp_vec.cpu = old_vec->cpu;
drivers/irqchip/irq-riscv-imsic-platform.c
171
irq_data_update_effective_affinity(d, cpumask_of(new_vec->cpu));
drivers/irqchip/irq-riscv-imsic-platform.c
182
unsigned int cpu = smp_processor_id();
drivers/irqchip/irq-riscv-imsic-platform.c
193
if (mvec->cpu != cpu)
drivers/irqchip/irq-riscv-imsic-platform.c
206
d->irq, mvec->cpu, mvec->local_id);
drivers/irqchip/irq-riscv-imsic-platform.c
242
irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu));
drivers/irqchip/irq-riscv-imsic-platform.c
26
static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
drivers/irqchip/irq-riscv-imsic-platform.c
33
local = per_cpu_ptr(global->local, cpu);
drivers/irqchip/irq-riscv-imsic-platform.c
62
local = per_cpu_ptr(imsic->global.local, vec->cpu);
drivers/irqchip/irq-riscv-imsic-platform.c
79
if (WARN_ON(!imsic_cpu_page_phys(vec->cpu, 0, &msi_addr)))
drivers/irqchip/irq-riscv-imsic-state.c
186
tlocal = per_cpu_ptr(imsic->global.local, tvec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
190
mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
195
mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
211
static void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu)
drivers/irqchip/irq-riscv-imsic-state.c
217
add_timer_on(&lpriv->timer, cpu);
drivers/irqchip/irq-riscv-imsic-state.c
221
static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu)
drivers/irqchip/irq-riscv-imsic-state.c
259
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
drivers/irqchip/irq-riscv-imsic-state.c
275
if (cpu_online(cpu)) {
drivers/irqchip/irq-riscv-imsic-state.c
276
if (cpu == smp_processor_id()) {
drivers/irqchip/irq-riscv-imsic-state.c
281
__imsic_local_timer_start(lpriv, cpu);
drivers/irqchip/irq-riscv-imsic-state.c
285
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
drivers/irqchip/irq-riscv-imsic-state.c
296
lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
309
__imsic_remote_sync(lpriv, vec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
318
lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
331
__imsic_remote_sync(lpriv, vec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
342
lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
372
__imsic_remote_sync(lpriv, vec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
384
if (WARN_ON_ONCE(old_vec->cpu == new_vec->cpu))
drivers/irqchip/irq-riscv-imsic-state.c
387
old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
391
new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
412
lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
419
seq_printf(m, "%*starget_cpu : %5u\n", ind, "", vec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
426
seq_printf(m, "%*smove_cpu : %5u\n", ind, "", mvec->cpu);
drivers/irqchip/irq-riscv-imsic-state.c
442
unsigned int cpu;
drivers/irqchip/irq-riscv-imsic-state.c
446
local_id = irq_matrix_alloc(imsic->matrix, mask, false, &cpu);
drivers/irqchip/irq-riscv-imsic-state.c
451
lpriv = per_cpu_ptr(imsic->lpriv, cpu);
drivers/irqchip/irq-riscv-imsic-state.c
467
irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false);
drivers/irqchip/irq-riscv-imsic-state.c
474
int cpu;
drivers/irqchip/irq-riscv-imsic-state.c
476
for_each_possible_cpu(cpu) {
drivers/irqchip/irq-riscv-imsic-state.c
477
lpriv = per_cpu_ptr(imsic->lpriv, cpu);
drivers/irqchip/irq-riscv-imsic-state.c
491
int cpu, i;
drivers/irqchip/irq-riscv-imsic-state.c
499
for_each_possible_cpu(cpu) {
drivers/irqchip/irq-riscv-imsic-state.c
500
lpriv = per_cpu_ptr(imsic->lpriv, cpu);
drivers/irqchip/irq-riscv-imsic-state.c
523
vec->cpu = cpu;
drivers/irqchip/irq-riscv-imsic-state.c
794
int rc, cpu;
drivers/irqchip/irq-riscv-imsic-state.c
889
cpu = riscv_hartid_to_cpuid(hartid);
drivers/irqchip/irq-riscv-imsic-state.c
890
if (cpu < 0) {
drivers/irqchip/irq-riscv-imsic-state.c
918
local = per_cpu_ptr(global->local, cpu);
drivers/irqchip/irq-riscv-imsic-state.h
20
unsigned int cpu;
drivers/irqchip/irq-sifive-plic.c
138
int cpu;
drivers/irqchip/irq-sifive-plic.c
140
for_each_cpu(cpu, mask) {
drivers/irqchip/irq-sifive-plic.c
141
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
drivers/irqchip/irq-sifive-plic.c
194
unsigned int cpu;
drivers/irqchip/irq-sifive-plic.c
198
cpu = cpumask_first_and(&priv->lmask, mask_val);
drivers/irqchip/irq-sifive-plic.c
200
cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask);
drivers/irqchip/irq-sifive-plic.c
202
if (cpu >= nr_cpu_ids)
drivers/irqchip/irq-sifive-plic.c
208
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/irqchip/irq-sifive-plic.c
286
unsigned int index, cpu;
drivers/irqchip/irq-sifive-plic.c
296
for_each_present_cpu(cpu) {
drivers/irqchip/irq-sifive-plic.c
297
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
drivers/irqchip/irq-sifive-plic.c
503
static int plic_dying_cpu(unsigned int cpu)
drivers/irqchip/irq-sifive-plic.c
511
static int plic_starting_cpu(unsigned int cpu)
drivers/irqchip/irq-sifive-plic.c
520
handler->priv->fwnode, cpu);
drivers/irqchip/irq-sifive-plic.c
619
int error = 0, nr_contexts, nr_handlers = 0, cpu, i;
drivers/irqchip/irq-sifive-plic.c
677
error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu,
drivers/irqchip/irq-sifive-plic.c
710
if (cpu < 0) {
drivers/irqchip/irq-sifive-plic.c
720
handler = per_cpu_ptr(&plic_handlers, cpu);
drivers/irqchip/irq-sifive-plic.c
727
cpumask_set_cpu(cpu, &priv->lmask);
drivers/irqchip/irq-sifive-plic.c
765
for_each_online_cpu(cpu) {
drivers/irqchip/irq-sifive-plic.c
766
handler = per_cpu_ptr(&plic_handlers, cpu);
drivers/irqchip/irq-sifive-plic.c
805
if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, priv->acpi_plic_id))
drivers/irqchip/irq-sifive-plic.c
807
if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
drivers/irqchip/irq-sifive-plic.c
810
handler = per_cpu_ptr(&plic_handlers, cpu);
drivers/irqchip/irq-xtensa-mx.c
134
int cpu = cpumask_any_and(dest, cpu_online_mask);
drivers/irqchip/irq-xtensa-mx.c
135
unsigned mask = 1u << cpu;
drivers/irqchip/irq-xtensa-mx.c
138
irq_data_update_effective_affinity(d, cpumask_of(cpu));
drivers/leds/trigger/ledtrig-cpu.c
123
static int ledtrig_online_cpu(unsigned int cpu)
drivers/leds/trigger/ledtrig-cpu.c
129
static int ledtrig_prepare_down_cpu(unsigned int cpu)
drivers/leds/trigger/ledtrig-cpu.c
137
unsigned int cpu;
drivers/leds/trigger/ledtrig-cpu.c
153
for_each_possible_cpu(cpu) {
drivers/leds/trigger/ledtrig-cpu.c
154
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
drivers/leds/trigger/ledtrig-cpu.c
156
if (cpu >= 8)
drivers/leds/trigger/ledtrig-cpu.c
159
snprintf(trig->name, MAX_NAME_LEN, "cpu%u", cpu);
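[annotation] The ledtrig-cpu entries above address statically declared per-CPU state with per_cpu() and give each instance a per-CPU name. A minimal sketch of that declaration-and-naming pattern; the struct and lengths are illustrative:

	#include <linux/percpu.h>
	#include <linux/cpumask.h>
	#include <linux/kernel.h>

	#define DEMO_NAME_LEN 16

	struct demo_trig {
		char name[DEMO_NAME_LEN];
	};
	static DEFINE_PER_CPU(struct demo_trig, demo_trig_pcpu);

	static void demo_trig_init(void)
	{
		unsigned int cpu;

		for_each_possible_cpu(cpu) {
			struct demo_trig *t = &per_cpu(demo_trig_pcpu, cpu);

			snprintf(t->name, DEMO_NAME_LEN, "cpu%u", cpu);
		}
	}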
drivers/macintosh/rack-meter.c
217
unsigned int cpu = smp_processor_id();
drivers/macintosh/rack-meter.c
226
total_idle_nsecs = get_cpu_idle_time(cpu);
drivers/macintosh/rack-meter.c
236
offset = cpu << 3;
drivers/macintosh/rack-meter.c
246
pause = (rm->cpu[0].zero && rm->cpu[1].zero);
drivers/macintosh/rack-meter.c
249
pause = (rm->cpu[0].zero && rm->cpu[1].zero);
drivers/macintosh/rack-meter.c
253
schedule_delayed_work_on(cpu, &rcpu->sniffer,
drivers/macintosh/rack-meter.c
259
unsigned int cpu;
drivers/macintosh/rack-meter.c
267
rm->cpu[0].rm = rm;
drivers/macintosh/rack-meter.c
268
INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer);
drivers/macintosh/rack-meter.c
269
rm->cpu[1].rm = rm;
drivers/macintosh/rack-meter.c
270
INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer);
drivers/macintosh/rack-meter.c
272
for_each_online_cpu(cpu) {
drivers/macintosh/rack-meter.c
275
if (cpu > 1)
drivers/macintosh/rack-meter.c
277
rcpu = &rm->cpu[cpu];
drivers/macintosh/rack-meter.c
278
rcpu->prev_idle = get_cpu_idle_time(cpu);
drivers/macintosh/rack-meter.c
280
schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
drivers/macintosh/rack-meter.c
287
cancel_delayed_work_sync(&rm->cpu[0].sniffer);
drivers/macintosh/rack-meter.c
288
cancel_delayed_work_sync(&rm->cpu[1].sniffer);
drivers/macintosh/rack-meter.c
67
struct rackmeter_cpu cpu[2];
drivers/macintosh/rack-meter.c
81
static inline u64 get_cpu_idle_time(unsigned int cpu)
drivers/macintosh/rack-meter.c
83
struct kernel_cpustat *kcpustat = &kcpustat_cpu(cpu);
drivers/macintosh/rack-meter.c
90
retval += kcpustat_field(kcpustat, CPUTIME_NICE, cpu);
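[annotation] The rack-meter entries above read a CPU's idle time from its kernel_cpustat, folding the nice field in as well. A sketch of that read, assuming the same idle+nice accounting; the wrapper name is illustrative:

	#include <linux/types.h>
	#include <linux/kernel_stat.h>

	static u64 demo_cpu_idle_time(unsigned int cpu)
	{
		struct kernel_cpustat *kcpustat = &kcpustat_cpu(cpu);
		u64 idle;

		idle = kcpustat_field(kcpustat, CPUTIME_IDLE, cpu);
		idle += kcpustat_field(kcpustat, CPUTIME_NICE, cpu);
		return idle;
	}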
drivers/macintosh/windfarm_ad7417_sensor.c
183
pv->sensors[index].name = kasprintf(GFP_KERNEL, "%s-%d", name, pv->cpu);
drivers/macintosh/windfarm_ad7417_sensor.c
270
pv->cpu = cpu_nr;
drivers/macintosh/windfarm_ad7417_sensor.c
30
u8 cpu;
drivers/macintosh/windfarm_mpu.h
76
static inline const struct mpu_data *wf_get_mpu(int cpu)
drivers/macintosh/windfarm_mpu.h
88
sprintf(nodename, "/u3@0,f8000000/i2c@f8001000/cpuid@a%d", cpu ? 2 : 0);
drivers/macintosh/windfarm_pm112.c
126
static int create_cpu_loop(int cpu)
drivers/macintosh/windfarm_pm112.c
128
int chip = cpu / 2;
drivers/macintosh/windfarm_pm112.c
129
int core = cpu & 1;
drivers/macintosh/windfarm_pm112.c
180
wf_cpu_pid_init(&cpu_pid[cpu], &pid);
drivers/macintosh/windfarm_pm112.c
270
int err, cpu;
drivers/macintosh/windfarm_pm112.c
279
for (cpu = 0; cpu < nr_cores; ++cpu) {
drivers/macintosh/windfarm_pm112.c
281
sr = sens_cpu_temp[cpu];
drivers/macintosh/windfarm_pm112.c
286
"sensor error %d\n", cpu, err);
drivers/macintosh/windfarm_pm112.c
296
sr = sens_cpu_power[cpu];
drivers/macintosh/windfarm_pm112.c
301
"sensor error %d\n", cpu, err);
drivers/macintosh/windfarm_pm112.c
308
sp = &cpu_pid[cpu];
drivers/macintosh/windfarm_pm112.c
311
if (cpu == 0 || sp->last_delta > greatest_delta) {
drivers/macintosh/windfarm_pm112.c
316
cpu, FIX32TOPRINT(power), FIX32TOPRINT(temp));
drivers/macintosh/windfarm_pm112.c
324
for (cpu = 0; cpu < nr_cores; ++cpu)
drivers/macintosh/windfarm_pm112.c
325
cpu_pid[cpu].target = target;
drivers/macintosh/windfarm_pm112.c
680
struct device_node *cpu;
drivers/macintosh/windfarm_pm112.c
687
for_each_node_by_type(cpu, "cpu")
drivers/macintosh/windfarm_pm72.c
208
static int read_one_cpu_vals(int cpu, s32 *temp, s32 *power)
drivers/macintosh/windfarm_pm72.c
214
rc = wf_sensor_get(sens_cpu_temp[cpu], &dtemp);
drivers/macintosh/windfarm_pm72.c
216
DBG(" CPU%d: temp reading error !\n", cpu);
drivers/macintosh/windfarm_pm72.c
219
DBG_LOTS(" CPU%d: temp = %d.%03d\n", cpu, FIX32TOPRINT((dtemp)));
drivers/macintosh/windfarm_pm72.c
223
rc = wf_sensor_get(sens_cpu_volts[cpu], &volts);
drivers/macintosh/windfarm_pm72.c
225
DBG(" CPU%d, volts reading error !\n", cpu);
drivers/macintosh/windfarm_pm72.c
228
DBG_LOTS(" CPU%d: volts = %d.%03d\n", cpu, FIX32TOPRINT((volts)));
drivers/macintosh/windfarm_pm72.c
231
rc = wf_sensor_get(sens_cpu_amps[cpu], &s);
drivers/macintosh/windfarm_pm72.c
233
DBG(" CPU%d, current reading error !\n", cpu);
drivers/macintosh/windfarm_pm72.c
236
DBG_LOTS(" CPU%d: amps = %d.%03d\n", cpu, FIX32TOPRINT((amps)));
drivers/macintosh/windfarm_pm72.c
245
DBG_LOTS(" CPU%d: power = %d.%03d\n", cpu, FIX32TOPRINT((*power)));
drivers/macintosh/windfarm_pm72.c
253
int err, cpu;
drivers/macintosh/windfarm_pm72.c
258
for (cpu = 0; cpu < nr_chips; ++cpu) {
drivers/macintosh/windfarm_pm72.c
259
struct wf_cpu_pid_state *sp = &cpu_pid[cpu];
drivers/macintosh/windfarm_pm72.c
262
wf_control_get(cpu_rear_fans[cpu], &sp->target);
drivers/macintosh/windfarm_pm72.c
264
DBG_LOTS(" CPU%d: cur_target = %d RPM\n", cpu, sp->target);
drivers/macintosh/windfarm_pm72.c
266
err = read_one_cpu_vals(cpu, &temp, &power);
drivers/macintosh/windfarm_pm72.c
283
DBG_LOTS(" CPU%d: target = %d RPM\n", cpu, sp->target);
drivers/macintosh/windfarm_pm72.c
286
err = wf_control_set(cpu_rear_fans[cpu], sp->target);
drivers/macintosh/windfarm_pm72.c
289
cpu_rear_fans[cpu]->name, err);
drivers/macintosh/windfarm_pm72.c
296
DBG_LOTS(" CPU%d: intake = %d RPM\n", cpu, intake);
drivers/macintosh/windfarm_pm72.c
297
err = wf_control_set(cpu_front_fans[cpu], intake);
drivers/macintosh/windfarm_pm72.c
300
cpu_front_fans[cpu]->name, err);
drivers/macintosh/windfarm_pm72.c
313
int err, cpu;
drivers/macintosh/windfarm_pm72.c
367
for (cpu = 0; cpu < nr_chips; cpu++) {
drivers/macintosh/windfarm_pm72.c
368
err = wf_control_set(cpu_rear_fans[cpu], sp->target);
drivers/macintosh/windfarm_pm72.c
371
cpu_rear_fans[cpu]->name, err);
drivers/macintosh/windfarm_pm72.c
374
err = wf_control_set(cpu_front_fans[cpu], intake);
drivers/macintosh/windfarm_pm72.c
377
cpu_front_fans[cpu]->name, err);
drivers/macintosh/windfarm_pm72.c
381
if (cpu_pumps[cpu])
drivers/macintosh/windfarm_pm72.c
382
err = wf_control_set(cpu_pumps[cpu], pump);
drivers/macintosh/windfarm_pm72.c
385
cpu_pumps[cpu]->name, err);
drivers/macintosh/windfarm_pm72.c
392
static int cpu_setup_pid(int cpu)
drivers/macintosh/windfarm_pm72.c
395
const struct mpu_data *mpu = cpu_mpu_data[cpu];
drivers/macintosh/windfarm_pm72.c
405
cpu, FIX32TOPRINT(ttarget), FIX32TOPRINT(tmax));
drivers/macintosh/windfarm_pm72.c
412
fmin = wf_control_get_min(cpu_rear_fans[cpu]);
drivers/macintosh/windfarm_pm72.c
413
fmax = wf_control_get_max(cpu_rear_fans[cpu]);
drivers/macintosh/windfarm_pm72.c
414
DBG("wf_72: CPU%d max RPM range = [%d..%d]\n", cpu, fmin, fmax);
drivers/macintosh/windfarm_pm72.c
418
DBG("wf_72: CPU%d history size = %d\n", cpu, hsize);
drivers/macintosh/windfarm_pm72.c
432
wf_cpu_pid_init(&cpu_pid[cpu], &pid);
drivers/macintosh/windfarm_pm72.c
433
cpu_pid[cpu].target = 1000;
drivers/macintosh/windfarm_pm72.c
793
struct device_node *cpu;
drivers/macintosh/windfarm_pm72.c
802
for_each_node_by_type(cpu, "cpu")
drivers/macintosh/windfarm_rm31.c
202
static int read_one_cpu_vals(int cpu, s32 *temp, s32 *power)
drivers/macintosh/windfarm_rm31.c
208
rc = wf_sensor_get(sens_cpu_temp[cpu], &dtemp);
drivers/macintosh/windfarm_rm31.c
210
DBG(" CPU%d: temp reading error !\n", cpu);
drivers/macintosh/windfarm_rm31.c
213
DBG_LOTS(" CPU%d: temp = %d.%03d\n", cpu, FIX32TOPRINT((dtemp)));
drivers/macintosh/windfarm_rm31.c
217
rc = wf_sensor_get(sens_cpu_volts[cpu], &volts);
drivers/macintosh/windfarm_rm31.c
219
DBG(" CPU%d, volts reading error !\n", cpu);
drivers/macintosh/windfarm_rm31.c
222
DBG_LOTS(" CPU%d: volts = %d.%03d\n", cpu, FIX32TOPRINT((volts)));
drivers/macintosh/windfarm_rm31.c
225
rc = wf_sensor_get(sens_cpu_amps[cpu], &s);
drivers/macintosh/windfarm_rm31.c
227
DBG(" CPU%d, current reading error !\n", cpu);
drivers/macintosh/windfarm_rm31.c
230
DBG_LOTS(" CPU%d: amps = %d.%03d\n", cpu, FIX32TOPRINT((amps)));
drivers/macintosh/windfarm_rm31.c
239
DBG_LOTS(" CPU%d: power = %d.%03d\n", cpu, FIX32TOPRINT((*power)));
drivers/macintosh/windfarm_rm31.c
247
int err, cpu, i;
drivers/macintosh/windfarm_rm31.c
252
for (cpu = 0; cpu < nr_chips; ++cpu) {
drivers/macintosh/windfarm_rm31.c
253
struct wf_cpu_pid_state *sp = &cpu_pid[cpu];
drivers/macintosh/windfarm_rm31.c
256
wf_control_get(cpu_fans[cpu][0], &sp->target);
drivers/macintosh/windfarm_rm31.c
258
err = read_one_cpu_vals(cpu, &temp, &power);
drivers/macintosh/windfarm_rm31.c
275
DBG_LOTS(" CPU%d: target = %d RPM\n", cpu, sp->target);
drivers/macintosh/windfarm_rm31.c
282
err = wf_control_set(cpu_fans[cpu][i], speed);
drivers/macintosh/windfarm_rm31.c
285
cpu_fans[cpu][i]->name, err);
drivers/macintosh/windfarm_rm31.c
293
static int cpu_setup_pid(int cpu)
drivers/macintosh/windfarm_rm31.c
296
const struct mpu_data *mpu = cpu_mpu_data[cpu];
drivers/macintosh/windfarm_rm31.c
306
cpu, FIX32TOPRINT(ttarget), FIX32TOPRINT(tmax));
drivers/macintosh/windfarm_rm31.c
313
fmin = wf_control_get_min(cpu_fans[cpu][0]);
drivers/macintosh/windfarm_rm31.c
314
fmax = wf_control_get_max(cpu_fans[cpu][0]);
drivers/macintosh/windfarm_rm31.c
315
DBG("wf_72: CPU%d max RPM range = [%d..%d]\n", cpu, fmin, fmax);
drivers/macintosh/windfarm_rm31.c
319
DBG("wf_72: CPU%d history size = %d\n", cpu, hsize);
drivers/macintosh/windfarm_rm31.c
333
wf_cpu_pid_init(&cpu_pid[cpu], &pid);
drivers/macintosh/windfarm_rm31.c
334
cpu_pid[cpu].target = 4000;
drivers/macintosh/windfarm_rm31.c
686
struct device_node *cpu;
drivers/macintosh/windfarm_rm31.c
694
for_each_node_by_type(cpu, "cpu")
drivers/macintosh/windfarm_smu_sat.c
202
int shift, cpu, index;
drivers/macintosh/windfarm_smu_sat.c
240
cpu = 2 * chip + core;
drivers/macintosh/windfarm_smu_sat.c
267
"%s sensor %d (no memory)\n", name, cpu);
drivers/macintosh/windfarm_smu_sat.c
276
snprintf((char *)sens->sens.name, 16, "%s-%d", name, cpu);
drivers/macintosh/windfarm_smu_sat.c
290
cpu = 2 * sat->nr + core;
drivers/macintosh/windfarm_smu_sat.c
294
"sensor %d (no memory)\n", cpu);
drivers/macintosh/windfarm_smu_sat.c
303
snprintf((char *)sens->sens.name, 16, "cpu-power-%d", cpu);
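The windfarm entries above (pm72, rm31, smu_sat) all use cpu as a small per-chip index into driver-local sensor, fan, and PID-state arrays rather than a logical processor id. A minimal sketch of the PID setup step, assuming struct wf_cpu_pid_param and its min/max fields from windfarm_pid.h and the driver-local nr_chips, cpu_fans[] and cpu_pid[] arrays seen in the listing:

    static void setup_cpu_pid_loops(void)
    {
        int cpu;

        for (cpu = 0; cpu < nr_chips; ++cpu) {
            /* clamp the PID output to the fan's RPM range */
            struct wf_cpu_pid_param pid = {
                .min = wf_control_get_min(cpu_fans[cpu][0]),
                .max = wf_control_get_max(cpu_fans[cpu][0]),
            };

            wf_cpu_pid_init(&cpu_pid[cpu], &pid);
            cpu_pid[cpu].target = 4000; /* initial RPM, as in rm31 */
        }
    }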
drivers/mailbox/cv1800-mailbox.c
107
int cpu = priv->cpu;
drivers/mailbox/cv1800-mailbox.c
114
writeb(valid, mbox->mbox_base + MBOX_SET_CLR_REG(cpu));
drivers/mailbox/cv1800-mailbox.c
115
en = readb(mbox->mbox_base + MBOX_EN_REG(cpu));
drivers/mailbox/cv1800-mailbox.c
116
writeb(en | valid, mbox->mbox_base + MBOX_EN_REG(cpu));
drivers/mailbox/cv1800-mailbox.c
129
en = readb(mbox->mbox_base + MBOX_EN_REG(priv->cpu));
drivers/mailbox/cv1800-mailbox.c
145
int cpu = spec->args[1];
drivers/mailbox/cv1800-mailbox.c
151
priv->cpu = cpu;
drivers/mailbox/cv1800-mailbox.c
24
#define MBOX_EN_REG(cpu) (cpu << 2)
drivers/mailbox/cv1800-mailbox.c
25
#define MBOX_DONE_REG(cpu) ((cpu << 2) + 2)
drivers/mailbox/cv1800-mailbox.c
26
#define MBOX_SET_CLR_REG(cpu) (0x10 + (cpu << 4))
drivers/mailbox/cv1800-mailbox.c
27
#define MBOX_SET_INT_REG(cpu) (0x18 + (cpu << 4))
drivers/mailbox/cv1800-mailbox.c
43
int cpu;
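In cv1800-mailbox.c the macros above encode a per-CPU register layout: the enable and done bytes sit at a 4-byte stride and the set/clear pair at a 16-byte stride from 0x10. A sketch of the enable sequence the lines above imply, assuming mbox_base has already been ioremap()ed:

    #define MBOX_EN_REG(cpu)      ((cpu) << 2)
    #define MBOX_SET_CLR_REG(cpu) (0x10 + ((cpu) << 4))

    static void mbox_enable_bits(void __iomem *mbox_base, int cpu, u8 valid)
    {
        u8 en;

        /* clear any stale state, then OR the new bits into the enable byte */
        writeb(valid, mbox_base + MBOX_SET_CLR_REG(cpu));
        en = readb(mbox_base + MBOX_EN_REG(cpu));
        writeb(en | valid, mbox_base + MBOX_EN_REG(cpu));
    }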
drivers/mailbox/riscv-sbi-mpxy-mbox.c
339
static int mpxy_setup_shmem(unsigned int cpu)
drivers/mailbox/riscv-sbi-mpxy-mbox.c
345
mpxy = per_cpu_ptr(&mpxy_local, cpu);
drivers/mailbox/zynqmp-ipi-mailbox.c
778
static int xlnx_mbox_cpuhp_start(unsigned int cpu)
drivers/mailbox/zynqmp-ipi-mailbox.c
789
static int xlnx_mbox_cpuhp_down(unsigned int cpu)
drivers/mailbox/zynqmp-ipi-mailbox.c
814
int cpu;
drivers/mailbox/zynqmp-ipi-mailbox.c
852
for_each_possible_cpu(cpu)
drivers/mailbox/zynqmp-ipi-mailbox.c
853
per_cpu(per_cpu_pdata, cpu) = pdata;
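The zynqmp entries pair a per-CPU pointer (per_cpu(per_cpu_pdata, cpu)) with CPU hotplug callbacks so every CPU that comes online arms its IRQ and every CPU going down quiesces it first. A hedged sketch of the registration using the generic cpuhp API; the state name is illustrative and the real driver may register differently:

    static int xlnx_mbox_cpuhp_start(unsigned int cpu)
    {
        /* arm this CPU's mailbox IRQ */
        return 0;
    }

    static int xlnx_mbox_cpuhp_down(unsigned int cpu)
    {
        /* quiesce this CPU's mailbox IRQ */
        return 0;
    }

    static int xlnx_mbox_register_hotplug(void)
    {
        return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mailbox/zynqmp:online",
                                 xlnx_mbox_cpuhp_start, xlnx_mbox_cpuhp_down);
    }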
drivers/md/dm-pcache/cache.c
286
u32 i, cpu;
drivers/md/dm-pcache/cache.c
320
for_each_possible_cpu(cpu) {
drivers/md/dm-pcache/cache.c
322
per_cpu_ptr(cache->data_heads, cpu);
drivers/md/dm-ps-io-affinity.c
140
unsigned int cpu;
drivers/md/dm-ps-io-affinity.c
142
for_each_cpu(cpu, s->path_mask)
drivers/md/dm-ps-io-affinity.c
143
ioa_free_path(s, cpu);
drivers/md/dm-ps-io-affinity.c
198
unsigned int cpu, node;
drivers/md/dm-ps-io-affinity.c
204
cpu = get_cpu();
drivers/md/dm-ps-io-affinity.c
206
pi = s->path_map[cpu];
drivers/md/dm-ps-io-affinity.c
217
node = cpu_to_node(cpu);
drivers/md/dm-ps-io-affinity.c
27
static void ioa_free_path(struct selector *s, unsigned int cpu)
drivers/md/dm-ps-io-affinity.c
29
struct path_info *pi = s->path_map[cpu];
drivers/md/dm-ps-io-affinity.c
35
cpumask_clear_cpu(cpu, s->path_mask);
drivers/md/dm-ps-io-affinity.c
39
s->path_map[cpu] = NULL;
drivers/md/dm-ps-io-affinity.c
48
unsigned int cpu;
drivers/md/dm-ps-io-affinity.c
79
for_each_cpu(cpu, pi->cpumask) {
drivers/md/dm-ps-io-affinity.c
80
if (cpu >= nr_cpu_ids) {
drivers/md/dm-ps-io-affinity.c
82
cpu, nr_cpu_ids);
drivers/md/dm-ps-io-affinity.c
86
if (s->path_map[cpu]) {
drivers/md/dm-ps-io-affinity.c
87
DMWARN("CPU mapping for %u exists. Ignoring.", cpu);
drivers/md/dm-ps-io-affinity.c
91
cpumask_set_cpu(cpu, s->path_mask);
drivers/md/dm-ps-io-affinity.c
92
s->path_map[cpu] = pi;
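dm-ps-io-affinity maps each CPU to at most one path: setup validates the mask and fills s->path_map[cpu], and the selector looks the submitting CPU up at I/O time. A sketch of the selection step, assuming the driver-local struct selector and struct path_info from the listing; the NUMA-node fallback is a guess at what the cpu_to_node() call above supports:

    static struct path_info *ioa_select(struct selector *s)
    {
        unsigned int cpu = get_cpu();   /* pin while we look up */
        struct path_info *pi = s->path_map[cpu];

        if (!pi) {
            unsigned int other;

            /* fall back to any mapped CPU on the same node */
            for_each_cpu(other, cpumask_of_node(cpu_to_node(cpu))) {
                pi = s->path_map[other];
                if (pi)
                    break;
            }
        }
        put_cpu();
        return pi;
    }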
drivers/md/dm-stats.c
174
int cpu;
drivers/md/dm-stats.c
180
for_each_possible_cpu(cpu) {
drivers/md/dm-stats.c
181
dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
drivers/md/dm-stats.c
182
dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
drivers/md/dm-stats.c
196
int cpu;
drivers/md/dm-stats.c
206
for_each_possible_cpu(cpu) {
drivers/md/dm-stats.c
207
last = per_cpu_ptr(stats->last, cpu);
drivers/md/dm-stats.c
277
int cpu;
drivers/md/dm-stats.c
366
for_each_possible_cpu(cpu) {
drivers/md/dm-stats.c
367
p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
drivers/md/dm-stats.c
372
s->stat_percpu[cpu] = p;
drivers/md/dm-stats.c
376
hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
drivers/md/dm-stats.c
453
int cpu;
drivers/md/dm-stats.c
472
for_each_possible_cpu(cpu)
drivers/md/dm-stats.c
474
is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
drivers/md/dm-stats.c
723
int cpu;
drivers/md/dm-stats.c
747
for_each_possible_cpu(cpu) {
drivers/md/dm-stats.c
748
p = &s->stat_percpu[cpu][x];
drivers/md/dm.c
2769
int cpu;
drivers/md/dm.c
2772
for_each_possible_cpu(cpu)
drivers/md/dm.c
2773
sum += *per_cpu_ptr(md->pending_io, cpu);
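dm.c's pending-I/O counter is the textbook per-CPU counter: each CPU increments its own slot locklessly, and a reader sums over all possible CPUs. Reassembled from the three lines above (md->pending_io being an alloc_percpu()'d unsigned int):

    static unsigned int dm_pending_io_total(struct mapped_device *md)
    {
        unsigned int sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
            sum += *per_cpu_ptr(md->pending_io, cpu);
        return sum;
    }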
drivers/md/raid5.c
184
int i, cpu = sh->cpu;
drivers/md/raid5.c
186
if (!cpu_online(cpu)) {
drivers/md/raid5.c
187
cpu = cpumask_any(cpu_online_mask);
drivers/md/raid5.c
188
sh->cpu = cpu;
drivers/md/raid5.c
193
group = conf->worker_groups + cpu_to_group(cpu);
drivers/md/raid5.c
207
group = conf->worker_groups + cpu_to_group(sh->cpu);
drivers/md/raid5.c
211
queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
drivers/md/raid5.c
218
queue_work_on(sh->cpu, raid5_wq,
drivers/md/raid5.c
2477
unsigned long cpu;
drivers/md/raid5.c
2488
for_each_present_cpu(cpu) {
drivers/md/raid5.c
2491
percpu = per_cpu_ptr(conf->percpu, cpu);
drivers/md/raid5.c
5584
!cpu_online(tmp->cpu) ||
drivers/md/raid5.c
5585
cpu_to_group(tmp->cpu) == group) {
drivers/md/raid5.c
618
sh->cpu = smp_processor_id();
drivers/md/raid5.c
65
#define cpu_to_group(cpu) cpu_to_node(cpu)
drivers/md/raid5.c
7354
static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node)
drivers/md/raid5.c
7358
free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
drivers/md/raid5.c
7394
static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
drivers/md/raid5.c
7397
struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
drivers/md/raid5.c
7401
__func__, cpu);
drivers/md/raid5.h
223
int cpu;
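raid5 stamps each stripe with the CPU that submitted it (sh->cpu = smp_processor_id()) and later queues work back on that CPU, falling back to any online CPU after a hotplug event. The dispatch step, reassembled from the lines above; the helper name is hypothetical, and cpu_to_group() is raid5.c's alias for cpu_to_node():

    static void raid5_dispatch(struct r5conf *conf, struct stripe_head *sh)
    {
        struct r5worker_group *group;

        if (!cpu_online(sh->cpu))
            sh->cpu = cpumask_any(cpu_online_mask);

        group = conf->worker_groups + cpu_to_group(sh->cpu);
        queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
    }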
drivers/media/common/saa7146/saa7146_core.c
215
if (NULL == pt->cpu)
drivers/media/common/saa7146/saa7146_core.c
217
dma_free_coherent(&pci->dev, pt->size, pt->cpu, pt->dma);
drivers/media/common/saa7146/saa7146_core.c
218
pt->cpu = NULL;
drivers/media/common/saa7146/saa7146_core.c
223
__le32 *cpu;
drivers/media/common/saa7146/saa7146_core.c
226
cpu = dma_alloc_coherent(&pci->dev, PAGE_SIZE, &dma_addr, GFP_KERNEL);
drivers/media/common/saa7146/saa7146_core.c
227
if (NULL == cpu) {
drivers/media/common/saa7146/saa7146_core.c
231
pt->cpu = cpu;
drivers/media/common/saa7146/saa7146_core.c
253
ptr = pt->cpu;
drivers/media/common/saa7146/saa7146_vbi.c
10
u32 *cpu;
drivers/media/common/saa7146/saa7146_vbi.c
126
dma_free_coherent(&dev->pci->dev, 4096, cpu, dma_addr);
drivers/media/common/saa7146/saa7146_vbi.c
131
dma_free_coherent(&dev->pci->dev, 4096, cpu, dma_addr);
drivers/media/common/saa7146/saa7146_vbi.c
25
cpu = dma_alloc_coherent(&dev->pci->dev, 4096, &dma_addr, GFP_KERNEL);
drivers/media/common/saa7146/saa7146_vbi.c
26
if (NULL == cpu)
drivers/media/common/saa7146/saa7146_video.c
137
ptr1 = pt1->cpu;
drivers/media/common/saa7146/saa7146_video.c
138
ptr2 = pt2->cpu;
drivers/media/common/saa7146/saa7146_video.c
139
ptr3 = pt3->cpu;
drivers/media/common/saa7146/saa7146_video.c
151
ptr1 = pt1->cpu;
drivers/media/common/saa7146/saa7146_video.c
158
ptr1 = pt1->cpu;
drivers/media/common/saa7146/saa7146_video.c
165
ptr1 = pt1->cpu + m1;
drivers/media/common/saa7146/saa7146_video.c
166
fill = pt1->cpu[m1];
drivers/media/pci/bt8xx/btcx-risc.c
44
if (NULL == risc->cpu)
drivers/media/pci/bt8xx/btcx-risc.c
51
dma_free_coherent(&pci->dev, risc->size, risc->cpu, risc->dma);
drivers/media/pci/bt8xx/btcx-risc.c
59
__le32 *cpu;
drivers/media/pci/bt8xx/btcx-risc.c
62
if (NULL != risc->cpu && risc->size < size)
drivers/media/pci/bt8xx/btcx-risc.c
64
if (NULL == risc->cpu) {
drivers/media/pci/bt8xx/btcx-risc.c
65
cpu = dma_alloc_coherent(&pci->dev, size, &dma, GFP_KERNEL);
drivers/media/pci/bt8xx/btcx-risc.c
66
if (NULL == cpu)
drivers/media/pci/bt8xx/btcx-risc.c
68
risc->cpu = cpu;
drivers/media/pci/bt8xx/btcx-risc.c
74
memcnt, (unsigned long)dma, cpu, size);
drivers/media/pci/bt8xx/btcx-risc.h
4
__le32 *cpu;
drivers/media/pci/bt8xx/bttv-driver.c
2564
btv->c.v4l2_dev.name, risc->cpu, (unsigned long)risc->dma);
drivers/media/pci/bt8xx/bttv-driver.c
2569
n = bttv_risc_decode(le32_to_cpu(risc->cpu[i]));
drivers/media/pci/bt8xx/bttv-driver.c
2574
risc->cpu[i+j], j);
drivers/media/pci/bt8xx/bttv-driver.c
2575
if (0 == risc->cpu[i])
drivers/media/pci/bt8xx/bttv-driver.c
2637
(unsigned long)le32_to_cpu(btv->main.cpu[RISC_SLOT_O_VBI+1]),
drivers/media/pci/bt8xx/bttv-driver.c
2638
(unsigned long)le32_to_cpu(btv->main.cpu[RISC_SLOT_O_FIELD+1]),
drivers/media/pci/bt8xx/bttv-risc.c
109
WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
drivers/media/pci/bt8xx/bttv-risc.c
141
rp = risc->cpu;
drivers/media/pci/bt8xx/bttv-risc.c
230
WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
drivers/media/pci/bt8xx/bttv-risc.c
371
btv->main.cpu[RISC_SLOT_LOOP] = cpu_to_le32(cmd);
drivers/media/pci/bt8xx/bttv-risc.c
451
btv->main.cpu[0] = cpu_to_le32(BT848_RISC_SYNC | BT848_RISC_RESYNC |
drivers/media/pci/bt8xx/bttv-risc.c
453
btv->main.cpu[1] = cpu_to_le32(0);
drivers/media/pci/bt8xx/bttv-risc.c
454
btv->main.cpu[2] = cpu_to_le32(BT848_RISC_JUMP);
drivers/media/pci/bt8xx/bttv-risc.c
455
btv->main.cpu[3] = cpu_to_le32(btv->main.dma + (4<<2));
drivers/media/pci/bt8xx/bttv-risc.c
458
btv->main.cpu[4] = cpu_to_le32(BT848_RISC_JUMP);
drivers/media/pci/bt8xx/bttv-risc.c
459
btv->main.cpu[5] = cpu_to_le32(btv->main.dma + (6<<2));
drivers/media/pci/bt8xx/bttv-risc.c
460
btv->main.cpu[6] = cpu_to_le32(BT848_RISC_JUMP);
drivers/media/pci/bt8xx/bttv-risc.c
461
btv->main.cpu[7] = cpu_to_le32(btv->main.dma + (8<<2));
drivers/media/pci/bt8xx/bttv-risc.c
463
btv->main.cpu[8] = cpu_to_le32(BT848_RISC_SYNC | BT848_RISC_RESYNC |
drivers/media/pci/bt8xx/bttv-risc.c
465
btv->main.cpu[9] = cpu_to_le32(0);
drivers/media/pci/bt8xx/bttv-risc.c
468
btv->main.cpu[10] = cpu_to_le32(BT848_RISC_JUMP);
drivers/media/pci/bt8xx/bttv-risc.c
469
btv->main.cpu[11] = cpu_to_le32(btv->main.dma + (12<<2));
drivers/media/pci/bt8xx/bttv-risc.c
470
btv->main.cpu[12] = cpu_to_le32(BT848_RISC_JUMP);
drivers/media/pci/bt8xx/bttv-risc.c
471
btv->main.cpu[13] = cpu_to_le32(btv->main.dma + (14<<2));
drivers/media/pci/bt8xx/bttv-risc.c
474
btv->main.cpu[14] = cpu_to_le32(BT848_RISC_JUMP);
drivers/media/pci/bt8xx/bttv-risc.c
475
btv->main.cpu[15] = cpu_to_le32(btv->main.dma + (0<<2));
drivers/media/pci/bt8xx/bttv-risc.c
489
btv->main.cpu[slot+1] = cpu_to_le32(next);
drivers/media/pci/bt8xx/bttv-risc.c
502
btv->main.cpu[slot+1] = cpu_to_le32(risc->dma);
drivers/media/pci/bt8xx/bttv-risc.c
58
rp = risc->cpu;
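bttv's main.cpu[] is a DMA program, not processor state: an array of little-endian 32-bit RISC instruction words, so every store goes through cpu_to_le32(), and a jump is retargeted by rewriting the word after the opcode slot. A sketch of the slot patching seen above; BT848_RISC_JUMP is the real opcode, the helper is illustrative:

    static void risc_set_jump(__le32 *prog, unsigned int slot, dma_addr_t target)
    {
        prog[slot]     = cpu_to_le32(BT848_RISC_JUMP);
        prog[slot + 1] = cpu_to_le32(target);   /* e.g. main.cpu[slot+1] above */
    }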
drivers/media/pci/cx23885/cx23885-alsa.c
269
dma_free_coherent(&chip->pci->dev, risc->size, risc->cpu, risc->dma);
drivers/media/pci/cx23885/cx23885-core.c
1221
risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
drivers/media/pci/cx23885/cx23885-core.c
1223
if (risc->cpu == NULL)
drivers/media/pci/cx23885/cx23885-core.c
1227
rp = risc->cpu;
drivers/media/pci/cx23885/cx23885-core.c
1237
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
drivers/media/pci/cx23885/cx23885-core.c
1259
risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
drivers/media/pci/cx23885/cx23885-core.c
1261
if (risc->cpu == NULL)
drivers/media/pci/cx23885/cx23885-core.c
1265
rp = risc->cpu;
drivers/media/pci/cx23885/cx23885-core.c
1271
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
drivers/media/pci/cx23885/cx23885-core.c
1298
risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
drivers/media/pci/cx23885/cx23885-core.c
1300
if (risc->cpu == NULL)
drivers/media/pci/cx23885/cx23885-core.c
1303
rp = risc->cpu;
drivers/media/pci/cx23885/cx23885-core.c
1319
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
drivers/media/pci/cx23885/cx23885-core.c
1328
if (risc->cpu)
drivers/media/pci/cx23885/cx23885-core.c
1329
dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu, risc->dma);
drivers/media/pci/cx23885/cx23885-core.c
1623
buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
drivers/media/pci/cx23885/cx23885-core.c
1634
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
drivers/media/pci/cx23885/cx23885-core.c
595
dev->name, risc->cpu, (unsigned long)risc->dma);
drivers/media/pci/cx23885/cx23885-core.c
598
n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
drivers/media/pci/cx23885/cx23885-core.c
601
dev->name, i + j, risc->cpu[i + j], j);
drivers/media/pci/cx23885/cx23885-core.c
602
if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
drivers/media/pci/cx23885/cx23885-vbi.c
192
buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
drivers/media/pci/cx23885/cx23885-vbi.c
205
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
drivers/media/pci/cx23885/cx23885-video.c
467
buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
drivers/media/pci/cx23885/cx23885-video.c
478
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
drivers/media/pci/cx23885/cx23885.h
166
__le32 *cpu;
drivers/media/pci/cx25821/cx25821-alsa.c
405
dma_free_coherent(&chip->pci->dev, risc->size, risc->cpu, risc->dma);
drivers/media/pci/cx25821/cx25821-core.c
1081
rp = risc->cpu;
drivers/media/pci/cx25821/cx25821-core.c
1095
BUG_ON((risc->jmp - risc->cpu + 3) * sizeof(*risc->cpu) > risc->size);
drivers/media/pci/cx25821/cx25821-core.c
1184
rp = risc->cpu;
drivers/media/pci/cx25821/cx25821-core.c
1190
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
drivers/media/pci/cx25821/cx25821-core.c
1199
dma_free_coherent(&dev->pci->dev, buf->risc.size, buf->risc.cpu,
drivers/media/pci/cx25821/cx25821-core.c
970
__le32 *cpu;
drivers/media/pci/cx25821/cx25821-core.c
973
if (risc->cpu && risc->size < size) {
drivers/media/pci/cx25821/cx25821-core.c
974
dma_free_coherent(&pci->dev, risc->size, risc->cpu, risc->dma);
drivers/media/pci/cx25821/cx25821-core.c
975
risc->cpu = NULL;
drivers/media/pci/cx25821/cx25821-core.c
977
if (NULL == risc->cpu) {
drivers/media/pci/cx25821/cx25821-core.c
978
cpu = dma_alloc_coherent(&pci->dev, size, &dma, GFP_KERNEL);
drivers/media/pci/cx25821/cx25821-core.c
979
if (NULL == cpu)
drivers/media/pci/cx25821/cx25821-core.c
981
risc->cpu = cpu;
drivers/media/pci/cx25821/cx25821-video.c
245
buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
drivers/media/pci/cx25821/cx25821-video.c
253
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
drivers/media/pci/cx25821/cx25821.h
105
__le32 *cpu;
drivers/media/pci/cx88/cx88-alsa.c
359
if (risc->cpu)
drivers/media/pci/cx88/cx88-alsa.c
360
dma_free_coherent(&chip->pci->dev, risc->size, risc->cpu,
drivers/media/pci/cx88/cx88-blackbird.c
687
if (risc->cpu)
drivers/media/pci/cx88/cx88-blackbird.c
688
dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu,
drivers/media/pci/cx88/cx88-core.c
155
risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
drivers/media/pci/cx88/cx88-core.c
157
if (!risc->cpu)
drivers/media/pci/cx88/cx88-core.c
161
rp = risc->cpu;
drivers/media/pci/cx88/cx88-core.c
172
WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
drivers/media/pci/cx88/cx88-core.c
194
risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
drivers/media/pci/cx88/cx88-core.c
196
if (!risc->cpu)
drivers/media/pci/cx88/cx88-core.c
200
rp = risc->cpu;
drivers/media/pci/cx88/cx88-core.c
206
WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
drivers/media/pci/cx88/cx88-dvb.c
105
if (risc->cpu)
drivers/media/pci/cx88/cx88-dvb.c
106
dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu,
drivers/media/pci/cx88/cx88-mpeg.c
231
if (risc->cpu)
drivers/media/pci/cx88/cx88-mpeg.c
233
risc->cpu, risc->dma);
drivers/media/pci/cx88/cx88-mpeg.c
248
buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
drivers/media/pci/cx88/cx88-mpeg.c
259
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
drivers/media/pci/cx88/cx88-vbi.c
160
if (risc->cpu)
drivers/media/pci/cx88/cx88-vbi.c
161
dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu,
drivers/media/pci/cx88/cx88-vbi.c
175
buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
drivers/media/pci/cx88/cx88-vbi.c
185
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
drivers/media/pci/cx88/cx88-video.c
495
if (risc->cpu)
drivers/media/pci/cx88/cx88-video.c
496
dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu,
drivers/media/pci/cx88/cx88-video.c
510
buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
drivers/media/pci/cx88/cx88-video.c
520
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
drivers/media/pci/cx88/cx88.h
308
__le32 *cpu;
drivers/media/pci/saa7134/saa7134-core.c
219
__le32 *cpu;
drivers/media/pci/saa7134/saa7134-core.c
222
cpu = dma_alloc_coherent(&pci->dev, SAA7134_PGTABLE_SIZE, &dma_addr,
drivers/media/pci/saa7134/saa7134-core.c
224
if (NULL == cpu)
drivers/media/pci/saa7134/saa7134-core.c
227
pt->cpu = cpu;
drivers/media/pci/saa7134/saa7134-core.c
239
BUG_ON(NULL == pt || NULL == pt->cpu);
drivers/media/pci/saa7134/saa7134-core.c
241
ptr = pt->cpu + startpage;
drivers/media/pci/saa7134/saa7134-core.c
252
if (NULL == pt->cpu)
drivers/media/pci/saa7134/saa7134-core.c
254
dma_free_coherent(&pci->dev, pt->size, pt->cpu, pt->dma);
drivers/media/pci/saa7134/saa7134-core.c
255
pt->cpu = NULL;
drivers/media/pci/saa7134/saa7134.h
449
__le32 *cpu;
drivers/media/pci/saa7164/saa7164-buffer.c
104
buf->cpu, (long)buf->dma, buf->pci_size);
drivers/media/pci/saa7164/saa7164-buffer.c
120
dma_free_coherent(&port->dev->pci->dev, buf->pci_size, buf->cpu,
drivers/media/pci/saa7164/saa7164-buffer.c
144
dma_free_coherent(&dev->pci->dev, buf->pci_size, buf->cpu, buf->dma);
drivers/media/pci/saa7164/saa7164-buffer.c
86
buf->cpu = dma_alloc_coherent(&port->dev->pci->dev, buf->pci_size,
drivers/media/pci/saa7164/saa7164-buffer.c
88
if (!buf->cpu)
drivers/media/pci/saa7164/saa7164-buffer.c
97
memset(buf->cpu, 0xff, buf->pci_size);
drivers/media/pci/saa7164/saa7164-buffer.c
98
buf->crc = crc32(0, buf->cpu, buf->actual_size);
drivers/media/pci/saa7164/saa7164-core.c
111
u8 *bufcpu = (u8 *)buf->cpu;
drivers/media/pci/saa7164/saa7164-core.c
271
buf->crc = crc32(0, buf->cpu, buf->actual_size);
drivers/media/pci/saa7164/saa7164-core.c
275
p = (u8 *)buf->cpu;
drivers/media/pci/saa7164/saa7164-core.c
310
memcpy(ubuf->data, buf->cpu, ubuf->actual_size);
drivers/media/pci/saa7164/saa7164-core.c
337
memset(buf->cpu, 0xff, buf->pci_size);
drivers/media/pci/saa7164/saa7164-core.c
340
buf->crc = crc32(0, buf->cpu, buf->actual_size);
drivers/media/pci/saa7164/saa7164-core.c
513
dvb_dmx_swfilter_packets(&port->dvb.demux, (u8 *)buf->cpu,
drivers/media/pci/saa7164/saa7164-core.c
86
u8 *p = (u8 *)buf->cpu;
drivers/media/pci/saa7164/saa7164.h
317
u64 *cpu; /* Virtual address */
drivers/media/pci/tw68/tw68-risc.c
154
buf->cpu = dma_alloc_coherent(&pci->dev, buf->size, &buf->dma,
drivers/media/pci/tw68/tw68-risc.c
156
if (buf->cpu == NULL)
drivers/media/pci/tw68/tw68-risc.c
160
rp = buf->cpu;
drivers/media/pci/tw68/tw68-risc.c
170
buf->cpu[1] = cpu_to_le32(buf->dma + 8);
drivers/media/pci/tw68/tw68-risc.c
172
BUG_ON((buf->jmp - buf->cpu + 2) * sizeof(buf->cpu[0]) > buf->size);
drivers/media/pci/tw68/tw68-risc.c
219
core->name, buf, buf->cpu, buf->jmp);
drivers/media/pci/tw68/tw68-risc.c
220
for (addr = buf->cpu; addr <= buf->jmp; addr += 2)
drivers/media/pci/tw68/tw68-video.c
421
buf->cpu[0] |= cpu_to_le32(RISC_INT_BIT);
drivers/media/pci/tw68/tw68-video.c
490
if (buf->cpu)
drivers/media/pci/tw68/tw68-video.c
491
dma_free_coherent(&dev->pci->dev, buf->size, buf->cpu, buf->dma);
drivers/media/pci/tw68/tw68.h
116
__le32 *cpu;
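Throughout the saa7146, bt8xx, cx23885, cx25821, cx88, saa7134, saa7164 and tw68 entries above, cpu is the CPU-side (kernel virtual) address of a coherent DMA buffer, paired with a dma bus address for the device. A sketch of the recurring alloc/free shape, with an illustrative struct standing in for the drivers' various riscmem/pgtable types; dma_alloc_coherent() and dma_free_coherent() are the real API:

    struct risc_buf {
        __le32     *cpu;   /* what the kernel reads and writes */
        dma_addr_t  dma;   /* what the device is given */
        size_t      size;
    };

    static int risc_buf_alloc(struct device *dev, struct risc_buf *buf, size_t size)
    {
        buf->cpu = dma_alloc_coherent(dev, size, &buf->dma, GFP_KERNEL);
        if (!buf->cpu)
            return -ENOMEM;
        buf->size = size;
        return 0;
    }

    static void risc_buf_free(struct device *dev, struct risc_buf *buf)
    {
        if (!buf->cpu)
            return;
        dma_free_coherent(dev, buf->size, buf->cpu, buf->dma);
        buf->cpu = NULL;   /* make teardown idempotent */
    }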
drivers/media/platform/rockchip/rkvdec/rkvdec-h264.c
116
struct rkvdec_h264_priv_tbl *priv_tbl = h264_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-h264.c
390
h264_ctx->priv_tbl.cpu = priv_tbl;
drivers/media/platform/rockchip/rkvdec/rkvdec-h264.c
408
h264_ctx->priv_tbl.cpu, h264_ctx->priv_tbl.dma);
drivers/media/platform/rockchip/rkvdec/rkvdec-h264.c
418
struct rkvdec_h264_priv_tbl *tbl = h264_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-hevc.c
146
struct rkvdec_hevc_priv_tbl *priv_tbl = hevc_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-hevc.c
319
struct rkvdec_hevc_priv_tbl *priv_tbl = hevc_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-hevc.c
543
hevc_ctx->priv_tbl.cpu = priv_tbl;
drivers/media/platform/rockchip/rkvdec/rkvdec-hevc.c
557
hevc_ctx->priv_tbl.cpu, hevc_ctx->priv_tbl.dma);
drivers/media/platform/rockchip/rkvdec/rkvdec-hevc.c
566
struct rkvdec_hevc_priv_tbl *tbl = hevc_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
112
void *cpu = NULL;
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
122
cpu = gen_pool_dma_zalloc_align(ctx->dev->sram_pool,
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
129
if (cpu && rkvdec->iommu_domain) {
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
130
unsigned long virt_addr = (unsigned long)cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
137
(unsigned long)cpu,
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
139
cpu = NULL;
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
152
if (!cpu) {
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
153
cpu = dma_alloc_coherent(ctx->dev->dev,
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
160
if (!cpu) {
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
165
cfg->rcb_bufs[i].cpu = cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
56
if (!cfg->rcb_bufs[i].cpu)
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
61
virt_addr = (unsigned long)cfg->rcb_bufs[i].cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-rcb.c
70
cfg->rcb_bufs[i].cpu,
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu381-h264.c
398
h264_ctx->priv_tbl.cpu = priv_tbl;
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu381-h264.c
416
h264_ctx->priv_tbl.cpu, h264_ctx->priv_tbl.dma);
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu381-h264.c
425
struct rkvdec_h264_priv_tbl *tbl = h264_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu381-h264.c
94
struct rkvdec_h264_priv_tbl *priv_tbl = h264_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu381-hevc.c
130
struct rkvdec_hevc_priv_tbl *priv_tbl = hevc_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu381-hevc.c
566
hevc_ctx->priv_tbl.cpu = priv_tbl;
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu381-hevc.c
584
hevc_ctx->priv_tbl.cpu, hevc_ctx->priv_tbl.dma);
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu381-hevc.c
593
struct rkvdec_hevc_priv_tbl *tbl = hevc_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu383-h264.c
202
struct rkvdec_h264_priv_tbl *priv_tbl = h264_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu383-h264.c
466
h264_ctx->priv_tbl.cpu = priv_tbl;
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu383-h264.c
485
h264_ctx->priv_tbl.cpu, h264_ctx->priv_tbl.dma);
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu383-h264.c
495
struct rkvdec_h264_priv_tbl *tbl = h264_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu383-hevc.c
226
struct rkvdec_hevc_priv_tbl *priv_tbl = h264_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu383-hevc.c
574
hevc_ctx->priv_tbl.cpu = priv_tbl;
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu383-hevc.c
592
hevc_ctx->priv_tbl.cpu, hevc_ctx->priv_tbl.dma);
drivers/media/platform/rockchip/rkvdec/rkvdec-vdpu383-hevc.c
601
struct rkvdec_hevc_priv_tbl *tbl = hevc_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
1000
vp9_ctx->count_tbl.cpu = count_tbl;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
1007
vp9_ctx->priv_tbl.cpu, vp9_ctx->priv_tbl.dma);
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
1020
vp9_ctx->count_tbl.cpu, vp9_ctx->count_tbl.dma);
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
1022
vp9_ctx->priv_tbl.cpu, vp9_ctx->priv_tbl.dma);
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
194
struct rkvdec_vp9_priv_tbl *tbl = vp9_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
244
struct rkvdec_vp9_priv_tbl *tbl = vp9_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
316
struct rkvdec_vp9_priv_tbl *tbl = vp9_ctx->priv_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
536
memset(vp9_ctx->count_tbl.cpu, 0, vp9_ctx->count_tbl.size);
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
876
inter_cnts = vp9_ctx->count_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
900
struct rkvdec_vp9_intra_frame_symbol_counts *intra_cnts = vp9_ctx->count_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
901
struct rkvdec_vp9_inter_frame_symbol_counts *inter_cnts = vp9_ctx->count_tbl.cpu;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
990
vp9_ctx->priv_tbl.cpu = priv_tbl;
drivers/media/platform/rockchip/rkvdec/rkvdec.h
173
void *cpu;
drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c
87
hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, q);
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
449
if (V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
450
memset(ctx->vp8_dec.segment_map.cpu, 0,
drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
17
u16 *p = (u16 *)((u8 *)ctx->hevc_dec.tile_sizes.cpu);
drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
547
u8 *p = ((u8 *)ctx->hevc_dec.scaling_lists.cpu);
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
320
tile_mem = misc->cpu + vp9_ctx->tile_info_offset;
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
636
struct hantro_g2_all_probs *all_probs = misc->cpu;
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
759
memset(segment_map->cpu, 0, segment_map->size);
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
960
hantro_cnts = vp9_ctx->misc.cpu + vp9_ctx->ctx_counters_offset;
drivers/media/platform/verisilicon/hantro_h264.c
210
struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
drivers/media/platform/verisilicon/hantro_h264.c
237
struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
drivers/media/platform/verisilicon/hantro_h264.c
501
dma_free_coherent(vpu->dev, priv->size, priv->cpu, priv->dma);
drivers/media/platform/verisilicon/hantro_h264.c
511
priv->cpu = dma_alloc_coherent(vpu->dev, sizeof(*tbl), &priv->dma,
drivers/media/platform/verisilicon/hantro_h264.c
513
if (!priv->cpu)
drivers/media/platform/verisilicon/hantro_h264.c
517
tbl = priv->cpu;
drivers/media/platform/verisilicon/hantro_hevc.c
100
hevc_dec->tile_sao.cpu,
drivers/media/platform/verisilicon/hantro_hevc.c
102
hevc_dec->tile_sao.cpu = NULL;
drivers/media/platform/verisilicon/hantro_hevc.c
105
if (hevc_dec->tile_bsd.cpu) {
drivers/media/platform/verisilicon/hantro_hevc.c
107
hevc_dec->tile_bsd.cpu,
drivers/media/platform/verisilicon/hantro_hevc.c
109
hevc_dec->tile_bsd.cpu = NULL;
drivers/media/platform/verisilicon/hantro_hevc.c
113
hevc_dec->tile_filter.cpu = dma_alloc_coherent(vpu->dev, size,
drivers/media/platform/verisilicon/hantro_hevc.c
116
if (!hevc_dec->tile_filter.cpu)
drivers/media/platform/verisilicon/hantro_hevc.c
121
hevc_dec->tile_sao.cpu = dma_alloc_coherent(vpu->dev, size,
drivers/media/platform/verisilicon/hantro_hevc.c
124
if (!hevc_dec->tile_sao.cpu)
drivers/media/platform/verisilicon/hantro_hevc.c
129
hevc_dec->tile_bsd.cpu = dma_alloc_coherent(vpu->dev, size,
drivers/media/platform/verisilicon/hantro_hevc.c
132
if (!hevc_dec->tile_bsd.cpu)
drivers/media/platform/verisilicon/hantro_hevc.c
141
if (hevc_dec->tile_sao.cpu)
drivers/media/platform/verisilicon/hantro_hevc.c
143
hevc_dec->tile_sao.cpu,
drivers/media/platform/verisilicon/hantro_hevc.c
145
hevc_dec->tile_sao.cpu = NULL;
drivers/media/platform/verisilicon/hantro_hevc.c
148
if (hevc_dec->tile_filter.cpu)
drivers/media/platform/verisilicon/hantro_hevc.c
150
hevc_dec->tile_filter.cpu,
drivers/media/platform/verisilicon/hantro_hevc.c
152
hevc_dec->tile_filter.cpu = NULL;
drivers/media/platform/verisilicon/hantro_hevc.c
220
if (hevc_dec->tile_sizes.cpu)
drivers/media/platform/verisilicon/hantro_hevc.c
222
hevc_dec->tile_sizes.cpu,
drivers/media/platform/verisilicon/hantro_hevc.c
224
hevc_dec->tile_sizes.cpu = NULL;
drivers/media/platform/verisilicon/hantro_hevc.c
226
if (hevc_dec->scaling_lists.cpu)
drivers/media/platform/verisilicon/hantro_hevc.c
228
hevc_dec->scaling_lists.cpu,
drivers/media/platform/verisilicon/hantro_hevc.c
230
hevc_dec->scaling_lists.cpu = NULL;
drivers/media/platform/verisilicon/hantro_hevc.c
232
if (hevc_dec->tile_filter.cpu)
drivers/media/platform/verisilicon/hantro_hevc.c
234
hevc_dec->tile_filter.cpu,
drivers/media/platform/verisilicon/hantro_hevc.c
236
hevc_dec->tile_filter.cpu = NULL;
drivers/media/platform/verisilicon/hantro_hevc.c
238
if (hevc_dec->tile_sao.cpu)
drivers/media/platform/verisilicon/hantro_hevc.c
240
hevc_dec->tile_sao.cpu,
drivers/media/platform/verisilicon/hantro_hevc.c
242
hevc_dec->tile_sao.cpu = NULL;
drivers/media/platform/verisilicon/hantro_hevc.c
244
if (hevc_dec->tile_bsd.cpu)
drivers/media/platform/verisilicon/hantro_hevc.c
246
hevc_dec->tile_bsd.cpu,
drivers/media/platform/verisilicon/hantro_hevc.c
248
hevc_dec->tile_bsd.cpu = NULL;
drivers/media/platform/verisilicon/hantro_hevc.c
265
hevc_dec->tile_sizes.cpu = dma_alloc_coherent(vpu->dev, size,
drivers/media/platform/verisilicon/hantro_hevc.c
268
if (!hevc_dec->tile_sizes.cpu)
drivers/media/platform/verisilicon/hantro_hevc.c
273
hevc_dec->scaling_lists.cpu = dma_alloc_coherent(vpu->dev, SCALING_LIST_SIZE,
drivers/media/platform/verisilicon/hantro_hevc.c
276
if (!hevc_dec->scaling_lists.cpu)
drivers/media/platform/verisilicon/hantro_hevc.c
91
if (hevc_dec->tile_filter.cpu) {
drivers/media/platform/verisilicon/hantro_hevc.c
93
hevc_dec->tile_filter.cpu,
drivers/media/platform/verisilicon/hantro_hevc.c
95
hevc_dec->tile_filter.cpu = NULL;
drivers/media/platform/verisilicon/hantro_hevc.c
98
if (hevc_dec->tile_sao.cpu) {
drivers/media/platform/verisilicon/hantro_hw.h
66
void *cpu;
drivers/media/platform/verisilicon/hantro_mpeg2.c
43
ctx->mpeg2_dec.qtable.cpu =
drivers/media/platform/verisilicon/hantro_mpeg2.c
48
if (!ctx->mpeg2_dec.qtable.cpu)
drivers/media/platform/verisilicon/hantro_mpeg2.c
59
ctx->mpeg2_dec.qtable.cpu,
drivers/media/platform/verisilicon/hantro_postproc.c
191
if (priv->cpu) {
drivers/media/platform/verisilicon/hantro_postproc.c
192
dma_free_attrs(vpu->dev, priv->size, priv->cpu,
drivers/media/platform/verisilicon/hantro_postproc.c
194
priv->cpu = NULL;
drivers/media/platform/verisilicon/hantro_postproc.c
238
priv->cpu = dma_alloc_attrs(vpu->dev, buf_size, &priv->dma,
drivers/media/platform/verisilicon/hantro_postproc.c
240
if (!priv->cpu)
drivers/media/platform/verisilicon/hantro_postproc.c
274
if (priv->size < buf_size && priv->cpu) {
drivers/media/platform/verisilicon/hantro_postproc.c
276
dma_free_attrs(vpu->dev, priv->size, priv->cpu,
drivers/media/platform/verisilicon/hantro_postproc.c
278
priv->cpu = NULL;
drivers/media/platform/verisilicon/hantro_postproc.c
281
if (!priv->cpu) {
drivers/media/platform/verisilicon/hantro_postproc.c
288
if (!priv->cpu)
drivers/media/platform/verisilicon/hantro_vp8.c
111
dst = ctx->vp8_dec.prob_tbl.cpu;
drivers/media/platform/verisilicon/hantro_vp8.c
126
dst = ctx->vp8_dec.prob_tbl.cpu;
drivers/media/platform/verisilicon/hantro_vp8.c
164
aux_buf->cpu = dma_alloc_coherent(vpu->dev, aux_buf->size,
drivers/media/platform/verisilicon/hantro_vp8.c
166
if (!aux_buf->cpu)
drivers/media/platform/verisilicon/hantro_vp8.c
175
aux_buf->cpu = dma_alloc_coherent(vpu->dev, aux_buf->size,
drivers/media/platform/verisilicon/hantro_vp8.c
177
if (!aux_buf->cpu) {
drivers/media/platform/verisilicon/hantro_vp8.c
186
ctx->vp8_dec.segment_map.cpu,
drivers/media/platform/verisilicon/hantro_vp8.c
198
vp8_dec->segment_map.cpu, vp8_dec->segment_map.dma);
drivers/media/platform/verisilicon/hantro_vp8.c
200
vp8_dec->prob_tbl.cpu, vp8_dec->prob_tbl.dma);
drivers/media/platform/verisilicon/hantro_vp8.c
57
dst = ctx->vp8_dec.prob_tbl.cpu;
drivers/media/platform/verisilicon/hantro_vp9.c
120
struct symbol_counts *cnts = vp9_ctx->misc.cpu + vp9_ctx->ctx_counters_offset;
drivers/media/platform/verisilicon/hantro_vp9.c
185
tile_edge->cpu = dma_alloc_coherent(vpu->dev, size, &tile_edge->dma, GFP_KERNEL);
drivers/media/platform/verisilicon/hantro_vp9.c
186
if (!tile_edge->cpu)
drivers/media/platform/verisilicon/hantro_vp9.c
190
memset(tile_edge->cpu, 0, size);
drivers/media/platform/verisilicon/hantro_vp9.c
196
segment_map->cpu = dma_alloc_coherent(vpu->dev, size, &segment_map->dma, GFP_KERNEL);
drivers/media/platform/verisilicon/hantro_vp9.c
197
if (!segment_map->cpu)
drivers/media/platform/verisilicon/hantro_vp9.c
201
memset(segment_map->cpu, 0, size);
drivers/media/platform/verisilicon/hantro_vp9.c
209
misc->cpu = dma_alloc_coherent(vpu->dev, size, &misc->dma, GFP_KERNEL);
drivers/media/platform/verisilicon/hantro_vp9.c
210
if (!misc->cpu)
drivers/media/platform/verisilicon/hantro_vp9.c
214
memset(misc->cpu, 0, size);
drivers/media/platform/verisilicon/hantro_vp9.c
221
dma_free_coherent(vpu->dev, segment_map->size, segment_map->cpu, segment_map->dma);
drivers/media/platform/verisilicon/hantro_vp9.c
224
dma_free_coherent(vpu->dev, tile_edge->size, tile_edge->cpu, tile_edge->dma);
drivers/media/platform/verisilicon/hantro_vp9.c
237
dma_free_coherent(vpu->dev, misc->size, misc->cpu, misc->dma);
drivers/media/platform/verisilicon/hantro_vp9.c
238
dma_free_coherent(vpu->dev, segment_map->size, segment_map->cpu, segment_map->dma);
drivers/media/platform/verisilicon/hantro_vp9.c
239
dma_free_coherent(vpu->dev, tile_edge->size, tile_edge->cpu, tile_edge->dma);
drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c
89
hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, q);
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
526
if (V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
527
memset(ctx->vp8_dec.segment_map.cpu, 0,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
1137
struct av1cdfs *out_cdfs = (struct av1cdfs *)av1_dec->prob_tbl_out.cpu;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
1188
memcpy(av1_dec->prob_tbl.cpu, av1_dec->cdfs, sizeof(struct av1cdfs));
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
1193
memcpy(av1_dec->prob_tbl.cpu + mv_offset, av1_dec->cdfs_ndvc,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
1236
struct rockchip_av1_film_grain *fgmem = av1_dec->film_grain.cpu;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
234
if (av1_dec->db_data_col.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
236
av1_dec->db_data_col.cpu,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
238
av1_dec->db_data_col.cpu = NULL;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
240
if (av1_dec->db_ctrl_col.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
242
av1_dec->db_ctrl_col.cpu,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
244
av1_dec->db_ctrl_col.cpu = NULL;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
246
if (av1_dec->cdef_col.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
248
av1_dec->cdef_col.cpu, av1_dec->cdef_col.dma);
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
249
av1_dec->cdef_col.cpu = NULL;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
251
if (av1_dec->sr_col.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
253
av1_dec->sr_col.cpu, av1_dec->sr_col.dma);
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
254
av1_dec->sr_col.cpu = NULL;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
256
if (av1_dec->lr_col.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
258
av1_dec->lr_col.cpu, av1_dec->lr_col.dma);
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
259
av1_dec->lr_col.cpu = NULL;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
281
av1_dec->db_data_col.cpu = dma_alloc_coherent(vpu->dev, size,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
284
if (!av1_dec->db_data_col.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
289
av1_dec->db_ctrl_col.cpu = dma_alloc_coherent(vpu->dev, size,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
292
if (!av1_dec->db_ctrl_col.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
297
av1_dec->cdef_col.cpu = dma_alloc_coherent(vpu->dev, size,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
300
if (!av1_dec->cdef_col.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
305
av1_dec->sr_col.cpu = dma_alloc_coherent(vpu->dev, size,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
308
if (!av1_dec->sr_col.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
313
av1_dec->lr_col.cpu = dma_alloc_coherent(vpu->dev, size,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
316
if (!av1_dec->lr_col.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
333
if (av1_dec->global_model.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
335
av1_dec->global_model.cpu,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
337
av1_dec->global_model.cpu = NULL;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
339
if (av1_dec->tile_info.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
341
av1_dec->tile_info.cpu,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
343
av1_dec->tile_info.cpu = NULL;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
345
if (av1_dec->film_grain.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
347
av1_dec->film_grain.cpu,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
349
av1_dec->film_grain.cpu = NULL;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
351
if (av1_dec->prob_tbl.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
353
av1_dec->prob_tbl.cpu, av1_dec->prob_tbl.dma);
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
354
av1_dec->prob_tbl.cpu = NULL;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
356
if (av1_dec->prob_tbl_out.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
358
av1_dec->prob_tbl_out.cpu,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
360
av1_dec->prob_tbl_out.cpu = NULL;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
362
if (av1_dec->tile_buf.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
364
av1_dec->tile_buf.cpu, av1_dec->tile_buf.dma);
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
365
av1_dec->tile_buf.cpu = NULL;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
377
av1_dec->global_model.cpu = dma_alloc_coherent(vpu->dev, GLOBAL_MODEL_SIZE,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
380
if (!av1_dec->global_model.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
384
av1_dec->tile_info.cpu = dma_alloc_coherent(vpu->dev, AV1_TILE_INFO_SIZE,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
387
if (!av1_dec->tile_info.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
391
av1_dec->film_grain.cpu = dma_alloc_coherent(vpu->dev,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
395
if (!av1_dec->film_grain.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
399
av1_dec->prob_tbl.cpu = dma_alloc_coherent(vpu->dev,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
403
if (!av1_dec->prob_tbl.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
407
av1_dec->prob_tbl_out.cpu = dma_alloc_coherent(vpu->dev,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
411
if (!av1_dec->prob_tbl_out.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
419
av1_dec->tile_buf.cpu = dma_alloc_coherent(vpu->dev,
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
423
if (!av1_dec->tile_buf.cpu)
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
523
u8 *dst = av1_dec->global_model.cpu;
drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
587
u8 *dst = av1_dec->tile_info.cpu;
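The rkvdec and hantro/rockchip entries reuse the same convention: .cpu is the mapped virtual address of an auxiliary DMA buffer (priv_tbl, tile_*, prob_tbl, ...), and teardown guards every free on that pointer before NULLing it, so it can run twice safely. A sketch of that guard, assuming the three-field aux-buffer struct suggested by hantro_hw.h's void *cpu member:

    static void aux_buf_put(struct device *dev, struct hantro_aux_buf *buf)
    {
        if (!buf->cpu)
            return;                  /* never allocated, or already freed */
        dma_free_coherent(dev, buf->size, buf->cpu, buf->dma);
        buf->cpu = NULL;
    }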
drivers/misc/lkdtm/bugs.c
348
unsigned int cpu, target;
drivers/misc/lkdtm/bugs.c
352
cpu = get_cpu();
drivers/misc/lkdtm/bugs.c
353
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/misc/sgi-gru/grufile.c
311
static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
drivers/misc/sgi-gru/grufile.c
322
core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
drivers/misc/sgi-gru/grufile.c
323
if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
drivers/misc/sgi-gru/grufile.c
341
irq_handler_t irq_handler, int cpu, int blade)
drivers/misc/sgi-gru/grufile.c
347
mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
drivers/misc/sgi-gru/grufile.c
351
irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
drivers/misc/sgi-gru/grufile.c
369
static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
drivers/misc/sgi-gru/grufile.c
374
mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
drivers/misc/sgi-gru/grufile.c
387
int cpu;
drivers/misc/sgi-gru/grufile.c
389
for_each_online_cpu(cpu) {
drivers/misc/sgi-gru/grufile.c
390
blade = uv_cpu_to_blade_id(cpu);
drivers/misc/sgi-gru/grufile.c
391
gru_chiplet_teardown_tlb_irq(0, cpu, blade);
drivers/misc/sgi-gru/grufile.c
392
gru_chiplet_teardown_tlb_irq(1, cpu, blade);
drivers/misc/sgi-gru/grufile.c
405
int cpu;
drivers/misc/sgi-gru/grufile.c
408
for_each_online_cpu(cpu) {
drivers/misc/sgi-gru/grufile.c
409
blade = uv_cpu_to_blade_id(cpu);
drivers/misc/sgi-gru/grufile.c
410
ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
drivers/misc/sgi-gru/grufile.c
414
ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
drivers/misc/sgi-gru/grumain.c
44
int cpu = smp_processor_id();
drivers/misc/sgi-gru/grumain.c
47
core = uv_cpu_core_number(cpu);
drivers/misc/sgi-gru/grumain.c
48
id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
drivers/misc/sgi-xp/xpc_uv.c
138
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
drivers/misc/sgi-xp/xpc_uv.c
1654
int cpu;
drivers/misc/sgi-xp/xpc_uv.c
1658
for_each_cpu(cpu, cpumask_of_node(nid)) {
drivers/misc/sgi-xp/xpc_uv.c
1671
for_each_cpu(cpu, cpumask_of_node(nid)) {
drivers/misc/sgi-xp/xpc_uv.c
170
mq->mmr_blade = uv_cpu_to_blade_id(cpu);
drivers/misc/sgi-xp/xpc_uv.c
172
nid = cpu_to_node(cpu);
drivers/misc/sgi-xp/xpc_uv.c
189
ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
drivers/misc/sgi-xp/xpc_uv.c
200
nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
drivers/misc/sgi-xp/xpc_uv.c
90
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
drivers/misc/sgi-xp/xpc_uv.c
94
mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
drivers/misc/vcpu_stall_detector.c
103
vcpu_stall_reg_write(cpu, VCPU_STALL_REG_LOAD_CNT, ticks);
drivers/misc/vcpu_stall_detector.c
106
vcpu_stall_reg_write(cpu, VCPU_STALL_REG_STATUS, 1);
drivers/misc/vcpu_stall_detector.c
123
static int stop_stall_detector_cpu(unsigned int cpu)
drivers/misc/vcpu_stall_detector.c
126
per_cpu_ptr(vcpu_stall_detectors, cpu);
drivers/misc/vcpu_stall_detector.c
133
vcpu_stall_reg_write(cpu, VCPU_STALL_REG_STATUS, 0);
drivers/misc/vcpu_stall_detector.c
214
int cpu;
drivers/misc/vcpu_stall_detector.c
222
for_each_possible_cpu(cpu)
drivers/misc/vcpu_stall_detector.c
223
stop_stall_detector_cpu(cpu);
drivers/misc/vcpu_stall_detector.c
87
static int start_stall_detector_cpu(unsigned int cpu)
drivers/misc/vcpu_stall_detector.c
94
vcpu_stall_reg_write(cpu, VCPU_STALL_REG_CLOCK_FREQ_HZ,
drivers/net/dsa/ocelot/felix.c
657
int cpu = felix_cpu_port_for_conduit(ds, conduit);
drivers/net/dsa/ocelot/felix.c
661
ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu);
drivers/net/dsa/realtek/rtl8365mb.c
1838
struct rtl8365mb_cpu *cpu = &mb->cpu;
drivers/net/dsa/realtek/rtl8365mb.c
1845
cpu->mask));
drivers/net/dsa/realtek/rtl8365mb.c
1849
val = FIELD_PREP(RTL8365MB_CPU_CTRL_EN_MASK, cpu->enable ? 1 : 0) |
drivers/net/dsa/realtek/rtl8365mb.c
1850
FIELD_PREP(RTL8365MB_CPU_CTRL_INSERTMODE_MASK, cpu->insert) |
drivers/net/dsa/realtek/rtl8365mb.c
1851
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
drivers/net/dsa/realtek/rtl8365mb.c
1852
FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
drivers/net/dsa/realtek/rtl8365mb.c
1853
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
drivers/net/dsa/realtek/rtl8365mb.c
1854
FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port & 0x7) |
drivers/net/dsa/realtek/rtl8365mb.c
1856
cpu->trap_port >> 3 & 0x1);
drivers/net/dsa/realtek/rtl8365mb.c
1868
struct rtl8365mb_cpu *cpu;
drivers/net/dsa/realtek/rtl8365mb.c
1872
cpu = &mb->cpu;
drivers/net/dsa/realtek/rtl8365mb.c
1876
cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
drivers/net/dsa/realtek/rtl8365mb.c
1877
cpu->position = RTL8365MB_CPU_POS_AFTER_SA;
drivers/net/dsa/realtek/rtl8365mb.c
1880
cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
drivers/net/dsa/realtek/rtl8365mb.c
1881
cpu->position = RTL8365MB_CPU_POS_BEFORE_CRC;
drivers/net/dsa/realtek/rtl8365mb.c
1944
struct rtl8365mb_cpu *cpu;
drivers/net/dsa/realtek/rtl8365mb.c
1951
cpu = &mb->cpu;
drivers/net/dsa/realtek/rtl8365mb.c
1975
cpu->mask |= BIT(cpu_dp->index);
drivers/net/dsa/realtek/rtl8365mb.c
1977
if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS)
drivers/net/dsa/realtek/rtl8365mb.c
1978
cpu->trap_port = cpu_dp->index;
drivers/net/dsa/realtek/rtl8365mb.c
1980
cpu->enable = cpu->mask > 0;
drivers/net/dsa/realtek/rtl8365mb.c
1993
ret = rtl8365mb_port_set_isolation(priv, i, cpu->mask);
drivers/net/dsa/realtek/rtl8365mb.c
2013
ret = rtl8365mb_port_change_mtu(ds, cpu->trap_port, ETH_DATA_LEN);
drivers/net/dsa/realtek/rtl8365mb.c
2105
mb->cpu.trap_port = RTL8365MB_MAX_NUM_PORTS;
drivers/net/dsa/realtek/rtl8365mb.c
2106
mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
drivers/net/dsa/realtek/rtl8365mb.c
2107
mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
drivers/net/dsa/realtek/rtl8365mb.c
2108
mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
drivers/net/dsa/realtek/rtl8365mb.c
2109
mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
drivers/net/dsa/realtek/rtl8365mb.c
645
struct rtl8365mb_cpu cpu;
drivers/net/dsa/realtek/rtl8365mb.c
856
struct rtl8365mb_cpu *cpu;
drivers/net/dsa/realtek/rtl8365mb.c
860
cpu = &mb->cpu;
drivers/net/dsa/realtek/rtl8365mb.c
862
if (cpu->position == RTL8365MB_CPU_POS_BEFORE_CRC)
drivers/net/dsa/sja1105/sja1105_flower.c
379
int cpu = dsa_upstream_port(ds, port);
drivers/net/dsa/sja1105/sja1105_flower.c
385
&key, BIT(cpu), true);
drivers/net/ethernet/amazon/ena/ena_netdev.c
1404
int cpu = get_cpu();
drivers/net/ethernet/amazon/ena/ena_netdev.c
1408
if (likely(tx_ring->cpu == cpu))
drivers/net/ethernet/amazon/ena/ena_netdev.c
1411
tx_ring->cpu = cpu;
drivers/net/ethernet/amazon/ena/ena_netdev.c
1413
rx_ring->cpu = cpu;
drivers/net/ethernet/amazon/ena/ena_netdev.c
1415
numa_node = cpu_to_node(cpu);
drivers/net/ethernet/amazon/ena/ena_netdev.c
1588
u32 cpu;
drivers/net/ethernet/amazon/ena/ena_netdev.c
1598
cpu = cpumask_first(cpu_online_mask);
drivers/net/ethernet/amazon/ena/ena_netdev.c
1599
adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
drivers/net/ethernet/amazon/ena/ena_netdev.c
1600
cpumask_set_cpu(cpu,
drivers/net/ethernet/amazon/ena/ena_netdev.c
1607
int irq_idx, i, cpu;
drivers/net/ethernet/amazon/ena/ena_netdev.c
1615
cpu = i % num_online_cpus();
drivers/net/ethernet/amazon/ena/ena_netdev.c
1623
adapter->irq_tbl[irq_idx].cpu = cpu;
drivers/net/ethernet/amazon/ena/ena_netdev.c
1625
cpumask_set_cpu(cpu,
drivers/net/ethernet/amazon/ena/ena_netdev.c
177
ring->cpu = 0;
drivers/net/ethernet/amazon/ena/ena_netdev.c
247
node = cpu_to_node(ena_irq->cpu);
drivers/net/ethernet/amazon/ena/ena_netdev.c
281
tx_ring->cpu = ena_irq->cpu;
drivers/net/ethernet/amazon/ena/ena_netdev.c
384
node = cpu_to_node(ena_irq->cpu);
drivers/net/ethernet/amazon/ena/ena_netdev.c
413
rx_ring->cpu = ena_irq->cpu;
drivers/net/ethernet/amazon/ena/ena_netdev.h
119
int cpu;
drivers/net/ethernet/amazon/ena/ena_netdev.h
264
int cpu;
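ena caches the CPU last seen servicing a queue pair (ring->cpu) so NUMA placement hints are only recomputed when the interrupt actually migrates. A sketch of that fast-path check, assuming ring fields matching the listing; the real driver's bookkeeping is more involved:

    static void ena_rings_note_cpu(struct ena_ring *tx_ring, struct ena_ring *rx_ring)
    {
        int cpu = get_cpu();
        int numa_node;

        if (likely(tx_ring->cpu == cpu))
            goto out;                /* common case: no migration */

        tx_ring->cpu = cpu;
        rx_ring->cpu = cpu;

        numa_node = cpu_to_node(cpu);
        if (numa_node != NUMA_NO_NODE) {
            /* refresh device placement hints for the new node here */
        }
    out:
        put_cpu();
    }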
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
108
cpumask_set_cpu(cpu, &channel->affinity_mask);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
136
"%s: cpu=%u, node=%d\n", channel->name, cpu, node);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
86
unsigned int cpu;
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
92
cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
95
node = cpu_to_node(cpu);
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
832
int cpu = smp_processor_id();
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
846
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
988
aq_ptp->ptp_ring_param.cpu = aq_ptp->ptp_ring_param.vec_idx +
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
990
cpumask_set_cpu(aq_ptp->ptp_ring_param.cpu,
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
368
int cpu = smp_processor_id();
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
373
vec = cpu % aq_cfg->vecs;
drivers/net/ethernet/aquantia/atlantic/aq_ring.h
159
unsigned int cpu;
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
113
self->aq_ring_param.cpu =
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
116
cpumask_set_cpu(self->aq_ring_param.cpu,
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
547
hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
579
hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
783
hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
814
hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
drivers/net/ethernet/asix/ax88796c_main.c
655
int cpu;
drivers/net/ethernet/asix/ax88796c_main.c
657
for_each_possible_cpu(cpu) {
drivers/net/ethernet/asix/ax88796c_main.c
662
s = per_cpu_ptr(ax_local->stats, cpu);
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
626
int i, rc = 0, agg_rings = 0, cpu;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
639
cpu = cpumask_local_spread(i, dev_to_node(bd->dev));
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
640
cpu_node = cpu_to_node(cpu);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
12949
unsigned int q_idx, map_idx, cpu, i;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
12965
cpu = cpumask_local_spread(i, numa_node);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
12966
cpu_mask_ptr = get_cpu_mask(cpu);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3918
int i, rc = 0, agg_rings = 0, cpu;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3933
cpu = cpumask_local_spread(i, numa_node);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3934
cpu_node = cpu_to_node(cpu);
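bnge and bnxt (like xgbe, funeth and i40e elsewhere in this listing) share the NUMA-spread idiom: the i-th ring picks a CPU spread near the device's node, then allocates from that CPU's node. Condensed from the lines above:

    static int ring_home_node(int i, int dev_numa_node)
    {
        int cpu = cpumask_local_spread(i, dev_numa_node);

        return cpu_to_node(cpu);   /* pass to kzalloc_node() etc. */
    }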
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
326
int sqs, cpu;
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
338
for_each_possible_cpu(cpu)
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
339
tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1096
int vec, cpu;
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1107
cpu = nicvf_netdev_qidx(nic, vec) + 1;
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1109
cpu = 0;
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1111
cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1451
int cpu, err, qidx;
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1521
for_each_possible_cpu(cpu)
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1522
memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1652
int qidx, cpu;
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1691
for_each_possible_cpu(cpu) {
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1692
drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
drivers/net/ethernet/chelsio/cxgb/sge.c
980
int cpu;
drivers/net/ethernet/chelsio/cxgb/sge.c
983
for_each_possible_cpu(cpu) {
drivers/net/ethernet/chelsio/cxgb/sge.c
984
struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
122
unsigned int cpu;
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
128
cpu = get_cpu();
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
129
pool = per_cpu_ptr(ppm->pool, cpu);
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
148
__func__, cpu, i, count, i + cpu * ppm->pool_index_max,
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
151
i += cpu * ppm->pool_index_max;
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
196
unsigned int cpu;
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
199
cpu = i / ppm->pool_index_max;
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
202
pool = per_cpu_ptr(ppm->pool, cpu);
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
211
__func__, cpu, i, pool->next);
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
354
unsigned int cpu;
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
373
for_each_possible_cpu(cpu) {
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
374
struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);
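libcxgb's ppm partitions one big tag space into per-CPU windows of pool_index_max entries, so converting between a CPU-local slot and a global id is pure arithmetic, as in the `i += cpu * ppm->pool_index_max` and `cpu = i / ppm->pool_index_max` lines above:

    static unsigned int ppm_global_idx(unsigned int cpu, unsigned int slot,
                                       unsigned int pool_index_max)
    {
        return slot + cpu * pool_index_max;
    }

    static unsigned int ppm_owning_cpu(unsigned int idx, unsigned int pool_index_max)
    {
        return idx / pool_index_max;   /* reverse lookup on free */
    }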
drivers/net/ethernet/cisco/enic/enic_main.c
2045
(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
drivers/net/ethernet/cisco/enic/vnic_rss.h
29
} cpu[32];
drivers/net/ethernet/engleder/tsnep_main.c
2304
static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
drivers/net/ethernet/engleder/tsnep_main.c
2306
if (cpu >= TSNEP_MAX_QUEUES)
drivers/net/ethernet/engleder/tsnep_main.c
2307
cpu &= TSNEP_MAX_QUEUES - 1;
drivers/net/ethernet/engleder/tsnep_main.c
2309
while (cpu >= adapter->num_tx_queues)
drivers/net/ethernet/engleder/tsnep_main.c
2310
cpu -= adapter->num_tx_queues;
drivers/net/ethernet/engleder/tsnep_main.c
2312
return &adapter->tx[cpu];
drivers/net/ethernet/engleder/tsnep_main.c
2319
u32 cpu = smp_processor_id();
drivers/net/ethernet/engleder/tsnep_main.c
2328
tx = tsnep_xdp_get_tx(adapter, cpu);
drivers/net/ethernet/engleder/tsnep_main.c
2331
__netif_tx_lock(nq, cpu);
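tsnep folds the current CPU id onto its TX queues in two steps: a mask against the power-of-two compile-time maximum, then a wrap into the runtime queue count. Reassembled from the tsnep_xdp_get_tx lines above:

    static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
    {
        if (cpu >= TSNEP_MAX_QUEUES)
            cpu &= TSNEP_MAX_QUEUES - 1;        /* cheap modulo */

        while (cpu >= adapter->num_tx_queues)   /* then wrap to live queues */
            cpu -= adapter->num_tx_queues;

        return &adapter->tx[cpu];
    }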
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
3168
int cpu;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
3170
for_each_possible_cpu(cpu) {
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
3171
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
3183
int cpu;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
3185
for_each_possible_cpu(cpu) {
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
3186
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
817
int cpu;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
819
for_each_cpu_and(cpu, cpus, cpu_online_mask) {
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
820
portal = qman_get_affine_portal(cpu);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
956
int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
965
for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
966
channels[num_portals++] = qman_affine_channel(cpu);
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
422
int cpu, res;
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
437
for_each_cpu_and(cpu, cpus, cpu_online_mask) {
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
438
portal = qman_get_affine_portal(cpu);
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
447
needs_revert[cpu] = true;
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
456
for_each_cpu_and(cpu, cpus, cpu_online_mask) {
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
457
if (!needs_revert[cpu])
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
459
portal = qman_get_affine_portal(cpu);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3290
int cpu)
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3296
if (priv->channel[i]->nctx.desired_cpu == cpu)
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3302
dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
drivers/net/ethernet/freescale/fec_main.c
1488
int cpu = smp_processor_id();
drivers/net/ethernet/freescale/fec_main.c
1498
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/freescale/fec_main.c
2026
int cpu = smp_processor_id();
drivers/net/ethernet/freescale/fec_main.c
2047
tx_qid = fec_enet_xdp_get_tx_queue(fep, cpu);
drivers/net/ethernet/freescale/fec_main.c
2120
err = fec_enet_xdp_tx_xmit(fep, cpu, &xdp, sync, tx_qid);
drivers/net/ethernet/freescale/fec_main.c
2203
struct xdp_buff *xsk, int cpu,
drivers/net/ethernet/freescale/fec_main.c
2215
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/freescale/fec_main.c
2277
int cpu = smp_processor_id();
drivers/net/ethernet/freescale/fec_main.c
2369
err = fec_enet_xsk_tx_xmit(fep, xsk, cpu, queue);
drivers/net/ethernet/freescale/fec_main.c
4763
int cpu, struct xdp_buff *xdp,
drivers/net/ethernet/freescale/fec_main.c
4770
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/freescale/fec_main.c
4788
int cpu = smp_processor_id();
drivers/net/ethernet/freescale/fec_main.c
4794
queue = fec_enet_xdp_get_tx_queue(fep, cpu);
drivers/net/ethernet/freescale/fec_main.c
4798
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/freescale/fec_main.c
82
int cpu, struct xdp_buff *xdp,
drivers/net/ethernet/fungible/funcore/fun_dev.c
179
int cpu;
drivers/net/ethernet/fungible/funcore/fun_dev.c
194
if (cmd_ctx->cpu < 0) {
drivers/net/ethernet/fungible/funcore/fun_dev.c
204
cpu = cmd_ctx->cpu;
drivers/net/ethernet/fungible/funcore/fun_dev.c
205
cmd_ctx->cpu = -1;
drivers/net/ethernet/fungible/funcore/fun_dev.c
206
sbitmap_queue_clear(&fdev->admin_sbq, cid, cpu);
drivers/net/ethernet/fungible/funcore/fun_dev.c
218
fdev->cmd_ctx[i].cpu = -1;
drivers/net/ethernet/fungible/funcore/fun_dev.c
31
int cpu; /* CPU where the cmd's tag was allocated */
drivers/net/ethernet/fungible/funcore/fun_dev.c
369
int tag, cpu, rc = 0;
drivers/net/ethernet/fungible/funcore/fun_dev.c
374
tag = sbitmap_queue_get(&fdev->admin_sbq, &cpu);
drivers/net/ethernet/fungible/funcore/fun_dev.c
378
tag = fun_wait_for_tag(fdev, &cpu);
drivers/net/ethernet/fungible/funcore/fun_dev.c
393
sbitmap_queue_clear(&fdev->admin_sbq, tag, cpu);
drivers/net/ethernet/fungible/funcore/fun_dev.c
395
cmd_ctx->cpu = cpu;
drivers/net/ethernet/fungible/funeth/funeth_main.c
253
int cpu, res;
drivers/net/ethernet/fungible/funeth/funeth_main.c
255
cpu = cpumask_local_spread(idx, node);
drivers/net/ethernet/fungible/funeth/funeth_main.c
256
node = cpu_to_mem(cpu);
drivers/net/ethernet/fungible/funeth/funeth_main.c
271
cpumask_set_cpu(cpu, &irq->affinity_mask);
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1259
int cpu;
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1267
cpu = ring_idx;
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1269
cpu = ring_idx - q_num;
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1272
cpu = ring_idx * 2;
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1274
cpu = (ring_idx - q_num) * 2 + 1;
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1278
cpumask_set_cpu(cpu, mask);
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1280
return cpu;
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1305
int cpu;
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1328
cpu = hns_nic_init_affinity_mask(h->q_num, i,
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1331
if (cpu_online(cpu))
drivers/net/ethernet/ibm/ibmvnic.c
217
unsigned int *cpu, int *stragglers,
drivers/net/ethernet/ibm/ibmvnic.c
237
for_each_online_cpu_wrap(i, *cpu) {
drivers/net/ethernet/ibm/ibmvnic.c
242
*cpu = i;
drivers/net/ethernet/ibm/ibmvnic.c
265
unsigned int num_cpu, cpu = 0;
drivers/net/ethernet/ibm/ibmvnic.c
296
rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
drivers/net/ethernet/ibm/ibmvnic.c
321
static int ibmvnic_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/net/ethernet/ibm/ibmvnic.c
330
static int ibmvnic_cpu_dead(unsigned int cpu, struct hlist_node *node)
drivers/net/ethernet/ibm/ibmvnic.c
339
static int ibmvnic_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
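
ibmvnic's online/dead/down_prep trio (mirrored by virtnet_cpu_online/dead/down_prep later in this section) is the multi-instance CPU hotplug interface: each callback receives the CPU in transition plus the hlist_node registered for the device instance. Here is a registration sketch under assumed demo_* names; for brevity it uses a single dynamic state where ibmvnic actually registers two (an online state and a dead state). cpuhp_setup_state_multi() and cpuhp_state_add_instance() are the real API.

#include <linux/types.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state demo_online_state;	/* allocated by the core */

static int demo_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	/* container_of(node, ...) recovers the per-device context;
	 * rebalance queue affinities here. */
	return 0;
}

static int demo_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	/* Called before `cpu` goes away: migrate work off it. */
	return 0;
}

static int demo_hotplug_init(struct hlist_node *node)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "demo:online",
				      demo_cpu_online, demo_cpu_down_prep);
	if (ret < 0)
		return ret;
	demo_online_state = ret;
	/* Attach this instance; callbacks now fire on every hotplug event. */
	return cpuhp_state_add_instance(demo_online_state, node);
}
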
drivers/net/ethernet/intel/e1000/e1000.h
182
int cpu;
drivers/net/ethernet/intel/i40e/i40e_main.c
3401
int cpu;
drivers/net/ethernet/intel/i40e/i40e_main.c
3410
cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
drivers/net/ethernet/intel/i40e/i40e_main.c
3411
netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
drivers/net/ethernet/intel/i40e/i40e_main.c
4135
int cpu;
drivers/net/ethernet/intel/i40e/i40e_main.c
4178
cpu = cpumask_local_spread(q_vector->v_idx, -1);
drivers/net/ethernet/intel/i40e/i40e_main.c
4179
irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
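
The i40e lines above show the standard NUMA-aware spreading recipe: cpumask_local_spread() picks the v_idx-th CPU, preferring the device's node, and the result feeds both the XPS map and the IRQ affinity hint so transmit and interrupt handling land on the same core. A compact sketch under assumed names (demo_setup_vector and its irq/v_idx/node parameters are hypothetical); the four kernel calls are real.

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical helper: spread vector v_idx near NUMA node `node`. */
static void demo_setup_vector(struct net_device *ndev, unsigned int irq,
			      u16 v_idx, int node)
{
	/* i-th CPU, preferring `node` first, then the rest of the system. */
	int cpu = cpumask_local_spread(v_idx, node);

	/* Steer TX for queue v_idx from that CPU (XPS)... */
	netif_set_xps_queue(ndev, get_cpu_mask(cpu), v_idx);
	/* ...and hint irqbalance to park the vector's IRQ there too. */
	irq_update_affinity_hint(irq, get_cpu_mask(cpu));
}
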
drivers/net/ethernet/intel/idpf/idpf_txrx.h
982
u32 cpu;
drivers/net/ethernet/intel/idpf/idpf_txrx.h
987
cpu = cpumask_first(&q_vector->napi.config->affinity_mask);
drivers/net/ethernet/intel/idpf/idpf_txrx.h
989
return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
drivers/net/ethernet/intel/igb/igb.h
370
int cpu; /* CPU for DCA */
drivers/net/ethernet/intel/igb/igb_main.c
2960
int cpu = smp_processor_id();
drivers/net/ethernet/intel/igb/igb_main.c
2977
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/intel/igb/igb_main.c
2990
int cpu = smp_processor_id();
drivers/net/ethernet/intel/igb/igb_main.c
3014
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/intel/igb/igb_main.c
7169
int cpu)
drivers/net/ethernet/intel/igb/igb_main.c
7172
u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
drivers/net/ethernet/intel/igb/igb_main.c
7190
int cpu)
drivers/net/ethernet/intel/igb/igb_main.c
7193
u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
drivers/net/ethernet/intel/igb/igb_main.c
7211
int cpu = get_cpu();
drivers/net/ethernet/intel/igb/igb_main.c
7213
if (q_vector->cpu == cpu)
drivers/net/ethernet/intel/igb/igb_main.c
7217
igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
drivers/net/ethernet/intel/igb/igb_main.c
7220
igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
drivers/net/ethernet/intel/igb/igb_main.c
7222
q_vector->cpu = cpu;
drivers/net/ethernet/intel/igb/igb_main.c
7239
adapter->q_vector[i]->cpu = -1;
drivers/net/ethernet/intel/igb/igb_main.c
8338
int cpu = smp_processor_id();
drivers/net/ethernet/intel/igb/igb_main.c
8448
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/intel/igb/igb_main.c
8987
int cpu = smp_processor_id();
drivers/net/ethernet/intel/igb/igb_main.c
8997
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/intel/igc/igc.h
771
struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu);
drivers/net/ethernet/intel/igc/igc_main.c
2478
struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu)
drivers/net/ethernet/intel/igc/igc_main.c
2480
int index = cpu;
drivers/net/ethernet/intel/igc/igc_main.c
2494
int cpu = smp_processor_id();
drivers/net/ethernet/intel/igc/igc_main.c
2502
ring = igc_get_tx_ring(adapter, cpu);
drivers/net/ethernet/intel/igc/igc_main.c
2505
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/intel/igc/igc_main.c
2574
int cpu = smp_processor_id();
drivers/net/ethernet/intel/igc/igc_main.c
2579
ring = igc_get_tx_ring(adapter, cpu);
drivers/net/ethernet/intel/igc/igc_main.c
2582
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/intel/igc/igc_main.c
3042
int cpu = smp_processor_id();
drivers/net/ethernet/intel/igc/igc_main.c
3049
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/intel/igc/igc_main.c
6875
int cpu = smp_processor_id();
drivers/net/ethernet/intel/igc/igc_main.c
6886
ring = igc_get_tx_ring(adapter, cpu);
drivers/net/ethernet/intel/igc/igc_main.c
6889
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/intel/igc/igc_tsn.c
100
ring = igc_get_tx_ring(adapter, cpu);
drivers/net/ethernet/intel/igc/igc_tsn.c
109
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/intel/igc/igc_tsn.c
94
int cpu = smp_processor_id();
drivers/net/ethernet/intel/ixgbe/ixgbe.h
505
int cpu; /* CPU for DCA */
drivers/net/ethernet/intel/ixgbe/ixgbe.h
860
static inline int ixgbe_determine_xdp_q_idx(int cpu)
drivers/net/ethernet/intel/ixgbe/ixgbe.h
863
return cpu % IXGBE_MAX_XDP_QS;
drivers/net/ethernet/intel/ixgbe/ixgbe.h
865
return cpu;
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
598
static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
602
ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
609
unsigned int cpu)
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
615
snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
622
ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
716
int cpu, i, ddp_max;
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
730
for_each_possible_cpu(cpu)
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
731
ixgbe_fcoe_dma_pool_free(fcoe, cpu);
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
757
unsigned int cpu;
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
779
for_each_possible_cpu(cpu) {
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
780
int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
784
e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
841
int cpu = -1;
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
851
cpu = cpumask_local_spread(v_idx, node);
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
852
node = cpu_to_node(cpu);
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
865
if (cpu != -1)
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
866
cpumask_set_cpu(cpu, &q_vector->affinity_mask);
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
871
q_vector->cpu = -1;
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1484
int cpu)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1491
txctrl = dca3_get_tag(tx_ring->dev, cpu);
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1521
int cpu)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1528
rxctrl = dca3_get_tag(rx_ring->dev, cpu);
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1555
int cpu = get_cpu();
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1557
if (q_vector->cpu == cpu)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1561
ixgbe_update_tx_dca(adapter, ring, cpu);
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1564
ixgbe_update_rx_dca(adapter, ring, cpu);
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1566
q_vector->cpu = cpu;
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1584
adapter->q_vector[i]->cpu = -1;
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
7829
unsigned int cpu;
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
7831
for_each_possible_cpu(cpu) {
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
7832
ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
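
The igb and ixgbe DCA entries above share one caching idiom: pin the task with get_cpu(), skip the retag when the cached q_vector->cpu already matches, otherwise write a fresh tag and remember the CPU (with -1 meaning "no tag yet", as the `->cpu = -1` initializations show). A skeleton of that caching follows, assuming a hypothetical demo_q_vector and demo_write_dca_tag() and a CONFIG_DCA kernel; get_cpu()/put_cpu() and dca3_get_tag() are the real interfaces.

#include <linux/smp.h>
#include <linux/device.h>
#include <linux/dca.h>

struct demo_q_vector {
	struct device *dev;
	int cpu;	/* last CPU a tag was written for; -1 = none yet */
};

/* Hypothetical register write standing in for the ring CTRL updates. */
static void demo_write_dca_tag(struct demo_q_vector *q, u8 tag) { }

static void demo_update_dca(struct demo_q_vector *q)
{
	int cpu = get_cpu();	/* disables preemption; cpu stays valid */

	if (q->cpu == cpu)	/* tag already targets this CPU */
		goto out;

	demo_write_dca_tag(q, dca3_get_tag(q->dev, cpu));
	q->cpu = cpu;
out:
	put_cpu();
}
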
drivers/net/ethernet/marvell/mvneta.c
131
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
drivers/net/ethernet/marvell/mvneta.c
1476
int cpu;
drivers/net/ethernet/marvell/mvneta.c
1496
for_each_present_cpu(cpu) {
drivers/net/ethernet/marvell/mvneta.c
1501
if ((rxq % max_cpu) == cpu)
drivers/net/ethernet/marvell/mvneta.c
1505
if ((txq % max_cpu) == cpu)
drivers/net/ethernet/marvell/mvneta.c
1513
txq_map = (cpu == pp->rxq_def) ?
drivers/net/ethernet/marvell/mvneta.c
1521
mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
drivers/net/ethernet/marvell/mvneta.c
2179
int cpu, nxmit_byte = 0;
drivers/net/ethernet/marvell/mvneta.c
2187
cpu = smp_processor_id();
drivers/net/ethernet/marvell/mvneta.c
2188
txq = &pp->txqs[cpu % txq_number];
drivers/net/ethernet/marvell/mvneta.c
2191
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/marvell/mvneta.c
2218
int cpu = smp_processor_id();
drivers/net/ethernet/marvell/mvneta.c
2229
txq = &pp->txqs[cpu % txq_number];
drivers/net/ethernet/marvell/mvneta.c
2232
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/marvell/mvneta.c
3036
int cpu = smp_processor_id();
drivers/net/ethernet/marvell/mvneta.c
3042
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/marvell/mvneta.c
3537
int cpu, err;
drivers/net/ethernet/marvell/mvneta.c
3568
cpu = 0;
drivers/net/ethernet/marvell/mvneta.c
3570
cpu = txq->id % num_present_cpus();
drivers/net/ethernet/marvell/mvneta.c
3572
cpu = pp->rxq_def % num_present_cpus();
drivers/net/ethernet/marvell/mvneta.c
3573
cpumask_set_cpu(cpu, &txq->affinity_mask);
drivers/net/ethernet/marvell/mvneta.c
3757
int cpu;
drivers/net/ethernet/marvell/mvneta.c
3769
for_each_online_cpu(cpu) {
drivers/net/ethernet/marvell/mvneta.c
3771
per_cpu_ptr(pp->ports, cpu);
drivers/net/ethernet/marvell/mvneta.c
3798
unsigned int cpu;
drivers/net/ethernet/marvell/mvneta.c
3808
for_each_online_cpu(cpu) {
drivers/net/ethernet/marvell/mvneta.c
3810
per_cpu_ptr(pp->ports, cpu);
drivers/net/ethernet/marvell/mvneta.c
4380
int elected_cpu = 0, max_cpu, cpu;
drivers/net/ethernet/marvell/mvneta.c
4390
for_each_online_cpu(cpu) {
drivers/net/ethernet/marvell/mvneta.c
4395
if ((rxq % max_cpu) == cpu)
drivers/net/ethernet/marvell/mvneta.c
4398
if (cpu == elected_cpu)
drivers/net/ethernet/marvell/mvneta.c
4407
txq_map = (cpu == elected_cpu) ?
drivers/net/ethernet/marvell/mvneta.c
4410
txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
drivers/net/ethernet/marvell/mvneta.c
4413
mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
drivers/net/ethernet/marvell/mvneta.c
4418
smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
drivers/net/ethernet/marvell/mvneta.c
4423
static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/net/ethernet/marvell/mvneta.c
4428
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
drivers/net/ethernet/marvell/mvneta.c
4454
if (other_cpu != cpu) {
drivers/net/ethernet/marvell/mvneta.c
4490
static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
drivers/net/ethernet/marvell/mvneta.c
4494
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
drivers/net/ethernet/marvell/mvneta.c
4512
static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
drivers/net/ethernet/marvell/mvneta.c
4863
int cpu;
drivers/net/ethernet/marvell/mvneta.c
4865
for_each_possible_cpu(cpu) {
drivers/net/ethernet/marvell/mvneta.c
4877
stats = per_cpu_ptr(pp->stats, cpu);
drivers/net/ethernet/marvell/mvneta.c
5022
int cpu;
drivers/net/ethernet/marvell/mvneta.c
5031
for_each_online_cpu(cpu) {
drivers/net/ethernet/marvell/mvneta.c
5033
per_cpu_ptr(pp->ports, cpu);
drivers/net/ethernet/marvell/mvneta.c
5059
for_each_online_cpu(cpu) {
drivers/net/ethernet/marvell/mvneta.c
5061
per_cpu_ptr(pp->ports, cpu);
drivers/net/ethernet/marvell/mvneta.c
5493
int cpu;
drivers/net/ethernet/marvell/mvneta.c
5711
for_each_present_cpu(cpu) {
drivers/net/ethernet/marvell/mvneta.c
5713
per_cpu_ptr(pp->ports, cpu);
drivers/net/ethernet/marvell/mvneta.c
809
int cpu;
drivers/net/ethernet/marvell/mvneta.c
811
for_each_possible_cpu(cpu) {
drivers/net/ethernet/marvell/mvneta.c
820
cpu_stats = per_cpu_ptr(pp->stats, cpu);
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
215
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
217
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
219
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
221
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1458
int nrxqs, cpu, cpus = num_possible_cpus();
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1464
cpu = rxq / nrxqs;
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1466
if (!cpu_online(cpu))
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1430
int cpu = smp_processor_id();
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1434
if (cpu > port->priv->nthreads)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1437
thread = mvpp2_cpu_to_thread(port->priv, cpu);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1452
int cpu = smp_processor_id();
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1456
if (cpu >= port->priv->nthreads)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1459
thread = mvpp2_cpu_to_thread(port->priv, cpu);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2020
unsigned int cpu;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2023
for_each_possible_cpu(cpu) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2033
cpu_stats = per_cpu_ptr(port->stats, cpu);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4729
unsigned int cpu;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4731
for_each_present_cpu(cpu) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4732
if (mvpp2_cpu_to_thread(port->priv, cpu) ==
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4734
cpumask_set_cpu(cpu, qv->mask);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
5147
unsigned int cpu;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
5149
for_each_possible_cpu(cpu) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
5156
cpu_stats = per_cpu_ptr(port->stats, cpu);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
89
static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
91
return cpu % priv->nthreads;
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
48
int err, cpu;
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
78
for_each_possible_cpu(cpu) {
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
79
lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, cpu);
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
81
(cpu * LMT_BURST_SIZE * LMT_LINE_SIZE));
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
82
lmt_info->lmt_id = cpu * LMT_BURST_SIZE;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
1948
int vec, cpu, irq, cint;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
1951
cpu = cpumask_first(cpu_online_mask);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
1958
cpumask_set_cpu(cpu, hw->affinity_mask[vec]);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
1963
cpu = cpumask_next(cpu, cpu_online_mask);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
1964
if (unlikely(cpu >= nr_cpu_ids))
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
1965
cpu = 0;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2455
u32 cpu, dma;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2457
cpu = ring->last_free_ptr;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2460
desc = mtk_qdma_phys_to_virt(ring, cpu);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2463
while ((cpu != dma) && budget) {
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2487
cpu = next_cpu;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2491
ring->last_free_ptr = cpu;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2492
mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2504
u32 cpu, dma;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2506
cpu = ring->cpu_idx;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2510
while ((cpu != dma) && budget) {
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2511
tx_buf = &ring->buf[cpu];
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2522
desc = ring->dma + cpu * eth->soc->tx.desc_size;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2526
cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2530
ring->cpu_idx = cpu;
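
A caution on the mtk_eth_soc entries above: there `cpu` is not a processor id at all. It names the CPU-owned descriptor index of a TX ring, advanced toward the DMA-owned index (`dma`) while reclaiming completed slots. Schematically, with NEXT_DESP_IDX's power-of-two mask shown as a plain modulo and every name hypothetical:

/* Ring reclaim walk: `cpu` and `dma` are ring positions, not CPU ids. */
static unsigned int demo_reclaim(unsigned int cpu, unsigned int dma,
				 unsigned int ring_size, int budget)
{
	while (cpu != dma && budget--) {
		/* ... unmap and free the buffer in slot `cpu` ... */
		cpu = (cpu + 1) % ring_size;	/* NEXT_DESP_IDX in the driver */
	}
	return cpu;	/* written back to ring->cpu_idx / the CRX register */
}
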
drivers/net/ethernet/mellanox/mlx5/core/en.h
795
int cpu;
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
613
.node = cpu_to_node(c->cpu),
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
134
int cpu = mlx5_comp_vector_get_cpu(priv->mdev, 0);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
139
t = kvzalloc_node(sizeof(*t), GFP_KERNEL, cpu_to_node(cpu));
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
105
err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), q_counter, xskrq);
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
106
dim = mlx5e_dim_enable(rq->mdev, mlx5e_rx_dim_work, c->cpu,
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
133
dim = mlx5e_dim_enable(sq->mdev, mlx5e_tx_dim_work, c->cpu,
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
66
void (*work_fun)(struct work_struct *), int cpu,
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
73
dim = kvzalloc_node(sizeof(*dim), GFP_KERNEL, cpu_to_node(cpu));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1509
param->wq.db_numa_node = cpu_to_node(c->cpu);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1515
err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1583
param->wq.db_numa_node = cpu_to_node(c->cpu);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1589
err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1677
param->wq.db_numa_node = cpu_to_node(c->cpu);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1683
err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2104
xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, cpu_to_node(c->cpu));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2519
return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2533
cpu_to_node(c->cpu));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2696
static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2712
GFP_KERNEL, cpu_to_node(cpu));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2778
int cpu;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2783
cpu = mlx5_comp_vector_get_cpu(mdev, vec_ix);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2789
err = mlx5e_channel_stats_alloc(priv, ix, cpu);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2793
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2809
c->cpu = cpu;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3163
int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3165
cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
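
The mlx5 en_main.c entries above all funnel through one allocation rule: channel state lives on the NUMA node of the channel's CPU (kvzalloc_node() + cpu_to_node()), so rings, doorbells and stats sit in memory local to the core that will touch them. A minimal sketch with a hypothetical demo_channel; kvzalloc_node(), cpu_to_node() and kvfree() are the real calls.

#include <linux/slab.h>
#include <linux/topology.h>

struct demo_channel {
	int cpu;
	/* ... rings, NAPI context, stats ... */
};

static struct demo_channel *demo_open_channel(int cpu)
{
	/* Allocate on the CPU's home node to keep the hot path NUMA-local;
	 * release later with kvfree(c). */
	struct demo_channel *c =
		kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));

	if (!c)
		return NULL;
	c->cpu = cpu;
	return c;
}
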
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1122
int cpu;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1126
cpu = cpumask_first(mask);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1128
cpu = mlx5_cpumask_default_spread(dev, vector);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1130
return cpu;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
846
int cpu;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
849
cpu = mlx5_cpumask_default_spread(dev, vecidx);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
850
irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
861
int cpu;
drivers/net/ethernet/mellanox/mlx5/core/eq.c
867
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
drivers/net/ethernet/mellanox/mlx5/core/eq.c
868
cpumask_clear_cpu(cpu, &table->used_cpus);
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
10
pool->irqs_per_cpu[cpu]--;
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
13
static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
15
pool->irqs_per_cpu[cpu]++;
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
190
int cpu;
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
192
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
200
cpu_put(pool, cpu);
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
23
int cpu;
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
25
for_each_cpu_and(cpu, req_mask, cpu_online_mask) {
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
27
if (!pool->irqs_per_cpu[cpu]) {
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
28
best_cpu = cpu;
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
32
best_cpu = cpu;
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
33
if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
34
best_cpu = cpu;
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
8
static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
35
struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
550
struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
568
cpumask_set_cpu(cpu, &af_desc->mask);
drivers/net/ethernet/microsoft/mana/gdma_main.c
1589
int cpu, weight;
drivers/net/ethernet/microsoft/mana/gdma_main.c
1599
for_each_cpu(cpu, cpus) {
drivers/net/ethernet/microsoft/mana/gdma_main.c
1600
cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
drivers/net/ethernet/microsoft/mana/gdma_main.c
1611
irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
drivers/net/ethernet/microsoft/mana/gdma_main.c
1707
unsigned int cpu;
drivers/net/ethernet/microsoft/mana/gdma_main.c
1757
cpu = cpumask_local_spread(0, gc->numa_node);
drivers/net/ethernet/microsoft/mana/gdma_main.c
1758
irq_set_affinity_and_hint(irqs[0], cpumask_of(cpu));
drivers/net/ethernet/mscc/ocelot.c
1766
struct ocelot_port *cpu)
drivers/net/ethernet/mscc/ocelot.c
1777
if (ocelot_port->dsa_8021q_cpu == cpu)
drivers/net/ethernet/mscc/ocelot.c
1781
if (cpu->bond)
drivers/net/ethernet/mscc/ocelot.c
1782
mask &= ~ocelot_get_bond_mask(ocelot, cpu->bond);
drivers/net/ethernet/mscc/ocelot.c
1924
void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
drivers/net/ethernet/mscc/ocelot.c
1926
struct ocelot_port *cpu_port = ocelot->ports[cpu];
drivers/net/ethernet/mscc/ocelot.c
1934
ocelot_vlan_member_add(ocelot, cpu, vid, true);
drivers/net/ethernet/mscc/ocelot.c
1942
void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
drivers/net/ethernet/mscc/ocelot.c
1944
struct ocelot_port *cpu_port = ocelot->ports[cpu];
drivers/net/ethernet/mscc/ocelot.c
1961
int cpu)
drivers/net/ethernet/mscc/ocelot.c
1963
struct ocelot_port *cpu_port = ocelot->ports[cpu];
drivers/net/ethernet/mscc/ocelot.c
3080
int cpu = ocelot->num_phys_ports;
drivers/net/ethernet/mscc/ocelot.c
3083
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
drivers/net/ethernet/mscc/ocelot.c
3088
ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
drivers/net/ethernet/mscc/ocelot.c
3090
ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
drivers/net/ethernet/mscc/ocelot.c
3091
ANA_PORT_PORT_CFG, cpu);
drivers/net/ethernet/mscc/ocelot.c
3094
ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
drivers/net/ethernet/mscc/ocelot.c
3096
ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
drivers/net/ethernet/mscc/ocelot.c
3098
ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
drivers/net/ethernet/mscc/ocelot.c
3106
ANA_PORT_VLAN_CFG, cpu);
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
1097
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
1105
int cpu = get_cpu();
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
1108
if (cpu != ss->cpu) {
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
1109
tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
1111
myri10ge_write_dca(ss, cpu, tag);
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
1112
ss->cpu = cpu;
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
1138
mgp->ss[i].cpu = -1;
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
191
int cpu;
drivers/net/ethernet/nvidia/forcedeth.c
1714
static void nv_get_stats(int cpu, struct fe_priv *np,
drivers/net/ethernet/nvidia/forcedeth.c
1717
struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
drivers/net/ethernet/nvidia/forcedeth.c
1759
int cpu;
drivers/net/ethernet/nvidia/forcedeth.c
1772
for_each_online_cpu(cpu)
drivers/net/ethernet/nvidia/forcedeth.c
1773
nv_get_stats(cpu, np, storage);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
171
int cpu;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
176
cpu = ionic_get_preferred_cpu(ionic, &ionic->lif->adminqcq->intr);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
177
queue_delayed_work_on(cpu, ionic->wq, &ionic->doorbell_check_dwork,
drivers/net/ethernet/pensando/ionic/ionic_dev.c
74
int cpu;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
76
cpu = cpumask_first_and(*intr->affinity_mask, cpu_online_mask);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
77
if (cpu >= nr_cpu_ids)
drivers/net/ethernet/pensando/ionic/ionic_dev.c
78
cpu = cpumask_local_spread(0, dev_to_node(ionic->dev));
drivers/net/ethernet/pensando/ionic/ionic_dev.c
80
return cpu;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
86
int cpu;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
91
cpu = ionic_get_preferred_cpu(ionic, &qcq->intr);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
92
queue_work_on(cpu, ionic->wq, &qcq->doorbell_napi_work);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
533
unsigned int cpu;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
535
cpu = cpumask_local_spread(qcq->intr.index,
drivers/net/ethernet/pensando/ionic/ionic_lif.c
537
if (cpu != -1)
drivers/net/ethernet/pensando/ionic/ionic_lif.c
538
cpumask_set_cpu(cpu, *affinity_mask);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
405
int cpu;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
419
cpu = smp_processor_id();
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
420
qi = cpu ? (cpu - 1) % lif->nxqs : cpu;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
424
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
137
unsigned int cpu, start;
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
139
for_each_possible_cpu(cpu) {
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
140
pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
drivers/net/ethernet/sfc/efx_channels.c
367
unsigned int cpu;
drivers/net/ethernet/sfc/efx_channels.c
373
cpu = -1;
drivers/net/ethernet/sfc/efx_channels.c
375
cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
drivers/net/ethernet/sfc/efx_channels.c
376
if (cpu >= nr_cpu_ids)
drivers/net/ethernet/sfc/efx_channels.c
377
cpu = cpumask_first_and(cpu_online_mask, numa_mask);
drivers/net/ethernet/sfc/efx_channels.c
378
irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
drivers/net/ethernet/sfc/efx_channels.c
59
int cpu;
drivers/net/ethernet/sfc/efx_channels.c
73
for_each_cpu(cpu, filter_mask) {
drivers/net/ethernet/sfc/efx_channels.c
75
cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
drivers/net/ethernet/sfc/falcon/efx.c
1318
int cpu;
drivers/net/ethernet/sfc/falcon/efx.c
1330
for_each_online_cpu(cpu) {
drivers/net/ethernet/sfc/falcon/efx.c
1331
if (!cpumask_test_cpu(cpu, thread_mask)) {
drivers/net/ethernet/sfc/falcon/efx.c
1334
topology_sibling_cpumask(cpu));
drivers/net/ethernet/sfc/falcon/selftest.c
139
int cpu;
drivers/net/ethernet/sfc/falcon/selftest.c
160
cpu = ef4_nic_irq_test_irq_cpu(efx);
drivers/net/ethernet/sfc/falcon/selftest.c
161
if (cpu >= 0)
drivers/net/ethernet/sfc/falcon/selftest.c
171
INT_MODE(efx), cpu);
drivers/net/ethernet/sfc/falcon/selftest.c
802
int cpu;
drivers/net/ethernet/sfc/falcon/selftest.c
805
cpu = ef4_nic_event_test_irq_cpu(channel);
drivers/net/ethernet/sfc/falcon/selftest.c
806
if (cpu < 0)
drivers/net/ethernet/sfc/falcon/selftest.c
813
channel->channel, cpu);
drivers/net/ethernet/sfc/selftest.c
139
int cpu;
drivers/net/ethernet/sfc/selftest.c
160
cpu = efx_nic_irq_test_irq_cpu(efx);
drivers/net/ethernet/sfc/selftest.c
161
if (cpu >= 0)
drivers/net/ethernet/sfc/selftest.c
171
INT_MODE(efx), cpu);
drivers/net/ethernet/sfc/selftest.c
795
int cpu;
drivers/net/ethernet/sfc/selftest.c
798
cpu = efx_nic_event_test_irq_cpu(channel);
drivers/net/ethernet/sfc/selftest.c
799
if (cpu < 0)
drivers/net/ethernet/sfc/selftest.c
806
channel->channel, cpu);
drivers/net/ethernet/sfc/siena/efx_channels.c
368
unsigned int cpu;
drivers/net/ethernet/sfc/siena/efx_channels.c
374
cpu = -1;
drivers/net/ethernet/sfc/siena/efx_channels.c
376
cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
drivers/net/ethernet/sfc/siena/efx_channels.c
377
if (cpu >= nr_cpu_ids)
drivers/net/ethernet/sfc/siena/efx_channels.c
378
cpu = cpumask_first_and(cpu_online_mask, numa_mask);
drivers/net/ethernet/sfc/siena/efx_channels.c
379
irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
drivers/net/ethernet/sfc/siena/efx_channels.c
59
int cpu;
drivers/net/ethernet/sfc/siena/efx_channels.c
73
for_each_cpu(cpu, filter_mask) {
drivers/net/ethernet/sfc/siena/efx_channels.c
75
cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
drivers/net/ethernet/sfc/siena/selftest.c
140
int cpu;
drivers/net/ethernet/sfc/siena/selftest.c
161
cpu = efx_nic_irq_test_irq_cpu(efx);
drivers/net/ethernet/sfc/siena/selftest.c
162
if (cpu >= 0)
drivers/net/ethernet/sfc/siena/selftest.c
172
INT_MODE(efx), cpu);
drivers/net/ethernet/sfc/siena/selftest.c
800
int cpu;
drivers/net/ethernet/sfc/siena/selftest.c
803
cpu = efx_nic_event_test_irq_cpu(channel);
drivers/net/ethernet/sfc/siena/selftest.c
804
if (cpu < 0)
drivers/net/ethernet/sfc/siena/selftest.c
811
channel->channel, cpu);
drivers/net/ethernet/sfc/siena/tx.c
217
int cpu;
drivers/net/ethernet/sfc/siena/tx.c
225
cpu = raw_smp_processor_id();
drivers/net/ethernet/sfc/siena/tx.c
226
if (unlikely(cpu >= efx->xdp_tx_queue_count))
drivers/net/ethernet/sfc/siena/tx.c
229
tx_queue = efx->xdp_tx_queues[cpu];
drivers/net/ethernet/sfc/siena/tx.c
237
HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
drivers/net/ethernet/sfc/tx.c
422
int cpu;
drivers/net/ethernet/sfc/tx.c
430
cpu = raw_smp_processor_id();
drivers/net/ethernet/sfc/tx.c
431
if (unlikely(cpu >= efx->xdp_tx_queue_count))
drivers/net/ethernet/sfc/tx.c
434
tx_queue = efx->xdp_tx_queues[cpu];
drivers/net/ethernet/sfc/tx.c
442
HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
drivers/net/ethernet/spacemit/k1_emac.c
1176
int cpu;
drivers/net/ethernet/spacemit/k1_emac.c
1178
for_each_possible_cpu(cpu) {
drivers/net/ethernet/spacemit/k1_emac.c
1179
stats = per_cpu_ptr(priv->ndev->dstats, cpu);
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
443
int cpu;
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
446
for_each_possible_cpu(cpu) {
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
451
pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
464
int cpu;
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
467
for_each_possible_cpu(cpu) {
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
472
pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5150
int cpu)
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5152
int index = cpu;
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5168
int cpu = smp_processor_id();
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5176
queue = stmmac_xdp_get_tx_queue(priv, cpu);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5179
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5256
int cpu = smp_processor_id();
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5259
queue = stmmac_xdp_get_tx_queue(priv, cpu);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
6892
int cpu = smp_processor_id();
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
6903
queue = stmmac_xdp_get_tx_queue(priv, cpu);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
6906
__netif_tx_lock(nq, cpu);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
1177
int cpu = smp_processor_id();
drivers/net/ethernet/ti/am65-cpsw-nuss.c
1197
tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
drivers/net/ethernet/ti/am65-cpsw-nuss.c
1206
__netif_tx_lock(netif_txq, cpu);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
1956
int cpu = smp_processor_id();
drivers/net/ethernet/ti/am65-cpsw-nuss.c
1959
tx_chn = &common->tx_chns[cpu % common->tx_ch_num];
drivers/net/ethernet/ti/am65-cpsw-nuss.c
1962
__netif_tx_lock(netif_txq, cpu);
drivers/net/ethernet/ti/icssg/icssg_common.c
783
int cpu = smp_processor_id();
drivers/net/ethernet/ti/icssg/icssg_common.c
803
q_idx = cpu % emac->tx_ch_num;
drivers/net/ethernet/ti/icssg/icssg_common.c
805
__netif_tx_lock(netif_txq, cpu);
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1194
int cpu = smp_processor_id();
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1201
q_idx = cpu % emac->tx_ch_num;
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1207
__netif_tx_lock(netif_txq, cpu);
drivers/net/ethernet/wangxun/libwx/wx_type.h
1109
int cpu; /* CPU for DCA */
drivers/net/hyperv/netvsc_drv.c
1486
int i, j, cpu;
drivers/net/hyperv/netvsc_drv.c
1532
for_each_present_cpu(cpu) {
drivers/net/hyperv/netvsc_drv.c
1533
struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
drivers/net/hyperv/netvsc_drv.c
1547
int i, cpu;
drivers/net/hyperv/netvsc_drv.c
1571
for_each_present_cpu(cpu) {
drivers/net/hyperv/netvsc_drv.c
1573
ethtool_sprintf(&p, pcpu_stats[i].name, cpu);
drivers/net/macsec.c
2687
int cpu;
drivers/net/macsec.c
2705
for_each_possible_cpu(cpu) {
drivers/net/macsec.c
2707
per_cpu_ptr(tx_sa->stats, cpu);
drivers/net/macsec.c
2731
int cpu;
drivers/net/macsec.c
2750
for_each_possible_cpu(cpu) {
drivers/net/macsec.c
2752
per_cpu_ptr(rx_sa->stats, cpu);
drivers/net/macsec.c
2784
int cpu;
drivers/net/macsec.c
2801
for_each_possible_cpu(cpu) {
drivers/net/macsec.c
2806
stats = per_cpu_ptr(rx_sc->stats, cpu);
drivers/net/macsec.c
2866
int cpu;
drivers/net/macsec.c
2882
for_each_possible_cpu(cpu) {
drivers/net/macsec.c
2887
stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
drivers/net/macsec.c
2922
int cpu;
drivers/net/macsec.c
2938
for_each_possible_cpu(cpu) {
drivers/net/macsec.c
2943
stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
drivers/net/netconsole.c
1499
wctxt->cpu);
drivers/net/ppp/ppp_generic.c
1242
int cpu;
drivers/net/ppp/ppp_generic.c
1263
for_each_possible_cpu(cpu) {
drivers/net/ppp/ppp_generic.c
1266
xmit_recursion = per_cpu_ptr(ppp->xmit_recursion, cpu);
drivers/net/ppp/ppp_generic.c
3332
int cpu;
drivers/net/ppp/ppp_generic.c
3335
for_each_possible_cpu(cpu) {
drivers/net/ppp/ppp_generic.c
3336
struct pcpu_sw_netstats *p = per_cpu_ptr(ppp->dev->tstats, cpu);
drivers/net/virtio_net.c
1683
int cpu = smp_processor_id(); \
drivers/net/virtio_net.c
1690
qp += cpu; \
drivers/net/virtio_net.c
1694
qp = cpu % v->curr_queue_pairs; \
drivers/net/virtio_net.c
1696
__netif_tx_lock(txq, cpu); \
drivers/net/virtio_net.c
3943
int i, start = 0, cpu;
drivers/net/virtio_net.c
3961
for_each_online_cpu_wrap(cpu, start) {
drivers/net/virtio_net.c
3963
start = cpu;
drivers/net/virtio_net.c
3966
cpumask_set_cpu(cpu, mask);
drivers/net/virtio_net.c
3979
static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/net/virtio_net.c
3987
static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
drivers/net/virtio_net.c
3995
static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
drivers/net/vmxnet3/vmxnet3_xdp.c
24
int cpu;
drivers/net/vmxnet3/vmxnet3_xdp.c
27
cpu = smp_processor_id();
drivers/net/vmxnet3/vmxnet3_xdp.c
28
if (likely(cpu < tq_number))
drivers/net/vmxnet3/vmxnet3_xdp.c
29
tq = &adapter->tx_queue[cpu];
drivers/net/vmxnet3/vmxnet3_xdp.c
31
tq = &adapter->tx_queue[cpu % tq_number];
drivers/net/wireguard/queueing.c
12
int cpu;
drivers/net/wireguard/queueing.c
18
for_each_possible_cpu(cpu) {
drivers/net/wireguard/queueing.c
19
per_cpu_ptr(worker, cpu)->ptr = ptr;
drivers/net/wireguard/queueing.c
20
INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
drivers/net/wireguard/queueing.h
107
unsigned int cpu = *stored_cpu;
drivers/net/wireguard/queueing.h
109
while (unlikely(cpu >= nr_cpu_ids || !cpu_online(cpu)))
drivers/net/wireguard/queueing.h
110
cpu = *stored_cpu = cpumask_nth(id % num_online_cpus(), cpu_online_mask);
drivers/net/wireguard/queueing.h
112
return cpu;
drivers/net/wireguard/queueing.h
122
int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);
drivers/net/wireguard/queueing.h
123
if (cpu >= nr_cpu_ids)
drivers/net/wireguard/queueing.h
124
cpu = cpumask_first(cpu_online_mask);
drivers/net/wireguard/queueing.h
125
WRITE_ONCE(*last_cpu, cpu);
drivers/net/wireguard/queueing.h
126
return cpu;
drivers/net/wireguard/queueing.h
156
int cpu;
drivers/net/wireguard/queueing.h
168
cpu = wg_cpumask_next_online(&device_queue->last_cpu);
drivers/net/wireguard/queueing.h
171
queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
drivers/net/wireguard/receive.c
550
int cpu, ret = -EBUSY;
drivers/net/wireguard/receive.c
568
cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
drivers/net/wireguard/receive.c
570
queue_work_on(cpu, wg->handshake_receive_wq,
drivers/net/wireguard/receive.c
571
&per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
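
The wireguard queueing.h entries at lines 122-126 form a complete round-robin CPU selector: advance last_cpu through cpu_online_mask, wrap at nr_cpu_ids, publish with WRITE_ONCE (the READ_ONCE/WRITE_ONCE pair tolerates races by design, since any online CPU is acceptable and the walk only needs to stay roughly fair), then hand the result to queue_work_on() as the receive.c entries do. Reassembled into one function plus its call site, under demo_* names:

#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/workqueue.h>

/* Lock-free round robin over online CPUs. */
static int demo_next_online_cpu(int *last_cpu)
{
	int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	WRITE_ONCE(*last_cpu, cpu);
	return cpu;
}

/* Fan incoming work out across CPUs, one item at a time. */
static void demo_dispatch(struct workqueue_struct *wq,
			  struct work_struct *work, int *last_cpu)
{
	queue_work_on(demo_next_online_cpu(last_cpu), wq, work);
}
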
drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
1056
struct brcmf_core_priv *cpu;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
1070
cpu = container_of(core, struct brcmf_core_priv, pub);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
1073
val = chip->ops->read32(chip->ctx, cpu->wrapbase + BCMA_IOCTL);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
1675
int iter_rx_q, i, ret, cpu, offset;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
1686
cpu = cpumask_next(i - offset, cpu_online_mask);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
1687
cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
717
int cpu,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
724
if (cpu == 1) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
767
if (cpu == 1)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
774
if (cpu == 1)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
787
int cpu,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
793
if (cpu == 1)
drivers/net/xen-netfront.c
1393
int cpu;
drivers/net/xen-netfront.c
1395
for_each_possible_cpu(cpu) {
drivers/net/xen-netfront.c
1396
struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
drivers/net/xen-netfront.c
1397
struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
drivers/nvdimm/nd_perf.c
126
return cpumap_print_to_pagebuf(true, buf, cpumask_of(nd_pmu->cpu));
drivers/nvdimm/nd_perf.c
129
static int nvdimm_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
drivers/nvdimm/nd_perf.c
139
cpumask_test_and_clear_cpu(cpu, &nd_pmu->arch_cpumask);
drivers/nvdimm/nd_perf.c
145
if (cpu != nd_pmu->cpu)
drivers/nvdimm/nd_perf.c
156
nodeid = cpu_to_node(cpu);
drivers/nvdimm/nd_perf.c
158
target = cpumask_any_but(cpumask, cpu);
drivers/nvdimm/nd_perf.c
160
nd_pmu->cpu = target;
drivers/nvdimm/nd_perf.c
164
perf_pmu_migrate_context(&nd_pmu->pmu, cpu, target);
drivers/nvdimm/nd_perf.c
169
static int nvdimm_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/nvdimm/nd_perf.c
175
if (nd_pmu->cpu >= nr_cpu_ids)
drivers/nvdimm/nd_perf.c
176
nd_pmu->cpu = cpu;
drivers/nvdimm/nd_perf.c
229
nd_pmu->cpu = cpumask_any(&nd_pmu->arch_cpumask);
drivers/nvdimm/nd_perf.c
234
nd_pmu->cpu = cpumask_any(cpumask);
drivers/nvdimm/region_devs.c
922
unsigned int cpu, lane;
drivers/nvdimm/region_devs.c
925
cpu = smp_processor_id();
drivers/nvdimm/region_devs.c
929
lane = cpu % nd_region->num_lanes;
drivers/nvdimm/region_devs.c
930
ndl_count = per_cpu_ptr(nd_region->lane, cpu);
drivers/nvdimm/region_devs.c
935
lane = cpu;
drivers/nvdimm/region_devs.c
944
unsigned int cpu = smp_processor_id();
drivers/nvdimm/region_devs.c
947
ndl_count = per_cpu_ptr(nd_region->lane, cpu);
drivers/nvme/host/tcp.c
1643
int cpu, min_queues = INT_MAX, io_cpu;
drivers/nvme/host/tcp.c
1660
for_each_online_cpu(cpu) {
drivers/nvme/host/tcp.c
1661
int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]);
drivers/nvme/host/tcp.c
1663
if (mq_map[cpu] != qid)
drivers/nvme/host/tcp.c
1666
io_cpu = cpu;
drivers/nvme/host/tcp.c
3029
int cpu;
drivers/nvme/host/tcp.c
3047
for_each_possible_cpu(cpu)
drivers/nvme/host/tcp.c
3048
atomic_set(&nvme_tcp_cpu_queues[cpu], 0);
drivers/of/base.c
2082
int of_find_last_cache_level(unsigned int cpu)
drivers/of/base.c
2085
struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
drivers/of/cpu.c
119
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
drivers/of/cpu.c
124
if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
drivers/of/cpu.c
140
struct device_node *of_cpu_device_node_get(int cpu)
drivers/of/cpu.c
143
cpu_dev = get_cpu_device(cpu);
drivers/of/cpu.c
145
return of_get_cpu_node(cpu, NULL);
drivers/of/cpu.c
160
int cpu;
drivers/of/cpu.c
164
for_each_possible_cpu(cpu) {
drivers/of/cpu.c
165
np = of_cpu_device_node_get(cpu);
drivers/of/cpu.c
169
return cpu;
drivers/of/cpu.c
42
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
drivers/of/cpu.c
44
return (u32)phys_id == cpu;
drivers/of/cpu.c
53
const char *prop_name, int cpu, unsigned int *thread)
drivers/of/cpu.c
61
if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
drivers/of/cpu.c
68
if (arch_match_cpu_phys_id(cpu, hwid)) {
drivers/of/cpu.c
85
int cpu, unsigned int *thread)
drivers/of/cpu.c
94
cpu, thread))
drivers/of/cpu.c
97
return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
drivers/opp/cpu.c
111
int cpu;
drivers/opp/cpu.c
115
for_each_cpu(cpu, cpumask) {
drivers/opp/cpu.c
116
if (cpu == last_cpu)
drivers/opp/cpu.c
119
cpu_dev = get_cpu_device(cpu);
drivers/opp/cpu.c
122
cpu);
drivers/opp/cpu.c
159
int cpu;
drivers/opp/cpu.c
167
for_each_cpu(cpu, cpumask) {
drivers/opp/cpu.c
168
if (cpu == cpu_dev->id)
drivers/opp/cpu.c
171
dev = get_cpu_device(cpu);
drivers/opp/cpu.c
174
__func__, cpu);
drivers/opp/cpu.c
181
__func__, cpu);
drivers/opp/of.c
1217
int cpu, ret;
drivers/opp/of.c
1222
for_each_cpu(cpu, cpumask) {
drivers/opp/of.c
1223
cpu_dev = get_cpu_device(cpu);
drivers/opp/of.c
1226
cpu);
drivers/opp/of.c
1238
__func__, cpu, ret);
drivers/opp/of.c
1248
_dev_pm_opp_cpumask_remove_table(cpumask, cpu);
drivers/opp/of.c
1274
int cpu;
drivers/opp/of.c
1291
for_each_possible_cpu(cpu) {
drivers/opp/of.c
1292
if (cpu == cpu_dev->id)
drivers/opp/of.c
1296
of_cpu_device_node_get(cpu);
drivers/opp/of.c
1300
__func__, cpu);
drivers/opp/of.c
1315
cpumask_set_cpu(cpu, cpumask);
drivers/pci/controller/pci-hyperv.c
1819
int cpu;
drivers/pci/controller/pci-hyperv.c
1824
cpu = cpu_next;
drivers/pci/controller/pci-hyperv.c
1828
return cpu;
drivers/pci/controller/pci-hyperv.c
1832
struct pci_create_interrupt2 *int_pkt, int cpu,
drivers/pci/controller/pci-hyperv.c
1841
hv_cpu_number_to_vp_number(cpu);
drivers/pci/controller/pci-hyperv.c
1848
struct pci_create_interrupt3 *int_pkt, int cpu,
drivers/pci/controller/pci-hyperv.c
1858
hv_cpu_number_to_vp_number(cpu);
drivers/pci/controller/pci-hyperv.c
1904
int cpu;
drivers/pci/controller/pci-hyperv.c
1964
cpu = hv_compose_multi_msi_req_get_cpu();
drivers/pci/controller/pci-hyperv.c
1968
cpu = hv_compose_msi_req_get_cpu(dest);
drivers/pci/controller/pci-hyperv.c
1992
cpu,
drivers/pci/controller/pci-hyperv.c
2000
cpu,
drivers/pci/controller/pci-hyperv.c
622
int cpu, nr_bank;
drivers/pci/controller/pci-hyperv.c
683
for_each_cpu_and(cpu, dest, cpu_online_mask) {
drivers/pci/controller/pci-hyperv.c
685
(1ULL << hv_cpu_number_to_vp_number(cpu));
drivers/pci/controller/pci-hyperv.c
895
int cpu = cpumask_first(cpu_present_mask);
drivers/pci/controller/pci-hyperv.c
897
irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
drivers/pci/controller/pci-xgene-msi.c
134
int cpu;
drivers/pci/controller/pci-xgene-msi.c
136
cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
drivers/pci/controller/pci-xgene-msi.c
138
frame = FIELD_PREP(BIT(3), FIELD_GET(BIT(7), data->hwirq)) | cpu;
drivers/pci/controller/pcie-iproc-msi.c
470
static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
drivers/pci/controller/pcie-iproc-msi.c
474
for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
drivers/pci/controller/pcie-iproc-msi.c
480
static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
drivers/pci/controller/pcie-iproc-msi.c
486
for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
drivers/pci/controller/pcie-iproc-msi.c
493
cpumask_set_cpu(cpu, mask);
drivers/pci/controller/pcie-iproc-msi.c
507
iproc_msi_irq_free(msi, cpu);
drivers/pci/controller/pcie-iproc-msi.c
519
unsigned int cpu;
drivers/pci/controller/pcie-iproc-msi.c
625
for_each_online_cpu(cpu) {
drivers/pci/controller/pcie-iproc-msi.c
626
ret = iproc_msi_irq_setup(msi, cpu);
drivers/pci/controller/pcie-iproc-msi.c
636
for_each_online_cpu(cpu)
drivers/pci/controller/pcie-iproc-msi.c
637
iproc_msi_irq_free(msi, cpu);
drivers/pci/controller/pcie-iproc-msi.c
657
unsigned int i, cpu;
drivers/pci/controller/pcie-iproc-msi.c
664
for_each_online_cpu(cpu)
drivers/pci/controller/pcie-iproc-msi.c
665
iproc_msi_irq_free(msi, cpu);
drivers/pci/pci-driver.c
367
int error, node, cpu;
drivers/pci/pci-driver.c
398
cpu = cpumask_any_and(cpumask_of_node(node),
drivers/pci/pci-driver.c
401
if (cpu < nr_cpu_ids) {
drivers/pci/pci-driver.c
406
queue_work_on(cpu, wq, &arg.work);
drivers/perf/alibaba_uncore_drw_pmu.c
100
int cpu;
drivers/perf/alibaba_uncore_drw_pmu.c
224
return cpumap_print_to_pagebuf(true, buf, cpumask_of(drw_pmu->cpu));
drivers/perf/alibaba_uncore_drw_pmu.c
441
irq->cpu = smp_processor_id();
drivers/perf/alibaba_uncore_drw_pmu.c
458
ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu));
drivers/perf/alibaba_uncore_drw_pmu.c
545
event->cpu = drw_pmu->cpu;
drivers/perf/alibaba_uncore_drw_pmu.c
546
if (event->cpu < 0) {
drivers/perf/alibaba_uncore_drw_pmu.c
696
drw_pmu->cpu = smp_processor_id();
drivers/perf/alibaba_uncore_drw_pmu.c
737
static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/alibaba_uncore_drw_pmu.c
744
if (cpu != irq->cpu)
drivers/perf/alibaba_uncore_drw_pmu.c
747
target = cpumask_any_and_but(cpumask_of_node(cpu_to_node(cpu)),
drivers/perf/alibaba_uncore_drw_pmu.c
748
cpu_online_mask, cpu);
drivers/perf/alibaba_uncore_drw_pmu.c
750
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/alibaba_uncore_drw_pmu.c
758
perf_pmu_migrate_context(&drw_pmu->pmu, irq->cpu, target);
drivers/perf/alibaba_uncore_drw_pmu.c
762
irq->cpu = target;
drivers/perf/alibaba_uncore_drw_pmu.c
89
int cpu;
drivers/perf/amlogic/meson_ddr_pmu_core.c
130
if (event->cpu < 0)
drivers/perf/amlogic/meson_ddr_pmu_core.c
138
event->cpu = pmu->cpu;
drivers/perf/amlogic/meson_ddr_pmu_core.c
194
return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
drivers/perf/amlogic/meson_ddr_pmu_core.c
29
int cpu; /* for cpu hotplug */
drivers/perf/amlogic/meson_ddr_pmu_core.c
393
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/amlogic/meson_ddr_pmu_core.c
398
if (cpu != pmu->cpu)
drivers/perf/amlogic/meson_ddr_pmu_core.c
401
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/amlogic/meson_ddr_pmu_core.c
405
perf_pmu_migrate_context(&pmu->pmu, cpu, target);
drivers/perf/amlogic/meson_ddr_pmu_core.c
406
pmu->cpu = target;
drivers/perf/amlogic/meson_ddr_pmu_core.c
408
WARN_ON(irq_set_affinity(pmu->info.irq_num, cpumask_of(pmu->cpu)));
drivers/perf/amlogic/meson_ddr_pmu_core.c
514
pmu->cpu = raw_smp_processor_id();
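
From here to the end of the section, almost every uncore PMU driver (alibaba_uncore_drw and meson_ddr above; arm-cci, arm-ccn, arm-cmn, arm-ni, arm_dmc620, arm_dsu and nd_perf below) repeats one offline handler: if the CPU going down is the one the PMU counts on, pick a surviving CPU, migrate the perf context to it, and retarget the IRQ. Distilled below with a hypothetical demo_pmu (the NUMA-aware drivers first try cpumask_any_and_but() on the PMU's node before falling back as shown); cpumask_any_but(), perf_pmu_migrate_context() and irq_set_affinity() are the real calls.

#include <linux/perf_event.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/bug.h>

struct demo_pmu {
	struct pmu pmu;
	unsigned int cpu;	/* CPU currently servicing this PMU */
	int irq;
};

static int demo_pmu_offline_cpu(unsigned int cpu, struct demo_pmu *dp)
{
	unsigned int target;

	if (cpu != dp->cpu)		/* not our CPU: nothing to move */
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)	/* last CPU standing */
		return 0;

	perf_pmu_migrate_context(&dp->pmu, cpu, target);
	dp->cpu = target;
	WARN_ON(irq_set_affinity(dp->irq, cpumask_of(target)));
	return 0;
}
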
drivers/perf/arm-cci.c
100
int cpu;
drivers/perf/arm-cci.c
1325
if (event->cpu < 0)
drivers/perf/arm-cci.c
1327
event->cpu = cci_pmu->cpu;
drivers/perf/arm-cci.c
1354
return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu));
drivers/perf/arm-cci.c
1431
static int cci_pmu_offline_cpu(unsigned int cpu)
drivers/perf/arm-cci.c
1435
if (!g_cci_pmu || cpu != g_cci_pmu->cpu)
drivers/perf/arm-cci.c
1438
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/arm-cci.c
1442
perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target);
drivers/perf/arm-cci.c
1443
g_cci_pmu->cpu = target;
drivers/perf/arm-cci.c
1672
cci_pmu->cpu = raw_smp_processor_id();
drivers/perf/arm-ccn.c
1191
static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm-ccn.c
1197
if (cpu != dt->cpu)
drivers/perf/arm-ccn.c
1199
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/arm-ccn.c
1202
perf_pmu_migrate_context(&dt->pmu, cpu, target);
drivers/perf/arm-ccn.c
1203
dt->cpu = target;
drivers/perf/arm-ccn.c
1205
WARN_ON(irq_set_affinity(ccn->irq, cpumask_of(dt->cpu)));
drivers/perf/arm-ccn.c
1281
ccn->dt.cpu = raw_smp_processor_id();
drivers/perf/arm-ccn.c
1285
err = irq_set_affinity(ccn->irq, cpumask_of(ccn->dt.cpu));
drivers/perf/arm-ccn.c
163
unsigned int cpu;
drivers/perf/arm-ccn.c
541
return cpumap_print_to_pagebuf(true, buf, cpumask_of(ccn->dt.cpu));
drivers/perf/arm-ccn.c
728
if (event->cpu < 0) {
drivers/perf/arm-ccn.c
741
event->cpu = ccn->dt.cpu;
drivers/perf/arm-cmn.c
1329
return cpumap_print_to_pagebuf(true, buf, cpumask_of(cmn->cpu));
drivers/perf/arm-cmn.c
1781
event->cpu = cmn->cpu;
drivers/perf/arm-cmn.c
1782
if (event->cpu < 0)
drivers/perf/arm-cmn.c
2010
static void arm_cmn_migrate(struct arm_cmn *cmn, unsigned int cpu)
drivers/perf/arm-cmn.c
2014
perf_pmu_migrate_context(&cmn->pmu, cmn->cpu, cpu);
drivers/perf/arm-cmn.c
2016
irq_set_affinity(cmn->dtc[i].irq, cpumask_of(cpu));
drivers/perf/arm-cmn.c
2017
cmn->cpu = cpu;
drivers/perf/arm-cmn.c
2020
static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
drivers/perf/arm-cmn.c
2027
if (cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node)
drivers/perf/arm-cmn.c
2028
arm_cmn_migrate(cmn, cpu);
drivers/perf/arm-cmn.c
2032
static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
drivers/perf/arm-cmn.c
2039
if (cpu != cmn->cpu)
drivers/perf/arm-cmn.c
2044
target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
drivers/perf/arm-cmn.c
2046
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/arm-cmn.c
2109
err = irq_set_affinity(irq, cpumask_of(cmn->cpu));
drivers/perf/arm-cmn.c
2575
cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
drivers/perf/arm-cmn.c
360
int cpu;
drivers/perf/arm-ni.c
131
int cpu;
drivers/perf/arm-ni.c
242
return cpumap_print_to_pagebuf(true, buf, cpumask_of(ni->cpu));
drivers/perf/arm-ni.c
343
event->cpu = ni->cpu;
drivers/perf/arm-ni.c
660
irq_set_affinity(cd->irq, cpumask_of(ni->cpu));
drivers/perf/arm-ni.c
731
ni->cpu = cpumask_local_spread(0, dev_to_node(ni->dev));
drivers/perf/arm-ni.c
791
static void arm_ni_pmu_migrate(struct arm_ni *ni, unsigned int cpu)
drivers/perf/arm-ni.c
794
perf_pmu_migrate_context(&cd->pmu, ni->cpu, cpu);
drivers/perf/arm-ni.c
795
irq_set_affinity(cd->irq, cpumask_of(cpu));
drivers/perf/arm-ni.c
797
ni->cpu = cpu;
drivers/perf/arm-ni.c
800
static int arm_ni_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
drivers/perf/arm-ni.c
807
if (cpu_to_node(ni->cpu) != node && cpu_to_node(cpu) == node)
drivers/perf/arm-ni.c
808
arm_ni_pmu_migrate(ni, cpu);
drivers/perf/arm-ni.c
812
static int arm_ni_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
drivers/perf/arm-ni.c
819
if (cpu != ni->cpu)
drivers/perf/arm-ni.c
823
target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
drivers/perf/arm-ni.c
825
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/arm_cspmu/arm_cspmu.c
1086
static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
drivers/perf/arm_cspmu/arm_cspmu.c
1091
cpu_dev = get_cpu_device(cpu);
drivers/perf/arm_cspmu/arm_cspmu.c
1110
int cpu;
drivers/perf/arm_cspmu/arm_cspmu.c
1116
for_each_possible_cpu(cpu) {
drivers/perf/arm_cspmu/arm_cspmu.c
1118
get_acpi_id_for_cpu(cpu)) {
drivers/perf/arm_cspmu/arm_cspmu.c
1119
cpumask_set_cpu(cpu, &cspmu->associated_cpus);
drivers/perf/arm_cspmu/arm_cspmu.c
1124
for_each_possible_cpu(cpu) {
drivers/perf/arm_cspmu/arm_cspmu.c
1126
cpu, apmt_node->proc_affinity))
drivers/perf/arm_cspmu/arm_cspmu.c
1129
cpumask_set_cpu(cpu, &cspmu->associated_cpus);
drivers/perf/arm_cspmu/arm_cspmu.c
1145
int ret, cpu;
drivers/perf/arm_cspmu/arm_cspmu.c
1148
cpu = of_cpu_node_to_id(it.node);
drivers/perf/arm_cspmu/arm_cspmu.c
1149
if (cpu < 0)
drivers/perf/arm_cspmu/arm_cspmu.c
1151
cpumask_set_cpu(cpu, &cspmu->associated_cpus);
drivers/perf/arm_cspmu/arm_cspmu.c
1284
static void arm_cspmu_set_active_cpu(int cpu, struct arm_cspmu *cspmu)
drivers/perf/arm_cspmu/arm_cspmu.c
1286
cpumask_set_cpu(cpu, &cspmu->active_cpu);
drivers/perf/arm_cspmu/arm_cspmu.c
1291
static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_cspmu/arm_cspmu.c
1296
if (!cpumask_test_cpu(cpu, &cspmu->associated_cpus))
drivers/perf/arm_cspmu/arm_cspmu.c
1304
arm_cspmu_set_active_cpu(cpu, cspmu);
drivers/perf/arm_cspmu/arm_cspmu.c
1309
static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_cspmu/arm_cspmu.c
1317
if (!cpumask_test_and_clear_cpu(cpu, &cspmu->active_cpu))
drivers/perf/arm_cspmu/arm_cspmu.c
1322
cpu_online_mask, cpu);
drivers/perf/arm_cspmu/arm_cspmu.c
1327
perf_pmu_migrate_context(&cspmu->pmu, cpu, dst);
drivers/perf/arm_cspmu/arm_cspmu.c
658
if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
drivers/perf/arm_cspmu/arm_cspmu.c
668
if (!cpumask_test_cpu(event->cpu, &cspmu->associated_cpus)) {
drivers/perf/arm_cspmu/arm_cspmu.c
675
event->cpu = cpumask_first(&cspmu->active_cpu);
drivers/perf/arm_cspmu/arm_cspmu.c
676
if (event->cpu >= nr_cpu_ids)
drivers/perf/arm_cspmu/nvidia_cspmu.c
375
const int cpu = cpumask_first(&cspmu->associated_cpus);
drivers/perf/arm_cspmu/nvidia_cspmu.c
376
const int socket = cpu_to_node(cpu);
drivers/perf/arm_dmc620_pmu.c
241
cpumask_of(dmc620_pmu->irq->cpu));
drivers/perf/arm_dmc620_pmu.c
442
irq->cpu = raw_smp_processor_id();
drivers/perf/arm_dmc620_pmu.c
451
ret = irq_set_affinity(irq_num, cpumask_of(irq->cpu));
drivers/perf/arm_dmc620_pmu.c
541
event->cpu = dmc620_pmu->irq->cpu;
drivers/perf/arm_dmc620_pmu.c
542
if (event->cpu < 0)
drivers/perf/arm_dmc620_pmu.c
635
static int dmc620_pmu_cpu_teardown(unsigned int cpu,
drivers/perf/arm_dmc620_pmu.c
643
if (cpu != irq->cpu)
drivers/perf/arm_dmc620_pmu.c
646
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/arm_dmc620_pmu.c
653
perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
drivers/perf/arm_dmc620_pmu.c
657
irq->cpu = target;
drivers/perf/arm_dmc620_pmu.c
83
unsigned int cpu;
drivers/perf/arm_dsu_pmu.c
539
if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
drivers/perf/arm_dsu_pmu.c
549
if (!cpumask_test_cpu(event->cpu, &dsu_pmu->associated_cpus)) {
drivers/perf/arm_dsu_pmu.c
560
event->cpu = cpumask_first(&dsu_pmu->active_cpu);
drivers/perf/arm_dsu_pmu.c
561
if (event->cpu >= nr_cpu_ids)
drivers/perf/arm_dsu_pmu.c
594
int i = 0, n, cpu;
drivers/perf/arm_dsu_pmu.c
604
cpu = of_cpu_node_to_id(cpu_node);
drivers/perf/arm_dsu_pmu.c
611
if (cpu < 0)
drivers/perf/arm_dsu_pmu.c
613
cpumask_set_cpu(cpu, mask);
drivers/perf/arm_dsu_pmu.c
626
int cpu;
drivers/perf/arm_dsu_pmu.c
632
for_each_possible_cpu(cpu) {
drivers/perf/arm_dsu_pmu.c
634
struct device *cpu_dev = get_cpu_device(cpu);
drivers/perf/arm_dsu_pmu.c
641
cpumask_set_cpu(cpu, mask);
drivers/perf/arm_dsu_pmu.c
678
static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
drivers/perf/arm_dsu_pmu.c
680
cpumask_set_cpu(cpu, &dsu_pmu->active_cpu);
drivers/perf/arm_dsu_pmu.c
682
pr_warn("Failed to set irq affinity to %d\n", cpu);
drivers/perf/arm_dsu_pmu.c
802
static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_dsu_pmu.c
807
if (!cpumask_test_cpu(cpu, &dsu_pmu->associated_cpus))
drivers/perf/arm_dsu_pmu.c
815
dsu_pmu_set_active_cpu(cpu, dsu_pmu);
drivers/perf/arm_dsu_pmu.c
820
static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_dsu_pmu.c
827
if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
drivers/perf/arm_dsu_pmu.c
831
cpu_online_mask, cpu);
drivers/perf/arm_dsu_pmu.c
836
perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
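arm_cspmu and arm_dsu_pmu both resolve their affine CPUs before registering: a devicetree phandle list (or, on ACPI systems, the APMT/processor-container affinity) is walked and each CPU found is set in an "associated" cpumask that event_init later checks. A rough sketch of the devicetree side only, assuming a hypothetical "cpus" phandle property; the property name and error handling are illustrative:

#include <linux/cpumask.h>
#include <linux/of.h>

/* Collect the CPUs named by a "cpus" phandle list into @mask. */
static int foo_parse_cpus(struct device_node *np, struct cpumask *mask)
{
	struct device_node *cpu_node;
	int i, cpu, n;

	n = of_count_phandle_with_args(np, "cpus", NULL);
	if (n <= 0)
		return -ENODEV;

	for (i = 0; i < n; i++) {
		cpu_node = of_parse_phandle(np, "cpus", i);
		if (!cpu_node)
			break;
		cpu = of_cpu_node_to_id(cpu_node);
		of_node_put(cpu_node);
		/* CPU node not in the kernel's logical map: skip it. */
		if (cpu < 0)
			continue;
		cpumask_set_cpu(cpu, mask);
	}
	return cpumask_empty(mask) ? -ENODEV : 0;
}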
drivers/perf/arm_pmu.c
35
void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
drivers/perf/arm_pmu.c
38
static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
drivers/perf/arm_pmu.c
40
free_irq(irq, per_cpu_ptr(devid, cpu));
drivers/perf/arm_pmu.c
49
static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
drivers/perf/arm_pmu.c
51
free_nmi(irq, per_cpu_ptr(devid, cpu));
drivers/perf/arm_pmu.c
523
if (event->cpu != -1 &&
drivers/perf/arm_pmu.c
524
!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
drivers/perf/arm_pmu.c
563
static bool armpmu_filter(struct pmu *pmu, int cpu)
drivers/perf/arm_pmu.c
566
return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
drivers/perf/arm_pmu.c
589
int cpu, count = 0;
drivers/perf/arm_pmu.c
591
for_each_cpu(cpu, affinity) {
drivers/perf/arm_pmu.c
592
if (per_cpu(cpu_irq, cpu) == irq)
drivers/perf/arm_pmu.c
603
int cpu;
drivers/perf/arm_pmu.c
605
for_each_cpu(cpu, affinity) {
drivers/perf/arm_pmu.c
606
if (per_cpu(cpu_irq, cpu) != irq)
drivers/perf/arm_pmu.c
609
ops = per_cpu(cpu_irq_ops, cpu);
drivers/perf/arm_pmu.c
617
void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu)
drivers/perf/arm_pmu.c
619
if (per_cpu(cpu_irq, cpu) == 0)
drivers/perf/arm_pmu.c
621
if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
drivers/perf/arm_pmu.c
624
per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, armpmu);
drivers/perf/arm_pmu.c
626
per_cpu(cpu_irq, cpu) = 0;
drivers/perf/arm_pmu.c
627
per_cpu(cpu_irq_ops, cpu) = NULL;
drivers/perf/arm_pmu.c
630
int armpmu_request_irq(struct arm_pmu * __percpu *pcpu_armpmu, int irq, int cpu)
drivers/perf/arm_pmu.c
633
struct arm_pmu **armpmu = per_cpu_ptr(pcpu_armpmu, cpu);
drivers/perf/arm_pmu.c
645
err = irq_force_affinity(irq, cpumask_of(cpu));
drivers/perf/arm_pmu.c
649
irq, cpu);
drivers/perf/arm_pmu.c
65
static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
drivers/perf/arm_pmu.c
68
struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
drivers/perf/arm_pmu.c
691
per_cpu(cpu_irq, cpu) = irq;
drivers/perf/arm_pmu.c
692
per_cpu(cpu_irq_ops, cpu) = irq_ops;
drivers/perf/arm_pmu.c
700
static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
drivers/perf/arm_pmu.c
703
return per_cpu(hw_events->irq, cpu);
drivers/perf/arm_pmu.c
717
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_pmu.c
722
if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
drivers/perf/arm_pmu.c
727
irq = armpmu_get_cpu_irq(pmu, cpu);
drivers/perf/arm_pmu.c
729
per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
drivers/perf/arm_pmu.c
734
static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_pmu.c
739
if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
drivers/perf/arm_pmu.c
742
irq = armpmu_get_cpu_irq(pmu, cpu);
drivers/perf/arm_pmu.c
744
per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
drivers/perf/arm_pmu.c
865
int cpu;
drivers/perf/arm_pmu.c
902
for_each_possible_cpu(cpu) {
drivers/perf/arm_pmu.c
905
events = per_cpu_ptr(pmu->hw_events, cpu);
drivers/perf/arm_pmu.c
92
static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
drivers/perf/arm_pmu.c
95
struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
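arm_pmu.c keeps a per-CPU record of the PMU interrupt plus a per-CPU ops table, so shared percpu interrupts and per-CPU SPIs can be torn down through one path. A stripped-down sketch of the request side, with the ops table elided and the "foo" names hypothetical:

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, cpu_irq);

/* Request @irq for @cpu, pin it there, and record it for teardown. */
static int foo_request_cpu_irq(int irq, int cpu, irq_handler_t handler,
			       void __percpu *dev)
{
	int err;

	/* Keep the interrupt on its CPU even across hotplug. */
	err = irq_force_affinity(irq, cpumask_of(cpu));
	if (err)
		return err;

	err = request_irq(irq, handler, IRQF_NOBALANCING | IRQF_NO_THREAD,
			  "foo-pmu", per_cpu_ptr(dev, cpu));
	if (err)
		return err;

	per_cpu(cpu_irq, cpu) = irq;
	return 0;
}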
drivers/perf/arm_pmu_acpi.c
101
this_hetid = find_acpi_cpu_topology_hetero_id(cpu);
drivers/perf/arm_pmu_acpi.c
202
int irq, cpu, irq_cpu, err;
drivers/perf/arm_pmu_acpi.c
204
for_each_possible_cpu(cpu) {
drivers/perf/arm_pmu_acpi.c
205
irq = arm_pmu_acpi_register_irq(cpu);
drivers/perf/arm_pmu_acpi.c
209
cpu, err);
drivers/perf/arm_pmu_acpi.c
212
pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
drivers/perf/arm_pmu_acpi.c
22
static int arm_pmu_acpi_register_irq(int cpu)
drivers/perf/arm_pmu_acpi.c
220
per_cpu(pmu_irqs, cpu) = irq;
drivers/perf/arm_pmu_acpi.c
221
err = armpmu_request_irq(&probed_pmus, irq, cpu);
drivers/perf/arm_pmu_acpi.c
229
for_each_possible_cpu(cpu) {
drivers/perf/arm_pmu_acpi.c
230
irq = per_cpu(pmu_irqs, cpu);
drivers/perf/arm_pmu_acpi.c
234
arm_pmu_acpi_unregister_irq(cpu);
drivers/perf/arm_pmu_acpi.c
253
int cpu;
drivers/perf/arm_pmu_acpi.c
255
for_each_possible_cpu(cpu) {
drivers/perf/arm_pmu_acpi.c
256
pmu = per_cpu(probed_pmus, cpu);
drivers/perf/arm_pmu_acpi.c
27
gicc = acpi_cpu_get_madt_gicc(cpu);
drivers/perf/arm_pmu_acpi.c
273
int cpu;
drivers/perf/arm_pmu_acpi.c
278
for_each_cpu(cpu, &pmu->supported_cpus) {
drivers/perf/arm_pmu_acpi.c
279
int other_irq = per_cpu(hw_events->irq, cpu);
drivers/perf/arm_pmu_acpi.c
296
unsigned int cpu)
drivers/perf/arm_pmu_acpi.c
298
int irq = per_cpu(pmu_irqs, cpu);
drivers/perf/arm_pmu_acpi.c
300
per_cpu(probed_pmus, cpu) = pmu;
drivers/perf/arm_pmu_acpi.c
305
per_cpu(hw_events->irq, cpu) = irq;
drivers/perf/arm_pmu_acpi.c
308
cpumask_set_cpu(cpu, &pmu->supported_cpus);
drivers/perf/arm_pmu_acpi.c
320
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
drivers/perf/arm_pmu_acpi.c
325
if (per_cpu(probed_pmus, cpu))
drivers/perf/arm_pmu_acpi.c
331
cpu);
drivers/perf/arm_pmu_acpi.c
335
arm_pmu_acpi_associate_pmu_cpu(pmu, cpu);
drivers/perf/arm_pmu_acpi.c
342
int cpu;
drivers/perf/arm_pmu_acpi.c
344
for_each_online_cpu(cpu) {
drivers/perf/arm_pmu_acpi.c
345
unsigned long cpu_cpuid = per_cpu(cpu_data, cpu).reg_midr;
drivers/perf/arm_pmu_acpi.c
348
arm_pmu_acpi_associate_pmu_cpu(pmu, cpu);
drivers/perf/arm_pmu_acpi.c
355
unsigned int cpu;
drivers/perf/arm_pmu_acpi.c
381
for_each_online_cpu(cpu) {
drivers/perf/arm_pmu_acpi.c
382
struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
drivers/perf/arm_pmu_acpi.c
393
cpu);
drivers/perf/arm_pmu_acpi.c
397
cpuid = per_cpu(cpu_data, cpu).reg_midr;
drivers/perf/arm_pmu_acpi.c
407
pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
drivers/perf/arm_pmu_acpi.c
414
pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
drivers/perf/arm_pmu_acpi.c
420
pr_warn("Failed to register PMU for CPU%d\n", cpu);
drivers/perf/arm_pmu_acpi.c
60
static void arm_pmu_acpi_unregister_irq(int cpu)
drivers/perf/arm_pmu_acpi.c
65
gicc = acpi_cpu_get_madt_gicc(cpu);
drivers/perf/arm_pmu_acpi.c
76
int cpu, this_hetid, hetid, irq, ret;
drivers/perf/arm_pmu_acpi.c
93
for_each_possible_cpu(cpu) {
drivers/perf/arm_pmu_acpi.c
96
gicc = acpi_cpu_get_madt_gicc(cpu);
drivers/perf/arm_pmu_platform.c
129
int cpu, irq;
drivers/perf/arm_pmu_platform.c
140
cpu = pmu_parse_irq_affinity(dev, i);
drivers/perf/arm_pmu_platform.c
141
if (cpu < 0)
drivers/perf/arm_pmu_platform.c
142
return cpu;
drivers/perf/arm_pmu_platform.c
143
if (cpu >= nr_cpu_ids)
drivers/perf/arm_pmu_platform.c
146
if (per_cpu(hw_events->irq, cpu)) {
drivers/perf/arm_pmu_platform.c
151
per_cpu(hw_events->irq, cpu) = irq;
drivers/perf/arm_pmu_platform.c
152
cpumask_set_cpu(cpu, &pmu->supported_cpus);
drivers/perf/arm_pmu_platform.c
161
int cpu, err = 0;
drivers/perf/arm_pmu_platform.c
163
for_each_cpu(cpu, &armpmu->supported_cpus) {
drivers/perf/arm_pmu_platform.c
164
int irq = per_cpu(hw_events->irq, cpu);
drivers/perf/arm_pmu_platform.c
168
err = armpmu_request_irq(&hw_events->percpu_pmu, irq, cpu);
drivers/perf/arm_pmu_platform.c
178
int cpu;
drivers/perf/arm_pmu_platform.c
181
for_each_cpu(cpu, &armpmu->supported_cpus) {
drivers/perf/arm_pmu_platform.c
182
int irq = per_cpu(hw_events->irq, cpu);
drivers/perf/arm_pmu_platform.c
184
armpmu_free_irq(&hw_events->percpu_pmu, irq, cpu);
drivers/perf/arm_pmu_platform.c
28
int cpu = get_cpu();
drivers/perf/arm_pmu_platform.c
32
pr_info("probing PMU on CPU %d\n", cpu);
drivers/perf/arm_pmu_platform.c
49
int cpu;
drivers/perf/arm_pmu_platform.c
53
for_each_cpu(cpu, &pmu->supported_cpus)
drivers/perf/arm_pmu_platform.c
54
per_cpu(hw_events->irq, cpu) = irq;
drivers/perf/arm_pmu_platform.c
67
int cpu;
drivers/perf/arm_pmu_platform.c
83
cpu = of_cpu_node_to_id(dn);
drivers/perf/arm_pmu_platform.c
84
if (cpu < 0) {
drivers/perf/arm_pmu_platform.c
86
cpu = nr_cpu_ids;
drivers/perf/arm_pmu_platform.c
91
return cpu;
drivers/perf/arm_pmuv3.c
1358
int cpu;
drivers/perf/arm_pmuv3.c
1360
for_each_cpu(cpu, &armpmu->supported_cpus) {
drivers/perf/arm_pmuv3.c
1363
events_cpu = per_cpu_ptr(armpmu->hw_events, cpu);
drivers/perf/arm_smmuv3_pmu.c
412
if (event->cpu < 0) {
drivers/perf/arm_smmuv3_pmu.c
438
event->cpu = smmu_pmu->on_cpu;
drivers/perf/arm_smmuv3_pmu.c
668
static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_smmuv3_pmu.c
674
if (cpu != smmu_pmu->on_cpu)
drivers/perf/arm_smmuv3_pmu.c
677
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/arm_smmuv3_pmu.c
681
perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
drivers/perf/arm_spe_pmu.c
1016
if (cpu == -1)
drivers/perf/arm_spe_pmu.c
1017
cpu = raw_smp_processor_id();
drivers/perf/arm_spe_pmu.c
1019
buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
drivers/perf/arm_spe_pmu.c
1270
static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_spe_pmu.c
1275
if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
drivers/perf/arm_spe_pmu.c
1282
static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
drivers/perf/arm_spe_pmu.c
1287
if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
drivers/perf/arm_spe_pmu.c
814
if (event->cpu >= 0 &&
drivers/perf/arm_spe_pmu.c
815
!cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
drivers/perf/arm_spe_pmu.c
971
int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;
drivers/perf/arm_spe_pmu.c
973
if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
drivers/perf/arm_spe_pmu.c
999
int i, cpu = event->cpu;
drivers/perf/cxl_pmu.c
583
event->cpu = info->on_cpu;
drivers/perf/cxl_pmu.c
911
static int cxl_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/cxl_pmu.c
918
info->on_cpu = cpu;
drivers/perf/cxl_pmu.c
923
WARN_ON(irq_set_affinity(info->irq, cpumask_of(cpu)));
drivers/perf/cxl_pmu.c
928
static int cxl_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/cxl_pmu.c
933
if (info->on_cpu != cpu)
drivers/perf/cxl_pmu.c
937
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/cxl_pmu.c
943
perf_pmu_migrate_context(&info->pmu, cpu, target);
drivers/perf/dwc_pcie_pmu.c
447
if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
drivers/perf/dwc_pcie_pmu.c
467
event->cpu = pcie_pmu->on_cpu;
drivers/perf/dwc_pcie_pmu.c
772
static int dwc_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
drivers/perf/dwc_pcie_pmu.c
784
static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
drivers/perf/dwc_pcie_pmu.c
793
if (cpu != pcie_pmu->on_cpu)
drivers/perf/dwc_pcie_pmu.c
800
target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
drivers/perf/dwc_pcie_pmu.c
802
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/dwc_pcie_pmu.c
810
perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
drivers/perf/fsl_imx8_ddr_perf.c
132
unsigned int cpu;
drivers/perf/fsl_imx8_ddr_perf.c
240
return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
drivers/perf/fsl_imx8_ddr_perf.c
452
if (event->cpu < 0) {
drivers/perf/fsl_imx8_ddr_perf.c
481
event->cpu = pmu->cpu;
drivers/perf/fsl_imx8_ddr_perf.c
758
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/fsl_imx8_ddr_perf.c
763
if (cpu != pmu->cpu)
drivers/perf/fsl_imx8_ddr_perf.c
766
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/fsl_imx8_ddr_perf.c
770
perf_pmu_migrate_context(&pmu->pmu, cpu, target);
drivers/perf/fsl_imx8_ddr_perf.c
771
pmu->cpu = target;
drivers/perf/fsl_imx8_ddr_perf.c
773
WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)));
drivers/perf/fsl_imx8_ddr_perf.c
828
pmu->cpu = raw_smp_processor_id();
drivers/perf/fsl_imx8_ddr_perf.c
867
ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
drivers/perf/fsl_imx9_ddr_perf.c
162
return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
drivers/perf/fsl_imx9_ddr_perf.c
569
if (event->cpu < 0) {
drivers/perf/fsl_imx9_ddr_perf.c
589
event->cpu = pmu->cpu;
drivers/perf/fsl_imx9_ddr_perf.c
759
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/fsl_imx9_ddr_perf.c
764
if (cpu != pmu->cpu)
drivers/perf/fsl_imx9_ddr_perf.c
767
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/fsl_imx9_ddr_perf.c
771
perf_pmu_migrate_context(&pmu->pmu, cpu, target);
drivers/perf/fsl_imx9_ddr_perf.c
772
pmu->cpu = target;
drivers/perf/fsl_imx9_ddr_perf.c
774
WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)));
drivers/perf/fsl_imx9_ddr_perf.c
807
pmu->cpu = raw_smp_processor_id();
drivers/perf/fsl_imx9_ddr_perf.c
839
ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
drivers/perf/fsl_imx9_ddr_perf.c
86
unsigned int cpu;
drivers/perf/fujitsu_uncore_pmu.c
214
if (event->cpu < 0)
drivers/perf/fujitsu_uncore_pmu.c
223
event->cpu = uncorepmu->cpu;
drivers/perf/fujitsu_uncore_pmu.c
377
return cpumap_print_to_pagebuf(true, buf, cpumask_of(uncorepmu->cpu));
drivers/perf/fujitsu_uncore_pmu.c
404
static void fujitsu_uncore_pmu_migrate(struct uncore_pmu *uncorepmu, unsigned int cpu)
drivers/perf/fujitsu_uncore_pmu.c
406
perf_pmu_migrate_context(&uncorepmu->pmu, uncorepmu->cpu, cpu);
drivers/perf/fujitsu_uncore_pmu.c
407
irq_set_affinity(uncorepmu->irq, cpumask_of(cpu));
drivers/perf/fujitsu_uncore_pmu.c
408
uncorepmu->cpu = cpu;
drivers/perf/fujitsu_uncore_pmu.c
411
static int fujitsu_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
drivers/perf/fujitsu_uncore_pmu.c
418
if (cpu_to_node(uncorepmu->cpu) != node && cpu_to_node(cpu) == node)
drivers/perf/fujitsu_uncore_pmu.c
419
fujitsu_uncore_pmu_migrate(uncorepmu, cpu);
drivers/perf/fujitsu_uncore_pmu.c
424
static int fujitsu_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
drivers/perf/fujitsu_uncore_pmu.c
431
if (cpu != uncorepmu->cpu)
drivers/perf/fujitsu_uncore_pmu.c
435
target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
drivers/perf/fujitsu_uncore_pmu.c
437
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/fujitsu_uncore_pmu.c
466
uncorepmu->cpu = cpumask_local_spread(0, dev_to_node(dev));
drivers/perf/fujitsu_uncore_pmu.c
532
ret = irq_set_affinity(irq, cpumask_of(uncorepmu->cpu));
drivers/perf/fujitsu_uncore_pmu.c
61
int cpu;
drivers/perf/hisilicon/hisi_pcie_pmu.c
405
event->cpu = pcie_pmu->on_cpu;
drivers/perf/hisilicon/hisi_pcie_pmu.c
691
static int hisi_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hisi_pcie_pmu.c
703
static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hisi_pcie_pmu.c
710
if (pcie_pmu->on_cpu != cpu)
drivers/perf/hisilicon/hisi_pcie_pmu.c
719
cpu_online_mask, cpu);
drivers/perf/hisilicon/hisi_pcie_pmu.c
721
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/hisilicon/hisi_pcie_pmu.c
728
perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
894
static int hisi_l3c_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
900
ret = hisi_uncore_pmu_online_cpu(cpu, node);
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
915
static int hisi_l3c_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
921
ret = hisi_uncore_pmu_offline_cpu(cpu, node);
drivers/perf/hisilicon/hisi_uncore_pmu.c
227
if (event->cpu < 0)
drivers/perf/hisilicon/hisi_uncore_pmu.c
255
event->cpu = hisi_pmu->on_cpu;
drivers/perf/hisilicon/hisi_uncore_pmu.c
499
int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hisi_uncore_pmu.c
520
cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus);
drivers/perf/hisilicon/hisi_uncore_pmu.c
528
hisi_pmu->on_cpu = cpu;
drivers/perf/hisilicon/hisi_uncore_pmu.c
532
WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
drivers/perf/hisilicon/hisi_uncore_pmu.c
538
int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hisi_uncore_pmu.c
545
if (hisi_pmu->on_cpu != cpu)
drivers/perf/hisilicon/hisi_uncore_pmu.c
557
cpu_online_mask, cpu);
drivers/perf/hisilicon/hisi_uncore_pmu.c
559
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/hisilicon/hisi_uncore_pmu.c
564
perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
drivers/perf/hisilicon/hisi_uncore_pmu.h
162
int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node);
drivers/perf/hisilicon/hisi_uncore_pmu.h
163
int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
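The hisi entries illustrate the standard uncore event_init contract: system-wide counters cannot follow a task and cannot sample, so per-task events are rejected and the event is forcibly bound to the one CPU that services the PMU. A condensed sketch, with a hypothetical foo_pmu that stores its service CPU in on_cpu:

#include <linux/perf_event.h>

struct foo_pmu {
	struct pmu pmu;
	int on_cpu;	/* CPU all events for this PMU run on */
};

static int foo_pmu_event_init(struct perf_event *event)
{
	struct foo_pmu *fp = container_of(event->pmu, struct foo_pmu, pmu);

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Uncore counters count system activity: no task context... */
	if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	/* ...and no sampling, since there is no task to interrupt. */
	if (is_sampling_event(event))
		return -EOPNOTSUPP;

	/* Route everything through the CPU that owns the hardware. */
	event->cpu = fp->on_cpu;
	return 0;
}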
drivers/perf/hisilicon/hns3_pmu.c
1248
event->cpu = hns3_pmu->on_cpu;
drivers/perf/hisilicon/hns3_pmu.c
1462
static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hns3_pmu.c
1471
hns3_pmu->on_cpu = cpu;
drivers/perf/hisilicon/hns3_pmu.c
1472
irq_set_affinity(hns3_pmu->irq, cpumask_of(cpu));
drivers/perf/hisilicon/hns3_pmu.c
1478
static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/hisilicon/hns3_pmu.c
1488
if (hns3_pmu->on_cpu != cpu)
drivers/perf/hisilicon/hns3_pmu.c
1492
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/hisilicon/hns3_pmu.c
1496
perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target);
drivers/perf/marvell_cn10k_ddr_pmu.c
1060
ddr_pmu->cpu = raw_smp_processor_id();
drivers/perf/marvell_cn10k_ddr_pmu.c
152
unsigned int cpu;
drivers/perf/marvell_cn10k_ddr_pmu.c
367
return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
drivers/perf/marvell_cn10k_ddr_pmu.c
485
if (event->cpu < 0) {
drivers/perf/marvell_cn10k_ddr_pmu.c
498
event->cpu = pmu->cpu;
drivers/perf/marvell_cn10k_ddr_pmu.c
914
static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/marvell_cn10k_ddr_pmu.c
920
if (cpu != pmu->cpu)
drivers/perf/marvell_cn10k_ddr_pmu.c
923
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/marvell_cn10k_ddr_pmu.c
927
perf_pmu_migrate_context(&pmu->pmu, cpu, target);
drivers/perf/marvell_cn10k_ddr_pmu.c
928
pmu->cpu = target;
drivers/perf/marvell_cn10k_tad_pmu.c
161
event->cpu = tad_pmu->cpu;
drivers/perf/marvell_cn10k_tad_pmu.c
261
return cpumap_print_to_pagebuf(true, buf, cpumask_of(tad_pmu->cpu));
drivers/perf/marvell_cn10k_tad_pmu.c
34
unsigned int cpu;
drivers/perf/marvell_cn10k_tad_pmu.c
382
tad_pmu->cpu = raw_smp_processor_id();
drivers/perf/marvell_cn10k_tad_pmu.c
449
static int tad_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/marvell_cn10k_tad_pmu.c
454
if (cpu != pmu->cpu)
drivers/perf/marvell_cn10k_tad_pmu.c
457
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/marvell_cn10k_tad_pmu.c
461
perf_pmu_migrate_context(&pmu->pmu, cpu, target);
drivers/perf/marvell_cn10k_tad_pmu.c
462
pmu->cpu = target;
drivers/perf/marvell_pem_pmu.c
167
return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
drivers/perf/marvell_pem_pmu.c
206
if (event->cpu < 0)
drivers/perf/marvell_pem_pmu.c
223
event->cpu = pmu->cpu;
drivers/perf/marvell_pem_pmu.c
297
static int pem_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/marvell_pem_pmu.c
302
if (cpu != pmu->cpu)
drivers/perf/marvell_pem_pmu.c
305
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/marvell_pem_pmu.c
309
perf_pmu_migrate_context(&pmu->pmu, cpu, target);
drivers/perf/marvell_pem_pmu.c
310
pmu->cpu = target;
drivers/perf/marvell_pem_pmu.c
349
pem_pmu->cpu = raw_smp_processor_id();
drivers/perf/marvell_pem_pmu.c
83
unsigned int cpu;
drivers/perf/qcom_l2_pmu.c
164
struct l2cache_pmu *l2cache_pmu, int cpu)
drivers/perf/qcom_l2_pmu.c
166
return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
drivers/perf/qcom_l2_pmu.c
456
if (event->cpu < 0) {
drivers/perf/qcom_l2_pmu.c
488
cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
drivers/perf/qcom_l2_pmu.c
492
"CPU%d not associated with L2 cluster\n", event->cpu);
drivers/perf/qcom_l2_pmu.c
498
(cluster->on_cpu != event->group_leader->cpu)) {
drivers/perf/qcom_l2_pmu.c
501
event->cpu, event->group_leader->cpu);
drivers/perf/qcom_l2_pmu.c
536
event->cpu = cluster->on_cpu;
drivers/perf/qcom_l2_pmu.c
551
cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
drivers/perf/qcom_l2_pmu.c
595
cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
drivers/perf/qcom_l2_pmu.c
621
cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
drivers/perf/qcom_l2_pmu.c
735
struct l2cache_pmu *l2cache_pmu, int cpu)
drivers/perf/qcom_l2_pmu.c
757
"CPU%d associated with cluster %d\n", cpu,
drivers/perf/qcom_l2_pmu.c
759
cpumask_set_cpu(cpu, &cluster->cluster_cpus);
drivers/perf/qcom_l2_pmu.c
760
*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
drivers/perf/qcom_l2_pmu.c
767
static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/qcom_l2_pmu.c
773
cluster = get_cluster_pmu(l2cache_pmu, cpu);
drivers/perf/qcom_l2_pmu.c
776
cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
drivers/perf/qcom_l2_pmu.c
779
WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu);
drivers/perf/qcom_l2_pmu.c
792
cluster->on_cpu = cpu;
drivers/perf/qcom_l2_pmu.c
793
cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
drivers/perf/qcom_l2_pmu.c
796
WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu)));
drivers/perf/qcom_l2_pmu.c
802
static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/qcom_l2_pmu.c
809
cluster = get_cluster_pmu(l2cache_pmu, cpu);
drivers/perf/qcom_l2_pmu.c
814
if (cluster->on_cpu != cpu)
drivers/perf/qcom_l2_pmu.c
818
cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
drivers/perf/qcom_l2_pmu.c
823
cpu_online_mask, cpu);
drivers/perf/qcom_l2_pmu.c
829
perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
drivers/perf/qcom_l3_pmu.c
499
if (event->cpu < 0)
drivers/perf/qcom_l3_pmu.c
519
event->cpu = cpumask_first(&l3pmu->cpumask);
drivers/perf/qcom_l3_pmu.c
694
static int qcom_l3_cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/qcom_l3_pmu.c
700
cpumask_set_cpu(cpu, &l3pmu->cpumask);
drivers/perf/qcom_l3_pmu.c
705
static int qcom_l3_cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/qcom_l3_pmu.c
710
if (!cpumask_test_and_clear_cpu(cpu, &l3pmu->cpumask))
drivers/perf/qcom_l3_pmu.c
712
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/qcom_l3_pmu.c
715
perf_pmu_migrate_context(&l3pmu->pmu, cpu, target);
drivers/perf/riscv_pmu_sbi.c
1148
static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/riscv_pmu_sbi.c
1172
return pmu_sbi_snapshot_setup(pmu, cpu);
drivers/perf/riscv_pmu_sbi.c
1177
static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/riscv_pmu_sbi.c
1481
int cpu;
drivers/perf/riscv_pmu_sbi.c
1487
cpu = get_cpu();
drivers/perf/riscv_pmu_sbi.c
1488
ret = pmu_sbi_snapshot_setup(pmu, cpu);
drivers/perf/riscv_pmu_sbi.c
658
int cpu;
drivers/perf/riscv_pmu_sbi.c
660
for_each_possible_cpu(cpu) {
drivers/perf/riscv_pmu_sbi.c
661
struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
drivers/perf/riscv_pmu_sbi.c
674
int cpu;
drivers/perf/riscv_pmu_sbi.c
677
for_each_possible_cpu(cpu) {
drivers/perf/riscv_pmu_sbi.c
678
struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
drivers/perf/riscv_pmu_sbi.c
706
static int pmu_sbi_snapshot_setup(struct riscv_pmu *pmu, int cpu)
drivers/perf/riscv_pmu_sbi.c
711
cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
drivers/perf/starfive_starlink_pmu.c
384
if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
drivers/perf/starfive_starlink_pmu.c
392
event->cpu = cpumask_first(&starlink_pmu->cpumask);
drivers/perf/starfive_starlink_pmu.c
588
starlink_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/starfive_starlink_pmu.c
595
cpumask_set_cpu(cpu, &starlink_pmu->cpumask);
drivers/perf/starfive_starlink_pmu.c
597
WARN_ON(irq_set_affinity(starlink_pmu->irq, cpumask_of(cpu)));
drivers/perf/starfive_starlink_pmu.c
603
starlink_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/starfive_starlink_pmu.c
610
if (!cpumask_test_and_clear_cpu(cpu, &starlink_pmu->cpumask))
drivers/perf/starfive_starlink_pmu.c
613
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/starfive_starlink_pmu.c
617
perf_pmu_migrate_context(&starlink_pmu->pmu, cpu, target);
drivers/perf/thunderx2_pmu.c
257
return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
drivers/perf/thunderx2_pmu.c
584
if (event->cpu < 0)
drivers/perf/thunderx2_pmu.c
588
if (tx2_pmu->cpu >= nr_cpu_ids)
drivers/perf/thunderx2_pmu.c
590
event->cpu = tx2_pmu->cpu;
drivers/perf/thunderx2_pmu.c
747
int ret, cpu;
drivers/perf/thunderx2_pmu.c
749
cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
drivers/perf/thunderx2_pmu.c
752
tx2_pmu->cpu = cpu;
drivers/perf/thunderx2_pmu.c
909
static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
drivers/perf/thunderx2_pmu.c
920
if ((tx2_pmu->cpu >= nr_cpu_ids) &&
drivers/perf/thunderx2_pmu.c
921
(tx2_pmu->node == cpu_to_node(cpu)))
drivers/perf/thunderx2_pmu.c
922
tx2_pmu->cpu = cpu;
drivers/perf/thunderx2_pmu.c
927
static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
drivers/perf/thunderx2_pmu.c
936
if (cpu != tx2_pmu->cpu)
drivers/perf/thunderx2_pmu.c
943
cpu_online_mask, cpu);
drivers/perf/thunderx2_pmu.c
945
tx2_pmu->cpu = new_cpu;
drivers/perf/thunderx2_pmu.c
948
perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
drivers/perf/thunderx2_pmu.c
96
int cpu;
drivers/perf/xgene_pmu.c
126
cpumask_t cpu;
drivers/perf/xgene_pmu.c
1780
static int xgene_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/xgene_pmu.c
1785
if (cpumask_empty(&xgene_pmu->cpu))
drivers/perf/xgene_pmu.c
1786
cpumask_set_cpu(cpu, &xgene_pmu->cpu);
drivers/perf/xgene_pmu.c
1789
WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));
drivers/perf/xgene_pmu.c
1794
static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
drivers/perf/xgene_pmu.c
1801
if (!cpumask_test_and_clear_cpu(cpu, &xgene_pmu->cpu))
drivers/perf/xgene_pmu.c
1803
target = cpumask_any_but(cpu_online_mask, cpu);
drivers/perf/xgene_pmu.c
1808
perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
drivers/perf/xgene_pmu.c
1811
perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
drivers/perf/xgene_pmu.c
1814
perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
drivers/perf/xgene_pmu.c
1817
perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
drivers/perf/xgene_pmu.c
1820
cpumask_set_cpu(target, &xgene_pmu->cpu);
drivers/perf/xgene_pmu.c
1822
WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));
drivers/perf/xgene_pmu.c
598
return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
drivers/perf/xgene_pmu.c
894
if (event->cpu < 0)
drivers/perf/xgene_pmu.c
905
event->cpu = cpumask_first(&pmu_dev->parent->cpu);
drivers/pinctrl/tegra/pinctrl-tegra114.c
1463
FUNCTION(cpu),
drivers/pinctrl/tegra/pinctrl-tegra124.c
1623
FUNCTION(cpu),
drivers/pinctrl/tegra/pinctrl-tegra210.c
1199
FUNCTION(cpu),
drivers/platform/mips/cpu_hwmon.c
22
int loongson3_cpu_temp(int cpu)
drivers/platform/mips/cpu_hwmon.c
31
reg = LOONGSON_CHIPTEMP(cpu);
drivers/platform/x86/amd/hfi/hfi.c
105
int cpu;
drivers/platform/x86/amd/hfi/hfi.c
248
static int amd_set_hfi_ipcc_score(struct amd_hfi_cpuinfo *hfi_cpuinfo, int cpu)
drivers/platform/x86/amd/hfi/hfi.c
254
sched_set_itmt_core_prio(hfi_cpuinfo->ipcc_scores[0], cpu);
drivers/platform/x86/amd/hfi/hfi.c
259
static int amd_hfi_set_state(unsigned int cpu, bool state)
drivers/platform/x86/amd/hfi/hfi.c
263
ret = wrmsrq_on_cpu(cpu, MSR_AMD_WORKLOAD_CLASS_CONFIG, state ? 1 : 0);
drivers/platform/x86/amd/hfi/hfi.c
267
return wrmsrq_on_cpu(cpu, MSR_AMD_WORKLOAD_HRST, 0x1);
drivers/platform/x86/amd/hfi/hfi.c
276
static int amd_hfi_online(unsigned int cpu)
drivers/platform/x86/amd/hfi/hfi.c
278
struct amd_hfi_cpuinfo *hfi_info = per_cpu_ptr(&amd_hfi_cpuinfo, cpu);
drivers/platform/x86/amd/hfi/hfi.c
295
ret = amd_hfi_set_state(cpu, true);
drivers/platform/x86/amd/hfi/hfi.c
297
pr_err("WCT enable failed for CPU %u\n", cpu);
drivers/platform/x86/amd/hfi/hfi.c
310
static int amd_hfi_offline(unsigned int cpu)
drivers/platform/x86/amd/hfi/hfi.c
312
struct amd_hfi_cpuinfo *hfi_info = &per_cpu(amd_hfi_cpuinfo, cpu);
drivers/platform/x86/amd/hfi/hfi.c
320
ret = amd_hfi_set_state(cpu, false);
drivers/platform/x86/amd/hfi/hfi.c
322
pr_err("WCT disable failed for CPU %u\n", cpu);
drivers/platform/x86/amd/hfi/hfi.c
329
int cpu;
drivers/platform/x86/amd/hfi/hfi.c
332
for_each_possible_cpu(cpu) {
drivers/platform/x86/amd/hfi/hfi.c
333
struct amd_hfi_cpuinfo *hfi_cpuinfo = per_cpu_ptr(&amd_hfi_cpuinfo, cpu);
drivers/platform/x86/amd/hfi/hfi.c
335
ret = amd_set_hfi_ipcc_score(hfi_cpuinfo, cpu);
drivers/platform/x86/amd/hfi/hfi.c
404
u32 cpu, idx;
drivers/platform/x86/amd/hfi/hfi.c
407
for_each_possible_cpu(cpu) {
drivers/platform/x86/amd/hfi/hfi.c
408
struct amd_hfi_cpuinfo *hfi_cpuinfo = per_cpu_ptr(&amd_hfi_cpuinfo, cpu);
drivers/platform/x86/amd/hfi/hfi.c
410
seq_printf(s, "%d", cpu);
drivers/platform/x86/amd/hfi/hfi.c
424
int ret, cpu;
drivers/platform/x86/amd/hfi/hfi.c
426
for_each_online_cpu(cpu) {
drivers/platform/x86/amd/hfi/hfi.c
427
ret = amd_hfi_set_state(cpu, true);
drivers/platform/x86/amd/hfi/hfi.c
439
int ret, cpu;
drivers/platform/x86/amd/hfi/hfi.c
441
for_each_online_cpu(cpu) {
drivers/platform/x86/amd/hfi/hfi.c
442
ret = amd_hfi_set_state(cpu, false);
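The AMD HFI entries flip per-CPU MSR state from process context with wrmsrq_on_cpu(), which IPIs the target CPU instead of requiring the caller to migrate there. A sketch of the enable-on-all-online pattern, with a hypothetical MSR constant standing in for the real register:

#include <asm/msr.h>
#include <linux/cpu.h>

#define FOO_MSR_FEATURE_CTL	0xc0000000	/* hypothetical MSR address */

/* Enable or disable the feature on every online CPU, stopping at the
 * first failure, as the driver indexed above does. */
static int foo_set_state_all(bool enable)
{
	int cpu, ret;

	for_each_online_cpu(cpu) {
		ret = wrmsrq_on_cpu(cpu, FOO_MSR_FEATURE_CTL, enable ? 1 : 0);
		if (ret)
			return ret;
	}
	return 0;
}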
drivers/platform/x86/compal-laptop.c
481
TEMPERATURE_SHOW_TEMP_AND_LABEL(cpu, TEMP_CPU, "CPU_TEMP");
drivers/platform/x86/intel/ifs/ifs.h
413
int do_core_test(int cpu, struct device *dev);
drivers/platform/x86/intel/ifs/load.c
328
int curr_pkg, cpu, ret;
drivers/platform/x86/intel/ifs/load.c
342
for_each_online_cpu(cpu) {
drivers/platform/x86/intel/ifs/load.c
343
curr_pkg = topology_physical_package_id(cpu);
drivers/platform/x86/intel/ifs/load.c
349
schedule_work_on(cpu, &local_work.w);
drivers/platform/x86/intel/ifs/runtest.c
102
cpumask_pr_args(cpu_smt_mask(cpu)),
drivers/platform/x86/intel/ifs/runtest.c
106
cpumask_pr_args(cpu_smt_mask(cpu)), status.data);
drivers/platform/x86/intel/ifs/runtest.c
110
static void message_fail(struct device *dev, int cpu, union ifs_status status)
drivers/platform/x86/intel/ifs/runtest.c
123
cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version);
drivers/platform/x86/intel/ifs/runtest.c
164
int cpu = smp_processor_id();
drivers/platform/x86/intel/ifs/runtest.c
165
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
drivers/platform/x86/intel/ifs/runtest.c
184
int cpu = smp_processor_id(), start, stop;
drivers/platform/x86/intel/ifs/runtest.c
201
first = cpumask_first(cpu_smt_mask(cpu));
drivers/platform/x86/intel/ifs/runtest.c
219
if (cpu == first)
drivers/platform/x86/intel/ifs/runtest.c
231
static void ifs_test_core(int cpu, struct device *dev)
drivers/platform/x86/intel/ifs/runtest.c
271
stop_core_cpuslocked(cpu, doscan, &params);
drivers/platform/x86/intel/ifs/runtest.c
302
message_fail(dev, cpu, status);
drivers/platform/x86/intel/ifs/runtest.c
305
message_not_tested(dev, cpu, status);
drivers/platform/x86/intel/ifs/runtest.c
314
int cpu = smp_processor_id();
drivers/platform/x86/intel/ifs/runtest.c
322
first = cpumask_first(cpu_smt_mask(cpu));
drivers/platform/x86/intel/ifs/runtest.c
324
if (cpu == first) {
drivers/platform/x86/intel/ifs/runtest.c
333
static void ifs_array_test_core(int cpu, struct device *dev)
drivers/platform/x86/intel/ifs/runtest.c
351
stop_core_cpuslocked(cpu, do_array_test, &command);
drivers/platform/x86/intel/ifs/runtest.c
372
int cpu = smp_processor_id();
drivers/platform/x86/intel/ifs/runtest.c
375
first = cpumask_first(cpu_smt_mask(cpu));
drivers/platform/x86/intel/ifs/runtest.c
377
if (cpu == first) {
drivers/platform/x86/intel/ifs/runtest.c
385
static void ifs_array_test_gen1(int cpu, struct device *dev)
drivers/platform/x86/intel/ifs/runtest.c
390
stop_core_cpuslocked(cpu, do_array_test_gen1, &status);
drivers/platform/x86/intel/ifs/runtest.c
436
static void sbaf_message_not_tested(struct device *dev, int cpu, u64 status_data)
drivers/platform/x86/intel/ifs/runtest.c
442
cpumask_pr_args(cpu_smt_mask(cpu)),
drivers/platform/x86/intel/ifs/runtest.c
446
cpumask_pr_args(cpu_smt_mask(cpu)));
drivers/platform/x86/intel/ifs/runtest.c
449
cpumask_pr_args(cpu_smt_mask(cpu)),
drivers/platform/x86/intel/ifs/runtest.c
453
cpumask_pr_args(cpu_smt_mask(cpu)), status.data);
drivers/platform/x86/intel/ifs/runtest.c
457
static void sbaf_message_fail(struct device *dev, int cpu, union ifs_sbaf_status status)
drivers/platform/x86/intel/ifs/runtest.c
462
cpumask_pr_args(cpu_smt_mask(cpu)));
drivers/platform/x86/intel/ifs/runtest.c
468
cpumask_pr_args(cpu_smt_mask(cpu)));
drivers/platform/x86/intel/ifs/runtest.c
513
int cpu = smp_processor_id();
drivers/platform/x86/intel/ifs/runtest.c
521
first = cpumask_first(cpu_smt_mask(cpu));
drivers/platform/x86/intel/ifs/runtest.c
535
if (cpu == first)
drivers/platform/x86/intel/ifs/runtest.c
541
static void ifs_sbaf_test_core(int cpu, struct device *dev)
drivers/platform/x86/intel/ifs/runtest.c
572
stop_core_cpuslocked(cpu, dosbaf, &run_params);
drivers/platform/x86/intel/ifs/runtest.c
608
sbaf_message_fail(dev, cpu, status);
drivers/platform/x86/intel/ifs/runtest.c
612
sbaf_message_not_tested(dev, cpu, status.data);
drivers/platform/x86/intel/ifs/runtest.c
623
int do_core_test(int cpu, struct device *dev)
drivers/platform/x86/intel/ifs/runtest.c
632
if (!cpu_online(cpu)) {
drivers/platform/x86/intel/ifs/runtest.c
633
dev_info(dev, "cannot test on the offline cpu %d\n", cpu);
drivers/platform/x86/intel/ifs/runtest.c
643
ifs_test_core(cpu, dev);
drivers/platform/x86/intel/ifs/runtest.c
647
ifs_array_test_core(cpu, dev);
drivers/platform/x86/intel/ifs/runtest.c
649
ifs_array_test_gen1(cpu, dev);
drivers/platform/x86/intel/ifs/runtest.c
655
ifs_sbaf_test_core(cpu, dev);
drivers/platform/x86/intel/ifs/runtest.c
78
static void message_not_tested(struct device *dev, int cpu, union ifs_status status)
drivers/platform/x86/intel/ifs/runtest.c
89
cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version);
drivers/platform/x86/intel/ifs/runtest.c
95
cpumask_pr_args(cpu_smt_mask(cpu)),
drivers/platform/x86/intel/ifs/runtest.c
99
cpumask_pr_args(cpu_smt_mask(cpu)));
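The IFS runtest entries revolve around one idea: an in-field scan must run simultaneously on every SMT sibling of a core, so the test is dispatched with stop_core_cpuslocked() and only the first CPU in cpu_smt_mask() issues the privileged command while the siblings sit in the stopper rendezvous. A bare-bones sketch with a hypothetical do_test() payload:

#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/stop_machine.h>
#include <linux/topology.h>

/* Runs on every SMT sibling of the target core, interrupts disabled. */
static int do_test(void *data)
{
	int cpu = smp_processor_id();

	if (cpu == cpumask_first(cpu_smt_mask(cpu))) {
		/* hypothetical: the privileged test trigger would go here,
		 * while the sibling threads wait in the rendezvous */
	}
	return 0;
}

static void run_core_test(int cpu)
{
	cpus_read_lock();
	if (cpu_online(cpu))
		stop_core_cpuslocked(cpu, do_test, NULL);
	cpus_read_unlock();
}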
drivers/platform/x86/intel/ifs/sysfs.c
67
unsigned int cpu;
drivers/platform/x86/intel/ifs/sysfs.c
70
rc = kstrtouint(buf, 0, &cpu);
drivers/platform/x86/intel/ifs/sysfs.c
71
if (rc < 0 || cpu >= nr_cpu_ids)
drivers/platform/x86/intel/ifs/sysfs.c
77
rc = do_core_test(cpu, dev);
drivers/platform/x86/intel/plr_tpmi.c
121
static int plr_read_cpu_status(struct tpmi_plr_die *plr_die, int cpu,
drivers/platform/x86/intel/plr_tpmi.c
129
regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu));
drivers/platform/x86/intel/plr_tpmi.c
145
static int plr_clear_cpu_status(struct tpmi_plr_die *plr_die, int cpu)
drivers/platform/x86/intel/plr_tpmi.c
151
regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu));
drivers/platform/x86/intel/plr_tpmi.c
204
for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
drivers/platform/x86/intel/plr_tpmi.c
205
if (plr_die->die_id != tpmi_get_power_domain_id(cpu))
drivers/platform/x86/intel/plr_tpmi.c
208
if (plr_die->package_id != topology_physical_package_id(cpu))
drivers/platform/x86/intel/plr_tpmi.c
211
seq_printf(s, "cpu%d", cpu);
drivers/platform/x86/intel/plr_tpmi.c
212
ret = plr_read_cpu_status(plr_die, cpu, &val);
drivers/platform/x86/intel/plr_tpmi.c
215
cpu, ret);
drivers/platform/x86/intel/plr_tpmi.c
244
for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
drivers/platform/x86/intel/plr_tpmi.c
245
if (plr_die->die_id != tpmi_get_power_domain_id(cpu))
drivers/platform/x86/intel/plr_tpmi.c
248
if (plr_die->package_id != topology_physical_package_id(cpu))
drivers/platform/x86/intel/plr_tpmi.c
251
plr_clear_cpu_status(plr_die, cpu);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
135
int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
145
if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
154
ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
171
mbox_cmd.logical_cpu = sst_cmd->cpu;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
195
wrmsrq_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
289
static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
297
if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
298
cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
301
pkg_id = topology_logical_package_id(cpu);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
305
bus_number = isst_cpu_info[cpu].bus_info[bus_no];
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
323
cpu, bus_no, dev, fn);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
327
if (node == isst_cpu_info[cpu].numa_node) {
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
367
struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
371
if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
372
cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
375
pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
380
return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
384
static int isst_if_cpu_online(unsigned int cpu)
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
389
isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
394
isst_cpu_info[cpu].bus_info[0] = -1;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
395
isst_cpu_info[cpu].bus_info[1] = -1;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
397
isst_cpu_info[cpu].bus_info[0] = data & 0xff;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
398
isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
399
isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
400
isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
412
isst_cpu_info[cpu].punit_cpu_id = -1;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
417
isst_cpu_info[cpu].punit_cpu_id = data;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
77
int cpu;
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
87
static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
96
sst_cmd->cpu = cpu;
drivers/platform/x86/intel/speed_select_if/isst_if_common.h
76
struct pci_dev *isst_if_get_pci_dev(int cpu, int bus, int dev, int fn);
drivers/platform/x86/intel/speed_select_if/isst_if_common.h
79
int isst_store_cmd(int cmd, int sub_command, u32 cpu, int mbox_cmd,
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
725
int offset, shift, cpu;
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
735
cpu = clos_assoc.logical_cpu;
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
739
punit_cpu_no = cpu;
drivers/platform/x86/intel/tpmi_power_domains.c
166
static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info)
drivers/platform/x86/intel/tpmi_power_domains.c
181
info->pkg_id = topology_logical_package_id(cpu);
drivers/platform/x86/intel/tpmi_power_domains.c
182
info->linux_cpu = cpu;
drivers/platform/x86/intel/tpmi_power_domains.c
187
static int tpmi_cpu_online(unsigned int cpu)
drivers/platform/x86/intel/tpmi_power_domains.c
189
struct tpmi_cpu_info *info = &per_cpu(tpmi_cpu_info, cpu);
drivers/platform/x86/intel/tpmi_power_domains.c
193
ret = tpmi_get_logical_id(cpu, info);
drivers/platform/x86/intel/tpmi_power_domains.c
200
cpumask_set_cpu(cpu, &tpmi_power_domain_mask[index]);
drivers/platform/x86/intel/tpmi_power_domains.c
204
topology_die_id(cpu);
drivers/platform/x86/intel/turbo_max_3.c
100
sched_set_itmt_core_prio(priority, cpu);
drivers/platform/x86/intel/turbo_max_3.c
36
static int get_oc_core_priority(unsigned int cpu)
drivers/platform/x86/intel/turbo_max_3.c
47
pr_debug("cpu %d OC mailbox write failed\n", cpu);
drivers/platform/x86/intel/turbo_max_3.c
54
pr_debug("cpu %d OC mailbox read failed\n", cpu);
drivers/platform/x86/intel/turbo_max_3.c
59
pr_debug("cpu %d OC mailbox still processing\n", cpu);
drivers/platform/x86/intel/turbo_max_3.c
65
pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
drivers/platform/x86/intel/turbo_max_3.c
71
pr_debug("cpu %d max_ratio %d\n", cpu, ret);
drivers/platform/x86/intel/turbo_max_3.c
91
static int itmt_legacy_cpu_online(unsigned int cpu)
drivers/platform/x86/intel/turbo_max_3.c
96
priority = get_oc_core_priority(cpu);
drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
255
int uncore_freq_add_entry(struct uncore_data *data, int cpu)
drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
262
data->control_cpu = cpu;
drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
286
data->control_cpu = cpu;
drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
114
int uncore_freq_add_entry(struct uncore_data *data, int cpu);
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
136
static struct uncore_data *uncore_get_instance(unsigned int cpu)
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
138
int id = topology_logical_die_id(cpu);
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
146
static int uncore_event_cpu_online(unsigned int cpu)
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
153
target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
157
data = uncore_get_instance(cpu);
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
161
data->package_id = topology_physical_package_id(cpu);
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
162
data->die_id = topology_die_id(cpu);
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
165
ret = uncore_freq_add_entry(data, cpu);
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
170
cpumask_set_cpu(cpu, &uncore_cpu_mask);
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
175
static int uncore_event_cpu_offline(unsigned int cpu)
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
180
data = uncore_get_instance(cpu);
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
185
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
189
target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
drivers/platform/x86/intel_ips.c
628
static bool cpu_exceeded(struct ips_driver *ips, int cpu)
drivers/platform/x86/intel_ips.c
635
avg = cpu ? ips->ctv2_avg_temp : ips->ctv1_avg_temp;
drivers/platform/x86/intel_ips.c
865
static u16 read_ctv(struct ips_driver *ips, int cpu)
drivers/platform/x86/intel_ips.c
867
int reg = cpu ? THM_CTV2 : THM_CTV1;
drivers/platform/x86/msi-ec.c
1026
.cpu = {
drivers/platform/x86/msi-ec.c
1103
.cpu = {
drivers/platform/x86/msi-ec.c
171
.cpu = {
drivers/platform/x86/msi-ec.c
249
.cpu = {
drivers/platform/x86/msi-ec.c
327
.cpu = {
drivers/platform/x86/msi-ec.c
404
.cpu = {
drivers/platform/x86/msi-ec.c
483
.cpu = {
drivers/platform/x86/msi-ec.c
562
.cpu = {
drivers/platform/x86/msi-ec.c
642
.cpu = {
drivers/platform/x86/msi-ec.c
719
.cpu = {
drivers/platform/x86/msi-ec.c
796
.cpu = {
drivers/platform/x86/msi-ec.c
874
.cpu = {
drivers/platform/x86/msi-ec.c
91
.cpu = {
drivers/platform/x86/msi-ec.c
952
.cpu = {
drivers/platform/x86/msi-ec.h
116
struct msi_ec_cpu_conf cpu;
drivers/pmdomain/core.c
1875
int cpu, bool set, unsigned int depth)
drivers/pmdomain/core.c
1886
genpd_update_cpumask(parent, cpu, set, depth + 1);
drivers/pmdomain/core.c
1891
cpumask_set_cpu(cpu, genpd->cpus);
drivers/pmdomain/core.c
1893
cpumask_clear_cpu(cpu, genpd->cpus);
drivers/pmdomain/core.c
1896
static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
drivers/pmdomain/core.c
1898
if (cpu >= 0)
drivers/pmdomain/core.c
1899
genpd_update_cpumask(genpd, cpu, true, 0);
drivers/pmdomain/core.c
1902
static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
drivers/pmdomain/core.c
1904
if (cpu >= 0)
drivers/pmdomain/core.c
1905
genpd_update_cpumask(genpd, cpu, false, 0);
drivers/pmdomain/core.c
1910
int cpu;
drivers/pmdomain/core.c
1915
for_each_possible_cpu(cpu) {
drivers/pmdomain/core.c
1916
if (get_cpu_device(cpu) == dev)
drivers/pmdomain/core.c
1917
return cpu;
drivers/pmdomain/core.c
1936
gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
drivers/pmdomain/core.c
1946
genpd_set_cpumask(genpd, gpd_data->cpu);
drivers/pmdomain/core.c
2011
genpd_clear_cpumask(genpd, gpd_data->cpu);
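pmdomain/core.c needs to know whether an attaching device is a CPU, because CPU PM domains track their member CPUs for latency-QoS decisions. The mapping is brute force: compare the device against every possible CPU's device. A sketch of that idiom:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/device.h>

/* Return the logical CPU number @dev represents, or -1 if it is not
 * a CPU device; mirrors the lookup loop indexed above. */
static int dev_to_cpu(struct device *dev)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}
	return -1;
}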
drivers/pmdomain/governor.c
356
int cpu, i;
drivers/pmdomain/governor.c
376
for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
drivers/pmdomain/governor.c
377
dev = per_cpu(cpuidle_devices, cpu);
drivers/pmdomain/governor.c
384
cpu_dev = get_cpu_device(cpu);
drivers/pmdomain/renesas/rcar-sysc.c
482
int rcar_sysc_power_down_cpu(unsigned int cpu)
drivers/pmdomain/renesas/rcar-sysc.c
484
return rcar_sysc_power_cpu(cpu, false);
drivers/pmdomain/renesas/rcar-sysc.c
487
int rcar_sysc_power_up_cpu(unsigned int cpu)
drivers/pmdomain/renesas/rcar-sysc.c
489
return rcar_sysc_power_cpu(cpu, true);
drivers/pnp/pnpbios/bioscalls.c
100
get_cpu_gdt_rw(cpu)[0x40 / 8] = bad_bios_desc;
drivers/pnp/pnpbios/bioscalls.c
138
get_cpu_gdt_rw(cpu)[0x40 / 8] = save_desc_40;
drivers/pnp/pnpbios/bioscalls.c
56
#define Q2_SET_SEL(cpu, selname, address, size) \
drivers/pnp/pnpbios/bioscalls.c
58
struct desc_struct *gdt = get_cpu_gdt_rw((cpu)); \
drivers/pnp/pnpbios/bioscalls.c
89
int cpu;
drivers/pnp/pnpbios/bioscalls.c
98
cpu = get_cpu();
drivers/pnp/pnpbios/bioscalls.c
99
save_desc_40 = get_cpu_gdt_rw(cpu)[0x40 / 8];
drivers/power/reset/sc27xx-poweroff.c
34
int cpu;
drivers/power/reset/sc27xx-poweroff.c
36
for_each_online_cpu(cpu) {
drivers/power/reset/sc27xx-poweroff.c
37
if (cpu != smp_processor_id())
drivers/power/reset/sc27xx-poweroff.c
38
remove_cpu(cpu);
drivers/powercap/dtpm_cpu.c
104
freq = cpufreq_quick_get(dtpm_cpu->cpu);
drivers/powercap/dtpm_cpu.c
124
struct em_perf_domain *em = em_cpu_get(dtpm_cpu->cpu);
drivers/powercap/dtpm_cpu.c
152
policy = cpufreq_cpu_get(dtpm_cpu->cpu);
drivers/powercap/dtpm_cpu.c
154
for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
drivers/powercap/dtpm_cpu.c
155
per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;
drivers/powercap/dtpm_cpu.c
170
static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
drivers/powercap/dtpm_cpu.c
174
dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
drivers/powercap/dtpm_cpu.c
181
static int cpuhp_dtpm_cpu_online(unsigned int cpu)
drivers/powercap/dtpm_cpu.c
185
dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
drivers/powercap/dtpm_cpu.c
192
static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
drivers/powercap/dtpm_cpu.c
201
dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
drivers/powercap/dtpm_cpu.c
205
policy = cpufreq_cpu_get(cpu);
drivers/powercap/dtpm_cpu.c
209
pd = em_cpu_get(cpu);
drivers/powercap/dtpm_cpu.c
222
dtpm_cpu->cpu = cpu;
drivers/powercap/dtpm_cpu.c
224
for_each_cpu(cpu, policy->related_cpus)
drivers/powercap/dtpm_cpu.c
225
per_cpu(dtpm_per_cpu, cpu) = dtpm_cpu;
drivers/powercap/dtpm_cpu.c
227
snprintf(name, sizeof(name), "cpu%d-cpufreq", dtpm_cpu->cpu);
drivers/powercap/dtpm_cpu.c
250
for_each_cpu(cpu, policy->related_cpus)
drivers/powercap/dtpm_cpu.c
251
per_cpu(dtpm_per_cpu, cpu) = NULL;
drivers/powercap/dtpm_cpu.c
261
int cpu;
drivers/powercap/dtpm_cpu.c
263
cpu = of_cpu_node_to_id(np);
drivers/powercap/dtpm_cpu.c
264
if (cpu < 0)
drivers/powercap/dtpm_cpu.c
267
return __dtpm_cpu_setup(cpu, dtpm);
drivers/powercap/dtpm_cpu.c
31
int cpu;
drivers/powercap/dtpm_cpu.c
44
struct em_perf_domain *pd = em_cpu_get(dtpm_cpu->cpu);
drivers/powercap/dtpm_cpu.c
74
int cpu;
drivers/powercap/dtpm_cpu.c
82
for_each_cpu_and(cpu, pd_mask, cpu_online_mask)
drivers/powercap/dtpm_cpu.c
83
sum_util += sched_cpu_util(cpu);
drivers/powercap/dtpm_cpu.c
98
pd = em_cpu_get(dtpm_cpu->cpu);
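dtpm_cpu estimates power by scaling the energy-model figure for the perf domain with current utilization, summing sched_cpu_util() over the domain's online CPUs. A small sketch of that aggregation step, assuming the caller already holds the perf domain's cpumask:

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Sum scheduler utilization over the online CPUs of @pd_mask. */
static unsigned long pd_util_sum(const struct cpumask *pd_mask)
{
	unsigned long sum_util = 0;
	int cpu;

	for_each_cpu_and(cpu, pd_mask, cpu_online_mask)
		sum_util += sched_cpu_util(cpu);

	return sum_util;
}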
drivers/powercap/idle_inject.c
105
unsigned int cpu;
drivers/powercap/idle_inject.c
107
for_each_cpu_and(cpu, to_cpumask(ii_dev->cpumask), cpu_online_mask) {
drivers/powercap/idle_inject.c
108
iit = per_cpu_ptr(&idle_inject_thread, cpu);
drivers/powercap/idle_inject.c
148
static void idle_inject_fn(unsigned int cpu)
drivers/powercap/idle_inject.c
153
ii_dev = per_cpu(idle_inject_device, cpu);
drivers/powercap/idle_inject.c
154
iit = per_cpu_ptr(&idle_inject_thread, cpu);
drivers/powercap/idle_inject.c
256
unsigned int cpu;
drivers/powercap/idle_inject.c
278
for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
drivers/powercap/idle_inject.c
279
iit = per_cpu_ptr(&idle_inject_thread, cpu);
drivers/powercap/idle_inject.c
296
static void idle_inject_setup(unsigned int cpu)
drivers/powercap/idle_inject.c
307
static int idle_inject_should_run(unsigned int cpu)
drivers/powercap/idle_inject.c
310
per_cpu_ptr(&idle_inject_thread, cpu);
drivers/powercap/idle_inject.c
334
int cpu, cpu_rb;
drivers/powercap/idle_inject.c
345
for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
drivers/powercap/idle_inject.c
347
if (per_cpu(idle_inject_device, cpu)) {
drivers/powercap/idle_inject.c
348
pr_err("cpu%d is already registered\n", cpu);
drivers/powercap/idle_inject.c
352
per_cpu(idle_inject_device, cpu) = ii_dev;
drivers/powercap/idle_inject.c
359
if (cpu == cpu_rb)
drivers/powercap/idle_inject.c
397
unsigned int cpu;
drivers/powercap/idle_inject.c
401
for_each_cpu(cpu, to_cpumask(ii_dev->cpumask))
drivers/powercap/idle_inject.c
402
per_cpu(idle_inject_device, cpu) = NULL;
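The idle_inject entries trace the framework's internal per-CPU kthread bookkeeping; consumers never touch that directly and instead go through the small public API. A hedged usage sketch, where the target mask and the durations are arbitrary example values:

#include <linux/cpumask.h>
#include <linux/idle_inject.h>

static struct idle_inject_device *ii_dev;
static struct cpumask foo_mask;

static int foo_start_injection(void)
{
	/* Example target set: every CPU online right now. The framework
	 * copies the mask, so a static mask like this is fine. */
	cpumask_copy(&foo_mask, cpu_online_mask);
	ii_dev = idle_inject_register(&foo_mask);
	if (!ii_dev)
		return -ENODEV;

	/* Run 60 ms, idle 40 ms: an illustrative 40% idle ratio. */
	idle_inject_set_duration(ii_dev, 60000, 40000);
	return idle_inject_start(ii_dev);
}

static void foo_stop_injection(void)
{
	idle_inject_stop(ii_dev);
	idle_inject_unregister(ii_dev);
}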
drivers/powercap/intel_rapl_common.c
1594
int cpu;
drivers/powercap/intel_rapl_common.c
1604
for_each_online_cpu(cpu)
drivers/powercap/intel_rapl_common.c
1605
if (topology_physical_package_id(cpu) == rp->id)
drivers/powercap/intel_rapl_common.c
1606
cpumask_set_cpu(cpu, mask);
drivers/powercap/intel_rapl_common.c
1609
static bool is_rp_pmu_cpu(struct rapl_package *rp, int cpu)
drivers/powercap/intel_rapl_common.c
1619
return topology_physical_package_id(cpu) == rp->id;
drivers/powercap/intel_rapl_common.c
1804
if (event->cpu < 0)
drivers/powercap/intel_rapl_common.c
1809
if (is_rp_pmu_cpu(pos, event->cpu)) {
drivers/powercap/intel_rapl_msr.c
100
cpumask_clear_cpu(cpu, &rp->cpumask);
drivers/powercap/intel_rapl_msr.c
106
} else if (rp->lead_cpu == cpu) {
drivers/powercap/intel_rapl_msr.c
113
static int rapl_msr_read_raw(int cpu, struct reg_action *ra, bool pmu_ctx)
drivers/powercap/intel_rapl_msr.c
125
if (rdmsrq_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) {
drivers/powercap/intel_rapl_msr.c
126
pr_debug("failed to read msr 0x%x on cpu %d\n", ra->reg.msr, cpu);
drivers/powercap/intel_rapl_msr.c
150
static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
drivers/powercap/intel_rapl_msr.c
154
ret = smp_call_function_single(cpu, rapl_msr_update_func, ra, 1);
drivers/powercap/intel_rapl_msr.c
75
static int rapl_cpu_online(unsigned int cpu)
drivers/powercap/intel_rapl_msr.c
79
rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true);
drivers/powercap/intel_rapl_msr.c
81
rp = rapl_add_package_cpuslocked(cpu, rapl_msr_priv, true);
drivers/powercap/intel_rapl_msr.c
87
cpumask_set_cpu(cpu, &rp->cpumask);
drivers/powercap/intel_rapl_msr.c
91
static int rapl_cpu_down_prep(unsigned int cpu)
drivers/powercap/intel_rapl_msr.c
96
rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true);
drivers/ps3/ps3-lpm.c
1030
u32 ps3_get_and_clear_pm_interrupts(u32 cpu)
drivers/ps3/ps3-lpm.c
1032
return ps3_read_pm(cpu, pm_status);
drivers/ps3/ps3-lpm.c
1043
void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
drivers/ps3/ps3-lpm.c
1046
ps3_write_pm(cpu, pm_status, mask);
drivers/ps3/ps3-lpm.c
1056
void ps3_disable_pm_interrupts(u32 cpu)
drivers/ps3/ps3-lpm.c
1058
ps3_get_and_clear_pm_interrupts(cpu);
drivers/ps3/ps3-lpm.c
1059
ps3_write_pm(cpu, pm_status, 0);
drivers/ps3/ps3-lpm.c
198
u32 ps3_read_phys_ctr(u32 cpu, u32 phys_ctr)
drivers/ps3/ps3-lpm.c
242
void ps3_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
drivers/ps3/ps3-lpm.c
303
u32 ps3_read_ctr(u32 cpu, u32 ctr)
drivers/ps3/ps3-lpm.c
308
val = ps3_read_phys_ctr(cpu, phys_ctr);
drivers/ps3/ps3-lpm.c
310
if (ps3_get_ctr_size(cpu, phys_ctr) == 16)
drivers/ps3/ps3-lpm.c
324
void ps3_write_ctr(u32 cpu, u32 ctr, u32 val)
drivers/ps3/ps3-lpm.c
331
if (ps3_get_ctr_size(cpu, phys_ctr) == 16) {
drivers/ps3/ps3-lpm.c
332
phys_val = ps3_read_phys_ctr(cpu, phys_ctr);
drivers/ps3/ps3-lpm.c
340
ps3_write_phys_ctr(cpu, phys_ctr, val);
drivers/ps3/ps3-lpm.c
350
u32 ps3_read_pm07_control(u32 cpu, u32 ctr)
drivers/ps3/ps3-lpm.c
362
void ps3_write_pm07_control(u32 cpu, u32 ctr, u32 val)
drivers/ps3/ps3-lpm.c
387
u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg)
drivers/ps3/ps3-lpm.c
439
void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
drivers/ps3/ps3-lpm.c
509
u32 ps3_get_ctr_size(u32 cpu, u32 phys_ctr)
drivers/ps3/ps3-lpm.c
519
pm_ctrl = ps3_read_pm(cpu, pm_control);
drivers/ps3/ps3-lpm.c
528
void ps3_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
drivers/ps3/ps3-lpm.c
538
pm_ctrl = ps3_read_pm(cpu, pm_control);
drivers/ps3/ps3-lpm.c
543
ps3_write_pm(cpu, pm_control, pm_ctrl);
drivers/ps3/ps3-lpm.c
548
ps3_write_pm(cpu, pm_control, pm_ctrl);
drivers/ps3/ps3-lpm.c
826
u32 ps3_get_hw_thread_id(int cpu)
drivers/ps3/ps3-lpm.c
828
return get_hard_smp_processor_id(cpu);
drivers/ps3/ps3-lpm.c
838
void ps3_enable_pm(u32 cpu)
drivers/ps3/ps3-lpm.c
882
void ps3_disable_pm(u32 cpu)
drivers/ras/amd/atl/umc.c
390
return topology_amd_node_id(err->cpu) % topology_amd_nodes_per_pkg();
drivers/ras/amd/atl/umc.c
404
u8 socket_id = topology_physical_package_id(err->cpu);
drivers/ras/amd/fmpm.c
300
u64 addr, u64 id, unsigned int cpu)
drivers/ras/amd/fmpm.c
334
a_err.cpu = cpu;
drivers/ras/amd/fmpm.c
395
static void retire_dram_row(u64 addr, u64 id, u32 cpu)
drivers/ras/amd/fmpm.c
403
a_err.cpu = cpu;
drivers/ras/amd/fmpm.c
439
unsigned int i, cpu;
drivers/ras/amd/fmpm.c
452
for_each_online_cpu(cpu) {
drivers/ras/amd/fmpm.c
453
if (topology_ppin(cpu) == fmp->fru_id) {
drivers/ras/amd/fmpm.c
454
err_cpu = cpu;
drivers/ras/amd/fmpm.c
721
static void set_fmp_fields(struct fru_rec *rec, unsigned int cpu)
drivers/ras/amd/fmpm.c
735
fmp->fru_id = topology_ppin(cpu);
drivers/ras/amd/fmpm.c
742
unsigned int i, cpu;
drivers/ras/amd/fmpm.c
749
for_each_online_cpu(cpu) {
drivers/ras/amd/fmpm.c
750
if (topology_physical_package_id(cpu) == i) {
drivers/ras/amd/fmpm.c
751
fru_cpu = cpu;
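Note: both fmpm.c loops above resolve a hardware identity back to a logical CPU by scanning online CPUs. A minimal sketch of the PPIN variant; find_cpu_by_ppin() is an illustrative name:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/topology.h>

    /* Return the first online CPU whose PPIN matches, or -ENODEV. */
    static int find_cpu_by_ppin(u64 fru_id)
    {
            unsigned int cpu;

            for_each_online_cpu(cpu) {
                    if (topology_ppin(cpu) == fru_id)
                            return cpu;
            }
            return -ENODEV;
    }
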
drivers/ras/ras.c
62
int n, sz, cpu;
drivers/ras/ras.c
93
cpu = GET_LOGICAL_INDEX(err->mpidr);
drivers/ras/ras.c
94
if (cpu < 0)
drivers/ras/ras.c
95
cpu = -1;
drivers/ras/ras.c
98
ven_err_data, (u32)vsei_len, sev, cpu);
drivers/resctrl/mpam_devices.c
1537
int cpu = raw_smp_processor_id();
drivers/resctrl/mpam_devices.c
1539
if (cpumask_test_cpu(cpu, &msc->accessibility))
drivers/resctrl/mpam_devices.c
1540
return cpu;
drivers/resctrl/mpam_devices.c
1618
static int mpam_cpu_online(unsigned int cpu)
drivers/resctrl/mpam_devices.c
1625
if (!cpumask_test_cpu(cpu, &msc->accessibility))
drivers/resctrl/mpam_devices.c
1639
static int mpam_discovery_cpu_online(unsigned int cpu)
drivers/resctrl/mpam_devices.c
1651
if (!cpumask_test_cpu(cpu, &msc->accessibility))
drivers/resctrl/mpam_devices.c
1674
static int mpam_cpu_offline(unsigned int cpu)
drivers/resctrl/mpam_devices.c
1681
if (!cpumask_test_cpu(cpu, &msc->accessibility))
drivers/resctrl/mpam_devices.c
1732
int cpu;
drivers/resctrl/mpam_devices.c
1738
for_each_cpu(cpu, &msc->accessibility)
drivers/resctrl/mpam_devices.c
1739
*per_cpu_ptr(msc->error_dev_id, cpu) = msc;
drivers/resctrl/mpam_devices.c
460
int cpu;
drivers/resctrl/mpam_devices.c
462
for_each_possible_cpu(cpu) {
drivers/resctrl/mpam_devices.c
463
if (node_id == cpu_to_node(cpu))
drivers/resctrl/mpam_devices.c
464
cpumask_set_cpu(cpu, affinity);
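Note: the mpam_devices.c entries at line 460 onward build an accessibility mask from a NUMA node id. A sketch of that pairing, assuming the mask is cleared first; for the common case the stock cpumask_of_node() does the same job:

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    static void node_to_accessibility_mask(int node_id, struct cpumask *affinity)
    {
            int cpu;

            cpumask_clear(affinity);
            for_each_possible_cpu(cpu) {
                    if (cpu_to_node(cpu) == node_id)
                            cpumask_set_cpu(cpu, affinity);
            }
    }
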
drivers/s390/char/sclp_config.c
46
int cpu;
drivers/s390/char/sclp_config.c
52
for_each_online_cpu(cpu) {
drivers/s390/char/sclp_config.c
53
dev = get_cpu_device(cpu);
drivers/s390/char/sclp_early.c
30
u16 boot_cpu_address, cpu;
drivers/s390/char/sclp_early.c
87
for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
drivers/sbus/char/envctrl.c
362
static int envctrl_read_cpu_info(int cpu, struct i2c_child_t *pchild,
drivers/sbus/char/envctrl.c
372
if (++j == cpu) {
drivers/sbus/char/envctrl.c
378
if (j != cpu)
drivers/scsi/aacraid/aacraid.h
1349
__le32 cpu;
drivers/scsi/aha1740.c
103
static inline dma_addr_t ecb_cpu_to_dma (struct Scsi_Host *host, void *cpu)
drivers/scsi/aha1740.c
108
offset = (char *) cpu - (char *) hdata->ecb;
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2605
static int bnx2fc_cpu_online(unsigned int cpu)
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2610
p = &per_cpu(bnx2fc_percpu, cpu);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2613
(void *)p, cpu, "bnx2fc_thread/%d");
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2622
static int bnx2fc_cpu_offline(unsigned int cpu)
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2628
BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2631
p = &per_cpu(bnx2fc_percpu, cpu);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2674
unsigned int cpu = 0;
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2716
for_each_possible_cpu(cpu) {
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2717
p = &per_cpu(bnx2fc_percpu, cpu);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1045
fps = &per_cpu(bnx2fc_percpu, cpu);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
993
unsigned int cpu = wqe % num_possible_cpus();
drivers/scsi/bnx2i/bnx2i_init.c
411
static int bnx2i_cpu_online(unsigned int cpu)
drivers/scsi/bnx2i/bnx2i_init.c
416
p = &per_cpu(bnx2i_percpu, cpu);
drivers/scsi/bnx2i/bnx2i_init.c
419
cpu, "bnx2i_thread/%d");
drivers/scsi/bnx2i/bnx2i_init.c
428
static int bnx2i_cpu_offline(unsigned int cpu)
drivers/scsi/bnx2i/bnx2i_init.c
435
p = &per_cpu(bnx2i_percpu, cpu);
drivers/scsi/bnx2i/bnx2i_init.c
466
unsigned cpu = 0;
drivers/scsi/bnx2i/bnx2i_init.c
489
for_each_possible_cpu(cpu) {
drivers/scsi/bnx2i/bnx2i_init.c
490
p = &per_cpu(bnx2i_percpu, cpu);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1488
unsigned cpu = 0;
drivers/scsi/bnx2i/bnx2i_iscsi.c
1497
for_each_online_cpu(cpu) {
drivers/scsi/bnx2i/bnx2i_iscsi.c
1498
p = &per_cpu(bnx2i_percpu, cpu);
drivers/scsi/csiostor/csio_scsi.c
1891
int cpu = smp_processor_id();
drivers/scsi/csiostor/csio_scsi.c
1893
struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];
drivers/scsi/elx/efct/efct_hw_queues.c
118
for_each_cpu_and(cpu, maskp, cpu_present_mask) {
drivers/scsi/elx/efct/efct_hw_queues.c
119
efc_log_debug(efct, "CPU:%d irq vector:%d\n", cpu, i);
drivers/scsi/elx/efct/efct_hw_queues.c
120
hw->wq_cpu_array[cpu] = hw->hw_wq[i];
drivers/scsi/elx/efct/efct_hw_queues.c
99
u32 cpu = 0, i;
drivers/scsi/fcoe/fcoe.c
1285
static void fcoe_thread_cleanup_local(unsigned int cpu)
drivers/scsi/fcoe/fcoe.c
1290
p = per_cpu_ptr(&fcoe_percpu, cpu);
drivers/scsi/fcoe/fcoe.c
1324
unsigned int cpu;
drivers/scsi/fcoe/fcoe.c
1385
cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
drivers/scsi/fcoe/fcoe.c
1388
cpu = skb->alloc_cpu;
drivers/scsi/fcoe/fcoe.c
1390
cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
drivers/scsi/fcoe/fcoe.c
1393
if (cpu >= nr_cpu_ids)
drivers/scsi/fcoe/fcoe.c
1396
fps = &per_cpu(fcoe_percpu, cpu);
drivers/scsi/fcoe/fcoe.c
1412
schedule_work_on(cpu, &fps->work);
drivers/scsi/fcoe/fcoe.c
2305
unsigned int cpu;
drivers/scsi/fcoe/fcoe.c
2307
for_each_possible_cpu(cpu) {
drivers/scsi/fcoe/fcoe.c
2308
pp = &per_cpu(fcoe_percpu, cpu);
drivers/scsi/fcoe/fcoe.c
2438
unsigned int cpu;
drivers/scsi/fcoe/fcoe.c
2455
for_each_possible_cpu(cpu) {
drivers/scsi/fcoe/fcoe.c
2456
p = per_cpu_ptr(&fcoe_percpu, cpu);
drivers/scsi/fcoe/fcoe.c
2491
unsigned int cpu;
drivers/scsi/fcoe/fcoe.c
2507
for_each_possible_cpu(cpu)
drivers/scsi/fcoe/fcoe.c
2508
fcoe_thread_cleanup_local(cpu);
drivers/scsi/fcoe/fcoe_transport.c
173
unsigned int cpu;
drivers/scsi/fcoe/fcoe_transport.c
184
for_each_possible_cpu(cpu) {
drivers/scsi/fcoe/fcoe_transport.c
185
stats = per_cpu_ptr(lport->stats, cpu);
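Note: the fcoe and fcoe_transport entries read per-CPU statistics with per_cpu_ptr() across all possible CPUs, so counts accumulated on CPUs that have since gone offline are still included. A minimal sketch; struct fcoe_like_stats and its field are illustrative:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    struct fcoe_like_stats { u64 rx_frames; };

    static u64 sum_rx_frames(struct fcoe_like_stats __percpu *stats)
    {
            u64 total = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    total += per_cpu_ptr(stats, cpu)->rx_frames;
            return total;
    }
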
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
3559
unsigned int queue, cpu;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
3566
for_each_cpu(cpu, mask)
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
3567
qmap->mq_map[cpu] = qmap->queue_offset + queue;
drivers/scsi/hpsa.c
2868
int cpu;
drivers/scsi/hpsa.c
2871
cpu = get_cpu();
drivers/scsi/hpsa.c
2872
lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
drivers/scsi/hpsa.c
7474
unsigned int queue, cpu;
drivers/scsi/hpsa.c
7481
for_each_cpu(cpu, mask)
drivers/scsi/hpsa.c
7482
h->reply_map[cpu] = queue;
drivers/scsi/hpsa.c
7487
for_each_possible_cpu(cpu)
drivers/scsi/hpsa.c
7488
h->reply_map[cpu] = 0;
drivers/scsi/hpsa.c
8241
int cpu;
drivers/scsi/hpsa.c
8243
for_each_online_cpu(cpu) {
drivers/scsi/hpsa.c
8245
lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
drivers/scsi/libfc/fc_exch.c
1982
unsigned int cpu;
drivers/scsi/libfc/fc_exch.c
1985
for_each_possible_cpu(cpu)
drivers/scsi/libfc/fc_exch.c
1987
per_cpu_ptr(ema->mp->pool, cpu),
drivers/scsi/libfc/fc_exch.c
2470
unsigned int cpu;
drivers/scsi/libfc/fc_exch.c
2522
for_each_possible_cpu(cpu) {
drivers/scsi/libfc/fc_exch.c
2523
pool = per_cpu_ptr(mp->pool, cpu);
drivers/scsi/libfc/fc_exch.c
816
unsigned int cpu;
drivers/scsi/libfc/fc_exch.c
828
cpu = raw_smp_processor_id();
drivers/scsi/libfc/fc_exch.c
829
pool = per_cpu_ptr(mp->pool, cpu);
drivers/scsi/libfc/fc_exch.c
875
ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
drivers/scsi/libfc/fc_exch.c
931
u16 cpu = xid & fc_cpu_mask;
drivers/scsi/libfc/fc_exch.c
936
if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
drivers/scsi/libfc/fc_exch.c
938
lport->host->host_no, lport->port_id, xid, cpu);
drivers/scsi/libfc/fc_exch.c
943
pool = per_cpu_ptr(mp->pool, cpu);
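Note: fc_exch.c lines 875 and 931 show libfc packing the allocating CPU into the low bits of the exchange id so the response path can find the right per-cpu pool with a mask. A sketch of the encode/decode pair, assuming min_xid is aligned so the CPU bits survive the addition, as libfc arranges:

    #include <linux/types.h>

    static u16 xid_encode(u16 index, u16 cpu, u16 cpu_order, u16 min_xid)
    {
            return ((index << cpu_order) | cpu) + min_xid;
    }

    static u16 xid_to_cpu(u16 xid, u16 cpu_mask)
    {
            return xid & cpu_mask;
    }
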
drivers/scsi/libfc/fc_lport.c
303
unsigned int cpu;
drivers/scsi/libfc/fc_lport.c
312
for_each_possible_cpu(cpu) {
drivers/scsi/libfc/fc_lport.c
315
stats = per_cpu_ptr(lport->stats, cpu);
drivers/scsi/lpfc/lpfc_attr.c
127
int cpu;
drivers/scsi/lpfc/lpfc_attr.c
259
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_attr.c
260
cgs = per_cpu_ptr(phba->cmf_stat, cpu);
drivers/scsi/lpfc/lpfc_attr.c
302
int len = 0, i, j, k, cpu;
drivers/scsi/lpfc/lpfc_attr.c
393
for_each_possible_cpu(cpu) {
drivers/scsi/lpfc/lpfc_attr.c
394
lta = per_cpu_ptr(vmp->last_io_time, cpu);
drivers/scsi/lpfc/lpfc_attr.c
5636
unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
drivers/scsi/lpfc/lpfc_attr.c
5651
for_each_possible_cpu(cpu) {
drivers/scsi/lpfc/lpfc_attr.c
5654
if (cpu_to_node(cpu) == numa_node)
drivers/scsi/lpfc/lpfc_attr.c
5655
cpumask_set_cpu(cpu, aff_mask);
drivers/scsi/lpfc/lpfc_attr.c
5658
sibling_mask = topology_sibling_cpumask(cpu);
drivers/scsi/lpfc/lpfc_hbadisc.c
321
int cpu;
drivers/scsi/lpfc/lpfc_hbadisc.c
335
for_each_possible_cpu(cpu) {
drivers/scsi/lpfc/lpfc_hbadisc.c
337
lta = per_cpu_ptr(vmp->last_io_time, cpu);
drivers/scsi/lpfc/lpfc_init.c
10346
int cpu;
drivers/scsi/lpfc/lpfc_init.c
10348
cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
drivers/scsi/lpfc/lpfc_init.c
10354
LPFC_CQE_EXP_COUNT, cpu);
drivers/scsi/lpfc/lpfc_init.c
10359
phba->sli4_hba.cq_ecount, cpu);
drivers/scsi/lpfc/lpfc_init.c
10368
qdesc->chann = cpu;
drivers/scsi/lpfc/lpfc_init.c
10378
LPFC_WQE_EXP_COUNT, cpu);
drivers/scsi/lpfc/lpfc_init.c
10382
phba->sli4_hba.wq_ecount, cpu);
drivers/scsi/lpfc/lpfc_init.c
10391
qdesc->chann = cpu;
drivers/scsi/lpfc/lpfc_init.c
10415
int idx, cpu, eqcpu;
drivers/scsi/lpfc/lpfc_init.c
10498
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
10503
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
10513
phba->sli4_hba.eq_ecount, cpu);
drivers/scsi/lpfc/lpfc_init.c
10522
qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
drivers/scsi/lpfc/lpfc_init.c
10535
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
10536
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
10561
cpu = lpfc_find_cpu_handle(phba, idx,
drivers/scsi/lpfc/lpfc_init.c
10567
cpu);
drivers/scsi/lpfc/lpfc_init.c
10576
qdesc->chann = cpu;
drivers/scsi/lpfc/lpfc_init.c
10585
cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
drivers/scsi/lpfc/lpfc_init.c
10589
phba->sli4_hba.cq_ecount, cpu);
drivers/scsi/lpfc/lpfc_init.c
10601
phba->sli4_hba.cq_ecount, cpu);
drivers/scsi/lpfc/lpfc_init.c
10608
qdesc->chann = cpu;
drivers/scsi/lpfc/lpfc_init.c
10620
phba->sli4_hba.mq_ecount, cpu);
drivers/scsi/lpfc/lpfc_init.c
10626
qdesc->chann = cpu;
drivers/scsi/lpfc/lpfc_init.c
10642
phba->sli4_hba.wq_ecount, cpu);
drivers/scsi/lpfc/lpfc_init.c
10648
qdesc->chann = cpu;
drivers/scsi/lpfc/lpfc_init.c
10656
phba->sli4_hba.cq_ecount, cpu);
drivers/scsi/lpfc/lpfc_init.c
10662
qdesc->chann = cpu;
drivers/scsi/lpfc/lpfc_init.c
10669
phba->sli4_hba.wq_ecount, cpu);
drivers/scsi/lpfc/lpfc_init.c
10675
qdesc->chann = cpu;
drivers/scsi/lpfc/lpfc_init.c
10687
phba->sli4_hba.rq_ecount, cpu);
drivers/scsi/lpfc/lpfc_init.c
10698
phba->sli4_hba.rq_ecount, cpu);
drivers/scsi/lpfc/lpfc_init.c
10709
cpu = lpfc_find_cpu_handle(phba, idx,
drivers/scsi/lpfc/lpfc_init.c
10716
cpu);
drivers/scsi/lpfc/lpfc_init.c
10729
cpu_to_node(cpu));
drivers/scsi/lpfc/lpfc_init.c
10745
cpu);
drivers/scsi/lpfc/lpfc_init.c
11052
int qidx, cpu;
drivers/scsi/lpfc/lpfc_init.c
11112
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
11113
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
11147
cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
drivers/scsi/lpfc/lpfc_init.c
11148
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
12283
int cpu;
drivers/scsi/lpfc/lpfc_init.c
12286
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
12287
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
12296
return cpu;
drivers/scsi/lpfc/lpfc_init.c
12300
return cpu;
drivers/scsi/lpfc/lpfc_init.c
12314
lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
drivers/scsi/lpfc/lpfc_init.c
12325
(cpu != idx))
drivers/scsi/lpfc/lpfc_init.c
12343
unsigned int cpu)
drivers/scsi/lpfc/lpfc_init.c
12345
struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
12353
cpu, eqhdl->irq, cpup->eq, cpup->flag);
drivers/scsi/lpfc/lpfc_init.c
12367
int cpu;
drivers/scsi/lpfc/lpfc_init.c
12369
for_each_possible_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
12370
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
12376
eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
drivers/scsi/lpfc/lpfc_init.c
12414
int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
drivers/scsi/lpfc/lpfc_init.c
12429
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
12430
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
12432
cpup->phys_id = topology_physical_package_id(cpu);
drivers/scsi/lpfc/lpfc_init.c
12433
cpup->core_id = topology_core_id(cpu);
drivers/scsi/lpfc/lpfc_init.c
12434
if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
drivers/scsi/lpfc/lpfc_init.c
12439
cpup->core_id = cpu;
drivers/scsi/lpfc/lpfc_init.c
12444
cpu, cpup->phys_id, cpup->core_id, cpup->flag);
drivers/scsi/lpfc/lpfc_init.c
12465
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
12466
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
12503
cpu, cpup->eq, new_cpu,
drivers/scsi/lpfc/lpfc_init.c
12511
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
12512
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
12551
cpu, cpup->eq, new_cpu,
drivers/scsi/lpfc/lpfc_init.c
12560
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
12561
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
12573
cpu, cpup->phys_id, cpup->core_id,
drivers/scsi/lpfc/lpfc_init.c
12587
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
12588
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
12644
cpu, cpup->phys_id, cpup->core_id,
drivers/scsi/lpfc/lpfc_init.c
12653
for_each_possible_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
12654
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
12656
c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
drivers/scsi/lpfc/lpfc_init.c
12669
cpu, cpup->hdwq);
drivers/scsi/lpfc/lpfc_init.c
12686
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
drivers/scsi/lpfc/lpfc_init.c
12707
if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
drivers/scsi/lpfc/lpfc_init.c
12795
lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
drivers/scsi/lpfc/lpfc_init.c
12798
cpumask_set_cpu(cpu, &eqhdl->aff_mask);
drivers/scsi/lpfc/lpfc_init.c
12832
lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
drivers/scsi/lpfc/lpfc_init.c
12844
if (!cpumask_test_cpu(cpu, orig_mask))
drivers/scsi/lpfc/lpfc_init.c
12847
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
12854
cpu_next = cpumask_next_wrap(cpu, orig_mask);
drivers/scsi/lpfc/lpfc_init.c
12858
if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
drivers/scsi/lpfc/lpfc_init.c
12866
if (cpumask_test_cpu(cpu, aff_mask))
drivers/scsi/lpfc/lpfc_init.c
12877
lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
drivers/scsi/lpfc/lpfc_init.c
12881
static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
drivers/scsi/lpfc/lpfc_init.c
12896
lpfc_irq_rebalance(phba, cpu, true);
drivers/scsi/lpfc/lpfc_init.c
12898
retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
drivers/scsi/lpfc/lpfc_init.c
12911
static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
drivers/scsi/lpfc/lpfc_init.c
12926
lpfc_irq_rebalance(phba, cpu, false);
drivers/scsi/lpfc/lpfc_init.c
12930
if (n == cpu)
drivers/scsi/lpfc/lpfc_init.c
12971
unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
drivers/scsi/lpfc/lpfc_init.c
12990
cpu = cpumask_first(aff_mask);
drivers/scsi/lpfc/lpfc_init.c
12991
cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
drivers/scsi/lpfc/lpfc_init.c
13041
cpu);
drivers/scsi/lpfc/lpfc_init.c
13044
cpu = cpumask_next(cpu, aff_mask);
drivers/scsi/lpfc/lpfc_init.c
13047
cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
drivers/scsi/lpfc/lpfc_init.c
13049
cpu = cpumask_first(cpu_present_mask);
drivers/scsi/lpfc/lpfc_init.c
13051
cpu);
drivers/scsi/lpfc/lpfc_init.c
13056
for_each_cpu_and(cpu, maskp, cpu_present_mask) {
drivers/scsi/lpfc/lpfc_init.c
13057
cpup = &phba->sli4_hba.cpu_map[cpu];
drivers/scsi/lpfc/lpfc_init.c
13075
cpu);
drivers/scsi/lpfc/lpfc_init.c
13125
unsigned int cpu;
drivers/scsi/lpfc/lpfc_init.c
13159
cpu = cpumask_first(cpu_present_mask);
drivers/scsi/lpfc/lpfc_init.c
13160
lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
drivers/scsi/lpfc/lpfc_init.c
13223
unsigned int cpu;
drivers/scsi/lpfc/lpfc_init.c
13240
cpu = cpumask_first(cpu_present_mask);
drivers/scsi/lpfc/lpfc_init.c
13242
cpu);
drivers/scsi/lpfc/lpfc_init.c
3188
int cpu;
drivers/scsi/lpfc/lpfc_init.c
3204
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
3205
cgs = per_cpu_ptr(phba->cmf_stat, cpu);
drivers/scsi/lpfc/lpfc_init.c
3253
int cpu;
drivers/scsi/lpfc/lpfc_init.c
3269
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
3270
cgs = per_cpu_ptr(phba->cmf_stat, cpu);
drivers/scsi/lpfc/lpfc_init.c
5964
int cpu;
drivers/scsi/lpfc/lpfc_init.c
6009
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_init.c
6010
cgs = per_cpu_ptr(phba->cmf_stat, cpu);
drivers/scsi/lpfc/lpfc_nvme.c
1151
cpu = raw_smp_processor_id();
drivers/scsi/lpfc/lpfc_nvme.c
1153
if (lpfc_ncmd->cpu != cpu)
drivers/scsi/lpfc/lpfc_nvme.c
1158
cpu, lpfc_ncmd->cpu);
drivers/scsi/lpfc/lpfc_nvme.c
1527
int idx, cpu;
drivers/scsi/lpfc/lpfc_nvme.c
1667
cpu = raw_smp_processor_id();
drivers/scsi/lpfc/lpfc_nvme.c
1668
idx = phba->sli4_hba.cpu_map[cpu].hdwq;
drivers/scsi/lpfc/lpfc_nvme.c
1775
cpu = raw_smp_processor_id();
drivers/scsi/lpfc/lpfc_nvme.c
1777
lpfc_ncmd->cpu = cpu;
drivers/scsi/lpfc/lpfc_nvme.c
1778
if (idx != cpu)
drivers/scsi/lpfc/lpfc_nvme.c
1783
lpfc_ncmd->cpu,
drivers/scsi/lpfc/lpfc_nvme.c
951
int cpu;
drivers/scsi/lpfc/lpfc_nvme.h
168
#define lpfc_get_ctx_list(phba, cpu, mrq) \
drivers/scsi/lpfc/lpfc_nvme.h
169
(phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
drivers/scsi/lpfc/lpfc_nvme.h
208
uint16_t cpu;
drivers/scsi/lpfc/lpfc_nvmet.c
1050
ctxp->cpu = id; /* Setup cpu for cmpl check */
drivers/scsi/lpfc/lpfc_nvmet.c
1504
int i, j, idx, cpu;
drivers/scsi/lpfc/lpfc_nvmet.c
1567
cpu = cpumask_first(cpu_present_mask);
drivers/scsi/lpfc/lpfc_nvmet.c
1621
infop = lpfc_get_ctx_list(phba, cpu, idx);
drivers/scsi/lpfc/lpfc_nvmet.c
1631
cpu = cpumask_first(cpu_present_mask);
drivers/scsi/lpfc/lpfc_nvmet.c
1634
cpu = lpfc_next_present_cpu(cpu);
drivers/scsi/lpfc/lpfc_nvmet.c
395
int cpu;
drivers/scsi/lpfc/lpfc_nvmet.c
503
cpu = raw_smp_processor_id();
drivers/scsi/lpfc/lpfc_nvmet.c
504
infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
drivers/scsi/lpfc/lpfc_nvmet.c
836
if (ctxp->cpu != id)
drivers/scsi/lpfc/lpfc_nvmet.c
840
id, ctxp->cpu);
drivers/scsi/lpfc/lpfc_scsi.c
3787
int cpu;
drivers/scsi/lpfc/lpfc_scsi.c
3793
for_each_present_cpu(cpu) {
drivers/scsi/lpfc/lpfc_scsi.c
3794
cgs = per_cpu_ptr(phba->cmf_stat, cpu);
drivers/scsi/lpfc/lpfc_scsi.c
611
uint32_t cpu, idx;
drivers/scsi/lpfc/lpfc_scsi.c
615
cpu = raw_smp_processor_id();
drivers/scsi/lpfc/lpfc_scsi.c
620
idx = phba->sli4_hba.cpu_map[cpu].hdwq;
drivers/scsi/lpfc/lpfc_scsi.c
641
lpfc_cmd->cpu = cpu;
drivers/scsi/lpfc/lpfc_sli.c
15882
uint32_t entry_size, uint32_t entry_count, int cpu)
drivers/scsi/lpfc/lpfc_sli.c
15899
GFP_KERNEL, cpu_to_node(cpu));
drivers/scsi/lpfc/lpfc_sli.h
434
uint16_t cpu;
drivers/scsi/lpfc/lpfc_sli4.h
1082
uint32_t entry_count, int cpu);
drivers/scsi/lpfc/lpfc_vmid.c
300
u32 bucket, i, cpu;
drivers/scsi/lpfc/lpfc_vmid.c
316
for_each_possible_cpu(cpu)
drivers/scsi/lpfc/lpfc_vmid.c
317
*per_cpu_ptr(vmp->last_io_time, cpu) = 0;
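Note: the lpfc queue-create entries (lpfc_sli.c line 15899 and its lpfc_init.c callers) allocate each queue on the memory node of the CPU that will service it. A minimal sketch; struct lpfc_like_queue and alloc_queue_for_cpu() are illustrative names:

    #include <linux/slab.h>
    #include <linux/topology.h>

    struct lpfc_like_queue { int chann; /* first CPU the queue serves */ };

    static struct lpfc_like_queue *alloc_queue_for_cpu(int cpu)
    {
            struct lpfc_like_queue *q;

            /* NUMA-local allocation: place the queue near its CPU. */
            q = kzalloc_node(sizeof(*q), GFP_KERNEL, cpu_to_node(cpu));
            if (q)
                    q->chann = cpu;
            return q;
    }
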
drivers/scsi/megaraid/megaraid_sas_base.c
5841
unsigned int queue, cpu, low_latency_index_start;
drivers/scsi/megaraid/megaraid_sas_base.c
5850
for_each_cpu(cpu, mask)
drivers/scsi/megaraid/megaraid_sas_base.c
5851
instance->reply_map[cpu] = queue;
drivers/scsi/megaraid/megaraid_sas_base.c
5857
for_each_possible_cpu(cpu) {
drivers/scsi/megaraid/megaraid_sas_base.c
5858
instance->reply_map[cpu] = queue;
drivers/scsi/mpt3sas/mpt3sas_base.c
3214
unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
drivers/scsi/mpt3sas/mpt3sas_base.c
3264
for_each_cpu_and(cpu, mask, cpu_online_mask) {
drivers/scsi/mpt3sas/mpt3sas_base.c
3265
if (cpu >= ioc->cpu_msix_table_sz)
drivers/scsi/mpt3sas/mpt3sas_base.c
3267
ioc->cpu_msix_table[cpu] = reply_q->msix_index;
drivers/scsi/mpt3sas/mpt3sas_base.c
3274
cpu = cpumask_first(cpu_online_mask);
drivers/scsi/mpt3sas/mpt3sas_base.c
3285
if (cpu >= nr_cpus)
drivers/scsi/mpt3sas/mpt3sas_base.c
3292
ioc->cpu_msix_table[cpu] = reply_q->msix_index;
drivers/scsi/mpt3sas/mpt3sas_base.c
3293
cpu = cpumask_next(cpu, cpu_online_mask);
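Note: mpt3sas (and megaraid_sas just above it) fall back to a round-robin CPU-to-reply-queue map when no per-vector affinity mask is available. A sketch of that fallback loop; the array and bounds are illustrative:

    #include <linux/cpumask.h>
    #include <linux/types.h>

    static void build_reply_map(u8 *reply_map, unsigned int table_sz,
                                unsigned int nr_msix)
    {
            unsigned int cpu, index = 0;

            for_each_online_cpu(cpu) {
                    if (cpu >= table_sz)
                            break;
                    reply_map[cpu] = index;
                    if (++index == nr_msix)
                            index = 0;   /* wrap over the MSI-X vectors */
            }
    }
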
drivers/scsi/myrs.c
1345
if (info->cpu[0].cpu_count) {
drivers/scsi/myrs.c
1348
if (tbl[i].type == info->cpu[0].cpu_type) {
drivers/scsi/myrs.c
1354
if (info->cpu[1].cpu_count) {
drivers/scsi/myrs.c
1357
if (tbl[i].type == info->cpu[1].cpu_type) {
drivers/scsi/myrs.c
1366
info->cpu[0].cpu_name,
drivers/scsi/myrs.c
1367
first_processor, info->cpu[0].cpu_count,
drivers/scsi/myrs.c
1368
info->cpu[1].cpu_name,
drivers/scsi/myrs.c
1369
second_processor, info->cpu[1].cpu_count);
drivers/scsi/myrs.c
1372
info->cpu[0].cpu_name,
drivers/scsi/myrs.c
1373
first_processor, info->cpu[0].cpu_count);
drivers/scsi/myrs.c
1376
info->cpu[1].cpu_name,
drivers/scsi/myrs.c
1377
second_processor, info->cpu[1].cpu_count);
drivers/scsi/myrs.h
271
} __packed cpu[2];
drivers/scsi/qedf/qedf.h
169
unsigned int cpu;
drivers/scsi/qedf/qedf_els.c
78
els_req->cpu = smp_processor_id();
drivers/scsi/qedf/qedf_io.c
2346
io_req->cpu = smp_processor_id();
drivers/scsi/qedf/qedf_io.c
460
io_req->cpu = 0;
drivers/scsi/qedf/qedf_io.c
828
io_log->req_cpu = io_req->cpu;
drivers/scsi/qedf/qedf_io.c
832
io_log->req_cpu = io_req->cpu;
drivers/scsi/qedf/qedf_io.c
863
io_req->cpu = smp_processor_id();
drivers/scsi/qedf/qedf_main.c
2240
unsigned int cpu;
drivers/scsi/qedf/qedf_main.c
2293
cpu = 0;
drivers/scsi/qedf/qedf_main.c
2295
cpu = io_req->cpu;
drivers/scsi/qedf/qedf_main.c
2315
queue_work_on(cpu, qedf_io_wq, &io_work->work);
drivers/scsi/qedf/qedf_main.c
2409
int i, rc, cpu;
drivers/scsi/qedf/qedf_main.c
2413
cpu = cpumask_first(cpu_online_mask);
drivers/scsi/qedf/qedf_main.c
2431
rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
drivers/scsi/qedf/qedf_main.c
2432
cpu = cpumask_next(cpu, cpu_online_mask);
drivers/scsi/qedi/qedi_main.c
1270
int cpu;
drivers/scsi/qedi/qedi_main.c
1285
cpu = smp_processor_id();
drivers/scsi/qedi/qedi_main.c
1286
p = &per_cpu(qedi_percpu, cpu);
drivers/scsi/qedi/qedi_main.c
1408
int i, rc, cpu;
drivers/scsi/qedi/qedi_main.c
1411
cpu = cpumask_first(cpu_online_mask);
drivers/scsi/qedi/qedi_main.c
1431
get_cpu_mask(cpu));
drivers/scsi/qedi/qedi_main.c
1432
cpu = cpumask_next(cpu, cpu_online_mask);
drivers/scsi/qedi/qedi_main.c
1949
static int qedi_cpu_online(unsigned int cpu)
drivers/scsi/qedi/qedi_main.c
1955
cpu, "qedi_thread/%d");
drivers/scsi/qedi/qedi_main.c
1964
static int qedi_cpu_offline(unsigned int cpu)
drivers/scsi/qedi/qedi_main.c
2886
int cpu, rc = 0;
drivers/scsi/qedi/qedi_main.c
2905
for_each_possible_cpu(cpu) {
drivers/scsi/qedi/qedi_main.c
2906
p = &per_cpu(qedi_percpu, cpu);
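Note: bnx2fc, bnx2i and qedi all register per-CPU worker threads through the same cpuhp online/offline pair. A minimal registration sketch, assuming a dynamic hotplug state; the callback bodies and the "scsi/my:online" name are illustrative:

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    static int my_cpu_online(unsigned int cpu)
    {
            /* Create or unpark the per-CPU I/O thread for @cpu. */
            return 0;
    }

    static int my_cpu_offline(unsigned int cpu)
    {
            /* Flush pending work and stop the thread for @cpu. */
            return 0;
    }

    static int __init my_driver_init(void)
    {
            int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/my:online",
                                        my_cpu_online, my_cpu_offline);

            return ret < 0 ? ret : 0;  /* dynamic states return the state id */
    }
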
drivers/scsi/qla2xxx/qla_inline.h
595
unsigned int cpu;
drivers/scsi/qla2xxx/qla_inline.h
603
for_each_cpu(cpu, mask) {
drivers/scsi/qla2xxx/qla_inline.h
604
ha->qp_cpu_map[cpu] = qpair;
drivers/scsi/qla2xxx/tcm_qla2xxx.c
281
int tag, cpu;
drivers/scsi/qla2xxx/tcm_qla2xxx.c
283
tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
drivers/scsi/qla2xxx/tcm_qla2xxx.c
290
cmd->se_cmd.map_cpu = cpu;
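Note: tcm_qla2xxx (and the iscsi, sbp, tcm_fc and f_tcm entries later in this index) use sbitmap_queue_get(), which hands back a free tag plus the CPU it was allocated on; storing that CPU as map_cpu lets completion run on the same CPU. A minimal sketch:

    #include <linux/errno.h>
    #include <linux/sbitmap.h>

    static int alloc_session_tag(struct sbitmap_queue *tag_pool,
                                 unsigned int *out_cpu)
    {
            int tag = sbitmap_queue_get(tag_pool, out_cpu);

            return tag < 0 ? -ENOMEM : tag;  /* negative tag: pool exhausted */
    }
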
drivers/scsi/sgiwd93.c
104
(unsigned long)(hcp + 1) - (unsigned long)hd->cpu,
drivers/scsi/sgiwd93.c
115
pr_debug("dma_setup: datainp<%d> hcp<%p> ", datainp, hdata->cpu);
drivers/scsi/sgiwd93.c
185
struct hpc_chunk *hcp = (struct hpc_chunk *)hdata->cpu;
drivers/scsi/sgiwd93.c
245
hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE,
drivers/scsi/sgiwd93.c
247
if (!hdata->cpu) {
drivers/scsi/sgiwd93.c
285
dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
drivers/scsi/sgiwd93.c
302
dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
drivers/scsi/sgiwd93.c
41
void *cpu;
drivers/scsi/sgiwd93.c
81
hcp = hd->cpu;
drivers/scsi/smartpqi/smartpqi_init.c
7424
int cpu;
drivers/scsi/smartpqi/smartpqi_init.c
7443
for_each_online_cpu(cpu) {
drivers/scsi/smartpqi/smartpqi_init.c
7444
raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
drivers/scsi/smartpqi/smartpqi_init.c
7540
int cpu;
drivers/scsi/smartpqi/smartpqi_init.c
7559
for_each_online_cpu(cpu) {
drivers/scsi/smartpqi/smartpqi_init.c
7560
write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
drivers/scsi/storvsc_drv.c
614
int cpu;
drivers/scsi/storvsc_drv.c
652
for_each_possible_cpu(cpu) {
drivers/scsi/storvsc_drv.c
653
if (stor_device->stor_chns[cpu] && !cpumask_test_cpu(
drivers/scsi/storvsc_drv.c
654
cpu, &stor_device->alloced_cpus))
drivers/scsi/storvsc_drv.c
655
WRITE_ONCE(stor_device->stor_chns[cpu], NULL);
drivers/sh/intc/chip.c
21
unsigned int cpu;
drivers/sh/intc/chip.c
23
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
drivers/sh/intc/chip.c
25
if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data)))
drivers/sh/intc/chip.c
28
addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
drivers/sh/intc/chip.c
47
unsigned int cpu;
drivers/sh/intc/chip.c
51
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
drivers/sh/intc/chip.c
53
if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data)))
drivers/sh/intc/chip.c
56
addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
drivers/sh/intc/handle.c
210
unsigned int cpu;
drivers/sh/intc/handle.c
217
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
drivers/sh/intc/handle.c
218
addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
drivers/sh/intc/handle.c
223
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
drivers/sh/intc/handle.c
224
addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
drivers/soc/bcm/brcmstb/biuctrl.c
178
unsigned int cpu;
drivers/soc/bcm/brcmstb/biuctrl.c
188
for_each_possible_cpu(cpu) {
drivers/soc/bcm/brcmstb/biuctrl.c
189
shift = cpu * RAC_CPU_SHIFT + RACPREFDATA_SHIFT;
drivers/soc/bcm/brcmstb/biuctrl.c
190
enable |= RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT);
drivers/soc/bcm/brcmstb/biuctrl.c
194
pref_dist |= 1 << (cpu + DPREF_LINE_2_SHIFT);
drivers/soc/fsl/dpio/dpio-driver.c
113
if (irq_set_affinity_hint(irq->virq, cpumask_of(cpu)))
drivers/soc/fsl/dpio/dpio-driver.c
116
irq->virq, cpu);
drivers/soc/fsl/dpio/dpio-driver.c
184
desc.cpu = possible_next_cpu;
drivers/soc/fsl/dpio/dpio-driver.c
187
sdest = dpaa2_dpio_get_cluster_sdest(dpio_dev, desc.cpu);
drivers/soc/fsl/dpio/dpio-driver.c
194
desc.cpu);
drivers/soc/fsl/dpio/dpio-driver.c
240
err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
drivers/soc/fsl/dpio/dpio-driver.c
277
int err = 0, cpu;
drivers/soc/fsl/dpio/dpio-driver.c
281
cpu = dpaa2_io_get_cpu(priv->io);
drivers/soc/fsl/dpio/dpio-driver.c
287
cpumask_set_cpu(cpu, cpus_unused_mask);
drivers/soc/fsl/dpio/dpio-driver.c
55
static int dpaa2_dpio_get_cluster_sdest(struct fsl_mc_device *dpio_dev, int cpu)
drivers/soc/fsl/dpio/dpio-driver.c
72
return cluster_base + cpu / cluster_size;
drivers/soc/fsl/dpio/dpio-driver.c
93
static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
drivers/soc/fsl/dpio/dpio-service.c
103
struct dpaa2_io *dpaa2_io_service_select(int cpu)
drivers/soc/fsl/dpio/dpio-service.c
105
if (cpu == DPAA2_IO_ANY_CPU)
drivers/soc/fsl/dpio/dpio-service.c
108
return service_select_by_cpu(NULL, cpu);
drivers/soc/fsl/dpio/dpio-service.c
143
if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
drivers/soc/fsl/dpio/dpio-service.c
182
if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
drivers/soc/fsl/dpio/dpio-service.c
183
dpio_by_cpu[desc->cpu] = obj;
drivers/soc/fsl/dpio/dpio-service.c
209
dpio_by_cpu[d->dpio_desc.cpu] = NULL;
drivers/soc/fsl/dpio/dpio-service.c
273
return d->dpio_desc.cpu;
drivers/soc/fsl/dpio/dpio-service.c
58
int cpu)
drivers/soc/fsl/dpio/dpio-service.c
63
if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
drivers/soc/fsl/dpio/dpio-service.c
70
if (cpu < 0)
drivers/soc/fsl/dpio/dpio-service.c
71
cpu = raw_smp_processor_id();
drivers/soc/fsl/dpio/dpio-service.c
74
return dpio_by_cpu[cpu];
drivers/soc/fsl/qbman/bman.c
560
snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
drivers/soc/fsl/qbman/bman.c
566
if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
drivers/soc/fsl/qbman/bman.c
599
portal = &per_cpu(bman_affine_portal, c->cpu);
drivers/soc/fsl/qbman/bman.c
605
cpumask_set_cpu(c->cpu, &affine_mask);
drivers/soc/fsl/qbman/bman_portal.c
103
int irq, cpu, err, i;
drivers/soc/fsl/qbman/bman_portal.c
135
pcfg->cpu = -1;
drivers/soc/fsl/qbman/bman_portal.c
158
cpu = cpumask_first_zero(&portal_cpus);
drivers/soc/fsl/qbman/bman_portal.c
159
if (cpu >= nr_cpu_ids) {
drivers/soc/fsl/qbman/bman_portal.c
166
cpumask_set_cpu(cpu, &portal_cpus);
drivers/soc/fsl/qbman/bman_portal.c
168
pcfg->cpu = cpu;
drivers/soc/fsl/qbman/bman_portal.c
176
if (!cpu_online(cpu))
drivers/soc/fsl/qbman/bman_portal.c
177
bman_offline_cpu(cpu);
drivers/soc/fsl/qbman/bman_portal.c
45
__func__, pcfg->cpu);
drivers/soc/fsl/qbman/bman_portal.c
50
affine_bportals[pcfg->cpu] = p;
drivers/soc/fsl/qbman/bman_portal.c
52
dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
drivers/soc/fsl/qbman/bman_portal.c
57
static int bman_offline_cpu(unsigned int cpu)
drivers/soc/fsl/qbman/bman_portal.c
59
struct bman_portal *p = affine_bportals[cpu];
drivers/soc/fsl/qbman/bman_portal.c
70
cpu = cpumask_any_but(cpu_online_mask, cpu);
drivers/soc/fsl/qbman/bman_portal.c
71
irq_set_affinity(pcfg->irq, cpumask_of(cpu));
drivers/soc/fsl/qbman/bman_portal.c
75
static int bman_online_cpu(unsigned int cpu)
drivers/soc/fsl/qbman/bman_portal.c
77
struct bman_portal *p = affine_bportals[cpu];
drivers/soc/fsl/qbman/bman_portal.c
87
irq_set_affinity(pcfg->irq, cpumask_of(cpu));
drivers/soc/fsl/qbman/bman_priv.h
57
int cpu;
drivers/soc/fsl/qbman/dpaa_sys.h
115
int irq, int cpu)
drivers/soc/fsl/qbman/dpaa_sys.h
124
if (cpu == -1 || !cpu_online(cpu))
drivers/soc/fsl/qbman/dpaa_sys.h
125
cpu = cpumask_any(cpu_online_mask);
drivers/soc/fsl/qbman/dpaa_sys.h
127
ret = irq_set_affinity(irq, cpumask_of(cpu));
drivers/soc/fsl/qbman/dpaa_sys.h
129
dev_err(dev, "irq_set_affinity() on CPU %d failed\n", cpu);
drivers/soc/fsl/qbman/qman.c
1295
snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
drivers/soc/fsl/qbman/qman.c
1302
if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
drivers/soc/fsl/qbman/qman.c
1361
portal = &per_cpu(qman_affine_portal, c->cpu);
drivers/soc/fsl/qbman/qman.c
1367
cpumask_set_cpu(c->cpu, &affine_mask);
drivers/soc/fsl/qbman/qman.c
1368
affine_channels[c->cpu] = c->channel;
drivers/soc/fsl/qbman/qman.c
1369
affine_portals[c->cpu] = portal;
drivers/soc/fsl/qbman/qman.c
1410
int cpu;
drivers/soc/fsl/qbman/qman.c
1413
cpu = pcfg->cpu;
drivers/soc/fsl/qbman/qman.c
1418
cpumask_clear_cpu(cpu, &affine_mask);
drivers/soc/fsl/qbman/qman.c
1731
u16 qman_affine_channel(int cpu)
drivers/soc/fsl/qbman/qman.c
1733
if (cpu < 0) {
drivers/soc/fsl/qbman/qman.c
1736
cpu = portal->config->cpu;
drivers/soc/fsl/qbman/qman.c
1739
WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
drivers/soc/fsl/qbman/qman.c
1740
return affine_channels[cpu];
drivers/soc/fsl/qbman/qman.c
1744
struct qman_portal *qman_get_affine_portal(int cpu)
drivers/soc/fsl/qbman/qman.c
1746
return affine_portals[cpu];
drivers/soc/fsl/qbman/qman_portal.c
122
dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
drivers/soc/fsl/qbman/qman_portal.c
128
unsigned int cpu)
drivers/soc/fsl/qbman/qman_portal.c
132
if (fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu) < 0) {
drivers/soc/fsl/qbman/qman_portal.c
139
qman_set_sdest(pcfg->channel, cpu);
drivers/soc/fsl/qbman/qman_portal.c
142
static int qman_offline_cpu(unsigned int cpu)
drivers/soc/fsl/qbman/qman_portal.c
147
p = affine_portals[cpu];
drivers/soc/fsl/qbman/qman_portal.c
152
cpu = cpumask_any_but(cpu_online_mask, cpu);
drivers/soc/fsl/qbman/qman_portal.c
153
irq_set_affinity(pcfg->irq, cpumask_of(cpu));
drivers/soc/fsl/qbman/qman_portal.c
154
qman_portal_update_sdest(pcfg, cpu);
drivers/soc/fsl/qbman/qman_portal.c
160
static int qman_online_cpu(unsigned int cpu)
drivers/soc/fsl/qbman/qman_portal.c
165
p = affine_portals[cpu];
drivers/soc/fsl/qbman/qman_portal.c
169
irq_set_affinity(pcfg->irq, cpumask_of(cpu));
drivers/soc/fsl/qbman/qman_portal.c
170
qman_portal_update_sdest(pcfg, cpu);
drivers/soc/fsl/qbman/qman_portal.c
188
int irq, cpu, err, i;
drivers/soc/fsl/qbman/qman_portal.c
228
pcfg->cpu = -1;
drivers/soc/fsl/qbman/qman_portal.c
252
cpu = cpumask_first_zero(&portal_cpus);
drivers/soc/fsl/qbman/qman_portal.c
253
if (cpu >= nr_cpu_ids) {
drivers/soc/fsl/qbman/qman_portal.c
260
cpumask_set_cpu(cpu, &portal_cpus);
drivers/soc/fsl/qbman/qman_portal.c
262
pcfg->cpu = cpu;
drivers/soc/fsl/qbman/qman_portal.c
275
if (!cpu_online(cpu))
drivers/soc/fsl/qbman/qman_portal.c
276
qman_offline_cpu(cpu);
drivers/soc/fsl/qbman/qman_portal.c
45
static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
drivers/soc/fsl/qbman/qman_portal.c
57
ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);
drivers/soc/fsl/qbman/qman_portal.c
72
qman_set_sdest(pcfg->channel, cpu);
drivers/soc/fsl/qbman/qman_portal.c
92
portal_set_cpu(pcfg, pcfg->cpu);
drivers/soc/fsl/qbman/qman_portal.c
97
__func__, pcfg->cpu);
drivers/soc/fsl/qbman/qman_priv.h
165
int cpu;
drivers/soc/fsl/qbman/qman_test_stash.c
104
int cpu;
drivers/soc/fsl/qbman/qman_test_stash.c
106
for_each_online_cpu(cpu) {
drivers/soc/fsl/qbman/qman_test_stash.c
112
cpu, "hotpotato%d");
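Note: bman_portal.c and qman_portal.c handle CPU offline by steering the portal interrupt to any surviving online CPU (cpumask_any_but() followed by irq_set_affinity()). A minimal sketch of that migration step:

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>

    static void migrate_portal_irq(unsigned int irq, unsigned int dying_cpu)
    {
            unsigned int target = cpumask_any_but(cpu_online_mask, dying_cpu);

            /* cpumask_any_but() returns >= nr_cpu_ids if no CPU is left. */
            if (target < nr_cpu_ids)
                    irq_set_affinity(irq, cpumask_of(target));
    }
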
drivers/soc/qcom/spm.c
390
int cpu;
drivers/soc/qcom/spm.c
393
for_each_possible_cpu(cpu) {
drivers/soc/qcom/spm.c
396
cpu_node = of_cpu_device_node_get(cpu);
drivers/soc/qcom/spm.c
406
return cpu;
drivers/soc/renesas/r9a06g032-smp.c
35
r9a06g032_smp_boot_secondary(unsigned int cpu,
drivers/soc/renesas/r9a06g032-smp.c
44
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
drivers/soc/samsung/exynos-asv.c
101
ret = exynos_asv_update_cpu_opps(asv, cpu);
drivers/soc/samsung/exynos-asv.c
107
em_dev_update_chip_binning(cpu);
drivers/soc/samsung/exynos-asv.c
28
struct device *cpu)
drivers/soc/samsung/exynos-asv.c
36
if (of_device_is_compatible(cpu->of_node,
drivers/soc/samsung/exynos-asv.c
51
opp = dev_pm_opp_find_freq_exact(cpu, opp_freq * MHZ, true);
drivers/soc/samsung/exynos-asv.c
54
cpu->id, i, opp_freq);
drivers/soc/samsung/exynos-asv.c
66
ret = dev_pm_opp_adjust_voltage(cpu, opp_freq * MHZ,
drivers/soc/samsung/exynos-asv.c
71
opp_freq, new_volt, cpu->id);
drivers/soc/samsung/exynos-asv.c
75
opp_freq, volt, new_volt, cpu->id);
drivers/soc/samsung/exynos-asv.c
84
struct device *cpu;
drivers/soc/samsung/exynos-asv.c
90
cpu = get_cpu_device(cpuid);
drivers/soc/samsung/exynos-asv.c
91
if (!cpu)
drivers/soc/samsung/exynos-asv.c
94
opp_table = dev_pm_opp_get_opp_table(cpu);
drivers/soc/samsung/exynos-pmu.c
235
static int __gs101_cpu_pmu_online(unsigned int cpu)
drivers/soc/samsung/exynos-pmu.c
245
mask = BIT(cpu);
drivers/soc/samsung/exynos-pmu.c
248
mask, (0 << cpu));
drivers/soc/samsung/exynos-pmu.c
261
int cpu;
drivers/soc/samsung/exynos-pmu.c
270
cpu = smp_processor_id();
drivers/soc/samsung/exynos-pmu.c
271
__gs101_cpu_pmu_online(cpu);
drivers/soc/samsung/exynos-pmu.c
278
static int gs101_cpuhp_pmu_online(unsigned int cpu)
drivers/soc/samsung/exynos-pmu.c
284
__gs101_cpu_pmu_online(cpu);
drivers/soc/samsung/exynos-pmu.c
289
clear_bit(cpu, pmu_context->in_cpuhp);
drivers/soc/samsung/exynos-pmu.c
296
static int __gs101_cpu_pmu_offline(unsigned int cpu)
drivers/soc/samsung/exynos-pmu.c
306
mask = BIT(cpu);
drivers/soc/samsung/exynos-pmu.c
308
mask, BIT(cpu));
drivers/soc/samsung/exynos-pmu.c
314
mask = (BIT(cpu + 8));
drivers/soc/samsung/exynos-pmu.c
325
int cpu;
drivers/soc/samsung/exynos-pmu.c
328
cpu = smp_processor_id();
drivers/soc/samsung/exynos-pmu.c
330
if (test_bit(cpu, pmu_context->in_cpuhp)) {
drivers/soc/samsung/exynos-pmu.c
341
__gs101_cpu_pmu_offline(cpu);
drivers/soc/samsung/exynos-pmu.c
348
static int gs101_cpuhp_pmu_offline(unsigned int cpu)
drivers/soc/samsung/exynos-pmu.c
357
set_bit(cpu, pmu_context->in_cpuhp);
drivers/soc/samsung/exynos-pmu.c
358
__gs101_cpu_pmu_offline(cpu);
drivers/soc/samsung/exynos-pmu.c
415
int ret, cpu;
drivers/soc/samsung/exynos-pmu.c
460
for_each_online_cpu(cpu)
drivers/soc/samsung/exynos-pmu.c
461
gs101_cpuhp_pmu_online(cpu);
drivers/soc/samsung/gs101-pmu.c
194
#define CLUSTER_CPU_RANGE(cl, cpu) \
drivers/soc/samsung/gs101-pmu.c
195
regmap_reg_range(GS101_CLUSTER_CPU_IN(cl, cpu), \
drivers/soc/samsung/gs101-pmu.c
196
GS101_CLUSTER_CPU_IN(cl, cpu)), \
drivers/soc/samsung/gs101-pmu.c
197
regmap_reg_range(GS101_CLUSTER_CPU_INT_IN(cl, cpu), \
drivers/soc/samsung/gs101-pmu.c
198
GS101_CLUSTER_CPU_INT_IN(cl, cpu))
drivers/soc/samsung/gs101-pmu.c
39
#define CLUSTER_CPU_RANGE(cl, cpu) \
drivers/soc/samsung/gs101-pmu.c
40
regmap_reg_range(GS101_CLUSTER_CPU_CONFIGURATION(cl, cpu), \
drivers/soc/samsung/gs101-pmu.c
41
GS101_CLUSTER_CPU_OPTION(cl, cpu)), \
drivers/soc/samsung/gs101-pmu.c
42
regmap_reg_range(GS101_CLUSTER_CPU_OUT(cl, cpu), \
drivers/soc/samsung/gs101-pmu.c
43
GS101_CLUSTER_CPU_IN(cl, cpu)), \
drivers/soc/samsung/gs101-pmu.c
44
regmap_reg_range(GS101_CLUSTER_CPU_INT_IN(cl, cpu), \
drivers/soc/samsung/gs101-pmu.c
45
GS101_CLUSTER_CPU_INT_DIR(cl, cpu))
drivers/soc/ti/knav_qmss_queue.c
433
int cpu = 0;
drivers/soc/ti/knav_qmss_queue.c
446
for_each_possible_cpu(cpu) {
drivers/soc/ti/knav_qmss_queue.c
447
pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
drivers/soc/ti/knav_qmss_queue.c
448
pops += per_cpu_ptr(qh->stats, cpu)->pops;
drivers/soc/ti/knav_qmss_queue.c
449
push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
drivers/soc/ti/knav_qmss_queue.c
450
pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
drivers/soc/ti/knav_qmss_queue.c
451
notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
drivers/soc/xilinx/xlnx_event_manager.c
553
static int xlnx_event_cpuhp_start(unsigned int cpu)
drivers/soc/xilinx/xlnx_event_manager.c
560
static int xlnx_event_cpuhp_down(unsigned int cpu)
drivers/spi/spi.c
102
int cpu;
drivers/spi/spi.c
104
for_each_possible_cpu(cpu) {
drivers/spi/spi.c
107
stat = per_cpu_ptr(pcpu_stats, cpu);
drivers/target/iscsi/iscsi_target.c
3570
int ord, cpu;
drivers/target/iscsi/iscsi_target.c
3583
for_each_online_cpu(cpu) {
drivers/target/iscsi/iscsi_target.c
3585
cpumask_set_cpu(cpu, conn->conn_cpumask);
drivers/target/iscsi/iscsi_target.c
3595
for_each_cpu(cpu, conn_allowed_cpumask) {
drivers/target/iscsi/iscsi_target.c
3597
cpumask_set_cpu(cpu, conn->conn_cpumask);
drivers/target/iscsi/iscsi_target_util.c
159
int size, tag, cpu;
drivers/target/iscsi/iscsi_target_util.c
161
tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
drivers/target/iscsi/iscsi_target_util.c
163
tag = iscsit_wait_for_tag(se_sess, state, &cpu);
drivers/target/iscsi/iscsi_target_util.c
172
cmd->se_cmd.map_cpu = cpu;
drivers/target/sbp/sbp_target.c
914
int tag, cpu;
drivers/target/sbp/sbp_target.c
916
tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
drivers/target/sbp/sbp_target.c
923
req->se_cmd.map_cpu = cpu;
drivers/target/target_core_stat.c
285
unsigned int cpu; \
drivers/target/target_core_stat.c
288
for_each_possible_cpu(cpu) { \
drivers/target/target_core_stat.c
289
stats = per_cpu_ptr(per_cpu_stats, cpu); \
drivers/target/target_core_transport.c
1920
int cpu = se_cmd->cpuid;
drivers/target/target_core_transport.c
1923
sq = &se_dev->queues[cpu].sq;
drivers/target/target_core_transport.c
1925
queue_work_on(cpu, target_submission_wq, &sq->work);
drivers/target/target_core_transport.c
910
int success, cpu;
drivers/target/target_core_transport.c
940
cpu = cmd->cpuid;
drivers/target/target_core_transport.c
942
cpu = wwn->cmd_compl_affinity;
drivers/target/target_core_transport.c
944
queue_work_on(cpu, target_completion_wq, &cmd->work);
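Note: target_core_transport.c lines 940-944 pick the completion CPU: the submitter's CPU when the fabric asked for CPU affinity, otherwise a fixed CPU configured on the wwn. A sketch of the selection; the struct and the negative-means-follow-submitter convention are assumptions for illustration:

    #include <linux/workqueue.h>

    struct my_cmd {
            int cpuid;                 /* CPU the command was submitted on */
            struct work_struct work;
    };

    static void queue_completion(struct my_cmd *cmd, int compl_affinity,
                                 struct workqueue_struct *wq)
    {
            int cpu = compl_affinity < 0 ? cmd->cpuid : compl_affinity;

            queue_work_on(cpu, wq, &cmd->work);
    }
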
drivers/target/tcm_fc/tfc_cmd.c
425
int tag, cpu;
drivers/target/tcm_fc/tfc_cmd.c
427
tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
drivers/target/tcm_fc/tfc_cmd.c
435
cmd->se_cmd.map_cpu = cpu;
drivers/tee/optee/smc_abi.c
1131
int cpu, rc;
drivers/tee/optee/smc_abi.c
1137
for_each_present_cpu(cpu)
drivers/tee/optee/smc_abi.c
1138
per_cpu_ptr(optee_pcpu, cpu)->optee = optee;
drivers/tee/optee/smc_abi.c
1555
static int optee_cpuhp_probe(unsigned int cpu)
drivers/tee/optee/smc_abi.c
62
static int optee_cpuhp_enable_pcpu_irq(unsigned int cpu)
drivers/tee/optee/smc_abi.c
69
static int optee_cpuhp_disable_pcpu_irq(unsigned int cpu)
drivers/thermal/cpufreq_cooling.c
160
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
drivers/thermal/cpufreq_cooling.c
163
unsigned long util = sched_cpu_util(cpu);
drivers/thermal/cpufreq_cooling.c
165
return (util * 100) / arch_scale_cpu_capacity(cpu);
drivers/thermal/cpufreq_cooling.c
168
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
drivers/thermal/cpufreq_cooling.c
175
now_idle = get_cpu_idle_time(cpu, &now, 0);
drivers/thermal/cpufreq_cooling.c
234
int i = 0, cpu;
drivers/thermal/cpufreq_cooling.c
239
freq = cpufreq_quick_get(policy->cpu);
drivers/thermal/cpufreq_cooling.c
241
for_each_cpu(cpu, policy->related_cpus) {
drivers/thermal/cpufreq_cooling.c
244
if (cpu_online(cpu))
drivers/thermal/cpufreq_cooling.c
245
load = get_load(cpufreq_cdev, cpu, i);
drivers/thermal/cpufreq_cooling.c
256
trace_thermal_power_cpu_get_power_simple(policy->cpu, *power);
drivers/thermal/cpufreq_cooling.c
532
dev = get_cpu_device(policy->cpu);
drivers/thermal/cpufreq_cooling.c
534
pr_warn("No cpu device for cpu %d\n", policy->cpu);
drivers/thermal/cpufreq_cooling.c
650
struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
drivers/thermal/cpufreq_cooling.c
655
policy->cpu);
drivers/thermal/cpufreq_cooling.c
660
struct em_perf_domain *em = em_cpu_get(policy->cpu);
drivers/thermal/cpufreq_cooling.c
665
policy->cpu, PTR_ERR(cdev));
drivers/thermal/cpuidle_cooling.c
245
int cpu, ret;
drivers/thermal/cpuidle_cooling.c
247
for_each_cpu(cpu, drv->cpumask) {
drivers/thermal/cpuidle_cooling.c
249
cpu_node = of_cpu_device_node_get(cpu);
drivers/thermal/cpuidle_cooling.c
256
pr_debug("'thermal-idle' node not found for cpu%d\n", cpu);
drivers/thermal/cpuidle_cooling.c
266
"for cpu%d: %d\n", cpu, ret);
drivers/thermal/imx_thermal.c
552
np = of_get_cpu_node(data->policy->cpu, NULL);
drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
180
int cpu;
drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
185
for_each_online_cpu(cpu) {
drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
186
ret = intel_tcc_get_temp(cpu, &curr_temp, false);
drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
22
static int rapl_mmio_read_raw(int cpu, struct reg_action *ra, bool atomic)
drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
32
static int rapl_mmio_write_raw(int cpu, struct reg_action *ra)
drivers/thermal/intel/intel_hfi.c
175
int cpu, i = 0;
drivers/thermal/intel/intel_hfi.c
178
for_each_cpu(cpu, hfi_instance->cpus) {
drivers/thermal/intel/intel_hfi.c
182
index = per_cpu(hfi_cpu_info, cpu).index;
drivers/thermal/intel/intel_hfi.c
184
cpu_caps[i].cpu = cpu;
drivers/thermal/intel/intel_hfi.c
256
int cpu = smp_processor_id();
drivers/thermal/intel/intel_hfi.c
263
info = &per_cpu(hfi_cpu_info, cpu);
drivers/thermal/intel/intel_hfi.c
274
pr_debug("Received event on CPU %d but instance was null", cpu);
drivers/thermal/intel/intel_hfi.c
413
void intel_hfi_online(unsigned int cpu)
drivers/thermal/intel/intel_hfi.c
427
info = &per_cpu(hfi_cpu_info, cpu);
drivers/thermal/intel/intel_hfi.c
428
pkg_id = topology_logical_package_id(cpu);
drivers/thermal/intel/intel_hfi.c
479
cpumask_set_cpu(cpu, hfi_instance->cpus);
drivers/thermal/intel/intel_hfi.c
509
void intel_hfi_offline(unsigned int cpu)
drivers/thermal/intel/intel_hfi.c
511
struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, cpu);
drivers/thermal/intel/intel_hfi.c
527
cpumask_clear_cpu(cpu, hfi_instance->cpus);
drivers/thermal/intel/intel_hfi.c
629
unsigned int cpu;
drivers/thermal/intel/intel_hfi.c
659
cpu = cpumask_any(hfi_instance->cpus);
drivers/thermal/intel/intel_hfi.c
660
smp_call_function_single(cpu, func, hfi_instance, true);
drivers/thermal/intel/intel_hfi.h
12
static inline void intel_hfi_online(unsigned int cpu) { }
drivers/thermal/intel/intel_hfi.h
13
static inline void intel_hfi_offline(unsigned int cpu) { }
drivers/thermal/intel/intel_hfi.h
7
void intel_hfi_online(unsigned int cpu);
drivers/thermal/intel/intel_hfi.h
8
void intel_hfi_offline(unsigned int cpu);
drivers/thermal/intel/intel_powerclamp.c
62
unsigned int cpu;
drivers/thermal/intel/intel_tcc.c
182
int intel_tcc_get_tjmax(int cpu)
drivers/thermal/intel/intel_tcc.c
187
if (cpu < 0)
drivers/thermal/intel/intel_tcc.c
190
err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &low, &high);
drivers/thermal/intel/intel_tcc.c
209
int intel_tcc_get_offset(int cpu)
drivers/thermal/intel/intel_tcc.c
214
if (cpu < 0)
drivers/thermal/intel/intel_tcc.c
217
err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &low, &high);
drivers/thermal/intel/intel_tcc.c
236
int intel_tcc_set_offset(int cpu, int offset)
drivers/thermal/intel/intel_tcc.c
247
if (cpu < 0)
drivers/thermal/intel/intel_tcc.c
250
err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &low, &high);
drivers/thermal/intel/intel_tcc.c
261
if (cpu < 0)
drivers/thermal/intel/intel_tcc.c
264
return wrmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, low, high);
drivers/thermal/intel/intel_tcc.c
279
int intel_tcc_get_temp(int cpu, int *temp, bool pkg)
drivers/thermal/intel/intel_tcc.c
285
tjmax = intel_tcc_get_tjmax(cpu);
drivers/thermal/intel/intel_tcc.c
289
if (cpu < 0)
drivers/thermal/intel/intel_tcc.c
292
err = rdmsr_safe_on_cpu(cpu, msr, &low, &high);
drivers/thermal/intel/therm_throt.c
143
unsigned int cpu = dev->id; \
drivers/thermal/intel/therm_throt.c
147
if (cpu_online(cpu)) { \
drivers/thermal/intel/therm_throt.c
149
per_cpu(thermal_state, cpu).event.name); \
drivers/thermal/intel/therm_throt.c
469
static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
drivers/thermal/intel/therm_throt.c
472
struct cpuinfo_x86 *c = &cpu_data(cpu);
drivers/thermal/intel/therm_throt.c
528
static int thermal_throttle_online(unsigned int cpu)
drivers/thermal/intel/therm_throt.c
530
struct thermal_state *state = &per_cpu(thermal_state, cpu);
drivers/thermal/intel/therm_throt.c
531
struct device *dev = get_cpu_device(cpu);
drivers/thermal/intel/therm_throt.c
545
intel_hfi_online(cpu);
drivers/thermal/intel/therm_throt.c
551
return thermal_throttle_add_dev(dev, cpu);
drivers/thermal/intel/therm_throt.c
554
static int thermal_throttle_offline(unsigned int cpu)
drivers/thermal/intel/therm_throt.c
556
struct thermal_state *state = &per_cpu(thermal_state, cpu);
drivers/thermal/intel/therm_throt.c
557
struct device *dev = get_cpu_device(cpu);
drivers/thermal/intel/therm_throt.c
564
intel_hfi_offline(cpu);
drivers/thermal/intel/therm_throt.c
719
unsigned int cpu = smp_processor_id();
drivers/thermal/intel/therm_throt.c
750
pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu);
drivers/thermal/intel/x86_pkg_temp_thermal.c
101
int id = topology_logical_die_id(cpu);
drivers/thermal/intel/x86_pkg_temp_thermal.c
113
ret = intel_tcc_get_temp(zonedev->cpu, &val, true);
drivers/thermal/intel/x86_pkg_temp_thermal.c
134
tj_max = intel_tcc_get_tjmax(zonedev->cpu);
drivers/thermal/intel/x86_pkg_temp_thermal.c
144
ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
drivers/thermal/intel/x86_pkg_temp_thermal.c
170
return wrmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
drivers/thermal/intel/x86_pkg_temp_thermal.c
216
int cpu = smp_processor_id();
drivers/thermal/intel/x86_pkg_temp_thermal.c
223
zonedev = pkg_temp_thermal_get_dev(cpu);
drivers/thermal/intel/x86_pkg_temp_thermal.c
247
static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
drivers/thermal/intel/x86_pkg_temp_thermal.c
251
schedule_delayed_work_on(cpu, work, ms);
drivers/thermal/intel/x86_pkg_temp_thermal.c
256
int cpu = smp_processor_id();
drivers/thermal/intel/x86_pkg_temp_thermal.c
266
zonedev = pkg_temp_thermal_get_dev(cpu);
drivers/thermal/intel/x86_pkg_temp_thermal.c
269
pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
drivers/thermal/intel/x86_pkg_temp_thermal.c
276
static int pkg_temp_thermal_trips_init(int cpu, int tj_max,
drivers/thermal/intel/x86_pkg_temp_thermal.c
293
ret = rdmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
drivers/thermal/intel/x86_pkg_temp_thermal.c
308
__func__, cpu, i, trips[i].temperature);
drivers/thermal/intel/x86_pkg_temp_thermal.c
314
static int pkg_temp_thermal_device_add(unsigned int cpu)
drivers/thermal/intel/x86_pkg_temp_thermal.c
317
int id = topology_logical_die_id(cpu);
drivers/thermal/intel/x86_pkg_temp_thermal.c
333
tj_max = intel_tcc_get_tjmax(cpu);
drivers/thermal/intel/x86_pkg_temp_thermal.c
342
err = pkg_temp_thermal_trips_init(cpu, tj_max, trips, thres_count);
drivers/thermal/intel/x86_pkg_temp_thermal.c
347
zonedev->cpu = cpu;
drivers/thermal/intel/x86_pkg_temp_thermal.c
363
cpumask_set_cpu(cpu, &zonedev->cpumask);
drivers/thermal/intel/x86_pkg_temp_thermal.c
377
static int pkg_thermal_cpu_offline(unsigned int cpu)
drivers/thermal/intel/x86_pkg_temp_thermal.c
379
struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
drivers/thermal/intel/x86_pkg_temp_thermal.c
386
target = cpumask_any_but(&zonedev->cpumask, cpu);
drivers/thermal/intel/x86_pkg_temp_thermal.c
387
cpumask_clear_cpu(cpu, &zonedev->cpumask);
drivers/thermal/intel/x86_pkg_temp_thermal.c
417
was_target = zonedev->cpu == cpu;
drivers/thermal/intel/x86_pkg_temp_thermal.c
418
zonedev->cpu = target;
drivers/thermal/intel/x86_pkg_temp_thermal.c
427
zones[topology_logical_die_id(cpu)] = NULL;
drivers/thermal/intel/x86_pkg_temp_thermal.c
464
static int pkg_thermal_cpu_online(unsigned int cpu)
drivers/thermal/intel/x86_pkg_temp_thermal.c
466
struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
drivers/thermal/intel/x86_pkg_temp_thermal.c
467
struct cpuinfo_x86 *c = &cpu_data(cpu);
drivers/thermal/intel/x86_pkg_temp_thermal.c
475
cpumask_set_cpu(cpu, &zonedev->cpumask);
drivers/thermal/intel/x86_pkg_temp_thermal.c
478
return pkg_temp_thermal_device_add(cpu);
drivers/thermal/intel/x86_pkg_temp_thermal.c
51
int cpu;
drivers/thermal/intel/x86_pkg_temp_thermal.c
99
static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu)
drivers/thermal/thermal_netlink.c
222
cpu_cap->cpu))
drivers/thermal/thermal_netlink.h
8
int cpu;
drivers/thermal/thermal_trace.h
100
TP_ARGS(cpu, power),
drivers/thermal/thermal_trace.h
103
__field(int, cpu)
drivers/thermal/thermal_trace.h
108
__entry->cpu = cpu;
drivers/thermal/thermal_trace.h
112
TP_printk("cpu=%d power=%u", __entry->cpu, __entry->power)
drivers/thermal/thermal_trace.h
98
TP_PROTO(int cpu, u32 power),
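Note: the thermal_trace.h lines 98-112 are the scattered pieces of one TRACE_EVENT() for per-CPU power reporting. Reassembled as a sketch (the TRACE_SYSTEM/CREATE_TRACE_POINTS boilerplate a real trace header needs is elided, and the event name is illustrative):

    #include <linux/tracepoint.h>

    TRACE_EVENT(cpu_power_sample,
            TP_PROTO(int cpu, u32 power),
            TP_ARGS(cpu, power),
            TP_STRUCT__entry(
                    __field(int, cpu)
                    __field(u32, power)
            ),
            TP_fast_assign(
                    __entry->cpu = cpu;
                    __entry->power = power;
            ),
            TP_printk("cpu=%d power=%u", __entry->cpu, __entry->power)
    );
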
drivers/tty/hvc/hvc_dcc.c
106
int cpu = get_cpu();
drivers/tty/hvc/hvc_dcc.c
108
if (IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP) && cpu && dcc_core0_available) {
drivers/tty/mips_ejtag_fdc.c
1010
add_timer_on(&priv->poll_timer, dev->cpu);
drivers/tty/mips_ejtag_fdc.c
1039
if (dev->cpu == 0)
drivers/tty/mips_ejtag_fdc.c
1094
add_timer_on(&priv->poll_timer, dev->cpu);
drivers/tty/mips_ejtag_fdc.c
1100
dev->cpu, "ttyFDC/%u");
drivers/tty/mips_ejtag_fdc.c
1163
unsigned int cpu;
drivers/tty/mips_ejtag_fdc.c
1166
cpu = smp_processor_id();
drivers/tty/mips_ejtag_fdc.c
1167
regs = mips_ejtag_fdc_con.regs[cpu];
drivers/tty/mips_ejtag_fdc.c
1171
mips_ejtag_fdc_con.regs[cpu] = regs;
drivers/tty/mips_ejtag_fdc.c
144
unsigned int cpu;
drivers/tty/mips_ejtag_fdc.c
309
unsigned int i, buf_len, cpu;
drivers/tty/mips_ejtag_fdc.c
317
cpu = smp_processor_id();
drivers/tty/mips_ejtag_fdc.c
318
regs = cons->regs[cpu];
drivers/tty/mips_ejtag_fdc.c
322
cons->regs[cpu] = regs;
drivers/tty/mips_ejtag_fdc.c
663
if (smp_processor_id() != priv->cpu)
drivers/tty/mips_ejtag_fdc.c
897
priv->cpu = dev->cpu;
drivers/tty/mips_ejtag_fdc.c
928
snprintf(priv->fdc_name, sizeof(priv->fdc_name), "ttyFDC%u", dev->cpu);
drivers/tty/mips_ejtag_fdc.c
954
mips_ejtag_fdc_con.regs[dev->cpu] = priv->reg;
drivers/tty/mips_ejtag_fdc.c
955
if (dev->cpu == 0)
drivers/tty/mips_ejtag_fdc.c
965
dev->cpu, "ttyFDC/%u");
drivers/tty/serial/sunhv.c
405
void sunhv_migrate_hvcons_irq(int cpu)
drivers/tty/serial/sunhv.c
408
irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
drivers/ufs/host/ufs-mediatek.c
1920
unsigned int cpu;
drivers/ufs/host/ufs-mediatek.c
1924
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
drivers/ufs/host/ufs-mediatek.c
1925
ufs_mtk_mcq_set_irq_affinity(hba, cpu);
drivers/ufs/host/ufs-mediatek.c
844
static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu)
drivers/ufs/host/ufs-mediatek.c
852
q_index = map->mq_map[cpu];
drivers/ufs/host/ufs-mediatek.c
862
static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu)
drivers/ufs/host/ufs-mediatek.c
867
irq = ufs_mtk_mcq_get_irq(hba, cpu);
drivers/ufs/host/ufs-mediatek.c
869
dev_err(hba->dev, "invalid irq. unable to bind irq to cpu%d", cpu);
drivers/ufs/host/ufs-mediatek.c
874
_cpu = (cpu == 0) ? 3 : cpu;
drivers/usb/gadget/function/f_tcm.c
1348
int tag, cpu;
drivers/usb/gadget/function/f_tcm.c
1350
tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
drivers/usb/gadget/function/f_tcm.c
1357
cmd->se_cmd.map_cpu = cpu;
drivers/usb/gadget/function/f_tcm.c
1358
cmd->se_cmd.cpuid = cpu;
drivers/video/fbdev/gbefb.c
1023
tile = &gbe_tiles.cpu[offset >> TILE_SHIFT];
drivers/video/fbdev/gbefb.c
1155
gbe_tiles.cpu = dmam_alloc_coherent(&p_dev->dev,
drivers/video/fbdev/gbefb.c
1158
if (!gbe_tiles.cpu) {
drivers/video/fbdev/gbefb.c
1195
gbe_tiles.cpu[i] = (gbe_mem_phys >> TILE_SHIFT) + i;
drivers/video/fbdev/gbefb.c
72
uint16_t *cpu;
drivers/virt/acrn/hsm.c
438
u64 cpu, lapicid;
drivers/virt/acrn/hsm.c
441
if (kstrtoull(buf, 0, &cpu) < 0)
drivers/virt/acrn/hsm.c
444
if (cpu >= num_possible_cpus() || cpu == 0 || !cpu_is_hotpluggable(cpu))
drivers/virt/acrn/hsm.c
447
if (cpu_online(cpu))
drivers/virt/acrn/hsm.c
448
remove_cpu(cpu);
drivers/virt/acrn/hsm.c
450
lapicid = cpu_data(cpu).topo.apicid;
drivers/virt/acrn/hsm.c
451
dev_dbg(dev, "Try to remove cpu %lld with lapicid %lld\n", cpu, lapicid);
drivers/virt/acrn/hsm.c
454
dev_err(dev, "Failed to remove cpu %lld!\n", cpu);
drivers/virt/acrn/hsm.c
461
add_cpu(cpu);
drivers/virt/acrn/hypercall.h
58
static inline long hcall_sos_remove_cpu(u64 cpu)
drivers/virt/acrn/hypercall.h
60
return acrn_hypercall1(HC_SOS_REMOVE_CPU, cpu);
drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
111
cpu = smp_processor_id();
drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
116
ret = smp_call_function_single(cpu, arm_cca_attestation_init,
drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
150
ret = smp_call_function_single(cpu,
drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
93
int cpu;
drivers/virt/nitro_enclaves/ne_misc_dev.c
1073
unsigned int cpu = 0;
drivers/virt/nitro_enclaves/ne_misc_dev.c
1102
for_each_cpu(cpu, ne_enclave->threads_per_core[i])
drivers/virt/nitro_enclaves/ne_misc_dev.c
1103
if (!cpumask_test_cpu(cpu, ne_enclave->vcpu_ids)) {
drivers/virt/nitro_enclaves/ne_misc_dev.c
1416
unsigned int cpu = 0;
drivers/virt/nitro_enclaves/ne_misc_dev.c
1422
for_each_cpu(cpu, ne_enclave->threads_per_core[i])
drivers/virt/nitro_enclaves/ne_misc_dev.c
1424
cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]);
drivers/virt/nitro_enclaves/ne_misc_dev.c
182
unsigned int cpu = 0;
drivers/virt/nitro_enclaves/ne_misc_dev.c
201
cpu = cpumask_any(cpu_pool);
drivers/virt/nitro_enclaves/ne_misc_dev.c
202
if (cpu >= nr_cpu_ids) {
drivers/virt/nitro_enclaves/ne_misc_dev.c
214
for_each_cpu(cpu, cpu_pool)
drivers/virt/nitro_enclaves/ne_misc_dev.c
215
if (cpu_is_offline(cpu)) {
drivers/virt/nitro_enclaves/ne_misc_dev.c
217
ne_misc_dev.name, cpu);
drivers/virt/nitro_enclaves/ne_misc_dev.c
227
for_each_cpu(cpu, cpu_pool)
drivers/virt/nitro_enclaves/ne_misc_dev.c
229
numa_node = cpu_to_node(cpu);
drivers/virt/nitro_enclaves/ne_misc_dev.c
239
if (numa_node != cpu_to_node(cpu)) {
drivers/virt/nitro_enclaves/ne_misc_dev.c
277
for_each_cpu(cpu, cpu_pool) {
drivers/virt/nitro_enclaves/ne_misc_dev.c
278
for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu)) {
drivers/virt/nitro_enclaves/ne_misc_dev.c
291
cpu = cpumask_any(cpu_pool);
drivers/virt/nitro_enclaves/ne_misc_dev.c
292
for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu))
drivers/virt/nitro_enclaves/ne_misc_dev.c
316
for_each_cpu(cpu, cpu_pool) {
drivers/virt/nitro_enclaves/ne_misc_dev.c
317
core_id = topology_core_id(cpu);
drivers/virt/nitro_enclaves/ne_misc_dev.c
320
ne_misc_dev.name, core_id, cpu);
drivers/virt/nitro_enclaves/ne_misc_dev.c
327
cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id]);
drivers/virt/nitro_enclaves/ne_misc_dev.c
340
for_each_cpu(cpu, cpu_pool) {
drivers/virt/nitro_enclaves/ne_misc_dev.c
341
rc = remove_cpu(cpu);
drivers/virt/nitro_enclaves/ne_misc_dev.c
344
ne_misc_dev.name, cpu, rc);
drivers/virt/nitro_enclaves/ne_misc_dev.c
359
for_each_cpu(cpu, cpu_pool)
drivers/virt/nitro_enclaves/ne_misc_dev.c
360
add_cpu(cpu);
drivers/virt/nitro_enclaves/ne_misc_dev.c
387
unsigned int cpu = 0;
drivers/virt/nitro_enclaves/ne_misc_dev.c
400
for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]) {
drivers/virt/nitro_enclaves/ne_misc_dev.c
401
rc = add_cpu(cpu);
drivers/virt/nitro_enclaves/ne_misc_dev.c
404
ne_misc_dev.name, cpu, rc);
drivers/virt/nitro_enclaves/ne_misc_dev.c
479
static bool ne_donated_cpu(struct ne_enclave *ne_enclave, unsigned int cpu)
drivers/virt/nitro_enclaves/ne_misc_dev.c
481
if (cpumask_test_cpu(cpu, ne_enclave->vcpu_ids))
drivers/virt/nitro_enclaves/ne_misc_dev.c
529
unsigned int cpu = 0;
drivers/virt/nitro_enclaves/ne_misc_dev.c
552
for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id])
drivers/virt/nitro_enclaves/ne_misc_dev.c
553
cpumask_set_cpu(cpu, ne_enclave->threads_per_core[core_id]);
drivers/virt/nitro_enclaves/ne_misc_dev.c
575
unsigned int cpu = 0;
drivers/virt/nitro_enclaves/ne_misc_dev.c
585
for_each_cpu(cpu, ne_enclave->threads_per_core[i])
drivers/virt/nitro_enclaves/ne_misc_dev.c
586
if (!ne_donated_cpu(ne_enclave, cpu)) {
drivers/virt/nitro_enclaves/ne_misc_dev.c
587
*vcpu_id = cpu;
drivers/watchdog/octeon-wdt-main.c
113
static int cpu2core(int cpu)
drivers/watchdog/octeon-wdt-main.c
116
return cpu_logical_map(cpu) & 0x3f;
drivers/watchdog/octeon-wdt-main.c
132
int cpu = raw_smp_processor_id();
drivers/watchdog/octeon-wdt-main.c
133
unsigned int core = cpu2core(cpu);
drivers/watchdog/octeon-wdt-main.c
134
int node = cpu_to_node(cpu);
drivers/watchdog/octeon-wdt-main.c
137
if (per_cpu_countdown[cpu] > 0) {
drivers/watchdog/octeon-wdt-main.c
140
per_cpu_countdown[cpu]--;
drivers/watchdog/octeon-wdt-main.c
144
cpumask_clear_cpu(cpu, &irq_enabled_cpus);
drivers/watchdog/octeon-wdt-main.c
302
static int octeon_wdt_cpu_to_irq(int cpu)
drivers/watchdog/octeon-wdt-main.c
308
coreid = cpu2core(cpu);
drivers/watchdog/octeon-wdt-main.c
309
node = cpu_to_node(cpu);
drivers/watchdog/octeon-wdt-main.c
325
static int octeon_wdt_cpu_pre_down(unsigned int cpu)
drivers/watchdog/octeon-wdt-main.c
331
core = cpu2core(cpu);
drivers/watchdog/octeon-wdt-main.c
333
node = cpu_to_node(cpu);
drivers/watchdog/octeon-wdt-main.c
342
free_irq(octeon_wdt_cpu_to_irq(cpu), octeon_wdt_poke_irq);
drivers/watchdog/octeon-wdt-main.c
346
static int octeon_wdt_cpu_online(unsigned int cpu)
drivers/watchdog/octeon-wdt-main.c
355
core = cpu2core(cpu);
drivers/watchdog/octeon-wdt-main.c
356
node = cpu_to_node(cpu);
drivers/watchdog/octeon-wdt-main.c
364
per_cpu_countdown[cpu] = countdown_reset;
drivers/watchdog/octeon-wdt-main.c
384
irq_set_affinity(irq, cpumask_of(cpu));
drivers/watchdog/octeon-wdt-main.c
387
cpumask_set_cpu(cpu, &irq_enabled_cpus);
drivers/watchdog/octeon-wdt-main.c
403
int cpu;
drivers/watchdog/octeon-wdt-main.c
410
for_each_online_cpu(cpu) {
drivers/watchdog/octeon-wdt-main.c
411
coreid = cpu2core(cpu);
drivers/watchdog/octeon-wdt-main.c
412
node = cpu_to_node(cpu);
drivers/watchdog/octeon-wdt-main.c
414
per_cpu_countdown[cpu] = countdown_reset;
drivers/watchdog/octeon-wdt-main.c
416
!cpumask_test_cpu(cpu, &irq_enabled_cpus)) {
drivers/watchdog/octeon-wdt-main.c
418
enable_irq(octeon_wdt_cpu_to_irq(cpu));
drivers/watchdog/octeon-wdt-main.c
419
cpumask_set_cpu(cpu, &irq_enabled_cpus);
drivers/watchdog/octeon-wdt-main.c
454
int cpu;
drivers/watchdog/octeon-wdt-main.c
467
for_each_online_cpu(cpu) {
drivers/watchdog/octeon-wdt-main.c
468
coreid = cpu2core(cpu);
drivers/watchdog/octeon-wdt-main.c
469
node = cpu_to_node(cpu);
drivers/xen/cpu_hotplug.c
12
static void enable_hotplug_cpu(int cpu)
drivers/xen/cpu_hotplug.c
14
if (!cpu_present(cpu))
drivers/xen/cpu_hotplug.c
15
xen_arch_register_cpu(cpu);
drivers/xen/cpu_hotplug.c
17
set_cpu_present(cpu, true);
drivers/xen/cpu_hotplug.c
20
static void disable_hotplug_cpu(int cpu)
drivers/xen/cpu_hotplug.c
22
if (!cpu_is_hotpluggable(cpu))
drivers/xen/cpu_hotplug.c
25
if (cpu_online(cpu))
drivers/xen/cpu_hotplug.c
26
device_offline(get_cpu_device(cpu));
drivers/xen/cpu_hotplug.c
27
if (!cpu_online(cpu) && cpu_present(cpu)) {
drivers/xen/cpu_hotplug.c
28
xen_arch_unregister_cpu(cpu);
drivers/xen/cpu_hotplug.c
29
set_cpu_present(cpu, false);
drivers/xen/cpu_hotplug.c
34
static int vcpu_online(unsigned int cpu)
drivers/xen/cpu_hotplug.c
39
sprintf(dir, "cpu/%u", cpu);
drivers/xen/cpu_hotplug.c
52
pr_err("unknown state(%s) on CPU%d\n", state, cpu);
drivers/xen/cpu_hotplug.c
55
static void vcpu_hotplug(unsigned int cpu)
drivers/xen/cpu_hotplug.c
57
if (cpu >= nr_cpu_ids || !cpu_possible(cpu))
drivers/xen/cpu_hotplug.c
60
switch (vcpu_online(cpu)) {
drivers/xen/cpu_hotplug.c
62
enable_hotplug_cpu(cpu);
drivers/xen/cpu_hotplug.c
65
disable_hotplug_cpu(cpu);
drivers/xen/cpu_hotplug.c
75
unsigned int cpu;
drivers/xen/cpu_hotplug.c
80
sscanf(cpustr, "cpu/%u", &cpu);
drivers/xen/cpu_hotplug.c
81
vcpu_hotplug(cpu);
drivers/xen/cpu_hotplug.c
88
int cpu;
drivers/xen/cpu_hotplug.c
95
for_each_possible_cpu(cpu) {
drivers/xen/cpu_hotplug.c
96
if (vcpu_online(cpu) == 0)
drivers/xen/cpu_hotplug.c
97
disable_hotplug_cpu(cpu);
drivers/xen/events/events_2l.c
147
static inline xen_ulong_t active_evtchns(unsigned int cpu,
drivers/xen/events/events_2l.c
152
per_cpu(cpu_evtchn_mask, cpu)[idx] &
drivers/xen/events/events_2l.c
164
static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
drivers/xen/events/events_2l.c
177
irq = irq_evtchn_from_virq(cpu, VIRQ_TIMER, &evtchn);
drivers/xen/events/events_2l.c
181
if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
drivers/xen/events/events_2l.c
212
pending_bits = active_evtchns(cpu, s, word_idx);
drivers/xen/events/events_2l.c
267
int cpu = smp_processor_id();
drivers/xen/events/events_2l.c
268
xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
drivers/xen/events/events_2l.c
276
printk("\nvcpu %d\n ", cpu);
drivers/xen/events/events_2l.c
281
pending = (get_irq_regs() && i == cpu)
drivers/xen/events/events_2l.c
289
v = per_cpu(xen_vcpu, cpu);
drivers/xen/events/events_2l.c
311
printk("\nlocal cpu%d mask:\n ", cpu);
drivers/xen/events/events_2l.c
357
static int evtchn_2l_percpu_deinit(unsigned int cpu)
drivers/xen/events/events_2l.c
359
memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
drivers/xen/events/events_2l.c
50
static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
drivers/xen/events/events_2l.c
52
clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
drivers/xen/events/events_2l.c
55
static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
drivers/xen/events/events_2l.c
59
set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
drivers/xen/events/events_2l.c
89
unsigned int cpu = get_cpu();
drivers/xen/events/events_2l.c
96
if (unlikely((cpu != cpu_from_evtchn(port))))
drivers/xen/events/events_base.c
112
unsigned short cpu; /* cpu bound */
drivers/xen/events/events_base.c
1245
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
drivers/xen/events/events_base.c
1254
ret = per_cpu(ipi_to_irq, cpu)[ipi];
drivers/xen/events/events_base.c
1264
bind_ipi.vcpu = xen_vcpu_nr(cpu);
drivers/xen/events/events_base.c
1270
ret = xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
drivers/xen/events/events_base.c
1279
bind_evtchn_to_cpu(info, cpu, true);
drivers/xen/events/events_base.c
1317
static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn,
drivers/xen/events/events_base.c
1337
if (status.vcpu == xen_vcpu_nr(cpu)) {
drivers/xen/events/events_base.c
1360
int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
drivers/xen/events/events_base.c
1369
ret = per_cpu(virq_to_irq, cpu)[virq];
drivers/xen/events/events_base.c
1384
bind_virq.vcpu = xen_vcpu_nr(cpu);
drivers/xen/events/events_base.c
1391
ret = find_virq(virq, cpu, &evtchn, percpu);
drivers/xen/events/events_base.c
1398
ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
drivers/xen/events/events_base.c
1408
bind_evtchn_to_cpu(info, cpu, percpu);
drivers/xen/events/events_base.c
1508
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
drivers/xen/events/events_base.c
1514
irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
drivers/xen/events/events_base.c
1528
unsigned int cpu,
drivers/xen/events/events_base.c
1536
irq = bind_ipi_to_irq(ipi, cpu);
drivers/xen/events/events_base.c
1632
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
drivers/xen/events/events_base.c
1638
int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
drivers/xen/events/events_base.c
1641
printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
drivers/xen/events/events_base.c
1645
evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
drivers/xen/events/events_base.c
1706
int cpu = smp_processor_id();
drivers/xen/events/events_base.c
1721
xen_evtchn_handle_events(cpu, &ctrl);
drivers/xen/events/events_base.c
1767
bind_evtchn_to_cpu(info, info->cpu, false);
drivers/xen/events/events_base.c
1801
int old_cpu = info->cpu;
drivers/xen/events/events_base.c
1826
unsigned int cpu, best_cpu = UINT_MAX, minch = UINT_MAX;
drivers/xen/events/events_base.c
1828
for_each_cpu_and(cpu, dest, cpu_online_mask) {
drivers/xen/events/events_base.c
1829
unsigned int curch = atomic_read(&channels_on_cpu[cpu]);
drivers/xen/events/events_base.c
1833
best_cpu = cpu;
drivers/xen/events/events_base.c
1975
static void restore_cpu_virqs(unsigned int cpu)
drivers/xen/events/events_base.c
1983
if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
drivers/xen/events/events_base.c
1991
bind_virq.vcpu = xen_vcpu_nr(cpu);
drivers/xen/events/events_base.c
1998
xen_irq_info_virq_setup(info, cpu, evtchn, virq);
drivers/xen/events/events_base.c
2000
bind_evtchn_to_cpu(info, cpu, false);
drivers/xen/events/events_base.c
2004
static void restore_cpu_ipis(unsigned int cpu)
drivers/xen/events/events_base.c
2012
if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
drivers/xen/events/events_base.c
2019
bind_ipi.vcpu = xen_vcpu_nr(cpu);
drivers/xen/events/events_base.c
2026
xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
drivers/xen/events/events_base.c
2028
bind_evtchn_to_cpu(info, cpu, false);
drivers/xen/events/events_base.c
2098
unsigned int cpu;
drivers/xen/events/events_base.c
2114
for_each_possible_cpu(cpu) {
drivers/xen/events/events_base.c
2115
restore_cpu_virqs(cpu);
drivers/xen/events/events_base.c
2116
restore_cpu_ipis(cpu);
drivers/xen/events/events_base.c
2217
int xen_set_upcall_vector(unsigned int cpu)
drivers/xen/events/events_base.c
2222
.vcpu = per_cpu(xen_vcpu_id, cpu),
drivers/xen/events/events_base.c
2230
if (!cpu)
drivers/xen/events/events_base.c
2247
int xen_set_upcall_vector(unsigned int cpu) {}
drivers/xen/events/events_base.c
2255
static int xen_evtchn_cpu_prepare(unsigned int cpu)
drivers/xen/events/events_base.c
2259
xen_cpu_init_eoi(cpu);
drivers/xen/events/events_base.c
2262
ret = evtchn_ops->percpu_init(cpu);
drivers/xen/events/events_base.c
2267
static int xen_evtchn_cpu_dead(unsigned int cpu)
drivers/xen/events/events_base.c
2272
ret = evtchn_ops->percpu_deinit(cpu);
drivers/xen/events/events_base.c
288
if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
drivers/xen/events/events_base.c
291
WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1 , 0));
drivers/xen/events/events_base.c
296
if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
drivers/xen/events/events_base.c
299
if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
drivers/xen/events/events_base.c
331
unsigned short cpu)
drivers/xen/events/events_base.c
339
info->cpu = cpu;
drivers/xen/events/events_base.c
366
static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu,
drivers/xen/events/events_base.c
371
per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;
drivers/xen/events/events_base.c
372
per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
drivers/xen/events/events_base.c
377
static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu,
drivers/xen/events/events_base.c
382
per_cpu(virq_to_irq, cpu)[virq] = info->irq;
drivers/xen/events/events_base.c
402
xen_evtchn_port_remove(info->evtchn, info->cpu);
drivers/xen/events/events_base.c
430
int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
drivers/xen/events/events_base.c
433
int irq = per_cpu(virq_to_irq, cpu)[virq];
drivers/xen/events/events_base.c
468
return info ? info->cpu : 0;
drivers/xen/events/events_base.c
513
static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu,
drivers/xen/events/events_base.c
519
irq_data_update_affinity(data, cpumask_of(cpu));
drivers/xen/events/events_base.c
520
irq_data_update_effective_affinity(data, cpumask_of(cpu));
drivers/xen/events/events_base.c
523
xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu);
drivers/xen/events/events_base.c
526
info->cpu = cpu;
drivers/xen/events/events_base.c
600
unsigned int cpu;
drivers/xen/events/events_base.c
634
cpu = info->eoi_cpu;
drivers/xen/events/events_base.c
636
(info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
drivers/xen/events/events_base.c
689
static void xen_cpu_init_eoi(unsigned int cpu)
drivers/xen/events/events_base.c
691
struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);
drivers/xen/events/events_base.c
969
unsigned int cpu = info->cpu;
drivers/xen/events/events_base.c
977
per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
drivers/xen/events/events_base.c
980
per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
drivers/xen/events/events_base.c
981
per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0;
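
The per_cpu(virq_to_irq, cpu)[...] and per_cpu(ipi_to_irq, cpu)[...] accesses above index per-CPU lookup arrays. A minimal sketch of how such a table is declared and addressed (the array size NR_VIRQS and the -1 "unbound" initializer follow events_base.c; treat the exact declaration here as illustrative):

	/* One int per VIRQ, replicated for every possible CPU. */
	static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {
		[0 ... NR_VIRQS - 1] = -1
	};

	/* Read or write the copy that belongs to a given CPU. */
	per_cpu(virq_to_irq, cpu)[virq] = irq;
	irq = per_cpu(virq_to_irq, cpu)[virq];
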
drivers/xen/events/events_fifo.c
102
static int init_control_block(int cpu,
drivers/xen/events/events_fifo.c
105
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
drivers/xen/events/events_fifo.c
116
init_control.vcpu = xen_vcpu_nr(cpu);
drivers/xen/events/events_fifo.c
188
static void evtchn_fifo_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
drivers/xen/events/events_fifo.c
273
static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl,
drivers/xen/events/events_fifo.c
277
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
drivers/xen/events/events_fifo.c
317
static void __evtchn_fifo_handle_events(unsigned cpu,
drivers/xen/events/events_fifo.c
324
control_block = per_cpu(cpu_control_block, cpu);
drivers/xen/events/events_fifo.c
330
consume_one_event(cpu, ctrl, control_block, q, &ready);
drivers/xen/events/events_fifo.c
335
static void evtchn_fifo_handle_events(unsigned cpu,
drivers/xen/events/events_fifo.c
338
__evtchn_fifo_handle_events(cpu, ctrl);
drivers/xen/events/events_fifo.c
343
unsigned cpu;
drivers/xen/events/events_fifo.c
345
for_each_possible_cpu(cpu) {
drivers/xen/events/events_fifo.c
346
void *control_block = per_cpu(cpu_control_block, cpu);
drivers/xen/events/events_fifo.c
357
if (!cpu_online(cpu)) {
drivers/xen/events/events_fifo.c
359
per_cpu(cpu_control_block, cpu) = NULL;
drivers/xen/events/events_fifo.c
363
ret = init_control_block(cpu, control_block);
drivers/xen/events/events_fifo.c
375
static int evtchn_fifo_alloc_control_block(unsigned cpu)
drivers/xen/events/events_fifo.c
384
ret = init_control_block(cpu, control_block);
drivers/xen/events/events_fifo.c
388
per_cpu(cpu_control_block, cpu) = control_block;
drivers/xen/events/events_fifo.c
397
static int evtchn_fifo_percpu_init(unsigned int cpu)
drivers/xen/events/events_fifo.c
399
if (!per_cpu(cpu_control_block, cpu))
drivers/xen/events/events_fifo.c
400
return evtchn_fifo_alloc_control_block(cpu);
drivers/xen/events/events_fifo.c
404
static int evtchn_fifo_percpu_deinit(unsigned int cpu)
drivers/xen/events/events_fifo.c
406
__evtchn_fifo_handle_events(cpu, NULL);
drivers/xen/events/events_fifo.c
428
int cpu = smp_processor_id();
drivers/xen/events/events_fifo.c
431
ret = evtchn_fifo_alloc_control_block(cpu);
drivers/xen/events/events_internal.h
17
void (*remove)(evtchn_port_t port, unsigned int cpu);
drivers/xen/events/events_internal.h
18
void (*bind_to_cpu)(evtchn_port_t evtchn, unsigned int cpu,
drivers/xen/events/events_internal.h
27
void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl);
drivers/xen/events/events_internal.h
30
int (*percpu_init)(unsigned int cpu);
drivers/xen/events/events_internal.h
31
int (*percpu_deinit)(unsigned int cpu);
drivers/xen/events/events_internal.h
57
unsigned int cpu)
drivers/xen/events/events_internal.h
60
evtchn_ops->remove(evtchn, cpu);
drivers/xen/events/events_internal.h
64
unsigned int cpu,
drivers/xen/events/events_internal.h
67
evtchn_ops->bind_to_cpu(evtchn, cpu, old_cpu);
drivers/xen/events/events_internal.h
95
static inline void xen_evtchn_handle_events(unsigned cpu,
drivers/xen/events/events_internal.h
98
return evtchn_ops->handle_events(cpu, ctrl);
drivers/xen/mcelog.c
246
m.cpu = m.extcpu = g_physinfo[i].mc_cpunr;
drivers/xen/pcpu.c
103
struct pcpu *cpu = container_of(dev, struct pcpu, dev);
drivers/xen/pcpu.c
105
return sprintf(buf, "%u\n", !!(cpu->flags & XEN_PCPU_FLAGS_ONLINE));
drivers/xen/pcpu.c
274
static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)
drivers/xen/pcpu.c
282
.u.pcpu_info.xen_cpuid = cpu,
drivers/xen/pcpu.c
293
pcpu = get_pcpu(cpu);
drivers/xen/pcpu.c
321
uint32_t cpu = 0, max_cpu = 0;
drivers/xen/pcpu.c
327
while (!err && (cpu <= max_cpu)) {
drivers/xen/pcpu.c
328
err = sync_pcpu(cpu, &max_cpu);
drivers/xen/pcpu.c
329
cpu++;
drivers/xen/time.c
105
for_each_possible_cpu(cpu) {
drivers/xen/time.c
106
xen_get_runstate_snapshot_cpu_delta(&state, cpu);
drivers/xen/time.c
107
memcpy(runstate_delta[cpu].time, state.time,
drivers/xen/time.c
108
sizeof(runstate_delta[cpu].time));
drivers/xen/time.c
120
for_each_possible_cpu(cpu) {
drivers/xen/time.c
122
per_cpu(old_runstate_time, cpu)[i] +=
drivers/xen/time.c
123
runstate_delta[cpu].time[i];
drivers/xen/time.c
144
u64 xen_steal_clock(int cpu)
drivers/xen/time.c
148
xen_get_runstate_snapshot_cpu(&state, cpu);
drivers/xen/time.c
152
void xen_setup_runstate_info(int cpu)
drivers/xen/time.c
156
area.addr.v = &per_cpu(xen_runstate, cpu);
drivers/xen/time.c
159
xen_vcpu_nr(cpu), &area))
drivers/xen/time.c
56
struct vcpu_runstate_info *res, unsigned int cpu)
drivers/xen/time.c
63
state = per_cpu_ptr(&xen_runstate, cpu);
drivers/xen/time.c
75
unsigned int cpu)
drivers/xen/time.c
79
xen_get_runstate_snapshot_cpu_delta(res, cpu);
drivers/xen/time.c
82
res->time[i] += per_cpu(old_runstate_time, cpu)[i];
drivers/xen/time.c
89
int cpu, i;
drivers/xen/xen-scsiback.c
678
int tag, cpu, i;
drivers/xen/xen-scsiback.c
680
tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
drivers/xen/xen-scsiback.c
689
req->se_cmd.map_cpu = cpu;
fs/aio.c
103
struct kioctx_cpu __percpu *cpu;
fs/aio.c
622
free_percpu(ctx->cpu);
fs/aio.c
787
ctx->cpu = alloc_percpu(struct kioctx_cpu);
fs/aio.c
788
if (!ctx->cpu)
fs/aio.c
834
free_percpu(ctx->cpu);
fs/aio.c
939
kcpu = this_cpu_ptr(ctx->cpu);
fs/aio.c
957
kcpu = this_cpu_ptr(ctx->cpu);
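
The fs/aio.c entries above show the usual lifecycle of a dynamically allocated per-CPU object: alloc_percpu() at setup, this_cpu_ptr() on the hot path, free_percpu() on teardown. A condensed sketch (struct kioctx_cpu stands in for any per-CPU payload; aio itself guards the access with local IRQ disabling, the preemption guard below is the generic minimum that keeps this_cpu_ptr() stable):

	struct kioctx_cpu __percpu *pcpu;
	struct kioctx_cpu *kcpu;

	pcpu = alloc_percpu(struct kioctx_cpu);
	if (!pcpu)
		return -ENOMEM;

	preempt_disable();
	kcpu = this_cpu_ptr(pcpu);	/* this CPU's private copy */
	kcpu->reqs_available += nr;	/* no locking against other CPUs needed */
	preempt_enable();

	/* ... and on teardown: */
	free_percpu(pcpu);
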
fs/btrfs/accessors.h
621
static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu,
fs/btrfs/accessors.h
624
cpu->offset = le64_to_cpu(disk->offset);
fs/btrfs/accessors.h
625
cpu->type = disk->type;
fs/btrfs/accessors.h
626
cpu->objectid = le64_to_cpu(disk->objectid);
fs/btrfs/accessors.h
630
const struct btrfs_key *cpu)
fs/btrfs/accessors.h
632
disk->offset = cpu_to_le64(cpu->offset);
fs/btrfs/accessors.h
633
disk->type = cpu->type;
fs/btrfs/accessors.h
634
disk->objectid = cpu_to_le64(cpu->objectid);
fs/btrfs/volumes.c
3742
static void btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
fs/btrfs/volumes.c
3745
memset(cpu, 0, sizeof(*cpu));
fs/btrfs/volumes.c
3747
cpu->profiles = le64_to_cpu(disk->profiles);
fs/btrfs/volumes.c
3748
cpu->usage = le64_to_cpu(disk->usage);
fs/btrfs/volumes.c
3749
cpu->devid = le64_to_cpu(disk->devid);
fs/btrfs/volumes.c
3750
cpu->pstart = le64_to_cpu(disk->pstart);
fs/btrfs/volumes.c
3751
cpu->pend = le64_to_cpu(disk->pend);
fs/btrfs/volumes.c
3752
cpu->vstart = le64_to_cpu(disk->vstart);
fs/btrfs/volumes.c
3753
cpu->vend = le64_to_cpu(disk->vend);
fs/btrfs/volumes.c
3754
cpu->target = le64_to_cpu(disk->target);
fs/btrfs/volumes.c
3755
cpu->flags = le64_to_cpu(disk->flags);
fs/btrfs/volumes.c
3756
cpu->limit = le64_to_cpu(disk->limit);
fs/btrfs/volumes.c
3757
cpu->stripes_min = le32_to_cpu(disk->stripes_min);
fs/btrfs/volumes.c
3758
cpu->stripes_max = le32_to_cpu(disk->stripes_max);
fs/btrfs/volumes.c
3762
const struct btrfs_balance_args *cpu)
fs/btrfs/volumes.c
3766
disk->profiles = cpu_to_le64(cpu->profiles);
fs/btrfs/volumes.c
3767
disk->usage = cpu_to_le64(cpu->usage);
fs/btrfs/volumes.c
3768
disk->devid = cpu_to_le64(cpu->devid);
fs/btrfs/volumes.c
3769
disk->pstart = cpu_to_le64(cpu->pstart);
fs/btrfs/volumes.c
3770
disk->pend = cpu_to_le64(cpu->pend);
fs/btrfs/volumes.c
3771
disk->vstart = cpu_to_le64(cpu->vstart);
fs/btrfs/volumes.c
3772
disk->vend = cpu_to_le64(cpu->vend);
fs/btrfs/volumes.c
3773
disk->target = cpu_to_le64(cpu->target);
fs/btrfs/volumes.c
3774
disk->flags = cpu_to_le64(cpu->flags);
fs/btrfs/volumes.c
3775
disk->limit = cpu_to_le64(cpu->limit);
fs/btrfs/volumes.c
3776
disk->stripes_min = cpu_to_le32(cpu->stripes_min);
fs/btrfs/volumes.c
3777
disk->stripes_max = cpu_to_le32(cpu->stripes_max);
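
In the btrfs entries above, "cpu" is not a processor: it names the CPU-byte-order copy of an on-disk little-endian structure, and each field crosses that boundary through the le64_to_cpu()/cpu_to_le64() pair (or the 32-bit variants). The two directions are exact inverses:

	__le64 disk_val = cpu_to_le64(native_val);	/* native -> on-disk LE */
	u64 native_back = le64_to_cpu(disk_val);	/* on-disk LE -> native */
	/* On little-endian hosts both are no-ops; on big-endian hosts
	 * they byte-swap, so the on-disk format stays portable. */
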
fs/buffer.c
1542
bool has_bh_in_lru(int cpu, void *dummy)
fs/buffer.c
1544
struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
fs/buffer.c
3050
static int buffer_exit_cpu_dead(unsigned int cpu)
fs/buffer.c
3053
struct bh_lru *b = &per_cpu(bh_lrus, cpu);
fs/buffer.c
3059
this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
fs/buffer.c
3060
per_cpu(bh_accounting, cpu).nr = 0;
fs/coredump.c
1177
.cpu = raw_smp_processor_id(),
fs/coredump.c
436
err = cn_printf(cn, "%d", cprm->cpu);
fs/erofs/zdata.c
297
unsigned int cpu;
fs/erofs/zdata.c
299
for_each_possible_cpu(cpu) {
fs/erofs/zdata.c
301
z_erofs_pcpu_workers[cpu], 1);
fs/erofs/zdata.c
302
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
fs/erofs/zdata.c
309
static struct kthread_worker *erofs_init_percpu_worker(int cpu)
fs/erofs/zdata.c
312
kthread_run_worker_on_cpu(cpu, 0, "erofs_worker/%u");
fs/erofs/zdata.c
324
unsigned int cpu;
fs/erofs/zdata.c
331
for_each_online_cpu(cpu) { /* could miss cpu{off,on}line? */
fs/erofs/zdata.c
332
worker = erofs_init_percpu_worker(cpu);
fs/erofs/zdata.c
334
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
fs/erofs/zdata.c
343
static int erofs_cpu_online(unsigned int cpu)
fs/erofs/zdata.c
347
worker = erofs_init_percpu_worker(cpu);
fs/erofs/zdata.c
352
old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
fs/erofs/zdata.c
355
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
fs/erofs/zdata.c
362
static int erofs_cpu_offline(unsigned int cpu)
fs/erofs/zdata.c
367
worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
fs/erofs/zdata.c
369
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
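
The erofs hotplug callbacks above publish and retire per-CPU kthread workers through RCU: rcu_assign_pointer() installs a pointer, rcu_dereference_protected() fetches the old value under the update-side lock, and NULL retires the slot. A generic sketch of that publish/retire pattern, with worker_lock as an assumed update-side lock:

	spin_lock(&worker_lock);
	old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
					lockdep_is_held(&worker_lock));
	rcu_assign_pointer(z_erofs_pcpu_workers[cpu], new_worker);
	spin_unlock(&worker_lock);

	if (old) {
		synchronize_rcu();		/* wait out readers of the old pointer */
		kthread_destroy_worker(old);
	}
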
fs/fuse/virtio_fs.c
236
unsigned int cpu, qid;
fs/fuse/virtio_fs.c
245
for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
fs/fuse/virtio_fs.c
246
if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid)) {
fs/fuse/virtio_fs.c
248
ret = snprintf(buf + pos, size - pos, "%u", cpu);
fs/fuse/virtio_fs.c
250
ret = snprintf(buf + pos, size - pos, ", %u", cpu);
fs/fuse/virtio_fs.c
862
unsigned int q, cpu, nr_masks;
fs/fuse/virtio_fs.c
875
for_each_cpu(cpu, mask)
fs/fuse/virtio_fs.c
876
fs->mq_map[cpu] = q + VQ_REQUEST;
fs/fuse/virtio_fs.c
885
for_each_possible_cpu(cpu)
fs/fuse/virtio_fs.c
886
fs->mq_map[cpu] = VQ_REQUEST;
fs/fuse/virtio_fs.c
891
for_each_cpu(cpu, &masks[q % nr_masks])
fs/fuse/virtio_fs.c
892
fs->mq_map[cpu] = q + VQ_REQUEST;
fs/gfs2/rgrp.c
1933
int cpu, nonzero = 0;
fs/gfs2/rgrp.c
1936
for_each_present_cpu(cpu) {
fs/gfs2/rgrp.c
1937
st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
fs/namespace.c
273
int cpu;
fs/namespace.c
275
for_each_possible_cpu(cpu) {
fs/namespace.c
276
count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
fs/namespace.c
387
int cpu;
fs/namespace.c
389
for_each_possible_cpu(cpu) {
fs/namespace.c
390
count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
fs/nfs/super.c
653
int i, cpu;
fs/nfs/super.c
704
for_each_possible_cpu(cpu) {
fs/nfs/super.c
708
stats = per_cpu_ptr(nfss->io_stats, cpu);
fs/proc/stat.c
19
#define arch_irq_stat_cpu(cpu) 0
fs/proc/stat.c
25
u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
fs/proc/stat.c
29
if (cpu_online(cpu))
fs/proc/stat.c
30
idle_usecs = get_cpu_idle_time_us(cpu, NULL);
fs/proc/stat.c
41
static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu)
fs/proc/stat.c
45
if (cpu_online(cpu))
fs/proc/stat.c
46
iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);
fs/resctrl/ctrlmondata.c
558
int cpu;
fs/resctrl/ctrlmondata.c
587
cpu = cpumask_any_housekeeping(cpumask, RESCTRL_PICK_ANY_CPU);
fs/resctrl/ctrlmondata.c
595
if (tick_nohz_full_cpu(cpu))
fs/resctrl/ctrlmondata.c
598
smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);
fs/resctrl/ctrlmondata.c
683
int domid, cpu, ret = 0;
fs/resctrl/ctrlmondata.c
722
cpu = cpumask_any(&d->hdr.cpu_mask);
fs/resctrl/ctrlmondata.c
723
ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);
fs/resctrl/internal.h
27
unsigned int cpu;
fs/resctrl/internal.h
31
cpu = cpumask_any_andnot_but(mask, tick_nohz_full_mask, exclude_cpu);
fs/resctrl/internal.h
32
if (cpu < nr_cpu_ids)
fs/resctrl/internal.h
33
return cpu;
fs/resctrl/monitor.c
420
int cpu = smp_processor_id();
fs/resctrl/monitor.c
454
if (!cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
fs/resctrl/monitor.c
473
int cpu = smp_processor_id();
fs/resctrl/monitor.c
492
if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
fs/resctrl/monitor.c
624
static struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu,
fs/resctrl/monitor.c
633
if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
fs/resctrl/monitor.c
825
int cpu;
fs/resctrl/monitor.c
827
cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
fs/resctrl/monitor.c
828
dom->cqm_work_cpu = cpu;
fs/resctrl/monitor.c
830
if (cpu < nr_cpu_ids)
fs/resctrl/monitor.c
831
schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
fs/resctrl/monitor.c
891
int cpu;
fs/resctrl/monitor.c
899
cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
fs/resctrl/monitor.c
900
dom->mbm_work_cpu = cpu;
fs/resctrl/monitor.c
902
if (cpu < nr_cpu_ids)
fs/resctrl/monitor.c
903
schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
fs/resctrl/pseudo_lock.c
153
int cpu;
fs/resctrl/pseudo_lock.c
156
for_each_cpu(cpu, &plr->d->hdr.cpu_mask) {
fs/resctrl/pseudo_lock.c
163
ret = dev_pm_qos_add_request(get_cpu_device(cpu),
fs/resctrl/pseudo_lock.c
169
cpu);
fs/resctrl/pseudo_lock.c
235
plr->cpu = cpumask_first(&plr->d->hdr.cpu_mask);
fs/resctrl/pseudo_lock.c
237
if (!cpu_online(plr->cpu)) {
fs/resctrl/pseudo_lock.c
239
plr->cpu);
fs/resctrl/pseudo_lock.c
244
ci = get_cpu_cacheinfo_level(plr->cpu, scope);
fs/resctrl/pseudo_lock.c
693
unsigned int cpu;
fs/resctrl/pseudo_lock.c
710
cpu = cpumask_first(&plr->d->hdr.cpu_mask);
fs/resctrl/pseudo_lock.c
711
if (!cpu_online(cpu)) {
fs/resctrl/pseudo_lock.c
716
plr->cpu = cpu;
fs/resctrl/pseudo_lock.c
720
plr, cpu, "pseudo_lock_measure/%u");
fs/resctrl/pseudo_lock.c
723
plr, cpu, "pseudo_lock_measure/%u");
fs/resctrl/pseudo_lock.c
726
plr, cpu, "pseudo_lock_measure/%u");
fs/resctrl/pseudo_lock.c
824
plr->cpu, "pseudo_lock/%u");
fs/resctrl/rdtgroup.c
2489
int cpu = cpumask_any(&d->hdr.cpu_mask);
fs/resctrl/rdtgroup.c
2493
GFP_KERNEL, cpu_to_node(cpu));
fs/resctrl/rdtgroup.c
3956
int cpu;
fs/resctrl/rdtgroup.c
3967
for_each_cpu(cpu, &rdtgrp->cpu_mask)
fs/resctrl/rdtgroup.c
3968
resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid);
fs/resctrl/rdtgroup.c
4006
int cpu;
fs/resctrl/rdtgroup.c
4018
for_each_cpu(cpu, &rdtgrp->cpu_mask)
fs/resctrl/rdtgroup.c
4019
resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid);
fs/resctrl/rdtgroup.c
4468
void resctrl_online_cpu(unsigned int cpu)
fs/resctrl/rdtgroup.c
4472
cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
fs/resctrl/rdtgroup.c
4476
static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
fs/resctrl/rdtgroup.c
4481
if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
fs/resctrl/rdtgroup.c
4486
static struct rdt_l3_mon_domain *get_mon_domain_from_cpu(int cpu,
fs/resctrl/rdtgroup.c
4495
if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
fs/resctrl/rdtgroup.c
4502
void resctrl_offline_cpu(unsigned int cpu)
fs/resctrl/rdtgroup.c
4510
if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
fs/resctrl/rdtgroup.c
4511
clear_childcpus(rdtgrp, cpu);
fs/resctrl/rdtgroup.c
4519
d = get_mon_domain_from_cpu(cpu, l3);
fs/resctrl/rdtgroup.c
4521
if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) {
fs/resctrl/rdtgroup.c
4523
mbm_setup_overflow_handler(d, 0, cpu);
fs/resctrl/rdtgroup.c
4526
cpu == d->cqm_work_cpu && has_busy_rmid(d)) {
fs/resctrl/rdtgroup.c
4528
cqm_setup_limbo_handler(d, 0, cpu);
fs/seq_file.c
1096
seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos)
fs/seq_file.c
1100
for_each_possible_cpu(*cpu) {
fs/seq_file.c
1101
hlist_for_each(node, per_cpu_ptr(head, *cpu)) {
fs/seq_file.c
1121
int *cpu, loff_t *pos)
fs/seq_file.c
1130
for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids;
fs/seq_file.c
1131
*cpu = cpumask_next(*cpu, cpu_possible_mask)) {
fs/seq_file.c
1132
struct hlist_head *bucket = per_cpu_ptr(head, *cpu);
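
seq_hlist_start_percpu() and seq_hlist_next_percpu() (above) present one hlist per possible CPU as a single flat sequence for a seq_file; the int cursor they take tracks which CPU's list the walk is currently in. A sketch of ->start/->next callbacks built on them, assuming a per-CPU list my_list and an illustrative iterator kept in seq->private to carry the CPU cursor between calls:

	static DEFINE_PER_CPU(struct hlist_head, my_list);

	struct my_iter { int cpu; };		/* illustrative cursor state */

	static void *my_seq_start(struct seq_file *seq, loff_t *pos)
	{
		struct my_iter *iter = seq->private;

		return seq_hlist_start_percpu(&my_list, &iter->cpu, *pos);
	}

	static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		struct my_iter *iter = seq->private;

		return seq_hlist_next_percpu(v, &my_list, &iter->cpu, pos);
	}
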
fs/squashfs/decompressor_multi_percpu.c
32
int err, cpu;
fs/squashfs/decompressor_multi_percpu.c
38
for_each_possible_cpu(cpu) {
fs/squashfs/decompressor_multi_percpu.c
39
stream = per_cpu_ptr(percpu, cpu);
fs/squashfs/decompressor_multi_percpu.c
52
for_each_possible_cpu(cpu) {
fs/squashfs/decompressor_multi_percpu.c
53
stream = per_cpu_ptr(percpu, cpu);
fs/squashfs/decompressor_multi_percpu.c
66
int cpu;
fs/squashfs/decompressor_multi_percpu.c
69
for_each_possible_cpu(cpu) {
fs/squashfs/decompressor_multi_percpu.c
70
stream = per_cpu_ptr(percpu, cpu);
fs/xfs/xfs_icache.c
1969
cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask);
fs/xfs/xfs_icache.c
2284
int cpu;
fs/xfs/xfs_icache.c
2289
for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
fs/xfs/xfs_icache.c
2290
gc = per_cpu_ptr(mp->m_inodegc, cpu);
fs/xfs/xfs_icache.c
2305
int cpu;
fs/xfs/xfs_icache.c
2313
for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
fs/xfs/xfs_icache.c
2314
gc = per_cpu_ptr(mp->m_inodegc, cpu);
fs/xfs/xfs_icache.c
2319
mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
fs/xfs/xfs_icache.c
461
int cpu;
fs/xfs/xfs_icache.c
464
for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
fs/xfs/xfs_icache.c
465
gc = per_cpu_ptr(mp->m_inodegc, cpu);
fs/xfs/xfs_icache.c
467
mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
fs/xfs/xfs_icache.c
480
int cpu;
fs/xfs/xfs_icache.c
484
for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
fs/xfs/xfs_icache.c
487
gc = per_cpu_ptr(mp->m_inodegc, cpu);
fs/xfs/xfs_log_cil.c
124
int cpu;
fs/xfs/xfs_log_cil.c
126
for_each_cpu(cpu, &ctx->cil_pcpmask) {
fs/xfs/xfs_log_cil.c
127
cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
fs/xfs/xfs_log_cil.c
159
int cpu;
fs/xfs/xfs_log_cil.c
172
for_each_cpu(cpu, &ctx->cil_pcpmask) {
fs/xfs/xfs_log_cil.c
173
struct xlog_cil_pcp *cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
fs/xfs/xfs_log_cil.c
2007
int cpu;
fs/xfs/xfs_log_cil.c
2027
for_each_possible_cpu(cpu) {
fs/xfs/xfs_log_cil.c
2028
cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
fs/xfs/xfs_mount.h
72
unsigned int cpu;
fs/xfs/xfs_stats.c
12
int val = 0, cpu;
fs/xfs/xfs_stats.c
14
for_each_possible_cpu(cpu)
fs/xfs/xfs_stats.c
15
val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
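
The xfs_stats line above is the standard way to read per-CPU statistics: no CPU holds a global total, so a reader sums every possible CPU's copy. In its simplest form, for a single per-CPU counter (my_counter is illustrative):

	static DEFINE_PER_CPU(u64, my_counter);

	static u64 my_counter_sum(void)
	{
		u64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu(my_counter, cpu);
		return sum;	/* approximate: the counters keep moving */
	}
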
fs/xfs/xfs_super.c
1190
int cpu;
fs/xfs/xfs_super.c
1196
for_each_possible_cpu(cpu) {
fs/xfs/xfs_super.c
1197
gc = per_cpu_ptr(mp->m_inodegc, cpu);
fs/xfs/xfs_super.c
1198
gc->cpu = cpu;
include/acpi/cppc_acpi.h
153
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
include/acpi/cppc_acpi.h
154
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
include/acpi/cppc_acpi.h
155
extern int cppc_set_enable(int cpu, bool enable);
include/acpi/cppc_acpi.h
156
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
include/acpi/cppc_acpi.h
157
extern bool cppc_perf_ctrs_in_pcc_cpu(unsigned int cpu);
include/acpi/cppc_acpi.h
163
extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
include/acpi/cppc_acpi.h
164
extern int cppc_get_transition_latency(int cpu);
include/acpi/cppc_acpi.h
170
extern int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable);
include/acpi/cppc_acpi.h
171
extern int cppc_set_epp(int cpu, u64 epp_val);
include/acpi/cppc_acpi.h
172
extern int cppc_get_auto_act_window(int cpu, u64 *auto_act_window);
include/acpi/cppc_acpi.h
173
extern int cppc_set_auto_act_window(int cpu, u64 auto_act_window);
include/acpi/cppc_acpi.h
174
extern int cppc_get_auto_sel(int cpu, bool *enable);
include/acpi/cppc_acpi.h
175
extern int cppc_set_auto_sel(int cpu, bool enable);
include/acpi/cppc_acpi.h
176
extern int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf);
include/acpi/cppc_acpi.h
177
extern int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator);
include/acpi/cppc_acpi.h
192
static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
include/acpi/cppc_acpi.h
196
static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
include/acpi/cppc_acpi.h
200
static inline int cppc_set_enable(int cpu, bool enable)
include/acpi/cppc_acpi.h
204
static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
include/acpi/cppc_acpi.h
208
static inline bool cppc_perf_ctrs_in_pcc_cpu(unsigned int cpu)
include/acpi/cppc_acpi.h
224
static inline int cppc_get_transition_latency(int cpu)
include/acpi/cppc_acpi.h
240
static inline int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
include/acpi/cppc_acpi.h
248
static inline int cppc_set_epp(int cpu, u64 epp_val)
include/acpi/cppc_acpi.h
252
static inline int cppc_get_auto_act_window(int cpu, u64 *auto_act_window)
include/acpi/cppc_acpi.h
256
static inline int cppc_set_auto_act_window(int cpu, u64 auto_act_window)
include/acpi/cppc_acpi.h
260
static inline int cppc_get_auto_sel(int cpu, bool *enable)
include/acpi/cppc_acpi.h
264
static inline int cppc_set_auto_sel(int cpu, bool enable)
include/acpi/cppc_acpi.h
268
static inline int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
include/acpi/cppc_acpi.h
272
static inline int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
include/acpi/processor.h
259
*performance, unsigned int cpu);
include/acpi/processor.h
260
extern void acpi_processor_unregister_performance(unsigned int cpu);
include/acpi/processor.h
278
unsigned int cpu);
include/acpi/processor.h
279
int acpi_processor_ffh_cstate_probe(unsigned int cpu,
include/acpi/processor.h
287
*flags, unsigned int cpu)
include/acpi/processor.h
292
static inline int acpi_processor_ffh_cstate_probe(unsigned int cpu,
include/acpi/processor.h
310
static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg,
include/acpi/processor.h
313
if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
include/acpi/processor.h
315
return work_on_cpu(cpu, fn, arg);
include/acpi/processor.h
326
extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
include/acpi/processor.h
352
static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
include/acpi/processor.h
427
int acpi_processor_ffh_lpi_probe(unsigned int cpu);
include/asm-generic/mshyperv.h
211
int hv_common_cpu_init(unsigned int cpu);
include/asm-generic/mshyperv.h
212
int hv_common_cpu_die(unsigned int cpu);
include/asm-generic/mshyperv.h
233
bool (*func)(int cpu))
include/asm-generic/mshyperv.h
235
int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
include/asm-generic/mshyperv.h
253
for_each_cpu(cpu, cpus) {
include/asm-generic/mshyperv.h
254
if (func && func(cpu))
include/asm-generic/mshyperv.h
256
vcpu = hv_cpu_number_to_vp_number(cpu);
include/asm-generic/mshyperv.h
285
bool (*func)(int cpu))
include/asm-generic/mshyperv.h
312
void hv_enable_coco_interrupt(unsigned int cpu, unsigned int vector, bool set);
include/asm-generic/numa.h
18
void numa_clear_node(unsigned int cpu);
include/asm-generic/numa.h
35
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
include/asm-generic/numa.h
36
int early_cpu_to_node(int cpu);
include/asm-generic/numa.h
37
void numa_store_cpu_info(unsigned int cpu);
include/asm-generic/numa.h
38
void numa_add_cpu(unsigned int cpu);
include/asm-generic/numa.h
39
void numa_remove_cpu(unsigned int cpu);
include/asm-generic/numa.h
43
static inline void numa_store_cpu_info(unsigned int cpu) { }
include/asm-generic/numa.h
44
static inline void numa_add_cpu(unsigned int cpu) { }
include/asm-generic/numa.h
45
static inline void numa_remove_cpu(unsigned int cpu) { }
include/asm-generic/numa.h
47
static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
include/asm-generic/numa.h
48
static inline int early_cpu_to_node(int cpu) { return 0; }
include/asm-generic/numa.h
53
void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable);
include/asm-generic/preempt.h
31
#define init_idle_preempt_count(p, cpu) do { \
include/asm-generic/topology.h
35
#define cpu_to_node(cpu) ((void)(cpu),0)
include/asm-generic/topology.h
41
#define set_cpu_numa_node(cpu, node)
include/asm-generic/topology.h
44
#define cpu_to_mem(cpu) ((void)(cpu),0)
include/asm-generic/topology.h
68
#define set_cpu_numa_mem(cpu, node)
include/clocksource/hyperv_timer.h
109
static inline int hv_stimer_cleanup(unsigned int cpu) { return 0; }
include/clocksource/hyperv_timer.h
110
static inline void hv_stimer_legacy_init(unsigned int cpu, int sint) {}
include/clocksource/hyperv_timer.h
111
static inline void hv_stimer_legacy_cleanup(unsigned int cpu) {}
include/clocksource/hyperv_timer.h
29
extern int hv_stimer_cleanup(unsigned int cpu);
include/clocksource/hyperv_timer.h
30
extern void hv_stimer_legacy_init(unsigned int cpu, int sint);
include/clocksource/hyperv_timer.h
31
extern void hv_stimer_legacy_cleanup(unsigned int cpu);
include/linux/acpi.h
1109
static inline acpi_handle acpi_get_processor_handle(int cpu)
include/linux/acpi.h
1543
int acpi_pptt_cpu_is_thread(unsigned int cpu);
include/linux/acpi.h
1544
int find_acpi_cpu_topology(unsigned int cpu, int level);
include/linux/acpi.h
1545
int find_acpi_cpu_topology_cluster(unsigned int cpu);
include/linux/acpi.h
1546
int find_acpi_cpu_topology_package(unsigned int cpu);
include/linux/acpi.h
1547
int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
include/linux/acpi.h
1552
static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
include/linux/acpi.h
1556
static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
include/linux/acpi.h
1560
static inline int find_acpi_cpu_topology_cluster(unsigned int cpu)
include/linux/acpi.h
1564
static inline int find_acpi_cpu_topology_package(unsigned int cpu)
include/linux/acpi.h
1568
static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
include/linux/acpi.h
307
int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
include/linux/acpi.h
311
static inline int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
include/linux/acpi.h
322
int acpi_unmap_cpu(int cpu);
include/linux/acpi.h
325
acpi_handle acpi_get_processor_handle(int cpu);
include/linux/alloc_tag.h
141
int cpu;
include/linux/alloc_tag.h
143
for_each_possible_cpu(cpu) {
include/linux/alloc_tag.h
144
counter = per_cpu_ptr(tag->counters, cpu);
include/linux/amd-iommu.h
33
extern int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr);
include/linux/amd-iommu.h
34
extern int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr);
include/linux/amd-iommu.h
45
static inline int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
include/linux/amd-iommu.h
50
static inline int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
include/linux/arch_topology.h
103
static inline bool topology_core_has_smt(int cpu)
include/linux/arch_topology.h
105
return cpu_topology[cpu].thread_id != -1;
include/linux/arch_topology.h
110
static inline bool topology_core_has_smt(int cpu) { return false; }
include/linux/arch_topology.h
15
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
include/linux/arch_topology.h
20
static inline unsigned long topology_get_freq_ref(int cpu)
include/linux/arch_topology.h
22
return per_cpu(capacity_freq_ref, cpu);
include/linux/arch_topology.h
27
static inline unsigned long topology_get_freq_scale(int cpu)
include/linux/arch_topology.h
29
return per_cpu(arch_freq_scale, cpu);
include/linux/arch_topology.h
54
static inline unsigned long topology_get_hw_pressure(int cpu)
include/linux/arch_topology.h
56
return per_cpu(hw_pressure, cpu);
include/linux/arch_topology.h
76
#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)
include/linux/arch_topology.h
77
#define topology_cluster_id(cpu) (cpu_topology[cpu].cluster_id)
include/linux/arch_topology.h
78
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
include/linux/arch_topology.h
79
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
include/linux/arch_topology.h
80
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
include/linux/arch_topology.h
81
#define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling)
include/linux/arch_topology.h
82
#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
include/linux/arch_topology.h
90
const struct cpumask *cpu_coregroup_mask(int cpu);
include/linux/arch_topology.h
91
const struct cpumask *cpu_clustergroup_mask(int cpu);
include/linux/arch_topology.h
92
void update_siblings_masks(unsigned int cpu);
include/linux/arch_topology.h
96
void freq_inv_set_max_ratio(int cpu, u64 max_rate);
include/linux/blkdev.h
1619
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
include/linux/bpf.h
112
void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
include/linux/bpf.h
3987
u32 cpu;
include/linux/bpf.h
4004
cpu = flags >> 32;
include/linux/bpf.h
4005
if ((flags & BPF_F_CPU) && cpu >= num_possible_cpus())
include/linux/buffer_head.h
524
bool has_bh_in_lru(int cpu, void *dummy);
include/linux/buffer_head.h
537
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
include/linux/cacheinfo.h
104
int acpi_get_cache_info(unsigned int cpu,
include/linux/cacheinfo.h
110
int acpi_get_cache_info(unsigned int cpu,
include/linux/cacheinfo.h
121
static inline struct cacheinfo *get_cpu_cacheinfo_level(int cpu, int level)
include/linux/cacheinfo.h
123
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
include/linux/cacheinfo.h
143
static inline int get_cpu_cacheinfo_id(int cpu, int level)
include/linux/cacheinfo.h
145
struct cacheinfo *ci = get_cpu_cacheinfo_level(cpu, level);
include/linux/cacheinfo.h
84
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
include/linux/cacheinfo.h
85
int early_cache_level(unsigned int cpu);
include/linux/cacheinfo.h
86
int init_cache_level(unsigned int cpu);
include/linux/cacheinfo.h
87
int init_of_cache_level(unsigned int cpu);
include/linux/cacheinfo.h
88
int populate_cache_leaves(unsigned int cpu);
include/linux/cacheinfo.h
89
int cache_setup_acpi(unsigned int cpu);
include/linux/cacheinfo.h
90
bool last_level_cache_is_valid(unsigned int cpu);
include/linux/cacheinfo.h
92
int fetch_cache_info(unsigned int cpu);
include/linux/cacheinfo.h
93
int detect_cache_attributes(unsigned int cpu);
include/linux/cgroup-defs.h
780
void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
include/linux/cgroup.h
718
void css_rstat_updated(struct cgroup_subsys_state *css, int cpu);
include/linux/cgroup_subsys.h
17
SUBSYS(cpu)
include/linux/clk/tegra.h
100
static inline void tegra_enable_cpu_clock(u32 cpu)
include/linux/clk/tegra.h
104
static inline void tegra_disable_cpu_clock(u32 cpu)
include/linux/clk/tegra.h
33
void (*wait_for_reset)(u32 cpu);
include/linux/clk/tegra.h
34
void (*put_in_reset)(u32 cpu);
include/linux/clk/tegra.h
35
void (*out_of_reset)(u32 cpu);
include/linux/clk/tegra.h
36
void (*enable_clock)(u32 cpu);
include/linux/clk/tegra.h
37
void (*disable_clock)(u32 cpu);
include/linux/clk/tegra.h
48
static inline void tegra_wait_cpu_in_reset(u32 cpu)
include/linux/clk/tegra.h
53
tegra_cpu_car_ops->wait_for_reset(cpu);
include/linux/clk/tegra.h
56
static inline void tegra_put_cpu_in_reset(u32 cpu)
include/linux/clk/tegra.h
61
tegra_cpu_car_ops->put_in_reset(cpu);
include/linux/clk/tegra.h
64
static inline void tegra_cpu_out_of_reset(u32 cpu)
include/linux/clk/tegra.h
69
tegra_cpu_car_ops->out_of_reset(cpu);
include/linux/clk/tegra.h
72
static inline void tegra_enable_cpu_clock(u32 cpu)
include/linux/clk/tegra.h
77
tegra_cpu_car_ops->enable_clock(cpu);
include/linux/clk/tegra.h
80
static inline void tegra_disable_cpu_clock(u32 cpu)
include/linux/clk/tegra.h
85
tegra_cpu_car_ops->disable_clock(cpu);
include/linux/clk/tegra.h
88
static inline void tegra_wait_cpu_in_reset(u32 cpu)
include/linux/clk/tegra.h
92
static inline void tegra_put_cpu_in_reset(u32 cpu)
include/linux/clk/tegra.h
96
static inline void tegra_cpu_out_of_reset(u32 cpu)
include/linux/clockchips.h
184
extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu);
include/linux/console.h
223
unsigned int cpu : 24;
include/linux/console.h
303
int cpu;
include/linux/context_tracking.h
14
extern void ct_cpu_track_user(int cpu);
include/linux/context_tracking_state.h
101
static __always_inline int ct_rcu_watching_cpu_acquire(int cpu)
include/linux/context_tracking_state.h
103
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
include/linux/context_tracking_state.h
113
static __always_inline long ct_nesting_cpu(int cpu)
include/linux/context_tracking_state.h
115
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
include/linux/context_tracking_state.h
125
static __always_inline long ct_nmi_nesting_cpu(int cpu)
include/linux/context_tracking_state.h
127
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
include/linux/context_tracking_state.h
141
static __always_inline bool context_tracking_enabled_cpu(int cpu)
include/linux/context_tracking_state.h
143
return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
include/linux/context_tracking_state.h
174
static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
include/linux/context_tracking_state.h
94
static __always_inline int ct_rcu_watching_cpu(int cpu)
include/linux/context_tracking_state.h
96
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
include/linux/coredump.h
24
int cpu;
include/linux/coresight-pmu.h
22
#define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) (0x10 + (cpu * 2))
include/linux/cpu.h
102
DECLARE_PER_CPU(struct cpu, cpu_devices);
include/linux/cpu.h
118
int add_cpu(unsigned int cpu);
include/linux/cpu.h
120
void notify_cpu_starting(unsigned int cpu);
include/linux/cpu.h
138
static inline int add_cpu(unsigned int cpu) { return 0;}
include/linux/cpu.h
151
int cpu = 0;
include/linux/cpu.h
154
cpu = -1;
include/linux/cpu.h
156
return freeze_secondary_cpus(cpu);
include/linux/cpu.h
38
extern int register_cpu(struct cpu *cpu, int num);
include/linux/cpu.h
39
extern struct device *get_cpu_device(unsigned cpu);
include/linux/cpu.h
40
extern bool cpu_is_hotpluggable(unsigned cpu);
include/linux/cpu.h
41
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
include/linux/cpu.h
43
int cpu, unsigned int *thread);
include/linux/cpu.h
92
extern bool arch_cpu_is_hotpluggable(int cpu);
include/linux/cpu.h
93
extern int arch_register_cpu(int cpu);
include/linux/cpu.h
94
extern void arch_unregister_cpu(int cpu);
include/linux/cpu.h
96
extern void unregister_cpu(struct cpu *cpu);
include/linux/cpu_rmap.h
42
static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
include/linux/cpu_rmap.h
44
return rmap->near[cpu].index;
include/linux/cpu_rmap.h
47
static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
include/linux/cpu_rmap.h
49
return rmap->obj[rmap->near[cpu].index];
include/linux/cpufeature.h
47
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
include/linux/cpufreq.h
1149
static inline int parse_perf_domain(int cpu, const char *list_name,
include/linux/cpufreq.h
1155
struct device_node *cpu_np __free(device_node) = of_cpu_device_node_get(cpu);
include/linux/cpufreq.h
1170
int cpu, ret;
include/linux/cpufreq.h
1179
for_each_possible_cpu(cpu) {
include/linux/cpufreq.h
1180
if (cpu == pcpu)
include/linux/cpufreq.h
1183
ret = parse_perf_domain(cpu, list_name, cell_name, &args);
include/linux/cpufreq.h
1188
cpumask_set_cpu(cpu, cpumask);
include/linux/cpufreq.h
1221
extern int arch_freq_get_on_cpu(int cpu);
include/linux/cpufreq.h
1237
unsigned int cpufreq_generic_get(unsigned int cpu);
include/linux/cpufreq.h
1246
dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
include/linux/cpufreq.h
186
unsigned int cpu;
include/linux/cpufreq.h
205
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
include/linux/cpufreq.h
206
struct cpufreq_policy *cpufreq_cpu_policy(unsigned int cpu);
include/linux/cpufreq.h
207
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
include/linux/cpufreq.h
210
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
include/linux/cpufreq.h
214
static inline struct cpufreq_policy *cpufreq_cpu_policy(unsigned int cpu)
include/linux/cpufreq.h
218
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
include/linux/cpufreq.h
239
unsigned int cpufreq_get(unsigned int cpu);
include/linux/cpufreq.h
240
unsigned int cpufreq_quick_get(unsigned int cpu);
include/linux/cpufreq.h
241
unsigned int cpufreq_quick_get_max(unsigned int cpu);
include/linux/cpufreq.h
242
unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
include/linux/cpufreq.h
245
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
include/linux/cpufreq.h
248
void cpufreq_update_policy(unsigned int cpu);
include/linux/cpufreq.h
249
void cpufreq_update_limits(unsigned int cpu);
include/linux/cpufreq.h
258
static inline unsigned long cpufreq_get_pressure(int cpu)
include/linux/cpufreq.h
260
return READ_ONCE(per_cpu(cpufreq_pressure, cpu));
include/linux/cpufreq.h
263
static inline unsigned int cpufreq_get(unsigned int cpu)
include/linux/cpufreq.h
267
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
include/linux/cpufreq.h
271
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
include/linux/cpufreq.h
275
static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
include/linux/cpufreq.h
284
static inline void cpufreq_update_limits(unsigned int cpu) { }
include/linux/cpufreq.h
285
static inline unsigned long cpufreq_get_pressure(int cpu)
include/linux/cpufreq.h
375
void (*adjust_perf)(unsigned int cpu,
include/linux/cpufreq.h
401
unsigned int (*get)(unsigned int cpu);
include/linux/cpufreq.h
407
int (*bios_limit)(int cpu, unsigned int *limit);
include/linux/cpufreq.h
61
unsigned int cpu; /* cpu managing this policy, must be online */
include/linux/cpufreq.h
620
void cpufreq_driver_adjust_perf(unsigned int cpu,
include/linux/cpuhotplug.h
252
int (*startup)(unsigned int cpu),
include/linux/cpuhotplug.h
253
int (*teardown)(unsigned int cpu), bool multi_instance);
include/linux/cpuhotplug.h
257
int (*startup)(unsigned int cpu),
include/linux/cpuhotplug.h
258
int (*teardown)(unsigned int cpu),
include/linux/cpuhotplug.h
273
int (*startup)(unsigned int cpu),
include/linux/cpuhotplug.h
274
int (*teardown)(unsigned int cpu))
include/linux/cpuhotplug.h
293
int (*startup)(unsigned int cpu),
include/linux/cpuhotplug.h
294
int (*teardown)(unsigned int cpu))
include/linux/cpuhotplug.h
313
int (*startup)(unsigned int cpu),
include/linux/cpuhotplug.h
314
int (*teardown)(unsigned int cpu))
include/linux/cpuhotplug.h
335
int (*startup)(unsigned int cpu),
include/linux/cpuhotplug.h
336
int (*teardown)(unsigned int cpu))
include/linux/cpuhotplug.h
356
int (*startup)(unsigned int cpu,
include/linux/cpuhotplug.h
358
int (*teardown)(unsigned int cpu,
include/linux/cpuhotplug.h
515
void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu);
include/linux/cpuhotplug.h
516
int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle);
include/linux/cpuhotplug.h
521
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu);
include/linux/cpuhotplug.h
524
static inline void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
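
All of the int (*startup)(unsigned int cpu) / int (*teardown)(unsigned int cpu) parameters above belong to the cpuhp state machine: a subsystem registers one callback pair and the core invokes it per CPU as CPUs come and go, and immediately for CPUs already online. A minimal registration sketch ("mydrv:online" and the callbacks are illustrative names):

	static int mydrv_cpu_online(unsigned int cpu)
	{
		/* set up this CPU's resources */
		return 0;
	}

	static int mydrv_cpu_offline(unsigned int cpu)
	{
		/* tear them down before the CPU goes away */
		return 0;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				mydrv_cpu_online, mydrv_cpu_offline);
	if (ret < 0)
		return ret;	/* for _DYN, ret >= 0 is the allocated state id */
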
include/linux/cpuhplock.h
28
void clear_tasks_mm_cpumask(int cpu);
include/linux/cpuhplock.h
29
int remove_cpu(unsigned int cpu);
include/linux/cpuhplock.h
44
static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
include/linux/cpuidle.h
307
extern s64 cpuidle_governor_latency_req(unsigned int cpu);
include/linux/cpuidle.h
97
unsigned int cpu;
include/linux/cpuidle_haltpoll.h
12
static inline void arch_haltpoll_disable(unsigned int cpu)
include/linux/cpuidle_haltpoll.h
8
static inline void arch_haltpoll_enable(unsigned int cpu)
include/linux/cpumask.h
1130
#define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
include/linux/cpumask.h
1131
#define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
include/linux/cpumask.h
1132
#define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
include/linux/cpumask.h
1134
#define for_each_possible_cpu_wrap(cpu, start) \
include/linux/cpumask.h
1135
for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
include/linux/cpumask.h
1136
#define for_each_online_cpu_wrap(cpu, start) \
include/linux/cpumask.h
1137
for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
include/linux/cpumask.h
1139
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
include/linux/cpumask.h
1140
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
include/linux/cpumask.h
1141
#define for_each_enabled_cpu(cpu) for_each_cpu((cpu), cpu_enabled_mask)
include/linux/cpumask.h
1142
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
include/linux/cpumask.h
1144
#define for_each_possible_cpu_wrap(cpu, start) \
include/linux/cpumask.h
1145
for_each_cpu_wrap((cpu), cpu_possible_mask, (start))
include/linux/cpumask.h
1146
#define for_each_online_cpu_wrap(cpu, start) \
include/linux/cpumask.h
1147
for_each_cpu_wrap((cpu), cpu_online_mask, (start))
include/linux/cpumask.h
1154
#define assign_cpu(cpu, mask, val) \
include/linux/cpumask.h
1155
assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
include/linux/cpumask.h
1157
#define __assign_cpu(cpu, mask, val) \
include/linux/cpumask.h
1158
__assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
include/linux/cpumask.h
1160
#define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
include/linux/cpumask.h
1161
#define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present))
include/linux/cpumask.h
1162
#define set_cpu_active(cpu, active) assign_cpu((cpu), &__cpu_active_mask, (active))
include/linux/cpumask.h
1163
#define set_cpu_dying(cpu, dying) assign_cpu((cpu), &__cpu_dying_mask, (dying))
include/linux/cpumask.h
1165
void set_cpu_online(unsigned int cpu, bool online);
include/linux/cpumask.h
1166
void set_cpu_possible(unsigned int cpu, bool possible);
include/linux/cpumask.h
1197
static __always_inline const struct cpumask *get_cpu_mask(unsigned int cpu)
include/linux/cpumask.h
1199
const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
include/linux/cpumask.h
1200
p -= cpu / BITS_PER_LONG;
include/linux/cpumask.h
1229
static __always_inline bool cpu_online(unsigned int cpu)
include/linux/cpumask.h
1231
return cpumask_test_cpu(cpu, cpu_online_mask);
include/linux/cpumask.h
1234
static __always_inline bool cpu_enabled(unsigned int cpu)
include/linux/cpumask.h
1236
return cpumask_test_cpu(cpu, cpu_enabled_mask);
include/linux/cpumask.h
1239
static __always_inline bool cpu_possible(unsigned int cpu)
include/linux/cpumask.h
1241
return cpumask_test_cpu(cpu, cpu_possible_mask);
include/linux/cpumask.h
1244
static __always_inline bool cpu_present(unsigned int cpu)
include/linux/cpumask.h
1246
return cpumask_test_cpu(cpu, cpu_present_mask);
include/linux/cpumask.h
1249
static __always_inline bool cpu_active(unsigned int cpu)
include/linux/cpumask.h
1251
return cpumask_test_cpu(cpu, cpu_active_mask);
include/linux/cpumask.h
1254
static __always_inline bool cpu_dying(unsigned int cpu)
include/linux/cpumask.h
1256
return cpumask_test_cpu(cpu, cpu_dying_mask);
include/linux/cpumask.h
1267
static __always_inline bool cpu_online(unsigned int cpu)
include/linux/cpumask.h
1269
return cpu == 0;
include/linux/cpumask.h
1272
static __always_inline bool cpu_possible(unsigned int cpu)
include/linux/cpumask.h
1274
return cpu == 0;
include/linux/cpumask.h
1277
static __always_inline bool cpu_enabled(unsigned int cpu)
include/linux/cpumask.h
1279
return cpu == 0;
include/linux/cpumask.h
1282
static __always_inline bool cpu_present(unsigned int cpu)
include/linux/cpumask.h
1284
return cpu == 0;
include/linux/cpumask.h
1287
static __always_inline bool cpu_active(unsigned int cpu)
include/linux/cpumask.h
1289
return cpu == 0;
include/linux/cpumask.h
1292
static __always_inline bool cpu_dying(unsigned int cpu)
include/linux/cpumask.h
1299
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
include/linux/cpumask.h
135
static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
include/linux/cpumask.h
138
WARN_ON_ONCE(cpu >= bits);
include/linux/cpumask.h
143
static __always_inline unsigned int cpumask_check(unsigned int cpu)
include/linux/cpumask.h
145
cpu_max_bits_warn(cpu, small_cpumask_bits);
include/linux/cpumask.h
146
return cpu;
include/linux/cpumask.h
379
#define for_each_cpu(cpu, mask) \
include/linux/cpumask.h
380
for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
include/linux/cpumask.h
392
#define for_each_cpu_wrap(cpu, mask, start) \
include/linux/cpumask.h
393
for_each_set_bit_wrap(cpu, cpumask_bits(mask), small_cpumask_bits, start)
include/linux/cpumask.h
409
#define for_each_cpu_and(cpu, mask1, mask2) \
include/linux/cpumask.h
410
for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
include/linux/cpumask.h
427
#define for_each_cpu_andnot(cpu, mask1, mask2) \
include/linux/cpumask.h
428
for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
include/linux/cpumask.h
444
#define for_each_cpu_or(cpu, mask1, mask2) \
include/linux/cpumask.h
445
for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
include/linux/cpumask.h
454
#define for_each_cpu_from(cpu, mask) \
include/linux/cpumask.h
455
for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)
include/linux/cpumask.h
467
unsigned int cpumask_any_but(const struct cpumask *mask, int cpu)
include/linux/cpumask.h
472
if (cpu != -1)
include/linux/cpumask.h
473
cpumask_check(cpu);
include/linux/cpumask.h
476
if (i != cpu)
include/linux/cpumask.h
493
int cpu)
include/linux/cpumask.h
498
if (cpu != -1)
include/linux/cpumask.h
499
cpumask_check(cpu);
include/linux/cpumask.h
502
if (i != cpu)
include/linux/cpumask.h
505
return cpumask_next_and(cpu, mask1, mask2);
include/linux/cpumask.h
520
int cpu)
include/linux/cpumask.h
525
if (cpu != -1)
include/linux/cpumask.h
526
cpumask_check(cpu);
include/linux/cpumask.h
529
if (i != cpu)
include/linux/cpumask.h
532
return cpumask_next_andnot(cpu, mask1, mask2);
include/linux/cpumask.h
543
unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
include/linux/cpumask.h
545
return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
include/linux/cpumask.h
557
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
include/linux/cpumask.h
561
small_cpumask_bits, cpumask_check(cpu));
include/linux/cpumask.h
574
unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
include/linux/cpumask.h
581
small_cpumask_bits, cpumask_check(cpu));
include/linux/cpumask.h
600
void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
include/linux/cpumask.h
602
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
include/linux/cpumask.h
606
void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
include/linux/cpumask.h
608
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
include/linux/cpumask.h
618
unsigned int cpu, unsigned int ncpus)
include/linux/cpumask.h
620
cpumask_check(cpu + ncpus - 1);
include/linux/cpumask.h
621
bitmap_clear(cpumask_bits(dstp), cpumask_check(cpu), ncpus);
include/linux/cpumask.h
629
static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
include/linux/cpumask.h
631
clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
include/linux/cpumask.h
634
static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
include/linux/cpumask.h
636
__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
include/linux/cpumask.h
647
bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
include/linux/cpumask.h
649
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
include/linux/cpumask.h
662
bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
include/linux/cpumask.h
664
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
include/linux/cpumask.h
677
bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
include/linux/cpumask.h
679
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
include/linux/cpumask.h
958
#define cpumask_of(cpu) (get_cpu_mask(cpu))
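The cpumask.h entries above cover mask allocation, per-CPU iteration, and the bit test/set helpers. A minimal sketch tying them together, assuming a kernel-module context and invented names (cpumask_demo; hotplug locking elided):

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/printk.h>

static void cpumask_demo(void)
{
	cpumask_var_t mask;
	int cpu;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	for_each_online_cpu(cpu)		/* walks cpu_online_mask */
		cpumask_set_cpu(cpu, mask);

	for_each_cpu(cpu, mask)			/* walks our private mask */
		pr_info("cpu%d is set\n", cpu);

	if (cpumask_test_cpu(0, mask))
		pr_info("CPU0 is in the mask\n");

	free_cpumask_var(mask);
}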
include/linux/crash_core.h
84
void crash_save_cpu(struct pt_regs *regs, int cpu);
include/linux/crash_core.h
95
static inline void crash_save_cpu(struct pt_regs *regs, int cpu) {};
include/linux/dca.h
36
int cpu);
include/linux/dca.h
55
u8 dca_get_tag(int cpu);
include/linux/dca.h
56
u8 dca3_get_tag(struct device *dev, int cpu);
include/linux/dmar.h
120
extern int enable_drhd_fault_handling(unsigned int cpu);
include/linux/dw_apb_timer.h
39
dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
include/linux/energy_model.h
171
struct em_perf_domain *em_cpu_get(int cpu);
include/linux/energy_model.h
189
void em_adjust_cpu_capacity(unsigned int cpu);
include/linux/energy_model.h
370
static inline struct em_perf_domain *em_cpu_get(int cpu)
include/linux/energy_model.h
421
static inline void em_adjust_cpu_capacity(unsigned int cpu) {}
include/linux/ftrace.h
1337
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
include/linux/ftrace.h
1357
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
include/linux/gfp.h
403
void setup_pcp_cacheinfo(unsigned int cpu);
include/linux/hrtimer.h
373
int hrtimers_prepare_cpu(unsigned int cpu);
include/linux/hrtimer.h
374
int hrtimers_cpu_starting(unsigned int cpu);
include/linux/hrtimer.h
376
int hrtimers_cpu_dying(unsigned int cpu);
include/linux/hrtimer_defs.h
83
unsigned int cpu;
include/linux/hw_breakpoint.h
121
int cpu) { return NULL; }
include/linux/hw_breakpoint.h
77
int cpu);
include/linux/hypervisor.h
15
static inline void hypervisor_pin_vcpu(int cpu)
include/linux/hypervisor.h
17
x86_platform.hyper.pin_vcpu(cpu);
include/linux/hypervisor.h
24
static inline void hypervisor_pin_vcpu(int cpu)
include/linux/intel_tcc.h
13
int intel_tcc_get_tjmax(int cpu);
include/linux/intel_tcc.h
14
int intel_tcc_get_offset(int cpu);
include/linux/intel_tcc.h
15
int intel_tcc_set_offset(int cpu, int offset);
include/linux/intel_tcc.h
16
int intel_tcc_get_temp(int cpu, int *temp, bool pkg);
include/linux/irq.h
1257
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
include/linux/irq.h
1267
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
include/linux/irq.h
1268
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
include/linux/irq.h
1270
int ipi_send_single(unsigned int virq, unsigned int cpu);
include/linux/irq.h
1274
int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu));
include/linux/irq.h
538
void (*ipi_send_single)(struct irq_data *data, unsigned int cpu);
include/linux/irq.h
611
extern int irq_affinity_online_cpu(unsigned int cpu);
include/linux/irq_work.h
53
bool irq_work_queue_on(struct irq_work *work, int cpu);
include/linux/irqchip/arm-gic.h
161
int gic_get_cpu_id(unsigned int cpu);
include/linux/irqdesc.h
145
unsigned int cpu)
include/linux/irqdesc.h
147
return desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
include/linux/irqflags.h
30
extern void lockdep_cleanup_dead_cpu(unsigned int cpu,
include/linux/irqflags.h
38
static inline void lockdep_cleanup_dead_cpu(unsigned int cpu,
include/linux/kdb.h
188
unsigned int cpu = task_cpu(p);
include/linux/kdb.h
189
if (cpu > num_possible_cpus())
include/linux/kdb.h
190
cpu = 0;
include/linux/kdb.h
191
return cpu;
include/linux/kernel_stat.h
104
enum cpu_usage_stat usage, int cpu);
include/linux/kernel_stat.h
105
extern void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu);
include/linux/kernel_stat.h
108
enum cpu_usage_stat usage, int cpu)
include/linux/kernel_stat.h
113
static inline void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
include/linux/kernel_stat.h
115
*dst = kcpustat_cpu(cpu);
include/linux/kernel_stat.h
127
extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);
include/linux/kernel_stat.h
51
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
include/linux/kernel_stat.h
52
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
include/linux/kernel_stat.h
54
extern unsigned long long nr_context_switches_cpu(int cpu);
include/linux/kernel_stat.h
57
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
include/linux/kernel_stat.h
65
static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
include/linux/kernel_stat.h
67
return kstat_cpu(cpu).softirqs[irq];
include/linux/kernel_stat.h
70
static inline unsigned int kstat_cpu_softirqs_sum(int cpu)
include/linux/kernel_stat.h
76
sum += kstat_softirqs_cpu(i, cpu);
include/linux/kernel_stat.h
97
static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu)
include/linux/kernel_stat.h
99
return kstat_cpu(cpu).irqs_sum;
include/linux/kgdb.h
318
extern int kgdb_nmicallback(int cpu, void *regs);
include/linux/kgdb.h
319
extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
include/linux/kgdb.h
353
static inline int kgdb_nmicallback(int cpu, void *regs) { return 1; }
include/linux/kthread.h
104
void kthread_bind(struct task_struct *k, unsigned int cpu);
include/linux/kthread.h
236
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
include/linux/kthread.h
251
kthread_run_worker_on_cpu(int cpu, unsigned int flags,
include/linux/kthread.h
256
kw = kthread_create_worker_on_cpu(cpu, flags, namefmt);
include/linux/kthread.h
51
unsigned int cpu,
include/linux/kthread.h
57
void kthread_set_per_cpu(struct task_struct *k, int cpu);
include/linux/kthread.h
92
unsigned int cpu, const char *namefmt)
include/linux/kthread.h
96
p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
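The kthread.h lines above show the per-CPU thread creation path. A hedged sketch of how they compose (demo_fn, demo_start, and the "demo/%u" name are invented; error handling is minimal):

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static int demo_fn(void *data)
{
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}

static struct task_struct *demo_start(unsigned int cpu)
{
	struct task_struct *p;

	/* Created bound to @cpu; namefmt receives the cpu number. */
	p = kthread_create_on_cpu(demo_fn, NULL, cpu, "demo/%u");
	if (!IS_ERR(p))
		wake_up_process(p);
	return p;
}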
include/linux/kvm_host.h
1612
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
include/linux/kvm_host.h
329
int cpu;
include/linux/lockdep_types.h
195
int cpu;
include/linux/mm.h
2013
static inline int cpu_pid_to_cpupid(int cpu, int pid)
include/linux/mm.h
2015
return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
include/linux/mmu_context.h
26
# define task_cpu_possible(cpu, p) true
include/linux/mmu_context.h
29
# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
include/linux/mvebu-pmsu.h
15
int mvebu_pmsu_dfs_request(int cpu);
include/linux/mvebu-pmsu.h
17
static inline int mvebu_pmsu_dfs_request(int cpu) { return -ENODEV; }
include/linux/nd.h
52
int cpu;
include/linux/netdevice.h
3552
unsigned int cpu;
include/linux/netdevice.h
4744
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
include/linux/netdevice.h
4748
WRITE_ONCE(txq->xmit_lock_owner, cpu);
include/linux/netdevice.h
4842
#define HARD_TX_LOCK(dev, txq, cpu) { \
include/linux/netdevice.h
4844
__netif_tx_lock(txq, cpu); \
include/linux/netdevice.h
4866
int cpu;
include/linux/netdevice.h
4869
cpu = smp_processor_id();
include/linux/netdevice.h
4874
__netif_tx_lock(txq, cpu);
include/linux/netdevice.h
4883
static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
include/linux/netdevice.h
4888
return READ_ONCE(txq->xmit_lock_owner) == cpu;
include/linux/netdevice.h
4892
static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
include/linux/netfilter/x_tables.h
432
xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
include/linux/netfilter/x_tables.h
435
return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);
include/linux/nmi.h
100
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
include/linux/nmi.h
118
void watchdog_hardlockup_enable(unsigned int cpu);
include/linux/nmi.h
119
void watchdog_hardlockup_disable(unsigned int cpu);
include/linux/nmi.h
177
static inline bool trigger_single_cpu_backtrace(int cpu)
include/linux/nmi.h
179
arch_trigger_cpumask_backtrace(cpumask_of(cpu), -1);
include/linux/nmi.h
202
static inline bool trigger_single_cpu_backtrace(int cpu)
include/linux/nmi.h
48
extern int lockup_detector_online_cpu(unsigned int cpu);
include/linux/nmi.h
49
extern int lockup_detector_offline_cpu(unsigned int cpu);
include/linux/nmi.h
99
void watchdog_hardlockup_touch_cpu(unsigned int cpu);
include/linux/node.h
179
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
include/linux/node.h
180
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
include/linux/node.h
198
static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid)
include/linux/node.h
202
static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
include/linux/objpool.h
126
static inline void *__objpool_try_get_slot(struct objpool_head *pool, int cpu)
include/linux/objpool.h
128
struct objpool_slot *slot = pool->cpu_slots[cpu];
include/linux/objpool.h
173
int start, cpu;
include/linux/objpool.h
179
for_each_possible_cpu_wrap(cpu, start) {
include/linux/objpool.h
180
obj = __objpool_try_get_slot(pool, cpu);
include/linux/objpool.h
191
__objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu)
include/linux/objpool.h
193
struct objpool_slot *slot = pool->cpu_slots[cpu];
include/linux/of.h
1531
#define for_each_of_cpu_node(cpu) \
include/linux/of.h
1532
for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \
include/linux/of.h
1533
cpu = of_get_next_cpu_node(cpu))
include/linux/of.h
309
extern int of_find_last_cache_level(unsigned int cpu);
include/linux/of.h
368
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
include/linux/of.h
369
extern struct device_node *of_cpu_device_node_get(int cpu);
include/linux/of.h
685
static inline struct device_node *of_get_cpu_node(int cpu,
include/linux/of.h
691
static inline struct device_node *of_cpu_device_node_get(int cpu)
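for_each_of_cpu_node() above iterates the device-tree /cpus children, taking and dropping node references internally. A small sketch under that assumption (demo_count_cpu_nodes is an invented name):

#include <linux/of.h>
#include <linux/printk.h>

static void demo_count_cpu_nodes(void)
{
	struct device_node *cpu;
	int n = 0;

	for_each_of_cpu_node(cpu)	/* refcounts handled per step */
		n++;
	pr_info("%d cpu device-tree nodes\n", n);
}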
include/linux/padata.h
101
int cpu;
include/linux/part_stat.h
28
#define part_stat_get_cpu(part, field, cpu) \
include/linux/part_stat.h
29
(per_cpu_ptr((part)->bd_stats, (cpu))->field)
include/linux/part_stat.h
79
#define part_stat_local_read_cpu(part, field, cpu) \
include/linux/part_stat.h
80
local_read(&(part_stat_get_cpu(part, field, cpu)))
include/linux/percpu-defs.h
237
#define per_cpu_ptr(ptr, cpu) \
include/linux/percpu-defs.h
240
SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))); \
include/linux/percpu-defs.h
261
#define per_cpu_ptr(ptr, cpu) \
include/linux/percpu-defs.h
263
(void)(cpu); \
include/linux/percpu-defs.h
273
#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
include/linux/percpu.h
108
typedef int (pcpu_fc_cpu_to_node_fn_t)(int cpu);
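per_cpu_ptr()/per_cpu() above resolve a per-CPU variable for a given CPU. A minimal sketch, assuming a statically defined counter (demo_count, demo_hit, and demo_total are invented names):

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, demo_count);

static void demo_hit(void)
{
	this_cpu_inc(demo_count);	/* fast local update */
}

static unsigned long demo_total(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)	/* remote, possibly stale reads */
		sum += per_cpu(demo_count, cpu);
	return sum;
}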
include/linux/perf/arm_pmu.h
192
int armpmu_request_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
include/linux/perf/arm_pmu.h
193
void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
include/linux/perf_event.h
1252
int cpu,
include/linux/perf_event.h
1322
u32 cpu;
include/linux/perf_event.h
2100
extern int perf_event_init_cpu(unsigned int cpu);
include/linux/perf_event.h
2101
extern int perf_event_exit_cpu(unsigned int cpu);
include/linux/perf_event.h
586
bool (*filter) (struct pmu *pmu, int cpu); /* optional */
include/linux/perf_event.h
856
int cpu;
include/linux/platform_data/arm-ux500-pm.h
16
bool prcmu_is_cpu_in_wfi(int cpu);
include/linux/pm_domain.h
290
int cpu;
include/linux/posix-timers.h
217
struct cpu_timer cpu;
include/linux/preempt.h
337
void (*sched_in)(struct preempt_notifier *notifier, int cpu);
include/linux/psci.h
17
bool psci_tos_resident_on(int cpu);
include/linux/pstore.h
240
pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu)
include/linux/pstore.h
242
rec->ip |= cpu;
include/linux/pstore.h
264
pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu)
include/linux/pstore.h
267
rec->ts |= cpu;
include/linux/random.h
134
int random_prepare_cpu(unsigned int cpu);
include/linux/random.h
135
int random_online_cpu(unsigned int cpu);
include/linux/ras.h
41
u32 cpu;
include/linux/rcupdate.h
150
int rcu_nocb_cpu_offload(int cpu);
include/linux/rcupdate.h
151
int rcu_nocb_cpu_deoffload(int cpu);
include/linux/rcupdate.h
159
static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
include/linux/rcupdate.h
160
static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
include/linux/rcutiny.h
142
static inline void rcutree_report_cpu_starting(unsigned int cpu) { }
include/linux/rcutree.h
108
int rcutree_prepare_cpu(unsigned int cpu);
include/linux/rcutree.h
109
int rcutree_online_cpu(unsigned int cpu);
include/linux/rcutree.h
110
void rcutree_report_cpu_starting(unsigned int cpu);
include/linux/rcutree.h
113
int rcutree_dead_cpu(unsigned int cpu);
include/linux/rcutree.h
114
int rcutree_dying_cpu(unsigned int cpu);
include/linux/rcutree.h
115
int rcutree_offline_cpu(unsigned int cpu);
include/linux/rcutree.h
122
void rcutree_migrate_callbacks(int cpu);
include/linux/relay.h
179
unsigned int cpu,
include/linux/relay.h
292
int relay_prepare_cpu(unsigned int cpu);
include/linux/relay.h
69
unsigned int cpu; /* this buf's cpu */
include/linux/resctrl.h
106
int cpu;
include/linux/resctrl.h
516
void resctrl_online_cpu(unsigned int cpu);
include/linux/resctrl.h
517
void resctrl_offline_cpu(unsigned int cpu);
include/linux/ring_buffer.h
124
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
include/linux/ring_buffer.h
126
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
include/linux/ring_buffer.h
128
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
134
int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);
include/linux/ring_buffer.h
151
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
include/linux/ring_buffer.h
154
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
include/linux/ring_buffer.h
158
ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags);
include/linux/ring_buffer.h
168
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
171
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
177
struct trace_buffer *buffer_b, int cpu);
include/linux/ring_buffer.h
181
struct trace_buffer *buffer_b, int cpu)
include/linux/ring_buffer.h
188
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
196
bool ring_buffer_record_is_on_cpu(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
197
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
198
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
200
u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
201
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
204
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
205
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
206
unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
207
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
208
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
212
int cpu, u64 *ts);
include/linux/ring_buffer.h
218
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
222
ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
223
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
include/linux/ring_buffer.h
227
size_t len, int cpu, int full);
include/linux/ring_buffer.h
244
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
include/linux/ring_buffer.h
249
int ring_buffer_map(struct trace_buffer *buffer, int cpu,
include/linux/ring_buffer.h
251
void ring_buffer_map_dup(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
252
int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
include/linux/ring_buffer.h
253
int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu);
include/linux/sbitmap.h
485
unsigned int *cpu)
include/linux/sbitmap.h
489
*cpu = get_cpu();
include/linux/sbitmap.h
520
unsigned int cpu);
include/linux/sched.h
1886
extern int dl_bw_alloc(int cpu, u64 dl_bw);
include/linux/sched.h
1887
extern void dl_bw_free(int cpu, u64 dl_bw);
include/linux/sched.h
1923
extern int idle_cpu(int cpu);
include/linux/sched.h
1932
extern struct task_struct *idle_task(int cpu);
include/linux/sched.h
1945
extern struct task_struct *curr_task(int cpu);
include/linux/sched.h
1946
extern void ia64_set_curr_task(int cpu, struct task_struct *p);
include/linux/sched.h
2259
return READ_ONCE(task_thread_info(p)->cpu);
include/linux/sched.h
2262
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
include/linux/sched.h
2271
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
include/linux/sched.h
2284
extern struct task_struct *cpu_curr_snapshot(int cpu);
include/linux/sched.h
2295
static inline bool vcpu_is_preempted(int cpu)
include/linux/sched.h
2318
unsigned long sched_cpu_util(int cpu);
include/linux/sched.h
2325
extern int sched_core_idle_cpu(int cpu);
include/linux/sched.h
2329
static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
include/linux/sched.h
2332
extern void sched_set_stop_task(int cpu, struct task_struct *stop);
include/linux/sched.h
389
unsigned int cpu;
include/linux/sched/clock.h
30
extern u64 sched_clock_cpu(int cpu);
include/linux/sched/clock.h
52
static inline u64 cpu_clock(int cpu)
include/linux/sched/clock.h
91
static inline u64 cpu_clock(int cpu)
include/linux/sched/clock.h
93
return sched_clock_cpu(cpu);
include/linux/sched/cpufreq.h
20
void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
include/linux/sched/cpufreq.h
23
void cpufreq_remove_update_util_hook(int cpu);
include/linux/sched/cputime.h
190
u64 dummy_steal_clock(int cpu);
include/linux/sched/cputime.h
194
static inline u64 paravirt_steal_clock(int cpu)
include/linux/sched/cputime.h
196
return static_call(pv_steal_clock)(cpu);
include/linux/sched/deadline.h
35
extern void dl_clear_root_domain_cpu(int cpu);
include/linux/sched/deadline.h
38
extern bool dl_bw_visited(int cpu, u64 cookie);
include/linux/sched/debug.h
12
extern void dump_cpu_task(int cpu);
include/linux/sched/ext.h
233
bool scx_hardlockup(int cpu);
include/linux/sched/ext.h
241
static inline bool scx_hardlockup(int cpu) { return false; }
include/linux/sched/hotplug.h
10
extern int sched_cpu_activate(unsigned int cpu);
include/linux/sched/hotplug.h
11
extern int sched_cpu_deactivate(unsigned int cpu);
include/linux/sched/hotplug.h
14
extern int sched_cpu_wait_empty(unsigned int cpu);
include/linux/sched/hotplug.h
15
extern int sched_cpu_dying(unsigned int cpu);
include/linux/sched/hotplug.h
9
extern int sched_cpu_starting(unsigned int cpu);
include/linux/sched/idle.h
14
extern void wake_up_if_idle(int cpu);
include/linux/sched/isolation.h
41
extern bool housekeeping_test_cpu(int cpu, enum hk_type type);
include/linux/sched/isolation.h
65
static inline bool housekeeping_test_cpu(int cpu, enum hk_type type)
include/linux/sched/isolation.h
74
static inline bool housekeeping_cpu(int cpu, enum hk_type type)
include/linux/sched/isolation.h
78
return housekeeping_test_cpu(cpu, type);
include/linux/sched/isolation.h
83
static inline bool cpu_is_isolated(int cpu)
include/linux/sched/isolation.h
85
return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN);
include/linux/sched/nohz.h
10
extern void nohz_balance_enter_idle(int cpu);
include/linux/sched/nohz.h
13
static inline void nohz_balance_enter_idle(int cpu) { }
include/linux/sched/nohz.h
27
extern void wake_up_nohz_cpu(int cpu);
include/linux/sched/nohz.h
29
static inline void wake_up_nohz_cpu(int cpu) { }
include/linux/sched/stat.h
23
extern unsigned int nr_iowait_cpu(int cpu);
include/linux/sched/task.h
64
extern void init_idle(struct task_struct *idle, int cpu);
include/linux/sched/topology.h
188
extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio);
include/linux/sched/topology.h
213
unsigned long arch_scale_cpu_capacity(int cpu)
include/linux/sched/topology.h
221
unsigned long arch_scale_hw_pressure(int cpu)
include/linux/sched/topology.h
236
unsigned int arch_scale_freq_ref(int cpu)
include/linux/sched/topology.h
37
extern const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu);
include/linux/sched/topology.h
42
extern const struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu);
include/linux/sched/topology.h
47
extern const struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu);
include/linux/sched/topology.h
50
extern const struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu);
include/linux/sched/topology.h
52
extern int arch_asym_cpu_priority(int cpu);
include/linux/seq_file.h
330
extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos);
include/linux/seq_file.h
332
extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos);
include/linux/smp.h
135
extern void arch_smp_send_reschedule(int cpu);
include/linux/smp.h
140
#define smp_send_reschedule(cpu) ({ \
include/linux/smp.h
141
trace_ipi_send_cpu(cpu, _RET_IP_, NULL); \
include/linux/smp.h
142
arch_smp_send_reschedule(cpu); \
include/linux/smp.h
18
typedef bool (*smp_cond_func_t)(int cpu, void *info);
include/linux/smp.h
207
static inline void smp_send_reschedule(int cpu) { }
include/linux/smp.h
295
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
include/linux/smp.h
299
int smpcfd_prepare_cpu(unsigned int cpu);
include/linux/smp.h
300
int smpcfd_dead_cpu(unsigned int cpu);
include/linux/smp.h
301
int smpcfd_dying_cpu(unsigned int cpu);
include/linux/smp.h
45
extern void __smp_call_single_queue(int cpu, struct llist_node *node);
include/linux/smp.h
56
int smp_call_function_single_async(int cpu, call_single_data_t *csd);
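The smp.h declarations above are the cross-call entry points. A sketch of the synchronous single-CPU variant (demo_ipi and demo_call are invented; smp_call_function_single() is the non-async sibling of the async call listed):

#include <linux/smp.h>
#include <linux/printk.h>

static void demo_ipi(void *info)
{
	/* Runs in IPI context on the target CPU. */
	pr_info("running on cpu%d\n", smp_processor_id());
}

static int demo_call(int cpu)
{
	/* wait=1: block until demo_ipi() has finished on @cpu. */
	return smp_call_function_single(cpu, demo_ipi, NULL, 1);
}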
include/linux/smpboot.h
34
int (*thread_should_run)(unsigned int cpu);
include/linux/smpboot.h
35
void (*thread_fn)(unsigned int cpu);
include/linux/smpboot.h
36
void (*create)(unsigned int cpu);
include/linux/smpboot.h
37
void (*setup)(unsigned int cpu);
include/linux/smpboot.h
38
void (*cleanup)(unsigned int cpu, bool online);
include/linux/smpboot.h
39
void (*park)(unsigned int cpu);
include/linux/smpboot.h
40
void (*unpark)(unsigned int cpu);
include/linux/soc/renesas/rcar-sysc.h
5
int rcar_sysc_power_down_cpu(unsigned int cpu);
include/linux/soc/renesas/rcar-sysc.h
6
int rcar_sysc_power_up_cpu(unsigned int cpu);
include/linux/soc/samsung/exynos-regs-pmu.h
707
#define GS101_CPU_INFORM(cpu) (0x0860 + ((cpu) & 7) * 4)
include/linux/soc/samsung/exynos-regs-pmu.h
709
#define GS101_IROM_CPU_INFORM(cpu) (0x0890 + ((cpu) & 7) * 4)
include/linux/soc/samsung/exynos-regs-pmu.h
718
#define GS101_CLUSTER_CPU_OFFSET(cl, cpu) ((cl) + ((cpu) * 0x80))
include/linux/soc/samsung/exynos-regs-pmu.h
719
#define GS101_CLUSTER_CPU_CONFIGURATION(cl, cpu) \
include/linux/soc/samsung/exynos-regs-pmu.h
720
(GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x00)
include/linux/soc/samsung/exynos-regs-pmu.h
721
#define GS101_CLUSTER_CPU_STATUS(cl, cpu) \
include/linux/soc/samsung/exynos-regs-pmu.h
722
(GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x04)
include/linux/soc/samsung/exynos-regs-pmu.h
723
#define GS101_CLUSTER_CPU_STATES(cl, cpu) \
include/linux/soc/samsung/exynos-regs-pmu.h
724
(GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x08)
include/linux/soc/samsung/exynos-regs-pmu.h
725
#define GS101_CLUSTER_CPU_OPTION(cl, cpu) \
include/linux/soc/samsung/exynos-regs-pmu.h
726
(GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x0c)
include/linux/soc/samsung/exynos-regs-pmu.h
727
#define GS101_CLUSTER_CPU_OUT(cl, cpu) \
include/linux/soc/samsung/exynos-regs-pmu.h
728
(GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x20)
include/linux/soc/samsung/exynos-regs-pmu.h
729
#define GS101_CLUSTER_CPU_IN(cl, cpu) \
include/linux/soc/samsung/exynos-regs-pmu.h
730
(GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x24)
include/linux/soc/samsung/exynos-regs-pmu.h
731
#define GS101_CLUSTER_CPU_INT_IN(cl, cpu) \
include/linux/soc/samsung/exynos-regs-pmu.h
732
(GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x40)
include/linux/soc/samsung/exynos-regs-pmu.h
733
#define GS101_CLUSTER_CPU_INT_EN(cl, cpu) \
include/linux/soc/samsung/exynos-regs-pmu.h
734
(GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x44)
include/linux/soc/samsung/exynos-regs-pmu.h
735
#define GS101_CLUSTER_CPU_INT_TYPE(cl, cpu) \
include/linux/soc/samsung/exynos-regs-pmu.h
736
(GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x48)
include/linux/soc/samsung/exynos-regs-pmu.h
737
#define GS101_CLUSTER_CPU_INT_DIR(cl, cpu) \
include/linux/soc/samsung/exynos-regs-pmu.h
738
(GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x4c)
include/linux/srcutree.h
50
int cpu;
include/linux/stop_machine.h
159
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data);
include/linux/stop_machine.h
32
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
include/linux/stop_machine.h
34
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
include/linux/stop_machine.h
36
void stop_machine_park(int cpu);
include/linux/stop_machine.h
37
void stop_machine_unpark(int cpu);
include/linux/stop_machine.h
52
static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
include/linux/stop_machine.h
56
if (cpu == smp_processor_id())
include/linux/stop_machine.h
71
static inline bool stop_one_cpu_nowait(unsigned int cpu,
include/linux/stop_machine.h
75
if (cpu == smp_processor_id()) {
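stop_one_cpu() above runs a function from the target CPU's stopper thread, with the UP fallback shown executing it locally. A hedged sketch (demo_stop_fn and demo_quiesce are invented names):

#include <linux/stop_machine.h>

static int demo_stop_fn(void *arg)
{
	/* Runs on the target CPU; nothing else runs there meanwhile. */
	return 0;
}

static int demo_quiesce(unsigned int cpu)
{
	return stop_one_cpu(cpu, demo_stop_fn, NULL);
}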
include/linux/swap.h
363
extern void lru_add_drain_cpu(int cpu);
include/linux/syscalls.h
720
asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, void __user *cache);
include/linux/syscalls.h
871
pid_t pid, int cpu, int group_fd, unsigned long flags);
include/linux/tick.h
131
extern bool tick_nohz_tick_stopped_cpu(int cpu);
include/linux/tick.h
141
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
include/linux/tick.h
142
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
include/linux/tick.h
143
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
include/linux/tick.h
148
static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
include/linux/tick.h
165
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
include/linux/tick.h
166
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
include/linux/tick.h
202
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
include/linux/tick.h
203
extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
include/linux/tick.h
212
extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);
include/linux/tick.h
230
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
include/linux/tick.h
232
if (tick_nohz_full_cpu(cpu))
include/linux/tick.h
233
tick_nohz_dep_set_cpu(cpu, bit);
include/linux/tick.h
236
static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
include/linux/tick.h
238
if (tick_nohz_full_cpu(cpu))
include/linux/tick.h
239
tick_nohz_dep_clear_cpu(cpu, bit);
include/linux/tick.h
274
extern void tick_nohz_full_kick_cpu(int cpu);
include/linux/tick.h
279
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
include/linux/tick.h
281
static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
include/linux/tick.h
282
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
include/linux/tick.h
283
static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }
include/linux/tick.h
287
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
include/linux/tick.h
288
static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
include/linux/tick.h
299
static inline void tick_nohz_full_kick_cpu(int cpu) { }
include/linux/tick.h
30
extern int tick_cpu_dying(unsigned int cpu);
include/linux/timer.h
150
extern void add_timer_on(struct timer_list *timer, int cpu);
include/linux/timer.h
175
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
include/linux/timer.h
179
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
include/linux/timer.h
184
int timers_prepare_cpu(unsigned int cpu);
include/linux/timer.h
185
int timers_dead_cpu(unsigned int cpu);
include/linux/topology.h
105
static inline void set_cpu_numa_node(int cpu, int node)
include/linux/topology.h
107
per_cpu(numa_node, cpu) = node;
include/linux/topology.h
148
static inline int cpu_to_mem(int cpu)
include/linux/topology.h
150
return per_cpu(_numa_mem_, cpu);
include/linux/topology.h
155
static inline void set_cpu_numa_mem(int cpu, int node)
include/linux/topology.h
157
per_cpu(_numa_mem_, cpu) = node;
include/linux/topology.h
172
static inline int cpu_to_mem(int cpu)
include/linux/topology.h
174
return cpu_to_node(cpu);
include/linux/topology.h
194
#define topology_physical_package_id(cpu) ((void)(cpu), -1)
include/linux/topology.h
197
#define topology_die_id(cpu) ((void)(cpu), -1)
include/linux/topology.h
200
#define topology_cluster_id(cpu) ((void)(cpu), -1)
include/linux/topology.h
203
#define topology_core_id(cpu) ((void)(cpu), 0)
include/linux/topology.h
206
#define topology_book_id(cpu) ((void)(cpu), -1)
include/linux/topology.h
209
#define topology_drawer_id(cpu) ((void)(cpu), -1)
include/linux/topology.h
212
#define topology_ppin(cpu) ((void)(cpu), 0ull)
include/linux/topology.h
215
#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
include/linux/topology.h
218
#define topology_core_cpumask(cpu) cpumask_of(cpu)
include/linux/topology.h
221
#define topology_cluster_cpumask(cpu) cpumask_of(cpu)
include/linux/topology.h
224
#define topology_die_cpumask(cpu) cpumask_of(cpu)
include/linux/topology.h
227
#define topology_book_cpumask(cpu) cpumask_of(cpu)
include/linux/topology.h
230
#define topology_drawer_cpumask(cpu) cpumask_of(cpu)
include/linux/topology.h
234
static inline const struct cpumask *cpu_smt_mask(int cpu)
include/linux/topology.h
236
return topology_sibling_cpumask(cpu);
include/linux/topology.h
242
static inline bool topology_is_primary_thread(unsigned int cpu)
include/linux/topology.h
257
return cpu == cpumask_first(topology_sibling_cpumask(cpu));
include/linux/topology.h
263
static inline const struct cpumask *cpu_node_mask(int cpu)
include/linux/topology.h
265
return cpumask_of_node(cpu_to_node(cpu));
include/linux/topology.h
269
int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
include/linux/topology.h
272
static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
include/linux/topology.h
274
return cpumask_nth_and(cpu, cpus, cpu_online_mask);
include/linux/topology.h
334
static inline unsigned long topology_get_cpu_scale(int cpu)
include/linux/topology.h
336
return per_cpu(cpu_scale, cpu);
include/linux/topology.h
339
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
include/linux/topology.h
91
static inline int cpu_to_node(int cpu)
include/linux/topology.h
93
return per_cpu(numa_node, cpu);
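cpu_to_node() above maps a CPU to its NUMA node, which is the usual input to node-aware allocation. An illustrative sketch (demo_alloc_near is an invented name):

#include <linux/topology.h>
#include <linux/slab.h>

static void *demo_alloc_near(int cpu, size_t size)
{
	int node = cpu_to_node(cpu);	/* NUMA node backing @cpu */

	return kmalloc_node(size, GFP_KERNEL, node);
}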
include/linux/torture.h
61
bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes,
include/linux/torture.h
63
bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
include/linux/trace_events.h
128
int cpu;
include/linux/unwind_deferred_types.h
29
u32 cpu;
include/linux/vmstat.h
103
static inline void vm_events_fold_cpu(int cpu)
include/linux/vmstat.h
227
int cpu;
include/linux/vmstat.h
228
for_each_online_cpu(cpu)
include/linux/vmstat.h
229
x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];
include/linux/vmstat.h
295
void cpu_vm_stats_fold(int cpu);
include/linux/vmstat.h
399
static inline void cpu_vm_stats_fold(int cpu) { }
include/linux/vmstat.h
83
extern void vm_events_fold_cpu(int cpu);
include/linux/vtime.h
21
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
include/linux/vtime.h
27
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
include/linux/vtime.h
75
static inline bool vtime_accounting_enabled_cpu(int cpu)
include/linux/vtime.h
77
return context_tracking_enabled_cpu(cpu);
include/linux/workqueue.h
120
int cpu;
include/linux/workqueue.h
473
void workqueue_softirq_dead(unsigned int cpu);
include/linux/workqueue.h
593
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
include/linux/workqueue.h
597
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
include/linux/workqueue.h
599
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
include/linux/workqueue.h
634
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
include/linux/workqueue.h
709
static inline bool schedule_work_on(int cpu, struct work_struct *work)
include/linux/workqueue.h
711
return queue_work_on(cpu, system_percpu_wq, work);
include/linux/workqueue.h
807
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
include/linux/workqueue.h
810
return queue_delayed_work_on(cpu, system_percpu_wq, dwork, delay);
include/linux/workqueue.h
828
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
include/linux/workqueue.h
832
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
include/linux/workqueue.h
837
long work_on_cpu_key(int cpu, long (*fn)(void *),
include/linux/workqueue.h
866
void wq_watchdog_touch(int cpu);
include/linux/workqueue.h
868
static inline void wq_watchdog_touch(int cpu) { }
include/linux/workqueue.h
872
int workqueue_prepare_cpu(unsigned int cpu);
include/linux/workqueue.h
873
int workqueue_online_cpu(unsigned int cpu);
include/linux/workqueue.h
874
int workqueue_offline_cpu(unsigned int cpu);
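queue_work_on()/schedule_work_on() above target a specific CPU's worker pool. A minimal sketch (demo_work and its handler are invented; raw_smp_processor_id() avoids the preemption warning in a preemptible work handler):

#include <linux/workqueue.h>
#include <linux/printk.h>

static void demo_handler(struct work_struct *work)
{
	pr_info("work ran on cpu%d\n", raw_smp_processor_id());
}

static DECLARE_WORK(demo_work, demo_handler);

static bool demo_queue(int cpu)
{
	/* Queues on @cpu's pool of the system per-CPU workqueue. */
	return schedule_work_on(cpu, &demo_work);
}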
include/media/drv-intf/saa7146.h
66
__le32 *cpu;
include/net/gen_stats.h
50
struct gnet_stats_basic_sync __percpu *cpu,
include/net/gen_stats.h
53
struct gnet_stats_basic_sync __percpu *cpu,
include/net/gen_stats.h
56
struct gnet_stats_basic_sync __percpu *cpu,
include/net/ip.h
305
static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
include/net/ip.h
307
return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
include/net/ip.h
312
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
include/net/ip.h
316
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
include/net/ip.h
319
return snmp_get_cpu_field(mib, cpu, offct);
include/net/rps.h
32
u16 cpu;
include/net/sock.h
1193
int cpu = raw_smp_processor_id();
include/net/sock.h
1195
if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
include/net/sock.h
1196
WRITE_ONCE(sk->sk_incoming_cpu, cpu);
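The sock.h lines above use a check-before-write idiom so the shared field's cache line is only dirtied when the CPU actually changed. The same idiom as a generic sketch (demo_update_cpu is an invented name):

#include <linux/compiler.h>

static inline void demo_update_cpu(int *slot, int cpu)
{
	/* Tear-free read; write only on change to spare the line. */
	if (unlikely(READ_ONCE(*slot) != cpu))
		WRITE_ONCE(*slot, cpu);
}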
include/ras/ras_event.h
186
int cpu),
include/ras/ras_event.h
188
TP_ARGS(proc, pei_err, pei_len, ctx_err, ctx_len, oem, oem_len, sev, cpu),
include/ras/ras_event.h
203
__field(int, cpu)
include/ras/ras_event.h
230
__entry->cpu = cpu;
include/ras/ras_event.h
236
__entry->cpu,
include/rv/da_monitor.h
248
int cpu;
include/rv/da_monitor.h
250
for_each_cpu(cpu, cpu_online_mask) {
include/rv/da_monitor.h
251
da_mon = per_cpu_ptr(&DA_MON_NAME, cpu);
include/rv/da_monitor.h
292
int cpu;
include/rv/da_monitor.h
297
for_each_present_cpu(cpu)
include/rv/da_monitor.h
298
da_monitor_reset(da_get_monitor(idle_task(cpu)));
include/rv/ltl_monitor.h
53
int ret, cpu;
include/rv/ltl_monitor.h
68
for_each_present_cpu(cpu)
include/rv/ltl_monitor.h
69
ltl_task_init(idle_task(cpu), false);
include/soc/fsl/dpaa2-io.h
54
int cpu;
include/soc/fsl/dpaa2-io.h
69
struct dpaa2_io *dpaa2_io_service_select(int cpu);
include/soc/imx/revision.h
33
void imx_print_silicon_rev(const char *cpu, int srev);
include/soc/mscc/ocelot.h
1005
void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu);
include/soc/mscc/ocelot.h
1006
void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu);
include/soc/mscc/ocelot.h
1007
void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port, int cpu);
include/soc/tegra/pm.h
31
int tegra_pm_park_secondary_cpu(unsigned long cpu);
include/soc/tegra/pm.h
62
static inline int tegra_pm_park_secondary_cpu(unsigned long cpu)
include/sound/simple_card_utils.h
102
((cpu) = simple_props_to_dlc_cpu(props, i)); \
include/sound/simple_card_utils.h
121
#define for_each_prop_dai_cpu(props, i, cpu) \
include/sound/simple_card_utils.h
124
((cpu) = simple_props_to_dai_cpu(props, i)); \
include/sound/simple_card_utils.h
136
int cpu; /* turn for CPU / Codec */
include/sound/simple_card_utils.h
99
#define for_each_prop_dlc_cpus(props, i, cpu) \
include/sound/soc.h
697
unsigned int cpu;
include/sound/soc.h
847
#define for_each_link_cpus(link, i, cpu) \
include/sound/soc.h
850
((cpu) = snd_soc_link_to_cpu(link, i)); \
include/sound/soc.h
913
#define SND_SOC_DAILINK_REG2(cpu, codec) SND_SOC_DAILINK_REG3(cpu, codec, null_dailink_component)
include/sound/soc.h
914
#define SND_SOC_DAILINK_REG3(cpu, codec, platform) \
include/sound/soc.h
915
.cpus = cpu, \
include/sound/soc.h
916
.num_cpus = ARRAY_SIZE(cpu), \
include/sound/soc.h
928
#define SND_SOC_DAILINK_DEFS(name, cpu, codec, platform...) \
include/sound/soc.h
929
SND_SOC_DAILINK_DEF(name##_cpus, cpu); \
include/trace/events/cgroup.h
209
TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
include/trace/events/cgroup.h
211
TP_ARGS(cgrp, cpu, contended),
include/trace/events/cgroup.h
217
__field( int, cpu )
include/trace/events/cgroup.h
225
__entry->cpu = cpu;
include/trace/events/cgroup.h
231
__entry->cpu, __entry->contended)
include/trace/events/cgroup.h
241
TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
include/trace/events/cgroup.h
243
TP_ARGS(cgrp, cpu, contended)
include/trace/events/cgroup.h
248
TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
include/trace/events/cgroup.h
250
TP_ARGS(cgrp, cpu, contended)
include/trace/events/cgroup.h
255
TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
include/trace/events/cgroup.h
257
TP_ARGS(cgrp, cpu, contended)
include/trace/events/cpuhp.h
12
TP_PROTO(unsigned int cpu,
include/trace/events/cpuhp.h
17
TP_ARGS(cpu, target, idx, fun),
include/trace/events/cpuhp.h
20
__field( unsigned int, cpu )
include/trace/events/cpuhp.h
27
__entry->cpu = cpu;
include/trace/events/cpuhp.h
34
__entry->cpu, __entry->target, __entry->idx, __entry->fun)
include/trace/events/cpuhp.h
39
TP_PROTO(unsigned int cpu,
include/trace/events/cpuhp.h
45
TP_ARGS(cpu, target, idx, fun, node),
include/trace/events/cpuhp.h
48
__field( unsigned int, cpu )
include/trace/events/cpuhp.h
55
__entry->cpu = cpu;
include/trace/events/cpuhp.h
62
__entry->cpu, __entry->target, __entry->idx, __entry->fun)
include/trace/events/cpuhp.h
67
TP_PROTO(unsigned int cpu,
include/trace/events/cpuhp.h
72
TP_ARGS(cpu, state, idx, ret),
include/trace/events/cpuhp.h
75
__field( unsigned int, cpu )
include/trace/events/cpuhp.h
82
__entry->cpu = cpu;
include/trace/events/cpuhp.h
89
__entry->cpu, __entry->state, __entry->idx, __entry->ret)
include/trace/events/csd.h
12
TP_PROTO(const unsigned int cpu,
include/trace/events/csd.h
17
TP_ARGS(cpu, callsite, func, csd),
include/trace/events/csd.h
20
__field(unsigned int, cpu)
include/trace/events/csd.h
27
__entry->cpu = cpu;
include/trace/events/csd.h
34
__entry->cpu, __entry->callsite, __entry->func, __entry->csd)
include/trace/events/hw_pressure.h
11
TP_PROTO(int cpu, unsigned long hw_pressure),
include/trace/events/hw_pressure.h
12
TP_ARGS(cpu, hw_pressure),
include/trace/events/hw_pressure.h
16
__field(int, cpu)
include/trace/events/hw_pressure.h
21
__entry->cpu = cpu;
include/trace/events/hw_pressure.h
24
TP_printk("cpu=%d hw_pressure=%lu", __entry->cpu, __entry->hw_pressure)
include/trace/events/ipi.h
12
TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback),
include/trace/events/ipi.h
14
TP_ARGS(cpu, callsite, callback),
include/trace/events/ipi.h
17
__field(unsigned int, cpu)
include/trace/events/ipi.h
23
__entry->cpu = cpu;
include/trace/events/ipi.h
29
__entry->cpu, __entry->callsite, __entry->callback)
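The TP_PROTO/TP_ARGS/__field/__entry fragments recurring above all come from the TRACE_EVENT() macro family. A complete hypothetical event recording a CPU, with the usual header boilerplate (the "demo" system and event name are invented):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_cpu_event,
	TP_PROTO(unsigned int cpu),
	TP_ARGS(cpu),
	TP_STRUCT__entry(
		__field(unsigned int, cpu)
	),
	TP_fast_assign(
		__entry->cpu = cpu;
	),
	TP_printk("cpu=%u", __entry->cpu)
);

#endif /* _TRACE_DEMO_H */

#include <trace/define_trace.h>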
include/trace/events/irq_matrix.h
143
TP_PROTO(int bit, unsigned int cpu,
include/trace/events/irq_matrix.h
146
TP_ARGS(bit, cpu, matrix, cmap)
include/trace/events/irq_matrix.h
151
TP_PROTO(int bit, unsigned int cpu,
include/trace/events/irq_matrix.h
154
TP_ARGS(bit, cpu, matrix, cmap)
include/trace/events/irq_matrix.h
159
TP_PROTO(int bit, unsigned int cpu,
include/trace/events/irq_matrix.h
162
TP_ARGS(bit, cpu, matrix, cmap)
include/trace/events/irq_matrix.h
167
TP_PROTO(int bit, unsigned int cpu,
include/trace/events/irq_matrix.h
170
TP_ARGS(bit, cpu, matrix, cmap)
include/trace/events/irq_matrix.h
175
TP_PROTO(int bit, unsigned int cpu,
include/trace/events/irq_matrix.h
178
TP_ARGS(bit, cpu, matrix, cmap)
include/trace/events/irq_matrix.h
183
TP_PROTO(int bit, unsigned int cpu,
include/trace/events/irq_matrix.h
186
TP_ARGS(bit, cpu, matrix, cmap)
include/trace/events/irq_matrix.h
67
TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix,
include/trace/events/irq_matrix.h
70
TP_ARGS(bit, cpu, matrix, cmap),
include/trace/events/irq_matrix.h
74
__field( unsigned int, cpu )
include/trace/events/irq_matrix.h
87
__entry->cpu = cpu;
include/trace/events/irq_matrix.h
99
__entry->bit, __entry->cpu, __entry->online,
include/trace/events/mce.h
38
__field( u32, cpu )
include/trace/events/mce.h
61
__entry->cpu = err->m.extcpu;
include/trace/events/mce.h
73
__entry->cpu,
include/trace/events/power.h
16
DECLARE_EVENT_CLASS(cpu,
include/trace/events/power.h
185
DEFINE_EVENT(cpu, cpu_frequency,
include/trace/events/power.h
207
__entry->cpu_id = policy->cpu;
include/trace/events/power.h
36
DEFINE_EVENT(cpu, cpu_idle,
include/trace/events/rcu.h
286
TP_PROTO(const char *rcuname, int cpu, const char *reason),
include/trace/events/rcu.h
288
TP_ARGS(rcuname, cpu, reason),
include/trace/events/rcu.h
292
__field(int, cpu)
include/trace/events/rcu.h
298
__entry->cpu = cpu;
include/trace/events/rcu.h
302
TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
include/trace/events/rcu.h
413
TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
include/trace/events/rcu.h
415
TP_ARGS(rcuname, gp_seq, cpu, qsevent),
include/trace/events/rcu.h
420
__field(int, cpu)
include/trace/events/rcu.h
427
__entry->cpu = cpu;
include/trace/events/rcu.h
433
__entry->cpu, __entry->qsevent)
include/trace/events/rcu.h
802
TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
include/trace/events/rcu.h
804
TP_ARGS(rcuname, s, cpu, cnt, done),
include/trace/events/rcu.h
809
__field(int, cpu)
include/trace/events/rcu.h
817
__entry->cpu = cpu;
include/trace/events/rcu.h
823
__entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
include/trace/events/sched.h
814
TP_PROTO(int cpu),
include/trace/events/sched.h
816
TP_ARGS(cpu),
include/trace/events/sched.h
819
__field( int, cpu )
include/trace/events/sched.h
823
__entry->cpu = cpu;
include/trace/events/sched.h
826
TP_printk("cpu=%d", __entry->cpu)
include/trace/events/sched.h
896
TP_PROTO(struct task_struct *tsk, int cpu, int tif),
include/trace/events/sched.h
897
TP_ARGS(tsk, cpu, tif));
include/trace/events/timer.h
147
TP_PROTO(bool is_idle, unsigned int cpu),
include/trace/events/timer.h
149
TP_ARGS(is_idle, cpu),
include/trace/events/timer.h
153
__field( unsigned int, cpu )
include/trace/events/timer.h
158
__entry->cpu = cpu;
include/trace/events/timer.h
162
__entry->is_idle, __entry->cpu)
include/trace/events/timer_migration.h
149
__field( unsigned int, cpu )
include/trace/events/timer_migration.h
156
__entry->cpu = tmc->cpuevt.cpu;
include/trace/events/timer_migration.h
159
TP_printk("cpu=%d parent=%p wakeup=%llu", __entry->cpu, __entry->parent, __entry->wakeup)
include/trace/events/timer_migration.h
207
__field( unsigned int, cpu)
include/trace/events/timer_migration.h
214
__entry->cpu = tmc->cpuevt.cpu;
include/trace/events/timer_migration.h
218
__entry->cpu, __entry->parent, __entry->nextevt, __entry->wakeup)
include/trace/events/timer_migration.h
262
__entry->child_evtcpu = child ? child->groupevt.cpu : 0;
include/trace/events/timer_migration.h
71
__field( unsigned int, cpu )
include/trace/events/timer_migration.h
80
__entry->cpu = tmc->cpuevt.cpu;
include/trace/events/timer_migration.h
88
__entry->cpu, __entry->groupmask, __entry->parent,
include/trace/events/workqueue.h
35
__field( int, cpu )
include/trace/events/workqueue.h
43
__entry->cpu = pwq->pool->cpu;
include/trace/events/workqueue.h
48
__entry->req_cpu, __entry->cpu)
include/trace/events/xdp.h
182
__field(int, cpu)
include/trace/events/xdp.h
194
__entry->cpu = smp_processor_id();
include/trace/events/xdp.h
208
__entry->cpu, __entry->map_id,
include/trace/events/xdp.h
225
__field(int, cpu)
include/trace/events/xdp.h
234
__entry->cpu = smp_processor_id();
include/trace/events/xdp.h
244
__entry->cpu, __entry->map_id,
include/uapi/linux/blktrace_api.h
130
__u32 cpu; /* on what cpu did it happen */
include/uapi/linux/blktrace_api.h
145
__u32 cpu; /* on what cpu did it happen */
include/uapi/linux/bpf.h
1701
__u32 cpu;
include/uapi/linux/cn_proc.h
90
__u32 cpu;
include/uapi/linux/kvm.h
252
__u32 cpu;
include/uapi/linux/mshv.h
364
__u32 cpu;
include/uapi/linux/netfilter/xt_cpu.h
8
__u32 cpu;
include/xen/events.h
103
int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
include/xen/events.h
29
int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
include/xen/events.h
30
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
include/xen/events.h
35
unsigned int cpu,
include/xen/events.h
76
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
include/xen/hvm.h
63
int xen_set_upcall_vector(unsigned int cpu);
include/xen/interface/platform.h
281
uint32_t cpu; /* Physical cpu. */
include/xen/interface/xen-mca.h
354
__u8 cpu; /* cpu number; obsolete; use extcpu now */
include/xen/xen-ops.h
17
static inline uint32_t xen_vcpu_nr(int cpu)
include/xen/xen-ops.h
19
return per_cpu(xen_vcpu_id, cpu);
include/xen/xen-ops.h
36
void xen_setup_runstate_info(int cpu);
include/xen/xen-ops.h
39
u64 xen_steal_clock(int cpu);
init/main.c
903
int cpu;
init/main.c
906
for_each_possible_cpu(cpu)
init/main.c
907
set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
io_uring/io-wq.c
1403
unsigned int cpu;
io_uring/io-wq.c
1412
cpumask_set_cpu(od->cpu, worker->wq->cpu_mask);
io_uring/io-wq.c
1414
cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask);
io_uring/io-wq.c
1418
static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
io_uring/io-wq.c
1421
.cpu = cpu,
io_uring/io-wq.c
1431
static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
io_uring/io-wq.c
1435
return __io_wq_cpu_online(wq, cpu, true);
io_uring/io-wq.c
1438
static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
io_uring/io-wq.c
1442
return __io_wq_cpu_online(wq, cpu, false);
io_uring/sqpoll.c
495
int cpu = p->sq_thread_cpu;
io_uring/sqpoll.c
498
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
io_uring/sqpoll.c
505
if (!cpumask_test_cpu(cpu, allowed_mask)) {
io_uring/sqpoll.c
510
sqd->sq_cpu = cpu;
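The sqpoll lines above validate a user-supplied CPU before binding: range-check against nr_cpu_ids, then confirm it is online and allowed. The bare idiom as a sketch (demo_validate_cpu is invented; a real caller would hold cpus_read_lock() to stabilize the online check):

#include <linux/cpumask.h>
#include <linux/errno.h>

static int demo_validate_cpu(int cpu)
{
	if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -EINVAL;
	return 0;
}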
kernel/bpf/arraymap.c
296
static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
kernel/bpf/arraymap.c
301
if (cpu >= nr_cpu_ids)
kernel/bpf/arraymap.c
307
return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
kernel/bpf/arraymap.c
315
int cpu, off = 0;
kernel/bpf/arraymap.c
329
cpu = map_flags >> 32;
kernel/bpf/arraymap.c
330
copy_map_value(map, value, per_cpu_ptr(pptr, cpu));
kernel/bpf/arraymap.c
334
for_each_possible_cpu(cpu) {
kernel/bpf/arraymap.c
335
copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
kernel/bpf/arraymap.c
410
int cpu;
kernel/bpf/arraymap.c
434
cpu = map_flags >> 32;
kernel/bpf/arraymap.c
435
ptr = per_cpu_ptr(pptr, cpu);
kernel/bpf/arraymap.c
440
for_each_possible_cpu(cpu) {
kernel/bpf/arraymap.c
441
ptr = per_cpu_ptr(pptr, cpu);
kernel/bpf/arraymap.c
442
val = (map_flags & BPF_F_ALL_CPUS) ? value : value + size * cpu;
kernel/bpf/arraymap.c
485
int cpu;
kernel/bpf/arraymap.c
487
for_each_possible_cpu(cpu) {
kernel/bpf/arraymap.c
488
bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
kernel/bpf/arraymap.c
534
int cpu;
kernel/bpf/arraymap.c
540
for_each_possible_cpu(cpu) {
kernel/bpf/arraymap.c
541
seq_printf(m, "\tcpu%d: ", cpu);
kernel/bpf/arraymap.c
543
per_cpu_ptr(pptr, cpu), m);
kernel/bpf/arraymap.c
655
int off = 0, cpu = 0;
kernel/bpf/arraymap.c
674
for_each_possible_cpu(cpu) {
kernel/bpf/arraymap.c
676
per_cpu_ptr(pptr, cpu));
kernel/bpf/bpf_lru_list.c
346
int cpu,
kernel/bpf/bpf_lru_list.c
351
node->cpu = cpu;
kernel/bpf/bpf_lru_list.c
403
int cpu = raw_smp_processor_id();
kernel/bpf/bpf_lru_list.c
405
l = per_cpu_ptr(lru->percpu_lru, cpu);
kernel/bpf/bpf_lru_list.c
436
int cpu = raw_smp_processor_id();
kernel/bpf/bpf_lru_list.c
438
loc_l = per_cpu_ptr(clru->local_list, cpu);
kernel/bpf/bpf_lru_list.c
449
__local_list_add_pending(lru, loc_l, cpu, node, hash);
kernel/bpf/bpf_lru_list.c
484
__local_list_add_pending(lru, loc_l, cpu, node, hash);
kernel/bpf/bpf_lru_list.c
512
loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
kernel/bpf/bpf_lru_list.c
539
l = per_cpu_ptr(lru->percpu_lru, node->cpu);
kernel/bpf/bpf_lru_list.c
582
int cpu;
kernel/bpf/bpf_lru_list.c
589
for_each_possible_cpu(cpu) {
kernel/bpf/bpf_lru_list.c
592
l = per_cpu_ptr(lru->percpu_lru, cpu);
kernel/bpf/bpf_lru_list.c
595
node->cpu = cpu;
kernel/bpf/bpf_lru_list.c
619
static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
kernel/bpf/bpf_lru_list.c
626
loc_l->next_steal = cpu;
kernel/bpf/bpf_lru_list.c
649
int cpu;
kernel/bpf/bpf_lru_list.c
656
for_each_possible_cpu(cpu) {
kernel/bpf/bpf_lru_list.c
659
l = per_cpu_ptr(lru->percpu_lru, cpu);
kernel/bpf/bpf_lru_list.c
670
for_each_possible_cpu(cpu) {
kernel/bpf/bpf_lru_list.c
673
loc_l = per_cpu_ptr(clru->local_list, cpu);
kernel/bpf/bpf_lru_list.c
674
bpf_lru_locallist_init(loc_l, cpu);
kernel/bpf/bpf_lru_list.h
26
u16 cpu;
kernel/bpf/core.c
154
int cpu;
kernel/bpf/core.c
168
for_each_possible_cpu(cpu) {
kernel/bpf/core.c
171
pstats = per_cpu_ptr(prog->stats, cpu);
kernel/bpf/cpumap.c
433
u32 cpu)
kernel/bpf/cpumap.c
441
numa = cpu_to_node(cpu);
kernel/bpf/cpumap.c
469
rcpu->cpu = cpu;
kernel/bpf/cpumap.c
483
"cpumap/%d/map:%d", cpu,
kernel/bpf/cpumap.c
491
kthread_bind(rcpu->kthread, cpu);
kernel/bpf/cpumap.c
61
u32 cpu; /* kthread CPU and map index */
kernel/bpf/cpumap.c
724
const int to_cpu = rcpu->cpu;
kernel/bpf/cpumap.c
815
trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu);
kernel/bpf/cpumask.c
165
__bpf_kfunc void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
kernel/bpf/cpumask.c
167
if (!cpu_valid(cpu))
kernel/bpf/cpumask.c
170
cpumask_set_cpu(cpu, (struct cpumask *)cpumask);
kernel/bpf/cpumask.c
178
__bpf_kfunc void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
kernel/bpf/cpumask.c
180
if (!cpu_valid(cpu))
kernel/bpf/cpumask.c
183
cpumask_clear_cpu(cpu, (struct cpumask *)cpumask);
kernel/bpf/cpumask.c
195
__bpf_kfunc bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
kernel/bpf/cpumask.c
197
if (!cpu_valid(cpu))
kernel/bpf/cpumask.c
200
return cpumask_test_cpu(cpu, (struct cpumask *)cpumask);
kernel/bpf/cpumask.c
212
__bpf_kfunc bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
kernel/bpf/cpumask.c
214
if (!cpu_valid(cpu))
kernel/bpf/cpumask.c
217
return cpumask_test_and_set_cpu(cpu, (struct cpumask *)cpumask);
kernel/bpf/cpumask.c
230
__bpf_kfunc bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
kernel/bpf/cpumask.c
232
if (!cpu_valid(cpu))
kernel/bpf/cpumask.c
235
return cpumask_test_and_clear_cpu(cpu, (struct cpumask *)cpumask);
kernel/bpf/cpumask.c
32
static bool cpu_valid(u32 cpu)
kernel/bpf/cpumask.c
34
return cpu < nr_cpu_ids;
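Every bpf_cpumask kfunc above is bracketed by the same cpu_valid() guard, because a BPF caller can pass an arbitrary u32 CPU index. The pattern in isolation (function name illustrative):

#include <linux/cpumask.h>

static bool cpumask_set_cpu_checked(u32 cpu, struct cpumask *mask)
{
	if (cpu >= nr_cpu_ids)		/* same check as cpu_valid() */
		return false;
	cpumask_set_cpu(cpu, mask);
	return true;
}
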
kernel/bpf/devmap.c
1130
int i, cpu;
kernel/bpf/devmap.c
1142
for_each_possible_cpu(cpu) {
kernel/bpf/devmap.c
1145
bq = per_cpu_ptr(netdev->xdp_bulkq, cpu);
kernel/bpf/hashtab.c
1026
int cpu;
kernel/bpf/hashtab.c
1029
cpu = map_flags >> 32;
kernel/bpf/hashtab.c
1030
ptr = per_cpu_ptr(pptr, cpu);
kernel/bpf/hashtab.c
1036
for_each_possible_cpu(cpu) {
kernel/bpf/hashtab.c
1037
ptr = per_cpu_ptr(pptr, cpu);
kernel/bpf/hashtab.c
1038
val = (map_flags & BPF_F_ALL_CPUS) ? value : value + size * cpu;
kernel/bpf/hashtab.c
1055
int cpu;
kernel/bpf/hashtab.c
1057
for_each_possible_cpu(cpu) {
kernel/bpf/hashtab.c
1058
if (cpu == current_cpu)
kernel/bpf/hashtab.c
1059
copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value);
kernel/bpf/hashtab.c
1061
zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
1702
int off = 0, cpu;
kernel/bpf/hashtab.c
1705
for_each_possible_cpu(cpu) {
kernel/bpf/hashtab.c
1706
copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
1897
int off = 0, cpu;
kernel/bpf/hashtab.c
1902
cpu = elem_map_flags >> 32;
kernel/bpf/hashtab.c
1903
copy_map_value(&htab->map, dst_val, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
1906
for_each_possible_cpu(cpu) {
kernel/bpf/hashtab.c
1908
per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
2168
int ret = 0, off = 0, cpu;
kernel/bpf/hashtab.c
2185
for_each_possible_cpu(cpu) {
kernel/bpf/hashtab.c
2187
per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
2427
static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
kernel/bpf/hashtab.c
2431
if (cpu >= nr_cpu_ids)
kernel/bpf/hashtab.c
2436
return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
kernel/bpf/hashtab.c
2453
static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
kernel/bpf/hashtab.c
2457
if (cpu >= nr_cpu_ids)
kernel/bpf/hashtab.c
2463
return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
kernel/bpf/hashtab.c
2474
int cpu, off = 0;
kernel/bpf/hashtab.c
2492
cpu = map_flags >> 32;
kernel/bpf/hashtab.c
2493
copy_map_value(map, value, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
2497
for_each_possible_cpu(cpu) {
kernel/bpf/hashtab.c
2498
copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
253
int cpu;
kernel/bpf/hashtab.c
2530
int cpu;
kernel/bpf/hashtab.c
2543
for_each_possible_cpu(cpu) {
kernel/bpf/hashtab.c
2544
seq_printf(m, "\tcpu%d: ", cpu);
kernel/bpf/hashtab.c
2546
per_cpu_ptr(pptr, cpu), m);
kernel/bpf/hashtab.c
255
for_each_possible_cpu(cpu) {
kernel/bpf/hashtab.c
256
bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
387
int cpu;
kernel/bpf/hashtab.c
394
for_each_possible_cpu(cpu) {
kernel/bpf/hashtab.c
400
*per_cpu_ptr(pptr, cpu) = l_new;
kernel/bpf/hashtab.c
482
int cpu;
kernel/bpf/hashtab.c
487
for_each_possible_cpu(cpu)
kernel/bpf/hashtab.c
488
bpf_obj_free_fields(hrec->record, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
845
int cpu;
kernel/bpf/hashtab.c
847
for_each_possible_cpu(cpu)
kernel/bpf/hashtab.c
848
bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
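The hashtab update paths above decode a single 64-bit flags word: the upper 32 bits name one target CPU, while BPF_F_ALL_CPUS broadcasts a single value to every CPU instead of expecting one value per CPU in the buffer. A sketch of that decoding, assuming a BPF_F_CPU-style flag gates the single-CPU branch, and with memcpy() standing in for copy_map_value():

#include <linux/percpu.h>
#include <linux/string.h>

static void update_percpu_value(void __percpu *pptr, void *value,
				u64 map_flags, u32 size)
{
	int cpu;

	if (map_flags & BPF_F_CPU) {		/* single target CPU */
		cpu = map_flags >> 32;
		memcpy(per_cpu_ptr(pptr, cpu), value, size);
		return;
	}

	for_each_possible_cpu(cpu) {
		/* ALL_CPUS: one value for everyone; otherwise the
		 * buffer carries one value per CPU, laid out linearly */
		void *val = (map_flags & BPF_F_ALL_CPUS)
				? value : value + (u64)size * cpu;

		memcpy(per_cpu_ptr(pptr, cpu), val, size);
	}
}
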
kernel/bpf/helpers.c
133
BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
kernel/bpf/helpers.c
136
return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
kernel/bpf/helpers.c
716
BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
kernel/bpf/helpers.c
718
if (cpu >= nr_cpu_ids)
kernel/bpf/helpers.c
721
return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
kernel/bpf/local_storage.c
187
int cpu, off = 0;
kernel/bpf/local_storage.c
202
cpu = map_flags >> 32;
kernel/bpf/local_storage.c
203
copy_map_value(_map, value, per_cpu_ptr(storage->percpu_buf, cpu));
kernel/bpf/local_storage.c
207
for_each_possible_cpu(cpu) {
kernel/bpf/local_storage.c
208
copy_map_value_long(_map, value + off, per_cpu_ptr(storage->percpu_buf, cpu));
kernel/bpf/local_storage.c
223
int cpu;
kernel/bpf/local_storage.c
242
cpu = map_flags >> 32;
kernel/bpf/local_storage.c
243
copy_map_value(_map, per_cpu_ptr(storage->percpu_buf, cpu), value);
kernel/bpf/local_storage.c
247
for_each_possible_cpu(cpu) {
kernel/bpf/local_storage.c
248
val = (map_flags & BPF_F_ALL_CPUS) ? value : value + size * cpu;
kernel/bpf/local_storage.c
249
copy_map_value(_map, per_cpu_ptr(storage->percpu_buf, cpu), val);
kernel/bpf/local_storage.c
423
int cpu;
kernel/bpf/local_storage.c
441
for_each_possible_cpu(cpu) {
kernel/bpf/local_storage.c
442
seq_printf(m, "\tcpu%d: ", cpu);
kernel/bpf/local_storage.c
444
per_cpu_ptr(storage->percpu_buf, cpu),
kernel/bpf/map_iter.c
202
int cpu;
kernel/bpf/map_iter.c
207
for_each_possible_cpu(cpu) {
kernel/bpf/map_iter.c
208
pcount = per_cpu_ptr(map->elem_count, cpu);
kernel/bpf/memalloc.c
1030
int cpu, i;
kernel/bpf/memalloc.c
1036
for_each_possible_cpu(cpu) {
kernel/bpf/memalloc.c
1037
c = per_cpu_ptr(ma->cache, cpu);
kernel/bpf/memalloc.c
1043
for_each_possible_cpu(cpu) {
kernel/bpf/memalloc.c
1044
cc = per_cpu_ptr(ma->caches, cpu);
kernel/bpf/memalloc.c
492
static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
kernel/bpf/memalloc.c
503
alloc_bulk(c, cnt, cpu_to_node(cpu), false);
kernel/bpf/memalloc.c
518
int cpu, i, unit_size, percpu_size = 0;
kernel/bpf/memalloc.c
543
for_each_possible_cpu(cpu) {
kernel/bpf/memalloc.c
544
c = per_cpu_ptr(pc, cpu);
kernel/bpf/memalloc.c
550
prefill_mem_cache(c, cpu);
kernel/bpf/memalloc.c
563
for_each_possible_cpu(cpu) {
kernel/bpf/memalloc.c
564
cc = per_cpu_ptr(pcc, cpu);
kernel/bpf/memalloc.c
573
prefill_mem_cache(c, cpu);
kernel/bpf/memalloc.c
598
int cpu, i, unit_size, percpu_size;
kernel/bpf/memalloc.c
613
for_each_possible_cpu(cpu) {
kernel/bpf/memalloc.c
614
cc = per_cpu_ptr(pcc, cpu);
kernel/bpf/memalloc.c
625
prefill_mem_cache(c, cpu);
kernel/bpf/memalloc.c
666
int cpu, i;
kernel/bpf/memalloc.c
669
for_each_possible_cpu(cpu) {
kernel/bpf/memalloc.c
670
c = per_cpu_ptr(ma->cache, cpu);
kernel/bpf/memalloc.c
675
for_each_possible_cpu(cpu) {
kernel/bpf/memalloc.c
676
cc = per_cpu_ptr(ma->caches, cpu);
kernel/bpf/memalloc.c
753
int cpu, i, rcu_in_progress;
kernel/bpf/memalloc.c
757
for_each_possible_cpu(cpu) {
kernel/bpf/memalloc.c
758
c = per_cpu_ptr(ma->cache, cpu);
kernel/bpf/memalloc.c
770
for_each_possible_cpu(cpu) {
kernel/bpf/memalloc.c
771
cc = per_cpu_ptr(ma->caches, cpu);
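memalloc.c's prefill runs once per possible CPU and allocates from each CPU's home node (alloc_bulk(..., cpu_to_node(cpu), ...)), so later lockless allocations stay node-local. A toy version of the same shape, with an illustrative cache type:

#include <linux/percpu.h>
#include <linux/slab.h>

struct toy_cache {
	void *slot;
};

static int prefill_all(struct toy_cache __percpu *caches, size_t obj_size)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct toy_cache *c = per_cpu_ptr(caches, cpu);

		/* back each CPU's cache with node-local memory */
		c->slot = kmalloc_node(obj_size, GFP_KERNEL,
				       cpu_to_node(cpu));
		if (!c->slot)
			return -ENOMEM;
	}
	return 0;
}
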
kernel/bpf/percpu_freelist.c
104
int cpu;
kernel/bpf/percpu_freelist.c
106
for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
kernel/bpf/percpu_freelist.c
107
head = per_cpu_ptr(s->freelist, cpu);
kernel/bpf/percpu_freelist.c
14
for_each_possible_cpu(cpu) {
kernel/bpf/percpu_freelist.c
15
struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);
kernel/bpf/percpu_freelist.c
49
int cpu;
kernel/bpf/percpu_freelist.c
55
for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
kernel/bpf/percpu_freelist.c
56
if (cpu == raw_smp_processor_id())
kernel/bpf/percpu_freelist.c
58
head = per_cpu_ptr(s->freelist, cpu);
kernel/bpf/percpu_freelist.c
8
int cpu;
kernel/bpf/percpu_freelist.c
82
unsigned int cpu, cpu_idx, i, j, n, m;
kernel/bpf/percpu_freelist.c
88
for_each_possible_cpu(cpu) {
kernel/bpf/percpu_freelist.c
89
head = per_cpu_ptr(s->freelist, cpu);
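Both pop paths above use for_each_cpu_wrap() starting at raw_smp_processor_id(): the local CPU's list is tried first, and only when it is empty does the search wrap through the other CPUs (the steal case). A sketch, with try_pop() as a hypothetical per-list pop helper:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static struct pcpu_freelist_node *pop_any(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int cpu;

	for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
		head = per_cpu_ptr(s->freelist, cpu);
		node = try_pop(head);	/* hypothetical helper */
		if (node)
			return node;	/* common case: the local CPU */
	}
	return NULL;			/* every per-CPU list was empty */
}
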
kernel/bpf/rqspinlock.c
125
int cpu;
kernel/bpf/rqspinlock.c
134
for_each_possible_cpu(cpu) {
kernel/bpf/rqspinlock.c
135
struct rqspinlock_held *rqh_cpu = per_cpu_ptr(&rqspinlock_held_locks, cpu);
kernel/bpf/rqspinlock.c
155
if (cpu == smp_processor_id() || real_cnt < 2 || real_cnt > RES_NR_HELD)
kernel/bpf/syscall.c
2463
int cpu;
kernel/bpf/syscall.c
2465
for_each_possible_cpu(cpu) {
kernel/bpf/syscall.c
2470
st = per_cpu_ptr(prog->stats, cpu);
kernel/bpf/task_iter.c
1048
int ret, cpu;
kernel/bpf/task_iter.c
1050
for_each_possible_cpu(cpu) {
kernel/bpf/task_iter.c
1051
work = per_cpu_ptr(&mmap_unlock_work, cpu);
kernel/bpf/verifier.c
23977
(void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
kernel/cgroup/cgroup.c
7131
int cpu;
kernel/cgroup/cgroup.c
7133
for_each_possible_cpu(cpu) {
kernel/cgroup/cgroup.c
7134
init_llist_head(per_cpu_ptr(&cgrp_dead_tasks, cpu));
kernel/cgroup/cgroup.c
7135
per_cpu(cgrp_dead_tasks_iwork, cpu) =
kernel/cgroup/cpuset.c
3046
int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
kernel/cgroup/cpuset.c
3048
if (unlikely(cpu >= nr_cpu_ids)) {
kernel/cgroup/cpuset.c
3054
ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
kernel/cgroup/cpuset.c
3084
int cpu = cpumask_any(cs->effective_cpus);
kernel/cgroup/cpuset.c
3086
dl_bw_free(cpu, cs->sum_migrate_dl_bw);
kernel/cgroup/cpuset.c
933
int cpu;
kernel/cgroup/cpuset.c
942
for_each_possible_cpu(cpu) {
kernel/cgroup/cpuset.c
943
if (dl_bw_visited(cpu, cookie))
kernel/cgroup/cpuset.c
946
dl_clear_root_domain_cpu(cpu);
kernel/cgroup/rstat.c
124
lhead = ss_lhead_cpu(css->ss, cpu);
kernel/cgroup/rstat.c
128
static void __css_process_update_tree(struct cgroup_subsys_state *css, int cpu)
kernel/cgroup/rstat.c
132
struct css_rstat_cpu *rstatc = css_rstat_cpu(css, cpu);
kernel/cgroup/rstat.c
149
prstatc = css_rstat_cpu(parent, cpu);
kernel/cgroup/rstat.c
15
static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
kernel/cgroup/rstat.c
157
static void css_process_update_tree(struct cgroup_subsys *ss, int cpu)
kernel/cgroup/rstat.c
159
struct llist_head *lhead = ss_lhead_cpu(ss, cpu);
kernel/cgroup/rstat.c
181
__css_process_update_tree(rstatc->owner, cpu);
kernel/cgroup/rstat.c
200
struct cgroup_subsys_state *child, int cpu)
kernel/cgroup/rstat.c
242
crstatc = css_rstat_cpu(child, cpu);
kernel/cgroup/rstat.c
282
struct cgroup_subsys_state *root, int cpu)
kernel/cgroup/rstat.c
284
struct css_rstat_cpu *rstatc = css_rstat_cpu(root, cpu);
kernel/cgroup/rstat.c
287
css_process_update_tree(root->ss, cpu);
kernel/cgroup/rstat.c
29
struct cgroup_subsys_state *css, int cpu)
kernel/cgroup/rstat.c
302
prstatc = css_rstat_cpu(parent, cpu);
kernel/cgroup/rstat.c
307
nrstatc = css_rstat_cpu(*nextp, cpu);
kernel/cgroup/rstat.c
31
return per_cpu_ptr(css->rstat_cpu, cpu);
kernel/cgroup/rstat.c
322
head = css_rstat_push_children(head, child, cpu);
kernel/cgroup/rstat.c
343
struct cgroup *parent, int cpu)
kernel/cgroup/rstat.c
35
struct cgroup *cgrp, int cpu)
kernel/cgroup/rstat.c
37
return per_cpu_ptr(cgrp->rstat_base_cpu, cpu);
kernel/cgroup/rstat.c
402
int cpu;
kernel/cgroup/rstat.c
413
for_each_possible_cpu(cpu) {
kernel/cgroup/rstat.c
417
__css_rstat_lock(css, cpu);
kernel/cgroup/rstat.c
418
pos = css_rstat_updated_list(css, cpu);
kernel/cgroup/rstat.c
421
cgroup_base_stat_flush(pos->cgroup, cpu);
kernel/cgroup/rstat.c
423
cgroup_parent(pos->cgroup), cpu);
kernel/cgroup/rstat.c
425
pos->ss->css_rstat_flush(pos, cpu);
kernel/cgroup/rstat.c
427
__css_rstat_unlock(css, cpu);
kernel/cgroup/rstat.c
436
int cpu;
kernel/cgroup/rstat.c
461
for_each_possible_cpu(cpu) {
kernel/cgroup/rstat.c
462
struct css_rstat_cpu *rstatc = css_rstat_cpu(css, cpu);
kernel/cgroup/rstat.c
470
rstatbc = cgroup_rstat_base_cpu(cgrp, cpu);
kernel/cgroup/rstat.c
48
static inline struct llist_head *ss_lhead_cpu(struct cgroup_subsys *ss, int cpu)
kernel/cgroup/rstat.c
480
int cpu;
kernel/cgroup/rstat.c
491
for_each_possible_cpu(cpu) {
kernel/cgroup/rstat.c
492
struct css_rstat_cpu *rstatc = css_rstat_cpu(css, cpu);
kernel/cgroup/rstat.c
51
return per_cpu_ptr(ss->lhead, cpu);
kernel/cgroup/rstat.c
52
return per_cpu_ptr(&rstat_backlog_list, cpu);
kernel/cgroup/rstat.c
520
int cpu;
kernel/cgroup/rstat.c
529
for_each_possible_cpu(cpu)
kernel/cgroup/rstat.c
530
init_llist_head(ss_lhead_cpu(ss, cpu));
kernel/cgroup/rstat.c
563
static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
kernel/cgroup/rstat.c
565
struct cgroup_rstat_base_cpu *rstatbc = cgroup_rstat_base_cpu(cgrp, cpu);
kernel/cgroup/rstat.c
595
prstatbc = cgroup_rstat_base_cpu(parent, cpu);
kernel/cgroup/rstat.c
70
__bpf_kfunc void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
kernel/cgroup/rstat.c
93
rstatc = css_rstat_cpu(css, cpu);
kernel/context_tracking.c
677
void __init ct_cpu_track_user(int cpu)
kernel/context_tracking.c
681
if (!per_cpu(context_tracking.active, cpu)) {
kernel/context_tracking.c
682
per_cpu(context_tracking.active, cpu) = true;
kernel/context_tracking.c
704
int cpu;
kernel/context_tracking.c
706
for_each_possible_cpu(cpu)
kernel/context_tracking.c
707
ct_cpu_track_user(cpu);
kernel/cpu.c
1000
unsigned int cpu,
kernel/cpu.c
1004
__cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
kernel/cpu.c
1021
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
kernel/cpu.c
1027
ret = cpuhp_invoke_callback_range(true, cpu, st, target);
kernel/cpu.c
1030
ret, cpu, cpuhp_get_step(st->state)->name,
kernel/cpu.c
1033
cpuhp_reset_state(cpu, st, prev_state);
kernel/cpu.c
1035
WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
kernel/cpu.c
1044
static int cpuhp_should_run(unsigned int cpu)
kernel/cpu.c
1065
static void cpuhp_thread_fun(unsigned int cpu)
kernel/cpu.c
1101
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
kernel/cpu.c
1109
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
kernel/cpu.c
1132
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
kernel/cpu.c
1135
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
1138
if (!cpu_online(cpu))
kernel/cpu.c
1152
return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
kernel/cpu.c
1182
static int cpuhp_kick_ap_work(unsigned int cpu)
kernel/cpu.c
1184
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
1194
trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
kernel/cpu.c
1195
ret = cpuhp_kick_ap(cpu, st, st->target);
kernel/cpu.c
1196
trace_cpuhp_exit(cpu, st->state, prev_state, ret);
kernel/cpu.c
1212
int cpu;
kernel/cpu.c
1214
for_each_possible_cpu(cpu) {
kernel/cpu.c
1215
st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
1230
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
kernel/cpu.c
1245
void clear_tasks_mm_cpumask(int cpu)
kernel/cpu.c
1256
WARN_ON(cpu_online(cpu));
kernel/cpu.c
1268
arch_clear_mm_cpumask_cpu(cpu, t->mm);
kernel/cpu.c
1279
int err, cpu = smp_processor_id();
kernel/cpu.c
128
int (*single)(unsigned int cpu);
kernel/cpu.c
129
int (*multi)(unsigned int cpu,
kernel/cpu.c
1295
cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
kernel/cpu.c
1298
stop_machine_park(cpu);
kernel/cpu.c
1302
static int takedown_cpu(unsigned int cpu)
kernel/cpu.c
1304
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
1316
err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
kernel/cpu.c
1324
BUG_ON(cpu_online(cpu));
kernel/cpu.c
133
int (*single)(unsigned int cpu);
kernel/cpu.c
1339
hotplug_cpu__broadcast_tick_pull(cpu);
kernel/cpu.c
134
int (*multi)(unsigned int cpu,
kernel/cpu.c
1341
__cpu_die(cpu);
kernel/cpu.c
1343
cpuhp_bp_sync_dead(cpu);
kernel/cpu.c
1345
lockdep_cleanup_dead_cpu(cpu, idle_thread_get(cpu));
kernel/cpu.c
1352
rcutree_migrate_callbacks(cpu);
kernel/cpu.c
1380
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
kernel/cpu.c
1386
ret = cpuhp_invoke_callback_range(false, cpu, st, target);
kernel/cpu.c
1389
ret, cpu, cpuhp_get_step(st->state)->name,
kernel/cpu.c
1392
cpuhp_reset_state(cpu, st, prev_state);
kernel/cpu.c
1395
WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
kernel/cpu.c
1403
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
kernel/cpu.c
1406
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
1412
if (!cpu_present(cpu))
kernel/cpu.c
1429
prev_state = cpuhp_set_state(cpu, st, target);
kernel/cpu.c
1436
ret = cpuhp_kick_ap_work(cpu);
kernel/cpu.c
1457
ret = cpuhp_down_callbacks(cpu, st, target);
kernel/cpu.c
1460
cpuhp_reset_state(cpu, st, prev_state);
kernel/cpu.c
1463
WARN(1, "DEAD callback error for CPU%d", cpu);
kernel/cpu.c
1473
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
kernel/cpu.c
1483
return _cpu_down(cpu, 0, target);
kernel/cpu.c
1486
static int cpu_down(unsigned int cpu, enum cpuhp_state target)
kernel/cpu.c
1491
err = cpu_down_maps_locked(cpu, target);
kernel/cpu.c
1511
int remove_cpu(unsigned int cpu)
kernel/cpu.c
1516
ret = device_offline(get_cpu_device(cpu));
kernel/cpu.c
1525
unsigned int cpu;
kernel/cpu.c
1538
for_each_online_cpu(cpu) {
kernel/cpu.c
1539
if (cpu == primary_cpu)
kernel/cpu.c
1542
error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
kernel/cpu.c
1545
cpu, error);
kernel/cpu.c
1576
void notify_cpu_starting(unsigned int cpu)
kernel/cpu.c
1578
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
1581
rcutree_report_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
kernel/cpu.c
1582
cpumask_set_cpu(cpu, &cpus_booted_once_mask);
kernel/cpu.c
1587
cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
kernel/cpu.c
1616
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
kernel/cpu.c
1618
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
1624
if (!cpu_present(cpu)) {
kernel/cpu.c
1638
idle = idle_thread_get(cpu);
kernel/cpu.c
1653
cpuhp_set_state(cpu, st, target);
kernel/cpu.c
1659
ret = cpuhp_kick_ap_work(cpu);
kernel/cpu.c
1674
ret = cpuhp_up_callbacks(cpu, st, target);
kernel/cpu.c
1681
static int cpu_up(unsigned int cpu, enum cpuhp_state target)
kernel/cpu.c
1685
if (!cpu_possible(cpu)) {
kernel/cpu.c
1687
cpu);
kernel/cpu.c
169
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
kernel/cpu.c
1691
err = try_online_node(cpu_to_node(cpu));
kernel/cpu.c
1701
if (!cpu_bootable(cpu)) {
kernel/cpu.c
1706
err = _cpu_up(cpu, 0, target);
kernel/cpu.c
1727
int add_cpu(unsigned int cpu)
kernel/cpu.c
173
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
1732
ret = device_online(get_cpu_device(cpu));
kernel/cpu.c
175
int (*cbm)(unsigned int cpu, struct hlist_node *node);
kernel/cpu.c
176
int (*cb)(unsigned int cpu);
kernel/cpu.c
1767
unsigned int cpu;
kernel/cpu.c
1769
for_each_cpu(cpu, mask) {
kernel/cpu.c
1770
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
1772
if (cpu_up(cpu, target) && can_rollback_cpu(st)) {
kernel/cpu.c
1778
WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE));
kernel/cpu.c
1888
int cpu, error = 0;
kernel/cpu.c
1907
for (cpu = nr_cpu_ids - 1; cpu >= 0; cpu--) {
kernel/cpu.c
1908
if (!cpu_online(cpu) || cpu == primary)
kernel/cpu.c
1917
trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
kernel/cpu.c
1918
error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
kernel/cpu.c
1919
trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
kernel/cpu.c
1921
cpumask_set_cpu(cpu, frozen_cpus);
kernel/cpu.c
1923
pr_err("Error taking CPU%d down: %d\n", cpu, error);
kernel/cpu.c
193
trace_cpuhp_enter(cpu, st->target, state, cb);
kernel/cpu.c
194
ret = cb(cpu);
kernel/cpu.c
195
trace_cpuhp_exit(cpu, st->state, state, ret);
kernel/cpu.c
1954
int cpu, error;
kernel/cpu.c
1966
for_each_cpu(cpu, frozen_cpus) {
kernel/cpu.c
1967
trace_suspend_resume(TPS("CPU_ON"), cpu, true);
kernel/cpu.c
1968
error = _cpu_up(cpu, 1, CPUHP_ONLINE);
kernel/cpu.c
1969
trace_suspend_resume(TPS("CPU_ON"), cpu, false);
kernel/cpu.c
1971
pr_info("CPU%d is up\n", cpu);
kernel/cpu.c
1974
pr_warn("Error taking CPU%d up: %d\n", cpu, error);
kernel/cpu.c
203
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
kernel/cpu.c
204
ret = cbm(cpu, node);
kernel/cpu.c
205
trace_cpuhp_exit(cpu, st->state, state, ret);
kernel/cpu.c
215
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
kernel/cpu.c
216
ret = cbm(cpu, node);
kernel/cpu.c
217
trace_cpuhp_exit(cpu, st->state, state, ret);
kernel/cpu.c
2289
int (*startup)(unsigned int cpu),
kernel/cpu.c
2290
int (*teardown)(unsigned int cpu),
kernel/cpu.c
2334
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
kernel/cpu.c
2352
ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
kernel/cpu.c
2354
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
kernel/cpu.c
2358
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
kernel/cpu.c
2362
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
kernel/cpu.c
2377
int cpu;
kernel/cpu.c
2380
for_each_present_cpu(cpu) {
kernel/cpu.c
2381
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
2384
if (cpu >= failedcpu)
kernel/cpu.c
2389
cpuhp_issue_call(cpu, state, false, node);
kernel/cpu.c
2398
int cpu;
kernel/cpu.c
240
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
kernel/cpu.c
241
ret = cbm(cpu, node);
kernel/cpu.c
2416
for_each_present_cpu(cpu) {
kernel/cpu.c
2417
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
242
trace_cpuhp_exit(cpu, st->state, state, ret);
kernel/cpu.c
2423
ret = cpuhp_issue_call(cpu, state, true, node);
kernel/cpu.c
2426
cpuhp_rollback_install(cpu, state, node);
kernel/cpu.c
2470
int (*startup)(unsigned int cpu),
kernel/cpu.c
2471
int (*teardown)(unsigned int cpu),
kernel/cpu.c
2474
int cpu, ret = 0;
kernel/cpu.c
2500
for_each_present_cpu(cpu) {
kernel/cpu.c
2501
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
2507
ret = cpuhp_issue_call(cpu, state, true, NULL);
kernel/cpu.c
2510
cpuhp_rollback_install(cpu, state, NULL);
kernel/cpu.c
2529
int (*startup)(unsigned int cpu),
kernel/cpu.c
2530
int (*teardown)(unsigned int cpu),
kernel/cpu.c
2547
int cpu;
kernel/cpu.c
2564
for_each_present_cpu(cpu) {
kernel/cpu.c
2565
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
2569
cpuhp_issue_call(cpu, state, false, node);
kernel/cpu.c
2594
int cpu;
kernel/cpu.c
2616
for_each_present_cpu(cpu) {
kernel/cpu.c
2617
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
2621
cpuhp_issue_call(cpu, state, false, NULL);
kernel/cpu.c
2638
static void cpuhp_offline_cpu_device(unsigned int cpu)
kernel/cpu.c
2640
struct device *dev = get_cpu_device(cpu);
kernel/cpu.c
2647
static void cpuhp_online_cpu_device(unsigned int cpu)
kernel/cpu.c
2649
struct device *dev = get_cpu_device(cpu);
kernel/cpu.c
2658
int cpu, ret = 0;
kernel/cpu.c
2661
for_each_online_cpu(cpu) {
kernel/cpu.c
2662
if (topology_is_primary_thread(cpu))
kernel/cpu.c
2668
if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
kernel/cpu.c
2670
ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
kernel/cpu.c
2686
cpuhp_offline_cpu_device(cpu);
kernel/cpu.c
2696
static inline bool topology_is_core_online(unsigned int cpu)
kernel/cpu.c
2704
int cpu, ret = 0;
kernel/cpu.c
2708
for_each_present_cpu(cpu) {
kernel/cpu.c
2710
if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
kernel/cpu.c
2712
if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu))
kernel/cpu.c
2714
ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
kernel/cpu.c
2718
cpuhp_online_cpu_device(cpu);
kernel/cpu.c
3025
int cpu, ret;
kernel/cpu.c
3039
for_each_possible_cpu(cpu) {
kernel/cpu.c
3040
struct device *dev = get_cpu_device(cpu);
kernel/cpu.c
308
static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
kernel/cpu.c
311
atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
kernel/cpu.c
3121
void set_cpu_online(unsigned int cpu, bool online)
kernel/cpu.c
3134
if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
kernel/cpu.c
3137
if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
kernel/cpu.c
3146
void set_cpu_possible(unsigned int cpu, bool possible)
kernel/cpu.c
3149
if (!cpumask_test_and_set_cpu(cpu, &__cpu_possible_mask))
kernel/cpu.c
3152
if (cpumask_test_and_clear_cpu(cpu, &__cpu_possible_mask))
kernel/cpu.c
3162
int cpu = smp_processor_id();
kernel/cpu.c
3165
set_cpu_online(cpu, true);
kernel/cpu.c
3166
set_cpu_active(cpu, true);
kernel/cpu.c
3167
set_cpu_present(cpu, true);
kernel/cpu.c
3168
set_cpu_possible(cpu, true);
kernel/cpu.c
3171
__boot_cpu_id = cpu;
kernel/cpu.c
354
void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
kernel/cpu.c
360
static void cpuhp_bp_sync_dead(unsigned int cpu)
kernel/cpu.c
362
atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
kernel/cpu.c
371
if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {
kernel/cpu.c
373
arch_cpuhp_cleanup_dead_cpu(cpu);
kernel/cpu.c
378
pr_err("CPU%u failed to report dead state\n", cpu);
kernel/cpu.c
381
static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }
kernel/cpu.c
402
static bool cpuhp_can_boot_ap(unsigned int cpu)
kernel/cpu.c
404
atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
kernel/cpu.c
430
void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { }
kernel/cpu.c
436
static int cpuhp_bp_sync_alive(unsigned int cpu)
kernel/cpu.c
443
if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
kernel/cpu.c
444
pr_err("CPU%u failed to report alive state\n", cpu);
kernel/cpu.c
449
arch_cpuhp_cleanup_kick_cpu(cpu);
kernel/cpu.c
453
static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }
kernel/cpu.c
454
static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }
kernel/cpu.c
668
static inline bool cpu_smt_thread_allowed(unsigned int cpu)
kernel/cpu.c
671
return topology_smt_thread_allowed(cpu);
kernel/cpu.c
677
static inline bool cpu_bootable(unsigned int cpu)
kernel/cpu.c
679
if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
kernel/cpu.c
690
if (topology_is_primary_thread(cpu))
kernel/cpu.c
699
return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
kernel/cpu.c
711
static inline bool cpu_bootable(unsigned int cpu) { return true; }
kernel/cpu.c
715
cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
kernel/cpu.c
726
if (cpu_dying(cpu) != !bringup)
kernel/cpu.c
727
set_cpu_dying(cpu, !bringup);
kernel/cpu.c
733
cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
kernel/cpu.c
761
if (cpu_dying(cpu) != !bringup)
kernel/cpu.c
762
set_cpu_dying(cpu, !bringup);
kernel/cpu.c
782
static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
kernel/cpu.c
788
prev_state = cpuhp_set_state(cpu, st, target);
kernel/cpu.c
791
cpuhp_reset_state(cpu, st, prev_state);
kernel/cpu.c
798
static int bringup_wait_for_ap_online(unsigned int cpu)
kernel/cpu.c
800
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
804
if (WARN_ON_ONCE((!cpu_online(cpu))))
kernel/cpu.c
817
if (!cpu_bootable(cpu))
kernel/cpu.c
823
static int cpuhp_kick_ap_alive(unsigned int cpu)
kernel/cpu.c
825
if (!cpuhp_can_boot_ap(cpu))
kernel/cpu.c
828
return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu));
kernel/cpu.c
831
static int cpuhp_bringup_ap(unsigned int cpu)
kernel/cpu.c
833
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
843
ret = cpuhp_bp_sync_alive(cpu);
kernel/cpu.c
847
ret = bringup_wait_for_ap_online(cpu);
kernel/cpu.c
856
return cpuhp_kick_ap(cpu, st, st->target);
kernel/cpu.c
863
static int bringup_cpu(unsigned int cpu)
kernel/cpu.c
865
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
kernel/cpu.c
866
struct task_struct *idle = idle_thread_get(cpu);
kernel/cpu.c
869
if (!cpuhp_can_boot_ap(cpu))
kernel/cpu.c
883
ret = __cpu_up(cpu, idle);
kernel/cpu.c
887
ret = cpuhp_bp_sync_alive(cpu);
kernel/cpu.c
891
ret = bringup_wait_for_ap_online(cpu);
kernel/cpu.c
900
return cpuhp_kick_ap(cpu, st, st->target);
kernel/cpu.c
908
static int finish_cpu(unsigned int cpu)
kernel/cpu.c
910
struct task_struct *idle = idle_thread_get(cpu);
kernel/cpu.c
961
unsigned int cpu,
kernel/cpu.c
972
err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
kernel/cpu.c
978
cpu, bringup ? "UP" : "DOWN",
kernel/cpu.c
992
unsigned int cpu,
kernel/cpu.c
996
return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
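All of the cpuhp plumbing above sits behind one small public surface: a subsystem registers a state with startup/teardown callbacks, and the state machine invokes them on every present CPU, both immediately and at each future hotplug event. A minimal registration sketch against the real cpuhp_setup_state() API (the callback bodies and state name are illustrative):

#include <linux/cpuhotplug.h>
#include <linux/printk.h>

static int demo_online(unsigned int cpu)
{
	pr_info("cpu%u coming online\n", cpu);
	return 0;
}

static int demo_offline(unsigned int cpu)
{
	pr_info("cpu%u going offline\n", cpu);
	return 0;
}

static int __init demo_init(void)
{
	/* dynamic AP state: returns the allocated state on success */
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				    demo_online, demo_offline);

	return ret < 0 ? ret : 0;
}
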
kernel/crash_core.c
183
unsigned int cpu, i;
kernel/crash_core.c
223
for_each_possible_cpu(cpu) {
kernel/crash_core.c
225
notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
kernel/crash_core.c
448
void crash_save_cpu(struct pt_regs *regs, int cpu)
kernel/crash_core.c
453
if ((cpu < 0) || (cpu >= nr_cpu_ids))
kernel/crash_core.c
463
buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
kernel/crash_core.c
574
static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu, void *arg)
kernel/crash_core.c
599
pr_debug("hp_action %u, cpu %u\n", hp_action, cpu);
kernel/crash_core.c
674
static int crash_cpuhp_online(unsigned int cpu)
kernel/crash_core.c
676
crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_CPU, cpu, NULL);
kernel/crash_core.c
680
static int crash_cpuhp_offline(unsigned int cpu)
kernel/crash_core.c
682
crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_CPU, cpu, NULL);
kernel/debug/debug_core.c
245
int cpu;
kernel/debug/debug_core.c
248
for_each_online_cpu(cpu) {
kernel/debug/debug_core.c
250
if (cpu == this_cpu)
kernel/debug/debug_core.c
253
csd = &per_cpu(kgdb_roundup_csd, cpu);
kernel/debug/debug_core.c
263
if (kgdb_info[cpu].rounding_up)
kernel/debug/debug_core.c
265
kgdb_info[cpu].rounding_up = true;
kernel/debug/debug_core.c
267
ret = smp_call_function_single_async(cpu, csd);
kernel/debug/debug_core.c
269
kgdb_info[cpu].rounding_up = false;
kernel/debug/debug_core.c
456
void kdb_dump_stack_on_cpu(int cpu)
kernel/debug/debug_core.c
458
if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) {
kernel/debug/debug_core.c
463
if (!(kgdb_info[cpu].exception_state & DCPU_IS_SLAVE)) {
kernel/debug/debug_core.c
465
cpu);
kernel/debug/debug_core.c
480
kgdb_info[cpu].exception_state |= DCPU_WANT_BT;
kernel/debug/debug_core.c
481
while (kgdb_info[cpu].exception_state & DCPU_WANT_BT)
kernel/debug/debug_core.c
577
int cpu;
kernel/debug/debug_core.c
582
kgdb_info[ks->cpu].enter_kgdb++;
kernel/debug/debug_core.c
583
kgdb_info[ks->cpu].exception_state |= exception_state;
kernel/debug/debug_core.c
601
cpu = ks->cpu;
kernel/debug/debug_core.c
602
kgdb_info[cpu].debuggerinfo = regs;
kernel/debug/debug_core.c
603
kgdb_info[cpu].task = current;
kernel/debug/debug_core.c
604
kgdb_info[cpu].ret_state = 0;
kernel/debug/debug_core.c
605
kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
kernel/debug/debug_core.c
612
atomic_xchg(&kgdb_active, cpu);
kernel/debug/debug_core.c
622
if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
kernel/debug/debug_core.c
623
kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
kernel/debug/debug_core.c
625
} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
kernel/debug/debug_core.c
627
atomic_xchg(&kgdb_active, cpu);
kernel/debug/debug_core.c
630
} else if (kgdb_info[cpu].exception_state & DCPU_WANT_BT) {
kernel/debug/debug_core.c
632
kgdb_info[cpu].exception_state &= ~DCPU_WANT_BT;
kernel/debug/debug_core.c
633
} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
kernel/debug/debug_core.c
645
kgdb_info[cpu].debuggerinfo = NULL;
kernel/debug/debug_core.c
646
kgdb_info[cpu].task = NULL;
kernel/debug/debug_core.c
647
kgdb_info[cpu].exception_state &=
kernel/debug/debug_core.c
649
kgdb_info[cpu].enter_kgdb--;
kernel/debug/debug_core.c
667
(kgdb_info[cpu].task &&
kernel/debug/debug_core.c
668
kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
kernel/debug/debug_core.c
679
kgdb_info[cpu].ret_state = 1;
kernel/debug/debug_core.c
777
kgdb_info[cpu].ret_state = error;
kernel/debug/debug_core.c
810
kgdb_info[cpu].debuggerinfo = NULL;
kernel/debug/debug_core.c
811
kgdb_info[cpu].task = NULL;
kernel/debug/debug_core.c
812
kgdb_info[cpu].exception_state &=
kernel/debug/debug_core.c
814
kgdb_info[cpu].enter_kgdb--;
kernel/debug/debug_core.c
824
return kgdb_info[cpu].ret_state;
kernel/debug/debug_core.c
850
ks->cpu = raw_smp_processor_id();
kernel/debug/debug_core.c
858
if (kgdb_info[ks->cpu].enter_kgdb != 0)
kernel/debug/debug_core.c
878
int kgdb_nmicallback(int cpu, void *regs)
kernel/debug/debug_core.c
884
kgdb_info[cpu].rounding_up = false;
kernel/debug/debug_core.c
887
ks->cpu = cpu;
kernel/debug/debug_core.c
890
if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
kernel/debug/debug_core.c
900
int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
kernel/debug/debug_core.c
907
if (kgdb_info[cpu].enter_kgdb == 0) {
kernel/debug/debug_core.c
912
ks->cpu = cpu;
kernel/debug/debug_core.h
20
int cpu;
kernel/debug/debug_core.h
76
extern void kdb_dump_stack_on_cpu(int cpu);
kernel/debug/gdbstub.c
501
thread = kgdb_info[ks->cpu].task;
kernel/debug/gdbstub.c
502
local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
kernel/debug/gdbstub.c
704
int cpu;
kernel/debug/gdbstub.c
718
for_each_online_cpu(cpu) {
kernel/debug/gdbstub.c
720
int_to_threadref(thref, -cpu - 2);
kernel/debug/gdbstub.c
963
kgdb_usethread = kgdb_info[ks->cpu].task;
kernel/debug/gdbstub.c
964
ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
kernel/debug/kdb/kdb_bt.c
104
kdb_bt_cpu(unsigned long cpu)
kernel/debug/kdb/kdb_bt.c
108
if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {
kernel/debug/kdb/kdb_bt.c
109
kdb_printf("WARNING: no process for cpu %ld\n", cpu);
kernel/debug/kdb/kdb_bt.c
114
kdb_tsk = KDB_TSK(cpu);
kernel/debug/kdb/kdb_bt.c
116
kdb_printf("WARNING: no task for cpu %ld\n", cpu);
kernel/debug/kdb/kdb_bt.c
137
unsigned long cpu;
kernel/debug/kdb/kdb_bt.c
143
for_each_online_cpu(cpu) {
kernel/debug/kdb/kdb_bt.c
144
p = curr_task(cpu);
kernel/debug/kdb/kdb_bt.c
178
unsigned long cpu = ~0;
kernel/debug/kdb/kdb_bt.c
182
diag = kdbgetularg((char *)argv[1], &cpu);
kernel/debug/kdb/kdb_bt.c
186
if (cpu != ~0) {
kernel/debug/kdb/kdb_bt.c
187
kdb_bt_cpu(cpu);
kernel/debug/kdb/kdb_bt.c
196
for_each_online_cpu(cpu) {
kernel/debug/kdb/kdb_bt.c
197
kdb_bt_cpu(cpu);
kernel/debug/kdb/kdb_debugger.c
159
kgdb_info[ks->cpu].ret_state = gdbstub_state(ks, "e");
kernel/debug/kdb/kdb_debugger.c
161
kgdb_info[ks->cpu].ret_state = 1;
kernel/debug/kdb/kdb_debugger.c
171
return kgdb_info[ks->cpu].ret_state;
kernel/debug/kdb/kdb_debugger.c
41
kdb_current_task = kgdb_info[ks->cpu].task;
kernel/debug/kdb/kdb_debugger.c
42
kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
kernel/debug/kdb/kdb_main.c
2166
unsigned long cpu;
kernel/debug/kdb/kdb_main.c
2168
for_each_online_cpu(cpu) {
kernel/debug/kdb/kdb_main.c
2169
p = curr_task(cpu);
kernel/debug/kdb/kdb_main.c
2192
int cpu;
kernel/debug/kdb/kdb_main.c
2199
cpu = kdb_process_cpu(p);
kernel/debug/kdb/kdb_main.c
2208
if (!KDB_TSK(cpu)) {
kernel/debug/kdb/kdb_main.c
2211
if (KDB_TSK(cpu) != p)
kernel/debug/kdb/kdb_main.c
2213
"process table (0x%px)\n", KDB_TSK(cpu));
kernel/debug/kdb/kdb_main.c
2229
unsigned long cpu;
kernel/debug/kdb/kdb_main.c
2238
for_each_online_cpu(cpu) {
kernel/debug/kdb/kdb_main.c
2241
p = curr_task(cpu);
kernel/debug/kdb/kdb_main.c
2428
int cpu, diag, nextarg = 1;
kernel/debug/kdb/kdb_main.c
2462
#define KDB_PCU(cpu) __per_cpu_offset(cpu)
kernel/debug/kdb/kdb_main.c
2465
#define KDB_PCU(cpu) __per_cpu_offset[cpu]
kernel/debug/kdb/kdb_main.c
2467
#define KDB_PCU(cpu) 0
kernel/debug/kdb/kdb_main.c
2470
for_each_online_cpu(cpu) {
kernel/debug/kdb/kdb_main.c
2474
if (whichcpu != ~0UL && whichcpu != cpu)
kernel/debug/kdb/kdb_main.c
2476
addr = symaddr + KDB_PCU(cpu);
kernel/debug/kdb/kdb_main.c
2480
"read, diag=%d\n", cpu, addr, diag);
kernel/debug/kdb/kdb_main.c
2483
kdb_printf("%5d ", cpu);
kernel/debug/kdb/kdb_private.h
212
#define KDB_TSK(cpu) kgdb_info[cpu].task
kernel/debug/kdb/kdb_private.h
213
#define KDB_TSKREGS(cpu) kgdb_info[cpu].debuggerinfo
kernel/debug/kdb/kdb_support.c
517
int cpu;
kernel/debug/kdb/kdb_support.c
528
cpu = kdb_process_cpu(p);
kernel/debug/kdb/kdb_support.c
529
if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) {
kernel/debug/kdb/kdb_support.c
530
if (cpu != kdb_initial_cpu)
kernel/dma/swiotlb.c
1205
int cpu, i;
kernel/dma/swiotlb.c
1211
cpu = raw_smp_processor_id();
kernel/dma/swiotlb.c
1213
index = swiotlb_search_area(dev, cpu, i, orig_addr, alloc_size,
kernel/events/callchain.c
103
for_each_possible_cpu(cpu)
kernel/events/callchain.c
104
kfree(entries->cpu_entries[cpu]);
kernel/events/callchain.c
156
int cpu;
kernel/events/callchain.c
169
cpu = smp_processor_id();
kernel/events/callchain.c
171
return (((void *)entries->cpu_entries[cpu]) +
kernel/events/callchain.c
53
int cpu;
kernel/events/callchain.c
57
for_each_possible_cpu(cpu)
kernel/events/callchain.c
58
kfree(entries->cpu_entries[cpu]);
kernel/events/callchain.c
74
int cpu;
kernel/events/callchain.c
91
for_each_possible_cpu(cpu) {
kernel/events/callchain.c
92
entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
kernel/events/callchain.c
93
cpu_to_node(cpu));
kernel/events/callchain.c
94
if (!entries->cpu_entries[cpu])
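The callchain allocator above is the canonical per-CPU, node-local buffer setup: allocate one buffer per possible CPU on that CPU's node, and on failure free everything allocated so far (kfree(NULL) being a no-op makes the unwind loop safe). In isolation:

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

static void **alloc_percpu_bufs(size_t size)
{
	void **bufs;
	int cpu;

	bufs = kcalloc(nr_cpu_ids, sizeof(*bufs), GFP_KERNEL);
	if (!bufs)
		return NULL;

	for_each_possible_cpu(cpu) {
		bufs[cpu] = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
		if (!bufs[cpu])
			goto fail;
	}
	return bufs;

fail:
	for_each_possible_cpu(cpu)	/* kfree(NULL) is a no-op */
		kfree(bufs[cpu]);
	kfree(bufs);
	return NULL;
}
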
kernel/events/core.c
1091
int cpu, heap_size, ret = 0;
kernel/events/core.c
1100
for_each_possible_cpu(cpu) {
kernel/events/core.c
1101
cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
kernel/events/core.c
1106
GFP_KERNEL, cpu_to_node(cpu));
kernel/events/core.c
11112
static void swevent_hlist_put_cpu(int cpu)
kernel/events/core.c
11114
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
kernel/events/core.c
11126
int cpu;
kernel/events/core.c
11128
for_each_possible_cpu(cpu)
kernel/events/core.c
11129
swevent_hlist_put_cpu(cpu);
kernel/events/core.c
11132
static int swevent_hlist_get_cpu(int cpu)
kernel/events/core.c
11134
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
kernel/events/core.c
11139
cpumask_test_cpu(cpu, perf_online_mask)) {
kernel/events/core.c
11158
int err, cpu, failed_cpu;
kernel/events/core.c
11161
for_each_possible_cpu(cpu) {
kernel/events/core.c
11162
err = swevent_hlist_get_cpu(cpu);
kernel/events/core.c
11164
failed_cpu = cpu;
kernel/events/core.c
11171
for_each_possible_cpu(cpu) {
kernel/events/core.c
11172
if (cpu == failed_cpu)
kernel/events/core.c
11174
swevent_hlist_put_cpu(cpu);
kernel/events/core.c
11365
unsigned int cpu = smp_processor_id();
kernel/events/core.c
11369
perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) {
kernel/events/core.c
11375
perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) {
kernel/events/core.c
11744
int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
kernel/events/core.c
12546
int timer, cpu, ret;
kernel/events/core.c
12564
for_each_online_cpu(cpu) {
kernel/events/core.c
12566
cpc = *per_cpu_ptr(pmu->cpu_pmu_context, cpu);
kernel/events/core.c
12569
cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpc);
kernel/events/core.c
12578
static inline const struct cpumask *perf_scope_cpu_topology_cpumask(unsigned int scope, int cpu)
kernel/events/core.c
12582
return topology_sibling_cpumask(cpu);
kernel/events/core.c
12584
return topology_die_cpumask(cpu);
kernel/events/core.c
12586
return topology_cluster_cpumask(cpu);
kernel/events/core.c
12588
return topology_core_cpumask(cpu);
kernel/events/core.c
12741
int cpu;
kernel/events/core.c
12743
for_each_possible_cpu(cpu) {
kernel/events/core.c
12746
cpc = *per_cpu_ptr(pmu->cpu_pmu_context, cpu);
kernel/events/core.c
12764
int cpu, max = PERF_TYPE_MAX;
kernel/events/core.c
12800
for_each_possible_cpu(cpu) {
kernel/events/core.c
12804
cpu_to_node(cpu));
kernel/events/core.c
12809
*per_cpu_ptr(pmu->cpu_pmu_context, cpu) = cpc;
kernel/events/core.c
12811
__perf_mux_hrtimer_init(cpc, cpu);
kernel/events/core.c
1297
static void __perf_mux_hrtimer_init(struct perf_cpu_pmu_context *cpc, int cpu)
kernel/events/core.c
13049
if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) {
kernel/events/core.c
13052
int cpu;
kernel/events/core.c
13054
cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu);
kernel/events/core.c
13061
cpu = cpumask_any_and(pmu_cpumask, cpumask);
kernel/events/core.c
13062
if (cpu >= nr_cpu_ids)
kernel/events/core.c
13153
struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
kernel/events/core.c
13267
perf_event_alloc(struct perf_event_attr *attr, int cpu,
kernel/events/core.c
13279
if ((unsigned)cpu >= nr_cpu_ids) {
kernel/events/core.c
13280
if (!task || cpu != -1)
kernel/events/core.c
13288
node = (cpu >= 0) ? cpu_to_node(cpu) : -1;
kernel/events/core.c
13324
event->cpu = cpu;
kernel/events/core.c
13658
if (output_event->cpu != event->cpu)
kernel/events/core.c
13664
if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
kernel/events/core.c
13807
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
kernel/events/core.c
13875
if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
kernel/events/core.c
13924
event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
kernel/events/core.c
13992
struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
kernel/events/core.c
14019
if (group_leader->cpu != event->cpu)
kernel/events/core.c
14138
perf_install_in_context(ctx, sibling, sibling->cpu);
kernel/events/core.c
14149
perf_install_in_context(ctx, group_leader, group_leader->cpu);
kernel/events/core.c
14163
perf_install_in_context(ctx, event, event->cpu);
kernel/events/core.c
14216
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
kernel/events/core.c
14239
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
kernel/events/core.c
14296
perf_install_in_context(ctx, event, event->cpu);
kernel/events/core.c
14317
int cpu, struct pmu *pmu,
kernel/events/core.c
14323
perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
kernel/events/core.c
14338
int cpu, struct perf_event *event)
kernel/events/core.c
14345
event->cpu = cpu;
kernel/events/core.c
14351
perf_install_in_context(ctx, event, cpu);
kernel/events/core.c
14360
int cpu, struct pmu *pmu, struct list_head *events)
kernel/events/core.c
14377
__perf_pmu_install_event(pmu, ctx, cpu, event);
kernel/events/core.c
14386
__perf_pmu_install_event(pmu, ctx, cpu, event);
kernel/events/core.c
147
static int cpu_function_call(int cpu, remote_function_f func, void *info)
kernel/events/core.c
14734
parent_event->cpu,
kernel/events/core.c
15030
int cpu;
kernel/events/core.c
15040
for_each_possible_cpu(cpu) {
kernel/events/core.c
15041
swhash = &per_cpu(swevent_htable, cpu);
kernel/events/core.c
15044
INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
kernel/events/core.c
15045
raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
kernel/events/core.c
15047
INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
kernel/events/core.c
15049
cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
kernel/events/core.c
15053
cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
kernel/events/core.c
15059
static void perf_swevent_init_cpu(unsigned int cpu)
kernel/events/core.c
15061
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
kernel/events/core.c
15067
hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
kernel/events/core.c
15088
static void perf_event_clear_cpumask(unsigned int cpu)
kernel/events/core.c
15094
cpumask_clear_cpu(cpu, perf_online_mask);
kernel/events/core.c
15097
const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(scope, cpu);
kernel/events/core.c
15104
if (!cpumask_test_and_clear_cpu(cpu, pmu_cpumask))
kernel/events/core.c
15106
target[scope] = cpumask_any_but(cpumask, cpu);
kernel/events/core.c
15118
perf_pmu_migrate_context(pmu, cpu, target[pmu->scope]);
kernel/events/core.c
15122
static void perf_event_exit_cpu_context(int cpu)
kernel/events/core.c
15133
perf_event_clear_cpumask(cpu);
kernel/events/core.c
15134
cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
kernel/events/core.c
15139
smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
kernel/events/core.c
15146
static void perf_event_exit_cpu_context(int cpu) { }
kernel/events/core.c
15150
static void perf_event_setup_cpumask(unsigned int cpu)
kernel/events/core.c
15165
cpumask_set_cpu(cpu, pmu_cpumask);
kernel/events/core.c
15171
const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(scope, cpu);
kernel/events/core.c
15180
cpumask_set_cpu(cpu, pmu_cpumask);
kernel/events/core.c
15183
cpumask_set_cpu(cpu, perf_online_mask);
kernel/events/core.c
15186
int perf_event_init_cpu(unsigned int cpu)
kernel/events/core.c
15191
perf_swevent_init_cpu(cpu);
kernel/events/core.c
15194
perf_event_setup_cpumask(cpu);
kernel/events/core.c
15195
cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
kernel/events/core.c
15206
int perf_event_exit_cpu(unsigned int cpu)
kernel/events/core.c
15208
perf_event_exit_cpu_context(cpu);
kernel/events/core.c
15215
int cpu;
kernel/events/core.c
15217
for_each_online_cpu(cpu)
kernel/events/core.c
15218
perf_event_exit_cpu(cpu);
kernel/events/core.c
156
smp_call_function_single(cpu, remote_function, &data, 1);
kernel/events/core.c
1776
if (left_cpu < right->cpu)
kernel/events/core.c
1778
if (left_cpu > right->cpu)
kernel/events/core.c
1830
return perf_event_groups_cmp(e->cpu, e->pmu_ctx->pmu, event_cgroup(e),
kernel/events/core.c
1835
int cpu;
kernel/events/core.c
1846
return perf_event_groups_cmp(a->cpu, a->pmu, a->cgroup, b->group_index, b);
kernel/events/core.c
1856
return perf_event_groups_cmp(a->cpu, a->pmu, event_cgroup(b),
kernel/events/core.c
1916
perf_event_groups_first(struct perf_event_groups *groups, int cpu,
kernel/events/core.c
1920
.cpu = cpu,
kernel/events/core.c
1937
.cpu = event->cpu,
kernel/events/core.c
1950
#define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \
kernel/events/core.c
1951
for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
kernel/events/core.c
2442
return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
kernel/events/core.c
3122
int cpu)
kernel/events/core.c
3130
if (event->cpu != -1)
kernel/events/core.c
3131
WARN_ON_ONCE(event->cpu != cpu);
kernel/events/core.c
3160
cpu_function_call(cpu, __perf_install_in_context, event);
kernel/events/core.c
347
cpu_function_call(event->cpu, event_function, &efs);
kernel/events/core.c
4001
struct perf_event_groups *groups, int cpu,
kernel/events/core.c
4016
if (pmu->filter && pmu->filter(pmu, cpu))
kernel/events/core.c
4044
__heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, NULL));
kernel/events/core.c
4048
__heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, css->cgroup));
kernel/events/core.c
4557
key.cpu = smp_processor_id();
kernel/events/core.c
4565
key.cpu = -1;
kernel/events/core.c
4572
key.cpu = smp_processor_id();
kernel/events/core.c
4779
static inline const struct cpumask *perf_scope_cpu_topology_cpumask(unsigned int scope, int cpu);
kernel/events/core.c
4917
event_cpu = __perf_event_read_cpu(event, event->cpu);
kernel/events/core.c
5107
cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
kernel/events/core.c
5180
cpc = *per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
kernel/events/core.c
5314
struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
kernel/events/core.c
5716
(e1->cpu == e2->cpu ||
kernel/events/core.c
5717
e1->cpu == -1 ||
kernel/events/core.c
5718
e2->cpu == -1))
kernel/events/core.c
7322
event->cpu, rb_flags);
kernel/events/core.c
7431
if (event->cpu == -1 && event->attr.inherit)
kernel/events/core.c
7563
int cpu = READ_ONCE(event->oncpu);
kernel/events/core.c
7569
if (cpu < 0)
kernel/events/core.c
7575
if (cpu == smp_processor_id()) {
kernel/events/core.c
7603
irq_work_queue_on(&event->pending_disable_irq, cpu);
kernel/events/core.c
8017
data->cpu_entry.cpu = raw_smp_processor_id();
kernel/events/core.c
9066
int err, cpu;
kernel/events/core.c
9077
cpu = iter->cpu;
kernel/events/core.c
9078
if (cpu == -1)
kernel/events/core.c
9079
cpu = READ_ONCE(iter->oncpu);
kernel/events/core.c
9081
if (cpu == -1)
kernel/events/core.c
9084
err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
kernel/events/core.c
944
t = per_cpu_ptr(event->cgrp->info, event->cpu);
kernel/events/core.c
952
t = per_cpu_ptr(event->cgrp->info, event->cpu);
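perf_event_create_kernel_counter(), whose callers appear in the hw_breakpoint entries below, is the in-kernel way to open a counter pinned to one CPU (a NULL task counts whatever runs there). A sketch using that real signature, with example attr values:

#include <linux/perf_event.h>

static struct perf_event *open_cycles_on_cpu(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};

	/* NULL task: per-CPU counter; NULL handler: no overflow callback */
	return perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
}
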
kernel/events/hw_breakpoint.c
200
int i, cpu, err_cpu;
kernel/events/hw_breakpoint.c
205
for_each_possible_cpu(cpu) {
kernel/events/hw_breakpoint.c
207
struct bp_cpuinfo *info = get_bp_info(cpu, i);
kernel/events/hw_breakpoint.c
225
if (err_cpu == cpu)
kernel/events/hw_breakpoint.c
303
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
kernel/events/hw_breakpoint.c
305
struct bp_slots_histogram *tsk_pinned = &get_bp_info(cpu, type)->tsk_pinned;
kernel/events/hw_breakpoint.c
323
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
kernel/events/hw_breakpoint.c
343
if (iter->cpu >= 0) {
kernel/events/hw_breakpoint.c
344
if (cpu == -1) {
kernel/events/hw_breakpoint.c
347
} else if (cpu != iter->cpu)
kernel/events/hw_breakpoint.c
361
if (bp->cpu >= 0)
kernel/events/hw_breakpoint.c
362
return cpumask_of(bp->cpu);
kernel/events/hw_breakpoint.c
375
int cpu;
kernel/events/hw_breakpoint.c
377
if (bp->hw.target && bp->cpu < 0) {
kernel/events/hw_breakpoint.c
390
for_each_cpu(cpu, cpumask) {
kernel/events/hw_breakpoint.c
391
struct bp_cpuinfo *info = get_bp_info(cpu, type);
kernel/events/hw_breakpoint.c
396
nr += max_task_bp_pinned(cpu, type);
kernel/events/hw_breakpoint.c
398
nr += task_bp_pinned(cpu, bp, type);
kernel/events/hw_breakpoint.c
412
int cpu, next_tsk_pinned;
kernel/events/hw_breakpoint.c
422
struct bp_cpuinfo *info = get_bp_info(bp->cpu, type);
kernel/events/hw_breakpoint.c
476
if (bp->cpu < 0) { /* Case 1: fast path */
kernel/events/hw_breakpoint.c
482
for_each_possible_cpu(cpu) {
kernel/events/hw_breakpoint.c
483
bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
kernel/events/hw_breakpoint.c
487
bp_slots_histogram_add(&get_bp_info(bp->cpu, type)->tsk_pinned,
kernel/events/hw_breakpoint.c
494
bp_slots_histogram_add(&get_bp_info(bp->cpu, type)->tsk_pinned,
kernel/events/hw_breakpoint.c
497
for_each_possible_cpu(cpu) {
kernel/events/hw_breakpoint.c
498
bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
kernel/events/hw_breakpoint.c
507
for_each_cpu(cpu, cpumask) {
kernel/events/hw_breakpoint.c
508
next_tsk_pinned = task_bp_pinned(cpu, bp, type);
kernel/events/hw_breakpoint.c
511
bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
kernel/events/hw_breakpoint.c
62
static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
kernel/events/hw_breakpoint.c
64
return per_cpu_ptr(bp_cpuinfo + type, cpu);
kernel/events/hw_breakpoint.c
849
int cpu;
kernel/events/hw_breakpoint.c
856
for_each_online_cpu(cpu) {
kernel/events/hw_breakpoint.c
857
bp = perf_event_create_kernel_counter(attr, cpu, NULL,
kernel/events/hw_breakpoint.c
864
per_cpu(*cpu_events, cpu) = bp;
kernel/events/hw_breakpoint.c
882
int cpu;
kernel/events/hw_breakpoint.c
884
for_each_possible_cpu(cpu)
kernel/events/hw_breakpoint.c
885
unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));
kernel/events/hw_breakpoint.c
898
int cpu;
kernel/events/hw_breakpoint.c
903
for_each_possible_cpu(cpu) {
kernel/events/hw_breakpoint.c
905
struct bp_cpuinfo *info = get_bp_info(cpu, type);
kernel/events/hw_breakpoint_test.c
108
int cpu;
kernel/events/hw_breakpoint_test.c
112
for_each_online_cpu(cpu) {
kernel/events/hw_breakpoint_test.c
117
return cpu;
kernel/events/hw_breakpoint_test.c
134
int cpu;
kernel/events/hw_breakpoint_test.c
137
for_each_online_cpu(cpu) {
kernel/events/hw_breakpoint_test.c
138
bool do_continue = fill_bp_slots(test, &idx, cpu, NULL, 0);
kernel/events/hw_breakpoint_test.c
140
TEST_EXPECT_NOSPC(register_test_bp(cpu, NULL, idx));
kernel/events/hw_breakpoint_test.c
31
static struct perf_event *register_test_bp(int cpu, struct task_struct *tsk, int idx)
kernel/events/hw_breakpoint_test.c
42
return perf_event_create_kernel_counter(&attr, cpu, tsk, NULL, NULL);
kernel/events/hw_breakpoint_test.c
65
static void fill_one_bp_slot(struct kunit *test, int *id, int cpu, struct task_struct *tsk)
kernel/events/hw_breakpoint_test.c
67
struct perf_event *bp = register_test_bp(cpu, tsk, *id);
kernel/events/hw_breakpoint_test.c
80
static bool fill_bp_slots(struct kunit *test, int *id, int cpu, struct task_struct *tsk, int skip)
kernel/events/hw_breakpoint_test.c
83
fill_one_bp_slot(test, id, cpu, tsk);
kernel/events/internal.h
82
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
kernel/events/ring_buffer.c
681
int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
kernel/events/ring_buffer.c
816
static void *perf_mmap_alloc_page(int cpu)
kernel/events/ring_buffer.c
821
node = (cpu == -1) ? cpu : cpu_to_node(cpu);
kernel/events/ring_buffer.c
836
struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
kernel/events/ring_buffer.c
848
node = (cpu == -1) ? cpu : cpu_to_node(cpu);
kernel/events/ring_buffer.c
853
rb->user_page = perf_mmap_alloc_page(cpu);
kernel/events/ring_buffer.c
858
rb->data_pages[i] = perf_mmap_alloc_page(cpu);
kernel/events/ring_buffer.c
918
struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
kernel/events/ring_buffer.c
928
node = (cpu == -1) ? cpu : cpu_to_node(cpu);
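Both rb_alloc() variants above reuse the same small idiom: an event bound to no particular CPU (cpu == -1) maps directly to NUMA_NO_NODE, which is also -1, so the value can be passed straight to the node-aware allocators. Spelled out:

#include <linux/numa.h>
#include <linux/topology.h>

static int event_node(int cpu)
{
	/* cpu == -1 conveniently doubles as NUMA_NO_NODE (-1) */
	return (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
}
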
kernel/fork.c
168
int cpu;
kernel/fork.c
171
for_each_possible_cpu(cpu)
kernel/fork.c
172
total += per_cpu(process_counts, cpu);
kernel/fork.c
2563
struct task_struct * __init fork_idle(int cpu)
kernel/fork.c
2574
task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
kernel/fork.c
2577
init_idle(task, cpu);
kernel/fork.c
291
static int free_vm_stack_cache(unsigned int cpu)
kernel/fork.c
293
struct vm_struct **cached_vm_stack_areas = per_cpu_ptr(cached_stacks, cpu);
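nr_processes() above is a textbook split counter: each CPU increments its own process_counts slot locally, and readers sum over every possible CPU (not just online ones, since an offlined CPU keeps its residual count). The read side in isolation, with an illustrative counter:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_counts);

static unsigned long demo_read_total(void)
{
	unsigned long total = 0;
	int cpu;

	for_each_possible_cpu(cpu)	/* possible, not online */
		total += per_cpu(demo_counts, cpu);
	return total;
}
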
kernel/futex/core.c
1571
int cpu;
kernel/futex/core.c
1583
for_each_possible_cpu(cpu) {
kernel/futex/core.c
1584
unsigned int *ptr = per_cpu_ptr(mm->futex_ref, cpu);
kernel/irq/chip.c
398
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
kernel/irq/chip.c
404
cpumask_set_cpu(cpu, desc->percpu_enabled);
kernel/irq/chip.c
407
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
kernel/irq/chip.c
413
cpumask_clear_cpu(cpu, desc->percpu_enabled);
kernel/irq/chip.c
901
unsigned int cpu = smp_processor_id();
kernel/irq/chip.c
915
if (cpumask_test_cpu(cpu, action->affinity))
kernel/irq/chip.c
923
bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
kernel/irq/chip.c
926
irq_percpu_disable(desc, cpu);
kernel/irq/chip.c
929
enabled ? " and unmasked" : "", irq, cpu);
kernel/irq/cpuhotplug.c
192
static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
kernel/irq/cpuhotplug.c
203
return cpumask_test_cpu(cpu, hk_mask);
kernel/irq/cpuhotplug.c
206
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
kernel/irq/cpuhotplug.c
212
!irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
kernel/irq/cpuhotplug.c
225
if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
kernel/irq/cpuhotplug.c
23
unsigned int cpu = smp_processor_id();
kernel/irq/cpuhotplug.c
233
int irq_affinity_online_cpu(unsigned int cpu)
kernel/irq/cpuhotplug.c
242
irq_restore_affinity_of_irq(desc, cpu);
kernel/irq/cpuhotplug.c
39
if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
kernel/irq/cpuhotplug.c
46
cpumask_pr_args(m), d->irq, cpu);
kernel/irq/cpuhotplug.c
50
return cpumask_test_cpu(cpu, m);
kernel/irq/internals.h
96
extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
kernel/irq/internals.h
97
extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
kernel/irq/ipi-mux.c
154
int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu))
kernel/irq/ipi-mux.c
27
static void (*ipi_mux_send)(unsigned int cpu);
kernel/irq/ipi-mux.c
59
int cpu;
kernel/irq/ipi-mux.c
61
for_each_cpu(cpu, mask) {
kernel/irq/ipi-mux.c
62
icpu = per_cpu_ptr(ipi_mux_pcpu, cpu);
kernel/irq/ipi-mux.c
87
ipi_mux_send(cpu);
kernel/irq/ipi.c
163
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
kernel/irq/ipi.c
168
if (!data || cpu >= nr_cpu_ids)
kernel/irq/ipi.c
172
if (!ipimask || !cpumask_test_cpu(cpu, ipimask))
kernel/irq/ipi.c
182
data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);
kernel/irq/ipi.c
189
const struct cpumask *dest, unsigned int cpu)
kernel/irq/ipi.c
199
if (cpu >= nr_cpu_ids)
kernel/irq/ipi.c
210
if (!cpumask_test_cpu(cpu, ipimask))
kernel/irq/ipi.c
227
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
kernel/irq/ipi.c
238
if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
kernel/irq/ipi.c
242
chip->ipi_send_mask(data, cpumask_of(cpu));
kernel/irq/ipi.c
248
cpu != data->common->ipi_offset) {
kernel/irq/ipi.c
250
unsigned irq = data->irq + cpu - data->common->ipi_offset;
kernel/irq/ipi.c
254
chip->ipi_send_single(data, cpu);
kernel/irq/ipi.c
273
unsigned int cpu;
kernel/irq/ipi.c
292
for_each_cpu(cpu, dest) {
kernel/irq/ipi.c
293
unsigned irq = base + cpu - data->common->ipi_offset;
kernel/irq/ipi.c
296
chip->ipi_send_single(data, cpu);
kernel/irq/ipi.c
299
for_each_cpu(cpu, dest)
kernel/irq/ipi.c
300
chip->ipi_send_single(data, cpu);
kernel/irq/ipi.c
313
int ipi_send_single(unsigned int virq, unsigned int cpu)
kernel/irq/ipi.c
319
if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
kernel/irq/ipi.c
322
return __ipi_send_single(desc, cpu);
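
A sketch of the caller side of the ipi.c fragments above: firing a reserved IPI at one CPU. It assumes @ipi_virq came from an earlier IPI reservation; per the checks quoted above, an out-of-range CPU or one outside the IPI's mask makes the send fail:

    #include <linux/irq.h>
    #include <linux/printk.h>

    static void kick_cpu(unsigned int ipi_virq, unsigned int cpu)
    {
        /* ipi_send_single() returns 0 on success, -errno otherwise. */
        if (ipi_send_single(ipi_virq, cpu))
            pr_warn("IPI to CPU %u failed\n", cpu);
    }
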
kernel/irq/irqdesc.c
1005
int cpu;
kernel/irq/irqdesc.c
1012
for_each_cpu(cpu, cpumask)
kernel/irq/irqdesc.c
1013
sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu));
kernel/irq/irqdesc.c
255
int cpu;
kernel/irq/irqdesc.c
257
for_each_possible_cpu(cpu) {
kernel/irq/irqdesc.c
258
unsigned int c = irq_desc_kstat_cpu(desc, cpu);
kernel/irq/irqdesc.c
625
int cpu;
kernel/irq/irqdesc.c
630
for_each_possible_cpu(cpu)
kernel/irq/irqdesc.c
631
*per_cpu_ptr(desc->kstat_irqs, cpu) = (struct irqstat) { };
kernel/irq/irqdesc.c
995
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
kernel/irq/irqdesc.c
999
return desc && desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
kernel/irq/matrix.c
138
unsigned int cpu, best_cpu, maxavl = 0;
kernel/irq/matrix.c
143
for_each_cpu(cpu, msk) {
kernel/irq/matrix.c
144
cm = per_cpu_ptr(m->maps, cpu);
kernel/irq/matrix.c
149
best_cpu = cpu;
kernel/irq/matrix.c
159
unsigned int cpu, best_cpu, allocated = UINT_MAX;
kernel/irq/matrix.c
164
for_each_cpu(cpu, msk) {
kernel/irq/matrix.c
165
cm = per_cpu_ptr(m->maps, cpu);
kernel/irq/matrix.c
170
best_cpu = cpu;
kernel/irq/matrix.c
218
unsigned int cpu, failed_cpu;
kernel/irq/matrix.c
220
for_each_cpu(cpu, msk) {
kernel/irq/matrix.c
221
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
kernel/irq/matrix.c
232
trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
kernel/irq/matrix.c
236
failed_cpu = cpu;
kernel/irq/matrix.c
237
for_each_cpu(cpu, msk) {
kernel/irq/matrix.c
238
if (cpu == failed_cpu)
kernel/irq/matrix.c
240
irq_matrix_remove_managed(m, cpumask_of(cpu));
kernel/irq/matrix.c
259
unsigned int cpu;
kernel/irq/matrix.c
261
for_each_cpu(cpu, msk) {
kernel/irq/matrix.c
262
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
kernel/irq/matrix.c
282
trace_irq_matrix_remove_managed(bit, cpu, m, cm);
kernel/irq/matrix.c
295
unsigned int bit, cpu, end;
kernel/irq/matrix.c
301
cpu = matrix_find_best_cpu_managed(m, msk);
kernel/irq/matrix.c
302
if (cpu == UINT_MAX)
kernel/irq/matrix.c
305
cm = per_cpu_ptr(m->maps, cpu);
kernel/irq/matrix.c
316
*mapped_cpu = cpu;
kernel/irq/matrix.c
317
trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
kernel/irq/matrix.c
386
unsigned int cpu, bit;
kernel/irq/matrix.c
396
cpu = matrix_find_best_cpu(m, msk);
kernel/irq/matrix.c
397
if (cpu == UINT_MAX)
kernel/irq/matrix.c
400
cm = per_cpu_ptr(m->maps, cpu);
kernel/irq/matrix.c
410
*mapped_cpu = cpu;
kernel/irq/matrix.c
411
trace_irq_matrix_alloc(bit, cpu, m, cm);
kernel/irq/matrix.c
424
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
kernel/irq/matrix.c
427
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
kernel/irq/matrix.c
447
trace_irq_matrix_free(bit, cpu, m, cm);
kernel/irq/matrix.c
499
int cpu;
kernel/irq/matrix.c
509
for_each_online_cpu(cpu) {
kernel/irq/matrix.c
51
unsigned int cpu, matrix_size = BITS_TO_LONGS(matrix_bits);
kernel/irq/matrix.c
510
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
kernel/irq/matrix.c
513
cpu, cm->available, cm->managed,
kernel/irq/matrix.c
71
for_each_possible_cpu(cpu) {
kernel/irq/matrix.c
72
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
kernel/irq_work.c
137
bool irq_work_queue_on(struct irq_work *work, int cpu)
kernel/irq_work.c
144
WARN_ON_ONCE(cpu_is_offline(cpu));
kernel/irq_work.c
153
if (cpu != smp_processor_id()) {
kernel/irq_work.c
165
if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
kernel/irq_work.c
168
work = &per_cpu(irq_work_wakeup, cpu);
kernel/irq_work.c
173
__smp_call_single_queue(cpu, &work->node.llist);
kernel/irq_work.c
303
static void run_irq_workd(unsigned int cpu)
kernel/irq_work.c
308
static void irq_workd_setup(unsigned int cpu)
kernel/irq_work.c
49
static int irq_workd_should_run(unsigned int cpu)
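
irq_work_queue_on(), quoted above, lets one CPU run a callback in interrupt context on another. A minimal sketch with hypothetical names, using the standard init-then-queue pair:

    #include <linux/irq_work.h>
    #include <linux/smp.h>
    #include <linux/printk.h>

    static void demo_irq_work_fn(struct irq_work *work)
    {
        pr_info("irq_work ran on CPU %d\n", smp_processor_id());
    }

    static DEFINE_IRQ_WORK(demo_work, demo_irq_work_fn);

    /* Queue the work on @cpu; as the WARN_ON_ONCE in the fragment above
     * notes, the target must be online. Returns false if already queued. */
    static bool demo_kick(int cpu)
    {
        return irq_work_queue_on(&demo_work, cpu);
    }
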
kernel/kcov.c
1122
int cpu;
kernel/kcov.c
1124
for_each_possible_cpu(cpu) {
kernel/kcov.c
1126
sizeof(unsigned long), cpu_to_node(cpu));
kernel/kcov.c
1129
per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
kernel/kcsan/core.c
796
int cpu;
kernel/kcsan/core.c
800
for_each_possible_cpu(cpu)
kernel/kcsan/core.c
801
per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
kernel/kthread.c
1137
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
kernel/kthread.c
1142
worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu);
kernel/kthread.c
1144
kthread_bind(worker->task, cpu);
kernel/kthread.c
58
unsigned int cpu;
kernel/kthread.c
580
static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
kernel/kthread.c
582
__kthread_bind_mask(p, cpumask_of(cpu), state);
kernel/kthread.c
601
void kthread_bind(struct task_struct *p, unsigned int cpu)
kernel/kthread.c
604
__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
kernel/kthread.c
620
void *data, unsigned int cpu,
kernel/kthread.c
625
p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
kernel/kthread.c
626
cpu);
kernel/kthread.c
629
kthread_bind(p, cpu);
kernel/kthread.c
631
to_kthread(p)->cpu = cpu;
kernel/kthread.c
636
void kthread_set_per_cpu(struct task_struct *k, int cpu)
kernel/kthread.c
644
if (cpu < 0) {
kernel/kthread.c
649
kthread->cpu = cpu;
kernel/kthread.c
681
__kthread_bind(k, kthread->cpu, TASK_PARKED);
kernel/kthread.c
946
static int kthreads_online_cpu(unsigned int cpu)
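
The kthread.c fragments above show the create-then-bind sequence. A sketch of pinning a kthread to a CPU, with a hypothetical thread function; as in the quoted kthread_create_on_cpu(), the bind happens before the first wakeup:

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/err.h>
    #include <linux/topology.h>

    static int demo_thread_fn(void *data)
    {
        while (!kthread_should_stop()) {
            set_current_state(TASK_INTERRUPTIBLE);
            schedule();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
    }

    static struct task_struct *demo_spawn_on(int cpu)
    {
        struct task_struct *t;

        /* Allocate near the CPU's node, bind, then start. */
        t = kthread_create_on_node(demo_thread_fn, NULL, cpu_to_node(cpu),
                                   "demo/%d", cpu);
        if (!IS_ERR(t)) {
            kthread_bind(t, cpu);
            wake_up_process(t);
        }
        return t;
    }
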
kernel/livepatch/transition.c
128
for_each_possible_cpu(cpu) {
kernel/livepatch/transition.c
129
task = idle_task(cpu);
kernel/livepatch/transition.c
432
unsigned int cpu;
kernel/livepatch/transition.c
458
for_each_possible_cpu(cpu) {
kernel/livepatch/transition.c
459
task = idle_task(cpu);
kernel/livepatch/transition.c
460
if (cpu_online(cpu)) {
kernel/livepatch/transition.c
464
wake_up_if_idle(cpu);
kernel/livepatch/transition.c
512
unsigned int cpu;
kernel/livepatch/transition.c
536
for_each_possible_cpu(cpu) {
kernel/livepatch/transition.c
537
task = idle_task(cpu);
kernel/livepatch/transition.c
555
unsigned int cpu;
kernel/livepatch/transition.c
587
for_each_possible_cpu(cpu) {
kernel/livepatch/transition.c
588
task = idle_task(cpu);
kernel/livepatch/transition.c
629
unsigned int cpu;
kernel/livepatch/transition.c
647
for_each_possible_cpu(cpu)
kernel/livepatch/transition.c
648
clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
kernel/livepatch/transition.c
710
unsigned int cpu;
kernel/livepatch/transition.c
719
for_each_possible_cpu(cpu)
kernel/livepatch/transition.c
720
klp_update_patch_state(idle_task(cpu));
kernel/livepatch/transition.c
86
unsigned int cpu;
kernel/locking/lock_events.c
101
unsigned long *ptr = per_cpu_ptr(lockevents, cpu);
kernel/locking/lock_events.c
65
int cpu, id, len;
kernel/locking/lock_events.c
76
for_each_possible_cpu(cpu)
kernel/locking/lock_events.c
77
sum += per_cpu(lockevents[id], cpu);
kernel/locking/lock_events.c
91
int cpu;
kernel/locking/lock_events.c
99
for_each_possible_cpu(cpu) {
kernel/locking/lockdep.c
302
int cpu, i;
kernel/locking/lockdep.c
305
for_each_possible_cpu(cpu) {
kernel/locking/lockdep.c
307
&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
kernel/locking/lockdep.c
328
int cpu;
kernel/locking/lockdep.c
330
for_each_possible_cpu(cpu) {
kernel/locking/lockdep.c
332
&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
kernel/locking/lockdep.c
4603
void lockdep_cleanup_dead_cpu(unsigned int cpu, struct task_struct *idle)
kernel/locking/lockdep.c
4608
if (unlikely(per_cpu(hardirqs_enabled, cpu))) {
kernel/locking/lockdep.c
4609
pr_warn("CPU %u left hardirqs enabled!", cpu);
kernel/locking/lockdep.c
4613
per_cpu(hardirqs_enabled, cpu) = 0;
kernel/locking/lockdep.c
4947
lock->cpu = raw_smp_processor_id();
kernel/locking/lockdep.c
6075
if (lock->cpu != smp_processor_id())
kernel/locking/lockdep.c
6087
int i, cpu;
kernel/locking/lockdep.c
6109
cpu = smp_processor_id();
kernel/locking/lockdep.c
6123
if (lock->cpu != cpu)
kernel/locking/lockdep.c
6126
lock->cpu = cpu;
kernel/locking/lockdep_internals.h
250
int idx, cpu;
kernel/locking/lockdep_internals.h
254
for_each_possible_cpu(cpu)
kernel/locking/lockdep_internals.h
255
ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
kernel/locking/osq_lock.c
102
node->cpu = curr;
kernel/locking/osq_lock.c
18
int cpu; /* encoded CPU # + 1 value */
kernel/locking/osq_lock.c
192
next = osq_wait_next(lock, node, prev->cpu);
kernel/locking/osq_lock.c
34
return node->cpu - 1;
kernel/locking/percpu-rwsem.c
191
int cpu; \
kernel/locking/percpu-rwsem.c
193
for_each_possible_cpu(cpu) \
kernel/locking/percpu-rwsem.c
194
__sum += per_cpu(var, cpu); \
kernel/locking/qspinlock.h
52
static inline __pure u32 encode_tail(int cpu, int idx)
kernel/locking/qspinlock.h
56
tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
kernel/locking/qspinlock.h
65
int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
kernel/locking/qspinlock.h
68
return per_cpu_ptr(&qnodes[idx].mcs, cpu);
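
A worked round trip implied directly by the two qspinlock.h fragments above:

    /* encode:  tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
     * decode:  cpu  = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
     *
     * Decoding encode_tail(cpu, idx) recovers cpu, because the +1 bias
     * is undone by the -1. The bias exists so that a tail value of 0
     * can mean "no tail": e.g. cpu == 5 stores 6 in the tail field and
     * decodes back to 5, and no valid CPU ever encodes to 0. */
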
kernel/locking/qspinlock_paravirt.h
281
pn->cpu = smp_processor_id();
kernel/locking/qspinlock_paravirt.h
52
int cpu;
kernel/locking/qspinlock_paravirt.h
539
pv_kick(node->cpu);
kernel/locking/qspinlock_stat.h
108
static inline void __pv_kick(int cpu)
kernel/locking/qspinlock_stat.h
112
per_cpu(pv_kick_time, cpu) = start;
kernel/locking/qspinlock_stat.h
113
pv_kick(cpu);
kernel/locking/qspinlock_stat.h
40
int cpu, id, len;
kernel/locking/qspinlock_stat.h
51
for_each_possible_cpu(cpu) {
kernel/locking/qspinlock_stat.h
52
sum += per_cpu(lockevents[id], cpu);
kernel/locking/qspinlock_stat.h
60
kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
kernel/locking/qspinlock_stat.h
64
kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
kernel/module/main.c
494
int cpu;
kernel/module/main.c
496
for_each_possible_cpu(cpu)
kernel/module/main.c
497
memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
kernel/module/main.c
503
unsigned int cpu;
kernel/module/main.c
511
for_each_possible_cpu(cpu) {
kernel/module/main.c
512
void *start = per_cpu_ptr(mod->percpu, cpu);
kernel/padata.c
248
static struct padata_priv *padata_find_next(struct parallel_data *pd, int cpu,
kernel/padata.c
254
reorder = per_cpu_ptr(pd->reorder_list, cpu);
kernel/padata.c
275
pd->cpu = cpu;
kernel/padata.c
285
int cpu;
kernel/padata.c
288
cpu = pd->cpu;
kernel/padata.c
297
cpu = cpumask_first(pd->cpumask.pcpu);
kernel/padata.c
299
cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu);
kernel/padata.c
313
padata = padata_find_next(pd, cpu, processed);
kernel/padata.c
512
int cpu;
kernel/padata.c
515
for_each_cpu(cpu, pd->cpumask.cbcpu) {
kernel/padata.c
516
squeue = per_cpu_ptr(pd->squeue, cpu);
kernel/padata.c
527
int cpu;
kernel/padata.c
530
for_each_cpu(cpu, pd->cpumask.pcpu) {
kernel/padata.c
531
list = per_cpu_ptr(pd->reorder_list, cpu);
kernel/padata.c
569
pd->cpu = cpumask_first(pd->cpumask.pcpu);
kernel/padata.c
733
static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
kernel/padata.c
737
if (cpumask_test_cpu(cpu, cpu_online_mask)) {
kernel/padata.c
748
static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
kernel/padata.c
752
if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
kernel/padata.c
763
static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
kernel/padata.c
765
return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
kernel/padata.c
766
cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
kernel/padata.c
769
static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
kernel/padata.c
775
if (!pinst_has_cpu(pinst, cpu))
kernel/padata.c
779
ret = __padata_add_cpu(pinst, cpu);
kernel/padata.c
784
static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
kernel/padata.c
790
if (!pinst_has_cpu(pinst, cpu))
kernel/padata.c
794
ret = __padata_remove_cpu(pinst, cpu);
kernel/panic.c
313
int cpu;
kernel/panic.c
318
if (kstrtoint(str, 0, &cpu) || cpu < 0 || cpu >= nr_cpu_ids) {
kernel/panic.c
323
panic_force_cpu = cpu;
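
The panic.c fragment above parses and range-checks a CPU number from a boot parameter. The same kstrtoint-with-range-check idiom in isolation, with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/cpumask.h>
    #include <linux/errno.h>

    /* Hypothetical parser: accept "0" .. "nr_cpu_ids - 1", reject
     * everything else, mirroring the check quoted above. */
    static int demo_parse_cpu(const char *str, int *out)
    {
        int cpu;

        if (kstrtoint(str, 0, &cpu) || cpu < 0 || cpu >= nr_cpu_ids)
            return -EINVAL;
        *out = cpu;
        return 0;
    }
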
kernel/power/em_netlink.c
53
int cpu;
kernel/power/em_netlink.c
64
for_each_cpu(cpu, cpumask) {
kernel/power/em_netlink.c
66
cpu, DEV_ENERGYMODEL_A_PERF_DOMAIN_PAD))
kernel/power/energy_model.c
233
int i, cpu;
kernel/power/energy_model.c
239
cpu = cpumask_first(em_span_cpus(pd));
kernel/power/energy_model.c
247
max_cap = (u64) arch_scale_cpu_capacity(cpu);
kernel/power/energy_model.c
425
int cpu, ret, num_cpus, id;
kernel/power/energy_model.c
469
for_each_cpu(cpu, cpus) {
kernel/power/energy_model.c
470
cpu_dev = get_cpu_device(cpu);
kernel/power/energy_model.c
492
int i, cpu;
kernel/power/energy_model.c
498
cpu = cpumask_first_and(em_span_cpus(pd), cpu_active_mask);
kernel/power/energy_model.c
499
if (cpu >= nr_cpu_ids) {
kernel/power/energy_model.c
504
policy = cpufreq_cpu_get(cpu);
kernel/power/energy_model.c
553
struct em_perf_domain *em_cpu_get(int cpu)
kernel/power/energy_model.c
557
cpu_dev = get_cpu_device(cpu);
kernel/power/energy_model.c
619
int cpu, ret;
kernel/power/energy_model.c
642
for_each_cpu(cpu, cpus) {
kernel/power/energy_model.c
643
if (em_cpu_get(cpu)) {
kernel/power/energy_model.c
644
dev_err(dev, "EM: exists for CPU%d\n", cpu);
kernel/power/energy_model.c
653
cap = arch_scale_cpu_capacity(cpu);
kernel/power/energy_model.c
803
static void em_adjust_new_capacity(unsigned int cpu, struct device *dev,
kernel/power/energy_model.c
806
unsigned long cpu_capacity = arch_scale_cpu_capacity(cpu);
kernel/power/energy_model.c
819
pr_debug("updating cpu%d cpu_cap=%lu old capacity=%lu\n", cpu,
kernel/power/energy_model.c
841
void em_adjust_cpu_capacity(unsigned int cpu)
kernel/power/energy_model.c
843
struct device *dev = get_cpu_device(cpu);
kernel/power/energy_model.c
848
em_adjust_new_capacity(cpu, dev, pd);
kernel/power/energy_model.c
854
int cpu, failed_cpus = 0;
kernel/power/energy_model.c
862
for_each_possible_cpu(cpu) {
kernel/power/energy_model.c
867
if (cpumask_test_cpu(cpu, cpu_done_mask))
kernel/power/energy_model.c
870
policy = cpufreq_cpu_get(cpu);
kernel/power/energy_model.c
877
dev = get_cpu_device(cpu);
kernel/power/energy_model.c
885
em_adjust_new_capacity(cpu, dev, pd);
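
A sketch of the lookup that the energy_model.c fragments above perform before registering or adjusting a perf domain; em_cpu_get() returns NULL when no energy model exists for the CPU:

    #include <linux/energy_model.h>

    static bool demo_cpu_has_em(int cpu)
    {
        return em_cpu_get(cpu) != NULL;
    }
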
kernel/printk/internal.h
294
int cpu;
kernel/printk/nbcon.c
246
unsigned int cpu = smp_processor_id();
kernel/printk/nbcon.c
286
new.cpu = cpu;
kernel/printk/nbcon.c
354
unsigned int cpu = smp_processor_id();
kernel/printk/nbcon.c
383
new.cpu = cpu;
kernel/printk/nbcon.c
437
unsigned int cpu = smp_processor_id();
kernel/printk/nbcon.c
461
if (cur->cpu == cpu)
kernel/printk/nbcon.c
553
unsigned int cpu = smp_processor_id();
kernel/printk/nbcon.c
573
new.cpu = cpu;
kernel/printk/nbcon.c
667
if (cur->cpu != expected_cpu)
kernel/printk/nbcon.c
679
unsigned int cpu = smp_processor_id();
kernel/printk/nbcon.c
687
if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
kernel/printk/nbcon.c
732
unsigned int cpu = smp_processor_id();
kernel/printk/nbcon.c
735
if (!nbcon_owner_matches(cur, cpu, ctxt->prio))
kernel/printk/nbcon.c
953
wctxt->cpu = pmsg->cpu;
kernel/printk/printk.c
2256
pmsg->cpu = printk_info_get_cpu(info);
kernel/printk/printk.c
2864
static int console_cpu_notify(unsigned int cpu)
kernel/printk/printk.c
5100
int cpu;
kernel/printk/printk.c
5103
cpu = smp_processor_id();
kernel/printk/printk.c
5125
cpu); /* LMM(__printk_cpu_sync_try_get:A) */
kernel/printk/printk.c
5133
} else if (old == cpu) {
kernel/printk/printk_ringbuffer_kunit_test.c
260
int cpu, reader_cpu;
kernel/printk/printk_ringbuffer_kunit_test.c
293
for_each_cpu(cpu, test_cpus) {
kernel/printk/printk_ringbuffer_kunit_test.c
297
thread_data->num = cpu;
kernel/printk/printk_ringbuffer_kunit_test.c
299
thread = kthread_run_on_cpu(prbtest_writer, thread_data, cpu,
kernel/rcu/rcu.h
349
extern void resched_cpu(int cpu);
kernel/rcu/rcu.h
425
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
kernel/rcu/rcu.h
427
(cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
kernel/rcu/rcu.h
428
(cpu) <= rnp->grphi; \
kernel/rcu/rcu.h
429
(cpu) = cpumask_next((cpu), cpu_possible_mask))
kernel/rcu/rcu.h
434
#define rcu_find_next_bit(rnp, cpu, mask) \
kernel/rcu/rcu.h
435
((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
kernel/rcu/rcu.h
436
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
kernel/rcu/rcu.h
438
(cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
kernel/rcu/rcu.h
439
(cpu) <= rnp->grphi; \
kernel/rcu/rcu.h
440
(cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
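
A usage sketch for the leaf-node iteration macros just quoted. It is only meaningful inside kernel/rcu, where struct rcu_node and these macros are visible; the macros translate node-relative bit positions in @mask to absolute CPU numbers via rnp->grplo:

    #include <linux/printk.h>

    static void demo_walk_leaf(struct rcu_node *rnp, unsigned long mask)
    {
        int cpu;

        for_each_leaf_node_cpu_mask(rnp, cpu, mask)
            pr_info("CPU %d has its bit set in this leaf\n", cpu);
    }
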
kernel/rcu/rcu.h
528
static inline bool rcu_cpu_online(int cpu) { return true; }
kernel/rcu/rcu.h
538
bool rcu_cpu_online(int cpu);
kernel/rcu/rcu.h
593
int rcu_get_gpwrap_count(int cpu);
kernel/rcu/rcu.h
612
static inline int rcu_get_gpwrap_count(int cpu) { return 0; }
kernel/rcu/rcu.h
634
static inline bool rcu_watching_zero_in_eqs(int cpu, int *vp) { return false; }
kernel/rcu/rcu.h
645
bool rcu_watching_zero_in_eqs(int cpu, int *vp);
kernel/rcu/rcu.h
683
static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
kernel/rcu/rcu.h
685
bool rcu_cpu_beenfullyonline(int cpu);
kernel/rcu/rcutorture.c
1021
int cpu;
kernel/rcu/rcutorture.c
1023
for_each_online_cpu(cpu) {
kernel/rcu/rcutorture.c
1024
torture_sched_setaffinity(current->pid, cpumask_of(cpu), true);
kernel/rcu/rcutorture.c
1025
WARN_ON_ONCE(raw_smp_processor_id() != cpu);
kernel/rcu/rcutorture.c
1241
int cpu;
kernel/rcu/rcutorture.c
1255
if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
kernel/rcu/rcutorture.c
1261
if (cpu < 0)
kernel/rcu/rcutorture.c
1264
pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
kernel/rcu/rcutorture.c
2149
int cpu = raw_smp_processor_id();
kernel/rcu/rcutorture.c
2150
rtrsp->rt_cpu = cpu;
kernel/rcu/rcutorture.c
2152
rtrsp[-1].rt_end_cpu = cpu;
kernel/rcu/rcutorture.c
2536
int cpu = raw_smp_processor_id();
kernel/rcu/rcutorture.c
2544
rtorsup->rtorsu_nmigrates + (cpu != rtorsup->rtorsu_cpu));
kernel/rcu/rcutorture.c
2669
int cpu;
kernel/rcu/rcutorture.c
2681
for_each_possible_cpu(cpu)
kernel/rcu/rcutorture.c
2682
maxcpu = cpu;
kernel/rcu/rcutorture.c
2692
cpu = (r >> 1) % (maxcpu + 1);
kernel/rcu/rcutorture.c
2694
rcu_nocb_cpu_offload(cpu);
kernel/rcu/rcutorture.c
2697
rcu_nocb_cpu_deoffload(cpu);
kernel/rcu/rcutorture.c
2721
int cpu;
kernel/rcu/rcutorture.c
2735
for_each_possible_cpu(cpu) {
kernel/rcu/rcutorture.c
2737
pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
kernel/rcu/rcutorture.c
2738
batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
kernel/rcu/rcutorture.c
2741
n_gpwraps += cur_ops->get_gpwrap_count(cpu);
kernel/rcu/rcutorture.c
2949
static int rcutorture_booster_cleanup(unsigned int cpu)
kernel/rcu/rcutorture.c
2953
if (boost_tasks[cpu] == NULL)
kernel/rcu/rcutorture.c
2956
t = boost_tasks[cpu];
kernel/rcu/rcutorture.c
2957
boost_tasks[cpu] = NULL;
kernel/rcu/rcutorture.c
2966
static int rcutorture_booster_init(unsigned int cpu)
kernel/rcu/rcutorture.c
2970
if (boost_tasks[cpu] != NULL)
kernel/rcu/rcutorture.c
2980
t = per_cpu(ksoftirqd, cpu);
kernel/rcu/rcutorture.c
2986
t = per_cpu(ktimerd, cpu);
kernel/rcu/rcutorture.c
2998
boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
kernel/rcu/rcutorture.c
2999
cpu, "rcu_torture_boost_%u");
kernel/rcu/rcutorture.c
3000
if (IS_ERR(boost_tasks[cpu])) {
kernel/rcu/rcutorture.c
3001
retval = PTR_ERR(boost_tasks[cpu]);
kernel/rcu/rcutorture.c
3004
boost_tasks[cpu] = NULL;
kernel/rcu/rcutorture.c
3907
int cpu;
kernel/rcu/rcutorture.c
3913
cpu = smp_processor_id();
kernel/rcu/rcutorture.c
3914
dumpcpu = cpu + 1;
kernel/rcu/rcutorture.c
3917
pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu);
kernel/rcu/rcutorture.c
3930
int cpu = -1;
kernel/rcu/rcutorture.c
3938
cpu = cpumask_next(cpu, cpu_online_mask);
kernel/rcu/rcutorture.c
3939
if (cpu >= nr_cpu_ids)
kernel/rcu/rcutorture.c
3940
cpu = cpumask_next(-1, cpu_online_mask);
kernel/rcu/rcutorture.c
3941
WARN_ON_ONCE(cpu >= nr_cpu_ids);
kernel/rcu/rcutorture.c
3943
if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false))
kernel/rcu/rcutorture.c
428
int (*get_gpwrap_count)(int cpu);
kernel/rcu/rcutorture.c
4445
int cpu;
kernel/rcu/rcutorture.c
4538
for_each_possible_cpu(cpu) {
kernel/rcu/rcutorture.c
4540
per_cpu(rcu_torture_count, cpu)[i] = 0;
kernel/rcu/rcutorture.c
4541
per_cpu(rcu_torture_batch, cpu)[i] = 0;
kernel/rcu/srcutree.c
107
sdp->cpu = cpu;
kernel/rcu/srcutree.c
132
int cpu;
kernel/rcu/srcutree.c
1698
int cpu;
kernel/rcu/srcutree.c
1719
for_each_possible_cpu(cpu)
kernel/rcu/srcutree.c
1720
srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
kernel/rcu/srcutree.c
185
for_each_possible_cpu(cpu) {
kernel/rcu/srcutree.c
186
sdp = per_cpu_ptr(ssp->sda, cpu);
kernel/rcu/srcutree.c
187
sdp->mynode = &snp_first[cpu / levelspread[level]];
kernel/rcu/srcutree.c
190
snp->grplo = cpu;
kernel/rcu/srcutree.c
191
snp->grphi = cpu;
kernel/rcu/srcutree.c
193
sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
kernel/rcu/srcutree.c
2034
int cpu;
kernel/rcu/srcutree.c
2051
for_each_possible_cpu(cpu) {
kernel/rcu/srcutree.c
2057
sdp = per_cpu_ptr(ssp->sda, cpu);
kernel/rcu/srcutree.c
2073
cpu, c0, c1,
kernel/rcu/srcutree.c
470
int cpu;
kernel/rcu/srcutree.c
474
for_each_possible_cpu(cpu) {
kernel/rcu/srcutree.c
475
struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
kernel/rcu/srcutree.c
494
int cpu;
kernel/rcu/srcutree.c
498
for_each_possible_cpu(cpu) {
kernel/rcu/srcutree.c
499
struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
kernel/rcu/srcutree.c
612
int cpu;
kernel/rcu/srcutree.c
615
for_each_possible_cpu(cpu) {
kernel/rcu/srcutree.c
616
struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
kernel/rcu/srcutree.c
711
int cpu;
kernel/rcu/srcutree.c
725
for_each_possible_cpu(cpu) {
kernel/rcu/srcutree.c
726
struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
kernel/rcu/srcutree.c
780
WARN_ONCE(old_read_flavor != read_flavor, "CPU %d old state %d new state %d\n", sdp->cpu, old_read_flavor, read_flavor);
kernel/rcu/srcutree.c
866
queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
kernel/rcu/srcutree.c
873
queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
kernel/rcu/srcutree.c
898
int cpu;
kernel/rcu/srcutree.c
900
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
kernel/rcu/srcutree.c
901
if (!(mask & (1UL << (cpu - snp->grplo))))
kernel/rcu/srcutree.c
903
srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
kernel/rcu/srcutree.c
91
int cpu;
kernel/rcu/srcutree.c
921
int cpu;
kernel/rcu/srcutree.c
98
for_each_possible_cpu(cpu) {
kernel/rcu/srcutree.c
981
for_each_possible_cpu(cpu) {
kernel/rcu/srcutree.c
982
sdp = per_cpu_ptr(ssp->sda, cpu);
kernel/rcu/srcutree.c
99
sdp = per_cpu_ptr(ssp->sda, cpu);
kernel/rcu/tasks.h
1002
if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
kernel/rcu/tasks.h
1025
int cpu;
kernel/rcu/tasks.h
1051
for_each_possible_cpu(cpu) {
kernel/rcu/tasks.h
1053
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
kernel/rcu/tasks.h
1090
int cpu;
kernel/rcu/tasks.h
1109
cpu = task_cpu(t);
kernel/rcu/tasks.h
1112
"N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
kernel/rcu/tasks.h
1114
data_race(t->rcu_tasks_idle_cpu), cpu);
kernel/rcu/tasks.h
248
int cpu;
kernel/rcu/tasks.h
266
for_each_possible_cpu(cpu) {
kernel/rcu/tasks.h
267
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
kernel/rcu/tasks.h
270
if (cpu)
kernel/rcu/tasks.h
275
rtpcp->cpu = cpu;
kernel/rcu/tasks.h
285
maxcpu = cpu;
kernel/rcu/tasks.h
421
int cpu;
kernel/rcu/tasks.h
436
for_each_possible_cpu(cpu) {
kernel/rcu/tasks.h
437
if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
kernel/rcu/tasks.h
439
rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
kernel/rcu/tasks.h
457
int cpu;
kernel/rcu/tasks.h
467
for (cpu = 0; cpu < dequeue_limit; cpu++) {
kernel/rcu/tasks.h
468
if (!cpu_possible(cpu))
kernel/rcu/tasks.h
470
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
kernel/rcu/tasks.h
480
if (cpu > 0)
kernel/rcu/tasks.h
52
int cpu;
kernel/rcu/tasks.h
522
for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
kernel/rcu/tasks.h
523
if (!cpu_possible(cpu))
kernel/rcu/tasks.h
525
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
kernel/rcu/tasks.h
550
if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
kernel/rcu/tasks.h
551
cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
kernel/rcu/tasks.h
556
if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
kernel/rcu/tasks.h
557
cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
kernel/rcu/tasks.h
631
int cpu;
kernel/rcu/tasks.h
634
for_each_possible_cpu(cpu) {
kernel/rcu/tasks.h
635
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
kernel/rcu/tasks.h
720
int cpu;
kernel/rcu/tasks.h
725
for_each_possible_cpu(cpu) {
kernel/rcu/tasks.h
726
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
kernel/rcu/tasks.h
756
int cpu;
kernel/rcu/tasks.h
771
for_each_possible_cpu(cpu) {
kernel/rcu/tasks.h
773
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
kernel/rcu/tasks.h
776
cpumask_set_cpu(cpu, cm);
kernel/rcu/tasks.h
780
pr_cont(" %d:%ld", cpu, n);
kernel/rcu/tasks.h
976
int cpu;
kernel/rcu/tasks.h
999
cpu = task_cpu(t);
kernel/rcu/tree.c
2152
int cpu;
kernel/rcu/tree.c
2205
for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
kernel/rcu/tree.c
2206
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
229
static long rcu_get_n_cbs_cpu(int cpu)
kernel/rcu/tree.c
231
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
2449
WARN_ON_ONCE(rdp->cpu != smp_processor_id());
kernel/rcu/tree.c
2734
int cpu;
kernel/rcu/tree.c
2761
for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
kernel/rcu/tree.c
2765
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
2782
for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
kernel/rcu/tree.c
2783
resched_cpu(cpu);
kernel/rcu/tree.c
2881
queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
kernel/rcu/tree.c
2925
static void rcu_cpu_kthread_park(unsigned int cpu)
kernel/rcu/tree.c
2927
per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
kernel/rcu/tree.c
2930
static int rcu_cpu_kthread_should_run(unsigned int cpu)
kernel/rcu/tree.c
2940
static void rcu_cpu_kthread(unsigned int cpu)
kernel/rcu/tree.c
2987
int cpu;
kernel/rcu/tree.c
2989
for_each_possible_cpu(cpu)
kernel/rcu/tree.c
2990
per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
kernel/rcu/tree.c
325
return snap != ct_rcu_watching_cpu_acquire(rdp->cpu);
kernel/rcu/tree.c
332
bool rcu_watching_zero_in_eqs(int cpu, int *vp)
kernel/rcu/tree.c
337
snap = ct_rcu_watching_cpu(cpu) & ~CT_RCU_WATCHING;
kernel/rcu/tree.c
344
return snap == ct_rcu_watching_cpu(cpu);
kernel/rcu/tree.c
3722
static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
kernel/rcu/tree.c
3724
trace_rcu_barrier(rcu_state.name, s, cpu,
kernel/rcu/tree.c
3793
uintptr_t cpu = (uintptr_t)cpu_in;
kernel/rcu/tree.c
3794
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
3797
WARN_ON_ONCE(cpu != rdp->cpu);
kernel/rcu/tree.c
3798
WARN_ON_ONCE(cpu != smp_processor_id());
kernel/rcu/tree.c
3819
uintptr_t cpu;
kernel/rcu/tree.c
3860
for_each_possible_cpu(cpu) {
kernel/rcu/tree.c
3861
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
3869
rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
kernel/rcu/tree.c
3876
rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
kernel/rcu/tree.c
3880
if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
kernel/rcu/tree.c
3885
rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
kernel/rcu/tree.c
3902
for_each_possible_cpu(cpu) {
kernel/rcu/tree.c
3903
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
4010
bool rcu_cpu_online(int cpu)
kernel/rcu/tree.c
4012
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
4141
rcu_boot_init_percpu_data(int cpu)
kernel/rcu/tree.c
4144
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
4147
rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
kernel/rcu/tree.c
4150
WARN_ON_ONCE(rcu_watching_snap_in_eqs(ct_rcu_watching_cpu(cpu)));
kernel/rcu/tree.c
4157
rdp->cpu = cpu;
kernel/rcu/tree.c
4164
int cpu;
kernel/rcu/tree.c
4169
for_each_leaf_node_possible_cpu(rnp, cpu)
kernel/rcu/tree.c
4170
cpumask_set_cpu(cpu, affinity);
kernel/rcu/tree.c
4240
int rcutree_prepare_cpu(unsigned int cpu)
kernel/rcu/tree.c
4243
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
kernel/rcu/tree.c
4244
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
4281
rcu_spawn_cpu_nocb_kthread(cpu);
kernel/rcu/tree.c
4291
bool rcu_cpu_beenfullyonline(int cpu)
kernel/rcu/tree.c
4293
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
4302
int rcutree_online_cpu(unsigned int cpu)
kernel/rcu/tree.c
4308
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
4335
void rcutree_report_cpu_starting(unsigned int cpu)
kernel/rcu/tree.c
4343
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
4447
void rcutree_migrate_callbacks(int cpu)
kernel/rcu/tree.c
4452
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
4494
cpu, rcu_segcblist_n_cbs(&rdp->cblist),
kernel/rcu/tree.c
4504
int rcutree_dead_cpu(unsigned int cpu)
kernel/rcu/tree.c
4517
int rcutree_dying_cpu(unsigned int cpu)
kernel/rcu/tree.c
4520
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
4533
int rcutree_offline_cpu(unsigned int cpu)
kernel/rcu/tree.c
4539
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
4876
int cpu = smp_processor_id();
kernel/rcu/tree.c
4896
rcutree_prepare_cpu(cpu);
kernel/rcu/tree.c
4897
rcutree_report_cpu_starting(cpu);
kernel/rcu/tree.c
4898
rcutree_online_cpu(cpu);
kernel/rcu/tree.c
673
if (!tick_nohz_full_cpu(rdp->cpu) ||
kernel/rcu/tree.c
692
tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
kernel/rcu/tree.c
726
if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
kernel/rcu/tree.c
727
tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
kernel/rcu/tree.c
767
int cpu;
kernel/rcu/tree.c
770
cpu = task_cpu(t);
kernel/rcu/tree.c
773
smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
kernel/rcu/tree.c
832
rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu);
kernel/rcu/tree.c
834
trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
kernel/rcu/tree.c
84
int rcu_get_gpwrap_count(int cpu)
kernel/rcu/tree.c
842
#define arch_irq_stat_cpu(cpu) 0
kernel/rcu/tree.c
86
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree.c
870
trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
kernel/rcu/tree.c
903
__func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
kernel/rcu/tree.c
940
if (tick_nohz_full_cpu(rdp->cpu) &&
kernel/rcu/tree.c
966
irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
kernel/rcu/tree.c
970
int cpu = rdp->cpu;
kernel/rcu/tree.c
974
kcsp = &kcpustat_cpu(cpu);
kernel/rcu/tree.c
977
rsrp->cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
kernel/rcu/tree.c
978
rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
kernel/rcu/tree.c
979
rsrp->cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
kernel/rcu/tree.c
980
rsrp->nr_hardirqs = kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu);
kernel/rcu/tree.c
981
rsrp->nr_softirqs = kstat_cpu_softirqs_sum(cpu);
kernel/rcu/tree.c
982
rsrp->nr_csw = nr_context_switches_cpu(cpu);
kernel/rcu/tree.h
146
#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
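
A worked example for the macro above:

    /* If a leaf rcu_node covers CPUs 16..31 (grplo == 16), then CPU 19
     * maps to BIT(19 - 16) == 0x8 in that node's per-CPU masks
     * (qsmask, expmask, ...). */
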
kernel/rcu/tree.h
296
int cpu;
kernel/rcu/tree.h
494
static void rcu_cpu_kthread_setup(unsigned int cpu);
kernel/rcu/tree.h
512
static void rcu_spawn_cpu_nocb_kthread(int cpu);
kernel/rcu/tree_exp.h
243
int cpu;
kernel/rcu/tree_exp.h
254
for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
kernel/rcu/tree_exp.h
255
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_exp.h
259
tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
kernel/rcu/tree_exp.h
360
int cpu;
kernel/rcu/tree_exp.h
371
for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
kernel/rcu/tree_exp.h
372
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_exp.h
376
if (raw_smp_processor_id() == cpu ||
kernel/rcu/tree_exp.h
394
snap = ct_rcu_watching_cpu_acquire(cpu);
kernel/rcu/tree_exp.h
406
for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
kernel/rcu/tree_exp.h
407
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_exp.h
415
if (get_cpu() == cpu) {
kernel/rcu/tree_exp.h
420
ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
kernel/rcu/tree_exp.h
560
int cpu;
kernel/rcu/tree_exp.h
574
for_each_leaf_node_possible_cpu(rnp, cpu) {
kernel/rcu/tree_exp.h
577
mask = leaf_node_cpu_bit(rnp, cpu);
kernel/rcu/tree_exp.h
581
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_exp.h
582
pr_cont(" %d-%c%c%c%c", cpu,
kernel/rcu/tree_exp.h
583
"O."[!!cpu_online(cpu)],
kernel/rcu/tree_exp.h
611
for_each_leaf_node_possible_cpu(rnp, cpu) {
kernel/rcu/tree_exp.h
612
mask = leaf_node_cpu_bit(rnp, cpu);
kernel/rcu/tree_exp.h
615
dump_cpu_task(cpu);
kernel/rcu/tree_exp.h
627
int cpu;
kernel/rcu/tree_exp.h
645
for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
kernel/rcu/tree_exp.h
646
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_exp.h
650
if (cpu_online(cpu))
kernel/rcu/tree_exp.h
651
tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
kernel/rcu/tree_nocb.h
102
WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
kernel/rcu/tree_nocb.h
1037
WARN_ON_ONCE(cpu_online(rdp->cpu) && rdp->cpu != raw_smp_processor_id());
kernel/rcu/tree_nocb.h
1039
pr_info("De-offloading %d\n", rdp->cpu);
kernel/rcu/tree_nocb.h
1084
int rcu_nocb_cpu_deoffload(int cpu)
kernel/rcu/tree_nocb.h
1086
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_nocb.h
1092
if (!cpu_online(cpu)) {
kernel/rcu/tree_nocb.h
1095
cpumask_clear_cpu(cpu, rcu_nocb_mask);
kernel/rcu/tree_nocb.h
1097
pr_info("NOCB: Cannot CB-deoffload online CPU %d\n", rdp->cpu);
kernel/rcu/tree_nocb.h
1124
WARN_ON_ONCE(cpu_online(rdp->cpu));
kernel/rcu/tree_nocb.h
1135
pr_info("Offloading %d\n", rdp->cpu);
kernel/rcu/tree_nocb.h
1152
int rcu_nocb_cpu_offload(int cpu)
kernel/rcu/tree_nocb.h
1154
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_nocb.h
1160
if (!cpu_online(cpu)) {
kernel/rcu/tree_nocb.h
1163
cpumask_set_cpu(cpu, rcu_nocb_mask);
kernel/rcu/tree_nocb.h
1165
pr_info("NOCB: Cannot CB-offload online CPU %d\n", rdp->cpu);
kernel/rcu/tree_nocb.h
1180
int cpu;
kernel/rcu/tree_nocb.h
1191
for_each_cpu(cpu, rcu_nocb_mask) {
kernel/rcu/tree_nocb.h
1192
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_nocb.h
1205
int cpu;
kernel/rcu/tree_nocb.h
1226
for_each_cpu(cpu, rcu_nocb_mask) {
kernel/rcu/tree_nocb.h
1227
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_nocb.h
1264
int cpu;
kernel/rcu/tree_nocb.h
1318
for_each_cpu(cpu, rcu_nocb_mask) {
kernel/rcu/tree_nocb.h
1319
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_nocb.h
1347
static void rcu_spawn_cpu_nocb_kthread(int cpu)
kernel/rcu/tree_nocb.h
1349
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_nocb.h
1367
"rcuog/%d", rdp_gp->cpu);
kernel/rcu/tree_nocb.h
1380
"rcuo%c/%d", rcu_state.abbr, cpu);
kernel/rcu/tree_nocb.h
1407
cpumask_clear_cpu(cpu, rcu_nocb_mask);
kernel/rcu/tree_nocb.h
1421
int cpu;
kernel/rcu/tree_nocb.h
1442
for_each_possible_cpu(cpu) {
kernel/rcu/tree_nocb.h
1443
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_nocb.h
1444
if (rdp->cpu >= nl) {
kernel/rcu/tree_nocb.h
1447
nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
kernel/rcu/tree_nocb.h
1457
__func__, cpu);
kernel/rcu/tree_nocb.h
1463
pr_cont(" %d", cpu);
kernel/rcu/tree_nocb.h
1466
if (cpumask_test_cpu(cpu, rcu_nocb_mask))
kernel/rcu/tree_nocb.h
1506
rdp->cpu,
kernel/rcu/tree_nocb.h
1555
rdp->cpu, rdp->nocb_gp_rdp->cpu,
kernel/rcu/tree_nocb.h
1556
nocb_next_rdp ? nocb_next_rdp->cpu : -1,
kernel/rcu/tree_nocb.h
1671
static void rcu_spawn_cpu_nocb_kthread(int cpu)
kernel/rcu/tree_nocb.h
211
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
kernel/rcu/tree_nocb.h
224
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
kernel/rcu/tree_nocb.h
300
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
kernel/rcu/tree_nocb.h
449
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
kernel/rcu/tree_nocb.h
468
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
kernel/rcu/tree_nocb.h
499
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
kernel/rcu/tree_nocb.h
514
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
kernel/rcu/tree_nocb.h
518
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
kernel/rcu/tree_nocb.h
543
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
kernel/rcu/tree_nocb.h
561
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
kernel/rcu/tree_nocb.h
572
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
kernel/rcu/tree_nocb.h
616
static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu)
kernel/rcu/tree_nocb.h
618
trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
kernel/rcu/tree_nocb.h
621
trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
kernel/rcu/tree_nocb.h
631
int __maybe_unused cpu = my_rdp->cpu;
kernel/rcu/tree_nocb.h
670
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
kernel/rcu/tree_nocb.h
697
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
kernel/rcu/tree_nocb.h
727
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
kernel/rcu/tree_nocb.h
766
trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
kernel/rcu/tree_nocb.h
773
nocb_gp_sleep(my_rdp, cpu);
kernel/rcu/tree_nocb.h
779
nocb_gp_sleep(my_rdp, cpu);
kernel/rcu/tree_nocb.h
879
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
kernel/rcu/tree_nocb.h
907
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
kernel/rcu/tree_nocb.h
954
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
kernel/rcu/tree_nocb.h
966
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
kernel/rcu/tree_plugin.h
1112
static void rcu_cpu_kthread_setup(unsigned int cpu)
kernel/rcu/tree_plugin.h
1114
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_plugin.h
762
cpu_online(rdp->cpu)) {
kernel/rcu/tree_plugin.h
766
irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
kernel/rcu/tree_plugin.h
869
int cpu;
kernel/rcu/tree_plugin.h
893
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
kernel/rcu/tree_plugin.h
894
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_plugin.h
896
cpu, ".o"[rcu_rdp_cpu_online(rdp)],
kernel/rcu/tree_stall.h
1064
int cpu;
kernel/rcu/tree_stall.h
1081
for_each_possible_cpu(cpu) {
kernel/rcu/tree_stall.h
1082
cbs = rcu_get_n_cbs_cpu(cpu);
kernel/rcu/tree_stall.h
1087
pr_cont(" %d: %lu", cpu, cbs);
kernel/rcu/tree_stall.h
1091
max_cpu = cpu;
kernel/rcu/tree_stall.h
398
int cpu;
kernel/rcu/tree_stall.h
404
for_each_leaf_node_possible_cpu(rnp, cpu) {
kernel/rcu/tree_stall.h
410
if (!(data_race(rnp->qsmask) & leaf_node_cpu_bit(rnp, cpu)))
kernel/rcu/tree_stall.h
413
if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
kernel/rcu/tree_stall.h
414
if (cpu_is_offline(cpu))
kernel/rcu/tree_stall.h
415
pr_err("Offline CPU %d blocking current GP.\n", cpu);
kernel/rcu/tree_stall.h
417
dump_cpu_task(cpu);
kernel/rcu/tree_stall.h
459
int cpu;
kernel/rcu/tree_stall.h
467
cpu = task_cpu(rcuc);
kernel/rcu/tree_stall.h
468
if (cpu_is_offline(cpu) || idle_cpu(cpu))
kernel/rcu/tree_stall.h
478
static void print_cpu_stat_info(int cpu)
kernel/rcu/tree_stall.h
481
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_stall.h
482
struct kernel_cpustat *kcsp = &kcpustat_cpu(cpu);
kernel/rcu/tree_stall.h
491
rsr.cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
kernel/rcu/tree_stall.h
492
rsr.cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
kernel/rcu/tree_stall.h
493
rsr.cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
kernel/rcu/tree_stall.h
497
kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu) - rsrp->nr_hardirqs,
kernel/rcu/tree_stall.h
498
kstat_cpu_softirqs_sum(cpu) - rsrp->nr_softirqs,
kernel/rcu/tree_stall.h
499
nr_context_switches_cpu(cpu) - rsrp->nr_csw);
kernel/rcu/tree_stall.h
518
static void print_cpu_stall_info(int cpu)
kernel/rcu/tree_stall.h
522
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_stall.h
544
rcu_watching_snap_in_eqs(ct_rcu_watching_cpu(cpu));
kernel/rcu/tree_stall.h
550
cpu,
kernel/rcu/tree_stall.h
551
"O."[!!cpu_online(cpu)],
kernel/rcu/tree_stall.h
558
ct_rcu_watching_cpu(cpu) & 0xffff,
kernel/rcu/tree_stall.h
559
ct_nesting_cpu(cpu), ct_nmi_nesting_cpu(cpu),
kernel/rcu/tree_stall.h
560
rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
kernel/rcu/tree_stall.h
565
print_cpu_stat_info(cpu);
kernel/rcu/tree_stall.h
571
int cpu;
kernel/rcu/tree_stall.h
576
cpu = gpk ? task_cpu(gpk) : -1;
kernel/rcu/tree_stall.h
583
gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
kernel/rcu/tree_stall.h
585
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_stall.h
590
if (cpu_is_offline(cpu)) {
kernel/rcu/tree_stall.h
591
pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
kernel/rcu/tree_stall.h
594
dump_cpu_task(cpu);
kernel/rcu/tree_stall.h
607
int cpu;
kernel/rcu/tree_stall.h
619
cpu = task_cpu(gpk);
kernel/rcu/tree_stall.h
627
cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
kernel/rcu/tree_stall.h
633
int cpu;
kernel/rcu/tree_stall.h
660
for_each_leaf_node_possible_cpu(rnp, cpu)
kernel/rcu/tree_stall.h
661
if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
kernel/rcu/tree_stall.h
662
print_cpu_stall_info(cpu);
kernel/rcu/tree_stall.h
670
for_each_possible_cpu(cpu)
kernel/rcu/tree_stall.h
671
totqlen += rcu_get_n_cbs_cpu(cpu);
kernel/rcu/tree_stall.h
711
int cpu;
kernel/rcu/tree_stall.h
736
for_each_possible_cpu(cpu)
kernel/rcu/tree_stall.h
737
totqlen += rcu_get_n_cbs_cpu(cpu);
kernel/rcu/tree_stall.h
893
int cpu;
kernel/rcu/tree_stall.h
917
for_each_leaf_node_possible_cpu(rnp, cpu) {
kernel/rcu/tree_stall.h
918
if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
kernel/rcu/tree_stall.h
920
*cpup = cpu;
kernel/rcu/tree_stall.h
937
int cpu;
kernel/rcu/tree_stall.h
978
for_each_leaf_node_possible_cpu(rnp, cpu) {
kernel/rcu/tree_stall.h
979
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/rcu/tree_stall.h
985
cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
kernel/rcu/tree_stall.h
988
for_each_possible_cpu(cpu) {
kernel/rcu/tree_stall.h
989
rdp = per_cpu_ptr(&rcu_data, cpu);
kernel/reboot.c
1137
int cpu = simple_strtoul(str, NULL, 0);
kernel/reboot.c
1139
if (cpu >= num_possible_cpus()) {
kernel/reboot.c
1142
cpu, num_possible_cpus());
kernel/reboot.c
1145
reboot_cpu = cpu;
kernel/reboot.c
1352
static struct kobj_attribute reboot_cpu_attr = __ATTR_RW(cpu);
kernel/reboot.c
253
int cpu = reboot_cpu;
kernel/reboot.c
258
if (!cpu_online(cpu))
kernel/reboot.c
259
cpu = cpumask_first(cpu_online_mask);
kernel/reboot.c
265
set_cpus_allowed_ptr(current, cpumask_of(cpu));
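
The reboot.c fragments above pin the rebooting task to one CPU. A sketch of the fallback-then-pin idiom, using only the calls quoted above:

    #include <linux/cpumask.h>
    #include <linux/sched.h>

    /* Prefer @cpu; if it is offline, fall back to the first online CPU,
     * then restrict the current task to run only there. */
    static void demo_pin_current(int cpu)
    {
        if (!cpu_online(cpu))
            cpu = cpumask_first(cpu_online_mask);

        set_cpus_allowed_ptr(current, cpumask_of(cpu));
    }
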
kernel/relay.c
202
*per_cpu_ptr(chan->buf, buf->cpu) = NULL;
kernel/relay.c
354
unsigned int cpu)
kernel/relay.c
359
tmpname = kasprintf(GFP_KERNEL, "%s%d", chan->base_filename, cpu);
kernel/relay.c
380
static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
kernel/relay.c
393
dentry = relay_create_buf_file(chan, buf, cpu);
kernel/relay.c
406
buf->cpu = cpu;
kernel/relay.c
411
buf->cpu = 0;
kernel/relay.c
437
int relay_prepare_cpu(unsigned int cpu)
kernel/relay.c
444
if (*per_cpu_ptr(chan->buf, cpu))
kernel/relay.c
446
buf = relay_open_buf(chan, cpu);
kernel/relay.c
448
pr_err("relay: cpu %d buffer creation failed\n", cpu);
kernel/relay.c
452
*per_cpu_ptr(chan->buf, cpu) = buf;
kernel/relay.c
624
unsigned int cpu,
kernel/relay.c
629
if (!chan || cpu >= NR_CPUS)
kernel/relay.c
632
buf = *per_cpu_ptr(chan->buf, cpu);
kernel/relay.c
819
relay_subbufs_consumed(buf->chan, buf->cpu, 1);
kernel/relay.c
832
relay_subbufs_consumed(buf->chan, buf->cpu, 1);
kernel/rseq.c
133
unsigned int cpu;
kernel/rseq.c
135
for_each_possible_cpu(cpu) {
kernel/rseq.c
136
stats.exit += data_race(per_cpu(rseq_stats.exit, cpu));
kernel/rseq.c
137
stats.signal += data_race(per_cpu(rseq_stats.signal, cpu));
kernel/rseq.c
138
stats.slowpath += data_race(per_cpu(rseq_stats.slowpath, cpu));
kernel/rseq.c
139
stats.fastpath += data_race(per_cpu(rseq_stats.fastpath, cpu));
kernel/rseq.c
140
stats.ids += data_race(per_cpu(rseq_stats.ids, cpu));
kernel/rseq.c
141
stats.cs += data_race(per_cpu(rseq_stats.cs, cpu));
kernel/rseq.c
142
stats.clear += data_race(per_cpu(rseq_stats.clear, cpu));
kernel/rseq.c
143
stats.fixup += data_race(per_cpu(rseq_stats.fixup, cpu));
kernel/rseq.c
145
stats.s_granted += data_race(per_cpu(rseq_stats.s_granted, cpu));
kernel/rseq.c
146
stats.s_expired += data_race(per_cpu(rseq_stats.s_expired, cpu));
kernel/rseq.c
147
stats.s_revoked += data_race(per_cpu(rseq_stats.s_revoked, cpu));
kernel/rseq.c
148
stats.s_yielded += data_race(per_cpu(rseq_stats.s_yielded, cpu));
kernel/rseq.c
149
stats.s_aborted += data_race(per_cpu(rseq_stats.s_aborted, cpu));
kernel/rseq.c
828
unsigned int cpu;
kernel/rseq.c
830
for_each_possible_cpu(cpu) {
kernel/rseq.c
831
hrtimer_setup(per_cpu_ptr(&slice_timer.timer, cpu), rseq_slice_expired,
kernel/scftorture.c
151
extern void resched_cpu(int cpu); // An alternative IPI vector.
kernel/scftorture.c
156
unsigned int cpu;
kernel/scftorture.c
160
cpu = raw_smp_processor_id() % nthreads;
kernel/scftorture.c
161
pool = &per_cpu(scf_free_pool, cpu);
kernel/scftorture.c
165
static void scf_cleanup_free_list(unsigned int cpu)
kernel/scftorture.c
171
pool = &per_cpu(scf_free_pool, cpu);
kernel/scftorture.c
183
int cpu;
kernel/scftorture.c
189
for_each_possible_cpu(cpu)
kernel/scftorture.c
190
invoked_count += data_race(per_cpu(scf_invoked_count, cpu));
kernel/scftorture.c
347
uintptr_t cpu;
kernel/scftorture.c
372
cpu = torture_random(trsp) % nr_cpu_ids;
kernel/scftorture.c
374
resched_cpu(cpu);
kernel/scftorture.c
379
cpu = torture_random(trsp) % nr_cpu_ids;
kernel/scftorture.c
385
scfcp->scfc_cpu = cpu;
kernel/scftorture.c
389
ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, scfsp->scfs_wait);
kernel/scftorture.c
402
cpu = torture_random(trsp) % nr_cpu_ids;
kernel/scftorture.c
404
scfcp->scfc_cpu = cpu;
kernel/scftorture.c
410
ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, 0);
kernel/scftorture.c
478
int cpu;
kernel/scftorture.c
484
VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
kernel/scftorture.c
485
cpu = scfp->cpu % nr_cpu_ids;
kernel/scftorture.c
486
WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu)));
kernel/scftorture.c
491
VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, raw_smp_processor_id());
kernel/scftorture.c
495
WARN_ONCE(curcpu != cpu,
kernel/scftorture.c
497
__func__, scfp->cpu, curcpu, nr_cpu_ids);
kernel/scftorture.c
502
VERBOSE_SCFTORTOUT("scftorture_invoker %d ended before starting", scfp->cpu);
kernel/scftorture.c
508
VERBOSE_SCFTORTOUT("scftorture_invoker %d started", scfp->cpu);
kernel/scftorture.c
511
scf_cleanup_free_list(cpu);
kernel/scftorture.c
514
while (cpu_is_offline(cpu) && !torture_must_stop()) {
kernel/scftorture.c
519
set_cpus_allowed_ptr(current, cpumask_of(cpu));
kernel/scftorture.c
526
VERBOSE_SCFTORTOUT("scftorture_invoker %d ended", scfp->cpu);
kernel/scftorture.c
675
scf_stats_p[i].cpu = i;
kernel/scftorture.c
83
int cpu;
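
A sketch of the cross-call pattern that the scftorture.c fragments above exercise: run a function on a chosen CPU, optionally waiting for it to complete. The callback and wrapper names are hypothetical:

    #include <linux/smp.h>
    #include <linux/printk.h>

    static void demo_remote_fn(void *info)
    {
        pr_info("running on CPU %d\n", smp_processor_id());
    }

    /* Returns 0 on success; with wait != 0 the caller blocks until
     * demo_remote_fn() has finished on @cpu. */
    static int demo_call_one(int cpu, bool wait)
    {
        return smp_call_function_single(cpu, demo_remote_fn, NULL, wait);
    }
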
kernel/sched/clock.c
104
notrace static inline struct sched_clock_data *cpu_sdc(int cpu)
kernel/sched/clock.c
106
return &per_cpu(sched_clock_data, cpu);
kernel/sched/clock.c
158
int cpu;
kernel/sched/clock.c
168
for_each_possible_cpu(cpu)
kernel/sched/clock.c
169
per_cpu(sched_clock_data, cpu) = *scd;
kernel/sched/clock.c
394
notrace u64 sched_clock_cpu(int cpu)
kernel/sched/clock.c
406
scd = cpu_sdc(cpu);
kernel/sched/clock.c
408
if (cpu != smp_processor_id())
kernel/sched/clock.c
490
notrace u64 sched_clock_cpu(int cpu)
kernel/sched/core.c
10263
void dump_cpu_task(int cpu)
kernel/sched/core.c
10265
if (in_hardirq() && cpu == smp_processor_id()) {
kernel/sched/core.c
10275
if (trigger_single_cpu_backtrace(cpu))
kernel/sched/core.c
10278
pr_info("Task dump for CPU %d:\n", cpu);
kernel/sched/core.c
10279
sched_show_task(cpu_curr(cpu));
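
A sketch of the fallback logic in the dump_cpu_task() fragments above: try an NMI backtrace first, and only print a task dump if the architecture cannot deliver one. Note cpu_curr() is private to kernel/sched, so this only compiles in that context:

    #include <linux/nmi.h>
    #include <linux/sched/debug.h>
    #include <linux/printk.h>

    static void demo_dump_cpu(int cpu)
    {
        if (trigger_single_cpu_backtrace(cpu))
            return;

        pr_info("Task dump for CPU %d:\n", cpu);
        sched_show_task(cpu_curr(cpu));
    }
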
kernel/sched/core.c
10581
unsigned int cpu;
kernel/sched/core.c
10584
for_each_possible_cpu(cpu) {
kernel/sched/core.c
10585
struct mm_cid_pcpu *pcp = per_cpu_ptr(mm->mm_cid.pcpu, cpu);
kernel/sched/core.c
10586
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
1116
int cpu;
kernel/sched/core.c
1130
cpu = cpu_of(rq);
kernel/sched/core.c
1132
trace_sched_set_need_resched_tp(curr, cpu, tif);
kernel/sched/core.c
1133
if (cpu == smp_processor_id()) {
kernel/sched/core.c
1142
smp_send_reschedule(cpu);
kernel/sched/core.c
1144
trace_sched_wake_idle_without_ipi(cpu);
kernel/sched/core.c
1185
void resched_cpu(int cpu)
kernel/sched/core.c
1187
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
1191
if (cpu_online(cpu) || cpu == smp_processor_id())
kernel/sched/core.c
1207
int i, cpu = smp_processor_id(), default_cpu = -1;
kernel/sched/core.c
1211
if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) {
kernel/sched/core.c
1212
if (!idle_cpu(cpu))
kernel/sched/core.c
1213
return cpu;
kernel/sched/core.c
1214
default_cpu = cpu;
kernel/sched/core.c
1221
for_each_domain(cpu, sd) {
kernel/sched/core.c
1223
if (cpu == i)
kernel/sched/core.c
1247
static void wake_up_idle_cpu(int cpu)
kernel/sched/core.c
1249
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
1251
if (cpu == smp_processor_id())
kernel/sched/core.c
1277
smp_send_reschedule(cpu);
kernel/sched/core.c
1279
trace_sched_wake_idle_without_ipi(cpu);
kernel/sched/core.c
1282
static bool wake_up_full_nohz_cpu(int cpu)
kernel/sched/core.c
1290
if (cpu_is_offline(cpu))
kernel/sched/core.c
1292
if (tick_nohz_full_cpu(cpu)) {
kernel/sched/core.c
1293
if (cpu != smp_processor_id() ||
kernel/sched/core.c
1295
tick_nohz_full_kick_cpu(cpu);
kernel/sched/core.c
1307
void wake_up_nohz_cpu(int cpu)
kernel/sched/core.c
1309
if (!wake_up_full_nohz_cpu(cpu))
kernel/sched/core.c
1310
wake_up_idle_cpu(cpu);
kernel/sched/core.c
1316
int cpu = cpu_of(rq);
kernel/sched/core.c
1322
flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
kernel/sched/core.c
1325
rq->idle_balance = idle_cpu(cpu);
kernel/sched/core.c
2042
int cpu;
kernel/sched/core.c
2044
for_each_possible_cpu(cpu)
kernel/sched/core.c
2045
init_uclamp_rq(cpu_rq(cpu));
kernel/sched/core.c
2356
.new_mask = cpumask_of(rq->cpu),
kernel/sched/core.c
2403
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
kernel/sched/core.c
2406
if (!task_allowed_on_cpu(p, cpu))
kernel/sched/core.c
2411
return cpu_online(cpu);
kernel/sched/core.c
2415
return cpu_active(cpu);
kernel/sched/core.c
2419
return cpu_online(cpu);
kernel/sched/core.c
2422
if (cpu_dying(cpu))
kernel/sched/core.c
2426
return cpu_online(cpu);
kernel/sched/core.c
2898
stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
kernel/sched/core.c
3297
static void __migrate_swap_task(struct task_struct *p, int cpu)
kernel/sched/core.c
3304
dst_rq = cpu_rq(cpu);
kernel/sched/core.c
3321
p->wake_cpu = cpu;
kernel/sched/core.c
337
static int sched_task_is_throttled(struct task_struct *p, int cpu)
kernel/sched/core.c
340
return p->sched_class->task_is_throttled(p, cpu);
kernel/sched/core.c
3418
int cpu = task_cpu(p);
kernel/sched/core.c
3420
if ((cpu != smp_processor_id()) && task_curr(p))
kernel/sched/core.c
3421
smp_send_reschedule(cpu);
kernel/sched/core.c
3447
static int select_fallback_rq(int cpu, struct task_struct *p)
kernel/sched/core.c
3449
int nid = cpu_to_node(cpu);
kernel/sched/core.c
348
int cpu = task_cpu(p);
kernel/sched/core.c
3505
task_pid_nr(p), p->comm, cpu);
kernel/sched/core.c
3516
int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
kernel/sched/core.c
3521
cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
kernel/sched/core.c
3524
cpu = cpumask_any(p->cpus_ptr);
kernel/sched/core.c
3537
if (unlikely(!is_cpu_allowed(p, cpu)))
kernel/sched/core.c
3538
cpu = select_fallback_rq(task_cpu(p), p);
kernel/sched/core.c
3540
return cpu;
kernel/sched/core.c
3543
void sched_set_stop_task(int cpu, struct task_struct *stop)
kernel/sched/core.c
3547
struct task_struct *old_stop = cpu_rq(cpu)->stop;
kernel/sched/core.c
3577
cpu_rq(cpu)->stop = stop;
kernel/sched/core.c
3589
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
kernel/sched/core.c
359
} while (sched_task_is_throttled(p, cpu));
kernel/sched/core.c
3598
if (cpu == rq->cpu) {
kernel/sched/core.c
3607
for_each_domain(rq->cpu, sd) {
kernel/sched/core.c
3608
if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
kernel/sched/core.c
3777
bool call_function_single_prep_ipi(int cpu)
kernel/sched/core.c
3779
if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
kernel/sched/core.c
378
if (!sched_task_is_throttled(p, rq->cpu))
kernel/sched/core.c
3780
trace_sched_wake_idle_without_ipi(cpu);
kernel/sched/core.c
3793
static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
kernel/sched/core.c
3795
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
3801
__smp_call_single_queue(cpu, &p->wake_entry.llist);
kernel/sched/core.c
3805
void wake_up_if_idle(int cpu)
kernel/sched/core.c
3807
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
3848
static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
kernel/sched/core.c
3863
if (!cpu_active(cpu))
kernel/sched/core.c
3867
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
kernel/sched/core.c
3874
if (!cpus_share_cache(smp_processor_id(), cpu))
kernel/sched/core.c
3877
if (cpu == smp_processor_id())
kernel/sched/core.c
3891
if (!cpu_rq(cpu)->nr_running)
kernel/sched/core.c
3897
static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
kernel/sched/core.c
3899
if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
kernel/sched/core.c
3900
sched_clock_cpu(cpu); /* Sync clocks across CPUs */
kernel/sched/core.c
3901
__ttwu_queue_wakelist(p, cpu, wake_flags);
kernel/sched/core.c
3908
static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
kernel/sched/core.c
3910
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
3913
if (ttwu_queue_wakelist(p, cpu, wake_flags))
kernel/sched/core.c
401
static void sched_core_lock(int cpu, unsigned long *flags)
kernel/sched/core.c
405
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
kernel/sched/core.c
4095
int cpu, success = 0;
kernel/sched/core.c
413
static void sched_core_unlock(int cpu, unsigned long *flags)
kernel/sched/core.c
417
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
kernel/sched/core.c
4230
cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
kernel/sched/core.c
4231
if (task_cpu(p) != cpu) {
kernel/sched/core.c
4239
set_task_cpu(p, cpu);
kernel/sched/core.c
4242
ttwu_queue(p, cpu, wake_flags);
kernel/sched/core.c
428
int cpu, t;
kernel/sched/core.c
4346
struct task_struct *cpu_curr_snapshot(int cpu)
kernel/sched/core.c
4348
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
4354
t = rcu_dereference(cpu_curr(cpu));
kernel/sched/core.c
436
for_each_cpu(cpu, &sched_core_mask) {
kernel/sched/core.c
437
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
kernel/sched/core.c
439
sched_core_lock(cpu, &flags);
kernel/sched/core.c
444
cpu_rq(cpu)->core->core_forceidle_start = 0;
kernel/sched/core.c
446
sched_core_unlock(cpu, &flags);
kernel/sched/core.c
454
for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
kernel/sched/core.c
455
cpu_rq(cpu)->core_enabled = enabled;
kernel/sched/core.c
462
int cpu;
kernel/sched/core.c
464
for_each_possible_cpu(cpu)
kernel/sched/core.c
465
WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
kernel/sched/core.c
5339
unsigned long long nr_context_switches_cpu(int cpu)
kernel/sched/core.c
5341
return cpu_rq(cpu)->nr_switches;
kernel/sched/core.c
5362
unsigned int nr_iowait_cpu(int cpu)
kernel/sched/core.c
5364
return atomic_read(&cpu_rq(cpu)->nr_iowait);
kernel/sched/core.c
5548
int cpu = smp_processor_id();
kernel/sched/core.c
5549
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
5556
if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
kernel/sched/core.c
5583
resched_latency_warn(cpu, resched_latency);
kernel/sched/core.c
5591
rq->idle_balance = idle_cpu(cpu);
kernel/sched/core.c
5599
int cpu;
kernel/sched/core.c
5637
int cpu = twork->cpu;
kernel/sched/core.c
5638
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
5648
if (tick_nohz_tick_stopped_cpu(cpu)) {
kernel/sched/core.c
5652
if (cpu_online(cpu)) {
kernel/sched/core.c
5687
static void sched_tick_start(int cpu)
kernel/sched/core.c
5692
if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
kernel/sched/core.c
5697
twork = per_cpu_ptr(tick_work_cpu, cpu);
kernel/sched/core.c
5701
twork->cpu = cpu;
kernel/sched/core.c
5708
static void sched_tick_stop(int cpu)
kernel/sched/core.c
5713
if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
kernel/sched/core.c
5718
twork = per_cpu_ptr(tick_work_cpu, cpu);
kernel/sched/core.c
5734
static inline void sched_tick_start(int cpu) { }
kernel/sched/core.c
5735
static inline void sched_tick_stop(int cpu) { }
kernel/sched/core.c
6018
int i, cpu, occ = 0;
kernel/sched/core.c
6025
cpu = cpu_of(rq);
kernel/sched/core.c
6028
if (cpu_is_offline(cpu)) {
kernel/sched/core.c
6062
smt_mask = cpu_smt_mask(cpu);
kernel/sched/core.c
6123
for_each_cpu_wrap(i, smt_mask, cpu) {
kernel/sched/core.c
6131
if (i != cpu && (rq_i != rq->core || !core_clock_updated))
kernel/sched/core.c
6223
if (i == cpu) {
kernel/sched/core.c
6301
static bool steal_cookie_task(int cpu, struct sched_domain *sd)
kernel/sched/core.c
6305
for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
kernel/sched/core.c
6306
if (i == cpu)
kernel/sched/core.c
6312
if (try_steal_cookie(cpu, i))
kernel/sched/core.c
6323
int cpu = cpu_of(rq);
kernel/sched/core.c
6329
for_each_domain(cpu, sd) {
kernel/sched/core.c
6333
if (steal_cookie_task(cpu, sd))
kernel/sched/core.c
6352
queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
kernel/sched/core.c
6360
static void sched_core_cpu_starting(unsigned int cpu)
kernel/sched/core.c
6362
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
kernel/sched/core.c
6363
struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
kernel/sched/core.c
6366
guard(core_lock)(&cpu);
kernel/sched/core.c
6376
if (t == cpu)
kernel/sched/core.c
6392
if (t == cpu)
kernel/sched/core.c
6399
static void sched_core_cpu_deactivate(unsigned int cpu)
kernel/sched/core.c
6401
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
kernel/sched/core.c
6402
struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
kernel/sched/core.c
6405
guard(core_lock)(&cpu);
kernel/sched/core.c
6419
if (t == cpu)
kernel/sched/core.c
6450
static inline void sched_core_cpu_dying(unsigned int cpu)
kernel/sched/core.c
6452
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
6460
static inline void sched_core_cpu_starting(unsigned int cpu) {}
kernel/sched/core.c
6461
static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
kernel/sched/core.c
6462
static inline void sched_core_cpu_dying(unsigned int cpu) {}
kernel/sched/core.c
6777
int cpu;
kernel/sched/core.c
6782
cpu = smp_processor_id();
kernel/sched/core.c
6783
rq = cpu_rq(cpu);
kernel/sched/core.c
7932
void __init init_idle(struct task_struct *idle, int cpu)
kernel/sched/core.c
7935
.new_mask = cpumask_of(cpu),
kernel/sched/core.c
7938
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
7951
kthread_set_per_cpu(idle, cpu);
kernel/sched/core.c
7969
__set_task_cpu(idle, cpu);
kernel/sched/core.c
7981
init_idle_preempt_count(idle, cpu);
kernel/sched/core.c
7987
ftrace_graph_init_idle_task(idle, cpu);
kernel/sched/core.c
7988
vtime_init_idle(idle, cpu);
kernel/sched/core.c
7989
sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
kernel/sched/core.c
8089
int cpu;
kernel/sched/core.c
8098
cpu = select_fallback_rq(rq->cpu, p);
kernel/sched/core.c
8103
rq = __migrate_task(rq, &rf, p, cpu);
kernel/sched/core.c
8136
if (!cpu_dying(rq->cpu) || rq != this_rq())
kernel/sched/core.c
8173
stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
kernel/sched/core.c
8184
static void balance_push_set(int cpu, bool on)
kernel/sched/core.c
8186
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
8220
static inline void balance_push_set(int cpu, bool on)
kernel/sched/core.c
8235
cpumask_set_cpu(rq->cpu, rq->rd->online);
kernel/sched/core.c
8256
cpumask_clear_cpu(rq->cpu, rq->rd->online);
kernel/sched/core.c
8261
static inline void sched_set_rq_online(struct rq *rq, int cpu)
kernel/sched/core.c
8267
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
kernel/sched/core.c
8273
static inline void sched_set_rq_offline(struct rq *rq, int cpu)
kernel/sched/core.c
8279
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
kernel/sched/core.c
8320
static void cpuset_cpu_inactive(unsigned int cpu)
kernel/sched/core.c
8330
static inline void sched_smt_present_inc(int cpu)
kernel/sched/core.c
8333
if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
kernel/sched/core.c
8338
static inline void sched_smt_present_dec(int cpu)
kernel/sched/core.c
8341
if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
kernel/sched/core.c
8346
int sched_cpu_activate(unsigned int cpu)
kernel/sched/core.c
8348
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
8354
balance_push_set(cpu, false);
kernel/sched/core.c
8359
sched_smt_present_inc(cpu);
kernel/sched/core.c
8360
set_cpu_active(cpu, true);
kernel/sched/core.c
8363
sched_update_numa(cpu, true);
kernel/sched/core.c
8364
sched_domains_numa_masks_set(cpu);
kernel/sched/core.c
8379
sched_set_rq_online(rq, cpu);
kernel/sched/core.c
8384
int sched_cpu_deactivate(unsigned int cpu)
kernel/sched/core.c
8386
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
8389
ret = dl_bw_deactivate(cpu);
kernel/sched/core.c
8400
set_cpu_active(cpu, false);
kernel/sched/core.c
8408
balance_push_set(cpu, true);
kernel/sched/core.c
8422
sched_set_rq_offline(rq, cpu);
kernel/sched/core.c
8429
sched_smt_present_dec(cpu);
kernel/sched/core.c
8432
sched_core_cpu_deactivate(cpu);
kernel/sched/core.c
8438
sched_update_numa(cpu, false);
kernel/sched/core.c
8439
cpuset_cpu_inactive(cpu);
kernel/sched/core.c
8440
sched_domains_numa_masks_clear(cpu);
kernel/sched/core.c
8444
static void sched_rq_cpu_starting(unsigned int cpu)
kernel/sched/core.c
8446
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
8452
int sched_cpu_starting(unsigned int cpu)
kernel/sched/core.c
8454
sched_core_cpu_starting(cpu);
kernel/sched/core.c
8455
sched_rq_cpu_starting(cpu);
kernel/sched/core.c
8456
sched_tick_start(cpu);
kernel/sched/core.c
8473
int sched_cpu_wait_empty(unsigned int cpu)
kernel/sched/core.c
8500
int cpu = cpu_of(rq);
kernel/sched/core.c
8504
printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
kernel/sched/core.c
8506
if (task_cpu(p) != cpu)
kernel/sched/core.c
8516
int sched_cpu_dying(unsigned int cpu)
kernel/sched/core.c
8518
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
8522
sched_tick_stop(cpu);
kernel/sched/core.c
8539
sched_core_cpu_dying(cpu);
kernel/sched/core.c
8718
rq->cpu = i;
kernel/sched/core.c
9012
struct task_struct *curr_task(int cpu)
kernel/sched/core.c
9014
return cpu_curr(cpu);
kernel/sched/cpuacct.c
100
u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
kernel/sched/cpuacct.c
114
raw_spin_rq_lock_irq(cpu_rq(cpu));
kernel/sched/cpuacct.c
131
raw_spin_rq_unlock_irq(cpu_rq(cpu));
kernel/sched/cpuacct.c
137
static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu)
kernel/sched/cpuacct.c
139
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
kernel/sched/cpuacct.c
140
u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
kernel/sched/cpuacct.c
150
raw_spin_rq_lock_irq(cpu_rq(cpu));
kernel/sched/cpuacct.c
158
raw_spin_rq_unlock_irq(cpu_rq(cpu));
kernel/sched/cpuacct.c
197
int cpu;
kernel/sched/cpuacct.c
205
for_each_possible_cpu(cpu)
kernel/sched/cpuacct.c
206
cpuacct_cpuusage_write(ca, cpu);
kernel/sched/cpuacct.c
245
int cpu;
kernel/sched/cpuacct.c
252
for_each_possible_cpu(cpu) {
kernel/sched/cpuacct.c
253
seq_printf(m, "%d", cpu);
kernel/sched/cpuacct.c
256
cpuacct_cpuusage_read(ca, cpu, index));
kernel/sched/cpuacct.c
267
int cpu;
kernel/sched/cpuacct.c
271
for_each_possible_cpu(cpu) {
kernel/sched/cpuacct.c
272
u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
kernel/sched/cpuacct.c
280
cputime.sum_exec_runtime += *per_cpu_ptr(ca->cpuusage, cpu);
kernel/sched/cpuacct.c
338
unsigned int cpu = task_cpu(tsk);
kernel/sched/cpuacct.c
341
lockdep_assert_rq_held(cpu_rq(cpu));
kernel/sched/cpuacct.c
344
*per_cpu_ptr(ca->cpuusage, cpu) += cputime;
kernel/sched/cpuacct.c
96
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
kernel/sched/cpuacct.c
99
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
kernel/sched/cpudeadline.c
106
return cp->elements[0].cpu;
kernel/sched/cpudeadline.c
125
int cpu, max_cpu = -1;
kernel/sched/cpudeadline.c
131
for_each_cpu(cpu, later_mask) {
kernel/sched/cpudeadline.c
132
if (!dl_task_fits_capacity(p, cpu)) {
kernel/sched/cpudeadline.c
133
cpumask_clear_cpu(cpu, later_mask);
kernel/sched/cpudeadline.c
135
cap = arch_scale_cpu_capacity(cpu);
kernel/sched/cpudeadline.c
138
(cpu == task_cpu(p) && cap == max_cap)) {
kernel/sched/cpudeadline.c
140
max_cpu = cpu;
kernel/sched/cpudeadline.c
175
void cpudl_clear(struct cpudl *cp, int cpu, bool online)
kernel/sched/cpudeadline.c
180
WARN_ON(!cpu_present(cpu));
kernel/sched/cpudeadline.c
184
old_idx = cp->elements[cpu].idx;
kernel/sched/cpudeadline.c
192
new_cpu = cp->elements[cp->size - 1].cpu;
kernel/sched/cpudeadline.c
194
cp->elements[old_idx].cpu = new_cpu;
kernel/sched/cpudeadline.c
197
cp->elements[cpu].idx = IDX_INVALID;
kernel/sched/cpudeadline.c
201
__cpumask_set_cpu(cpu, cp->free_cpus);
kernel/sched/cpudeadline.c
203
__cpumask_clear_cpu(cpu, cp->free_cpus);
kernel/sched/cpudeadline.c
218
void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
kernel/sched/cpudeadline.c
223
WARN_ON(!cpu_present(cpu));
kernel/sched/cpudeadline.c
227
old_idx = cp->elements[cpu].idx;
kernel/sched/cpudeadline.c
232
cp->elements[new_idx].cpu = cpu;
kernel/sched/cpudeadline.c
233
cp->elements[cpu].idx = new_idx;
kernel/sched/cpudeadline.c
235
__cpumask_clear_cpu(cpu, cp->free_cpus);
kernel/sched/cpudeadline.c
30
int orig_cpu = cp->elements[idx].cpu;
kernel/sched/cpudeadline.c
58
cp->elements[idx].cpu = cp->elements[largest].cpu;
kernel/sched/cpudeadline.c
60
cp->elements[cp->elements[idx].cpu].idx = idx;
kernel/sched/cpudeadline.c
64
cp->elements[idx].cpu = orig_cpu;
kernel/sched/cpudeadline.c
66
cp->elements[cp->elements[idx].cpu].idx = idx;
kernel/sched/cpudeadline.c
73
int orig_cpu = cp->elements[idx].cpu;
kernel/sched/cpudeadline.c
84
cp->elements[idx].cpu = cp->elements[p].cpu;
kernel/sched/cpudeadline.c
86
cp->elements[cp->elements[idx].cpu].idx = idx;
kernel/sched/cpudeadline.c
90
cp->elements[idx].cpu = orig_cpu;
kernel/sched/cpudeadline.c
92
cp->elements[cp->elements[idx].cpu].idx = idx;
kernel/sched/cpudeadline.h
21
void cpudl_set(struct cpudl *cp, int cpu, u64 dl);
kernel/sched/cpudeadline.h
22
void cpudl_clear(struct cpudl *cp, int cpu, bool online);
kernel/sched/cpudeadline.h
9
int cpu;
kernel/sched/cpufreq.c
30
void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
kernel/sched/cpufreq.c
37
if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
kernel/sched/cpufreq.c
41
rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
kernel/sched/cpufreq.c
55
void cpufreq_remove_update_util_hook(int cpu)
kernel/sched/cpufreq.c
57
rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
kernel/sched/cpufreq_schedutil.c
156
unsigned int freq = arch_scale_freq_ref(policy->cpu);
kernel/sched/cpufreq_schedutil.c
209
unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
kernel/sched/cpufreq_schedutil.c
228
unsigned long min, max, util = scx_cpuperf_target(sg_cpu->cpu);
kernel/sched/cpufreq_schedutil.c
231
util += cpu_util_cfs_boost(sg_cpu->cpu);
kernel/sched/cpufreq_schedutil.c
232
util = effective_cpu_util(sg_cpu->cpu, util, &min, &max);
kernel/sched/cpufreq_schedutil.c
235
sg_cpu->util = sugov_effective_cpu_perf(sg_cpu->cpu, util, min, max);
kernel/sched/cpufreq_schedutil.c
372
if (uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)))
kernel/sched/cpufreq_schedutil.c
379
idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
kernel/sched/cpufreq_schedutil.c
395
if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min)
kernel/sched/cpufreq_schedutil.c
428
max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
kernel/sched/cpufreq_schedutil.c
45
unsigned int cpu;
kernel/sched/cpufreq_schedutil.c
477
max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
kernel/sched/cpufreq_schedutil.c
485
cpufreq_driver_adjust_perf(sg_cpu->cpu, sg_cpu->bw_min,
kernel/sched/cpufreq_schedutil.c
498
max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
kernel/sched/cpufreq_schedutil.c
847
unsigned int cpu;
kernel/sched/cpufreq_schedutil.c
865
for_each_cpu(cpu, policy->cpus) {
kernel/sched/cpufreq_schedutil.c
866
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
kernel/sched/cpufreq_schedutil.c
869
sg_cpu->cpu = cpu;
kernel/sched/cpufreq_schedutil.c
871
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
kernel/sched/cpufreq_schedutil.c
879
unsigned int cpu;
kernel/sched/cpufreq_schedutil.c
881
for_each_cpu(cpu, policy->cpus)
kernel/sched/cpufreq_schedutil.c
882
cpufreq_remove_update_util_hook(cpu);
kernel/sched/cpupri.c
147
bool (*fitness_fn)(struct task_struct *p, int cpu))
kernel/sched/cpupri.c
150
int idx, cpu;
kernel/sched/cpupri.c
163
for_each_cpu(cpu, lowest_mask) {
kernel/sched/cpupri.c
164
if (!fitness_fn(p, cpu))
kernel/sched/cpupri.c
165
cpumask_clear_cpu(cpu, lowest_mask);
kernel/sched/cpupri.c
211
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
kernel/sched/cpupri.c
213
int *currpri = &cp->cpu_to_pri[cpu];
kernel/sched/cpupri.c
233
cpumask_set_cpu(cpu, vec->mask);
kernel/sched/cpupri.c
267
cpumask_clear_cpu(cpu, vec->mask);
kernel/sched/cpupri.h
27
bool (*fitness_fn)(struct task_struct *p, int cpu));
kernel/sched/cpupri.h
28
void cpupri_set(struct cpupri *cp, int cpu, int pri);
kernel/sched/cputime.c
1001
err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
kernel/sched/cputime.c
1014
struct task_struct *tsk, int cpu)
kernel/sched/cputime.c
1026
state = vtime_state_fetch(vtime, cpu);
kernel/sched/cputime.c
1065
void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
kernel/sched/cputime.c
1067
const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
kernel/sched/cputime.c
1071
if (!vtime_accounting_enabled_cpu(cpu)) {
kernel/sched/cputime.c
1076
rq = cpu_rq(cpu);
kernel/sched/cputime.c
1089
err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
kernel/sched/cputime.c
259
static u64 native_steal_clock(int cpu)
kernel/sched/cputime.c
62
int cpu;
kernel/sched/cputime.c
67
cpu = smp_processor_id();
kernel/sched/cputime.c
68
delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
kernel/sched/cputime.c
791
vtime->cpu = -1;
kernel/sched/cputime.c
804
vtime->cpu = smp_processor_id();
kernel/sched/cputime.c
808
void vtime_init_idle(struct task_struct *t, int cpu)
kernel/sched/cputime.c
817
vtime->cpu = cpu;
kernel/sched/cputime.c
888
static int vtime_state_fetch(struct vtime *vtime, int cpu)
kernel/sched/cputime.c
896
if (vtime->cpu != cpu && vtime->cpu != -1)
kernel/sched/cputime.c
926
int cpu, u64 *val)
kernel/sched/cputime.c
936
state = vtime_state_fetch(vtime, cpu);
kernel/sched/cputime.c
979
enum cpu_usage_stat usage, int cpu)
kernel/sched/cputime.c
986
if (!vtime_accounting_enabled_cpu(cpu))
kernel/sched/cputime.c
989
rq = cpu_rq(cpu);
kernel/sched/deadline.c
1411
int cpu = cpu_of(rq);
kernel/sched/deadline.c
1412
unsigned long scale_freq = arch_scale_freq_capacity(cpu);
kernel/sched/deadline.c
1413
unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
kernel/sched/deadline.c
163
bool dl_bw_visited(int cpu, u64 cookie)
kernel/sched/deadline.c
165
struct root_domain *rd = cpu_rq(cpu)->rd;
kernel/sched/deadline.c
1841
int cpu;
kernel/sched/deadline.c
1845
for_each_online_cpu(cpu) {
kernel/sched/deadline.c
1849
rq = cpu_rq(cpu);
kernel/sched/deadline.c
1881
int cpu = cpu_of(rq);
kernel/sched/deadline.c
1887
if (!dl_bw_cpus(cpu))
kernel/sched/deadline.c
1890
__dl_add(dl_b, new_bw, dl_bw_cpus(cpu));
kernel/sched/deadline.c
1898
int cpu = cpu_of(rq);
kernel/sched/deadline.c
1903
dl_b = dl_bw_of(cpu);
kernel/sched/deadline.c
1906
cpus = dl_bw_cpus(cpu);
kernel/sched/deadline.c
1907
cap = dl_bw_capacity(cpu);
kernel/sched/deadline.c
2035
cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
kernel/sched/deadline.c
2037
cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
kernel/sched/deadline.c
2052
cpudl_clear(&rq->rd->cpudl, rq->cpu, rq->online);
kernel/sched/deadline.c
2053
cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
kernel/sched/deadline.c
2059
cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
kernel/sched/deadline.c
2407
select_task_rq_dl(struct task_struct *p, int cpu, int flags)
kernel/sched/deadline.c
2414
return cpu;
kernel/sched/deadline.c
2416
rq = cpu_rq(cpu);
kernel/sched/deadline.c
2441
select_rq |= !dl_task_fits_capacity(p, cpu);
kernel/sched/deadline.c
2448
cpu = target;
kernel/sched/deadline.c
2452
return cpu;
kernel/sched/deadline.c
2691
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
kernel/sched/deadline.c
2703
if (task_is_pushable(rq, p, cpu))
kernel/sched/deadline.c
2720
int cpu = task_cpu(task);
kernel/sched/deadline.c
2748
if (cpumask_test_cpu(cpu, later_mask))
kernel/sched/deadline.c
2749
return cpu;
kernel/sched/deadline.c
2758
for_each_domain(cpu, sd) {
kernel/sched/deadline.c
2795
cpu = cpumask_any_distribute(later_mask);
kernel/sched/deadline.c
2796
if (cpu < nr_cpu_ids)
kernel/sched/deadline.c
2797
return cpu;
kernel/sched/deadline.c
2811
WARN_ON_ONCE(rq->cpu != task_cpu(p));
kernel/sched/deadline.c
2826
int cpu;
kernel/sched/deadline.c
2829
cpu = find_later_rq(task);
kernel/sched/deadline.c
2831
if ((cpu == -1) || (cpu == rq->cpu))
kernel/sched/deadline.c
2834
later_rq = cpu_rq(cpu);
kernel/sched/deadline.c
2870
!cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
kernel/sched/deadline.c
2988
int this_cpu = this_rq->cpu, cpu;
kernel/sched/deadline.c
3003
for_each_cpu(cpu, this_rq->rd->dlo_mask) {
kernel/sched/deadline.c
3004
if (this_cpu == cpu)
kernel/sched/deadline.c
3007
src_rq = cpu_rq(cpu);
kernel/sched/deadline.c
3065
stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
kernel/sched/deadline.c
3132
cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
kernel/sched/deadline.c
3134
cpudl_clear(&rq->rd->cpudl, rq->cpu, true);
kernel/sched/deadline.c
3143
cpudl_clear(&rq->rd->cpudl, rq->cpu, false);
kernel/sched/deadline.c
3190
unsigned int cpu;
kernel/sched/deadline.c
3201
cpu = cpumask_first_and(cpu_active_mask, msk);
kernel/sched/deadline.c
3202
BUG_ON(cpu >= nr_cpu_ids);
kernel/sched/deadline.c
3203
rq = cpu_rq(cpu);
kernel/sched/deadline.c
3212
static void dl_server_add_bw(struct root_domain *rd, int cpu)
kernel/sched/deadline.c
3216
dl_se = &cpu_rq(cpu)->fair_server;
kernel/sched/deadline.c
3217
if (dl_server(dl_se) && cpu_active(cpu))
kernel/sched/deadline.c
3218
__dl_add(&rd->dl_bw, dl_se->dl_bw, dl_bw_cpus(cpu));
kernel/sched/deadline.c
3221
dl_se = &cpu_rq(cpu)->ext_server;
kernel/sched/deadline.c
3222
if (dl_server(dl_se) && cpu_active(cpu))
kernel/sched/deadline.c
3223
__dl_add(&rd->dl_bw, dl_se->dl_bw, dl_bw_cpus(cpu));
kernel/sched/deadline.c
3227
static u64 dl_server_read_bw(int cpu)
kernel/sched/deadline.c
3231
if (cpu_rq(cpu)->fair_server.dl_server)
kernel/sched/deadline.c
3232
dl_bw += cpu_rq(cpu)->fair_server.dl_bw;
kernel/sched/deadline.c
3235
if (cpu_rq(cpu)->ext_server.dl_server)
kernel/sched/deadline.c
3236
dl_bw += cpu_rq(cpu)->ext_server.dl_bw;
kernel/sched/deadline.c
3264
void dl_clear_root_domain_cpu(int cpu)
kernel/sched/deadline.c
3266
dl_clear_root_domain(cpu_rq(cpu)->rd);
kernel/sched/deadline.c
3400
static int task_is_throttled_dl(struct task_struct *p, int cpu)
kernel/sched/deadline.c
3453
int cpu, cpus, ret = 0;
kernel/sched/deadline.c
3461
for_each_online_cpu(cpu) {
kernel/sched/deadline.c
3464
if (dl_bw_visited(cpu, cookie))
kernel/sched/deadline.c
3467
dl_b = dl_bw_of(cpu);
kernel/sched/deadline.c
3468
cpus = dl_bw_cpus(cpu);
kernel/sched/deadline.c
3503
int cpu;
kernel/sched/deadline.c
3509
for_each_possible_cpu(cpu)
kernel/sched/deadline.c
3510
init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
kernel/sched/deadline.c
3512
for_each_possible_cpu(cpu) {
kernel/sched/deadline.c
3515
if (dl_bw_visited(cpu, cookie)) {
kernel/sched/deadline.c
3520
dl_b = dl_bw_of(cpu);
kernel/sched/deadline.c
3544
int cpus, err = -1, cpu = task_cpu(p);
kernel/sched/deadline.c
3545
struct dl_bw *dl_b = dl_bw_of(cpu);
kernel/sched/deadline.c
3561
cpus = dl_bw_cpus(cpu);
kernel/sched/deadline.c
3562
cap = dl_bw_capacity(cpu);
kernel/sched/deadline.c
3755
static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
kernel/sched/deadline.c
3763
dl_b = dl_bw_of(cpu);
kernel/sched/deadline.c
3766
cap = dl_bw_capacity(cpu);
kernel/sched/deadline.c
3769
__dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
kernel/sched/deadline.c
3781
__dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
kernel/sched/deadline.c
3790
cap -= arch_scale_cpu_capacity(cpu);
kernel/sched/deadline.c
3798
dl_server_bw = dl_server_read_bw(cpu);
kernel/sched/deadline.c
3811
if (dl_bw_cpus(cpu) - 1)
kernel/sched/deadline.c
3826
int dl_bw_deactivate(int cpu)
kernel/sched/deadline.c
3828
return dl_bw_manage(dl_bw_req_deactivate, cpu, 0);
kernel/sched/deadline.c
3831
int dl_bw_alloc(int cpu, u64 dl_bw)
kernel/sched/deadline.c
3833
return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
kernel/sched/deadline.c
3836
void dl_bw_free(int cpu, u64 dl_bw)
kernel/sched/deadline.c
3838
dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
kernel/sched/deadline.c
3841
void print_dl_stats(struct seq_file *m, int cpu)
kernel/sched/deadline.c
3843
print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
kernel/sched/deadline.c
541
cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
kernel/sched/deadline.c
558
cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
kernel/sched/deadline.c
635
queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
kernel/sched/deadline.c
640
queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
kernel/sched/deadline.c
652
int cpu;
kernel/sched/deadline.c
658
cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
kernel/sched/deadline.c
659
if (cpu >= nr_cpu_ids) {
kernel/sched/deadline.c
671
cpu = cpumask_any(cpu_active_mask);
kernel/sched/deadline.c
673
later_rq = cpu_rq(cpu);
kernel/sched/deadline.c
709
set_task_cpu(p, later_rq->cpu);
kernel/sched/debug.c
1011
void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
kernel/sched/debug.c
1016
SEQ_printf(m, "dl_rq[%d]:\n", cpu);
kernel/sched/debug.c
1022
dl_bw = &cpu_rq(cpu)->rd->dl_bw;
kernel/sched/debug.c
1029
static void print_cpu(struct seq_file *m, int cpu)
kernel/sched/debug.c
1031
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
1038
cpu, freq / 1000, (freq % 1000));
kernel/sched/debug.c
1041
SEQ_printf(m, "cpu#%d\n", cpu);
kernel/sched/debug.c
1080
print_cfs_stats(m, cpu);
kernel/sched/debug.c
1081
print_rt_stats(m, cpu);
kernel/sched/debug.c
1082
print_dl_stats(m, cpu);
kernel/sched/debug.c
1084
print_rq(m, rq, cpu);
kernel/sched/debug.c
1145
int cpu = (unsigned long)(v - 2);
kernel/sched/debug.c
1147
if (cpu != -1)
kernel/sched/debug.c
1148
print_cpu(m, cpu);
kernel/sched/debug.c
1157
int cpu;
kernel/sched/debug.c
1160
for_each_online_cpu(cpu) {
kernel/sched/debug.c
1168
print_cpu(NULL, cpu);
kernel/sched/debug.c
1381
void resched_latency_warn(int cpu, u64 latency)
kernel/sched/debug.c
1389
cpu, latency, cpu_rq(cpu)->ticks_without_resched);
kernel/sched/debug.c
340
long cpu = (long) ((struct seq_file *) filp->private_data)->private;
kernel/sched/debug.c
343
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
419
long cpu = (long) ((struct seq_file *) filp->private_data)->private;
kernel/sched/debug.c
420
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
428
unsigned long cpu = (unsigned long) m->private;
kernel/sched/debug.c
429
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
452
long cpu = (long) ((struct seq_file *) filp->private_data)->private;
kernel/sched/debug.c
453
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
461
unsigned long cpu = (unsigned long) m->private;
kernel/sched/debug.c
462
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
485
long cpu = (long) ((struct seq_file *) filp->private_data)->private;
kernel/sched/debug.c
486
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
494
unsigned long cpu = (unsigned long) m->private;
kernel/sched/debug.c
495
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
518
long cpu = (long) ((struct seq_file *) filp->private_data)->private;
kernel/sched/debug.c
519
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
527
unsigned long cpu = (unsigned long) m->private;
kernel/sched/debug.c
528
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
552
unsigned long cpu;
kernel/sched/debug.c
558
for_each_possible_cpu(cpu) {
kernel/sched/debug.c
562
snprintf(buf, sizeof(buf), "cpu%lu", cpu);
kernel/sched/debug.c
565
debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &fair_server_runtime_fops);
kernel/sched/debug.c
566
debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &fair_server_period_fops);
kernel/sched/debug.c
574
unsigned long cpu;
kernel/sched/debug.c
580
for_each_possible_cpu(cpu) {
kernel/sched/debug.c
584
snprintf(buf, sizeof(buf), "cpu%lu", cpu);
kernel/sched/debug.c
587
debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &ext_server_runtime_fops);
kernel/sched/debug.c
588
debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &ext_server_period_fops);
kernel/sched/debug.c
693
int cpu, i;
kernel/sched/debug.c
719
for_each_cpu(cpu, sd_sysctl_cpus) {
kernel/sched/debug.c
724
snprintf(buf, sizeof(buf), "cpu%d", cpu);
kernel/sched/debug.c
729
for_each_domain(cpu, sd) {
kernel/sched/debug.c
739
__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
kernel/sched/debug.c
743
void dirty_sched_domain_sysctl(int cpu)
kernel/sched/debug.c
746
__cpumask_set_cpu(cpu, sd_sysctl_cpus);
kernel/sched/debug.c
750
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
kernel/sched/debug.c
752
struct sched_entity *se = tg->se[cpu];
kernel/sched/debug.c
902
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
kernel/sched/debug.c
907
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
912
SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
kernel/sched/debug.c
915
SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
kernel/sched/debug.c
977
print_cfs_group_stats(m, cpu, cfs_rq->tg);
kernel/sched/debug.c
981
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
kernel/sched/debug.c
985
SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
kernel/sched/debug.c
988
SEQ_printf(m, "rt_rq[%d]:\n", cpu);
kernel/sched/ext.c
1224
s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
kernel/sched/ext.c
1226
if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
kernel/sched/ext.c
1229
return &cpu_rq(cpu)->scx.local_dsq;
kernel/sched/ext.c
139
s32 cpu;
kernel/sched/ext.c
148
.cpu = -1,
kernel/sched/ext.c
1736
int cpu = cpu_of(rq);
kernel/sched/ext.c
1738
WARN_ON_ONCE(task_cpu(p) == cpu);
kernel/sched/ext.c
1755
p->comm, p->pid, task_cpu(p), cpu);
kernel/sched/ext.c
1765
if (!task_allowed_on_cpu(p, cpu)) {
kernel/sched/ext.c
1768
cpu, p->comm, p->pid);
kernel/sched/ext.c
1814
s32 cpu = raw_smp_processor_id();
kernel/sched/ext.c
1820
p->scx.holding_cpu = cpu;
kernel/sched/ext.c
1826
return likely(p->scx.holding_cpu == cpu) &&
kernel/sched/ext.c
199
static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags);
kernel/sched/ext.c
2474
s32 cpu;
kernel/sched/ext.c
2487
for_each_cpu(cpu, rq->scx.cpus_to_sync) {
kernel/sched/ext.c
2492
if (cpu == cpu_of(rq) ||
kernel/sched/ext.c
2493
smp_load_acquire(&cpu_rq(cpu)->scx.kick_sync) != ksyncs[cpu]) {
kernel/sched/ext.c
2494
cpumask_clear_cpu(cpu, rq->scx.cpus_to_sync);
kernel/sched/ext.c
2499
while (READ_ONCE(cpu_rq(cpu)->scx.kick_sync) == ksyncs[cpu]) {
kernel/sched/ext.c
2684
s32 cpu;
kernel/sched/ext.c
2691
cpu = SCX_CALL_OP_TASK_RET(sch,
kernel/sched/ext.c
2695
p->scx.selected_cpu = cpu;
kernel/sched/ext.c
2697
if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()"))
kernel/sched/ext.c
2698
return cpu;
kernel/sched/ext.c
2702
s32 cpu;
kernel/sched/ext.c
2704
cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
kernel/sched/ext.c
2705
if (cpu >= 0) {
kernel/sched/ext.c
2709
cpu = prev_cpu;
kernel/sched/ext.c
2711
p->scx.selected_cpu = cpu;
kernel/sched/ext.c
2715
return cpu;
kernel/sched/ext.c
2750
int cpu = cpu_of(rq);
kernel/sched/ext.c
2766
SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu);
kernel/sched/ext.c
2768
SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_offline, NULL, cpu);
kernel/sched/ext.c
2772
"cpu %d going %s, exiting scheduler", cpu,
kernel/sched/ext.c
2830
int cpu;
kernel/sched/ext.c
2834
for_each_online_cpu(cpu) {
kernel/sched/ext.c
2835
if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
kernel/sched/ext.c
3958
bool scx_hardlockup(int cpu)
kernel/sched/ext.c
3960
if (!handle_lockup("hard lockup - CPU %d", cpu))
kernel/sched/ext.c
3964
cpu);
kernel/sched/ext.c
4073
int cpu;
kernel/sched/ext.c
4076
for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
kernel/sched/ext.c
4077
u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);
kernel/sched/ext.c
4098
for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
kernel/sched/ext.c
4099
if (READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr) < nr_target)
kernel/sched/ext.c
4100
cpumask_set_cpu(cpu, donee_mask);
kernel/sched/ext.c
4105
for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
kernel/sched/ext.c
4106
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
4111
if (cpumask_test_cpu(cpu, donee_mask))
kernel/sched/ext.c
4120
for_each_cpu(cpu, resched_mask)
kernel/sched/ext.c
4121
resched_cpu(cpu);
kernel/sched/ext.c
4123
for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
kernel/sched/ext.c
4124
u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);
kernel/sched/ext.c
4201
int cpu;
kernel/sched/ext.c
4244
for_each_possible_cpu(cpu) {
kernel/sched/ext.c
4245
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
4284
if (cpu_online(cpu) || cpu == smp_processor_id())
kernel/sched/ext.c
4346
int cpu;
kernel/sched/ext.c
4348
for_each_possible_cpu(cpu) {
kernel/sched/ext.c
4349
struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu);
kernel/sched/ext.c
4364
int kind, cpu;
kernel/sched/ext.c
4444
for_each_possible_cpu(cpu) {
kernel/sched/ext.c
4445
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
4598
dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */
kernel/sched/ext.c
4656
scx_dump_data.cpu = -1;
kernel/sched/ext.c
4718
int cpu;
kernel/sched/ext.c
4745
for_each_possible_cpu(cpu) {
kernel/sched/ext.c
4746
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
4772
cpu, rq->scx.nr_running, rq->scx.flags,
kernel/sched/ext.c
4798
&dctx, cpu, idle);
kernel/sched/ext.c
4892
int cpu;
kernel/sched/ext.c
4898
for_each_possible_cpu(cpu) {
kernel/sched/ext.c
4899
struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu);
kernel/sched/ext.c
4905
GFP_KERNEL, cpu_to_node(cpu));
kernel/sched/ext.c
5075
int i, cpu, ret;
kernel/sched/ext.c
5105
for_each_possible_cpu(cpu)
kernel/sched/ext.c
5106
cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
kernel/sched/ext.c
5535
static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
kernel/sched/ext.c
5536
static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
kernel/sched/ext.c
5537
static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
kernel/sched/ext.c
5552
static void sched_ext_ops__cpu_online(s32 cpu) {}
kernel/sched/ext.c
5553
static void sched_ext_ops__cpu_offline(s32 cpu) {}
kernel/sched/ext.c
5557
static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
kernel/sched/ext.c
5663
static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs)
kernel/sched/ext.c
5665
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
5680
if ((cpu_online(cpu) || cpu == cpu_of(this_rq)) &&
kernel/sched/ext.c
5682
if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
kernel/sched/ext.c
5685
cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
kernel/sched/ext.c
5688
if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
kernel/sched/ext.c
5690
cpumask_set_cpu(cpu, this_scx->cpus_to_sync);
kernel/sched/ext.c
5691
ksyncs[cpu] = rq->scx.kick_sync;
kernel/sched/ext.c
5694
cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
kernel/sched/ext.c
5699
cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
kernel/sched/ext.c
5700
cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
kernel/sched/ext.c
5708
static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
kernel/sched/ext.c
5710
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
5716
(cpu_online(cpu) || cpu == cpu_of(this_rq)))
kernel/sched/ext.c
5729
s32 cpu;
kernel/sched/ext.c
5738
for_each_cpu(cpu, this_scx->cpus_to_kick) {
kernel/sched/ext.c
5739
should_wait |= kick_one_cpu(cpu, this_rq, ksyncs);
kernel/sched/ext.c
5740
cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
kernel/sched/ext.c
5741
cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
kernel/sched/ext.c
5744
for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
kernel/sched/ext.c
5745
kick_one_cpu_if_idle(cpu, this_rq);
kernel/sched/ext.c
5746
cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
kernel/sched/ext.c
5837
s32 cpu, v;
kernel/sched/ext.c
5849
for_each_possible_cpu(cpu) {
kernel/sched/ext.c
5850
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
5851
int n = cpu_to_node(cpu);
kernel/sched/ext.c
5866
if (cpu_online(cpu))
kernel/sched/ext.c
5867
cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
kernel/sched/ext.c
6573
static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags)
kernel/sched/ext.c
6578
if (!ops_cpu_valid(sch, cpu, NULL))
kernel/sched/ext.c
6599
struct rq *target_rq = cpu_rq(cpu);
kernel/sched/ext.c
6611
cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
kernel/sched/ext.c
6613
cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
kernel/sched/ext.c
6616
cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
kernel/sched/ext.c
6618
cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
kernel/sched/ext.c
6636
__bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
kernel/sched/ext.c
6643
scx_kick_cpu(sch, cpu, flags);
kernel/sched/ext.c
6671
s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
kernel/sched/ext.c
6673
if (ops_cpu_valid(sch, cpu, NULL)) {
kernel/sched/ext.c
6674
ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
kernel/sched/ext.c
6980
if (raw_smp_processor_id() != dd->cpu) {
kernel/sched/ext.c
7038
__bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
kernel/sched/ext.c
7045
if (likely(sch) && ops_cpu_valid(sch, cpu, NULL))
kernel/sched/ext.c
7046
return arch_scale_cpu_capacity(cpu);
kernel/sched/ext.c
7065
__bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
kernel/sched/ext.c
7072
if (likely(sch) && ops_cpu_valid(sch, cpu, NULL))
kernel/sched/ext.c
7073
return arch_scale_freq_capacity(cpu);
kernel/sched/ext.c
7092
__bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
kernel/sched/ext.c
7103
scx_error(sch, "Invalid cpuperf target %u for CPU %d", perf, cpu);
kernel/sched/ext.c
7107
if (ops_cpu_valid(sch, cpu, NULL)) {
kernel/sched/ext.c
7108
struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq();
kernel/sched/ext.c
7116
scx_error(sch, "Invalid target CPU %d", cpu);
kernel/sched/ext.c
7209
__bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
kernel/sched/ext.c
7219
if (!ops_cpu_valid(sch, cpu, NULL))
kernel/sched/ext.c
7229
return cpu_rq(cpu);
kernel/sched/ext.c
7264
__bpf_kfunc struct task_struct *scx_bpf_cpu_curr(s32 cpu)
kernel/sched/ext.c
7274
if (!ops_cpu_valid(sch, cpu, NULL))
kernel/sched/ext.c
7277
return rcu_dereference(cpu_rq(cpu)->curr);
kernel/sched/ext.c
7380
int cpu;
kernel/sched/ext.c
7384
for_each_possible_cpu(cpu) {
kernel/sched/ext.c
7385
e_cpu = &per_cpu_ptr(sch->pcpu, cpu)->event_stats;
kernel/sched/ext.c
766
static inline bool __cpu_valid(s32 cpu)
kernel/sched/ext.c
768
return likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu));
kernel/sched/ext.c
781
static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where)
kernel/sched/ext.c
783
if (__cpu_valid(cpu)) {
kernel/sched/ext.c
786
scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
kernel/sched/ext.h
25
static inline u32 scx_cpuperf_target(s32 cpu)
kernel/sched/ext.h
28
return cpu_rq(cpu)->scx.cpuperf_target;
kernel/sched/ext.h
50
static inline u32 scx_cpuperf_target(s32 cpu) { return 0; }
kernel/sched/ext_idle.c
104
else if (cpumask_test_cpu(cpu, idle_smts))
kernel/sched/ext_idle.c
105
__cpumask_clear_cpu(cpu, idle_smts);
kernel/sched/ext_idle.c
109
return cpumask_test_and_clear_cpu(cpu, idle_cpus);
kernel/sched/ext_idle.c
1092
return idle_cpumask(node)->cpu;
kernel/sched/ext_idle.c
1120
return idle_cpumask(NUMA_NO_NODE)->cpu;
kernel/sched/ext_idle.c
1150
return idle_cpumask(node)->cpu;
kernel/sched/ext_idle.c
117
int cpu;
kernel/sched/ext_idle.c
1182
return idle_cpumask(NUMA_NO_NODE)->cpu;
kernel/sched/ext_idle.c
121
cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed);
kernel/sched/ext_idle.c
1210
__bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
kernel/sched/ext_idle.c
122
if (cpu < nr_cpu_ids)
kernel/sched/ext_idle.c
1223
if (!ops_cpu_valid(sch, cpu, NULL))
kernel/sched/ext_idle.c
1226
return scx_idle_test_and_clear_cpu(cpu);
kernel/sched/ext_idle.c
129
cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed);
kernel/sched/ext_idle.c
130
if (cpu >= nr_cpu_ids)
kernel/sched/ext_idle.c
1335
s32 cpu;
kernel/sched/ext_idle.c
134
if (scx_idle_test_and_clear_cpu(cpu))
kernel/sched/ext_idle.c
1347
cpu = scx_pick_idle_cpu(cpus_allowed, node, flags);
kernel/sched/ext_idle.c
1348
if (cpu >= 0)
kernel/sched/ext_idle.c
1349
return cpu;
kernel/sched/ext_idle.c
135
return cpu;
kernel/sched/ext_idle.c
1352
cpu = cpumask_any_and_distribute(cpumask_of_node(node), cpus_allowed);
kernel/sched/ext_idle.c
1354
cpu = cpumask_any_distribute(cpus_allowed);
kernel/sched/ext_idle.c
1355
if (cpu < nr_cpu_ids)
kernel/sched/ext_idle.c
1356
return cpu;
kernel/sched/ext_idle.c
1382
s32 cpu;
kernel/sched/ext_idle.c
1396
cpu = scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
kernel/sched/ext_idle.c
1397
if (cpu >= 0)
kernel/sched/ext_idle.c
1398
return cpu;
kernel/sched/ext_idle.c
1401
cpu = cpumask_any_distribute(cpus_allowed);
kernel/sched/ext_idle.c
1402
if (cpu < nr_cpu_ids)
kernel/sched/ext_idle.c
1403
return cpu;
kernel/sched/ext_idle.c
153
s32 cpu = -EBUSY;
kernel/sched/ext_idle.c
181
cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
kernel/sched/ext_idle.c
182
if (cpu >= 0)
kernel/sched/ext_idle.c
187
return cpu;
kernel/sched/ext_idle.c
202
s32 cpu;
kernel/sched/ext_idle.c
209
cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
kernel/sched/ext_idle.c
210
if (cpu >= 0)
kernel/sched/ext_idle.c
211
return cpu;
kernel/sched/ext_idle.c
231
static unsigned int llc_weight(s32 cpu)
kernel/sched/ext_idle.c
235
sd = rcu_dereference(per_cpu(sd_llc, cpu));
kernel/sched/ext_idle.c
246
static struct cpumask *llc_span(s32 cpu)
kernel/sched/ext_idle.c
250
sd = rcu_dereference(per_cpu(sd_llc, cpu));
kernel/sched/ext_idle.c
261
static unsigned int numa_weight(s32 cpu)
kernel/sched/ext_idle.c
266
sd = rcu_dereference(per_cpu(sd_numa, cpu));
kernel/sched/ext_idle.c
280
static struct cpumask *numa_span(s32 cpu)
kernel/sched/ext_idle.c
285
sd = rcu_dereference(per_cpu(sd_numa, cpu));
kernel/sched/ext_idle.c
301
int cpu;
kernel/sched/ext_idle.c
326
for_each_online_cpu(cpu)
kernel/sched/ext_idle.c
327
if (llc_weight(cpu) != numa_weight(cpu))
kernel/sched/ext_idle.c
33
cpumask_var_t cpu;
kernel/sched/ext_idle.c
348
s32 cpu = cpumask_first(cpu_online_mask);
kernel/sched/ext_idle.c
361
nr_cpus = llc_weight(cpu);
kernel/sched/ext_idle.c
366
cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
kernel/sched/ext_idle.c
383
nr_cpus = numa_weight(cpu);
kernel/sched/ext_idle.c
388
cpumask_pr_args(numa_span(cpu)), nr_cpus);
kernel/sched/ext_idle.c
458
s32 cpu;
kernel/sched/ext_idle.c
479
cpu = -EBUSY;
kernel/sched/ext_idle.c
526
cpu = smp_processor_id();
kernel/sched/ext_idle.c
527
if (is_prev_allowed && cpus_share_cache(cpu, prev_cpu) &&
kernel/sched/ext_idle.c
529
cpu = prev_cpu;
kernel/sched/ext_idle.c
546
waker_node = scx_cpu_node_if_enabled(cpu);
kernel/sched/ext_idle.c
548
cpu_rq(cpu)->scx.local_dsq.nr == 0 &&
kernel/sched/ext_idle.c
550
!cpumask_empty(idle_cpumask(waker_node)->cpu)) {
kernel/sched/ext_idle.c
551
if (cpumask_test_cpu(cpu, allowed))
kernel/sched/ext_idle.c
567
cpu = prev_cpu;
kernel/sched/ext_idle.c
575
cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
kernel/sched/ext_idle.c
576
if (cpu >= 0)
kernel/sched/ext_idle.c
584
cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
kernel/sched/ext_idle.c
585
if (cpu >= 0)
kernel/sched/ext_idle.c
597
cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
kernel/sched/ext_idle.c
598
if (cpu >= 0)
kernel/sched/ext_idle.c
606
cpu = -EBUSY;
kernel/sched/ext_idle.c
615
cpu = prev_cpu;
kernel/sched/ext_idle.c
623
cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
kernel/sched/ext_idle.c
624
if (cpu >= 0)
kernel/sched/ext_idle.c
632
cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
kernel/sched/ext_idle.c
633
if (cpu >= 0)
kernel/sched/ext_idle.c
645
cpu = scx_pick_idle_cpu(allowed, node, flags);
kernel/sched/ext_idle.c
652
return cpu;
kernel/sched/ext_idle.c
663
BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.cpu, GFP_KERNEL));
kernel/sched/ext_idle.c
675
BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->cpu, GFP_KERNEL, i));
kernel/sched/ext_idle.c
69
static int scx_cpu_node_if_enabled(int cpu)
kernel/sched/ext_idle.c
690
static void update_builtin_idle(int cpu, bool idle)
kernel/sched/ext_idle.c
692
int node = scx_cpu_node_if_enabled(cpu);
kernel/sched/ext_idle.c
693
struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
kernel/sched/ext_idle.c
695
assign_cpu(cpu, idle_cpus, idle);
kernel/sched/ext_idle.c
699
const struct cpumask *smt = cpu_smt_mask(cpu);
kernel/sched/ext_idle.c
736
int cpu = cpu_of(rq);
kernel/sched/ext_idle.c
74
return cpu_to_node(cpu);
kernel/sched/ext_idle.c
756
update_builtin_idle(cpu, idle);
kernel/sched/ext_idle.c
77
static bool scx_idle_test_and_clear_cpu(int cpu)
kernel/sched/ext_idle.c
783
cpumask_copy(idle_cpumask(NUMA_NO_NODE)->cpu, cpu_online_mask);
kernel/sched/ext_idle.c
79
int node = scx_cpu_node_if_enabled(cpu);
kernel/sched/ext_idle.c
791
cpumask_and(idle_cpumask(node)->cpu, cpu_online_mask, node_mask);
kernel/sched/ext_idle.c
80
struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
kernel/sched/ext_idle.c
89
const struct cpumask *smt = cpu_smt_mask(cpu);
kernel/sched/ext_idle.c
897
s32 cpu;
kernel/sched/ext_idle.c
937
cpu = prev_cpu;
kernel/sched/ext_idle.c
939
cpu = -EBUSY;
kernel/sched/ext_idle.c
941
cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
kernel/sched/ext_idle.c
948
return cpu;
kernel/sched/ext_idle.c
956
__bpf_kfunc int scx_bpf_cpu_node(s32 cpu)
kernel/sched/ext_idle.c
963
if (unlikely(!sch) || !ops_cpu_valid(sch, cpu, NULL))
kernel/sched/ext_idle.c
965
return cpu_to_node(cpu);
kernel/sched/ext_idle.c
987
s32 cpu;
kernel/sched/ext_idle.c
995
cpu = select_cpu_from_kfunc(sch, p, prev_cpu, wake_flags, NULL, 0);
kernel/sched/ext_idle.c
996
if (cpu >= 0) {
kernel/sched/ext_idle.c
998
return cpu;
kernel/sched/ext_internal.h
352
void (*dispatch)(s32 cpu, struct task_struct *prev);
kernel/sched/ext_internal.h
520
void (*update_idle)(s32 cpu, bool idle);
kernel/sched/ext_internal.h
530
void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
kernel/sched/ext_internal.h
542
void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
kernel/sched/ext_internal.h
606
void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
kernel/sched/ext_internal.h
735
void (*cpu_online)(s32 cpu);
kernel/sched/ext_internal.h
744
void (*cpu_offline)(s32 cpu);
kernel/sched/fair.c
10018
static void sched_balance_update_blocked_averages(int cpu)
kernel/sched/fair.c
10020
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
10088
static unsigned long scale_rt_capacity(int cpu)
kernel/sched/fair.c
10090
unsigned long max = get_actual_cpu_capacity(cpu);
kernel/sched/fair.c
10091
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
10115
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
kernel/sched/fair.c
10117
unsigned long capacity = scale_rt_capacity(cpu);
kernel/sched/fair.c
10123
cpu_rq(cpu)->cpu_capacity = capacity;
kernel/sched/fair.c
10124
trace_sched_cpu_capacity_tp(cpu_rq(cpu));
kernel/sched/fair.c
10131
void update_group_capacity(struct sched_domain *sd, int cpu)
kernel/sched/fair.c
10143
update_cpu_capacity(sd, cpu);
kernel/sched/fair.c
10157
for_each_cpu(cpu, sched_group_span(sdg)) {
kernel/sched/fair.c
10158
unsigned long cpu_cap = capacity_of(cpu);
kernel/sched/fair.c
10329
static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
kernel/sched/fair.c
10337
return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
kernel/sched/fair.c
10738
static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
kernel/sched/fair.c
10741
if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
kernel/sched/fair.c
10757
static int idle_cpu_without(int cpu, struct task_struct *p)
kernel/sched/fair.c
10759
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
1144
static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
kernel/sched/fair.c
1146
static unsigned long capacity_of(int cpu);
kernel/sched/fair.c
11762
int cpu, idle_smt = -1;
kernel/sched/fair.c
11786
for_each_cpu_and(cpu, swb_cpus, env->cpus) {
kernel/sched/fair.c
11787
if (!idle_cpu(cpu))
kernel/sched/fair.c
11795
if (!(env->sd->flags & SD_SHARE_CPUCAPACITY) && !is_core_idle(cpu)) {
kernel/sched/fair.c
11797
idle_smt = cpu;
kernel/sched/fair.c
11804
cpumask_andnot(swb_cpus, swb_cpus, cpu_smt_mask(cpu));
kernel/sched/fair.c
11813
return cpu == env->dst_cpu;
kernel/sched/fair.c
11922
env.src_cpu = busiest->cpu;
kernel/sched/fair.c
12242
.src_cpu = busiest_rq->cpu,
kernel/sched/fair.c
12334
int cpu = rq->cpu;
kernel/sched/fair.c
12335
int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
kernel/sched/fair.c
12345
for_each_domain(cpu, sd) {
kernel/sched/fair.c
12366
if (sched_balance_rq(cpu, rq, sd, idle, &continue_balancing)) {
kernel/sched/fair.c
12372
idle = idle_cpu(cpu);
kernel/sched/fair.c
12373
busy = !idle && !sched_idle_cpu(cpu);
kernel/sched/fair.c
12489
int nr_busy, i, cpu = rq->cpu;
kernel/sched/fair.c
12543
sd = rcu_dereference_all(per_cpu(sd_asym_packing, cpu));
kernel/sched/fair.c
12554
if (sched_asym(sd, i, cpu)) {
kernel/sched/fair.c
12561
sd = rcu_dereference_all(per_cpu(sd_asym_cpucapacity, cpu));
kernel/sched/fair.c
12582
sds = rcu_dereference_all(per_cpu(sd_llc_shared, cpu));
kernel/sched/fair.c
12609
static void set_cpu_sd_state_busy(int cpu)
kernel/sched/fair.c
12614
sd = rcu_dereference_all(per_cpu(sd_llc, cpu));
kernel/sched/fair.c
12633
cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
kernel/sched/fair.c
12635
set_cpu_sd_state_busy(rq->cpu);
kernel/sched/fair.c
12638
static void set_cpu_sd_state_idle(int cpu)
kernel/sched/fair.c
12643
sd = rcu_dereference_all(per_cpu(sd_llc, cpu));
kernel/sched/fair.c
12658
void nohz_balance_enter_idle(int cpu)
kernel/sched/fair.c
12660
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
12662
WARN_ON_ONCE(cpu != smp_processor_id());
kernel/sched/fair.c
12665
if (!cpu_active(cpu))
kernel/sched/fair.c
12690
cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
kernel/sched/fair.c
12699
set_cpu_sd_state_idle(cpu);
kernel/sched/fair.c
12712
unsigned int cpu = rq->cpu;
kernel/sched/fair.c
12717
if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
kernel/sched/fair.c
12723
sched_balance_update_blocked_averages(cpu);
kernel/sched/fair.c
12740
int this_cpu = this_rq->cpu;
kernel/sched/fair.c
12868
void nohz_run_idle_balance(int cpu)
kernel/sched/fair.c
12872
flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu));
kernel/sched/fair.c
12879
_nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK);
kernel/sched/fair.c
12884
int this_cpu = this_rq->cpu;
kernel/sched/fair.c
12926
int this_cpu = this_rq->cpu;
kernel/sched/fair.c
13090
sched_balance_update_blocked_averages(this_rq->cpu);
kernel/sched/fair.c
13405
static int task_is_throttled_fair(struct task_struct *p, int cpu)
kernel/sched/fair.c
13410
cfs_rq = task_group(p)->cfs_rq[cpu];
kernel/sched/fair.c
13412
cfs_rq = &cpu_rq(cpu)->cfs;
kernel/sched/fair.c
13750
int cpu;
kernel/sched/fair.c
13754
for_each_possible_cpu(cpu) {
kernel/sched/fair.c
13755
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
kernel/sched/fair.c
13756
struct sched_entity *se = tg->se[cpu];
kernel/sched/fair.c
13757
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
13783
struct sched_entity *se, int cpu,
kernel/sched/fair.c
13786
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
13792
tg->cfs_rq[cpu] = cfs_rq;
kernel/sched/fair.c
13793
tg->se[cpu] = se;
kernel/sched/fair.c
13999
void print_cfs_stats(struct seq_file *m, int cpu)
kernel/sched/fair.c
14004
for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
kernel/sched/fair.c
14005
print_cfs_rq(m, cpu, cfs_rq);
kernel/sched/fair.c
1464
static inline bool is_core_idle(int cpu)
kernel/sched/fair.c
1469
for_each_cpu(sibling, cpu_smt_mask(cpu)) {
kernel/sched/fair.c
1470
if (cpu == sibling)
kernel/sched/fair.c
2161
static inline bool test_idle_cores(int cpu);
kernel/sched/fair.c
2162
static inline int numa_idle_core(int idle_core, int cpu)
kernel/sched/fair.c
2165
idle_core >= 0 || !test_idle_cores(cpu))
kernel/sched/fair.c
2172
if (is_core_idle(cpu))
kernel/sched/fair.c
2173
idle_core = cpu;
kernel/sched/fair.c
2178
static inline int numa_idle_core(int idle_core, int cpu)
kernel/sched/fair.c
2194
int cpu, idle_core = -1;
kernel/sched/fair.c
2200
for_each_cpu(cpu, cpumask_of_node(nid)) {
kernel/sched/fair.c
2201
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
2205
ns->util += cpu_util_cfs(cpu);
kernel/sched/fair.c
2207
ns->compute_capacity += capacity_of(cpu);
kernel/sched/fair.c
2209
if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
kernel/sched/fair.c
2211
!cpumask_test_cpu(cpu, env->p->cpus_ptr))
kernel/sched/fair.c
2215
ns->idle_cpu = cpu;
kernel/sched/fair.c
2217
idle_core = numa_idle_core(idle_core, cpu);
kernel/sched/fair.c
2237
int cpu;
kernel/sched/fair.c
2241
for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start + 1) {
kernel/sched/fair.c
2242
if (cpu == env->best_cpu || !idle_cpu(cpu) ||
kernel/sched/fair.c
2243
!cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
kernel/sched/fair.c
2247
env->dst_cpu = cpu;
kernel/sched/fair.c
2465
int cpu = env->dst_stats.idle_cpu;
kernel/sched/fair.c
2468
if (cpu < 0)
kernel/sched/fair.c
2469
cpu = env->dst_cpu;
kernel/sched/fair.c
2475
if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
kernel/sched/fair.c
2477
cpu = env->best_cpu;
kernel/sched/fair.c
2480
env->dst_cpu = cpu;
kernel/sched/fair.c
2511
int cpu;
kernel/sched/fair.c
2555
for_each_cpu_and(cpu, cpumask_of_node(env->dst_nid), env->p->cpus_ptr) {
kernel/sched/fair.c
2556
env->dst_cpu = cpu;
kernel/sched/fair.c
3083
int cpu = cpupid_to_cpu(cpupid);
kernel/sched/fair.c
3111
tsk = READ_ONCE(cpu_rq(cpu)->curr);
kernel/sched/fair.c
313
int cpu = cpu_of(rq);
kernel/sched/fair.c
330
cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
kernel/sched/fair.c
338
&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
kernel/sched/fair.c
4982
static inline unsigned long get_actual_cpu_capacity(int cpu)
kernel/sched/fair.c
4984
unsigned long capacity = arch_scale_cpu_capacity(cpu);
kernel/sched/fair.c
4986
capacity -= max(hw_load_avg(cpu_rq(cpu)), cpufreq_get_pressure(cpu));
kernel/sched/fair.c
4994
int cpu)
kernel/sched/fair.c
4996
unsigned long capacity = capacity_of(cpu);
kernel/sched/fair.c
5028
capacity_orig = arch_scale_cpu_capacity(cpu);
kernel/sched/fair.c
5104
(uclamp_min > get_actual_cpu_capacity(cpu)))
kernel/sched/fair.c
5110
static inline int task_fits_cpu(struct task_struct *p, int cpu)
kernel/sched/fair.c
5119
return (util_fits_cpu(util, uclamp_min, uclamp_max, cpu) > 0);
kernel/sched/fair.c
5124
int cpu = cpu_of(rq);
kernel/sched/fair.c
5134
(arch_scale_cpu_capacity(cpu) == p->max_allowed_capacity) ||
kernel/sched/fair.c
5135
task_fits_cpu(p, cpu)) {
kernel/sched/fair.c
6445
static void sync_throttle(struct task_group *tg, int cpu)
kernel/sched/fair.c
6455
cfs_rq = tg->cfs_rq[cpu];
kernel/sched/fair.c
6456
pcfs_rq = tg->parent->cfs_rq[cpu];
kernel/sched/fair.c
6459
cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
kernel/sched/fair.c
6721
int cpu = cpu_of(rq);
kernel/sched/fair.c
6726
if (!tick_nohz_full_cpu(cpu))
kernel/sched/fair.c
6739
tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
kernel/sched/fair.c
6748
static inline void sync_throttle(struct task_group *tg, int cpu) {}
kernel/sched/fair.c
6850
static inline bool cpu_overutilized(int cpu)
kernel/sched/fair.c
6857
rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
kernel/sched/fair.c
6858
rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
kernel/sched/fair.c
6861
return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
kernel/sched/fair.c
6888
if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu))
kernel/sched/fair.c
6899
static int sched_idle_cpu(int cpu)
kernel/sched/fair.c
6901
return sched_idle_rq(cpu_rq(cpu));
kernel/sched/fair.c
7281
static unsigned long capacity_of(int cpu)
kernel/sched/fair.c
7283
return cpu_rq(cpu)->cpu_capacity;
kernel/sched/fair.c
7503
int cpu, int prev_cpu, int sd_flag)
kernel/sched/fair.c
7505
int new_cpu = cpu;
kernel/sched/fair.c
7527
group = sched_balance_find_dst_group(sd, p, cpu);
kernel/sched/fair.c
7533
new_cpu = sched_balance_find_dst_group_cpu(group, p, cpu);
kernel/sched/fair.c
7534
if (new_cpu == cpu) {
kernel/sched/fair.c
7541
cpu = new_cpu;
kernel/sched/fair.c
7544
for_each_domain(cpu, tmp) {
kernel/sched/fair.c
7555
static inline int __select_idle_cpu(int cpu, struct task_struct *p)
kernel/sched/fair.c
7557
if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) &&
kernel/sched/fair.c
7558
sched_cpu_cookie_match(cpu_rq(cpu), p))
kernel/sched/fair.c
7559
return cpu;
kernel/sched/fair.c
7568
static inline void set_idle_cores(int cpu, int val)
kernel/sched/fair.c
7572
sds = rcu_dereference_all(per_cpu(sd_llc_shared, cpu));
kernel/sched/fair.c
7577
static inline bool test_idle_cores(int cpu)
kernel/sched/fair.c
7581
sds = rcu_dereference_all(per_cpu(sd_llc_shared, cpu));
kernel/sched/fair.c
7598
int cpu;
kernel/sched/fair.c
7604
for_each_cpu(cpu, cpu_smt_mask(core)) {
kernel/sched/fair.c
7605
if (cpu == core)
kernel/sched/fair.c
7608
if (!available_idle_cpu(cpu))
kernel/sched/fair.c
7625
int cpu;
kernel/sched/fair.c
7627
for_each_cpu(cpu, cpu_smt_mask(core)) {
kernel/sched/fair.c
7628
if (!available_idle_cpu(cpu)) {
kernel/sched/fair.c
7631
if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, cpus)) {
kernel/sched/fair.c
7632
*idle_cpu = cpu;
kernel/sched/fair.c
7639
if (*idle_cpu == -1 && cpumask_test_cpu(cpu, cpus))
kernel/sched/fair.c
7640
*idle_cpu = cpu;
kernel/sched/fair.c
7655
int cpu;
kernel/sched/fair.c
7657
for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
kernel/sched/fair.c
7658
if (cpu == target)
kernel/sched/fair.c
7664
if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
kernel/sched/fair.c
7666
if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
kernel/sched/fair.c
7667
return cpu;
kernel/sched/fair.c
7675
static inline void set_idle_cores(int cpu, int val)
kernel/sched/fair.c
7679
static inline bool test_idle_cores(int cpu)
kernel/sched/fair.c
7704
int i, cpu, idle_cpu = -1, nr = INT_MAX;
kernel/sched/fair.c
7724
for_each_cpu_wrap(cpu, sched_group_span(sg), target + 1) {
kernel/sched/fair.c
7725
if (!cpumask_test_cpu(cpu, cpus))
kernel/sched/fair.c
7729
i = select_idle_core(p, cpu, cpus, &idle_cpu);
kernel/sched/fair.c
7735
idle_cpu = __select_idle_cpu(cpu, p);
kernel/sched/fair.c
7744
for_each_cpu_wrap(cpu, cpus, target + 1) {
kernel/sched/fair.c
7746
i = select_idle_core(p, cpu, cpus, &idle_cpu);
kernel/sched/fair.c
7753
idle_cpu = __select_idle_cpu(cpu, p);
kernel/sched/fair.c
7775
int cpu, best_cpu = -1;
kernel/sched/fair.c
7785
for_each_cpu_wrap(cpu, cpus, target) {
kernel/sched/fair.c
7786
unsigned long cpu_cap = capacity_of(cpu);
kernel/sched/fair.c
7788
if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
kernel/sched/fair.c
7791
fits = util_fits_cpu(task_util, util_min, util_max, cpu);
kernel/sched/fair.c
7795
return cpu;
kernel/sched/fair.c
7801
cpu_cap = get_actual_cpu_capacity(cpu);
kernel/sched/fair.c
7810
best_cpu = cpu;
kernel/sched/fair.c
7821
int cpu)
kernel/sched/fair.c
7828
return (util_fits_cpu(util, util_min, util_max, cpu) > 0);
kernel/sched/fair.c
8005
cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
kernel/sched/fair.c
8007
struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
kernel/sched/fair.c
8022
if (p && task_cpu(p) == cpu && dst_cpu != cpu)
kernel/sched/fair.c
8024
else if (p && task_cpu(p) != cpu && dst_cpu == cpu)
kernel/sched/fair.c
8058
if (dst_cpu == cpu)
kernel/sched/fair.c
8066
return min(util, arch_scale_cpu_capacity(cpu));
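
cpu_util() ends by clamping its estimate to the CPU's architectural capacity, since the utilization sums can transiently overshoot. A toy illustration on the kernel's 0..1024 capacity scale (SCHED_CAPACITY_SCALE); the numbers are invented:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long util = 1190;      /* boosted utilization estimate */
        unsigned long capacity = 1024;  /* arch_scale_cpu_capacity(cpu) */

        printf("clamped util = %lu\n", min_ul(util, capacity));
        return 0;
}
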
kernel/sched/fair.c
8069
unsigned long cpu_util_cfs(int cpu)
kernel/sched/fair.c
8071
return cpu_util(cpu, NULL, -1, 0);
kernel/sched/fair.c
8074
unsigned long cpu_util_cfs_boost(int cpu)
kernel/sched/fair.c
8076
return cpu_util(cpu, NULL, -1, 1);
kernel/sched/fair.c
8092
static unsigned long cpu_util_without(int cpu, struct task_struct *p)
kernel/sched/fair.c
8095
if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
kernel/sched/fair.c
8098
return cpu_util(cpu, p, -1, 0);
kernel/sched/fair.c
8121
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
kernel/sched/fair.c
8126
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
8128
scale = arch_scale_cpu_capacity(cpu);
kernel/sched/fair.c
8195
unsigned long sched_cpu_util(int cpu)
kernel/sched/fair.c
8197
return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
kernel/sched/fair.c
8262
int cpu;
kernel/sched/fair.c
8264
for_each_cpu(cpu, pd_cpus) {
kernel/sched/fair.c
8265
unsigned long util = cpu_util(cpu, p, -1, 0);
kernel/sched/fair.c
8267
busy_time += effective_cpu_util(cpu, util, NULL, NULL);
kernel/sched/fair.c
8285
int cpu;
kernel/sched/fair.c
8287
for_each_cpu(cpu, pd_cpus) {
kernel/sched/fair.c
8288
struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL;
kernel/sched/fair.c
8289
unsigned long util = cpu_util(cpu, p, dst_cpu, 1);
kernel/sched/fair.c
8299
eff_util = effective_cpu_util(cpu, util, &min, &max);
kernel/sched/fair.c
8309
if (uclamp_rq_is_idle(cpu_rq(cpu)))
kernel/sched/fair.c
8315
eff_util = sugov_effective_cpu_perf(cpu, eff_util, min, max);
kernel/sched/fair.c
8391
int cpu, best_energy_cpu, target = -1;
kernel/sched/fair.c
8435
cpu = cpumask_first(cpus);
kernel/sched/fair.c
8436
cpu_actual_cap = get_actual_cpu_capacity(cpu);
kernel/sched/fair.c
8441
for_each_cpu(cpu, cpus) {
kernel/sched/fair.c
8442
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
8446
if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
kernel/sched/fair.c
8449
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
kernel/sched/fair.c
8452
util = cpu_util(cpu, p, cpu, 0);
kernel/sched/fair.c
8453
cpu_cap = capacity_of(cpu);
kernel/sched/fair.c
8477
fits = util_fits_cpu(util, util_min, util_max, cpu);
kernel/sched/fair.c
8483
if (cpu == prev_cpu) {
kernel/sched/fair.c
8495
max_spare_cap_cpu = cpu;
kernel/sched/fair.c
8584
int cpu = smp_processor_id();
kernel/sched/fair.c
8598
cpumask_test_cpu(cpu, p->cpus_ptr))
kernel/sched/fair.c
8599
return cpu;
kernel/sched/fair.c
8608
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
kernel/sched/fair.c
8612
for_each_domain(cpu, tmp) {
kernel/sched/fair.c
8619
if (cpu != prev_cpu)
kernel/sched/fair.c
8620
new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
kernel/sched/fair.c
8639
new_cpu = sched_balance_find_dst_cpu(sd, p, cpu, prev_cpu, sd_flag);
kernel/sched/fair.c
94
int __weak arch_asym_cpu_priority(int cpu)
kernel/sched/fair.c
9525
int cpu;
kernel/sched/fair.c
9546
cpu = cpumask_first_and_and(env->dst_grpmask, env->cpus, p->cpus_ptr);
kernel/sched/fair.c
9548
if (cpu < nr_cpu_ids) {
kernel/sched/fair.c
9550
env->new_dst_cpu = cpu;
kernel/sched/fair.c
96
return -cpu;
kernel/sched/fair.c
9900
int cpu = cpu_of(rq);
kernel/sched/fair.c
9920
se = cfs_rq->tg->se[cpu];
kernel/sched/idle.c
280
int cpu = smp_processor_id();
kernel/sched/idle.c
286
nohz_run_idle_balance(cpu);
kernel/sched/idle.c
334
if (cpu_is_offline(cpu)) {
kernel/sched/idle.c
459
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
kernel/sched/isolation.c
112
bool housekeeping_test_cpu(int cpu, enum hk_type type)
kernel/sched/isolation.c
116
return cpumask_test_cpu(cpu, housekeeping_cpumask(type));
kernel/sched/isolation.c
80
int cpu;
kernel/sched/isolation.c
84
cpu = sched_numa_find_closest(housekeeping_cpumask(type), smp_processor_id());
kernel/sched/isolation.c
85
if (cpu < nr_cpu_ids)
kernel/sched/isolation.c
86
return cpu;
kernel/sched/isolation.c
88
cpu = cpumask_any_and_distribute(housekeeping_cpumask(type), cpu_online_mask);
kernel/sched/isolation.c
89
if (likely(cpu < nr_cpu_ids))
kernel/sched/isolation.c
90
return cpu;
kernel/sched/membarrier.c
252
int cpu;
kernel/sched/membarrier.c
270
for_each_online_cpu(cpu) {
kernel/sched/membarrier.c
281
if (cpu == raw_smp_processor_id())
kernel/sched/membarrier.c
284
if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &
kernel/sched/membarrier.c
292
p = rcu_dereference(cpu_rq(cpu)->curr);
kernel/sched/membarrier.c
296
__cpumask_set_cpu(cpu, tmpmask);
kernel/sched/membarrier.c
377
int cpu;
kernel/sched/membarrier.c
380
for_each_online_cpu(cpu) {
kernel/sched/membarrier.c
383
p = rcu_dereference(cpu_rq(cpu)->curr);
kernel/sched/membarrier.c
385
__cpumask_set_cpu(cpu, tmpmask);
kernel/sched/membarrier.c
442
int cpu;
kernel/sched/membarrier.c
478
for_each_online_cpu(cpu) {
kernel/sched/membarrier.c
479
struct rq *rq = cpu_rq(cpu);
kernel/sched/membarrier.c
484
__cpumask_set_cpu(cpu, tmpmask);
kernel/sched/psi.c
1002
psi_write_end(cpu);
kernel/sched/psi.c
1008
int cpu = task_cpu(curr);
kernel/sched/psi.c
1024
irq = irq_time_read(cpu);
kernel/sched/psi.c
1030
psi_write_begin(cpu);
kernel/sched/psi.c
1031
now = cpu_clock(cpu);
kernel/sched/psi.c
1037
groupc = per_cpu_ptr(group->pcpu, cpu);
kernel/sched/psi.c
1045
psi_write_end(cpu);
kernel/sched/psi.c
1212
int cpu;
kernel/sched/psi.c
1232
for_each_possible_cpu(cpu) {
kernel/sched/psi.c
1235
guard(rq_lock_irq)(cpu_rq(cpu));
kernel/sched/psi.c
1237
psi_write_begin(cpu);
kernel/sched/psi.c
1238
now = cpu_clock(cpu);
kernel/sched/psi.c
1239
psi_group_change(group, cpu, 0, 0, now, true);
kernel/sched/psi.c
1240
psi_write_end(cpu);
kernel/sched/psi.c
181
static inline void psi_write_begin(int cpu)
kernel/sched/psi.c
183
write_seqcount_begin(per_cpu_ptr(&psi_seq, cpu));
kernel/sched/psi.c
186
static inline void psi_write_end(int cpu)
kernel/sched/psi.c
188
write_seqcount_end(per_cpu_ptr(&psi_seq, cpu));
kernel/sched/psi.c
191
static inline u32 psi_read_begin(int cpu)
kernel/sched/psi.c
193
return read_seqcount_begin(per_cpu_ptr(&psi_seq, cpu));
kernel/sched/psi.c
196
static inline bool psi_read_retry(int cpu, u32 seq)
kernel/sched/psi.c
198
return read_seqcount_retry(per_cpu_ptr(&psi_seq, cpu), seq);
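
The psi_write_begin()/psi_write_end() and psi_read_begin()/psi_read_retry() helpers quoted above wrap a per-CPU seqcount: writers bump the counter around each update, and lock-free readers retry whenever the counter moved (or was odd) while they were reading. A single-counter userspace analogue of that retry protocol — the real seqcount additionally relies on memory barriers this sketch does not reproduce:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;
static unsigned long total;             /* the data being published */

static void write_begin(void) { atomic_fetch_add(&seq, 1); }
static void write_end(void)   { atomic_fetch_add(&seq, 1); }

static unsigned int read_begin(void)
{
        unsigned int s;

        while ((s = atomic_load(&seq)) & 1)
                ;                       /* writer in progress, wait */
        return s;
}

static int read_retry(unsigned int s)
{
        return atomic_load(&seq) != s;
}

int main(void)
{
        unsigned long snap;
        unsigned int s;

        write_begin();
        total += 42;
        write_end();

        do {
                s = read_begin();
                snap = total;
        } while (read_retry(s));

        printf("consistent snapshot: %lu\n", snap);
        return 0;
}
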
kernel/sched/psi.c
271
static void get_recent_times(struct psi_group *group, int cpu,
kernel/sched/psi.c
275
struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
kernel/sched/psi.c
287
seq = psi_read_begin(cpu);
kernel/sched/psi.c
288
now = cpu_clock(cpu);
kernel/sched/psi.c
292
if (cpu == current_cpu)
kernel/sched/psi.c
294
} while (psi_read_retry(cpu, seq));
kernel/sched/psi.c
330
if (cpu == current_cpu)
kernel/sched/psi.c
369
int cpu;
kernel/sched/psi.c
380
for_each_possible_cpu(cpu) {
kernel/sched/psi.c
385
get_recent_times(group, cpu, aggregator, times,
kernel/sched/psi.c
796
static void psi_group_change(struct psi_group *group, int cpu,
kernel/sched/psi.c
804
lockdep_assert_rq_held(cpu_rq(cpu));
kernel/sched/psi.c
805
groupc = per_cpu_ptr(group->pcpu, cpu);
kernel/sched/psi.c
834
cpu, t, groupc->tasks[0],
kernel/sched/psi.c
871
if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
kernel/sched/psi.c
911
int cpu = task_cpu(task);
kernel/sched/psi.c
919
psi_write_begin(cpu);
kernel/sched/psi.c
920
now = cpu_clock(cpu);
kernel/sched/psi.c
922
psi_group_change(group, cpu, clear, set, now, true);
kernel/sched/psi.c
923
psi_write_end(cpu);
kernel/sched/psi.c
930
int cpu = task_cpu(prev);
kernel/sched/psi.c
933
psi_write_begin(cpu);
kernel/sched/psi.c
934
now = cpu_clock(cpu);
kernel/sched/psi.c
944
struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
kernel/sched/psi.c
950
psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
kernel/sched/psi.c
987
psi_group_change(group, cpu, clear, set, now, wake_clock);
kernel/sched/psi.c
999
psi_group_change(group, cpu, clear, set, now, wake_clock);
kernel/sched/rt.c
1061
cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
kernel/sched/rt.c
1076
cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
kernel/sched/rt.c
1499
select_task_rq_rt(struct task_struct *p, int cpu, int flags)
kernel/sched/rt.c
1509
rq = cpu_rq(cpu);
kernel/sched/rt.c
1545
if (test || !rt_task_fits_capacity(p, cpu)) {
kernel/sched/rt.c
1561
cpu = target;
kernel/sched/rt.c
1568
return cpu;
kernel/sched/rt.c
1745
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
kernel/sched/rt.c
1754
if (task_is_pushable(rq, p, cpu))
kernel/sched/rt.c
1768
int cpu = task_cpu(task);
kernel/sched/rt.c
1804
if (cpumask_test_cpu(cpu, lowest_mask))
kernel/sched/rt.c
1805
return cpu;
kernel/sched/rt.c
1815
for_each_domain(cpu, sd) {
kernel/sched/rt.c
1847
cpu = cpumask_any_distribute(lowest_mask);
kernel/sched/rt.c
1848
if (cpu < nr_cpu_ids)
kernel/sched/rt.c
1849
return cpu;
kernel/sched/rt.c
1864
BUG_ON(rq->cpu != task_cpu(p));
kernel/sched/rt.c
1880
int cpu;
kernel/sched/rt.c
1883
cpu = find_lowest_rq(task);
kernel/sched/rt.c
1885
if ((cpu == -1) || (cpu == rq->cpu))
kernel/sched/rt.c
1888
lowest_rq = cpu_rq(cpu);
kernel/sched/rt.c
1913
!cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
kernel/sched/rt.c
1965
int cpu;
kernel/sched/rt.c
1982
cpu = find_lowest_rq(rq->curr);
kernel/sched/rt.c
1983
if (cpu == -1 || cpu == rq->cpu)
kernel/sched/rt.c
1996
stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
kernel/sched/rt.c
2111
int cpu;
kernel/sched/rt.c
2129
cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
kernel/sched/rt.c
2131
rd->rto_cpu = cpu;
kernel/sched/rt.c
2134
if (cpu == this_cpu)
kernel/sched/rt.c
2137
if (cpu < nr_cpu_ids)
kernel/sched/rt.c
2138
return cpu;
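
rto_next_cpu() resumes its scan of rd->rto_mask from the CPU it handed work to last time, so the IPI-driven push work rotates across the overloaded CPUs instead of always restarting at CPU 0. A simplified model with a plain array standing in for the cpumask (values invented):

#include <stdio.h>

#define NR_CPUS 8

static int overloaded[NR_CPUS] = { 0, 1, 0, 0, 1, 0, 1, 0 };
static int rto_cpu = -1;        /* last CPU handed work, -1 = fresh scan */

static int next_overloaded_cpu(void)
{
        for (int cpu = rto_cpu + 1; cpu < NR_CPUS; cpu++) {
                if (overloaded[cpu]) {
                        rto_cpu = cpu;
                        return cpu;
                }
        }
        rto_cpu = -1;           /* wrapped around: stop the rotation */
        return -1;
}

int main(void)
{
        int cpu;

        while ((cpu = next_overloaded_cpu()) >= 0)
                printf("push work to CPU %d\n", cpu);
        return 0;
}
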
kernel/sched/rt.c
2171
int cpu = -1;
kernel/sched/rt.c
2189
cpu = rto_next_cpu(rq->rd);
kernel/sched/rt.c
2195
if (cpu >= 0) {
kernel/sched/rt.c
2198
irq_work_queue_on(&rq->rd->rto_push_work, cpu);
kernel/sched/rt.c
2208
int cpu;
kernel/sched/rt.c
2226
cpu = rto_next_cpu(rd);
kernel/sched/rt.c
2230
if (cpu < 0) {
kernel/sched/rt.c
2236
irq_work_queue_on(&rd->rto_push_work, cpu);
kernel/sched/rt.c
2242
int this_cpu = this_rq->cpu, cpu;
kernel/sched/rt.c
2259
cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
kernel/sched/rt.c
2269
for_each_cpu(cpu, this_rq->rd->rto_mask) {
kernel/sched/rt.c
227
struct sched_rt_entity *rt_se, int cpu,
kernel/sched/rt.c
2270
if (this_cpu == cpu)
kernel/sched/rt.c
2273
src_rq = cpu_rq(cpu);
kernel/sched/rt.c
230
struct rq *rq = cpu_rq(cpu);
kernel/sched/rt.c
2338
stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
kernel/sched/rt.c
237
tg->rt_rq[cpu] = rt_rq;
kernel/sched/rt.c
2374
cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
kernel/sched/rt.c
238
tg->rt_se[cpu] = rt_se;
kernel/sched/rt.c
2385
cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
kernel/sched/rt.c
2566
static int task_is_throttled_rt(struct task_struct *p, int cpu)
kernel/sched/rt.c
2571
rt_rq = task_group(p)->rt_rq[cpu];
kernel/sched/rt.c
2574
rt_rq = &cpu_rq(cpu)->rt;
kernel/sched/rt.c
2942
void print_rt_stats(struct seq_file *m, int cpu)
kernel/sched/rt.c
2948
for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
kernel/sched/rt.c
2949
print_rt_rq(m, cpu, rt_rq);
kernel/sched/rt.c
349
cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
kernel/sched/rt.c
370
cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
kernel/sched/rt.c
389
queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
kernel/sched/rt.c
394
queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
kernel/sched/rt.c
455
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
kernel/sched/rt.c
468
cpu_cap = arch_scale_cpu_capacity(cpu);
kernel/sched/rt.c
473
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
kernel/sched/rt.c
533
int cpu = cpu_of(rq);
kernel/sched/rt.c
535
rt_se = rt_rq->tg->rt_se[cpu];
kernel/sched/rt.c
551
int cpu = cpu_of(rq_of_rt_rq(rt_rq));
kernel/sched/rt.c
553
rt_se = rt_rq->tg->rt_se[cpu];
kernel/sched/rt.c
587
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
kernel/sched/rt.c
589
return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
kernel/sched/rt.c
948
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
kernel/sched/rt.c
950
return &cpu_rq(cpu)->rt;
kernel/sched/sched.h
1249
int cpu;
kernel/sched/sched.h
1367
return rq->cpu;
kernel/sched/sched.h
1390
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
kernel/sched/sched.h
1393
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
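
The cpu_rq()/cpu_curr() macros quoted above are plain per-CPU lookups: one statically allocated runqueue per CPU, indexed by CPU id. A userspace analogue with an ordinary array in place of the per-CPU area — the field names are simplified stand-ins, the real struct rq is far larger:

#include <stdio.h>

#define NR_CPUS 4

struct rq {
        int nr_running;
        int curr_pid;
};

/* Analogue of DEFINE_PER_CPU(struct rq, runqueues): one slot per CPU. */
static struct rq runqueues[NR_CPUS];

#define cpu_rq(cpu)   (&runqueues[(cpu)])
#define cpu_curr(cpu) (cpu_rq(cpu)->curr_pid)

int main(void)
{
        cpu_rq(2)->nr_running = 3;
        cpu_rq(2)->curr_pid = 1234;
        printf("cpu2: %d running, curr pid %d\n",
               cpu_rq(2)->nr_running, cpu_curr(2));
        return 0;
}
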
kernel/sched/sched.h
1407
static inline bool available_idle_cpu(int cpu)
kernel/sched/sched.h
1409
if (!idle_rq(cpu_rq(cpu)))
kernel/sched/sched.h
1412
if (vcpu_is_preempted(cpu))
kernel/sched/sched.h
1489
int cpu;
kernel/sched/sched.h
1498
for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
kernel/sched/sched.h
1499
if (!available_idle_cpu(cpu)) {
kernel/sched/sched.h
1516
int cpu;
kernel/sched/sched.h
1522
for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
kernel/sched/sched.h
1523
if (sched_core_cookie_match(cpu_rq(cpu), p))
kernel/sched/sched.h
2022
extern void sched_update_numa(int cpu, bool online);
kernel/sched/sched.h
2023
extern void sched_domains_numa_masks_set(unsigned int cpu);
kernel/sched/sched.h
2024
extern void sched_domains_numa_masks_clear(unsigned int cpu);
kernel/sched/sched.h
2025
extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
kernel/sched/sched.h
2030
static inline void sched_update_numa(int cpu, bool online) { }
kernel/sched/sched.h
2031
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
kernel/sched/sched.h
2032
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
kernel/sched/sched.h
2034
static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
kernel/sched/sched.h
2052
extern int migrate_task_to(struct task_struct *p, int cpu);
kernel/sched/sched.h
2054
int cpu, int scpu);
kernel/sched/sched.h
2096
#define for_each_domain(cpu, __sd) \
kernel/sched/sched.h
2097
for (__sd = rcu_dereference_sched_domain(cpu_rq(cpu)->sd); \
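
for_each_domain() walks the calling CPU's sched_domain hierarchy bottom-up through parent pointers until it falls off the top. A self-contained model of that walk; the domain names are invented for the demo:

#include <stdio.h>

struct sched_domain {
        const char *name;
        struct sched_domain *parent;
};

#define for_each_domain_sketch(base, sd) \
        for ((sd) = (base); (sd); (sd) = (sd)->parent)

int main(void)
{
        struct sched_domain pkg = { "PKG", NULL };
        struct sched_domain mc  = { "MC",  &pkg };
        struct sched_domain smt = { "SMT", &mc };
        struct sched_domain *sd;

        for_each_domain_sketch(&smt, sd)
                printf("domain %s\n", sd->name);
        return 0;
}
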
kernel/sched/sched.h
2117
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
kernel/sched/sched.h
2121
for_each_domain(cpu, sd) {
kernel/sched/sched.h
2138
static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
kernel/sched/sched.h
2142
for_each_domain(cpu, sd) {
kernel/sched/sched.h
2220
extern void dirty_sched_domain_sysctl(int cpu);
kernel/sched/sched.h
2252
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
kernel/sched/sched.h
2259
set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
kernel/sched/sched.h
2260
p->se.cfs_rq = tg->cfs_rq[cpu];
kernel/sched/sched.h
2261
p->se.parent = tg->se[cpu];
kernel/sched/sched.h
2262
p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0;
kernel/sched/sched.h
2273
p->rt.rt_rq = tg->rt_rq[cpu];
kernel/sched/sched.h
2274
p->rt.parent = tg->rt_se[cpu];
kernel/sched/sched.h
2280
static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
kernel/sched/sched.h
2289
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
kernel/sched/sched.h
2291
set_task_rq(p, cpu);
kernel/sched/sched.h
2299
WRITE_ONCE(task_thread_info(p)->cpu, cpu);
kernel/sched/sched.h
2300
p->wake_cpu = cpu;
kernel/sched/sched.h
2659
int (*task_is_throttled)(struct task_struct *p, int cpu);
kernel/sched/sched.h
2794
extern void update_group_capacity(struct sched_domain *sd, int cpu);
kernel/sched/sched.h
2801
static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu)
kernel/sched/sched.h
2804
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
kernel/sched/sched.h
2808
if (!(p->flags & PF_KTHREAD) && !task_cpu_possible(cpu, p))
kernel/sched/sched.h
2887
extern void resched_cpu(int cpu);
kernel/sched/sched.h
2918
int cpu = cpu_of(rq);
kernel/sched/sched.h
2920
if (!tick_nohz_full_cpu(cpu))
kernel/sched/sched.h
2924
tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
kernel/sched/sched.h
2926
tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
kernel/sched/sched.h
3094
unsigned long arch_scale_freq_capacity(int cpu)
kernel/sched/sched.h
3144
if (rq1->core->cpu < rq2->core->cpu)
kernel/sched/sched.h
3146
if (rq1->core->cpu > rq2->core->cpu)
kernel/sched/sched.h
3153
return rq1->cpu < rq2->cpu;
kernel/sched/sched.h
3317
extern void print_cfs_stats(struct seq_file *m, int cpu);
kernel/sched/sched.h
3318
extern void print_rt_stats(struct seq_file *m, int cpu);
kernel/sched/sched.h
3319
extern void print_dl_stats(struct seq_file *m, int cpu);
kernel/sched/sched.h
3320
extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
kernel/sched/sched.h
3321
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
kernel/sched/sched.h
3322
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
kernel/sched/sched.h
3324
extern void resched_latency_warn(int cpu, u64 latency);
kernel/sched/sched.h
3358
#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
kernel/sched/sched.h
3366
extern void nohz_run_idle_balance(int cpu);
kernel/sched/sched.h
3368
static inline void nohz_run_idle_balance(int cpu) { }
kernel/sched/sched.h
3421
static inline u64 irq_time_read(int cpu)
kernel/sched/sched.h
3423
struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
kernel/sched/sched.h
3491
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
kernel/sched/sched.h
3495
unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
kernel/sched/sched.h
3508
static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
kernel/sched/sched.h
3510
unsigned long cap = arch_scale_cpu_capacity(cpu);
kernel/sched/sched.h
3526
extern unsigned long cpu_util_cfs(int cpu);
kernel/sched/sched.h
3527
extern unsigned long cpu_util_cfs_boost(int cpu);
kernel/sched/sched.h
363
extern int dl_bw_deactivate(int cpu);
kernel/sched/sched.h
4022
set_task_cpu(task, dst_rq->cpu);
kernel/sched/sched.h
4028
bool task_is_pushable(struct rq *rq, struct task_struct *p, int cpu)
kernel/sched/sched.h
4031
cpumask_test_cpu(cpu, &p->cpus_mask))
kernel/sched/sched.h
583
struct sched_entity *se, int cpu,
kernel/sched/sched.h
593
struct sched_rt_entity *rt_se, int cpu,
kernel/sched/smp.h
14
extern bool call_function_single_prep_ipi(int cpu);
kernel/sched/stats.c
111
int cpu;
kernel/sched/stats.c
120
cpu = (unsigned long)(v - 2);
kernel/sched/stats.c
121
rq = cpu_rq(cpu);
kernel/sched/stats.c
126
cpu, rq->yld_count,
kernel/sched/stats.c
136
for_each_domain(cpu, sd) {
kernel/sched/stop_task.c
13
select_task_rq_stop(struct task_struct *p, int cpu, int flags)
kernel/sched/syscalls.c
181
int idle_cpu(int cpu)
kernel/sched/syscalls.c
183
return idle_rq(cpu_rq(cpu));
kernel/sched/syscalls.c
192
struct task_struct *idle_task(int cpu)
kernel/sched/syscalls.c
194
return cpu_rq(cpu)->idle;
kernel/sched/syscalls.c
198
int sched_core_idle_cpu(int cpu)
kernel/sched/syscalls.c
200
struct rq *rq = cpu_rq(cpu);
kernel/sched/syscalls.c
205
return idle_cpu(cpu);
kernel/sched/topology.c
1030
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
kernel/sched/topology.c
1041
for_each_cpu_wrap(i, span, cpu) {
kernel/sched/topology.c
1097
sg = build_group_from_child_sched_domain(sibling, cpu);
kernel/sched/topology.c
1195
static struct sched_group *get_group(int cpu, struct sd_data *sdd)
kernel/sched/topology.c
1197
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
kernel/sched/topology.c
1203
cpu = cpumask_first(sched_domain_span(child));
kernel/sched/topology.c
1205
sg = *per_cpu_ptr(sdd->sg, cpu);
kernel/sched/topology.c
1206
sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
kernel/sched/topology.c
1222
cpumask_set_cpu(cpu, sched_group_span(sg));
kernel/sched/topology.c
1223
cpumask_set_cpu(cpu, group_balance_mask(sg));
kernel/sched/topology.c
1241
build_sched_groups(struct sched_domain *sd, int cpu)
kernel/sched/topology.c
1254
for_each_cpu_wrap(i, span, cpu) {
kernel/sched/topology.c
1286
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
kernel/sched/topology.c
1294
int cpu, cores = 0, max_cpu = -1;
kernel/sched/topology.c
1299
for_each_cpu(cpu, mask) {
kernel/sched/topology.c
1302
cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
kernel/sched/topology.c
1310
for_each_cpu(cpu, sched_group_span(sg)) {
kernel/sched/topology.c
1312
max_cpu = cpu;
kernel/sched/topology.c
1313
else if (sched_asym_prefer(cpu, max_cpu))
kernel/sched/topology.c
1314
max_cpu = cpu;
kernel/sched/topology.c
1322
if (cpu != group_balance_cpu(sg))
kernel/sched/topology.c
1325
update_group_capacity(sd, cpu);
kernel/sched/topology.c
1329
void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
kernel/sched/topology.c
1331
int asym_prefer_cpu = cpu;
kernel/sched/topology.c
1336
for_each_domain(cpu, sd) {
kernel/sched/topology.c
1355
if (cpu != sg->asym_prefer_cpu) {
kernel/sched/topology.c
1362
if (!sched_asym_prefer(cpu, sg->asym_prefer_cpu))
kernel/sched/topology.c
1365
WRITE_ONCE(sg->asym_prefer_cpu, cpu);
kernel/sched/topology.c
139
static void sched_domain_debug(struct sched_domain *sd, int cpu)
kernel/sched/topology.c
1434
static inline void asym_cpu_capacity_update_data(int cpu)
kernel/sched/topology.c
1436
unsigned long capacity = arch_scale_cpu_capacity(cpu);
kernel/sched/topology.c
1462
__cpumask_set_cpu(cpu, cpu_capacity_span(entry));
kernel/sched/topology.c
147
printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
kernel/sched/topology.c
1473
int cpu;
kernel/sched/topology.c
1478
for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN))
kernel/sched/topology.c
1479
asym_cpu_capacity_update_data(cpu);
kernel/sched/topology.c
151
printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
kernel/sched/topology.c
154
if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
kernel/sched/topology.c
1578
static void claim_allocations(int cpu, struct sched_domain *sd)
kernel/sched/topology.c
1582
WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
kernel/sched/topology.c
1583
*per_cpu_ptr(sdd->sd, cpu) = NULL;
kernel/sched/topology.c
1585
if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
kernel/sched/topology.c
1586
*per_cpu_ptr(sdd->sds, cpu) = NULL;
kernel/sched/topology.c
1588
if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
kernel/sched/topology.c
1589
*per_cpu_ptr(sdd->sg, cpu) = NULL;
kernel/sched/topology.c
1591
if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
kernel/sched/topology.c
1592
*per_cpu_ptr(sdd->sgc, cpu) = NULL;
kernel/sched/topology.c
1639
struct sched_domain *child, int cpu)
kernel/sched/topology.c
1642
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
kernel/sched/topology.c
1646
sd_weight = cpumask_weight(tl->mask(tl, cpu));
kernel/sched/topology.c
1690
cpumask_and(sd_span, cpu_map, tl->mask(tl, cpu));
kernel/sched/topology.c
1751
const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu)
kernel/sched/topology.c
1753
return cpu_smt_mask(cpu);
kernel/sched/topology.c
1763
const struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu)
kernel/sched/topology.c
1765
return cpu_clustergroup_mask(cpu);
kernel/sched/topology.c
1775
const struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu)
kernel/sched/topology.c
1777
return cpu_coregroup_mask(cpu);
kernel/sched/topology.c
1781
const struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu)
kernel/sched/topology.c
1783
return cpu_node_mask(cpu);
kernel/sched/topology.c
1827
static const struct cpumask *sd_numa_mask(struct sched_domain_topology_level *tl, int cpu)
kernel/sched/topology.c
1829
return sched_domains_numa_masks[tl->numa_level][cpu_to_node(cpu)];
kernel/sched/topology.c
2179
void sched_update_numa(int cpu, bool online)
kernel/sched/topology.c
2183
node = cpu_to_node(cpu);
kernel/sched/topology.c
2195
void sched_domains_numa_masks_set(unsigned int cpu)
kernel/sched/topology.c
2197
int node = cpu_to_node(cpu);
kernel/sched/topology.c
2208
cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
kernel/sched/topology.c
2213
void sched_domains_numa_masks_clear(unsigned int cpu)
kernel/sched/topology.c
2220
cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
kernel/sched/topology.c
2233
int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
kernel/sched/topology.c
2235
int i, j = cpu_to_node(cpu), found = nr_cpu_ids;
kernel/sched/topology.c
2245
cpu = cpumask_any_and_distribute(cpus, masks[i][j]);
kernel/sched/topology.c
2246
if (cpu < nr_cpu_ids) {
kernel/sched/topology.c
2247
found = cpu;
kernel/sched/topology.c
2261
int cpu;
kernel/sched/topology.c
2270
if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu)
kernel/sched/topology.c
2280
if (k->w <= k->cpu)
kernel/sched/topology.c
2296
int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
kernel/sched/topology.c
2298
struct __cmp_key k = { .cpus = cpus, .cpu = cpu };
kernel/sched/topology.c
2303
return cpumask_nth_and(cpu, cpus, cpu_online_mask);
kernel/sched/topology.c
2321
cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) :
kernel/sched/topology.c
2322
cpumask_nth_and(cpu, cpus, k.masks[0][node]);
kernel/sched/topology.c
2468
struct sched_domain *child, int cpu)
kernel/sched/topology.c
2470
struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
kernel/sched/topology.c
2502
int cpu;
kernel/sched/topology.c
2527
for_each_cpu(cpu, cpu_map) {
kernel/sched/topology.c
2528
const struct cpumask *tl_cpu_mask = tl->mask(tl, cpu);
kernel/sched/topology.c
2787
unsigned int cpu = cpumask_any(cpu_map);
kernel/sched/topology.c
2790
if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
kernel/sched/topology.c
331
static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
kernel/sched/topology.c
334
if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
kernel/sched/topology.c
342
static struct perf_domain *pd_init(int cpu)
kernel/sched/topology.c
344
struct em_perf_domain *obj = em_cpu_get(cpu);
kernel/sched/topology.c
349
pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
kernel/sched/topology.c
413
int cpu = cpumask_first(cpu_map);
kernel/sched/topology.c
414
struct root_domain *rd = cpu_rq(cpu)->rd;
kernel/sched/topology.c
43
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
kernel/sched/topology.c
482
if (cpumask_test_cpu(rq->cpu, old_rd->online))
kernel/sched/topology.c
485
cpumask_clear_cpu(rq->cpu, old_rd->span);
kernel/sched/topology.c
499
cpumask_set_cpu(rq->cpu, rd->span);
kernel/sched/topology.c
500
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
kernel/sched/topology.c
56
if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
kernel/sched/topology.c
57
printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
kernel/sched/topology.c
59
if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
kernel/sched/topology.c
60
printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
kernel/sched/topology.c
676
static void update_top_cache_domain(int cpu)
kernel/sched/topology.c
680
int id = cpu;
kernel/sched/topology.c
683
sd = highest_flag_domain(cpu, SD_SHARE_LLC);
kernel/sched/topology.c
690
rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
kernel/sched/topology.c
691
per_cpu(sd_llc_size, cpu) = size;
kernel/sched/topology.c
692
per_cpu(sd_llc_id, cpu) = id;
kernel/sched/topology.c
693
rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
kernel/sched/topology.c
695
sd = lowest_flag_domain(cpu, SD_CLUSTER);
kernel/sched/topology.c
704
per_cpu(sd_share_id, cpu) = id;
kernel/sched/topology.c
706
sd = lowest_flag_domain(cpu, SD_NUMA);
kernel/sched/topology.c
707
rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
kernel/sched/topology.c
709
sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
kernel/sched/topology.c
710
rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
kernel/sched/topology.c
712
sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
kernel/sched/topology.c
713
rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
kernel/sched/topology.c
721
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
kernel/sched/topology.c
723
struct rq *rq = cpu_rq(cpu);
kernel/sched/topology.c
772
sched_domain_debug(sd, cpu);
kernel/sched/topology.c
777
dirty_sched_domain_sysctl(cpu);
kernel/sched/topology.c
780
update_top_cache_domain(cpu);
kernel/sched/topology.c
953
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
kernel/sched/topology.c
959
GFP_KERNEL, cpu_to_node(cpu));
kernel/sched/topology.c
982
int cpu;
kernel/sched/topology.c
985
cpu = cpumask_first(mask);
kernel/sched/topology.c
987
sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
kernel/scs.c
93
static int scs_cleanup(unsigned int cpu)
kernel/scs.c
96
void **cache = per_cpu_ptr(scs_cache, cpu);
kernel/smp.c
1079
int cpu;
kernel/smp.c
1081
for_each_possible_cpu(cpu) {
kernel/smp.c
1083
if (cpu != smp_processor_id() && cpu_online(cpu))
kernel/smp.c
1084
wake_up_if_idle(cpu);
kernel/smp.c
1102
unsigned int cpu;
kernel/smp.c
1104
for_each_cpu(cpu, mask) {
kernel/smp.c
1105
if (!llist_empty(per_cpu_ptr(&call_single_queue, cpu)))
kernel/smp.c
1131
int cpu;
kernel/smp.c
1139
if (sscs->cpu >= 0)
kernel/smp.c
1140
hypervisor_pin_vcpu(sscs->cpu);
kernel/smp.c
1142
if (sscs->cpu >= 0)
kernel/smp.c
1148
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
kernel/smp.c
115
send_call_function_single_ipi(int cpu)
kernel/smp.c
1154
.cpu = phys ? cpu : -1,
kernel/smp.c
1159
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
kernel/smp.c
1162
queue_work_on(cpu, system_wq, &sscs.work);
kernel/smp.c
117
if (call_function_single_prep_ipi(cpu)) {
kernel/smp.c
118
trace_ipi_send_cpu(cpu, _RET_IP_,
kernel/smp.c
120
arch_send_call_function_single_ipi(cpu);
kernel/smp.c
233
int cpu = -1;
kernel/smp.c
244
cpu = csd_lock_wait_getcpu(csd);
kernel/smp.c
246
*bug_id, raw_smp_processor_id(), cpu);
kernel/smp.c
270
cpu = csd_lock_wait_getcpu(csd);
kernel/smp.c
271
if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
kernel/smp.c
274
cpux = cpu;
kernel/smp.c
280
cpu, csd->func, csd->info);
kernel/smp.c
298
if (cpu >= 0) {
kernel/smp.c
299
if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0))
kernel/smp.c
300
dump_cpu_task(cpu);
kernel/smp.c
302
pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
kernel/smp.c
303
arch_send_call_function_single_ipi(cpu);
kernel/smp.c
380
void __smp_call_single_queue(int cpu, struct llist_node *node)
kernel/smp.c
397
trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
kernel/smp.c
412
if (llist_add(node, &per_cpu(call_single_queue, cpu)))
kernel/smp.c
413
send_call_function_single_ipi(cpu);
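
__smp_call_single_queue() pushes the CSD onto the target CPU's lock-free call_single_queue and sends an IPI only when llist_add() reports that the list was previously empty, so concurrent senders coalesce onto a single pending interrupt. A userspace model of that push-and-signal-once idiom; types and names here are invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct csd {
        struct csd *next;
        int id;
};

static _Atomic(struct csd *) queue;

/* Lock-free push; returns true only for the push that found the
 * stack empty, i.e. the one that must signal the target CPU. */
static bool push_was_empty(struct csd *n)
{
        struct csd *old = atomic_load(&queue);

        do {
                n->next = old;
        } while (!atomic_compare_exchange_weak(&queue, &old, n));
        return old == NULL;
}

int main(void)
{
        struct csd a = { .id = 1 }, b = { .id = 2 };

        if (push_was_empty(&a))
                printf("first push: send IPI\n");
        if (!push_was_empty(&b))
                printf("second push: IPI already pending, skip\n");
        return 0;
}
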
kernel/smp.c
421
static int generic_exec_single(int cpu, call_single_data_t *csd)
kernel/smp.c
427
if (cpu == smp_processor_id()) {
kernel/smp.c
445
if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
kernel/smp.c
450
__smp_call_single_queue(cpu, &csd->node.llist);
kernel/smp.c
54
int smpcfd_prepare_cpu(unsigned int cpu)
kernel/smp.c
56
struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
kernel/smp.c
59
cpu_to_node(cpu)))
kernel/smp.c
62
cpu_to_node(cpu))) {
kernel/smp.c
636
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
kernel/smp.c
681
csd->node.dst = cpu;
kernel/smp.c
684
err = generic_exec_single(cpu, csd);
kernel/smp.c
718
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
kernel/smp.c
732
err = generic_exec_single(cpu, csd);
kernel/smp.c
757
unsigned int cpu;
kernel/smp.c
76
int smpcfd_dead_cpu(unsigned int cpu)
kernel/smp.c
761
cpu = get_cpu();
kernel/smp.c
762
if (!cpumask_test_cpu(cpu, mask))
kernel/smp.c
763
cpu = sched_numa_find_nth_cpu(mask, 0, cpu_to_node(cpu));
kernel/smp.c
765
ret = smp_call_function_single(cpu, func, info, wait);
kernel/smp.c
78
struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
kernel/smp.c
785
int cpu, last_cpu, this_cpu = smp_processor_id();
kernel/smp.c
818
for_each_cpu(cpu, cfd->cpumask) {
kernel/smp.c
819
call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
kernel/smp.c
821
if (cond_func && !cond_func(cpu, info)) {
kernel/smp.c
822
__cpumask_clear_cpu(cpu, cfd->cpumask);
kernel/smp.c
836
csd->node.dst = cpu;
kernel/smp.c
838
trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
kernel/smp.c
844
if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
kernel/smp.c
845
__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
kernel/smp.c
847
last_cpu = cpu;
kernel/smp.c
86
int smpcfd_dying_cpu(unsigned int cpu)
kernel/smp.c
873
for_each_cpu(cpu, cfd->cpumask) {
kernel/smp.c
876
csd = per_cpu_ptr(cfd->csd, cpu);
kernel/smpboot.c
115
ht->cleanup(td->cpu, cpu_online(td->cpu));
kernel/smpboot.c
124
BUG_ON(td->cpu != smp_processor_id());
kernel/smpboot.c
125
ht->park(td->cpu);
kernel/smpboot.c
133
BUG_ON(td->cpu != smp_processor_id());
kernel/smpboot.c
141
ht->setup(td->cpu);
kernel/smpboot.c
149
ht->unpark(td->cpu);
kernel/smpboot.c
154
if (!ht->thread_should_run(td->cpu)) {
kernel/smpboot.c
160
ht->thread_fn(td->cpu);
kernel/smpboot.c
166
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
kernel/smpboot.c
168
struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
kernel/smpboot.c
174
td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
kernel/smpboot.c
177
td->cpu = cpu;
kernel/smpboot.c
180
tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
kernel/smpboot.c
186
kthread_set_per_cpu(tsk, cpu);
kernel/smpboot.c
193
*per_cpu_ptr(ht->store, cpu) = tsk;
kernel/smpboot.c
204
ht->create(cpu);
kernel/smpboot.c
209
int smpboot_create_threads(unsigned int cpu)
kernel/smpboot.c
216
ret = __smpboot_create_thread(cur, cpu);
kernel/smpboot.c
224
static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
kernel/smpboot.c
226
struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
kernel/smpboot.c
232
int smpboot_unpark_threads(unsigned int cpu)
kernel/smpboot.c
238
smpboot_unpark_thread(cur, cpu);
kernel/smpboot.c
243
static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
kernel/smpboot.c
245
struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
kernel/smpboot.c
251
int smpboot_park_threads(unsigned int cpu)
kernel/smpboot.c
257
smpboot_park_thread(cur, cpu);
kernel/smpboot.c
264
unsigned int cpu;
kernel/smpboot.c
267
for_each_possible_cpu(cpu) {
kernel/smpboot.c
268
struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
kernel/smpboot.c
272
*per_cpu_ptr(ht->store, cpu) = NULL;
kernel/smpboot.c
28
struct task_struct *idle_thread_get(unsigned int cpu)
kernel/smpboot.c
286
unsigned int cpu;
kernel/smpboot.c
291
for_each_online_cpu(cpu) {
kernel/smpboot.c
292
ret = __smpboot_create_thread(plug_thread, cpu);
kernel/smpboot.c
297
smpboot_unpark_thread(plug_thread, cpu);
kernel/smpboot.c
30
struct task_struct *tsk = per_cpu(idle_threads, cpu);
kernel/smpboot.c
48
static __always_inline void idle_init(unsigned int cpu)
kernel/smpboot.c
50
struct task_struct *tsk = per_cpu(idle_threads, cpu);
kernel/smpboot.c
53
tsk = fork_idle(cpu);
kernel/smpboot.c
55
pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
kernel/smpboot.c
57
per_cpu(idle_threads, cpu) = tsk;
kernel/smpboot.c
66
unsigned int cpu, boot_cpu;
kernel/smpboot.c
70
for_each_possible_cpu(cpu) {
kernel/smpboot.c
71
if (cpu != boot_cpu)
kernel/smpboot.c
72
idle_init(cpu);
kernel/smpboot.c
81
unsigned int cpu;
kernel/smpboot.h
12
static inline struct task_struct *idle_thread_get(unsigned int cpu) { return NULL; }
kernel/smpboot.h
17
int smpboot_create_threads(unsigned int cpu);
kernel/smpboot.h
18
int smpboot_park_threads(unsigned int cpu);
kernel/smpboot.h
19
int smpboot_unpark_threads(unsigned int cpu);
kernel/smpboot.h
8
struct task_struct *idle_thread_get(unsigned int cpu);
kernel/softirq.c
1037
int cpu;
kernel/softirq.c
1039
for_each_possible_cpu(cpu) {
kernel/softirq.c
1040
per_cpu(tasklet_vec, cpu).tail =
kernel/softirq.c
1041
&per_cpu(tasklet_vec, cpu).head;
kernel/softirq.c
1042
per_cpu(tasklet_hi_vec, cpu).tail =
kernel/softirq.c
1043
&per_cpu(tasklet_hi_vec, cpu).head;
kernel/softirq.c
1050
static int ksoftirqd_should_run(unsigned int cpu)
kernel/softirq.c
1055
static void run_ksoftirqd(unsigned int cpu)
kernel/softirq.c
1072
static int takeover_tasklets(unsigned int cpu)
kernel/softirq.c
1074
workqueue_softirq_dead(cpu);
kernel/softirq.c
1080
if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
kernel/softirq.c
1081
*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
kernel/softirq.c
1082
__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
kernel/softirq.c
1083
per_cpu(tasklet_vec, cpu).head = NULL;
kernel/softirq.c
1084
per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
kernel/softirq.c
1088
if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
kernel/softirq.c
1089
*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
kernel/softirq.c
1090
__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
kernel/softirq.c
1091
per_cpu(tasklet_hi_vec, cpu).head = NULL;
kernel/softirq.c
1092
per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
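
takeover_tasklets() can splice a dead CPU's entire pending list onto the current CPU's in O(1) because tasklet_vec keeps a tail that points at the last element's next pointer (or at head when empty). A compact model of that head/tail representation, with types and names simplified:

#include <stddef.h>
#include <stdio.h>

struct tasklet {
        struct tasklet *next;
        int id;
};

struct vec {
        struct tasklet *head;
        struct tasklet **tail;  /* points at the slot to fill next */
};

static void vec_init(struct vec *v) { v->head = NULL; v->tail = &v->head; }

static void vec_add(struct vec *v, struct tasklet *t)
{
        t->next = NULL;
        *v->tail = t;
        v->tail = &t->next;
}

/* Splice everything pending on @src onto @dst in O(1), as the
 * takeover path does when a CPU goes offline. */
static void vec_splice(struct vec *dst, struct vec *src)
{
        if (src->head) {
                *dst->tail = src->head;
                dst->tail = src->tail;
                vec_init(src);
        }
}

int main(void)
{
        struct vec dead, live;
        struct tasklet t1 = { .id = 1 }, t2 = { .id = 2 };

        vec_init(&dead);
        vec_init(&live);
        vec_add(&dead, &t1);
        vec_add(&dead, &t2);
        vec_splice(&live, &dead);

        for (struct tasklet *t = live.head; t; t = t->next)
                printf("tasklet %d\n", t->id);
        return 0;
}
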
kernel/softirq.c
1111
static void ktimerd_setup(unsigned int cpu)
kernel/softirq.c
1117
static int ktimerd_should_run(unsigned int cpu)
kernel/softirq.c
1128
static void run_ktimerd(unsigned int cpu)
kernel/softirq.c
685
int cpu = smp_processor_id();
kernel/softirq.c
688
if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
kernel/stop_machine.c
137
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
kernel/stop_machine.c
143
if (!cpu_stop_queue_work(cpu, &work))
kernel/stop_machine.c
205
int cpu = smp_processor_id(), err = 0;
kernel/stop_machine.c
218
is_active = cpu == cpumask_first(cpumask);
kernel/stop_machine.c
221
is_active = cpumask_test_cpu(cpu, cpumask);
kernel/stop_machine.c
385
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
kernel/stop_machine.c
389
return cpu_stop_queue_work(cpu, work_buf);
kernel/stop_machine.c
397
unsigned int cpu;
kernel/stop_machine.c
408
for_each_cpu(cpu, cpumask) {
kernel/stop_machine.c
409
work = &per_cpu(cpu_stopper.stop_work, cpu);
kernel/stop_machine.c
414
if (cpu_stop_queue_work(cpu, work))
kernel/stop_machine.c
475
static int cpu_stop_should_run(unsigned int cpu)
kernel/stop_machine.c
477
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/stop_machine.c
487
static void cpu_stopper_thread(unsigned int cpu)
kernel/stop_machine.c
489
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/stop_machine.c
527
void stop_machine_park(int cpu)
kernel/stop_machine.c
529
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/stop_machine.c
539
static void cpu_stop_create(unsigned int cpu)
kernel/stop_machine.c
541
sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
kernel/stop_machine.c
544
static void cpu_stop_park(unsigned int cpu)
kernel/stop_machine.c
546
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/stop_machine.c
551
void stop_machine_unpark(int cpu)
kernel/stop_machine.c
553
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/stop_machine.c
571
unsigned int cpu;
kernel/stop_machine.c
573
for_each_possible_cpu(cpu) {
kernel/stop_machine.c
574
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/stop_machine.c
636
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data)
kernel/stop_machine.c
638
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
kernel/stop_machine.c
91
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
kernel/stop_machine.c
93
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/sys.c
2917
int cpu = raw_smp_processor_id();
kernel/sys.c
2920
err |= put_user(cpu, cpup);
kernel/sys.c
2922
err |= put_user(cpu_to_node(cpu), nodep);
kernel/taskstats.c
299
unsigned int cpu;
kernel/taskstats.c
312
for_each_cpu(cpu, mask) {
kernel/taskstats.c
314
GFP_KERNEL, cpu_to_node(cpu));
kernel/taskstats.c
322
listeners = &per_cpu(listener_array, cpu);
kernel/taskstats.c
339
for_each_cpu(cpu, mask) {
kernel/taskstats.c
340
listeners = &per_cpu(listener_array, cpu);
kernel/time/clockevents.c
394
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
kernel/time/clockevents.c
402
return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
kernel/time/clockevents.c
425
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
kernel/time/clockevents.c
429
smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
kernel/time/clockevents.c
436
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
kernel/time/clockevents.c
441
ret = clockevents_unbind(ced, cpu);
kernel/time/clockevents.c
629
void tick_offline_cpu(unsigned int cpu)
kernel/time/clockevents.c
635
tick_broadcast_offline(cpu);
kernel/time/clockevents.c
649
if (cpumask_test_cpu(cpu, dev->cpumask) &&
kernel/time/clockevents.c
750
int cpu;
kernel/time/clockevents.c
752
for_each_possible_cpu(cpu) {
kernel/time/clockevents.c
753
struct device *dev = &per_cpu(tick_percpu_dev, cpu);
kernel/time/clockevents.c
756
dev->id = cpu;
kernel/time/clocksource.c
311
int cpu, i, n = verify_n_cpus;
kernel/time/clocksource.c
326
cpu = cpumask_any_but(cpu_online_mask, smp_processor_id());
kernel/time/clocksource.c
327
if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
kernel/time/clocksource.c
329
cpumask_set_cpu(cpu, &cpus_chosen);
kernel/time/clocksource.c
343
cpu = cpumask_random(cpu_online_mask);
kernel/time/clocksource.c
344
if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
kernel/time/clocksource.c
345
cpumask_set_cpu(cpu, &cpus_chosen);
kernel/time/clocksource.c
363
int cpu, testcpu;
kernel/time/clocksource.c
383
for_each_cpu(cpu, &cpus_chosen) {
kernel/time/clocksource.c
384
if (cpu == testcpu)
kernel/time/clocksource.c
387
smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
kernel/time/clocksource.c
391
cpumask_set_cpu(cpu, &cpus_behind);
kernel/time/clocksource.c
394
cpumask_set_cpu(cpu, &cpus_ahead);
kernel/time/hrtimer.c
1289
smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd);
kernel/time/hrtimer.c
2236
int hrtimers_prepare_cpu(unsigned int cpu)
kernel/time/hrtimer.c
2238
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
kernel/time/hrtimer.c
2249
cpu_base->cpu = cpu;
kernel/time/hrtimer.c
2254
int hrtimers_cpu_starting(unsigned int cpu)
kernel/time/hrtimer.c
226
int cpu = cpumask_any_and(cpu_online_mask, housekeeping_cpumask(HK_TYPE_TIMER));
kernel/time/hrtimer.c
228
return &per_cpu(hrtimer_bases, cpu);
kernel/time/hrtimer.c
744
base->cpu);
kernel/time/hrtimer.c
952
int cpu;
kernel/time/hrtimer.c
964
for_each_online_cpu(cpu) {
kernel/time/hrtimer.c
967
cpu_base = &per_cpu(hrtimer_bases, cpu);
kernel/time/hrtimer.c
971
cpumask_set_cpu(cpu, mask);
kernel/time/posix-cpu-timers.c
1148
struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
kernel/time/posix-cpu-timers.c
115
return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
kernel/time/posix-cpu-timers.c
124
u64 delta, incr, expires = timer->it.cpu.node.expires;
kernel/time/posix-cpu-timers.c
1370
list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
kernel/time/posix-cpu-timers.c
1381
list_del_init(&timer->it.cpu.elist);
kernel/time/posix-cpu-timers.c
1382
cpu_firing = timer->it.cpu.firing;
kernel/time/posix-cpu-timers.c
1383
timer->it.cpu.firing = false;
kernel/time/posix-cpu-timers.c
1392
rcu_assign_pointer(timer->it.cpu.handling, NULL);
kernel/time/posix-cpu-timers.c
144
timer->it.cpu.node.expires += incr;
kernel/time/posix-cpu-timers.c
148
return timer->it.cpu.node.expires;
kernel/time/posix-cpu-timers.c
1495
timer.it.cpu.nanosleep = true;
kernel/time/posix-cpu-timers.c
1512
if (!cpu_timer_getexpires(&timer.it.cpu)) {
kernel/time/posix-cpu-timers.c
1534
expires = cpu_timer_getexpires(&timer.it.cpu);
kernel/time/posix-cpu-timers.c
410
timerqueue_init(&new_timer->it.cpu.node);
kernel/time/posix-cpu-timers.c
411
new_timer->it.cpu.pid = get_pid(pid);
kernel/time/posix-cpu-timers.c
453
struct cpu_timer *ctmr = &timer->it.cpu;
kernel/time/posix-cpu-timers.c
473
struct cpu_timer *ctmr = &timer->it.cpu;
kernel/time/posix-cpu-timers.c
496
if (timer->it.cpu.firing) {
kernel/time/posix-cpu-timers.c
503
timer->it.cpu.firing = false;
kernel/time/posix-cpu-timers.c
568
struct cpu_timer *ctmr = &timer->it.cpu;
kernel/time/posix-cpu-timers.c
595
struct cpu_timer *ctmr = &timer->it.cpu;
kernel/time/posix-cpu-timers.c
627
struct cpu_timer *ctmr = &timer->it.cpu;
kernel/time/posix-cpu-timers.c
668
if (unlikely(timer->it.cpu.firing)) {
kernel/time/posix-cpu-timers.c
675
timer->it.cpu.firing = false;
kernel/time/posix-cpu-timers.c
756
expires = cpu_timer_getexpires(&timer->it.cpu);
kernel/time/posix-cpu-timers.c
785
if (p && cpu_timer_getexpires(&timer->it.cpu)) {
kernel/time/tick-broadcast-hrtimer.c
71
bc->bound_on = bctimer.base->cpu_base->cpu;
kernel/time/tick-broadcast.c
102
static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)
kernel/time/tick-broadcast.c
1029
int cpu = smp_processor_id();
kernel/time/tick-broadcast.c
104
return per_cpu(tick_oneshot_wakeup_device, cpu);
kernel/time/tick-broadcast.c
1060
tick_broadcast_clear_oneshot(cpu);
kernel/time/tick-broadcast.c
1083
cpumask_clear_cpu(cpu, tmpmask);
kernel/time/tick-broadcast.c
1120
tick_broadcast_set_event(bc, cpu, nexttick);
kernel/time/tick-broadcast.c
117
int cpu)
kernel/time/tick-broadcast.c
1186
static void tick_broadcast_oneshot_offline(unsigned int cpu)
kernel/time/tick-broadcast.c
1188
if (tick_get_oneshot_wakeup_device(cpu))
kernel/time/tick-broadcast.c
1189
tick_set_oneshot_wakeup_device(NULL, cpu);
kernel/time/tick-broadcast.c
119
struct clock_event_device *curdev = tick_get_oneshot_wakeup_device(cpu);
kernel/time/tick-broadcast.c
1195
cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
kernel/time/tick-broadcast.c
1196
cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
kernel/time/tick-broadcast.c
1197
cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
kernel/time/tick-broadcast.c
132
if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
kernel/time/tick-broadcast.c
144
per_cpu(tick_oneshot_wakeup_device, cpu) = newdev;
kernel/time/tick-broadcast.c
148
static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)
kernel/time/tick-broadcast.c
154
int cpu)
kernel/time/tick-broadcast.c
163
void tick_install_broadcast_device(struct clock_event_device *dev, int cpu)
kernel/time/tick-broadcast.c
167
if (tick_set_oneshot_wakeup_device(dev, cpu))
kernel/time/tick-broadcast.c
247
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
kernel/time/tick-broadcast.c
264
cpumask_set_cpu(cpu, tick_broadcast_mask);
kernel/time/tick-broadcast.c
276
cpumask_clear_cpu(cpu, tick_broadcast_mask);
kernel/time/tick-broadcast.c
284
if (!cpumask_test_cpu(cpu, tick_broadcast_on))
kernel/time/tick-broadcast.c
285
cpumask_clear_cpu(cpu, tick_broadcast_mask);
kernel/time/tick-broadcast.c
297
tick_broadcast_clear_oneshot(cpu);
kernel/time/tick-broadcast.c
318
ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
kernel/time/tick-broadcast.c
348
int cpu = smp_processor_id();
kernel/time/tick-broadcast.c
355
if (cpumask_test_cpu(cpu, mask)) {
kernel/time/tick-broadcast.c
358
cpumask_clear_cpu(cpu, mask);
kernel/time/tick-broadcast.c
39
static void tick_broadcast_clear_oneshot(int cpu);
kernel/time/tick-broadcast.c
42
static void tick_broadcast_oneshot_offline(unsigned int cpu);
kernel/time/tick-broadcast.c
442
int cpu, bc_stopped;
kernel/time/tick-broadcast.c
459
cpu = smp_processor_id();
kernel/time/tick-broadcast.c
468
cpumask_set_cpu(cpu, tick_broadcast_on);
kernel/time/tick-broadcast.c
469
if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
kernel/time/tick-broadcast.c
47
static inline void tick_broadcast_clear_oneshot(int cpu) { }
kernel/time/tick-broadcast.c
487
cpumask_clear_cpu(cpu, tick_broadcast_on);
kernel/time/tick-broadcast.c
488
if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
kernel/time/tick-broadcast.c
50
static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }
kernel/time/tick-broadcast.c
537
void tick_broadcast_offline(unsigned int cpu)
kernel/time/tick-broadcast.c
540
cpumask_clear_cpu(cpu, tick_broadcast_mask);
kernel/time/tick-broadcast.c
541
cpumask_clear_cpu(cpu, tick_broadcast_on);
kernel/time/tick-broadcast.c
542
tick_broadcast_oneshot_offline(cpu);
kernel/time/tick-broadcast.c
651
static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
kernel/time/tick-broadcast.c
658
tick_broadcast_set_affinity(bc, cpumask_of(cpu));
kernel/time/tick-broadcast.c
67
static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu);
kernel/time/tick-broadcast.c
69
const struct clock_event_device *tick_get_wakeup_device(int cpu)
kernel/time/tick-broadcast.c
694
int cpu, next_cpu = 0;
kernel/time/tick-broadcast.c
703
for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
kernel/time/tick-broadcast.c
71
return tick_get_oneshot_wakeup_device(cpu);
kernel/time/tick-broadcast.c
712
td = &per_cpu(tick_cpu_device, cpu);
kernel/time/tick-broadcast.c
714
cpumask_set_cpu(cpu, tmpmask);
kernel/time/tick-broadcast.c
720
cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
kernel/time/tick-broadcast.c
723
next_cpu = cpu;
kernel/time/tick-broadcast.c
770
static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
kernel/time/tick-broadcast.c
776
return bc->bound_on == cpu ? -EBUSY : 0;
kernel/time/tick-broadcast.c
798
int cpu)
kernel/time/tick-broadcast.c
815
ret = broadcast_needs_cpu(bc, cpu);
kernel/time/tick-broadcast.c
830
if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
kernel/time/tick-broadcast.c
831
WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
kernel/time/tick-broadcast.c
846
if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
kernel/time/tick-broadcast.c
849
tick_broadcast_set_event(bc, cpu, dev->next_event);
kernel/time/tick-broadcast.c
857
ret = broadcast_needs_cpu(bc, cpu);
kernel/time/tick-broadcast.c
859
cpumask_clear_cpu(cpu,
kernel/time/tick-broadcast.c
865
if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
kernel/time/tick-broadcast.c
876
if (cpumask_test_and_clear_cpu(cpu,
kernel/time/tick-broadcast.c
919
cpumask_set_cpu(cpu, tick_broadcast_force_mask);
kernel/time/tick-broadcast.c
936
int cpu)
kernel/time/tick-broadcast.c
944
wd = tick_get_oneshot_wakeup_device(cpu);
kernel/time/tick-broadcast.c
966
int cpu = smp_processor_id();
kernel/time/tick-broadcast.c
968
if (!tick_oneshot_wakeup_control(state, td, cpu))
kernel/time/tick-broadcast.c
972
return ___tick_broadcast_oneshot_control(state, td, cpu);
kernel/time/tick-broadcast.c
986
static void tick_broadcast_clear_oneshot(int cpu)
kernel/time/tick-broadcast.c
988
cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
kernel/time/tick-broadcast.c
989
cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
kernel/time/tick-broadcast.c
996
int cpu;
kernel/time/tick-broadcast.c
998
for_each_cpu(cpu, mask) {
kernel/time/tick-broadcast.c
999
td = &per_cpu(tick_cpu_device, cpu);
kernel/time/tick-common.c
110
int cpu = smp_processor_id();
kernel/time/tick-common.c
113
tick_periodic(cpu);
kernel/time/tick-common.c
144
tick_periodic(cpu);
kernel/time/tick-common.c
185
struct clock_event_device *newdev, int cpu,
kernel/time/tick-common.c
200
WRITE_ONCE(tick_do_timer_cpu, cpu);
kernel/time/tick-common.c
208
if (tick_nohz_full_cpu(cpu))
kernel/time/tick-common.c
209
tick_do_timer_boot_cpu = cpu;
kernel/time/tick-common.c
211
} else if (tick_do_timer_boot_cpu != -1 && !tick_nohz_full_cpu(cpu)) {
kernel/time/tick-common.c
222
WRITE_ONCE(tick_do_timer_cpu, cpu);
kernel/time/tick-common.c
252
if (tick_device_uses_broadcast(newdev, cpu))
kernel/time/tick-common.c
264
int cpu = smp_processor_id();
kernel/time/tick-common.c
267
tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
kernel/time/tick-common.c
273
struct clock_event_device *newdev, int cpu)
kernel/time/tick-common.c
275
if (!cpumask_test_cpu(cpu, newdev->cpumask))
kernel/time/tick-common.c
277
if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
kernel/time/tick-common.c
283
if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
kernel/time/tick-common.c
329
int cpu;
kernel/time/tick-common.c
331
cpu = smp_processor_id();
kernel/time/tick-common.c
332
td = &per_cpu(tick_cpu_device, cpu);
kernel/time/tick-common.c
351
tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
kernel/time/tick-common.c
360
tick_install_broadcast_device(newdev, cpu);
kernel/time/tick-common.c
64
struct tick_device *tick_get_device(int cpu)
kernel/time/tick-common.c
66
return &per_cpu(tick_cpu_device, cpu);
kernel/time/tick-common.c
86
static void tick_periodic(int cpu)
kernel/time/tick-common.c
88
if (READ_ONCE(tick_do_timer_cpu) == cpu) {
kernel/time/tick-internal.h
146
extern void tick_broadcast_offline(unsigned int cpu);
kernel/time/tick-internal.h
148
static inline void tick_broadcast_offline(unsigned int cpu) { }
kernel/time/tick-internal.h
165
unsigned int cpu);
kernel/time/tick-internal.h
166
extern void timer_lock_remote_bases(unsigned int cpu);
kernel/time/tick-internal.h
167
extern void timer_unlock_remote_bases(unsigned int cpu);
kernel/time/tick-internal.h
169
extern void timer_expire_remote(unsigned int cpu);
kernel/time/tick-internal.h
28
extern void tick_offline_cpu(unsigned int cpu);
kernel/time/tick-internal.h
36
extern struct tick_device *tick_get_device(int cpu);
kernel/time/tick-internal.h
68
extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
kernel/time/tick-internal.h
69
extern void tick_install_broadcast_device(struct clock_event_device *dev, int cpu);
kernel/time/tick-internal.h
79
extern const struct clock_event_device *tick_get_wakeup_device(int cpu);
kernel/time/tick-internal.h
81
static inline void tick_install_broadcast_device(struct clock_event_device *dev, int cpu) { }
kernel/time/tick-internal.h
83
static inline int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) { return 0; }
kernel/time/tick-sched.c
1000
static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
kernel/time/tick-sched.c
1045
if (tick_cpu == cpu) {
kernel/time/tick-sched.c
1109
static void tick_nohz_full_stop_tick(struct tick_sched *ts, int cpu)
kernel/time/tick-sched.c
1111
if (tick_nohz_next_event(ts, cpu))
kernel/time/tick-sched.c
1112
tick_nohz_stop_tick(ts, cpu);
kernel/time/tick-sched.c
1141
int cpu = smp_processor_id();
kernel/time/tick-sched.c
1143
if (can_stop_full_tick(cpu, ts))
kernel/time/tick-sched.c
1144
tick_nohz_full_stop_tick(ts, cpu);
kernel/time/tick-sched.c
1199
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
kernel/time/tick-sched.c
1201
WARN_ON_ONCE(cpu_is_offline(cpu));
kernel/time/tick-sched.c
1219
if (tick_cpu == cpu)
kernel/time/tick-sched.c
1238
int cpu = smp_processor_id();
kernel/time/tick-sched.c
1247
else if (can_stop_idle_tick(cpu, ts))
kernel/time/tick-sched.c
1248
expires = tick_nohz_next_event(ts, cpu);
kernel/time/tick-sched.c
1257
tick_nohz_stop_tick(ts, cpu);
kernel/time/tick-sched.c
1264
nohz_balance_enter_idle(cpu);
kernel/time/tick-sched.c
1374
int cpu = smp_processor_id();
kernel/time/tick-sched.c
1386
if (!can_stop_idle_tick(cpu, ts))
kernel/time/tick-sched.c
1389
next_event = tick_nohz_next_event(ts, cpu);
kernel/time/tick-sched.c
1412
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
kernel/time/tick-sched.c
1414
struct tick_sched *ts = tick_get_tick_sched(cpu);
kernel/time/tick-sched.c
1634
void tick_sched_timer_dying(int cpu)
kernel/time/tick-sched.c
1636
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
kernel/time/tick-sched.c
1660
int cpu;
kernel/time/tick-sched.c
1662
for_each_possible_cpu(cpu)
kernel/time/tick-sched.c
1663
set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
kernel/time/tick-sched.c
229
int tick_cpu, cpu = smp_processor_id();
kernel/time/tick-sched.c
247
WRITE_ONCE(tick_do_timer_cpu, cpu);
kernel/time/tick-sched.c
248
tick_cpu = cpu;
kernel/time/tick-sched.c
252
if (tick_cpu == cpu)
kernel/time/tick-sched.c
383
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
kernel/time/tick-sched.c
387
if (unlikely(!cpu_online(cpu)))
kernel/time/tick-sched.c
42
struct tick_sched *tick_get_tick_sched(int cpu)
kernel/time/tick-sched.c
431
void tick_nohz_full_kick_cpu(int cpu)
kernel/time/tick-sched.c
433
if (!tick_nohz_full_cpu(cpu))
kernel/time/tick-sched.c
436
irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
kernel/time/tick-sched.c
44
return &per_cpu(tick_cpu_sched, cpu);
kernel/time/tick-sched.c
441
int cpu;
kernel/time/tick-sched.c
477
cpu = task_cpu(tsk);
kernel/time/tick-sched.c
480
if (cpu_online(cpu))
kernel/time/tick-sched.c
481
tick_nohz_full_kick_cpu(cpu);
kernel/time/tick-sched.c
491
int cpu;
kernel/time/tick-sched.c
497
for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
kernel/time/tick-sched.c
498
tick_nohz_full_kick_cpu(cpu);
kernel/time/tick-sched.c
530
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
kernel/time/tick-sched.c
535
ts = per_cpu_ptr(&tick_cpu_sched, cpu);
kernel/time/tick-sched.c
541
if (cpu == smp_processor_id()) {
kernel/time/tick-sched.c
546
tick_nohz_full_kick_cpu(cpu);
kernel/time/tick-sched.c
553
void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
kernel/time/tick-sched.c
555
struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
kernel/time/tick-sched.c
632
bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
kernel/time/tick-sched.c
639
if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu)
kernel/time/tick-sched.c
644
static int tick_nohz_cpu_down(unsigned int cpu)
kernel/time/tick-sched.c
646
return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
kernel/time/tick-sched.c
651
int cpu, ret;
kernel/time/tick-sched.c
670
cpu = smp_processor_id();
kernel/time/tick-sched.c
672
if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
kernel/time/tick-sched.c
674
"for timekeeping\n", cpu);
kernel/time/tick-sched.c
675
cpumask_clear_cpu(cpu, tick_nohz_full_mask);
kernel/time/tick-sched.c
679
for_each_cpu(cpu, tick_nohz_full_mask)
kernel/time/tick-sched.c
680
ct_cpu_track_user(cpu);
kernel/time/tick-sched.c
723
bool tick_nohz_tick_stopped_cpu(int cpu)
kernel/time/tick-sched.c
725
struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
kernel/time/tick-sched.c
832
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
kernel/time/tick-sched.c
834
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
kernel/time/tick-sched.c
837
!nr_iowait_cpu(cpu), last_update_time);
kernel/time/tick-sched.c
858
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
kernel/time/tick-sched.c
860
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
kernel/time/tick-sched.c
863
nr_iowait_cpu(cpu), last_update_time);
kernel/time/tick-sched.c
922
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
kernel/time/tick-sched.c
984
if (tick_cpu != cpu &&
kernel/time/tick-sched.h
105
extern struct tick_sched *tick_get_tick_sched(int cpu);
kernel/time/tick-sched.h
109
extern void tick_sched_timer_dying(int cpu);
kernel/time/tick-sched.h
111
static inline void tick_sched_timer_dying(int cpu) { }
kernel/time/timekeeping_debug.c
62
int cpu;
kernel/time/timekeeping_debug.c
64
for_each_possible_cpu(cpu)
kernel/time/timekeeping_debug.c
65
sum += data_race(per_cpu(timekeeping_mg_floor_swaps, cpu));
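The timekeeping_debug.c entries sum a per-CPU counter across every possible CPU, wrapping each read in data_race(). A minimal sketch of that idiom, using a hypothetical example_counter:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_counter);

static unsigned long example_counter_sum(void)
{
	unsigned long sum = 0;
	int cpu;

	/* Lockless snapshot: a torn or slightly stale read is fine for
	 * a debug total, so each access is annotated with data_race(). */
	for_each_possible_cpu(cpu)
		sum += data_race(per_cpu(example_counter, cpu));

	return sum;
}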
kernel/time/timer.c
1119
(timer->flags & ~TIMER_BASEMASK) | base->cpu);
kernel/time/timer.c
1299
void add_timer_on(struct timer_list *timer, int cpu)
kernel/time/timer.c
1312
new_base = get_timer_cpu_base(timer->flags, cpu);
kernel/time/timer.c
1334
(timer->flags & ~TIMER_BASEMASK) | cpu);
kernel/time/timer.c
2058
unsigned int cpu)
kernel/time/timer.c
2065
base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
kernel/time/timer.c
2066
base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
kernel/time/timer.c
2080
void timer_unlock_remote_bases(unsigned int cpu)
kernel/time/timer.c
2086
base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
kernel/time/timer.c
2087
base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
kernel/time/timer.c
2099
void timer_lock_remote_bases(unsigned int cpu)
kernel/time/timer.c
2105
base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
kernel/time/timer.c
2106
base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
kernel/time/timer.c
2132
void timer_expire_remote(unsigned int cpu)
kernel/time/timer.c
2134
struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
kernel/time/timer.c
2256
if (tick_nohz_full_cpu(base_local->cpu))
kernel/time/timer.c
2258
trace_timer_base_idle(true, base_local->cpu);
kernel/time/timer.c
2488
int cpu = new_base->cpu;
kernel/time/timer.c
2493
timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
kernel/time/timer.c
2498
int timers_prepare_cpu(unsigned int cpu)
kernel/time/timer.c
2504
base = per_cpu_ptr(&timer_bases[b], cpu);
kernel/time/timer.c
2514
int timers_dead_cpu(unsigned int cpu)
kernel/time/timer.c
2521
old_base = per_cpu_ptr(&timer_bases[b], cpu);
kernel/time/timer.c
2551
static void __init init_timer_cpu(int cpu)
kernel/time/timer.c
2557
base = per_cpu_ptr(&timer_bases[i], cpu);
kernel/time/timer.c
2558
base->cpu = cpu;
kernel/time/timer.c
2568
int cpu;
kernel/time/timer.c
2570
for_each_possible_cpu(cpu)
kernel/time/timer.c
2571
init_timer_cpu(cpu);
kernel/time/timer.c
259
unsigned int cpu;
kernel/time/timer.c
348
static unsigned long round_jiffies_common(unsigned long j, int cpu,
kernel/time/timer.c
362
j += cpu * 3;
kernel/time/timer.c
379
j -= cpu * 3;
kernel/time/timer.c
408
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
kernel/time/timer.c
413
return round_jiffies_common(j + j0, cpu, false) - j0;
kernel/time/timer.c
469
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
kernel/time/timer.c
474
return round_jiffies_common(j + j0, cpu, true) - j0;
kernel/time/timer.c
602
tick_nohz_full_cpu(base->cpu)));
kernel/time/timer.c
603
wake_up_nohz_cpu(base->cpu);
kernel/time/timer.c
914
static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
kernel/time/timer.c
925
return per_cpu_ptr(&timer_bases[index], cpu);
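get_timer_cpu_base() in the timer.c entries resolves one element of a per-CPU array of bases. A sketch of that shape; the flag bit, index derivation, and example_* names are invented for illustration:

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct example_base {
	raw_spinlock_t	lock;
	unsigned int	cpu;
};

/* One small array of bases per CPU, as timer_bases[] is laid out. */
static DEFINE_PER_CPU(struct example_base, example_bases[2]);

static struct example_base *example_get_cpu_base(u32 tflags, u32 cpu)
{
	/* Hypothetical flag bit selecting the second base. */
	int index = (tflags & 0x1) ? 1 : 0;

	return per_cpu_ptr(&example_bases[index], cpu);
}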
kernel/time/timer_list.c
113
static void print_cpu(struct seq_file *m, int cpu, u64 now)
kernel/time/timer_list.c
115
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
kernel/time/timer_list.c
118
SEQ_printf(m, "cpu: %d\n", cpu);
kernel/time/timer_list.c
152
struct tick_sched *ts = tick_get_tick_sched(cpu);
kernel/time/timer_list.c
180
print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
kernel/time/timer_list.c
187
if (cpu < 0)
kernel/time/timer_list.c
190
SEQ_printf(m, "Per CPU device: %d\n", cpu);
kernel/time/timer_list.c
21
int cpu;
kernel/time/timer_list.c
235
if (cpu >= 0) {
kernel/time/timer_list.c
236
const struct clock_event_device *wd = tick_get_wakeup_device(cpu);
kernel/time/timer_list.c
270
int cpu;
kernel/time/timer_list.c
274
for_each_online_cpu(cpu)
kernel/time/timer_list.c
275
print_cpu(NULL, cpu, now);
kernel/time/timer_list.c
279
for_each_online_cpu(cpu)
kernel/time/timer_list.c
280
print_tickdevice(NULL, tick_get_device(cpu), cpu);
kernel/time/timer_list.c
290
if (iter->cpu == -1 && !iter->second_pass)
kernel/time/timer_list.c
293
print_cpu(m, iter->cpu, iter->now);
kernel/time/timer_list.c
295
else if (iter->cpu == -1 && iter->second_pass)
kernel/time/timer_list.c
298
print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
kernel/time/timer_list.c
306
iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
kernel/time/timer_list.c
307
if (iter->cpu >= nr_cpu_ids) {
kernel/time/timer_list.c
310
iter->cpu = -1;
kernel/time/timer_list.c
328
iter->cpu = -1;
kernel/time/timer_migration.c
1000
timer_lock_remote_bases(cpu);
kernel/time/timer_migration.c
1015
timer_unlock_remote_bases(cpu);
kernel/time/timer_migration.c
1020
fetch_next_timer_interrupt_remote(jif, now, &tevt, cpu);
kernel/time/timer_migration.c
1021
timer_unlock_remote_bases(cpu);
kernel/time/timer_migration.c
1069
unsigned int remote_cpu = evt->cpu;
kernel/time/timer_migration.c
1468
static int tmigr_clear_cpu_available(unsigned int cpu)
kernel/time/timer_migration.c
1476
cpumask_clear_cpu(cpu, tmigr_available_cpumask);
kernel/time/timer_migration.c
1499
static int __tmigr_set_cpu_available(unsigned int cpu)
kernel/time/timer_migration.c
1509
cpumask_set_cpu(cpu, tmigr_available_cpumask);
kernel/time/timer_migration.c
1522
static int tmigr_set_cpu_available(unsigned int cpu)
kernel/time/timer_migration.c
1524
if (tmigr_is_isolated(cpu))
kernel/time/timer_migration.c
1527
return __tmigr_set_cpu_available(cpu);
kernel/time/timer_migration.c
1560
int cpu;
kernel/time/timer_migration.c
1575
for_each_cpu(cpu, cpumask) {
kernel/time/timer_migration.c
1576
struct work_struct *work = per_cpu_ptr(works, cpu);
kernel/time/timer_migration.c
1579
schedule_work_on(cpu, work);
kernel/time/timer_migration.c
1581
for_each_cpu(cpu, cpumask)
kernel/time/timer_migration.c
1582
flush_work(per_cpu_ptr(works, cpu));
kernel/time/timer_migration.c
1595
for_each_cpu(cpu, cpumask) {
kernel/time/timer_migration.c
1596
if (!tick_nohz_cpu_hotpluggable(cpu)) {
kernel/time/timer_migration.c
1597
cpumask_clear_cpu(cpu, cpumask);
kernel/time/timer_migration.c
1602
for_each_cpu(cpu, cpumask) {
kernel/time/timer_migration.c
1603
struct work_struct *work = per_cpu_ptr(works, cpu);
kernel/time/timer_migration.c
1606
schedule_work_on(cpu, work);
kernel/time/timer_migration.c
1608
for_each_cpu(cpu, cpumask)
kernel/time/timer_migration.c
1609
flush_work(per_cpu_ptr(works, cpu));
kernel/time/timer_migration.c
1760
static int tmigr_setup_groups(unsigned int cpu, unsigned int node,
kernel/time/timer_migration.c
1825
struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
kernel/time/timer_migration.c
1895
static int tmigr_add_cpu(unsigned int cpu)
kernel/time/timer_migration.c
1898
int node = cpu_to_node(cpu);
kernel/time/timer_migration.c
1903
ret = tmigr_setup_groups(cpu, node, NULL, false);
kernel/time/timer_migration.c
1914
WARN_ON_ONCE(cpu == raw_smp_processor_id());
kernel/time/timer_migration.c
1926
static int tmigr_cpu_prepare(unsigned int cpu)
kernel/time/timer_migration.c
1928
struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
kernel/time/timer_migration.c
1939
tmc->cpuevt.cpu = cpu;
kernel/time/timer_migration.c
1943
ret = tmigr_add_cpu(cpu);
kernel/time/timer_migration.c
465
static inline bool tmigr_is_isolated(int cpu)
kernel/time/timer_migration.c
469
return (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN) &&
kernel/time/timer_migration.c
470
housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE));
kernel/time/timer_migration.c
840
evt->cpu = first_childevt->cpu;
kernel/time/timer_migration.c
866
evt->cpu = first_childevt->cpu;
kernel/time/timer_migration.c
941
static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
kernel/time/timer_migration.c
948
tmc = per_cpu_ptr(&tmigr_cpu, cpu);
kernel/time/timer_migration.c
981
if (cpu != smp_processor_id())
kernel/time/timer_migration.c
982
timer_expire_remote(cpu);
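Several timer_migration.c entries queue a work item on each CPU in a mask and then flush them all. A sketch of that fan-out/flush pattern; example_fn and the __percpu works allocation are assumptions:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
	/* per-CPU action runs here, on the CPU it was queued to */
}

static void example_run_on_mask(const struct cpumask *cpumask,
				struct work_struct __percpu *works)
{
	int cpu;

	for_each_cpu(cpu, cpumask) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, example_fn);
		schedule_work_on(cpu, work);
	}

	/* Second pass: wait for every queued item before returning. */
	for_each_cpu(cpu, cpumask)
		flush_work(per_cpu_ptr(works, cpu));
}

Queueing everything first and flushing in a second loop lets the per-CPU work run concurrently instead of serializing one CPU at a time.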
kernel/time/timer_migration.h
17
unsigned int cpu;
kernel/torture.c
202
bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
kernel/torture.c
210
if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
kernel/torture.c
218
torture_type, cpu);
kernel/torture.c
221
ret = remove_cpu(cpu);
kernel/torture.c
232
torture_type, cpu, s, ret);
kernel/torture.c
237
torture_type, cpu);
kernel/torture.c
264
bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
kernel/torture.c
272
if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
kernel/torture.c
278
torture_type, cpu);
kernel/torture.c
281
ret = add_cpu(cpu);
kernel/torture.c
292
torture_type, cpu, s, ret);
kernel/torture.c
297
torture_type, cpu);
kernel/torture.c
321
int cpu;
kernel/torture.c
324
for_each_possible_cpu(cpu) {
kernel/torture.c
325
if (cpu_online(cpu))
kernel/torture.c
327
ret = add_cpu(cpu);
kernel/torture.c
331
__func__, phase, torture_type, cpu, ret);
kernel/torture.c
343
int cpu;
kernel/torture.c
348
for_each_online_cpu(cpu)
kernel/torture.c
349
maxcpu = cpu;
kernel/torture.c
369
cpu = torture_random(&rand) % (maxcpu + 1);
kernel/torture.c
370
if (!torture_offline(cpu,
kernel/torture.c
373
torture_online(cpu,
kernel/trace/blktrace.c
101
t2->cpu = cpu;
kernel/trace/blktrace.c
117
pid_t pid, int cpu, sector_t sector, int bytes,
kernel/trace/blktrace.c
132
record_blktrace_event(t, pid, cpu, sector, bytes, what, bt->dev, error,
kernel/trace/blktrace.c
137
pid_t pid, int cpu, sector_t sector,
kernel/trace/blktrace.c
1506
MAJOR(t->device), MINOR(t->device), iter->cpu,
kernel/trace/blktrace.c
152
record_blktrace_event2(t, pid, cpu, sector, bytes, what, bt->dev, error,
kernel/trace/blktrace.c
157
pid_t pid, int cpu, sector_t sector, int bytes,
kernel/trace/blktrace.c
162
return relay_blktrace_event2(bt, sequence, pid, cpu, sector,
kernel/trace/blktrace.c
165
return relay_blktrace_event1(bt, sequence, pid, cpu, sector, bytes,
kernel/trace/blktrace.c
179
int cpu = smp_processor_id();
kernel/trace/blktrace.c
195
record_blktrace_event2(t, pid, cpu, 0, 0,
kernel/trace/blktrace.c
205
relay_blktrace_event(bt, 0, pid, cpu, 0, 0, action, 0, cgid,
kernel/trace/blktrace.c
321
int cpu;
kernel/trace/blktrace.c
383
cpu = raw_smp_processor_id();
kernel/trace/blktrace.c
424
pid, cpu, sector, bytes,
kernel/trace/blktrace.c
443
pid, cpu, sector, bytes,
kernel/trace/blktrace.c
462
sequence = per_cpu_ptr(bt->sequence, cpu);
kernel/trace/blktrace.c
464
relay_blktrace_event(bt, *sequence, pid, cpu, sector, bytes,
kernel/trace/blktrace.c
66
static void record_blktrace_event(struct blk_io_trace *t, pid_t pid, int cpu,
kernel/trace/blktrace.c
78
t->cpu = cpu;
kernel/trace/blktrace.c
94
static void record_blktrace_event2(struct blk_io_trace2 *t2, pid_t pid, int cpu,
kernel/trace/bpf_trace.c
2218
int cpu;
kernel/trace/bpf_trace.c
2221
for_each_possible_cpu(cpu) {
kernel/trace/bpf_trace.c
2222
work = per_cpu_ptr(&send_signal_work, cpu);
kernel/trace/bpf_trace.c
541
unsigned int cpu = smp_processor_id();
kernel/trace/bpf_trace.c
548
index = cpu;
kernel/trace/bpf_trace.c
620
unsigned int cpu = smp_processor_id();
kernel/trace/bpf_trace.c
626
index = cpu;
kernel/trace/bpf_trace.c
639
if (unlikely(event->oncpu != cpu))
kernel/trace/fgraph.c
1131
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
kernel/trace/fgraph.c
1140
WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
kernel/trace/fgraph.c
1148
ret_stack = per_cpu(idle_ret_stack, cpu);
kernel/trace/fgraph.c
1153
per_cpu(idle_ret_stack, cpu) = ret_stack;
kernel/trace/fgraph.c
1239
int ret, cpu;
kernel/trace/fgraph.c
1248
for_each_online_cpu(cpu) {
kernel/trace/fgraph.c
1249
if (!idle_task(cpu)->ret_stack)
kernel/trace/fgraph.c
1250
ftrace_graph_init_idle_task(idle_task(cpu), cpu);
kernel/trace/fgraph.c
1271
int cpu;
kernel/trace/fgraph.c
1273
for_each_online_cpu(cpu) {
kernel/trace/fgraph.c
1274
if (idle_task(cpu)->ret_stack)
kernel/trace/fgraph.c
1275
ret_stack_set_task_var(idle_task(cpu), idx, 0);
kernel/trace/fgraph.c
1329
static int fgraph_cpu_init(unsigned int cpu)
kernel/trace/fgraph.c
1331
if (!idle_task(cpu)->ret_stack)
kernel/trace/fgraph.c
1332
ftrace_graph_init_idle_task(idle_task(cpu), cpu);
kernel/trace/ftrace.c
1021
int cpu;
kernel/trace/ftrace.c
1023
for_each_possible_cpu(cpu) {
kernel/trace/ftrace.c
1024
stat = &per_cpu(ftrace_profile_stats, cpu);
kernel/trace/ftrace.c
1026
name = kasprintf(GFP_KERNEL, "function%d", cpu);
kernel/trace/ftrace.c
1034
cpu);
kernel/trace/ftrace.c
1043
cpu);
kernel/trace/ftrace.c
686
static int ftrace_profile_init_cpu(int cpu)
kernel/trace/ftrace.c
691
stat = &per_cpu(ftrace_profile_stats, cpu);
kernel/trace/ftrace.c
722
int cpu;
kernel/trace/ftrace.c
725
for_each_possible_cpu(cpu) {
kernel/trace/ftrace.c
726
ret = ftrace_profile_init_cpu(cpu);
kernel/trace/ftrace.c
8657
int cpu;
kernel/trace/ftrace.c
8671
for_each_possible_cpu(cpu)
kernel/trace/ftrace.c
8672
per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
kernel/trace/ring_buffer.c
1004
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
1006
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
1024
rb_wait_cond(rbwork, buffer, cpu, full, cond, data));
kernel/trace/ring_buffer.c
1044
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
kernel/trace/ring_buffer.c
1050
if (cpu == RING_BUFFER_ALL_CPUS) {
kernel/trace/ring_buffer.c
1054
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
1057
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
1064
if (rb_watermark_hit(buffer, cpu, full))
kernel/trace/ring_buffer.c
1101
if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
kernel/trace/ring_buffer.c
1102
(cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
kernel/trace/ring_buffer.c
1153
int cpu, u64 *ts)
kernel/trace/ring_buffer.c
1626
static void *rb_range_meta(struct trace_buffer *buffer, int nr_pages, int cpu)
kernel/trace/ring_buffer.c
1653
if (cpu) {
kernel/trace/ring_buffer.c
1658
if (cpu > 1) {
kernel/trace/ring_buffer.c
1669
ptr += size * (cpu - 2);
kernel/trace/ring_buffer.c
1696
meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu);
kernel/trace/ring_buffer.c
1786
static bool rb_cpu_meta_valid(struct ring_buffer_cpu_meta *meta, int cpu,
kernel/trace/ring_buffer.c
1805
pr_info("Ring buffer boot meta [%d] head buffer out of range\n", cpu);
kernel/trace/ring_buffer.c
1811
pr_info("Ring buffer boot meta [%d] commit buffer out of range\n", cpu);
kernel/trace/ring_buffer.c
1823
pr_info("Ring buffer boot meta [%d] array out of range\n", cpu);
kernel/trace/ring_buffer.c
1828
pr_info("Ring buffer boot meta [%d] buffer invalid commit\n", cpu);
kernel/trace/ring_buffer.c
1833
pr_info("Ring buffer boot meta [%d] array has duplicates\n", cpu);
kernel/trace/ring_buffer.c
1846
static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu,
kernel/trace/ring_buffer.c
1902
static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu)
kernel/trace/ring_buffer.c
1909
return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta);
kernel/trace/ring_buffer.c
1929
ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu);
kernel/trace/ring_buffer.c
1964
ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu);
kernel/trace/ring_buffer.c
1976
pr_info("Ring buffer [%d] rewound %d pages\n", cpu_buffer->cpu, i);
kernel/trace/ring_buffer.c
2043
ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu);
kernel/trace/ring_buffer.c
2046
cpu_buffer->cpu);
kernel/trace/ring_buffer.c
2064
cpu_buffer->cpu);
kernel/trace/ring_buffer.c
2071
pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu);
kernel/trace/ring_buffer.c
2097
int cpu;
kernel/trace/ring_buffer.c
2107
for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
kernel/trace/ring_buffer.c
2110
meta = rb_range_meta(buffer, nr_pages, cpu);
kernel/trace/ring_buffer.c
2112
if (valid && rb_cpu_meta_valid(meta, cpu, buffer, nr_pages, subbuf_mask)) {
kernel/trace/ring_buffer.c
2122
if (cpu < nr_cpu_ids - 1)
kernel/trace/ring_buffer.c
2123
next_meta = rb_range_meta(buffer, nr_pages, cpu + 1);
kernel/trace/ring_buffer.c
2211
int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
2221
m->private = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
2274
meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu);
kernel/trace/ring_buffer.c
2278
bpage = alloc_cpu_page(cpu_buffer->cpu);
kernel/trace/ring_buffer.c
2302
bpage->page = alloc_cpu_data(cpu_buffer->cpu, order);
kernel/trace/ring_buffer.c
2353
rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
kernel/trace/ring_buffer.c
2356
alloc_cpu_buffer(cpu);
kernel/trace/ring_buffer.c
2364
cpu_buffer->cpu = cpu;
kernel/trace/ring_buffer.c
2376
bpage = alloc_cpu_page(cpu);
kernel/trace/ring_buffer.c
2390
cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu);
kernel/trace/ring_buffer.c
2399
bpage->page = alloc_cpu_data(cpu, order);
kernel/trace/ring_buffer.c
2484
int cpu;
kernel/trace/ring_buffer.c
2581
cpu = raw_smp_processor_id();
kernel/trace/ring_buffer.c
2582
cpumask_set_cpu(cpu, buffer->cpumask);
kernel/trace/ring_buffer.c
2583
buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
kernel/trace/ring_buffer.c
2584
if (!buffer->buffers[cpu])
kernel/trace/ring_buffer.c
2596
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
2597
if (buffer->buffers[cpu])
kernel/trace/ring_buffer.c
2598
rb_free_cpu_buffer(buffer->buffers[cpu]);
kernel/trace/ring_buffer.c
2678
int cpu;
kernel/trace/ring_buffer.c
2684
for_each_buffer_cpu(buffer, cpu)
kernel/trace/ring_buffer.c
2685
rb_free_cpu_buffer(buffer->buffers[cpu]);
kernel/trace/ring_buffer.c
2949
int cpu, err;
kernel/trace/ring_buffer.c
2984
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
2985
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
2993
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
2994
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
3023
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
3024
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
3029
if (!cpu_online(cpu)) {
kernel/trace/ring_buffer.c
3035
if (cpu != smp_processor_id()) {
kernel/trace/ring_buffer.c
3037
schedule_work_on(cpu,
kernel/trace/ring_buffer.c
3047
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
3048
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
3052
if (cpu_online(cpu))
kernel/trace/ring_buffer.c
3121
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
3122
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
313
#define for_each_buffer_cpu(buffer, cpu) \
kernel/trace/ring_buffer.c
3133
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
3136
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
314
for_each_cpu(cpu, buffer->cpumask)
kernel/trace/ring_buffer.c
316
#define for_each_online_buffer_cpu(buffer, cpu) \
kernel/trace/ring_buffer.c
317
for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
kernel/trace/ring_buffer.c
4025
int cpu;
kernel/trace/ring_buffer.c
4035
cpu = housekeeping_any_cpu(HK_TYPE_KERNEL_NOISE);
kernel/trace/ring_buffer.c
4036
return irq_work_queue_on(&irq_work->work, cpu);
kernel/trace/ring_buffer.c
4065
if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
kernel/trace/ring_buffer.c
409
#define alloc_cpu_buffer(cpu) (struct ring_buffer_per_cpu *) \
kernel/trace/ring_buffer.c
411
cache_line_size()), GFP_KERNEL, cpu_to_node(cpu));
kernel/trace/ring_buffer.c
413
#define alloc_cpu_page(cpu) (struct buffer_page *) \
kernel/trace/ring_buffer.c
415
cache_line_size()), GFP_KERNEL, cpu_to_node(cpu));
kernel/trace/ring_buffer.c
417
static struct buffer_data_page *alloc_cpu_data(int cpu, int order)
kernel/trace/ring_buffer.c
4196
int cpu;
kernel/trace/ring_buffer.c
4200
cpu = raw_smp_processor_id();
kernel/trace/ring_buffer.c
4201
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
4216
int cpu;
kernel/trace/ring_buffer.c
4219
cpu = raw_smp_processor_id();
kernel/trace/ring_buffer.c
4220
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
4237
int cpu = raw_smp_processor_id();
kernel/trace/ring_buffer.c
4239
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
430
page = alloc_pages_node(cpu_to_node(cpu), mflags, order);
kernel/trace/ring_buffer.c
4435
ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta);
kernel/trace/ring_buffer.c
4439
cpu_buffer->cpu, ts, delta);
kernel/trace/ring_buffer.c
4446
cpu_buffer->cpu,
kernel/trace/ring_buffer.c
4706
int cpu;
kernel/trace/ring_buffer.c
4714
cpu = raw_smp_processor_id();
kernel/trace/ring_buffer.c
4716
if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
kernel/trace/ring_buffer.c
4719
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
4807
int cpu;
kernel/trace/ring_buffer.c
4812
cpu = smp_processor_id();
kernel/trace/ring_buffer.c
4813
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
4854
int cpu;
kernel/trace/ring_buffer.c
4861
cpu = raw_smp_processor_id();
kernel/trace/ring_buffer.c
4863
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
4866
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5024
bool ring_buffer_record_is_on_cpu(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
5028
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5044
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
5048
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
5051
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5064
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
5068
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
5071
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5081
u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
5088
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
5091
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5114
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
5119
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
5122
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5134
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
5138
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
514
int cpu;
kernel/trace/ring_buffer.c
5141
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5153
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
5158
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
5161
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5176
ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
5181
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
5184
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5198
ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
5203
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
5206
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5219
ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
5223
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
5226
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5242
int cpu;
kernel/trace/ring_buffer.c
5245
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
5246
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5265
int cpu;
kernel/trace/ring_buffer.c
5268
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
5269
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5709
cpu_buffer->cpu, ts);
kernel/trace/ring_buffer.c
5719
cpu_buffer->cpu, ts);
kernel/trace/ring_buffer.c
5802
cpu_buffer->cpu, ts);
kernel/trace/ring_buffer.c
5812
cpu_buffer->cpu, ts);
kernel/trace/ring_buffer.c
5866
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
kernel/trace/ring_buffer.c
5869
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5874
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
5944
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
kernel/trace/ring_buffer.c
5956
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
5959
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
5996
ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags)
kernel/trace/ring_buffer.c
6001
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
6016
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6076
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
6078
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
6081
return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
kernel/trace/ring_buffer.c
6238
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
6240
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6242
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
6273
int cpu;
kernel/trace/ring_buffer.c
6278
for_each_online_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
6279
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6288
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
6289
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6314
int cpu;
kernel/trace/ring_buffer.c
6319
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
6320
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6329
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
6330
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6352
int cpu;
kernel/trace/ring_buffer.c
6355
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
6356
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6376
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
6383
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
6386
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6410
struct trace_buffer *buffer_b, int cpu)
kernel/trace/ring_buffer.c
6416
if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
kernel/trace/ring_buffer.c
6417
!cpumask_test_cpu(cpu, buffer_b->cpumask))
kernel/trace/ring_buffer.c
6420
cpu_buffer_a = buffer_a->buffers[cpu];
kernel/trace/ring_buffer.c
6421
cpu_buffer_b = buffer_b->buffers[cpu];
kernel/trace/ring_buffer.c
6470
buffer_a->buffers[cpu] = cpu_buffer_b;
kernel/trace/ring_buffer.c
6471
buffer_b->buffers[cpu] = cpu_buffer_a;
kernel/trace/ring_buffer.c
6503
ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
6509
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
6517
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6532
bpage->data = alloc_cpu_data(cpu, cpu_buffer->buffer->subbuf_order);
kernel/trace/ring_buffer.c
6551
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
kernel/trace/ring_buffer.c
6559
if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
kernel/trace/ring_buffer.c
6562
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6625
size_t len, int cpu, int full)
kernel/trace/ring_buffer.c
6627
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6636
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
6862
int cpu;
kernel/trace/ring_buffer.c
6892
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
6894
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
6897
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6905
nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
kernel/trace/ring_buffer.c
6927
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
6932
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
6935
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
6996
for_each_buffer_cpu(buffer, cpu) {
kernel/trace/ring_buffer.c
6997
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
7077
rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
7081
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
7084
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
7250
int ring_buffer_map(struct trace_buffer *buffer, int cpu,
kernel/trace/ring_buffer.c
7257
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
7260
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
7317
void ring_buffer_map_dup(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
7321
if (WARN_ON(!cpumask_test_cpu(cpu, buffer->cpumask)))
kernel/trace/ring_buffer.c
7324
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
7334
int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
7339
if (!cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
7342
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
7371
int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
7379
cpu_buffer = rb_get_mapped_buffer(buffer, cpu);
kernel/trace/ring_buffer.c
7450
cpu, missed_events, cpu_buffer->reader_page->page->time_stamp);
kernel/trace/ring_buffer.c
7476
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
kernel/trace/ring_buffer.c
7484
if (cpumask_test_cpu(cpu, buffer->cpumask))
kernel/trace/ring_buffer.c
7502
buffer->buffers[cpu] =
kernel/trace/ring_buffer.c
7503
rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
kernel/trace/ring_buffer.c
7504
if (!buffer->buffers[cpu]) {
kernel/trace/ring_buffer.c
7506
cpu);
kernel/trace/ring_buffer.c
7510
cpumask_set_cpu(cpu, buffer->cpumask);
kernel/trace/ring_buffer.c
7546
int cpu;
kernel/trace/ring_buffer.c
7653
int cpu = smp_processor_id();
kernel/trace/ring_buffer.c
7655
data = &rb_data[cpu];
kernel/trace/ring_buffer.c
7676
int cpu;
kernel/trace/ring_buffer.c
7693
for_each_online_cpu(cpu) {
kernel/trace/ring_buffer.c
7694
rb_data[cpu].buffer = buffer;
kernel/trace/ring_buffer.c
7695
rb_data[cpu].cpu = cpu;
kernel/trace/ring_buffer.c
7696
rb_data[cpu].cnt = cpu;
kernel/trace/ring_buffer.c
7697
rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
kernel/trace/ring_buffer.c
7698
cpu, "rbtester/%u");
kernel/trace/ring_buffer.c
7699
if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
kernel/trace/ring_buffer.c
7701
ret = PTR_ERR(rb_threads[cpu]);
kernel/trace/ring_buffer.c
771
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
7734
for_each_online_cpu(cpu) {
kernel/trace/ring_buffer.c
7735
if (!rb_threads[cpu])
kernel/trace/ring_buffer.c
7737
kthread_stop(rb_threads[cpu]);
kernel/trace/ring_buffer.c
7746
for_each_online_cpu(cpu) {
kernel/trace/ring_buffer.c
7748
struct rb_test_data *data = &rb_data[cpu];
kernel/trace/ring_buffer.c
777
read = local_read(&buffer->buffers[cpu]->pages_read);
kernel/trace/ring_buffer.c
7772
pr_info("CPU %d:\n", cpu);
kernel/trace/ring_buffer.c
778
lost = local_read(&buffer->buffers[cpu]->pages_lost);
kernel/trace/ring_buffer.c
7785
while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
kernel/trace/ring_buffer.c
779
cnt = local_read(&buffer->buffers[cpu]->pages_touched);
kernel/trace/ring_buffer.c
795
static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
kernel/trace/ring_buffer.c
797
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
810
dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;
kernel/trace/ring_buffer.c
855
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
kernel/trace/ring_buffer.c
863
if (cpu == RING_BUFFER_ALL_CPUS) {
kernel/trace/ring_buffer.c
866
for_each_buffer_cpu(buffer, cpu)
kernel/trace/ring_buffer.c
867
ring_buffer_wake_waiters(buffer, cpu);
kernel/trace/ring_buffer.c
873
if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
kernel/trace/ring_buffer.c
876
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
887
static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
kernel/trace/ring_buffer.c
893
if (cpu == RING_BUFFER_ALL_CPUS)
kernel/trace/ring_buffer.c
896
cpu_buffer = buffer->buffers[cpu];
kernel/trace/ring_buffer.c
898
if (!ring_buffer_empty_cpu(buffer, cpu)) {
kernel/trace/ring_buffer.c
907
ret = !pagebusy && full_hit(buffer, cpu, full);
kernel/trace/ring_buffer.c
920
int cpu, int full, ring_buffer_cond_fn cond, void *data)
kernel/trace/ring_buffer.c
922
if (rb_watermark_hit(buffer, cpu, full))
kernel/trace/ring_buffer.c
985
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
kernel/trace/ring_buffer.c
999
if (cpu == RING_BUFFER_ALL_CPUS) {
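Two conventions recur throughout the ring_buffer.c entries: a structure-scoped iteration macro over a private cpumask (for_each_buffer_cpu) and a sentinel cpu value meaning every CPU (RING_BUFFER_ALL_CPUS). A simplified sketch with invented example_* names:

#include <linux/bug.h>
#include <linux/cpumask.h>

#define EXAMPLE_ALL_CPUS	-1

struct example_buffer {
	struct cpumask	*cpumask;	/* CPUs that own an instance */
};

#define for_each_example_cpu(ex, cpu) \
	for_each_cpu(cpu, (ex)->cpumask)

static void example_wake_waiters(struct example_buffer *ex, int cpu)
{
	if (cpu == EXAMPLE_ALL_CPUS) {
		/* Fan out to one call per tracked CPU. */
		for_each_example_cpu(ex, cpu)
			example_wake_waiters(ex, cpu);
		return;
	}

	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;

	/* ... operate on the single per-CPU buffer here ... */
}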
kernel/trace/ring_buffer_benchmark.c
105
static enum event_status read_page(int cpu)
kernel/trace/ring_buffer_benchmark.c
117
bpage = ring_buffer_alloc_read_page(buffer, cpu);
kernel/trace/ring_buffer_benchmark.c
122
ret = ring_buffer_read_page(buffer, bpage, page_size, cpu, 1);
kernel/trace/ring_buffer_benchmark.c
148
if (*entry != cpu) {
kernel/trace/ring_buffer_benchmark.c
161
if (*entry != cpu) {
kernel/trace/ring_buffer_benchmark.c
177
ring_buffer_free_read_page(buffer, cpu, bpage);
kernel/trace/ring_buffer_benchmark.c
198
int cpu;
kernel/trace/ring_buffer_benchmark.c
201
for_each_online_cpu(cpu) {
kernel/trace/ring_buffer_benchmark.c
205
stat = read_event(cpu);
kernel/trace/ring_buffer_benchmark.c
207
stat = read_page(cpu);
kernel/trace/ring_buffer_benchmark.c
85
static enum event_status read_event(int cpu)
kernel/trace/ring_buffer_benchmark.c
91
event = ring_buffer_consume(buffer, cpu, &ts, NULL);
kernel/trace/ring_buffer_benchmark.c
96
if (*entry != cpu) {
kernel/trace/rv/monitors/nrp/nrp.c
65
int cpu, int tif)
kernel/trace/rv/monitors/opid/opid.c
85
static void handle_sched_need_resched(void *data, struct task_struct *tsk, int cpu, int tif)
kernel/trace/trace.c
1555
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
kernel/trace/trace.c
1558
struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
kernel/trace/trace.c
1560
struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
kernel/trace/trace.c
1562
max_buf->cpu = cpu;
kernel/trace/trace.c
1592
struct task_struct *tsk, int cpu) { }
kernel/trace/trace.c
1607
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
kernel/trace/trace.c
1636
__update_max_tr(tr, tsk, cpu);
kernel/trace/trace.c
1653
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
kernel/trace/trace.c
1669
ret = ring_buffer_swap_cpu(tr->snapshot_buffer.buffer, tr->array_buffer.buffer, cpu);
kernel/trace/trace.c
1685
__update_max_tr(tr, tsk, cpu);
kernel/trace/trace.c
1998
static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
kernel/trace/trace.c
2009
ring_buffer_reset_cpu(buffer, cpu);
kernel/trace/trace.c
2026
buf->time_start = buffer_ftrace_now(buf, buf->cpu);
kernel/trace/trace.c
2045
buf->time_start = buffer_ftrace_now(buf, buf->cpu);
kernel/trace/trace.c
2238
int cpu;
kernel/trace/trace.c
2245
for_each_tracing_cpu(cpu) {
kernel/trace/trace.c
2246
page = alloc_pages_node(cpu_to_node(cpu),
kernel/trace/trace.c
2257
per_cpu(trace_buffered_event, cpu) = event;
kernel/trace/trace.c
2260
if (cpu == smp_processor_id() &&
kernel/trace/trace.c
2262
per_cpu(trace_buffered_event, cpu))
kernel/trace/trace.c
2288
int cpu;
kernel/trace/trace.c
2305
for_each_tracing_cpu(cpu) {
kernel/trace/trace.c
2306
free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
kernel/trace/trace.c
2307
per_cpu(trace_buffered_event, cpu) = NULL;
kernel/trace/trace.c
2811
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
kernel/trace/trace.c
2819
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
kernel/trace/trace.c
2823
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
kernel/trace/trace.c
2831
event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
kernel/trace/trace.c
2854
int cpu;
kernel/trace/trace.c
2870
for_each_tracing_cpu(cpu) {
kernel/trace/trace.c
2872
if (ring_buffer_empty_cpu(buffer, cpu))
kernel/trace/trace.c
2875
ent = peek_next_entry(iter, cpu, &ts, &lost_events);
kernel/trace/trace.c
2882
next_cpu = cpu;
kernel/trace/trace.c
3147
iter->ent = __find_next_entry(iter, &iter->cpu,
kernel/trace/trace.c
3158
ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
kernel/trace/trace.c
3189
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
kernel/trace/trace.c
3195
per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
kernel/trace/trace.c
3197
buf_iter = trace_buffer_iter(iter, cpu);
kernel/trace/trace.c
3217
per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
kernel/trace/trace.c
3231
int cpu;
kernel/trace/trace.c
3250
iter->cpu = 0;
kernel/trace/trace.c
3254
for_each_tracing_cpu(cpu)
kernel/trace/trace.c
3255
tracing_iter_reset(iter, cpu);
kernel/trace/trace.c
3294
unsigned long *entries, int cpu)
kernel/trace/trace.c
3298
count = ring_buffer_entries_cpu(buf->buffer, cpu);
kernel/trace/trace.c
3304
if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
kernel/trace/trace.c
3305
count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
kernel/trace/trace.c
3310
ring_buffer_overrun_cpu(buf->buffer, cpu);
kernel/trace/trace.c
3319
int cpu;
kernel/trace/trace.c
3324
for_each_tracing_cpu(cpu) {
kernel/trace/trace.c
3325
get_total_entries_cpu(buf, &t, &e, cpu);
kernel/trace/trace.c
3331
unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
kernel/trace/trace.c
3338
get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
kernel/trace/trace.c
3414
struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
kernel/trace/trace.c
3431
buf->cpu,
kernel/trace/trace.c
3473
cpumask_test_cpu(iter->cpu, iter->started))
kernel/trace/trace.c
3476
if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
kernel/trace/trace.c
3480
cpumask_set_cpu(iter->cpu, iter->started);
kernel/trace/trace.c
3485
iter->cpu);
kernel/trace/trace.c
3561
entry->pid, iter->cpu, iter->ts);
kernel/trace/trace.c
3587
SEQ_PUT_HEX_FIELD(s, iter->cpu);
kernel/trace/trace.c
3616
SEQ_PUT_FIELD(s, iter->cpu);
kernel/trace/trace.c
3630
int cpu;
kernel/trace/trace.c
3634
cpu = iter->cpu_file;
kernel/trace/trace.c
3635
buf_iter = trace_buffer_iter(iter, cpu);
kernel/trace/trace.c
3640
if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
kernel/trace/trace.c
3646
for_each_tracing_cpu(cpu) {
kernel/trace/trace.c
3647
buf_iter = trace_buffer_iter(iter, cpu);
kernel/trace/trace.c
3652
if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
kernel/trace/trace.c
3670
iter->cpu);
kernel/trace/trace.c
3673
iter->cpu, iter->lost_events);
kernel/trace/trace.c
3897
int cpu;
kernel/trace/trace.c
3974
for_each_tracing_cpu(cpu) {
kernel/trace/trace.c
3975
iter->buffer_iter[cpu] =
kernel/trace/trace.c
3977
cpu, GFP_KERNEL);
kernel/trace/trace.c
3978
tracing_iter_reset(iter, cpu);
kernel/trace/trace.c
3981
cpu = iter->cpu_file;
kernel/trace/trace.c
3982
iter->buffer_iter[cpu] =
kernel/trace/trace.c
3984
cpu, GFP_KERNEL);
kernel/trace/trace.c
3985
tracing_iter_reset(iter, cpu);
kernel/trace/trace.c
4079
int cpu;
kernel/trace/trace.c
4090
for_each_tracing_cpu(cpu) {
kernel/trace/trace.c
4091
if (iter->buffer_iter[cpu])
kernel/trace/trace.c
4092
ring_buffer_read_finish(iter->buffer_iter[cpu]);
kernel/trace/trace.c
4143
int cpu = tracing_get_cpu(inode);
kernel/trace/trace.c
4151
if (cpu == RING_BUFFER_ALL_CPUS)
kernel/trace/trace.c
4154
tracing_reset_cpu(trace_buf, cpu);
kernel/trace/trace.c
4345
int cpu;
kernel/trace/trace.c
4352
for_each_tracing_cpu(cpu) {
kernel/trace/trace.c
4357
if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
kernel/trace/trace.c
4358
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
kernel/trace/trace.c
4359
ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
kernel/trace/trace.c
4361
ring_buffer_record_disable_cpu(tr->snapshot_buffer.buffer, cpu);
kernel/trace/trace.c
4364
if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
kernel/trace/trace.c
4365
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
kernel/trace/trace.c
4366
ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
kernel/trace/trace.c
4368
ring_buffer_record_enable_cpu(tr->snapshot_buffer.buffer, cpu);
kernel/trace/trace.c
5227
int cpu;
kernel/trace/trace.c
5229
for_each_tracing_cpu(cpu)
kernel/trace/trace.c
5230
per_cpu_ptr(buf->data, cpu)->entries = val;
kernel/trace/trace.c
5233
static void update_buffer_entries(struct array_buffer *buf, int cpu)
kernel/trace/trace.c
5235
if (cpu == RING_BUFFER_ALL_CPUS) {
kernel/trace/trace.c
5238
per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
kernel/trace/trace.c
5247
int cpu, ret = 0;
kernel/trace/trace.c
5250
for_each_tracing_cpu(cpu) {
kernel/trace/trace.c
5252
per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
kernel/trace/trace.c
5255
per_cpu_ptr(trace_buf->data, cpu)->entries =
kernel/trace/trace.c
5256
per_cpu_ptr(size_buf->data, cpu)->entries;
kernel/trace/trace.c
5271
unsigned long size, int cpu)
kernel/trace/trace.c
5289
ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
kernel/trace/trace.c
5297
ret = ring_buffer_resize(tr->snapshot_buffer.buffer, size, cpu);
kernel/trace/trace.c
5300
&tr->array_buffer, cpu);
kernel/trace/trace.c
5322
update_buffer_entries(&tr->snapshot_buffer, cpu);
kernel/trace/trace.c
5327
update_buffer_entries(&tr->array_buffer, cpu);
kernel/trace/trace.c
5768
static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
kernel/trace/trace.c
5770
if (cpu == RING_BUFFER_ALL_CPUS) {
kernel/trace/trace.c
5775
} else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
kernel/trace/trace.c
5776
cpumask_set_cpu(cpu, tr->pipe_cpumask);
kernel/trace/trace.c
5782
static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
kernel/trace/trace.c
5784
if (cpu == RING_BUFFER_ALL_CPUS) {
kernel/trace/trace.c
5788
WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
kernel/trace/trace.c
5789
cpumask_clear_cpu(cpu, tr->pipe_cpumask);
kernel/trace/trace.c
5797
int cpu;
kernel/trace/trace.c
5805
cpu = tracing_get_cpu(inode);
kernel/trace/trace.c
5806
ret = open_pipe_on_cpu(tr, cpu);
kernel/trace/trace.c
5837
iter->cpu_file = cpu;
kernel/trace/trace.c
5853
close_pipe_on_cpu(tr, cpu);
kernel/trace/trace.c
6250
int cpu = tracing_get_cpu(inode);
kernel/trace/trace.c
6257
if (cpu == RING_BUFFER_ALL_CPUS) {
kernel/trace/trace.c
6258
int cpu, buf_size_same;
kernel/trace/trace.c
6264
for_each_tracing_cpu(cpu) {
kernel/trace/trace.c
6267
size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
kernel/trace/trace.c
6268
if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
kernel/trace/trace.c
6284
r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
kernel/trace/trace.c
6326
int r, cpu;
kernel/trace/trace.c
6330
for_each_tracing_cpu(cpu) {
kernel/trace/trace.c
6331
size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
kernel/trace/trace.c
640
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
kernel/trace/trace.c
6445
int cpu = tracing_get_cpu(inode);
kernel/trace/trace.c
6452
ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu);
kernel/trace/trace.c
649
ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
kernel/trace/trace.c
654
u64 ftrace_now(int cpu)
kernel/trace/trace.c
656
return buffer_ftrace_now(&global_trace.array_buffer, cpu);
kernel/trace/trace.c
6568
int cpu;
kernel/trace/trace.c
6573
for_each_possible_cpu(cpu) {
kernel/trace/trace.c
6574
buf = per_cpu_ptr(tinfo->tbuf, cpu)->buf;
kernel/trace/trace.c
6583
int cpu;
kernel/trace/trace.c
6595
for_each_possible_cpu(cpu) {
kernel/trace/trace.c
6596
per_cpu_ptr(tinfo->tbuf, cpu)->buf = NULL;
kernel/trace/trace.c
6599
for_each_possible_cpu(cpu) {
kernel/trace/trace.c
6601
cpu_to_node(cpu));
kernel/trace/trace.c
6604
per_cpu_ptr(tinfo->tbuf, cpu)->buf = buf;
kernel/trace/trace.c
6760
int cpu = smp_processor_id();
kernel/trace/trace.c
6761
char *buffer = per_cpu_ptr(tinfo->tbuf, cpu)->buf;
kernel/trace/trace.c
6799
cpu = smp_processor_id();
kernel/trace/trace.c
6800
buffer = per_cpu_ptr(tinfo->tbuf, cpu)->buf;
kernel/trace/trace.c
6816
cnt = nr_context_switches_cpu(cpu);
kernel/trace/trace.c
6852
} while (nr_context_switches_cpu(cpu) != cnt);
kernel/trace/trace.c
726
static inline void trace_access_lock(int cpu)
kernel/trace/trace.c
728
if (cpu == RING_BUFFER_ALL_CPUS) {
kernel/trace/trace.c
738
mutex_lock(&per_cpu(cpu_access_lock, cpu));
kernel/trace/trace.c
742
static inline void trace_access_unlock(int cpu)
kernel/trace/trace.c
744
if (cpu == RING_BUFFER_ALL_CPUS) {
kernel/trace/trace.c
747
mutex_unlock(&per_cpu(cpu_access_lock, cpu));
kernel/trace/trace.c
754
int cpu;
kernel/trace/trace.c
756
for_each_possible_cpu(cpu)
kernel/trace/trace.c
757
mutex_init(&per_cpu(cpu_access_lock, cpu));
kernel/trace/trace.c
764
static inline void trace_access_lock(int cpu)
kernel/trace/trace.c
766
(void)cpu;
kernel/trace/trace.c
770
static inline void trace_access_unlock(int cpu)
kernel/trace/trace.c
772
(void)cpu;
kernel/trace/trace.c
7986
int cpu;
kernel/trace/trace.c
7994
ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
kernel/trace/trace.c
8099
ref->cpu = iter->cpu_file;
kernel/trace/trace.c
8104
ring_buffer_free_read_page(ref->buffer, ref->cpu,
kernel/trace/trace.c
8310
int cpu = tracing_get_cpu(inode);
kernel/trace/trace.c
8322
cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
kernel/trace/trace.c
8325
cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
kernel/trace/trace.c
8328
cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
kernel/trace/trace.c
8331
cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
kernel/trace/trace.c
8336
t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
kernel/trace/trace.c
8347
ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
kernel/trace/trace.c
8353
cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
kernel/trace/trace.c
8356
cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
kernel/trace/trace.c
8590
static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
kernel/trace/trace.c
8604
"Could not create tracefs directory 'per_cpu/%d'\n", cpu);
kernel/trace/trace.c
8611
void *data, long cpu, const struct file_operations *fops)
kernel/trace/trace.c
8616
d_inode(ret)->i_cdev = (void *)(cpu + 1);
kernel/trace/trace.c
8621
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
kernel/trace/trace.c
8623
struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
kernel/trace/trace.c
8630
snprintf(cpu_dir, 30, "cpu%ld", cpu);
kernel/trace/trace.c
8639
tr, cpu, &tracing_pipe_fops);
kernel/trace/trace.c
8643
tr, cpu, &tracing_fops);
kernel/trace/trace.c
8646
tr, cpu, &tracing_buffers_fops);
kernel/trace/trace.c
8649
tr, cpu, &tracing_stats_fops);
kernel/trace/trace.c
8652
tr, cpu, &tracing_entries_fops);
kernel/trace/trace.c
8656
tr, cpu, &tracing_buffer_meta_fops);
kernel/trace/trace.c
8660
tr, cpu, &snapshot_fops);
kernel/trace/trace.c
8663
tr, cpu, &snapshot_raw_fops);
kernel/trace/trace.c
9869
int cpu;
kernel/trace/trace.c
9946
for_each_tracing_cpu(cpu)
kernel/trace/trace.c
9947
tracing_init_tracefs_percpu(tr, cpu);
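The trace.c entries around trace_access_lock() show per-CPU mutexes guarding single-CPU readers, a sentinel routing the all-CPUs case elsewhere, and an init loop over every possible CPU. A trimmed sketch; the all-CPUs branch is elided here, and in the real code it takes a lock that excludes every per-CPU reader:

#include <linux/mutex.h>
#include <linux/percpu.h>

#define EXAMPLE_ALL_CPUS	-1

static DEFINE_PER_CPU(struct mutex, example_access_lock);

static void example_access_lock(int cpu)
{
	if (cpu == EXAMPLE_ALL_CPUS) {
		/* real code: take a global lock covering all readers */
		return;
	}
	mutex_lock(&per_cpu(example_access_lock, cpu));
}

static void example_access_unlock(int cpu)
{
	if (cpu == EXAMPLE_ALL_CPUS)
		return;
	mutex_unlock(&per_cpu(example_access_lock, cpu));
}

static void example_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(example_access_lock, cpu));
}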
kernel/trace/trace.h
222
int cpu;
kernel/trace/trace.h
664
trace_buffer_iter(struct trace_iterator *iter, int cpu)
kernel/trace/trace.h
666
return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
kernel/trace/trace.h
701
static inline bool tracer_tracing_is_on_cpu(struct trace_array *tr, int cpu)
kernel/trace/trace.h
704
return ring_buffer_record_is_on_cpu(tr->array_buffer.buffer, cpu);
kernel/trace/trace.h
718
int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu);
kernel/trace/trace.h
740
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
kernel/trace/trace.h
742
unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
kernel/trace/trace.h
775
#define for_each_tracing_cpu(cpu) \
kernel/trace/trace.h
776
for_each_cpu(cpu, tracing_buffer_mask)
kernel/trace/trace.h
801
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
kernel/trace/trace.h
804
struct task_struct *tsk, int cpu);
kernel/trace/trace.h
842
extern u64 ftrace_now(int cpu);
kernel/trace/trace_event_perf.c
101
int cpu;
kernel/trace/trace_event_perf.c
111
for_each_possible_cpu(cpu)
kernel/trace/trace_event_perf.c
112
INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));
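The trace_event_perf.c entries show the classic allocate-then-initialize idiom for dynamic per-CPU data: alloc_percpu() followed by a for_each_possible_cpu() pass over each instance. A minimal sketch:

#include <linux/list.h>
#include <linux/percpu.h>

static struct hlist_head __percpu *example_alloc_list(void)
{
	struct hlist_head __percpu *list;
	int cpu;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		return NULL;

	/* alloc_percpu() zeroes the memory, but an hlist head still
	 * needs explicit initialization on every possible CPU. */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	return list;
}

Iterating possible (not merely online) CPUs means later hotplugged CPUs find their instance already initialized.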
kernel/trace/trace_events.c
1181
int cpu;
kernel/trace/trace_events.c
1199
for_each_possible_cpu(cpu)
kernel/trace/trace_events.c
1200
per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
kernel/trace/trace_events.c
196
__generic_field(int, cpu, FILTER_CPU);
kernel/trace/trace_events.c
5018
int cpu;
kernel/trace/trace_events.c
5022
cpu = raw_smp_processor_id();
kernel/trace/trace_events.c
5023
disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
kernel/trace/trace_events.c
5040
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
kernel/trace/trace_events_filter.c
669
do_filter_scalar_cpumask(int op, unsigned int cpu, const struct cpumask *mask)
kernel/trace/trace_events_filter.c
681
return cpumask_test_cpu(cpu, mask);
kernel/trace/trace_events_filter.c
688
do_filter_cpumask_scalar(int op, const struct cpumask *mask, unsigned int cpu)
kernel/trace/trace_events_filter.c
692
return cpumask_test_cpu(cpu, mask) &&
kernel/trace/trace_events_filter.c
695
return !cpumask_test_cpu(cpu, mask) ||
kernel/trace/trace_events_filter.c
698
return cpumask_test_cpu(cpu, mask);
kernel/trace/trace_events_filter.c
751
unsigned int cpu = *addr; \
kernel/trace/trace_events_filter.c
753
if (cpu >= nr_cpu_ids) \
kernel/trace/trace_events_filter.c
756
return do_filter_scalar_cpumask(pred->op, cpu, pred->mask); \
kernel/trace/trace_events_filter.c
938
int cpu, cmp;
kernel/trace/trace_events_filter.c
940
cpu = raw_smp_processor_id();
kernel/trace/trace_events_filter.c
945
return cpu == cmp;
kernel/trace/trace_events_filter.c
947
return cpu != cmp;
kernel/trace/trace_events_filter.c
949
return cpu < cmp;
kernel/trace/trace_events_filter.c
951
return cpu <= cmp;
kernel/trace/trace_events_filter.c
953
return cpu > cmp;
kernel/trace/trace_events_filter.c
955
return cpu >= cmp;
kernel/trace/trace_events_filter.c
964
int cpu = raw_smp_processor_id();
kernel/trace/trace_events_filter.c
966
return do_filter_scalar_cpumask(pred->op, cpu, pred->mask);
kernel/trace/trace_events_filter.c
986
unsigned int cpu = pred->val;
kernel/trace/trace_events_filter.c
988
return do_filter_cpumask_scalar(pred->op, mask, cpu);
kernel/trace/trace_events_hist.c
891
int cpu = smp_processor_id();
kernel/trace/trace_events_hist.c
893
return cpu;
kernel/trace/trace_functions.c
166
tr->array_buffer.cpu = raw_smp_processor_id();
kernel/trace/trace_functions.c
279
int cpu;
kernel/trace/trace_functions.c
292
cpu = raw_smp_processor_id();
kernel/trace/trace_functions.c
293
data = per_cpu_ptr(tr->array_buffer.data, cpu);
kernel/trace/trace_functions.c
391
int cpu;
kernel/trace/trace_functions.c
403
cpu = raw_smp_processor_id();
kernel/trace/trace_functions.c
404
data = per_cpu_ptr(tr->array_buffer.data, cpu);
kernel/trace/trace_functions.c
408
last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
kernel/trace/trace_functions_graph.c
1014
cpu, iter->ent->pid, flags);
kernel/trace/trace_functions_graph.c
1022
struct trace_seq *s, int cpu, u32 flags)
kernel/trace/trace_functions_graph.c
1033
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1035
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
kernel/trace/trace_functions_graph.c
1087
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1090
verif_pid(s, ent->pid, cpu, data);
kernel/trace/trace_functions_graph.c
1094
print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
kernel/trace/trace_functions_graph.c
1109
print_graph_cpu(s, cpu);
kernel/trace/trace_functions_graph.c
1139
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1154
depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
kernel/trace/trace_functions_graph.c
1187
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1200
depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
kernel/trace/trace_functions_graph.c
1236
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1267
ret = print_graph_entry_nested(iter, entry, s, cpu, flags);
kernel/trace/trace_functions_graph.c
1276
data->cpu = cpu;
kernel/trace/trace_functions_graph.c
1297
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1308
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1310
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
kernel/trace/trace_functions_graph.c
1364
cpu, pid, flags);
kernel/trace/trace_functions_graph.c
1382
depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
kernel/trace/trace_functions_graph.c
1445
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1448
if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
kernel/trace/trace_functions_graph.c
1449
per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
kernel/trace/trace_functions_graph.c
1459
iter->cpu = data->cpu;
kernel/trace/trace_functions_graph.c
1461
if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
kernel/trace/trace_functions_graph.c
1462
per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
kernel/trace/trace_functions_graph.c
1465
iter->cpu = cpu;
kernel/trace/trace_functions_graph.c
1616
int cpu;
kernel/trace/trace_functions_graph.c
1631
for_each_possible_cpu(cpu) {
kernel/trace/trace_functions_graph.c
1632
pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
kernel/trace/trace_functions_graph.c
1633
int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
kernel/trace/trace_functions_graph.c
1634
int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
kernel/trace/trace_functions_graph.c
1635
int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
kernel/trace/trace_functions_graph.c
55
int cpu;
kernel/trace/trace_functions_graph.c
550
static void print_graph_cpu(struct trace_seq *s, int cpu)
kernel/trace/trace_functions_graph.c
557
trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
kernel/trace/trace_functions_graph.c
602
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
kernel/trace/trace_functions_graph.c
610
last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
kernel/trace/trace_functions_graph.c
629
print_graph_cpu(s, cpu);
kernel/trace/trace_functions_graph.c
654
ring_iter = trace_buffer_iter(iter, iter->cpu);
kernel/trace/trace_functions_graph.c
66
{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
kernel/trace/trace_functions_graph.c
664
ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
kernel/trace/trace_functions_graph.c
666
event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
kernel/trace/trace_functions_graph.c
733
enum trace_type type, int cpu, pid_t pid, u32 flags)
kernel/trace/trace_functions_graph.c
756
print_graph_cpu(s, cpu);
kernel/trace/trace_functions_graph.c
959
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
971
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
kernel/trace/trace_hwlat.c
456
static void stop_cpu_kthread(unsigned int cpu)
kernel/trace/trace_hwlat.c
460
kthread = per_cpu(hwlat_per_cpu_data, cpu).kthread;
kernel/trace/trace_hwlat.c
463
per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
kernel/trace/trace_hwlat.c
474
unsigned int cpu;
kernel/trace/trace_hwlat.c
477
for_each_online_cpu(cpu)
kernel/trace/trace_hwlat.c
478
stop_cpu_kthread(cpu);
kernel/trace/trace_hwlat.c
485
static int start_cpu_kthread(unsigned int cpu)
kernel/trace/trace_hwlat.c
490
if (per_cpu(hwlat_per_cpu_data, cpu).kthread)
kernel/trace/trace_hwlat.c
493
kthread = kthread_run_on_cpu(kthread_fn, NULL, cpu, "hwlatd/%u");
kernel/trace/trace_hwlat.c
499
per_cpu(hwlat_per_cpu_data, cpu).kthread = kthread;
kernel/trace/trace_hwlat.c
508
unsigned int cpu = smp_processor_id();
kernel/trace/trace_hwlat.c
517
if (!cpu_online(cpu))
kernel/trace/trace_hwlat.c
519
if (!cpumask_test_cpu(cpu, tr->tracing_cpumask))
kernel/trace/trace_hwlat.c
522
start_cpu_kthread(cpu);
kernel/trace/trace_hwlat.c
535
static int hwlat_cpu_init(unsigned int cpu)
kernel/trace/trace_hwlat.c
537
schedule_work_on(cpu, &hwlat_hotplug_work);
kernel/trace/trace_hwlat.c
544
static int hwlat_cpu_die(unsigned int cpu)
kernel/trace/trace_hwlat.c
546
stop_cpu_kthread(cpu);
kernel/trace/trace_hwlat.c
578
unsigned int cpu;
kernel/trace/trace_hwlat.c
587
for_each_cpu(cpu, current_mask) {
kernel/trace/trace_hwlat.c
588
retval = start_cpu_kthread(cpu);
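The start_cpu_kthread()/stop_cpu_kthread() pairs indexed above manage one bound kthread per CPU via kthread_run_on_cpu(). A minimal sketch of the same pattern, assuming a kernel-module context; the thread body, per-CPU slot, and thread name are hypothetical:

    #include <linux/kthread.h>
    #include <linux/percpu.h>
    #include <linux/sched.h>
    #include <linux/jiffies.h>
    #include <linux/err.h>

    static DEFINE_PER_CPU(struct task_struct *, demo_kthread);

    static int demo_fn(void *data)
    {
            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);     /* park ~1s */
            return 0;
    }

    static int demo_start_cpu_kthread(unsigned int cpu)
    {
            struct task_struct *k;

            if (per_cpu(demo_kthread, cpu))
                    return 0;                       /* already running */
            k = kthread_run_on_cpu(demo_fn, NULL, cpu, "demod/%u");
            if (IS_ERR(k))
                    return PTR_ERR(k);
            per_cpu(demo_kthread, cpu) = k;
            return 0;
    }

    static void demo_stop_cpu_kthread(unsigned int cpu)
    {
            struct task_struct *k = per_cpu(demo_kthread, cpu);

            if (k) {
                    kthread_stop(k);
                    per_cpu(demo_kthread, cpu) = NULL;
            }
    }
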
kernel/trace/trace_irqsoff.c
104
int cpu;
kernel/trace/trace_irqsoff.c
112
cpu = raw_smp_processor_id();
kernel/trace/trace_irqsoff.c
113
if (likely(!per_cpu(tracing_cpu, cpu)))
kernel/trace/trace_irqsoff.c
125
*data = per_cpu_ptr(tr->array_buffer.data, cpu);
kernel/trace/trace_irqsoff.c
162
int cpu;
kernel/trace/trace_irqsoff.c
169
for_each_possible_cpu(cpu)
kernel/trace/trace_irqsoff.c
170
per_cpu(tracing_cpu, cpu) = 0;
kernel/trace/trace_irqsoff.c
345
int cpu)
kernel/trace/trace_irqsoff.c
352
T1 = ftrace_now(cpu);
kernel/trace/trace_irqsoff.c
377
update_max_tr_single(tr, current, cpu);
kernel/trace/trace_irqsoff.c
387
data->preempt_timestamp = ftrace_now(cpu);
kernel/trace/trace_irqsoff.c
394
int cpu;
kernel/trace/trace_irqsoff.c
402
cpu = raw_smp_processor_id();
kernel/trace/trace_irqsoff.c
404
if (per_cpu(tracing_cpu, cpu))
kernel/trace/trace_irqsoff.c
407
data = per_cpu_ptr(tr->array_buffer.data, cpu);
kernel/trace/trace_irqsoff.c
416
data->preempt_timestamp = ftrace_now(cpu);
kernel/trace/trace_irqsoff.c
421
per_cpu(tracing_cpu, cpu) = 1;
kernel/trace/trace_irqsoff.c
430
int cpu;
kernel/trace/trace_irqsoff.c
436
cpu = raw_smp_processor_id();
kernel/trace/trace_irqsoff.c
438
if (unlikely(per_cpu(tracing_cpu, cpu)))
kernel/trace/trace_irqsoff.c
439
per_cpu(tracing_cpu, cpu) = 0;
kernel/trace/trace_irqsoff.c
446
data = per_cpu_ptr(tr->array_buffer.data, cpu);
kernel/trace/trace_irqsoff.c
457
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
kernel/trace/trace_kdb.c
27
int cnt = 0, cpu;
kernel/trace/trace_kdb.c
44
for_each_tracing_cpu(cpu) {
kernel/trace/trace_kdb.c
45
iter.buffer_iter[cpu] =
kernel/trace/trace_kdb.c
47
cpu, GFP_ATOMIC);
kernel/trace/trace_kdb.c
48
tracing_iter_reset(&iter, cpu);
kernel/trace/trace_kdb.c
82
for_each_tracing_cpu(cpu) {
kernel/trace/trace_kdb.c
83
if (iter.buffer_iter[cpu]) {
kernel/trace/trace_kdb.c
84
ring_buffer_read_finish(iter.buffer_iter[cpu]);
kernel/trace/trace_kdb.c
85
iter.buffer_iter[cpu] = NULL;
kernel/trace/trace_kprobe.c
184
int cpu;
kernel/trace/trace_kprobe.c
186
for_each_possible_cpu(cpu)
kernel/trace/trace_kprobe.c
187
nhit += *per_cpu_ptr(tk->nhit, cpu);
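The nhit accumulation above is the canonical way to read a lockless per-CPU statistic: iterate every possible CPU and sum the slots. A sketch under the same assumptions; the counter itself is hypothetical:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    static DEFINE_PER_CPU(unsigned long, demo_nhit); /* bumped locally, no locks */

    static unsigned long demo_nhit_total(void)
    {
            unsigned long nhit = 0;
            int cpu;

            /* Sum over *possible* CPUs so counts from CPUs that went
             * offline are not lost. The result is approximate while
             * updates are in flight, which is fine for statistics. */
            for_each_possible_cpu(cpu)
                    nhit += *per_cpu_ptr(&demo_nhit, cpu);
            return nhit;
    }
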
kernel/trace/trace_osnoise.c
1168
static __always_inline void osnoise_stop_exception(char *msg, int cpu)
kernel/trace/trace_osnoise.c
1200
long cpu = task_cpu(p);
kernel/trace/trace_osnoise.c
1202
osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
kernel/trace/trace_osnoise.c
1203
if (osn_var->pid == p->pid && dest_cpu != cpu) {
kernel/trace/trace_osnoise.c
1204
per_cpu_ptr(&per_cpu_timerlat_var, cpu)->uthread_migrate = 1;
kernel/trace/trace_osnoise.c
1206
osnoise_stop_exception("timerlat user-thread migrated", cpu);
kernel/trace/trace_osnoise.c
1932
static void stop_kthread(unsigned int cpu)
kernel/trace/trace_osnoise.c
1936
kthread = xchg_relaxed(&(per_cpu(per_cpu_osnoise_var, cpu).kthread), NULL);
kernel/trace/trace_osnoise.c
1938
if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask) &&
kernel/trace/trace_osnoise.c
1956
per_cpu(per_cpu_osnoise_var, cpu).sampling = false;
kernel/trace/trace_osnoise.c
1970
int cpu;
kernel/trace/trace_osnoise.c
1974
for_each_online_cpu(cpu)
kernel/trace/trace_osnoise.c
1975
stop_kthread(cpu);
kernel/trace/trace_osnoise.c
1983
static int start_kthread(unsigned int cpu)
kernel/trace/trace_osnoise.c
1990
if (per_cpu(per_cpu_osnoise_var, cpu).kthread)
kernel/trace/trace_osnoise.c
1994
snprintf(comm, 24, "timerlat/%d", cpu);
kernel/trace/trace_osnoise.c
1999
per_cpu(per_cpu_osnoise_var, cpu).sampling = true;
kernel/trace/trace_osnoise.c
2003
snprintf(comm, 24, "osnoise/%d", cpu);
kernel/trace/trace_osnoise.c
2006
kthread = kthread_run_on_cpu(main, NULL, cpu, comm);
kernel/trace/trace_osnoise.c
2013
per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread;
kernel/trace/trace_osnoise.c
2014
cpumask_set_cpu(cpu, &kthread_cpumask);
kernel/trace/trace_osnoise.c
2029
int cpu;
kernel/trace/trace_osnoise.c
2042
for_each_possible_cpu(cpu) {
kernel/trace/trace_osnoise.c
2043
if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask)) {
kernel/trace/trace_osnoise.c
2046
kthread = xchg_relaxed(&(per_cpu(per_cpu_osnoise_var, cpu).kthread), NULL);
kernel/trace/trace_osnoise.c
2052
for_each_cpu(cpu, current_mask) {
kernel/trace/trace_osnoise.c
2053
retval = start_kthread(cpu);
kernel/trace/trace_osnoise.c
2069
unsigned int cpu = smp_processor_id();
kernel/trace/trace_osnoise.c
2079
if (!cpu_online(cpu))
kernel/trace/trace_osnoise.c
2082
if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
kernel/trace/trace_osnoise.c
2085
start_kthread(cpu);
kernel/trace/trace_osnoise.c
2093
static int osnoise_cpu_init(unsigned int cpu)
kernel/trace/trace_osnoise.c
2095
schedule_work_on(cpu, &osnoise_hotplug_work);
kernel/trace/trace_osnoise.c
2102
static int osnoise_cpu_die(unsigned int cpu)
kernel/trace/trace_osnoise.c
2104
stop_kthread(cpu);
kernel/trace/trace_osnoise.c
2377
long cpu = (long) inode->i_cdev;
kernel/trace/trace_osnoise.c
2410
if (current->nr_cpus_allowed > 1 || cpu != smp_processor_id()) {
kernel/trace/trace_osnoise.c
2453
long cpu = (long) file->private_data;
kernel/trace/trace_osnoise.c
2469
if (cpu == smp_processor_id()) {
kernel/trace/trace_osnoise.c
2475
per_cpu_ptr(&per_cpu_timerlat_var, cpu)->uthread_migrate = 1;
kernel/trace/trace_osnoise.c
2560
long cpu = (long) file->private_data;
kernel/trace/trace_osnoise.c
2565
osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
kernel/trace/trace_osnoise.c
2566
tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
kernel/trace/trace_osnoise.c
267
int cpu;
kernel/trace/trace_osnoise.c
2702
long cpu;
kernel/trace/trace_osnoise.c
2714
for_each_possible_cpu(cpu) {
kernel/trace/trace_osnoise.c
2715
snprintf(cpu_str, 30, "cpu%ld", cpu);
kernel/trace/trace_osnoise.c
2726
d_inode(timerlat_fd)->i_cdev = (void *)(cpu);
kernel/trace/trace_osnoise.c
275
for_each_online_cpu(cpu) {
kernel/trace/trace_osnoise.c
276
tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
kernel/trace/trace_osnoise.c
293
int cpu;
kernel/trace/trace_osnoise.c
299
for_each_online_cpu(cpu) {
kernel/trace/trace_osnoise.c
300
osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
kernel/trace/trace_osnoise.c
3023
int cpu;
kernel/trace/trace_osnoise.c
3031
for_each_online_cpu(cpu)
kernel/trace/trace_osnoise.c
3032
per_cpu(per_cpu_osnoise_var, cpu).sampling = 0;
kernel/trace/trace_output.c
551
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
kernel/trace/trace_output.c
558
comm, entry->pid, cpu);
kernel/trace/trace_output.c
672
trace_seq_printf(s, "[%03d] ", iter->cpu);
kernel/trace/trace_output.c
705
comm, entry->pid, iter->cpu, entry->flags,
kernel/trace/trace_output.c
708
lat_print_generic(s, entry, iter->cpu);
kernel/trace/trace_sched_wakeup.c
379
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
kernel/trace/trace_sched_wakeup.c
384
wakeup_current_cpu = cpu;
kernel/trace/trace_sched_wakeup.c
448
int cpu;
kernel/trace/trace_sched_wakeup.c
469
cpu = raw_smp_processor_id();
kernel/trace/trace_sched_wakeup.c
470
disabled = local_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
kernel/trace/trace_sched_wakeup.c
491
T1 = ftrace_now(cpu);
kernel/trace/trace_sched_wakeup.c
507
local_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
kernel/trace/trace_sched_wakeup.c
539
int cpu = smp_processor_id();
kernel/trace/trace_sched_wakeup.c
562
disabled = local_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
kernel/trace/trace_sched_wakeup.c
595
data->preempt_timestamp = ftrace_now(cpu);
kernel/trace/trace_sched_wakeup.c
609
local_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
kernel/trace/trace_sched_wakeup.c
73
int cpu;
kernel/trace/trace_sched_wakeup.c
81
cpu = raw_smp_processor_id();
kernel/trace/trace_sched_wakeup.c
82
if (cpu != wakeup_current_cpu)
kernel/trace/trace_sched_wakeup.c
85
*data = per_cpu_ptr(tr->array_buffer.data, cpu);
kernel/trace/trace_selftest.c
27
static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
kernel/trace/trace_selftest.c
33
while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
kernel/trace/trace_selftest.c
67
int cpu, ret = 0;
kernel/trace/trace_selftest.c
83
for_each_possible_cpu(cpu) {
kernel/trace/trace_selftest.c
84
ret = trace_test_buffer_cpu(buf, cpu);
kernel/trace/trace_uprobe.c
827
int cpu;
kernel/trace/trace_uprobe.c
835
for_each_possible_cpu(cpu) {
kernel/trace/trace_uprobe.c
836
nhits += per_cpu(*tu->nhits, cpu);
kernel/trace/trace_uprobe.c
881
int cpu, err_cpu;
kernel/trace/trace_uprobe.c
887
for_each_possible_cpu(cpu) {
kernel/trace/trace_uprobe.c
888
struct page *p = alloc_pages_node(cpu_to_node(cpu),
kernel/trace/trace_uprobe.c
891
err_cpu = cpu;
kernel/trace/trace_uprobe.c
894
per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
kernel/trace/trace_uprobe.c
895
mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
kernel/trace/trace_uprobe.c
901
for_each_possible_cpu(cpu) {
kernel/trace/trace_uprobe.c
902
if (cpu == err_cpu)
kernel/trace/trace_uprobe.c
904
free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
kernel/trace/trace_uprobe.c
928
int cpu;
kernel/trace/trace_uprobe.c
933
for_each_possible_cpu(cpu)
kernel/trace/trace_uprobe.c
935
cpu)->buf);
kernel/trace/trace_uprobe.c
945
int cpu;
kernel/trace/trace_uprobe.c
947
cpu = raw_smp_processor_id();
kernel/trace/trace_uprobe.c
948
ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
kernel/unwind/deferred.c
84
if (info->id.cpu)
kernel/unwind/deferred.c
94
info->id.cpu = smp_processor_id() + 1; /* Must be non zero */
kernel/up.c
12
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
kernel/up.c
17
if (cpu != 0)
kernel/up.c
28
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
kernel/up.c
58
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
kernel/up.c
62
if (cpu != 0)
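kernel/up.c above is the uniprocessor stub of this API: with only CPU 0 present, asking for any other CPU fails, and the function otherwise runs inline. A sketch of a caller, with a hypothetical callback:

    #include <linux/smp.h>

    static void remote_fn(void *info)       /* runs on the target CPU */
    {
            *(int *)info = raw_smp_processor_id();
    }

    static int run_on(int cpu)
    {
            int where = -1;
            int err;

            /* wait=1: block until remote_fn has finished on @cpu.
             * On UP kernels, any cpu != 0 is rejected (kernel/up.c:17). */
            err = smp_call_function_single(cpu, remote_fn, &where, 1);
            return err ? err : where;
    }
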
kernel/watchdog.c
157
void watchdog_hardlockup_touch_cpu(unsigned int cpu)
kernel/watchdog.c
159
per_cpu(watchdog_hardlockup_touched, cpu) = true;
kernel/watchdog.c
162
static bool is_hardlockup(unsigned int cpu)
kernel/watchdog.c
164
int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));
kernel/watchdog.c
166
if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
kernel/watchdog.c
174
per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
kernel/watchdog.c
187
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
kernel/watchdog.c
191
if (per_cpu(watchdog_hardlockup_touched, cpu)) {
kernel/watchdog.c
192
per_cpu(watchdog_hardlockup_touched, cpu) = false;
kernel/watchdog.c
204
if (is_hardlockup(cpu)) {
kernel/watchdog.c
218
if (scx_hardlockup(cpu))
kernel/watchdog.c
222
if (per_cpu(watchdog_hardlockup_warned, cpu))
kernel/watchdog.c
242
pr_emerg("CPU%u: Watchdog detected hard LOCKUP on cpu %u\n", this_cpu, cpu);
kernel/watchdog.c
247
if (cpu == this_cpu) {
kernel/watchdog.c
255
trigger_single_cpu_backtrace(cpu);
kernel/watchdog.c
259
trigger_allbutcpu_cpu_backtrace(cpu);
kernel/watchdog.c
268
per_cpu(watchdog_hardlockup_warned, cpu) = true;
kernel/watchdog.c
270
per_cpu(watchdog_hardlockup_warned, cpu) = false;
kernel/watchdog.c
287
void __weak watchdog_hardlockup_enable(unsigned int cpu) { }
kernel/watchdog.c
289
void __weak watchdog_hardlockup_disable(unsigned int cpu) { }
kernel/watchdog.c
696
int cpu;
kernel/watchdog.c
707
for_each_cpu(cpu, &watchdog_allowed_mask) {
kernel/watchdog.c
708
per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
kernel/watchdog.c
709
wq_watchdog_touch(cpu);
kernel/watchdog.c
891
static void watchdog_enable(unsigned int cpu)
kernel/watchdog.c
896
WARN_ON_ONCE(cpu != smp_processor_id());
kernel/watchdog.c
913
watchdog_hardlockup_enable(cpu);
kernel/watchdog.c
916
static void watchdog_disable(unsigned int cpu)
kernel/watchdog.c
920
WARN_ON_ONCE(cpu != smp_processor_id());
kernel/watchdog.c
927
watchdog_hardlockup_disable(cpu);
kernel/watchdog.c
940
int cpu;
kernel/watchdog.c
945
for_each_cpu(cpu, &watchdog_allowed_mask)
kernel/watchdog.c
946
smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);
kernel/watchdog.c
959
int cpu;
kernel/watchdog.c
962
for_each_cpu(cpu, &watchdog_allowed_mask)
kernel/watchdog.c
963
smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
kernel/watchdog.c
966
int lockup_detector_online_cpu(unsigned int cpu)
kernel/watchdog.c
968
if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
kernel/watchdog.c
969
watchdog_enable(cpu);
kernel/watchdog.c
973
int lockup_detector_offline_cpu(unsigned int cpu)
kernel/watchdog.c
975
if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
kernel/watchdog.c
976
watchdog_disable(cpu);
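is_hardlockup() above encodes the whole detection idea: the softlockup hrtimer bumps a per-CPU interrupt counter, and the NMI-driven check declares a hard lockup when that counter has not moved since the previous check. A minimal sketch of that comparison; the per-CPU names are hypothetical stand-ins for the kernel's state:

    #include <linux/types.h>
    #include <linux/percpu.h>
    #include <linux/atomic.h>

    static DEFINE_PER_CPU(atomic_t, hrint_count);   /* bumped by the hrtimer */
    static DEFINE_PER_CPU(int, hrint_saved);        /* last value seen */

    static bool cpu_looks_locked_up(unsigned int cpu)
    {
            int hrint = atomic_read(&per_cpu(hrint_count, cpu));

            if (per_cpu(hrint_saved, cpu) == hrint)
                    return true;    /* no timer ticks since last check */

            per_cpu(hrint_saved, cpu) = hrint;
            return false;
    }
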
kernel/watchdog_buddy.c
11
static unsigned int watchdog_next_cpu(unsigned int cpu)
kernel/watchdog_buddy.c
15
next_cpu = cpumask_next_wrap(cpu, &watchdog_cpus);
kernel/watchdog_buddy.c
16
if (next_cpu == cpu)
kernel/watchdog_buddy.c
27
void watchdog_hardlockup_enable(unsigned int cpu)
kernel/watchdog_buddy.c
39
watchdog_hardlockup_touch_cpu(cpu);
kernel/watchdog_buddy.c
47
next_cpu = watchdog_next_cpu(cpu);
kernel/watchdog_buddy.c
58
cpumask_set_cpu(cpu, &watchdog_cpus);
kernel/watchdog_buddy.c
61
void watchdog_hardlockup_disable(unsigned int cpu)
kernel/watchdog_buddy.c
63
unsigned int next_cpu = watchdog_next_cpu(cpu);
kernel/watchdog_buddy.c
82
cpumask_clear_cpu(cpu, &watchdog_cpus);
kernel/watchdog_perf.c
121
static struct perf_event *hardlockup_detector_event_create(unsigned int cpu)
kernel/watchdog_perf.c
130
evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
kernel/watchdog_perf.c
135
evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
kernel/watchdog_perf.c
146
void watchdog_hardlockup_enable(unsigned int cpu)
kernel/watchdog_perf.c
150
WARN_ON_ONCE(cpu != smp_processor_id());
kernel/watchdog_perf.c
152
evt = hardlockup_detector_event_create(cpu);
kernel/watchdog_perf.c
154
pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
kernel/watchdog_perf.c
174
void watchdog_hardlockup_disable(unsigned int cpu)
kernel/watchdog_perf.c
178
WARN_ON_ONCE(cpu != smp_processor_id());
kernel/watchdog_perf.c
217
int cpu;
kernel/watchdog_perf.c
221
for_each_online_cpu(cpu) {
kernel/watchdog_perf.c
222
struct perf_event *event = per_cpu(watchdog_ev, cpu);
kernel/watchdog_perf.c
236
int cpu;
kernel/watchdog_perf.c
243
for_each_online_cpu(cpu) {
kernel/watchdog_perf.c
244
struct perf_event *event = per_cpu(watchdog_ev, cpu);
kernel/watchdog_perf.c
262
unsigned int cpu;
kernel/watchdog_perf.c
275
cpu = raw_smp_processor_id();
kernel/watchdog_perf.c
276
evt = hardlockup_detector_event_create(cpu);
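hardlockup_detector_event_create() above boils down to one kernel perf counter pinned to a CPU with an overflow callback. A sketch of that creation, assuming a kernel context; the attribute values and names are illustrative, not the watchdog's actual configuration:

    #include <linux/perf_event.h>
    #include <linux/smp.h>
    #include <linux/printk.h>

    static void demo_overflow(struct perf_event *event,
                              struct perf_sample_data *data,
                              struct pt_regs *regs)
    {
            pr_warn("counter overflow on CPU %d\n", raw_smp_processor_id());
    }

    static struct perf_event *demo_counter_create(unsigned int cpu)
    {
            static struct perf_event_attr attr = {
                    .type          = PERF_TYPE_HARDWARE,
                    .config        = PERF_COUNT_HW_CPU_CYCLES,
                    .size          = sizeof(struct perf_event_attr),
                    .sample_period = 1000000000ULL, /* illustrative period */
            };

            /* task == NULL: a CPU-wide counter bound to @cpu. */
            return perf_event_create_kernel_counter(&attr, cpu, NULL,
                                                    demo_overflow, NULL);
    }
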
kernel/workqueue.c
1226
return &per_cpu(bh_pool_irq_works, pool->cpu)[high];
kernel/workqueue.c
1233
if (unlikely(pool->cpu != smp_processor_id() &&
kernel/workqueue.c
1235
irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu);
kernel/workqueue.c
188
int cpu; /* I: the associated cpu */
kernel/workqueue.c
2228
static int wq_select_unbound_cpu(int cpu)
kernel/workqueue.c
2233
if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
kernel/workqueue.c
2234
return cpu;
kernel/workqueue.c
2242
return cpu;
kernel/workqueue.c
2248
static void __queue_work(int cpu, struct workqueue_struct *wq,
kernel/workqueue.c
2254
unsigned int req_cpu = cpu;
kernel/workqueue.c
2279
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
kernel/workqueue.c
2281
cpu = raw_smp_processor_id();
kernel/workqueue.c
2284
pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
kernel/workqueue.c
2332
wq->name, cpu);
kernel/workqueue.c
2395
bool queue_work_on(int cpu, struct workqueue_struct *wq,
kernel/workqueue.c
2405
__queue_work(cpu, wq, work);
kernel/workqueue.c
2425
int cpu;
kernel/workqueue.c
2432
cpu = raw_smp_processor_id();
kernel/workqueue.c
2433
if (node == cpu_to_node(cpu))
kernel/workqueue.c
2434
return cpu;
kernel/workqueue.c
2437
cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
kernel/workqueue.c
2440
return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
kernel/workqueue.c
2484
int cpu = select_numa_node_cpu(node);
kernel/workqueue.c
2486
__queue_work(cpu, wq, work);
kernel/workqueue.c
2500
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
kernel/workqueue.c
2504
static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
kernel/workqueue.c
2522
__queue_work(cpu, wq, &dwork->work);
kernel/workqueue.c
2526
WARN_ON_ONCE(cpu != WORK_CPU_UNBOUND && !cpu_online(cpu));
kernel/workqueue.c
2528
dwork->cpu = cpu;
kernel/workqueue.c
2533
cpu = smp_processor_id();
kernel/workqueue.c
2534
if (!housekeeping_test_cpu(cpu, HK_TYPE_TIMER))
kernel/workqueue.c
2535
cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
kernel/workqueue.c
2536
add_timer_on(timer, cpu);
kernel/workqueue.c
2538
if (likely(cpu == WORK_CPU_UNBOUND))
kernel/workqueue.c
2541
add_timer_on(timer, cpu);
kernel/workqueue.c
2562
bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
kernel/workqueue.c
2574
__queue_delayed_work(cpu, wq, dwork, delay);
kernel/workqueue.c
2601
bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
kernel/workqueue.c
2610
__queue_delayed_work(cpu, wq, dwork, delay);
kernel/workqueue.c
2673
if (pool->cpu < 0 && pool->attrs->affn_strict)
kernel/workqueue.c
2702
kthread_set_per_cpu(worker->task, pool->cpu);
kernel/workqueue.c
2766
if (pool->cpu >= 0)
kernel/workqueue.c
2768
pool->cpu, worker->id,
kernel/workqueue.c
3197
raw_smp_processor_id() != pool->cpu);
kernel/workqueue.c
3747
void workqueue_softirq_dead(unsigned int cpu)
kernel/workqueue.c
3752
struct worker_pool *pool = &per_cpu(bh_worker_pools, cpu)[i];
kernel/workqueue.c
4369
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
kernel/workqueue.c
4641
int cpu;
kernel/workqueue.c
4650
for_each_online_cpu(cpu) {
kernel/workqueue.c
4651
struct work_struct *work = per_cpu_ptr(works, cpu);
kernel/workqueue.c
4654
schedule_work_on(cpu, work);
kernel/workqueue.c
4657
for_each_online_cpu(cpu)
kernel/workqueue.c
4658
flush_work(per_cpu_ptr(works, cpu));
kernel/workqueue.c
4851
pool->cpu = -1;
kernel/workqueue.c
5025
if (WARN_ON(!(pool->cpu < 0)) ||
kernel/workqueue.c
5301
static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu)
kernel/workqueue.c
5304
int pod = pt->cpu_pod[cpu];
kernel/workqueue.c
5317
int cpu, struct pool_workqueue *pwq)
kernel/workqueue.c
5319
struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu);
kernel/workqueue.c
5346
int cpu;
kernel/workqueue.c
5348
for_each_possible_cpu(cpu)
kernel/workqueue.c
5349
put_pwq_unlocked(ctx->pwq_tbl[cpu]);
kernel/workqueue.c
5366
int cpu;
kernel/workqueue.c
5392
for_each_possible_cpu(cpu) {
kernel/workqueue.c
5395
ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
kernel/workqueue.c
5397
wq_calc_pod_cpumask(new_attrs, cpu);
kernel/workqueue.c
5398
ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs);
kernel/workqueue.c
5399
if (!ctx->pwq_tbl[cpu])
kernel/workqueue.c
5431
int cpu;
kernel/workqueue.c
5439
for_each_possible_cpu(cpu)
kernel/workqueue.c
5440
ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
kernel/workqueue.c
5441
ctx->pwq_tbl[cpu]);
kernel/workqueue.c
547
#define for_each_bh_worker_pool(pool, cpu) \
kernel/workqueue.c
548
for ((pool) = &per_cpu(bh_worker_pools, cpu)[0]; \
kernel/workqueue.c
549
(pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
kernel/workqueue.c
5516
static void unbound_wq_update_pwq(struct workqueue_struct *wq, int cpu)
kernel/workqueue.c
552
#define for_each_cpu_worker_pool(pool, cpu) \
kernel/workqueue.c
553
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
kernel/workqueue.c
5537
wq_calc_pod_cpumask(target_attrs, cpu);
kernel/workqueue.c
5538
if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs))
kernel/workqueue.c
554
(pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
kernel/workqueue.c
5551
old_pwq = install_unbound_pwq(wq, cpu, pwq);
kernel/workqueue.c
5560
old_pwq = install_unbound_pwq(wq, cpu, pwq);
kernel/workqueue.c
5569
int cpu, ret;
kernel/workqueue.c
5585
for_each_possible_cpu(cpu) {
kernel/workqueue.c
5589
pool = &(per_cpu_ptr(pools, cpu)[highpri]);
kernel/workqueue.c
5590
pwq_p = per_cpu_ptr(wq->cpu_pwq, cpu);
kernel/workqueue.c
5623
for_each_possible_cpu(cpu) {
kernel/workqueue.c
5624
struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
kernel/workqueue.c
5953
int cpu;
kernel/workqueue.c
6013
for_each_possible_cpu(cpu) {
kernel/workqueue.c
6014
put_pwq_unlocked(unbound_pwq(wq, cpu));
kernel/workqueue.c
6015
RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL);
kernel/workqueue.c
6136
bool workqueue_congested(int cpu, struct workqueue_struct *wq)
kernel/workqueue.c
6143
if (cpu == WORK_CPU_UNBOUND)
kernel/workqueue.c
6144
cpu = smp_processor_id();
kernel/workqueue.c
6146
pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
kernel/workqueue.c
6601
static void unbind_workers(int cpu)
kernel/workqueue.c
6606
for_each_cpu_worker_pool(pool, cpu) {
kernel/workqueue.c
6669
kthread_set_per_cpu(worker->task, pool->cpu);
kernel/workqueue.c
6715
static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
kernel/workqueue.c
6723
if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
kernel/workqueue.c
6733
int workqueue_prepare_cpu(unsigned int cpu)
kernel/workqueue.c
6737
for_each_cpu_worker_pool(pool, cpu) {
kernel/workqueue.c
6746
int workqueue_online_cpu(unsigned int cpu)
kernel/workqueue.c
6754
cpumask_set_cpu(cpu, wq_online_cpumask);
kernel/workqueue.c
6762
if (pool->cpu == cpu)
kernel/workqueue.c
6764
else if (pool->cpu < 0)
kernel/workqueue.c
6765
restore_unbound_workers_cpumask(pool, cpu);
kernel/workqueue.c
6777
for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
kernel/workqueue.c
6790
int workqueue_offline_cpu(unsigned int cpu)
kernel/workqueue.c
6795
if (WARN_ON(cpu != smp_processor_id()))
kernel/workqueue.c
6798
unbind_workers(cpu);
kernel/workqueue.c
6803
cpumask_clear_cpu(cpu, wq_online_cpumask);
kernel/workqueue.c
6812
for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
kernel/workqueue.c
6816
wq_update_node_max_active(wq, cpu);
kernel/workqueue.c
6851
long work_on_cpu_key(int cpu, long (*fn)(void *),
kernel/workqueue.c
6857
schedule_work_on(cpu, &wfc.work);
kernel/workqueue.c
7002
int cpu;
kernel/workqueue.c
7014
for_each_possible_cpu(cpu) {
kernel/workqueue.c
7015
for_each_cpu_worker_pool(pool, cpu) {
kernel/workqueue.c
7081
int affn, cpu;
kernel/workqueue.c
7095
for_each_online_cpu(cpu)
kernel/workqueue.c
7096
unbound_wq_update_pwq(wq, cpu);
kernel/workqueue.c
725
unbound_pwq_slot(struct workqueue_struct *wq, int cpu)
kernel/workqueue.c
727
if (cpu >= 0)
kernel/workqueue.c
728
return per_cpu_ptr(wq->cpu_pwq, cpu);
kernel/workqueue.c
734
static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu)
kernel/workqueue.c
736
return rcu_dereference_check(*unbound_pwq_slot(wq, cpu),
kernel/workqueue.c
7656
int cpu;
kernel/workqueue.c
7659
for_each_possible_cpu(cpu)
kernel/workqueue.c
7660
per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
kernel/workqueue.c
7691
if (pool->cpu >= 0)
kernel/workqueue.c
7692
touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
kernel/workqueue.c
7728
if (pool->cpu >= 0 && !(pool->flags & POOL_BH)) {
kernel/workqueue.c
7751
notrace void wq_watchdog_touch(int cpu)
kernel/workqueue.c
7757
if (cpu >= 0)
kernel/workqueue.c
7758
per_cpu(wq_watchdog_touched_cpu, cpu) = now;
kernel/workqueue.c
7838
static void __init init_cpu_worker_pool(struct worker_pool *pool, int cpu, int nice)
kernel/workqueue.c
7841
pool->cpu = cpu;
kernel/workqueue.c
7842
cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
kernel/workqueue.c
7843
cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
kernel/workqueue.c
7846
pool->node = cpu_to_node(cpu);
kernel/workqueue.c
7870
int i, cpu;
kernel/workqueue.c
7915
for_each_possible_cpu(cpu) {
kernel/workqueue.c
7919
for_each_bh_worker_pool(pool, cpu) {
kernel/workqueue.c
7920
init_cpu_worker_pool(pool, cpu, std_nice[i]);
kernel/workqueue.c
7927
for_each_cpu_worker_pool(pool, cpu)
kernel/workqueue.c
7928
init_cpu_worker_pool(pool, cpu, std_nice[i++]);
kernel/workqueue.c
8024
int cpu, bkt;
kernel/workqueue.c
8034
for_each_possible_cpu(cpu) {
kernel/workqueue.c
8035
for_each_bh_worker_pool(pool, cpu)
kernel/workqueue.c
8036
pool->node = cpu_to_node(cpu);
kernel/workqueue.c
8037
for_each_cpu_worker_pool(pool, cpu)
kernel/workqueue.c
8038
pool->node = cpu_to_node(cpu);
kernel/workqueue.c
8055
for_each_possible_cpu(cpu)
kernel/workqueue.c
8056
for_each_bh_worker_pool(pool, cpu)
kernel/workqueue.c
8059
for_each_online_cpu(cpu) {
kernel/workqueue.c
8060
for_each_cpu_worker_pool(pool, cpu) {
kernel/workqueue.c
8081
int cur, pre, cpu, pod;
kernel/workqueue.c
8110
for_each_possible_cpu(cpu) {
kernel/workqueue.c
8111
cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
kernel/workqueue.c
8112
pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
kernel/workqueue.c
8145
int cpu;
kernel/workqueue.c
8162
for_each_online_cpu(cpu)
kernel/workqueue.c
8163
unbound_wq_update_pwq(wq, cpu);
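The loop at workqueue.c:4650-4658 above is the standard fan-out: queue one work item per online CPU, then flush them all. A sketch of the same pattern, assuming a kernel-module context; the work body and per-CPU area are hypothetical. This is essentially what schedule_on_each_cpu() does internally:

    #include <linux/workqueue.h>
    #include <linux/percpu.h>
    #include <linux/cpu.h>
    #include <linux/smp.h>
    #include <linux/printk.h>

    static DEFINE_PER_CPU(struct work_struct, fanout_work);

    static void fanout_fn(struct work_struct *work)
    {
            pr_info("running on CPU %d\n", raw_smp_processor_id());
    }

    static void run_on_each_online_cpu(void)
    {
            int cpu;

            cpus_read_lock();               /* keep the online set stable */
            for_each_online_cpu(cpu) {
                    struct work_struct *work = per_cpu_ptr(&fanout_work, cpu);

                    INIT_WORK(work, fanout_fn);
                    schedule_work_on(cpu, work);
            }
            for_each_online_cpu(cpu)
                    flush_work(per_cpu_ptr(&fanout_work, cpu));
            cpus_read_unlock();
    }
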
lib/cpu_rmap.c
100
if (rmap->near[cpu].dist > dist &&
lib/cpu_rmap.c
102
rmap->near[cpu].index = rmap->near[neigh].index;
lib/cpu_rmap.c
103
rmap->near[cpu].dist = dist;
lib/cpu_rmap.c
114
unsigned int cpu;
lib/cpu_rmap.c
118
for_each_possible_cpu(cpu) {
lib/cpu_rmap.c
119
index = rmap->near[cpu].index;
lib/cpu_rmap.c
121
cpu, index, rmap->near[cpu].dist);
lib/cpu_rmap.c
171
unsigned int cpu;
lib/cpu_rmap.c
179
for_each_online_cpu(cpu) {
lib/cpu_rmap.c
180
if (rmap->near[cpu].index == index) {
lib/cpu_rmap.c
181
rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
lib/cpu_rmap.c
182
cpumask_set_cpu(cpu, update_mask);
lib/cpu_rmap.c
191
for_each_cpu(cpu, affinity) {
lib/cpu_rmap.c
192
rmap->near[cpu].index = index;
lib/cpu_rmap.c
193
rmap->near[cpu].dist = 0;
lib/cpu_rmap.c
195
cpumask_of_node(cpu_to_node(cpu)));
lib/cpu_rmap.c
201
for_each_cpu(cpu, update_mask) {
lib/cpu_rmap.c
202
if (cpu_rmap_copy_neigh(rmap, cpu,
lib/cpu_rmap.c
203
topology_sibling_cpumask(cpu), 1))
lib/cpu_rmap.c
205
if (cpu_rmap_copy_neigh(rmap, cpu,
lib/cpu_rmap.c
206
topology_core_cpumask(cpu), 2))
lib/cpu_rmap.c
208
if (cpu_rmap_copy_neigh(rmap, cpu,
lib/cpu_rmap.c
209
cpumask_of_node(cpu_to_node(cpu)), 3))
lib/cpu_rmap.c
28
unsigned int cpu;
lib/cpu_rmap.c
52
for_each_possible_cpu(cpu) {
lib/cpu_rmap.c
53
rmap->near[cpu].index = cpu % size;
lib/cpu_rmap.c
54
rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
lib/cpu_rmap.c
94
static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
lib/cpumask.c
110
unsigned int cpu;
lib/cpumask.c
115
cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);
lib/cpumask.c
117
WARN_ON(cpu >= nr_cpu_ids);
lib/cpumask.c
118
return cpu;
lib/debugobjects.c
1154
unsigned int cpu, pool_used, pcp_free = 0;
lib/debugobjects.c
1162
for_each_possible_cpu(cpu)
lib/debugobjects.c
1163
pcp_free += per_cpu(pool_pcpu.cnt, cpu);
lib/debugobjects.c
572
static int object_cpu_offline(unsigned int cpu)
lib/debugobjects.c
575
struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);
lib/dhry_run.c
33
unsigned int cpu = get_cpu();
lib/dhry_run.c
50
pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n", cpu,
lib/group_cpus.c
18
int cpu, sibl;
lib/group_cpus.c
21
cpu = cpumask_first(nmsk);
lib/group_cpus.c
24
if (cpu >= nr_cpu_ids)
lib/group_cpus.c
27
cpumask_clear_cpu(cpu, nmsk);
lib/group_cpus.c
28
cpumask_set_cpu(cpu, irqmsk);
lib/group_cpus.c
297
unsigned int cpu, nc, n;
lib/group_cpus.c
306
cpu = cpumask_first(msk);
lib/group_cpus.c
307
if (cpu >= nr_cpu_ids)
lib/group_cpus.c
310
cluster_mask = topology_cluster_cpumask(cpu);
lib/group_cpus.c
32
siblmsk = topology_sibling_cpumask(cpu);
lib/group_cpus.c
333
cpu = cpumask_first(msk);
lib/group_cpus.c
334
cluster_mask = topology_cluster_cpumask(cpu);
lib/group_cpus.c
79
int cpu;
lib/group_cpus.c
81
for_each_possible_cpu(cpu)
lib/group_cpus.c
82
cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
lib/irq_poll.c
188
static int irq_poll_cpu_dead(unsigned int cpu)
lib/irq_poll.c
198
list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
lib/nmi_backtrace.c
107
cpu, (void *)instruction_pointer(regs));
lib/nmi_backtrace.c
109
pr_warn("NMI backtrace for cpu %d\n", cpu);
lib/nmi_backtrace.c
116
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
lib/nmi_backtrace.c
96
int cpu = smp_processor_id();
lib/nmi_backtrace.c
99
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
lib/percpu-refcount.c
175
int cpu;
lib/percpu-refcount.c
177
for_each_possible_cpu(cpu)
lib/percpu-refcount.c
178
count += *per_cpu_ptr(percpu_count, cpu);
lib/percpu-refcount.c
240
int cpu;
lib/percpu-refcount.c
258
for_each_possible_cpu(cpu)
lib/percpu-refcount.c
259
*per_cpu_ptr(percpu_count, cpu) = 0;
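The two loops above are the slow paths of lib/percpu-refcount.c: summing every possible CPU's slot when collapsing to atomic mode, and zeroing them when reinitializing per-CPU mode. From the outside the API is much smaller; a usage sketch with hypothetical names:

    #include <linux/percpu-refcount.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>

    static struct percpu_ref demo_ref;

    static void demo_release(struct percpu_ref *ref)
    {
            pr_info("last reference dropped\n");
    }

    static int demo_ref_setup(void)
    {
            int err = percpu_ref_init(&demo_ref, demo_release, 0, GFP_KERNEL);

            if (err)
                    return err;
            percpu_ref_get(&demo_ref);      /* per-CPU fast path */
            percpu_ref_put(&demo_ref);
            percpu_ref_kill(&demo_ref);     /* switch to atomic, drop initial ref */
            return 0;
    }
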
lib/percpu_counter.c
174
int cpu;
lib/percpu_counter.c
179
for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
lib/percpu_counter.c
180
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
lib/percpu_counter.c
258
static int compute_batch_value(unsigned int cpu)
lib/percpu_counter.c
266
static int percpu_counter_cpu_dead(unsigned int cpu)
lib/percpu_counter.c
271
compute_batch_value(cpu);
lib/percpu_counter.c
278
pcount = per_cpu_ptr(fbc->counters, cpu);
lib/percpu_counter.c
369
int cpu;
lib/percpu_counter.c
371
for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
lib/percpu_counter.c
372
pcount = per_cpu_ptr(fbc->counters, cpu);
lib/percpu_counter.c
62
int cpu;
lib/percpu_counter.c
66
for_each_possible_cpu(cpu) {
lib/percpu_counter.c
67
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
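lib/percpu_counter.c above shows both sides of the trade: updates touch only the local CPU's s32 delta, while the sum walks cpu_online_mask plus cpu_dying_mask for an exact value. A usage sketch, names hypothetical:

    #include <linux/percpu_counter.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>

    static struct percpu_counter events;

    static int counter_demo(void)
    {
            int err = percpu_counter_init(&events, 0, GFP_KERNEL);

            if (err)
                    return err;
            percpu_counter_inc(&events);            /* fast, per-CPU delta */
            percpu_counter_add(&events, 41);
            /* Slow but exact: folds every CPU's delta into the total. */
            pr_info("total: %lld\n", percpu_counter_sum(&events));
            percpu_counter_destroy(&events);
            return 0;
    }
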
lib/radix-tree.c
1578
static int radix_tree_cpu_dead(unsigned int cpu)
lib/radix-tree.c
1584
rtp = &per_cpu(radix_tree_preloads, cpu);
lib/sbitmap.c
636
static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
lib/sbitmap.c
639
data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
lib/sbitmap.c
677
unsigned int cpu)
lib/sbitmap.c
700
sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
lib/test_lockup.c
577
unsigned int cpu;
lib/test_lockup.c
583
for_each_online_cpu(cpu) {
lib/test_lockup.c
584
INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn);
lib/test_lockup.c
585
queue_work_on(cpu, system_highpri_wq,
lib/test_lockup.c
586
per_cpu_ptr(&test_works, cpu));
lib/test_lockup.c
590
for_each_online_cpu(cpu)
lib/test_lockup.c
591
flush_work(per_cpu_ptr(&test_works, cpu));
lib/test_objpool.c
244
int cpu, nthreads = 0;
lib/test_objpool.c
249
for_each_possible_cpu(cpu) {
lib/test_objpool.c
250
struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
lib/test_objpool.c
257
pr_info("CPU: %d duration: %lluus\n", cpu, item->duration);
lib/test_objpool.c
353
int cpu;
lib/test_objpool.c
363
for_each_possible_cpu(cpu) {
lib/test_objpool.c
364
struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
lib/test_objpool.c
370
if (!cpu_online(cpu))
lib/test_objpool.c
374
cpu, "ot_worker_%d");
lib/test_objpool.c
376
pr_err("failed to create thread for cpu %d\n", cpu);
lib/test_objpool.c
540
int cpu;
lib/test_objpool.c
550
for_each_possible_cpu(cpu) {
lib/test_objpool.c
551
struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
lib/test_objpool.c
557
if (!cpu_online(cpu))
lib/test_objpool.c
560
work = kthread_run_on_cpu(ot_thread_worker, item, cpu, "ot_worker_%d");
lib/test_objpool.c
562
pr_err("failed to create thread for cpu %d\n", cpu);
lib/tests/cpumask_kunit.c
20
int cpu, iter = 0; \
lib/tests/cpumask_kunit.c
21
for_each_cpu(cpu, m) \
lib/tests/cpumask_kunit.c
31
int cpu, iter = 0; \
lib/tests/cpumask_kunit.c
34
for_each_cpu_##op(cpu, mask1, mask2) \
lib/tests/cpumask_kunit.c
43
int cpu, iter = 0; \
lib/tests/cpumask_kunit.c
44
for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2) \
lib/tests/cpumask_kunit.c
52
int cpu, iter = 0; \
lib/tests/cpumask_kunit.c
53
for_each_##name##_cpu(cpu) \
mm/huge_memory.c
603
int cpu;
mm/huge_memory.c
605
for_each_possible_cpu(cpu) {
mm/huge_memory.c
606
struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
mm/internal.h
1144
bool need_mlock_drain(int cpu);
mm/internal.h
1146
void mlock_drain_remote(int cpu);
mm/internal.h
1224
static inline bool need_mlock_drain(int cpu) { return false; }
mm/internal.h
1226
static inline void mlock_drain_remote(int cpu) { }
mm/kasan/common.c
64
u32 cpu = raw_smp_processor_id();
mm/kasan/common.c
67
track->cpu = cpu;
mm/kasan/kasan.h
197
u64 cpu:20;
mm/kasan/quarantine.c
346
int cpu;
mm/kasan/quarantine.c
358
for_each_online_cpu(cpu) {
mm/kasan/quarantine.c
359
sq = per_cpu_ptr(&shrink_qlist, cpu);
mm/kasan/quarantine.c
383
static int kasan_cpu_online(unsigned int cpu)
mm/kasan/quarantine.c
389
static int kasan_cpu_offline(unsigned int cpu)
mm/kasan/report.c
275
prefix, track->pid, track->cpu,
mm/kasan/sw_tags.c
41
int cpu;
mm/kasan/sw_tags.c
43
for_each_possible_cpu(cpu)
mm/kasan/sw_tags.c
44
per_cpu(prng_state, cpu) = (u32)get_cycles();
mm/kfence/core.c
317
track->cpu = raw_smp_processor_id();
mm/kfence/kfence.h
50
int cpu;
mm/kfence/report.c
121
track->cpu, (unsigned long)ts_sec, rem_nsec / 1000,
mm/kmemleak.c
1396
unsigned int cpu;
mm/kmemleak.c
1399
for_each_possible_cpu(cpu) {
mm/kmemleak.c
1400
void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);
mm/kmemleak.c
1583
unsigned int cpu;
mm/kmemleak.c
1585
for_each_possible_cpu(cpu) {
mm/kmemleak.c
1586
void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
mm/memcontrol.c
2019
static void schedule_drain_work(int cpu, struct work_struct *work)
mm/memcontrol.c
2028
if (!cpu_is_isolated(cpu))
mm/memcontrol.c
2029
queue_work_on(cpu, memcg_wq, work);
mm/memcontrol.c
2038
int cpu, curcpu;
mm/memcontrol.c
2051
for_each_online_cpu(cpu) {
mm/memcontrol.c
2052
struct memcg_stock_pcp *memcg_st = &per_cpu(memcg_stock, cpu);
mm/memcontrol.c
2053
struct obj_stock_pcp *obj_st = &per_cpu(obj_stock, cpu);
mm/memcontrol.c
2059
if (cpu == curcpu)
mm/memcontrol.c
2062
schedule_drain_work(cpu, &memcg_st->work);
mm/memcontrol.c
2069
if (cpu == curcpu)
mm/memcontrol.c
2072
schedule_drain_work(cpu, &obj_st->work);
mm/memcontrol.c
2079
static int memcg_hotplug_cpu_dead(unsigned int cpu)
mm/memcontrol.c
2082
drain_obj_stock(&per_cpu(obj_stock, cpu));
mm/memcontrol.c
2083
drain_stock_fully(&per_cpu(memcg_stock, cpu));
mm/memcontrol.c
3750
int node, cpu;
mm/memcontrol.c
3776
for_each_possible_cpu(cpu) {
mm/memcontrol.c
3779
statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
mm/memcontrol.c
4060
int cpu)
mm/memcontrol.c
4101
int cpu)
mm/memcontrol.c
4105
static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
mm/memcontrol.c
4113
flush_nmi_stats(memcg, parent, cpu);
mm/memcontrol.c
4115
statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
mm/memcontrol.c
4148
lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
mm/memcontrol.c
5165
int cpu;
mm/memcontrol.c
5181
for_each_possible_cpu(cpu) {
mm/memcontrol.c
5182
INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
mm/memcontrol.c
5184
INIT_WORK(&per_cpu_ptr(&obj_stock, cpu)->work,
mm/memcontrol.c
568
int cpu)
mm/memcontrol.c
577
css_rstat_updated(&memcg->css, cpu);
mm/memcontrol.c
702
int cpu;
mm/memcontrol.c
710
cpu = get_cpu();
mm/memcontrol.c
714
memcg_rstat_updated(memcg, val, cpu);
mm/memcontrol.c
746
int cpu;
mm/memcontrol.c
754
cpu = get_cpu();
mm/memcontrol.c
763
memcg_rstat_updated(memcg, val, cpu);
mm/memcontrol.c
846
int cpu;
mm/memcontrol.c
854
cpu = get_cpu();
mm/memcontrol.c
857
memcg_rstat_updated(memcg, count, cpu);
mm/memory-failure.c
2642
int cpu;
mm/memory-failure.c
2644
for_each_possible_cpu(cpu) {
mm/memory-failure.c
2645
mf_cpu = &per_cpu(memory_failure_cpu, cpu);
mm/memory_hotplug.c
2149
int cpu;
mm/memory_hotplug.c
2151
for_each_present_cpu(cpu) {
mm/memory_hotplug.c
2152
if (cpu_to_node(cpu) == nid)
mm/mlock.c
223
void mlock_drain_remote(int cpu)
mm/mlock.c
227
WARN_ON_ONCE(cpu_online(cpu));
mm/mlock.c
228
fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
mm/mlock.c
233
bool need_mlock_drain(int cpu)
mm/mlock.c
235
return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
mm/mm_init.c
1554
int cpu;
mm/mm_init.c
1572
for_each_online_cpu(cpu) {
mm/mm_init.c
1575
p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
mm/numa_emulation.c
547
void numa_add_cpu(unsigned int cpu)
mm/numa_emulation.c
551
nid = early_cpu_to_node(cpu);
mm/numa_emulation.c
562
cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
mm/numa_emulation.c
565
void numa_remove_cpu(unsigned int cpu)
mm/numa_emulation.c
570
cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
mm/numa_emulation.c
573
static void numa_set_cpumask(unsigned int cpu, bool enable)
mm/numa_emulation.c
577
nid = early_cpu_to_node(cpu);
mm/numa_emulation.c
589
debug_cpumask_set_cpu(cpu, nid, enable);
mm/numa_emulation.c
593
void numa_add_cpu(unsigned int cpu)
mm/numa_emulation.c
595
numa_set_cpumask(cpu, true);
mm/numa_emulation.c
598
void numa_remove_cpu(unsigned int cpu)
mm/numa_emulation.c
600
numa_set_cpumask(cpu, false);
mm/page-writeback.c
2222
static int page_writeback_cpu_online(unsigned int cpu)
mm/page_alloc.c
2646
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
mm/page_alloc.c
2648
struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
mm/page_alloc.c
2669
static void drain_pages(unsigned int cpu)
mm/page_alloc.c
2674
drain_pages_zone(cpu, zone);
mm/page_alloc.c
2683
int cpu = smp_processor_id();
mm/page_alloc.c
2686
drain_pages_zone(cpu, zone);
mm/page_alloc.c
2688
drain_pages(cpu);
mm/page_alloc.c
2703
int cpu;
mm/page_alloc.c
2728
for_each_online_cpu(cpu) {
mm/page_alloc.c
2740
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
mm/page_alloc.c
2745
pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
mm/page_alloc.c
2754
cpumask_set_cpu(cpu, &cpus_with_pcps);
mm/page_alloc.c
2756
cpumask_clear_cpu(cpu, &cpus_with_pcps);
mm/page_alloc.c
2759
for_each_cpu(cpu, &cpus_with_pcps) {
mm/page_alloc.c
2761
drain_pages_zone(cpu, zone);
mm/page_alloc.c
2763
drain_pages(cpu);
mm/page_alloc.c
2866
int cpu = smp_processor_id();
mm/page_alloc.c
2934
if (smp_processor_id() != cpu) {
mm/page_alloc.c
5800
int __maybe_unused cpu;
mm/page_alloc.c
5847
for_each_online_cpu(cpu)
mm/page_alloc.c
5848
set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
mm/page_alloc.c
5859
int cpu;
mm/page_alloc.c
5876
for_each_possible_cpu(cpu)
mm/page_alloc.c
5877
per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
mm/page_alloc.c
6070
int cpu;
mm/page_alloc.c
6072
for_each_possible_cpu(cpu) {
mm/page_alloc.c
6073
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
mm/page_alloc.c
6116
int cpu;
mm/page_alloc.c
6123
for_each_possible_cpu(cpu) {
mm/page_alloc.c
6127
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
mm/page_alloc.c
6128
pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
mm/page_alloc.c
6146
static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
mm/page_alloc.c
6152
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
mm/page_alloc.c
6153
cci = get_cpu_cacheinfo(cpu);
mm/page_alloc.c
6169
void setup_pcp_cacheinfo(unsigned int cpu)
mm/page_alloc.c
6174
zone_pcp_update_cacheinfo(zone, cpu);
mm/page_alloc.c
6185
int __maybe_unused cpu;
mm/page_alloc.c
6197
for_each_possible_cpu(cpu) {
mm/page_alloc.c
6198
struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
mm/page_alloc.c
6283
static int page_alloc_cpu_dead(unsigned int cpu)
mm/page_alloc.c
6287
lru_add_drain_cpu(cpu);
mm/page_alloc.c
6288
mlock_drain_remote(cpu);
mm/page_alloc.c
6289
drain_pages(cpu);
mm/page_alloc.c
6297
vm_events_fold_cpu(cpu);
mm/page_alloc.c
6306
cpu_vm_stats_fold(cpu);
mm/page_alloc.c
6314
static int page_alloc_cpu_online(unsigned int cpu)
mm/page_alloc.c
7403
int cpu;
mm/page_alloc.c
7407
for_each_online_cpu(cpu) {
mm/page_alloc.c
7408
pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
mm/percpu-vm.c
104
__free_page(pages[pcpu_page_idx(cpu, i)]);
mm/percpu-vm.c
107
if (tcpu == cpu)
mm/percpu-vm.c
14
unsigned int cpu, int page_idx)
mm/percpu-vm.c
156
unsigned int cpu;
mm/percpu-vm.c
159
for_each_possible_cpu(cpu) {
mm/percpu-vm.c
163
page = pcpu_chunk_page(chunk, cpu, i);
mm/percpu-vm.c
165
pages[pcpu_page_idx(cpu, i)] = page;
mm/percpu-vm.c
167
__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
mm/percpu-vm.c
19
return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
mm/percpu-vm.c
217
unsigned int cpu, tcpu;
mm/percpu-vm.c
220
for_each_possible_cpu(cpu) {
mm/percpu-vm.c
221
err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
mm/percpu-vm.c
222
&pages[pcpu_page_idx(cpu, page_start)],
mm/percpu-vm.c
228
pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
mm/percpu-vm.c
236
if (tcpu == cpu)
mm/percpu-vm.c
57
unsigned int cpu;
mm/percpu-vm.c
60
for_each_possible_cpu(cpu) {
mm/percpu-vm.c
62
struct page *page = pages[pcpu_page_idx(cpu, i)];
mm/percpu-vm.c
86
unsigned int cpu, tcpu;
mm/percpu-vm.c
91
for_each_possible_cpu(cpu) {
mm/percpu-vm.c
93
struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
mm/percpu-vm.c
95
*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
mm/percpu.c
1746
int slot, off, cpu, ret;
mm/percpu.c
1892
for_each_possible_cpu(cpu)
mm/percpu.c
1893
memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
mm/percpu.c
2295
unsigned int cpu;
mm/percpu.c
2297
for_each_possible_cpu(cpu) {
mm/percpu.c
2298
void *start = per_cpu_ptr(base, cpu);
mm/percpu.c
2359
unsigned int cpu;
mm/percpu.c
2377
for_each_possible_cpu(cpu) {
mm/percpu.c
2378
void *start = per_cpu_ptr(base, cpu);
mm/percpu.c
2572
unsigned int cpu;
mm/percpu.c
2619
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
mm/percpu.c
2620
unit_map[cpu] = UINT_MAX;
mm/percpu.c
2632
cpu = gi->cpu_map[i];
mm/percpu.c
2633
if (cpu == NR_CPUS)
mm/percpu.c
2636
PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
mm/percpu.c
2637
PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
mm/percpu.c
2638
PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
mm/percpu.c
2640
unit_map[cpu] = unit + i;
mm/percpu.c
2641
unit_off[cpu] = gi->base_offset + i * ai->unit_size;
mm/percpu.c
2645
unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
mm/percpu.c
2646
pcpu_low_unit_cpu = cpu;
mm/percpu.c
2648
unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
mm/percpu.c
2649
pcpu_high_unit_cpu = cpu;
mm/percpu.c
265
static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
mm/percpu.c
2654
for_each_possible_cpu(cpu)
mm/percpu.c
2655
PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
mm/percpu.c
267
return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
mm/percpu.c
270
static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
mm/percpu.c
272
return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
mm/percpu.c
276
unsigned int cpu, int page_idx)
mm/percpu.c
279
pcpu_unit_page_offset(cpu, page_idx);
mm/percpu.c
2818
unsigned int cpu, tcpu;
mm/percpu.c
2852
cpu = cpumask_first(&mask);
mm/percpu.c
2853
group_map[cpu] = group;
mm/percpu.c
2855
cpumask_clear_cpu(cpu, &mask);
mm/percpu.c
2859
(cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
mm/percpu.c
2860
cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
mm/percpu.c
2936
for_each_possible_cpu(cpu)
mm/percpu.c
2937
if (group_map[cpu] == group)
mm/percpu.c
2938
gi->cpu_map[gi->nr_units++] = cpu;
mm/percpu.c
2947
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
mm/percpu.c
2956
node = cpu_to_nd_fn(cpu);
mm/percpu.c
2961
cpu, node);
mm/percpu.c
2963
cpu, size, (u64)__pa(ptr));
mm/percpu.c
2970
cpu, size, node, (u64)__pa(ptr));
mm/percpu.c
3046
unsigned int cpu = NR_CPUS;
mm/percpu.c
3049
for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
mm/percpu.c
3050
cpu = gi->cpu_map[i];
mm/percpu.c
3051
BUG_ON(cpu == NR_CPUS);
mm/percpu.c
3054
ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn);
mm/percpu.c
3229
unsigned int cpu = ai->groups[0].cpu_map[unit];
mm/percpu.c
3233
ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn);
mm/percpu.c
3236
psize_str, cpu);
mm/percpu.c
3307
unsigned int cpu;
mm/percpu.c
3320
for_each_possible_cpu(cpu)
mm/percpu.c
3321
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
mm/show_mem.c
183
int cpu, nid;
mm/show_mem.c
193
for_each_online_cpu(cpu)
mm/show_mem.c
194
free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
mm/show_mem.c
294
for_each_online_cpu(cpu)
mm/show_mem.c
295
free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
mm/slab_common.c
1919
int cpu;
mm/slab_common.c
1921
for_each_possible_cpu(cpu) {
mm/slab_common.c
1922
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
mm/slab_common.c
2022
int i, cpu;
mm/slab_common.c
2031
for_each_possible_cpu(cpu) {
mm/slab_common.c
2032
krcp = per_cpu_ptr(&krc, cpu);
mm/slab_common.c
2070
for_each_possible_cpu(cpu) {
mm/slab_common.c
2071
krcp = per_cpu_ptr(&krc, cpu);
mm/slab_common.c
2128
int cpu;
mm/slab_common.c
2132
for_each_possible_cpu(cpu) {
mm/slab_common.c
2133
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
mm/slab_common.c
2146
int cpu, freed = 0;
mm/slab_common.c
2148
for_each_possible_cpu(cpu) {
mm/slab_common.c
2150
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
mm/slab_common.c
2168
int cpu;
mm/slab_common.c
2188
for_each_possible_cpu(cpu) {
mm/slab_common.c
2189
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
mm/slub.c
1055
p->cpu = raw_smp_processor_id();
mm/slub.c
1087
s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
mm/slub.c
3019
static void __pcs_flush_all_cpu(struct kmem_cache *s, unsigned int cpu)
mm/slub.c
3023
pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
mm/slub.c
3041
int cpu;
mm/slub.c
3054
for_each_possible_cpu(cpu) {
mm/slub.c
3057
pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
mm/slub.c
314
int cpu; /* Was running on cpu */
mm/slub.c
3945
static bool has_pcs_used(int cpu, struct kmem_cache *s)
mm/slub.c
3952
pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
mm/slub.c
3978
unsigned int cpu;
mm/slub.c
3983
for_each_online_cpu(cpu) {
mm/slub.c
3984
sfw = &per_cpu(slub_flush, cpu);
mm/slub.c
3985
if (!has_pcs_used(cpu, s)) {
mm/slub.c
3992
queue_work_on(cpu, flushwq, &sfw->work);
mm/slub.c
3995
for_each_online_cpu(cpu) {
mm/slub.c
3996
sfw = &per_cpu(slub_flush, cpu);
mm/slub.c
4039
unsigned int cpu;
mm/slub.c
4043
for_each_online_cpu(cpu) {
mm/slub.c
4044
sfw = &per_cpu(slub_flush, cpu);
mm/slub.c
4055
queue_work_on(cpu, flushwq, &sfw->work);
mm/slub.c
4058
for_each_online_cpu(cpu) {
mm/slub.c
4059
sfw = &per_cpu(slub_flush, cpu);
mm/slub.c
4089
static int slub_cpu_dead(unsigned int cpu)
mm/slub.c
4096
__pcs_flush_all_cpu(s, cpu);
mm/slub.c
4231
int cpu = raw_smp_processor_id();
mm/slub.c
4239
cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags);
mm/slub.c
6152
int cpu;
mm/slub.c
6154
for_each_possible_cpu(cpu)
mm/slub.c
6155
irq_work_sync(&per_cpu_ptr(&defer_free_objects, cpu)->work);
mm/slub.c
7464
int cpu;
mm/slub.c
7466
for_each_possible_cpu(cpu) {
mm/slub.c
7469
pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
mm/slub.c
8304
int node, cpu;
mm/slub.c
8326
for_each_possible_cpu(cpu) {
mm/slub.c
8329
pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
mm/slub.c
8727
cpumask_set_cpu(track->cpu,
mm/slub.c
8766
cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
mm/slub.c
9181
int cpu;
mm/slub.c
9188
for_each_online_cpu(cpu) {
mm/slub.c
9189
unsigned int x = per_cpu_ptr(s->cpu_stats, cpu)->stat[si];
mm/slub.c
9191
data[cpu] = x;
mm/slub.c
9198
for_each_online_cpu(cpu) {
mm/slub.c
9199
if (data[cpu])
mm/slub.c
9201
cpu, data[cpu]);
mm/slub.c
9212
int cpu;
mm/slub.c
9214
for_each_online_cpu(cpu)
mm/slub.c
9215
per_cpu_ptr(s->cpu_stats, cpu)->stat[si] = 0;
mm/swap.c
321
static void folio_activate_drain(int cpu)
mm/swap.c
323
struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu);
mm/swap.c
339
static inline void folio_activate_drain(int cpu)
mm/swap.c
642
void lru_add_drain_cpu(int cpu)
mm/swap.c
644
struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
mm/swap.c
673
folio_activate_drain(cpu);
mm/swap.c
775
static bool cpu_needs_drain(unsigned int cpu)
mm/swap.c
777
struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
mm/swap.c
786
need_mlock_drain(cpu) ||
mm/swap.c
787
has_bh_in_lru(cpu, NULL);
mm/swap.c
812
unsigned cpu, this_gen;
mm/swap.c
874
for_each_online_cpu(cpu) {
mm/swap.c
875
struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
mm/swap.c
877
if (cpu_needs_drain(cpu)) {
mm/swap.c
879
queue_work_on(cpu, mm_percpu_wq, work);
mm/swap.c
880
__cpumask_set_cpu(cpu, &has_work);
mm/swap.c
884
for_each_cpu(cpu, &has_work)
mm/swap.c
885
flush_work(&per_cpu(lru_add_drain_work, cpu));
mm/swapfile.c
2753
int cpu, i;
mm/swapfile.c
2756
for_each_possible_cpu(cpu) {
mm/swapfile.c
2757
pcp_si = per_cpu_ptr(percpu_swap_cluster.si, cpu);
mm/vmalloc.c
2614
unsigned int cpu;
mm/vmalloc.c
2740
vb->cpu = raw_smp_processor_id();
mm/vmalloc.c
2757
vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
mm/vmalloc.c
2787
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
mm/vmalloc.c
2820
static void purge_fragmented_blocks(int cpu)
mm/vmalloc.c
2824
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
mm/vmalloc.c
2845
int cpu;
mm/vmalloc.c
2847
for_each_possible_cpu(cpu)
mm/vmalloc.c
2848
purge_fragmented_blocks(cpu);
mm/vmalloc.c
2952
int cpu;
mm/vmalloc.c
2959
for_each_possible_cpu(cpu) {
mm/vmalloc.c
2960
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
mm/vmstat.c
115
int cpu;
mm/vmstat.c
120
for_each_online_cpu(cpu) {
mm/vmstat.c
121
struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
mm/vmstat.c
147
void vm_events_fold_cpu(int cpu)
mm/vmstat.c
149
struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
mm/vmstat.c
175
int cpu;
mm/vmstat.c
178
for_each_online_cpu(cpu) {
mm/vmstat.c
181
pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
mm/vmstat.c
2052
static bool need_update(int cpu)
mm/vmstat.c
2058
struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
mm/vmstat.c
2070
n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
mm/vmstat.c
2119
int cpu;
mm/vmstat.c
2123
for_each_online_cpu(cpu) {
mm/vmstat.c
2124
struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
mm/vmstat.c
2138
if (cpu_is_isolated(cpu))
mm/vmstat.c
2141
if (!delayed_work_pending(dw) && need_update(cpu))
mm/vmstat.c
2142
queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
mm/vmstat.c
2155
int cpu;
mm/vmstat.c
2157
for_each_possible_cpu(cpu) {
mm/vmstat.c
2158
INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
mm/vmstat.c
2167
if (!cpu_online(cpu))
mm/vmstat.c
2168
disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
mm/vmstat.c
2185
static int vmstat_cpu_online(unsigned int cpu)
mm/vmstat.c
2190
if (!node_state(cpu_to_node(cpu), N_CPU)) {
mm/vmstat.c
2191
node_set_state(cpu_to_node(cpu), N_CPU);
mm/vmstat.c
2193
enable_delayed_work(&per_cpu(vmstat_work, cpu));
mm/vmstat.c
2198
static int vmstat_cpu_down_prep(unsigned int cpu)
mm/vmstat.c
2200
disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
mm/vmstat.c
2204
static int vmstat_cpu_dead(unsigned int cpu)
mm/vmstat.c
2209
node = cpu_to_node(cpu);
mm/vmstat.c
279
int cpu;
mm/vmstat.c
284
for_each_online_cpu(cpu) {
mm/vmstat.c
285
per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
mm/vmstat.c
295
for_each_online_cpu(cpu) {
mm/vmstat.c
298
per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
mm/vmstat.c
302
pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
mm/vmstat.c
303
per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
mm/vmstat.c
324
int cpu;
mm/vmstat.c
334
for_each_online_cpu(cpu)
mm/vmstat.c
335
per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
mm/vmstat.c
42
int item, cpu;
mm/vmstat.c
46
for_each_online_cpu(cpu) {
mm/vmstat.c
47
per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
mm/vmstat.c
889
void cpu_vm_stats_fold(int cpu)
mm/vmstat.c
900
pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
mm/vmstat.c
928
p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
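vmstat_cpu_online()/vmstat_cpu_down_prep()/vmstat_cpu_dead() above are hotplug callbacks; wiring such callbacks up goes through the cpuhp state machine. A minimal sketch, assuming a kernel-module context; the state name and callbacks are hypothetical:

    #include <linux/cpuhotplug.h>
    #include <linux/printk.h>

    static int demo_cpu_online(unsigned int cpu)
    {
            pr_info("cpu %u online\n", cpu);
            return 0;
    }

    static int demo_cpu_down_prep(unsigned int cpu)
    {
            pr_info("cpu %u going down\n", cpu);
            return 0;
    }

    static int __init demo_hotplug_init(void)
    {
            int ret;

            /* Dynamic AP state: the online callback also runs now for
             * CPUs that are already up; the teardown runs on unplug. */
            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
                                    demo_cpu_online, demo_cpu_down_prep);
            return ret < 0 ? ret : 0;
    }
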
mm/zswap.c
249
int ret, cpu;
mm/zswap.c
272
for_each_possible_cpu(cpu)
mm/zswap.c
273
mutex_init(&per_cpu_ptr(pool->acomp_ctx, cpu)->mutex);
mm/zswap.c
735
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
mm/zswap.c
738
struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
mm/zswap.c
744
buffer = kmalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu));
mm/zswap.c
750
acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
mm/zswap.c
795
static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
mm/zswap.c
798
struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
net/batman-adv/mesh-interface.c
89
int cpu;
net/batman-adv/mesh-interface.c
91
for_each_possible_cpu(cpu) {
net/batman-adv/mesh-interface.c
92
counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
net/bpf/test_run.c
1004
kattr->test.cpu || kattr->test.batch_size)
net/bpf/test_run.c
1459
if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
net/bpf/test_run.c
1523
if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
net/bpf/test_run.c
1730
if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
net/bpf/test_run.c
682
if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
net/bpf/test_run.c
753
int cpu = kattr->test.cpu, err = 0;
net/bpf/test_run.c
766
if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
net/bpf/test_run.c
781
cpu == current_cpu) {
net/bpf/test_run.c
783
} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
net/bpf/test_run.c
791
err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
net/bridge/netfilter/ebtables.c
1001
counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
net/bridge/netfilter/ebtables.c
40
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
net/bridge/netfilter/ebtables.c
41
COUNTER_OFFSET(n) * cpu))
net/bridge/netfilter/ebtables.c
990
int i, cpu;
net/bridge/netfilter/ebtables.c
998
for_each_possible_cpu(cpu) {
net/bridge/netfilter/ebtables.c
999
if (cpu == 0)
net/core/dev.c
11762
int cpu;
net/core/dev.c
11764
for_each_possible_cpu(cpu) {
net/core/dev.c
11770
stats = per_cpu_ptr(dstats, cpu);
net/core/dev.c
11926
int cpu;
net/core/dev.c
11928
for_each_possible_cpu(cpu) {
net/core/dev.c
11933
stats = per_cpu_ptr(netstats, cpu);
net/core/dev.c
12704
unsigned int cpu;
net/core/dev.c
12708
cpu = smp_processor_id();
net/core/dev.c
12709
sd = &per_cpu(softnet_data, cpu);
net/core/dev.c
13185
static int backlog_napi_should_run(unsigned int cpu)
net/core/dev.c
13187
struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
net/core/dev.c
13193
static void run_backlog_napi(unsigned int cpu)
net/core/dev.c
13195
struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
net/core/dev.c
13200
static void backlog_napi_setup(unsigned int cpu)
net/core/dev.c
13202
struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
net/core/dev.c
13261
sd->cpu = i;
net/core/dev.c
2710
int cpu, u16 offset, u16 count)
net/core/dev.c
2716
for (tci = cpu * num_tc; num_tc--; tci++) {
net/core/dev.c
3364
int cpu, count = 0;
net/core/dev.c
3370
for_each_cpu(cpu, cpus) {
net/core/dev.c
3372
cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
net/core/dev.c
4836
int cpu = smp_processor_id(); /* ok because BHs are off */
net/core/dev.c
4838
if (!netif_tx_owned(txq, cpu)) {
net/core/dev.c
4848
HARD_TX_LOCK(dev, txq, cpu);
net/core/dev.c
5004
unsigned int cpu)
net/core/dev.c
5009
if (cpu >= nr_cpu_ids)
net/core/dev.c
5012
sd_input_head = READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head);
net/core/dev.c
5051
tmp_cpu = READ_ONCE(tmp_rflow->cpu);
net/core/dev.c
5080
WRITE_ONCE(rflow->cpu, next_cpu);
net/core/dev.c
5096
int cpu = -1;
net/core/dev.c
5144
tcpu = rflow->cpu;
net/core/dev.c
5167
cpu = tcpu;
net/core/dev.c
5177
cpu = tcpu;
net/core/dev.c
5183
return cpu;
net/core/dev.c
5210
unsigned int cpu;
net/core/dev.c
5213
cpu = READ_ONCE(rflow->cpu);
net/core/dev.c
5215
rps_flow_is_active(rflow, flow_table, cpu))
net/core/dev.c
5281
void kick_defer_list_purge(unsigned int cpu)
net/core/dev.c
5283
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
net/core/dev.c
5295
smp_call_function_single_async(cpu, &sd->defer_csd);
net/core/dev.c
5345
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
net/core/dev.c
5359
sd = &per_cpu(softnet_data, cpu);
net/core/dev.c
5608
int cpu, rc;
net/core/dev.c
5611
cpu = smp_processor_id();
net/core/dev.c
5612
HARD_TX_LOCK(dev, txq, cpu);
net/core/dev.c
5675
int cpu;
net/core/dev.c
5679
cpu = get_rps_cpu(skb->dev, skb, &rflow);
net/core/dev.c
5680
if (cpu < 0)
net/core/dev.c
5681
cpu = smp_processor_id();
net/core/dev.c
5683
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
net/core/dev.c
6371
int cpu = get_rps_cpu(skb->dev, skb, &rflow);
net/core/dev.c
6373
if (cpu >= 0) {
net/core/dev.c
6374
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
net/core/dev.c
6404
int cpu = get_rps_cpu(skb->dev, skb, &rflow);
net/core/dev.c
6406
if (cpu >= 0) {
net/core/dev.c
6409
enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
net/core/dev.c
6506
static bool flush_required(int cpu)
net/core/dev.c
6509
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
net/core/dev.c
6547
unsigned int cpu;
net/core/dev.c
6557
for_each_online_cpu(cpu) {
net/core/dev.c
6558
if (flush_required(cpu)) {
net/core/dev.c
6559
INIT_WORK(&ptr->w[cpu], flush_backlog);
net/core/dev.c
6560
queue_work_on(cpu, system_highpri_wq, &ptr->w[cpu]);
net/core/dev.c
6561
__cpumask_set_cpu(cpu, &ptr->flush_cpus);
net/core/dev.c
6569
for_each_cpu(cpu, &ptr->flush_cpus)
net/core/dev.c
6570
flush_work(&ptr->w[cpu]);
net/core/dev.c
6586
if (cpu_online(remsd->cpu))
net/core/dev.c
6587
smp_call_function_single_async(remsd->cpu, &remsd->csd);
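The net/core/dev.c backlog-flush entries above show the "one work item per online CPU" idiom: INIT_WORK, queue_work_on() for each CPU, then flush_work() on each. A minimal sketch under the assumption of a trivial work handler; the array indexing by CPU id mirrors the w[cpu] usage quoted above:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static void demo_flush(struct work_struct *work)
{
	/* runs on the CPU it was queued on */
}

static int flush_all_cpus(void)
{
	struct work_struct *w;
	int cpu;

	w = kcalloc(nr_cpu_ids, sizeof(*w), GFP_KERNEL);
	if (!w)
		return -ENOMEM;

	cpus_read_lock();		/* keep the online mask stable */
	for_each_online_cpu(cpu) {
		INIT_WORK(&w[cpu], demo_flush);
		queue_work_on(cpu, system_highpri_wq, &w[cpu]);
	}
	for_each_online_cpu(cpu)
		flush_work(&w[cpu]);
	cpus_read_unlock();

	kfree(w);
	return 0;
}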
net/core/dev.h
367
void kick_defer_list_purge(unsigned int cpu);
net/core/drop_monitor.c
1051
int cpu, rc;
net/core/drop_monitor.c
1065
for_each_possible_cpu(cpu) {
net/core/drop_monitor.c
1066
struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
net/core/drop_monitor.c
1086
for_each_possible_cpu(cpu) {
net/core/drop_monitor.c
1087
struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
net/core/drop_monitor.c
1107
int cpu;
net/core/drop_monitor.c
1120
for_each_possible_cpu(cpu) {
net/core/drop_monitor.c
1121
struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
net/core/drop_monitor.c
1141
int cpu, rc;
net/core/drop_monitor.c
1150
for_each_possible_cpu(cpu) {
net/core/drop_monitor.c
1151
struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
net/core/drop_monitor.c
1181
for_each_possible_cpu(cpu) {
net/core/drop_monitor.c
1182
struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
net/core/drop_monitor.c
1197
int cpu;
net/core/drop_monitor.c
1209
for_each_possible_cpu(cpu) {
net/core/drop_monitor.c
1210
struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
net/core/drop_monitor.c
1442
int cpu;
net/core/drop_monitor.c
1445
for_each_possible_cpu(cpu) {
net/core/drop_monitor.c
1446
struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
net/core/drop_monitor.c
1486
int cpu;
net/core/drop_monitor.c
1489
for_each_possible_cpu(cpu) {
net/core/drop_monitor.c
1490
struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
net/core/drop_monitor.c
1688
static void net_dm_cpu_data_init(int cpu)
net/core/drop_monitor.c
1692
data = &per_cpu(dm_cpu_data, cpu);
net/core/drop_monitor.c
1696
static void net_dm_cpu_data_fini(int cpu)
net/core/drop_monitor.c
1700
data = &per_cpu(dm_cpu_data, cpu);
net/core/drop_monitor.c
1708
static void net_dm_hw_cpu_data_init(int cpu)
net/core/drop_monitor.c
1712
hw_data = &per_cpu(dm_hw_cpu_data, cpu);
net/core/drop_monitor.c
1716
static void net_dm_hw_cpu_data_fini(int cpu)
net/core/drop_monitor.c
1720
hw_data = &per_cpu(dm_hw_cpu_data, cpu);
net/core/drop_monitor.c
1727
int cpu, rc;
net/core/drop_monitor.c
1736
for_each_possible_cpu(cpu) {
net/core/drop_monitor.c
1737
net_dm_cpu_data_init(cpu);
net/core/drop_monitor.c
1738
net_dm_hw_cpu_data_init(cpu);
net/core/drop_monitor.c
1766
int cpu;
net/core/drop_monitor.c
1776
for_each_possible_cpu(cpu) {
net/core/drop_monitor.c
1777
net_dm_hw_cpu_data_fini(cpu);
net/core/drop_monitor.c
1778
net_dm_cpu_data_fini(cpu);
net/core/dst.c
323
int cpu;
net/core/dst.c
332
for_each_possible_cpu(cpu)
net/core/dst.c
333
__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);
net/core/dst.c
341
int cpu;
net/core/dst.c
343
for_each_possible_cpu(cpu) {
net/core/dst.c
344
struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);
net/core/gen_stats.c
127
struct gnet_stats_basic_sync __percpu *cpu)
net/core/gen_stats.c
133
struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
net/core/gen_stats.c
150
struct gnet_stats_basic_sync __percpu *cpu,
net/core/gen_stats.c
157
WARN_ON_ONCE((cpu || running) && in_hardirq());
net/core/gen_stats.c
159
if (cpu) {
net/core/gen_stats.c
160
gnet_stats_add_basic_cpu(bstats, cpu);
net/core/gen_stats.c
175
struct gnet_stats_basic_sync __percpu *cpu,
net/core/gen_stats.c
180
if (cpu) {
net/core/gen_stats.c
185
struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
net/core/gen_stats.c
212
struct gnet_stats_basic_sync __percpu *cpu,
net/core/gen_stats.c
218
gnet_stats_read_basic(&bstats_bytes, &bstats_packets, cpu, b, running);
net/core/gen_stats.c
261
struct gnet_stats_basic_sync __percpu *cpu,
net/core/gen_stats.c
265
return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC, running);
net/core/gen_stats.c
288
struct gnet_stats_basic_sync __percpu *cpu,
net/core/gen_stats.c
292
return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC_HW, running);
net/core/gen_stats.c
357
const struct gnet_stats_queue __percpu *cpu,
net/core/gen_stats.c
360
if (cpu) {
net/core/gen_stats.c
361
gnet_stats_add_queue_cpu(qstats, cpu);
net/core/neighbour.c
2293
int cpu;
net/core/neighbour.c
2298
for_each_possible_cpu(cpu) {
net/core/neighbour.c
2301
st = per_cpu_ptr(tbl->stats, cpu);
net/core/neighbour.c
3487
int cpu;
net/core/neighbour.c
3492
for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
net/core/neighbour.c
3493
if (!cpu_possible(cpu))
net/core/neighbour.c
3495
*pos = cpu+1;
net/core/neighbour.c
3496
return per_cpu_ptr(tbl->stats, cpu);
net/core/neighbour.c
3504
int cpu;
net/core/neighbour.c
3506
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
net/core/neighbour.c
3507
if (!cpu_possible(cpu))
net/core/neighbour.c
3509
*pos = cpu+1;
net/core/neighbour.c
3510
return per_cpu_ptr(tbl->stats, cpu);
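The neighbour.c seq_file walk above (and the near-identical ones in route.c, nf_conntrack_standalone.c, nf_flow_table_procfs.c, nf_synproxy_core.c and selinuxfs.c) encodes "CPU index plus one" in *pos so that position 0 can return SEQ_START_TOKEN for a header line. A sketch of the two callbacks, assuming a hypothetical percpu variable:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>

static DEFINE_PER_CPU(unsigned long, demo_stats);

static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;	/* resume after this CPU next time */
		return per_cpu_ptr(&demo_stats, cpu);
	}
	return NULL;
}

static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(&demo_stats, cpu);
	}
	return NULL;
}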
net/core/net-sysfs.c
1124
table->flows[count].cpu = RPS_NO_CPU;
net/core/net-sysfs.c
986
int cpu, i;
net/core/net-sysfs.c
995
for_each_cpu_and(cpu, mask, cpu_online_mask)
net/core/net-sysfs.c
996
map->cpus[i++] = cpu;
net/core/netpoll.c
166
int cpu = smp_processor_id();
net/core/netpoll.c
169
if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
net/core/page_pool.c
101
for_each_possible_cpu(cpu) {
net/core/page_pool.c
103
per_cpu_ptr(pool->recycle_stats, cpu);
net/core/page_pool.c
88
int cpu = 0;
net/core/pktgen.c
3704
int cpu = t->cpu;
net/core/pktgen.c
3706
WARN_ON_ONCE(smp_processor_id() != cpu);
net/core/pktgen.c
3711
pr_debug("starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current));
net/core/pktgen.c
3831
int node = cpu_to_node(t->cpu);
net/core/pktgen.c
3915
static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
net/core/pktgen.c
3922
cpu_to_node(cpu));
net/core/pktgen.c
3929
t->cpu = cpu;
net/core/pktgen.c
3936
p = kthread_create_on_cpu(pktgen_thread_worker, t, cpu, "kpktgend_%d");
net/core/pktgen.c
3938
pr_err("kthread_create_on_node() failed for cpu %d\n", t->cpu);
net/core/pktgen.c
4023
int cpu, ret = 0;
net/core/pktgen.c
4041
for_each_online_cpu(cpu) {
net/core/pktgen.c
4044
err = pktgen_create_thread(cpu, pn);
net/core/pktgen.c
4047
cpu, err);
net/core/pktgen.c
470
int cpu;
net/core/skbuff.c
7284
int cpu;
net/core/skbuff.c
7290
cpu = skb->alloc_cpu;
net/core/skbuff.c
7291
if (cpu == raw_smp_processor_id() ||
net/core/skbuff.c
7292
WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
net/core/skbuff.c
7293
!cpu_online(cpu)) {
net/core/skbuff.c
7302
sdn = per_cpu_ptr(net_hotdata.skb_defer_nodes, cpu) + numa_node_id();
net/core/skbuff.c
7319
kick_defer_list_purge(cpu);
net/core/sock.c
4032
int cpu, idx = prot->inuse_idx;
net/core/sock.c
4035
for_each_possible_cpu(cpu)
net/core/sock.c
4036
res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
net/core/sock.c
4044
int cpu, res = 0;
net/core/sock.c
4046
for_each_possible_cpu(cpu)
net/core/sock.c
4047
res += per_cpu_ptr(net->core.prot_inuse, cpu)->all;
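The sock.c entries above sum a per-CPU counter across *possible* CPUs, so contributions from CPUs that have since gone offline are still counted. The shape of that summation, with a hypothetical counter:

#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(long, demo_counter);

static long demo_counter_sum(void)
{
	long res = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		res += *per_cpu_ptr(&demo_counter, cpu);

	/* may be transiently inexact under concurrent updates */
	return res;
}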
net/ipv4/af_inet.c
1673
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
net/ipv4/af_inet.c
1681
bhptr = per_cpu_ptr(mib, cpu);
net/ipv4/af_inet.c
1695
int cpu;
net/ipv4/af_inet.c
1697
for_each_possible_cpu(cpu) {
net/ipv4/af_inet.c
1698
res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
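snmp_get_cpu_field64() above reads a 64-bit per-CPU counter consistently even on 32-bit kernels, where a u64_stats_sync seqcount guards the two-word value. A sketch of that fetch/retry loop over a hypothetical stats struct:

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 bytes;
	struct u64_stats_sync syncp;
};

static u64 demo_read_bytes(struct demo_stats __percpu *stats, int cpu)
{
	struct demo_stats *p = per_cpu_ptr(stats, cpu);
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&p->syncp);
		val = p->bytes;
	} while (u64_stats_fetch_retry(&p->syncp, start));

	return val;
}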
net/ipv4/fib_semantics.c
185
int cpu;
net/ipv4/fib_semantics.c
190
for_each_possible_cpu(cpu) {
net/ipv4/fib_semantics.c
193
rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
net/ipv4/fib_trie.c
2574
int cpu;
net/ipv4/fib_trie.c
2577
for_each_possible_cpu(cpu) {
net/ipv4/fib_trie.c
2578
const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
net/ipv4/netfilter/arp_tables.c
194
unsigned int cpu, stackidx = 0;
net/ipv4/netfilter/arp_tables.c
208
cpu = smp_processor_id();
net/ipv4/netfilter/arp_tables.c
210
jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
net/ipv4/netfilter/arp_tables.c
604
unsigned int cpu;
net/ipv4/netfilter/arp_tables.c
607
for_each_possible_cpu(cpu) {
net/ipv4/netfilter/arp_tables.c
608
seqcount_t *s = &per_cpu(xt_recseq, cpu);
net/ipv4/netfilter/arp_tables.c
616
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
net/ipv4/netfilter/arp_tables.c
634
unsigned int cpu, i;
net/ipv4/netfilter/arp_tables.c
636
for_each_possible_cpu(cpu) {
net/ipv4/netfilter/arp_tables.c
641
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
net/ipv4/netfilter/ip_tables.c
236
unsigned int stackidx, cpu;
net/ipv4/netfilter/ip_tables.c
261
cpu = smp_processor_id();
net/ipv4/netfilter/ip_tables.c
263
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
net/ipv4/netfilter/ip_tables.c
743
unsigned int cpu;
net/ipv4/netfilter/ip_tables.c
746
for_each_possible_cpu(cpu) {
net/ipv4/netfilter/ip_tables.c
747
seqcount_t *s = &per_cpu(xt_recseq, cpu);
net/ipv4/netfilter/ip_tables.c
755
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
net/ipv4/netfilter/ip_tables.c
773
unsigned int cpu, i;
net/ipv4/netfilter/ip_tables.c
775
for_each_possible_cpu(cpu) {
net/ipv4/netfilter/ip_tables.c
780
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
net/ipv4/route.c
1558
int cpu;
net/ipv4/route.c
1560
for_each_possible_cpu(cpu) {
net/ipv4/route.c
1561
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
net/ipv4/route.c
236
int cpu;
net/ipv4/route.c
241
for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
net/ipv4/route.c
242
if (!cpu_possible(cpu))
net/ipv4/route.c
244
*pos = cpu+1;
net/ipv4/route.c
245
return &per_cpu(rt_cache_stat, cpu);
net/ipv4/route.c
252
int cpu;
net/ipv4/route.c
254
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
net/ipv4/route.c
255
if (!cpu_possible(cpu))
net/ipv4/route.c
257
*pos = cpu+1;
net/ipv4/route.c
258
return &per_cpu(rt_cache_stat, cpu);
net/ipv4/route.c
3725
int cpu;
net/ipv4/route.c
3744
for_each_possible_cpu(cpu) {
net/ipv4/route.c
3745
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
net/ipv4/tcp_ipv4.c
3720
int cpu, res;
net/ipv4/tcp_ipv4.c
3722
for_each_possible_cpu(cpu) {
net/ipv4/tcp_ipv4.c
3738
per_cpu(ipv4_tcp_sk.sock, cpu) = sk;
net/ipv4/tcp_sigpool.c
100
int cpu;
net/ipv4/tcp_sigpool.c
102
for_each_possible_cpu(cpu)
net/ipv4/tcp_sigpool.c
103
kfree(rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
net/ipv4/tcp_sigpool.c
60
int cpu, err = 0;
net/ipv4/tcp_sigpool.c
73
for_each_possible_cpu(cpu) {
net/ipv4/tcp_sigpool.c
76
scratch = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
net/ipv4/tcp_sigpool.c
82
old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
net/ipv4/tcp_sigpool.c
84
if (!cpu_online(cpu) || !old_scratch) {
net/ipv6/addrconf.c
6677
int cpu;
net/ipv6/addrconf.c
6682
for_each_possible_cpu(cpu) {
net/ipv6/addrconf.c
6685
rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu);
net/ipv6/ip6_fib.c
978
int cpu;
net/ipv6/ip6_fib.c
987
for_each_possible_cpu(cpu) {
net/ipv6/ip6_fib.c
991
ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
net/ipv6/netfilter/ip6_tables.c
259
unsigned int stackidx, cpu;
net/ipv6/netfilter/ip6_tables.c
283
cpu = smp_processor_id();
net/ipv6/netfilter/ip6_tables.c
285
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
net/ipv6/netfilter/ip6_tables.c
760
unsigned int cpu;
net/ipv6/netfilter/ip6_tables.c
763
for_each_possible_cpu(cpu) {
net/ipv6/netfilter/ip6_tables.c
764
seqcount_t *s = &per_cpu(xt_recseq, cpu);
net/ipv6/netfilter/ip6_tables.c
772
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
net/ipv6/netfilter/ip6_tables.c
790
unsigned int cpu, i;
net/ipv6/netfilter/ip6_tables.c
792
for_each_possible_cpu(cpu) {
net/ipv6/netfilter/ip6_tables.c
797
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
net/ipv6/route.c
162
int cpu;
net/ipv6/route.c
164
for_each_possible_cpu(cpu) {
net/ipv6/route.c
165
struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
net/ipv6/route.c
3730
int cpu;
net/ipv6/route.c
3735
for_each_possible_cpu(cpu) {
net/ipv6/route.c
3738
ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
net/ipv6/route.c
6877
int cpu;
net/ipv6/route.c
6932
for_each_possible_cpu(cpu) {
net/ipv6/route.c
6933
struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
net/iucv/iucv.c
398
int cpu = smp_processor_id();
net/iucv/iucv.c
410
parm = iucv_param_irq[cpu];
net/iucv/iucv.c
428
cpumask_set_cpu(cpu, &iucv_irq_cpumask);
net/iucv/iucv.c
438
int cpu = smp_processor_id();
net/iucv/iucv.c
442
parm = iucv_param_irq[cpu];
net/iucv/iucv.c
447
cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
net/iucv/iucv.c
457
int cpu = smp_processor_id();
net/iucv/iucv.c
461
if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
net/iucv/iucv.c
465
parm = iucv_param_irq[cpu];
net/iucv/iucv.c
467
parm->db.ipbfadr1 = virt_to_dma32(iucv_irq_data[cpu]);
net/iucv/iucv.c
489
cpu, rc, err);
net/iucv/iucv.c
494
cpumask_set_cpu(cpu, &iucv_buffer_cpumask);
net/iucv/iucv.c
511
int cpu = smp_processor_id();
net/iucv/iucv.c
514
if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
net/iucv/iucv.c
521
parm = iucv_param_irq[cpu];
net/iucv/iucv.c
525
cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
net/iucv/iucv.c
533
int cpu;
net/iucv/iucv.c
536
for_each_online_cpu(cpu)
net/iucv/iucv.c
538
if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
net/iucv/iucv.c
539
!cpumask_test_cpu(cpu, &iucv_irq_cpumask))
net/iucv/iucv.c
540
smp_call_function_single(cpu, iucv_allow_cpu,
net/iucv/iucv.c
551
int cpu;
net/iucv/iucv.c
556
for_each_cpu(cpu, &cpumask)
net/iucv/iucv.c
557
smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
net/iucv/iucv.c
570
int cpu, rc;
net/iucv/iucv.c
580
for_each_online_cpu(cpu)
net/iucv/iucv.c
581
smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
net/iucv/iucv.c
609
static int iucv_cpu_dead(unsigned int cpu)
net/iucv/iucv.c
611
kfree(iucv_param_irq[cpu]);
net/iucv/iucv.c
612
iucv_param_irq[cpu] = NULL;
net/iucv/iucv.c
613
kfree(iucv_param[cpu]);
net/iucv/iucv.c
614
iucv_param[cpu] = NULL;
net/iucv/iucv.c
615
kfree(iucv_irq_data[cpu]);
net/iucv/iucv.c
616
iucv_irq_data[cpu] = NULL;
net/iucv/iucv.c
620
static int iucv_cpu_prepare(unsigned int cpu)
net/iucv/iucv.c
623
iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
net/iucv/iucv.c
624
GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
net/iucv/iucv.c
625
if (!iucv_irq_data[cpu])
net/iucv/iucv.c
629
iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
net/iucv/iucv.c
630
GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
net/iucv/iucv.c
631
if (!iucv_param[cpu])
net/iucv/iucv.c
634
iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
net/iucv/iucv.c
635
GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
net/iucv/iucv.c
636
if (!iucv_param_irq[cpu])
net/iucv/iucv.c
642
iucv_cpu_dead(cpu);
net/iucv/iucv.c
646
static int iucv_cpu_online(unsigned int cpu)
net/iucv/iucv.c
654
static int iucv_cpu_down_prep(unsigned int cpu)
net/iucv/iucv.c
666
cpumask_clear_cpu(cpu, cpumask);
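The iucv.c entries above keep global cpumasks recording which CPUs have buffers declared and interrupts enabled, and send IPIs only to CPUs present in one mask but not the other. A hedged sketch of that bookkeeping with illustrative names:

#include <linux/cpumask.h>
#include <linux/smp.h>

static struct cpumask demo_ready_mask;

static void demo_enable(void *unused)
{
	/* executes on the target CPU */
	cpumask_set_cpu(smp_processor_id(), &demo_ready_mask);
}

static void demo_enable_missing(const struct cpumask *wanted)
{
	int cpu;

	for_each_cpu(cpu, wanted)
		if (!cpumask_test_cpu(cpu, &demo_ready_mask))
			smp_call_function_single(cpu, demo_enable, NULL, 1);
}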
net/mac80211/sta_info.c
2489
int cpu;
net/mac80211/sta_info.c
2502
for_each_possible_cpu(cpu) {
net/mac80211/sta_info.c
2505
cpustats = per_cpu_ptr(link_sta_info->pcpu_rx_stats, cpu);
net/mac80211/sta_info.c
2616
int cpu;
net/mac80211/sta_info.c
2630
for_each_possible_cpu(cpu) {
net/mac80211/sta_info.c
2634
cpu);
net/mac80211/sta_info.c
2752
int i, ac, cpu, link_id = link->link_id;
net/mac80211/sta_info.c
2812
for_each_possible_cpu(cpu) {
net/mac80211/sta_info.c
2816
cpu);
net/mac80211/sta_info.c
2828
for_each_possible_cpu(cpu) {
net/mac80211/sta_info.c
2832
cpu);
net/mac80211/sta_info.c
2870
for_each_possible_cpu(cpu) {
net/mac80211/sta_info.c
2874
cpu);
net/mac80211/sta_info.c
2989
int i, ac, cpu;
net/mac80211/sta_info.c
3043
for_each_possible_cpu(cpu) {
net/mac80211/sta_info.c
3047
cpu);
net/mac80211/sta_info.c
3058
for_each_possible_cpu(cpu) {
net/mac80211/sta_info.c
3062
cpu);
net/mac80211/sta_info.c
3098
for_each_possible_cpu(cpu) {
net/mac80211/sta_info.c
3101
cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu);
net/mptcp/protocol.c
4590
int cpu;
net/mptcp/protocol.c
4600
for_each_possible_cpu(cpu) {
net/mptcp/protocol.c
4601
delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
net/netfilter/nf_conncount.c
103
int cpu = raw_smp_processor_id();
net/netfilter/nf_conncount.c
118
if (conn->cpu == cpu || age >= 2) {
net/netfilter/nf_conncount.c
262
conn->cpu = raw_smp_processor_id();
net/netfilter/nf_conncount.c
46
int cpu;
net/netfilter/nf_conncount.c
468
conn->cpu = raw_smp_processor_id();
net/netfilter/nf_conntrack_netlink.c
2492
__u16 cpu, const struct ip_conntrack_stat *st)
net/netfilter/nf_conntrack_netlink.c
2500
NFNETLINK_V0, htons(cpu));
net/netfilter/nf_conntrack_netlink.c
2532
int cpu;
net/netfilter/nf_conntrack_netlink.c
2538
for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
net/netfilter/nf_conntrack_netlink.c
2541
if (!cpu_possible(cpu))
net/netfilter/nf_conntrack_netlink.c
2544
st = per_cpu_ptr(net->ct.stat, cpu);
net/netfilter/nf_conntrack_netlink.c
2548
cpu, st) < 0)
net/netfilter/nf_conntrack_netlink.c
2551
cb->args[0] = cpu;
net/netfilter/nf_conntrack_netlink.c
3689
ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
net/netfilter/nf_conntrack_netlink.c
3698
NFNETLINK_V0, htons(cpu));
net/netfilter/nf_conntrack_netlink.c
3719
int cpu;
net/netfilter/nf_conntrack_netlink.c
3725
for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
net/netfilter/nf_conntrack_netlink.c
3728
if (!cpu_possible(cpu))
net/netfilter/nf_conntrack_netlink.c
3731
st = per_cpu_ptr(net->ct.stat, cpu);
net/netfilter/nf_conntrack_netlink.c
3734
cpu, st) < 0)
net/netfilter/nf_conntrack_netlink.c
3737
cb->args[0] = cpu;
net/netfilter/nf_conntrack_netlink.c
65
unsigned int cpu;
net/netfilter/nf_conntrack_standalone.c
405
int cpu;
net/netfilter/nf_conntrack_standalone.c
410
for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
net/netfilter/nf_conntrack_standalone.c
411
if (!cpu_possible(cpu))
net/netfilter/nf_conntrack_standalone.c
413
*pos = cpu + 1;
net/netfilter/nf_conntrack_standalone.c
414
return per_cpu_ptr(net->ct.stat, cpu);
net/netfilter/nf_conntrack_standalone.c
423
int cpu;
net/netfilter/nf_conntrack_standalone.c
425
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
net/netfilter/nf_conntrack_standalone.c
426
if (!cpu_possible(cpu))
net/netfilter/nf_conntrack_standalone.c
428
*pos = cpu + 1;
net/netfilter/nf_conntrack_standalone.c
429
return per_cpu_ptr(net->ct.stat, cpu);
net/netfilter/nf_flow_table_procfs.c
14
for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
net/netfilter/nf_flow_table_procfs.c
15
if (!cpu_possible(cpu))
net/netfilter/nf_flow_table_procfs.c
17
*pos = cpu + 1;
net/netfilter/nf_flow_table_procfs.c
18
return per_cpu_ptr(net->ft.stat, cpu);
net/netfilter/nf_flow_table_procfs.c
27
int cpu;
net/netfilter/nf_flow_table_procfs.c
29
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
net/netfilter/nf_flow_table_procfs.c
30
if (!cpu_possible(cpu))
net/netfilter/nf_flow_table_procfs.c
32
*pos = cpu + 1;
net/netfilter/nf_flow_table_procfs.c
33
return per_cpu_ptr(net->ft.stat, cpu);
net/netfilter/nf_flow_table_procfs.c
9
int cpu;
net/netfilter/nf_synproxy_core.c
244
int cpu;
net/netfilter/nf_synproxy_core.c
249
for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
net/netfilter/nf_synproxy_core.c
250
if (!cpu_possible(cpu))
net/netfilter/nf_synproxy_core.c
252
*pos = cpu + 1;
net/netfilter/nf_synproxy_core.c
253
return per_cpu_ptr(snet->stats, cpu);
net/netfilter/nf_synproxy_core.c
262
int cpu;
net/netfilter/nf_synproxy_core.c
264
for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {
net/netfilter/nf_synproxy_core.c
265
if (!cpu_possible(cpu))
net/netfilter/nf_synproxy_core.c
267
*pos = cpu + 1;
net/netfilter/nf_synproxy_core.c
268
return per_cpu_ptr(snet->stats, cpu);
net/netfilter/nf_tables_api.c
1952
int cpu;
net/netfilter/nf_tables_api.c
1958
for_each_possible_cpu(cpu) {
net/netfilter/nf_tables_api.c
1959
cpu_stats = per_cpu_ptr(stats, cpu);
net/netfilter/nft_counter.c
136
int cpu;
net/netfilter/nft_counter.c
139
for_each_possible_cpu(cpu) {
net/netfilter/nft_counter.c
140
struct u64_stats_sync *nft_sync = per_cpu_ptr(&nft_counter_sync, cpu);
net/netfilter/nft_counter.c
142
this_cpu = per_cpu_ptr(priv->counter, cpu);
net/netfilter/nft_counter.c
300
int cpu;
net/netfilter/nft_counter.c
302
for_each_possible_cpu(cpu)
net/netfilter/nft_counter.c
303
u64_stats_init(per_cpu_ptr(&nft_counter_sync, cpu));
net/netfilter/nft_ct.c
349
int cpu;
net/netfilter/nft_ct.c
351
for_each_possible_cpu(cpu) {
net/netfilter/nft_ct.c
352
ct = per_cpu(nft_ct_pcpu_template, cpu);
net/netfilter/nft_ct.c
356
per_cpu(nft_ct_pcpu_template, cpu) = NULL;
net/netfilter/nft_ct.c
364
int cpu;
net/netfilter/nft_ct.c
369
for_each_possible_cpu(cpu) {
net/netfilter/nft_ct.c
377
per_cpu(nft_ct_pcpu_template, cpu) = tmp;
net/netfilter/nft_queue.c
38
int cpu = raw_smp_processor_id();
net/netfilter/nft_queue.c
40
queue = priv->queuenum + cpu % priv->queues_total;
net/netfilter/nft_set_pipapo.c
1197
static void pipapo_free_scratch(const struct nft_pipapo_match *m, unsigned int cpu)
net/netfilter/nft_set_pipapo.c
1201
s = *per_cpu_ptr(m->scratch, cpu);
net/netfilter/x_tables.c
1228
int cpu;
net/netfilter/x_tables.c
1231
for_each_possible_cpu(cpu)
net/netfilter/x_tables.c
1232
kvfree(info->jumpstack[cpu]);
net/netfilter/x_tables.c
1353
int cpu;
net/netfilter/x_tables.c
1378
for_each_possible_cpu(cpu) {
net/netfilter/x_tables.c
1379
i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
net/netfilter/x_tables.c
1380
cpu_to_node(cpu));
net/netfilter/x_tables.c
1381
if (i->jumpstack[cpu] == NULL)
net/netfilter/x_tables.c
1415
unsigned int cpu;
net/netfilter/x_tables.c
1455
for_each_possible_cpu(cpu) {
net/netfilter/x_tables.c
1456
seqcount_t *s = &per_cpu(xt_recseq, cpu);
net/netfilter/xt_NFQUEUE.c
94
int cpu = smp_processor_id();
net/netfilter/xt_NFQUEUE.c
96
queue = info->queuenum + cpu % info->queues_total;
net/netfilter/xt_cpu.c
37
return (info->cpu == smp_processor_id()) ^ info->invert;
net/openvswitch/datapath.c
2753
unsigned int cpu;
net/openvswitch/datapath.c
2759
for_each_possible_cpu(cpu) {
net/openvswitch/datapath.c
2762
ovs_pcpu = per_cpu_ptr(ovs_pcpu_storage, cpu);
net/openvswitch/flow.c
108
rcu_assign_pointer(flow->stats[cpu],
net/openvswitch/flow.c
110
cpumask_set_cpu(cpu,
net/openvswitch/flow.c
115
flow->stats_last_writer = cpu;
net/openvswitch/flow.c
132
unsigned int cpu;
net/openvswitch/flow.c
138
for_each_cpu(cpu, flow->cpu_used_mask) {
net/openvswitch/flow.c
139
struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
net/openvswitch/flow.c
159
unsigned int cpu;
net/openvswitch/flow.c
161
for_each_cpu(cpu, flow->cpu_used_mask) {
net/openvswitch/flow.c
162
struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
net/openvswitch/flow.c
65
unsigned int cpu = smp_processor_id();
net/openvswitch/flow.c
68
stats = rcu_dereference(flow->stats[cpu]);
net/openvswitch/flow.c
74
if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
net/openvswitch/flow.c
75
flow->stats_last_writer = cpu;
net/openvswitch/flow.c
83
if (unlikely(flow->stats_last_writer != cpu)) {
net/openvswitch/flow.c
90
likely(!rcu_access_pointer(flow->stats[cpu]))) {
net/openvswitch/flow_table.c
110
unsigned int cpu;
net/openvswitch/flow_table.c
1118
int cpu;
net/openvswitch/flow_table.c
1127
for_each_possible_cpu(cpu) {
net/openvswitch/flow_table.c
1132
stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
net/openvswitch/flow_table.c
118
for_each_cpu(cpu, flow->cpu_used_mask) {
net/openvswitch/flow_table.c
119
if (flow->stats[cpu])
net/openvswitch/flow_table.c
121
(struct sw_flow_stats __force *)flow->stats[cpu]);
net/openvswitch/flow_table.c
190
int i, cpu;
net/openvswitch/flow_table.c
200
for_each_possible_cpu(cpu) {
net/openvswitch/flow_table.c
205
stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
net/packet/af_packet.c
1210
int cpu;
net/packet/af_packet.c
1216
for_each_possible_cpu(cpu)
net/packet/af_packet.c
1217
refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
net/packet/af_packet.c
314
int cpu = raw_smp_processor_id();
net/packet/af_packet.c
318
skb->sender_cpu = cpu + 1;
net/packet/af_packet.c
320
skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
net/rds/ib_recv.c
105
int cpu;
net/rds/ib_recv.c
111
for_each_possible_cpu(cpu) {
net/rds/ib_recv.c
112
head = per_cpu_ptr(cache->percpu, cpu);
net/rds/ib_recv.c
140
int cpu;
net/rds/ib_recv.c
142
for_each_possible_cpu(cpu) {
net/rds/ib_recv.c
143
head = per_cpu_ptr(cache->percpu, cpu);
net/rds/ib_stats.c
91
int cpu;
net/rds/ib_stats.c
96
for_each_online_cpu(cpu) {
net/rds/ib_stats.c
97
src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu));
net/rds/page.c
158
unsigned int cpu;
net/rds/page.c
160
for_each_possible_cpu(cpu) {
net/rds/page.c
163
rem = &per_cpu(rds_page_remainders, cpu);
net/rds/page.c
164
rdsdebug("cpu %u\n", cpu);
net/rds/stats.c
119
int cpu;
net/rds/stats.c
129
for_each_online_cpu(cpu) {
net/rds/stats.c
130
src = (uint64_t *)&(per_cpu(rds_stats, cpu));
net/rds/tcp_stats.c
58
int cpu;
net/rds/tcp_stats.c
63
for_each_online_cpu(cpu) {
net/rds/tcp_stats.c
64
src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));
net/sched/cls_basic.c
275
int cpu;
net/sched/cls_basic.c
290
for_each_possible_cpu(cpu) {
net/sched/cls_basic.c
291
struct tc_basic_pcnt *pf = per_cpu_ptr(f->pf, cpu);
net/sched/cls_matchall.c
334
int cpu;
net/sched/cls_matchall.c
355
for_each_possible_cpu(cpu) {
net/sched/cls_matchall.c
356
struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);
net/sched/cls_u32.c
1367
int cpu;
net/sched/cls_u32.c
1424
for_each_possible_cpu(cpu) {
net/sched/cls_u32.c
1426
struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
net/sched/sch_generic.c
452
int cpu;
net/sched/sch_generic.c
454
cpu = smp_processor_id();
net/sched/sch_generic.c
464
__netif_tx_lock(txq, cpu);
net/smc/smc_stats.c
272
int cpu, i, size;
net/smc/smc_stats.c
292
for_each_possible_cpu(cpu) {
net/smc/smc_stats.c
293
src = (u64 *)per_cpu_ptr(net->smc.smc_stats, cpu);
net/sunrpc/svc.c
235
unsigned int cpu;
net/sunrpc/svc.c
242
for_each_online_cpu(cpu) {
net/sunrpc/svc.c
244
m->to_pool[cpu] = pidx;
net/sunrpc/svc.c
245
m->pool_to[pidx] = cpu;
net/sunrpc/svc.c
403
int cpu = raw_smp_processor_id();
net/sunrpc/svc.c
411
pidx = m->to_pool[cpu];
net/sunrpc/svc.c
414
pidx = m->to_pool[cpu_to_node(cpu)];
net/tipc/crypto.c
2013
int i, j, cpu;
net/tipc/crypto.c
2051
for_each_possible_cpu(cpu) {
net/tipc/crypto.c
2053
stat = per_cpu_ptr(tx->stats, cpu)->stat[i];
net/tipc/crypto.c
2065
for_each_possible_cpu(cpu) {
net/tipc/crypto.c
2067
stat = per_cpu_ptr(rx->stats, cpu)->stat[i];
net/tipc/crypto.c
520
int keylen, err, cpu;
net/tipc/crypto.c
575
for_each_possible_cpu(cpu) {
net/tipc/crypto.c
576
*per_cpu_ptr(tmp->tfm_entry, cpu) = head;
net/tipc/crypto.c
629
int cpu;
net/tipc/crypto.c
650
for_each_possible_cpu(cpu) {
net/tipc/crypto.c
651
*per_cpu_ptr(aead->tfm_entry, cpu) =
net/tipc/crypto.c
652
*per_cpu_ptr(src->tfm_entry, cpu);
rust/helpers/cpumask.c
12
void rust_helper___cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
rust/helpers/cpumask.c
14
__cpumask_set_cpu(cpu, dstp);
rust/helpers/cpumask.c
18
void rust_helper_cpumask_clear_cpu(int cpu, struct cpumask *dstp)
rust/helpers/cpumask.c
20
cpumask_clear_cpu(cpu, dstp);
rust/helpers/cpumask.c
24
void rust_helper___cpumask_clear_cpu(int cpu, struct cpumask *dstp)
rust/helpers/cpumask.c
26
__cpumask_clear_cpu(cpu, dstp);
rust/helpers/cpumask.c
30
bool rust_helper_cpumask_test_cpu(int cpu, struct cpumask *srcp)
rust/helpers/cpumask.c
32
return cpumask_test_cpu(cpu, srcp);
rust/helpers/cpumask.c
6
void rust_helper_cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
rust/helpers/cpumask.c
8
cpumask_set_cpu(cpu, dstp);
samples/bpf/cpustat_kern.c
107
u32 key, cpu, pstate_idx;
samples/bpf/cpustat_kern.c
113
cpu = ctx->cpu_id;
samples/bpf/cpustat_kern.c
115
key = cpu * MAP_OFF_NUM + MAP_OFF_CSTATE_TIME;
samples/bpf/cpustat_kern.c
120
key = cpu * MAP_OFF_NUM + MAP_OFF_CSTATE_IDX;
samples/bpf/cpustat_kern.c
125
key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_TIME;
samples/bpf/cpustat_kern.c
130
key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_IDX;
samples/bpf/cpustat_kern.c
175
key = cpu * MAX_PSTATE_ENTRIES + pstate_idx;
samples/bpf/cpustat_kern.c
198
key = cpu * MAX_CSTATE_ENTRIES + prev_state;
samples/bpf/cpustat_kern.c
215
u32 key, cpu, pstate_idx;
samples/bpf/cpustat_kern.c
218
cpu = ctx->cpu_id;
samples/bpf/cpustat_kern.c
220
key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_TIME;
samples/bpf/cpustat_kern.c
225
key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_IDX;
samples/bpf/cpustat_kern.c
230
key = cpu * MAP_OFF_NUM + MAP_OFF_CSTATE_IDX;
samples/bpf/cpustat_kern.c
271
key = cpu * MAX_PSTATE_ENTRIES + pstate_idx;
samples/bpf/lathist_kern.c
31
int cpu = bpf_get_smp_processor_id();
samples/bpf/lathist_kern.c
32
u64 *ts = bpf_map_lookup_elem(&my_map, &cpu);
samples/bpf/lathist_kern.c
75
int key, cpu;
samples/bpf/lathist_kern.c
78
cpu = bpf_get_smp_processor_id();
samples/bpf/lathist_kern.c
79
ts = bpf_map_lookup_elem(&my_map, &cpu);
samples/bpf/lathist_kern.c
89
key = cpu * MAX_ENTRIES + delta;
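The cpustat and lathist samples above flatten a (cpu, bucket) pair into a single array-map index with key = cpu * N + bucket. A minimal BPF-side sketch of that trick; map name, sizes, and attach point are illustrative, not the samples' own:

#include <linux/types.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_CPU     64
#define MAX_BUCKETS 32

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, MAX_CPU * MAX_BUCKETS);
	__type(key, __u32);
	__type(value, __u64);
} hist SEC(".maps");

SEC("tracepoint/sched/sched_switch")
int count_switch(void *ctx)
{
	__u32 cpu = bpf_get_smp_processor_id();
	__u32 key = cpu * MAX_BUCKETS;	/* bucket 0 of this CPU's row */
	__u64 *val = bpf_map_lookup_elem(&hist, &key);

	if (val)
		__sync_fetch_and_add(val, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";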
samples/bpf/map_perf_test.bpf.c
216
int cpu = bpf_get_smp_processor_id();
samples/bpf/map_perf_test.bpf.c
219
&cpu);
samples/bpf/map_perf_test_user.c
120
static void do_test_lru(enum test_type test, int cpu)
samples/bpf/map_perf_test_user.c
129
if (test == INNER_LRU_HASH_PREALLOC && cpu) {
samples/bpf/map_perf_test_user.c
140
assert(cpu < MAX_NR_CPUS);
samples/bpf/map_perf_test_user.c
146
inner_lru_map_fds[cpu] =
samples/bpf/map_perf_test_user.c
152
if (inner_lru_map_fds[cpu] == -1) {
samples/bpf/map_perf_test_user.c
158
ret = bpf_map_update_elem(outer_fd, &cpu,
samples/bpf/map_perf_test_user.c
159
&inner_lru_map_fds[cpu],
samples/bpf/map_perf_test_user.c
163
cpu, strerror(errno), errno);
samples/bpf/map_perf_test_user.c
199
cpu, test_name,
samples/bpf/map_perf_test_user.c
203
static void test_lru_hash_prealloc(int cpu)
samples/bpf/map_perf_test_user.c
205
do_test_lru(LRU_HASH_PREALLOC, cpu);
samples/bpf/map_perf_test_user.c
208
static void test_nocommon_lru_hash_prealloc(int cpu)
samples/bpf/map_perf_test_user.c
210
do_test_lru(NOCOMMON_LRU_HASH_PREALLOC, cpu);
samples/bpf/map_perf_test_user.c
213
static void test_inner_lru_hash_prealloc(int cpu)
samples/bpf/map_perf_test_user.c
215
do_test_lru(INNER_LRU_HASH_PREALLOC, cpu);
samples/bpf/map_perf_test_user.c
218
static void test_lru_hash_lookup(int cpu)
samples/bpf/map_perf_test_user.c
220
do_test_lru(LRU_HASH_LOOKUP, cpu);
samples/bpf/map_perf_test_user.c
223
static void test_percpu_hash_prealloc(int cpu)
samples/bpf/map_perf_test_user.c
232
cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
samples/bpf/map_perf_test_user.c
235
static void test_hash_kmalloc(int cpu)
samples/bpf/map_perf_test_user.c
244
cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
samples/bpf/map_perf_test_user.c
247
static void test_percpu_hash_kmalloc(int cpu)
samples/bpf/map_perf_test_user.c
256
cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
samples/bpf/map_perf_test_user.c
259
static void test_lpm_kmalloc(int cpu)
samples/bpf/map_perf_test_user.c
268
cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
samples/bpf/map_perf_test_user.c
271
static void test_hash_lookup(int cpu)
samples/bpf/map_perf_test_user.c
280
cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
samples/bpf/map_perf_test_user.c
283
static void test_array_lookup(int cpu)
samples/bpf/map_perf_test_user.c
292
cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
samples/bpf/map_perf_test_user.c
300
typedef void (*test_func)(int cpu);
samples/bpf/map_perf_test_user.c
331
static void loop(int cpu)
samples/bpf/map_perf_test_user.c
337
CPU_SET(cpu, &cpuset);
samples/bpf/map_perf_test_user.c
342
test_funcs[i](cpu);
samples/bpf/map_perf_test_user.c
82
static void test_hash_prealloc(int cpu)
samples/bpf/map_perf_test_user.c
91
cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
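map_perf_test_user.c's loop() above pins the benchmark thread to one CPU with CPU_SET before timing. A self-contained userspace sketch of that pinning:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int pin_to_cpu(int cpu)
{
	cpu_set_t cpuset;

	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	/* pid 0 means the calling thread */
	return sched_setaffinity(0, sizeof(cpuset), &cpuset);
}

int main(void)
{
	if (pin_to_cpu(0))
		perror("sched_setaffinity");
	else
		printf("running on CPU %d\n", sched_getcpu());
	return 0;
}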
samples/bpf/trace_event_kern.c
44
u32 cpu = bpf_get_smp_processor_id();
samples/bpf/trace_event_kern.c
57
bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period,
samples/bpf/trace_output_user.c
22
static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size)
samples/bpf/tracex6_user.c
25
static void check_on_cpu(int cpu, struct perf_event_attr *attr)
samples/bpf/tracex6_user.c
34
CPU_SET(cpu, &set);
samples/bpf/tracex6_user.c
37
pmu_fd = sys_perf_event_open(attr, -1/*pid*/, cpu/*cpu*/, -1/*group_fd*/, 0);
samples/bpf/tracex6_user.c
39
fprintf(stderr, "sys_perf_event_open failed on CPU %d\n", cpu);
samples/bpf/tracex6_user.c
43
assert(bpf_map_update_elem(map_fd[0], &cpu, &pmu_fd, BPF_ANY) == 0);
samples/bpf/tracex6_user.c
46
bpf_map_get_next_key(map_fd[1], &cpu, NULL);
samples/bpf/tracex6_user.c
48
if (bpf_map_lookup_elem(map_fd[1], &cpu, &value)) {
samples/bpf/tracex6_user.c
49
fprintf(stderr, "Value missing for CPU %d\n", cpu);
samples/bpf/tracex6_user.c
53
fprintf(stderr, "CPU %d: %llu\n", cpu, value);
samples/bpf/tracex6_user.c
56
if (bpf_map_lookup_elem(map_fd[2], &cpu, &value2)) {
samples/bpf/tracex6_user.c
57
fprintf(stderr, "Value2 missing for CPU %d\n", cpu);
samples/bpf/tracex6_user.c
61
fprintf(stderr, "CPU %d: counter: %llu, enabled: %llu, running: %llu\n", cpu,
samples/bpf/tracex6_user.c
66
assert(bpf_map_delete_elem(map_fd[0], &cpu) == 0 || error);
samples/bpf/tracex6_user.c
69
assert(bpf_map_delete_elem(map_fd[1], &cpu) == 0 || error);
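tracex6_user.c above (like bpftool's profile_open_perf_event() and libbpf's per-CPU buffers further down) opens one perf event per CPU with pid = -1 and an explicit cpu argument, i.e. a system-wide counter on that CPU. A runnable sketch of the raw syscall; permissions depend on perf_event_paranoid:

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_cycles_on_cpu(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid = -1, cpu >= 0: count everything that runs on @cpu */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}

int main(void)
{
	int fd = open_cycles_on_cpu(0);

	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}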
samples/bpf/xdp_sample.bpf.c
131
u32 cpu = bpf_get_smp_processor_id();
samples/bpf/xdp_sample.bpf.c
138
idx = to_cpu * nr_cpus + cpu;
samples/bpf/xdp_sample.bpf.c
160
u32 cpu;
samples/bpf/xdp_sample.bpf.c
165
cpu = bpf_get_smp_processor_id();
samples/bpf/xdp_sample.bpf.c
166
rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &cpu);
samples/bpf/xdp_sample.bpf.c
184
u32 cpu = bpf_get_smp_processor_id();
samples/bpf/xdp_sample.bpf.c
196
idx = key * nr_cpus + cpu;
samples/bpf/xdp_sample.bpf.c
211
u32 cpu;
samples/bpf/xdp_sample.bpf.c
221
cpu = bpf_get_smp_processor_id();
samples/bpf/xdp_sample.bpf.c
222
rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &cpu);
samples/bpf/xdp_sample.bpf.c
68
u32 cpu = bpf_get_smp_processor_id();
samples/bpf/xdp_sample.bpf.c
78
idx = key * nr_cpus + cpu;
samples/bpf/xdp_sample_user.c
1077
struct datarec *rc = &r->cpu[i];
samples/bpf/xdp_sample_user.c
1081
pc = p == &beg ? &p_beg : &p->cpu[i];
samples/bpf/xdp_sample_user.c
334
e->val.cpu = alloc_record_per_cpu();
samples/bpf/xdp_sample_user.c
335
if (!e->val.cpu)
samples/bpf/xdp_sample_user.c
357
rec->cpu[i].processed = READ_ONCE(values[i].processed);
samples/bpf/xdp_sample_user.c
358
rec->cpu[i].dropped = READ_ONCE(values[i].dropped);
samples/bpf/xdp_sample_user.c
359
rec->cpu[i].issue = READ_ONCE(values[i].issue);
samples/bpf/xdp_sample_user.c
360
rec->cpu[i].xdp_pass = READ_ONCE(values[i].xdp_pass);
samples/bpf/xdp_sample_user.c
361
rec->cpu[i].xdp_drop = READ_ONCE(values[i].xdp_drop);
samples/bpf/xdp_sample_user.c
362
rec->cpu[i].xdp_redirect = READ_ONCE(values[i].xdp_redirect);
samples/bpf/xdp_sample_user.c
364
sum_processed += rec->cpu[i].processed;
samples/bpf/xdp_sample_user.c
365
sum_dropped += rec->cpu[i].dropped;
samples/bpf/xdp_sample_user.c
366
sum_issue += rec->cpu[i].issue;
samples/bpf/xdp_sample_user.c
367
sum_xdp_pass += rec->cpu[i].xdp_pass;
samples/bpf/xdp_sample_user.c
368
sum_xdp_drop += rec->cpu[i].xdp_drop;
samples/bpf/xdp_sample_user.c
369
sum_xdp_redirect += rec->cpu[i].xdp_redirect;
samples/bpf/xdp_sample_user.c
460
rec->rx_cnt.cpu = alloc_record_per_cpu();
samples/bpf/xdp_sample_user.c
461
if (!rec->rx_cnt.cpu) {
samples/bpf/xdp_sample_user.c
469
rec->redir_err[i].cpu = alloc_record_per_cpu();
samples/bpf/xdp_sample_user.c
470
if (!rec->redir_err[i].cpu) {
samples/bpf/xdp_sample_user.c
476
free(rec->redir_err[i].cpu);
samples/bpf/xdp_sample_user.c
482
rec->kthread.cpu = alloc_record_per_cpu();
samples/bpf/xdp_sample_user.c
483
if (!rec->kthread.cpu) {
samples/bpf/xdp_sample_user.c
491
rec->exception[i].cpu = alloc_record_per_cpu();
samples/bpf/xdp_sample_user.c
492
if (!rec->exception[i].cpu) {
samples/bpf/xdp_sample_user.c
498
free(rec->exception[i].cpu);
samples/bpf/xdp_sample_user.c
504
rec->devmap_xmit.cpu = alloc_record_per_cpu();
samples/bpf/xdp_sample_user.c
505
if (!rec->devmap_xmit.cpu) {
samples/bpf/xdp_sample_user.c
515
rec->enq[i].cpu = alloc_record_per_cpu();
samples/bpf/xdp_sample_user.c
516
if (!rec->enq[i].cpu) {
samples/bpf/xdp_sample_user.c
522
free(rec->enq[i].cpu);
samples/bpf/xdp_sample_user.c
531
free(rec->devmap_xmit.cpu);
samples/bpf/xdp_sample_user.c
534
free(rec->exception[i].cpu);
samples/bpf/xdp_sample_user.c
536
free(rec->kthread.cpu);
samples/bpf/xdp_sample_user.c
539
free(rec->redir_err[i].cpu);
samples/bpf/xdp_sample_user.c
541
free(rec->rx_cnt.cpu);
samples/bpf/xdp_sample_user.c
554
free(r->enq[i].cpu);
samples/bpf/xdp_sample_user.c
557
free(e->val.cpu);
samples/bpf/xdp_sample_user.c
560
free(r->devmap_xmit.cpu);
samples/bpf/xdp_sample_user.c
562
free(r->exception[i].cpu);
samples/bpf/xdp_sample_user.c
563
free(r->kthread.cpu);
samples/bpf/xdp_sample_user.c
565
free(r->redir_err[i].cpu);
samples/bpf/xdp_sample_user.c
566
free(r->rx_cnt.cpu);
samples/bpf/xdp_sample_user.c
661
struct datarec *r = &rec->cpu[i];
samples/bpf/xdp_sample_user.c
662
struct datarec *p = &prev->cpu[i];
samples/bpf/xdp_sample_user.c
724
struct datarec *r = &rec->cpu[i];
samples/bpf/xdp_sample_user.c
725
struct datarec *p = &prev->cpu[i];
samples/bpf/xdp_sample_user.c
768
struct datarec *r = &rec->cpu[i];
samples/bpf/xdp_sample_user.c
769
struct datarec *p = &prev->cpu[i];
samples/bpf/xdp_sample_user.c
805
struct datarec *r = &rec->cpu[i];
samples/bpf/xdp_sample_user.c
806
struct datarec *p = &prev->cpu[i];
samples/bpf/xdp_sample_user.c
835
struct datarec *r = &rec->cpu[i];
samples/bpf/xdp_sample_user.c
836
struct datarec *p = &prev->cpu[i];
samples/bpf/xdp_sample_user.c
881
struct datarec *r = &rec->cpu[i];
samples/bpf/xdp_sample_user.c
882
struct datarec *p = &prev->cpu[i];
samples/bpf/xdp_sample_user.c
926
struct datarec *r = &rec->cpu[i];
samples/bpf/xdp_sample_user.c
927
struct datarec *p = &prev->cpu[i];
samples/bpf/xdp_sample_user.c
93
struct datarec *cpu;
samples/bpf/xdp_sample_user.c
962
struct datarec *r = &rec->cpu[i];
samples/bpf/xdp_sample_user.c
963
struct datarec *p = &prev->cpu[i];
security/selinux/selinuxfs.c
1536
loff_t cpu;
security/selinux/selinuxfs.c
1538
for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) {
security/selinux/selinuxfs.c
1539
if (!cpu_possible(cpu))
security/selinux/selinuxfs.c
1541
*idx = cpu + 1;
security/selinux/selinuxfs.c
1542
return &per_cpu(avc_cache_stats, cpu);
sound/soc/amd/acp/acp-sdw-legacy-mach.c
251
codec_maps[j].cpu = 0;
sound/soc/amd/acp/acp-sdw-sof-mach.c
183
codec_maps[j].cpu = 0;
sound/soc/atmel/mchp-pdmc.c
644
struct snd_soc_component *cpu = dai->component;
sound/soc/atmel/mchp-pdmc.c
653
snd_soc_component_update_bits(cpu, MCHP_PDMC_MR,
sound/soc/atmel/mchp-pdmc.c
673
snd_soc_component_update_bits(cpu, MCHP_PDMC_MR,
sound/soc/fsl/imx-card.c
542
struct device_node *cpu = NULL;
sound/soc/fsl/imx-card.c
599
cpu = of_get_child_by_name(np, "cpu");
sound/soc/fsl/imx-card.c
600
if (!cpu) {
sound/soc/fsl/imx-card.c
606
ret = snd_soc_of_get_dlc(cpu, &args, link->cpus, 0);
sound/soc/fsl/imx-card.c
743
of_node_put(cpu);
sound/soc/fsl/imx-card.c
747
cpu = NULL;
sound/soc/fsl/imx-card.c
754
of_node_put(cpu);
sound/soc/generic/audio-graph-card.c
104
int *cpu)
sound/soc/generic/audio-graph-card.c
113
if (cpu) {
sound/soc/generic/audio-graph-card.c
121
ret = graph_util_parse_dai(priv, ep, dlc, cpu);
sound/soc/generic/audio-graph-card.c
205
struct device_node *ep = li->cpu ? cpu_ep : codec_ep;
sound/soc/generic/audio-graph-card.c
211
if (li->cpu) {
sound/soc/generic/audio-graph-card.c
383
if (li->cpu ||
sound/soc/generic/audio-graph-card.c
388
if (li->cpu)
sound/soc/generic/audio-graph-card.c
426
for (li->cpu = 1; li->cpu >= 0; li->cpu--) {
sound/soc/generic/audio-graph-card.c
475
if (li->cpu) {
sound/soc/generic/audio-graph-card2.c
577
dai_link->ch_maps[*nm_idx].cpu = cpu_idx;
sound/soc/generic/simple-card-utils.c
718
struct snd_soc_dai_link_component *cpu;
sound/soc/generic/simple-card-utils.c
723
for_each_link_cpus(dai_link, j, cpu)
sound/soc/generic/simple-card-utils.c
724
of_node_put(cpu->of_node);
sound/soc/generic/simple-card.c
147
int *cpu)
sound/soc/generic/simple-card.c
156
if (cpu) {
sound/soc/generic/simple-card.c
164
ret = simple_parse_dai(priv, np, dlc, cpu);
sound/soc/generic/simple-card.c
178
struct device_node *cpu,
sound/soc/generic/simple-card.c
187
struct device_node *node __free(device_node) = of_get_parent(cpu);
sound/soc/generic/simple-card.c
200
graph_util_parse_link_direction(cpu, &playback_only, &capture_only);
sound/soc/generic/simple-card.c
207
of_property_read_u32(cpu, "mclk-fs", &dai_props->mclk_fs);
sound/soc/generic/simple-card.c
208
of_property_read_u32(cpu, PREFIX "mclk-fs", &dai_props->mclk_fs);
sound/soc/generic/simple-card.c
214
graph_util_parse_trigger_order(priv, cpu, &trigger_start, &trigger_stop);
sound/soc/generic/simple-card.c
252
if (li->cpu) {
sound/soc/generic/simple-card.c
319
struct device_node *cpu = NULL;
sound/soc/generic/simple-card.c
325
cpu = np;
sound/soc/generic/simple-card.c
337
ret = simple_parse_node(priv, cpu, li, prefix, &single_cpu);
sound/soc/generic/simple-card.c
355
ret = simple_link_init(priv, cpu, codec, li, prefix, dai_name);
sound/soc/generic/simple-card.c
437
if (li->cpu != (np == codec))
sound/soc/generic/simple-card.c
447
if (li->cpu && (np != codec))
sound/soc/generic/simple-card.c
488
for (li->cpu = 1; li->cpu >= 0; li->cpu--) {
sound/soc/generic/simple-card.c
605
if (li->cpu) {
sound/soc/intel/boards/sof_sdw.c
981
codec_maps[j].cpu = i - 1;
sound/soc/loongson/loongson_card.c
124
struct device_node *cpu, *codec;
sound/soc/loongson/loongson_card.c
129
cpu = of_get_child_by_name(dev->of_node, "cpu");
sound/soc/loongson/loongson_card.c
130
if (!cpu) {
sound/soc/loongson/loongson_card.c
137
of_node_put(cpu);
sound/soc/loongson/loongson_card.c
142
ret = snd_soc_of_get_dlc(cpu, NULL, loongson_dai_links[i].cpus, 0);
sound/soc/loongson/loongson_card.c
156
of_node_put(cpu);
sound/soc/loongson/loongson_card.c
162
of_node_put(cpu);
sound/soc/meson/axg-card.c
312
struct snd_soc_dai_link_component *cpu;
sound/soc/meson/axg-card.c
315
cpu = devm_kzalloc(card->dev, sizeof(*cpu), GFP_KERNEL);
sound/soc/meson/axg-card.c
316
if (!cpu)
sound/soc/meson/axg-card.c
319
dai_link->cpus = cpu;
sound/soc/meson/gx-card.c
83
struct snd_soc_dai_link_component *cpu;
sound/soc/meson/gx-card.c
86
cpu = devm_kzalloc(card->dev, sizeof(*cpu), GFP_KERNEL);
sound/soc/meson/gx-card.c
87
if (!cpu)
sound/soc/meson/gx-card.c
90
dai_link->cpus = cpu;
sound/soc/qcom/common.c
104
cpu = of_get_child_by_name(np, "cpu");
sound/soc/qcom/common.c
108
if (!cpu) {
sound/soc/qcom/common.c
114
ret = snd_soc_of_get_dlc(cpu, &args, link->cpus, 0);
sound/soc/qcom/common.c
165
of_node_put(cpu);
sound/soc/qcom/common.c
177
of_node_put(cpu);
sound/soc/qcom/common.c
31
struct device_node *cpu = NULL;
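The ASoC machine drivers above (qcom, loongson, imx-card, samsung) all resolve a "cpu" child node of the DAI-link DT node and must drop its reference on every exit path. A hedged sketch of that lookup, assuming the snd_soc_of_get_dlc() helper quoted above; exact helper signatures vary across kernel versions:

#include <linux/of.h>
#include <sound/soc.h>

static int demo_parse_cpu(struct device_node *np,
			  struct snd_soc_dai_link *link)
{
	struct device_node *cpu;
	int ret;

	cpu = of_get_child_by_name(np, "cpu");
	if (!cpu)
		return -EINVAL;

	/* fills link->cpus[0] from the node's sound-dai property */
	ret = snd_soc_of_get_dlc(cpu, NULL, link->cpus, 0);

	of_node_put(cpu);	/* of_get_child_by_name() took a reference */
	return ret;
}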
sound/soc/samsung/aries_wm8994.c
540
struct device_node *cpu, *codec, *extcon_np;
sound/soc/samsung/aries_wm8994.c
634
cpu = of_get_child_by_name(dev->of_node, "cpu");
sound/soc/samsung/aries_wm8994.c
635
if (!cpu)
sound/soc/samsung/aries_wm8994.c
654
aries_dai[0].cpus->of_node = of_parse_phandle(cpu,
sound/soc/samsung/aries_wm8994.c
664
aries_dai[2].cpus->of_node = of_parse_phandle(cpu,
sound/soc/samsung/aries_wm8994.c
683
of_node_put(cpu);
sound/soc/samsung/littlemill.c
142
SND_SOC_DAILINK_DEFS(cpu,
sound/soc/samsung/littlemill.c
159
SND_SOC_DAILINK_REG(cpu),
sound/soc/samsung/lowland.c
110
SND_SOC_DAILINK_REG(cpu),
sound/soc/samsung/lowland.c
90
SND_SOC_DAILINK_DEFS(cpu,
sound/soc/samsung/midas_wm1811.c
575
struct device_node *cpu = NULL, *codec = NULL;
sound/soc/samsung/midas_wm1811.c
703
cpu = of_get_child_by_name(dev->of_node, "cpu");
sound/soc/samsung/midas_wm1811.c
704
if (!cpu)
sound/soc/samsung/midas_wm1811.c
709
of_node_put(cpu);
sound/soc/samsung/midas_wm1811.c
713
cpu_dai_node = of_parse_phandle(cpu, "sound-dai", 0);
sound/soc/samsung/midas_wm1811.c
714
of_node_put(cpu);
sound/soc/samsung/odroid.c
200
struct device_node *cpu, *codec;
sound/soc/samsung/odroid.c
241
cpu = of_get_child_by_name(dev->of_node, "cpu");
sound/soc/samsung/odroid.c
251
num_pcms = of_count_phandle_with_args(cpu, "sound-dai",
sound/soc/samsung/odroid.c
260
ret = snd_soc_of_get_dai_name(cpu, &link->cpus->dai_name, i);
sound/soc/samsung/odroid.c
265
cpu_dai = of_parse_phandle(cpu, "sound-dai", 0);
sound/soc/samsung/odroid.c
270
of_node_put(cpu);
sound/soc/samsung/snow.c
128
struct device_node *cpu, *codec;
sound/soc/samsung/snow.c
157
cpu = of_get_child_by_name(dev->of_node, "cpu");
sound/soc/samsung/snow.c
159
if (cpu) {
sound/soc/samsung/snow.c
162
link->cpus->of_node = of_parse_phandle(cpu, "sound-dai", 0);
sound/soc/samsung/snow.c
163
of_node_put(cpu);
sound/soc/samsung/tobermory.c
109
SND_SOC_DAILINK_DEFS(cpu,
sound/soc/samsung/tobermory.c
121
SND_SOC_DAILINK_REG(cpu),
sound/soc/sdw_utils/soc_sdw_cs_amp.c
105
ch_slot[ch_map->cpu] += ch_per_amp;
sound/soc/sdw_utils/soc_sdw_cs_amp.c
97
mask = GENMASK(ch_per_amp - 1, 0) << ch_slot[ch_map->cpu];
sound/soc/soc-core.c
1051
{ .cpu = 0, .codec = 0 },
sound/soc/soc-core.c
1052
{ .cpu = 1, .codec = 1 },
sound/soc/soc-core.c
1053
{ .cpu = 2, .codec = 2 },
sound/soc/soc-core.c
1054
{ .cpu = 3, .codec = 3 },
sound/soc/soc-core.c
1055
{ .cpu = 4, .codec = 4 },
sound/soc/soc-core.c
1056
{ .cpu = 5, .codec = 5 },
sound/soc/soc-core.c
1057
{ .cpu = 6, .codec = 6 },
sound/soc/soc-core.c
1058
{ .cpu = 7, .codec = 7 },
sound/soc/soc-core.c
1061
{ .cpu = 0, .codec = 0 },
sound/soc/soc-core.c
1062
{ .cpu = 0, .codec = 1 },
sound/soc/soc-core.c
1063
{ .cpu = 0, .codec = 2 },
sound/soc/soc-core.c
1064
{ .cpu = 0, .codec = 3 },
sound/soc/soc-core.c
1065
{ .cpu = 0, .codec = 4 },
sound/soc/soc-core.c
1066
{ .cpu = 0, .codec = 5 },
sound/soc/soc-core.c
1067
{ .cpu = 0, .codec = 6 },
sound/soc/soc-core.c
1068
{ .cpu = 0, .codec = 7 },
sound/soc/soc-core.c
1071
{ .cpu = 0, .codec = 0 },
sound/soc/soc-core.c
1072
{ .cpu = 1, .codec = 0 },
sound/soc/soc-core.c
1073
{ .cpu = 2, .codec = 0 },
sound/soc/soc-core.c
1074
{ .cpu = 3, .codec = 0 },
sound/soc/soc-core.c
1075
{ .cpu = 4, .codec = 0 },
sound/soc/soc-core.c
1076
{ .cpu = 5, .codec = 0 },
sound/soc/soc-core.c
1077
{ .cpu = 6, .codec = 0 },
sound/soc/soc-core.c
1078
{ .cpu = 7, .codec = 0 },
sound/soc/soc-core.c
1123
if ((ch_maps->cpu >= dai_link->num_cpus) ||
sound/soc/soc-core.c
1128
ch_maps->cpu, dai_link->num_cpus,
sound/soc/soc-core.c
1134
i, ch_maps->cpu, ch_maps->codec);
sound/soc/soc-core.c
1179
struct snd_soc_dai_link_component *codec, *platform, *cpu;
sound/soc/soc-core.c
1205
for_each_link_cpus(dai_link, i, cpu) {
sound/soc/soc-core.c
1206
snd_soc_rtd_to_cpu(rtd, i) = snd_soc_find_dai(cpu);
sound/soc/soc-core.c
1209
cpu->dai_name);
sound/soc/soc-dapm.c
4491
struct snd_soc_dapm_widget *codec, *cpu;
sound/soc/soc-dapm.c
4494
struct snd_soc_dapm_widget **src[] = { &cpu, &codec };
sound/soc/soc-dapm.c
4495
struct snd_soc_dapm_widget **sink[] = { &codec, &cpu };
sound/soc/soc-dapm.c
4506
cpu = snd_soc_dai_get_widget(cpu_dai, stream_cpu);
sound/soc/soc-dapm.c
4509
if (!cpu || !codec)
sound/soc/soc-dapm.c
4590
cpu_dai = snd_soc_rtd_to_cpu(rtd, ch_maps->cpu);
sound/soc/soc-pcm.c
1147
if (ch_maps->cpu == i)
sound/soc/soc-pcm.c
2826
cpu_dai = snd_soc_rtd_to_cpu(rtd, ch_maps->cpu);
tools/accounting/delaytop.c
167
SORT_FIELD(cpu, c, MODE_DEFAULT),
tools/accounting/delaytop.c
985
TASK_AVG(tasks[i], cpu),
tools/arch/mips/include/uapi/asm/kvm.h
205
__u32 cpu;
tools/bpf/bpftool/map_perf_ring.c
123
.cpu = -1,
tools/bpf/bpftool/map_perf_ring.c
151
ctx.cpu = strtoul(*argv, &endptr, 0);
tools/bpf/bpftool/map_perf_ring.c
178
if (ctx.idx == -1 || ctx.cpu == -1) {
tools/bpf/bpftool/map_perf_ring.c
183
ctx.cpu = 0;
tools/bpf/bpftool/map_perf_ring.c
188
opts.cpus = &ctx.cpu;
tools/bpf/bpftool/map_perf_ring.c
53
int cpu;
tools/bpf/bpftool/map_perf_ring.c
58
print_bpf_output(void *private_data, int cpu, struct perf_event_header *event)
tools/bpf/bpftool/map_perf_ring.c
67
int idx = ctx->all_cpus ? cpu : ctx->idx;
tools/bpf/bpftool/map_perf_ring.c
74
jsonw_uint(json_wtr, cpu);
tools/bpf/bpftool/map_perf_ring.c
96
cpu, idx);
tools/bpf/bpftool/prog.c
2187
__u32 m, cpu, num_cpu = obj->rodata->num_cpu;
tools/bpf/bpftool/prog.c
2207
for (cpu = 0; cpu < num_cpu; cpu++)
tools/bpf/bpftool/prog.c
2208
profile_total_count += counts[cpu];
tools/bpf/bpftool/prog.c
2222
for (cpu = 0; cpu < num_cpu; cpu++) {
tools/bpf/bpftool/prog.c
2223
metrics[m].val.counter += values[cpu].counter;
tools/bpf/bpftool/prog.c
2224
metrics[m].val.enabled += values[cpu].enabled;
tools/bpf/bpftool/prog.c
2225
metrics[m].val.running += values[cpu].running;
tools/bpf/bpftool/prog.c
2364
static int profile_open_perf_event(int mid, int cpu, int map_fd)
tools/bpf/bpftool/prog.c
2369
-1 /*pid*/, cpu, -1 /*group_fd*/, 0);
tools/bpf/bpftool/prog.c
2373
cpu, metrics[mid].name);
tools/bpf/bpftool/prog.c
2394
unsigned int cpu, m;
tools/bpf/bpftool/prog.c
2413
for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
tools/bpf/bpftool/prog.c
2414
if (profile_open_perf_event(m, cpu, map_fd)) {
tools/bpf/bpftool/prog.c
2416
metrics[m].name, cpu);
tools/bpf/bpftool/skeleton/profiler.bpf.c
103
u32 cpu = bpf_get_smp_processor_id();
tools/bpf/bpftool/skeleton/profiler.bpf.c
110
err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
tools/include/linux/coresight-pmu.h
22
#define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) (0x10 + (cpu * 2))
tools/include/uapi/linux/bpf.h
1701
__u32 cpu;
tools/include/uapi/linux/kvm.h
252
__u32 cpu;
tools/lib/api/cpu.c
10
int cpu;
tools/lib/api/cpu.c
12
if (sysfs__read_int("devices/system/cpu/online", &cpu) < 0)
tools/lib/api/cpu.c
16
"devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq", cpu);
tools/lib/bpf/bpf.c
1039
attr.test.cpu = OPTS_GET(opts, cpu, 0);
tools/lib/bpf/bpf.h
691
__u32 cpu;
tools/lib/bpf/libbpf.c
13700
int cpu, int map_key)
tools/lib/bpf/libbpf.c
13710
cpu_buf->cpu = cpu;
tools/lib/bpf/libbpf.c
13713
cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
tools/lib/bpf/libbpf.c
13718
cpu, errstr(err));
tools/lib/bpf/libbpf.c
13729
cpu, errstr(err));
tools/lib/bpf/libbpf.c
13736
cpu, errstr(err));
tools/lib/bpf/libbpf.c
13900
int cpu, map_key;
tools/lib/bpf/libbpf.c
13902
cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
tools/lib/bpf/libbpf.c
13908
if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
tools/lib/bpf/libbpf.c
13911
cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
tools/lib/bpf/libbpf.c
13924
cpu, map_key, cpu_buf->fd,
tools/lib/bpf/libbpf.c
13935
cpu, cpu_buf->fd,
tools/lib/bpf/libbpf.c
13975
return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
tools/lib/bpf/libbpf.c
13982
pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
tools/lib/bpf/libbpf.c
13989
pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
tools/lib/bpf/libbpf.h
1634
typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu,
tools/lib/bpf/libbpf.h
1636
typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
tools/lib/perf/cpumap.c
132
return cpu_a->cpu - cpu_b->cpu;
tools/lib/perf/cpumap.c
153
__perf_cpu_map__cpu(cpus, i).cpu !=
tools/lib/perf/cpumap.c
154
__perf_cpu_map__cpu(cpus, i - 1).cpu) {
tools/lib/perf/cpumap.c
155
RC_CHK_ACCESS(cpus)->map[j++].cpu =
tools/lib/perf/cpumap.c
156
__perf_cpu_map__cpu(cpus, i).cpu;
tools/lib/perf/cpumap.c
212
if (tmp_cpus[i].cpu == (int16_t)start_cpu)
tools/lib/perf/cpumap.c
222
tmp_cpus[nr_cpus++].cpu = (int16_t)start_cpu;
tools/lib/perf/cpumap.c
245
struct perf_cpu_map *perf_cpu_map__new_int(int cpu)
tools/lib/perf/cpumap.c
250
RC_CHK_ACCESS(cpus)->map[0].cpu = cpu;
tools/lib/perf/cpumap.c
263
.cpu = -1
tools/lib/perf/cpumap.c
279
return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
tools/lib/perf/cpumap.c
287
return __perf_cpu_map__nr(map) == 1 && __perf_cpu_map__cpu(map, 0).cpu == -1;
tools/lib/perf/cpumap.c
295
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
tools/lib/perf/cpumap.c
308
if (cpu_at_idx.cpu == cpu.cpu)
tools/lib/perf/cpumap.c
311
if (cpu_at_idx.cpu > cpu.cpu)
tools/lib/perf/cpumap.c
320
bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
tools/lib/perf/cpumap.c
322
return perf_cpu_map__idx(cpus, cpu) != -1;
tools/lib/perf/cpumap.c
340
if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
tools/lib/perf/cpumap.c
348
return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
tools/lib/perf/cpumap.c
353
struct perf_cpu cpu, result = {
tools/lib/perf/cpumap.c
354
.cpu = -1
tools/lib/perf/cpumap.c
358
perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
tools/lib/perf/cpumap.c
359
result = cpu;
tools/lib/perf/cpumap.c
368
.cpu = -1
tools/lib/perf/cpumap.c
388
if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
tools/lib/perf/cpumap.c
390
if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
tools/lib/perf/cpumap.c
433
if (__perf_cpu_map__cpu(*orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
tools/lib/perf/cpumap.c
434
if (__perf_cpu_map__cpu(*orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
tools/lib/perf/cpumap.c
44
RC_CHK_ACCESS(cpus)->map[0].cpu = -1;
tools/lib/perf/cpumap.c
468
if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
tools/lib/perf/cpumap.c
470
else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
tools/lib/perf/cpumap.c
487
if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
tools/lib/perf/cpumap.c
489
else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
tools/lib/perf/cpumap.c
98
RC_CHK_ACCESS(cpus)->map[i].cpu = i;
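The cpumap.c entries above implement libperf's public CPU-map API. A small consumer sketch using only calls quoted in this listing plus perf_cpu_map__put(); treat the exact signatures as version-dependent:

#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new_int(0);
	struct perf_cpu cpu;
	int idx;

	if (!cpus)
		return 1;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("idx %d -> cpu %d\n", idx, cpu.cpu);

	perf_cpu_map__put(cpus);	/* drop the reference */
	return 0;
}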
tools/lib/perf/evlist.c
121
evsel->cpus = perf_cpu_map__new_int(perf_cpu_map__cpu(srcs[i], 0).cpu);
tools/lib/perf/evlist.c
491
static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
tools/lib/perf/evlist.c
493
struct perf_sample_id *sid = SID(evsel, cpu, thread);
tools/lib/perf/evlist.c
496
sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
tools/lib/perf/evlist.c
525
int output, struct perf_cpu cpu)
tools/lib/perf/evlist.c
527
return perf_mmap__mmap(map, mp, output, cpu);
tools/lib/perf/evlist.c
552
int *output, fd, cpu;
tools/lib/perf/evlist.c
557
cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
tools/lib/perf/evlist.c
558
if (cpu == -1)
tools/lib/perf/evlist.c
573
fd = FD(evsel, cpu, thread);
tools/lib/perf/evlist.c
623
if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
tools/lib/perf/evlist.c
626
perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
tools/lib/perf/evlist.c
639
int cpu, thread, idx = 0;
tools/lib/perf/evlist.c
656
for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
tools/lib/perf/evlist.c
660
if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
tools/lib/perf/evlist.c
682
int cpu, thread;
tools/lib/perf/evlist.c
686
for (cpu = 0; cpu < nr_cpus; cpu++) {
tools/lib/perf/evlist.c
691
if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
tools/lib/perf/evsel.c
129
struct perf_cpu cpu;
tools/lib/perf/evsel.c
160
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
tools/lib/perf/evsel.c
176
cpu, group_fd, 0);
tools/lib/perf/evsel.c
276
struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx);
tools/lib/perf/evsel.c
284
ret = perf_mmap__mmap(map, &mp, *fd, cpu);
tools/lib/perf/evsel.c
462
struct perf_cpu cpu __maybe_unused;
tools/lib/perf/evsel.c
466
perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
tools/lib/perf/evsel.c
94
pid_t pid, struct perf_cpu cpu, int group_fd,
tools/lib/perf/evsel.c
97
return syscall(__NR_perf_event_open, attr, pid, cpu.cpu, group_fd, flags);
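
The wrapper above exists because perf_event_open(2) has no libc stub; callers pass pid/cpu/group_fd/flags through syscall(2) directly. A standalone sketch of the same pattern, counting cycles for all tasks on one CPU:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_cycles_on_cpu(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid == -1 with cpu >= 0 measures every task on that CPU;
	 * group_fd == -1 opens the event as a group leader. */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}
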
tools/lib/perf/include/internal/cpumap.h
25
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);
tools/lib/perf/include/internal/evsel.h
56
struct perf_cpu cpu;
tools/lib/perf/include/internal/mmap.h
28
struct perf_cpu cpu;
tools/lib/perf/include/internal/mmap.h
51
int fd, struct perf_cpu cpu);
tools/lib/perf/include/perf/cpumap.h
11
int16_t cpu;
tools/lib/perf/include/perf/cpumap.h
41
LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_int(int cpu);
tools/lib/perf/include/perf/cpumap.h
82
LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
tools/lib/perf/include/perf/cpumap.h
90
#define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \
tools/lib/perf/include/perf/cpumap.h
91
for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \
tools/lib/perf/include/perf/cpumap.h
93
(idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
tools/lib/perf/include/perf/cpumap.h
99
if ((_cpu).cpu != -1)
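
Putting the iterator macro above to use: a minimal sketch that walks the online-CPU map, built only from calls that appear elsewhere in this index (perf_cpu_map__new_online_cpus, perf_cpu_map__put):

#include <stdio.h>
#include <perf/cpumap.h>

static int list_online_cpus(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
	struct perf_cpu cpu;
	int idx;

	if (!cpus)
		return -1;
	/* idx is the dense map index; cpu.cpu is the logical CPU number. */
	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("idx %d -> cpu %d\n", idx, cpu.cpu);
	perf_cpu_map__put(cpus);
	return 0;
}
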
tools/lib/perf/include/perf/event.h
194
__u16 cpu[];
tools/lib/perf/include/perf/event.h
329
__u64 cpu;
tools/lib/perf/include/perf/event.h
358
__u32 cpu;
tools/lib/perf/include/perf/event.h
368
__u32 cpu;
tools/lib/perf/include/perf/event.h
431
__u32 cpu;
tools/lib/perf/include/perf/event.h
520
__u32 cpu;
tools/lib/perf/include/perf/event.h
554
__u32 cpu;
tools/lib/perf/mmap.c
37
int fd, struct perf_cpu cpu)
tools/lib/perf/mmap.c
49
map->cpu = cpu;
tools/lib/perf/tests/test-cpumap.c
17
struct perf_cpu cpu;
tools/lib/perf/tests/test-cpumap.c
36
perf_cpu_map__for_each_cpu(cpu, idx, cpus)
tools/lib/perf/tests/test-cpumap.c
37
__T("wrong cpu number", cpu.cpu != -1);
tools/lib/perf/tests/test-evlist.c
339
struct perf_cpu cpu;
tools/lib/perf/tests/test-evlist.c
378
perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
tools/lib/perf/tests/test-evlist.c
382
CPU_SET(cpu.cpu, &mask);
tools/perf/arch/arm/util/cs-etm.c
103
if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_ETMV3) {
tools/perf/arch/arm/util/cs-etm.c
110
err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2], &trcidr2);
tools/perf/arch/arm/util/cs-etm.c
148
struct perf_cpu cpu)
tools/perf/arch/arm/util/cs-etm.c
158
if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_ETMV3) {
tools/perf/arch/arm/util/cs-etm.c
165
err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0], &trcidr0);
tools/perf/arch/arm/util/cs-etm.c
203
struct perf_cpu cpu;
tools/perf/arch/arm/util/cs-etm.c
220
perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) {
tools/perf/arch/arm/util/cs-etm.c
221
if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_NOT_PRESENT) {
tools/perf/arch/arm/util/cs-etm.c
223
CORESIGHT_ETM_PMU_NAME, cpu.cpu);
tools/perf/arch/arm/util/cs-etm.c
226
err = cs_etm_validate_context_id(cs_etm_pmu, evsel, cpu);
tools/perf/arch/arm/util/cs-etm.c
230
err = cs_etm_validate_timestamp(cs_etm_pmu, evsel, cpu);
tools/perf/arch/arm/util/cs-etm.c
553
struct perf_cpu cpu;
tools/perf/arch/arm/util/cs-etm.c
567
perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) {
tools/perf/arch/arm/util/cs-etm.c
568
enum cs_etm_version v = cs_etm_get_version(cs_etm_pmu, cpu);
tools/perf/arch/arm/util/cs-etm.c
582
static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val)
tools/perf/arch/arm/util/cs-etm.c
588
snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);
tools/perf/arch/arm/util/cs-etm.c
599
static int cs_etm_get_ro_signed(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path,
tools/perf/arch/arm/util/cs-etm.c
607
snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);
tools/perf/arch/arm/util/cs-etm.c
619
static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path)
tools/perf/arch/arm/util/cs-etm.c
624
snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);
tools/perf/arch/arm/util/cs-etm.c
637
static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu)
tools/perf/arch/arm/util/cs-etm.c
641
if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH]))
tools/perf/arch/arm/util/cs-etm.c
644
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH], &trcdevarch);
tools/perf/arch/arm/util/cs-etm.c
652
static __u64 cs_etm_get_legacy_trace_id(struct perf_cpu cpu)
tools/perf/arch/arm/util/cs-etm.c
655
return CORESIGHT_LEGACY_CPU_TRACE_ID(cpu.cpu % 48);
tools/perf/arch/arm/util/cs-etm.c
658
static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu)
tools/perf/arch/arm/util/cs-etm.c
666
data[CS_ETMV4_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu);
tools/perf/arch/arm/util/cs-etm.c
669
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0],
tools/perf/arch/arm/util/cs-etm.c
671
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR1],
tools/perf/arch/arm/util/cs-etm.c
673
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2],
tools/perf/arch/arm/util/cs-etm.c
675
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR8],
tools/perf/arch/arm/util/cs-etm.c
677
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCAUTHSTATUS],
tools/perf/arch/arm/util/cs-etm.c
681
if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]) ||
tools/perf/arch/arm/util/cs-etm.c
682
cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE],
tools/perf/arch/arm/util/cs-etm.c
685
cpu.cpu);
tools/perf/arch/arm/util/cs-etm.c
690
static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu)
tools/perf/arch/arm/util/cs-etm.c
698
data[CS_ETE_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu);
tools/perf/arch/arm/util/cs-etm.c
701
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR0], &data[CS_ETE_TRCIDR0]);
tools/perf/arch/arm/util/cs-etm.c
702
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR1], &data[CS_ETE_TRCIDR1]);
tools/perf/arch/arm/util/cs-etm.c
703
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR2], &data[CS_ETE_TRCIDR2]);
tools/perf/arch/arm/util/cs-etm.c
704
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR8], &data[CS_ETE_TRCIDR8]);
tools/perf/arch/arm/util/cs-etm.c
705
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCAUTHSTATUS],
tools/perf/arch/arm/util/cs-etm.c
708
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH],
tools/perf/arch/arm/util/cs-etm.c
71
static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu);
tools/perf/arch/arm/util/cs-etm.c
712
if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE]) ||
tools/perf/arch/arm/util/cs-etm.c
713
cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE],
tools/perf/arch/arm/util/cs-etm.c
716
cpu.cpu);
tools/perf/arch/arm/util/cs-etm.c
72
static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val);
tools/perf/arch/arm/util/cs-etm.c
721
static void cs_etm_get_metadata(struct perf_cpu cpu, u32 *offset,
tools/perf/arch/arm/util/cs-etm.c
73
static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path);
tools/perf/arch/arm/util/cs-etm.c
730
switch (cs_etm_get_version(cs_etm_pmu, cpu)) {
tools/perf/arch/arm/util/cs-etm.c
733
cs_etm_save_ete_header(&info->priv[*offset], itr, cpu);
tools/perf/arch/arm/util/cs-etm.c
742
cs_etm_save_etmv4_header(&info->priv[*offset], itr, cpu);
tools/perf/arch/arm/util/cs-etm.c
754
info->priv[*offset + CS_ETM_ETMTRACEIDR] = cs_etm_get_legacy_trace_id(cpu);
tools/perf/arch/arm/util/cs-etm.c
756
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER],
tools/perf/arch/arm/util/cs-etm.c
758
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMIDR],
tools/perf/arch/arm/util/cs-etm.c
76
struct perf_cpu cpu)
tools/perf/arch/arm/util/cs-etm.c
775
info->priv[*offset + CS_ETM_CPU] = cpu.cpu;
tools/perf/arch/arm/util/cs-etm.c
78
if (cs_etm_is_ete(cs_etm_pmu, cpu))
tools/perf/arch/arm/util/cs-etm.c
795
struct perf_cpu cpu;
tools/perf/arch/arm/util/cs-etm.c
80
else if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]))
tools/perf/arch/arm/util/cs-etm.c
808
perf_cpu_map__for_each_cpu(cpu, i, event_cpus) {
tools/perf/arch/arm/util/cs-etm.c
809
if (!perf_cpu_map__has(online_cpus, cpu))
tools/perf/arch/arm/util/cs-etm.c
82
else if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER]))
tools/perf/arch/arm/util/cs-etm.c
829
perf_cpu_map__for_each_cpu(cpu, i, cpu_map) {
tools/perf/arch/arm/util/cs-etm.c
831
cs_etm_get_metadata(cpu, &offset, itr, info);
tools/perf/arch/arm/util/cs-etm.c
89
struct perf_cpu cpu)
tools/perf/arch/arm64/util/arm-spe.c
107
cpuid = get_cpuid_allow_env_override(cpu);
tools/perf/arch/arm64/util/arm-spe.c
113
data[ARM_SPE_CPU] = cpu.cpu;
tools/perf/arch/arm64/util/arm-spe.c
118
if (perf_cpu_map__has(sper->arm_spe_pmu->cpus, cpu))
tools/perf/arch/arm64/util/arm-spe.c
153
struct perf_cpu cpu;
tools/perf/arch/arm64/util/arm-spe.c
174
perf_cpu_map__for_each_cpu(cpu, i, cpu_map) {
tools/perf/arch/arm64/util/arm-spe.c
177
ret = arm_spe_save_cpu_header(itr, cpu, data);
tools/perf/arch/arm64/util/arm-spe.c
98
struct perf_cpu cpu, __u64 data[])
tools/perf/arch/arm64/util/header.c
16
static int _get_cpuid(char *buf, size_t sz, struct perf_cpu cpu)
tools/perf/arch/arm64/util/header.c
22
assert(cpu.cpu != -1);
tools/perf/arch/arm64/util/header.c
26
scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d" MIDR, sysfs, cpu.cpu);
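
The MIDR token above is a sysfs path suffix: on arm64 each CPU exposes its identification register at /sys/devices/system/cpu/cpuN/regs/identification/midr_el1. A standalone read sketch under that assumption:

#include <stdio.h>

static int read_midr(int cpu, char *buf, size_t sz)
{
	char path[256];
	FILE *f;
	int ok;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/regs/identification/midr_el1",
		 cpu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	/* The file holds one hex value, e.g. 0x00000000410fd0c0. */
	ok = fgets(buf, sz, f) != NULL;
	fclose(f);
	return ok ? 0 : -1;
}
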
tools/perf/arch/arm64/util/header.c
43
int get_cpuid(char *buf, size_t sz, struct perf_cpu cpu)
tools/perf/arch/arm64/util/header.c
48
if (cpu.cpu != -1)
tools/perf/arch/arm64/util/header.c
49
return _get_cpuid(buf, sz, cpu);
tools/perf/arch/arm64/util/header.c
55
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
tools/perf/arch/arm64/util/header.c
56
int ret = _get_cpuid(buf, sz, cpu);
tools/perf/arch/arm64/util/header.c
64
char *get_cpuid_str(struct perf_cpu cpu)
tools/perf/arch/arm64/util/header.c
73
res = get_cpuid(buf, MIDR_SIZE, cpu);
tools/perf/arch/arm64/util/header.c
75
pr_err("failed to get cpuid string for CPU %d\n", cpu.cpu);
tools/perf/arch/loongarch/util/header.c
73
int get_cpuid(char *buffer, size_t sz, struct perf_cpu cpu __maybe_unused)
tools/perf/arch/loongarch/util/header.c
93
char *get_cpuid_str(struct perf_cpu cpu __maybe_unused)
tools/perf/arch/powerpc/util/header.c
27
get_cpuid(char *buffer, size_t sz, struct perf_cpu cpu __maybe_unused)
tools/perf/arch/powerpc/util/header.c
45
get_cpuid_str(struct perf_cpu cpu __maybe_unused)
tools/perf/arch/riscv/util/header.c
101
get_cpuid_str(struct perf_cpu cpu __maybe_unused)
tools/perf/arch/riscv/util/header.c
84
int get_cpuid(char *buffer, size_t sz, struct perf_cpu cpu __maybe_unused)
tools/perf/arch/s390/util/header.c
140
char *get_cpuid_str(struct perf_cpu cpu)
tools/perf/arch/s390/util/header.c
144
if (buf && get_cpuid(buf, 128, cpu))
tools/perf/arch/s390/util/header.c
30
int get_cpuid(char *buffer, size_t sz, struct perf_cpu cpu __maybe_unused)
tools/perf/arch/x86/tests/amd-ibs-period.c
119
static int sched_affine(int cpu)
tools/perf/arch/x86/tests/amd-ibs-period.c
124
CPU_SET(cpu, &set);
tools/perf/arch/x86/tests/amd-ibs-period.c
230
int cpu, int group_fd, unsigned long flags)
tools/perf/arch/x86/tests/amd-ibs-period.c
232
return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
tools/perf/arch/x86/tests/intel-pt-test.c
322
static int setaffinity(int cpu)
tools/perf/arch/x86/tests/intel-pt-test.c
327
CPU_SET(cpu, &cpu_set);
tools/perf/arch/x86/tests/intel-pt-test.c
329
pr_debug("sched_setaffinity() failed for CPU %d\n", cpu);
tools/perf/arch/x86/tests/intel-pt-test.c
355
static int get_pt_caps(int cpu, struct pt_caps *caps)
tools/perf/arch/x86/tests/intel-pt-test.c
360
if (setaffinity(cpu))
tools/perf/arch/x86/tests/intel-pt-test.c
367
pr_debug("CPU %d CPUID leaf 20 subleaf %d\n", cpu, i);
tools/perf/arch/x86/tests/intel-pt-test.c
390
static int compare_caps(int cpu, struct pt_caps *caps, struct pt_caps *caps0)
tools/perf/arch/x86/tests/intel-pt-test.c
415
cpu, i, j, reg, reg0);
tools/perf/arch/x86/tests/intel-pt-test.c
426
cpu, reg, reg0);
tools/perf/arch/x86/tests/intel-pt-test.c
431
pr_debug("CPU %d OK\n", cpu);
tools/perf/arch/x86/tests/intel-pt-test.c
438
int max_cpu = cpu__max_cpu().cpu;
tools/perf/arch/x86/tests/intel-pt-test.c
442
int cpu;
tools/perf/arch/x86/tests/intel-pt-test.c
452
for (cpu = 1, last_caps = caps0; cpu < max_cpu; cpu++) {
tools/perf/arch/x86/tests/intel-pt-test.c
455
if (get_pt_caps(cpu, &caps)) {
tools/perf/arch/x86/tests/intel-pt-test.c
456
pr_debug("CPU %d not found\n", cpu);
tools/perf/arch/x86/tests/intel-pt-test.c
460
pr_debug("CPU %d same caps as previous CPU\n", cpu);
tools/perf/arch/x86/tests/intel-pt-test.c
463
if (compare_caps(cpu, &caps, &caps0))
tools/perf/arch/x86/util/auxtrace.c
58
struct perf_cpu cpu = perf_cpu_map__min(evlist->core.all_cpus);
tools/perf/arch/x86/util/auxtrace.c
63
ret = get_cpuid(buffer, sizeof(buffer), cpu);
tools/perf/arch/x86/util/header.c
61
get_cpuid(char *buffer, size_t sz, struct perf_cpu cpu __maybe_unused)
tools/perf/arch/x86/util/header.c
66
char *get_cpuid_str(struct perf_cpu cpu __maybe_unused)
tools/perf/arch/x86/util/pmu.c
197
cpu_adjust[pmu_snc] = perf_cpu_map__cpu(node_cpus, 0).cpu;
tools/perf/arch/x86/util/pmu.c
229
struct perf_cpu cpu;
tools/perf/arch/x86/util/pmu.c
233
if (perf_cpu_map__cpu(pmu->cpus, 0).cpu != 0) {
tools/perf/arch/x86/util/pmu.c
253
perf_cpu_map__for_each_cpu(cpu, idx, pmu->cpus) {
tools/perf/arch/x86/util/pmu.c
258
RC_CHK_ACCESS(adjusted[pmu_snc])->map[idx].cpu = cpu.cpu + cpu_adjust;
tools/perf/arch/x86/util/pmu.c
260
cpu_adjust = perf_cpu_map__cpu(adjusted[pmu_snc], idx).cpu - cpu.cpu;
tools/perf/arch/x86/util/pmu.c
263
assert(perf_cpu_map__cpu(adjusted[pmu_snc], idx).cpu ==
tools/perf/arch/x86/util/pmu.c
264
cpu.cpu + cpu_adjust);
tools/perf/bench/epoll-ctl.c
223
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
tools/perf/bench/epoll-ctl.c
235
nrcpus = cpu__max_cpu().cpu;
tools/perf/bench/epoll-ctl.c
264
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
tools/perf/bench/epoll-ctl.c
318
struct perf_cpu_map *cpu;
tools/perf/bench/epoll-ctl.c
333
cpu = perf_cpu_map__new_online_cpus();
tools/perf/bench/epoll-ctl.c
334
if (!cpu)
tools/perf/bench/epoll-ctl.c
350
nthreads = perf_cpu_map__nr(cpu);
tools/perf/bench/epoll-ctl.c
379
do_threads(worker, cpu);
tools/perf/bench/epoll-ctl.c
424
perf_cpu_map__put(cpu);
tools/perf/bench/epoll-wait.c
292
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
tools/perf/bench/epoll-wait.c
312
nrcpus = cpu__max_cpu().cpu;
tools/perf/bench/epoll-wait.c
353
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
tools/perf/bench/epoll-wait.c
437
struct perf_cpu_map *cpu;
tools/perf/bench/epoll-wait.c
452
cpu = perf_cpu_map__new_online_cpus();
tools/perf/bench/epoll-wait.c
453
if (!cpu)
tools/perf/bench/epoll-wait.c
474
nthreads = perf_cpu_map__nr(cpu) - 1;
tools/perf/bench/epoll-wait.c
502
do_threads(worker, cpu);
tools/perf/bench/epoll-wait.c
557
perf_cpu_map__put(cpu);
tools/perf/bench/futex-hash.c
135
struct perf_cpu_map *cpu;
tools/perf/bench/futex-hash.c
145
cpu = perf_cpu_map__new_online_cpus();
tools/perf/bench/futex-hash.c
146
if (!cpu)
tools/perf/bench/futex-hash.c
160
params.nthreads = perf_cpu_map__nr(cpu);
tools/perf/bench/futex-hash.c
182
nrcpus = cpu__max_cpu().cpu;
tools/perf/bench/futex-hash.c
195
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
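
The CPU_SET_S line above is one instance of an idiom repeated across the bench code in this index: a dynamically sized cpu_set_t (CPU_ALLOC, sized for nrcpus) with worker i pinned to map entry i modulo the map size. A hedged sketch; the helper name and its parameters are illustrative, not from the source:

#define _GNU_SOURCE
#include <sched.h>
#include <pthread.h>
#include <perf/cpumap.h>

static int pin_worker(pthread_attr_t *attr, struct perf_cpu_map *cpus,
		      int i, int nrcpus)
{
	size_t size = CPU_ALLOC_SIZE(nrcpus);
	cpu_set_t *cpuset = CPU_ALLOC(nrcpus);
	int ret;

	if (!cpuset)
		return -1;
	CPU_ZERO_S(size, cpuset);
	/* Round-robin: worker i lands on map entry i % nr. */
	CPU_SET_S(perf_cpu_map__cpu(cpus, i % perf_cpu_map__nr(cpus)).cpu,
		  size, cpuset);
	ret = pthread_attr_setaffinity_np(attr, size, cpuset);
	CPU_FREE(cpuset);
	return ret;
}
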
tools/perf/bench/futex-hash.c
252
free(cpu);
tools/perf/bench/futex-lock-pi.c
124
static void create_threads(struct worker *w, struct perf_cpu_map *cpu)
tools/perf/bench/futex-lock-pi.c
128
int nrcpus = cpu__max_cpu().cpu;
tools/perf/bench/futex-lock-pi.c
151
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
tools/perf/bench/futex-lock-pi.c
172
struct perf_cpu_map *cpu;
tools/perf/bench/futex-lock-pi.c
178
cpu = perf_cpu_map__new_online_cpus();
tools/perf/bench/futex-lock-pi.c
179
if (!cpu)
tools/perf/bench/futex-lock-pi.c
193
params.nthreads = perf_cpu_map__nr(cpu);
tools/perf/bench/futex-lock-pi.c
214
create_threads(worker, cpu);
tools/perf/bench/futex-lock-pi.c
252
perf_cpu_map__put(cpu);
tools/perf/bench/futex-requeue.c
127
static void block_threads(pthread_t *w, struct perf_cpu_map *cpu)
tools/perf/bench/futex-requeue.c
131
int nrcpus = cpu__max_cpu().cpu;
tools/perf/bench/futex-requeue.c
146
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
tools/perf/bench/futex-requeue.c
174
struct perf_cpu_map *cpu;
tools/perf/bench/futex-requeue.c
180
cpu = perf_cpu_map__new_online_cpus();
tools/perf/bench/futex-requeue.c
181
if (!cpu)
tools/perf/bench/futex-requeue.c
195
params.nthreads = perf_cpu_map__nr(cpu);
tools/perf/bench/futex-requeue.c
228
block_threads(worker, cpu);
tools/perf/bench/futex-requeue.c
314
perf_cpu_map__put(cpu);
tools/perf/bench/futex-wake-parallel.c
151
static void block_threads(pthread_t *w, struct perf_cpu_map *cpu)
tools/perf/bench/futex-wake-parallel.c
155
int nrcpus = cpu__max_cpu().cpu;
tools/perf/bench/futex-wake-parallel.c
170
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
tools/perf/bench/futex-wake-parallel.c
252
struct perf_cpu_map *cpu;
tools/perf/bench/futex-wake-parallel.c
271
cpu = perf_cpu_map__new_online_cpus();
tools/perf/bench/futex-wake-parallel.c
272
if (!cpu)
tools/perf/bench/futex-wake-parallel.c
276
params.nthreads = perf_cpu_map__nr(cpu);
tools/perf/bench/futex-wake-parallel.c
318
block_threads(blocked_worker, cpu);
tools/perf/bench/futex-wake-parallel.c
353
perf_cpu_map__put(cpu);
tools/perf/bench/futex-wake.c
101
static void block_threads(pthread_t *w, struct perf_cpu_map *cpu)
tools/perf/bench/futex-wake.c
106
int nrcpus = cpu__max_cpu().cpu;
tools/perf/bench/futex-wake.c
119
CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
tools/perf/bench/futex-wake.c
147
struct perf_cpu_map *cpu;
tools/perf/bench/futex-wake.c
155
cpu = perf_cpu_map__new_online_cpus();
tools/perf/bench/futex-wake.c
156
if (!cpu)
tools/perf/bench/futex-wake.c
170
params.nthreads = perf_cpu_map__nr(cpu);
tools/perf/bench/futex-wake.c
195
block_threads(worker, cpu);
tools/perf/bench/futex-wake.c
239
perf_cpu_map__put(cpu);
tools/perf/bench/numa.c
1087
int cpu;
tools/perf/bench/numa.c
1105
cpu = td->curr_cpu;
tools/perf/bench/numa.c
1108
if (cpu < 0)
tools/perf/bench/numa.c
1111
node = numa_node_of_cpu(cpu);
tools/perf/bench/numa.c
1489
int cpu;
tools/perf/bench/numa.c
1498
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
tools/perf/bench/numa.c
1499
CPU_SET_S(cpu, cpuset_size, td->bind_cpumask);
tools/perf/bench/numa.c
256
int cpu;
tools/perf/bench/numa.c
260
for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
tools/perf/bench/numa.c
261
if (numa_bitmask_isbitset(cpumask, cpu)) {
tools/perf/bench/numa.c
293
int cpu;
tools/perf/bench/numa.c
295
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
tools/perf/bench/numa.c
296
CPU_SET_S(cpu, size, mask);
tools/perf/bench/numa.c
324
int cpu;
tools/perf/bench/numa.c
341
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
tools/perf/bench/numa.c
342
CPU_SET_S(cpu, size, mask);
tools/perf/bench/numa.c
350
for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
tools/perf/bench/numa.c
351
if (numa_bitmask_isbitset(cpumask, cpu))
tools/perf/bench/numa.c
352
CPU_SET_S(cpu, size, mask);
tools/perf/bench/numa.c
546
static int is_cpu_online(unsigned int cpu)
tools/perf/bench/numa.c
555
"/sys/devices/system/cpu/cpu%d", cpu);
tools/perf/bench/numa.c
567
"/sys/devices/system/cpu/cpu%d/online", cpu);
tools/perf/bench/numa.c
578
"devices/system/cpu/cpu%d/online", cpu);
tools/perf/bench/numa.c
678
int cpu;
tools/perf/bench/numa.c
697
for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
tools/perf/bench/numa.c
698
if (cpu < 0 || cpu >= g->p.nr_cpus) {
tools/perf/bench/numa.c
702
CPU_SET_S(cpu, size, td->bind_cpumask);
tools/perf/bench/numa.c
961
unsigned int cpu;
tools/perf/bench/numa.c
963
cpu = sched_getcpu();
tools/perf/bench/numa.c
965
g->threads[task_nr].curr_cpu = cpu;
tools/perf/builtin-annotate.c
302
if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
tools/perf/builtin-c2c.c
2314
struct perf_cpu cpu;
tools/perf/builtin-c2c.c
2353
perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
tools/perf/builtin-c2c.c
2354
__set_bit(cpu.cpu, set);
tools/perf/builtin-c2c.c
2356
if (WARN_ONCE(cpu2node[cpu.cpu] != -1, "node/cpu topology bug"))
tools/perf/builtin-c2c.c
2359
cpu2node[cpu.cpu] = node;
tools/perf/builtin-c2c.c
240
if (WARN_ONCE(sample->cpu == (unsigned int) -1,
tools/perf/builtin-c2c.c
244
__set_bit(sample->cpu, c2c_he->cpuset);
tools/perf/builtin-c2c.c
388
int cpu = sample->cpu == (unsigned int) -1 ? 0 : sample->cpu;
tools/perf/builtin-c2c.c
389
int node = c2c.cpu2node[cpu];
tools/perf/builtin-diff.c
419
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) {
tools/perf/builtin-ftrace.c
395
last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu;
tools/perf/builtin-inject.c
1262
if (!sid || sid->cpu.cpu == -1)
tools/perf/builtin-inject.c
1265
ret = guest_session__map_id(gs, id, host_id, sid->cpu.cpu);
tools/perf/builtin-inject.c
1344
sid->cpu.cpu = -1;
tools/perf/builtin-inject.c
1347
sid->vcpu.cpu = vcpu;
tools/perf/builtin-inject.c
1436
.cpu = -1,
tools/perf/builtin-inject.c
1809
if (sample->cpu != (u32)-1) {
tools/perf/builtin-inject.c
1810
if (sample->cpu >= gs->vcpu_cnt) {
tools/perf/builtin-inject.c
1812
sample->cpu);
tools/perf/builtin-inject.c
1816
sample->cpu = gs->vcpu[sample->cpu].cpu;
tools/perf/builtin-inject.c
1954
if (sample->cpu == (u32)-1) {
tools/perf/builtin-inject.c
1964
gs->vcpu[vcpu].cpu = sample->cpu;
tools/perf/builtin-inject.c
71
u32 cpu;
tools/perf/builtin-kmem.c
126
data->alloc_cpu = cpu;
tools/perf/builtin-kmem.c
181
if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
tools/perf/builtin-kmem.c
204
node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu});
tools/perf/builtin-kmem.c
257
if ((short)sample->cpu != s_alloc->alloc_cpu) {
tools/perf/builtin-kmem.c
758
.cpu = sample->cpu,
tools/perf/builtin-kmem.c
88
int bytes_req, int bytes_alloc, int cpu)
tools/perf/builtin-kvm.c
1173
struct perf_cpu cpu = {-1};
tools/perf/builtin-kvm.c
1175
err = get_cpuid(buf, sizeof(buf), cpu);
tools/perf/builtin-kwork.c
1014
work->cpu = sample->cpu;
tools/perf/builtin-kwork.c
1144
work->cpu = sample->cpu;
tools/perf/builtin-kwork.c
1246
work->cpu = sample->cpu;
tools/perf/builtin-kwork.c
1310
work->cpu = sample->cpu;
tools/perf/builtin-kwork.c
1371
ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
tools/perf/builtin-kwork.c
160
if (l->cpu < r->cpu)
tools/perf/builtin-kwork.c
162
if (l->cpu > r->cpu)
tools/perf/builtin-kwork.c
2011
BUG_ON(work->cpu >= MAX_NR_CPUS);
tools/perf/builtin-kwork.c
2012
stat->cpus_runtime[work->cpu].total += work->total_runtime;
tools/perf/builtin-kwork.c
2024
stat->cpus_runtime[work->cpu].idle += work->total_runtime;
tools/perf/builtin-kwork.c
2036
stat->cpus_runtime[work->cpu].irq += work->total_runtime;
tools/perf/builtin-kwork.c
2039
stat->cpus_runtime[work->cpu].softirq += work->total_runtime;
tools/perf/builtin-kwork.c
2058
work->id, work->cpu);
tools/perf/builtin-kwork.c
2087
__set_bit(work->cpu, stat->all_cpus_bitmap);
tools/perf/builtin-kwork.c
2092
stat->cpus_runtime[work->cpu].total;
tools/perf/builtin-kwork.c
2106
stat->cpus_runtime[work->cpu].load += work->total_runtime;
tools/perf/builtin-kwork.c
2116
int cpu;
tools/perf/builtin-kwork.c
2134
cpu = data->cpu;
tools/perf/builtin-kwork.c
2136
data->id == 0 ? cpu : -1);
tools/perf/builtin-kwork.c
373
work->cpu = key->cpu;
tools/perf/builtin-kwork.c
423
int cpu = work->cpu;
tools/perf/builtin-kwork.c
427
if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
tools/perf/builtin-kwork.c
548
u64 id, int cpu)
tools/perf/builtin-kwork.c
556
if ((cpu != -1 && work->id == id && work->cpu == cpu) ||
tools/perf/builtin-kwork.c
557
(cpu == -1 && work->id == id))
tools/perf/builtin-kwork.c
67
if (l->cpu > r->cpu)
tools/perf/builtin-kwork.c
69
if (l->cpu < r->cpu)
tools/perf/builtin-kwork.c
764
printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
tools/perf/builtin-kwork.c
928
work->id, work->cpu);
tools/perf/builtin-record.c
1089
test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) {
tools/perf/builtin-record.c
1093
thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
tools/perf/builtin-record.c
1098
thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
tools/perf/builtin-record.c
3698
struct perf_cpu cpu;
tools/perf/builtin-record.c
3704
perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpus) {
tools/perf/builtin-record.c
3706
if ((unsigned long)cpu.cpu > mask->nbits)
tools/perf/builtin-record.c
3708
__set_bit(cpu.cpu, mask->bits);
tools/perf/builtin-record.c
3772
ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu);
tools/perf/builtin-record.c
3780
__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
tools/perf/builtin-record.c
3781
__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
tools/perf/builtin-record.c
3802
ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu().cpu);
tools/perf/builtin-record.c
3814
ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);
tools/perf/builtin-record.c
3820
ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
tools/perf/builtin-record.c
3886
ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
tools/perf/builtin-record.c
4049
ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);
tools/perf/builtin-report.c
307
if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
tools/perf/builtin-sched.c
1147
int cpu = sample->cpu, err = -1;
tools/perf/builtin-sched.c
1150
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
tools/perf/builtin-sched.c
1152
timestamp0 = sched->cpu_last_switched[cpu];
tools/perf/builtin-sched.c
1153
sched->cpu_last_switched[cpu] = timestamp;
tools/perf/builtin-sched.c
1216
int cpu = sample->cpu, err = -1;
tools/perf/builtin-sched.c
1221
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
tools/perf/builtin-sched.c
1583
struct perf_cpu cpu = {
tools/perf/builtin-sched.c
1584
.cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
tools/perf/builtin-sched.c
1586
struct thread *curr_thread = sched->curr_thread[cpu.cpu];
tools/perf/builtin-sched.c
1587
struct thread *curr_out_thread = sched->curr_out_thread[cpu.cpu];
tools/perf/builtin-sched.c
1597
if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
tools/perf/builtin-sched.c
1600
if (cpu.cpu == this_cpu.cpu)
tools/perf/builtin-sched.c
1603
color_fprintf(stdout, cpu.cpu != this_cpu.cpu ? color : cpu_color, "%c", symbol);
tools/perf/builtin-sched.c
1605
thread_to_check = sched_out ? sched->curr_out_thread[cpu.cpu] :
tools/perf/builtin-sched.c
1606
sched->curr_thread[cpu.cpu];
tools/perf/builtin-sched.c
1614
if (cpu.cpu == this_cpu.cpu)
tools/perf/builtin-sched.c
1617
curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
tools/perf/builtin-sched.c
1640
.cpu = sample->cpu,
tools/perf/builtin-sched.c
1650
BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
tools/perf/builtin-sched.c
1652
if (this_cpu.cpu > sched->max_cpu.cpu)
tools/perf/builtin-sched.c
1657
if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
tools/perf/builtin-sched.c
1662
cpus_nr = sched->max_cpu.cpu;
tools/perf/builtin-sched.c
1664
timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
tools/perf/builtin-sched.c
1665
sched->cpu_last_switched[this_cpu.cpu] = timestamp;
tools/perf/builtin-sched.c
1685
thread__put(sched->curr_thread[this_cpu.cpu]);
tools/perf/builtin-sched.c
1686
thread__put(sched->curr_out_thread[this_cpu.cpu]);
tools/perf/builtin-sched.c
1688
sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
tools/perf/builtin-sched.c
1689
sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out);
tools/perf/builtin-sched.c
1762
color_fprintf(stdout, color, " (CPU %d)", this_cpu.cpu);
tools/perf/builtin-sched.c
1771
tr = thread__get_runtime(sched->curr_out_thread[this_cpu.cpu]);
tools/perf/builtin-sched.c
1799
int this_cpu = sample->cpu, err = 0;
tools/perf/builtin-sched.c
1998
static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
tools/perf/builtin-sched.c
2005
if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
tools/perf/builtin-sched.c
2006
int i, n = __roundup_pow_of_two(cpu+1);
tools/perf/builtin-sched.c
2020
r->last_time[cpu] = timestamp;
tools/perf/builtin-sched.c
2024
static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
tools/perf/builtin-sched.c
2028
if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
tools/perf/builtin-sched.c
2031
return r->last_time[cpu];
tools/perf/builtin-sched.c
2089
u32 ncpus = sched->max_cpu.cpu + 1;
tools/perf/builtin-sched.c
2173
u32 max_cpus = sched->max_cpu.cpu + 1;
tools/perf/builtin-sched.c
2178
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
tools/perf/builtin-sched.c
2182
printf("%15s [%04d] ", tstr, sample->cpu);
tools/perf/builtin-sched.c
2191
if (i == sample->cpu)
tools/perf/builtin-sched.c
2452
static struct thread *get_idle_thread(int cpu)
tools/perf/builtin-sched.c
2458
if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
tools/perf/builtin-sched.c
2459
int i, j = __roundup_pow_of_two(cpu+1);
tools/perf/builtin-sched.c
2474
if (idle_threads[cpu] == NULL) {
tools/perf/builtin-sched.c
2475
idle_threads[cpu] = thread__new(0, 0);
tools/perf/builtin-sched.c
2477
if (idle_threads[cpu]) {
tools/perf/builtin-sched.c
2478
if (init_idle_thread(idle_threads[cpu]) < 0)
tools/perf/builtin-sched.c
2483
return thread__get(idle_threads[cpu]);
tools/perf/builtin-sched.c
2510
thread = get_idle_thread(sample->cpu);
tools/perf/builtin-sched.c
2512
pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
tools/perf/builtin-sched.c
2528
idle = get_idle_thread(sample->cpu);
tools/perf/builtin-sched.c
2530
pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
tools/perf/builtin-sched.c
2615
printf("%15s [%04d] ", tstr, sample->cpu);
tools/perf/builtin-sched.c
2617
printf(" %*s ", sched->max_cpu.cpu + 1, "");
tools/perf/builtin-sched.c
2688
max_cpus = sched->max_cpu.cpu + 1;
tools/perf/builtin-sched.c
2703
printf("%15s [%04d] ", tstr, sample->cpu);
tools/perf/builtin-sched.c
2711
c = (i == sample->cpu) ? 'm' : ' ';
tools/perf/builtin-sched.c
2774
thread = get_idle_thread(sample->cpu);
tools/perf/builtin-sched.c
2829
tprev = evsel__get_time(evsel, sample->cpu);
tools/perf/builtin-sched.c
2860
if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
tools/perf/builtin-sched.c
2919
evsel__save_time(evsel, sample->time, sample->cpu);
tools/perf/builtin-sched.c
2944
printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
tools/perf/builtin-sched.c
3175
printf(" (x %d)\n", sched->max_cpu.cpu);
tools/perf/builtin-sched.c
3193
.cpu = sample->cpu,
tools/perf/builtin-sched.c
3196
if (this_cpu.cpu > sched->max_cpu.cpu)
tools/perf/builtin-sched.c
3366
sched->max_cpu.cpu = env->nr_cpus_online;
tools/perf/builtin-sched.c
3367
if (sched->max_cpu.cpu == 0)
tools/perf/builtin-sched.c
3368
sched->max_cpu.cpu = 4;
tools/perf/builtin-sched.c
3369
if (init_idle_threads(sched->max_cpu.cpu))
tools/perf/builtin-sched.c
3559
sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
tools/perf/builtin-sched.c
3562
sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
tools/perf/builtin-sched.c
4272
cd_info1 = cd_map1[cs1->cpu];
tools/perf/builtin-sched.c
4275
cd_info2 = cd_map2[cs2->cpu];
tools/perf/builtin-sched.c
4280
if (cs2 && cs1->cpu != cs2->cpu) {
tools/perf/builtin-sched.c
4295
printf("CPU: %d\n", cs1->cpu);
tools/perf/builtin-sched.c
4326
cs1->cpu, dinfo1->dname);
tools/perf/builtin-sched.c
4329
cs1->cpu, dinfo1->domain);
tools/perf/builtin-sched.c
4397
this_cpu.cpu = event->schedstat_cpu.cpu;
tools/perf/builtin-sched.c
4400
this_cpu.cpu = event->schedstat_domain.cpu;
tools/perf/builtin-sched.c
4421
if (!list_empty(&cpu_head) && temp->cpu_data->cpu == initial_cpu)
tools/perf/builtin-sched.c
4426
initial_cpu = temp->cpu_data->cpu;
tools/perf/builtin-sched.c
4431
if (temp->cpu_data->cpu == initial_cpu) {
tools/perf/builtin-sched.c
4716
nr = cpu__max_present_cpu().cpu;
tools/perf/builtin-sched.c
860
int cpu = sample->cpu;
tools/perf/builtin-sched.c
866
if (cpu >= MAX_CPUS || cpu < 0)
tools/perf/builtin-sched.c
869
timestamp0 = sched->cpu_last_switched[cpu];
tools/perf/builtin-sched.c
886
sched->cpu_last_switched[cpu] = timestamp;
tools/perf/builtin-script.c
1311
u8 cpumode, int cpu, struct symbol **lastsym,
tools/perf/builtin-script.c
1323
al.cpu = cpu;
tools/perf/builtin-script.c
1375
x.cpu = sample->cpu;
tools/perf/builtin-script.c
1388
x.cpumode, x.cpu, &lastsym, evsel, fp);
tools/perf/builtin-script.c
1419
printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, evsel, fp);
tools/perf/builtin-script.c
1470
printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, evsel, fp);
tools/perf/builtin-script.c
1576
size_t depth = thread_stack__depth(thread, sample->cpu);
tools/perf/builtin-script.c
2271
struct perf_cpu cpu)
tools/perf/builtin-script.c
2273
return aggr_cpu_id__global(cpu, /*data=*/NULL);
tools/perf/builtin-script.c
2331
cpu_map_idx = perf_cpu_map__idx(evsel->core.cpus, (struct perf_cpu){sample->cpu});
tools/perf/builtin-script.c
2334
if (perf_cpu_map__cpu(evsel->core.cpus, /*idx=*/0).cpu == -1 ||
tools/perf/builtin-script.c
2335
sample->cpu == (u32)-1) {
tools/perf/builtin-script.c
2339
pr_info("Missing CPU map entry for CPU %d\n", sample->cpu);
tools/perf/builtin-script.c
2380
int depth = thread_stack__depth(thread, sample->cpu);
tools/perf/builtin-script.c
2462
event_format__fprintf(tp_format, sample->cpu,
tools/perf/builtin-script.c
2572
struct perf_cpu cpu;
tools/perf/builtin-script.c
2582
perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) {
tools/perf/builtin-script.c
2588
cpu.cpu,
tools/perf/builtin-script.c
2635
if (cpu_list && sample->cpu != (u32)-1)
tools/perf/builtin-script.c
2636
return !test_bit(sample->cpu, cpu_bitmap);
tools/perf/builtin-script.c
2895
sample->cpu = 0;
tools/perf/builtin-script.c
867
printed += fprintf(fp, "%3d ", sample->cpu);
tools/perf/builtin-script.c
869
printed += fprintf(fp, "[%03d] ", sample->cpu);
tools/perf/builtin-stat.c
1271
static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
tools/perf/builtin-stat.c
1281
id = perf_cpu_map__min(cpu_map).cpu;
tools/perf/builtin-stat.c
1283
id = cpu.cpu;
tools/perf/builtin-stat.c
1298
static int cpu__get_cache_details(struct perf_cpu cpu, struct perf_cache *cache)
tools/perf/builtin-stat.c
1308
ret = build_caches_for_cpu(cpu.cpu, caches, &caches_cnt);
tools/perf/builtin-stat.c
1337
cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map);
tools/perf/builtin-stat.c
1347
cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
tools/perf/builtin-stat.c
1369
static struct aggr_cpu_id aggr_cpu_id__cache(struct perf_cpu cpu, void *data)
tools/perf/builtin-stat.c
1375
id = aggr_cpu_id__die(cpu, data);
tools/perf/builtin-stat.c
1379
ret = cpu__get_cache_details(cpu, &cache);
tools/perf/builtin-stat.c
1402
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1404
return aggr_cpu_id__socket(cpu, /*data=*/NULL);
tools/perf/builtin-stat.c
1408
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1410
return aggr_cpu_id__die(cpu, /*data=*/NULL);
tools/perf/builtin-stat.c
1414
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1416
return aggr_cpu_id__cache(cpu, /*data=*/NULL);
tools/perf/builtin-stat.c
1420
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1422
return aggr_cpu_id__cluster(cpu, /*data=*/NULL);
tools/perf/builtin-stat.c
1426
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1428
return aggr_cpu_id__core(cpu, /*data=*/NULL);
tools/perf/builtin-stat.c
1432
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1434
return aggr_cpu_id__node(cpu, /*data=*/NULL);
tools/perf/builtin-stat.c
1438
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1440
return aggr_cpu_id__global(cpu, /*data=*/NULL);
tools/perf/builtin-stat.c
1444
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1446
return aggr_cpu_id__cpu(cpu, /*data=*/NULL);
tools/perf/builtin-stat.c
1450
aggr_get_id_t get_id, struct perf_cpu cpu)
tools/perf/builtin-stat.c
1455
if (cpu.cpu == -1 || cpu.cpu >= config->cpus_aggr_map->nr)
tools/perf/builtin-stat.c
1456
return get_id(config, cpu);
tools/perf/builtin-stat.c
1458
if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
tools/perf/builtin-stat.c
1459
config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu);
tools/perf/builtin-stat.c
1461
id = config->cpus_aggr_map->map[cpu.cpu];
tools/perf/builtin-stat.c
1466
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1468
return perf_stat__get_aggr(config, perf_stat__get_socket, cpu);
tools/perf/builtin-stat.c
1472
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1474
return perf_stat__get_aggr(config, perf_stat__get_die, cpu);
tools/perf/builtin-stat.c
1478
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1480
return perf_stat__get_aggr(config, perf_stat__get_cluster, cpu);
tools/perf/builtin-stat.c
1484
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1486
return perf_stat__get_aggr(config, perf_stat__get_cache_id, cpu);
tools/perf/builtin-stat.c
1490
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1492
return perf_stat__get_aggr(config, perf_stat__get_core, cpu);
tools/perf/builtin-stat.c
1496
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1498
return perf_stat__get_aggr(config, perf_stat__get_node, cpu);
tools/perf/builtin-stat.c
1502
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1504
return perf_stat__get_aggr(config, perf_stat__get_global, cpu);
tools/perf/builtin-stat.c
1508
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1510
return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu);
tools/perf/builtin-stat.c
1603
nr = perf_cpu_map__max(evsel_list->core.all_cpus).cpu + 1;
tools/perf/builtin-stat.c
1621
static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data)
tools/perf/builtin-stat.c
1626
if (cpu.cpu != -1)
tools/perf/builtin-stat.c
1627
id.socket = env->cpu[cpu.cpu].socket_id;
tools/perf/builtin-stat.c
1632
static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data)
tools/perf/builtin-stat.c
1637
if (cpu.cpu != -1) {
tools/perf/builtin-stat.c
1643
id.socket = env->cpu[cpu.cpu].socket_id;
tools/perf/builtin-stat.c
1644
id.die = env->cpu[cpu.cpu].die_id;
tools/perf/builtin-stat.c
1650
static void perf_env__get_cache_id_for_cpu(struct perf_cpu cpu, struct perf_env *env,
tools/perf/builtin-stat.c
1676
map_contains_cpu = perf_cpu_map__idx(cpu_map, cpu);
tools/perf/builtin-stat.c
1681
id->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
tools/perf/builtin-stat.c
1687
static struct aggr_cpu_id perf_env__get_cache_aggr_by_cpu(struct perf_cpu cpu,
tools/perf/builtin-stat.c
1693
if (cpu.cpu != -1) {
tools/perf/builtin-stat.c
1696
id.socket = env->cpu[cpu.cpu].socket_id;
tools/perf/builtin-stat.c
1697
id.die = env->cpu[cpu.cpu].die_id;
tools/perf/builtin-stat.c
1698
perf_env__get_cache_id_for_cpu(cpu, env, cache_level, &id);
tools/perf/builtin-stat.c
1704
static struct aggr_cpu_id perf_env__get_cluster_aggr_by_cpu(struct perf_cpu cpu,
tools/perf/builtin-stat.c
1710
if (cpu.cpu != -1) {
tools/perf/builtin-stat.c
1711
id.socket = env->cpu[cpu.cpu].socket_id;
tools/perf/builtin-stat.c
1712
id.die = env->cpu[cpu.cpu].die_id;
tools/perf/builtin-stat.c
1713
id.cluster = env->cpu[cpu.cpu].cluster_id;
tools/perf/builtin-stat.c
1719
static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data)
tools/perf/builtin-stat.c
1724
if (cpu.cpu != -1) {
tools/perf/builtin-stat.c
1729
id.socket = env->cpu[cpu.cpu].socket_id;
tools/perf/builtin-stat.c
1730
id.die = env->cpu[cpu.cpu].die_id;
tools/perf/builtin-stat.c
1731
id.cluster = env->cpu[cpu.cpu].cluster_id;
tools/perf/builtin-stat.c
1732
id.core = env->cpu[cpu.cpu].core_id;
tools/perf/builtin-stat.c
1738
static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data)
tools/perf/builtin-stat.c
1743
if (cpu.cpu != -1) {
tools/perf/builtin-stat.c
1749
id.socket = env->cpu[cpu.cpu].socket_id;
tools/perf/builtin-stat.c
1750
id.die = env->cpu[cpu.cpu].die_id;
tools/perf/builtin-stat.c
1751
id.core = env->cpu[cpu.cpu].core_id;
tools/perf/builtin-stat.c
1752
id.cpu = cpu;
tools/perf/builtin-stat.c
1758
static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data)
tools/perf/builtin-stat.c
1762
id.node = perf_env__numa_node(data, cpu);
tools/perf/builtin-stat.c
1766
static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused,
tools/perf/builtin-stat.c
1772
id.cpu = (struct perf_cpu){ .cpu = 0 };
tools/perf/builtin-stat.c
1777
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1779
return perf_env__get_socket_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
tools/perf/builtin-stat.c
1782
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1784
return perf_env__get_die_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
tools/perf/builtin-stat.c
1788
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1790
return perf_env__get_cluster_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
tools/perf/builtin-stat.c
1794
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1796
return perf_env__get_cache_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
tools/perf/builtin-stat.c
1800
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1802
return perf_env__get_core_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
tools/perf/builtin-stat.c
1806
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1808
return perf_env__get_cpu_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
tools/perf/builtin-stat.c
1812
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1814
return perf_env__get_node_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
tools/perf/builtin-stat.c
1818
struct perf_cpu cpu)
tools/perf/builtin-stat.c
1820
return perf_env__get_global_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
tools/perf/builtin-stat.c
275
struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx);
tools/perf/builtin-stat.c
277
return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
tools/perf/builtin-stat.c
361
cpu_map_idx).cpu,
tools/perf/builtin-timechart.c
1029
svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
tools/perf/builtin-timechart.c
1038
svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
tools/perf/builtin-timechart.c
1124
svg_process(sample->cpu,
tools/perf/builtin-timechart.c
1277
svg_running(Y, sample->cpu,
tools/perf/builtin-timechart.c
1282
svg_blocked(Y, sample->cpu,
tools/perf/builtin-timechart.c
1287
svg_waiting(Y, sample->cpu,
tools/perf/builtin-timechart.c
148
int cpu;
tools/perf/builtin-timechart.c
182
int cpu;
tools/perf/builtin-timechart.c
281
unsigned int cpu, u64 start, u64 end,
tools/perf/builtin-timechart.c
301
sample->cpu = cpu;
tools/perf/builtin-timechart.c
357
static void c_state_start(int cpu, u64 timestamp, int state)
tools/perf/builtin-timechart.c
359
cpus_cstate_start_times[cpu] = timestamp;
tools/perf/builtin-timechart.c
360
cpus_cstate_state[cpu] = state;
tools/perf/builtin-timechart.c
363
static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
tools/perf/builtin-timechart.c
370
pwr->state = cpus_cstate_state[cpu];
tools/perf/builtin-timechart.c
371
pwr->start_time = cpus_cstate_start_times[cpu];
tools/perf/builtin-timechart.c
373
pwr->cpu = cpu;
tools/perf/builtin-timechart.c
380
static struct power_event *p_state_end(struct timechart *tchart, int cpu,
tools/perf/builtin-timechart.c
388
pwr->state = cpus_pstate_state[cpu];
tools/perf/builtin-timechart.c
389
pwr->start_time = cpus_pstate_start_times[cpu];
tools/perf/builtin-timechart.c
391
pwr->cpu = cpu;
tools/perf/builtin-timechart.c
401
static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
tools/perf/builtin-timechart.c
408
pwr = p_state_end(tchart, cpu, timestamp);
tools/perf/builtin-timechart.c
412
cpus_pstate_state[cpu] = new_freq;
tools/perf/builtin-timechart.c
413
cpus_pstate_start_times[cpu] = timestamp;
tools/perf/builtin-timechart.c
425
static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
tools/perf/builtin-timechart.c
451
pid_put_sample(tchart, p->pid, p->current->state, cpu,
tools/perf/builtin-timechart.c
458
static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
tools/perf/builtin-timechart.c
469
pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
tools/perf/builtin-timechart.c
474
pid_put_sample(tchart, next_pid, p->current->state, cpu,
tools/perf/builtin-timechart.c
637
sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
tools/perf/builtin-timechart.c
651
sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
tools/perf/builtin-timechart.c
676
c_state_end(tchart, sample->cpu, sample->time);
tools/perf/builtin-timechart.c
700
u64 cpu;
tools/perf/builtin-timechart.c
703
for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
tools/perf/builtin-timechart.c
710
pwr->state = cpus_cstate_state[cpu];
tools/perf/builtin-timechart.c
711
pwr->start_time = cpus_cstate_start_times[cpu];
tools/perf/builtin-timechart.c
713
pwr->cpu = cpu;
tools/perf/builtin-timechart.c
721
pwr = p_state_end(tchart, cpu, tchart->last_time);
tools/perf/builtin-trace.c
2732
sample->pid, sample->tid, sample->cpu);
tools/perf/builtin-trace.c
3317
event_format__fprintf(tp_format, sample->cpu,
tools/perf/builtin-trace.c
3645
sample->cpu, sample->raw_size);
tools/perf/dlfilters/dlfilter-show-cycles.c
108
__s32 cpu = sample->cpu;
tools/perf/dlfilters/dlfilter-show-cycles.c
114
if (cpu >= 0 && cpu < MAX_CPU) {
tools/perf/dlfilters/dlfilter-show-cycles.c
115
print_vals(cycles[cpu][pos], cycles[cpu][pos] - cycles_rpt[cpu][pos]);
tools/perf/dlfilters/dlfilter-show-cycles.c
116
cycles_rpt[cpu][pos] = cycles[cpu][pos];
tools/perf/dlfilters/dlfilter-show-cycles.c
82
__s32 cpu = sample->cpu;
tools/perf/dlfilters/dlfilter-show-cycles.c
91
if (cpu >= 0 && cpu < MAX_CPU)
tools/perf/dlfilters/dlfilter-show-cycles.c
92
cycles[cpu][pos] += sample->cyc_cnt;
tools/perf/dlfilters/dlfilter-test-api-v0.c
196
.cpu = 31,
tools/perf/dlfilters/dlfilter-test-api-v0.c
212
CHECK_SAMPLE(cpu);
tools/perf/dlfilters/dlfilter-test-api-v0.c
53
__s32 cpu;
tools/perf/dlfilters/dlfilter-test-api-v2.c
211
.cpu = 31,
tools/perf/dlfilters/dlfilter-test-api-v2.c
227
CHECK_SAMPLE(cpu);
tools/perf/dlfilters/dlfilter-test-api-v2.c
59
__s32 cpu;
tools/perf/include/perf/perf_dlfilter.h
54
__s32 cpu;
tools/perf/perf-sys.h
14
pid_t pid, int cpu, int group_fd,
tools/perf/perf-sys.h
17
return syscall(__NR_perf_event_open, attr, pid, cpu,
tools/perf/pmu-events/empty-pmu-events.c
3146
static const struct pmu_events_map *map_for_cpu(struct perf_cpu cpu)
tools/perf/pmu-events/empty-pmu-events.c
3150
struct perf_cpu cpu;
tools/perf/pmu-events/empty-pmu-events.c
3161
if (has_last_result && last_result.cpu.cpu == cpu.cpu)
tools/perf/pmu-events/empty-pmu-events.c
3164
cpuid = get_cpuid_allow_env_override(cpu);
tools/perf/pmu-events/empty-pmu-events.c
3195
last_result.cpu = cpu;
tools/perf/pmu-events/empty-pmu-events.c
3203
struct perf_cpu cpu = {-1};
tools/perf/pmu-events/empty-pmu-events.c
3217
cpu = perf_cpu_map__min(pmu->cpus);
tools/perf/pmu-events/empty-pmu-events.c
3219
return map_for_cpu(cpu);
tools/perf/pmu-events/empty-pmu-events.c
3260
struct perf_cpu cpu = {-1};
tools/perf/pmu-events/empty-pmu-events.c
3261
const struct pmu_events_map *map = map_for_cpu(cpu);
tools/perf/tests/bitmap.c
20
struct perf_cpu cpu;
tools/perf/tests/bitmap.c
22
perf_cpu_map__for_each_cpu(cpu, i, map)
tools/perf/tests/bitmap.c
23
__set_bit(cpu.cpu, bm);
tools/perf/tests/cpumap.c
42
TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 0);
tools/perf/tests/cpumap.c
44
TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, i - 1).cpu == i);
tools/perf/tests/cpumap.c
64
TEST_ASSERT_VAL("wrong cpu", data->cpus_data.cpu[0] == 1);
tools/perf/tests/cpumap.c
65
TEST_ASSERT_VAL("wrong cpu", data->cpus_data.cpu[1] == 256);
tools/perf/tests/cpumap.c
69
TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 1);
tools/perf/tests/cpumap.c
70
TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 1).cpu == 256);
tools/perf/tests/cpumap.c
95
TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 1);
tools/perf/tests/cpumap.c
96
TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__max(map).cpu == 256);
tools/perf/tests/dlfilter-test.c
181
.cpu = 31,
tools/perf/tests/event_update.c
73
TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 0).cpu == 1);
tools/perf/tests/event_update.c
74
TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 1).cpu == 2);
tools/perf/tests/event_update.c
75
TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 2).cpu == 3);
tools/perf/tests/expr.c
79
struct perf_cpu cpu = {-1};
tools/perf/tests/expr.c
80
char *cpuid = get_cpuid_allow_env_override(cpu);
tools/perf/tests/hists_cumulate.c
169
#define CPU(he) (he->cpu)
tools/perf/tests/hists_output.c
134
#define CPU(he) (he->cpu)
tools/perf/tests/hists_output.c
18
u32 cpu;
tools/perf/tests/hists_output.c
29
{ .cpu = 0, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
tools/perf/tests/hists_output.c
31
{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
tools/perf/tests/hists_output.c
33
{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
tools/perf/tests/hists_output.c
35
{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
tools/perf/tests/hists_output.c
37
{ .cpu = 2, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
tools/perf/tests/hists_output.c
39
{ .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
tools/perf/tests/hists_output.c
41
{ .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
tools/perf/tests/hists_output.c
43
{ .cpu = 3, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
tools/perf/tests/hists_output.c
45
{ .cpu = 0, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
tools/perf/tests/hists_output.c
47
{ .cpu = 1, .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
tools/perf/tests/hists_output.c
67
sample.cpu = fake_samples[i].cpu;
tools/perf/tests/mem2node.c
32
struct perf_cpu cpu;
tools/perf/tests/mem2node.c
35
perf_cpu_map__for_each_cpu(cpu, i, map)
tools/perf/tests/mem2node.c
36
__set_bit(cpu.cpu, bm);
tools/perf/tests/mmap-basic.c
65
CPU_SET(perf_cpu_map__cpu(cpus, 0).cpu, &cpu_set);
tools/perf/tests/mmap-basic.c
69
perf_cpu_map__cpu(cpus, 0).cpu,
tools/perf/tests/openat-syscall-all-cpus.c
110
expected, cpu.cpu, perf_counts(evsel->counts, idx, 0)->val);
tools/perf/tests/openat-syscall-all-cpus.c
26
struct perf_cpu cpu;
tools/perf/tests/openat-syscall-all-cpus.c
64
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
tools/perf/tests/openat-syscall-all-cpus.c
72
if (cpu.cpu >= CPU_SETSIZE) {
tools/perf/tests/openat-syscall-all-cpus.c
73
pr_debug("Ignoring CPU %d\n", cpu.cpu);
tools/perf/tests/openat-syscall-all-cpus.c
77
CPU_SET(cpu.cpu, &cpu_set);
tools/perf/tests/openat-syscall-all-cpus.c
80
cpu.cpu,
tools/perf/tests/openat-syscall-all-cpus.c
88
CPU_CLR(cpu.cpu, &cpu_set);
tools/perf/tests/openat-syscall-all-cpus.c
95
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
tools/perf/tests/openat-syscall-all-cpus.c
98
if (cpu.cpu >= CPU_SETSIZE)
tools/perf/tests/perf-record.c
137
cpu = err;
tools/perf/tests/perf-record.c
20
int i, cpu = -1;
tools/perf/tests/perf-record.c
21
int nrcpus = cpu__max_cpu().cpu;
tools/perf/tests/perf-record.c
213
pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
tools/perf/tests/perf-record.c
225
if (sample.cpu != cpu) {
tools/perf/tests/perf-record.c
227
name, cpu, sample.cpu);
tools/perf/tests/perf-record.c
28
if (errno == EINVAL && nrcpus < (cpu__max_cpu().cpu << 8)) {
tools/perf/tests/perf-record.c
38
if (cpu == -1)
tools/perf/tests/perf-record.c
39
cpu = i;
tools/perf/tests/perf-record.c
45
return cpu;
tools/perf/tests/perf-record.c
57
int nrcpus = cpu__max_cpu().cpu;
tools/perf/tests/perf-record.c
73
u32 cpu;
tools/perf/tests/sample-parsing.c
256
.cpu = 110,
tools/perf/tests/sample-parsing.c
73
COMP(cpu);
tools/perf/tests/stat.c
74
TEST_ASSERT_VAL("wrong cpu", st->cpu == 1);
tools/perf/tests/stat.c
92
!perf_event__synthesize_stat(NULL, (struct perf_cpu){.cpu = 1}, 2, 3,
tools/perf/tests/switch-tracking.c
109
if (cpu >= switch_tracking->nr_tids) {
tools/perf/tests/switch-tracking.c
132
int cpu, err;
tools/perf/tests/switch-tracking.c
145
cpu = sample.cpu;
tools/perf/tests/switch-tracking.c
147
cpu, prev_tid, next_tid);
tools/perf/tests/switch-tracking.c
148
err = check_cpu(switch_tracking, cpu);
tools/perf/tests/switch-tracking.c
155
if (switch_tracking->tids[cpu] != -1 &&
tools/perf/tests/switch-tracking.c
156
switch_tracking->tids[cpu] != prev_tid) {
tools/perf/tests/switch-tracking.c
161
switch_tracking->tids[cpu] = next_tid;
tools/perf/tests/switch-tracking.c
92
static int check_cpu(struct switch_tracking *switch_tracking, int cpu)
tools/perf/tests/switch-tracking.c
94
int i, nr = cpu + 1;
tools/perf/tests/switch-tracking.c
96
if (cpu < 0)
tools/perf/tests/topology.c
101
if (!env->cpu && strncmp(env->arch, "s390", 4) && strncmp(env->arch, "aarch64", 7))
tools/perf/tests/topology.c
115
TEST_ASSERT_VAL("Session header CPU map not set", env->cpu);
tools/perf/tests/topology.c
118
cpu.cpu = i;
tools/perf/tests/topology.c
119
if (!perf_cpu_map__has(map, cpu))
tools/perf/tests/topology.c
122
env->cpu[i].core_id,
tools/perf/tests/topology.c
123
env->cpu[i].socket_id);
tools/perf/tests/topology.c
127
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/tests/topology.c
128
id = aggr_cpu_id__cpu(cpu, NULL);
tools/perf/tests/topology.c
130
cpu.cpu == id.cpu.cpu);
tools/perf/tests/topology.c
133
env->cpu[cpu.cpu].core_id == id.core);
tools/perf/tests/topology.c
135
env->cpu[cpu.cpu].socket_id == id.socket);
tools/perf/tests/topology.c
138
env->cpu[cpu.cpu].die_id == id.die);
tools/perf/tests/topology.c
144
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/tests/topology.c
145
id = aggr_cpu_id__core(cpu, NULL);
tools/perf/tests/topology.c
147
env->cpu[cpu.cpu].core_id == id.core);
tools/perf/tests/topology.c
150
env->cpu[cpu.cpu].socket_id == id.socket);
tools/perf/tests/topology.c
153
env->cpu[cpu.cpu].die_id == id.die);
tools/perf/tests/topology.c
159
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/tests/topology.c
160
id = aggr_cpu_id__die(cpu, NULL);
tools/perf/tests/topology.c
162
env->cpu[cpu.cpu].socket_id == id.socket);
tools/perf/tests/topology.c
165
env->cpu[cpu.cpu].die_id == id.die);
tools/perf/tests/topology.c
169
TEST_ASSERT_VAL("Die map - CPU is set", id.cpu.cpu == -1);
tools/perf/tests/topology.c
174
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/tests/topology.c
175
id = aggr_cpu_id__socket(cpu, NULL);
tools/perf/tests/topology.c
177
env->cpu[cpu.cpu].socket_id == id.socket);
tools/perf/tests/topology.c
182
TEST_ASSERT_VAL("Socket map - CPU is set", id.cpu.cpu == -1);
tools/perf/tests/topology.c
187
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/tests/topology.c
188
id = aggr_cpu_id__node(cpu, NULL);
tools/perf/tests/topology.c
190
cpu__get_node(cpu) == id.node);
tools/perf/tests/topology.c
194
TEST_ASSERT_VAL("Node map - CPU is set", id.cpu.cpu == -1);
tools/perf/tests/topology.c
72
struct perf_cpu cpu;
tools/perf/ui/browsers/res_sample.c
52
res_samples[i].cpu, res_samples[i].tid) < 0) {
tools/perf/ui/browsers/res_sample.c
81
r->cpu >= 0 ? "--cpu " : "",
tools/perf/ui/browsers/res_sample.c
82
r->cpu >= 0 ? (sprintf(cpubuf, "%d", r->cpu), cpubuf) : "",
tools/perf/util/addr_location.c
17
al->cpu = 0;
tools/perf/util/addr_location.h
21
s32 cpu;
tools/perf/util/affinity.c
15
int sz = cpu__max_cpu().cpu + 8 - 1;
tools/perf/util/affinity.c
49
void affinity__set(struct affinity *a, int cpu)
tools/perf/util/affinity.c
58
if (cpu == -1 || ((cpu >= (cpu_set_size * 8))))
tools/perf/util/affinity.c
62
__set_bit(cpu, a->sched_cpus);
tools/perf/util/affinity.c
69
__clear_bit(cpu, a->sched_cpus);
tools/perf/util/affinity.c
92
struct perf_cpu cpu;
tools/perf/util/affinity.c
98
perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpumap)
tools/perf/util/affinity.c
99
__set_bit(cpu.cpu, cpuset);
tools/perf/util/affinity.h
15
void affinity__set(struct affinity *a, int cpu);
tools/perf/util/annotate.c
609
sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
tools/perf/util/arm-spe.c
115
int cpu;
tools/perf/util/arm-spe.c
1220
if (queue->cpu != -1)
tools/perf/util/arm-spe.c
1221
speq->cpu = queue->cpu;
tools/perf/util/arm-spe.c
1371
int cpu;
tools/perf/util/arm-spe.c
1378
cpu = sample->cpu;
tools/perf/util/arm-spe.c
1383
return machine__set_current_tid(spe->machine, cpu, pid, tid);
tools/perf/util/arm-spe.c
1644
unsigned int i, cpu, hdr_size, cpu_num, cpu_size;
tools/perf/util/arm-spe.c
1664
for (cpu = 0; cpu < cpu_num; cpu++) {
tools/perf/util/arm-spe.c
246
speq->cpu = -1;
tools/perf/util/arm-spe.c
291
tid = machine__get_current_tid(spe->machine, speq->cpu);
tools/perf/util/arm-spe.c
305
if (queue->cpu == -1)
tools/perf/util/arm-spe.c
306
speq->cpu = thread__cpu(speq->thread);
tools/perf/util/arm-spe.c
313
int err = machine__set_current_tid(spe->machine, speq->cpu, -1, tid);
tools/perf/util/arm-spe.c
323
static u64 *arm_spe__get_metadata_by_cpu(struct arm_spe *spe, int cpu)
tools/perf/util/arm-spe.c
331
if (cpu < 0) {
tools/perf/util/arm-spe.c
344
if (spe->metadata[i][ARM_SPE_CPU] == (u64)cpu)
tools/perf/util/arm-spe.c
381
sample->cpu = speq->cpu;
tools/perf/util/arm-spe.c
941
arm_spe__get_metadata_by_cpu(spe, speq->cpu);
tools/perf/util/arm-spe.c
978
metadata = arm_spe__get_metadata_by_cpu(spe, speq->cpu);
tools/perf/util/auxtrace.c
1161
buffer.cpu = sid->cpu;
tools/perf/util/auxtrace.c
1278
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
tools/perf/util/auxtrace.c
1289
auxtrace_error->cpu = cpu;
tools/perf/util/auxtrace.c
1309
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
tools/perf/util/auxtrace.c
1312
auxtrace_synth_guest_error(auxtrace_error, type, code, cpu, pid, tid,
tools/perf/util/auxtrace.c
1437
event->auxtrace.tid, event->auxtrace.cpu);
tools/perf/util/auxtrace.c
146
mm->cpu = mp->cpu.cpu;
tools/perf/util/auxtrace.c
1799
e->cpu, e->pid, e->tid, e->ip, e->code, msg);
tools/perf/util/auxtrace.c
2000
ev.auxtrace.cpu = mm->cpu;
tools/perf/util/auxtrace.c
204
mp->cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
tools/perf/util/auxtrace.c
207
mp->cpu.cpu = -1;
tools/perf/util/auxtrace.c
273
queue_array[i].cpu = queues->queue_array[i].cpu;
tools/perf/util/auxtrace.c
324
queue->cpu = buffer->cpu.cpu;
tools/perf/util/auxtrace.c
371
static bool filter_cpu(struct perf_session *session, struct perf_cpu cpu)
tools/perf/util/auxtrace.c
375
return cpu_bitmap && cpu.cpu != -1 && !test_bit(cpu.cpu, cpu_bitmap);
tools/perf/util/auxtrace.c
386
if (filter_cpu(session, buffer->cpu))
tools/perf/util/auxtrace.c
431
.cpu = { event->auxtrace.cpu },
tools/perf/util/auxtrace.h
252
struct perf_cpu cpu;
tools/perf/util/auxtrace.h
277
int cpu;
tools/perf/util/auxtrace.h
341
int cpu;
tools/perf/util/auxtrace.h
367
struct perf_cpu cpu;
tools/perf/util/auxtrace.h
604
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
tools/perf/util/auxtrace.h
608
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
tools/perf/util/bpf_counter.c
322
(struct perf_cpu){.cpu = bpf_cpu});
tools/perf/util/bpf_counter.c
339
int cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx).cpu;
tools/perf/util/bpf_counter.c
347
&cpu, &fd, BPF_ANY);
tools/perf/util/bpf_counter.c
627
key = perf_cpu_map__cpu(evsel->core.cpus, i).cpu;
tools/perf/util/bpf_counter.c
658
int cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx).cpu;
tools/perf/util/bpf_counter.c
661
&cpu, &fd, BPF_ANY);
tools/perf/util/bpf_counter.c
670
struct perf_cpu cpu;
tools/perf/util/bpf_counter.c
673
perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus)
tools/perf/util/bpf_counter.c
674
bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu.cpu);
tools/perf/util/bpf_counter.c
694
__u32 num_cpu_bpf = cpu__max_cpu().cpu;
tools/perf/util/bpf_counter.c
706
__u32 cpu;
tools/perf/util/bpf_counter.c
717
counts->val = values[entry.cpu].counter;
tools/perf/util/bpf_counter.c
718
counts->ena = values[entry.cpu].enabled;
tools/perf/util/bpf_counter.c
719
counts->run = values[entry.cpu].running;
tools/perf/util/bpf_counter.c
723
cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu;
tools/perf/util/bpf_counter.c
724
assert(cpu >= 0);
tools/perf/util/bpf_counter.c
726
counts->val = values[cpu].counter;
tools/perf/util/bpf_counter.c
727
counts->ena = values[cpu].enabled;
tools/perf/util/bpf_counter.c
728
counts->run = values[cpu].running;
tools/perf/util/bpf_counter.c
737
for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
tools/perf/util/bpf_counter.c
738
counts->val += values[cpu].counter;
tools/perf/util/bpf_counter.c
739
counts->ena += values[cpu].enabled;
tools/perf/util/bpf_counter.c
740
counts->run += values[cpu].running;
tools/perf/util/bpf_counter.c
83
int bperf_trigger_reading(int prog_fd, int cpu)
tools/perf/util/bpf_counter.c
89
.cpu = cpu,
tools/perf/util/bpf_counter.h
34
int bperf_trigger_reading(int prog_fd, int cpu);
tools/perf/util/bpf_counter.h
67
int cpu __maybe_unused,
tools/perf/util/bpf_counter_cgroup.c
102
struct perf_cpu cpu;
tools/perf/util/bpf_counter_cgroup.c
103
int total_cpus = cpu__max_cpu().cpu;
tools/perf/util/bpf_counter_cgroup.c
131
perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
tools/perf/util/bpf_counter_cgroup.c
158
perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
tools/perf/util/bpf_counter_cgroup.c
160
__u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
tools/perf/util/bpf_counter_cgroup.c
236
struct perf_cpu cpu;
tools/perf/util/bpf_counter_cgroup.c
240
perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
tools/perf/util/bpf_counter_cgroup.c
241
bperf_trigger_reading(prog_fd, cpu.cpu);
tools/perf/util/bpf_counter_cgroup.c
271
int total_cpus = cpu__max_cpu().cpu;
tools/perf/util/bpf_counter_cgroup.c
290
struct perf_cpu cpu;
tools/perf/util/bpf_counter_cgroup.c
299
perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
tools/perf/util/bpf_counter_cgroup.c
301
counts->val = values[cpu.cpu].counter;
tools/perf/util/bpf_counter_cgroup.c
302
counts->ena = values[cpu.cpu].enabled;
tools/perf/util/bpf_counter_cgroup.c
303
counts->run = values[cpu.cpu].running;
tools/perf/util/bpf_counter_cgroup.c
45
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))
tools/perf/util/bpf_counter_cgroup.c
49
int map_size, total_cpus = cpu__max_cpu().cpu;
tools/perf/util/bpf_ftrace.c
174
int ncpus = cpu__max_cpu().cpu;
tools/perf/util/bpf_ftrace.c
84
u32 cpu;
tools/perf/util/bpf_ftrace.c
90
cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
tools/perf/util/bpf_ftrace.c
91
bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
tools/perf/util/bpf_kwork.c
153
struct perf_cpu cpu;
tools/perf/util/bpf_kwork.c
168
perf_cpu_map__for_each_cpu(cpu, idx, map) {
tools/perf/util/bpf_kwork.c
171
if (cpu.cpu >= nr_cpus) {
tools/perf/util/bpf_kwork.c
173
pr_err("Requested cpu %d too large\n", cpu.cpu);
tools/perf/util/bpf_kwork.c
176
bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
tools/perf/util/bpf_kwork.c
272
.cpu = key->cpu,
tools/perf/util/bpf_kwork.c
32
u32 cpu;
tools/perf/util/bpf_kwork.c
320
.cpu = 0,
tools/perf/util/bpf_kwork.c
325
.cpu = 0,
tools/perf/util/bpf_kwork_top.c
128
struct perf_cpu cpu;
tools/perf/util/bpf_kwork_top.c
143
perf_cpu_map__for_each_cpu(cpu, idx, map) {
tools/perf/util/bpf_kwork_top.c
146
if (cpu.cpu >= nr_cpus) {
tools/perf/util/bpf_kwork_top.c
148
pr_err("Requested cpu %d too large\n", cpu.cpu);
tools/perf/util/bpf_kwork_top.c
151
bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
tools/perf/util/bpf_kwork_top.c
223
.cpu = work->cpu,
tools/perf/util/bpf_kwork_top.c
239
struct work_data *data, int cpu)
tools/perf/util/bpf_kwork_top.c
245
.cpu = cpu,
tools/perf/util/bpf_kwork_top.c
52
__u32 cpu;
tools/perf/util/bpf_lock_contention.c
324
u32 cpu;
tools/perf/util/bpf_lock_contention.c
330
cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
tools/perf/util/bpf_lock_contention.c
331
bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
tools/perf/util/bpf_lock_contention.c
522
total_cpus = cpu__max_cpu().cpu;
tools/perf/util/bpf_off_cpu.c
235
u32 cpu;
tools/perf/util/bpf_off_cpu.c
241
cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
tools/perf/util/bpf_off_cpu.c
242
bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
tools/perf/util/bpf_off_cpu.c
93
int cpu_nr = pcpu.cpu;
tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
148
__u32 cpu = bpf_get_smp_processor_id();
tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
177
key = idx * num_cpus + cpu;
tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
80
__u32 cpu = bpf_get_smp_processor_id();
tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
84
err = bpf_perf_event_read_value(&events, cpu, &reading, sizeof(reading));
tools/perf/util/bpf_skel/func_latency.bpf.c
58
__u32 cpu = bpf_get_smp_processor_id();
tools/perf/util/bpf_skel/func_latency.bpf.c
61
ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
tools/perf/util/bpf_skel/kwork_top.bpf.c
105
static __always_inline void update_task_info(struct task_struct *task, __u32 cpu)
tools/perf/util/bpf_skel/kwork_top.bpf.c
109
.cpu = cpu,
tools/perf/util/bpf_skel/kwork_top.bpf.c
139
static void on_sched_out(struct task_struct *task, __u64 ts, __u32 cpu)
tools/perf/util/bpf_skel/kwork_top.bpf.c
157
update_task_info(task, cpu);
tools/perf/util/bpf_skel/kwork_top.bpf.c
181
__u32 cpu = bpf_get_smp_processor_id();
tools/perf/util/bpf_skel/kwork_top.bpf.c
183
if (cpu_is_filtered(cpu))
tools/perf/util/bpf_skel/kwork_top.bpf.c
188
on_sched_out(prev, ts, cpu);
tools/perf/util/bpf_skel/kwork_top.bpf.c
202
__u32 cpu = bpf_get_smp_processor_id();
tools/perf/util/bpf_skel/kwork_top.bpf.c
204
if (cpu_is_filtered(cpu))
tools/perf/util/bpf_skel/kwork_top.bpf.c
238
__u32 cpu = bpf_get_smp_processor_id();
tools/perf/util/bpf_skel/kwork_top.bpf.c
240
if (cpu_is_filtered(cpu))
tools/perf/util/bpf_skel/kwork_top.bpf.c
274
__u32 cpu = bpf_get_smp_processor_id();
tools/perf/util/bpf_skel/kwork_top.bpf.c
276
if (cpu_is_filtered(cpu))
tools/perf/util/bpf_skel/kwork_top.bpf.c
310
__u32 cpu = bpf_get_smp_processor_id();
tools/perf/util/bpf_skel/kwork_top.bpf.c
312
if (cpu_is_filtered(cpu))
tools/perf/util/bpf_skel/kwork_top.bpf.c
47
__u32 cpu;
tools/perf/util/bpf_skel/kwork_top.bpf.c
92
static __always_inline int cpu_is_filtered(__u32 cpu)
tools/perf/util/bpf_skel/kwork_top.bpf.c
97
cpu_val = bpf_map_lookup_elem(&kwork_top_cpu_filter, &cpu);
tools/perf/util/bpf_skel/kwork_trace.bpf.c
101
cpu_val = bpf_map_lookup_elem(&perf_kwork_cpu_filter, &cpu);
tools/perf/util/bpf_skel/kwork_trace.bpf.c
226
.cpu = bpf_get_smp_processor_id(),
tools/perf/util/bpf_skel/kwork_trace.bpf.c
23
__u32 cpu;
tools/perf/util/bpf_skel/kwork_trace.bpf.c
242
.cpu = bpf_get_smp_processor_id(),
tools/perf/util/bpf_skel/kwork_trace.bpf.c
268
.cpu = bpf_get_smp_processor_id(),
tools/perf/util/bpf_skel/kwork_trace.bpf.c
286
.cpu = bpf_get_smp_processor_id(),
tools/perf/util/bpf_skel/kwork_trace.bpf.c
299
.cpu = bpf_get_smp_processor_id(),
tools/perf/util/bpf_skel/kwork_trace.bpf.c
317
.cpu = bpf_get_smp_processor_id(),
tools/perf/util/bpf_skel/kwork_trace.bpf.c
329
.cpu = bpf_get_smp_processor_id(),
tools/perf/util/bpf_skel/kwork_trace.bpf.c
342
.cpu = bpf_get_smp_processor_id(),
tools/perf/util/bpf_skel/kwork_trace.bpf.c
359
.cpu = bpf_get_smp_processor_id(),
tools/perf/util/bpf_skel/kwork_trace.bpf.c
372
.cpu = bpf_get_smp_processor_id(),
tools/perf/util/bpf_skel/kwork_trace.bpf.c
95
__u32 cpu = bpf_get_smp_processor_id();
tools/perf/util/bpf_skel/lock_contention.bpf.c
243
__u32 cpu = bpf_get_smp_processor_id();
tools/perf/util/bpf_skel/lock_contention.bpf.c
246
ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
tools/perf/util/bpf_skel/off_cpu.bpf.c
181
__u32 cpu = bpf_get_smp_processor_id();
tools/perf/util/bpf_skel/off_cpu.bpf.c
184
ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
tools/perf/util/bpf_skel/sample_filter.bpf.c
118
return kctx->data->cpu_entry.cpu;
tools/perf/util/bpf_skel/vmlinux/vmlinux.h
171
u32 cpu;
tools/perf/util/bpf_trace_augment.c
62
struct perf_cpu cpu;
tools/perf/util/bpf_trace_augment.c
72
perf_cpu_map__for_each_cpu(cpu, i, bpf_output->core.cpus) {
tools/perf/util/bpf_trace_augment.c
73
int mycpu = cpu.cpu;
tools/perf/util/cloexec.c
26
int cpu;
tools/perf/util/cloexec.c
30
cpu = sched_getcpu();
tools/perf/util/cloexec.c
31
if (cpu < 0)
tools/perf/util/cloexec.c
32
cpu = 0;
tools/perf/util/cloexec.c
40
fd = sys_perf_event_open(&attr, pid, cpu, -1,
tools/perf/util/cloexec.c
61
fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
tools/perf/util/cpumap.c
110
int cpu;
tools/perf/util/cpumap.c
113
for_each_set_bit(cpu, local_copy, 64) {
tools/perf/util/cpumap.c
114
if (cpu + cpus_per_i < INT16_MAX) {
tools/perf/util/cpumap.c
115
RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i;
tools/perf/util/cpumap.c
117
pr_err("Invalid cpumap entry %d\n", cpu + cpus_per_i);
tools/perf/util/cpumap.c
138
RC_CHK_ACCESS(map)->map[i++].cpu = -1;
tools/perf/util/cpumap.c
140
for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu;
tools/perf/util/cpumap.c
141
i++, cpu++) {
tools/perf/util/cpumap.c
142
if (cpu < INT16_MAX) {
tools/perf/util/cpumap.c
143
RC_CHK_ACCESS(map)->map[i].cpu = cpu;
tools/perf/util/cpumap.c
145
pr_err("Invalid cpumap entry %d\n", cpu);
tools/perf/util/cpumap.c
185
RC_CHK_ACCESS(cpus)->map[i].cpu = -1;
tools/perf/util/cpumap.c
206
static int cpu__get_topology_int(int cpu, const char *name, int *value)
tools/perf/util/cpumap.c
211
"devices/system/cpu/cpu%d/topology/%s", cpu, name);
tools/perf/util/cpumap.c
216
int cpu__get_socket_id(struct perf_cpu cpu)
tools/perf/util/cpumap.c
218
int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value);
tools/perf/util/cpumap.c
222
struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused)
tools/perf/util/cpumap.c
226
id.socket = cpu__get_socket_id(cpu);
tools/perf/util/cpumap.c
258
struct perf_cpu cpu;
tools/perf/util/cpumap.c
267
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
tools/perf/util/cpumap.c
269
struct aggr_cpu_id cpu_id = get_id(cpu, data);
tools/perf/util/cpumap.c
300
int cpu__get_die_id(struct perf_cpu cpu)
tools/perf/util/cpumap.c
302
int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value);
tools/perf/util/cpumap.c
307
struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data)
tools/perf/util/cpumap.c
312
die = cpu__get_die_id(cpu);
tools/perf/util/cpumap.c
322
id = aggr_cpu_id__socket(cpu, data);
tools/perf/util/cpumap.c
330
int cpu__get_cluster_id(struct perf_cpu cpu)
tools/perf/util/cpumap.c
332
int value, ret = cpu__get_topology_int(cpu.cpu, "cluster_id", &value);
tools/perf/util/cpumap.c
337
struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data)
tools/perf/util/cpumap.c
339
int cluster = cpu__get_cluster_id(cpu);
tools/perf/util/cpumap.c
346
id = aggr_cpu_id__die(cpu, data);
tools/perf/util/cpumap.c
354
int cpu__get_core_id(struct perf_cpu cpu)
tools/perf/util/cpumap.c
356
int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value);
tools/perf/util/cpumap.c
360
struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data)
tools/perf/util/cpumap.c
363
int core = cpu__get_core_id(cpu);
tools/perf/util/cpumap.c
366
id = aggr_cpu_id__cluster(cpu, data);
tools/perf/util/cpumap.c
379
struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data)
tools/perf/util/cpumap.c
384
id = aggr_cpu_id__core(cpu, data);
tools/perf/util/cpumap.c
388
id.cpu = cpu;
tools/perf/util/cpumap.c
393
struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused)
tools/perf/util/cpumap.c
397
id.node = cpu__get_node(cpu);
tools/perf/util/cpumap.c
401
struct aggr_cpu_id aggr_cpu_id__global(struct perf_cpu cpu, void *data __maybe_unused)
tools/perf/util/cpumap.c
406
cpu.cpu = 0;
tools/perf/util/cpumap.c
407
id.cpu = cpu;
tools/perf/util/cpumap.c
451
max_cpu_num.cpu = 4096;
tools/perf/util/cpumap.c
452
max_present_cpu_num.cpu = 4096;
tools/perf/util/cpumap.c
469
max_cpu_num.cpu = max;
tools/perf/util/cpumap.c
485
max_present_cpu_num.cpu = (int16_t)max;
tools/perf/util/cpumap.c
488
pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu);
tools/perf/util/cpumap.c
529
if (unlikely(!max_cpu_num.cpu))
tools/perf/util/cpumap.c
537
if (unlikely(!max_present_cpu_num.cpu))
tools/perf/util/cpumap.c
544
int cpu__get_node(struct perf_cpu cpu)
tools/perf/util/cpumap.c
551
return cpunode_map[cpu.cpu];
tools/perf/util/cpumap.c
561
cpunode_map = calloc(max_cpu_num.cpu, sizeof(int));
tools/perf/util/cpumap.c
567
for (i = 0; i < max_cpu_num.cpu; i++)
tools/perf/util/cpumap.c
577
unsigned int cpu, mem;
tools/perf/util/cpumap.c
616
if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
tools/perf/util/cpumap.c
618
cpunode_map[cpu] = mem;
tools/perf/util/cpumap.c
635
struct perf_cpu cpu = { .cpu = INT16_MAX };
tools/perf/util/cpumap.c
639
cpu = perf_cpu_map__cpu(map, i);
tools/perf/util/cpumap.c
646
perf_cpu_map__cpu(map, i).cpu);
tools/perf/util/cpumap.c
648
} else if (((i - start) != (cpu.cpu - perf_cpu_map__cpu(map, start).cpu)) || last) {
tools/perf/util/cpumap.c
654
perf_cpu_map__cpu(map, start).cpu);
tools/perf/util/cpumap.c
658
perf_cpu_map__cpu(map, start).cpu, perf_cpu_map__cpu(map, end).cpu);
tools/perf/util/cpumap.c
690
if (last_cpu.cpu < 0) {
tools/perf/util/cpumap.c
695
bitmap = zalloc(last_cpu.cpu / 8 + 1);
tools/perf/util/cpumap.c
702
bitmap[c.cpu / 8] |= 1 << (c.cpu % 8);
tools/perf/util/cpumap.c
704
for (int cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
tools/perf/util/cpumap.c
705
unsigned char bits = bitmap[cpu / 8];
tools/perf/util/cpumap.c
707
if (cpu % 8)
tools/perf/util/cpumap.c
713
if ((cpu % 32) == 0 && cpu > 0)
tools/perf/util/cpumap.c
743
a->cpu.cpu == b->cpu.cpu;
tools/perf/util/cpumap.c
756
a->cpu.cpu == -1;
tools/perf/util/cpumap.c
770
.cpu = (struct perf_cpu){ .cpu = -1 },
tools/perf/util/cpumap.c
79
if (data->cpus_data.cpu[i] == (u16) -1) {
tools/perf/util/cpumap.c
80
RC_CHK_ACCESS(map)->map[i].cpu = -1;
tools/perf/util/cpumap.c
81
} else if (data->cpus_data.cpu[i] < INT16_MAX) {
tools/perf/util/cpumap.c
82
RC_CHK_ACCESS(map)->map[i].cpu = (int16_t) data->cpus_data.cpu[i];
tools/perf/util/cpumap.c
84
pr_err("Invalid cpumap entry %u\n", data->cpus_data.cpu[i]);
tools/perf/util/cpumap.h
127
struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data);
tools/perf/util/cpumap.h
133
struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data);
tools/perf/util/cpumap.h
139
struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data);
tools/perf/util/cpumap.h
145
struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data);
tools/perf/util/cpumap.h
151
struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data);
tools/perf/util/cpumap.h
156
struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data);
tools/perf/util/cpumap.h
161
struct aggr_cpu_id aggr_cpu_id__global(struct perf_cpu cpu, void *data);
tools/perf/util/cpumap.h
34
struct perf_cpu cpu;
tools/perf/util/cpumap.h
71
return perf_cpu_map__nr(cpus) == 1 && perf_cpu_map__cpu(cpus, 0).cpu == -1;
tools/perf/util/cpumap.h
78
int cpu__get_node(struct perf_cpu cpu);
tools/perf/util/cpumap.h
83
int cpu__get_socket_id(struct perf_cpu cpu);
tools/perf/util/cpumap.h
88
int cpu__get_die_id(struct perf_cpu cpu);
tools/perf/util/cpumap.h
93
int cpu__get_cluster_id(struct perf_cpu cpu);
tools/perf/util/cpumap.h
98
int cpu__get_core_id(struct perf_cpu cpu);
tools/perf/util/cputopo.c
109
sysfs__mountpoint(), cpu);
tools/perf/util/cputopo.c
112
sysfs__mountpoint(), cpu);
tools/perf/util/cputopo.c
193
struct perf_cpu cpu;
tools/perf/util/cputopo.c
197
perf_cpu_map__for_each_cpu(cpu, idx, core_cpus) {
tools/perf/util/cputopo.c
199
has_first = perf_cpu_map__has(user_requested_cpus, cpu);
tools/perf/util/cputopo.c
209
if (perf_cpu_map__has(user_requested_cpus, cpu) != has_first) {
tools/perf/util/cputopo.c
267
ncpus = cpu__max_present_cpu().cpu;
tools/perf/util/cputopo.c
298
if (!perf_cpu_map__has(map, (struct perf_cpu){ .cpu = i }))
tools/perf/util/cputopo.c
35
static int build_cpu_topology(struct cpu_topology *tp, int cpu)
tools/perf/util/cputopo.c
413
if (load_numa_node(&tp->nodes[i], perf_cpu_map__cpu(node_map, i).cpu)) {
tools/perf/util/cputopo.c
46
sysfs__mountpoint(), cpu);
tools/perf/util/cputopo.c
49
sysfs__mountpoint(), cpu);
tools/perf/util/cputopo.c
81
sysfs__mountpoint(), cpu);
tools/perf/util/cs-etm-base.c
141
int i, cpu = 0, version, err;
tools/perf/util/cs-etm-base.c
148
for (i = CS_HEADER_VERSION_MAX; cpu < num; cpu++) {
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
397
int cpu;
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
402
if (cs_etm__get_cpu(etmq, trace_chan_id, &cpu) < 0)
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
412
packet_queue->packet_buffer[et].cpu = cpu;
tools/perf/util/cs-etm.c
1206
queue->cpu = queue_nr; /* Placeholder, may be reset to -1 in per-thread mode */
tools/perf/util/cs-etm.c
134
static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu);
tools/perf/util/cs-etm.c
1585
sample.cpu = tidq->packet->cpu;
tools/perf/util/cs-etm.c
1647
sample.cpu = tidq->packet->cpu;
tools/perf/util/cs-etm.c
176
int cs_etm__get_cpu(struct cs_etm_queue *etmq, u8 trace_chan_id, int *cpu)
tools/perf/util/cs-etm.c
186
*cpu = (int)metadata[CS_ETM_CPU];
tools/perf/util/cs-etm.c
283
static struct cs_etm_queue *cs_etm__get_queue(struct cs_etm_auxtrace *etm, int cpu)
tools/perf/util/cs-etm.c
288
return etm->queues.queue_array[cpu].priv;
tools/perf/util/cs-etm.c
3044
if (auxtrace_event->cpu == (__u32) -1) {
tools/perf/util/cs-etm.c
3048
} else if (auxtrace_event->cpu != sample->cpu) {
tools/perf/util/cs-etm.c
3080
struct cs_etm_queue *etmq = cs_etm__get_queue(etm, auxtrace_event->cpu);
tools/perf/util/cs-etm.c
3093
" tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
tools/perf/util/cs-etm.c
3179
" tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
tools/perf/util/cs-etm.c
322
static int cs_etm__process_trace_id_v0(struct cs_etm_auxtrace *etm, int cpu,
tools/perf/util/cs-etm.c
329
cpu_data = get_cpu_data(etm, cpu);
tools/perf/util/cs-etm.c
344
static int cs_etm__process_trace_id_v0_1(struct cs_etm_auxtrace *etm, int cpu,
tools/perf/util/cs-etm.c
347
struct cs_etm_queue *etmq = cs_etm__get_queue(etm, cpu);
tools/perf/util/cs-etm.c
390
cpu_data = get_cpu_data(etm, cpu);
tools/perf/util/cs-etm.c
448
static int get_cpu_data_idx(struct cs_etm_auxtrace *etm, int cpu)
tools/perf/util/cs-etm.c
453
if (etm->metadata[i][CS_ETM_CPU] == (u64)cpu) {
tools/perf/util/cs-etm.c
465
static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu)
tools/perf/util/cs-etm.c
467
int idx = get_cpu_data_idx(etm, cpu);
tools/perf/util/cs-etm.c
486
int cpu, version, err;
tools/perf/util/cs-etm.c
512
cpu = sample.cpu;
tools/perf/util/cs-etm.c
513
if (cpu == -1) {
tools/perf/util/cs-etm.c
521
err = cs_etm__process_trace_id_v0(etm, cpu, hw_id);
tools/perf/util/cs-etm.c
525
err = cs_etm__process_trace_id_v0_1(etm, cpu, hw_id);
tools/perf/util/cs-etm.c
587
queue->packet_buffer[i].cpu = INT_MIN;
tools/perf/util/cs-etm.h
186
int cpu;
tools/perf/util/cs-etm.h
260
int cs_etm__get_cpu(struct cs_etm_queue *etmq, u8 trace_chan_id, int *cpu);
tools/perf/util/data-convert-bt.c
1376
int cpu;
tools/perf/util/data-convert-bt.c
1378
for (cpu = 0; cpu < cw->stream_cnt; cpu++)
tools/perf/util/data-convert-bt.c
1379
ctf_stream__delete(cw->stream[cpu]);
tools/perf/util/data-convert-bt.c
1644
int cpu, ret = 0;
tools/perf/util/data-convert-bt.c
1646
for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
tools/perf/util/data-convert-bt.c
1647
ret = ctf_stream__flush(cw->stream[cpu]);
tools/perf/util/data-convert-bt.c
60
int cpu;
tools/perf/util/data-convert-bt.c
687
pr_err("CTF stream %d flush failed\n", cs->cpu);
tools/perf/util/data-convert-bt.c
690
cs->cpu, cs->count);
tools/perf/util/data-convert-bt.c
698
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
tools/perf/util/data-convert-bt.c
731
ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
tools/perf/util/data-convert-bt.c
739
cs->cpu = cpu;
tools/perf/util/data-convert-bt.c
761
static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
tools/perf/util/data-convert-bt.c
763
struct ctf_stream *cs = cw->stream[cpu];
tools/perf/util/data-convert-bt.c
766
cs = ctf_stream__create(cw, cpu);
tools/perf/util/data-convert-bt.c
767
cw->stream[cpu] = cs;
tools/perf/util/data-convert-bt.c
776
int cpu = 0;
tools/perf/util/data-convert-bt.c
779
cpu = sample->cpu;
tools/perf/util/data-convert-bt.c
781
if (cpu > cw->stream_cnt) {
tools/perf/util/data-convert-bt.c
783
cpu, cw->stream_cnt);
tools/perf/util/data-convert-bt.c
784
cpu = 0;
tools/perf/util/data-convert-bt.c
787
return cpu;
tools/perf/util/data-convert-json.c
196
output_json_key_format(out, true, 3, "cpu", "%i", sample->cpu);
tools/perf/util/dlfilter.c
532
ASSIGN(cpu);
tools/perf/util/dump-insn.h
17
int cpu;
tools/perf/util/env.c
265
zfree(&env->cpu);
tools/perf/util/env.c
363
if (env->cpu != NULL)
tools/perf/util/env.c
367
env->nr_cpus_avail = cpu__max_present_cpu().cpu;
tools/perf/util/env.c
373
env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
tools/perf/util/env.c
374
if (env->cpu == NULL)
tools/perf/util/env.c
378
struct perf_cpu cpu = { .cpu = idx };
tools/perf/util/env.c
379
int core_id = cpu__get_core_id(cpu);
tools/perf/util/env.c
380
int socket_id = cpu__get_socket_id(cpu);
tools/perf/util/env.c
381
int die_id = cpu__get_die_id(cpu);
tools/perf/util/env.c
383
env->cpu[idx].core_id = core_id >= 0 ? core_id : -1;
tools/perf/util/env.c
384
env->cpu[idx].socket_id = socket_id >= 0 ? socket_id : -1;
tools/perf/util/env.c
385
env->cpu[idx].die_id = die_id >= 0 ? die_id : -1;
tools/perf/util/env.c
430
struct perf_cpu cpu = {-1};
tools/perf/util/env.c
431
int err = get_cpuid(cpuid, sizeof(cpuid), cpu);
tools/perf/util/env.c
459
env->nr_cpus_avail = cpu__max_present_cpu().cpu;
tools/perf/util/env.c
693
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
tools/perf/util/env.c
701
nr = max(nr, (int)perf_cpu_map__max(nn->map).cpu);
tools/perf/util/env.c
725
env->numa_map[tmp.cpu] = i;
tools/perf/util/env.c
729
return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
tools/perf/util/env.h
111
struct cpu_topology_map *cpu;
tools/perf/util/env.h
213
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
tools/perf/util/env.h
65
u32 cpu;
tools/perf/util/event.c
579
size_t size = fprintf(fp, "\ncpu%u ", cs->cpu);
tools/perf/util/event.c
833
al->cpu = sample->cpu;
tools/perf/util/event.c
837
if (al->cpu >= 0) {
tools/perf/util/event.c
840
if (env && env->cpu)
tools/perf/util/event.c
841
al->socket = env->cpu[al->cpu].socket_id;
tools/perf/util/event.c
929
al->cpu = sample->cpu;
tools/perf/util/evlist.c
1635
sample->vcpu = sid->vcpu.cpu;
tools/perf/util/evlist.c
429
.cpu = (struct perf_cpu){ .cpu = -1},
tools/perf/util/evlist.c
444
itr->cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
tools/perf/util/evlist.c
446
affinity__set(itr->affinity, itr->cpu.cpu);
tools/perf/util/evlist.c
447
itr->cpu_map_idx = perf_cpu_map__idx(itr->evsel->core.cpus, itr->cpu);
tools/perf/util/evlist.c
471
evlist_cpu_itr->cpu);
tools/perf/util/evlist.c
478
evlist_cpu_itr->cpu =
tools/perf/util/evlist.c
482
affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu);
tools/perf/util/evlist.c
485
evlist_cpu_itr->cpu);
tools/perf/util/evlist.c
882
int output, struct perf_cpu cpu)
tools/perf/util/evlist.c
887
return mmap__mmap(map, mp, output, cpu);
tools/perf/util/evlist.h
365
struct perf_cpu cpu;
tools/perf/util/evsel.c
115
static int store_event(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
tools/perf/util/evsel.c
141
__WRITE_ASS(cpu, "d", cpu.cpu);
tools/perf/util/evsel.c
195
static void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
tools/perf/util/evsel.c
200
if ((fd != -1) && store_event(attr, pid, cpu, fd, group_fd, flags)) {
tools/perf/util/evsel.c
2121
struct perf_cpu cpu;
tools/perf/util/evsel.c
2123
cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
tools/perf/util/evsel.c
2124
return perf_cpu_map__idx(other->core.cpus, cpu);
tools/perf/util/evsel.c
2169
for (int cpu = 0; cpu < nr_cpus; cpu++)
tools/perf/util/evsel.c
2171
FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
tools/perf/util/evsel.c
2381
struct perf_cpu cpu, unsigned long flags)
tools/perf/util/evsel.c
2383
int fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, cpu.cpu,
tools/perf/util/evsel.c
2390
fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, cpu.cpu,
tools/perf/util/evsel.c
2398
fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, cpu.cpu,
tools/perf/util/evsel.c
2406
fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, cpu.cpu,
tools/perf/util/evsel.c
2420
struct perf_cpu cpu = {.cpu = -1};
tools/perf/util/evsel.c
2422
return __has_attr_feature(attr, cpu, flags);
tools/perf/util/evsel.c
2513
static bool evsel__probe_aux_action(struct evsel *evsel, struct perf_cpu cpu)
tools/perf/util/evsel.c
2521
if (__has_attr_feature(&attr, cpu, /*flags=*/0)) {
tools/perf/util/evsel.c
2536
static void evsel__detect_missing_aux_action_feature(struct evsel *evsel, struct perf_cpu cpu)
tools/perf/util/evsel.c
2567
if (!evsel__probe_aux_action(leader, cpu))
tools/perf/util/evsel.c
2571
static bool evsel__detect_missing_features(struct evsel *evsel, struct perf_cpu cpu)
tools/perf/util/evsel.c
2581
evsel__detect_missing_aux_action_feature(evsel, cpu);
tools/perf/util/evsel.c
2789
struct perf_cpu cpu;
tools/perf/util/evsel.c
2837
cpu = perf_cpu_map__cpu(cpus, idx);
tools/perf/util/evsel.c
2858
pid, cpu.cpu, group_fd, evsel->open_flags);
tools/perf/util/evsel.c
2860
fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu,
tools/perf/util/evsel.c
2876
test_attr__open(&evsel->core.attr, pid, cpu,
tools/perf/util/evsel.c
2932
if (err == -EINVAL && evsel__detect_missing_features(evsel, cpu))
tools/perf/util/evsel.c
3025
sample->cpu = u.val32[0];
tools/perf/util/evsel.c
3221
data->cpu = data->pid = data->tid = -1;
tools/perf/util/evsel.c
3307
data->cpu = u.val32[0];
tools/perf/util/evsel.h
243
void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
tools/perf/util/evsel_config.h
56
int cpu;
tools/perf/util/expr.c
460
struct perf_cpu cpu = {-1};
tools/perf/util/expr.c
461
char *cpuid = get_cpuid_allow_env_override(cpu);
tools/perf/util/header.c
1140
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
tools/perf/util/header.c
1146
scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
tools/perf/util/header.c
1207
int build_caches_for_cpu(u32 cpu, struct cpu_cache_level caches[], u32 *cntp)
tools/perf/util/header.c
1216
err = cpu_cache_level__read(&c, cpu, level);
tools/perf/util/header.c
1240
u32 nr, cpu, cnt = 0;
tools/perf/util/header.c
1242
nr = cpu__max_cpu().cpu;
tools/perf/util/header.c
1244
for (cpu = 0; cpu < nr; cpu++) {
tools/perf/util/header.c
1245
int ret = build_caches_for_cpu(cpu, caches, &cnt);
tools/perf/util/header.c
1257
u32 max_caches = cpu__max_cpu().cpu * MAX_CACHE_LVL;
tools/perf/util/header.c
1640
u32 cpu, domain;
tools/perf/util/header.c
1664
retval = sscanf(line, "cpu%u %*s", &cpu);
tools/perf/util/header.c
1666
cd_map[cpu] = zalloc(sizeof(*cd_map[cpu]));
tools/perf/util/header.c
1667
if (!cd_map[cpu])
tools/perf/util/header.c
1669
cd_map[cpu]->cpu = cpu;
tools/perf/util/header.c
1678
temp_domains = realloc(cd_map[cpu]->domains, dcount * sizeof(domain_info));
tools/perf/util/header.c
1682
cd_map[cpu]->domains = temp_domains;
tools/perf/util/header.c
1688
cd_map[cpu]->domains[dcount - 1] = domain_info;
tools/perf/util/header.c
1718
cd_map[cpu]->nr_domains = dcount;
tools/perf/util/header.c
1736
nr = cpu__max_present_cpu().cpu;
tools/perf/util/header.c
1755
ret = do_write(ff, &cd_map[i]->cpu, sizeof(u32));
tools/perf/util/header.c
1888
if (ph->env.cpu != NULL) {
tools/perf/util/header.c
1892
i, ph->env.cpu[i].core_id,
tools/perf/util/header.c
1893
ph->env.cpu[i].die_id,
tools/perf/util/header.c
1894
ph->env.cpu[i].socket_id);
tools/perf/util/header.c
1899
if (ph->env.cpu != NULL) {
tools/perf/util/header.c
1903
i, ph->env.cpu[i].core_id,
tools/perf/util/header.c
1904
ph->env.cpu[i].socket_id);
tools/perf/util/header.c
2441
fprintf(fp, "# cpu : %u\n", cd_map[i]->cpu);
tools/perf/util/header.c
2797
env->cpu = calloc(cpu_nr, sizeof(*env->cpu));
tools/perf/util/header.c
2798
if (!env->cpu)
tools/perf/util/header.c
2846
zfree(&env->cpu);
tools/perf/util/header.c
2854
env->cpu[i].core_id = nr;
tools/perf/util/header.c
2860
env->cpu[i].socket_id = nr;
tools/perf/util/header.c
2894
env->cpu[i].die_id = nr;
tools/perf/util/header.c
2903
zfree(&env->cpu);
tools/perf/util/header.c
3614
u32 schedstat_version, max_sched_domains, cpu, domain, nr_domains;
tools/perf/util/header.c
3644
if (do_read_u32(ff, &cpu))
tools/perf/util/header.c
3647
cd_map[cpu] = zalloc(sizeof(*cd_map[cpu]));
tools/perf/util/header.c
3648
if (!cd_map[cpu])
tools/perf/util/header.c
3651
cd_map[cpu]->cpu = cpu;
tools/perf/util/header.c
3656
cd_map[cpu]->nr_domains = nr_domains;
tools/perf/util/header.c
3658
cd_map[cpu]->domains = zalloc(sizeof(*d_info) * max_sched_domains);
tools/perf/util/header.c
3659
if (!cd_map[cpu]->domains)
tools/perf/util/header.c
3670
assert(cd_map[cpu]->domains[domain] == NULL);
tools/perf/util/header.c
3671
cd_map[cpu]->domains[domain] = d_info;
tools/perf/util/header.c
501
nrc = cpu__max_present_cpu().cpu;
tools/perf/util/header.c
638
ret = do_write(ff, &env->cpu[j].core_id,
tools/perf/util/header.c
639
sizeof(env->cpu[j].core_id));
tools/perf/util/header.c
642
ret = do_write(ff, &env->cpu[j].socket_id,
tools/perf/util/header.c
643
sizeof(env->cpu[j].socket_id));
tools/perf/util/header.c
662
ret = do_write(ff, &env->cpu[j].die_id,
tools/perf/util/header.c
663
sizeof(env->cpu[j].die_id));
tools/perf/util/header.c
840
char * __weak get_cpuid_str(struct perf_cpu cpu __maybe_unused)
tools/perf/util/header.c
845
char *get_cpuid_allow_env_override(struct perf_cpu cpu)
tools/perf/util/header.c
854
cpuid = get_cpuid_str(cpu);
tools/perf/util/header.c
898
struct perf_cpu cpu __maybe_unused)
tools/perf/util/header.c
905
struct perf_cpu cpu = perf_cpu_map__min(evlist->core.all_cpus);
tools/perf/util/header.c
909
ret = get_cpuid(buffer, sizeof(buffer), cpu);
tools/perf/util/header.h
203
int build_caches_for_cpu(u32 cpu, struct cpu_cache_level caches[], u32 *cntp);
tools/perf/util/header.h
208
int get_cpuid(char *buffer, size_t sz, struct perf_cpu cpu);
tools/perf/util/header.h
210
char *get_cpuid_str(struct perf_cpu cpu);
tools/perf/util/header.h
212
char *get_cpuid_allow_env_override(struct perf_cpu cpu);
tools/perf/util/hist.c
1230
.cpu = al->cpu,
tools/perf/util/hist.c
787
r->cpu = sample->cpu;
tools/perf/util/hist.c
819
.cpu = al->cpu,
tools/perf/util/hist.h
181
int cpu;
tools/perf/util/hist.h
261
s32 cpu;
tools/perf/util/intel-bts.c
137
INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
tools/perf/util/intel-bts.c
161
btsq->cpu = -1;
tools/perf/util/intel-bts.c
181
if (queue->cpu != -1)
tools/perf/util/intel-bts.c
182
btsq->cpu = queue->cpu;
tools/perf/util/intel-bts.c
293
sample.cpu = btsq->cpu;
tools/perf/util/intel-bts.c
346
static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
tools/perf/util/intel-bts.c
353
INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
tools/perf/util/intel-bts.c
387
err = intel_bts_synth_error(btsq->bts, btsq->cpu,
tools/perf/util/intel-bts.c
433
thread_stack__event(thread, btsq->cpu, btsq->sample_flags,
tools/perf/util/intel-bts.c
505
thread_stack__set_trace_nr(thread, btsq->cpu, buffer->buffer_nr + 1);
tools/perf/util/intel-bts.c
74
int cpu;
tools/perf/util/intel-pt.c
1244
thread_stack__sample_late(thread, sample->cpu, pt->chain,
tools/perf/util/intel-pt.c
1282
thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
tools/perf/util/intel-pt.c
1327
ptq->cpu = -1;
tools/perf/util/intel-pt.c
1486
ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
tools/perf/util/intel-pt.c
1497
if (queue->cpu == -1)
tools/perf/util/intel-pt.c
1498
ptq->cpu = thread__cpu(ptq->thread);
tools/perf/util/intel-pt.c
1590
if (queue->cpu != -1)
tools/perf/util/intel-pt.c
1591
ptq->cpu = queue->cpu;
tools/perf/util/intel-pt.c
1616
queue_nr, ptq->cpu, ptq->pid, ptq->tid);
tools/perf/util/intel-pt.c
1707
sample->cpu = ptq->cpu;
tools/perf/util/intel-pt.c
1827
thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
tools/perf/util/intel-pt.c
1834
thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
tools/perf/util/intel-pt.c
199
int cpu;
tools/perf/util/intel-pt.c
2466
thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
tools/perf/util/intel-pt.c
2495
thread_stack__br_sample(ptq->thread, ptq->cpu,
tools/perf/util/intel-pt.c
2684
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
tools/perf/util/intel-pt.c
2706
code, cpu, pid, tid, ip, msg, timestamp,
tools/perf/util/intel-pt.c
2746
return intel_pt_synth_error(pt, state->err, ptq->cpu, pid, tid,
tools/perf/util/intel-pt.c
2759
intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
tools/perf/util/intel-pt.c
2761
err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
tools/perf/util/intel-pt.c
2896
thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
tools/perf/util/intel-pt.c
2902
thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
tools/perf/util/intel-pt.c
3143
ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
tools/perf/util/intel-pt.c
3296
ptq->cpu = queue->cpu;
tools/perf/util/intel-pt.c
3299
ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
tools/perf/util/intel-pt.c
3340
return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
tools/perf/util/intel-pt.c
3345
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
tools/perf/util/intel-pt.c
3349
if (cpu < 0 || !pt->queues.nr_queues)
tools/perf/util/intel-pt.c
3352
if ((unsigned)cpu >= pt->queues.nr_queues)
tools/perf/util/intel-pt.c
3355
i = cpu;
tools/perf/util/intel-pt.c
3357
if (pt->queues.queue_array[i].cpu == cpu)
tools/perf/util/intel-pt.c
3361
if (pt->queues.queue_array[--i].cpu == cpu)
tools/perf/util/intel-pt.c
3366
if (pt->queues.queue_array[j].cpu == cpu)
tools/perf/util/intel-pt.c
3373
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
tools/perf/util/intel-pt.c
3382
ptq = intel_pt_cpu_to_ptq(pt, cpu);
tools/perf/util/intel-pt.c
3407
intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
tools/perf/util/intel-pt.c
3423
int cpu, ret;
tools/perf/util/intel-pt.c
3430
cpu = sample->cpu;
tools/perf/util/intel-pt.c
3433
cpu, tid, sample->time, perf_time_to_tsc(sample->time,
tools/perf/util/intel-pt.c
3436
ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
tools/perf/util/intel-pt.c
3440
return machine__set_current_tid(pt->machine, cpu, -1, tid);
tools/perf/util/intel-pt.c
3449
int cpu = sample->cpu;
tools/perf/util/intel-pt.c
3454
ptq = intel_pt_cpu_to_ptq(pt, cpu);
tools/perf/util/intel-pt.c
3476
if (machine__get_current_tid(pt->machine, cpu) == tid)
tools/perf/util/intel-pt.c
3479
return machine__set_current_tid(pt->machine, cpu, pid, tid);
tools/perf/util/intel-pt.c
3514
int cpu, ret;
tools/perf/util/intel-pt.c
3519
cpu = sample->cpu;
tools/perf/util/intel-pt.c
3540
ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
tools/perf/util/intel-pt.c
3544
return machine__set_current_tid(pt->machine, cpu, pid, tid);
tools/perf/util/intel-pt.c
3555
sample->cpu, event->itrace_start.pid,
tools/perf/util/intel-pt.c
3559
return machine__set_current_tid(pt->machine, sample->cpu,
tools/perf/util/intel-pt.c
3779
event->header.type, sample->cpu, sample->time, timestamp);
tools/perf/util/kwork.h
110
int cpu;
tools/perf/util/machine.c
1131
int nr_cpus_avail, cpu;
tools/perf/util/machine.c
1150
for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
tools/perf/util/machine.c
1152
cpu * X86_64_CPU_ENTRY_AREA_SIZE +
tools/perf/util/machine.c
3109
pid_t machine__get_current_tid(struct machine *machine, int cpu)
tools/perf/util/machine.c
3111
if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
tools/perf/util/machine.c
3114
return machine->current_tid[cpu];
tools/perf/util/machine.c
3117
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
tools/perf/util/machine.c
3123
if (cpu < 0)
tools/perf/util/machine.c
3128
(unsigned int)cpu,
tools/perf/util/machine.c
3132
machine->current_tid[cpu] = tid;
tools/perf/util/machine.c
3138
thread__set_cpu(thread, cpu);
tools/perf/util/machine.h
302
pid_t machine__get_current_tid(struct machine *machine, int cpu);
tools/perf/util/machine.h
303
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
tools/perf/util/mmap.c
109
node_index = cpu__get_node(cpu);
tools/perf/util/mmap.c
142
struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)
tools/perf/util/mmap.c
176
ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
tools/perf/util/mmap.c
247
struct perf_cpu cpu;
tools/perf/util/mmap.c
255
cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
tools/perf/util/mmap.c
256
if (cpu__get_node(cpu) == node)
tools/perf/util/mmap.c
257
__set_bit(cpu.cpu, mask->bits);
tools/perf/util/mmap.c
264
map->affinity_mask.nbits = cpu__max_cpu().cpu;
tools/perf/util/mmap.c
270
build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
tools/perf/util/mmap.c
272
__set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
tools/perf/util/mmap.c
277
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu)
tools/perf/util/mmap.c
279
if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
tools/perf/util/mmap.c
98
static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)
tools/perf/util/mmap.h
52
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu);
tools/perf/util/parse-events.c
921
if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
tools/perf/util/perf_api_probe.c
14
static int perf_do_probe_api(setup_probe_fn_t fn, struct perf_cpu cpu, const char *str)
tools/perf/util/perf_api_probe.c
151
struct perf_cpu cpu;
tools/perf/util/perf_api_probe.c
158
cpu = perf_cpu_map__cpu(cpus, 0);
tools/perf/util/perf_api_probe.c
161
fd = sys_perf_event_open(&attr, -1, cpu.cpu, -1, 0);
tools/perf/util/perf_api_probe.c
32
fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags);
tools/perf/util/perf_api_probe.c
46
fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags);
tools/perf/util/perf_api_probe.c
64
struct perf_cpu cpu;
tools/perf/util/perf_api_probe.c
70
cpu = perf_cpu_map__cpu(cpus, 0);
tools/perf/util/perf_api_probe.c
73
ret = perf_do_probe_api(fn, cpu, "software/cpu-clock/u");
tools/perf/util/perf_api_probe.c
85
ret = perf_do_probe_api(fn, cpu, buf);
tools/perf/util/powerpc-vpadtl.c
181
struct powerpc_vpadtl *vpa, u64 save, int cpu)
tools/perf/util/powerpc-vpadtl.c
188
sample.cpu = cpu;
tools/perf/util/powerpc-vpadtl.c
354
ret = powerpc_vpadtl_sample(record, vpa, vpaq_timestamp, vpaq->cpu);
tools/perf/util/powerpc-vpadtl.c
462
if (queue->cpu != -1)
tools/perf/util/powerpc-vpadtl.c
463
vpaq->cpu = queue->cpu;
tools/perf/util/powerpc-vpadtl.c
58
int cpu;
tools/perf/util/python.c
1148
int cpu = 0, cpu_idx, thread = 0, thread_idx;
tools/perf/util/python.c
1156
if (!PyArg_ParseTuple(args, "ii", &cpu, &thread))
tools/perf/util/python.c
1159
cpu_idx = perf_cpu_map__idx(evsel->core.cpus, (struct perf_cpu){.cpu = cpu});
tools/perf/util/python.c
1161
PyErr_Format(PyExc_TypeError, "CPU %d is not part of evsel's CPUs", cpu);
tools/perf/util/python.c
1410
int ret, cpu = 0, cpu_idx = 0, thread = 0, thread_idx = 0;
tools/perf/util/python.c
1418
if (!PyArg_ParseTuple(args, "sii", &metric, &cpu, &thread))
tools/perf/util/python.c
1441
(struct perf_cpu){.cpu = cpu});
tools/perf/util/python.c
1457
metric, cpu, thread);
tools/perf/util/python.c
1566
static struct mmap *get_md(struct evlist *evlist, int cpu)
tools/perf/util/python.c
1573
if (md->core.cpu.cpu == cpu)
tools/perf/util/python.c
1585
int sample_id_all = 1, cpu;
tools/perf/util/python.c
1591
&cpu, &sample_id_all))
tools/perf/util/python.c
1594
md = get_md(evlist, cpu);
tools/perf/util/python.c
1596
PyErr_Format(PyExc_TypeError, "Unknown CPU '%d'", cpu);
tools/perf/util/python.c
555
return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
tools/perf/util/python.c
57
sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
tools/perf/util/record.c
102
if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0)
tools/perf/util/record.c
226
struct perf_cpu cpu = { .cpu = 0 };
tools/perf/util/record.c
244
cpu = perf_cpu_map__cpu(cpus, 0);
tools/perf/util/record.c
248
cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0);
tools/perf/util/record.c
252
fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1,
tools/perf/util/s390-cpumsf.c
186
int cpu;
tools/perf/util/s390-cpumsf.c
203
if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)
tools/perf/util/s390-cpumsf.c
206
q = &sf->queues.queue_array[sample->cpu];
tools/perf/util/s390-cpumsf.c
216
sf->logdir, sample->cpu)
tools/perf/util/s390-cpumsf.c
217
: asprintf(&name, "aux.ctr.%02x", sample->cpu);
tools/perf/util/s390-cpumsf.c
512
.cpu = sfq->cpu,
tools/perf/util/s390-cpumsf.c
539
sample.tid, sample.cpumode, sample.cpu);
tools/perf/util/s390-cpumsf.c
780
sfq->cpu = -1;
tools/perf/util/s390-cpumsf.c
816
if (queue->cpu != -1)
tools/perf/util/s390-cpumsf.c
817
sfq->cpu = queue->cpu;
tools/perf/util/s390-cpumsf.c
888
static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
tools/perf/util/s390-cpumsf.c
897
code, cpu, pid, tid, ip, msg, timestamp);
tools/perf/util/s390-cpumsf.c
908
return s390_cpumsf_synth_error(sf, 1, sample->cpu,
tools/perf/util/sample.h
96
u32 cpu;
tools/perf/util/scripting-engines/trace-event-perl.c
353
int cpu = sample->cpu;
tools/perf/util/scripting-engines/trace-event-perl.c
387
XPUSHs(sv_2mortal(newSVuv(cpu)));
tools/perf/util/scripting-engines/trace-event-perl.c
425
XPUSHs(sv_2mortal(newSVuv(cpu)));
tools/perf/util/scripting-engines/trace-event-python.c
1003
PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu));
tools/perf/util/scripting-engines/trace-event-python.c
1010
pydict_set_item_string_decref(dict, "common_cpu", _PyLong_FromLong(cpu));
tools/perf/util/scripting-engines/trace-event-python.c
1324
tuple_set_s32(t, 10, es->sample->cpu);
tools/perf/util/scripting-engines/trace-event-python.c
1445
tuple_set_s32(t, 3, sample->cpu);
tools/perf/util/scripting-engines/trace-event-python.c
1548
tuple_set_s32(t, 3, sample->cpu);
tools/perf/util/scripting-engines/trace-event-python.c
1581
tuple_set_s32(t, 1, sample->cpu);
tools/perf/util/scripting-engines/trace-event-python.c
1632
tuple_set_s32(t, 2, e->cpu);
tools/perf/util/scripting-engines/trace-event-python.c
1661
process_stat(struct evsel *counter, struct perf_cpu cpu, int thread, u64 tstamp,
tools/perf/util/scripting-engines/trace-event-python.c
1681
PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu.cpu));
tools/perf/util/scripting-engines/trace-event-python.c
1705
struct perf_cpu cpu;
tools/perf/util/scripting-engines/trace-event-python.c
1707
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
tools/perf/util/scripting-engines/trace-event-python.c
1708
process_stat(counter, cpu,
tools/perf/util/scripting-engines/trace-event-python.c
863
_PyLong_FromLong(sample->cpu));
tools/perf/util/scripting-engines/trace-event-python.c
951
int cpu = sample->cpu;
tools/perf/util/session.c
1044
printf("%u ", sample->cpu);
tools/perf/util/session.c
2772
struct perf_cpu cpu;
tools/perf/util/session.c
2794
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/util/session.c
2795
if (cpu.cpu >= nr_cpus) {
tools/perf/util/session.c
2797
"Consider raising MAX_NR_CPUS\n", cpu.cpu);
tools/perf/util/session.c
2801
__set_bit(cpu.cpu, cpu_bitmap);
tools/perf/util/session.c
2899
fprintf(stdout, " cpu: %"PRI_ld64, e->cpu);
tools/perf/util/session.c
2914
sid->cpu.cpu = e->cpu;
tools/perf/util/session.c
2921
sid->vcpu.cpu = e2->vcpu;
tools/perf/util/session.c
587
event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
tools/perf/util/session.c
595
event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
tools/perf/util/session.c
631
data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
tools/perf/util/session.c
675
event->stat.cpu = bswap_32(event->stat.cpu);
tools/perf/util/sort.c
916
return right->cpu - left->cpu;
tools/perf/util/sort.c
922
return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
tools/perf/util/stat-display.c
1129
struct perf_cpu cpu;
tools/perf/util/stat-display.c
1131
perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
tools/perf/util/stat-display.c
1141
if (!perf_cpu_map__has(evsel__cpus(counter), cpu))
tools/perf/util/stat-display.c
1145
if (config->aggr_map->map[aggr_idx].cpu.cpu == cpu.cpu)
tools/perf/util/stat-display.c
1150
os->id = aggr_cpu_id__cpu(cpu, /*data=*/NULL);
tools/perf/util/stat-display.c
1491
struct perf_cpu curr_cpu = config->aggr_map->map[aggr_idx].cpu;
tools/perf/util/stat-display.c
290
} else if (id.cpu.cpu > -1) {
tools/perf/util/stat-display.c
292
aggr_header_lens[AGGR_NONE] - 3, id.cpu.cpu);
tools/perf/util/stat-display.c
345
} else if (id.cpu.cpu > -1) {
tools/perf/util/stat-display.c
347
id.cpu.cpu, sep);
tools/perf/util/stat-display.c
396
} else if (id.cpu.cpu > -1) {
tools/perf/util/stat-display.c
398
id.cpu.cpu);
tools/perf/util/stat-display.c
899
struct perf_cpu cpu;
tools/perf/util/stat-display.c
933
config->aggr_get_id(config, (struct perf_cpu){ .cpu = 0 });
tools/perf/util/stat-display.c
942
perf_cpu_map__for_each_cpu(cpu, idx, counter->core.cpus) {
tools/perf/util/stat-display.c
943
struct aggr_cpu_id own_id = config->aggr_get_id(config, cpu);
tools/perf/util/stat-shadow.c
34
if (config->aggr_map->map[aggr_idx].cpu.cpu == 0) {
tools/perf/util/stat.c
309
struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
tools/perf/util/stat.c
340
s = cpu__get_socket_id(cpu);
tools/perf/util/stat.c
348
d = cpu__get_die_id(cpu);
tools/perf/util/stat.c
425
struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
tools/perf/util/stat.c
426
struct aggr_cpu_id aggr_id = config->aggr_get_id(config, cpu);
tools/perf/util/stat.c
582
struct perf_cpu cpu;
tools/perf/util/stat.c
586
perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
tools/perf/util/stat.c
589
id = aggr_cpu_id__core(cpu, NULL);
tools/perf/util/stat.c
599
perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
tools/perf/util/stat.c
602
id = aggr_cpu_id__core(cpu, NULL);
tools/perf/util/stat.c
619
struct perf_cpu cpu;
tools/perf/util/stat.c
625
perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
tools/perf/util/stat.c
631
core_id = aggr_cpu_id__core(cpu, NULL);
tools/perf/util/stat.c
666
cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
tools/perf/util/stat.c
668
pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
tools/perf/util/stat.c
674
st->cpu, st->thread, evsel__name(counter));
tools/perf/util/stat.c
688
st->id, st->cpu, st->thread);
tools/perf/util/svghelper.c
221
void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
tools/perf/util/svghelper.c
227
fprintf(svgfile, "<title>#%d blocked %s</title>\n", cpu,
tools/perf/util/svghelper.c
235
void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
tools/perf/util/svghelper.c
250
cpu, time_to_string(end - start));
tools/perf/util/svghelper.c
258
if (cpu > 9)
tools/perf/util/svghelper.c
266
time2pixels(start), Yslot * SLOT_MULT + SLOT_HEIGHT - 1, text_size, cpu + 1);
tools/perf/util/svghelper.c
289
void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
tools/perf/util/svghelper.c
313
fprintf(svgfile, "<title>#%d waiting %s</title>\n", cpu, time_to_string(end - start));
tools/perf/util/svghelper.c
357
void svg_cpu_box(int cpu, u64 __max_freq, u64 __turbo_freq)
tools/perf/util/svghelper.c
371
cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
tools/perf/util/svghelper.c
373
sprintf(cpu_string, "CPU %i", (int)cpu);
tools/perf/util/svghelper.c
375
10+time2pixels(first_time), cpu2y(cpu) + SLOT_HEIGHT/2, cpu_string);
tools/perf/util/svghelper.c
378
10+time2pixels(first_time), cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - 4, cpu_model());
tools/perf/util/svghelper.c
383
void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace)
tools/perf/util/svghelper.c
398
fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\">\n", time2pixels(start), cpu2y(cpu));
tools/perf/util/svghelper.c
417
void svg_cstate(int cpu, u64 start, u64 end, int type)
tools/perf/util/svghelper.c
435
cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
tools/perf/util/svghelper.c
44
static double cpu2slot(int cpu)
tools/perf/util/svghelper.c
445
time2pixels(start), cpu2y(cpu)+width, width, type);
tools/perf/util/svghelper.c
46
return 2 * cpu + 1;
tools/perf/util/svghelper.c
474
void svg_pstate(int cpu, u64 start, u64 end, u64 freq)
tools/perf/util/svghelper.c
485
height = 1 + cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - height;
tools/perf/util/svghelper.c
51
static double cpu2y(int cpu)
tools/perf/util/svghelper.c
54
return cpu2slot(topology_map[cpu]) * SLOT_MULT;
tools/perf/util/svghelper.c
56
return cpu2slot(cpu) * SLOT_MULT;
tools/perf/util/svghelper.c
700
static void scan_thread_topology(int *map, struct topology *t, int cpu,
tools/perf/util/svghelper.c
707
if (!test_bit(cpu, cpumask_bits(&t->sib_thr[i])))
tools/perf/util/svghelper.c
720
int cpu;
tools/perf/util/svghelper.c
723
for_each_set_bit(cpu, cpumask_bits(&t->sib_core[i]), nr_cpus)
tools/perf/util/svghelper.c
724
scan_thread_topology(map, t, cpu, &pos, nr_cpus);
tools/perf/util/svghelper.c
731
struct perf_cpu cpu;
tools/perf/util/svghelper.c
737
perf_cpu_map__for_each_cpu(cpu, idx, map) {
tools/perf/util/svghelper.c
738
if (cpu.cpu >= nr_cpus) {
tools/perf/util/svghelper.c
743
__set_bit(cpu.cpu, cpumask_bits(b));
tools/perf/util/svghelper.h
14
void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
tools/perf/util/svghelper.h
15
void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
tools/perf/util/svghelper.h
16
void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
tools/perf/util/svghelper.h
17
void svg_cpu_box(int cpu, u64 max_frequency, u64 turbo_frequency);
tools/perf/util/svghelper.h
20
void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace);
tools/perf/util/svghelper.h
21
void svg_cstate(int cpu, u64 start, u64 end, int type);
tools/perf/util/svghelper.h
22
void svg_pstate(int cpu, u64 start, u64 end, u64 freq);
tools/perf/util/symbol.c
2365
struct perf_cpu cpu;
tools/perf/util/symbol.c
2378
perf_cpu_map__for_each_cpu(cpu, i, map) {
tools/perf/util/symbol.c
2379
if (cpu.cpu <= 0 || cpu.cpu > MAX_NR_CPUS) {
tools/perf/util/symbol.c
2380
pr_err("Requested parallelism level %d is invalid.\n", cpu.cpu);
tools/perf/util/symbol.c
2383
__clear_bit(cpu.cpu, symbol_conf.parallelism_filter);
tools/perf/util/synthetic-events.c
1264
data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu;
tools/perf/util/synthetic-events.c
1270
struct perf_cpu cpu;
tools/perf/util/synthetic-events.c
1277
perf_cpu_map__for_each_cpu(cpu, idx, data->map) {
tools/perf/util/synthetic-events.c
1278
int bit_word = cpu.cpu / 32;
tools/perf/util/synthetic-events.c
1279
u32 bit_mask = 1U << (cpu.cpu & 31);
tools/perf/util/synthetic-events.c
1299
syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 1 : 0;
tools/perf/util/synthetic-events.c
1301
syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu;
tools/perf/util/synthetic-events.c
1302
syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu;
tools/perf/util/synthetic-events.c
1420
struct perf_cpu cpu, u32 thread, u64 id,
tools/perf/util/synthetic-events.c
1432
event.cpu = cpu.cpu;
tools/perf/util/synthetic-events.c
1658
u.val32[0] = sample->cpu;
tools/perf/util/synthetic-events.c
1838
u.val32[0] = sample->cpu;
tools/perf/util/synthetic-events.c
1921
e->cpu = sid->cpu.cpu;
tools/perf/util/synthetic-events.c
1929
e2[i].vcpu = sid->vcpu.cpu;
tools/perf/util/synthetic-events.c
2539
__u64 *cpu, __u64 timestamp)
tools/perf/util/synthetic-events.c
2561
if (io__get_dec(io, (__u64 *)cpu) != ' ')
tools/perf/util/synthetic-events.c
2582
cs->cpu = *cpu;
tools/perf/util/synthetic-events.c
2592
__u64 cpu, __u64 timestamp)
tools/perf/util/synthetic-events.c
2644
ds->cpu = cpu;
tools/perf/util/synthetic-events.c
2663
__u64 cpu = -1;
tools/perf/util/synthetic-events.c
2704
&cpu, timestamp);
tools/perf/util/synthetic-events.c
2707
cpu, timestamp);
tools/perf/util/synthetic-events.c
2712
this_cpu.cpu = cpu;
tools/perf/util/synthetic-events.c
61
.cpu = -1,
tools/perf/util/synthetic-events.h
88
int perf_event__synthesize_stat(const struct perf_tool *tool, struct perf_cpu cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine);
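The synthetic-events entries at lines 1278-1279 spell out how a CPU number is folded into a bitmap of 32-bit words: word index cpu / 32, bit mask 1U << (cpu & 31). A self-contained sketch of the same arithmetic (the array size is made up for the example):

/* Sketch of the word/mask arithmetic quoted above for setting a
 * CPU's bit in a bitmap built from 32-bit words. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t mask_words[4] = { 0 };         /* enough for 128 CPUs */
        int cpu = 37;                           /* example CPU number */

        int bit_word = cpu / 32;                /* which 32-bit word */
        uint32_t bit_mask = 1U << (cpu & 31);   /* which bit inside it */

        mask_words[bit_word] |= bit_mask;
        printf("cpu %d -> word %d, mask 0x%08x\n", cpu, bit_word, bit_mask);
        return 0;
}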
tools/perf/util/thread-stack.c
1107
struct thread_stack *ts = thread__stack(thread, sample->cpu);
tools/perf/util/thread-stack.c
1118
ts = thread_stack__new(thread, sample->cpu, crp, true, 0);
tools/perf/util/thread-stack.c
1232
size_t thread_stack__depth(struct thread *thread, int cpu)
tools/perf/util/thread-stack.c
1234
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
173
static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
tools/perf/util/thread-stack.c
182
if (thread_stack__per_cpu(thread) && cpu > 0)
tools/perf/util/thread-stack.c
183
new_sz = roundup_pow_of_two(cpu + 1);
tools/perf/util/thread-stack.c
197
if (thread_stack__per_cpu(thread) && cpu > 0 &&
tools/perf/util/thread-stack.c
198
(unsigned int)cpu < ts->arr_sz)
tools/perf/util/thread-stack.c
199
ts += cpu;
tools/perf/util/thread-stack.c
208
static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
tools/perf/util/thread-stack.c
212
if (cpu < 0)
tools/perf/util/thread-stack.c
213
cpu = 0;
tools/perf/util/thread-stack.c
215
if (!ts || (unsigned int)cpu >= ts->arr_sz)
tools/perf/util/thread-stack.c
218
ts += cpu;
tools/perf/util/thread-stack.c
227
int cpu)
tools/perf/util/thread-stack.c
233
return thread__cpu_stack(thread, cpu);
tools/perf/util/thread-stack.c
406
int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
tools/perf/util/thread-stack.c
410
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
416
ts = thread_stack__new(thread, cpu, NULL, callstack, br_stack_sz);
tools/perf/util/thread-stack.c
473
void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr)
tools/perf/util/thread-stack.c
475
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
521
void thread_stack__sample(struct thread *thread, int cpu,
tools/perf/util/thread-stack.c
525
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
564
void thread_stack__sample_late(struct thread *thread, int cpu,
tools/perf/util/thread-stack.c
568
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
617
void thread_stack__br_sample(struct thread *thread, int cpu,
tools/perf/util/thread-stack.c
620
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
678
void thread_stack__br_sample_late(struct thread *thread, int cpu,
tools/perf/util/thread-stack.c
682
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.h
82
int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
tools/perf/util/thread-stack.h
85
void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr);
tools/perf/util/thread-stack.h
86
void thread_stack__sample(struct thread *thread, int cpu, struct ip_callchain *chain,
tools/perf/util/thread-stack.h
88
void thread_stack__sample_late(struct thread *thread, int cpu,
tools/perf/util/thread-stack.h
91
void thread_stack__br_sample(struct thread *thread, int cpu,
tools/perf/util/thread-stack.h
93
void thread_stack__br_sample_late(struct thread *thread, int cpu,
tools/perf/util/thread-stack.h
98
size_t thread_stack__depth(struct thread *thread, int cpu);
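The thread-stack entries show that per-CPU call stacks are kept in an array indexed by CPU, sized with roundup_pow_of_two(cpu + 1) when a stack for cpu > 0 is first needed (lines 182-183 above). A sketch of that sizing rule, with a plain-C stand-in for the kernel's roundup_pow_of_two() helper and an assumed default size of 1 for cpu 0:

/* Sketch of the per-CPU array sizing rule quoted above. */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int x)
{
        unsigned int r = 1;

        while (r < x)
                r <<= 1;
        return r;
}

int main(void)
{
        for (int cpu = 0; cpu < 10; cpu += 3)
                printf("cpu %d -> arr_sz %u\n", cpu,
                       cpu > 0 ? roundup_pow_of_two(cpu + 1) : 1);
        return 0;
}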
tools/perf/util/thread.h
180
return RC_CHK_ACCESS(thread)->cpu;
tools/perf/util/thread.h
183
static inline void thread__set_cpu(struct thread *thread, int cpu)
tools/perf/util/thread.h
185
RC_CHK_ACCESS(thread)->cpu = cpu;
tools/perf/util/thread.h
40
int cpu;
tools/perf/util/tool_pmu.c
124
static int read_stat_field(int fd, struct perf_cpu cpu, int field, __u64 *val)
tools/perf/util/tool_pmu.c
133
for (i = -1; i < cpu.cpu; i++) {
tools/perf/util/tool_pmu.c
277
struct perf_cpu cpu;
tools/perf/util/tool_pmu.c
279
cpu = perf_cpu_map__cpu(evsel->core.cpus, idx);
tools/perf/util/tool_pmu.c
280
err = read_stat_field(fd, cpu, system ? 3 : 1,
tools/perf/util/tool_pmu.c
356
*result = cpu__max_present_cpu().cpu;
tools/perf/util/tool_pmu.c
369
*result = cpu__max_present_cpu().cpu;
tools/perf/util/tool_pmu.c
537
struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus,
tools/perf/util/tool_pmu.c
540
err = read_stat_field(fd, cpu, system ? 3 : 1, &cur_time);
tools/perf/util/trace-event-parse.c
103
int cpu, void *data, int size, FILE *fp)
tools/perf/util/trace-event-parse.c
109
record.cpu = cpu;
tools/perf/util/trace-event.h
43
int cpu, void *data, int size, FILE *fp);
tools/perf/util/util.c
520
unsigned int cpu;
tools/perf/util/util.c
521
int err = syscall(__NR_getcpu, &cpu, NULL, NULL);
tools/perf/util/util.c
524
return cpu;
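The util.c entries quote a direct getcpu system call that asks the kernel which CPU the calling thread is currently running on. A complete, runnable version of the same call (Linux-only):

/* Minimal standalone version of the getcpu() call quoted above. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        unsigned int cpu;

        if (syscall(__NR_getcpu, &cpu, NULL, NULL) != 0) {
                perror("getcpu");
                return 1;
        }
        printf("running on CPU %u\n", cpu);
        return 0;
}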
tools/power/cpupower/bench/benchmark.c
103
if (set_cpufreq_governor("performance", config->cpu) != 0)
tools/power/cpupower/bench/benchmark.c
146
if (set_cpufreq_governor(config->governor, config->cpu) != 0)
tools/power/cpupower/bench/main.c
101
sscanf(optarg, "%u", &config->cpu);
tools/power/cpupower/bench/main.c
172
config->cpu,
tools/power/cpupower/bench/parse.c
137
config->cpu = 0;
tools/power/cpupower/bench/parse.c
213
sscanf(val, "%u", &config->cpu);
tools/power/cpupower/bench/parse.h
18
unsigned int cpu; /* cpu for which the affinity is set */
tools/power/cpupower/bench/system.c
155
printf("set cpu affinity to cpu #%u\n", config->cpu);
tools/power/cpupower/bench/system.c
157
set_cpu_affinity(config->cpu);
tools/power/cpupower/bench/system.c
46
int set_cpufreq_governor(char *governor, unsigned int cpu)
tools/power/cpupower/bench/system.c
51
if (cpupower_is_cpu_online(cpu) != 1) {
tools/power/cpupower/bench/system.c
53
fprintf(stderr, "error: cpu %u does not exist\n", cpu);
tools/power/cpupower/bench/system.c
57
if (cpufreq_modify_policy_governor(cpu, governor) != 0) {
tools/power/cpupower/bench/system.c
75
int set_cpu_affinity(unsigned int cpu)
tools/power/cpupower/bench/system.c
80
CPU_SET(cpu, &cpuset);
tools/power/cpupower/bench/system.c
82
dprintf("set affinity to cpu #%u\n", cpu);
tools/power/cpupower/bench/system.h
11
int set_cpufreq_governor(char *governor, unsigned int cpu);
tools/power/cpupower/bench/system.h
12
int set_cpu_affinity(unsigned int cpu);
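set_cpu_affinity() in bench/system.c pins the benchmark to one CPU via CPU_SET(); a minimal standalone equivalent using sched_setaffinity(2), with reporting simplified relative to the original:

/* Sketch of what set_cpu_affinity() does: pin the calling process
 * to a single CPU. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int set_cpu_affinity(unsigned int cpu)
{
        cpu_set_t cpuset;

        CPU_ZERO(&cpuset);
        CPU_SET(cpu, &cpuset);
        return sched_setaffinity(0, sizeof(cpuset), &cpuset);
}

int main(void)
{
        if (set_cpu_affinity(0) != 0)
                perror("sched_setaffinity");
        return 0;
}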
tools/power/cpupower/debug/i386/centrino-decode.c
101
cpu = strtoul(argv[1], NULL, 0);
tools/power/cpupower/debug/i386/centrino-decode.c
102
if (cpu >= MCPU)
tools/power/cpupower/debug/i386/centrino-decode.c
107
decode(cpu);
tools/power/cpupower/debug/i386/centrino-decode.c
109
decode_live(cpu);
tools/power/cpupower/debug/i386/centrino-decode.c
28
static int rdmsr(unsigned int cpu, unsigned int msr,
tools/power/cpupower/debug/i386/centrino-decode.c
38
if (cpu > MCPU)
tools/power/cpupower/debug/i386/centrino-decode.c
41
sprintf(file, "/dev/cpu/%d/msr", cpu);
tools/power/cpupower/debug/i386/centrino-decode.c
75
static int decode_live(unsigned int cpu)
tools/power/cpupower/debug/i386/centrino-decode.c
80
err = rdmsr(cpu, MSR_IA32_PERF_STATUS, &lo, &hi);
tools/power/cpupower/debug/i386/centrino-decode.c
83
printf("can't get MSR_IA32_PERF_STATUS for cpu %d\n", cpu);
tools/power/cpupower/debug/i386/centrino-decode.c
96
unsigned int cpu, mode = 0;
tools/power/cpupower/debug/i386/centrino-decode.c
99
cpu = 0;
tools/power/cpupower/debug/i386/powernow-k8-decode.c
27
static int get_fidvid(uint32_t cpu, uint32_t *fid, uint32_t *vid)
tools/power/cpupower/debug/i386/powernow-k8-decode.c
34
if (cpu > MCPU)
tools/power/cpupower/debug/i386/powernow-k8-decode.c
37
sprintf(file, "/dev/cpu/%d/msr", cpu);
tools/power/cpupower/debug/i386/powernow-k8-decode.c
71
int cpu;
tools/power/cpupower/debug/i386/powernow-k8-decode.c
75
cpu = 0;
tools/power/cpupower/debug/i386/powernow-k8-decode.c
77
cpu = strtoul(argv[1], NULL, 0);
tools/power/cpupower/debug/i386/powernow-k8-decode.c
79
err = get_fidvid(cpu, &fid, &vid);
tools/power/cpupower/debug/i386/powernow-k8-decode.c
90
cpu,
tools/power/cpupower/lib/acpi_cppc.c
17
static int acpi_cppc_read_file(unsigned int cpu, const char *fname,
tools/power/cpupower/lib/acpi_cppc.c
23
cpu, fname);
tools/power/cpupower/lib/acpi_cppc.c
38
unsigned long acpi_cppc_get_data(unsigned int cpu, enum acpi_cppc_value which)
tools/power/cpupower/lib/acpi_cppc.c
48
len = acpi_cppc_read_file(cpu, acpi_cppc_value_files[which],
tools/power/cpupower/lib/acpi_cppc.h
18
unsigned long acpi_cppc_get_data(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.c
117
static unsigned long sysfs_cpufreq_get_one_value(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.c
120
return cpufreq_get_sysfs_value_from_table(cpu, cpufreq_value_files,
tools/power/cpupower/lib/cpufreq.c
141
static char *sysfs_cpufreq_get_one_string(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.c
151
len = sysfs_cpufreq_read_file(cpu, cpufreq_string_files[which],
tools/power/cpupower/lib/cpufreq.c
183
static int sysfs_cpufreq_write_one_value(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.c
190
if (sysfs_cpufreq_write_file(cpu, cpufreq_write_files[which],
tools/power/cpupower/lib/cpufreq.c
197
unsigned long cpufreq_get_freq_kernel(unsigned int cpu)
tools/power/cpupower/lib/cpufreq.c
199
return sysfs_cpufreq_get_one_value(cpu, SCALING_CUR_FREQ);
tools/power/cpupower/lib/cpufreq.c
202
unsigned long cpufreq_get_freq_hardware(unsigned int cpu)
tools/power/cpupower/lib/cpufreq.c
204
return sysfs_cpufreq_get_one_value(cpu, CPUINFO_CUR_FREQ);
tools/power/cpupower/lib/cpufreq.c
207
unsigned long cpufreq_get_transition_latency(unsigned int cpu)
tools/power/cpupower/lib/cpufreq.c
209
return sysfs_cpufreq_get_one_value(cpu, CPUINFO_LATENCY);
tools/power/cpupower/lib/cpufreq.c
212
char *cpufreq_get_energy_performance_preference(unsigned int cpu)
tools/power/cpupower/lib/cpufreq.c
214
return sysfs_cpufreq_get_one_string(cpu, ENERGY_PERFORMANCE_PREFERENCE);
tools/power/cpupower/lib/cpufreq.c
224
int cpufreq_get_hardware_limits(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.c
23
static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
tools/power/cpupower/lib/cpufreq.c
231
*min = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MIN_FREQ);
tools/power/cpupower/lib/cpufreq.c
235
*max = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MAX_FREQ);
tools/power/cpupower/lib/cpufreq.c
242
char *cpufreq_get_driver(unsigned int cpu)
tools/power/cpupower/lib/cpufreq.c
244
return sysfs_cpufreq_get_one_string(cpu, SCALING_DRIVER);
tools/power/cpupower/lib/cpufreq.c
254
struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu)
tools/power/cpupower/lib/cpufreq.c
262
policy->governor = sysfs_cpufreq_get_one_string(cpu, SCALING_GOVERNOR);
tools/power/cpupower/lib/cpufreq.c
267
policy->min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ);
tools/power/cpupower/lib/cpufreq.c
268
policy->max = sysfs_cpufreq_get_one_value(cpu, SCALING_MAX_FREQ);
tools/power/cpupower/lib/cpufreq.c
289
int cpu)
tools/power/cpupower/lib/cpufreq.c
29
cpu, fname);
tools/power/cpupower/lib/cpufreq.c
297
len = sysfs_cpufreq_read_file(cpu, "scaling_available_governors",
tools/power/cpupower/lib/cpufreq.c
35
static unsigned int sysfs_cpufreq_write_file(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.c
363
*cpufreq_get_available_frequencies(unsigned int cpu)
tools/power/cpupower/lib/cpufreq.c
372
len = sysfs_cpufreq_read_file(cpu, "scaling_available_frequencies",
tools/power/cpupower/lib/cpufreq.c
419
*cpufreq_get_boost_frequencies(unsigned int cpu)
tools/power/cpupower/lib/cpufreq.c
428
len = sysfs_cpufreq_read_file(cpu, "scaling_boost_frequencies",
tools/power/cpupower/lib/cpufreq.c
44
cpu, fname);
tools/power/cpupower/lib/cpufreq.c
494
static struct cpufreq_affected_cpus *sysfs_get_cpu_list(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.c
504
len = sysfs_cpufreq_read_file(cpu, file, linebuf, sizeof(linebuf));
tools/power/cpupower/lib/cpufreq.c
532
if (sscanf(one_value, "%u", &current->cpu) != 1)
tools/power/cpupower/lib/cpufreq.c
550
struct cpufreq_affected_cpus *cpufreq_get_affected_cpus(unsigned int cpu)
tools/power/cpupower/lib/cpufreq.c
552
return sysfs_get_cpu_list(cpu, "affected_cpus");
tools/power/cpupower/lib/cpufreq.c
571
struct cpufreq_affected_cpus *cpufreq_get_related_cpus(unsigned int cpu)
tools/power/cpupower/lib/cpufreq.c
573
return sysfs_get_cpu_list(cpu, "related_cpus");
tools/power/cpupower/lib/cpufreq.c
616
int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy)
tools/power/cpupower/lib/cpufreq.c
637
old_min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ);
tools/power/cpupower/lib/cpufreq.c
641
ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
tools/power/cpupower/lib/cpufreq.c
647
ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ, min,
tools/power/cpupower/lib/cpufreq.c
653
ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
tools/power/cpupower/lib/cpufreq.c
659
return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR,
tools/power/cpupower/lib/cpufreq.c
664
int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq)
tools/power/cpupower/lib/cpufreq.c
670
return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ,
tools/power/cpupower/lib/cpufreq.c
675
int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq)
tools/power/cpupower/lib/cpufreq.c
681
return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
tools/power/cpupower/lib/cpufreq.c
685
int cpufreq_modify_policy_governor(unsigned int cpu, char *governor)
tools/power/cpupower/lib/cpufreq.c
695
return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR,
tools/power/cpupower/lib/cpufreq.c
699
int cpufreq_set_frequency(unsigned int cpu, unsigned long target_frequency)
tools/power/cpupower/lib/cpufreq.c
701
struct cpufreq_policy *pol = cpufreq_get_policy(cpu);
tools/power/cpupower/lib/cpufreq.c
710
ret = cpufreq_modify_policy_governor(cpu, userspace_gov);
tools/power/cpupower/lib/cpufreq.c
721
return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_SET_SPEED,
tools/power/cpupower/lib/cpufreq.c
725
struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.c
735
len = sysfs_cpufreq_read_file(cpu, "stats/time_in_state",
tools/power/cpupower/lib/cpufreq.c
800
unsigned long cpufreq_get_transitions(unsigned int cpu)
tools/power/cpupower/lib/cpufreq.c
802
return sysfs_cpufreq_get_one_value(cpu, STATS_NUM_TRANSITIONS);
tools/power/cpupower/lib/cpufreq.c
86
unsigned long cpufreq_get_sysfs_value_from_table(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.c
99
len = sysfs_cpufreq_read_file(cpu, table[index], linebuf,
tools/power/cpupower/lib/cpufreq.h
108
struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu);
tools/power/cpupower/lib/cpufreq.h
122
*cpufreq_get_available_governors(unsigned int cpu);
tools/power/cpupower/lib/cpufreq.h
136
*cpufreq_get_available_frequencies(unsigned int cpu);
tools/power/cpupower/lib/cpufreq.h
142
*cpufreq_get_boost_frequencies(unsigned int cpu);
tools/power/cpupower/lib/cpufreq.h
155
int cpu);
tools/power/cpupower/lib/cpufreq.h
167
int cpu);
tools/power/cpupower/lib/cpufreq.h
177
struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.h
182
unsigned long cpufreq_get_transitions(unsigned int cpu);
tools/power/cpupower/lib/cpufreq.h
191
int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy);
tools/power/cpupower/lib/cpufreq.h
199
int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq);
tools/power/cpupower/lib/cpufreq.h
200
int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq);
tools/power/cpupower/lib/cpufreq.h
201
int cpufreq_modify_policy_governor(unsigned int cpu, char *governor);
tools/power/cpupower/lib/cpufreq.h
211
int cpufreq_set_frequency(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.h
221
unsigned long cpufreq_get_sysfs_value_from_table(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.h
31
unsigned int cpu;
tools/power/cpupower/lib/cpufreq.h
57
unsigned long cpufreq_get_freq_kernel(unsigned int cpu);
tools/power/cpupower/lib/cpufreq.h
59
unsigned long cpufreq_get_freq_hardware(unsigned int cpu);
tools/power/cpupower/lib/cpufreq.h
61
#define cpufreq_get(cpu) cpufreq_get_freq_kernel(cpu);
tools/power/cpupower/lib/cpufreq.h
68
unsigned long cpufreq_get_transition_latency(unsigned int cpu);
tools/power/cpupower/lib/cpufreq.h
76
char *cpufreq_get_energy_performance_preference(unsigned int cpu);
tools/power/cpupower/lib/cpufreq.h
85
int cpufreq_get_hardware_limits(unsigned int cpu,
tools/power/cpupower/lib/cpufreq.h
96
char *cpufreq_get_driver(unsigned int cpu);
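The cpufreq.c/cpufreq.h entries above make up libcpupower's sysfs-backed cpufreq API. A hedged usage sketch follows; the <cpufreq.h> include path and the library link name are assumptions about how libcpupower is installed, and error handling is kept minimal:

/* Hedged usage sketch for the cpufreq calls indexed above; build
 * against the cpupower library, some attributes need root. */
#include <stdio.h>
#include <cpufreq.h>

int main(void)
{
        unsigned int cpu = 0;
        unsigned long min, max;
        struct cpufreq_policy *pol;

        printf("kernel freq: %lu kHz\n", cpufreq_get_freq_kernel(cpu));
        if (!cpufreq_get_hardware_limits(cpu, &min, &max))
                printf("hw limits: %lu - %lu kHz\n", min, max);
        pol = cpufreq_get_policy(cpu);
        if (pol)
                printf("governor: %s\n", pol->governor);
        return 0;
}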
tools/power/cpupower/lib/cpuidle.c
135
unsigned long long cpuidle_state_get_one_value(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.c
147
len = cpuidle_state_read_file(cpu, idlestate,
tools/power/cpupower/lib/cpuidle.c
176
static char *cpuidle_state_get_one_string(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.c
187
len = cpuidle_state_read_file(cpu, idlestate,
tools/power/cpupower/lib/cpuidle.c
209
int cpuidle_is_state_disabled(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.c
212
if (cpuidle_state_count(cpu) <= idlestate)
tools/power/cpupower/lib/cpuidle.c
215
if (!cpuidle_state_file_exists(cpu, idlestate,
tools/power/cpupower/lib/cpuidle.c
218
return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_DISABLE);
tools/power/cpupower/lib/cpuidle.c
230
int cpuidle_state_disable(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.c
238
if (cpuidle_state_count(cpu) <= idlestate)
tools/power/cpupower/lib/cpuidle.c
241
if (!cpuidle_state_file_exists(cpu, idlestate,
tools/power/cpupower/lib/cpuidle.c
247
bytes_written = cpuidle_state_write_file(cpu, idlestate, "disable",
tools/power/cpupower/lib/cpuidle.c
254
unsigned long cpuidle_state_latency(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.c
257
return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_LATENCY);
tools/power/cpupower/lib/cpuidle.c
260
unsigned long cpuidle_state_residency(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.c
263
return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_RESIDENCY);
tools/power/cpupower/lib/cpuidle.c
266
unsigned long cpuidle_state_usage(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.c
269
return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_USAGE);
tools/power/cpupower/lib/cpuidle.c
272
unsigned long long cpuidle_state_time(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.c
275
return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_TIME);
tools/power/cpupower/lib/cpuidle.c
278
char *cpuidle_state_name(unsigned int cpu, unsigned int idlestate)
tools/power/cpupower/lib/cpuidle.c
280
return cpuidle_state_get_one_string(cpu, idlestate, IDLESTATE_NAME);
tools/power/cpupower/lib/cpuidle.c
283
char *cpuidle_state_desc(unsigned int cpu, unsigned int idlestate)
tools/power/cpupower/lib/cpuidle.c
285
return cpuidle_state_get_one_string(cpu, idlestate, IDLESTATE_DESC);
tools/power/cpupower/lib/cpuidle.c
29
unsigned int cpuidle_state_file_exists(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.c
293
unsigned int cpuidle_state_count(unsigned int cpu)
tools/power/cpupower/lib/cpuidle.c
304
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
tools/power/cpupower/lib/cpuidle.c
310
"cpu%u/cpuidle/state%d", cpu, idlestates);
tools/power/cpupower/lib/cpuidle.c
38
cpu, idlestate, fname);
tools/power/cpupower/lib/cpuidle.c
51
unsigned int cpuidle_state_read_file(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.c
61
cpu, idlestate, fname);
tools/power/cpupower/lib/cpuidle.c
86
unsigned int cpuidle_state_write_file(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.c
96
cpu, idlestate, fname);
tools/power/cpupower/lib/cpuidle.h
11
unsigned long cpuidle_state_residency(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.h
13
unsigned long cpuidle_state_usage(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.h
15
unsigned long long cpuidle_state_time(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.h
17
char *cpuidle_state_name(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.h
19
char *cpuidle_state_desc(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.h
21
unsigned int cpuidle_state_count(unsigned int cpu);
tools/power/cpupower/lib/cpuidle.h
5
int cpuidle_is_state_disabled(unsigned int cpu,
tools/power/cpupower/lib/cpuidle.h
7
int cpuidle_state_disable(unsigned int cpu, unsigned int idlestate,
tools/power/cpupower/lib/cpuidle.h
9
unsigned long cpuidle_state_latency(unsigned int cpu,
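Likewise for the cpuidle half of the library: a hedged sketch that lists CPU 0's idle states with the calls indexed above. The <cpuidle.h> include path is an assumption, as is the caller's duty to free() the string the _name helper returns:

/* Hedged sketch listing idle states of CPU 0 via the cpuidle calls
 * indexed above. */
#include <stdio.h>
#include <stdlib.h>
#include <cpuidle.h>

int main(void)
{
        unsigned int cpu = 0;
        unsigned int count = cpuidle_state_count(cpu);

        for (unsigned int s = 0; s < count; s++) {
                char *name = cpuidle_state_name(cpu, s);

                printf("state %u: %s latency %lu us\n", s,
                       name ? name : "?", cpuidle_state_latency(cpu, s));
                free(name);     /* assumed heap-allocated by the library */
        }
        return 0;
}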
tools/power/cpupower/lib/cpupower.c
118
static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *result)
tools/power/cpupower/lib/cpupower.c
125
cpu, fname);
tools/power/cpupower/lib/cpupower.c
146
else if (top1->cpu < top2->cpu)
tools/power/cpupower/lib/cpupower.c
148
else if (top1->cpu > top2->cpu)
tools/power/cpupower/lib/cpupower.c
170
int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF);
tools/power/cpupower/lib/cpupower.c
178
for (cpu = 0; cpu < cpus; cpu++) {
tools/power/cpupower/lib/cpupower.c
179
cpu_top->core_info[cpu].cpu = cpu;
tools/power/cpupower/lib/cpupower.c
180
cpu_top->core_info[cpu].is_online = cpupower_is_cpu_online(cpu);
tools/power/cpupower/lib/cpupower.c
182
cpu,
tools/power/cpupower/lib/cpupower.c
184
&(cpu_top->core_info[cpu].pkg)) < 0) {
tools/power/cpupower/lib/cpupower.c
185
cpu_top->core_info[cpu].pkg = -1;
tools/power/cpupower/lib/cpupower.c
186
cpu_top->core_info[cpu].core = -1;
tools/power/cpupower/lib/cpupower.c
190
cpu,
tools/power/cpupower/lib/cpupower.c
192
&(cpu_top->core_info[cpu].core)) < 0) {
tools/power/cpupower/lib/cpupower.c
193
cpu_top->core_info[cpu].pkg = -1;
tools/power/cpupower/lib/cpupower.c
194
cpu_top->core_info[cpu].core = -1;
tools/power/cpupower/lib/cpupower.c
197
if (cpu_top->core_info[cpu].core == -1) {
tools/power/cpupower/lib/cpupower.c
198
strncpy(cpu_top->core_info[cpu].core_cpu_list, "-1", CPULIST_BUFFER);
tools/power/cpupower/lib/cpupower.c
202
cpu, "core_cpus_list");
tools/power/cpupower/lib/cpupower.c
205
cpu_top->core_info[cpu].core_cpu_list,
tools/power/cpupower/lib/cpupower.c
207
printf("Warning CPU%u has a 0 size core_cpus_list string", cpu);
tools/power/cpupower/lib/cpupower.c
219
for (cpu = 1; cpu < cpus; cpu++) {
tools/power/cpupower/lib/cpupower.c
220
if (strcmp(cpu_top->core_info[cpu].core_cpu_list, last_cpu_list) != 0 &&
tools/power/cpupower/lib/cpupower.c
221
cpu_top->core_info[cpu].pkg != -1) {
tools/power/cpupower/lib/cpupower.c
222
last_cpu_list = cpu_top->core_info[cpu].core_cpu_list;
tools/power/cpupower/lib/cpupower.c
234
for(cpu = 1; cpu < cpus; cpu++) {
tools/power/cpupower/lib/cpupower.c
235
if (cpu_top->core_info[cpu].pkg != last_pkg &&
tools/power/cpupower/lib/cpupower.c
236
cpu_top->core_info[cpu].pkg != -1) {
tools/power/cpupower/lib/cpupower.c
238
last_pkg = cpu_top->core_info[cpu].pkg;
tools/power/cpupower/lib/cpupower.c
75
int cpupower_is_cpu_online(unsigned int cpu)
tools/power/cpupower/lib/cpupower.c
85
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu);
tools/power/cpupower/lib/cpupower.c
94
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu);
tools/power/cpupower/lib/cpupower.h
20
int cpu;
tools/power/cpupower/lib/cpupower.h
33
int cpupower_is_cpu_online(unsigned int cpu);
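cpupower_is_cpu_online() (cpupower.c:75 above) first checks that the CPU's sysfs directory exists, then reads its online attribute, treating a missing online file (typical for cpu0) as online. A standalone sketch of that logic, with return-value conventions simplified relative to the original:

/* Standalone sketch of the online-check logic indexed above. */
#include <stdio.h>
#include <unistd.h>

#define PATH_TO_CPU "/sys/devices/system/cpu/"

static int is_cpu_online(unsigned int cpu)
{
        char path[256];
        FILE *f;
        int online;

        snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu);
        if (access(path, F_OK) != 0)
                return 0;               /* no such CPU */

        snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu);
        f = fopen(path, "r");
        if (!f)
                return 1;               /* no online file: assume online */
        if (fscanf(f, "%d", &online) != 1)
                online = 0;
        fclose(f);
        return online;
}

int main(void)
{
        printf("cpu0 online: %d\n", is_cpu_online(0));
        return 0;
}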
tools/power/cpupower/utils/cpufreq-info.c
125
static int get_boost_mode_x86(unsigned int cpu)
tools/power/cpupower/utils/cpufreq-info.c
131
ret = cpufreq_has_x86_boost_support(cpu, &support, &active, &b_states);
tools/power/cpupower/utils/cpufreq-info.c
134
" on CPU %d -- are you root?\n"), cpu);
tools/power/cpupower/utils/cpufreq-info.c
154
ret = decode_pstates(cpu, b_states, pstates, &pstate_no);
tools/power/cpupower/utils/cpufreq-info.c
180
intel_turbo_ratio = msr_intel_get_turbo_ratio(cpu);
tools/power/cpupower/utils/cpufreq-info.c
207
static int get_boost_mode_generic(unsigned int cpu)
tools/power/cpupower/utils/cpufreq-info.c
221
static int get_boost_mode(unsigned int cpu)
tools/power/cpupower/utils/cpufreq-info.c
228
return get_boost_mode_x86(cpu);
tools/power/cpupower/utils/cpufreq-info.c
230
get_boost_mode_generic(cpu);
tools/power/cpupower/utils/cpufreq-info.c
232
freqs = cpufreq_get_boost_frequencies(cpu);
tools/power/cpupower/utils/cpufreq-info.c
250
static int get_freq_kernel(unsigned int cpu, unsigned int human)
tools/power/cpupower/utils/cpufreq-info.c
252
unsigned long freq = cpufreq_get_freq_kernel(cpu);
tools/power/cpupower/utils/cpufreq-info.c
269
static int get_freq_hardware(unsigned int cpu, unsigned int human)
tools/power/cpupower/utils/cpufreq-info.c
276
freq = cpufreq_get_freq_hardware(cpu);
tools/power/cpupower/utils/cpufreq-info.c
292
static int get_hardware_limits(unsigned int cpu, unsigned int human)
tools/power/cpupower/utils/cpufreq-info.c
296
if (cpufreq_get_hardware_limits(cpu, &min, &max)) {
tools/power/cpupower/utils/cpufreq-info.c
315
static int get_driver(unsigned int cpu)
tools/power/cpupower/utils/cpufreq-info.c
317
char *driver = cpufreq_get_driver(cpu);
tools/power/cpupower/utils/cpufreq-info.c
329
static int get_policy(unsigned int cpu)
tools/power/cpupower/utils/cpufreq-info.c
331
struct cpufreq_policy *policy = cpufreq_get_policy(cpu);
tools/power/cpupower/utils/cpufreq-info.c
351
static int get_available_governors(unsigned int cpu)
tools/power/cpupower/utils/cpufreq-info.c
354
cpufreq_get_available_governors(cpu);
tools/power/cpupower/utils/cpufreq-info.c
374
static int get_affected_cpus(unsigned int cpu)
tools/power/cpupower/utils/cpufreq-info.c
376
struct cpufreq_affected_cpus *cpus = cpufreq_get_affected_cpus(cpu);
tools/power/cpupower/utils/cpufreq-info.c
385
printf("%d ", cpus->cpu);
tools/power/cpupower/utils/cpufreq-info.c
388
printf("%d\n", cpus->cpu);
tools/power/cpupower/utils/cpufreq-info.c
395
static int get_related_cpus(unsigned int cpu)
tools/power/cpupower/utils/cpufreq-info.c
397
struct cpufreq_affected_cpus *cpus = cpufreq_get_related_cpus(cpu);
tools/power/cpupower/utils/cpufreq-info.c
406
printf("%d ", cpus->cpu);
tools/power/cpupower/utils/cpufreq-info.c
409
printf("%d\n", cpus->cpu);
tools/power/cpupower/utils/cpufreq-info.c
416
static int get_freq_stats(unsigned int cpu, unsigned int human)
tools/power/cpupower/utils/cpufreq-info.c
418
unsigned long total_trans = cpufreq_get_transitions(cpu);
tools/power/cpupower/utils/cpufreq-info.c
420
struct cpufreq_stats *stats = cpufreq_get_stats(cpu, &total_time);
tools/power/cpupower/utils/cpufreq-info.c
441
static int get_epp(unsigned int cpu, bool interactive)
tools/power/cpupower/utils/cpufreq-info.c
445
epp = cpufreq_get_energy_performance_preference(cpu);
tools/power/cpupower/utils/cpufreq-info.c
458
static int get_latency(unsigned int cpu, unsigned int human)
tools/power/cpupower/utils/cpufreq-info.c
460
unsigned long latency = cpufreq_get_transition_latency(cpu);
tools/power/cpupower/utils/cpufreq-info.c
462
if (!get_epp(cpu, false))
tools/power/cpupower/utils/cpufreq-info.c
481
static int get_perf_cap(unsigned int cpu)
tools/power/cpupower/utils/cpufreq-info.c
485
amd_pstate_show_perf_and_freq(cpu, no_rounding);
tools/power/cpupower/utils/cpufreq-info.c
490
static void debug_output_one(unsigned int cpu)
tools/power/cpupower/utils/cpufreq-info.c
494
get_driver(cpu);
tools/power/cpupower/utils/cpufreq-info.c
495
get_related_cpus(cpu);
tools/power/cpupower/utils/cpufreq-info.c
496
get_affected_cpus(cpu);
tools/power/cpupower/utils/cpufreq-info.c
497
get_latency(cpu, 1);
tools/power/cpupower/utils/cpufreq-info.c
498
get_epp(cpu, true);
tools/power/cpupower/utils/cpufreq-info.c
499
get_hardware_limits(cpu, 1);
tools/power/cpupower/utils/cpufreq-info.c
501
freqs = cpufreq_get_available_frequencies(cpu);
tools/power/cpupower/utils/cpufreq-info.c
514
get_available_governors(cpu);
tools/power/cpupower/utils/cpufreq-info.c
515
get_policy(cpu);
tools/power/cpupower/utils/cpufreq-info.c
516
if (get_freq_hardware(cpu, 1) < 0)
tools/power/cpupower/utils/cpufreq-info.c
517
get_freq_kernel(cpu, 1);
tools/power/cpupower/utils/cpufreq-info.c
518
get_boost_mode(cpu);
tools/power/cpupower/utils/cpufreq-info.c
519
get_perf_cap(cpu);
tools/power/cpupower/utils/cpufreq-info.c
548
unsigned int cpu = 0;
tools/power/cpupower/utils/cpufreq-info.c
58
unsigned int cpu, nr_cpus;
tools/power/cpupower/utils/cpufreq-info.c
633
for (cpu = bitmask_first(cpus_chosen);
tools/power/cpupower/utils/cpufreq-info.c
634
cpu <= bitmask_last(cpus_chosen); cpu++) {
tools/power/cpupower/utils/cpufreq-info.c
636
if (!bitmask_isbitset(cpus_chosen, cpu))
tools/power/cpupower/utils/cpufreq-info.c
639
printf(_("analyzing CPU %d:\n"), cpu);
tools/power/cpupower/utils/cpufreq-info.c
641
if (sysfs_is_cpu_online(cpu) != 1) {
tools/power/cpupower/utils/cpufreq-info.c
649
get_boost_mode(cpu);
tools/power/cpupower/utils/cpufreq-info.c
652
debug_output_one(cpu);
tools/power/cpupower/utils/cpufreq-info.c
655
ret = get_affected_cpus(cpu);
tools/power/cpupower/utils/cpufreq-info.c
658
ret = get_related_cpus(cpu);
tools/power/cpupower/utils/cpufreq-info.c
661
ret = get_available_governors(cpu);
tools/power/cpupower/utils/cpufreq-info.c
664
ret = get_policy(cpu);
tools/power/cpupower/utils/cpufreq-info.c
667
ret = get_driver(cpu);
tools/power/cpupower/utils/cpufreq-info.c
67
for (cpu = 0; cpu < nr_cpus; cpu++) {
tools/power/cpupower/utils/cpufreq-info.c
670
ret = get_hardware_limits(cpu, human);
tools/power/cpupower/utils/cpufreq-info.c
673
ret = get_freq_hardware(cpu, human);
tools/power/cpupower/utils/cpufreq-info.c
676
ret = get_freq_kernel(cpu, human);
tools/power/cpupower/utils/cpufreq-info.c
679
ret = get_freq_stats(cpu, human);
tools/power/cpupower/utils/cpufreq-info.c
68
policy = cpufreq_get_policy(cpu);
tools/power/cpupower/utils/cpufreq-info.c
682
ret = get_latency(cpu, human);
tools/power/cpupower/utils/cpufreq-info.c
685
ret = get_perf_cap(cpu);
tools/power/cpupower/utils/cpufreq-info.c
688
ret = get_epp(cpu, true);
tools/power/cpupower/utils/cpufreq-info.c
72
if (cpufreq_get_hardware_limits(cpu, &min, &max)) {
tools/power/cpupower/utils/cpufreq-info.c
79
cpu , policy->min, max ? min_pctg : 0, policy->max,
tools/power/cpupower/utils/cpufreq-set.c
144
static int do_new_policy(unsigned int cpu, struct cpufreq_policy *new_pol)
tools/power/cpupower/utils/cpufreq-set.c
146
struct cpufreq_policy *cur_pol = cpufreq_get_policy(cpu);
tools/power/cpupower/utils/cpufreq-set.c
163
ret = cpufreq_set_policy(cpu, new_pol);
tools/power/cpupower/utils/cpufreq-set.c
171
static int do_one_cpu(unsigned int cpu, struct cpufreq_policy *new_pol,
tools/power/cpupower/utils/cpufreq-set.c
176
return cpufreq_set_frequency(cpu, freq);
tools/power/cpupower/utils/cpufreq-set.c
183
return cpufreq_modify_policy_min(cpu, new_pol->min);
tools/power/cpupower/utils/cpufreq-set.c
185
return cpufreq_modify_policy_max(cpu, new_pol->max);
tools/power/cpupower/utils/cpufreq-set.c
187
return cpufreq_modify_policy_governor(cpu,
tools/power/cpupower/utils/cpufreq-set.c
192
return do_new_policy(cpu, new_pol);
tools/power/cpupower/utils/cpufreq-set.c
204
unsigned int cpu;
tools/power/cpupower/utils/cpufreq-set.c
297
for (cpu = bitmask_first(cpus_chosen);
tools/power/cpupower/utils/cpufreq-set.c
298
cpu <= bitmask_last(cpus_chosen); cpu++) {
tools/power/cpupower/utils/cpufreq-set.c
301
if (!bitmask_isbitset(cpus_chosen, cpu) ||
tools/power/cpupower/utils/cpufreq-set.c
302
cpupower_is_cpu_online(cpu) != 1)
tools/power/cpupower/utils/cpufreq-set.c
305
cpus = cpufreq_get_related_cpus(cpu);
tools/power/cpupower/utils/cpufreq-set.c
309
bitmask_setbit(cpus_chosen, cpus->cpu);
tools/power/cpupower/utils/cpufreq-set.c
313
bitmask_setbit(cpus_chosen, cpus->cpu);
tools/power/cpupower/utils/cpufreq-set.c
321
for (cpu = bitmask_first(cpus_chosen);
tools/power/cpupower/utils/cpufreq-set.c
322
cpu <= bitmask_last(cpus_chosen); cpu++) {
tools/power/cpupower/utils/cpufreq-set.c
324
if (!bitmask_isbitset(cpus_chosen, cpu) ||
tools/power/cpupower/utils/cpufreq-set.c
325
cpupower_is_cpu_online(cpu) != 1)
tools/power/cpupower/utils/cpufreq-set.c
328
printf(_("Setting cpu: %d\n"), cpu);
tools/power/cpupower/utils/cpufreq-set.c
329
ret = do_one_cpu(cpu, &new_pol, freq, policychange);
tools/power/cpupower/utils/cpuidle-info.c
104
cstates = cpuidle_state_count(cpu);
tools/power/cpupower/utils/cpuidle-info.c
106
printf(_("CPU %u: No C-states info\n"), cpu);
tools/power/cpupower/utils/cpuidle-info.c
119
cpuidle_state_latency(cpu, cstate));
tools/power/cpupower/utils/cpuidle-info.c
121
cpuidle_state_residency(cpu, cstate));
tools/power/cpupower/utils/cpuidle-info.c
123
cpuidle_state_usage(cpu, cstate));
tools/power/cpupower/utils/cpuidle-info.c
125
cpuidle_state_time(cpu, cstate));
tools/power/cpupower/utils/cpuidle-info.c
145
unsigned int cpu = 0;
tools/power/cpupower/utils/cpuidle-info.c
190
for (cpu = bitmask_first(cpus_chosen);
tools/power/cpupower/utils/cpuidle-info.c
191
cpu <= bitmask_last(cpus_chosen); cpu++) {
tools/power/cpupower/utils/cpuidle-info.c
193
if (!bitmask_isbitset(cpus_chosen, cpu))
tools/power/cpupower/utils/cpuidle-info.c
196
printf(_("analyzing CPU %d:\n"), cpu);
tools/power/cpupower/utils/cpuidle-info.c
198
if (sysfs_is_cpu_online(cpu) != 1) {
tools/power/cpupower/utils/cpuidle-info.c
207
proc_cpuidle_cpu_output(cpu);
tools/power/cpupower/utils/cpuidle-info.c
211
cpuidle_cpu_output(cpu, verbose);
tools/power/cpupower/utils/cpuidle-info.c
23
static void cpuidle_cpu_output(unsigned int cpu, int verbose)
tools/power/cpupower/utils/cpuidle-info.c
28
idlestates = cpuidle_state_count(cpu);
tools/power/cpupower/utils/cpuidle-info.c
30
printf(_("CPU %u: No idle states\n"), cpu);
tools/power/cpupower/utils/cpuidle-info.c
37
tmp = cpuidle_state_name(cpu, idlestate);
tools/power/cpupower/utils/cpuidle-info.c
49
int disabled = cpuidle_is_state_disabled(cpu, idlestate);
tools/power/cpupower/utils/cpuidle-info.c
53
tmp = cpuidle_state_name(cpu, idlestate);
tools/power/cpupower/utils/cpuidle-info.c
59
tmp = cpuidle_state_desc(cpu, idlestate);
tools/power/cpupower/utils/cpuidle-info.c
66
cpuidle_state_latency(cpu, idlestate));
tools/power/cpupower/utils/cpuidle-info.c
68
cpuidle_state_residency(cpu, idlestate));
tools/power/cpupower/utils/cpuidle-info.c
70
cpuidle_state_usage(cpu, idlestate));
tools/power/cpupower/utils/cpuidle-info.c
72
cpuidle_state_time(cpu, idlestate));
tools/power/cpupower/utils/cpuidle-info.c
99
static void proc_cpuidle_cpu_output(unsigned int cpu)
tools/power/cpupower/utils/cpuidle-set.c
102
for (cpu = bitmask_first(cpus_chosen);
tools/power/cpupower/utils/cpuidle-set.c
103
cpu <= bitmask_last(cpus_chosen); cpu++) {
tools/power/cpupower/utils/cpuidle-set.c
105
if (!bitmask_isbitset(cpus_chosen, cpu))
tools/power/cpupower/utils/cpuidle-set.c
108
if (cpupower_is_cpu_online(cpu) != 1)
tools/power/cpupower/utils/cpuidle-set.c
111
idlestates = cpuidle_state_count(cpu);
tools/power/cpupower/utils/cpuidle-set.c
117
ret = cpuidle_state_disable(cpu, idlestate, 1);
tools/power/cpupower/utils/cpuidle-set.c
119
printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu);
tools/power/cpupower/utils/cpuidle-set.c
122
idlestate, cpu);
tools/power/cpupower/utils/cpuidle-set.c
127
idlestate, cpu);
tools/power/cpupower/utils/cpuidle-set.c
130
ret = cpuidle_state_disable(cpu, idlestate, 0);
tools/power/cpupower/utils/cpuidle-set.c
132
printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu);
tools/power/cpupower/utils/cpuidle-set.c
135
idlestate, cpu);
tools/power/cpupower/utils/cpuidle-set.c
140
idlestate, cpu);
tools/power/cpupower/utils/cpuidle-set.c
145
(cpu, idlestate);
tools/power/cpupower/utils/cpuidle-set.c
147
(cpu, idlestate);
tools/power/cpupower/utils/cpuidle-set.c
151
(cpu, idlestate, 0);
tools/power/cpupower/utils/cpuidle-set.c
153
printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu);
tools/power/cpupower/utils/cpuidle-set.c
159
(cpu, idlestate, 1);
tools/power/cpupower/utils/cpuidle-set.c
161
printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu);
tools/power/cpupower/utils/cpuidle-set.c
168
(cpu, idlestate);
tools/power/cpupower/utils/cpuidle-set.c
171
(cpu, idlestate, 0);
tools/power/cpupower/utils/cpuidle-set.c
173
printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu);
tools/power/cpupower/utils/cpuidle-set.c
31
unsigned int cpu = 0, idlestate = 0, idlestates = 0;
tools/power/cpupower/utils/cpupower-info.c
104
ret = cpupower_intel_get_perf_bias(cpu);
tools/power/cpupower/utils/cpupower-info.c
33
unsigned int cpu;
tools/power/cpupower/utils/cpupower-info.c
90
for (cpu = bitmask_first(cpus_chosen);
tools/power/cpupower/utils/cpupower-info.c
91
cpu <= bitmask_last(cpus_chosen); cpu++) {
tools/power/cpupower/utils/cpupower-info.c
93
if (!bitmask_isbitset(cpus_chosen, cpu))
tools/power/cpupower/utils/cpupower-info.c
96
printf(_("analyzing CPU %d:\n"), cpu);
tools/power/cpupower/utils/cpupower-info.c
98
if (sysfs_is_cpu_online(cpu) != 1){
tools/power/cpupower/utils/cpupower-set.c
141
for (cpu = bitmask_first(cpus_chosen);
tools/power/cpupower/utils/cpupower-set.c
142
cpu <= bitmask_last(cpus_chosen); cpu++) {
tools/power/cpupower/utils/cpupower-set.c
144
if (!bitmask_isbitset(cpus_chosen, cpu))
tools/power/cpupower/utils/cpupower-set.c
147
if (sysfs_is_cpu_online(cpu) != 1){
tools/power/cpupower/utils/cpupower-set.c
148
fprintf(stderr, _("Cannot set values on CPU %d:"), cpu);
tools/power/cpupower/utils/cpupower-set.c
154
ret = cpupower_intel_set_perf_bias(cpu, perf_bias);
tools/power/cpupower/utils/cpupower-set.c
157
"value on CPU %d\n"), cpu);
tools/power/cpupower/utils/cpupower-set.c
163
ret = cpupower_set_epp(cpu, epp);
tools/power/cpupower/utils/cpupower-set.c
166
"Error setting epp value on CPU %d\n", cpu);
tools/power/cpupower/utils/cpupower-set.c
38
unsigned int cpu;
tools/power/cpupower/utils/helpers/amd.c
112
int decode_pstates(unsigned int cpu, int boost_states,
tools/power/cpupower/utils/helpers/amd.c
125
if (read_msr(cpu, MSR_AMD_PSTATE_LIMIT, &val))
tools/power/cpupower/utils/helpers/amd.c
136
if (read_msr(cpu, MSR_AMD_PSTATE + i, &pstate.val))
tools/power/cpupower/utils/helpers/amd.c
193
static unsigned long amd_pstate_get_data(unsigned int cpu,
tools/power/cpupower/utils/helpers/amd.c
196
return cpufreq_get_sysfs_value_from_table(cpu,
tools/power/cpupower/utils/helpers/amd.c
202
void amd_pstate_boost_init(unsigned int cpu, int *support, int *active)
tools/power/cpupower/utils/helpers/amd.c
207
highest_perf = amd_pstate_get_data(cpu, AMD_PSTATE_HIGHEST_PERF);
tools/power/cpupower/utils/helpers/amd.c
208
nominal_perf = acpi_cppc_get_data(cpu, NOMINAL_PERF);
tools/power/cpupower/utils/helpers/amd.c
214
cpufreq_get_hardware_limits(cpu, &cpuinfo_min, &cpuinfo_max);
tools/power/cpupower/utils/helpers/amd.c
215
amd_pstate_max = amd_pstate_get_data(cpu, AMD_PSTATE_MAX_FREQ);
tools/power/cpupower/utils/helpers/amd.c
220
void amd_pstate_show_perf_and_freq(unsigned int cpu, int no_rounding)
tools/power/cpupower/utils/helpers/amd.c
225
amd_pstate_get_data(cpu, AMD_PSTATE_HIGHEST_PERF));
tools/power/cpupower/utils/helpers/amd.c
230
print_speed(amd_pstate_get_data(cpu, AMD_PSTATE_MAX_FREQ), no_rounding);
tools/power/cpupower/utils/helpers/amd.c
234
acpi_cppc_get_data(cpu, NOMINAL_PERF));
tools/power/cpupower/utils/helpers/amd.c
235
print_speed(acpi_cppc_get_data(cpu, NOMINAL_FREQ) * 1000,
tools/power/cpupower/utils/helpers/amd.c
240
acpi_cppc_get_data(cpu, LOWEST_NONLINEAR_PERF));
tools/power/cpupower/utils/helpers/amd.c
241
print_speed(amd_pstate_get_data(cpu, AMD_PSTATE_LOWEST_NONLINEAR_FREQ),
tools/power/cpupower/utils/helpers/amd.c
246
acpi_cppc_get_data(cpu, LOWEST_PERF));
tools/power/cpupower/utils/helpers/amd.c
247
print_speed(acpi_cppc_get_data(cpu, LOWEST_FREQ) * 1000, no_rounding);
tools/power/cpupower/utils/helpers/amd.c
251
amd_pstate_get_data(cpu, AMD_PSTATE_HW_PREFCORE),
tools/power/cpupower/utils/helpers/amd.c
252
amd_pstate_get_data(cpu, AMD_PSTATE_PREFCORE_RANKING));
tools/power/cpupower/utils/helpers/helpers.h
115
extern int read_msr(int cpu, unsigned int idx, unsigned long long *val);
tools/power/cpupower/utils/helpers/helpers.h
116
extern int write_msr(int cpu, unsigned int idx, unsigned long long val);
tools/power/cpupower/utils/helpers/helpers.h
118
extern int cpupower_intel_set_perf_bias(unsigned int cpu, unsigned int val);
tools/power/cpupower/utils/helpers/helpers.h
119
extern int cpupower_intel_get_perf_bias(unsigned int cpu);
tools/power/cpupower/utils/helpers/helpers.h
120
extern unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu);
tools/power/cpupower/utils/helpers/helpers.h
122
extern int cpupower_set_epp(unsigned int cpu, char *epp);
tools/power/cpupower/utils/helpers/helpers.h
139
extern int decode_pstates(unsigned int cpu, int boost_states,
tools/power/cpupower/utils/helpers/helpers.h
144
int cpufreq_has_x86_boost_support(unsigned int cpu, int *support,
tools/power/cpupower/utils/helpers/helpers.h
150
void amd_pstate_boost_init(unsigned int cpu,
tools/power/cpupower/utils/helpers/helpers.h
152
void amd_pstate_show_perf_and_freq(unsigned int cpu,
tools/power/cpupower/utils/helpers/helpers.h
168
static inline int decode_pstates(unsigned int cpu, int boost_states,
tools/power/cpupower/utils/helpers/helpers.h
172
static inline int read_msr(int cpu, unsigned int idx, unsigned long long *val)
tools/power/cpupower/utils/helpers/helpers.h
174
static inline int write_msr(int cpu, unsigned int idx, unsigned long long val)
tools/power/cpupower/utils/helpers/helpers.h
176
static inline int cpupower_intel_set_perf_bias(unsigned int cpu, unsigned int val)
tools/power/cpupower/utils/helpers/helpers.h
178
static inline int cpupower_intel_get_perf_bias(unsigned int cpu)
tools/power/cpupower/utils/helpers/helpers.h
180
static inline unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu)
tools/power/cpupower/utils/helpers/helpers.h
183
static inline int cpupower_set_epp(unsigned int cpu, char *epp)
tools/power/cpupower/utils/helpers/helpers.h
190
static inline int cpufreq_has_x86_boost_support(unsigned int cpu, int *support,
tools/power/cpupower/utils/helpers/helpers.h
198
static inline void amd_pstate_boost_init(unsigned int cpu, int *support,
tools/power/cpupower/utils/helpers/helpers.h
201
static inline void amd_pstate_show_perf_and_freq(unsigned int cpu,
tools/power/cpupower/utils/helpers/misc.c
109
int cpupower_intel_set_perf_bias(unsigned int cpu, unsigned int val)
tools/power/cpupower/utils/helpers/misc.c
117
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/power/energy_perf_bias", cpu);
tools/power/cpupower/utils/helpers/misc.c
126
int cpupower_set_epp(unsigned int cpu, char *epp)
tools/power/cpupower/utils/helpers/misc.c
132
PATH_TO_CPU "cpu%u/cpufreq/energy_performance_preference", cpu);
tools/power/cpupower/utils/helpers/misc.c
17
int cpufreq_has_x86_boost_support(unsigned int cpu, int *support, int *active,
tools/power/cpupower/utils/helpers/misc.c
220
unsigned int cpu = 0;
tools/power/cpupower/utils/helpers/misc.c
225
for (cpu = bitmask_first(cpus_chosen);
tools/power/cpupower/utils/helpers/misc.c
226
cpu <= bitmask_last(cpus_chosen); cpu++) {
tools/power/cpupower/utils/helpers/misc.c
228
if (cpupower_is_cpu_online(cpu) == 1)
tools/power/cpupower/utils/helpers/misc.c
229
bitmask_setbit(online_cpus, cpu);
tools/power/cpupower/utils/helpers/misc.c
231
bitmask_setbit(offline_cpus, cpu);
tools/power/cpupower/utils/helpers/misc.c
37
if (!read_msr(cpu, MSR_AMD_HWCR, &val)) {
tools/power/cpupower/utils/helpers/misc.c
47
amd_pstate_boost_init(cpu, support, active);
tools/power/cpupower/utils/helpers/misc.c
87
int cpupower_intel_get_perf_bias(unsigned int cpu)
tools/power/cpupower/utils/helpers/misc.c
97
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/power/energy_perf_bias", cpu);
tools/power/cpupower/utils/helpers/msr.c
26
int read_msr(int cpu, unsigned int idx, unsigned long long *val)
tools/power/cpupower/utils/helpers/msr.c
31
sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu);
tools/power/cpupower/utils/helpers/msr.c
55
int write_msr(int cpu, unsigned int idx, unsigned long long val)
tools/power/cpupower/utils/helpers/msr.c
60
sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu);
tools/power/cpupower/utils/helpers/msr.c
75
unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu)
tools/power/cpupower/utils/helpers/msr.c
83
ret = read_msr(cpu, MSR_NEHALEM_TURBO_RATIO_LIMIT, &val);
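The msr.c entries rely on the msr character device: /dev/cpu/N/msr exposes CPU N's model-specific registers, with the MSR index as the file offset. A sketch of that pattern; the original helper may seek-then-read where this uses pread(), the example MSR index is illustrative only, and root plus the msr kernel module are required:

/* Sketch of the read_msr() pattern indexed above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static int read_msr(int cpu, unsigned int idx, unsigned long long *val)
{
        char name[64];
        int fd;

        snprintf(name, sizeof(name), "/dev/cpu/%d/msr", cpu);
        fd = open(name, O_RDONLY);
        if (fd < 0)
                return -1;
        if (pread(fd, val, sizeof(*val), idx) != sizeof(*val)) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}

int main(void)
{
        unsigned long long val;

        /* 0x10 is IA32_TIME_STAMP_COUNTER, used only as an example */
        if (!read_msr(0, 0x10, &val))
                printf("TSC MSR on cpu0: %llu\n", val);
        return 0;
}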
tools/power/cpupower/utils/helpers/sysfs.c
103
unsigned int sysfs_idlestate_file_exists(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.c
112
cpu, idlestate, fname);
tools/power/cpupower/utils/helpers/sysfs.c
124
unsigned int sysfs_idlestate_read_file(unsigned int cpu, unsigned int idlestate,
tools/power/cpupower/utils/helpers/sysfs.c
132
cpu, idlestate, fname);
tools/power/cpupower/utils/helpers/sysfs.c
157
unsigned int sysfs_idlestate_write_file(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.c
167
cpu, idlestate, fname);
tools/power/cpupower/utils/helpers/sysfs.c
203
static unsigned long long sysfs_idlestate_get_one_value(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.c
215
len = sysfs_idlestate_read_file(cpu, idlestate,
tools/power/cpupower/utils/helpers/sysfs.c
243
static char *sysfs_idlestate_get_one_string(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.c
254
len = sysfs_idlestate_read_file(cpu, idlestate,
tools/power/cpupower/utils/helpers/sysfs.c
277
int sysfs_is_idlestate_disabled(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.c
280
if (sysfs_get_idlestate_count(cpu) <= idlestate)
tools/power/cpupower/utils/helpers/sysfs.c
283
if (!sysfs_idlestate_file_exists(cpu, idlestate,
tools/power/cpupower/utils/helpers/sysfs.c
286
return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_DISABLE);
tools/power/cpupower/utils/helpers/sysfs.c
298
int sysfs_idlestate_disable(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.c
305
if (sysfs_get_idlestate_count(cpu) <= idlestate)
tools/power/cpupower/utils/helpers/sysfs.c
308
if (!sysfs_idlestate_file_exists(cpu, idlestate,
tools/power/cpupower/utils/helpers/sysfs.c
314
bytes_written = sysfs_idlestate_write_file(cpu, idlestate, "disable",
tools/power/cpupower/utils/helpers/sysfs.c
321
unsigned long sysfs_get_idlestate_latency(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.c
324
return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_LATENCY);
tools/power/cpupower/utils/helpers/sysfs.c
327
unsigned long sysfs_get_idlestate_usage(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.c
330
return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_USAGE);
tools/power/cpupower/utils/helpers/sysfs.c
333
unsigned long long sysfs_get_idlestate_time(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.c
336
return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_TIME);
tools/power/cpupower/utils/helpers/sysfs.c
339
char *sysfs_get_idlestate_name(unsigned int cpu, unsigned int idlestate)
tools/power/cpupower/utils/helpers/sysfs.c
341
return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_NAME);
tools/power/cpupower/utils/helpers/sysfs.c
344
char *sysfs_get_idlestate_desc(unsigned int cpu, unsigned int idlestate)
tools/power/cpupower/utils/helpers/sysfs.c
346
return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_DESC);
tools/power/cpupower/utils/helpers/sysfs.c
354
unsigned int sysfs_get_idlestate_count(unsigned int cpu)
tools/power/cpupower/utils/helpers/sysfs.c
365
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
tools/power/cpupower/utils/helpers/sysfs.c
371
"cpu%u/cpuidle/state%d", cpu, idlestates);
tools/power/cpupower/utils/helpers/sysfs.c
47
int sysfs_is_cpu_online(unsigned int cpu)
tools/power/cpupower/utils/helpers/sysfs.c
57
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu);
tools/power/cpupower/utils/helpers/sysfs.c
66
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu);
tools/power/cpupower/utils/helpers/sysfs.h
11
extern unsigned int sysfs_idlestate_file_exists(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.h
15
extern int sysfs_is_cpu_online(unsigned int cpu);
tools/power/cpupower/utils/helpers/sysfs.h
17
extern int sysfs_is_idlestate_disabled(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.h
19
extern int sysfs_idlestate_disable(unsigned int cpu, unsigned int idlestate,
tools/power/cpupower/utils/helpers/sysfs.h
21
extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.h
23
extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.h
25
extern unsigned long long sysfs_get_idlestate_time(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.h
27
extern char *sysfs_get_idlestate_name(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.h
29
extern char *sysfs_get_idlestate_desc(unsigned int cpu,
tools/power/cpupower/utils/helpers/sysfs.h
31
extern unsigned int sysfs_get_idlestate_count(unsigned int cpu);
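The sysfs helper entries pin down the path scheme for idle states: /sys/devices/system/cpu/cpuN/cpuidle/stateM/<attribute>. A tiny sketch that reads state0's name for cpu0 straight from sysfs:

/* Sketch of the sysfs idle-state path scheme indexed above. */
#include <stdio.h>

int main(void)
{
        char path[256], name[64];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%u/cpuidle/state%d/name", 0, 0);
        f = fopen(path, "r");
        if (f && fgets(name, sizeof(name), f))
                printf("cpu0 state0: %s", name);
        if (f)
                fclose(f);
        return 0;
}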
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
124
static int amd_fam14h_init(cstate_t *state, unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
129
ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu);
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
150
(unsigned int) val, cpu);
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
154
previous_count[state->id][cpu] = 0;
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
159
static int amd_fam14h_disable(cstate_t *state, unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
164
ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu);
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
181
current_count[state->id][cpu] = val;
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
184
current_count[state->id][cpu], cpu);
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
186
previous_count[state->id][cpu], cpu);
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
196
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
208
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
215
diff = current_count[id][cpu] - previous_count[id][cpu];
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
230
int num, cpu;
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
233
for (cpu = 0; cpu < cpu_count; cpu++)
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
234
amd_fam14h_init(&amd_fam14h_cstates[num], cpu);
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
247
int num, cpu;
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
253
for (cpu = 0; cpu < cpu_count; cpu++)
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
254
amd_fam14h_disable(&amd_fam14h_cstates[num], cpu);
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
46
unsigned int cpu);
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
48
unsigned int cpu);
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
99
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
26
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
28
unsigned long long statediff = current_count[cpu][id]
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
29
- previous_count[cpu][id];
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
31
cpuidle_cstates[id].name, timediff, *percent, cpu);
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
39
cpuidle_cstates[id].name, timediff, statediff, *percent, cpu);
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
46
int cpu, state;
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
48
for (cpu = 0; cpu < cpu_count; cpu++) {
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
51
previous_count[cpu][state] =
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
52
cpuidle_state_time(cpu, state);
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
54
cpu, state, previous_count[cpu][state]);
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
62
int cpu, state;
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
67
for (cpu = 0; cpu < cpu_count; cpu++) {
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
70
current_count[cpu][state] =
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
71
cpuidle_state_time(cpu, state);
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
73
cpu, state, current_count[cpu][state]);
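The cpuidle_sysfs monitor samples each state's time_in_state counter before and after the measurement interval (previous_count/current_count above) and reports the delta as a share of elapsed time. A sketch of that arithmetic with made-up sample values; the 100.0 percent scaling is an assumption consistent with the debug output quoted above:

/* Hedged sketch of the residency math behind the entries above. */
#include <stdio.h>

int main(void)
{
        unsigned long long previous = 1200000;  /* usecs, sampled at start */
        unsigned long long current  = 1950000;  /* usecs, sampled at end   */
        unsigned long long timediff = 1000000;  /* 1s measurement interval */

        double percent = 100.0 * (double)(current - previous) / timediff;

        printf("state residency: %.1f%%\n", percent);
        return 0;
}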
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
150
void print_results(int topology_depth, int cpu)
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
159
if (!bitmask_isbitset(cpus_chosen, cpu_top.core_info[cpu].cpu))
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
161
if (!cpu_top.core_info[cpu].is_online &&
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
162
cpu_top.core_info[cpu].pkg == -1)
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
167
printf("%4d|", cpu_top.core_info[cpu].pkg);
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
169
printf("%4d|", cpu_top.core_info[cpu].core);
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
171
printf("%4d|", cpu_top.core_info[cpu].cpu);
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
189
cpu_top.core_info[cpu].cpu);
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
198
cpu_top.core_info[cpu].cpu);
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
217
if (!cpu_top.core_info[cpu].is_online &&
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
218
cpu_top.core_info[cpu].pkg != -1) {
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
345
int cpu;
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
348
for (cpu = 0; cpu < cpu_count; cpu++)
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
349
bind_cpu(cpu);
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
360
for (cpu = 0; cpu < cpu_count; cpu++)
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
361
bind_cpu(cpu);
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
409
int cpu;
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
475
for (cpu = 0; cpu < cpu_count; cpu++) {
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
477
print_results(TOPOLOGY_DEPTH_PKG, cpu);
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
479
print_results(TOPOLOGY_DEPTH_CPU, cpu);
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
47
unsigned int cpu);
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
49
unsigned int cpu);
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
83
static inline int bind_cpu(int cpu)
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
89
CPU_SET(cpu, &set);
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
101
hsw_ext_cstates[id].name, previous_count[id][cpu],
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
102
current_count[id][cpu], cpu);
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
107
current_count[id][cpu] - previous_count[id][cpu],
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
108
*percent, cpu);
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
115
int num, cpu;
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
119
for (cpu = 0; cpu < cpu_count; cpu++) {
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
120
is_valid[cpu] = !hsw_ext_get_count(num, &val, cpu);
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
121
previous_count[num][cpu] = val;
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
131
int num, cpu;
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
136
for (cpu = 0; cpu < cpu_count; cpu++) {
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
137
is_valid[cpu] |= !hsw_ext_get_count(num, &val, cpu);
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
138
current_count[num][cpu] = val;
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
29
unsigned int cpu);
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
63
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
83
if (read_msr(cpu, msr, val))
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
89
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
93
if (!is_valid[cpu])
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
97
(current_count[id][cpu] - previous_count[id][cpu])) /
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
106
if (bind_cpu(cpu))
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
124
ret = read_msr(cpu, MSR_APERF, aval);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
125
ret |= read_msr(cpu, MSR_MPERF, mval);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
130
static int mperf_init_stats(unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
135
ret = get_aperf_mperf(cpu, &aval, &mval);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
136
aperf_previous_count[cpu] = aval;
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
137
mperf_previous_count[cpu] = mval;
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
138
is_valid[cpu] = !ret;
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
143
static int mperf_measure_stats(unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
148
ret = get_aperf_mperf(cpu, &aval, &mval);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
149
aperf_current_count[cpu] = aval;
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
150
mperf_current_count[cpu] = mval;
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
151
is_valid[cpu] |= !ret;
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
157
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
162
if (!is_valid[cpu])
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
168
mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu];
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
169
aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
172
tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
177
timediff = max_frequency * timespec_diff_us(time_start[cpu], time_end[cpu]);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
188
mperf_cstates[id].name, mperf_diff, aperf_diff, cpu);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
194
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
201
if (!is_valid[cpu])
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
204
mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu];
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
205
aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
209
tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
210
time_diff = timespec_diff_us(time_start[cpu], time_end[cpu]);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
227
int cpu;
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
229
for (cpu = 0; cpu < cpu_count; cpu++) {
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
230
clock_gettime(CLOCK_REALTIME, &time_start[cpu]);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
231
mperf_get_tsc(&tsc_at_measure_start[cpu]);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
232
mperf_init_stats(cpu);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
240
int cpu;
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
242
for (cpu = 0; cpu < cpu_count; cpu++) {
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
243
clock_gettime(CLOCK_REALTIME, &time_end[cpu]);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
244
mperf_get_tsc(&tsc_at_measure_end[cpu]);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
245
mperf_measure_stats(cpu);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
33
unsigned int cpu);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
35
unsigned int cpu);
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
93
static int get_aperf_mperf(int cpu, unsigned long long *aval,
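The mperf_monitor.c entries above derive average running frequency and C0 residency from APERF/MPERF deltas bracketed by TSC and wall-clock samples. The arithmetic, as a hedged sketch (the formulas follow the architectural APERF/MPERF definition: both MSRs count only while the CPU is in C0, MPERF at a fixed reference rate and APERF at the actual clock; max_freq_khz stands in for the P0 frequency the tool determines elsewhere):

#include <stdio.h>

/* Evaluate one measurement interval. mperf_diff/tsc_diff is the C0
 * fraction; aperf_diff/mperf_diff is the ratio of actual to reference
 * cycles while running, so scaling the base frequency by it gives the
 * average frequency over the interval. */
static void mperf_eval(unsigned long long aperf_diff,
		       unsigned long long mperf_diff,
		       unsigned long long tsc_diff,
		       unsigned long max_freq_khz)
{
	double c0 = 100.0 * (double)mperf_diff / (double)tsc_diff;
	unsigned long avg_khz =
		(unsigned long)(max_freq_khz *
				((double)aperf_diff / (double)mperf_diff));

	printf("C0: %5.2f%%  avg freq: %lu kHz\n", c0, avg_khz);
}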
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
102
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
106
if (!is_valid[cpu])
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
110
(current_count[id][cpu] - previous_count[id][cpu])) /
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
114
nhm_cstates[id].name, previous_count[id][cpu],
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
115
current_count[id][cpu], cpu);
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
120
current_count[id][cpu] - previous_count[id][cpu],
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
121
*percent, cpu);
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
128
int num, cpu;
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
134
for (cpu = 0; cpu < cpu_count; cpu++) {
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
135
is_valid[cpu] = !nhm_get_count(num, &val, cpu);
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
136
previous_count[num][cpu] = val;
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
148
int num, cpu;
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
153
for (cpu = 0; cpu < cpu_count; cpu++) {
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
154
is_valid[cpu] |= !nhm_get_count(num, &val, cpu);
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
155
current_count[num][cpu] = val;
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
30
unsigned int cpu);
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
72
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
95
if (read_msr(cpu, msr, val))
tools/power/cpupower/utils/idle_monitor/rapl_monitor.c
32
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/snb_idle.c
100
current_count[id][cpu], cpu);
tools/power/cpupower/utils/idle_monitor/snb_idle.c
105
current_count[id][cpu] - previous_count[id][cpu],
tools/power/cpupower/utils/idle_monitor/snb_idle.c
106
*percent, cpu);
tools/power/cpupower/utils/idle_monitor/snb_idle.c
113
int num, cpu;
tools/power/cpupower/utils/idle_monitor/snb_idle.c
117
for (cpu = 0; cpu < cpu_count; cpu++) {
tools/power/cpupower/utils/idle_monitor/snb_idle.c
118
is_valid[cpu] = !snb_get_count(num, &val, cpu);
tools/power/cpupower/utils/idle_monitor/snb_idle.c
119
previous_count[num][cpu] = val;
tools/power/cpupower/utils/idle_monitor/snb_idle.c
129
int num, cpu;
tools/power/cpupower/utils/idle_monitor/snb_idle.c
134
for (cpu = 0; cpu < cpu_count; cpu++) {
tools/power/cpupower/utils/idle_monitor/snb_idle.c
135
is_valid[cpu] |= !snb_get_count(num, &val, cpu);
tools/power/cpupower/utils/idle_monitor/snb_idle.c
136
current_count[num][cpu] = val;
tools/power/cpupower/utils/idle_monitor/snb_idle.c
27
unsigned int cpu);
tools/power/cpupower/utils/idle_monitor/snb_idle.c
61
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/snb_idle.c
81
if (read_msr(cpu, msr, val))
tools/power/cpupower/utils/idle_monitor/snb_idle.c
87
unsigned int cpu)
tools/power/cpupower/utils/idle_monitor/snb_idle.c
91
if (!is_valid[cpu])
tools/power/cpupower/utils/idle_monitor/snb_idle.c
95
(current_count[id][cpu] - previous_count[id][cpu])) /
tools/power/cpupower/utils/idle_monitor/snb_idle.c
99
snb_cstates[id].name, previous_count[id][cpu],
tools/power/x86/intel-speed-select/hfi-events.c
177
int cpu;
tools/power/x86/intel-speed-select/hfi-events.c
186
set_isst_id(&id, perf_cap->cpu);
tools/power/x86/intel-speed-select/hfi-events.c
209
perf_cap.cpu = nla_get_u32(cap);
tools/power/x86/intel-speed-select/isst-config.c
1161
if (id->cpu < 0 || tid->cpu >= 0)
tools/power/x86/intel-speed-select/isst-config.c
1191
tid->cpu = id->cpu;
tools/power/x86/intel-speed-select/isst-config.c
1203
id.cpu = -1;
tools/power/x86/intel-speed-select/isst-config.c
1469
isst_display_error_info_message(1, "Failed to get perf-profile info on cpu", 1, id->cpu);
tools/power/x86/intel-speed-select/isst-config.c
1508
static void adjust_scaling_max_from_base_freq(int cpu);
tools/power/x86/intel-speed-select/isst-config.c
1537
if (force_online_offline && id->cpu >= 0) {
tools/power/x86/intel-speed-select/isst-config.c
1714
static int set_cpufreq_scaling_min_max(int cpu, int max, int freq)
tools/power/x86/intel-speed-select/isst-config.c
1721
"/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
tools/power/x86/intel-speed-select/isst-config.c
1724
"/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu);
tools/power/x86/intel-speed-select/isst-config.c
1747
static void adjust_scaling_max_from_base_freq(int cpu)
tools/power/x86/intel-speed-select/isst-config.c
1751
scaling_max_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
tools/power/x86/intel-speed-select/isst-config.c
1752
base_freq = get_cpufreq_base_freq(cpu);
tools/power/x86/intel-speed-select/isst-config.c
1754
set_cpufreq_scaling_min_max(cpu, 1, base_freq);
tools/power/x86/intel-speed-select/isst-config.c
1757
static void adjust_scaling_min_from_base_freq(int cpu)
tools/power/x86/intel-speed-select/isst-config.c
1761
scaling_min_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu);
tools/power/x86/intel-speed-select/isst-config.c
1762
base_freq = get_cpufreq_base_freq(cpu);
tools/power/x86/intel-speed-select/isst-config.c
1764
set_cpufreq_scaling_min_max(cpu, 0, base_freq);
tools/power/x86/intel-speed-select/isst-config.c
1802
static int set_cpufreq_scaling_min_max_from_cpuinfo(int cpu, int cpuinfo_max, int scaling_max)
tools/power/x86/intel-speed-select/isst-config.c
1807
if (!CPU_ISSET_S(cpu, present_cpumask_size, present_cpumask))
tools/power/x86/intel-speed-select/isst-config.c
1812
"/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq", cpu);
tools/power/x86/intel-speed-select/isst-config.c
1815
"/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_min_freq", cpu);
tools/power/x86/intel-speed-select/isst-config.c
1829
"/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
tools/power/x86/intel-speed-select/isst-config.c
1832
"/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu);
tools/power/x86/intel-speed-select/isst-config.c
1854
if (id->cpu < 0)
tools/power/x86/intel-speed-select/isst-config.c
1874
if (id->cpu < 0)
tools/power/x86/intel-speed-select/isst-config.c
1956
if (id->cpu < 0)
tools/power/x86/intel-speed-select/isst-config.c
2207
if (!ret && id->cpu >= 0)
tools/power/x86/intel-speed-select/isst-config.c
2293
int cpu;
tools/power/x86/intel-speed-select/isst-config.c
2295
sscanf(cpu_str, "%d", &cpu);
tools/power/x86/intel-speed-select/isst-config.c
2296
CPU_SET_S(cpu, target_cpumask_size, target_cpumask);
tools/power/x86/intel-speed-select/isst-config.c
2499
if (id->cpu < 0)
tools/power/x86/intel-speed-select/isst-config.c
2648
base_freq = get_cpufreq_base_freq(id->cpu);
tools/power/x86/intel-speed-select/isst-config.c
2649
set_cpufreq_scaling_min_max(id->cpu, 1, base_freq);
tools/power/x86/intel-speed-select/isst-config.c
266
static int get_stored_topology_info(int cpu, int *core_id, int *pkg_id, int *die_id)
tools/power/x86/intel-speed-select/isst-config.c
2704
if (id->cpu < 0)
tools/power/x86/intel-speed-select/isst-config.c
277
ret = fseek(fp, cpu * sizeof(cpu_top), SEEK_SET);
tools/power/x86/intel-speed-select/isst-config.c
337
cpu_top.cpu = i;
tools/power/x86/intel-speed-select/isst-config.c
348
static int get_physical_package_id(int cpu)
tools/power/x86/intel-speed-select/isst-config.c
352
if (cpu < 0)
tools/power/x86/intel-speed-select/isst-config.c
355
if (cpu_map && cpu_map[cpu].initialized)
tools/power/x86/intel-speed-select/isst-config.c
356
return cpu_map[cpu].pkg_id;
tools/power/x86/intel-speed-select/isst-config.c
360
cpu);
tools/power/x86/intel-speed-select/isst-config.c
364
ret = get_stored_topology_info(cpu, &core_id, &pkg_id, &die_id);
tools/power/x86/intel-speed-select/isst-config.c
372
static int get_physical_core_id(int cpu)
tools/power/x86/intel-speed-select/isst-config.c
376
if (cpu < 0)
tools/power/x86/intel-speed-select/isst-config.c
379
if (cpu_map && cpu_map[cpu].initialized)
tools/power/x86/intel-speed-select/isst-config.c
380
return cpu_map[cpu].core_id;
tools/power/x86/intel-speed-select/isst-config.c
384
cpu);
tools/power/x86/intel-speed-select/isst-config.c
388
ret = get_stored_topology_info(cpu, &core_id, &pkg_id, &die_id);
tools/power/x86/intel-speed-select/isst-config.c
396
static int get_physical_die_id(int cpu)
tools/power/x86/intel-speed-select/isst-config.c
400
if (cpu < 0)
tools/power/x86/intel-speed-select/isst-config.c
403
if (cpu_map && cpu_map[cpu].initialized)
tools/power/x86/intel-speed-select/isst-config.c
404
return cpu_map[cpu].die_id;
tools/power/x86/intel-speed-select/isst-config.c
408
cpu);
tools/power/x86/intel-speed-select/isst-config.c
412
ret = get_stored_topology_info(cpu, &core_id, &pkg_id, &die_id);
tools/power/x86/intel-speed-select/isst-config.c
427
static int get_physical_punit_id(int cpu)
tools/power/x86/intel-speed-select/isst-config.c
429
if (cpu < 0)
tools/power/x86/intel-speed-select/isst-config.c
432
if (cpu_map && cpu_map[cpu].initialized)
tools/power/x86/intel-speed-select/isst-config.c
433
return cpu_map[cpu].punit_id;
tools/power/x86/intel-speed-select/isst-config.c
438
void set_isst_id(struct isst_id *id, int cpu)
tools/power/x86/intel-speed-select/isst-config.c
440
id->cpu = cpu;
tools/power/x86/intel-speed-select/isst-config.c
442
id->pkg = get_physical_package_id(cpu);
tools/power/x86/intel-speed-select/isst-config.c
446
id->die = get_physical_die_id(cpu);
tools/power/x86/intel-speed-select/isst-config.c
450
id->punit = get_physical_punit_id(cpu);
tools/power/x86/intel-speed-select/isst-config.c
455
int is_cpu_in_power_domain(int cpu, struct isst_id *id)
tools/power/x86/intel-speed-select/isst-config.c
459
set_isst_id(&tid, cpu);
tools/power/x86/intel-speed-select/isst-config.c
467
int get_cpufreq_base_freq(int cpu)
tools/power/x86/intel-speed-select/isst-config.c
469
return parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/base_frequency", cpu);
tools/power/x86/intel-speed-select/isst-config.c
477
static unsigned int is_cpu_online(int cpu)
tools/power/x86/intel-speed-select/isst-config.c
484
"/sys/devices/system/cpu/cpu%d/online", cpu);
tools/power/x86/intel-speed-select/isst-config.c
504
void set_cpu_online_offline(int cpu, int state)
tools/power/x86/intel-speed-select/isst-config.c
509
if (cpu_0_cgroupv2 && !cpu) {
tools/power/x86/intel-speed-select/isst-config.c
516
"/sys/devices/system/cpu/cpu%d/online", cpu);
tools/power/x86/intel-speed-select/isst-config.c
520
if (!cpu) {
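set_cpu_online_offline() above hot-plugs CPUs through the per-CPU "online" sysfs file; CPU 0 is special (it often cannot be offlined, and the indexed code falls back to a cpu_0_cgroupv2 path for it). A sketch of the plain sysfs write, under those assumptions:

#include <stdio.h>

/* Bring a CPU online (state=1) or offline (state=0) via sysfs.
 * Sketch only: a real tool must special-case CPU 0, which may lack
 * the "online" file entirely. */
static int cpu_online_offline(int cpu, int state)
{
	char path[96];
	FILE *fp;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);
	fp = fopen(path, "w");
	if (!fp)
		return -1;
	fprintf(fp, "%d", state ? 1 : 0);
	fclose(fp);
	return 0;
}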
tools/power/x86/intel-speed-select/isst-config.c
594
id.cpu = cpus[i][k][k];
tools/power/x86/intel-speed-select/isst-config.c
596
id.die = get_physical_die_id(id.cpu);
tools/power/x86/intel-speed-select/isst-config.c
612
id.cpu = cpus[i][j][k];
tools/power/x86/intel-speed-select/isst-config.c
614
if (id.cpu >= 0)
tools/power/x86/intel-speed-select/isst-config.c
615
id.die = get_physical_die_id(id.cpu);
tools/power/x86/intel-speed-select/isst-config.c
77
short cpu;
tools/power/x86/intel-speed-select/isst-config.c
874
if (id->cpu < 0)
tools/power/x86/intel-speed-select/isst-core-mbox.c
1000
core_id = find_phy_core_num(id->cpu);
tools/power/x86/intel-speed-select/isst-core-mbox.c
1003
ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PQR_ASSOC, param, 0,
tools/power/x86/intel-speed-select/isst-core-mbox.c
1008
debug_printf("cpu:%d CLOS_PQR_ASSOC param:%x resp:%x\n", id->cpu, param,
tools/power/x86/intel-speed-select/isst-core-mbox.c
1022
core_id = find_phy_core_num(id->cpu);
tools/power/x86/intel-speed-select/isst-core-mbox.c
1025
ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PQR_ASSOC, param,
tools/power/x86/intel-speed-select/isst-core-mbox.c
1030
debug_printf("cpu:%d CLOS_PQR_ASSOC param:%x req:%x\n", id->cpu, param,
tools/power/x86/intel-speed-select/isst-core-mbox.c
108
cpu, reg, write);
tools/power/x86/intel-speed-select/isst-core-mbox.c
115
cpu, reg, write, *value);
tools/power/x86/intel-speed-select/isst-core-mbox.c
123
int _send_mbox_command(unsigned int cpu, unsigned char command,
tools/power/x86/intel-speed-select/isst-core-mbox.c
133
cpu, command, sub_command, parameter, req_data);
tools/power/x86/intel-speed-select/isst-core-mbox.c
141
debug_printf("CPU %d\n", cpu);
tools/power/x86/intel-speed-select/isst-core-mbox.c
152
cpu, PQR_ASSOC_OFFSET + core_id * 4, write,
tools/power/x86/intel-speed-select/isst-core-mbox.c
160
cpu, PM_CLOS_OFFSET + clos_id * 4, write,
tools/power/x86/intel-speed-select/isst-core-mbox.c
174
mbox_cmds.mbox_cmd[0].logical_cpu = cpu;
tools/power/x86/intel-speed-select/isst-core-mbox.c
197
cpu, command, sub_command, parameter, req_data, errno);
tools/power/x86/intel-speed-select/isst-core-mbox.c
203
cpu, command, sub_command, parameter, req_data, *resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
224
ret = _send_mbox_command(id->cpu, READ_PM_CONFIG, PM_FEATURE, 0, 0,
tools/power/x86/intel-speed-select/isst-core-mbox.c
229
debug_printf("cpu:%d READ_PM_CONFIG resp:%x\n", id->cpu, resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
242
ret = _send_mbox_command(id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
253
debug_printf("cpu:%d CONFIG_TDP_GET_LEVELS_INFO resp:%x\n", id->cpu, resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
271
ret = _send_mbox_command(id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
284
debug_printf("cpu:%d pm_config is not supported\n", id->cpu);
tools/power/x86/intel-speed-select/isst-core-mbox.c
286
debug_printf("cpu:%d pm_config SST-CP state:%d cap:%d\n", id->cpu, cp_state, cp_cap);
tools/power/x86/intel-speed-select/isst-core-mbox.c
293
id->cpu, resp, ctdp_level->fact_support, ctdp_level->pbf_support,
tools/power/x86/intel-speed-select/isst-core-mbox.c
309
ret = _send_mbox_command(id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
322
id->cpu, config_index, resp, ctdp_level->uncore_p0, ctdp_level->uncore_p1,
tools/power/x86/intel-speed-select/isst-core-mbox.c
328
ret = _send_mbox_command(id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
341
id->cpu, config_index, resp, ctdp_level->uncore_p0,
tools/power/x86/intel-speed-select/isst-core-mbox.c
389
ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_P1_INFO, 0,
tools/power/x86/intel-speed-select/isst-core-mbox.c
404
id->cpu, config_index, resp, ctdp_level->sse_p1,
tools/power/x86/intel-speed-select/isst-core-mbox.c
414
ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_MEM_FREQ,
tools/power/x86/intel-speed-select/isst-core-mbox.c
438
id->cpu, config_index, resp, ctdp_level->mem_freq);
tools/power/x86/intel-speed-select/isst-core-mbox.c
447
ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_TDP_INFO,
tools/power/x86/intel-speed-select/isst-core-mbox.c
459
id->cpu, config_index, resp, ctdp_level->tdp_ratio,
tools/power/x86/intel-speed-select/isst-core-mbox.c
462
ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_TJMAX_INFO,
tools/power/x86/intel-speed-select/isst-core-mbox.c
475
id->cpu, config_index, resp, ctdp_level->t_proc_hot);
tools/power/x86/intel-speed-select/isst-core-mbox.c
486
ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_PWR_INFO,
tools/power/x86/intel-speed-select/isst-core-mbox.c
496
id->cpu, config_index, resp, ctdp_level->pkg_max_power,
tools/power/x86/intel-speed-select/isst-core-mbox.c
513
ret = _send_mbox_command(id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
521
id->cpu, config_index, i, resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
529
debug_printf("cpu:%d ctdp:%d mask:%d cpu count:%d\n", id->cpu,
tools/power/x86/intel-speed-select/isst-core-mbox.c
542
ret = _send_mbox_command(id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
550
id->cpu, req, resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
558
ret = _send_mbox_command(id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
564
debug_printf("cpu:%d CONFIG_TDP_GET_TURBO_LIMIT req:%x resp:%x\n", id->cpu,
tools/power/x86/intel-speed-select/isst-core-mbox.c
592
debug_printf("cpu:%d bucket info via MSR\n", id->cpu);
tools/power/x86/intel-speed-select/isst-core-mbox.c
596
ret = isst_send_msr_command(id->cpu, 0x1ae, 0, buckets_info);
tools/power/x86/intel-speed-select/isst-core-mbox.c
600
debug_printf("cpu:%d bucket info via MSR successful 0x%llx\n", id->cpu,
tools/power/x86/intel-speed-select/isst-core-mbox.c
618
ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_SET_LEVEL, 0,
tools/power/x86/intel-speed-select/isst-core-mbox.c
641
ret = _send_mbox_command(id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
649
id->cpu, resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
659
ret = _send_mbox_command(id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
66
if (id->cpu < 0)
tools/power/x86/intel-speed-select/isst-core-mbox.c
665
debug_printf("cpu:%d CONFIG_TDP_PBF_GET_P1HI_P1LO_INFO resp:%x\n", id->cpu,
tools/power/x86/intel-speed-select/isst-core-mbox.c
673
id->cpu, CONFIG_TDP, CONFIG_TDP_PBF_GET_TDP_INFO, 0, req, &resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
677
debug_printf("cpu:%d CONFIG_TDP_PBF_GET_TDP_INFO resp:%x\n", id->cpu, resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
683
id->cpu, CONFIG_TDP, CONFIG_TDP_PBF_GET_TJ_MAX_INFO, 0, req, &resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
687
debug_printf("cpu:%d CONFIG_TDP_PBF_GET_TJ_MAX_INFO resp:%x\n", id->cpu,
tools/power/x86/intel-speed-select/isst-core-mbox.c
705
debug_printf("cpu:%d No support for dynamic ISST\n", id->cpu);
tools/power/x86/intel-speed-select/isst-core-mbox.c
735
ret = _send_mbox_command(id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
741
id->cpu, pbf, req);
tools/power/x86/intel-speed-select/isst-core-mbox.c
75
static int _send_mmio_command(unsigned int cpu, unsigned int reg, int write,
tools/power/x86/intel-speed-select/isst-core-mbox.c
757
id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
765
id->cpu, i, level, resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
778
id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
786
id->cpu, i, level, k, resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
803
ret = _send_mbox_command(id->cpu, CONFIG_TDP,
tools/power/x86/intel-speed-select/isst-core-mbox.c
810
id->cpu, resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
84
debug_printf("mmio_cmd cpu:%d reg:%d write:%d\n", cpu, reg, write);
tools/power/x86/intel-speed-select/isst-core-mbox.c
843
ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0,
tools/power/x86/intel-speed-select/isst-core-mbox.c
848
debug_printf("cpu:%d CLOS_PM_QOS_CONFIG resp:%x\n", id->cpu, resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
873
ret = _send_mbox_command(id->cpu, WRITE_PM_CONFIG, PM_FEATURE, 0, req,
tools/power/x86/intel-speed-select/isst-core-mbox.c
878
debug_printf("cpu:%d WRITE_PM_CONFIG resp:%x\n", id->cpu, resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
91
io_regs.io_reg[0].logical_cpu = cpu;
tools/power/x86/intel-speed-select/isst-core-mbox.c
916
ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0,
tools/power/x86/intel-speed-select/isst-core-mbox.c
923
debug_printf("cpu:%d CLOS_PM_QOS_CONFIG resp:%x\n", id->cpu, resp);
tools/power/x86/intel-speed-select/isst-core-mbox.c
940
ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG,
tools/power/x86/intel-speed-select/isst-core-mbox.c
945
debug_printf("cpu:%d CLOS_PM_QOS_CONFIG priority type:%d req:%x\n", id->cpu,
tools/power/x86/intel-speed-select/isst-core-mbox.c
956
ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PM_CLOS, clos, 0,
tools/power/x86/intel-speed-select/isst-core-mbox.c
984
ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PM_CLOS, param, req,
tools/power/x86/intel-speed-select/isst-core-mbox.c
989
debug_printf("cpu:%d CLOS_PM_CLOS param:%x req:%x\n", id->cpu, param, req);
tools/power/x86/intel-speed-select/isst-core-tpmi.c
221
id->cpu, ctdp_level->fact_support, ctdp_level->pbf_support,
tools/power/x86/intel-speed-select/isst-core-tpmi.c
270
id->cpu, config_index, ctdp_level->tdp_ratio, ctdp_level->pkg_tdp,
tools/power/x86/intel-speed-select/isst-core-tpmi.c
285
id->cpu, config_index, ctdp_level->pkg_max_power,
tools/power/x86/intel-speed-select/isst-core-tpmi.c
312
id->cpu, config_index, ctdp_level->cpu_count);
tools/power/x86/intel-speed-select/isst-core-tpmi.c
382
debug_printf("cpu:%d TRL bucket info: 0x%llx\n", id->cpu,
tools/power/x86/intel-speed-select/isst-core-tpmi.c
424
id->cpu, config_index, cpu_count);
tools/power/x86/intel-speed-select/isst-core-tpmi.c
449
id->cpu, level, pbf_info->p1_low, pbf_info->p1_high,
tools/power/x86/intel-speed-select/isst-core-tpmi.c
468
debug_printf("cpu:%d No support for dynamic ISST\n", id->cpu);
tools/power/x86/intel-speed-select/isst-core-tpmi.c
753
debug_printf("cpu:%d clos:%d min:%d max:%d\n", id->cpu, clos,
tools/power/x86/intel-speed-select/isst-core-tpmi.c
796
debug_printf("set cpu:%d clos:%d min:%d max:%d\n", id->cpu, clos,
tools/power/x86/intel-speed-select/isst-core-tpmi.c
810
assoc_cmds.assoc_info[0].logical_cpu = find_phy_core_num(id->cpu);
tools/power/x86/intel-speed-select/isst-core-tpmi.c
831
assoc_cmds.assoc_info[0].logical_cpu = find_phy_core_num(id->cpu);
tools/power/x86/intel-speed-select/isst-core.c
152
ret = isst_send_msr_command(id->cpu, 0x1AD, 0, &msr_trl);
tools/power/x86/intel-speed-select/isst-core.c
265
ret = isst_send_msr_command(id->cpu, 0x1AD, 0, trl);
tools/power/x86/intel-speed-select/isst-core.c
279
ret = isst_send_msr_command(id->cpu, 0x1AD, 1, &trl);
tools/power/x86/intel-speed-select/isst-core.c
293
if (id->cpu < 0)
tools/power/x86/intel-speed-select/isst-core.c
322
ret = isst_send_msr_command(id->cpu, 0x1AD, 1, &msr_trl);
tools/power/x86/intel-speed-select/isst-core.c
335
ret = isst_send_msr_command(id->cpu, 0x64b, 0, &tdp_control);
tools/power/x86/intel-speed-select/isst-core.c
380
id->cpu, pkg_dev->enabled, pkg_dev->current_level,
tools/power/x86/intel-speed-select/isst-core.c
397
debug_printf("cpu:%d Get Information for TDP level:%d\n", id->cpu,
tools/power/x86/intel-speed-select/isst-core.c
402
ctdp_level->control_cpu = id->cpu;
tools/power/x86/intel-speed-select/isst-core.c
430
freq = get_cpufreq_base_freq(id->cpu);
tools/power/x86/intel-speed-select/isst-core.c
465
isst_display_error_info_message(0, "Invalid level, Can't get TDP control information at specified levels on cpu", 1, id->cpu);
tools/power/x86/intel-speed-select/isst-core.c
70
int isst_send_msr_command(unsigned int cpu, unsigned int msr, int write,
tools/power/x86/intel-speed-select/isst-core.c
83
msr_cmds.msr_cmd[0].logical_cpu = cpu;
tools/power/x86/intel-speed-select/isst-core.c
92
cpu, msr, write);
tools/power/x86/intel-speed-select/isst-core.c
99
cpu, msr, write, *req_resp, msr_cmds.msr_cmd[0].data);
tools/power/x86/intel-speed-select/isst-daemon.c
44
debug_printf("Invalid package/die info for cpu:%d\n", id->cpu);
tools/power/x86/intel-speed-select/isst-daemon.c
56
debug_printf("Can't get tdp levels for cpu:%d\n", id->cpu);
tools/power/x86/intel-speed-select/isst-daemon.c
60
debug_printf("Get Config level %d pkg:%d die:%d current_level:%d\n", id->cpu,
tools/power/x86/intel-speed-select/isst-daemon.c
72
id->cpu, id->pkg, id->die, per_package_levels_info[id->pkg][id->die][id->punit],
tools/power/x86/intel-speed-select/isst-daemon.c
82
debug_printf("Can't get core_mask:%d\n", id->cpu);
tools/power/x86/intel-speed-select/isst-display.c
176
if (id->die < 0 && id->cpu < 0)
tools/power/x86/intel-speed-select/isst-display.c
180
else if (id->cpu < 0)
tools/power/x86/intel-speed-select/isst-display.c
187
id->pkg, id->die, id->punit, id->cpu);
tools/power/x86/intel-speed-select/isst-display.c
190
id->pkg, id->die, id->cpu);
tools/power/x86/intel-speed-select/isst-display.c
207
if (id->cpu < 0)
tools/power/x86/intel-speed-select/isst-display.c
210
snprintf(header, sizeof(header), "cpu-%d", id->cpu);
tools/power/x86/intel-speed-select/isst-display.c
276
if (!bucket_info[j].hp_cores && id->cpu >= 0)
tools/power/x86/intel-speed-select/isst-display.c
381
if (id->cpu >= 0) {
tools/power/x86/intel-speed-select/isst.h
217
extern int is_cpu_in_power_domain(int cpu, struct isst_id *id);
tools/power/x86/intel-speed-select/isst.h
228
extern void set_isst_id(struct isst_id *id, int cpu);
tools/power/x86/intel-speed-select/isst.h
237
extern int isst_send_msr_command(unsigned int cpu, unsigned int command,
tools/power/x86/intel-speed-select/isst.h
299
extern int get_cpufreq_base_freq(int cpu);
tools/power/x86/intel-speed-select/isst.h
308
extern void set_cpu_online_offline(int cpu, int state);
tools/power/x86/intel-speed-select/isst.h
88
int cpu;
tools/power/x86/turbostat/turbostat.c
10011
fd_perf = open_perf_counter(cpu, perf_type, perf_config, -1, 0);
tools/power/x86/turbostat/turbostat.c
10013
warnx("%s: perf/%s/%s: failed to open counter on cpu%d", __func__, perf_device, pinfo->event, cpu);
tools/power/x86/turbostat/turbostat.c
10022
fprintf(stderr, "Add perf/%s/%s cpu%d: %d\n", perf_device, pinfo->event, cpu, pinfo->fd_perf_per_domain[next_domain]);
tools/power/x86/turbostat/turbostat.c
2206
int get_msr_sum(int cpu, off_t offset, unsigned long long *msr);
tools/power/x86/turbostat/turbostat.c
2428
int cpu_is_not_present(int cpu)
tools/power/x86/turbostat/turbostat.c
2430
return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
tools/power/x86/turbostat/turbostat.c
2433
int cpu_is_not_allowed(int cpu)
tools/power/x86/turbostat/turbostat.c
2435
return !CPU_ISSET_S(cpu, cpu_allowed_setsize, cpu_allowed_set);
tools/power/x86/turbostat/turbostat.c
2449
int cpu, retval;
tools/power/x86/turbostat/turbostat.c
2453
for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
tools/power/x86/turbostat/turbostat.c
2458
int pkg_id = cpus[cpu].package_id;
tools/power/x86/turbostat/turbostat.c
2460
if (cpu_is_not_allowed(cpu))
tools/power/x86/turbostat/turbostat.c
2463
if (cpus[cpu].ht_id > 0) /* skip HT sibling */
tools/power/x86/turbostat/turbostat.c
2466
t = &thread_base[cpu];
tools/power/x86/turbostat/turbostat.c
2467
c = &core_base[GLOBAL_CORE_ID(cpus[cpu].core_id, pkg_id)];
tools/power/x86/turbostat/turbostat.c
2476
if (cpus[cpu].ht_sibling_cpu_id[i] <= 0)
tools/power/x86/turbostat/turbostat.c
2478
t = &thread_base[cpus[cpu].ht_sibling_cpu_id[i]];
tools/power/x86/turbostat/turbostat.c
2501
int cpu_migrate(int cpu)
tools/power/x86/turbostat/turbostat.c
2504
CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
tools/power/x86/turbostat/turbostat.c
2511
int get_msr_fd(int cpu)
tools/power/x86/turbostat/turbostat.c
2516
fd = fd_percpu[cpu];
tools/power/x86/turbostat/turbostat.c
2520
sprintf(pathname, use_android_msr_path ? "/dev/msr%d" : "/dev/cpu/%d/msr", cpu);
tools/power/x86/turbostat/turbostat.c
2525
fd_percpu[cpu] = fd;
tools/power/x86/turbostat/turbostat.c
2552
static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags)
tools/power/x86/turbostat/turbostat.c
2556
return syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
tools/power/x86/turbostat/turbostat.c
2559
static long open_perf_counter(int cpu, unsigned int type, unsigned int config, int group_fd, __u64 read_format)
tools/power/x86/turbostat/turbostat.c
2576
const int fd = perf_event_open(&attr, pid, cpu, group_fd, flags);
tools/power/x86/turbostat/turbostat.c
2581
int get_instr_count_fd(int cpu)
tools/power/x86/turbostat/turbostat.c
2583
if (fd_instr_count_percpu[cpu])
tools/power/x86/turbostat/turbostat.c
2584
return fd_instr_count_percpu[cpu];
tools/power/x86/turbostat/turbostat.c
2586
fd_instr_count_percpu[cpu] = open_perf_counter(cpu, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS, -1, 0);
tools/power/x86/turbostat/turbostat.c
2588
return fd_instr_count_percpu[cpu];
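get_instr_count_fd() above caches one PERF_COUNT_HW_INSTRUCTIONS descriptor per CPU. The underlying syscall has no glibc wrapper, which is why the listing shows a raw syscall(__NR_perf_event_open, ...). A minimal sketch of opening a per-CPU, all-process hardware counter:

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open a counter of retired instructions on one CPU, across all
 * processes (pid = -1, cpu >= 0). Reading 8 bytes from the returned
 * fd yields the current count. */
static int open_instr_counter(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;

	return syscall(__NR_perf_event_open, &attr, -1 /* pid */, cpu,
		       -1 /* group_fd */, 0 /* flags */);
}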
tools/power/x86/turbostat/turbostat.c
2591
int get_msr(int cpu, off_t offset, unsigned long long *msr)
tools/power/x86/turbostat/turbostat.c
2597
retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset);
tools/power/x86/turbostat/turbostat.c
2600
err(-1, "cpu%d: msr offset 0x%llx read failed", cpu, (unsigned long long)offset);
tools/power/x86/turbostat/turbostat.c
2605
int add_msr_counter(int cpu, off_t offset)
tools/power/x86/turbostat/turbostat.c
2616
retval = pread(get_msr_fd(cpu), &value, sizeof(value), offset);
tools/power/x86/turbostat/turbostat.c
2628
int add_rapl_msr_counter(int cpu, const struct rapl_counter_arch_info *cai)
tools/power/x86/turbostat/turbostat.c
2635
ret = add_msr_counter(cpu, cai->msr);
tools/power/x86/turbostat/turbostat.c
2654
unsigned int cpu_to_domain(const struct perf_counter_info *pc, int cpu)
tools/power/x86/turbostat/turbostat.c
2658
return cpu;
tools/power/x86/turbostat/turbostat.c
2661
return cpus[cpu].core_id;
tools/power/x86/turbostat/turbostat.c
2664
return cpus[cpu].package_id;
tools/power/x86/turbostat/turbostat.c
3253
void get_perf_llc_stats(int cpu, struct llc_stats *llc)
tools/power/x86/turbostat/turbostat.c
3262
actual_read_size = read(fd_llc_percpu[cpu], &r, expected_read_size);
tools/power/x86/turbostat/turbostat.c
3265
err(-1, "%s(cpu%d,) %d,,%ld", __func__, cpu, fd_llc_percpu[cpu], expected_read_size);
tools/power/x86/turbostat/turbostat.c
3273
void get_perf_l2_stats(int cpu, struct l2_stats *l2)
tools/power/x86/turbostat/turbostat.c
3282
actual_read_size = read(fd_l2_percpu[cpu], &r, expected_read_size);
tools/power/x86/turbostat/turbostat.c
3285
err(-1, "%s(cpu%d,) %d,,%ld", __func__, cpu, fd_l2_percpu[cpu], expected_read_size);
tools/power/x86/turbostat/turbostat.c
3290
warn("%s: cpu%d: failed to read(%d) perf_data (req %zu act %zu)", __func__, cpu, fd_l2_percpu[cpu], expected_read_size, actual_read_size);
tools/power/x86/turbostat/turbostat.c
4478
int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp, char *counter_path)
tools/power/x86/turbostat/turbostat.c
4482
if (get_msr(cpu, mp->msr_num, counterp))
tools/power/x86/turbostat/turbostat.c
4488
sprintf(path, "/sys/devices/system/cpu/cpu%d/%s", cpu, mp->sp->path);
tools/power/x86/turbostat/turbostat.c
4523
int get_epb(int cpu)
tools/power/x86/turbostat/turbostat.c
4530
sprintf(path, "/sys/devices/system/cpu/cpu%d/power/energy_perf_bias", cpu);
tools/power/x86/turbostat/turbostat.c
4548
get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr);
tools/power/x86/turbostat/turbostat.c
4601
int get_core_throt_cnt(int cpu, unsigned long long *cnt)
tools/power/x86/turbostat/turbostat.c
4608
sprintf(path, "/sys/devices/system/cpu/cpu%d/thermal_throttle/core_throttle_count", cpu);
tools/power/x86/turbostat/turbostat.c
4797
int get_rapl_counters(int cpu, unsigned int domain, struct core_data *c, struct pkg_data *p)
tools/power/x86/turbostat/turbostat.c
4804
fprintf(stderr, "%s: cpu%d domain%d\n", __func__, cpu, domain);
tools/power/x86/turbostat/turbostat.c
4848
if (get_msr_sum(cpu, rci->msr[i], &rci->data[i]))
tools/power/x86/turbostat/turbostat.c
4851
if (get_msr(cpu, rci->msr[i], &rci->data[i]))
tools/power/x86/turbostat/turbostat.c
4890
int get_cstate_counters(unsigned int cpu, PER_THREAD_PARAMS)
tools/power/x86/turbostat/turbostat.c
4903
fprintf(stderr, "%s: cpu%d\n", __func__, cpu);
tools/power/x86/turbostat/turbostat.c
4906
assert(cpu <= ccstate_counter_info_size);
tools/power/x86/turbostat/turbostat.c
4912
cci = &ccstate_counter_info[cpu];
tools/power/x86/turbostat/turbostat.c
4982
if (get_msr(cpu, cci->msr[i], &cci->data[i]))
tools/power/x86/turbostat/turbostat.c
5035
int get_smi_aperf_mperf(unsigned int cpu, struct thread_data *t)
tools/power/x86/turbostat/turbostat.c
5042
fprintf(stderr, "%s: cpu%d\n", __func__, cpu);
tools/power/x86/turbostat/turbostat.c
5045
assert(cpu <= msr_counter_info_size);
tools/power/x86/turbostat/turbostat.c
5047
mci = &msr_counter_info[cpu];
tools/power/x86/turbostat/turbostat.c
5081
if (get_msr(cpu, mci->msr[i], &mci->data[i]))
tools/power/x86/turbostat/turbostat.c
5101
int perf_counter_info_read_values(struct perf_counter_info *pp, int cpu, unsigned long long *out, size_t out_size)
tools/power/x86/turbostat/turbostat.c
5108
domain = cpu_to_domain(pp, cpu);
tools/power/x86/turbostat/turbostat.c
5162
static inline int get_rapl_domain_id(int cpu)
tools/power/x86/turbostat/turbostat.c
5165
return cpus[cpu].package_id;
tools/power/x86/turbostat/turbostat.c
5167
return GLOBAL_CORE_ID(cpus[cpu].core_id, cpus[cpu].package_id);
tools/power/x86/turbostat/turbostat.c
5177
int cpu = t->cpu_id;
tools/power/x86/turbostat/turbostat.c
5184
if (cpu_migrate(cpu)) {
tools/power/x86/turbostat/turbostat.c
5185
fprintf(outf, "%s: Could not migrate to CPU %d\n", __func__, cpu);
tools/power/x86/turbostat/turbostat.c
5196
get_smi_aperf_mperf(cpu, t);
tools/power/x86/turbostat/turbostat.c
5199
get_perf_llc_stats(cpu, &t->llc);
tools/power/x86/turbostat/turbostat.c
5202
get_perf_l2_stats(cpu, &t->l2);
tools/power/x86/turbostat/turbostat.c
5205
if (read(get_instr_count_fd(cpu), &t->instr_count, sizeof(long long)) != sizeof(long long))
tools/power/x86/turbostat/turbostat.c
5209
t->irq_count = irqs_per_cpu[cpu];
tools/power/x86/turbostat/turbostat.c
5211
t->nmi_count = nmi_per_cpu[cpu];
tools/power/x86/turbostat/turbostat.c
5213
get_cstate_counters(cpu, t, c, p);
tools/power/x86/turbostat/turbostat.c
5216
if (get_mp(cpu, mp, &t->counter[i], mp->sp->path))
tools/power/x86/turbostat/turbostat.c
5220
if (perf_counter_info_read_values(sys.perf_tp, cpu, t->perf_counter, MAX_ADDED_THREAD_COUNTERS))
tools/power/x86/turbostat/turbostat.c
5231
status = get_rapl_counters(cpu, get_rapl_domain_id(cpu), c, p);
tools/power/x86/turbostat/turbostat.c
5247
if (get_msr(cpu, MSR_MODULE_C6_RES_MS, &c->mc6_us))
tools/power/x86/turbostat/turbostat.c
5251
if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
tools/power/x86/turbostat/turbostat.c
5257
get_core_throt_cnt(cpu, &c->core_throt_cnt);
tools/power/x86/turbostat/turbostat.c
5260
if (get_mp(cpu, mp, &c->counter[i], mp->sp->path))
tools/power/x86/turbostat/turbostat.c
5264
if (perf_counter_info_read_values(sys.perf_cp, cpu, c->perf_counter, MAX_ADDED_CORE_COUNTERS))
tools/power/x86/turbostat/turbostat.c
5275
if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
tools/power/x86/turbostat/turbostat.c
5279
if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
tools/power/x86/turbostat/turbostat.c
5283
if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
tools/power/x86/turbostat/turbostat.c
5287
if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
tools/power/x86/turbostat/turbostat.c
5297
status = get_rapl_counters(cpu, get_rapl_domain_id(cpu), c, p);
tools/power/x86/turbostat/turbostat.c
5303
if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
tools/power/x86/turbostat/turbostat.c
5339
if (get_mp(cpu, mp, &p->counter[i], path))
tools/power/x86/turbostat/turbostat.c
5343
if (perf_counter_info_read_values(sys.perf_pp, cpu, p->perf_counter, MAX_ADDED_PACKAGE_COUNTERS))
tools/power/x86/turbostat/turbostat.c
570
int get_msr(int cpu, off_t offset, unsigned long long *msr);
tools/power/x86/turbostat/turbostat.c
5865
for (int cpu = 0; cpu < topo.max_cpu_num; ++cpu) {
tools/power/x86/turbostat/turbostat.c
5866
if (msr_counter_info[cpu].fd_perf != -1)
tools/power/x86/turbostat/turbostat.c
5867
close(msr_counter_info[cpu].fd_perf);
tools/power/x86/turbostat/turbostat.c
6025
int cpu_is_first_core_in_package(int cpu)
tools/power/x86/turbostat/turbostat.c
6027
return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
tools/power/x86/turbostat/turbostat.c
6030
int get_package_id(int cpu)
tools/power/x86/turbostat/turbostat.c
6032
return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
tools/power/x86/turbostat/turbostat.c
6035
int get_die_id(int cpu)
tools/power/x86/turbostat/turbostat.c
6037
return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
tools/power/x86/turbostat/turbostat.c
6040
int get_l3_id(int cpu)
tools/power/x86/turbostat/turbostat.c
6042
return parse_int_file("/sys/devices/system/cpu/cpu%d/cache/index3/id", cpu);
tools/power/x86/turbostat/turbostat.c
6045
int get_core_id(int cpu)
tools/power/x86/turbostat/turbostat.c
6047
return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
tools/power/x86/turbostat/turbostat.c
6052
int pkg, node, lnode, cpu, cpux;
tools/power/x86/turbostat/turbostat.c
6056
for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu)
tools/power/x86/turbostat/turbostat.c
6057
cpus[cpu].logical_node_id = -1;
tools/power/x86/turbostat/turbostat.c
6062
for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
tools/power/x86/turbostat/turbostat.c
6063
if (cpus[cpu].package_id != pkg)
tools/power/x86/turbostat/turbostat.c
6066
if (cpus[cpu].logical_node_id != -1)
tools/power/x86/turbostat/turbostat.c
6068
cpus[cpu].logical_node_id = lnode;
tools/power/x86/turbostat/turbostat.c
6069
node = cpus[cpu].physical_node_id;
tools/power/x86/turbostat/turbostat.c
6075
for (cpux = cpu; cpux <= topo.max_cpu_num; cpux++) {
tools/power/x86/turbostat/turbostat.c
6095
int cpu = thiscpu->cpu_id;
tools/power/x86/turbostat/turbostat.c
6098
sprintf(path, "/sys/devices/system/cpu/cpu%d/node%i/cpulist", cpu, i);
tools/power/x86/turbostat/turbostat.c
6170
int cpu = thiscpu->cpu_id;
tools/power/x86/turbostat/turbostat.c
6184
sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", cpu);
tools/power/x86/turbostat/turbostat.c
6201
if ((so != cpu) && (cpus[so].ht_id < 0)) {
tools/power/x86/turbostat/turbostat.c
6203
cpus[cpu].ht_sibling_cpu_id[thread_id] = so;
tools/power/x86/turbostat/turbostat.c
6205
fprintf(stderr, "%s: cpu%d.ht_sibling_cpu_id[%d] = %d\n", __func__, cpu, thread_id, so);
tools/power/x86/turbostat/turbostat.c
6228
int cpu, retval;
tools/power/x86/turbostat/turbostat.c
6232
for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
tools/power/x86/turbostat/turbostat.c
6237
if (cpu_is_not_allowed(cpu))
tools/power/x86/turbostat/turbostat.c
6240
if (cpus[cpu].ht_id > 0) /* skip HT sibling */
tools/power/x86/turbostat/turbostat.c
6243
t = &thread_base[cpu];
tools/power/x86/turbostat/turbostat.c
6244
t2 = &thread_base2[cpu];
tools/power/x86/turbostat/turbostat.c
6245
c = &core_base[GLOBAL_CORE_ID(cpus[cpu].core_id, cpus[cpu].package_id)];
tools/power/x86/turbostat/turbostat.c
6246
c2 = &core_base2[GLOBAL_CORE_ID(cpus[cpu].core_id, cpus[cpu].package_id)];
tools/power/x86/turbostat/turbostat.c
6247
p = &pkg_base[cpus[cpu].package_id];
tools/power/x86/turbostat/turbostat.c
6248
p2 = &pkg_base2[cpus[cpu].package_id];
tools/power/x86/turbostat/turbostat.c
6256
if (cpus[cpu].ht_sibling_cpu_id[i] <= 0)
tools/power/x86/turbostat/turbostat.c
6258
t = &thread_base[cpus[cpu].ht_sibling_cpu_id[i]];
tools/power/x86/turbostat/turbostat.c
6259
t2 = &thread_base2[cpus[cpu].ht_sibling_cpu_id[i]];
tools/power/x86/turbostat/turbostat.c
6386
int count_cpus(int cpu)
tools/power/x86/turbostat/turbostat.c
6388
UNUSED(cpu);
tools/power/x86/turbostat/turbostat.c
6394
int mark_cpu_present(int cpu)
tools/power/x86/turbostat/turbostat.c
6396
CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
tools/power/x86/turbostat/turbostat.c
6400
int clear_ht_id(int cpu)
tools/power/x86/turbostat/turbostat.c
6404
cpus[cpu].ht_id = -1;
tools/power/x86/turbostat/turbostat.c
6406
cpus[cpu].ht_sibling_cpu_id[i] = -1;
tools/power/x86/turbostat/turbostat.c
6425
int set_cpu_hybrid_type(int cpu)
tools/power/x86/turbostat/turbostat.c
6427
if (cpu_migrate(cpu))
tools/power/x86/turbostat/turbostat.c
6432
cpus[cpu].type = type;
tools/power/x86/turbostat/turbostat.c
6697
int get_msr_sum(int cpu, off_t offset, unsigned long long *msr)
tools/power/x86/turbostat/turbostat.c
6711
ret = get_msr(cpu, offset, &msr_cur);
tools/power/x86/turbostat/turbostat.c
6714
msr_last = per_cpu_msr_sum[cpu].entries[idx].last;
tools/power/x86/turbostat/turbostat.c
6716
*msr = msr_last + per_cpu_msr_sum[cpu].entries[idx].sum;
tools/power/x86/turbostat/turbostat.c
6727
int cpu = t->cpu_id;
tools/power/x86/turbostat/turbostat.c
6743
ret = get_msr(cpu, offset, &msr_cur);
tools/power/x86/turbostat/turbostat.c
6749
msr_last = per_cpu_msr_sum[cpu].entries[i].last;
tools/power/x86/turbostat/turbostat.c
6750
per_cpu_msr_sum[cpu].entries[i].last = msr_cur & 0xffffffff;
tools/power/x86/turbostat/turbostat.c
6753
per_cpu_msr_sum[cpu].entries[i].sum += msr_last;
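The get_msr_sum() entries above exist because some counters (notably RAPL energy MSRs) are effectively 32-bit and can wrap within minutes, so the tool snapshots them on a timer and accumulates into 64 bits. The idea in isolation, as a sketch that is a clean equivalent of the per-CPU bookkeeping indexed above rather than a copy of it:

/* Fold a wrapping 32-bit hardware counter into a monotonically
 * growing 64-bit sum. Call at least once per wrap period. */
struct msr_sum {
	unsigned long long last;	/* low 32 bits at the previous snapshot */
	unsigned long long sum;		/* accumulated 64-bit total */
};

static void msr_sum_update(struct msr_sum *s, unsigned long long msr_cur)
{
	unsigned long long cur = msr_cur & 0xffffffff;

	if (cur >= s->last)
		s->sum += cur - s->last;
	else				/* counter wrapped since last call */
		s->sum += (0x100000000ull - s->last) + cur;
	s->last = cur;
}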
tools/power/x86/turbostat/turbostat.c
7486
int cpu, epb;
tools/power/x86/turbostat/turbostat.c
7494
cpu = t->cpu_id;
tools/power/x86/turbostat/turbostat.c
7500
if (cpu_migrate(cpu)) {
tools/power/x86/turbostat/turbostat.c
7501
fprintf(outf, "print_epb: Could not migrate to CPU %d\n", cpu);
tools/power/x86/turbostat/turbostat.c
7505
epb = get_epb(cpu);
tools/power/x86/turbostat/turbostat.c
7523
fprintf(outf, "cpu%d: EPB: %d (%s)\n", cpu, epb, epb_string);
tools/power/x86/turbostat/turbostat.c
7535
int cpu;
tools/power/x86/turbostat/turbostat.c
7546
cpu = t->cpu_id;
tools/power/x86/turbostat/turbostat.c
7552
if (cpu_migrate(cpu)) {
tools/power/x86/turbostat/turbostat.c
7553
fprintf(outf, "print_hwp: Could not migrate to CPU %d\n", cpu);
tools/power/x86/turbostat/turbostat.c
7557
if (get_msr(cpu, MSR_PM_ENABLE, &msr))
tools/power/x86/turbostat/turbostat.c
7560
fprintf(outf, "cpu%d: MSR_PM_ENABLE: 0x%08llx (%sHWP)\n", cpu, msr, (msr & (1 << 0)) ? "" : "No-");
tools/power/x86/turbostat/turbostat.c
7566
if (get_msr(cpu, MSR_HWP_CAPABILITIES, &msr))
tools/power/x86/turbostat/turbostat.c
7571
cpu, msr,
tools/power/x86/turbostat/turbostat.c
7575
if (get_msr(cpu, MSR_HWP_REQUEST, &msr))
tools/power/x86/turbostat/turbostat.c
7580
cpu, msr,
tools/power/x86/turbostat/turbostat.c
7587
if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr))
tools/power/x86/turbostat/turbostat.c
7592
cpu, msr,
tools/power/x86/turbostat/turbostat.c
7598
if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr))
tools/power/x86/turbostat/turbostat.c
7602
"(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n", cpu, msr, ((msr) & 0x1) ? "EN" : "Dis", ((msr) & 0x2) ? "EN" : "Dis");
tools/power/x86/turbostat/turbostat.c
7604
if (get_msr(cpu, MSR_HWP_STATUS, &msr))
tools/power/x86/turbostat/turbostat.c
7608
"(%sGuaranteed_Perf_Change, %sExcursion_Min)\n", cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x4) ? "" : "No-");
tools/power/x86/turbostat/turbostat.c
7619
int cpu;
tools/power/x86/turbostat/turbostat.c
7627
cpu = t->cpu_id;
tools/power/x86/turbostat/turbostat.c
7633
if (cpu_migrate(cpu)) {
tools/power/x86/turbostat/turbostat.c
7634
fprintf(outf, "print_perf_limit: Could not migrate to CPU %d\n", cpu);
tools/power/x86/turbostat/turbostat.c
7639
get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
tools/power/x86/turbostat/turbostat.c
7640
fprintf(outf, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
tools/power/x86/turbostat/turbostat.c
7670
get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
tools/power/x86/turbostat/turbostat.c
7671
fprintf(outf, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
tools/power/x86/turbostat/turbostat.c
7688
get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
tools/power/x86/turbostat/turbostat.c
7689
fprintf(outf, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
tools/power/x86/turbostat/turbostat.c
7821
void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
tools/power/x86/turbostat/turbostat.c
7824
cpu, label,
tools/power/x86/turbostat/turbostat.c
7988
int cpu;
tools/power/x86/turbostat/turbostat.c
8000
cpu = t->cpu_id;
tools/power/x86/turbostat/turbostat.c
8001
if (cpu_migrate(cpu)) {
tools/power/x86/turbostat/turbostat.c
8002
fprintf(outf, "print_rapl: Could not migrate to CPU %d\n", cpu);
tools/power/x86/turbostat/turbostat.c
8008
if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr))
tools/power/x86/turbostat/turbostat.c
8012
if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
tools/power/x86/turbostat/turbostat.c
8016
fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr, rapl_power_units, rapl_energy_units, rapl_time_units);
tools/power/x86/turbostat/turbostat.c
8020
if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
tools/power/x86/turbostat/turbostat.c
8024
cpu, msr,
tools/power/x86/turbostat/turbostat.c
8032
if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
tools/power/x86/turbostat/turbostat.c
8035
fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 63) & 1 ? "" : "UN");
tools/power/x86/turbostat/turbostat.c
8037
print_power_limit_msr(cpu, msr, "PKG Limit #1");
tools/power/x86/turbostat/turbostat.c
8039
cpu,
tools/power/x86/turbostat/turbostat.c
8044
if (get_msr(cpu, MSR_VR_CURRENT_CONFIG, &msr))
tools/power/x86/turbostat/turbostat.c
8047
fprintf(outf, "cpu%d: MSR_VR_CURRENT_CONFIG: 0x%08llx\n", cpu, msr);
tools/power/x86/turbostat/turbostat.c
8048
fprintf(outf, "cpu%d: PKG Limit #4: %f Watts (%slocked)\n", cpu, ((msr >> 0) & 0x1FFF) * rapl_power_units, (msr >> 31) & 1 ? "" : "UN");
tools/power/x86/turbostat/turbostat.c
8052
if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
tools/power/x86/turbostat/turbostat.c
8056
cpu, msr,
tools/power/x86/turbostat/turbostat.c
8062
if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
tools/power/x86/turbostat/turbostat.c
8064
fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? "" : "UN");
tools/power/x86/turbostat/turbostat.c
8066
print_power_limit_msr(cpu, msr, "DRAM Limit");
tools/power/x86/turbostat/turbostat.c
8069
if (get_msr(cpu, MSR_PP0_POLICY, &msr))
tools/power/x86/turbostat/turbostat.c
8072
fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
tools/power/x86/turbostat/turbostat.c
8075
if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
tools/power/x86/turbostat/turbostat.c
8077
fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? "" : "UN");
tools/power/x86/turbostat/turbostat.c
8078
print_power_limit_msr(cpu, msr, "Cores Limit");
tools/power/x86/turbostat/turbostat.c
8081
if (get_msr(cpu, MSR_PP1_POLICY, &msr))
tools/power/x86/turbostat/turbostat.c
8084
fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);
tools/power/x86/turbostat/turbostat.c
8086
if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
tools/power/x86/turbostat/turbostat.c
8088
fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? "" : "UN");
tools/power/x86/turbostat/turbostat.c
8089
print_power_limit_msr(cpu, msr, "GFX Limit");
tools/power/x86/turbostat/turbostat.c
8172
int cpu;
tools/power/x86/turbostat/turbostat.c
8185
cpu = t->cpu_id;
tools/power/x86/turbostat/turbostat.c
8186
if (cpu_migrate(cpu)) {
tools/power/x86/turbostat/turbostat.c
8187
fprintf(outf, "Could not migrate to CPU %d\n", cpu);
tools/power/x86/turbostat/turbostat.c
8193
fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n", cpu, tj_max);
tools/power/x86/turbostat/turbostat.c
8216
cpu, msr, tcc_default - tcc_offset, tcc_default, tcc_offset);
tools/power/x86/turbostat/turbostat.c
8218
fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", cpu, msr, tcc_default);
tools/power/x86/turbostat/turbostat.c
8231
fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n", cpu, tj_max);
tools/power/x86/turbostat/turbostat.c
8240
int cpu;
tools/power/x86/turbostat/turbostat.c
8251
cpu = t->cpu_id;
tools/power/x86/turbostat/turbostat.c
8257
if (cpu_migrate(cpu)) {
tools/power/x86/turbostat/turbostat.c
8258
fprintf(outf, "print_thermal: Could not migrate to CPU %d\n", cpu);
tools/power/x86/turbostat/turbostat.c
8263
if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
tools/power/x86/turbostat/turbostat.c
8267
fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", cpu, msr, tj_max - dts);
tools/power/x86/turbostat/turbostat.c
8269
if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
tools/power/x86/turbostat/turbostat.c
8274
fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", cpu, msr, tj_max - dts, tj_max - dts2);
tools/power/x86/turbostat/turbostat.c
8280
if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
tools/power/x86/turbostat/turbostat.c
8285
fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", cpu, msr, tj_max - dts, resolution);
tools/power/x86/turbostat/turbostat.c
8287
if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
tools/power/x86/turbostat/turbostat.c
8292
fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", cpu, msr, tj_max - dts, tj_max - dts2);
tools/power/x86/turbostat/turbostat.c
8477
int add_rapl_perf_counter(int cpu, struct rapl_counter_info_t *rci, const struct rapl_counter_arch_info *cai, double *scale_, enum rapl_unit *unit_)
tools/power/x86/turbostat/turbostat.c
8500
ret = open_perf_counter(cpu, rapl_type, rapl_energy_pkg_config, rci->fd_perf, PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
8513
fprintf(stderr, "%s: %d (cpu: %d)\n", __func__, ret, cpu);
tools/power/x86/turbostat/turbostat.c
8680
for (int cpu = 0; cpu < topo.max_cpu_num + 1; ++cpu) {
tools/power/x86/turbostat/turbostat.c
8682
if (cpu_is_not_allowed(cpu))
tools/power/x86/turbostat/turbostat.c
8686
next_domain = get_rapl_domain_id(cpu);
tools/power/x86/turbostat/turbostat.c
8695
if ((cai->flags & RAPL_COUNTER_FLAG_PLATFORM_COUNTER) && (cpu != master_cpu))
tools/power/x86/turbostat/turbostat.c
8716
if (add_rapl_perf_counter(cpu, rci, cai, &scale, &unit) != -1) {
tools/power/x86/turbostat/turbostat.c
8723
} else if (add_rapl_msr_counter(cpu, cai) >= 0) {
tools/power/x86/turbostat/turbostat.c
8762
int add_cstate_perf_counter(int cpu, struct cstate_counter_info_t *cci, const struct cstate_counter_arch_info *cai)
tools/power/x86/turbostat/turbostat.c
8780
ret = open_perf_counter(cpu, type, config, *pfd_group, PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
8791
fprintf(stderr, "%s: %d (cpu: %d)\n", __func__, ret, cpu);
tools/power/x86/turbostat/turbostat.c
8796
int add_msr_perf_counter(int cpu, struct msr_counter_info_t *cci, const struct msr_counter_arch_info *cai)
tools/power/x86/turbostat/turbostat.c
8809
ret = open_perf_counter(cpu, type, config, cci->fd_perf, PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
8820
fprintf(stderr, "%s: %s/%s: %d (cpu: %d)\n", __func__, cai->perf_subsys, cai->perf_name, ret, cpu);
tools/power/x86/turbostat/turbostat.c
8834
for (int cpu = 0; cpu < mci_num; ++cpu)
tools/power/x86/turbostat/turbostat.c
8835
msr_counter_info[cpu].fd_perf = -1;
tools/power/x86/turbostat/turbostat.c
8843
for (int cpu = 0; cpu < mci_num; ++cpu) {
tools/power/x86/turbostat/turbostat.c
8845
struct msr_counter_info_t *const cci = &msr_counter_info[cpu];
tools/power/x86/turbostat/turbostat.c
8847
if (cpu_is_not_allowed(cpu))
tools/power/x86/turbostat/turbostat.c
8852
if (add_msr_perf_counter(cpu, cci, cai) != -1) {
tools/power/x86/turbostat/turbostat.c
8857
} else if (add_msr_counter(cpu, cai->msr) >= 0) {
tools/power/x86/turbostat/turbostat.c
8925
for (int cpu = 0; cpu < cci_num; ++cpu) {
tools/power/x86/turbostat/turbostat.c
8926
ccstate_counter_info[cpu].fd_perf_core = -1;
tools/power/x86/turbostat/turbostat.c
8927
ccstate_counter_info[cpu].fd_perf_pkg = -1;
tools/power/x86/turbostat/turbostat.c
8937
for (int cpu = 0; cpu < cci_num; ++cpu) {
tools/power/x86/turbostat/turbostat.c
8939
struct cstate_counter_info_t *const cci = &ccstate_counter_info[cpu];
tools/power/x86/turbostat/turbostat.c
8941
if (cpu_is_not_allowed(cpu))
tools/power/x86/turbostat/turbostat.c
8944
const int core_id = cpus[cpu].core_id;
tools/power/x86/turbostat/turbostat.c
8945
const int pkg_id = cpus[cpu].package_id;
tools/power/x86/turbostat/turbostat.c
8964
if (add_cstate_perf_counter(cpu, cci, cai) != -1) {
tools/power/x86/turbostat/turbostat.c
8969
} else if (pkg_cstate_limit >= cai->pkg_cstate_limit && add_msr_counter(cpu, cai->msr) >= 0) {
tools/power/x86/turbostat/turbostat.c
9352
int cpu;
tools/power/x86/turbostat/turbostat.c
9362
for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
tools/power/x86/turbostat/turbostat.c
9364
if (cpu_is_not_allowed(cpu))
tools/power/x86/turbostat/turbostat.c
9367
fd_llc_percpu[cpu] = open_perf_counter(cpu, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES, -1, PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
9368
if (fd_llc_percpu[cpu] == -1) {
tools/power/x86/turbostat/turbostat.c
9369
warnx("%s: perf REFS: failed to open counter on cpu%d", __func__, cpu);
tools/power/x86/turbostat/turbostat.c
9373
retval = open_perf_counter(cpu, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES, fd_llc_percpu[cpu], PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
9375
warnx("%s: perf MISS: failed to open counter on cpu%d", __func__, cpu);
tools/power/x86/turbostat/turbostat.c
9386
int cpu;
tools/power/x86/turbostat/turbostat.c
9398
for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
tools/power/x86/turbostat/turbostat.c
9400
if (cpu_is_not_allowed(cpu))
tools/power/x86/turbostat/turbostat.c
9404
fd_l2_percpu[cpu] = open_perf_counter(cpu, perf_pmu_types.uniform, perf_model_support->first.refs, -1, PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
9405
if (fd_l2_percpu[cpu] == -1) {
tools/power/x86/turbostat/turbostat.c
9406
err(-1, "%s(cpu%d, 0x%x, 0x%llx) REFS", __func__, cpu, perf_pmu_types.uniform, perf_model_support->first.refs);
tools/power/x86/turbostat/turbostat.c
9410
retval = open_perf_counter(cpu, perf_pmu_types.uniform, perf_model_support->first.hits, fd_l2_percpu[cpu], PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
9412
err(-1, "%s(cpu%d, 0x%x, 0x%llx) HITS", __func__, cpu, perf_pmu_types.uniform, perf_model_support->first.hits);
tools/power/x86/turbostat/turbostat.c
9418
if (perf_pcore_set && CPU_ISSET_S(cpu, cpu_possible_setsize, perf_pcore_set)) {
tools/power/x86/turbostat/turbostat.c
9419
fd_l2_percpu[cpu] = open_perf_counter(cpu, perf_pmu_types.pcore, perf_model_support->first.refs, -1, PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
9420
if (fd_l2_percpu[cpu] == -1) {
tools/power/x86/turbostat/turbostat.c
9421
err(-1, "%s(cpu%d, 0x%x, 0x%llx) REFS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->first.refs);
tools/power/x86/turbostat/turbostat.c
9425
retval = open_perf_counter(cpu, perf_pmu_types.pcore, perf_model_support->first.hits, fd_l2_percpu[cpu], PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
9427
err(-1, "%s(cpu%d, 0x%x, 0x%llx) HITS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->first.hits);
tools/power/x86/turbostat/turbostat.c
9431
} else if (perf_ecore_set && CPU_ISSET_S(cpu, cpu_possible_setsize, perf_ecore_set)) {
tools/power/x86/turbostat/turbostat.c
9432
fd_l2_percpu[cpu] = open_perf_counter(cpu, perf_pmu_types.ecore, perf_model_support->second.refs, -1, PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
9433
if (fd_l2_percpu[cpu] == -1) {
tools/power/x86/turbostat/turbostat.c
9434
err(-1, "%s(cpu%d, 0x%x, 0x%llx) REFS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->second.refs);
tools/power/x86/turbostat/turbostat.c
9438
retval = open_perf_counter(cpu, perf_pmu_types.ecore, perf_model_support->second.hits, fd_l2_percpu[cpu], PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
9440
err(-1, "%s(cpu%d, 0x%x, 0x%llx) HITS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->second.hits);
tools/power/x86/turbostat/turbostat.c
9444
} else if (perf_lcore_set && CPU_ISSET_S(cpu, cpu_possible_setsize, perf_lcore_set)) {
tools/power/x86/turbostat/turbostat.c
9445
fd_l2_percpu[cpu] = open_perf_counter(cpu, perf_pmu_types.lcore, perf_model_support->third.refs, -1, PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
9446
if (fd_l2_percpu[cpu] == -1) {
tools/power/x86/turbostat/turbostat.c
9447
err(-1, "%s(cpu%d, 0x%x, 0x%llx) REFS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->third.refs);
tools/power/x86/turbostat/turbostat.c
9451
retval = open_perf_counter(cpu, perf_pmu_types.lcore, perf_model_support->third.hits, fd_l2_percpu[cpu], PERF_FORMAT_GROUP);
tools/power/x86/turbostat/turbostat.c
9453
err(-1, "%s(cpu%d, 0x%x, 0x%llx) HITS", __func__, cpu, perf_pmu_types.pcore, perf_model_support->third.hits);
tools/power/x86/turbostat/turbostat.c
9458
err(-1, "%s: cpu%d: type %d", __func__, cpu, cpus[cpu].type);
tools/power/x86/turbostat/turbostat.c
9955
for (int cpu = 0; cpu < topo.max_cpu_num + 1; ++cpu) {
tools/power/x86/turbostat/turbostat.c
9957
next_domain = cpu_to_domain(pinfo, cpu);
tools/power/x86/turbostat/turbostat.c
9961
if (cpu_is_not_allowed(cpu))
tools/power/x86/turbostat/turbostat.c
9980
switch (cpus[cpu].type) {
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1006
int update_sysfs(int cpu)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1018
update_cpufreq_scaling_freq(0, cpu, req_update.hwp_min);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1021
update_cpufreq_scaling_freq(1, cpu, req_update.hwp_max);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1028
int verify_hwp_req_self_consistency(int cpu, struct msr_hwp_request *req)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1033
cpu, req->hwp_min, req->hwp_max);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1039
cpu, req->hwp_desired, req->hwp_max);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1044
cpu, req->hwp_desired, req->hwp_min);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1050
int check_hwp_request_v_hwp_capabilities(int cpu, struct msr_hwp_request *req, struct msr_hwp_cap *cap)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1055
cpu, req->hwp_max, cap->highest);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1058
cpu, req->hwp_max, cap->lowest);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1064
cpu, req->hwp_min, cap->highest);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1067
cpu, req->hwp_min, cap->lowest);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1072
cpu, req->hwp_min, req->hwp_max);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1077
cpu, req->hwp_desired, req->hwp_max);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1080
cpu, req->hwp_desired, req->hwp_min);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1083
cpu, req->hwp_desired, cap->lowest);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1086
cpu, req->hwp_desired, cap->highest);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1092
int update_hwp_request_msr(int cpu)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1099
read_hwp_request_msr(cpu, &req, msr_offset);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1101
print_hwp_request(cpu, &req, "old: ");
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1120
read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1122
print_hwp_cap(cpu, &cap, "");
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1125
check_hwp_request_v_hwp_capabilities(cpu, &req, &cap);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1127
verify_hwp_req_self_consistency(cpu, &req);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1129
write_hwp_request_msr(cpu, &req, msr_offset);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1132
read_hwp_request_msr(cpu, &req, msr_offset);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1133
print_hwp_request(cpu, &req, "new: ");
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1141
int cpu = first_cpu_in_pkg[pkg];
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1145
read_hwp_request_msr(cpu, &req, msr_offset);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1164
read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1166
print_hwp_cap(cpu, &cap, "");
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1169
check_hwp_request_v_hwp_capabilities(cpu, &req, &cap);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1171
verify_hwp_req_self_consistency(cpu, &req);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1173
write_hwp_request_msr(cpu, &req, msr_offset);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1176
read_hwp_request_msr(cpu, &req, msr_offset);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1182
int enable_hwp_on_cpu(int cpu)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1186
get_msr(cpu, MSR_PM_ENABLE, &old_msr);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1192
put_msr(cpu, MSR_PM_ENABLE, new_msr);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1195
printf("cpu%d: MSR_PM_ENABLE old: %llX new: %llX\n", cpu, old_msr, new_msr);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1200
int update_cpu_epb_sysfs(int cpu)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1204
epb = get_epb_sysfs(cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1205
set_epb_sysfs(cpu, new_epb);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1209
cpu, epb, (unsigned int) new_epb);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1214
int update_cpu_msrs(int cpu)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1221
get_msr(cpu, MSR_IA32_MISC_ENABLE, &msr);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1228
put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1230
printf("cpu%d: turbo ENABLE\n", cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1239
put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1241
printf("cpu%d: turbo DISABLE\n", cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1251
update_hwp_request_msr(cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1255
unsigned int get_pkg_num(int cpu)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1262
sprintf(pathname, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1272
int set_max_cpu_pkg_num(int cpu)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1276
if (max_cpu_num < cpu)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1277
max_cpu_num = cpu;
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1279
pkg = get_pkg_num(cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1282
errx(1, "cpu%d: %d >= MAX_PACKAGES (%d)", cpu, pkg, MAX_PACKAGES);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1289
first_cpu_in_pkg[pkg] = cpu;
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1294
int mark_cpu_present(int cpu)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
1296
CPU_SET_S(cpu, cpu_setsize, cpu_present_set);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
367
int cpu;
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
372
for (cpu = 0; cpu <= max_cpu_num; ++cpu) {
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
373
if (CPU_ISSET_S(cpu, cpu_setsize, cpu_selected_set))
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
374
if (!CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
375
errx(1, "Requested cpu%d is not present", cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
382
int cpu = 0;
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
408
while (cpu <= end_cpu) {
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
409
if (cpu > max_cpu_num)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
410
errx(1, "Requested cpu%d exceeds max cpu%d", cpu, max_cpu_num);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
411
CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
412
cpu++;
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
419
for (cpu = 0; cpu <= max_cpu_num; cpu += 1) {
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
420
if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
421
CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
429
for (cpu = 0; cpu <= max_cpu_num; cpu += 2) {
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
430
if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
431
CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
440
for (cpu = 1; cpu <= max_cpu_num; cpu += 2) {
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
441
if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
442
CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
449
cpu = strtol(startp, &endp, 10);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
452
if (cpu > max_cpu_num)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
453
errx(1, "Requested cpu%d exceeds max cpu%d", cpu, max_cpu_num);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
454
CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
688
int get_msr(int cpu, int offset, unsigned long long *msr)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
694
sprintf(pathname, use_android_msr_path ? "/dev/msr%d" : "/dev/cpu/%d/msr", cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
708
fprintf(stderr, "get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
714
int put_msr(int cpu, int offset, unsigned long long new_msr)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
720
sprintf(pathname, use_android_msr_path ? "/dev/msr%d" : "/dev/cpu/%d/msr", cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
728
err(-2, "pwrite(cpu%d, offset 0x%x, 0x%llx) = %d", cpu, offset, new_msr, retval);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
733
fprintf(stderr, "put_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, new_msr);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
780
void print_hwp_cap(int cpu, struct msr_hwp_cap *cap, char *str)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
782
if (cpu != -1)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
783
printf("cpu%d: ", cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
788
void read_hwp_cap(int cpu, struct msr_hwp_cap *cap, unsigned int msr_offset)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
792
get_msr(cpu, msr_offset, &msr);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
800
void print_hwp_request(int cpu, struct msr_hwp_request *h, char *str)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
802
if (cpu != -1)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
803
printf("cpu%d: ", cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
823
void read_hwp_request_msr(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
827
get_msr(cpu, msr_offset, &msr);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
837
void write_hwp_request_msr(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
843
cpu, hwp_req->hwp_min, hwp_req->hwp_max,
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
854
put_msr(cpu, msr_offset, msr);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
857
static int get_epb_sysfs(int cpu)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
867
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/power/energy_perf_bias", cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
879
static int set_epb_sysfs(int cpu, int val)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
889
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/power/energy_perf_bias", cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
903
int print_cpu_msrs(int cpu)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
909
epb = get_epb_sysfs(cpu);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
911
printf("cpu%d: EPB %u\n", cpu, (unsigned int) epb);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
916
read_hwp_request_msr(cpu, &req, MSR_HWP_REQUEST);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
917
print_hwp_request(cpu, &req, "");
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
919
read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
920
print_hwp_cap(cpu, &cap, "");
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
972
void update_cpufreq_scaling_freq(int is_max, int cpu, unsigned int ratio)
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
980
cpu, is_max ? "max" : "min");
tools/sched_ext/include/scx/common.bpf.h
101
struct task_struct *scx_bpf_cpu_curr(s32 cpu) __ksym __weak;
tools/sched_ext/include/scx/common.bpf.h
358
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
tools/sched_ext/include/scx/common.bpf.h
359
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
tools/sched_ext/include/scx/common.bpf.h
360
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
tools/sched_ext/include/scx/common.bpf.h
361
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
tools/sched_ext/include/scx/common.bpf.h
362
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
tools/sched_ext/include/scx/common.bpf.h
411
#define def_for_each_cpu(cpu, name) for_each_##name##_cpu(cpu)
tools/sched_ext/include/scx/common.bpf.h
434
#define for_each_possible_cpu(cpu) bpf_for_each(possible, cpu, NULL, 0)
tools/sched_ext/include/scx/common.bpf.h
440
#define for_each_online_cpu(cpu) bpf_for_each(online, cpu, NULL, 0)
tools/sched_ext/include/scx/common.bpf.h
68
void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
tools/sched_ext/include/scx/common.bpf.h
78
u32 scx_bpf_cpuperf_cap(s32 cpu) __ksym __weak;
tools/sched_ext/include/scx/common.bpf.h
79
u32 scx_bpf_cpuperf_cur(s32 cpu) __ksym __weak;
tools/sched_ext/include/scx/common.bpf.h
80
void scx_bpf_cpuperf_set(s32 cpu, u32 perf) __ksym __weak;
tools/sched_ext/include/scx/common.bpf.h
83
int scx_bpf_cpu_node(s32 cpu) __ksym __weak;
tools/sched_ext/include/scx/common.bpf.h
92
bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
tools/sched_ext/include/scx/common.bpf.h
99
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
tools/sched_ext/include/scx/compat.bpf.h
182
#define __COMPAT_scx_bpf_cpu_node(cpu) \
tools/sched_ext/include/scx/compat.bpf.h
184
scx_bpf_cpu_node(cpu) : 0)
tools/sched_ext/include/scx/compat.bpf.h
211
static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)
tools/sched_ext/include/scx/compat.bpf.h
216
return scx_bpf_cpu_curr(cpu);
tools/sched_ext/include/scx/compat.bpf.h
218
rq = scx_bpf_cpu_rq(cpu);
tools/sched_ext/scx_central.bpf.c
133
static bool dispatch_to_cpu(s32 cpu)
tools/sched_ext/scx_central.bpf.c
154
if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) {
tools/sched_ext/scx_central.bpf.c
170
scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0);
tools/sched_ext/scx_central.bpf.c
172
if (cpu != central_cpu)
tools/sched_ext/scx_central.bpf.c
173
scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
tools/sched_ext/scx_central.bpf.c
182
void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev)
tools/sched_ext/scx_central.bpf.c
184
if (cpu == central_cpu) {
tools/sched_ext/scx_central.bpf.c
188
bpf_for(cpu, 0, nr_cpu_ids) {
tools/sched_ext/scx_central.bpf.c
195
gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
tools/sched_ext/scx_central.bpf.c
199
if (dispatch_to_cpu(cpu))
tools/sched_ext/scx_central.bpf.c
226
gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
tools/sched_ext/scx_central.bpf.c
240
s32 cpu = scx_bpf_task_cpu(p);
tools/sched_ext/scx_central.bpf.c
241
u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
tools/sched_ext/scx_central.bpf.c
248
s32 cpu = scx_bpf_task_cpu(p);
tools/sched_ext/scx_central.bpf.c
249
u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
tools/sched_ext/scx_central.bpf.c
268
s32 cpu = (nr_timers + i) % nr_cpu_ids;
tools/sched_ext/scx_central.bpf.c
271
if (cpu == central_cpu)
tools/sched_ext/scx_central.bpf.c
275
started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
tools/sched_ext/scx_central.bpf.c
282
scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu))
tools/sched_ext/scx_central.bpf.c
289
scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
tools/sched_ext/scx_cpu0.bpf.c
66
void BPF_STRUCT_OPS(cpu0_dispatch, s32 cpu, struct task_struct *prev)
tools/sched_ext/scx_cpu0.bpf.c
68
if (cpu == 0)
tools/sched_ext/scx_cpu0.c
50
int ret, cpu;
tools/sched_ext/scx_cpu0.c
56
for (cpu = 0; cpu < nr_cpus; cpu++)
tools/sched_ext/scx_cpu0.c
57
stats[idx] += cnts[idx][cpu];
tools/sched_ext/scx_flatcg.bpf.c
321
s32 cpu;
tools/sched_ext/scx_flatcg.bpf.c
323
cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
tools/sched_ext/scx_flatcg.bpf.c
328
return cpu;
tools/sched_ext/scx_flatcg.bpf.c
342
return cpu;
tools/sched_ext/scx_flatcg.bpf.c
727
void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
tools/sched_ext/scx_flatcg.c
115
int ret, cpu;
tools/sched_ext/scx_flatcg.c
121
for (cpu = 0; cpu < skel->rodata->nr_cpus; cpu++)
tools/sched_ext/scx_flatcg.c
122
stats[idx] += cnts[cpu];
tools/sched_ext/scx_pair.bpf.c
290
static int lookup_pairc_and_mask(s32 cpu, struct pair_ctx **pairc, u32 *mask)
tools/sched_ext/scx_pair.bpf.c
294
vptr = (u32 *)ARRAY_ELEM_PTR(pair_id, cpu, nr_cpu_ids);
tools/sched_ext/scx_pair.bpf.c
302
vptr = (u32 *)ARRAY_ELEM_PTR(in_pair_idx, cpu, nr_cpu_ids);
tools/sched_ext/scx_pair.bpf.c
312
static int try_dispatch(s32 cpu)
tools/sched_ext/scx_pair.bpf.c
325
ret = lookup_pairc_and_mask(cpu, &pairc, &in_pair_mask);
tools/sched_ext/scx_pair.bpf.c
328
cpu);
tools/sched_ext/scx_pair.bpf.c
486
s32 *pair = (s32 *)ARRAY_ELEM_PTR(pair_cpu, cpu, nr_cpu_ids);
tools/sched_ext/scx_pair.bpf.c
495
void BPF_STRUCT_OPS(pair_dispatch, s32 cpu, struct task_struct *prev)
tools/sched_ext/scx_pair.bpf.c
498
if (try_dispatch(cpu) != -EAGAIN)
tools/sched_ext/scx_pair.bpf.c
503
void BPF_STRUCT_OPS(pair_cpu_acquire, s32 cpu, struct scx_cpu_acquire_args *args)
tools/sched_ext/scx_pair.bpf.c
510
ret = lookup_pairc_and_mask(cpu, &pairc, &in_pair_mask);
tools/sched_ext/scx_pair.bpf.c
521
s32 *pair = (s32 *)ARRAY_ELEM_PTR(pair_cpu, cpu, nr_cpu_ids);
tools/sched_ext/scx_pair.bpf.c
530
void BPF_STRUCT_OPS(pair_cpu_release, s32 cpu, struct scx_cpu_release_args *args)
tools/sched_ext/scx_pair.bpf.c
537
ret = lookup_pairc_and_mask(cpu, &pairc, &in_pair_mask);
tools/sched_ext/scx_pair.bpf.c
550
s32 *pair = (s32 *)ARRAY_ELEM_PTR(pair_cpu, cpu, nr_cpu_ids);
tools/sched_ext/scx_qmap.bpf.c
138
s32 cpu;
tools/sched_ext/scx_qmap.bpf.c
144
cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
tools/sched_ext/scx_qmap.bpf.c
145
if (cpu >= 0)
tools/sched_ext/scx_qmap.bpf.c
146
return cpu;
tools/sched_ext/scx_qmap.bpf.c
166
s32 cpu;
tools/sched_ext/scx_qmap.bpf.c
171
cpu = pick_direct_dispatch_cpu(p, prev_cpu);
tools/sched_ext/scx_qmap.bpf.c
173
if (cpu >= 0) {
tools/sched_ext/scx_qmap.bpf.c
175
return cpu;
tools/sched_ext/scx_qmap.bpf.c
203
s32 cpu;
tools/sched_ext/scx_qmap.bpf.c
240
(cpu = pick_direct_dispatch_cpu(p, scx_bpf_task_cpu(p))) >= 0) {
tools/sched_ext/scx_qmap.bpf.c
242
scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, slice_ns, enq_flags);
tools/sched_ext/scx_qmap.bpf.c
253
s32 cpu;
tools/sched_ext/scx_qmap.bpf.c
256
cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
tools/sched_ext/scx_qmap.bpf.c
257
if (cpu >= 0)
tools/sched_ext/scx_qmap.bpf.c
258
scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
tools/sched_ext/scx_qmap.bpf.c
338
s32 cpu;
tools/sched_ext/scx_qmap.bpf.c
341
cpu = this_cpu;
tools/sched_ext/scx_qmap.bpf.c
343
cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
tools/sched_ext/scx_qmap.bpf.c
345
if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL_ON | cpu,
tools/sched_ext/scx_qmap.bpf.c
347
if (cpu == this_cpu) {
tools/sched_ext/scx_qmap.bpf.c
366
void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev)
tools/sched_ext/scx_qmap.bpf.c
559
void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args)
tools/sched_ext/scx_qmap.bpf.c
621
void BPF_STRUCT_OPS(qmap_dump_cpu, struct scx_dump_ctx *dctx, s32 cpu, bool idle)
tools/sched_ext/scx_qmap.bpf.c
628
if (!(cpuc = bpf_map_lookup_percpu_elem(&cpu_ctx_stor, &zero, cpu)))
tools/sched_ext/scx_qmap.bpf.c
679
s32 cpu;
tools/sched_ext/scx_qmap.bpf.c
687
bpf_for(cpu, 0, scx_bpf_nr_cpu_ids()) {
tools/sched_ext/scx_qmap.bpf.c
690
if (bpf_cpumask_test_cpu(cpu, online))
tools/sched_ext/scx_qmap.bpf.c
692
else if (bpf_cpumask_test_cpu(cpu, possible))
tools/sched_ext/scx_qmap.bpf.c
697
if ((cpu & 7) == 7) {
tools/sched_ext/scx_qmap.bpf.c
711
void BPF_STRUCT_OPS(qmap_cpu_online, s32 cpu)
tools/sched_ext/scx_qmap.bpf.c
714
bpf_printk("CPU %d coming online", cpu);
tools/sched_ext/scx_qmap.bpf.c
720
void BPF_STRUCT_OPS(qmap_cpu_offline, s32 cpu)
tools/sched_ext/scx_qmap.bpf.c
723
bpf_printk("CPU %d going offline", cpu);
tools/sched_ext/scx_sdt.bpf.c
610
s32 cpu;
tools/sched_ext/scx_sdt.bpf.c
618
cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
tools/sched_ext/scx_sdt.bpf.c
626
return cpu;
tools/sched_ext/scx_sdt.bpf.c
644
void BPF_STRUCT_OPS(sdt_dispatch, s32 cpu, struct task_struct *prev)
tools/sched_ext/scx_simple.bpf.c
58
s32 cpu;
tools/sched_ext/scx_simple.bpf.c
60
cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
tools/sched_ext/scx_simple.bpf.c
66
return cpu;
tools/sched_ext/scx_simple.bpf.c
90
void BPF_STRUCT_OPS(simple_dispatch, s32 cpu, struct task_struct *prev)
tools/sched_ext/scx_simple.c
52
int ret, cpu;
tools/sched_ext/scx_simple.c
58
for (cpu = 0; cpu < nr_cpus; cpu++)
tools/sched_ext/scx_simple.c
59
stats[idx] += cnts[idx][cpu];
tools/sched_ext/scx_userland.bpf.c
147
s32 cpu;
tools/sched_ext/scx_userland.bpf.c
162
cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
tools/sched_ext/scx_userland.bpf.c
163
if (cpu >= 0) {
tools/sched_ext/scx_userland.bpf.c
165
return cpu;
tools/sched_ext/scx_userland.bpf.c
227
void BPF_STRUCT_OPS(userland_dispatch, s32 cpu, struct task_struct *prev)
tools/sched_ext/scx_userland.bpf.c
258
void BPF_STRUCT_OPS(userland_update_idle, s32 cpu, bool idle)
tools/sched_ext/scx_userland.bpf.c
298
scx_bpf_kick_cpu(cpu, 0);
tools/testing/radix-tree/test.h
58
int radix_tree_cpu_dead(unsigned int cpu);
tools/testing/selftests/arm64/fp/fp-stress.c
311
static void start_fpsimd(struct child_data *child, int cpu, int copy)
tools/testing/selftests/arm64/fp/fp-stress.c
315
ret = asprintf(&child->name, "FPSIMD-%d-%d", cpu, copy);
tools/testing/selftests/arm64/fp/fp-stress.c
324
static void start_kernel(struct child_data *child, int cpu, int copy)
tools/testing/selftests/arm64/fp/fp-stress.c
328
ret = asprintf(&child->name, "KERNEL-%d-%d", cpu, copy);
tools/testing/selftests/arm64/fp/fp-stress.c
337
static void start_sve(struct child_data *child, int vl, int cpu)
tools/testing/selftests/arm64/fp/fp-stress.c
345
ret = asprintf(&child->name, "SVE-VL-%d-%d", vl, cpu);
tools/testing/selftests/arm64/fp/fp-stress.c
354
static void start_ssve(struct child_data *child, int vl, int cpu)
tools/testing/selftests/arm64/fp/fp-stress.c
358
ret = asprintf(&child->name, "SSVE-VL-%d-%d", vl, cpu);
tools/testing/selftests/arm64/fp/fp-stress.c
371
static void start_za(struct child_data *child, int vl, int cpu)
tools/testing/selftests/arm64/fp/fp-stress.c
379
ret = asprintf(&child->name, "ZA-VL-%d-%d", vl, cpu);
tools/testing/selftests/arm64/fp/fp-stress.c
388
static void start_zt(struct child_data *child, int cpu)
tools/testing/selftests/arm64/fp/fp-stress.c
392
ret = asprintf(&child->name, "ZT-%d", cpu);
tools/testing/selftests/bpf/bench.c
456
static void set_thread_affinity(pthread_t thread, int cpu)
tools/testing/selftests/bpf/bench.c
462
CPU_SET(cpu, &cpuset);
tools/testing/selftests/bpf/bench.c
466
cpu, -err);
tools/testing/selftests/bpf/benchs/bench_ringbufs.c
431
perfbuf_process_sample_raw(void *input_ctx, int cpu,
tools/testing/selftests/bpf/benchs/bench_ringbufs.c
509
int cpu;
tools/testing/selftests/bpf/bpf_arena_alloc.h
23
__u32 cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/bpf_arena_alloc.h
24
void __arena *page = page_frag_cur_page[cpu];
tools/testing/selftests/bpf/bpf_arena_alloc.h
25
int __arena *cur_offset = &page_frag_cur_offset[cpu];
tools/testing/selftests/bpf/bpf_arena_alloc.h
37
page_frag_cur_page[cpu] = page;
tools/testing/selftests/bpf/bpf_util.h
67
#define bpf_percpu(name, cpu) name[(cpu)].v
tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
116
cpu = 0;
tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
119
cpu = 0;
tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
18
static long cpu;
tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
34
CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
tools/testing/selftests/bpf/prog_tests/btf.c
5348
int cpu;
tools/testing/selftests/bpf/prog_tests/btf.c
5353
for (cpu = 0; cpu < num_cpus; cpu++) {
tools/testing/selftests/bpf/prog_tests/btf.c
5354
v->ui32 = i + cpu;
tools/testing/selftests/bpf/prog_tests/btf.c
5363
v->si8_4[0][0] = (cpu + i) & 0xff;
tools/testing/selftests/bpf/prog_tests/btf.c
5364
v->si8_4[0][1] = (cpu + i + 1) & 0xff;
tools/testing/selftests/bpf/prog_tests/btf.c
5365
v->si8_4[1][0] = (cpu + i + 2) & 0xff;
tools/testing/selftests/bpf/prog_tests/btf.c
5366
v->si8_4[1][1] = (cpu + i + 3) & 0xff;
tools/testing/selftests/bpf/prog_tests/btf.c
5375
for (cpu = 0; cpu < num_cpus; cpu++) {
tools/testing/selftests/bpf/prog_tests/btf.c
5390
int cpu, void *mapv)
tools/testing/selftests/bpf/prog_tests/btf.c
5402
percpu_map ? cpu : next_key,
tools/testing/selftests/bpf/prog_tests/btf.c
5427
percpu_map ? cpu : next_key,
tools/testing/selftests/bpf/prog_tests/btf.c
5557
int cpu;
tools/testing/selftests/bpf/prog_tests/btf.c
5563
for (cpu = 0; cpu < num_cpus; cpu++) {
tools/testing/selftests/bpf/prog_tests/btf.c
5575
if (cpu == 0) {
tools/testing/selftests/bpf/prog_tests/btf.c
5596
cpu, cmapv);
tools/testing/selftests/bpf/prog_tests/for_each.c
132
ASSERT_EQ(skel->bss->cpu + 1, skel->bss->percpu_val, "percpu_val");
tools/testing/selftests/bpf/prog_tests/for_each.c
69
ASSERT_LT(skel->bss->cpu, num_cpus, "num_cpus");
tools/testing/selftests/bpf/prog_tests/for_each.c
72
ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
36
int cpu;
tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
53
for (cpu = 0; cpu < cpu_cnt; cpu++) {
tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
54
pfd_array[cpu] = syscall(__NR_perf_event_open, &attr,
tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
55
-1, cpu, -1, PERF_FLAG_FD_CLOEXEC);
tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
56
if (pfd_array[cpu] < 0)
tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
60
return cpu == 0;
tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
65
int cpu, fd;
tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
67
for (cpu = 0; cpu < cpu_cnt; cpu++) {
tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
68
fd = pfd_array[cpu];
tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
23
static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
tools/testing/selftests/bpf/prog_tests/kfree_skb.c
20
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
tools/testing/selftests/bpf/prog_tests/map_kptr.c
17
int key = 0, ret, cpu;
tools/testing/selftests/bpf/prog_tests/map_kptr.c
43
cpu = libbpf_num_possible_cpus();
tools/testing/selftests/bpf/prog_tests/map_kptr.c
44
if (!ASSERT_GT(cpu, 0, "libbpf_num_possible_cpus"))
tools/testing/selftests/bpf/prog_tests/map_kptr.c
47
pbuf = calloc(cpu, sizeof(buf));
tools/testing/selftests/bpf/prog_tests/map_kptr.c
60
&key, sizeof(key), pbuf, cpu * sizeof(buf), 0);
tools/testing/selftests/bpf/prog_tests/netcnt.c
20
int cpu, nproc;
tools/testing/selftests/bpf/prog_tests/netcnt.c
62
for (cpu = 0; cpu < nproc; cpu++) {
tools/testing/selftests/bpf/prog_tests/netcnt.c
63
ASSERT_LE(percpu_netcnt[cpu].packets, MAX_PERCPU_PACKETS, "MAX_PERCPU_PACKETS");
tools/testing/selftests/bpf/prog_tests/netcnt.c
65
packets += percpu_netcnt[cpu].packets;
tools/testing/selftests/bpf/prog_tests/netcnt.c
66
bytes += percpu_netcnt[cpu].bytes;
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
125
int i, j, cpu, map_fd, err;
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
187
for (cpu = 0; cpu < nr_cpus; cpu++) {
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
201
flags = (u64)cpu << 32 | BPF_F_CPU;
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
214
if (!ASSERT_EQ(values[0], j != cpu ? 0 : value,
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
230
for (cpu = 0; cpu < nr_cpus; cpu++) {
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
247
batch_opts.elem_flags = (u64)cpu << 32 | BPF_F_CPU;
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
286
if (!ASSERT_EQ(v, j != cpu ? 0 : value,
tools/testing/selftests/bpf/prog_tests/perf_buffer.c
17
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
tools/testing/selftests/bpf/prog_tests/perf_buffer.c
22
if (cpu_data != cpu)
tools/testing/selftests/bpf/prog_tests/perf_buffer.c
23
CHECK(cpu_data != cpu, "check_cpu_data",
tools/testing/selftests/bpf/prog_tests/perf_buffer.c
24
"cpu_data %d != cpu %d\n", cpu_data, cpu);
tools/testing/selftests/bpf/prog_tests/perf_buffer.c
26
CPU_SET(cpu, cpu_seen);
tools/testing/selftests/bpf/prog_tests/perf_buffer.c
29
int trigger_on_cpu(int cpu)
tools/testing/selftests/bpf/prog_tests/perf_buffer.c
35
CPU_SET(cpu, &cpu_set);
tools/testing/selftests/bpf/prog_tests/perf_buffer.c
38
if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", cpu, err))
tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
61
opts.cpu = i;
tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
70
opts.cpu = 0xffffffff;
tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
76
opts.cpu = 1;
tools/testing/selftests/bpf/prog_tests/test_overhead.c
50
int cpu = 0;
tools/testing/selftests/bpf/prog_tests/test_overhead.c
53
CPU_SET(cpu, &cpuset);
tools/testing/selftests/bpf/prog_tests/timer.c
13
static int perf_event_open(__u32 type, __u64 config, int pid, int cpu)
tools/testing/selftests/bpf/prog_tests/timer.c
22
return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
tools/testing/selftests/bpf/prog_tests/timer_lockup.c
12
static long cpu;
tools/testing/selftests/bpf/prog_tests/timer_lockup.c
30
CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
33
static void process_perfbuf(void *ctx, int cpu, void *data, __u32 len)
tools/testing/selftests/bpf/prog_tests/xdp_attach.c
95
static void on_xdp_errmsg(void *ctx, int cpu, void *data, __u32 size)
tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
20
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
tools/testing/selftests/bpf/progs/bloom_filter_bench.c
65
__u32 cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/bloom_filter_bench.c
67
percpu_stats[cpu & 255].stats[key]++;
tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
112
static inline u32 encode_tail(int cpu, int idx)
tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
116
tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
124
u32 cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
127
return &qnodes[cpu][idx].mcs;
tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c
33
u32 cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c
34
u32 key = cpu + MAX_ENTRIES;
tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c
38
percpu_time[cpu & 255] = bpf_ktime_get_ns() - start_time;
tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c
53
u32 cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c
57
times_index = percpu_times_index[cpu & CPU_MASK] % NR_SLOTS;
tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c
60
percpu_times[cpu & CPU_MASK][times_index] = bpf_ktime_get_ns() - start_time;
tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c
61
percpu_times_index[cpu & CPU_MASK] += 1;
tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c
61
__u32 cpu;
tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c
63
cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c
65
cgrp->self.rstat_cpu, cpu);
tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c
41
struct cgroup_subsys_state *css, int cpu) __ksym;
tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c
84
int BPF_PROG(flusher, struct cgroup *cgrp, struct cgroup *parent, int cpu)
tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c
95
&cg_id, cpu);
tools/testing/selftests/bpf/progs/cpumask_common.h
38
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
tools/testing/selftests/bpf/progs/cpumask_common.h
39
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
tools/testing/selftests/bpf/progs/cpumask_common.h
40
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym __weak;
tools/testing/selftests/bpf/progs/cpumask_common.h
41
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
tools/testing/selftests/bpf/progs/cpumask_common.h
42
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
tools/testing/selftests/bpf/progs/cpumask_success.c
408
int cpu;
tools/testing/selftests/bpf/progs/cpumask_success.c
420
cpu = bpf_cpumask_any_distribute(cast(mask1));
tools/testing/selftests/bpf/progs/cpumask_success.c
421
if (cpu != 0) {
tools/testing/selftests/bpf/progs/cpumask_success.c
426
cpu = bpf_cpumask_any_distribute(cast(dst2));
tools/testing/selftests/bpf/progs/cpumask_success.c
427
if (cpu < nr_cpus) {
tools/testing/selftests/bpf/progs/cpumask_success.c
438
cpu = bpf_cpumask_any_distribute(cast(dst2));
tools/testing/selftests/bpf/progs/cpumask_success.c
439
if (cpu > 1) {
tools/testing/selftests/bpf/progs/cpumask_success.c
444
cpu = bpf_cpumask_any_and_distribute(cast(mask1), cast(mask2));
tools/testing/selftests/bpf/progs/cpumask_success.c
445
if (cpu < nr_cpus) {
tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
46
__u32 cpu = 0;
tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
53
cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
51
__u32 cpu = 0;
tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
64
cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/nested_trust_common.h
9
bool bpf_cpumask_test_cpu(unsigned int cpu, const struct cpumask *cpumask) __ksym;
tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c
15
__u32 cpu;
tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c
17
cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c
18
active = (void *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
tools/testing/selftests/bpf/progs/test_d_path_check_types.c
19
u32 cpu;
tools/testing/selftests/bpf/progs/test_d_path_check_types.c
21
cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/test_d_path_check_types.c
22
active = (void *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
27
__u32 cpu;
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
32
cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
35
rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, cpu);
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
37
out__rq_cpu = rq->cpu;
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
38
active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
44
out__cpu_0_rq_cpu = rq->cpu;
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
48
out__this_rq_cpu = rq->cpu;
tools/testing/selftests/bpf/progs/test_ksyms_btf_null_check.c
16
__u32 cpu;
tools/testing/selftests/bpf/progs/test_ksyms_btf_null_check.c
18
cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/test_ksyms_btf_null_check.c
19
rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, cpu);
tools/testing/selftests/bpf/progs/test_ksyms_btf_null_check.c
20
active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
tools/testing/selftests/bpf/progs/test_ksyms_btf_null_check.c
25
*(volatile int *)(&rq->cpu);
tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
14
__u32 cpu;
tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
16
cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
17
active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
tools/testing/selftests/bpf/progs/test_ksyms_weak.c
44
out__existing_typed = rq->cpu;
tools/testing/selftests/bpf/progs/test_perf_buffer.c
26
int cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/test_perf_buffer.c
37
&cpu, sizeof(cpu));
tools/testing/selftests/bpf/progs/test_tcp_estats.c
125
int cpu;
tools/testing/selftests/bpf/progs/timer.c
371
__s32 cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/timer.c
373
if (cpu != pinned_cpu)
tools/testing/selftests/bpf/progs/trigger_bench.c
23
int cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/trigger_bench.c
25
__sync_add_and_fetch(&hits[cpu & CPU_MASK].value, 1);
tools/testing/selftests/bpf/progs/verifier_map_in_map.c
227
__u32 cpu = bpf_get_smp_processor_id();
tools/testing/selftests/bpf/progs/verifier_map_in_map.c
228
__u32 rb_slot = cpu & 1;
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
100
if (cpu & 1)
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
104
switch (cpu % 3) {
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
143
int cpu = smp_processor_id();
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
148
if (cpu) {
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
152
struct rqsl_lock_pair locks = rqsl_get_lock_pair(cpu);
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
194
int cpu = smp_processor_id();
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
199
if (!cpu || READ_ONCE(pause))
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
202
locks = rqsl_get_lock_pair(cpu);
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
307
int cpu, i;
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
311
for_each_online_cpu(cpu) {
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
312
struct rqsl_cpu_hist *hist = per_cpu_ptr(&rqsl_cpu_hists, cpu);
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
347
cpu, total, norm_total, nmi_total,
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
357
cpu, total, norm_total, nmi_total,
tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
91
static struct rqsl_lock_pair rqsl_get_lock_pair(int cpu)
tools/testing/selftests/bpf/test_tcpnotify_user.c
32
static void dummyfn(void *ctx, int cpu, void *data, __u32 size)
tools/testing/selftests/breakpoints/step_after_suspend_test.c
194
int cpu;
tools/testing/selftests/breakpoints/step_after_suspend_test.c
214
for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
tools/testing/selftests/breakpoints/step_after_suspend_test.c
215
if (!CPU_ISSET(cpu, &available_cpus))
tools/testing/selftests/breakpoints/step_after_suspend_test.c
224
for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
tools/testing/selftests/breakpoints/step_after_suspend_test.c
227
if (!CPU_ISSET(cpu, &available_cpus))
tools/testing/selftests/breakpoints/step_after_suspend_test.c
230
test_success = run_test(cpu);
tools/testing/selftests/breakpoints/step_after_suspend_test.c
233
ksft_test_result_pass("CPU %d\n", cpu);
tools/testing/selftests/breakpoints/step_after_suspend_test.c
236
ksft_test_result_skip("CPU %d\n", cpu);
tools/testing/selftests/breakpoints/step_after_suspend_test.c
239
ksft_test_result_fail("CPU %d\n", cpu);
tools/testing/selftests/breakpoints/step_after_suspend_test.c
24
void child(int cpu)
tools/testing/selftests/breakpoints/step_after_suspend_test.c
29
CPU_SET(cpu, &set);
tools/testing/selftests/breakpoints/step_after_suspend_test.c
50
int run_test(int cpu)
tools/testing/selftests/breakpoints/step_after_suspend_test.c
61
child(cpu);
tools/testing/selftests/drivers/net/hw/toeplitz.c
105
int cpu;
tools/testing/selftests/drivers/net/hw/toeplitz.c
139
static void verify_rss(uint32_t rx_hash, int cpu)
tools/testing/selftests/drivers/net/hw/toeplitz.c
149
if (rx_irq_cpus[queue] != cpu) {
tools/testing/selftests/drivers/net/hw/toeplitz.c
150
log_verbose(". error: rss cpu mismatch (%d)", cpu);
tools/testing/selftests/drivers/net/hw/toeplitz.c
155
static void verify_rps(uint64_t rx_hash, int cpu)
tools/testing/selftests/drivers/net/hw/toeplitz.c
160
if (rps_silo_to_cpu[silo] != cpu) {
tools/testing/selftests/drivers/net/hw/toeplitz.c
161
log_verbose(". error: rps cpu mismatch (%d)", cpu);
tools/testing/selftests/drivers/net/hw/toeplitz.c
166
static void log_rxhash(int cpu, uint32_t rx_hash,
tools/testing/selftests/drivers/net/hw/toeplitz.c
178
cpu, rx_hash, saddr, daddr,
tools/testing/selftests/drivers/net/hw/toeplitz.c
183
static void verify_rxhash(const char *pkt, uint32_t rx_hash, int cpu)
tools/testing/selftests/drivers/net/hw/toeplitz.c
202
log_rxhash(cpu, rx_hash, addrs, addr_len);
tools/testing/selftests/drivers/net/hw/toeplitz.c
212
verify_rss(rx_hash, cpu);
tools/testing/selftests/drivers/net/hw/toeplitz.c
214
verify_rps(rx_hash, cpu);
tools/testing/selftests/drivers/net/hw/toeplitz.c
224
ring->cpu);
tools/testing/selftests/drivers/net/hw/toeplitz.c
434
rings[i].cpu = i;
tools/testing/selftests/intel_pstate/aperf.c
24
unsigned int i, cpu, fd;
tools/testing/selftests/intel_pstate/aperf.c
39
cpu = strtol(argv[1], (char **) NULL, 10);
tools/testing/selftests/intel_pstate/aperf.c
46
sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu);
tools/testing/selftests/intel_pstate/aperf.c
50
printf("/dev/cpu/%d/msr: %s\n", cpu, strerror(errno));
tools/testing/selftests/intel_pstate/aperf.c
55
CPU_SET(cpu, &cpuset);
tools/testing/selftests/intel_pstate/msr.c
15
int cpu, fd;
tools/testing/selftests/intel_pstate/msr.c
23
cpu = strtol(argv[1], (char **) NULL, 10);
tools/testing/selftests/intel_pstate/msr.c
28
sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu);
tools/testing/selftests/kvm/arm64/arch_timer.c
108
uint32_t cpu = guest_get_vcpuid();
tools/testing/selftests/kvm/arm64/arch_timer.c
109
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
tools/testing/selftests/kvm/arm64/arch_timer.c
143
uint32_t cpu = guest_get_vcpuid();
tools/testing/selftests/kvm/arm64/arch_timer.c
144
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
tools/testing/selftests/kvm/arm64/vgic_init.c
372
#define KVM_VGIC_V2_ATTR(offset, cpu) \
tools/testing/selftests/kvm/arm64/vgic_init.c
374
FIELD_PREP(KVM_DEV_ARM_VGIC_CPUID_MASK, cpu))
tools/testing/selftests/kvm/include/kvm_util.h
1089
int __pin_task_to_cpu(pthread_t task, int cpu);
tools/testing/selftests/kvm/include/kvm_util.h
1091
static inline void pin_task_to_cpu(pthread_t task, int cpu)
tools/testing/selftests/kvm/include/kvm_util.h
1095
r = __pin_task_to_cpu(task, cpu);
tools/testing/selftests/kvm/include/kvm_util.h
1096
TEST_ASSERT(!r, "Failed to set thread affinity to pCPU '%u'", cpu);
tools/testing/selftests/kvm/include/kvm_util.h
1101
int cpu = sched_getcpu();
tools/testing/selftests/kvm/include/kvm_util.h
1103
pin_task_to_cpu(task, cpu);
tools/testing/selftests/kvm/include/kvm_util.h
1104
return cpu;
tools/testing/selftests/kvm/include/kvm_util.h
1107
static inline void pin_self_to_cpu(int cpu)
tools/testing/selftests/kvm/include/kvm_util.h
1109
pin_task_to_cpu(pthread_self(), cpu);
tools/testing/selftests/kvm/include/x86/sev.h
57
struct kvm_vcpu **cpu);
tools/testing/selftests/kvm/lib/arm64/gic.c
20
static void gic_cpu_init(unsigned int cpu)
tools/testing/selftests/kvm/lib/arm64/gic.c
22
gic_common_ops->gic_cpu_init(cpu);
tools/testing/selftests/kvm/lib/arm64/gic.c
53
uint32_t cpu = guest_get_vcpuid();
tools/testing/selftests/kvm/lib/arm64/gic.c
59
gic_cpu_init(cpu);
tools/testing/selftests/kvm/lib/arm64/gic_private.h
12
void (*gic_cpu_init)(unsigned int cpu);
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
172
uint32_t cpu = guest_get_vcpuid();
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
193
cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu;
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
236
uint32_t cpu = guest_get_vcpuid();
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
239
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
245
uint32_t cpu = guest_get_vcpuid();
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
248
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
310
static void gicv3_cpu_init(unsigned int cpu)
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
317
GUEST_ASSERT(cpu < gicv3_data.nr_cpus);
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
319
redist_base_cpu = gicr_base_cpu(cpu);
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
324
GUEST_ASSERT_EQ(GICR_TYPER_CPU_NUMBER(typer), cpu);
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
341
gicv3_gicr_wait_for_rwp(cpu);
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
53
static inline volatile void *gicr_base_cpu(uint32_t cpu)
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
56
return GICR_BASE_GVA + cpu * SZ_64K * 2;
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
59
static void gicv3_gicr_wait_for_rwp(uint32_t cpu)
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
63
while (readl(gicr_base_cpu(cpu) + GICR_CTLR) & GICR_CTLR_RWP) {
tools/testing/selftests/kvm/lib/kvm_util.c
629
int __pin_task_to_cpu(pthread_t task, int cpu)
tools/testing/selftests/kvm/lib/kvm_util.c
634
CPU_SET(cpu, &cpuset);
tools/testing/selftests/kvm/lib/kvm_util.c
669
char *cpu, *cpu_list;
tools/testing/selftests/kvm/lib/kvm_util.c
679
cpu = strtok(cpu_list, delim);
tools/testing/selftests/kvm/lib/kvm_util.c
683
TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'", i);
tools/testing/selftests/kvm/lib/kvm_util.c
684
vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask);
tools/testing/selftests/kvm/lib/kvm_util.c
685
cpu = strtok(NULL, delim);
tools/testing/selftests/kvm/lib/kvm_util.c
689
if (cpu) {
tools/testing/selftests/kvm/lib/kvm_util.c
690
pin_self_to_cpu(parse_pcpu(cpu, &allowed_mask));
tools/testing/selftests/kvm/lib/kvm_util.c
691
cpu = strtok(NULL, delim);
tools/testing/selftests/kvm/lib/kvm_util.c
694
TEST_ASSERT(!cpu, "pCPU list contains trailing garbage characters '%s'", cpu);
tools/testing/selftests/kvm/lib/x86/sev.c
162
struct kvm_vcpu **cpu)
tools/testing/selftests/kvm/lib/x86/sev.c
172
*cpu = cpus[0];
tools/testing/selftests/kvm/loongarch/arch_timer.c
115
static void guest_test_emulate_timer(uint32_t cpu)
tools/testing/selftests/kvm/loongarch/arch_timer.c
119
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
tools/testing/selftests/kvm/loongarch/arch_timer.c
139
static void guest_time_count_test(uint32_t cpu)
tools/testing/selftests/kvm/loongarch/arch_timer.c
168
uint32_t cpu = guest_get_vcpuid();
tools/testing/selftests/kvm/loongarch/arch_timer.c
171
guest_time_count_test(cpu);
tools/testing/selftests/kvm/loongarch/arch_timer.c
175
guest_test_period_timer(cpu);
tools/testing/selftests/kvm/loongarch/arch_timer.c
176
guest_test_oneshot_timer(cpu);
tools/testing/selftests/kvm/loongarch/arch_timer.c
177
guest_test_emulate_timer(cpu);
tools/testing/selftests/kvm/loongarch/arch_timer.c
30
uint32_t cpu = guest_get_vcpuid();
tools/testing/selftests/kvm/loongarch/arch_timer.c
32
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
tools/testing/selftests/kvm/loongarch/arch_timer.c
65
static void guest_test_period_timer(uint32_t cpu)
tools/testing/selftests/kvm/loongarch/arch_timer.c
69
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
tools/testing/selftests/kvm/loongarch/arch_timer.c
89
static void guest_test_oneshot_timer(uint32_t cpu)
tools/testing/selftests/kvm/loongarch/arch_timer.c
93
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
tools/testing/selftests/kvm/riscv/arch_timer.c
22
uint32_t cpu = guest_get_vcpuid();
tools/testing/selftests/kvm/riscv/arch_timer.c
23
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
tools/testing/selftests/kvm/riscv/arch_timer.c
69
uint32_t cpu = guest_get_vcpuid();
tools/testing/selftests/kvm/riscv/arch_timer.c
70
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
tools/testing/selftests/kvm/rseq_test.c
105
CPU_CLR(cpu, &allowed_mask);
tools/testing/selftests/kvm/rseq_test.c
212
u32 cpu, rseq_cpu;
tools/testing/selftests/kvm/rseq_test.c
287
r = sys_getcpu(&cpu, NULL);
tools/testing/selftests/kvm/rseq_test.c
294
TEST_ASSERT(rseq_cpu == cpu,
tools/testing/selftests/kvm/rseq_test.c
295
"rseq CPU = %d, sched CPU = %d", rseq_cpu, cpu);
tools/testing/selftests/kvm/rseq_test.c
51
static int next_cpu(int cpu)
tools/testing/selftests/kvm/rseq_test.c
62
cpu++;
tools/testing/selftests/kvm/rseq_test.c
63
if (cpu > max_cpu) {
tools/testing/selftests/kvm/rseq_test.c
64
cpu = min_cpu;
tools/testing/selftests/kvm/rseq_test.c
65
TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
tools/testing/selftests/kvm/rseq_test.c
66
"Min CPU = %d must always be usable", cpu);
tools/testing/selftests/kvm/rseq_test.c
69
} while (!CPU_ISSET(cpu, &possible_mask));
tools/testing/selftests/kvm/rseq_test.c
71
return cpu;
tools/testing/selftests/kvm/rseq_test.c
78
int r, i, cpu;
tools/testing/selftests/kvm/rseq_test.c
82
for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
tools/testing/selftests/kvm/rseq_test.c
83
CPU_SET(cpu, &allowed_mask);
tools/testing/selftests/kvm/steal_time.c
131
static void guest_code(int cpu)
tools/testing/selftests/kvm/steal_time.c
145
GUEST_ASSERT_EQ(status, (ulong)st_gva[cpu]);
tools/testing/selftests/kvm/steal_time.c
151
WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
tools/testing/selftests/kvm/steal_time.c
155
WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
tools/testing/selftests/kvm/steal_time.c
243
static void guest_code(int cpu)
tools/testing/selftests/kvm/steal_time.c
245
struct sta_struct *st = st_gva[cpu];
tools/testing/selftests/kvm/steal_time.c
253
sta_set_shmem(st_gpa[cpu], 0);
tools/testing/selftests/kvm/steal_time.c
257
WRITE_ONCE(guest_stolen_time[cpu], st->steal);
tools/testing/selftests/kvm/steal_time.c
264
WRITE_ONCE(guest_stolen_time[cpu], st->steal);
tools/testing/selftests/kvm/steal_time.c
325
static void guest_code(int cpu)
tools/testing/selftests/kvm/steal_time.c
328
struct kvm_steal_time *st = st_gva[cpu];
tools/testing/selftests/kvm/steal_time.c
334
WRITE_ONCE(guest_stolen_time[cpu], st->steal);
tools/testing/selftests/kvm/steal_time.c
341
WRITE_ONCE(guest_stolen_time[cpu], st->steal);
tools/testing/selftests/kvm/steal_time.c
42
static void guest_code(int cpu)
tools/testing/selftests/kvm/steal_time.c
44
struct kvm_steal_time *st = st_gva[cpu];
tools/testing/selftests/kvm/steal_time.c
47
GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));
tools/testing/selftests/kvm/steal_time.c
53
WRITE_ONCE(guest_stolen_time[cpu], st->steal);
tools/testing/selftests/kvm/steal_time.c
60
WRITE_ONCE(guest_stolen_time[cpu], st->steal);
tools/testing/selftests/kvm/x86/aperfmperf_test.c
129
int msr_fd, cpu, i;
tools/testing/selftests/kvm/x86/aperfmperf_test.c
137
cpu = pin_self_to_any_cpu();
tools/testing/selftests/kvm/x86/aperfmperf_test.c
139
msr_fd = open_dev_msr(cpu);
tools/testing/selftests/kvm/x86/aperfmperf_test.c
30
static int open_dev_msr(int cpu)
tools/testing/selftests/kvm/x86/aperfmperf_test.c
34
snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
tools/testing/selftests/kvm/x86/tsc_scaling_sync.c
100
for (cpu = 0; cpu < NR_TEST_VCPUS; cpu++) {
tools/testing/selftests/kvm/x86/tsc_scaling_sync.c
102
pthread_join(cpu_threads[cpu], &this_cpu_failures);
tools/testing/selftests/kvm/x86/tsc_scaling_sync.c
95
unsigned long cpu;
tools/testing/selftests/kvm/x86/tsc_scaling_sync.c
96
for (cpu = 0; cpu < NR_TEST_VCPUS; cpu++)
tools/testing/selftests/kvm/x86/tsc_scaling_sync.c
97
pthread_create(&cpu_threads[cpu], NULL, run_vcpu, (void *)cpu);
tools/testing/selftests/mm/uffd-common.c
320
unsigned long nr, cpu;
tools/testing/selftests/mm/uffd-common.c
395
for (cpu = 0; cpu < gopts->nr_parallel; cpu++)
tools/testing/selftests/mm/uffd-common.c
396
if (pipe2(&gopts->pipefd[cpu * 2], O_CLOEXEC | O_NONBLOCK))
tools/testing/selftests/mm/uffd-common.c
533
unsigned long cpu = args->cpu;
tools/testing/selftests/mm/uffd-common.c
545
pollfd[1].fd = gopts->pipefd[cpu*2];
tools/testing/selftests/mm/uffd-common.h
73
int cpu;
tools/testing/selftests/mm/uffd-stress.c
104
unsigned long cpu = (unsigned long) args->cpu;
tools/testing/selftests/mm/uffd-stress.c
111
page_nr += cpu * gopts->nr_pages_per_cpu;
tools/testing/selftests/mm/uffd-stress.c
163
unsigned long cpu = (unsigned long) args->cpu;
tools/testing/selftests/mm/uffd-stress.c
166
start_nr = cpu * gopts->nr_pages_per_cpu;
tools/testing/selftests/mm/uffd-stress.c
167
end_nr = (cpu+1) * gopts->nr_pages_per_cpu;
tools/testing/selftests/mm/uffd-stress.c
195
unsigned long cpu;
tools/testing/selftests/mm/uffd-stress.c
202
for (cpu = 0; cpu < gopts->nr_parallel; cpu++) {
tools/testing/selftests/mm/uffd-stress.c
203
if (pthread_create(&locking_threads[cpu], &attr,
tools/testing/selftests/mm/uffd-stress.c
204
locking_thread, (void *)&args[cpu]))
tools/testing/selftests/mm/uffd-stress.c
207
if (pthread_create(&uffd_threads[cpu],
tools/testing/selftests/mm/uffd-stress.c
210
(void *) &args[cpu]))
tools/testing/selftests/mm/uffd-stress.c
213
if (pthread_create(&uffd_threads[cpu], &attr,
tools/testing/selftests/mm/uffd-stress.c
215
(void *)&args[cpu]))
tools/testing/selftests/mm/uffd-stress.c
219
if (pthread_create(&background_threads[cpu], &attr,
tools/testing/selftests/mm/uffd-stress.c
220
background_thread, (void *)&args[cpu]))
tools/testing/selftests/mm/uffd-stress.c
223
for (cpu = 0; cpu < gopts->nr_parallel; cpu++)
tools/testing/selftests/mm/uffd-stress.c
224
if (pthread_join(background_threads[cpu], NULL))
tools/testing/selftests/mm/uffd-stress.c
239
for (cpu = 0; cpu < gopts->nr_parallel; cpu++)
tools/testing/selftests/mm/uffd-stress.c
240
if (pthread_join(locking_threads[cpu], NULL))
tools/testing/selftests/mm/uffd-stress.c
243
for (cpu = 0; cpu < gopts->nr_parallel; cpu++) {
tools/testing/selftests/mm/uffd-stress.c
246
if (write(gopts->pipefd[cpu*2+1], &c, 1) != 1)
tools/testing/selftests/mm/uffd-stress.c
248
if (pthread_join(uffd_threads[cpu],
tools/testing/selftests/mm/uffd-stress.c
249
(void *)&args[cpu]))
tools/testing/selftests/mm/uffd-stress.c
252
if (pthread_cancel(uffd_threads[cpu]))
tools/testing/selftests/mm/uffd-stress.c
254
if (pthread_join(uffd_threads[cpu], NULL))
tools/testing/selftests/mm/uffd-stress.c
91
args[i].cpu = i;
tools/testing/selftests/mqueue/mq_perf_tests.c
547
int i, cpu, rc;
tools/testing/selftests/mqueue/mq_perf_tests.c
586
cpu = atoi(option);
tools/testing/selftests/mqueue/mq_perf_tests.c
587
if (cpu >= cpus_online)
tools/testing/selftests/mqueue/mq_perf_tests.c
590
cpu);
tools/testing/selftests/mqueue/mq_perf_tests.c
592
cpus_to_pin[num_cpus_to_pin++] = cpu;
tools/testing/selftests/mqueue/mq_perf_tests.c
598
for (cpu = 0; cpu < num_cpus_to_pin; cpu++) {
tools/testing/selftests/mqueue/mq_perf_tests.c
599
if (CPU_ISSET_S(cpus_to_pin[cpu], cpu_set_size,
tools/testing/selftests/mqueue/mq_perf_tests.c
605
CPU_SET_S(cpus_to_pin[cpu],
tools/testing/selftests/mqueue/mq_perf_tests.c
690
for (cpu = 1; cpu < num_cpus_to_pin; cpu++)
tools/testing/selftests/mqueue/mq_perf_tests.c
691
printf(",%d", cpus_to_pin[cpu]);
tools/testing/selftests/net/bench/page_pool/time_bench.c
256
struct time_bench_cpu *cpu = private;
tools/testing/selftests/net/bench/page_pool/time_bench.c
257
struct time_bench_sync *sync = cpu->sync;
tools/testing/selftests/net/bench/page_pool/time_bench.c
259
void *data = cpu->data;
tools/testing/selftests/net/bench/page_pool/time_bench.c
262
cpumask_set_cpu(cpu->rec.cpu, &newmask);
tools/testing/selftests/net/bench/page_pool/time_bench.c
270
if (!cpu->bench_func(&cpu->rec, data)) {
tools/testing/selftests/net/bench/page_pool/time_bench.c
272
cpu->rec.cpu, smp_processor_id());
tools/testing/selftests/net/bench/page_pool/time_bench.c
275
pr_info("SUCCESS: ran on CPU:%d(%d)\n", cpu->rec.cpu,
tools/testing/selftests/net/bench/page_pool/time_bench.c
278
cpu->did_bench_run = true;
tools/testing/selftests/net/bench/page_pool/time_bench.c
296
int cpu;
tools/testing/selftests/net/bench/page_pool/time_bench.c
304
for_each_cpu(cpu, mask) {
tools/testing/selftests/net/bench/page_pool/time_bench.c
305
struct time_bench_cpu *c = &cpu_tasks[cpu];
tools/testing/selftests/net/bench/page_pool/time_bench.c
312
desc, cpu, rec->tsc_cycles, rec->ns_per_call_quotient,
tools/testing/selftests/net/bench/page_pool/time_bench.c
335
int cpu, running = 0;
tools/testing/selftests/net/bench/page_pool/time_bench.c
346
for_each_cpu(cpu, mask) {
tools/testing/selftests/net/bench/page_pool/time_bench.c
347
struct time_bench_cpu *c = &cpu_tasks[cpu];
tools/testing/selftests/net/bench/page_pool/time_bench.c
360
c->rec.cpu = cpu;
tools/testing/selftests/net/bench/page_pool/time_bench.c
363
"time_bench%d", cpu);
tools/testing/selftests/net/bench/page_pool/time_bench.c
385
for_each_cpu(cpu, mask) {
tools/testing/selftests/net/bench/page_pool/time_bench.c
386
struct time_bench_cpu *c = &cpu_tasks[cpu];
tools/testing/selftests/net/bench/page_pool/time_bench.c
67
int cpu;
tools/testing/selftests/net/bench/page_pool/time_bench.c
70
cpu = smp_processor_id();
tools/testing/selftests/net/bench/page_pool/time_bench.c
71
pr_info("DEBUG: cpu:%d\n", cpu);
tools/testing/selftests/net/bench/page_pool/time_bench.c
90
perf_event = perf_event_create_kernel_counter(&perf_conf, cpu,
tools/testing/selftests/net/bench/page_pool/time_bench.h
23
uint32_t cpu; /* Used when embedded in time_bench_cpu */
tools/testing/selftests/net/msg_zerocopy.c
124
static int do_setcpu(int cpu)
tools/testing/selftests/net/msg_zerocopy.c
129
CPU_SET(cpu, &mask);
tools/testing/selftests/net/msg_zerocopy.c
133
fprintf(stderr, "cpu: %u\n", cpu);
tools/testing/selftests/net/reuseport_bpf_cpu.c
187
int epfd, cpu;
tools/testing/selftests/net/reuseport_bpf_cpu.c
195
for (cpu = 0; cpu < len; ++cpu) {
tools/testing/selftests/net/reuseport_bpf_cpu.c
197
ev.data.fd = rcv_fd[cpu];
tools/testing/selftests/net/reuseport_bpf_cpu.c
198
if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fd[cpu], &ev))
tools/testing/selftests/net/reuseport_bpf_cpu.c
203
for (cpu = 0; cpu < len; ++cpu) {
tools/testing/selftests/net/reuseport_bpf_cpu.c
204
send_from_cpu(cpu, family, proto);
tools/testing/selftests/net/reuseport_bpf_cpu.c
205
receive_on_cpu(rcv_fd, len, epfd, cpu, proto);
tools/testing/selftests/net/reuseport_bpf_cpu.c
209
for (cpu = len - 1; cpu >= 0; --cpu) {
tools/testing/selftests/net/reuseport_bpf_cpu.c
210
send_from_cpu(cpu, family, proto);
tools/testing/selftests/net/reuseport_bpf_cpu.c
211
receive_on_cpu(rcv_fd, len, epfd, cpu, proto);
tools/testing/selftests/net/reuseport_bpf_cpu.c
215
for (cpu = 0; cpu < len; cpu += 2) {
tools/testing/selftests/net/reuseport_bpf_cpu.c
216
send_from_cpu(cpu, family, proto);
tools/testing/selftests/net/reuseport_bpf_cpu.c
217
receive_on_cpu(rcv_fd, len, epfd, cpu, proto);
tools/testing/selftests/net/reuseport_bpf_cpu.c
221
for (cpu = 1; cpu < len; cpu += 2) {
tools/testing/selftests/net/reuseport_bpf_cpu.c
222
send_from_cpu(cpu, family, proto);
tools/testing/selftests/net/reuseport_bpf_cpu.c
223
receive_on_cpu(rcv_fd, len, epfd, cpu, proto);
tools/testing/selftests/net/reuseport_bpf_cpu.c
227
for (cpu = 0; cpu < len; ++cpu)
tools/testing/selftests/net/reuseport_bpf_cpu.c
228
close(rcv_fd[cpu]);
tools/testing/selftests/net/so_incoming_cpu.c
115
void set_so_incoming_cpu(struct __test_metadata *_metadata, int fd, int cpu)
tools/testing/selftests/net/so_incoming_cpu.c
119
ret = setsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, sizeof(int));
tools/testing/selftests/net/so_incoming_cpu.c
126
int cpu)
tools/testing/selftests/net/so_incoming_cpu.c
134
set_so_incoming_cpu(_metadata, fd, cpu);
tools/testing/selftests/net/so_incoming_cpu.c
143
set_so_incoming_cpu(_metadata, fd, cpu);
tools/testing/selftests/net/so_incoming_cpu.c
152
set_so_incoming_cpu(_metadata, fd, cpu);
tools/testing/selftests/net/so_incoming_cpu.c
212
int i, j, fd, cpu, ret, total = 0;
tools/testing/selftests/net/so_incoming_cpu.c
221
ret = getsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len);
tools/testing/selftests/net/so_incoming_cpu.c
223
ASSERT_EQ(cpu, i);
tools/testing/selftests/net/udpgso_bench_tx.c
104
static int set_cpu(int cpu)
tools/testing/selftests/net/udpgso_bench_tx.c
109
CPU_SET(cpu, &mask);
tools/testing/selftests/net/udpgso_bench_tx.c
111
error(1, 0, "setaffinity %d", cpu);
tools/testing/selftests/powerpc/benchmarks/context_switch.c
106
static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
tools/testing/selftests/powerpc/benchmarks/context_switch.c
129
CPU_SET_S(cpu, size, cpuset);
tools/testing/selftests/powerpc/benchmarks/context_switch.c
421
static void (*start_fn)(void *(*fn)(void *), void *arg, unsigned long cpu);
tools/testing/selftests/powerpc/benchmarks/context_switch.c
74
static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu)
tools/testing/selftests/powerpc/benchmarks/context_switch.c
82
CPU_SET(cpu, &cpuset);
tools/testing/selftests/powerpc/benchmarks/fork.c
148
if (cpu != -1) {
tools/testing/selftests/powerpc/benchmarks/fork.c
150
CPU_SET(cpu, &cpuset);
tools/testing/selftests/powerpc/benchmarks/fork.c
291
cpu = -1;
tools/testing/selftests/powerpc/benchmarks/fork.c
293
cpu = atoi(argv[optind++]);
tools/testing/selftests/powerpc/benchmarks/fork.c
299
set_cpu(cpu);
tools/testing/selftests/powerpc/benchmarks/fork.c
30
static void set_cpu(int cpu)
tools/testing/selftests/powerpc/benchmarks/fork.c
312
printf(" on cpu %d\n", cpu);
tools/testing/selftests/powerpc/benchmarks/fork.c
319
start_process_on(bench_proc, NULL, cpu);
tools/testing/selftests/powerpc/benchmarks/fork.c
34
if (cpu == -1)
tools/testing/selftests/powerpc/benchmarks/fork.c
38
CPU_SET(cpu, &cpuset);
tools/testing/selftests/powerpc/benchmarks/fork.c
46
static void start_process_on(void *(*fn)(void *), void *arg, int cpu)
tools/testing/selftests/powerpc/benchmarks/fork.c
59
set_cpu(cpu);
tools/testing/selftests/powerpc/benchmarks/fork.c
66
static int cpu;
tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c
39
int cpu;
tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c
41
for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c
43
CPU_SET(cpu, &mask);
tools/testing/selftests/powerpc/include/utils.h
44
int bind_to_cpu(int cpu);
tools/testing/selftests/powerpc/mm/tlbie_test.c
397
static void set_pthread_cpu(pthread_t th, int cpu)
tools/testing/selftests/powerpc/mm/tlbie_test.c
403
CPU_SET(cpu, &run_cpu_mask);
tools/testing/selftests/powerpc/mm/tlbie_test.c
413
static void set_mycpu(int cpu)
tools/testing/selftests/powerpc/mm/tlbie_test.c
419
CPU_SET(cpu, &run_cpu_mask);
tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c
22
static int setup_cpu_event(struct event *event, int cpu)
tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c
33
FAIL_IF(event_open_with_cpu(event, cpu));
tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c
43
int cpu, rc;
tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c
48
cpu = bind_to_cpu(BIND_CPU_ANY);
tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c
49
FAIL_IF(cpu < 0);
tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c
61
rc = setup_cpu_event(&event, cpu);
tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c
22
static int setup_cpu_event(struct event *event, int cpu)
tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c
31
FAIL_IF(event_open_with_cpu(event, cpu));
tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c
41
int cpu, rc;
tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c
46
cpu = bind_to_cpu(BIND_CPU_ANY);
tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c
47
FAIL_IF(cpu < 0);
tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c
59
rc = setup_cpu_event(&event, cpu);
tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c
22
static int setup_cpu_event(struct event *event, int cpu)
tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c
31
FAIL_IF(event_open_with_cpu(event, cpu));
tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c
41
int cpu, rc;
tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c
46
cpu = bind_to_cpu(BIND_CPU_ANY);
tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c
47
FAIL_IF(cpu < 0);
tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c
62
rc = setup_cpu_event(&event, cpu);
tools/testing/selftests/powerpc/pmu/event.c
17
int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
tools/testing/selftests/powerpc/pmu/event.c
20
return syscall(__NR_perf_event_open, attr, pid, cpu,
tools/testing/selftests/powerpc/pmu/event.c
69
int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd)
tools/testing/selftests/powerpc/pmu/event.c
71
e->fd = perf_event_open(&e->attr, pid, cpu, group_fd, 0);
tools/testing/selftests/powerpc/pmu/event.c
90
int event_open_with_cpu(struct event *e, int cpu)
tools/testing/selftests/powerpc/pmu/event.c
92
return event_open_with_options(e, PERF_NO_PID, cpu, PERF_NO_GROUP);
tools/testing/selftests/powerpc/pmu/event.h
36
int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd);
tools/testing/selftests/powerpc/pmu/event.h
39
int event_open_with_cpu(struct event *e, int cpu);
tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
147
int i, ncpus, cpu, ret = 0;
tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
178
for (i = 0, cpu = 0; i < nprocs && cpu < ncpus; cpu++) {
tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
179
if (!CPU_ISSET_S(cpu, size, mask))
tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
181
fd[i] = perf_cpu_event_open(cpu, type, addr, len);
tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
90
static int perf_cpu_event_open(long cpu, __u32 type, __u64 addr, __u64 len)
tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
95
return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c
116
static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c
119
return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
tools/testing/selftests/powerpc/tm/tm-poison.c
28
int cpu, pid;
tools/testing/selftests/powerpc/tm/tm-poison.c
38
cpu = pick_online_cpu();
tools/testing/selftests/powerpc/tm/tm-poison.c
39
FAIL_IF(cpu < 0);
tools/testing/selftests/powerpc/tm/tm-poison.c
43
CPU_SET(cpu, &cpuset);
tools/testing/selftests/powerpc/tm/tm-trap.c
250
int cpu, rc;
tools/testing/selftests/powerpc/tm/tm-trap.c
270
cpu = pick_online_cpu();
tools/testing/selftests/powerpc/tm/tm-trap.c
271
FAIL_IF(cpu < 0);
tools/testing/selftests/powerpc/tm/tm-trap.c
275
CPU_SET(cpu, &cpuset);
tools/testing/selftests/powerpc/tm/tm-unavailable.c
341
int cpu, rc, exception; /* FP = 0, VEC = 1, VSX = 2 */
tools/testing/selftests/powerpc/tm/tm-unavailable.c
349
cpu = pick_online_cpu();
tools/testing/selftests/powerpc/tm/tm-unavailable.c
350
FAIL_IF(cpu < 0);
tools/testing/selftests/powerpc/tm/tm-unavailable.c
354
CPU_SET(cpu, &cpuset);
tools/testing/selftests/powerpc/utils.c
419
int ncpus, cpu = -1;
tools/testing/selftests/powerpc/utils.c
439
for (cpu = 8; cpu < ncpus; cpu += 8)
tools/testing/selftests/powerpc/utils.c
440
if (CPU_ISSET_S(cpu, size, mask))
tools/testing/selftests/powerpc/utils.c
444
for (cpu = ncpus - 1; cpu >= 0; cpu--)
tools/testing/selftests/powerpc/utils.c
445
if (CPU_ISSET_S(cpu, size, mask))
tools/testing/selftests/powerpc/utils.c
452
return cpu;
tools/testing/selftests/powerpc/utils.c
455
int bind_to_cpu(int cpu)
tools/testing/selftests/powerpc/utils.c
460
if (cpu == BIND_CPU_ANY) {
tools/testing/selftests/powerpc/utils.c
461
cpu = pick_online_cpu();
tools/testing/selftests/powerpc/utils.c
462
if (cpu < 0)
tools/testing/selftests/powerpc/utils.c
463
return cpu;
tools/testing/selftests/powerpc/utils.c
466
printf("Binding to cpu %d\n", cpu);
tools/testing/selftests/powerpc/utils.c
469
CPU_SET(cpu, &mask);
tools/testing/selftests/powerpc/utils.c
475
return cpu;
tools/testing/selftests/powerpc/utils.c
524
int cpu, int group_fd, unsigned long flags)
tools/testing/selftests/powerpc/utils.c
526
return syscall(__NR_perf_event_open, hw_event, pid, cpu,
tools/testing/selftests/powerpc/vphn/asm/vphn.h
22
long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
tools/testing/selftests/powerpc/vphn/vphn.c
79
long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity)
tools/testing/selftests/powerpc/vphn/vphn.c
84
rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, cpu);
tools/testing/selftests/proc/proc-uptime-002.c
50
unsigned int cpu;
tools/testing/selftests/proc/proc-uptime-002.c
68
for (cpu = 0; cpu < len * 8; cpu++) {
tools/testing/selftests/proc/proc-uptime-002.c
70
m[cpu / (8 * sizeof(unsigned long))] |= 1UL << (cpu % (8 * sizeof(unsigned long)));
tools/testing/selftests/resctrl/cat_test.c
175
ret = taskset_benchmark(bm_pid, uparams->cpu, &old_affinity);
tools/testing/selftests/resctrl/cat_test.c
186
pe_fd = perf_open(&pea, bm_pid, uparams->cpu);
tools/testing/selftests/resctrl/cat_test.c
200
ret = write_schemata("", schemata, uparams->cpu, test->resource);
tools/testing/selftests/resctrl/cat_test.c
204
ret = write_schemata(param->ctrlgrp, schemata, uparams->cpu, test->resource);
tools/testing/selftests/resctrl/cat_test.c
254
ret = get_cache_size(uparams->cpu, test->resource, &cache_total_size);
tools/testing/selftests/resctrl/cat_test.c
350
ret = write_schemata("", schemata, uparams->cpu, test->resource);
tools/testing/selftests/resctrl/cat_test.c
362
ret = write_schemata("", schemata, uparams->cpu, test->resource);
tools/testing/selftests/resctrl/cmt_test.c
130
ret = get_cache_size(uparams->cpu, "L3", &cache_total_size);
tools/testing/selftests/resctrl/mba_test.c
191
buf_size = get_fill_buf_size(uparams->cpu, "L3");
tools/testing/selftests/resctrl/mba_test.c
59
ret = write_schemata(p->ctrlgrp, allocation_str, uparams->cpu, test->resource);
tools/testing/selftests/resctrl/mbm_test.c
111
ret = write_schemata(p->ctrlgrp, "100", uparams->cpu, test->resource);
tools/testing/selftests/resctrl/mbm_test.c
150
buf_size = get_fill_buf_size(uparams->cpu, "L3");
tools/testing/selftests/resctrl/resctrl.h
184
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
tools/testing/selftests/resctrl/resctrl.h
86
int cpu;
tools/testing/selftests/resctrl/resctrl_tests.c
246
uparams->cpu = 1;
tools/testing/selftests/resctrl/resctrl_tests.c
312
uparams.cpu = atoi(optarg);
tools/testing/selftests/resctrl/resctrl_val.c
497
ret = perf_open_imc_read_mem_bw(uparams->cpu);
tools/testing/selftests/resctrl/resctrl_val.c
553
ret = get_domain_id(test->resource, uparams->cpu, &domain_id);
tools/testing/selftests/resctrl/resctrl_val.c
562
ret = taskset_benchmark(ppid, uparams->cpu, &old_affinity);
tools/testing/selftests/resctrl/resctrlfs.c
949
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
tools/testing/selftests/resctrl/resctrlfs.c
954
ret = syscall(__NR_perf_event_open, hw_event, pid, cpu,
tools/testing/selftests/ring-buffer/map_test.c
139
int cpu = sched_getcpu();
tools/testing/selftests/ring-buffer/map_test.c
157
ASSERT_GE(cpu, 0);
tools/testing/selftests/ring-buffer/map_test.c
163
ASSERT_EQ(tracefs_cpu_map(&self->map_desc, cpu), 0);
tools/testing/selftests/ring-buffer/map_test.c
169
CPU_SET(cpu, &cpu_mask);
tools/testing/selftests/ring-buffer/map_test.c
303
int cpu = sched_getcpu();
tools/testing/selftests/ring-buffer/map_test.c
305
ASSERT_GE(cpu, 0);
tools/testing/selftests/ring-buffer/map_test.c
308
ASSERT_EQ(tracefs_cpu_map(&map_desc, cpu), -EBUSY);
tools/testing/selftests/ring-buffer/map_test.c
314
int cpu = sched_getcpu();
tools/testing/selftests/ring-buffer/map_test.c
316
ASSERT_EQ(tracefs_cpu_map(&map_desc, cpu), 0);
tools/testing/selftests/ring-buffer/map_test.c
79
int tracefs_cpu_map(struct tracefs_cpu_map_desc *desc, int cpu)
tools/testing/selftests/ring-buffer/map_test.c
87
cpu) < 0)
tools/testing/selftests/rseq/basic_percpu_ops_test.c
102
return cpu;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
105
void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
tools/testing/selftests/rseq/basic_percpu_ops_test.c
107
assert(lock->c[cpu].v == 1);
tools/testing/selftests/rseq/basic_percpu_ops_test.c
112
rseq_smp_store_release(&lock->c[cpu].v, 0);
tools/testing/selftests/rseq/basic_percpu_ops_test.c
118
int i, cpu;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
126
cpu = rseq_this_cpu_lock(&data->lock);
tools/testing/selftests/rseq/basic_percpu_ops_test.c
127
data->c[cpu].count++;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
128
rseq_percpu_unlock(&data->lock, cpu);
tools/testing/selftests/rseq/basic_percpu_ops_test.c
174
int cpu;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
180
cpu = get_current_cpu_id();
tools/testing/selftests/rseq/basic_percpu_ops_test.c
182
expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
tools/testing/selftests/rseq/basic_percpu_ops_test.c
184
targetptr = (intptr_t *)&list->c[cpu].head;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
187
targetptr, expect, newval, cpu);
tools/testing/selftests/rseq/basic_percpu_ops_test.c
193
*_cpu = cpu;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
208
int ret, cpu;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
210
cpu = get_current_cpu_id();
tools/testing/selftests/rseq/basic_percpu_ops_test.c
211
targetptr = (intptr_t *)&list->c[cpu].head;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
217
offset, load, cpu);
tools/testing/selftests/rseq/basic_percpu_ops_test.c
220
*_cpu = cpu;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
233
struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
tools/testing/selftests/rseq/basic_percpu_ops_test.c
237
node = list->c[cpu].head;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
240
list->c[cpu].head = node->next;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
85
int cpu;
tools/testing/selftests/rseq/basic_percpu_ops_test.c
90
cpu = get_current_cpu_id();
tools/testing/selftests/rseq/basic_percpu_ops_test.c
92
&lock->c[cpu].v, 0, 1, cpu);
tools/testing/selftests/rseq/param_test.c
1012
int cpu;
tools/testing/selftests/rseq/param_test.c
1020
cpu = get_current_cpu_id();
tools/testing/selftests/rseq/param_test.c
1022
offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
tools/testing/selftests/rseq/param_test.c
1023
if (offset == buffer->c[cpu].buflen)
tools/testing/selftests/rseq/param_test.c
1025
destptr = (char *)&buffer->c[cpu].array[offset];
tools/testing/selftests/rseq/param_test.c
1030
targetptr_final = &buffer->c[cpu].offset;
tools/testing/selftests/rseq/param_test.c
1035
newval_final, cpu);
tools/testing/selftests/rseq/param_test.c
1043
*_cpu = cpu;
tools/testing/selftests/rseq/param_test.c
1052
int cpu;
tools/testing/selftests/rseq/param_test.c
1060
cpu = get_current_cpu_id();
tools/testing/selftests/rseq/param_test.c
1062
offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
tools/testing/selftests/rseq/param_test.c
1066
srcptr = (char *)&buffer->c[cpu].array[offset - 1];
tools/testing/selftests/rseq/param_test.c
1070
targetptr_final = &buffer->c[cpu].offset;
tools/testing/selftests/rseq/param_test.c
1073
newval_final, cpu);
tools/testing/selftests/rseq/param_test.c
1081
*_cpu = cpu;
tools/testing/selftests/rseq/param_test.c
1091
int cpu)
tools/testing/selftests/rseq/param_test.c
1095
offset = buffer->c[cpu].offset;
tools/testing/selftests/rseq/param_test.c
1098
memcpy(item, &buffer->c[cpu].array[offset - 1], sizeof(*item));
tools/testing/selftests/rseq/param_test.c
1099
buffer->c[cpu].offset = offset - 1;
tools/testing/selftests/rseq/param_test.c
1274
int cpu = get_current_cpu_id();
tools/testing/selftests/rseq/param_test.c
1278
sizeof(struct percpu_list_entry) * cpu, 1, cpu);
tools/testing/selftests/rseq/param_test.c
326
int rseq_membarrier_expedited(int cpu)
tools/testing/selftests/rseq/param_test.c
351
int rseq_membarrier_expedited(int cpu)
tools/testing/selftests/rseq/param_test.c
354
MEMBARRIER_CMD_FLAG_CPU, cpu);
tools/testing/selftests/rseq/param_test.c
441
int cpu;
tools/testing/selftests/rseq/param_test.c
446
cpu = get_current_cpu_id();
tools/testing/selftests/rseq/param_test.c
447
if (cpu < 0) {
tools/testing/selftests/rseq/param_test.c
449
getpid(), (int) rseq_gettid(), rseq_current_cpu_raw(), cpu);
tools/testing/selftests/rseq/param_test.c
453
&lock->c[cpu].v,
tools/testing/selftests/rseq/param_test.c
454
0, 1, cpu);
tools/testing/selftests/rseq/param_test.c
464
return cpu;
tools/testing/selftests/rseq/param_test.c
467
static void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
tools/testing/selftests/rseq/param_test.c
469
assert(lock->c[cpu].v == 1);
tools/testing/selftests/rseq/param_test.c
474
rseq_smp_store_release(&lock->c[cpu].v, 0);
tools/testing/selftests/rseq/param_test.c
488
int cpu = rseq_this_cpu_lock(&data->lock);
tools/testing/selftests/rseq/param_test.c
489
data->c[cpu].count++;
tools/testing/selftests/rseq/param_test.c
490
rseq_percpu_unlock(&data->lock, cpu);
tools/testing/selftests/rseq/param_test.c
568
int cpu;
tools/testing/selftests/rseq/param_test.c
570
cpu = get_current_cpu_id();
tools/testing/selftests/rseq/param_test.c
572
&data->c[cpu].count, 1, cpu);
tools/testing/selftests/rseq/param_test.c
635
int cpu;
tools/testing/selftests/rseq/param_test.c
641
cpu = get_current_cpu_id();
tools/testing/selftests/rseq/param_test.c
643
expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
tools/testing/selftests/rseq/param_test.c
645
targetptr = (intptr_t *)&list->c[cpu].head;
tools/testing/selftests/rseq/param_test.c
648
targetptr, expect, newval, cpu);
tools/testing/selftests/rseq/param_test.c
654
*_cpu = cpu;
tools/testing/selftests/rseq/param_test.c
666
int cpu;
tools/testing/selftests/rseq/param_test.c
674
cpu = get_current_cpu_id();
tools/testing/selftests/rseq/param_test.c
675
targetptr = (intptr_t *)&list->c[cpu].head;
tools/testing/selftests/rseq/param_test.c
681
offset, load, cpu);
tools/testing/selftests/rseq/param_test.c
691
*_cpu = cpu;
tools/testing/selftests/rseq/param_test.c
699
struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
tools/testing/selftests/rseq/param_test.c
703
node = list->c[cpu].head;
tools/testing/selftests/rseq/param_test.c
706
list->c[cpu].head = node->next;
tools/testing/selftests/rseq/param_test.c
811
int cpu;
tools/testing/selftests/rseq/param_test.c
819
cpu = get_current_cpu_id();
tools/testing/selftests/rseq/param_test.c
820
offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
tools/testing/selftests/rseq/param_test.c
821
if (offset == buffer->c[cpu].buflen)
tools/testing/selftests/rseq/param_test.c
824
targetptr_spec = (intptr_t *)&buffer->c[cpu].array[offset];
tools/testing/selftests/rseq/param_test.c
826
targetptr_final = &buffer->c[cpu].offset;
tools/testing/selftests/rseq/param_test.c
829
newval_spec, newval_final, cpu);
tools/testing/selftests/rseq/param_test.c
837
*_cpu = cpu;
tools/testing/selftests/rseq/param_test.c
845
int cpu;
tools/testing/selftests/rseq/param_test.c
852
cpu = get_current_cpu_id();
tools/testing/selftests/rseq/param_test.c
854
offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
tools/testing/selftests/rseq/param_test.c
859
head = RSEQ_READ_ONCE(buffer->c[cpu].array[offset - 1]);
tools/testing/selftests/rseq/param_test.c
861
targetptr = (intptr_t *)&buffer->c[cpu].offset;
tools/testing/selftests/rseq/param_test.c
864
(intptr_t *)&buffer->c[cpu].array[offset - 1],
tools/testing/selftests/rseq/param_test.c
865
(intptr_t)head, newval, cpu);
tools/testing/selftests/rseq/param_test.c
871
*_cpu = cpu;
tools/testing/selftests/rseq/param_test.c
880
int cpu)
tools/testing/selftests/rseq/param_test.c
885
offset = buffer->c[cpu].offset;
tools/testing/selftests/rseq/param_test.c
888
head = buffer->c[cpu].array[offset - 1];
tools/testing/selftests/rseq/param_test.c
889
buffer->c[cpu].offset = offset - 1;
tools/testing/selftests/rseq/rseq-arm-bits.h
118
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-arm-bits.h
14
int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-arm-bits.h
154
int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
tools/testing/selftests/rseq/rseq-arm-bits.h
180
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-arm-bits.h
209
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-arm-bits.h
250
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-arm-bits.h
299
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-arm-bits.h
338
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-arm-bits.h
379
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-arm-bits.h
461
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-arm-bits.h
47
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-arm-bits.h
82
long voffp, intptr_t *load, int cpu)
tools/testing/selftests/rseq/rseq-arm64-bits.h
100
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-arm64-bits.h
134
int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
tools/testing/selftests/rseq/rseq-arm64-bits.h
15
int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-arm64-bits.h
155
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-arm64-bits.h
183
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-arm64-bits.h
211
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-arm64-bits.h
257
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-arm64-bits.h
287
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-arm64-bits.h
324
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-arm64-bits.h
354
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-arm64-bits.h
39
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-arm64-bits.h
73
long voffp, intptr_t *load, int cpu)
tools/testing/selftests/rseq/rseq-mips-bits.h
109
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-mips-bits.h
14
int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-mips-bits.h
140
int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
tools/testing/selftests/rseq/rseq-mips-bits.h
166
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-mips-bits.h
192
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-mips-bits.h
229
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-mips-bits.h
272
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-mips-bits.h
309
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-mips-bits.h
344
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-mips-bits.h
423
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-mips-bits.h
45
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-mips-bits.h
75
long voffp, intptr_t *load, int cpu)
tools/testing/selftests/rseq/rseq-or1k-bits.h
10
int cpu)
tools/testing/selftests/rseq/rseq-or1k-bits.h
120
int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
tools/testing/selftests/rseq/rseq-or1k-bits.h
140
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-or1k-bits.h
166
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-or1k-bits.h
193
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-or1k-bits.h
234
int cpu)
tools/testing/selftests/rseq/rseq-or1k-bits.h
252
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-or1k-bits.h
285
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-or1k-bits.h
314
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-or1k-bits.h
33
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-or1k-bits.h
348
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-or1k-bits.h
376
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-or1k-bits.h
64
off_t voffp, intptr_t *load, int cpu)
tools/testing/selftests/rseq/rseq-or1k-bits.h
90
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-ppc-bits.h
116
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-ppc-bits.h
15
int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-ppc-bits.h
152
int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
tools/testing/selftests/rseq/rseq-ppc-bits.h
179
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-ppc-bits.h
209
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-ppc-bits.h
245
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-ppc-bits.h
294
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-ppc-bits.h
331
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-ppc-bits.h
371
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-ppc-bits.h
413
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-ppc-bits.h
45
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-ppc-bits.h
80
long voffp, intptr_t *load, int cpu)
tools/testing/selftests/rseq/rseq-riscv-bits.h
119
int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
tools/testing/selftests/rseq/rseq-riscv-bits.h
139
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-riscv-bits.h
165
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-riscv-bits.h
192
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-riscv-bits.h
232
int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, intptr_t inc, int cpu)
tools/testing/selftests/rseq/rseq-riscv-bits.h
250
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-riscv-bits.h
283
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-riscv-bits.h
312
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-riscv-bits.h
32
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-riscv-bits.h
346
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-riscv-bits.h
374
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-riscv-bits.h
63
off_t voffp, intptr_t *load, int cpu)
tools/testing/selftests/rseq/rseq-riscv-bits.h
89
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-riscv-bits.h
9
int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-s390-bits.h
111
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-s390-bits.h
147
int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
tools/testing/selftests/rseq/rseq-s390-bits.h
171
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-s390-bits.h
201
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-s390-bits.h
236
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-s390-bits.h
286
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-s390-bits.h
318
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-s390-bits.h
359
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-s390-bits.h
38
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-s390-bits.h
430
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-s390-bits.h
77
long voffp, intptr_t *load, int cpu)
tools/testing/selftests/rseq/rseq-s390-bits.h
9
int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
116
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-x86-bits.h
150
int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
16
int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
172
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-x86-bits.h
204
int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, long off, intptr_t inc, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
231
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-x86-bits.h
257
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
292
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-x86-bits.h
339
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
371
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-x86-bits.h
409
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
45
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-x86-bits.h
480
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-x86-bits.h
528
int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
557
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-x86-bits.h
594
long voffp, intptr_t *load, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
628
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-x86-bits.h
662
int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
684
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-x86-bits.h
712
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
748
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-x86-bits.h
795
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
82
long voffp, intptr_t *load, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
832
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq-x86-bits.h
872
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq-x86-bits.h
949
: [cpu_id] "r" (cpu),
tools/testing/selftests/rseq/rseq.c
287
int32_t cpu;
tools/testing/selftests/rseq/rseq.c
289
cpu = sched_getcpu();
tools/testing/selftests/rseq/rseq.c
290
if (cpu < 0) {
tools/testing/selftests/rseq/rseq.c
294
return cpu;
tools/testing/selftests/rseq/rseq.c
96
static int sys_getcpu(unsigned *cpu, unsigned *node)
tools/testing/selftests/rseq/rseq.c
98
return syscall(__NR_getcpu, cpu, node, NULL);
tools/testing/selftests/rseq/rseq.h
194
int32_t cpu;
tools/testing/selftests/rseq/rseq.h
196
cpu = rseq_current_cpu_raw();
tools/testing/selftests/rseq/rseq.h
197
if (rseq_unlikely(cpu < 0))
tools/testing/selftests/rseq/rseq.h
198
cpu = rseq_fallback_current_cpu();
tools/testing/selftests/rseq/rseq.h
199
return cpu;
tools/testing/selftests/rseq/rseq.h
250
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq.h
256
return rseq_cmpeqv_storev_relaxed_cpu_id(v, expect, newv, cpu);
tools/testing/selftests/rseq/rseq.h
258
return rseq_cmpeqv_storev_relaxed_mm_cid(v, expect, newv, cpu);
tools/testing/selftests/rseq/rseq.h
270
int cpu)
tools/testing/selftests/rseq/rseq.h
276
return rseq_cmpnev_storeoffp_load_relaxed_cpu_id(v, expectnot, voffp, load, cpu);
tools/testing/selftests/rseq/rseq.h
278
return rseq_cmpnev_storeoffp_load_relaxed_mm_cid(v, expectnot, voffp, load, cpu);
tools/testing/selftests/rseq/rseq.h
285
intptr_t *v, intptr_t count, int cpu)
tools/testing/selftests/rseq/rseq.h
291
return rseq_addv_relaxed_cpu_id(v, count, cpu);
tools/testing/selftests/rseq/rseq.h
293
return rseq_addv_relaxed_mm_cid(v, count, cpu);
tools/testing/selftests/rseq/rseq.h
305
intptr_t *ptr, long off, intptr_t inc, int cpu)
tools/testing/selftests/rseq/rseq.h
311
return rseq_offset_deref_addv_relaxed_cpu_id(ptr, off, inc, cpu);
tools/testing/selftests/rseq/rseq.h
313
return rseq_offset_deref_addv_relaxed_mm_cid(ptr, off, inc, cpu);
tools/testing/selftests/rseq/rseq.h
323
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq.h
329
return rseq_cmpeqv_trystorev_storev_relaxed_cpu_id(v, expect, v2, newv2, newv, cpu);
tools/testing/selftests/rseq/rseq.h
331
return rseq_cmpeqv_trystorev_storev_relaxed_mm_cid(v, expect, v2, newv2, newv, cpu);
tools/testing/selftests/rseq/rseq.h
337
return rseq_cmpeqv_trystorev_storev_release_cpu_id(v, expect, v2, newv2, newv, cpu);
tools/testing/selftests/rseq/rseq.h
339
return rseq_cmpeqv_trystorev_storev_release_mm_cid(v, expect, v2, newv2, newv, cpu);
tools/testing/selftests/rseq/rseq.h
351
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq.h
357
return rseq_cmpeqv_cmpeqv_storev_relaxed_cpu_id(v, expect, v2, expect2, newv, cpu);
tools/testing/selftests/rseq/rseq.h
359
return rseq_cmpeqv_cmpeqv_storev_relaxed_mm_cid(v, expect, v2, expect2, newv, cpu);
tools/testing/selftests/rseq/rseq.h
368
intptr_t newv, int cpu)
tools/testing/selftests/rseq/rseq.h
374
return rseq_cmpeqv_trymemcpy_storev_relaxed_cpu_id(v, expect, dst, src, len, newv, cpu);
tools/testing/selftests/rseq/rseq.h
376
return rseq_cmpeqv_trymemcpy_storev_relaxed_mm_cid(v, expect, dst, src, len, newv, cpu);
tools/testing/selftests/rseq/rseq.h
382
return rseq_cmpeqv_trymemcpy_storev_release_cpu_id(v, expect, dst, src, len, newv, cpu);
tools/testing/selftests/rseq/rseq.h
384
return rseq_cmpeqv_trymemcpy_storev_release_mm_cid(v, expect, dst, src, len, newv, cpu);
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
122
int cpu;
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
129
cpu = scx_bpf_select_cpu_and(p, bpf_get_smp_processor_id(), 0, p->cpus_ptr, 0);
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
134
return cpu;
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
18
validate_idle_cpu(const struct task_struct *p, const struct cpumask *allowed, s32 cpu)
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
20
if (scx_bpf_test_and_clear_cpu_idle(cpu))
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
21
scx_bpf_error("CPU %d should be marked as busy", cpu);
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
24
!bpf_cpumask_test_cpu(cpu, allowed))
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
26
cpu, p->pid, p->comm);
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
33
s32 cpu;
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
44
cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags, allowed, 0);
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
45
if (cpu >= 0) {
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
46
validate_idle_cpu(p, allowed, cpu);
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
49
return cpu;
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
58
s32 prev_cpu = scx_bpf_task_cpu(p), cpu;
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
72
cpu = scx_bpf_select_cpu_and(p, prev_cpu, 0, allowed, 0);
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
73
if (cpu >= 0) {
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
74
validate_idle_cpu(p, allowed, cpu);
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
75
scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
tools/testing/selftests/sched_ext/cyclic_kick_wait.bpf.c
22
static s32 target_cpu(s32 cpu)
tools/testing/selftests/sched_ext/cyclic_kick_wait.bpf.c
24
if (cpu == test_cpu_a)
tools/testing/selftests/sched_ext/cyclic_kick_wait.bpf.c
26
if (cpu == test_cpu_b)
tools/testing/selftests/sched_ext/cyclic_kick_wait.bpf.c
28
if (cpu == test_cpu_c)
tools/testing/selftests/sched_ext/cyclic_kick_wait.c
141
workers[i].cpu = test_cpus[i / WORKERS_PER_CPU];
tools/testing/selftests/sched_ext/cyclic_kick_wait.c
37
int cpu;
tools/testing/selftests/sched_ext/cyclic_kick_wait.c
49
CPU_SET(worker->cpu, &mask);
tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
16
s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
18
if (cpu >= 0) {
tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
25
return cpu;
tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
16
s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
18
if (cpu >= 0) {
tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
22
return cpu;
tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
34
void BPF_STRUCT_OPS(dsp_local_on_dispatch, s32 cpu, struct task_struct *prev)
tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c
24
s32 cpu, prev_cpu = scx_bpf_task_cpu(p);
tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c
27
cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, 0, &found);
tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c
29
scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, enq_flags);
tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c
50
s32 cpu;
tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c
57
cpu = scx_bpf_select_cpu_dfl(p, bpf_get_smp_processor_id(), 0, &found);
tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c
59
cpu = -EBUSY;
tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c
64
return cpu;
tools/testing/selftests/sched_ext/exit.bpf.c
39
void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
tools/testing/selftests/sched_ext/hotplug.bpf.c
20
static void exit_from_hotplug(s32 cpu, bool onlining)
tools/testing/selftests/sched_ext/hotplug.bpf.c
33
scx_bpf_exit(code, "hotplug event detected (%d going %s)", cpu,
tools/testing/selftests/sched_ext/hotplug.bpf.c
37
void BPF_STRUCT_OPS_SLEEPABLE(hotplug_cpu_online, s32 cpu)
tools/testing/selftests/sched_ext/hotplug.bpf.c
39
exit_from_hotplug(cpu, true);
tools/testing/selftests/sched_ext/hotplug.bpf.c
42
void BPF_STRUCT_OPS_SLEEPABLE(hotplug_cpu_offline, s32 cpu)
tools/testing/selftests/sched_ext/hotplug.bpf.c
44
exit_from_hotplug(cpu, false);
tools/testing/selftests/sched_ext/maximal.bpf.c
31
void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev)
tools/testing/selftests/sched_ext/maximal.bpf.c
67
void BPF_STRUCT_OPS(maximal_update_idle, s32 cpu, bool idle)
tools/testing/selftests/sched_ext/maximal.bpf.c
70
void BPF_STRUCT_OPS(maximal_cpu_acquire, s32 cpu,
tools/testing/selftests/sched_ext/maximal.bpf.c
74
void BPF_STRUCT_OPS(maximal_cpu_release, s32 cpu,
tools/testing/selftests/sched_ext/maximal.bpf.c
78
void BPF_STRUCT_OPS(maximal_cpu_online, s32 cpu)
tools/testing/selftests/sched_ext/maximal.bpf.c
81
void BPF_STRUCT_OPS(maximal_cpu_offline, s32 cpu)
tools/testing/selftests/sched_ext/maybe_null.bpf.c
15
void BPF_STRUCT_OPS(maybe_null_success_dispatch, s32 cpu, struct task_struct *p)
tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c
15
void BPF_STRUCT_OPS(maybe_null_fail_dispatch, s32 cpu, struct task_struct *p)
tools/testing/selftests/sched_ext/numa.bpf.c
22
static bool is_cpu_idle(s32 cpu, int node)
tools/testing/selftests/sched_ext/numa.bpf.c
28
idle = bpf_cpumask_test_cpu(cpu, idle_cpumask);
tools/testing/selftests/sched_ext/numa.bpf.c
38
s32 cpu;
tools/testing/selftests/sched_ext/numa.bpf.c
45
cpu = __COMPAT_scx_bpf_pick_idle_cpu_node(p->cpus_ptr, node,
tools/testing/selftests/sched_ext/numa.bpf.c
47
if (cpu < 0)
tools/testing/selftests/sched_ext/numa.bpf.c
48
cpu = __COMPAT_scx_bpf_pick_any_cpu_node(p->cpus_ptr, node,
tools/testing/selftests/sched_ext/numa.bpf.c
51
if (is_cpu_idle(cpu, node))
tools/testing/selftests/sched_ext/numa.bpf.c
52
scx_bpf_error("CPU %d should be marked as busy", cpu);
tools/testing/selftests/sched_ext/numa.bpf.c
54
if (__COMPAT_scx_bpf_cpu_node(cpu) != node)
tools/testing/selftests/sched_ext/numa.bpf.c
55
scx_bpf_error("CPU %d should be in node %d", cpu, node);
tools/testing/selftests/sched_ext/numa.bpf.c
57
return cpu;
tools/testing/selftests/sched_ext/numa.bpf.c
67
void BPF_STRUCT_OPS(numa_dispatch, s32 cpu, struct task_struct *prev)
tools/testing/selftests/sched_ext/numa.bpf.c
69
int node = __COMPAT_scx_bpf_cpu_node(cpu);
tools/testing/selftests/sched_ext/peek_dsq.bpf.c
111
int last_insert_test_cpu, cpu;
tools/testing/selftests/sched_ext/peek_dsq.bpf.c
114
cpu = bpf_get_smp_processor_id();
tools/testing/selftests/sched_ext/peek_dsq.bpf.c
115
last_insert_test_cpu = __sync_val_compare_and_swap(&insert_test_cpu, -1, cpu);
tools/testing/selftests/sched_ext/peek_dsq.bpf.c
119
bpf_printk("peek_dsq_enqueue beginning phase 1 peek test on cpu %d", cpu);
tools/testing/selftests/sched_ext/peek_dsq.bpf.c
140
void BPF_STRUCT_OPS(peek_dsq_dispatch, s32 cpu, struct task_struct *prev)
tools/testing/selftests/sched_ext/peek_dsq.bpf.c
147
if (insert_test_cpu == cpu && dsq_peek_result2 == -1) {
tools/testing/selftests/sched_ext/peek_dsq.bpf.c
150
bpf_printk("peek_dsq_dispatch completing phase 1 peek test on cpu %d", cpu);
tools/testing/selftests/sched_ext/rt_stall.c
60
static void set_affinity(int cpu)
tools/testing/selftests/sched_ext/rt_stall.c
65
CPU_SET(cpu, &mask);
tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
38
s32 cpu;
tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
46
cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
49
return cpu;
tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
19
s32 cpu = prev_cpu;
tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
21
if (scx_bpf_test_and_clear_cpu_idle(cpu))
tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
24
cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
25
if (cpu >= 0)
tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
29
cpu = prev_cpu;
tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
33
return cpu;
tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
41
s32 cpu;
tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
43
cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
44
if (cpu >= 0)
tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
47
cpu = prev_cpu;
tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
48
scx_bpf_test_and_clear_cpu_idle(cpu);
tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
51
return cpu;
tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
54
void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)
tools/testing/selftests/seccomp/seccomp_benchmark.c
143
long cpu;
tools/testing/selftests/seccomp/seccomp_benchmark.c
152
for (cpu = ncores - 1; cpu >= 0; cpu--) {
tools/testing/selftests/seccomp/seccomp_benchmark.c
154
CPU_SET_S(cpu, setsz, setp);
tools/testing/selftests/seccomp/seccomp_benchmark.c
157
printf("Pinned to CPU %lu of %lu\n", cpu + 1, ncores);
tools/testing/selftests/user_events/perf_test.c
33
int cpu, int group_fd, unsigned long flags)
tools/testing/selftests/user_events/perf_test.c
35
return syscall(__NR_perf_event_open, pe, pid, cpu, group_fd, flags);
tools/testing/selftests/vDSO/vdso_test_correctness.c
151
static long sys_getcpu(unsigned * cpu, unsigned * node,
tools/testing/selftests/vDSO/vdso_test_correctness.c
154
return syscall(__NR_getcpu, cpu, node, cache);
tools/testing/selftests/vDSO/vdso_test_correctness.c
176
for (int cpu = 0; ; cpu++) {
tools/testing/selftests/vDSO/vdso_test_correctness.c
179
CPU_SET(cpu, &cpuset);
tools/testing/selftests/vDSO/vdso_test_correctness.c
202
if (!ret_sys && (cpu_sys != cpu || node_sys != node))
tools/testing/selftests/vDSO/vdso_test_correctness.c
204
if (!ret_vdso && (cpu_vdso != cpu || node_vdso != node))
tools/testing/selftests/vDSO/vdso_test_correctness.c
206
if (!ret_vsys && (cpu_vsys != cpu || node_vsys != node))
tools/testing/selftests/vDSO/vdso_test_correctness.c
209
printf("[%s]\tCPU %u:", ok ? "OK" : "FAIL", cpu);
tools/testing/selftests/vDSO/vdso_test_getcpu.c
26
unsigned int cpu, node;
tools/testing/selftests/vDSO/vdso_test_getcpu.c
44
ret = VDSO_CALL(get_cpu, 3, &cpu, &node, 0);
tools/testing/selftests/vDSO/vdso_test_getcpu.c
46
printf("Running on CPU %u node %u\n", cpu, node);
tools/testing/selftests/x86/test_vsyscall.c
105
static inline long sys_getcpu(unsigned * cpu, unsigned * node,
tools/testing/selftests/x86/test_vsyscall.c
108
return syscall(SYS_getcpu, cpu, node, cache);
tools/testing/selftests/x86/test_vsyscall.c
222
static void test_getcpu(int cpu)
tools/testing/selftests/x86/test_vsyscall.c
230
ksft_print_msg("getcpu() on CPU %d\n", cpu);
tools/testing/selftests/x86/test_vsyscall.c
233
CPU_SET(cpu, &cpuset);
tools/testing/selftests/x86/test_vsyscall.c
235
ksft_print_msg("failed to force CPU %d\n", cpu);
tools/testing/selftests/x86/test_vsyscall.c
249
if (cpu_sys != cpu)
tools/testing/selftests/x86/test_vsyscall.c
251
cpu_sys, cpu);
tools/testing/selftests/x86/test_vsyscall.c
266
if (cpu_vdso != cpu || node_vdso != node) {
tools/testing/selftests/x86/test_vsyscall.c
267
if (cpu_vdso != cpu)
tools/testing/selftests/x86/test_vsyscall.c
269
cpu_vdso, cpu);
tools/testing/selftests/x86/test_vsyscall.c
291
if (cpu_vsys != cpu || node_vsys != node) {
tools/testing/selftests/x86/test_vsyscall.c
292
if (cpu_vsys != cpu)
tools/testing/selftests/x86/test_vsyscall.c
294
cpu_vsys, cpu);
tools/testing/shared/linux/percpu.h
10
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
tools/testing/shared/linux/percpu.h
11
#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
tools/tracing/rtla/src/common.h
110
#define for_each_monitored_cpu(cpu, nr_cpus, common) \
tools/tracing/rtla/src/common.h
111
for (cpu = 0; cpu < nr_cpus; cpu++) \
tools/tracing/rtla/src/common.h
112
if (!(common)->cpus || CPU_ISSET(cpu, &(common)->monitored_cpus))
tools/tracing/rtla/src/osnoise_hist.c
116
hist = data->hist[cpu].samples;
tools/tracing/rtla/src/osnoise_hist.c
117
data->hist[cpu].count += count;
tools/tracing/rtla/src/osnoise_hist.c
118
update_min(&data->hist[cpu].min_sample, &duration);
tools/tracing/rtla/src/osnoise_hist.c
119
update_sum(&data->hist[cpu].sum_sample, &total_duration);
tools/tracing/rtla/src/osnoise_hist.c
120
update_max(&data->hist[cpu].max_sample, &duration);
tools/tracing/rtla/src/osnoise_hist.c
182
long long cpu, counter, duration;
tools/tracing/rtla/src/osnoise_hist.c
207
cpu = get_llong_from_str(position);
tools/tracing/rtla/src/osnoise_hist.c
208
if (cpu == -1)
tools/tracing/rtla/src/osnoise_hist.c
219
osnoise_hist_update_multiple(tool, cpu, duration, counter);
tools/tracing/rtla/src/osnoise_hist.c
233
int cpu;
tools/tracing/rtla/src/osnoise_hist.c
249
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/osnoise_hist.c
251
if (!data->hist[cpu].count)
tools/tracing/rtla/src/osnoise_hist.c
254
trace_seq_printf(s, " CPU-%03d", cpu);
tools/tracing/rtla/src/osnoise_hist.c
270
int cpu;
tools/tracing/rtla/src/osnoise_hist.c
278
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/osnoise_hist.c
280
if (!data->hist[cpu].count)
tools/tracing/rtla/src/osnoise_hist.c
283
trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].count);
tools/tracing/rtla/src/osnoise_hist.c
290
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/osnoise_hist.c
292
if (!data->hist[cpu].count)
tools/tracing/rtla/src/osnoise_hist.c
295
trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].min_sample);
tools/tracing/rtla/src/osnoise_hist.c
303
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/osnoise_hist.c
305
if (!data->hist[cpu].count)
tools/tracing/rtla/src/osnoise_hist.c
308
if (data->hist[cpu].count)
tools/tracing/rtla/src/osnoise_hist.c
310
((double) data->hist[cpu].sum_sample) / data->hist[cpu].count);
tools/tracing/rtla/src/osnoise_hist.c
319
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/osnoise_hist.c
321
if (!data->hist[cpu].count)
tools/tracing/rtla/src/osnoise_hist.c
324
trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].max_sample);
tools/tracing/rtla/src/osnoise_hist.c
342
int bucket, cpu;
tools/tracing/rtla/src/osnoise_hist.c
354
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/osnoise_hist.c
356
if (!data->hist[cpu].count)
tools/tracing/rtla/src/osnoise_hist.c
359
total += data->hist[cpu].samples[bucket];
tools/tracing/rtla/src/osnoise_hist.c
360
trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].samples[bucket]);
tools/tracing/rtla/src/osnoise_hist.c
390
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/osnoise_hist.c
392
if (!data->hist[cpu].count)
tools/tracing/rtla/src/osnoise_hist.c
396
data->hist[cpu].samples[data->entries]);
tools/tracing/rtla/src/osnoise_hist.c
41
int cpu;
tools/tracing/rtla/src/osnoise_hist.c
44
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
tools/tracing/rtla/src/osnoise_hist.c
45
if (data->hist[cpu].samples)
tools/tracing/rtla/src/osnoise_hist.c
46
free(data->hist[cpu].samples);
tools/tracing/rtla/src/osnoise_hist.c
68
int cpu;
tools/tracing/rtla/src/osnoise_hist.c
82
for (cpu = 0; cpu < nr_cpus; cpu++) {
tools/tracing/rtla/src/osnoise_hist.c
83
data->hist[cpu].samples = calloc(1, sizeof(*data->hist->samples) * (entries + 1));
tools/tracing/rtla/src/osnoise_hist.c
84
if (!data->hist[cpu].samples)
tools/tracing/rtla/src/osnoise_hist.c
89
for (cpu = 0; cpu < nr_cpus; cpu++)
tools/tracing/rtla/src/osnoise_hist.c
90
data->hist[cpu].min_sample = ~0;
tools/tracing/rtla/src/osnoise_hist.c
99
static void osnoise_hist_update_multiple(struct osnoise_tool *tool, int cpu,
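
The calloc of entries + 1 slots above, together with the later read of samples[data->entries], implies a trailing overflow bucket. A sketch of that layout; the bucket arithmetic and struct are illustrative, not rtla's exact code:

struct hist_cpu {		/* hypothetical, mirroring the fields used above */
	int *samples;
	int count;
};

static void hist_update(struct hist_cpu *h, int entries,
			int bucket_size, long long duration)
{
	long long bucket = duration / bucket_size;

	/* Slot [entries] is the extra one: samples past the range land there. */
	if (bucket >= entries)
		bucket = entries;
	h->samples[bucket]++;
	h->count++;
}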
tools/tracing/rtla/src/osnoise_top.c
189
static void osnoise_top_print(struct osnoise_tool *tool, int cpu)
tools/tracing/rtla/src/osnoise_top.c
199
cpu_data = &data->cpu_data[cpu];
tools/tracing/rtla/src/osnoise_top.c
209
trace_seq_printf(s, "%3d #%-6d %12llu ", cpu, cpu_data->sum_cycles, cpu_data->sum_runtime);
tools/tracing/rtla/src/osnoise_top.c
88
int cpu = record->cpu;
tools/tracing/rtla/src/osnoise_top.c
93
cpu_data = &data->cpu_data[cpu];
tools/tracing/rtla/src/timerlat_aa.c
138
*timerlat_aa_get_data(struct timerlat_aa_context *taa_ctx, int cpu)
tools/tracing/rtla/src/timerlat_aa.c
140
return &taa_ctx->taa_data[cpu];
tools/tracing/rtla/src/timerlat_aa.c
243
struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu);
tools/tracing/rtla/src/timerlat_aa.c
266
struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu);
tools/tracing/rtla/src/timerlat_aa.c
309
struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu);
tools/tracing/rtla/src/timerlat_aa.c
405
struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu);
tools/tracing/rtla/src/timerlat_aa.c
438
struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu);
tools/tracing/rtla/src/timerlat_aa.c
483
struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu);
tools/tracing/rtla/src/timerlat_aa.c
515
struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu);
tools/tracing/rtla/src/timerlat_aa.c
546
struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu);
tools/tracing/rtla/src/timerlat_aa.c
558
static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
tools/tracing/rtla/src/timerlat_aa.c
733
int cpu;
tools/tracing/rtla/src/timerlat_aa.c
741
for (cpu = 0; cpu < taa_ctx->nr_cpus; cpu++) {
tools/tracing/rtla/src/timerlat_aa.c
742
taa_data = timerlat_aa_get_data(taa_ctx, cpu);
tools/tracing/rtla/src/timerlat_aa.c
745
printf("## CPU %d hit stop tracing, analyzing it ##\n", cpu);
tools/tracing/rtla/src/timerlat_aa.c
746
timerlat_thread_analysis(taa_data, cpu, irq_thresh, thread_thresh);
tools/tracing/rtla/src/timerlat_aa.c
748
printf("## CPU %d hit stop tracing, analyzing it ##\n", cpu);
tools/tracing/rtla/src/timerlat_aa.c
749
timerlat_thread_analysis(taa_data, cpu, irq_thresh, thread_thresh);
tools/tracing/rtla/src/timerlat_aa.c
754
max_exit_from_idle_cpu = cpu;
tools/tracing/rtla/src/timerlat_aa.c
769
for (cpu = 0; cpu < taa_ctx->nr_cpus; cpu++) {
tools/tracing/rtla/src/timerlat_aa.c
770
taa_data = timerlat_aa_get_data(taa_ctx, cpu);
tools/tracing/rtla/src/timerlat_aa.c
773
printf(" [%.3d] %24s:%llu", cpu, taa_data->current_comm, taa_data->current_pid);
tools/tracing/rtla/src/timerlat_hist.c
105
for (cpu = 0; cpu < nr_cpus; cpu++) {
tools/tracing/rtla/src/timerlat_hist.c
106
data->hist[cpu].irq = calloc(1, sizeof(*data->hist->irq) * (entries + 1));
tools/tracing/rtla/src/timerlat_hist.c
107
if (!data->hist[cpu].irq)
tools/tracing/rtla/src/timerlat_hist.c
110
data->hist[cpu].thread = calloc(1, sizeof(*data->hist->thread) * (entries + 1));
tools/tracing/rtla/src/timerlat_hist.c
111
if (!data->hist[cpu].thread)
tools/tracing/rtla/src/timerlat_hist.c
114
data->hist[cpu].user = calloc(1, sizeof(*data->hist->user) * (entries + 1));
tools/tracing/rtla/src/timerlat_hist.c
115
if (!data->hist[cpu].user)
tools/tracing/rtla/src/timerlat_hist.c
120
for (cpu = 0; cpu < nr_cpus; cpu++) {
tools/tracing/rtla/src/timerlat_hist.c
121
data->hist[cpu].min_irq = ~0;
tools/tracing/rtla/src/timerlat_hist.c
122
data->hist[cpu].min_thread = ~0;
tools/tracing/rtla/src/timerlat_hist.c
123
data->hist[cpu].min_user = ~0;
tools/tracing/rtla/src/timerlat_hist.c
137
timerlat_hist_update(struct osnoise_tool *tool, int cpu,
tools/tracing/rtla/src/timerlat_hist.c
153
hist = data->hist[cpu].irq;
tools/tracing/rtla/src/timerlat_hist.c
154
data->hist[cpu].irq_count++;
tools/tracing/rtla/src/timerlat_hist.c
155
update_min(&data->hist[cpu].min_irq, &latency);
tools/tracing/rtla/src/timerlat_hist.c
156
update_sum(&data->hist[cpu].sum_irq, &latency);
tools/tracing/rtla/src/timerlat_hist.c
157
update_max(&data->hist[cpu].max_irq, &latency);
tools/tracing/rtla/src/timerlat_hist.c
159
hist = data->hist[cpu].thread;
tools/tracing/rtla/src/timerlat_hist.c
160
data->hist[cpu].thread_count++;
tools/tracing/rtla/src/timerlat_hist.c
161
update_min(&data->hist[cpu].min_thread, &latency);
tools/tracing/rtla/src/timerlat_hist.c
162
update_sum(&data->hist[cpu].sum_thread, &latency);
tools/tracing/rtla/src/timerlat_hist.c
163
update_max(&data->hist[cpu].max_thread, &latency);
tools/tracing/rtla/src/timerlat_hist.c
165
hist = data->hist[cpu].user;
tools/tracing/rtla/src/timerlat_hist.c
166
data->hist[cpu].user_count++;
tools/tracing/rtla/src/timerlat_hist.c
167
update_min(&data->hist[cpu].min_user, &latency);
tools/tracing/rtla/src/timerlat_hist.c
168
update_sum(&data->hist[cpu].sum_user, &latency);
tools/tracing/rtla/src/timerlat_hist.c
169
update_max(&data->hist[cpu].max_user, &latency);
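
The minima above start at ~0 (all bits set, the largest unsigned value), so the first sample always replaces them. Illustrative stand-ins for the update helpers, assuming unsigned long long fields as the %llu format strings elsewhere in the listing suggest:

static void update_min(unsigned long long *min, unsigned long long *value)
{
	if (*value < *min)
		*min = *value;	/* first call always wins against ~0 */
}

static void update_sum(unsigned long long *sum, unsigned long long *value)
{
	*sum += *value;
}

static void update_max(unsigned long long *max, unsigned long long *value)
{
	if (*value > *max)
		*max = *value;
}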
tools/tracing/rtla/src/timerlat_hist.c
188
int cpu = record->cpu;
tools/tracing/rtla/src/timerlat_hist.c
195
timerlat_hist_update(tool, cpu, context, latency);
tools/tracing/rtla/src/timerlat_hist.c
292
int cpu;
tools/tracing/rtla/src/timerlat_hist.c
308
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/timerlat_hist.c
310
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
tools/tracing/rtla/src/timerlat_hist.c
314
trace_seq_printf(s, " IRQ-%03d", cpu);
tools/tracing/rtla/src/timerlat_hist.c
317
trace_seq_printf(s, " Thr-%03d", cpu);
tools/tracing/rtla/src/timerlat_hist.c
320
trace_seq_printf(s, " Usr-%03d", cpu);
tools/tracing/rtla/src/timerlat_hist.c
352
int cpu;
tools/tracing/rtla/src/timerlat_hist.c
360
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/timerlat_hist.c
362
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
tools/tracing/rtla/src/timerlat_hist.c
367
data->hist[cpu].irq_count);
tools/tracing/rtla/src/timerlat_hist.c
371
data->hist[cpu].thread_count);
tools/tracing/rtla/src/timerlat_hist.c
375
data->hist[cpu].user_count);
tools/tracing/rtla/src/timerlat_hist.c
382
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/timerlat_hist.c
384
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
tools/tracing/rtla/src/timerlat_hist.c
389
data->hist[cpu].irq_count,
tools/tracing/rtla/src/timerlat_hist.c
390
data->hist[cpu].min_irq,
tools/tracing/rtla/src/timerlat_hist.c
395
data->hist[cpu].thread_count,
tools/tracing/rtla/src/timerlat_hist.c
396
data->hist[cpu].min_thread,
tools/tracing/rtla/src/timerlat_hist.c
401
data->hist[cpu].user_count,
tools/tracing/rtla/src/timerlat_hist.c
402
data->hist[cpu].min_user,
tools/tracing/rtla/src/timerlat_hist.c
410
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/timerlat_hist.c
412
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
tools/tracing/rtla/src/timerlat_hist.c
417
data->hist[cpu].irq_count,
tools/tracing/rtla/src/timerlat_hist.c
418
data->hist[cpu].sum_irq,
tools/tracing/rtla/src/timerlat_hist.c
423
data->hist[cpu].thread_count,
tools/tracing/rtla/src/timerlat_hist.c
424
data->hist[cpu].sum_thread,
tools/tracing/rtla/src/timerlat_hist.c
429
data->hist[cpu].user_count,
tools/tracing/rtla/src/timerlat_hist.c
430
data->hist[cpu].sum_user,
tools/tracing/rtla/src/timerlat_hist.c
438
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/timerlat_hist.c
440
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
tools/tracing/rtla/src/timerlat_hist.c
445
data->hist[cpu].irq_count,
tools/tracing/rtla/src/timerlat_hist.c
446
data->hist[cpu].max_irq,
tools/tracing/rtla/src/timerlat_hist.c
451
data->hist[cpu].thread_count,
tools/tracing/rtla/src/timerlat_hist.c
452
data->hist[cpu].max_thread,
tools/tracing/rtla/src/timerlat_hist.c
457
data->hist[cpu].user_count,
tools/tracing/rtla/src/timerlat_hist.c
458
data->hist[cpu].max_user,
tools/tracing/rtla/src/timerlat_hist.c
473
int cpu;
tools/tracing/rtla/src/timerlat_hist.c
483
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/timerlat_hist.c
485
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
tools/tracing/rtla/src/timerlat_hist.c
488
cpu_data = &data->hist[cpu];
tools/tracing/rtla/src/timerlat_hist.c
56
int cpu;
tools/tracing/rtla/src/timerlat_hist.c
59
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
tools/tracing/rtla/src/timerlat_hist.c
60
if (data->hist[cpu].irq)
tools/tracing/rtla/src/timerlat_hist.c
61
free(data->hist[cpu].irq);
tools/tracing/rtla/src/timerlat_hist.c
618
int bucket, cpu;
tools/tracing/rtla/src/timerlat_hist.c
63
if (data->hist[cpu].thread)
tools/tracing/rtla/src/timerlat_hist.c
630
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/timerlat_hist.c
632
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
tools/tracing/rtla/src/timerlat_hist.c
636
total += data->hist[cpu].irq[bucket];
tools/tracing/rtla/src/timerlat_hist.c
638
data->hist[cpu].irq[bucket]);
tools/tracing/rtla/src/timerlat_hist.c
64
free(data->hist[cpu].thread);
tools/tracing/rtla/src/timerlat_hist.c
642
total += data->hist[cpu].thread[bucket];
tools/tracing/rtla/src/timerlat_hist.c
644
data->hist[cpu].thread[bucket]);
tools/tracing/rtla/src/timerlat_hist.c
648
total += data->hist[cpu].user[bucket];
tools/tracing/rtla/src/timerlat_hist.c
650
data->hist[cpu].user[bucket]);
tools/tracing/rtla/src/timerlat_hist.c
66
if (data->hist[cpu].user)
tools/tracing/rtla/src/timerlat_hist.c
668
for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
tools/tracing/rtla/src/timerlat_hist.c
67
free(data->hist[cpu].user);
tools/tracing/rtla/src/timerlat_hist.c
670
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
tools/tracing/rtla/src/timerlat_hist.c
675
data->hist[cpu].irq[data->entries]);
tools/tracing/rtla/src/timerlat_hist.c
679
data->hist[cpu].thread[data->entries]);
tools/tracing/rtla/src/timerlat_hist.c
683
data->hist[cpu].user[data->entries]);
tools/tracing/rtla/src/timerlat_hist.c
89
int cpu;
tools/tracing/rtla/src/timerlat_top.c
105
timerlat_top_update_sum(struct osnoise_tool *tool, int cpu, struct timerlat_top_cpu *sum)
tools/tracing/rtla/src/timerlat_top.c
108
struct timerlat_top_cpu *cpu_data = &data->cpu_data[cpu];
tools/tracing/rtla/src/timerlat_top.c
130
timerlat_top_update(struct osnoise_tool *tool, int cpu,
tools/tracing/rtla/src/timerlat_top.c
136
struct timerlat_top_cpu *cpu_data = &data->cpu_data[cpu];
tools/tracing/rtla/src/timerlat_top.c
172
int cpu = record->cpu;
tools/tracing/rtla/src/timerlat_top.c
180
timerlat_top_update(top, cpu, thread, latency);
tools/tracing/rtla/src/timerlat_top.c
305
static void timerlat_top_print(struct osnoise_tool *top, int cpu)
tools/tracing/rtla/src/timerlat_top.c
309
struct timerlat_top_cpu *cpu_data = &data->cpu_data[cpu];
tools/tracing/rtla/src/timerlat_top.c
321
trace_seq_printf(s, "%3d #%-9llu |", cpu, cpu_data->irq_count);
tools/tracing/rtla/src/timerlat_top.c
68
int cpu;
tools/tracing/rtla/src/timerlat_top.c
82
for (cpu = 0; cpu < nr_cpus; cpu++) {
tools/tracing/rtla/src/timerlat_top.c
83
data->cpu_data[cpu].min_irq = ~0;
tools/tracing/rtla/src/timerlat_top.c
84
data->cpu_data[cpu].min_thread = ~0;
tools/tracing/rtla/src/timerlat_top.c
85
data->cpu_data[cpu].min_user = ~0;
tools/tracing/rtla/src/timerlat_u.c
32
static int timerlat_u_main(int cpu, struct timerlat_u_params *params)
tools/tracing/rtla/src/timerlat_u.c
44
CPU_SET(cpu, &set);
tools/tracing/rtla/src/timerlat_u.c
48
debug_msg("Error setting user thread affinity %d, is the CPU online?\n", cpu);
tools/tracing/rtla/src/timerlat_u.c
76
snprintf(buffer, sizeof(buffer), "osnoise/per_cpu/cpu%d/timerlat_fd", cpu);
tools/tracing/rtla/src/timerlat_u.c
82
debug_msg("User-space timerlat pid %d on cpu %d\n", gettid(), cpu);
tools/tracing/rtla/src/timerlat_u.c
93
debug_msg("Leaving timerlat pid %d on cpu %d\n", gettid(), cpu);
tools/tracing/rtla/src/trace.c
119
int cpu, void *context)
tools/tracing/rtla/src/trace.c
143
int cpu, void *context)
tools/tracing/rtla/src/trace.h
38
int cpu, void *context);
tools/tracing/rtla/src/utils.c
123
int cpu;
tools/tracing/rtla/src/utils.c
131
cpu = atoi(p);
tools/tracing/rtla/src/utils.c
132
if (cpu < 0 || (!cpu && *p != '0') || cpu >= nr_cpus)
tools/tracing/rtla/src/utils.c
140
if (end_cpu < cpu || (!end_cpu && *p != '0') || end_cpu >= nr_cpus)
tools/tracing/rtla/src/utils.c
145
end_cpu = cpu;
tools/tracing/rtla/src/utils.c
147
if (cpu == end_cpu) {
tools/tracing/rtla/src/utils.c
148
debug_msg("cpu_set: adding cpu %d\n", cpu);
tools/tracing/rtla/src/utils.c
149
CPU_SET(cpu, set);
tools/tracing/rtla/src/utils.c
151
for (i = cpu; i <= end_cpu; i++) {
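
A hedged reconstruction of the cpu-list parsing the fragments above belong to: "0,2-4" style strings become a cpu_set_t. It is simplified relative to rtla's parser (no whitespace handling) but keeps the same validation checks shown above:

#define _GNU_SOURCE
#include <sched.h>
#include <stdlib.h>

static int parse_cpu_list(const char *cpu_list, cpu_set_t *set, int nr_cpus)
{
	const char *p = cpu_list;

	CPU_ZERO(set);
	while (*p) {
		int cpu = atoi(p);
		int end_cpu = cpu;
		int i;

		/* Reject negatives, non-numbers, and out-of-range CPUs. */
		if (cpu < 0 || (!cpu && *p != '0') || cpu >= nr_cpus)
			return -1;
		while (*p >= '0' && *p <= '9')
			p++;
		if (*p == '-') {		/* "first-last" range */
			p++;
			end_cpu = atoi(p);
			if (end_cpu < cpu || (!end_cpu && *p != '0') ||
			    end_cpu >= nr_cpus)
				return -1;
			while (*p >= '0' && *p <= '9')
				p++;
		}
		for (i = cpu; i <= end_cpu; i++)
			CPU_SET(i, set);
		if (*p == ',')
			p++;
		else if (*p)
			return -1;		/* unexpected character */
	}
	return 0;
}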
tools/tracing/rtla/src/utils.c
557
int save_cpu_idle_disable_state(unsigned int cpu)
tools/tracing/rtla/src/utils.c
564
nr_states = cpuidle_state_count(cpu);
tools/tracing/rtla/src/utils.c
576
saved_cpu_idle_disable_state[cpu] = calloc(nr_states, sizeof(unsigned int));
tools/tracing/rtla/src/utils.c
577
if (!saved_cpu_idle_disable_state[cpu])
tools/tracing/rtla/src/utils.c
582
disabled = cpuidle_is_state_disabled(cpu, state);
tools/tracing/rtla/src/utils.c
585
saved_cpu_idle_disable_state[cpu][state] = disabled;
tools/tracing/rtla/src/utils.c
599
int restore_cpu_idle_disable_state(unsigned int cpu)
tools/tracing/rtla/src/utils.c
606
nr_states = cpuidle_state_count(cpu);
tools/tracing/rtla/src/utils.c
615
if (!saved_cpu_idle_disable_state[cpu])
tools/tracing/rtla/src/utils.c
617
disabled = saved_cpu_idle_disable_state[cpu][state];
tools/tracing/rtla/src/utils.c
618
result = cpuidle_state_disable(cpu, state, disabled);
tools/tracing/rtla/src/utils.c
623
free(saved_cpu_idle_disable_state[cpu]);
tools/tracing/rtla/src/utils.c
624
saved_cpu_idle_disable_state[cpu] = NULL;
tools/tracing/rtla/src/utils.c
646
int cpu;
tools/tracing/rtla/src/utils.c
654
for (cpu = 0; cpu < nr_cpus; cpu++) {
tools/tracing/rtla/src/utils.c
655
free(saved_cpu_idle_disable_state[cpu]);
tools/tracing/rtla/src/utils.c
656
saved_cpu_idle_disable_state[cpu] = NULL;
tools/tracing/rtla/src/utils.c
674
int set_deepest_cpu_idle_state(unsigned int cpu, unsigned int deepest_state)
tools/tracing/rtla/src/utils.c
680
nr_states = cpuidle_state_count(cpu);
tools/tracing/rtla/src/utils.c
683
result = cpuidle_state_disable(cpu, state, 1);
tools/tracing/rtla/src/utils.h
73
int save_cpu_idle_disable_state(unsigned int cpu);
tools/tracing/rtla/src/utils.h
74
int restore_cpu_idle_disable_state(unsigned int cpu);
tools/tracing/rtla/src/utils.h
76
int set_deepest_cpu_idle_state(unsigned int cpu, unsigned int state);
tools/tracing/rtla/src/utils.h
79
static inline int save_cpu_idle_disable_state(unsigned int cpu) { return -1; }
tools/tracing/rtla/src/utils.h
80
static inline int restore_cpu_idle_disable_state(unsigned int cpu) { return -1; }
tools/tracing/rtla/src/utils.h
82
static inline int set_deepest_cpu_idle_state(unsigned int cpu, unsigned int state) { return -1; }
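
The static inline stubs above are the no-support fallback: when the idle-state helpers are compiled out, every call fails with -1 and callers need no special casing. The guard macro in this sketch is illustrative; the header's real condition may differ:

#ifdef HAVE_LIBCPUPOWER_SUPPORT		/* illustrative guard name */
int set_deepest_cpu_idle_state(unsigned int cpu, unsigned int state);
#else
static inline int set_deepest_cpu_idle_state(unsigned int cpu,
					     unsigned int state)
{
	return -1;	/* "not supported" looks like any other failure */
}
#endif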
tools/verification/rv/include/trace.h
16
int cpu, void *context);
tools/verification/rv/src/in_kernel.c
481
int cpu = record->cpu;
tools/verification/rv/src/in_kernel.c
496
trace_seq_printf(s, "%8lld [%03d] ", pid, cpu);
tools/verification/rv/src/trace.c
46
int cpu, void *context)
tools/virtio/ringtest/main.c
81
long int cpu;
tools/virtio/ringtest/main.c
87
cpu = strtol(arg, &endptr, 0);
tools/virtio/ringtest/main.c
90
assert(cpu >= 0 && cpu < CPU_SETSIZE);
tools/virtio/ringtest/main.c
94
CPU_SET(cpu, &cpuset);
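
A minimal sketch assembling the three fragments above (parse, bound-check, set): pin the calling process to the CPU named on the command line. Error handling is pared down relative to the real tool:

#define _GNU_SOURCE
#include <assert.h>
#include <sched.h>
#include <stdlib.h>

static void pin_to_cpu(const char *arg)
{
	cpu_set_t cpuset;
	char *endptr;
	long cpu;

	cpu = strtol(arg, &endptr, 0);
	assert(endptr != arg && !*endptr);	/* whole string consumed */
	assert(cpu >= 0 && cpu < CPU_SETSIZE);

	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	if (sched_setaffinity(0, sizeof(cpuset), &cpuset))	/* 0 == self */
		exit(1);
}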
tools/virtio/virtio-trace/trace-agent-rw.c
40
void *rw_thread_init(int cpu, const char *in_path, const char *out_path,
tools/virtio/virtio-trace/trace-agent-rw.c
46
rw_ti->cpu_num = cpu;
tools/virtio/virtio-trace/trace-agent-rw.c
51
pr_err("Could not open in_fd (CPU:%d)\n", cpu);
tools/virtio/virtio-trace/trace-agent-rw.c
60
pr_err("Could not open out_fd (CPU:%d)\n", cpu);
tools/virtio/virtio-trace/trace-agent-rw.c
68
pr_err("Could not create pipe in rw-thread(%d)\n", cpu);
tools/virtio/virtio-trace/trace-agent-rw.c
77
pr_err("Could not change pipe size in rw-thread(%d)\n", cpu);
tools/virtio/virtio-trace/trace-agent.c
156
int cpu;
tools/virtio/virtio-trace/trace-agent.c
161
for (cpu = 0; cpu < s->cpus; cpu++) {
tools/virtio/virtio-trace/trace-agent.c
163
in_path = make_input_path(cpu);
tools/virtio/virtio-trace/trace-agent.c
169
out_path = make_output_path(cpu);
tools/virtio/virtio-trace/trace-agent.c
176
rw_thread_init(cpu, in_path, out_path, s->use_stdout,
tools/virtio/virtio-trace/trace-agent.c
177
s->pipe_size, s->rw_ti[cpu]);
tools/virtio/virtio-trace/trace-agent.c
224
int cpu;
tools/virtio/virtio-trace/trace-agent.c
228
for (cpu = 0; cpu < s->cpus; cpu++)
tools/virtio/virtio-trace/trace-agent.c
229
rw_thread_per_cpu[cpu] = rw_thread_run(s->rw_ti[cpu]);
tools/virtio/virtio-trace/trace-agent.c
234
for (cpu = 0; cpu < s->cpus; cpu++) {
tools/virtio/virtio-trace/trace-agent.c
237
ret = pthread_join(rw_thread_per_cpu[cpu], NULL);
tools/virtio/virtio-trace/trace-agent.c
239
pr_err("pthread_join() error:%d (cpu %d)\n", ret, cpu);
tools/virtio/virtio-trace/trace-agent.h
58
extern void *rw_thread_init(int cpu, const char *in_path, const char *out_path,
virt/kvm/kvm_main.c
167
int cpu = get_cpu();
virt/kvm/kvm_main.c
171
kvm_arch_vcpu_load(vcpu, cpu);
virt/kvm/kvm_main.c
220
int cpu;
virt/kvm/kvm_main.c
239
cpu = READ_ONCE(vcpu->cpu);
virt/kvm/kvm_main.c
240
if (cpu != -1 && cpu != current_cpu)
virt/kvm/kvm_main.c
241
__cpumask_set_cpu(cpu, tmp);
virt/kvm/kvm_main.c
3818
int me, cpu;
virt/kvm/kvm_main.c
3844
cpu = READ_ONCE(vcpu->cpu);
virt/kvm/kvm_main.c
3845
if (cpu != me && (unsigned int)cpu < nr_cpu_ids && cpu_online(cpu)) {
virt/kvm/kvm_main.c
3854
smp_call_function_single(cpu, ack_kick, NULL, wait);
virt/kvm/kvm_main.c
3856
smp_send_reschedule(cpu);
virt/kvm/kvm_main.c
445
vcpu->cpu = -1;
virt/kvm/kvm_main.c
5613
static int kvm_online_cpu(unsigned int cpu)
virt/kvm/kvm_main.c
5633
static int kvm_offline_cpu(unsigned int cpu)
virt/kvm/kvm_main.c
6382
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
virt/kvm/kvm_main.c
6390
kvm_arch_vcpu_load(vcpu, cpu);
virt/kvm/kvm_main.c
6490
int cpu;
virt/kvm/kvm_main.c
6505
for_each_possible_cpu(cpu) {
virt/kvm/kvm_main.c
6506
if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
virt/kvm/kvm_main.c
6507
GFP_KERNEL, cpu_to_node(cpu))) {
virt/kvm/kvm_main.c
6567
for_each_possible_cpu(cpu)
virt/kvm/kvm_main.c
6568
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
virt/kvm/kvm_main.c
6576
int cpu;
virt/kvm/kvm_main.c
6588
for_each_possible_cpu(cpu)
virt/kvm/kvm_main.c
6589
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));