task_cpu
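task_cpu() reports the CPU a task is queued on, i.e. the CPU it is running on or last ran on. A minimal sketch of the SMP accessor, assuming the include/linux/sched.h definition with CONFIG_THREAD_INFO_IN_TASK (on !SMP builds it simply returns 0):

static inline unsigned int task_cpu(const struct task_struct *p)
{
	/* Updated by set_task_cpu() under scheduler locks; lockless
	 * readers may see a stale CPU and must re-check or tolerate it. */
	return READ_ONCE(task_thread_info(p)->cpu);
}

Most of the call sites below either hold the relevant runqueue lock, re-validate the value after acting on it, or are content with a best-effort answer.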
cpumask_set_cpu(task_cpu(t), &process_cpus);
level, task_cpu(current), cr30, cr31);
unsigned long cpu = task_cpu(p);
unsigned long cpu = task_cpu(p);
unsigned long cpu = task_cpu(p);
state, task_cpu(tsk),
qid = task_cpu(current);
seq_put_decimal_ll(m, " ", task_cpu(task));
cpumask_set_cpu(task_cpu(t), mask);
smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
unsigned int cpu = task_cpu(p);
.cpu_id = task_cpu(t),
return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
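This line is the owner_on_cpu() test that gates optimistic spinning in the mutex and rwsem slow paths: spinning is only worthwhile while the lock owner is executing on a CPU and, under virtualization, while that vCPU has not itself been preempted. A hedged sketch of how such a gate drives a spin loop; lock_owner(), lock_is_free() and struct my_lock are stand-ins, not kernel APIs:

static bool optimistic_spin(struct my_lock *lock)
{
	struct task_struct *owner;

	rcu_read_lock();	/* keep the owner's task_struct alive */
	for (owner = lock_owner(lock); owner; owner = lock_owner(lock)) {
		if (!READ_ONCE(owner->on_cpu) ||
		    vcpu_is_preempted(task_cpu(owner)))
			break;	/* owner off-CPU: sleeping beats spinning */
		if (lock_is_free(lock))
			break;	/* released: go attempt the acquisition */
		cpu_relax();
	}
	rcu_read_unlock();
	return lock_is_free(lock);
}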
return task_cpu(t);
return cpu_to_node(task_cpu(p));
__entry->target_cpu = task_cpu(p);
__entry->orig_cpu = task_cpu(p);
ret = smp_call_function_single(task_cpu(p), remote_function,
if (task_cpu(p) != smp_processor_id())
wtp == NULL ? -1 : (int)task_cpu(wtp));
cpu = task_cpu(t);
cpu = task_cpu(t);
cpu = task_cpu(t);
rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
cpu = task_cpu(rcuc);
cpu = gpk ? task_cpu(gpk) : -1;
cpu = task_cpu(gpk);
ids.cpu_id = task_cpu(t);
mm_cid_transit_to_cpu(t, per_cpu_ptr(mm->mm_cid.pcpu, task_cpu(t)));
return cpu_curr(task_cpu(p)) == p;
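This is the task_curr()-style query: p counts as running exactly when it is the current task on the CPU that task_cpu() reports. A sketch of the surrounding helper, roughly as it appears in kernel/sched/core.c; the answer is immediately stale unless the caller holds p's runqueue lock:

inline int task_curr(const struct task_struct *p)
{
	/* Racy without p's runqueue lock held. */
	return cpu_curr(task_cpu(p)) == p;
}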
WARN_ON_ONCE(task_cpu(p) != new_cpu);
if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask) ||
!cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
if (task_cpu(p) != new_cpu) {
if (task_cpu(arg->dst_task) != arg->dst_cpu)
if (task_cpu(arg->src_task) != arg->src_cpu)
int cpu = task_cpu(p);
int cpu = task_cpu(p);
cpu = select_fallback_rq(task_cpu(p), p);
if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
if (task_cpu(p) != cpu) {
ttwu_stat(p, task_cpu(p), wake_flags);
p->recent_used_cpu = task_cpu(p);
__set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
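The stop_one_cpu()/stop_one_cpu_nowait() call sites are the migration pattern: to move a task that may be running, the scheduler executes migration_cpu_stop() on the task's current CPU via the per-CPU stopper thread, which preempts the task so it can be detached and re-queued elsewhere. A sketch of the calling shape, with struct migration_arg abbreviated to its two essential fields:

struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };

/* Runs migration_cpu_stop() at stopper priority on p's CPU;
 * it deactivates p there and activates it on dest_cpu. */
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);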
if (task_cpu(owner) != this_cpu) {
if (!task_on_rq_queued(owner) || task_cpu(owner) != this_cpu)
int curr_cpu = task_cpu(p);
if (task_cpu(p) != cpu)
set_task_rq(tsk, task_cpu(tsk));
unsigned int cpu = task_cpu(tsk);
(cpu == task_cpu(p) && cap == max_cap)) {
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
int cpu = task_cpu(task);
WARN_ON_ONCE(rq->cpu != task_cpu(p));
__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
int cpus, err = -1, cpu = task_cpu(p);
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
__dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
if (task_cpu(p) != rq_cpu)
WARN_ON_ONCE(task_cpu(p) == cpu);
p->comm, p->pid, task_cpu(p), cpu);
return sch->global_dsqs[cpu_to_node(task_cpu(p))];
return task_cpu(p);
if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
set_task_rq(p, task_cpu(p));
.src_cpu = task_cpu(p),
int src_nid = cpu_to_node(task_cpu(p));
if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
if (p && task_cpu(p) == cpu && dst_cpu != cpu)
else if (p && task_cpu(p) != cpu && dst_cpu == cpu)
if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
return task_cpu(p); /* IDLE tasks never migrate */
int cpu = task_cpu(curr);
task->pid, task->comm, task_cpu(task),
int cpu = task_cpu(task);
int cpu = task_cpu(prev);
int cpu = task_cpu(task);
BUG_ON(rq->cpu != task_cpu(p));
#define task_rq(p) cpu_rq(task_cpu(p))
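task_rq() resolves a task to the runqueue of the CPU it currently occupies. Since the task can migrate between reading task_cpu() and acquiring the lock, task_rq_lock() re-checks the mapping after locking and retries on a mismatch; a sketch of the standard usage, assuming the kernel/sched/sched.h helpers:

struct rq_flags rf;
struct rq *rq;

rq = task_rq_lock(p, &rf);	/* loops until rq == task_rq(p) holds */
/* ... p cannot migrate here, so task_cpu(p) == cpu_of(rq) ... */
task_rq_unlock(rq, p, &rf);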
int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
return task_cpu(p); /* stop tasks never migrate */
struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));
cpu = task_cpu(tsk);
long cpu = task_cpu(p);
entry->next_cpu = task_cpu(next);
entry->next_cpu = task_cpu(wakee);
wakeup_cpu = task_cpu(p);
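Finally, the smp_call_function_single() lines above (resctrl, perf) share one idiom: aim an IPI at the CPU the task was last seen on, and let the callee re-check, since the task may have migrated before the IPI lands. A minimal sketch, with _update() as a hypothetical payload:

static void _update(void *info)
{
	struct task_struct *t = info;

	if (t != current)	/* t moved away; the caller retries or gives up */
		return;
	/* ... update per-CPU state on behalf of t ... */
}

/* wait=1: do not return until the target CPU has run _update(). */
smp_call_function_single(task_cpu(t), _update, t, 1);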