Symbol: task_rq
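
task_rq(p) resolves a task to the runqueue of the CPU the task is
currently assigned to. Its definition site is kernel/sched/sched.h,
which appears below only through two call sites; as a sketch, assuming
the definition used by recent kernels, the macro expands roughly to:

	/* kernel/sched/sched.h (paraphrased; verify against your tree) */
	#define cpu_rq(cpu)	(&per_cpu(runqueues, (cpu)))
	#define task_rq(p)	cpu_rq(task_cpu(p))

Because task_cpu(p) changes when the task migrates, the value returned
by task_rq() is only stable while p->pi_lock and/or the runqueue lock
is held. Most of the references below either assert that (the
lockdep_assert_rq_held() hits) or re-check task_rq() after taking the
lock (the "task_rq(p) == rq" / "task_rq(p) != rq" hits).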

Referenced in 8 files:

kernel/sched/core.c
	   275: if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
	   724: rq = task_rq(p);
	   726: if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
	   746: rq = task_rq(p);
	   765: if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
	  1465: p->sched_class->reweight_task(task_rq(p), p, &lw);
	  2259: rq = task_rq(p);
	  2554: if (task_rq(p) == rq) {
	  2635: if (task_rq(p) != rq)
	  2654: if (task_rq(p) == rq) {
	  3274: lockdep_is_held(__rq_lockp(task_rq(p)))));
	  3303: src_rq = task_rq(p);
	  3664: atomic_dec(&task_rq(p)->nr_iowait);
	  4234: atomic_dec(&task_rq(p)->nr_iowait);
	  5447: struct sched_entity *curr = task_rq(p)->cfs.curr;
	  5969: return (task_rq(t)->idle == t);
	  8102: if (task_rq(p) == rq && task_on_rq_queued(p))
	 10629: if (task_rq(t)->curr == t)
	 10872: struct rq *rq = task_rq(p);
	 10919: struct rq *rq = task_rq(p);
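
The core.c hits at lines 724/726 and 746/765 are the lock helpers
themselves: __task_rq_lock() and task_rq_lock() read task_rq(), take
the runqueue lock, then re-read task_rq() and retry if the task
migrated in between. A simplified sketch of that pattern, paraphrasing
__task_rq_lock() (details vary across kernel versions):

	struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	{
		struct rq *rq;

		lockdep_assert_held(&p->pi_lock);

		for (;;) {
			/* Snapshot the task's current runqueue, then lock it. */
			rq = task_rq(p);
			raw_spin_rq_lock(rq);
			/* Still the right rq and not mid-migration? Done. */
			if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
				rq_pin_lock(rq, rf);
				return rq;
			}
			/* Raced with a migration: drop the lock and retry. */
			raw_spin_rq_unlock(rq);

			while (unlikely(task_on_rq_migrating(p)))
				cpu_relax();
		}
	}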

kernel/sched/deadline.c
	    81: rq = task_rq(dl_task_of(dl_se));
	   340: dl_rq_change_utilization(task_rq(p), &p->dl, new_bw);
	  2463: rq = task_rq(p);
	  2733: if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
	  2872: (task_rq(task) != rq ||
	  3100: rq = task_rq(p);

kernel/sched/ext.c
	  1207: lockdep_assert_rq_held(task_rq(p));
	  1301: struct rq *rq = task_rq(p);
	  1446: dsq = &task_rq(p)->scx.bypass_dsq;
	  1827: !WARN_ON_ONCE(src_rq != task_rq(p));
	  1866: struct rq *src_rq = task_rq(p), *dst_rq;
	  1930: struct rq *task_rq = task_rq(p);
	  1943: if (rq == task_rq) {
	  1951: if (likely(consume_remote_task(rq, p, dsq, task_rq)))
	  1987: struct rq *src_rq = task_rq(p);
	  2035: !WARN_ON_ONCE(src_rq != task_rq(p))) {
	  2654: !scx_rq_bypassing(task_rq(a)))
	  2682: rq_bypass = scx_rq_bypassing(task_rq(p));
	  3004: struct rq *rq = task_rq(p);
	  3032: struct rq *rq = task_rq(p);
	  3051: lockdep_assert_rq_held(task_rq(p));
	  3070: SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p),
	  3167: struct rq *rq = task_rq(p);
	  3213: lockdep_assert_rq_held(task_rq(p));
	  3260: lockdep_assert_rq_held(task_rq(p));
	  4426: update_rq_clock(task_rq(p));
	  6127: src_rq = task_rq(p);
	  6152: WARN_ON_ONCE(src_rq != task_rq(p))) {
	  7193: return task_rq(p)->curr == p;

kernel/sched/fair.c
	  1554: (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
	  6809: WARN_ON_ONCE(task_rq(p) != rq);
	  9790: WARN_ON_ONCE(task_rq(p) != rq);
	 13360: struct rq *rq = task_rq(a);
	 13367: WARN_ON_ONCE(task_rq(b)->core != rq->core);
	 13390: cfs_rqa = &task_rq(a)->cfs;
	 13391: cfs_rqb = &task_rq(b)->cfs;
	 13448: check_update_overutilized_status(task_rq(curr));

kernel/sched/rt.c
	   313: return task_rq(p);
	  1784: ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
	  1789: ret = cpupri_find(&task_rq(task)->rd->cpupri,

kernel/sched/sched.h
	  1702: return &task_rq(p)->cfs;
	  1708: struct rq *rq = task_rq(p);

kernel/sched/stats.h
	   142: if (task_on_cpu(task_rq(p), p))

kernel/sched/syscalls.c
	  1124: if (!cpumask_subset(task_rq(p)->rd->span, mask))
	  1415: p_rq = task_rq(p);
	  1424: if (task_rq(p) != p_rq)
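
The syscalls.c hits at lines 1415 and 1424 show the caller-side form of
the same recheck, as used by yield_to(): snapshot task_rq(p), take both
runqueue locks, and start over if the task moved before the locks were
acquired. A hedged sketch of that shape (simplified fragment; the
surrounding bookkeeping and the actual yield logic are omitted):

	unsigned long flags;
	struct rq *rq, *p_rq;

	local_irq_save(flags);
	rq = this_rq();
again:
	p_rq = task_rq(p);
	double_rq_lock(rq, p_rq);
	/* p may have migrated between the snapshot and the locks. */
	if (task_rq(p) != p_rq) {
		double_rq_unlock(rq, p_rq);
		goto again;
	}
	/* Both runqueues locked: p cannot leave p_rq here. */
	double_rq_unlock(rq, p_rq);
	local_irq_restore(flags);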