Symbol: this_rq
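this_rq() is the scheduler's accessor for the runqueue of the CPU the caller is currently running on. As a rough sketch of how it fits together (paraphrased from kernel/sched/sched.h in recent kernels; the exact form may differ between versions), it belongs to a small family of per-CPU runqueue accessors built on the runqueues per-CPU variable:

    /* Per-CPU runqueue storage and its accessors (sketch, see kernel/sched/sched.h). */
    DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

    #define cpu_rq(cpu)   (&per_cpu(runqueues, (cpu)))   /* runqueue of a given CPU */
    #define this_rq()     this_cpu_ptr(&runqueues)       /* runqueue of the current CPU */
    #define task_rq(p)    cpu_rq(task_cpu(p))            /* runqueue a task is queued on */
    #define raw_rq()      raw_cpu_ptr(&runqueues)        /* no preemption-safety checks */

Because the result is only stable while the caller cannot migrate, this_rq() is normally used with preemption disabled or under the runqueue lock. Note that many of the hits below use this_rq not as the macro but as the name of an explicit struct rq * parameter (for example in pull_rt_task(), pull_dl_task(), and the double_lock_balance() helpers).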
kernel/sched/core.c:2518: struct rq *rq = this_rq();
kernel/sched/core.c:2629: struct rq *lowest_rq = NULL, *rq = this_rq();
kernel/sched/core.c:3596: rq = this_rq();
kernel/sched/core.c:3737: struct rq *rq = this_rq();
kernel/sched/core.c:5004: __acquires(__rq_lockp(this_rq()))
kernel/sched/core.c:5022: __acquire(__rq_lockp(this_rq()));
kernel/sched/core.c:5113: __releases(__rq_lockp(this_rq()))
kernel/sched/core.c:5115: struct rq *rq = this_rq();
kernel/sched/core.c:5209: __releases(__rq_lockp(this_rq()))
kernel/sched/core.c:5882: schedstat_inc(this_rq()->sched_count);
kernel/sched/core.c:8087: struct rq *rq = this_rq();
kernel/sched/core.c:8136: if (!cpu_dying(rq->cpu) || rq != this_rq())
kernel/sched/core.c:8207: struct rq *rq = this_rq();
kernel/sched/core.c:938: if (rq == this_rq())
kernel/sched/cputime.c:227: struct rq *rq = this_rq();
kernel/sched/cputime.c:275: steal -= this_rq()->prev_steal_time;
kernel/sched/cputime.c:278: this_rq()->prev_steal_time += steal;
kernel/sched/cputime.c:408: } else if (p == this_rq()->idle) {
kernel/sched/cputime.c:501: else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
kernel/sched/deadline.c:2986: static void pull_dl_task(struct rq *this_rq)
kernel/sched/deadline.c:2988: int this_cpu = this_rq->cpu, cpu;
kernel/sched/deadline.c:2994: if (likely(!dl_overloaded(this_rq)))
kernel/sched/deadline.c:3003: for_each_cpu(cpu, this_rq->rd->dlo_mask) {
kernel/sched/deadline.c:3013: if (this_rq->dl.dl_nr_running &&
kernel/sched/deadline.c:3014: dl_time_before(this_rq->dl.earliest_dl.curr,
kernel/sched/deadline.c:3020: double_lock_balance(this_rq, src_rq);
kernel/sched/deadline.c:3037: dl_task_is_earliest_deadline(p, this_rq)) {
kernel/sched/deadline.c:3052: move_queued_task_locked(src_rq, this_rq, p);
kernel/sched/deadline.c:3060: double_unlock_balance(this_rq, src_rq);
kernel/sched/deadline.c:3064: raw_spin_rq_unlock(this_rq);
kernel/sched/deadline.c:3068: raw_spin_rq_lock(this_rq);
kernel/sched/deadline.c:3073: resched_curr(this_rq);
kernel/sched/ext.c:1830: static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
kernel/sched/ext.c:1833: raw_spin_rq_unlock(this_rq);
kernel/sched/ext.c:1836: move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
kernel/sched/ext.c:1840: raw_spin_rq_lock(this_rq);
kernel/sched/ext.c:2137: dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p);
kernel/sched/ext.c:5663: static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs)
kernel/sched/ext.c:5666: struct scx_rq *this_scx = &this_rq->scx;
kernel/sched/ext.c:5680: if ((cpu_online(cpu) || cpu == cpu_of(this_rq)) &&
kernel/sched/ext.c:5708: static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
kernel/sched/ext.c:5716: (cpu_online(cpu) || cpu == cpu_of(this_rq)))
kernel/sched/ext.c:5724: struct rq *this_rq = this_rq();
kernel/sched/ext.c:5725: struct scx_rq *this_scx = &this_rq->scx;
kernel/sched/ext.c:5739: should_wait |= kick_one_cpu(cpu, this_rq, ksyncs);
kernel/sched/ext.c:5745: kick_one_cpu_if_idle(cpu, this_rq);
kernel/sched/ext.c:5754: raw_spin_rq_lock(this_rq);
kernel/sched/ext.c:5756: resched_curr(this_rq);
kernel/sched/ext.c:5757: raw_spin_rq_unlock(this_rq);
kernel/sched/ext.c:6106: struct rq *this_rq, *src_rq, *locked_rq;
kernel/sched/ext.c:6130: this_rq = this_rq();
kernel/sched/ext.c:6131: in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
kernel/sched/ext.c:6134: if (this_rq != src_rq) {
kernel/sched/ext.c:6135: raw_spin_rq_unlock(this_rq);
kernel/sched/ext.c:6158: dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p);
kernel/sched/ext.c:6175: if (this_rq != locked_rq) {
kernel/sched/ext.c:6177: raw_spin_rq_lock(this_rq);
kernel/sched/ext.c:6575: struct rq *this_rq;
kernel/sched/ext.c:6583: this_rq = this_rq();
kernel/sched/ext.c:6590: if (scx_rq_bypassing(this_rq))
kernel/sched/ext.c:6611: cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
kernel/sched/ext.c:6613: cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
kernel/sched/ext.c:6616: cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
kernel/sched/ext.c:6618: cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
kernel/sched/ext.c:6621: irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
kernel/sched/ext.c:6668: ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
kernel/sched/ext.c:7025: rq = this_rq();
kernel/sched/ext.c:7350: rq = this_rq();
kernel/sched/fair.c:11865: static int sched_balance_rq(int this_cpu, struct rq *this_rq,
kernel/sched/fair.c:11878: .dst_rq = this_rq,
kernel/sched/fair.c:12627: WARN_ON_ONCE(rq != this_rq());
kernel/sched/fair.c:12733: static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
kernel/sched/fair.c:12740: int this_cpu = this_rq->cpu;
kernel/sched/fair.c:12836: static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
kernel/sched/fair.c:12838: unsigned int flags = this_rq->nohz_idle_balance;
kernel/sched/fair.c:12843: this_rq->nohz_idle_balance = 0;
kernel/sched/fair.c:12848: _nohz_idle_balance(this_rq, flags);
kernel/sched/fair.c:12882: static void nohz_newidle_balance(struct rq *this_rq)
kernel/sched/fair.c:12884: int this_cpu = this_rq->cpu;
kernel/sched/fair.c:12887: if (this_rq->avg_idle < sysctl_sched_migration_cost)
kernel/sched/fair.c:12905: static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
kernel/sched/fair.c:12910: static inline void nohz_newidle_balance(struct rq *this_rq) { }
kernel/sched/fair.c:12922: static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
kernel/sched/fair.c:12923: __must_hold(__rq_lockp(this_rq))
kernel/sched/fair.c:12926: int this_cpu = this_rq->cpu;
kernel/sched/fair.c:12932: update_misfit_status(NULL, this_rq);
kernel/sched/fair.c:12938: if (this_rq->ttwu_pending)
kernel/sched/fair.c:12946: this_rq->idle_stamp = rq_clock(this_rq);
kernel/sched/fair.c:12960: rq_unpin_lock(this_rq, rf);
kernel/sched/fair.c:12962: sd = rcu_dereference_sched_domain(this_rq->sd);
kernel/sched/fair.c:12966: if (!get_rd_overloaded(this_rq->rd) ||
kernel/sched/fair.c:12967: this_rq->avg_idle < sd->max_newidle_lb_cost) {
kernel/sched/fair.c:12979: __sched_balance_update_blocked_averages(this_rq);
kernel/sched/fair.c:12981: rq_modified_begin(this_rq, &fair_sched_class);
kernel/sched/fair.c:12982: raw_spin_rq_unlock(this_rq);
kernel/sched/fair.c:12989: if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
kernel/sched/fair.c:13010: pulled_task = sched_balance_rq(this_cpu, this_rq,
kernel/sched/fair.c:13034: raw_spin_rq_lock(this_rq);
kernel/sched/fair.c:13036: if (curr_cost > this_rq->max_idle_balance_cost)
kernel/sched/fair.c:13037: this_rq->max_idle_balance_cost = curr_cost;
kernel/sched/fair.c:13044: if (this_rq->cfs.h_nr_queued && !pulled_task)
kernel/sched/fair.c:13048: if (rq_modified_above(this_rq, &fair_sched_class))
kernel/sched/fair.c:13053: if (time_after(this_rq->next_balance, next_balance))
kernel/sched/fair.c:13054: this_rq->next_balance = next_balance;
kernel/sched/fair.c:13057: this_rq->idle_stamp = 0;
kernel/sched/fair.c:13059: nohz_newidle_balance(this_rq);
kernel/sched/fair.c:13061: rq_repin_lock(this_rq, rf);
kernel/sched/fair.c:13076: struct rq *this_rq = this_rq();
kernel/sched/fair.c:13077: enum cpu_idle_type idle = this_rq->idle_balance;
kernel/sched/fair.c:13086: if (nohz_idle_balance(this_rq, idle))
kernel/sched/fair.c:13090: sched_balance_update_blocked_averages(this_rq->cpu);
kernel/sched/fair.c:13091: sched_balance_domains(this_rq, idle);
kernel/sched/fair.c:4845: static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
kernel/sched/fair.c:4846: __must_hold(__rq_lockp(this_rq));
kernel/sched/fair.c:6139: if (rq == this_rq()) {
kernel/sched/fair.c:7888: this_rq()->nr_running <= 1 &&
kernel/sched/fair.c:8390: struct root_domain *rd = this_rq()->rd;
kernel/sched/fair.c:8601: if (!is_rd_overutilized(this_rq()->rd)) {
kernel/sched/idle.c:24: idle_set_state(this_rq(), idle_state);
kernel/sched/loadavg.c:253: calc_load_nohz_fold(this_rq());
kernel/sched/loadavg.c:267: struct rq *this_rq = this_rq();
kernel/sched/loadavg.c:272: this_rq->calc_load_update = READ_ONCE(calc_load_update);
kernel/sched/loadavg.c:273: if (time_before(jiffies, this_rq->calc_load_update))
kernel/sched/loadavg.c:281: if (time_before(jiffies, this_rq->calc_load_update + 10))
kernel/sched/loadavg.c:282: this_rq->calc_load_update += LOAD_FREQ;
kernel/sched/loadavg.c:387: void calc_global_load_tick(struct rq *this_rq)
kernel/sched/loadavg.c:391: if (time_before(jiffies, this_rq->calc_load_update))
kernel/sched/loadavg.c:394: delta = calc_load_fold_active(this_rq, 0);
kernel/sched/loadavg.c:398: this_rq->calc_load_update += LOAD_FREQ;
kernel/sched/loadavg.c:80: long calc_load_fold_active(struct rq *this_rq, long adjust)
kernel/sched/loadavg.c:84: nr_active = this_rq->nr_running - adjust;
kernel/sched/loadavg.c:85: nr_active += (long)this_rq->nr_uninterruptible;
kernel/sched/loadavg.c:87: if (nr_active != this_rq->calc_load_active) {
kernel/sched/loadavg.c:88: delta = nr_active - this_rq->calc_load_active;
kernel/sched/loadavg.c:89: this_rq->calc_load_active = nr_active;
kernel/sched/membarrier.c:240: struct rq *rq = this_rq();
kernel/sched/rt.c:2210: rq = this_rq();
kernel/sched/rt.c:2240: static void pull_rt_task(struct rq *this_rq)
kernel/sched/rt.c:2242: int this_cpu = this_rq->cpu, cpu;
kernel/sched/rt.c:2246: int rt_overload_count = rt_overloaded(this_rq);
kernel/sched/rt.c:2259: cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
kernel/sched/rt.c:2264: tell_cpu_to_push(this_rq);
kernel/sched/rt.c:2269: for_each_cpu(cpu, this_rq->rd->rto_mask) {
kernel/sched/rt.c:2283: this_rq->rt.highest_prio.curr)
kernel/sched/rt.c:2292: double_lock_balance(this_rq, src_rq);
kernel/sched/rt.c:2304: if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
kernel/sched/rt.c:2322: move_queued_task_locked(src_rq, this_rq, p);
kernel/sched/rt.c:2333: double_unlock_balance(this_rq, src_rq);
kernel/sched/rt.c:2337: raw_spin_rq_unlock(this_rq);
kernel/sched/rt.c:2341: raw_spin_rq_lock(this_rq);
kernel/sched/rt.c:2346: resched_curr(this_rq);
kernel/sched/rt.c:583: return this_rq()->rd->span;
kernel/sched/sched.h:107: extern void calc_global_load_tick(struct rq *this_rq);
kernel/sched/sched.h:108: extern long calc_load_fold_active(struct rq *this_rq, long adjust);
kernel/sched/sched.h:2004: rq = this_rq();
kernel/sched/sched.h:2585: void (*task_woken)(struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h:2621: void (*switching_from)(struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h:2622: void (*switched_from) (struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h:2623: void (*switching_to) (struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h:2624: void (*switched_to) (struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h:2625: u64 (*get_prio) (struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h:2626: void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
kernel/sched/sched.h:2633: void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
kernel/sched/sched.h:3169: static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
kernel/sched/sched.h:3170: __must_hold(__rq_lockp(this_rq))
kernel/sched/sched.h:3173: raw_spin_rq_unlock(this_rq);
kernel/sched/sched.h:3174: double_rq_lock(this_rq, busiest);
kernel/sched/sched.h:3187: static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
kernel/sched/sched.h:3188: __must_hold(__rq_lockp(this_rq))
kernel/sched/sched.h:3191: if (__rq_lockp(this_rq) == __rq_lockp(busiest)) {
kernel/sched/sched.h:3193: double_rq_clock_clear_update(this_rq, busiest);
kernel/sched/sched.h:3198: double_rq_clock_clear_update(this_rq, busiest);
kernel/sched/sched.h:3202: if (rq_order_less(this_rq, busiest)) {
kernel/sched/sched.h:3204: double_rq_clock_clear_update(this_rq, busiest);
kernel/sched/sched.h:3208: raw_spin_rq_unlock(this_rq);
kernel/sched/sched.h:3209: double_rq_lock(this_rq, busiest);
kernel/sched/sched.h:3219: static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
kernel/sched/sched.h:3220: __must_hold(__rq_lockp(this_rq))
kernel/sched/sched.h:3225: return _double_lock_balance(this_rq, busiest);
kernel/sched/sched.h:3228: static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
kernel/sched/sched.h:3231: if (__rq_lockp(this_rq) != __rq_lockp(busiest))
kernel/sched/sched.h:3235: lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
kernel/sched/syscalls.c:1411: rq = this_rq();
tools/testing/selftests/bpf/progs/test_access_variable_array.c:11: int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq,