rq_clock_pelt

rq_clock_pelt() returns a runqueue's PELT clock: a variant of the task clock scaled for frequency and CPU-capacity invariance, used as the "now" timestamp for the per-rq PELT signal updates. Its call sites fall into a few groups, annotated below.
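For context, the helper itself is small. A sketch of its definition as found in kernel/sched/pelt.h in recent kernels:

	/*
	 * rq->clock_pelt advances slower than rq->clock_task while the CPU
	 * runs below max capacity/frequency; rq->lost_idle_time is the
	 * correction that lets the PELT clock resync with the task clock
	 * across idle periods.
	 */
	static inline u64 rq_clock_pelt(struct rq *rq)
	{
		lockdep_assert_rq_held(rq);
		assert_clock_updated(rq);

		return rq->clock_pelt - rq->lost_idle_time;
	}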
Deadline class (kernel/sched/deadline.c): the rq-wide DL utilization signal is refreshed against the PELT clock; the third argument is the "running" flag:

update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);	/* no DL task has been running */
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);	/* a DL task has been running */
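What the flag feeds is easiest to see in the updater itself. A sketch of update_dl_rq_load_avg() from kernel/sched/pelt.c, as in recent kernels (update_rt_rq_load_avg() is structurally identical, operating on rq->avg_rt instead):

	/*
	 * The rq-level DL signal has no per-entity weight: the load,
	 * runnable and running inputs all follow the same 0/1 "running"
	 * value, accumulated or decayed up to @now.
	 */
	int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
	{
		if (___update_load_sum(now, &rq->avg_dl,
					running,
					running,
					running)) {

			___update_load_avg(&rq->avg_dl, 1);
			trace_pelt_dl_tp(rq);
			return 1;
		}

		return 0;
	}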
CFS bandwidth throttling (kernel/sched/fair.c): while a cfs_rq is throttled its PELT clock is frozen. The throttle path records the freeze point, and the unthrottle path accumulates the frozen span:

cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
				     cfs_rq->throttled_clock_pelt;
A child cfs_rq attached under an already-throttled hierarchy (sync_throttle()) initializes its freeze point from its CPU's runqueue:

cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
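Trimmed to the PELT-relevant lines, the throttle/unthrottle tree walkers in kernel/sched/fair.c look roughly like this (other bookkeeping elided):

	static int tg_throttle_down(struct task_group *tg, void *data)
	{
		struct rq *rq = data;
		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

		/* entering the throttled state: remember where the clock stopped */
		if (!cfs_rq->throttle_count)
			cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
		cfs_rq->throttle_count++;

		return 0;
	}

	static int tg_unthrottle_up(struct task_group *tg, void *data)
	{
		struct rq *rq = data;
		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

		cfs_rq->throttle_count--;
		if (!cfs_rq->throttle_count)
			/* leaving the throttled state: account the frozen span */
			cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
							     cfs_rq->throttled_clock_pelt;

		return 0;
	}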
The blocked-averages pass samples the PELT clock once and reuses it for every per-class update (see the sketch below):

u64 now = rq_clock_pelt(rq);
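A trimmed sketch of __update_blocked_others() in kernel/sched/fair.c, assuming a recent kernel (the thermal/hw-pressure term varies by version and is elided here):

	static bool __update_blocked_others(struct rq *rq, bool *done)
	{
		const struct sched_class *curr_class = rq->curr->sched_class;
		u64 now = rq_clock_pelt(rq);
		bool decayed;

		/*
		 * "running" is 1 only for the class that currently owns the
		 * CPU, so the other classes' signals decay toward zero.
		 */
		decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
			  update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
			  update_irq_load_avg(rq, 0);

		if (others_have_blocked(rq))
			*done = false;

		return decayed;
	}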
cfs_rq_clock_pelt() in kernel/sched/pelt.h derives a per-cfs_rq clock by subtracting the accumulated throttled time; without CONFIG_CFS_BANDWIDTH it degenerates to the rq clock:

return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;	/* CONFIG_CFS_BANDWIDTH */
return rq_clock_pelt(rq_of(cfs_rq));						/* !CONFIG_CFS_BANDWIDTH */
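The two returns belong to the two build variants of the same helper; a sketch, with field names as in recent kernels:

	#ifdef CONFIG_CFS_BANDWIDTH
	/*
	 * While throttled, hold the clock at the value it had when the group
	 * was frozen, so its PELT signals neither decay nor accrue.
	 */
	static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
	{
		if (unlikely(cfs_rq->throttle_count))
			return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;

		return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
	}
	#else
	static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
	{
		return rq_clock_pelt(rq_of(cfs_rq));
	}
	#endif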
When the runqueue goes idle, the PELT clock is snapshotted (via u64_u32_store(), so 32-bit readers see a consistent value) for later estimation of the idle lag:

u64_u32_store(rq->clock_pelt_idle, rq_clock_pelt(rq));
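The snapshot lives in _update_idle_rq_clock_pelt() in kernel/sched/pelt.h; roughly, with field and barrier details as of around v6.0:

	static inline void _update_idle_rq_clock_pelt(struct rq *rq)
	{
		/* going idle: resync the PELT clock with the task clock */
		rq->clock_pelt = rq_clock_task(rq);

		u64_u32_store(rq->clock_idle, rq_clock(rq));
		/* paired with an smp_rmb() in migrate_se_pelt_lag() */
		smp_wmb();
		u64_u32_store(rq->clock_pelt_idle, rq_clock_pelt(rq));
	}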
RT class (kernel/sched/rt.c): mirrors the deadline case; the rq-wide RT utilization is refreshed with the same "running" flag convention:

update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);	/* no RT task has been running */
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);	/* an RT task has been running */
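These appear to correspond to the class-switch hooks; a trimmed sketch from kernel/sched/rt.c (deadline.c carries the analogous pair):

	static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
	{
		/* ... */
		/*
		 * If the previous task was RT, put_prev_task_rt() already
		 * updated the signal; only the switch into the RT class
		 * needs handling here.
		 */
		if (rq->curr->sched_class != &rt_sched_class)
			update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
		/* ... */
	}

	static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
	{
		update_curr_rt(rq);
		/* the outgoing RT task was running right up to this point */
		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
		/* ... */
	}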