/*
 * NOTE(review): This file appears to be a grep-style extraction of isolated
 * lines matching "rt_bandwidth" from the Linux kernel scheduler sources
 * (kernel/sched/rt.c, kernel/sched/core.c, kernel/sched/sched.h and
 * related files). No definition below is complete, and several lines are
 * truncated mid-statement. The comments added here describe only what each
 * fragment literally shows; confirm everything against the full kernel
 * tree before acting on any of it.
 */
/* Bare identifier — almost certainly the search token itself, not code. */
rt_bandwidth
/* Truncated call: initializing the root task group's bandwidth (arguments cut off). */
init_rt_bandwidth(&root_task_group.rt_bandwidth,
/* Recovering the enclosing rt_bandwidth from its embedded hrtimer
 * (rt_period_timer) via container_of — typical hrtimer-callback preamble. */
struct rt_bandwidth *rt_b =
container_of(timer, struct rt_bandwidth, rt_period_timer);
/* Arming the period timer for the rt_rq's owning task group's bandwidth. */
start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
/* Definition header — body not visible in this excerpt. */
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
/* Definition headers (static helpers) — bodies not visible. */
static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
/* Teardown call — presumably from task-group destruction; context not shown. */
destroy_rt_bandwidth(&tg->rt_bandwidth);
/* Reading a group's RT period (ktime -> ns) and runtime. */
period = ktime_to_ns(tg->rt_bandwidth.rt_period);
runtime = tg->rt_bandwidth.rt_runtime;
/* Truncated condition: group has nonzero runtime AND contains RT tasks. */
tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
/* New group starts with the global RT period and zero runtime —
 * i.e. child groups get no RT budget until one is explicitly assigned. */
init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(global_rt_period()), 0);
/* Reading a child group's period/runtime — likely part of a hierarchy walk. */
period = ktime_to_ns(child->rt_bandwidth.rt_period);
runtime = child->rt_bandwidth.rt_runtime;
/* Updating period/runtime under the bandwidth's rt_runtime_lock —
 * irq-disabling raw spinlock protects these two fields as a pair. */
raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
/* Assorted reads of period/runtime; RUNTIME_INF marks "unlimited" runtime. */
rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
rt_runtime_us = tg->rt_bandwidth.rt_runtime;
rt_runtime = tg->rt_bandwidth.rt_runtime;
/* NOTE(review): a *_us variable assigned a ns-based value — the unit
 * conversion (do_div by NSEC_PER_USEC upstream) must happen nearby;
 * not visible here, so cannot confirm. */
rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
/* Propagating the group's runtime into a per-CPU rt_rq. */
rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
/* Truncated condition: RT task attaching to a group with zero RT budget
 * (presumably rejected by the caller — context not shown). */
if (rt_group_sched_enabled() && rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
/* Group period accessor, via the rt_rq's owning task group. */
return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
/* Definition header — maps (rt_b, cpu) back to a per-CPU rt_rq; body below
 * appears to be the next line, but braces are not visible. */
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
/* Accessor header + probable body: rt_rq -> owning group's rt_bandwidth. */
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
return &rt_rq->tg->rt_bandwidth;
/* Repeated local declarations from several distinct call sites. */
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
/* Period-timer handler header and a special-case check for the root group. */
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
if (rt_b == &root_task_group.rt_bandwidth)
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
/* Duplicate signature — likely the same function seen at a second site. */
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
/* Forward / extern declarations (header material). */
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
/* Embedded member — presumably inside struct task_group; enclosing struct
 * not visible here. */
struct rt_bandwidth rt_bandwidth;
/* Truncated condition on a task's group having zero RT runtime. */
task_group(p)->rt_bandwidth.rt_runtime == 0 &&