SCHED_CAPACITY_SCALE
scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
#define CPU_CAPACITY_HIGH SCHED_CAPACITY_SCALE
#define CPU_CAPACITY_LOW (SCHED_CAPACITY_SCALE >> 3)
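/*
 * Worked values: SCHED_CAPACITY_SCALE is 1 << SCHED_CAPACITY_SHIFT == 1024,
 * so the low threshold is one eighth (12.5%) of full scale. Hypothetical
 * sanity checks, not from the source:
 */
static_assert(CPU_CAPACITY_HIGH == 1024);
static_assert(CPU_CAPACITY_LOW == 128);	/* 1024 >> 3 */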
perf_ratio = (div_u64(numerator * SCHED_CAPACITY_SCALE, nominal_perf) + SCHED_CAPACITY_SCALE) >> 1;
turbo_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE, base_freq);
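/*
 * Worked example with hypothetical perf/frequency values: both ratios are
 * 1024-based fixed point, so 1024 means "nominal" and 1536 means 1.5x.
 *
 *   numerator = 2200, nominal_perf = 1600:
 *     boost ratio = 2200 * 1024 / 1600 = 1408
 *     perf_ratio  = (1408 + 1024) >> 1 = 1216   (midpoint toward nominal)
 *
 *   turbo_freq = 3000 MHz, base_freq = 2000 MHz:
 *     turbo_ratio = 3000 * 1024 / 2000 = 1536
 */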
per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE;
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
per_cpu_ptr(arch_cpu_scale, cpu)->capacity = SCHED_CAPACITY_SCALE;
return SCHED_CAPACITY_SCALE;
if (freq_scale > SCHED_CAPACITY_SCALE)
freq_scale = SCHED_CAPACITY_SCALE;
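/*
 * A condensed sketch (hypothetical helper, assumed shape of the APERF/MPERF
 * path, not the exact kernel code): the delivered/maximum frequency ratio is
 * expressed in 1024-based fixed point and clamped so turbo excursions never
 * report more than full capacity.
 */
static unsigned long compute_freq_scale(u64 acnt, u64 mcnt, u64 max_freq_ratio)
{
	u64 freq_scale;

	if (!mcnt)
		return SCHED_CAPACITY_SCALE;	/* no reference ticks: assume full speed */

	/*
	 * acnt/mcnt is f_delivered/f_base and max_freq_ratio is
	 * 1024 * f_max/f_base, so the quotient is 1024 * f_delivered/f_max.
	 */
	freq_scale = div64_u64(acnt << (2 * SCHED_CAPACITY_SHIFT),
			       mcnt * max_freq_ratio);

	if (freq_scale > SCHED_CAPACITY_SCALE)
		freq_scale = SCHED_CAPACITY_SCALE;

	return freq_scale;
}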
static u64 arch_turbo_freq_ratio = SCHED_CAPACITY_SCALE;
static u64 arch_max_freq_ratio = SCHED_CAPACITY_SCALE;
arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE : arch_turbo_freq_ratio;
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
#define CPPC_EM_COST_GAP (4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP \
			  / CPPC_EM_CAP_STEP)
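/*
 * Worked arithmetic under assumed step constants (hypothetical here; check
 * the cppc_cpufreq driver for the real values): with CPPC_EM_COST_STEP == 1
 * and CPPC_EM_CAP_STEP == 20, CPPC_EM_COST_GAP == 4 * 1024 * 1 / 20 == 204,
 * the fixed offset the driver builds into its synthetic energy-model cost
 * curve.
 */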
scale = min(scale, SCHED_CAPACITY_SCALE);
unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
return SCHED_CAPACITY_SCALE;
.max_allowed_capacity = SCHED_CAPACITY_SCALE,
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
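/*
 * A minimal restatement of the invariant these sysctls must satisfy
 * (hypothetical helper, not from the source): all three values share the
 * [0, SCHED_CAPACITY_SCALE] range and min may not exceed max.
 */
static bool uclamp_sysctl_valid(unsigned int util_min, unsigned int util_max,
				unsigned int rt_default)
{
	return util_min <= util_max &&
	       util_max <= SCHED_CAPACITY_SCALE &&
	       rt_default <= SCHED_CAPACITY_SCALE;
}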
rq->cpu_capacity = SCHED_CAPACITY_SCALE;
.util = SCHED_CAPACITY_SCALE,
if (util_clamp == SCHED_CAPACITY_SCALE) {
#define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
sg_cpu->iowait_boost = min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
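/*
 * Each consecutive I/O-wait wakeup doubles the boost, saturating at full
 * scale; a fresh boost starts at IOWAIT_BOOST_MIN. Worked sequence, not code
 * from the source: 128 -> 256 -> 512 -> 1024, then pinned at
 * SCHED_CAPACITY_SCALE until the boost decays.
 */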
arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE,
sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / sgs->group_capacity;
sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / sgs->group_capacity;
do_div(tmp, 10000 * SCHED_CAPACITY_SCALE);
tmp = min_t(long, tmp, SCHED_CAPACITY_SCALE);
y = SCHED_CAPACITY_SCALE - tmp;
do_div(y, SCHED_CAPACITY_SCALE);
local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) / local->group_capacity;
sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) / sds->total_capacity;
env->imbalance = min(
	(busiest->avg_load - sds->avg_load) * busiest->group_capacity,
	(sds->avg_load - local->avg_load) * local->group_capacity
) / SCHED_CAPACITY_SCALE;
sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) / sds.total_capacity;
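/*
 * avg_load is load per unit of capacity in 1024-based fixed point, which lets
 * groups of different sizes and CPU types compare on one scale. Worked
 * example with hypothetical numbers:
 *
 *   group_load = 2048, group_capacity = 512:  avg_load = 2048 * 1024 / 512  = 4096
 *   group_load = 2048, group_capacity = 2048: avg_load = 2048 * 1024 / 2048 = 1024
 *
 * Equal raw load, but the low-capacity group is four times as loaded relative
 * to what it can actually deliver.
 */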
#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
uclamp_max_fits = (capacity_orig == SCHED_CAPACITY_SCALE) && (uclamp_max == SCHED_CAPACITY_SCALE);
unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
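/*
 * Worked bit budget on a 64-bit kernel: bits_per(SCHED_CAPACITY_SCALE) is 11
 * (values 0..1024 need 11 bits), leaving BITS_PER_LONG - 11 == 53 bits for
 * the task count, so one uclamp bucket packs into a single unsigned long.
 */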
return SCHED_CAPACITY_SCALE;
return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
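/*
 * Deadline bandwidth uses BW_SHIFT == 20 fractional bits, so this converts a
 * "fraction of one CPU" into capacity units. Worked example with a
 * hypothetical reservation: running_bw = 524288 (0.5 in Q20 fixed point)
 * gives (524288 * 1024) >> 20 == 512, i.e. half of SCHED_CAPACITY_SCALE.
 */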
return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util;
return SCHED_CAPACITY_SCALE;
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
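/*
 * Worked example assuming the default CONFIG_UCLAMP_BUCKETS_COUNT of 5:
 * UCLAMP_BUCKET_DELTA == DIV_ROUND_CLOSEST(1024, 5) == 205, so a clamp value
 * maps to bucket value / 205: 0 -> bucket 0, 512 -> bucket 2, and
 * 1024 -> bucket 4, the last bucket absorbing the rounding remainder.
 */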
return SCHED_CAPACITY_SCALE;
return SCHED_CAPACITY_SCALE;
if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
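/*
 * The "+ 1" on both sides lets one unsigned compare accept the sentinel -1
 * ("leave this clamp unchanged") as well as every value in [0, 1024]:
 * (unsigned int)-1 + 1 wraps to 0, which never exceeds
 * SCHED_CAPACITY_SCALE + 1. A minimal restatement (hypothetical helper, not
 * from the source):
 */
static bool uclamp_value_invalid(unsigned int v)
{
	/* true only for 1025 .. UINT_MAX - 1; both -1 and 0..1024 pass */
	return v + 1 > SCHED_CAPACITY_SCALE + 1;
}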
sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
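/*
 * Until real capacities are computed, every scheduling group starts neutral:
 * full scale times the number of CPUs it spans. Worked example with a
 * hypothetical topology: a group spanning 4 CPUs initializes to
 * 4 * 1024 == 4096, to be rescaled later from arch_scale_cpu_capacity() and
 * RT/IRQ pressure.
 */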