SCHED_CAPACITY_SHIFT
>> (SCHED_CAPACITY_SHIFT+1);
>> (SCHED_CAPACITY_SHIFT-1)) + 1;
ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT);
scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
freq >>= SCHED_CAPACITY_SHIFT;
/*
 * Per-CPU max-frequency scale factor, read-mostly.  Initialized to
 * 1 << (2 * SCHED_CAPACITY_SHIFT), i.e. SCHED_CAPACITY_SCALE squared —
 * presumably because the consumer divides a doubly-scaled quantity by
 * this value to land back in SCHED_CAPACITY_SCALE units; NOTE(review):
 * the reader/writer sites are not visible here — confirm against them.
 */
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale) = 1UL << (2 * SCHED_CAPACITY_SHIFT);
div_u64(cap << SCHED_CAPACITY_SHIFT, max_cap));
div_u64(cap_freq << SCHED_CAPACITY_SHIFT, base_freq));
if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
perf <<= SCHED_CAPACITY_SHIFT;
cur_freq <<= SCHED_CAPACITY_SHIFT;
/*
 * Fixed-point unit for CPU capacity/utilization arithmetic:
 * SCHED_CAPACITY_SCALE == 1 << SCHED_CAPACITY_SHIFT, so a value of
 * SCHED_CAPACITY_SCALE represents 100% of a CPU's capacity and
 * x >> SCHED_CAPACITY_SHIFT converts a scaled product back to plain units.
 */
# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
req.util = req.percent << SCHED_CAPACITY_SHIFT;
return (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
-(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;
sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;
u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
/*
 * cap_scale(v, s): scale value @v by capacity factor @s, where @s is a
 * fixed-point fraction in SCHED_CAPACITY_SCALE units — i.e. computes
 * v * s / SCHED_CAPACITY_SCALE via a right shift.  Both arguments are
 * evaluated exactly once each, but as with any macro, callers should
 * avoid side-effecting arguments.  The product v*s must fit the
 * arguments' promoted type; callers are responsible for width.
 */
#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT);