#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
#include <linux/cpu_smt.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched/isolation.h>
#include <linux/xarray.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
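
/*
 * With FEAT_AMUv1, the core cycle counter counts at the CPU's current
 * frequency while the constant counter ticks at the fixed system counter
 * rate, which is what makes the pair usable for frequency invariance.
 * Without the extension both counters read as zero and the validity checks
 * below keep the feature disabled.
 */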
#ifdef CONFIG_ARM64_AMU_EXTN
#define read_corecnt() read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
#define read_constcnt() read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)
#else
#define read_corecnt() (0UL)
#define read_constcnt() (0UL)
#endif
#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt
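
/*
 * Start with a neutral ratio (as if ref_rate == max_rate) so that
 * amu_scale_freq_tick() produces sensible, clamped values until
 * freq_inv_set_max_ratio() has computed the real ratio for the CPU.
 */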
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale) = 1UL << (2 * SCHED_CAPACITY_SHIFT);
static cpumask_var_t amu_fie_cpus;
struct amu_cntr_sample {
	u64		arch_const_cycles_prev;	/* last constant counter snapshot */
	u64		arch_core_cycles_prev;	/* last core cycle counter snapshot */
	unsigned long	last_scale_update;	/* jiffies of the last scale factor update */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct amu_cntr_sample, cpu_amu_samples);
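
/* Snapshot the AMU counters of the CPU this is called on. */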
void update_freq_counters_refs(void)
{
struct amu_cntr_sample *amu_sample = this_cpu_ptr(&cpu_amu_samples);
amu_sample->arch_core_cycles_prev = read_corecnt();
amu_sample->arch_const_cycles_prev = read_constcnt();
}
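
/*
 * The AMU counters of @cpu can be used for FIE only if the CPU is present,
 * implements the AMU extension and its counters have been sampled at least
 * once (non-zero snapshots).
 */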
static inline bool freq_counters_valid(int cpu)
{
struct amu_cntr_sample *amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu);
if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
return false;
if (!cpu_has_amu_feat(cpu)) {
pr_debug("CPU%d: counters are not supported.\n", cpu);
return false;
}
if (unlikely(!amu_sample->arch_const_cycles_prev ||
!amu_sample->arch_core_cycles_prev)) {
pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
return false;
}
return true;
}
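
/*
 * Pre-compute the fixed ratio between the arch timer rate (which drives the
 * AMU constant counter) and the CPU's maximum frequency; it is consumed at
 * every tick to derive the frequency scale factor.
 */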
void freq_inv_set_max_ratio(int cpu, u64 max_rate)
{
u64 ratio, ref_rate = arch_timer_get_rate();
if (unlikely(!max_rate || !ref_rate)) {
WARN_ONCE(1, "CPU%d: invalid maximum or reference frequency.\n",
cpu);
return;
}
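
	/*
	 *                          ref_rate
	 *  arch_max_freq_scale =  ---------- * SCHED_CAPACITY_SCALE^2
	 *                          max_rate
	 *
	 * The extra factor of SCHED_CAPACITY_SCALE keeps enough resolution
	 * when ref_rate is much lower than max_rate; amu_scale_freq_tick()
	 * shifts it back out.
	 */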
ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT);
ratio = div64_u64(ratio, max_rate);
if (!ratio) {
WARN_ONCE(1, "Reference frequency too low.\n");
return;
}
WRITE_ONCE(per_cpu(arch_max_freq_scale, cpu), (unsigned long)ratio);
}
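
/*
 * Tick handler for AMU-based frequency invariance: derive arch_freq_scale,
 * the current-to-maximum frequency ratio in SCHED_CAPACITY_SCALE units, from
 * the per-tick deltas of the core and constant cycle counters.
 */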
static void amu_scale_freq_tick(void)
{
struct amu_cntr_sample *amu_sample = this_cpu_ptr(&cpu_amu_samples);
u64 prev_core_cnt, prev_const_cnt;
u64 core_cnt, const_cnt, scale;
prev_const_cnt = amu_sample->arch_const_cycles_prev;
prev_core_cnt = amu_sample->arch_core_cycles_prev;
update_freq_counters_refs();
const_cnt = amu_sample->arch_const_cycles_prev;
core_cnt = amu_sample->arch_core_cycles_prev;
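
	/*
	 * The counters should only move backwards if the AMUs have been
	 * reset without the snapshots being restored; bail out in that
	 * unlikely case.
	 */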
if (unlikely(core_cnt <= prev_core_cnt ||
const_cnt <= prev_const_cnt))
return;
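
	/*
	 *          core delta     arch_max_freq_scale
	 * scale = ------------ * ---------------------
	 *          const delta    SCHED_CAPACITY_SCALE
	 *
	 * with the division by SCHED_CAPACITY_SCALE done as the shift below,
	 * undoing the extra shift applied in freq_inv_set_max_ratio().
	 */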
scale = core_cnt - prev_core_cnt;
scale *= this_cpu_read(arch_max_freq_scale);
scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
const_cnt - prev_const_cnt);
scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
this_cpu_write(arch_freq_scale, (unsigned long)scale);
amu_sample->last_scale_update = jiffies;
}
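
/* Descriptor used to register amu_scale_freq_tick() as the arch scale-freq source. */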
static struct scale_freq_data amu_sfd = {
.source = SCALE_FREQ_SOURCE_ARCH,
.set_freq_scale = amu_scale_freq_tick,
};
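
/* True if @cpu currently uses the AMU counters for frequency invariance. */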
static __always_inline bool amu_fie_cpu_supported(unsigned int cpu)
{
return cpumask_available(amu_fie_cpus) &&
cpumask_test_cpu(cpu, amu_fie_cpus);
}
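
/*
 * Take one extra AMU sample on entering idle if the tick has not already
 * updated the scale factor during the current jiffy, so that the data read
 * by arch_freq_get_on_cpu() stays reasonably fresh for mostly-idle
 * housekeeping CPUs.
 */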
void arch_cpu_idle_enter(void)
{
unsigned int cpu = smp_processor_id();
if (!amu_fie_cpu_supported(cpu))
return;
if (housekeeping_cpu(cpu, HK_TYPE_TICK) &&
time_is_before_jiffies(per_cpu(cpu_amu_samples.last_scale_update, cpu)))
amu_scale_freq_tick();
}
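
/* An AMU sample older than this is considered stale by arch_freq_get_on_cpu(). */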
#define AMU_SAMPLE_EXP_MS 20
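
/*
 * Report an AMU-based estimate of @cpu's current frequency, in the units of
 * arch_scale_freq_ref() (kHz, as provided by cpufreq). If @cpu itself cannot
 * provide a fresh sample, fall back to a busy housekeeping CPU in the same
 * cpufreq policy, since CPUs in one policy share a clock domain. Returns a
 * negative errno when no usable sample exists.
 */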
int arch_freq_get_on_cpu(int cpu)
{
struct amu_cntr_sample *amu_sample;
unsigned int start_cpu = cpu;
unsigned long last_update;
unsigned int freq = 0;
u64 scale;
if (!amu_fie_cpu_supported(cpu) || !arch_scale_freq_ref(cpu))
return -EOPNOTSUPP;
while (1) {
amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu);
last_update = amu_sample->last_scale_update;
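
		/*
		 * A nohz_full CPU, or one whose last sample has gone stale,
		 * cannot be trusted for the estimate; look for a busy
		 * housekeeping CPU in the same frequency domain to read
		 * instead.
		 */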
if (!housekeeping_cpu(cpu, HK_TYPE_TICK) ||
time_is_before_jiffies(last_update + msecs_to_jiffies(AMU_SAMPLE_EXP_MS))) {
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
int ref_cpu;
if (!policy)
return -EINVAL;
if (!cpumask_intersects(policy->related_cpus,
housekeeping_cpumask(HK_TYPE_TICK))) {
cpufreq_cpu_put(policy);
return -EOPNOTSUPP;
}
for_each_cpu_wrap(ref_cpu, policy->cpus, cpu + 1) {
if (ref_cpu == start_cpu) {
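					/* Wrapped all the way around without finding a busy CPU. */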
ref_cpu = nr_cpu_ids;
break;
}
if (!idle_cpu(ref_cpu))
break;
}
cpufreq_cpu_put(policy);
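
			/* No busy CPU in this policy to pull a sample from. */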
if (ref_cpu >= nr_cpu_ids)
return -EAGAIN;
cpu = ref_cpu;
} else {
break;
}
}
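
	/*
	 * Reverse of the computation in amu_scale_freq_tick():
	 * freq = arch_freq_scale * arch_scale_freq_ref() / SCHED_CAPACITY_SCALE.
	 */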
scale = arch_scale_freq_capacity(cpu);
freq = scale * arch_scale_freq_ref(cpu);
freq >>= SCHED_CAPACITY_SHIFT;
return freq;
}
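
/*
 * Enable the AMU counters as the scale-freq source for @cpus (typically the
 * CPUs of a newly created cpufreq policy), provided every one of them has
 * valid counters; FIE must be consistent across the whole frequency domain.
 */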
static void amu_fie_setup(const struct cpumask *cpus)
{
int cpu;
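
	/* Nothing to do if these CPUs are already covered, e.g. on a cpufreq driver reload. */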
if (cpumask_available(amu_fie_cpus) &&
unlikely(cpumask_subset(cpus, amu_fie_cpus)))
return;
for_each_cpu(cpu, cpus)
if (!freq_counters_valid(cpu))
return;
if (!cpumask_available(amu_fie_cpus) &&
!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
WARN_ONCE(1, "Failed to allocate FIE cpumask for CPUs[%*pbl]\n",
cpumask_pr_args(cpus));
return;
}
cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);
topology_set_scale_freq_source(&amu_sfd, cpus);
pr_debug("CPUs[%*pbl]: counters will be used for FIE.",
cpumask_pr_args(cpus));
}
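
/* Set up AMU-based FIE whenever a new cpufreq policy is created. */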
static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_policy *policy = data;
if (val == CPUFREQ_CREATE_POLICY)
amu_fie_setup(policy->cpus);
return 0;
}
static struct notifier_block init_amu_fie_notifier = {
.notifier_call = init_amu_fie_callback,
};
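
/*
 * Handle a CPU that comes online after its cpufreq policy already exists:
 * extend AMU-based FIE to it if the rest of its frequency domain already
 * relies on the AMU counters, or, if its own counters turn out to be
 * unusable, drop the whole domain from the AMU scale-freq source so that
 * FIE stays consistent across the policy.
 */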
static int cpuhp_topology_online(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_policy(cpu);
if (unlikely(!policy) || !cpumask_available(amu_fie_cpus) ||
cpumask_test_cpu(cpu, amu_fie_cpus))
return 0;
if (unlikely(!cpumask_subset(policy->cpus, amu_fie_cpus)))
return 0;
if (!freq_counters_valid(cpu)) {
topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_ARCH,
policy->related_cpus);
cpumask_andnot(amu_fie_cpus, amu_fie_cpus, policy->related_cpus);
return 0;
}
cpumask_set_cpu(cpu, amu_fie_cpus);
topology_set_scale_freq_source(&amu_sfd, cpumask_of(cpu));
pr_debug("CPU[%u]: counter will be used for FIE.", cpu);
return 0;
}
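
/* Register the cpufreq policy notifier and the CPU hotplug online callback. */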
static int __init init_amu_fie(void)
{
int ret;
ret = cpufreq_register_notifier(&init_amu_fie_notifier,
CPUFREQ_POLICY_NOTIFIER);
if (ret)
return ret;
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"arm64/topology:online",
cpuhp_topology_online,
NULL);
if (ret < 0) {
cpufreq_unregister_notifier(&init_amu_fie_notifier,
CPUFREQ_POLICY_NOTIFIER);
return ret;
}
return 0;
}
core_initcall(init_amu_fie);
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
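
/*
 * Helpers for reading the AMU counters on a given CPU via
 * smp_call_function_single(). CPUs affected by erratum 2457168 cannot
 * provide a reliable constant counter, so report 0 there, matching the
 * not-implemented FEAT_AMUv1 case.
 */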
static void cpu_read_corecnt(void *val)
{
*(u64 *)val = read_corecnt();
}
static void cpu_read_constcnt(void *val)
{
*(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ?
0UL : read_constcnt();
}
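
/*
 * Read an AMU counter on @cpu through @func. With IRQs disabled we cannot
 * send an IPI, so only a read targeting the local CPU is allowed in that
 * case.
 */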
static inline
int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
{
if (!cpu_has_amu_feat(cpu))
return -EOPNOTSUPP;
if (irqs_disabled()) {
if (WARN_ON_ONCE(cpu != smp_processor_id()))
return -EPERM;
func(val);
} else {
smp_call_function_single(cpu, func, val, 1);
}
return 0;
}
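
/* CPPC FFH counter access is usable only if at least one present CPU implements AMUs. */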
bool cpc_ffh_supported(void)
{
int cpu = get_cpu_with_amu_feat();
if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
return false;
return true;
}
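
/*
 * CPPC FFH read: address 0x0 selects the AMU core cycle counter and 0x1 the
 * constant counter; the raw value is then masked and shifted according to
 * the register's bit_width/bit_offset description.
 */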
int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
{
int ret = -EOPNOTSUPP;
switch ((u64)reg->address) {
case 0x0:
ret = counters_read_on_cpu(cpu, cpu_read_corecnt, val);
break;
case 0x1:
ret = counters_read_on_cpu(cpu, cpu_read_constcnt, val);
break;
}
if (!ret) {
*val &= GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
reg->bit_offset);
*val >>= reg->bit_offset;
}
return ret;
}
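
/* The AMU counters are read-only; FFH writes are not supported. */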
int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
return -EOPNOTSUPP;
}
#endif