// SPDX-License-Identifier: GPL-2.0
/*
 * Energy Model of devices
 */
#define pr_fmt(fmt) "energy_model: " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/energy_model.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include "em_netlink.h"
static DEFINE_MUTEX(em_pd_mutex);
static DEFINE_IDA(em_pd_ida);
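
/* List of registered performance domains, protected by em_pd_list_mutex. */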
static LIST_HEAD(em_pd_list);
static DEFINE_MUTEX(em_pd_list_mutex);
static void em_cpufreq_update_efficiencies(struct device *dev,
struct em_perf_state *table);
static void em_check_capacity_update(void);
static void em_update_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(em_update_work, em_update_workfn);
static bool _is_cpu_device(struct device *dev)
{
return (dev->bus == &cpu_subsys);
}
#ifdef CONFIG_DEBUG_FS
static struct dentry *rootdir;
struct em_dbg_info {
struct em_perf_domain *pd;
int ps_id;
};
#define DEFINE_EM_DBG_SHOW(name, fname) \
static int em_debug_##fname##_show(struct seq_file *s, void *unused) \
{ \
struct em_dbg_info *em_dbg = s->private; \
struct em_perf_state *table; \
unsigned long val; \
\
rcu_read_lock(); \
table = em_perf_state_from_pd(em_dbg->pd); \
val = table[em_dbg->ps_id].name; \
rcu_read_unlock(); \
\
seq_printf(s, "%lu\n", val); \
return 0; \
} \
DEFINE_SHOW_ATTRIBUTE(em_debug_##fname)
DEFINE_EM_DBG_SHOW(frequency, frequency);
DEFINE_EM_DBG_SHOW(power, power);
DEFINE_EM_DBG_SHOW(cost, cost);
DEFINE_EM_DBG_SHOW(performance, performance);
DEFINE_EM_DBG_SHOW(flags, inefficiency);
static void em_debug_create_ps(struct em_perf_domain *em_pd,
struct em_dbg_info *em_dbg, int i,
struct dentry *pd)
{
struct em_perf_state *table;
unsigned long freq;
struct dentry *d;
char name[24];
em_dbg[i].pd = em_pd;
em_dbg[i].ps_id = i;
rcu_read_lock();
table = em_perf_state_from_pd(em_pd);
freq = table[i].frequency;
rcu_read_unlock();
snprintf(name, sizeof(name), "ps:%lu", freq);
d = debugfs_create_dir(name, pd);
debugfs_create_file("frequency", 0444, d, &em_dbg[i],
&em_debug_frequency_fops);
debugfs_create_file("power", 0444, d, &em_dbg[i],
&em_debug_power_fops);
debugfs_create_file("cost", 0444, d, &em_dbg[i],
&em_debug_cost_fops);
debugfs_create_file("performance", 0444, d, &em_dbg[i],
&em_debug_performance_fops);
debugfs_create_file("inefficient", 0444, d, &em_dbg[i],
&em_debug_inefficiency_fops);
}
static int em_debug_cpus_show(struct seq_file *s, void *unused)
{
seq_printf(s, "%*pbl\n", cpumask_pr_args(to_cpumask(s->private)));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);
static int em_debug_flags_show(struct seq_file *s, void *unused)
{
struct em_perf_domain *pd = s->private;
seq_printf(s, "%#lx\n", pd->flags);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(em_debug_flags);
static int em_debug_id_show(struct seq_file *s, void *unused)
{
struct em_perf_domain *pd = s->private;
seq_printf(s, "%d\n", pd->id);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(em_debug_id);
static void em_debug_create_pd(struct device *dev)
{
struct em_dbg_info *em_dbg;
struct dentry *d;
int i;
d = debugfs_create_dir(dev_name(dev), rootdir);
if (_is_cpu_device(dev))
debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,
&em_debug_cpus_fops);
debugfs_create_file("flags", 0444, d, dev->em_pd,
&em_debug_flags_fops);
debugfs_create_file("id", 0444, d, dev->em_pd, &em_debug_id_fops);
em_dbg = devm_kcalloc(dev, dev->em_pd->nr_perf_states,
sizeof(*em_dbg), GFP_KERNEL);
if (!em_dbg)
return;
for (i = 0; i < dev->em_pd->nr_perf_states; i++)
em_debug_create_ps(dev->em_pd, em_dbg, i, d);
}
static void em_debug_remove_pd(struct device *dev)
{
debugfs_lookup_and_remove(dev_name(dev), rootdir);
}
static int __init em_debug_init(void)
{
rootdir = debugfs_create_dir("energy_model", NULL);
return 0;
}
fs_initcall(em_debug_init);
#else
static void em_debug_create_pd(struct device *dev) {}
static void em_debug_remove_pd(struct device *dev) {}
#endif
static void em_release_table_kref(struct kref *kref)
{
kfree_rcu(container_of(kref, struct em_perf_table, kref), rcu);
}
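
/**
 * em_table_free() - Handles safe free of the EM table when needed
 * @table : EM table which is going to be freed
 *
 * No return values.
 */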
void em_table_free(struct em_perf_table *table)
{
kref_put(&table->kref, em_release_table_kref);
}
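
/**
 * em_table_alloc() - Allocate a new EM table
 * @pd		: EM performance domain for which this must be done
 *
 * The returned table has its reference counter initialized to 1; drop that
 * reference with em_table_free().
 *
 * Return the new table on success or NULL in case of failure.
 */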
struct em_perf_table *em_table_alloc(struct em_perf_domain *pd)
{
struct em_perf_table *table;
int table_size;
table_size = sizeof(struct em_perf_state) * pd->nr_perf_states;
table = kzalloc(sizeof(*table) + table_size, GFP_KERNEL);
if (!table)
return NULL;
kref_init(&table->kref);
return table;
}
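
/*
 * Set the 'performance' of each state proportionally to its frequency, so
 * that the highest state matches arch_scale_cpu_capacity() for this CPU.
 */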
static void em_init_performance(struct device *dev, struct em_perf_domain *pd,
struct em_perf_state *table, int nr_states)
{
u64 fmax, max_cap;
int i, cpu;
if (!_is_cpu_device(dev))
return;
cpu = cpumask_first(em_span_cpus(pd));
fmax = (u64) table[nr_states - 1].frequency;
max_cap = (u64) arch_scale_cpu_capacity(cpu);
for (i = 0; i < nr_states; i++)
table[i].performance = div64_u64(max_cap * table[i].frequency,
fmax);
}
static int em_compute_costs(struct device *dev, struct em_perf_state *table,
const struct em_data_callback *cb, int nr_states,
unsigned long flags)
{
unsigned long prev_cost = ULONG_MAX;
int i, ret;
if (!_is_cpu_device(dev))
return 0;
for (i = nr_states - 1; i >= 0; i--) {
unsigned long power_res, cost;
if ((flags & EM_PERF_DOMAIN_ARTIFICIAL) && cb->get_cost) {
ret = cb->get_cost(dev, table[i].frequency, &cost);
if (ret || !cost || cost > EM_MAX_POWER) {
dev_err(dev, "EM: invalid cost %lu %d\n",
cost, ret);
return -EINVAL;
}
} else {
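			/* increase resolution of 'cost' precision */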
power_res = table[i].power * 10;
cost = power_res / table[i].performance;
}
table[i].cost = cost;
if (table[i].cost >= prev_cost) {
table[i].flags = EM_PERF_STATE_INEFFICIENT;
dev_dbg(dev, "EM: OPP:%lu is inefficient\n",
table[i].frequency);
} else {
prev_cost = table[i].cost;
}
}
return 0;
}
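
/**
 * em_dev_compute_costs() - Calculate cost values for a new runtime EM table
 * @dev		: Device for which the EM table is to be updated
 * @table	: The new EM table that is going to get the costs calculated
 * @nr_states	: Number of performance states
 *
 * Calculate the em_perf_state::cost values for a new runtime EM table. The
 * values are used for EAS during task placement. It also calculates and sets
 * the efficiency flag for each performance state. When the function finishes
 * successfully the EM table is ready to be updated and used by EAS.
 *
 * Return 0 on success or a proper error in case of failure.
 */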
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
int nr_states)
{
return em_compute_costs(dev, table, NULL, nr_states, 0);
}
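
/**
 * em_dev_update_perf_domain() - Update runtime EM table for a device
 * @dev		: Device for which the EM is to be updated
 * @new_table	: The new EM table that is going to be used from now
 *
 * Update the EM runtime modifiable table for @dev using the provided
 * @new_table.
 *
 * This function uses a mutex to serialize writers, so it must not be called
 * from a non-sleeping context.
 *
 * A typical update sequence (sketch, mirroring em_recalc_and_update() below;
 * note that em_table_dup() is internal to this file):
 *
 *	new_table = em_table_dup(pd);
 *	... adjust new_table->state[i].power ...
 *	em_dev_compute_costs(dev, new_table->state, pd->nr_perf_states);
 *	em_dev_update_perf_domain(dev, new_table);
 *	em_table_free(new_table);	(drop this updater's reference)
 *
 * Return 0 on success or an error code on failure.
 */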
int em_dev_update_perf_domain(struct device *dev,
struct em_perf_table *new_table)
{
struct em_perf_table *old_table;
struct em_perf_domain *pd;
if (!dev)
return -EINVAL;
mutex_lock(&em_pd_mutex);
if (!dev->em_pd) {
mutex_unlock(&em_pd_mutex);
return -EINVAL;
}
pd = dev->em_pd;
kref_get(&new_table->kref);
old_table = rcu_dereference_protected(pd->em_table,
lockdep_is_held(&em_pd_mutex));
rcu_assign_pointer(pd->em_table, new_table);
em_cpufreq_update_efficiencies(dev, new_table->state);
em_table_free(old_table);
mutex_unlock(&em_pd_mutex);
em_notify_pd_updated(pd);
return 0;
}
EXPORT_SYMBOL_GPL(em_dev_update_perf_domain);
static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
struct em_perf_state *table,
const struct em_data_callback *cb,
unsigned long flags)
{
unsigned long power, freq, prev_freq = 0;
int nr_states = pd->nr_perf_states;
int i, ret;
for (i = 0, freq = 0; i < nr_states; i++, freq++) {
ret = cb->active_power(dev, &power, &freq);
if (ret) {
dev_err(dev, "EM: invalid perf. state: %d\n",
ret);
return -EINVAL;
}
if (freq <= prev_freq) {
dev_err(dev, "EM: non-increasing freq: %lu\n",
freq);
return -EINVAL;
}
if (!power || power > EM_MAX_POWER) {
dev_err(dev, "EM: invalid power: %lu\n",
power);
return -EINVAL;
}
table[i].power = power;
table[i].frequency = prev_freq = freq;
}
em_init_performance(dev, pd, table, nr_states);
ret = em_compute_costs(dev, table, cb, nr_states, flags);
if (ret)
return -EINVAL;
return 0;
}
static int em_create_pd(struct device *dev, int nr_states,
const struct em_data_callback *cb,
const cpumask_t *cpus,
unsigned long flags)
{
struct em_perf_table *em_table;
struct em_perf_domain *pd;
struct device *cpu_dev;
int cpu, ret, num_cpus, id;
if (_is_cpu_device(dev)) {
num_cpus = cpumask_weight(cpus);
if (num_cpus > EM_MAX_NUM_CPUS) {
dev_err(dev, "EM: too many CPUs, overflow possible\n");
return -EINVAL;
}
pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
if (!pd)
return -ENOMEM;
cpumask_copy(em_span_cpus(pd), cpus);
} else {
		pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
}
pd->nr_perf_states = nr_states;
INIT_LIST_HEAD(&pd->node);
id = ida_alloc(&em_pd_ida, GFP_KERNEL);
if (id < 0) {
kfree(pd);
return id;
}
pd->id = id;
em_table = em_table_alloc(pd);
if (!em_table)
goto free_pd;
ret = em_create_perf_table(dev, pd, em_table->state, cb, flags);
if (ret)
goto free_pd_table;
rcu_assign_pointer(pd->em_table, em_table);
if (_is_cpu_device(dev))
for_each_cpu(cpu, cpus) {
cpu_dev = get_cpu_device(cpu);
cpu_dev->em_pd = pd;
}
dev->em_pd = pd;
return 0;
free_pd_table:
kfree(em_table);
free_pd:
kfree(pd);
ida_free(&em_pd_ida, id);
return -EINVAL;
}
static void
em_cpufreq_update_efficiencies(struct device *dev, struct em_perf_state *table)
{
struct em_perf_domain *pd = dev->em_pd;
struct cpufreq_policy *policy;
int found = 0;
int i, cpu;
if (!_is_cpu_device(dev))
return;
cpu = cpumask_first_and(em_span_cpus(pd), cpu_active_mask);
if (cpu >= nr_cpu_ids) {
dev_warn(dev, "EM: No online CPU for CPUFreq policy\n");
return;
}
policy = cpufreq_cpu_get(cpu);
if (!policy) {
dev_warn(dev, "EM: Access to CPUFreq policy failed\n");
return;
}
for (i = 0; i < pd->nr_perf_states; i++) {
if (!(table[i].flags & EM_PERF_STATE_INEFFICIENT))
continue;
if (!cpufreq_table_set_inefficient(policy, table[i].frequency))
found++;
}
cpufreq_cpu_put(policy);
if (!found)
return;
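
	/*
	 * Efficiencies have been installed in CPUFreq, so the inefficient
	 * performance states can be skipped by the EM as well.
	 */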
pd->flags |= EM_PERF_DOMAIN_SKIP_INEFFICIENCIES;
}
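
/**
 * em_pd_get() - Return the performance domain for a device
 * @dev : Device to find the performance domain for
 *
 * Returns the performance domain to which @dev belongs, or NULL if it doesn't
 * exist.
 */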
struct em_perf_domain *em_pd_get(struct device *dev)
{
if (IS_ERR_OR_NULL(dev))
return NULL;
return dev->em_pd;
}
EXPORT_SYMBOL_GPL(em_pd_get);
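
/**
 * em_cpu_get() - Return the performance domain for a CPU
 * @cpu : CPU to find the performance domain for
 *
 * Returns the performance domain to which @cpu belongs, or NULL if it doesn't
 * exist.
 */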
struct em_perf_domain *em_cpu_get(int cpu)
{
struct device *cpu_dev;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
return NULL;
return em_pd_get(cpu_dev);
}
EXPORT_SYMBOL_GPL(em_cpu_get);
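
/**
 * em_dev_register_perf_domain() - Register the Energy Model (EM) for a device
 * @dev		: Device for which the EM is to be registered
 * @nr_states	: Number of performance states to register
 * @cb		: Callback functions providing the data of the Energy Model
 * @cpus	: Pointer to cpumask_t, which in case of a CPU device is
 *		obligatory. It can be taken from i.e. 'policy->cpus'. For other
 *		types of devices it should be set to NULL.
 * @microwatts	: Flag indicating that the power values are in micro-Watts or
 *		in some other scale.
 *
 * Create the EM tables for a performance domain using the callbacks defined
 * in @cb, then (for CPU devices) check whether the CPU capacities require an
 * EM adjustment.
 *
 * Usage sketch (illustrative only: my_active_power() and my_dev are
 * hypothetical driver code, not part of this file). The active_power()
 * callback is expected to round *freq up to the nearest supported frequency
 * and fill in that state's frequency and active power:
 *
 *	static int my_active_power(struct device *dev, unsigned long *power,
 *				   unsigned long *freq)
 *	{
 *		... set *power (uW) and *freq (kHz) of that state ...
 *		return 0;
 *	}
 *
 *	static const struct em_data_callback my_cb = {
 *		.active_power = my_active_power,
 *	};
 *
 *	ret = em_dev_register_perf_domain(my_dev, nr_states, &my_cb,
 *					  policy->cpus, true);
 *
 * Return 0 on success or a negative error code.
 */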
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
const struct em_data_callback *cb,
const cpumask_t *cpus, bool microwatts)
{
int ret = em_dev_register_pd_no_update(dev, nr_states, cb, cpus, microwatts);
if (_is_cpu_device(dev))
em_check_capacity_update();
return ret;
}
EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);
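
/**
 * em_dev_register_pd_no_update() - Register a perf domain for a device
 * @dev		: Device to register the PD for
 * @nr_states	: Number of performance states in the new PD
 * @cb		: Callback functions for the new PD
 * @cpus	: CPUs to register the new PD for (CPU devices only)
 * @microwatts	: Whether or not the power values in the EM will be in uW
 *
 * Like em_dev_register_perf_domain(), but does not trigger a CPU capacity
 * update after registering the PD.
 *
 * Return 0 on success or a negative error code.
 */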
int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
const struct em_data_callback *cb,
const cpumask_t *cpus, bool microwatts)
{
struct em_perf_table *em_table;
unsigned long cap, prev_cap = 0;
unsigned long flags = 0;
int cpu, ret;
if (!dev || !nr_states || !cb)
return -EINVAL;
mutex_lock(&em_pd_mutex);
if (dev->em_pd) {
ret = -EEXIST;
goto unlock;
}
if (_is_cpu_device(dev)) {
if (!cpus) {
dev_err(dev, "EM: invalid CPU mask\n");
ret = -EINVAL;
goto unlock;
}
for_each_cpu(cpu, cpus) {
if (em_cpu_get(cpu)) {
dev_err(dev, "EM: exists for CPU%d\n", cpu);
ret = -EEXIST;
goto unlock;
}
cap = arch_scale_cpu_capacity(cpu);
if (prev_cap && prev_cap != cap) {
dev_err(dev, "EM: CPUs of %*pbl must have the same capacity\n",
cpumask_pr_args(cpus));
ret = -EINVAL;
goto unlock;
}
prev_cap = cap;
}
}
if (microwatts)
flags |= EM_PERF_DOMAIN_MICROWATTS;
else if (cb->get_cost)
flags |= EM_PERF_DOMAIN_ARTIFICIAL;
if (!microwatts && !(flags & EM_PERF_DOMAIN_ARTIFICIAL)) {
dev_err(dev, "EM: only supports uW power values\n");
ret = -EINVAL;
goto unlock;
}
ret = em_create_pd(dev, nr_states, cb, cpus, flags);
if (ret)
goto unlock;
dev->em_pd->flags |= flags;
dev->em_pd->min_perf_state = 0;
dev->em_pd->max_perf_state = nr_states - 1;
em_table = rcu_dereference_protected(dev->em_pd->em_table,
lockdep_is_held(&em_pd_mutex));
em_cpufreq_update_efficiencies(dev, em_table->state);
em_debug_create_pd(dev);
dev_info(dev, "EM: created perf domain\n");
unlock:
mutex_unlock(&em_pd_mutex);
if (ret)
return ret;
mutex_lock(&em_pd_list_mutex);
list_add_tail(&dev->em_pd->node, &em_pd_list);
mutex_unlock(&em_pd_list_mutex);
em_notify_pd_created(dev->em_pd);
return 0;
}
EXPORT_SYMBOL_GPL(em_dev_register_pd_no_update);
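
/**
 * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device
 * @dev		: Device for which the EM is registered
 *
 * Unregister the EM for the specified @dev (but not a CPU device).
 */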
void em_dev_unregister_perf_domain(struct device *dev)
{
if (IS_ERR_OR_NULL(dev) || !dev->em_pd)
return;
if (_is_cpu_device(dev))
return;
mutex_lock(&em_pd_list_mutex);
list_del_init(&dev->em_pd->node);
mutex_unlock(&em_pd_list_mutex);
em_notify_pd_deleted(dev->em_pd);
mutex_lock(&em_pd_mutex);
em_debug_remove_pd(dev);
em_table_free(rcu_dereference_protected(dev->em_pd->em_table,
lockdep_is_held(&em_pd_mutex)));
ida_free(&em_pd_ida, dev->em_pd->id);
kfree(dev->em_pd);
dev->em_pd = NULL;
mutex_unlock(&em_pd_mutex);
}
EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain);
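
/*
 * Allocate a new EM table and duplicate the current performance states into
 * it, so a writer can modify the copy and publish it later.
 */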
static struct em_perf_table *em_table_dup(struct em_perf_domain *pd)
{
struct em_perf_table *em_table;
struct em_perf_state *ps, *new_ps;
int ps_size;
em_table = em_table_alloc(pd);
if (!em_table)
return NULL;
new_ps = em_table->state;
rcu_read_lock();
ps = em_perf_state_from_pd(pd);
ps_size = sizeof(struct em_perf_state) * pd->nr_perf_states;
memcpy(new_ps, ps, ps_size);
rcu_read_unlock();
return em_table;
}
static int em_recalc_and_update(struct device *dev, struct em_perf_domain *pd,
struct em_perf_table *em_table)
{
int ret;
if (!em_is_artificial(pd)) {
ret = em_compute_costs(dev, em_table->state, NULL,
pd->nr_perf_states, pd->flags);
if (ret)
goto free_em_table;
}
ret = em_dev_update_perf_domain(dev, em_table);
if (ret)
goto free_em_table;
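
	/*
	 * This is one-time-update, so give up the ownership in this updater.
	 * The EM framework has incremented the usage counter and from now
	 * will keep the reference (then free the memory when needed).
	 */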
free_em_table:
em_table_free(em_table);
return ret;
}
static void em_adjust_new_capacity(unsigned int cpu, struct device *dev,
struct em_perf_domain *pd)
{
unsigned long cpu_capacity = arch_scale_cpu_capacity(cpu);
struct em_perf_table *em_table;
struct em_perf_state *table;
unsigned long em_max_perf;
rcu_read_lock();
table = em_perf_state_from_pd(pd);
em_max_perf = table[pd->nr_perf_states - 1].performance;
rcu_read_unlock();
if (em_max_perf == cpu_capacity)
return;
pr_debug("updating cpu%d cpu_cap=%lu old capacity=%lu\n", cpu,
cpu_capacity, em_max_perf);
em_table = em_table_dup(pd);
if (!em_table) {
dev_warn(dev, "EM: allocation failed\n");
return;
}
em_init_performance(dev, pd, em_table->state, pd->nr_perf_states);
em_recalc_and_update(dev, pd, em_table);
}
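
/**
 * em_adjust_cpu_capacity() - Adjust the EM for a CPU after a capacity update.
 * @cpu		: The CPU for which capacity has been updated recently
 *
 * Adjust the existing EM for @cpu after a capacity update under the hood,
 * e.g. when arch_scale_cpu_capacity() has changed its value.
 */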
void em_adjust_cpu_capacity(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
struct em_perf_domain *pd;
pd = em_pd_get(dev);
if (pd)
em_adjust_new_capacity(cpu, dev, pd);
}
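
/*
 * Adjust CPU performance values after boot, once all CPU capacities are
 * correctly calculated. Retries via delayed work while any CPUFreq policy
 * is still unavailable.
 */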
static void em_check_capacity_update(void)
{
cpumask_var_t cpu_done_mask;
int cpu, failed_cpus = 0;
if (!zalloc_cpumask_var(&cpu_done_mask, GFP_KERNEL)) {
pr_warn("no free memory\n");
return;
}
for_each_possible_cpu(cpu) {
struct cpufreq_policy *policy;
struct em_perf_domain *pd;
struct device *dev;
if (cpumask_test_cpu(cpu, cpu_done_mask))
continue;
policy = cpufreq_cpu_get(cpu);
if (!policy) {
failed_cpus++;
continue;
}
cpufreq_cpu_put(policy);
dev = get_cpu_device(cpu);
pd = em_pd_get(dev);
if (!pd || em_is_artificial(pd))
continue;
cpumask_or(cpu_done_mask, cpu_done_mask,
em_span_cpus(pd));
em_adjust_new_capacity(cpu, dev, pd);
}
if (failed_cpus)
schedule_delayed_work(&em_update_work, msecs_to_jiffies(1000));
free_cpumask_var(cpu_done_mask);
}
static void em_update_workfn(struct work_struct *work)
{
em_check_capacity_update();
}
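
/**
 * em_dev_update_chip_binning() - Update Energy Model after the new voltage
 *				information is present in the OPPs.
 * @dev		: Device for which the Energy Model has to be updated.
 *
 * This function allows to update easily the EM with new values available in
 * the OPP framework and DT. It can be used after the chip has been properly
 * verified by device drivers and the voltages adjusted for the 'chip binning'.
 */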
int em_dev_update_chip_binning(struct device *dev)
{
struct em_perf_table *em_table;
struct em_perf_domain *pd;
int i, ret;
if (IS_ERR_OR_NULL(dev))
return -EINVAL;
pd = em_pd_get(dev);
if (!pd) {
dev_warn(dev, "Couldn't find Energy Model\n");
return -EINVAL;
}
em_table = em_table_dup(pd);
if (!em_table) {
dev_warn(dev, "EM: allocation failed\n");
return -ENOMEM;
}
for (i = 0; i < pd->nr_perf_states; i++) {
unsigned long freq = em_table->state[i].frequency;
unsigned long power;
ret = dev_pm_opp_calc_power(dev, &power, &freq);
if (ret) {
em_table_free(em_table);
return ret;
}
em_table->state[i].power = power;
}
return em_recalc_and_update(dev, pd, em_table);
}
EXPORT_SYMBOL_GPL(em_dev_update_chip_binning);
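
/**
 * em_update_performance_limits() - Update Energy Model with performance
 *				limits information.
 * @pd			: Performance Domain with EM that has to be updated.
 * @freq_min_khz	: New minimum allowed frequency for this device.
 * @freq_max_khz	: New maximum allowed frequency for this device.
 *
 * This function allows to update the EM with information about available
 * performance levels. It takes the minimum and maximum frequency in kHz
 * and does internal translation to performance levels.
 * Returns 0 on success or -EINVAL when failed.
 */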
int em_update_performance_limits(struct em_perf_domain *pd,
unsigned long freq_min_khz, unsigned long freq_max_khz)
{
struct em_perf_state *table;
int min_ps = -1;
int max_ps = -1;
int i;
if (!pd)
return -EINVAL;
rcu_read_lock();
table = em_perf_state_from_pd(pd);
for (i = 0; i < pd->nr_perf_states; i++) {
if (freq_min_khz == table[i].frequency)
min_ps = i;
if (freq_max_khz == table[i].frequency)
max_ps = i;
}
rcu_read_unlock();
if (min_ps < 0 || max_ps < 0 || max_ps < min_ps)
return -EINVAL;
mutex_lock(&em_pd_mutex);
pd->min_perf_state = min_ps;
pd->max_perf_state = max_ps;
mutex_unlock(&em_pd_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(em_update_performance_limits);
static void rebuild_sd_workfn(struct work_struct *work)
{
rebuild_sched_domains_energy();
}
void em_rebuild_sched_domains(void)
{
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
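
	/*
	 * When called from the cpufreq_register_driver() path, the
	 * cpu_hotplug_lock is already held, so use a work item to
	 * avoid nested locking in rebuild_sched_domains().
	 */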
schedule_work(&rebuild_sd_work);
}
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_NET)
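
/**
 * for_each_em_perf_domain() - Iterate over all registered performance domains
 * @cb		: Callback invoked for each performance domain
 * @data	: Opaque pointer passed through to @cb
 *
 * Walk the global list of performance domains under em_pd_list_mutex. The
 * iteration stops early if @cb returns a non-zero value, which is then
 * returned to the caller.
 */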
int for_each_em_perf_domain(int (*cb)(struct em_perf_domain*, void *),
void *data)
{
struct em_perf_domain *pd;
lockdep_assert_not_held(&em_pd_mutex);
guard(mutex)(&em_pd_list_mutex);
list_for_each_entry(pd, &em_pd_list, node) {
int ret;
ret = cb(pd, data);
if (ret)
return ret;
}
return 0;
}
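
/**
 * em_perf_domain_get_by_id() - Look up a performance domain by its id
 * @id		: Unique id of the performance domain, as set at creation
 *
 * Return the matching performance domain or NULL if the id is unknown.
 */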
struct em_perf_domain *em_perf_domain_get_by_id(int id)
{
struct em_perf_domain *pd;
lockdep_assert_not_held(&em_pd_mutex);
guard(mutex)(&em_pd_list_mutex);
list_for_each_entry(pd, &em_pd_list, node) {
if (pd->id == id)
return pd;
}
return NULL;
}
#endif