#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cps.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/regdef.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>
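
/*
 * cps_nc_entry_fn - type of a generated non-coherent state entry function
 * @online: count of online coupled VPEs
 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
 *
 * The code entering & exiting non-coherent states is generated at runtime
 * using uasm so that it can be tailored to the CPU (cache geometry, errata
 * workarounds) and run safely once coherence is disabled. When
 * coupled_coherence is set and the state is CPS_PM_NC_WAIT, the generated
 * function returns the number of VPEs that were in the wait state at the
 * point this VPE left it; otherwise the return value is unused.
 */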
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);
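/* Per-CPU table of generated entry functions, one per power state */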
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
nc_asm_enter);
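/* Bitmap of the low-power states whose entry is currently supported */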
static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
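/*
 * Per-CPU pointer to a per-core ready count, shared by all sibling VPEs in
 * the core (allocated in cps_pm_online_cpu()).
 */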
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
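/* Per-CPU mask of the online coupled (sibling) CPUs, excluding the CPU itself */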
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
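/*
 * Per-CPU barrier used to synchronise coupled VPEs; only the instance
 * belonging to the first sibling in a core is actually used.
 */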
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
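/* CPU state saved across the CPS_PM_POWER_GATED state */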
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
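/* Scratch uasm label & relocation tables used while generating entry code */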
static struct uasm_label labels[32];
static struct uasm_reloc relocs[32];
bool cps_pm_support_state(enum cps_pm_state state)
{
return test_bit(state, state_support);
}
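
/*
 * Block execution until all online coupled VPEs have reached this barrier.
 * Two passes over the atomic counter are used so that the barrier can be
 * reused immediately: the last VPE through the second pass resets it to 0.
 */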
static void coupled_barrier(atomic_t *a, unsigned online)
{
if (!coupled_coherence)
return;
smp_mb__before_atomic();
atomic_inc(a);
while (atomic_read(a) < online)
cpu_relax();
if (atomic_inc_return(a) == online * 2) {
atomic_set(a, 0);
return;
}
while (atomic_read(a) > online)
cpu_relax();
}
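
/*
 * Enter the given low-power state on the calling CPU. Returns 0 once the
 * state has been exited, or -EINVAL if the state is not supported.
 */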
int cps_pm_enter_state(enum cps_pm_state state)
{
unsigned cpu = smp_processor_id();
	unsigned int cluster = cpu_cluster(&current_cpu_data);
	unsigned core = cpu_core(&current_cpu_data);
unsigned online, left;
cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
u32 *core_ready_count, *nc_core_ready_count;
void *nc_addr;
cps_nc_entry_fn entry;
struct cluster_boot_config *cluster_cfg;
struct core_boot_config *core_cfg;
struct vpe_boot_config *vpe_cfg;
atomic_t *barrier;
entry = per_cpu(nc_asm_enter, cpu)[state];
if (!entry)
return -EINVAL;
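	/* Calculate which coupled CPUs (sibling VPEs) are online */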
#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
if (cpu_online(cpu)) {
cpumask_and(coupled_mask, cpu_online_mask,
&cpu_sibling_map[cpu]);
online = cpumask_weight(coupled_mask);
cpumask_clear_cpu(cpu, coupled_mask);
} else
#endif
{
cpumask_clear(coupled_mask);
online = 1;
}
if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
if (!mips_cps_smp_in_use())
return -EINVAL;
cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
core_cfg = &cluster_cfg->core_config[core];
		vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
vpe_cfg->gp = (unsigned long)current_thread_info();
vpe_cfg->sp = 0;
}
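	/* This CPU may shortly stop being coherent: remove it from the coherent mask */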
cpumask_clear_cpu(cpu, &cpu_coherent_mask);
smp_mb__after_atomic();
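	/*
	 * Create a non-coherent mapping of the core's ready_count so that it
	 * can still be accessed once coherence has been disabled.
	 */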
core_ready_count = per_cpu(ready_count, cpu);
nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
(unsigned long)core_ready_count);
nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
nc_core_ready_count = nc_addr;
WRITE_ONCE(*nc_core_ready_count, 0);
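	/*
	 * Synchronise with the other online VPEs in the core, using the first
	 * sibling's barrier so that all VPEs operate on the same atomic.
	 */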
barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu]));
coupled_barrier(barrier, online);
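	/* Run the generated entry code; 'left' is only meaningful for coupled NC wait */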
left = entry(online, nc_core_ready_count);
kunmap_noncoherent();
cpumask_set_cpu(cpu, &cpu_coherent_mask);
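	/*
	 * If this VPE was the first to leave the non-coherent wait state then
	 * send an IPI to wake any coupled VPEs still executing their wait
	 * instruction.
	 */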
if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
arch_send_call_function_ipi_mask(coupled_mask);
return 0;
}
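
/*
 * Generate a loop which performs the given index-type cache operation (op)
 * on every line of the described cache, starting from CKSEG0. Generates
 * nothing if the cache is not present.
 */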
static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
const struct cache_desc *cache,
unsigned op, int lbl)
{
unsigned cache_size = cache->ways << cache->waybit;
unsigned i;
const unsigned unroll_lines = 32;
if (cache->flags & MIPS_CACHE_NOT_PRESENT)
return;
UASM_i_LA(pp, GPR_T0, (long)CKSEG0);
if (cache_size < 0x8000)
uasm_i_addiu(pp, GPR_T1, GPR_T0, cache_size);
else
UASM_i_LA(pp, GPR_T1, (long)(CKSEG0 + cache_size));
uasm_build_label(pl, *pp, lbl);
for (i = 0; i < unroll_lines; i++) {
if (cpu_has_mips_r6) {
uasm_i_cache(pp, op, 0, GPR_T0);
uasm_i_addiu(pp, GPR_T0, GPR_T0, cache->linesz);
} else {
uasm_i_cache(pp, op, i * cache->linesz, GPR_T0);
}
}
if (!cpu_has_mips_r6)
uasm_i_addiu(pp, GPR_T0, GPR_T0, unroll_lines * cache->linesz);
uasm_il_bne(pp, pr, GPR_T0, GPR_T1, lbl);
uasm_i_nop(pp);
}
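
/*
 * Generate code to flush the Fill/Store Buffer (FSB) on CPUs which require
 * it before clock or power gating. A performance counter event reflecting
 * FSB-full stalls is used to verify that the buffer has drained after a run
 * of loads & cache ops. Returns 0 if no flush is needed or code was
 * generated, or -1 for CPUs (older proAptiv) where a required flush cannot
 * be generated this way.
 */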
static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
const struct cpuinfo_mips *cpu_info,
int lbl)
{
unsigned i, fsb_size = 8;
unsigned num_loads = (fsb_size * 3) / 2;
unsigned line_stride = 2;
unsigned line_size = cpu_info->dcache.linesz;
unsigned perf_counter, perf_event;
unsigned revision = cpu_info->processor_id & PRID_REV_MASK;
switch (__get_cpu_type(cpu_info->cputype)) {
case CPU_INTERAPTIV:
perf_counter = 1;
perf_event = 51;
break;
case CPU_PROAPTIV:
if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
return 0;
return -1;
default:
return 0;
}
uasm_i_mfc0(pp, GPR_T2, 25, (perf_counter * 2) + 0);
uasm_i_mfc0(pp, GPR_T3, 25, (perf_counter * 2) + 1);
uasm_i_addiu(pp, GPR_T0, GPR_ZERO, (perf_event << 5) | 0xf);
uasm_i_mtc0(pp, GPR_T0, 25, (perf_counter * 2) + 0);
uasm_i_ehb(pp);
uasm_i_mtc0(pp, GPR_ZERO, 25, (perf_counter * 2) + 1);
uasm_i_ehb(pp);
UASM_i_LA(pp, GPR_T0, (long)CKSEG0);
uasm_build_label(pl, *pp, lbl);
for (i = 0; i < num_loads; i++)
uasm_i_lw(pp, GPR_ZERO, i * line_size * line_stride, GPR_T0);
for (i = 0; i < num_loads; i++) {
uasm_i_cache(pp, Hit_Invalidate_D,
i * line_size * line_stride, GPR_T0);
uasm_i_cache(pp, Hit_Writeback_Inv_SD,
i * line_size * line_stride, GPR_T0);
}
uasm_i_sync(pp, __SYNC_full);
uasm_i_ehb(pp);
uasm_i_mfc0(pp, GPR_T1, 25, (perf_counter * 2) + 1);
uasm_il_beqz(pp, pr, GPR_T1, lbl);
uasm_i_nop(pp);
uasm_i_mtc0(pp, GPR_T2, 25, (perf_counter * 2) + 0);
uasm_i_ehb(pp);
uasm_i_mtc0(pp, GPR_T3, 25, (perf_counter * 2) + 1);
uasm_i_ehb(pp);
return 0;
}
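
/*
 * Generate an LL/SC loop which atomically sets the top bit (bit 31) of the
 * word at r_addr, retrying until the store-conditional succeeds.
 */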
static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
unsigned r_addr, int lbl)
{
uasm_i_lui(pp, GPR_T0, uasm_rel_hi(0x80000000));
uasm_build_label(pl, *pp, lbl);
uasm_i_ll(pp, GPR_T1, 0, r_addr);
uasm_i_or(pp, GPR_T1, GPR_T1, GPR_T0);
uasm_i_sc(pp, GPR_T1, 0, r_addr);
uasm_il_beqz(pp, pr, GPR_T1, lbl);
uasm_i_nop(pp);
}
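
/*
 * Generate the code which CPU 'cpu' will run to enter & exit the given
 * state. Returns a pointer to the generated code, or NULL on failure.
 */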
static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
u32 *buf, *p;
const unsigned r_online = GPR_A0;
const unsigned r_nc_count = GPR_A1;
const unsigned r_pcohctl = GPR_T8;
const unsigned max_instrs = 256;
unsigned cpc_cmd;
int err;
enum {
lbl_incready = 1,
lbl_poll_cont,
lbl_secondary_hang,
lbl_disable_coherence,
lbl_flush_fsb,
lbl_invicache,
lbl_flushdcache,
lbl_hang,
lbl_set_cont,
lbl_secondary_cont,
lbl_decready,
};
p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
if (!buf)
return NULL;
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
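	/*
	 * For power gating, first save the CPU state; it will be restored by
	 * mips_cps_pm_restore when the VPE is brought back up.
	 */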
if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
if (!mips_cps_smp_in_use())
goto out_err;
UASM_i_LA(&p, GPR_T0, (long)mips_cps_pm_save);
uasm_i_jalr(&p, GPR_V0, GPR_T0);
uasm_i_nop(&p);
}
UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());
if (coupled_coherence) {
uasm_i_sync(&p, __SYNC_mb);
uasm_build_label(&l, p, lbl_incready);
uasm_i_ll(&p, GPR_T1, 0, r_nc_count);
uasm_i_addiu(&p, GPR_T2, GPR_T1, 1);
uasm_i_sc(&p, GPR_T2, 0, r_nc_count);
uasm_il_beqz(&p, &r, GPR_T2, lbl_incready);
uasm_i_addiu(&p, GPR_T1, GPR_T1, 1);
uasm_i_sync(&p, __SYNC_mb);
uasm_il_beq(&p, &r, GPR_T1, r_online, lbl_disable_coherence);
uasm_i_nop(&p);
if (state < CPS_PM_POWER_GATED) {
uasm_i_addiu(&p, GPR_T1, GPR_ZERO, -1);
uasm_build_label(&l, p, lbl_poll_cont);
uasm_i_lw(&p, GPR_T0, 0, r_nc_count);
uasm_il_bltz(&p, &r, GPR_T0, lbl_secondary_cont);
uasm_i_ehb(&p);
if (cpu_has_mipsmt)
uasm_i_yield(&p, GPR_ZERO, GPR_T1);
uasm_il_b(&p, &r, lbl_poll_cont);
uasm_i_nop(&p);
} else {
if (cpu_has_mipsmt) {
uasm_i_addiu(&p, GPR_T0, GPR_ZERO, TCHALT_H);
uasm_i_mtc0(&p, GPR_T0, 2, 4);
} else if (cpu_has_vp) {
unsigned int vpe_id;
vpe_id = cpu_vpe_id(&cpu_data[cpu]);
uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << vpe_id);
UASM_i_LA(&p, GPR_T1, (long)addr_cpc_cl_vp_stop());
uasm_i_sw(&p, GPR_T0, 0, GPR_T1);
} else {
BUG();
}
uasm_build_label(&l, p, lbl_secondary_hang);
uasm_il_b(&p, &r, lbl_secondary_hang);
uasm_i_nop(&p);
}
}
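	/*
	 * Point of no return: the last VPE to arrive (or the sole VPE) now
	 * flushes the I & D caches and takes the core out of the coherent
	 * domain.
	 */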
uasm_build_label(&l, p, lbl_disable_coherence);
cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
Index_Invalidate_I, lbl_invicache);
cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
Index_Writeback_Inv_D, lbl_flushdcache);
uasm_i_sync(&p, __SYNC_full);
uasm_i_ehb(&p);
if (mips_cm_revision() < CM_REV_CM3) {
uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << cpu_core(&cpu_data[cpu]));
uasm_i_sw(&p, GPR_T0, 0, r_pcohctl);
uasm_i_lw(&p, GPR_T0, 0, r_pcohctl);
uasm_i_sync(&p, __SYNC_full);
uasm_i_ehb(&p);
}
uasm_i_sw(&p, GPR_ZERO, 0, r_pcohctl);
uasm_i_lw(&p, GPR_T0, 0, r_pcohctl);
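	/* For clock or power gating, flush the FSB (if required) then issue the CPC command */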
if (state >= CPS_PM_CLOCK_GATED) {
err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
lbl_flush_fsb);
if (err)
goto out_err;
switch (state) {
case CPS_PM_CLOCK_GATED:
cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
break;
case CPS_PM_POWER_GATED:
cpc_cmd = CPC_Cx_CMD_PWRDOWN;
break;
default:
BUG();
goto out_err;
}
UASM_i_LA(&p, GPR_T0, (long)addr_cpc_cl_cmd());
uasm_i_addiu(&p, GPR_T1, GPR_ZERO, cpc_cmd);
uasm_i_sw(&p, GPR_T1, 0, GPR_T0);
if (state == CPS_PM_POWER_GATED) {
uasm_build_label(&l, p, lbl_hang);
uasm_il_b(&p, &r, lbl_hang);
uasm_i_nop(&p);
goto gen_done;
}
uasm_i_sync(&p, __SYNC_full);
uasm_i_ehb(&p);
}
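	/*
	 * For the non-coherent wait state, release any coupled VPEs polling
	 * the ready count by setting its top bit, then execute the wait
	 * instruction.
	 */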
if (state == CPS_PM_NC_WAIT) {
if (coupled_coherence)
cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
lbl_set_cont);
uasm_build_label(&l, p, lbl_secondary_cont);
uasm_i_wait(&p, 0);
}
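	/* Re-enable coherence for the core */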
uasm_i_addiu(&p, GPR_T0, GPR_ZERO, mips_cm_revision() < CM_REV_CM3
? CM_GCR_Cx_COHERENCE_COHDOMAINEN
: CM3_GCR_Cx_COHERENCE_COHEN);
uasm_i_sw(&p, GPR_T0, 0, r_pcohctl);
uasm_i_lw(&p, GPR_T0, 0, r_pcohctl);
uasm_i_sync(&p, __SYNC_full);
uasm_i_ehb(&p);
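	/*
	 * After coupled NC wait, atomically decrement the ready count and
	 * return (in v0) the number of VPEs that were in the state, with the
	 * top 'continue' bit masked off.
	 */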
if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
uasm_build_label(&l, p, lbl_decready);
uasm_i_sync(&p, __SYNC_mb);
uasm_i_ll(&p, GPR_T1, 0, r_nc_count);
uasm_i_addiu(&p, GPR_T2, GPR_T1, -1);
uasm_i_sc(&p, GPR_T2, 0, r_nc_count);
uasm_il_beqz(&p, &r, GPR_T2, lbl_decready);
uasm_i_andi(&p, GPR_V0, GPR_T1, (1 << fls(smp_num_siblings)) - 1);
uasm_i_sync(&p, __SYNC_mb);
}
if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);
uasm_build_label(&l, p, lbl_secondary_cont);
uasm_i_sync(&p, __SYNC_mb);
}
uasm_i_jr(&p, GPR_RA);
uasm_i_nop(&p);
gen_done:
BUG_ON((p - buf) > max_instrs);
BUG_ON((l - labels) > ARRAY_SIZE(labels));
BUG_ON((r - relocs) > ARRAY_SIZE(relocs));
uasm_resolve_relocs(relocs, labels);
local_flush_icache_range((unsigned long)buf, (unsigned long)p);
return buf;
out_err:
kfree(buf);
return NULL;
}
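
/*
 * CPU hotplug online callback: generate the entry code for each supported
 * state (shared with the CPU's siblings, since all VPEs in a core run the
 * same code) and allocate the core's shared ready_count on first use.
 */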
static int cps_pm_online_cpu(unsigned int cpu)
{
unsigned int sibling, core;
void *entry_fn, *core_rc;
enum cps_pm_state state;
core = cpu_core(&cpu_data[cpu]);
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
if (per_cpu(nc_asm_enter, cpu)[state])
continue;
if (!test_bit(state, state_support))
continue;
entry_fn = cps_gen_entry_code(cpu, state);
if (!entry_fn) {
pr_err("Failed to generate core %u state %u entry\n",
core, state);
clear_bit(state, state_support);
}
for_each_cpu(sibling, &cpu_sibling_map[cpu])
per_cpu(nc_asm_enter, sibling)[state] = entry_fn;
}
if (!per_cpu(ready_count, cpu)) {
core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
if (!core_rc) {
pr_err("Failed allocate core %u ready_count\n", core);
return -ENOMEM;
}
for_each_cpu(sibling, &cpu_sibling_map[cpu])
per_cpu(ready_count, sibling) = core_rc;
}
return 0;
}
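
/* PM notifier: refuse to suspend the system while an EJTAG probe is connected */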
static int cps_pm_power_notifier(struct notifier_block *this,
unsigned long event, void *ptr)
{
unsigned int stat;
switch (event) {
case PM_SUSPEND_PREPARE:
stat = read_cpc_cl_stat_conf();
if (stat & CPC_Cx_STAT_CONF_EJTAG_PROBE) {
pr_warn("JTAG probe is connected - abort suspend\n");
return NOTIFY_BAD;
}
return NOTIFY_DONE;
default:
return NOTIFY_DONE;
}
}
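
/*
 * Probe the CM & CPC to determine which low-power states are available,
 * register the PM notifier, and install the CPU hotplug callback which
 * generates state entry code as CPUs come online.
 */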
static int __init cps_pm_init(void)
{
if (!mips_cm_present()) {
pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
return 0;
}
if (cpu_wait == r4k_wait_irqoff)
set_bit(CPS_PM_NC_WAIT, state_support);
else
pr_warn("pm-cps: non-coherent wait unavailable\n");
if (mips_cpc_present()) {
if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL)
set_bit(CPS_PM_CLOCK_GATED, state_support);
else
pr_warn("pm-cps: CPC does not support clock gating\n");
if (mips_cps_smp_in_use())
set_bit(CPS_PM_POWER_GATED, state_support);
else
pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
} else {
pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
}
pm_notifier(cps_pm_power_notifier, 0);
return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online",
cps_pm_online_cpu, NULL);
}
arch_initcall(cps_pm_init);