#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/pwrnow.h>
#include <sys/cpu_acpi.h>
#include <sys/cpupm.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>
/*
 * CPU power management (P-state) interfaces for AMD PowerNow!,
 * invoked by the generic cpupm framework through pwrnow_ops below.
 */
static int pwrnow_init(cpu_t *);
static void pwrnow_fini(cpu_t *);
static void pwrnow_power(cpuset_t, uint32_t);
static void pwrnow_stop(cpu_t *);
/* Returns B_TRUE if Core Performance Boost (turbo) is advertised. */
static boolean_t pwrnow_cpb_supported(void);

/*
 * P-state ops vector handed to the cpupm framework.
 */
cpupm_state_ops_t pwrnow_ops = {
	"PowerNow! Technology",
	pwrnow_init,
	pwrnow_fini,
	pwrnow_power,
	pwrnow_stop
};

/*
 * Error returns (only SUCCESS and NO_PM are produced in this file).
 */
#define PWRNOW_RET_SUCCESS 0x00
#define PWRNOW_RET_NO_PM 0x01
#define PWRNOW_RET_UNSUP_STATE 0x02
#define PWRNOW_RET_TRANS_INCOMPLETE 0x03

/* Latency in usec -- presumably a transition-poll delay; unused here, TODO confirm */
#define PWRNOW_LATENCY_WAIT 10

/*
 * MSRs used for fixed-hardware (ACPI _PCT) P-state transitions.
 */
#define PWRNOW_PERF_CTL_MSR 0xC0010062
#define PWRNOW_PERF_STATUS_MSR 0xC0010063

/*
 * CPUID Fn8000_0007 EDX feature bits tested by pwrnow_supported()
 * and pwrnow_cpb_supported().
 */
#define AMD_CPUID_PSTATE_HARDWARE (1<<7)
#define AMD_CPUID_TSC_CONSTANT (1<<8)
#define AMD_CPUID_CPB (1<<9)

#ifdef DEBUG
volatile int pwrnow_debug = 0;
/*
 * NOTE(review): this macro is not do { } while (0) protected and
 * embeds its own trailing ';', so call sites may legally omit theirs
 * (see pwrnow_init()).  It is unsafe in an unbraced if/else; confirm
 * before reusing it in new code.
 */
#define PWRNOW_DEBUG(arglist) if (pwrnow_debug) printf arglist;
#else
#define PWRNOW_DEBUG(arglist)
#endif
/*
 * Write a transition control value to the location described by the
 * ACPI _PCT control register.  Only the fixed-hardware address space
 * (the PERF_CTL MSR) is supported; any other address space fires a
 * DTrace probe and the write is silently dropped.
 */
static void
write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
{
	cpu_acpi_pct_t *pct_ctrl = CPU_ACPI_PCT_CTRL(handle);

	if (pct_ctrl->cr_addrspace_id != ACPI_ADR_SPACE_FIXED_HARDWARE) {
		/* Unsupported address space type for the control register. */
		DTRACE_PROBE1(pwrnow_ctrl_unsupported_type, uint8_t,
		    pct_ctrl->cr_addrspace_id);
		return;
	}

	wrmsr(PWRNOW_PERF_CTL_MSR, (uint64_t)ctrl);
	DTRACE_PROBE1(pwrnow_ctrl_write, uint32_t, ctrl);
}
/*
 * Transition the current processor to the P-state identified by the
 * index in arg1.  This runs on the CPU being transitioned: it is
 * called either directly by pwrnow_power() for the local CPU or as a
 * cross-call handler for remote CPUs, hence the xc_arg_t signature
 * (arg2 and arg3 are unused).  Always returns 0, per the xc_call
 * handler convention.
 */
static int
pwrnow_pstate_transition(xc_arg_t arg1, xc_arg_t arg2 __unused,
    xc_arg_t arg3 __unused)
{
	uint32_t req_state = (uint32_t)arg1;
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pstate_t *req_pstate;
	uint32_t ctrl;

	/* Index into the cached ACPI P-state table for this CPU. */
	req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	req_pstate += req_state;

	DTRACE_PROBE1(pwrnow_transition_freq, uint32_t,
	    CPU_ACPI_FREQ(req_pstate));

	/* Initiate the processor P-state change via the _PCT control value. */
	ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
	write_ctrl(handle, ctrl);

	/* Record turbo statistics across the old -> new state, if enabled. */
	if (mach_state->ms_turbo != NULL)
		cpupm_record_turbo_info(mach_state->ms_turbo,
		    mach_state->ms_pstate.cma_state.pstate, req_state);

	mach_state->ms_pstate.cma_state.pstate = req_state;
	/* ACPI reports frequency in MHz; cpu_set_curr_clock() wants Hz. */
	cpu_set_curr_clock((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000);
	return (0);
}
/*
 * Drive every CPU in 'set' to the P-state indexed by req_state.
 * The current CPU (if in the set) is transitioned with a direct call;
 * preemption is disabled so we cannot migrate between testing
 * CPU->cpu_id and making the transition.  Any remaining CPUs are
 * reached via cross-call.
 */
static void
pwrnow_power(cpuset_t set, uint32_t req_state)
{
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
		(void) pwrnow_pstate_transition(req_state, 0, 0);
		/* Remove ourselves so the cross-call skips this CPU. */
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)req_state, 0, 0,
		    CPUSET2BV(set), pwrnow_pstate_transition);
	}
	kpreempt_enable();
}
/*
 * Validate and initialize PowerNow! (ACPI P-state) support for CPU
 * 'cp'.  Caches the ACPI P-state data, verifies that _PCT transitions
 * use fixed hardware (MSRs), sets up the P-state dependency domains
 * and, when Core Performance Boost is available, the turbo subsystem.
 *
 * Returns PWRNOW_RET_SUCCESS on success; on any failure the partially
 * initialized state is torn down via pwrnow_fini() and
 * PWRNOW_RET_NO_PM is returned.
 */
static int
pwrnow_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pct_t *pct_stat;
	static int logged = 0;	/* warn about broken BIOS tables only once */

	PWRNOW_DEBUG(("pwrnow_init: processor %d\n", cp->cpu_id));

	/*
	 * Cache the ACPI P-state objects (_PSS, _PCT, ...).  Without
	 * them there is nothing we can manage.
	 */
	if (cpu_acpi_cache_pstate_data(handle) != 0) {
		if (!logged) {
			cmn_err(CE_NOTE, "!PowerNow! support is being "
			    "disabled due to errors parsing ACPI P-state "
			    "objects exported by BIOS.");
			logged = 1;
		}
		pwrnow_fini(cp);
		return (PWRNOW_RET_NO_PM);
	}

	/*
	 * We only support fixed-hardware (MSR-based) P-state
	 * transitions; bail if _PCT asks for anything else.
	 */
	pct_stat = CPU_ACPI_PCT_STATUS(handle);
	switch (pct_stat->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		PWRNOW_DEBUG(("Transitions will use fixed hardware\n"));
		break;
	default:
		cmn_err(CE_WARN, "!_PCT configured for unsupported "
		    "addrspace = %d.", pct_stat->cr_addrspace_id);
		cmn_err(CE_NOTE, "!CPU power management will not function.");
		pwrnow_fini(cp);
		return (PWRNOW_RET_NO_PM);
	}

	cpupm_alloc_domains(cp, CPUPM_P_STATES);

	/*
	 * Core Performance Boost (turbo) is optional; enable its
	 * bookkeeping only when CPUID advertises it.
	 */
	if (pwrnow_cpb_supported())
		mach_state->ms_turbo = cpupm_turbo_init(cp);

	/*
	 * Fix: terminate the statement explicitly.  The original line
	 * relied on the ';' embedded in the DEBUG flavor of the
	 * PWRNOW_DEBUG() macro (and on the non-DEBUG flavor expanding
	 * to nothing), unlike every other call site in this file.
	 */
	PWRNOW_DEBUG(("Processor %d succeeded.\n", cp->cpu_id));
	return (PWRNOW_RET_SUCCESS);
}
/*
 * Tear down PowerNow! state for CPU 'cp': release the P-state
 * dependency domains, free the cached ACPI P-state data and shut
 * down turbo bookkeeping if it was enabled.
 */
static void
pwrnow_fini(cpu_t *cp)
{
	cpupm_mach_state_t *msp =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t hdl = msp->ms_acpi_handle;

	cpupm_free_domains(&cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(hdl);

	if (msp->ms_turbo != NULL)
		cpupm_turbo_fini(msp->ms_turbo);
	msp->ms_turbo = NULL;
}
boolean_t
pwrnow_supported()
{
struct cpuid_regs cpu_regs;
ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
if (!is_x86_feature(x86_featureset, X86FSET_MSR)) {
PWRNOW_DEBUG(("No CPUID or MSR support."));
return (B_FALSE);
}
cpu_regs.cp_eax = 0x80000007;
(void) __cpuid_insn(&cpu_regs);
if (!(cpu_regs.cp_edx & AMD_CPUID_TSC_CONSTANT)) {
PWRNOW_DEBUG(("No support for CPUs that are not P-state "
"TSC invariant.\n"));
return (B_FALSE);
}
if (!(cpu_regs.cp_edx & AMD_CPUID_PSTATE_HARDWARE)) {
PWRNOW_DEBUG(("Hardware P-State control is not supported.\n"));
return (B_FALSE);
}
return (B_TRUE);
}
static boolean_t
pwrnow_cpb_supported(void)
{
struct cpuid_regs cpu_regs;
ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
if (!is_x86_feature(x86_featureset, X86FSET_MSR)) {
PWRNOW_DEBUG(("No CPUID or MSR support."));
return (B_FALSE);
}
cpu_regs.cp_eax = 0x80000007;
(void) __cpuid_insn(&cpu_regs);
if (!(cpu_regs.cp_edx & AMD_CPUID_CPB))
return (B_FALSE);
return (B_TRUE);
}
/*
 * Stop P-state management on CPU 'cp' (e.g. for CPU offline):
 * detach it from its P-state dependency domain, free the cached
 * ACPI P-state data and shut down turbo bookkeeping if enabled.
 * Unlike pwrnow_fini(), only this CPU is removed from the domains.
 */
static void
pwrnow_stop(cpu_t *cp)
{
	cpupm_mach_state_t *msp =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t hdl = msp->ms_acpi_handle;

	cpupm_remove_domains(cp, CPUPM_P_STATES, &cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(hdl);

	if (msp->ms_turbo != NULL)
		cpupm_turbo_fini(msp->ms_turbo);
	msp->ms_turbo = NULL;
}