#include <sys/cdefs.h>

#include "opt_clock.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/clockcalib.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/smp.h>
#include <sys/vdso.h>

#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/fpu.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <x86/vmware.h>

#include <dev/acpica/acpi_hpet.h>
#include <contrib/dev/acpica/include/acpi.h>

#include "cpufreq_if.h"

uint64_t tsc_freq;
int tsc_is_invariant;
int tsc_perf_stat;
static int tsc_early_calib_exact;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");

#ifdef SMP
int smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");

int smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");
#endif

static int tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");

static int tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");

static int tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0,
    "Disable early TSC frequency calibration");

static void tsc_freq_changed(void *arg, const struct cf_level *level,
int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
int *status);
static u_int tsc_get_timecount(struct timecounter *tc);
static inline u_int tsc_get_timecount_low(struct timecounter *tc);
static u_int tsc_get_timecount_lfence(struct timecounter *tc);
static u_int tsc_get_timecount_low_lfence(struct timecounter *tc);
static u_int tsc_get_timecount_mfence(struct timecounter *tc);
static u_int tsc_get_timecount_low_mfence(struct timecounter *tc);
static u_int tscp_get_timecount(struct timecounter *tc);
static u_int tscp_get_timecount_low(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);
static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th,
struct timecounter *tc);
#ifdef COMPAT_FREEBSD32
static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
struct timecounter *tc);
#endif
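
/*
 * The TSC timecounter.  tc_get_timecount is redirected during boot to
 * the cheapest correctly-ordered read method the CPU supports, and
 * tc_quality is recomputed once the SMP synchronization test has run.
 */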
static struct timecounter tsc_timecounter = {
.tc_get_timecount = tsc_get_timecount,
.tc_counter_mask = ~0u,
.tc_name = "TSC",
.tc_quality = 800,
.tc_fill_vdso_timehands = x86_tsc_vdso_timehands,
#ifdef COMPAT_FREEBSD32
.tc_fill_vdso_timehands32 = x86_tsc_vdso_timehands32,
#endif
};
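
/*
 * Try the generic hypervisor CPUID interface: leaf 0x40000010 reports
 * the TSC frequency in kHz in %eax.  A frequency obtained here is
 * exact, so late calibration can be skipped.
 */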
static bool
tsc_freq_cpuid_vm(void)
{
u_int regs[4];
if (vm_guest == VM_GUEST_NO)
return (false);
if (hv_high < 0x40000010)
return (false);
do_cpuid(0x40000010, regs);
tsc_freq = (uint64_t)(regs[0]) * 1000;
tsc_early_calib_exact = 1;
return (true);
}
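
/*
 * VMware guests: the VMW_HVCMD_GETHZ hypercall returns the TSC
 * frequency in Hz split across %eax (low) and %ebx (high 32 bits);
 * %ebx == UINT_MAX means the value is unavailable.  A value obtained
 * here is exact.
 */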
static void
tsc_freq_vmware(void)
{
u_int regs[4];
vmware_hvcall(0, VMW_HVCMD_GETHZ, VMW_HVCMD_DEFAULT_PARAM, regs);
if (regs[1] != UINT_MAX)
tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
tsc_early_calib_exact = 1;
}
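
/*
 * Xen guests: the time leaf (base + 3) reports the virtualized TSC
 * frequency in kHz in %ecx.
 */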
static void
tsc_freq_xen(void)
{
u_int regs[4];
KASSERT(hv_high >= 0x40000003, ("Invalid max hypervisor leaf on Xen"));
cpuid_count(0x40000003, 0, regs);
tsc_freq = (uint64_t)(regs[2]) * 1000;
tsc_early_calib_exact = 1;
}
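
/*
 * Compute the TSC frequency from CPUID leaf 0x15 ("Time Stamp Counter
 * and Nominal Core Crystal Clock"): crystal Hz (%ecx) * %ebx / %eax.
 * If that leaf is incomplete, fall back to leaf 0x16, which reports the
 * processor base frequency in MHz.  Both values are nominal, so late
 * calibration is still performed afterwards.
 */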
static bool
tsc_freq_cpuid(uint64_t *res)
{
u_int regs[4];
if (cpu_high < 0x15)
return (false);
do_cpuid(0x15, regs);
if (regs[0] != 0 && regs[1] != 0 && regs[2] != 0) {
*res = (uint64_t)regs[2] * regs[1] / regs[0];
return (true);
}
if (cpu_high < 0x16)
return (false);
do_cpuid(0x16, regs);
if (regs[0] != 0) {
*res = (uint64_t)regs[0] * 1000000;
return (true);
}
return (false);
}
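
/*
 * Extract the nominal frequency from the Intel brand string, which ends
 * in a substring such as "2.40GHz"; see the Intel Processor
 * Identification and the CPUID Instruction application note (AP-485).
 */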
static bool
tsc_freq_intel_brand(uint64_t *res)
{
char brand[48];
u_int regs[4];
uint64_t freq;
char *p;
u_int i;
if (cpu_exthigh >= 0x80000004) {
p = brand;
for (i = 0x80000002; i < 0x80000005; i++) {
do_cpuid(i, regs);
memcpy(p, regs, sizeof(regs));
p += sizeof(regs);
}
p = NULL;
for (i = 0; i < sizeof(brand) - 1; i++)
if (brand[i] == 'H' && brand[i + 1] == 'z')
p = brand + i;
if (p != NULL) {
p -= 5;
switch (p[4]) {
case 'M':
i = 1;
break;
case 'G':
i = 1000;
break;
case 'T':
i = 1000000;
break;
default:
return (false);
}
#define C2D(c) ((c) - '0')
if (p[1] == '.') {
freq = C2D(p[0]) * 1000;
freq += C2D(p[2]) * 100;
freq += C2D(p[3]) * 10;
freq *= i * 1000;
} else {
freq = C2D(p[0]) * 1000;
freq += C2D(p[1]) * 100;
freq += C2D(p[2]) * 10;
freq += C2D(p[3]);
freq *= i * 1000000;
}
#undef C2D
*res = freq;
return (true);
}
}
return (false);
}
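
/*
 * Calibrate the TSC against the 8254 PIT: estimate the rdtsc and
 * DELAY() call overhead, then count the TSC cycles elapsed across a
 * 100ms DELAY() and scale the result to Hz.
 */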
static void
tsc_freq_tc(uint64_t *res)
{
uint64_t tsc1, tsc2;
int64_t overhead;
int count, i;
overhead = 0;
for (i = 0, count = 8; i < count; i++) {
tsc1 = rdtsc_ordered();
DELAY(0);
tsc2 = rdtsc_ordered();
if (i > 0)
overhead += tsc2 - tsc1;
}
overhead /= count;
tsc1 = rdtsc_ordered();
DELAY(100000);
tsc2 = rdtsc_ordered();
	*res = (tsc2 - tsc1 - overhead) * 10;
}
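
/*
 * Early TSC probing: disable the TSC on CPU models where it is known to
 * be unusable, detect P-state invariance, select a fenced read method
 * for the cputicker, and try to learn the frequency from the hypervisor
 * or from CPUID before any calibration hardware is available.
 */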
static void
probe_tsc_freq_early(void)
{
#ifdef __i386__
switch (cpu_vendor_id) {
case CPU_VENDOR_AMD:
switch (cpu_id & 0xFF0) {
case 0x500:
tsc_disabled = 1;
return;
}
break;
case CPU_VENDOR_CENTAUR:
switch (cpu_id & 0xff0) {
case 0x540:
tsc_disabled = 1;
return;
}
break;
case CPU_VENDOR_NSC:
switch (cpu_id & 0xff0) {
case 0x540:
if ((cpu_id & CPUID_STEPPING) == 0) {
tsc_disabled = 1;
return;
}
break;
}
break;
}
#endif
switch (cpu_vendor_id) {
case CPU_VENDOR_AMD:
case CPU_VENDOR_HYGON:
if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
(vm_guest == VM_GUEST_NO &&
CPUID_TO_FAMILY(cpu_id) >= 0x10))
tsc_is_invariant = 1;
if (cpu_feature & CPUID_SSE2) {
tsc_timecounter.tc_get_timecount =
tsc_get_timecount_mfence;
}
break;
case CPU_VENDOR_INTEL:
if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
(vm_guest == VM_GUEST_NO &&
((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
CPUID_TO_MODEL(cpu_id) >= 0xe) ||
(CPUID_TO_FAMILY(cpu_id) == 0xf &&
CPUID_TO_MODEL(cpu_id) >= 0x3))))
tsc_is_invariant = 1;
if (cpu_feature & CPUID_SSE2) {
tsc_timecounter.tc_get_timecount =
tsc_get_timecount_lfence;
}
break;
case CPU_VENDOR_CENTAUR:
if (vm_guest == VM_GUEST_NO &&
CPUID_TO_FAMILY(cpu_id) == 0x6 &&
CPUID_TO_MODEL(cpu_id) >= 0xf &&
(rdmsr(0x1203) & 0x100000000ULL) == 0)
tsc_is_invariant = 1;
if (cpu_feature & CPUID_SSE2) {
tsc_timecounter.tc_get_timecount =
tsc_get_timecount_lfence;
}
break;
}
if (tsc_freq_cpuid_vm()) {
if (bootverbose)
printf(
"Early TSC frequency %juHz derived from hypervisor CPUID\n",
(uintmax_t)tsc_freq);
} else if (vm_guest == VM_GUEST_VMWARE) {
tsc_freq_vmware();
if (bootverbose)
printf(
"Early TSC frequency %juHz derived from VMWare hypercall\n",
(uintmax_t)tsc_freq);
} else if (vm_guest == VM_GUEST_XEN) {
tsc_freq_xen();
if (bootverbose)
printf(
"Early TSC frequency %juHz derived from Xen CPUID\n",
(uintmax_t)tsc_freq);
} else if (tsc_freq_cpuid(&tsc_freq)) {
if (bootverbose)
printf("Early TSC frequency %juHz derived from CPUID\n",
(uintmax_t)tsc_freq);
}
}
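
/*
 * Late frequency probing, used when early probing found nothing: either
 * parse the Intel brand string (when PIT calibration is
 * administratively disabled) or calibrate against the 8254 PIT.  The
 * result is still preliminary and is refined by tsc_calibrate().
 */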
static void
probe_tsc_freq_late(void)
{
if (tsc_freq != 0)
return;
if (tsc_skip_calibration) {
if (cpu_vendor_id == CPU_VENDOR_INTEL &&
tsc_freq_intel_brand(&tsc_freq)) {
if (bootverbose)
printf(
"Early TSC frequency %juHz derived from brand string\n",
(uintmax_t)tsc_freq);
} else {
tsc_disabled = 1;
}
} else {
tsc_freq_tc(&tsc_freq);
if (bootverbose)
printf(
"Early TSC frequency %juHz calibrated from 8254 PIT\n",
(uintmax_t)tsc_freq);
}
}
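
/*
 * Finish TSC setup once DELAY() is usable: probe the frequency if it is
 * still unknown, verify that the APERF/MPERF MSRs actually advance
 * (some emulators advertise them in CPUID without backing them),
 * install the TSC as the CPU ticker, and register cpufreq event
 * handlers when the TSC is not invariant.
 */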
void
start_TSC(void)
{
uint64_t mperf, aperf;
if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
return;
probe_tsc_freq_late();
if (cpu_power_ecx & CPUID_PERF_STAT) {
mperf = rdmsr(MSR_MPERF);
aperf = rdmsr(MSR_APERF);
DELAY(10);
if (rdmsr(MSR_MPERF) != mperf && rdmsr(MSR_APERF) != aperf)
tsc_perf_stat = 1;
}
if (tsc_freq != 0)
set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);
if (tsc_is_invariant)
return;
tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}
#ifdef SMP
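
/*
 * Staggered TSC readers for the SMP synchronization test.  Each CPU has
 * three sample slots, one per rendezvous phase; cpuid is executed first
 * because it serializes the instruction stream, so rdtsc cannot be
 * speculated ahead of the rendezvous barrier.
 */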
#define TSC_READ(x) \
static void \
tsc_read_##x(void *arg) \
{ \
uint64_t *tsc = arg; \
u_int cpu = PCPU_GET(cpuid); \
\
__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx"); \
tsc[cpu * 3 + x] = rdtsc(); \
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ
#define N 1000
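
/*
 * Verify that the rendezvous phases interleave consistently: each CPU's
 * phase-1 and phase-2 samples must be strictly newer than every other
 * CPU's phase-0 and phase-1 samples, respectively.  Any violation means
 * the TSCs are not synchronized and clears smp_tsc.
 */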
static void
comp_smp_tsc(void *arg)
{
uint64_t *tsc;
int64_t d1, d2;
u_int cpu = PCPU_GET(cpuid);
u_int i, j, size;
size = (mp_maxid + 1) * 3;
for (i = 0, tsc = arg; i < N; i++, tsc += size)
CPU_FOREACH(j) {
if (j == cpu)
continue;
d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
if (d1 <= 0 || d2 <= 0) {
smp_tsc = 0;
return;
}
}
}
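
/*
 * Compute the range [min, max] of offsets that would make this AP's
 * samples consistent with the first CPU's, and apply the midpoint by
 * read-modify-writing the TSC MSR (0x10) directly.
 */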
static void
adj_smp_tsc(void *arg)
{
uint64_t *tsc;
int64_t d, min, max;
u_int cpu = PCPU_GET(cpuid);
u_int first, i, size;
first = CPU_FIRST();
if (cpu == first)
return;
min = INT64_MIN;
max = INT64_MAX;
size = (mp_maxid + 1) * 3;
for (i = 0, tsc = arg; i < N; i++, tsc += size) {
d = tsc[first * 3] - tsc[cpu * 3 + 1];
if (d > min)
min = d;
d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
if (d > min)
min = d;
d = tsc[first * 3 + 1] - tsc[cpu * 3];
if (d < max)
max = d;
d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
if (d < max)
max = d;
}
if (min > max)
return;
d = min / 2 + max / 2;
__asm __volatile (
"movl $0x10, %%ecx\n\t"
"rdmsr\n\t"
"addl %%edi, %%eax\n\t"
"adcl %%esi, %%edx\n\t"
"wrmsr\n"
:
: "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
: "ax", "cx", "dx", "cc"
);
}
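
/*
 * Test whether all CPUs' TSCs are synchronized, retrying with up to
 * adj_max_count adjustment passes.  Returns the timecounter quality to
 * use: 1000 for CPUs documented to keep their TSCs synchronized, 800
 * for other invariant-TSC systems that pass the test, and -100 when the
 * TSC cannot serve as a timecounter in SMP mode.  VirtualBox is special
 * cased to quality 0 because TSC misbehavior has been observed there.
 */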
static int
test_tsc(int adj_max_count)
{
uint64_t *data, *tsc;
u_int i, size, adj;
	if (!smp_tsc && !tsc_is_invariant)
		return (-100);
if (vm_guest == VM_GUEST_VBOX)
return (0);
TSENTER();
size = (mp_maxid + 1) * 3;
data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
adj = 0;
retry:
for (i = 0, tsc = data; i < N; i++, tsc += size)
smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
smp_tsc = 1;
smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc,
smp_no_rendezvous_barrier, data);
if (!smp_tsc && adj < adj_max_count) {
adj++;
smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc,
smp_no_rendezvous_barrier, data);
goto retry;
}
free(data, M_TEMP);
if (bootverbose)
printf("SMP: %sed TSC synchronization test%s\n",
smp_tsc ? "pass" : "fail",
adj > 0 ? " after adjustment" : "");
TSEXIT();
if (smp_tsc && tsc_is_invariant) {
switch (cpu_vendor_id) {
case CPU_VENDOR_AMD:
case CPU_VENDOR_HYGON:
if (CPUID_TO_FAMILY(cpu_id) >= 0x17)
return (1000);
if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
(amd_feature2 & AMDID2_CMP) == 0 ||
smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
break;
return (1000);
case CPU_VENDOR_INTEL:
return (1000);
}
return (800);
}
return (-100);
}
#undef N
#endif
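
/*
 * Configure the TSC timecounter: flag deep C-states as unsafe on Intel
 * CPUs without an invariant TSC, run the SMP synchronization test to
 * set the timecounter quality, select the cheapest correctly-ordered
 * read method (RDTSCP, or a fence plus RDTSC), and compute the
 * "TSC-low" shift that keeps the counter frequency small enough for the
 * timecounter arithmetic.
 */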
static void
init_TSC_tc(void *dummy __unused)
{
uint64_t max_freq;
int shift;
if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
return;
max_freq = UINT_MAX;
if (cpu_vendor_id == CPU_VENDOR_INTEL &&
(amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
if (bootverbose)
printf("TSC timecounter disables C2 and C3.\n");
}
#ifdef SMP
if (mp_ncpus > 1)
tsc_timecounter.tc_quality = test_tsc(smp_tsc_adjust);
else
#endif
if (tsc_is_invariant)
tsc_timecounter.tc_quality = 1000;
max_freq >>= tsc_shift;
for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
;
if ((amd_feature & AMDID_RDTSCP) != 0) {
tsc_timecounter.tc_get_timecount = shift > 0 ?
tscp_get_timecount_low : tscp_get_timecount;
} else if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
if (cpu_vendor_id == CPU_VENDOR_AMD ||
cpu_vendor_id == CPU_VENDOR_HYGON) {
tsc_timecounter.tc_get_timecount = shift > 0 ?
tsc_get_timecount_low_mfence :
tsc_get_timecount_mfence;
} else {
tsc_timecounter.tc_get_timecount = shift > 0 ?
tsc_get_timecount_low_lfence :
tsc_get_timecount_lfence;
}
} else {
tsc_timecounter.tc_get_timecount = shift > 0 ?
tsc_get_timecount_low : tsc_get_timecount;
}
if (shift > 0) {
tsc_timecounter.tc_name = "TSC-low";
if (bootverbose)
printf("TSC timecounter discards lower %d bit(s)\n",
shift);
}
if (tsc_freq != 0) {
tsc_timecounter.tc_frequency = tsc_freq >> shift;
tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
}
}

SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);
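
/*
 * Atomically publish a new TSC frequency to both the tsc_freq global
 * and the timecounter, applying the "TSC-low" shift stashed in tc_priv.
 */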
static void
tsc_update_freq(uint64_t new_freq)
{
atomic_store_rel_64(&tsc_freq, new_freq);
atomic_store_rel_64(&tsc_timecounter.tc_frequency,
new_freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
}
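
/*
 * Early initialization, called before timecounters are available: bail
 * out if there is no usable TSC, otherwise run the early frequency
 * probe.
 */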
void
tsc_init(void)
{
if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
return;
probe_tsc_freq_early();
}
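
/*
 * Perform late, more precise calibration of the TSC against a reference
 * clock via clockcalib(), unless the early frequency was already exact
 * (obtained from a hypervisor).  Then register the timecounter and the
 * CPU ticker.
 */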
void
tsc_calibrate(void)
{
uint64_t freq;
if (tsc_disabled)
return;
if (tsc_early_calib_exact)
goto calibrated;
fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX);
freq = clockcalib(rdtsc_ordered, "TSC");
fpu_kern_leave(curthread, NULL);
tsc_update_freq(freq);
calibrated:
tc_init(&tsc_timecounter);
set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);
}
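
/*
 * After a resume the TSCs may have been reset or may have drifted, so
 * re-run the synchronization test, forcing at least one adjustment
 * attempt, and update the timecounter quality accordingly.
 */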
void
resume_TSC(void)
{
#ifdef SMP
int quality;
if (tsc_timecounter.tc_quality < 0)
return;
if (mp_ncpus < 2)
return;
quality = test_tsc(MAX(smp_tsc_adjust, 1));
if (quality != tsc_timecounter.tc_quality) {
printf("TSC timecounter quality changed: %d -> %d\n",
tsc_timecounter.tc_quality, quality);
tsc_timecounter.tc_quality = quality;
}
#endif
}
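
/*
 * When the set of cpufreq levels changes, feed the new maximum
 * frequency to the CPU ticker and mark the ticker rate as variable.
 */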
static void
tsc_levels_changed(void *arg, int unit)
{
device_t cf_dev;
struct cf_level *levels;
int count, error;
uint64_t max_freq;
if (unit != 0)
return;
cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
if (cf_dev == NULL) {
printf("tsc_levels_changed() called but no cpufreq device?\n");
return;
}
count = 64;
levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
if (levels == NULL)
return;
error = CPUFREQ_LEVELS(cf_dev, levels, &count);
if (error == 0 && count != 0) {
max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
set_cputicker(rdtsc, max_freq, true);
} else
printf("tsc_levels_changed: no max freq found\n");
free(levels, M_TEMP);
}
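
/*
 * Veto a CPU frequency change while the TSC is the active timecounter,
 * since a non-invariant TSC would change rate underneath it.
 */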
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{
if (*status != 0 || timecounter != &tsc_timecounter)
return;
printf("timecounter TSC must not be in use when "
"changing frequencies; change denied\n");
*status = EBUSY;
}
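
/*
 * After a successful CPU frequency change, recompute and publish the
 * new TSC frequency.
 */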
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
uint64_t freq;
if (tsc_disabled || status != 0)
return;
freq = (uint64_t)level->total_set.freq * 1000000;
tsc_update_freq(freq);
}
static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
int error;
uint64_t freq;
freq = atomic_load_acq_64(&tsc_freq);
if (freq == 0)
return (EOPNOTSUPP);
error = sysctl_handle_64(oidp, &freq, 0, req);
if (error == 0 && req->newptr != NULL)
tsc_update_freq(freq);
return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq,
CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
0, 0, sysctl_machdep_tsc_freq, "QU",
"Time Stamp Counter frequency");
static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{
return (rdtsc32());
}

static u_int
tscp_get_timecount(struct timecounter *tc __unused)
{
return (rdtscp32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
uint32_t rv;
__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
: "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
return (rv);
}

static u_int
tscp_get_timecount_low(struct timecounter *tc)
{
uint32_t rv;
__asm __volatile("rdtscp; movl %1, %%ecx; shrd %%cl, %%edx, %0"
: "=&a" (rv) : "m" (tc->tc_priv) : "ecx", "edx");
return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{
lfence();
return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{
lfence();
return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{
mfence();
return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{
mfence();
return (tsc_get_timecount_low(tc));
}
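
/*
 * Export the constants userland needs to read the timecounter directly
 * from the vDSO: the algorithm tag and the "TSC-low" shift.  No HPET is
 * referenced (index 0xffffffff) and the paravirtual-clock fields are
 * unused.
 */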
static uint32_t
x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{
vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC;
vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv;
vdso_th->th_x86_hpet_idx = 0xffffffff;
vdso_th->th_x86_pvc_last_systime = 0;
vdso_th->th_x86_pvc_stable_mask = 0;
bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
return (1);
}

#ifdef COMPAT_FREEBSD32
static uint32_t
x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
struct timecounter *tc)
{
vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC;
vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv;
vdso_th32->th_x86_hpet_idx = 0xffffffff;
vdso_th32->th_x86_pvc_last_systime[0] = 0;
vdso_th32->th_x86_pvc_last_systime[1] = 0;
vdso_th32->th_x86_pvc_stable_mask = 0;
bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
return (1);
}
#endif