#include <sys/types.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <stddef.h>
#include <string.h>
#include "amd64_archlevel.h"
#include "libc_private.h"
#define ARCHLEVEL_ENV "ARCHLEVEL"
/*
 * Cached result of the architecture-level probe; stays X86_64_UNDEFINED
 * until archlevel() publishes a result with atomic_cmpset_int().
 */
static volatile int amd64_archlevel = X86_64_UNDEFINED;
/*
 * CPUID feature bits required for each x86-64 microarchitecture level,
 * in ascending order of capability.  A CPU supports a level iff it has
 * every bit in all four masks of that level.
 *
 * amd_ecx is %ecx of extended CPUID leaf 0x80000001 (fetched in
 * supported_archlevel()).  The other three are supplied by the
 * resolver caller; given the CPUID_*, CPUID2_* and CPUID_STDEXT_*
 * macro families they are presumably leaf 1 %edx, leaf 1 %ecx and
 * leaf 7 %ebx respectively -- confirm against the resolver stub.
 */
static const struct archlevel {
char name[10];
u_int feat_edx, feat_ecx, amd_ecx, ext_ebx;
} levels[] = {
{
/* no feature requirements at all: plain scalar fallback code */
.name = "scalar",
.feat_edx = 0,
.feat_ecx = 0,
.amd_ecx = 0,
.ext_ebx = 0,
}, {
/* features every amd64 CPU has; the baseline ISA */
#define FEAT_EDX_BASELINE (CPUID_FPU | CPUID_CX8 | CPUID_CMOV | CPUID_MMX | \
CPUID_FXSR | CPUID_SSE | CPUID_SSE2)
.name = "baseline",
.feat_edx = FEAT_EDX_BASELINE,
.feat_ecx = 0,
.amd_ecx = 0,
.ext_ebx = 0,
}, {
/* x86-64-v2: adds SSE3..SSE4.2, CMPXCHG16B, POPCNT, LAHF/SAHF */
#define FEAT_ECX_V2 (CPUID2_SSE3 | CPUID2_SSSE3 | CPUID2_CX16 | CPUID2_SSE41 | \
CPUID2_SSE42 | CPUID2_POPCNT)
#define AMD_ECX_V2 AMDID2_LAHF
.name = "x86-64-v2",
.feat_edx = FEAT_EDX_BASELINE,
.feat_ecx = FEAT_ECX_V2,
.amd_ecx = AMD_ECX_V2,
.ext_ebx = 0,
}, {
/* x86-64-v3: adds AVX/AVX2, FMA, BMI1/2, MOVBE, F16C, LZCNT (ABM) */
#define FEAT_ECX_V3 (FEAT_ECX_V2 | CPUID2_FMA | CPUID2_MOVBE | \
CPUID2_OSXSAVE | CPUID2_AVX | CPUID2_F16C)
#define AMD_ECX_V3 (AMD_ECX_V2 | AMDID2_ABM)
#define EXT_EBX_V3 (CPUID_STDEXT_BMI1 | CPUID_STDEXT_AVX2 | CPUID_STDEXT_BMI2)
.name = "x86-64-v3",
.feat_edx = FEAT_EDX_BASELINE,
.feat_ecx = FEAT_ECX_V3,
.amd_ecx = AMD_ECX_V3,
.ext_ebx = EXT_EBX_V3,
}, {
/* x86-64-v4: adds the AVX-512 F/DQ/CD/BW/VL subsets */
#define EXT_EBX_V4 (EXT_EBX_V3 | CPUID_STDEXT_AVX512F | \
CPUID_STDEXT_AVX512DQ | CPUID_STDEXT_AVX512CD | \
CPUID_STDEXT_AVX512BW | CPUID_STDEXT_AVX512VL)
.name = "x86-64-v4",
.feat_edx = FEAT_EDX_BASELINE,
.feat_ecx = FEAT_ECX_V3,
.amd_ecx = AMD_ECX_V3,
.ext_ebx = EXT_EBX_V4,
}
};
/*
 * Determine the highest architecture level this CPU supports, given
 * the feature words the resolver received.  Levels are probed in
 * ascending order; the result is one below the first level whose
 * required bits are not all present, or X86_64_MAX if every level
 * passes.  ext_ecx is currently unused.
 */
static int
supported_archlevel(u_int feat_edx, u_int feat_ecx, u_int ext_ebx, u_int ext_ecx)
{
	u_int regs[4];
	u_int amd_ecx = 0;
	int level;

	(void)ext_ecx;

	/* fetch %ecx of extended leaf 0x80000001 if that leaf exists */
	do_cpuid(0x80000000, regs);
	if (regs[0] >= 0x80000001) {
		do_cpuid(0x80000001, regs);
		amd_ecx = regs[2];
	}

	level = X86_64_BASELINE;
	while (level <= X86_64_MAX) {
		const struct archlevel *want = &levels[level];
		int satisfied =
		    (want->feat_edx & feat_edx) == want->feat_edx &&
		    (want->feat_ecx & feat_ecx) == want->feat_ecx &&
		    (want->amd_ecx & amd_ecx) == want->amd_ecx &&
		    (want->ext_ebx & ext_ebx) == want->ext_ebx;

		if (!satisfied)
			break;

		level++;
	}

	/* level now names the first unsupported entry (or X86_64_MAX + 1) */
	return (level - 1);
}
/*
 * Parse an architecture-level name and return its index in levels[],
 * or X86_64_UNDEFINED if the string names no known level.  A leading
 * '!' marks the selection as forced; that is reported through *force,
 * which is written on every path.  The comparison is done by hand
 * rather than with str*() functions.
 *
 * NOTE(review): the ':' and '+' terminator cases below can only fire
 * when the level name itself contains that character (str[i] must
 * equal name[i] to reach the test), so in practice only '\0' ends a
 * match -- kept as-is to preserve behavior.
 */
static int
match_archlevel(const char *str, int *force)
{
	int forced = 0;
	int level;

	*force = 0;

	/* a leading '!' requests the level even if the CPU lacks it */
	if (*str == '!') {
		forced = 1;
		str++;
	}

	for (level = 0; level <= X86_64_MAX; level++) {
		const char *name = levels[level].name;
		size_t i = 0;

		while (str[i] == name[i]) {
			if (str[i] == '\0' || str[i] == ':' || str[i] == '+') {
				*force = forced;
				return (level);
			}
			i++;
		}
	}

	return (X86_64_UNDEFINED);
}
/*
 * Look for the ARCHLEVEL variable in the environment and parse its
 * value.  Returns the requested level, or X86_64_UNDEFINED if the
 * variable is absent or does not name a level.  *force is written on
 * every path: 1 if the value carried a '!' prefix, 0 otherwise.
 *
 * Fix: the environ == NULL early return previously left *force
 * uninitialized; the caller reads it unconditionally, which was
 * undefined behavior on that path.
 */
static int
env_archlevel(int *force)
{
	size_t i;

	/* default to "not forced" so every return path defines *force */
	*force = 0;

	if (environ == NULL)
		return (X86_64_UNDEFINED);

	for (i = 0; environ[i] != NULL; i++) {
		size_t j;

		/*
		 * Hand-rolled prefix match against "ARCHLEVEL=".
		 * NOTE(review): presumably avoids strncmp() because string
		 * functions may themselves be ifunc-resolved -- confirm.
		 */
		for (j = 0; environ[i][j] == ARCHLEVEL_ENV "="[j]; j++)
			if (environ[i][j] == '=')
				return (match_archlevel(&environ[i][j + 1],
				    force));
	}

	return (X86_64_UNDEFINED);
}
/*
 * Return the effective architecture level, computing and caching it in
 * amd64_archlevel on first use.  The result is the ARCHLEVEL request
 * clamped to what the hardware supports, unless the request was forced
 * ("!level"), in which case it is taken as-is.  Relies on
 * env_archlevel() setting force on every return path.
 */
static int
archlevel(u_int feat_edx, u_int feat_ecx, u_int ext_ebx, u_int ext_ecx)
{
int islevel, wantlevel, hwlevel, force;
/* fast path: a previous call already published the level */
islevel = atomic_load_int(&amd64_archlevel);
if (islevel != X86_64_UNDEFINED)
return (islevel);
wantlevel = env_archlevel(&force);
if (!force) {
/* clamp the request (or absence thereof) to the hardware's level */
hwlevel = supported_archlevel(feat_edx, feat_ecx, ext_ebx, ext_ecx);
if (wantlevel == X86_64_UNDEFINED || wantlevel > hwlevel)
wantlevel = hwlevel;
}
/*
 * Publish the result.  If another thread raced us and won the
 * compare-and-set, adopt its value so all callers agree on one level.
 */
if (atomic_cmpset_int(&amd64_archlevel, islevel, wantlevel))
return (wantlevel);
else
return (atomic_load_int(&amd64_archlevel));
}
/*
 * Ifunc resolver backend: pick the best available implementation for
 * the detected architecture level.  funcs[] holds one entry per level;
 * each non-zero entry is a byte offset relative to the start of the
 * funcs[] table itself (0 means no implementation for that level).
 * Scan downward from the effective level toward the scalar fallback
 * and return the first implementation found.
 */
dlfunc_t
__archlevel_resolve(u_int feat_edx, u_int feat_ecx, u_int ext_ebx,
u_int ext_ecx, int32_t funcs[static X86_64_MAX + 1])
{
int level;
for (level = archlevel(feat_edx, feat_ecx, ext_ebx, ext_ecx); level >= 0; level--)
if (funcs[level] != 0)
return (dlfunc_t)((uintptr_t)funcs + (ptrdiff_t)funcs[level]);
/* no implementation at any level: nothing sane to return, so abort */
__builtin_trap();
}