#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/cpu_feat.h>
#include <dev/psci/psci.h>
#include <dev/psci/smccc.h>
/*
 * Policy for the Speculative Store Bypass Disable (SSBD) workaround,
 * selected via the "kern.cfg.ssbd" environment variable (parsed in
 * ssbd_workaround_check()): force the firmware mitigation on or off
 * unconditionally, or (default) install a per-CPU hook so the kernel
 * controls it at runtime.
 */
static enum {
SSBD_FORCE_ON,
SSBD_FORCE_OFF,
SSBD_KERNEL,
} ssbd_method = SSBD_KERNEL;
/*
 * MIDR mask/value pair identifying a CPU implementation that needs the
 * firmware-assisted branch predictor hardening workaround.
 */
struct psci_bp_hardening_impl {
u_int midr_mask;	/* Bits of MIDR_EL1 to compare. */
u_int midr_value;	/* Expected value under midr_mask. */
};
/*
 * CPUs for which the branch predictor hardening workaround is applied:
 * Arm Cortex-A57/A72/A73/A75 and Cavium ThunderX2. Matching is by
 * implementer and part number only (variant/revision masked out).
 */
static struct psci_bp_hardening_impl psci_bp_hardening_impl[] = {
{
.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
.midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A57,0,0),
},
{
.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
.midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A72,0,0),
},
{
.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
.midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A73,0,0),
},
{
.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
.midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A75,0,0),
},
{
.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
.midr_value =
CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX2, 0,0),
}
};
/*
 * cpu_feat check hook: decide whether the firmware branch predictor
 * hardening workaround should be enabled on the CPU identified by midr.
 *
 * Returns FEAT_DEFAULT_ENABLE only when the MIDR matches a known
 * affected implementation, PSCI is present, and the firmware reports
 * support for SMCCC_ARCH_WORKAROUND_1; otherwise FEAT_ALWAYS_DISABLE.
 */
static cpu_feat_en
psci_bp_hardening_check(const struct cpu_feat *feat __unused, u_int midr)
{
	const struct psci_bp_hardening_impl *impl;
	size_t idx;

	for (idx = 0; idx < nitems(psci_bp_hardening_impl); idx++) {
		impl = &psci_bp_hardening_impl[idx];
		if ((midr & impl->midr_mask) != impl->midr_value)
			continue;

		/* The workaround is delivered through a PSCI/SMCCC call. */
		if (!psci_present)
			return (FEAT_ALWAYS_DISABLE);
		if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_1) !=
		    SMCCC_RET_SUCCESS)
			return (FEAT_ALWAYS_DISABLE);
		return (FEAT_DEFAULT_ENABLE);
	}

	/* CPU is not in the affected list. */
	return (FEAT_ALWAYS_DISABLE);
}
/*
 * cpu_feat enable hook: point this CPU's per-CPU bp_harden hook at the
 * SMCCC_ARCH_WORKAROUND_1 firmware call. Runs on each CPU (the feature
 * is registered with CPU_FEAT_PER_CPU). Always reports success.
 */
static bool
psci_bp_hardening_enable(const struct cpu_feat *feat __unused,
cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
u_int errata_count __unused)
{
PCPU_SET(bp_harden, smccc_arch_workaround_1);
return (true);
}
/*
 * Register the branch predictor hardening workaround as a per-CPU
 * feature. NOTE(review): CPU_FEAT_AFTER_DEV presumably defers the check
 * until the PSCI/SMCCC firmware interface has been probed — confirm
 * against the cpu_feat framework.
 */
CPU_FEAT(feat_csv2_missing, "Branch Predictor Hardening",
psci_bp_hardening_check, NULL, psci_bp_hardening_enable, NULL,
CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
/*
 * cpu_feat check hook for the SSBD workaround.
 *
 * On the boot CPU, parse the "kern.cfg.ssbd" tunable once to select the
 * policy ("force-on" / "force-off", default kernel-controlled); the
 * other CPUs reuse the cached ssbd_method. The workaround is enabled by
 * default only when PSCI is present and the firmware implements
 * SMCCC_ARCH_WORKAROUND_2.
 */
static cpu_feat_en
ssbd_workaround_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
	char *env;

	if (PCPU_GET(cpuid) == 0) {
		env = kern_getenv("kern.cfg.ssbd");
		if (env != NULL) {
			if (strcmp(env, "force-on") == 0) {
				ssbd_method = SSBD_FORCE_ON;
			} else if (strcmp(env, "force-off") == 0) {
				ssbd_method = SSBD_FORCE_OFF;
			}
			/*
			 * kern_getenv() returns a dynamically allocated
			 * buffer; release it to avoid leaking it.
			 */
			freeenv(env);
		}
	}

	if (!psci_present)
		return (FEAT_ALWAYS_DISABLE);
	if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_2) != SMCCC_RET_SUCCESS)
		return (FEAT_ALWAYS_DISABLE);
	return (FEAT_DEFAULT_ENABLE);
}
/*
 * cpu_feat enable hook for the SSBD workaround, run on every CPU.
 * Apply the policy chosen by ssbd_workaround_check(): either force the
 * firmware mitigation to a fixed state now, or install the per-CPU ssbd
 * hook so the kernel toggles it at runtime. Always reports success.
 */
static bool
ssbd_workaround_enable(const struct cpu_feat *feat __unused,
    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
    u_int errata_count __unused)
{
	if (ssbd_method == SSBD_FORCE_ON)
		smccc_arch_workaround_2(1);
	else if (ssbd_method == SSBD_FORCE_OFF)
		smccc_arch_workaround_2(0);
	else
		/* SSBD_KERNEL (and any other value): kernel-controlled. */
		PCPU_SET(ssbd, smccc_arch_workaround_2);

	return (true);
}
/*
 * Register the SSBD workaround as a per-CPU feature.
 * Fixed description typo: the mitigation is "Speculative Store Bypass
 * Disable" (SSBD), not "Speculator".
 */
CPU_FEAT(feat_ssbs_missing, "Speculative Store Bypass Disable Workaround",
    ssbd_workaround_check, NULL, ssbd_workaround_enable, NULL,
    CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);