#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/fail.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/random.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/atomic.h>
#include <machine/smp.h>
#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>
#include <dev/random/fenestrasX/fx_brng.h>
#include <dev/random/fenestrasX/fx_hash.h>
#include <dev/random/fenestrasX/fx_pool.h>
#include <dev/random/fenestrasX/fx_priv.h>
#include <dev/random/fenestrasX/fx_pub.h>
/*
 * Timer-reseed interval schedule: the interval grows geometrically by this
 * factor each firing (see fxent_timer_reseed()) until capped at
 * FXENT_RESEED_INTVL_MAX seconds.
 * NOTE(review): "RESSED" is a typo for "RESEED"; renaming would touch all
 * users, so it is left as-is here.
 */
#define FXENT_RESSED_INTVL_GFACT 3
#define FXENT_RESEED_INTVL_MAX 3600
/*
 * Base of the exponential pool-draining schedule once the interval is at its
 * cap: pool i is drained every FXENT_RESEED_BASE^i timer reseeds.
 */
#define FXENT_RESEED_BASE 3
/*
 * Per-source quota of bytes credited to pool 0 (between timer reseeds) from
 * high-quality sources before their events fall back to round-robin.
 */
#define FXENT_HI_SRC_POOL0_BYTES 32
/*
 * How entropy from a source reaches us: pushed into the harvest queue by the
 * source, or pulled from the source on demand.
 */
enum fxrng_ent_access_cls {
	FXRNG_PUSH,
	FXRNG_PULL,
};

/*
 * Coarse quality classification.  In this file only the HI/non-HI
 * distinction is consulted (via fxrng_hi_source()): HI sources get pool-0
 * credit and count toward the pre-seed entropy tally.
 */
enum fxrng_ent_source_cls {
	FXRNG_HI,
	FXRNG_LO,
	FXRNG_GARBAGE,
};

/* A (access mode, quality) pair describing a class of entropy sources. */
struct fxrng_ent_cls {
	enum fxrng_ent_access_cls entc_axx_cls;
	enum fxrng_ent_source_cls entc_src_cls;
};

/* Shared class singletons referenced by the per-source table below. */
static const struct fxrng_ent_cls fxrng_hi_pull = {
	.entc_axx_cls = FXRNG_PULL,
	.entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_hi_push = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_lo_push = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_LO,
};
static const struct fxrng_ent_cls fxrng_garbage = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_GARBAGE,
};
/*
 * Static classification of every harvest source.  Hardware RNGs
 * (RDRAND/RDSEED, DARN, ARMv8, etc.) are high quality; environmental and
 * software sources (interrupts, keyboard, network, UMA, ...) are low
 * quality; RANDOM_PURE_RNDTEST is classified as garbage.
 */
static const struct fxrng_ent_char {
	const struct fxrng_ent_cls *entc_cls;
} fxrng_ent_char[] = {
	[RANDOM_CACHED] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_ATTACH] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_KEYBOARD] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_MOUSE] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_TUN] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_ETHER] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_NG] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_INTERRUPT] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_SWI] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_FS_ATIME] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_UMA] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_CALLOUT] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_RANDOMDEV] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_PURE_TPM] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_RDRAND] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_RDSEED] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_NEHEMIAH] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_RNDTEST] = {
		.entc_cls = &fxrng_garbage,
	},
	[RANDOM_PURE_VIRTIO] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_BROADCOM] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_CCP] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_DARN] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_VMGENID] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_QUALCOMM] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_ARMV8] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_ARM_TRNG] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_SAFE] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_GLXSB] = {
		.entc_cls = &fxrng_hi_push,
	},
};
/* Every source must be classified; catch new enum members at build time. */
CTASSERT(nitems(fxrng_ent_char) == ENTROPYSOURCE);
/* One bit per entropy source; used to detect a source's first contribution. */
BITSET_DEFINE(fxrng_bits, ENTROPYSOURCE);

/*
 * Compat shim for trees lacking BIT_TEST_SET_ATOMIC_ACQ: atomically set bit
 * n of bitset p with acquire semantics and return its previous value.
 */
#ifndef BIT_TEST_SET_ATOMIC_ACQ
#define BIT_TEST_SET_ATOMIC_ACQ(_s, n, p) \
	(atomic_testandset_acq_long( \
	    &(p)->__bits[__bitset_word((_s), (n))], (n)) != 0)
#endif
#define FXENT_TEST_SET_ATOMIC_ACQ(n, p) \
	BIT_TEST_SET_ATOMIC_ACQ(ENTROPYSOURCE, n, p)

/* Sources that have ever submitted an event (never cleared in this file). */
static struct fxrng_bits __read_mostly fxrng_seen;

/*
 * Per-source count of bytes credited to pool 0 so far, reset to zero on each
 * timer reseed; stored as uint8_t, hence the bound check.
 */
_Static_assert(FXENT_HI_SRC_POOL0_BYTES <= UINT8_MAX, "");
static uint8_t __read_mostly fxrng_reseed_seen[ENTROPYSOURCE];

/* Mutex protecting the pools and the bookkeeping state above/below. */
static struct mtx fxent_pool_lk;
MTX_SYSINIT(fx_pool, &fxent_pool_lk, "fx entropy pool lock", MTX_DEF);

#define FXENT_LOCK() mtx_lock(&fxent_pool_lk)
#define FXENT_UNLOCK() mtx_unlock(&fxent_pool_lk)
#define FXENT_ASSERT(rng) mtx_assert(&fxent_pool_lk, MA_OWNED)
#define FXENT_ASSERT_NOT(rng) mtx_assert(&fxent_pool_lk, MA_NOTOWNED)

/* The accumulation pools; pool 0 also feeds the initial seed. */
static struct fxrng_hash fxent_pool[FXRNG_NPOOLS];
/* Number of pools currently in the round-robin rotation (grows from 1). */
static unsigned __read_mostly fxent_nactpools = 1;

/* Periodic reseed task; armed only after fxent_timer_ready is set. */
static struct timeout_task fxent_reseed_timer;
static int __read_mostly fxent_timer_ready;

/*
 * High-quality bytes accumulated before the first seed; saturates rather
 * than wrapping.  Only written in this file — presumably consulted by the
 * seeding policy elsewhere (TODO confirm against fx_brng.c callers).
 */
static unsigned long fxrng_preseed_ent;
/*
 * Initialize every accumulation pool's hash state.  Called once at startup,
 * before any events are harvested.
 */
void
fxrng_pools_init(void)
{
	for (size_t idx = 0; idx < nitems(fxent_pool); idx++)
		fxrng_hash_init(&fxent_pool[idx]);
}
static inline bool
fxrng_hi_source(enum random_entropy_source src)
{
return (fxrng_ent_char[src].entc_cls->entc_src_cls == FXRNG_HI);
}
static inline bool
fxrng_hi_pool0_eligible_racy(enum random_entropy_source src)
{
return (atomic_load_acq_8(&fxrng_reseed_seen[src]) <
FXENT_HI_SRC_POOL0_BYTES);
}
/*
 * Harvest-queue hook: fold one harvested event into the entropy pools.
 *
 * Dispatch depends on how much the system has seen so far:
 *  1. A source's very first event, arriving after the root BRNG is seeded,
 *     triggers an immediate source-reseed of the root.
 *  2. Before the first seed, everything accumulates in pool 0 (high-quality
 *     bytes also count toward the pre-seed tally).
 *  3. After multiple pools are active, the first FXENT_HI_SRC_POOL0_BYTES
 *     from each high-quality source are still credited to pool 0.
 *  4. All remaining bytes are spread round-robin across the active pools,
 *     selected by the event's he_destination field.
 */
void
fxrng_event_processor(struct harvest_event *event)
{
	enum random_entropy_source src;
	unsigned pool;
	bool first_time, first_32;

	src = event->he_source;

	ASSERT_DEBUG(event->he_size <= sizeof(event->he_entropy),
	    "%s: he_size: %u > sizeof(he_entropy): %zu", __func__,
	    (unsigned)event->he_size, sizeof(event->he_entropy));

	/*
	 * First-ever contribution from this source?  The atomic
	 * test-and-set keeps the common (already-seen) path lock-free.
	 */
	first_time = event->he_size > 0 &&
	    !FXENT_TEST_SET_ATOMIC_ACQ(src, &fxrng_seen);
	if (__predict_false(first_time)) {
		/* Lock order: pool lock before BRNG lock. */
		FXENT_LOCK();
		FXRNG_BRNG_LOCK(&fxrng_root);
		if (__predict_true(fxrng_root.brng_generation > 0)) {
			/*
			 * Root already seeded: feed the new source straight
			 * into a root reseed.  fxrng_brng_src_reseed() takes
			 * over (and releases) the BRNG lock.
			 */
			FXENT_UNLOCK();
			fxrng_brng_src_reseed(event);
			FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
			return;
		}
		FXRNG_BRNG_UNLOCK(&fxrng_root);

		/* Not yet seeded: everything goes into pool 0. */
		fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
		    sizeof(event->he_somecounter));
		fxrng_hash_update(&fxent_pool[0], event->he_entropy,
		    event->he_size);

		/*
		 * Count high-quality bytes toward the pre-seed tally,
		 * saturating instead of overflowing.
		 */
		if (fxrng_hi_source(src)) {
			if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
				fxrng_preseed_ent += event->he_size;
		}
		FXENT_UNLOCK();
		return;
	}

	/*
	 * Racy pre-check: does this high-quality source still owe bytes to
	 * pool 0?  Re-validated under the lock below.
	 */
	first_32 = event->he_size > 0 &&
	    fxrng_hi_source(src) &&
	    atomic_load_acq_int(&fxent_nactpools) > 1 &&
	    fxrng_hi_pool0_eligible_racy(src);
	if (__predict_false(first_32)) {
		unsigned rem, seen;

		FXENT_LOCK();
		seen = fxrng_reseed_seen[src];
		if (seen == FXENT_HI_SRC_POOL0_BYTES)
			/* Lost the race; quota already filled. */
			goto round_robin;

		/* Credit up to the remaining quota from the buffer's tail. */
		rem = FXENT_HI_SRC_POOL0_BYTES - seen;
		rem = MIN(rem, event->he_size);

		fxrng_reseed_seen[src] = seen + rem;

		fxrng_hash_update(&fxent_pool[0],
		    ((uint8_t *)event->he_entropy) + event->he_size - rem,
		    rem);
		if (rem == event->he_size) {
			/* Entire event consumed by pool 0; done. */
			fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
			    sizeof(event->he_somecounter));
			FXENT_UNLOCK();
			return;
		}

		/* Shrink the event to the leftover head and fall through. */
		event->he_size -= rem;
		goto round_robin;
	}

	FXENT_LOCK();
round_robin:
	/* Pool lock held on every path reaching here. */
	FXENT_ASSERT();
	pool = event->he_destination % fxent_nactpools;
	fxrng_hash_update(&fxent_pool[pool], event->he_entropy,
	    event->he_size);
	fxrng_hash_update(&fxent_pool[pool], &event->he_somecounter,
	    sizeof(event->he_somecounter));

	/* Still unseeded: keep counting high-quality bytes (saturating). */
	if (__predict_false(fxrng_hi_source(src) &&
	    atomic_load_acq_64(&fxrng_root_generation) == 0)) {
		if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
			fxrng_preseed_ent += event->he_size;
	}
	FXENT_UNLOCK();
}
/*
 * Report whether the generator is seeded, performing the initial seed from
 * pool 0 if it is not.  Always returns true: if unseeded on entry, this
 * function seeds the root BRNG itself before returning.
 */
bool
fxrng_alg_seeded(void)
{
	uint8_t hash[FXRNG_HASH_SZ];
	sbintime_t sbt;

	/* Fast path: already seeded, no lock needed. */
	if (__predict_true(atomic_load_acq_64(&fxrng_root_generation) != 0))
		return (true);

	FXENT_LOCK();
	/* Double-check under the lock; another thread may have seeded. */
	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
		FXENT_UNLOCK();
		return (true);
	}

	/* Drain pool 0 into the root BRNG and reset the pool. */
	fxrng_hash_finish(&fxent_pool[0], hash, sizeof(hash));
	fxrng_hash_init(&fxent_pool[0]);
	fxrng_brng_reseed(hash, sizeof(hash));
	FXENT_UNLOCK();

	randomdev_unblock();
	/* Scrub the seed material from the stack. */
	explicit_bzero(hash, sizeof(hash));

	/* Arm the periodic reseed timer if the taskqueue is up already;
	 * otherwise fxent_pool_timer_init() will arm it. */
	if (atomic_load_acq_int(&fxent_timer_ready) != 0) {
		sbt = SBT_1S;
		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
	}
	return (true);
}
/*
 * Drain the first n pools into the root BRNG as one combined reseed.
 *
 * Each drained pool's hash output is written to a contiguous slice of a
 * stack buffer, the pools are re-initialized, the buffer is fed to the
 * BRNG and then scrubbed, and the per-source pool-0 byte credits are
 * cleared.  Must be called with the pool lock held.
 */
static void
fxent_timer_reseed_npools(unsigned n)
{
	uint8_t hash[FXRNG_HASH_SZ * FXRNG_NPOOLS];
	uint8_t *out;
	unsigned idx;

	ASSERT_DEBUG(n > 0 && n <= FXRNG_NPOOLS, "n:%u", n);
	FXENT_ASSERT();

	out = hash;
	for (idx = 0; idx < n; idx++, out += FXRNG_HASH_SZ) {
		fxrng_hash_finish(&fxent_pool[idx], out, FXRNG_HASH_SZ);
		fxrng_hash_init(&fxent_pool[idx]);
	}

	fxrng_brng_reseed(hash, n * FXRNG_HASH_SZ);
	/* Scrub seed material from the stack. */
	explicit_bzero(hash, n * FXRNG_HASH_SZ);

	/* Every source starts its pool-0 credit over after a reseed. */
	memset(fxrng_reseed_seen, 0, sizeof(fxrng_reseed_seen));
	FXENT_ASSERT();
}
/*
 * Periodic reseed task.  The interval between firings grows geometrically
 * (factor FXENT_RESSED_INTVL_GFACT) from 1 second up to
 * FXENT_RESEED_INTVL_MAX; while ramping, only pool 0 is drained.  Once at
 * the cap, pool i is drained every FXENT_RESEED_BASE^i firings (a
 * Fortuna-style schedule), and one additional pool is brought into the
 * rotation whenever the entire active set is drained at once.  The task
 * re-arms itself at the end of every firing.
 */
static void
fxent_timer_reseed(void *ctx __unused, int pending __unused)
{
	/* Persistent schedule state; only this task touches these. */
	static unsigned reseed_intvl_sec = 1;
	static uint64_t reseed_number = 1;

	unsigned next_ival, i, k;
	sbintime_t sbt;

	if (reseed_intvl_sec < FXENT_RESEED_INTVL_MAX) {
		/* Still ramping: grow the interval, drain pool 0 only. */
		next_ival = FXENT_RESSED_INTVL_GFACT * reseed_intvl_sec;
		if (next_ival > FXENT_RESEED_INTVL_MAX)
			next_ival = FXENT_RESEED_INTVL_MAX;
		FXENT_LOCK();
		fxent_timer_reseed_npools(1);
		FXENT_UNLOCK();
	} else {
		next_ival = reseed_intvl_sec;

		/*
		 * i ends up as 1 + the number of trailing base-3 factors of
		 * reseed_number, capped by the active pool count.
		 * NOTE(review): k is unsigned while reseed_number is
		 * uint64_t — the assignment truncates; harmless for
		 * counting small trailing factors, but worth confirming.
		 */
		k = reseed_number;
		reseed_number++;

		for (i = 1; i < MIN(fxent_nactpools + 1, FXRNG_NPOOLS); i++) {
			if ((k % FXENT_RESEED_BASE) != 0)
				break;
			k /= FXENT_RESEED_BASE;
		}

		FXENT_LOCK();
		if (i == fxent_nactpools + 1) {
			/* Whole active set drained: activate one more pool. */
			fxent_timer_reseed_npools(fxent_nactpools);
			fxent_nactpools++;
		} else {
			fxent_timer_reseed_npools(i);
		}
		FXENT_UNLOCK();
	}

	/* Re-arm for the next interval (sbt/3 slop, coalescing allowed). */
	sbt = next_ival * SBT_1S;
	taskqueue_enqueue_timeout_sbt(taskqueue_thread, &fxent_reseed_timer,
	    -sbt, (sbt / 3), C_PREL(2));
	reseed_intvl_sec = next_ival;
}
/*
 * SI_SUB_TASKQ SYSINIT: create the reseed timeout task.  If the root BRNG
 * was seeded before taskqueues came up, arm the timer immediately;
 * otherwise fxrng_alg_seeded() arms it at first seed.  The release store
 * publishes the initialized timer to lock-free readers of
 * fxent_timer_ready.
 */
static void
fxent_pool_timer_init(void *dummy __unused)
{
	sbintime_t sbt;

	TIMEOUT_TASK_INIT(taskqueue_thread, &fxent_reseed_timer, 0,
	    fxent_timer_reseed, NULL);

	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
		sbt = SBT_1S;
		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
	}
	atomic_store_rel_int(&fxent_timer_ready, 1);
}
SYSINIT(fxent_pool_timer_init, SI_SUB_TASKQ, SI_ORDER_ANY,
    fxent_pool_timer_init, NULL);