#include <kunit/run-in-irq-context.h>
#include <kunit/test.h>
#include <linux/vmalloc.h>
/* Size of the test buffer; all tested data lengths must fit within it. */
#define TEST_BUF_LEN 16384

/* Test data buffer; positioned at the very end of the underlying allocation
 * so that overreads run into the following guard page (see hash_suite_init()). */
static u8 *test_buf;
/* Start of the underlying vmalloc allocation that test_buf points into. */
static u8 *orig_test_buf;
/* 48-bit state of the deterministic pseudorandom generator rand32(). */
static u64 random_seed;
static u32 rand32(void)
{
random_seed = (random_seed * 25214903917 + 11) & ((1ULL << 48) - 1);
return random_seed >> 16;
}
/* Fill @out with @len pseudorandom bytes, one rand32() call per byte. */
static void rand_bytes(u8 *out, size_t len)
{
        size_t i = 0;

        while (i < len) {
                out[i] = (u8)rand32();
                i++;
        }
}
/*
 * Fill @out with @len pseudorandom bytes that are a pure function of @len.
 * Reseeding from the length makes the data reproducible without storing it,
 * e.g. so that test vectors only need to record the expected digests.
 */
static void rand_bytes_seeded_from_len(u8 *out, size_t len)
{
        random_seed = (u64)len;
        rand_bytes(out, len);
}
/* Return a pseudorandom boolean: the low bit of a rand32() value. */
static bool rand_bool(void)
{
        return (rand32() & 1) != 0;
}
/*
 * Return a pseudorandom length in [0, max_len].  To exercise interesting
 * cases more often, the raw value is drawn from one of three ranges chosen
 * at random: small (< 128), medium (< 3072), or the full 32-bit range; the
 * result is then reduced modulo (max_len + 1).
 */
static size_t rand_length(size_t max_len)
{
        u32 bucket = rand32() % 3;
        size_t len;

        if (bucket == 0)
                len = rand32() % 128;
        else if (bucket == 1)
                len = rand32() % 3072;
        else
                len = rand32();
        return len % (max_len + 1);
}
/*
 * Return a pseudorandom offset in [0, max_offset], biased toward small
 * values: the raw draw is always below 128 and is clamped to max_offset.
 */
static size_t rand_offset(size_t max_offset)
{
        size_t off = rand32() % 128;

        return off <= max_offset ? off : max_offset;
}
/*
 * Suite-wide setup: allocate the test buffer and position it at the very
 * end of a page-aligned vmalloc allocation.  vmalloc() allocations are
 * normally followed by an unmapped guard page, so data placed flush against
 * the end of the buffer lets out-of-bounds reads fault immediately (this is
 * what test_hash_buffer_overruns() relies on).
 *
 * Returns 0 on success or -ENOMEM if the allocation fails.
 */
static int hash_suite_init(struct kunit_suite *suite)
{
        size_t alloc_len = round_up(TEST_BUF_LEN, PAGE_SIZE);

        orig_test_buf = vmalloc(alloc_len);
        if (!orig_test_buf)
                return -ENOMEM;
        /* Make test_buf end exactly at the end of the allocation. */
        test_buf = orig_test_buf + alloc_len - TEST_BUF_LEN;
        return 0;
}
/*
 * Suite-wide teardown: release the buffer allocated by hash_suite_init()
 * and clear both pointers so stale values cannot be reused.
 */
static void hash_suite_exit(struct kunit_suite *suite)
{
        test_buf = NULL;
        vfree(orig_test_buf);
        orig_test_buf = NULL;
}
/*
 * Test the one-shot HASH() function against the test vectors in
 * hash_testvecs[].  The data for each vector is generated deterministically
 * from its length (rand_bytes_seeded_from_len()), so only the expected
 * digests need to be stored in the vector table.
 */
static void test_hash_test_vectors(struct kunit *test)
{
        for (size_t i = 0; i < ARRAY_SIZE(hash_testvecs); i++) {
                size_t data_len = hash_testvecs[i].data_len;
                u8 actual_hash[HASH_SIZE];

                KUNIT_ASSERT_LE(test, data_len, TEST_BUF_LEN);
                rand_bytes_seeded_from_len(test_buf, data_len);
                HASH(test_buf, data_len, actual_hash);
                KUNIT_ASSERT_MEMEQ_MSG(
                        test, actual_hash, hash_testvecs[i].digest, HASH_SIZE,
                        "Wrong result with test vector %zu; data_len=%zu", i,
                        data_len);
        }
}
/*
 * Test the hash function with every data length from 0 through 4096 bytes,
 * using deterministic pseudorandom data.  Rather than storing 4097 expected
 * digests, each per-length digest is fed into a single incremental
 * computation whose final result is compared against the one stored
 * consolidated digest, hash_testvec_consolidated.
 */
static void test_hash_all_lens_up_to_4096(struct kunit *test)
{
        struct HASH_CTX ctx;
        u8 hash[HASH_SIZE];

        static_assert(TEST_BUF_LEN >= 4096);
        rand_bytes_seeded_from_len(test_buf, 4096);
        HASH_INIT(&ctx);
        for (size_t len = 0; len <= 4096; len++) {
                /* Hash the first len bytes, then absorb that digest. */
                HASH(test_buf, len, hash);
                HASH_UPDATE(&ctx, hash, HASH_SIZE);
        }
        HASH_FINAL(&ctx, hash);
        KUNIT_ASSERT_MEMEQ(test, hash, hash_testvec_consolidated, HASH_SIZE);
}
/*
 * Test that hashing data incrementally via HASH_INIT / HASH_UPDATE /
 * HASH_FINAL, in randomly-sized parts at a random buffer offset, produces
 * the same digest as the one-shot HASH() of the same data.
 */
static void test_hash_incremental_updates(struct kunit *test)
{
        for (int i = 0; i < 1000; i++) {
                size_t total_len, offset;
                struct HASH_CTX ctx;
                u8 hash1[HASH_SIZE];
                u8 hash2[HASH_SIZE];
                size_t num_parts = 0;
                size_t remaining_len, cur_offset;

                total_len = rand_length(TEST_BUF_LEN);
                offset = rand_offset(TEST_BUF_LEN - total_len);
                rand_bytes(&test_buf[offset], total_len);

                /* Expected digest, computed in one shot. */
                HASH(&test_buf[offset], total_len, hash1);

                /* Same data, hashed in a random number of random-length parts. */
                HASH_INIT(&ctx);
                remaining_len = total_len;
                cur_offset = offset;
                while (rand_bool()) {
                        size_t part_len = rand_length(remaining_len);

                        HASH_UPDATE(&ctx, &test_buf[cur_offset], part_len);
                        num_parts++;
                        cur_offset += part_len;
                        remaining_len -= part_len;
                }
                /*
                 * Hash any remaining data.  When nothing remains, sometimes
                 * still issue a zero-length update to exercise that case.
                 */
                if (remaining_len != 0 || rand_bool()) {
                        HASH_UPDATE(&ctx, &test_buf[cur_offset], remaining_len);
                        num_parts++;
                }
                HASH_FINAL(&ctx, hash2);
                KUNIT_ASSERT_MEMEQ_MSG(
                        test, hash1, hash2, HASH_SIZE,
                        "Incremental test failed with total_len=%zu num_parts=%zu offset=%zu",
                        total_len, num_parts, offset);
        }
}
/*
 * Test for out-of-bounds buffer accesses by placing the data, the digest,
 * and the hash context each flush against the end of test_buf, which is
 * immediately followed by a guard page (see hash_suite_init()).  Any
 * overread or overwrite past the tested object faults instead of being
 * silently absorbed.  Digest correctness is not checked here; other tests
 * cover that.
 */
static void test_hash_buffer_overruns(struct kunit *test)
{
        const size_t max_tested_len = TEST_BUF_LEN - sizeof(struct HASH_CTX);
        void *const buf_end = &test_buf[TEST_BUF_LEN];
        /* A hash context occupying the very end of the buffer. */
        struct HASH_CTX *guarded_ctx = buf_end - sizeof(*guarded_ctx);

        rand_bytes(test_buf, TEST_BUF_LEN);
        for (int i = 0; i < 100; i++) {
                size_t len = rand_length(max_tested_len);
                struct HASH_CTX ctx;
                u8 hash[HASH_SIZE];

                /* Data ending at the guard page: catches overreads. */
                HASH(buf_end - len, len, hash);
                HASH_INIT(&ctx);
                HASH_UPDATE(&ctx, buf_end - len, len);
                HASH_FINAL(&ctx, hash);
                /* Digest ending at the guard page: catches overwrites. */
                HASH(test_buf, len, buf_end - HASH_SIZE);
                HASH_INIT(&ctx);
                HASH_UPDATE(&ctx, test_buf, len);
                HASH_FINAL(&ctx, buf_end - HASH_SIZE);
                /* Context ending at the guard page: catches context overruns. */
                HASH_INIT(guarded_ctx);
                HASH_UPDATE(guarded_ctx, test_buf, len);
                HASH_FINAL(guarded_ctx, hash);
        }
}
/*
 * Test hashing where the digest buffer overlaps the data buffer, at either
 * the left or right end of the data.  The digest written into the overlap
 * must equal the digest computed into a separate buffer.  Three variants
 * are checked: one-shot, incremental, and incremental where the data is
 * overwritten between the final update and finalization (verifying that
 * finalization does not re-read the data).
 */
static void test_hash_overlaps(struct kunit *test)
{
        const size_t max_tested_len = TEST_BUF_LEN - HASH_SIZE;
        struct HASH_CTX ctx;
        u8 hash[HASH_SIZE];

        rand_bytes(test_buf, TEST_BUF_LEN);
        for (int i = 0; i < 100; i++) {
                size_t len = rand_length(max_tested_len);
                /* offset >= HASH_SIZE so a left-end overlap stays in bounds */
                size_t offset = HASH_SIZE + rand_offset(max_tested_len - len);
                bool left_end = rand_bool();
                u8 *ovl_hash = left_end ? &test_buf[offset] :
                                          &test_buf[offset + len - HASH_SIZE];

                /* Variant 1: one-shot hashing into the overlapping buffer. */
                HASH(&test_buf[offset], len, hash);
                HASH(&test_buf[offset], len, ovl_hash);
                KUNIT_ASSERT_MEMEQ_MSG(
                        test, hash, ovl_hash, HASH_SIZE,
                        "Overlap test 1 failed with len=%zu offset=%zu left_end=%d",
                        len, offset, left_end);
                /* Variant 2: incremental hashing, finalized into the overlap. */
                HASH(&test_buf[offset], len, hash);
                HASH_INIT(&ctx);
                HASH_UPDATE(&ctx, &test_buf[offset], len);
                HASH_FINAL(&ctx, ovl_hash);
                KUNIT_ASSERT_MEMEQ_MSG(
                        test, hash, ovl_hash, HASH_SIZE,
                        "Overlap test 2 failed with len=%zu offset=%zu left_end=%d",
                        len, offset, left_end);
                /* Variant 3: data is clobbered before finalization. */
                HASH(&test_buf[offset], len, hash);
                HASH_INIT(&ctx);
                HASH_UPDATE(&ctx, &test_buf[offset], len);
                rand_bytes(&test_buf[offset], len);
                HASH_FINAL(&ctx, ovl_hash);
                KUNIT_ASSERT_MEMEQ_MSG(
                        test, hash, ovl_hash, HASH_SIZE,
                        "Overlap test 3 failed with len=%zu offset=%zu left_end=%d",
                        len, offset, left_end);
        }
}
/*
 * Test that the hash result does not depend on the memory alignment of the
 * data buffer or of the digest buffer: hash the same data at two random
 * data offsets, writing the digests at two random output offsets, and
 * require identical results.
 */
static void test_hash_alignment_consistency(struct kunit *test)
{
        /* Extra 128 bytes allow the digest to land at any offset in 0..128. */
        u8 hash1[128 + HASH_SIZE];
        u8 hash2[128 + HASH_SIZE];

        for (int i = 0; i < 100; i++) {
                size_t len = rand_length(TEST_BUF_LEN);
                size_t data_offs1 = rand_offset(TEST_BUF_LEN - len);
                size_t data_offs2 = rand_offset(TEST_BUF_LEN - len);
                size_t hash_offs1 = rand_offset(128);
                size_t hash_offs2 = rand_offset(128);

                rand_bytes(&test_buf[data_offs1], len);
                HASH(&test_buf[data_offs1], len, &hash1[hash_offs1]);
                /* Move the same data to the second offset and re-hash. */
                memmove(&test_buf[data_offs2], &test_buf[data_offs1], len);
                HASH(&test_buf[data_offs2], len, &hash2[hash_offs2]);
                KUNIT_ASSERT_MEMEQ_MSG(
                        test, &hash1[hash_offs1], &hash2[hash_offs2], HASH_SIZE,
                        "Alignment consistency test failed with len=%zu data_offs=(%zu,%zu) hash_offs=(%zu,%zu)",
                        len, data_offs1, data_offs2, hash_offs1, hash_offs2);
        }
}
/*
 * Test that HASH_FINAL zeroizes the hash context: after finalizing an
 * incremental computation, the entire context must compare equal to zeroes.
 */
static void test_hash_ctx_zeroization(struct kunit *test)
{
        static const u8 zeroes[sizeof(struct HASH_CTX)];
        struct HASH_CTX ctx;

        rand_bytes(test_buf, 128);
        HASH_INIT(&ctx);
        HASH_UPDATE(&ctx, test_buf, 128);
        HASH_FINAL(&ctx, test_buf);
        KUNIT_ASSERT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
                               "Hash context was not zeroized by finalization");
}
/* Length of the data hashed on each interrupt-context test invocation. */
#define IRQ_TEST_DATA_LEN 256
/* Number of distinct buffers (test 1) or hash contexts (test 2). */
#define IRQ_TEST_NUM_BUFFERS 3

/* Shared state for test_hash_interrupt_context_1. */
struct hash_irq_test1_state {
        /* Precomputed digest of each of the IRQ_TEST_NUM_BUFFERS buffers */
        u8 expected_hashes[IRQ_TEST_NUM_BUFFERS][HASH_SIZE];
        /* Monotonic sequence number used to cycle through the buffers */
        atomic_t seqno;
};
/*
 * Callback for test_hash_interrupt_context_1, invoked repeatedly by
 * kunit_run_irq_test() (presumably from both task and interrupt context —
 * see <kunit/run-in-irq-context.h>).  Picks the next buffer round-robin,
 * hashes it one-shot, and returns true iff the digest matches the
 * precomputed expected value.
 */
static bool hash_irq_test1_func(void *state_)
{
        struct hash_irq_test1_state *state = state_;
        u32 i = (u32)atomic_inc_return(&state->seqno) % IRQ_TEST_NUM_BUFFERS;
        u8 actual_hash[HASH_SIZE];

        HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN, actual_hash);
        return memcmp(actual_hash, state->expected_hashes[i], HASH_SIZE) == 0;
}
/*
 * Test that one-shot hashing produces correct results when executed in
 * interrupt context: precompute the expected digest of each buffer, then
 * let kunit_run_irq_test() invoke hash_irq_test1_func() 100000 times.
 */
static void test_hash_interrupt_context_1(struct kunit *test)
{
        struct hash_irq_test1_state state = {};

        rand_bytes(test_buf, IRQ_TEST_NUM_BUFFERS * IRQ_TEST_DATA_LEN);
        for (int i = 0; i < IRQ_TEST_NUM_BUFFERS; i++)
                HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN,
                     state.expected_hashes[i]);
        kunit_run_irq_test(test, hash_irq_test1_func, 100000, &state);
}
/* One in-flight incremental hash computation, advanced one step at a time. */
struct hash_irq_test2_hash_ctx {
        struct HASH_CTX hash_ctx;
        /* Nonzero while an invocation of hash_irq_test2_func() owns this ctx */
        atomic_t in_use;
        /* Current read position within test_buf */
        int offset;
        /* Next step: 0 = init, 1..num_steps-2 = update, num_steps-1 = final */
        int step;
};
/* Shared state for test_hash_interrupt_context_2. */
struct hash_irq_test2_state {
        struct hash_irq_test2_hash_ctx ctxs[IRQ_TEST_NUM_BUFFERS];
        /* Expected digest of the full TEST_BUF_LEN bytes of test_buf */
        u8 expected_hash[HASH_SIZE];
        /* Lengths of the incremental updates, filled in by the test setup */
        u16 update_lens[32];
        /* Steps per computation: number of updates plus init and final */
        int num_steps;
};
/*
 * Callback for test_hash_interrupt_context_2.  Each invocation claims a
 * free hash context via atomic compare-and-swap (invocations may interrupt
 * one another, so a context must never be used concurrently), advances that
 * context's incremental computation by exactly one step — init, one update
 * of update_lens[step - 1] bytes, or final-and-verify — then releases it.
 * Returns false on digest mismatch or internal inconsistency.
 */
static bool hash_irq_test2_func(void *state_)
{
        struct hash_irq_test2_state *state = state_;
        struct hash_irq_test2_hash_ctx *ctx;
        bool ret = true;

        /* Claim the first context not currently in use. */
        for (ctx = &state->ctxs[0]; ctx < &state->ctxs[ARRAY_SIZE(state->ctxs)];
             ctx++) {
                if (atomic_cmpxchg(&ctx->in_use, 0, 1) == 0)
                        break;
        }
        /* Should be impossible: there are more contexts than nesting levels. */
        if (WARN_ON_ONCE(ctx == &state->ctxs[ARRAY_SIZE(state->ctxs)])) {
                return false;
        }
        if (ctx->step == 0) {
                /* First step: initialize the incremental computation. */
                HASH_INIT(&ctx->hash_ctx);
                ctx->offset = 0;
                ctx->step++;
        } else if (ctx->step < state->num_steps - 1) {
                /* Middle steps: absorb the next chunk of test_buf. */
                HASH_UPDATE(&ctx->hash_ctx, &test_buf[ctx->offset],
                            state->update_lens[ctx->step - 1]);
                ctx->offset += state->update_lens[ctx->step - 1];
                ctx->step++;
        } else {
                /* Last step: all data must have been consumed; verify digest. */
                u8 actual_hash[HASH_SIZE];

                if (WARN_ON_ONCE(ctx->offset != TEST_BUF_LEN))
                        ret = false;
                HASH_FINAL(&ctx->hash_ctx, actual_hash);
                if (memcmp(actual_hash, state->expected_hash, HASH_SIZE) != 0)
                        ret = false;
                ctx->step = 0;
        }
        /* Release the context; pairs with the atomic_cmpxchg() acquire above. */
        atomic_set_release(&ctx->in_use, 0);
        return ret;
}
/*
 * Test incremental hashing in interrupt context: split test_buf into
 * randomly-sized chunks of at most 4096 bytes (update_lens[]), precompute
 * the expected digest of the whole buffer, then let kunit_run_irq_test()
 * drive hash_irq_test2_func(), which advances several concurrent
 * incremental computations one step per invocation.
 */
static void test_hash_interrupt_context_2(struct kunit *test)
{
        struct hash_irq_test2_state *state;
        int remaining = TEST_BUF_LEN;

        state = kunit_kzalloc(test, sizeof(*state), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, state);
        rand_bytes(test_buf, TEST_BUF_LEN);
        HASH(test_buf, TEST_BUF_LEN, state->expected_hash);
        /* Ensure the buffer cannot be consumed by a single <=4096-byte chunk. */
        static_assert(TEST_BUF_LEN / 4096 > 1);
        /* Fill update_lens[], keeping one slot free for the final remainder. */
        for (state->num_steps = 0;
             state->num_steps < ARRAY_SIZE(state->update_lens) - 1 && remaining;
             state->num_steps++) {
                state->update_lens[state->num_steps] =
                        rand_length(min(remaining, 4096));
                remaining -= state->update_lens[state->num_steps];
        }
        if (remaining)
                state->update_lens[state->num_steps++] = remaining;
        /* +2 accounts for the init and final steps. */
        state->num_steps += 2;
        kunit_run_irq_test(test, hash_irq_test2_func, 250000, state);
}
/* KUnit test cases that apply to every hash function, keyed or not. */
#define UNKEYED_HASH_KUNIT_CASES \
        KUNIT_CASE(test_hash_test_vectors), \
        KUNIT_CASE(test_hash_all_lens_up_to_4096), \
        KUNIT_CASE(test_hash_incremental_updates), \
        KUNIT_CASE(test_hash_buffer_overruns), \
        KUNIT_CASE(test_hash_overlaps), \
        KUNIT_CASE(test_hash_alignment_consistency), \
        KUNIT_CASE(test_hash_ctx_zeroization), \
        KUNIT_CASE(test_hash_interrupt_context_1), \
        KUNIT_CASE(test_hash_interrupt_context_2)
#ifdef HMAC
/*
 * Test the HMAC variant of the hash function, when the template is
 * instantiated with the HMAC_* macros defined.  For every data length from
 * 0 through 4096 (with deterministic data) and a key length cycling through
 * 0..292, the MAC computed via the raw-key API must match the MAC computed
 * via a prepared key.  All those MACs are also folded into one consolidated
 * HMAC computation (keyed with a fixed 32-byte key) whose final value is
 * compared against hmac_testvec_consolidated.  Finally, the HMAC context
 * must be zeroized by finalization.
 */
static void test_hmac(struct kunit *test)
{
        static const u8 zeroes[sizeof(struct HMAC_CTX)];
        u8 *raw_key;
        struct HMAC_KEY key;
        struct HMAC_CTX ctx;
        u8 mac[HASH_SIZE];
        u8 mac2[HASH_SIZE];

        /* Room for 4096 bytes of data plus the longest (292-byte) raw key. */
        static_assert(TEST_BUF_LEN >= 4096 + 293);
        rand_bytes_seeded_from_len(test_buf, 4096);
        /* The raw key lives directly after the message data. */
        raw_key = &test_buf[4096];
        rand_bytes_seeded_from_len(raw_key, 32);
        HMAC_PREPAREKEY(&key, raw_key, 32);
        /* Consolidating computation, keyed with the initial 32-byte key. */
        HMAC_INIT(&ctx, &key);
        for (size_t data_len = 0; data_len <= 4096; data_len++) {
                size_t key_len = data_len % 293;

                HMAC_UPDATE(&ctx, test_buf, data_len);
                /* Per-iteration key, deterministic from its length. */
                rand_bytes_seeded_from_len(raw_key, key_len);
                HMAC_USINGRAWKEY(raw_key, key_len, test_buf, data_len, mac);
                HMAC_UPDATE(&ctx, mac, HASH_SIZE);
                /* Same MAC via the prepared-key API must agree. */
                HMAC_PREPAREKEY(&key, raw_key, key_len);
                HMAC(&key, test_buf, data_len, mac2);
                KUNIT_ASSERT_MEMEQ_MSG(
                        test, mac, mac2, HASH_SIZE,
                        "HMAC gave different results with raw and prepared keys");
        }
        HMAC_FINAL(&ctx, mac);
        KUNIT_EXPECT_MEMEQ_MSG(test, mac, hmac_testvec_consolidated, HASH_SIZE,
                               "HMAC gave wrong result");
        KUNIT_EXPECT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
                               "HMAC context was not zeroized by finalization");
}
/* When HMAC support is compiled in, include the HMAC test case too. */
#define HASH_KUNIT_CASES UNKEYED_HASH_KUNIT_CASES, KUNIT_CASE(test_hmac)
#else
#define HASH_KUNIT_CASES UNKEYED_HASH_KUNIT_CASES
#endif
/*
 * Benchmark the hash function across a range of data lengths and log the
 * throughput of each in MB/s.  Skipped unless CONFIG_CRYPTO_LIB_BENCHMARK
 * is enabled.  Not a correctness test; results are informational only.
 */
static void benchmark_hash(struct kunit *test)
{
        static const size_t lens_to_test[] = {
                1,   16,  64,   127,  128,  200, 256,
                511, 512, 1024, 3173, 4096, 16384,
        };
        u8 hash[HASH_SIZE];

        if (!IS_ENABLED(CONFIG_CRYPTO_LIB_BENCHMARK))
                kunit_skip(test, "not enabled");

        /* Warm-up: hash ~10 MB before taking any measurements. */
        for (size_t i = 0; i < 10000000; i += TEST_BUF_LEN)
                HASH(test_buf, TEST_BUF_LEN, hash);

        for (size_t i = 0; i < ARRAY_SIZE(lens_to_test); i++) {
                size_t len = lens_to_test[i];
                /* ~10 MB total per length; +128 bounds iterations for tiny lens. */
                size_t num_iters = 10000000 / (len + 128);
                u64 t;

                KUNIT_ASSERT_LE(test, len, TEST_BUF_LEN);
                preempt_disable();
                t = ktime_get_ns();
                for (size_t j = 0; j < num_iters; j++)
                        HASH(test_buf, len, hash);
                t = ktime_get_ns() - t;
                preempt_enable();
                kunit_info(test, "len=%zu: %llu MB/s", len,
                           div64_u64((u64)len * num_iters * 1000, t ?: 1));
        }
}