#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "vmx.h"
/* Memslot index for the dirty-logged test region (slot 0 holds default memory). */
#define TEST_MEM_SLOT_INDEX 1
/* Number of pages in the test memslot. */
#define TEST_MEM_PAGES 4
/*
 * Base GPA/GVA of the test region.  The alias base sits two pages above it,
 * i.e. at TEST_GPA(2), so the two alias pages also land inside the 4-page
 * memslot while not overlapping the two directly-accessed pages.
 */
#define TEST_MEM_BASE 0xc0000000
#define TEST_MEM_ALIAS_BASE 0xc0002000
/* Address of page 'idx' within a region starting at 'base'. */
#define TEST_GUEST_ADDR(base, idx) ((base) + (idx) * PAGE_SIZE)
#define TEST_GVA(idx) TEST_GUEST_ADDR(TEST_MEM_BASE, idx)
#define TEST_GPA(idx) TEST_GUEST_ADDR(TEST_MEM_BASE, idx)
#define TEST_ALIAS_GPA(idx) TEST_GUEST_ADDR(TEST_MEM_ALIAS_BASE, idx)
/* Host virtual address backing test page 'idx'. */
#define TEST_HVA(vm, idx) addr_gpa2hva(vm, TEST_GPA(idx))
/* L2 guest stack size, in unsigned longs. */
#define L2_GUEST_STACK_SIZE 64
/*
 * Flags OR'd into the page address passed to GUEST_SYNC, telling the host
 * what kind of fault (if any) the access preceding the sync should have
 * taken.  They occupy low page-offset bits, so address and flag share one
 * u64 without colliding.
 */
#define TEST_SYNC_READ_FAULT BIT(0)
#define TEST_SYNC_WRITE_FAULT BIT(1)
#define TEST_SYNC_NO_FAULT BIT(2)
/*
 * L2 guest body: access the first two pages of the region at @base and,
 * after each access, report to the host (via GUEST_SYNC) which page was
 * touched together with a TEST_SYNC_* flag describing the fault the access
 * should have taken.  The host validates page contents and the dirty log
 * at every sync point.
 */
static void l2_guest_code(vm_vaddr_t base)
{
	vm_vaddr_t page0 = TEST_GUEST_ADDR(base, 0);
	vm_vaddr_t page1 = TEST_GUEST_ADDR(base, 1);

	/* First touch of page0 is a read => read fault expected. */
	READ_ONCE(*(u64 *)page0);
	GUEST_SYNC(page0 | TEST_SYNC_READ_FAULT);

	/* First write to page0 => write fault expected. */
	WRITE_ONCE(*(u64 *)page0, 1);
	GUEST_SYNC(page0 | TEST_SYNC_WRITE_FAULT);

	/* Re-reading page0 after the write => no fault expected. */
	READ_ONCE(*(u64 *)page0);
	GUEST_SYNC(page0 | TEST_SYNC_NO_FAULT);

	/* First touch of page1 is a write => write fault expected. */
	WRITE_ONCE(*(u64 *)page1, 1);
	GUEST_SYNC(page1 | TEST_SYNC_WRITE_FAULT);

	/*
	 * A second write to page1 again expects a write fault — presumably
	 * because the host's dirty-log fetch at each sync re-protects the
	 * page; confirm against the host-side handler.
	 */
	WRITE_ONCE(*(u64 *)page1, 1);
	GUEST_SYNC(page1 | TEST_SYNC_WRITE_FAULT);

	/* Reading page1 after the writes => no fault expected. */
	READ_ONCE(*(u64 *)page1);
	GUEST_SYNC(page1 | TEST_SYNC_NO_FAULT);

	/* Exit back to L1, which checks for this exit reason. */
	vmcall();
}
/* L2 entry when nested TDP is enabled: L2 accesses the alias GPA range. */
static void l2_guest_code_tdp_enabled(void)
{
	l2_guest_code(TEST_MEM_ALIAS_BASE);
}
/* L2 entry without nested TDP: L2 accesses the direct GPA range. */
static void l2_guest_code_tdp_disabled(void)
{
	l2_guest_code(TEST_MEM_BASE);
}
/*
 * L1 VMX guest: enter VMX operation, launch L2, and verify that L2 exited
 * with a VMCALL.  GUEST_SYNCs (with no fault expected) bracket the L2 run
 * so the host can validate the dirty log around it.
 *
 * Fix: marked static for consistency with l1_svm_code()/l1_guest_code();
 * this is an internal guest-code helper, not a public symbol.
 */
static void l1_vmx_code(struct vmx_pages *vmx)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	void *l2_rip;

	GUEST_ASSERT(vmx->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
	GUEST_ASSERT(load_vmcs(vmx));

	/*
	 * With EPT in use, L2 runs against the alias GPA range; without it,
	 * L2 accesses the direct range (see the two l2_guest_code_* entries).
	 */
	if (vmx->eptp_gpa)
		l2_rip = l2_guest_code_tdp_enabled;
	else
		l2_rip = l2_guest_code_tdp_disabled;

	prepare_vmcs(vmx, l2_rip, &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(TEST_SYNC_NO_FAULT);
	GUEST_ASSERT(!vmlaunch());
	GUEST_SYNC(TEST_SYNC_NO_FAULT);
	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_VMCALL);
	GUEST_DONE();
}
/*
 * L1 SVM guest: set up a VMCB for L2, run L2, and verify that it exited
 * via VMMCALL.  GUEST_SYNCs (no fault expected) bracket the L2 run so the
 * host can validate the dirty log around it.
 */
static void l1_svm_code(struct svm_test_data *svm)
{
	unsigned long stack[L2_GUEST_STACK_SIZE];
	void *rip;

	/* Pick the L2 entry point matching whether nested NPT is in use. */
	rip = svm->ncr3_gpa ? (void *)l2_guest_code_tdp_enabled
			    : (void *)l2_guest_code_tdp_disabled;

	generic_svm_setup(svm, rip, &stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(TEST_SYNC_NO_FAULT);
	run_guest(svm->vmcb, svm->vmcb_gpa);
	GUEST_SYNC(TEST_SYNC_NO_FAULT);
	GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMMCALL);
	GUEST_DONE();
}
/*
 * Common L1 entry point: dispatch to the VMX or SVM flavor based on which
 * virtualization extension this vCPU exposes.  @data is the vendor-specific
 * nested state (struct vmx_pages or struct svm_test_data).
 */
static void l1_guest_code(void *data)
{
	if (!this_cpu_has(X86_FEATURE_VMX)) {
		l1_svm_code(data);
		return;
	}

	l1_vmx_code(data);
}
/*
 * Host-side handler for a guest GUEST_SYNC.  @arg packs the guest address
 * that was just accessed (page-aligned bits) together with a TEST_SYNC_*
 * flag in the low bits.  Verifies both the contents and the dirty-log
 * state of every page in the test memslot against the expected fault.
 */
static void test_handle_ucall_sync(struct kvm_vm *vm, u64 arg,
				   unsigned long *bmap)
{
	/* Strip the TEST_SYNC_* flag bits, leaving the page address. */
	vm_vaddr_t gva = arg & ~(PAGE_SIZE - 1);
	int page_nr, i;

	/*
	 * Convert the address to a frame number within the memslot.  With
	 * nested TDP the guest accesses the alias GPA range, so translate
	 * relative to whichever base the address falls in.  A zero gva
	 * (no-fault sync with no page) uses frame 0 as a placeholder.
	 */
	if (!gva)
		page_nr = 0;
	else if (gva >= TEST_MEM_ALIAS_BASE)
		page_nr = (gva - TEST_MEM_ALIAS_BASE) >> PAGE_SHIFT;
	else
		page_nr = (gva - TEST_MEM_BASE) >> PAGE_SHIFT;

	/* The guest only ever touches pages 0 and 1. */
	TEST_ASSERT(page_nr == 0 || page_nr == 1,
		    "Test bug, unexpected frame number '%u' for arg = %lx", page_nr, arg);
	TEST_ASSERT(gva || (arg & TEST_SYNC_NO_FAULT),
		    "Test bug, gva must be valid if a fault is expected");

	/* Snapshot the dirty log for the test slot. */
	kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);

	for (i = 0; i < TEST_MEM_PAGES; i++) {
		/*
		 * Content check: only a write-faulting access may have
		 * modified the page; all others must still hold the 0xaa
		 * fill pattern written by the caller before vcpu_run().
		 */
		if (i == page_nr && (arg & TEST_SYNC_WRITE_FAULT))
			TEST_ASSERT(*(u64 *)TEST_HVA(vm, i) == 1,
				    "Page %u incorrectly not written by guest", i);
		else
			TEST_ASSERT(*(u64 *)TEST_HVA(vm, i) == 0xaaaaaaaaaaaaaaaaULL,
				    "Page %u incorrectly written by guest", i);

		/*
		 * Dirty-log check: the accessed page must be logged dirty
		 * on any fault — including read faults, presumably because
		 * the caller pre-sets the dirty bit in the test pages' PTEs
		 * (see test_dirty_log()).  All other pages, and all no-fault
		 * syncs, must be clean.
		 */
		if (i == page_nr && !(arg & TEST_SYNC_NO_FAULT))
			TEST_ASSERT(test_bit(i, bmap),
				    "Page %u incorrectly reported clean on %s fault",
				    i, arg & TEST_SYNC_READ_FAULT ? "read" : "write");
		else
			TEST_ASSERT(!test_bit(i, bmap),
				    "Page %u incorrectly reported dirty", i);
	}
}
/*
 * Run the nested dirty-log test once, with or without nested TDP (EPT/NPT).
 * Creates a VM whose L1 guest launches an L2 that touches two pages in a
 * dirty-logged memslot; every guest sync triggers a dirty-log validation
 * pass (see test_handle_ucall_sync()).
 *
 * Fix: the original leaked both the dirty bitmap and the VM; since this
 * function runs twice per process, release them on the way out.
 */
static void test_dirty_log(bool nested_tdp)
{
	vm_vaddr_t nested_gva = 0;
	unsigned long *bmap;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	bool done = false;

	pr_info("Nested TDP: %s\n", nested_tdp ? "enabled" : "disabled");

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
	if (nested_tdp)
		vm_enable_tdp(vm);

	/* Allocate vendor-specific nested state and hand it to L1. */
	if (kvm_cpu_has(X86_FEATURE_VMX))
		vcpu_alloc_vmx(vm, &nested_gva);
	else
		vcpu_alloc_svm(vm, &nested_gva);
	vcpu_args_set(vcpu, 1, nested_gva);

	/* Dirty-logged memslot backing the pages the guest will touch. */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    TEST_MEM_BASE,
				    TEST_MEM_SLOT_INDEX,
				    TEST_MEM_PAGES,
				    KVM_MEM_LOG_DIRTY_PAGES);
	virt_map(vm, TEST_MEM_BASE, TEST_MEM_BASE, TEST_MEM_PAGES);

	if (nested_tdp) {
		tdp_identity_map_default_memslots(vm);
		/* Map the alias GPAs L2 uses onto the same test pages. */
		tdp_map(vm, TEST_ALIAS_GPA(0), TEST_GPA(0), PAGE_SIZE);
		tdp_map(vm, TEST_ALIAS_GPA(1), TEST_GPA(1), PAGE_SIZE);
		/*
		 * Pre-set the dirty bit in the test pages' PTEs — presumably
		 * so that read faults also show up in the dirty log; confirm
		 * against test_handle_ucall_sync()'s expectations.
		 */
		*tdp_get_pte(vm, TEST_ALIAS_GPA(0)) |= PTE_DIRTY_MASK(&vm->stage2_mmu);
		*tdp_get_pte(vm, TEST_ALIAS_GPA(1)) |= PTE_DIRTY_MASK(&vm->stage2_mmu);
	} else {
		*vm_get_pte(vm, TEST_GVA(0)) |= PTE_DIRTY_MASK(&vm->mmu);
		*vm_get_pte(vm, TEST_GVA(1)) |= PTE_DIRTY_MASK(&vm->mmu);
	}

	bmap = bitmap_zalloc(TEST_MEM_PAGES);

	while (!done) {
		/* Refill with a known pattern so guest writes are detectable. */
		memset(TEST_HVA(vm, 0), 0xaa, TEST_MEM_PAGES * PAGE_SIZE);
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			test_handle_ucall_sync(vm, uc.args[1], bmap);
			break;
		case UCALL_DONE:
			done = true;
			break;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

	free(bmap);
	kvm_vm_free(vm);
}
/*
 * Requires VMX or SVM.  Runs the dirty-log test without nested TDP, then
 * again with nested TDP when the host supports it.
 */
int main(int argc, char *argv[])
{
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) || kvm_cpu_has(X86_FEATURE_SVM));

	test_dirty_log(false);

	if (kvm_cpu_has_tdp())
		test_dirty_log(true);

	return 0;
}