#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <libgen.h>
#include <sys/stat.h>
#include <errno.h>
#include <err.h>
#include <assert.h>
#include <sys/sysmacros.h>
#include <stdbool.h>
#include <sys/vmm.h>
#include <sys/vmm_dev.h>
#include <sys/vmm_data.h>
#include <vmmapi.h>
#include "common.h"
/* Size of a guest-physical page used throughout this test. */
#define PAGESZ 4096
/* Number of guest pages backing the test VM. */
#define TEST_PAGE_COUNT 256
/*
 * Total guest-physical memory size.  Derived from TEST_PAGE_COUNT (rather
 * than repeating the literal 256) so the two constants cannot drift apart.
 */
#define TEST_MEM_SZ (PAGESZ * TEST_PAGE_COUNT)
/*
 * Create and open a test VM with dirty-page tracking (VCF_TRACK_DIRTY)
 * enabled.
 *
 * If the running VMM lacks the dirty-tracking capability (vm_create fails
 * with ENOTSUP), emit a SKIP result for the test and exit successfully.
 * Any other failure aborts the test.  Returns the opened vmctx.
 */
static struct vmctx *
check_vmm_capability(const char *tname)
{
    char vmname[VM_MAX_NAMELEN];

    name_test_vm(tname, vmname);
    if (vm_create(vmname, VCF_TRACK_DIRTY) != 0) {
        if (errno == ENOTSUP) {
            /*
             * The message previously lacked a trailing newline,
             * garbling the stderr output.
             */
            (void) fprintf(stderr,
                "VMM lacks dirty page tracking capability\n");
            (void) printf("%s\tSKIP\n", tname);
            exit(EXIT_SUCCESS);
        }
        err(EXIT_FAILURE, "could not create VM");
    }

    struct vmctx *ctx = vm_open(vmname);
    if (ctx == NULL) {
        err(EXIT_FAILURE, "could not open test VM");
    }
    return (ctx);
}
/*
 * Abort the test unless the current errno value matches `expected`.
 */
static void
expect_errno(int expected)
{
    if (errno == expected) {
        return;
    }
    errx(EXIT_FAILURE, "unexpected errno %d != %d",
        errno, expected);
}
/*
 * Return the number of bits set in an 8-bit value.
 */
static uint8_t
popc8(uint8_t val)
{
    uint8_t total = 0;

    /* Examine each bit position, low to high. */
    while (val != 0) {
        total += (val & 1);
        val >>= 1;
    }
    return (total);
}
static uint_t
legacy_clear_dirty(struct vmctx *ctx)
{
uint8_t bitmap[TEST_PAGE_COUNT / 8] = { 0 };
struct vmm_dirty_tracker req = {
.vdt_start_gpa = 0,
.vdt_len = TEST_MEM_SZ,
.vdt_pfns = bitmap,
};
if (ioctl(vm_get_device_fd(ctx), VM_TRACK_DIRTY_PAGES, &req) != 0) {
err(EXIT_FAILURE, "VM_TRACK_DIRTY_PAGES failed");
}
uint_t bits_set = 0;
for (uint_t i = 0; i < (TEST_PAGE_COUNT / 8); i++) {
bits_set += popc8(bitmap[i]);
}
return (bits_set);
}
/*
 * Issue a VM_NPT_OPERATION ioctl on the VM device fd, aborting the test
 * on failure.
 */
static void
do_npt_op(int vmfd, struct vm_npt_operation *vno)
{
    const int rv = ioctl(vmfd, VM_NPT_OPERATION, vno);

    if (rv != 0) {
        err(EXIT_FAILURE, "VM_NPT_OPERATION failed");
    }
}
/*
 * Verify that the legacy VM_TRACK_DIRTY_PAGES interface reports a page
 * dirtied by a direct host-side write, and only that page.
 */
static void
test_legacy(struct vmctx *ctx)
{
    const int vmfd = vm_get_device_fd(ctx);

    uint8_t *datap = vm_map_gpa(ctx, 0, PAGESZ);
    if (datap == NULL) {
        errx(EXIT_FAILURE, "could not map guest memory");
    }
    /* Dirty only the first guest page. */
    *datap = 0xff;

    uint8_t bitmap[TEST_PAGE_COUNT / 8] = { 0 };
    struct vmm_dirty_tracker req = {
        .vdt_start_gpa = 0,
        .vdt_len = TEST_MEM_SZ,
        .vdt_pfns = bitmap,
    };
    if (ioctl(vmfd, VM_TRACK_DIRTY_PAGES, &req) != 0) {
        err(EXIT_FAILURE, "VM_TRACK_DIRTY_PAGES failed");
    }

    /* Exactly bit 0 of byte 0 (page 0) should be set. */
    if (bitmap[0] != 1) {
        errx(EXIT_FAILURE, "first page not marked dirty");
    }
    for (uint_t i = 1; i < (TEST_PAGE_COUNT / 8); i++) {
        if (bitmap[i] != 0) {
            /*
             * errx(3) appends its own newline; the stray '\n'
             * previously embedded here produced a blank line.
             */
            errx(EXIT_FAILURE,
                "unexpected non-zero entry: bitmap[%u] = %x",
                i, bitmap[i]);
        }
    }
}
/*
 * Verify that NPT dirty tracking can be queried, disabled, and re-enabled
 * through VM_NPT_OPERATION.  The VM was created with VCF_TRACK_DIRTY, so
 * tracking is expected to start out active.
 */
static void
test_toggle_tracking(struct vmctx *ctx)
{
const int vmfd = vm_get_device_fd(ctx);
struct vm_npt_operation vno = {
.vno_operation = VNO_OP_GET_TRACK_DIRTY,
.vno_gpa = 0,
.vno_len = 0,
};
/* GET_TRACK_DIRTY: ioctl returns 1 when tracking is active, 0 when not. */
if (ioctl(vmfd, VM_NPT_OPERATION, &vno) != 1) {
errx(EXIT_FAILURE, "expected dirty tracking to be active");
}
/* Turn tracking off... */
vno.vno_operation = VNO_OP_DIS_TRACK_DIRTY;
if (ioctl(vmfd, VM_NPT_OPERATION, &vno) != 0) {
err(EXIT_FAILURE, "VM_NPT_OPERATION failed");
}
/* ...and confirm the query now reports it inactive. */
vno.vno_operation = VNO_OP_GET_TRACK_DIRTY;
if (ioctl(vmfd, VM_NPT_OPERATION, &vno) != 0) {
errx(EXIT_FAILURE, "expected dirty tracking to be inactive");
}
/* Turn tracking back on for the remaining tests... */
vno.vno_operation = VNO_OP_EN_TRACK_DIRTY;
if (ioctl(vmfd, VM_NPT_OPERATION, &vno) != 0) {
err(EXIT_FAILURE, "VM_NPT_OPERATION failed");
}
/* ...and confirm it is active again. */
vno.vno_operation = VNO_OP_GET_TRACK_DIRTY;
if (ioctl(vmfd, VM_NPT_OPERATION, &vno) != 1) {
errx(EXIT_FAILURE,
"expected dirty tracking to be active again");
}
}
/*
 * Verify that VM_NPT_OPERATION rejects malformed requests with the
 * expected errno values.
 */
static void
test_inval_args(struct vmctx *ctx)
{
    const int vmfd = vm_get_device_fd(ctx);
    struct vm_npt_operation vno = { 0 };

    /* An unknown operation code must be rejected. */
    vno.vno_operation = ~0;
    if (ioctl(vmfd, VM_NPT_OPERATION, &vno) == 0) {
        /*
         * errx, not err: the ioctl succeeded, so errno holds no
         * meaningful value for this failure report.
         */
        errx(EXIT_FAILURE, "unexpected VM_NPT_OPERATION success");
    }
    expect_errno(EINVAL);

    /*
     * GET_DIRTY with an input bitmap flag and a non-page-aligned gpa
     * must be rejected (vno_bitmap is also still NULL here).
     */
    vno.vno_operation = VNO_OP_GET_DIRTY | VNO_FLAG_BITMAP_IN;
    vno.vno_gpa = 0x100;
    vno.vno_len = PAGESZ;
    if (ioctl(vmfd, VM_NPT_OPERATION, &vno) == 0) {
        errx(EXIT_FAILURE, "unexpected VM_NPT_OPERATION success");
    }
    expect_errno(EINVAL);

    /* A non-page-aligned length must be rejected. */
    vno.vno_gpa = 0;
    vno.vno_len = PAGESZ + 0x100;
    if (ioctl(vmfd, VM_NPT_OPERATION, &vno) == 0) {
        errx(EXIT_FAILURE, "unexpected VM_NPT_OPERATION success");
    }
    expect_errno(EINVAL);

    /* gpa + len wrapping past the top of the address space. */
    vno.vno_gpa = 0xffffffffffffe000;
    vno.vno_len = 512 * PAGESZ;
    if (ioctl(vmfd, VM_NPT_OPERATION, &vno) == 0) {
        errx(EXIT_FAILURE, "unexpected VM_NPT_OPERATION success");
    }
    expect_errno(EOVERFLOW);
}
/*
 * Verify VNO_OP_GET_DIRTY with an output bitmap: after dirtying every 8th
 * page, the returned bitmap must have exactly one bit (0x01) set per byte,
 * and after a legacy clear it must read back all zeros.
 */
static void
test_op_get_dirty(struct vmctx *ctx)
{
const int vmfd = vm_get_device_fd(ctx);
uint8_t *datap = vm_map_gpa(ctx, 0, TEST_MEM_SZ);
/* Start from a clean slate. */
(void) legacy_clear_dirty(ctx);
/* Dirty every 8th page: bit 0 of each bitmap byte. */
for (uint_t i = 0; i < TEST_MEM_SZ; i += (PAGESZ * 8)) {
datap[i] = 0xff;
}
uint8_t bits[TEST_PAGE_COUNT / 8] = { 0 };
struct vm_npt_operation vno = {
.vno_gpa = 0,
.vno_len = TEST_MEM_SZ,
.vno_operation = VNO_OP_GET_DIRTY | VNO_FLAG_BITMAP_OUT,
.vno_bitmap = bits,
};
do_npt_op(vmfd, &vno);
/* Every byte should report only its lowest bit dirty. */
for (uint_t i = 0; i < TEST_PAGE_COUNT / 8; i++) {
if (bits[i] != 0x01) {
errx(EXIT_FAILURE,
"unexpected dirty bits %02x at base gpa %08x",
bits[i], i * PAGESZ * 8);
}
}
/* Clearing via the legacy interface should leave nothing dirty... */
(void) legacy_clear_dirty(ctx);
do_npt_op(vmfd, &vno);
/* ...so a second GET_DIRTY must return an empty bitmap. */
for (uint_t i = 0; i < TEST_PAGE_COUNT / 8; i++) {
if (bits[i] != 0) {
errx(EXIT_FAILURE,
"unexpected dirty bits %02x at base gpa %08x",
bits[i], i * PAGESZ * 8);
}
}
}
/*
 * Verify VNO_OP_SET_DIRTY with an input bitmap: pages marked in the
 * provided bitmap should subsequently be reported dirty by the legacy
 * interface.
 */
static void
test_op_set_dirty(struct vmctx *ctx)
{
const int vmfd = vm_get_device_fd(ctx);
/* Start from a clean slate. */
(void) legacy_clear_dirty(ctx);
/* 0xff + 0xff + 0x80 = 8 + 8 + 1 = 17 pages marked dirty. */
uint8_t bits[TEST_PAGE_COUNT / 8] = { 0xff, 0xff, 0x80 };
struct vm_npt_operation vno = {
.vno_gpa = 0,
.vno_len = TEST_MEM_SZ,
.vno_operation = VNO_OP_SET_DIRTY | VNO_FLAG_BITMAP_IN,
.vno_bitmap = bits,
};
do_npt_op(vmfd, &vno);
/* The legacy interface should see exactly the 17 pages set above. */
uint_t legacy_dirty = legacy_clear_dirty(ctx);
if (legacy_dirty != 17) {
errx(EXIT_FAILURE, "unexpected dirty count after OP_SET_DIRTY");
}
}
/* Byte index into a dirty-page bitmap for a gpa (8 pages per byte). */
#define BMAP_IDX(gpa) ((gpa) / (PAGESZ * 8))
/* Bit position within that byte for a gpa. */
#define BMAP_BIT(gpa) (((gpa) / PAGESZ) % 8)
/*
 * Verify VNO_OP_RESET_DIRTY with both input and output bitmaps.
 *
 * Setup: dirty the first half of guest memory (pages 0..127), then issue a
 * reset over every even-numbered page, collecting the prior dirty state in
 * the same bitmap.  Afterward, the returned bitmap must show exactly the
 * even pages of the dirtied first half, and only the odd pages of the first
 * half (TEST_PAGE_COUNT / 4 of them) should remain dirty.
 */
static void
test_op_reset_dirty(struct vmctx *ctx)
{
    const int vmfd = vm_get_device_fd(ctx);
    uint8_t *datap = vm_map_gpa(ctx, 0, TEST_MEM_SZ);

    (void) legacy_clear_dirty(ctx);

    /* Dirty the first half of guest memory. */
    for (uintptr_t gpa = 0; gpa < (TEST_MEM_SZ / 2); gpa += PAGESZ) {
        datap[gpa] = 0xff;
    }

    /* Build an input bitmap selecting every even page for reset. */
    uint8_t bits[TEST_PAGE_COUNT / 8] = { 0 };
    for (uintptr_t gpa = 0; gpa < TEST_MEM_SZ; gpa += (2 * PAGESZ)) {
        bits[BMAP_IDX(gpa)] |= (1 << BMAP_BIT(gpa));
    }

    struct vm_npt_operation vno = {
        .vno_gpa = 0,
        .vno_len = TEST_MEM_SZ,
        .vno_operation = VNO_OP_RESET_DIRTY |
            VNO_FLAG_BITMAP_IN | VNO_FLAG_BITMAP_OUT,
        .vno_bitmap = bits,
    };
    do_npt_op(vmfd, &vno);

    /* The output bitmap holds the prior dirty state of the reset pages. */
    for (uintptr_t gpa = 0; gpa < TEST_MEM_SZ; gpa += PAGESZ) {
        const bool is_even_page = (BMAP_BIT(gpa) % 2) == 0;
        const bool is_dirty =
            (bits[BMAP_IDX(gpa)] & (1 << BMAP_BIT(gpa))) != 0;

        if (is_even_page && gpa < (TEST_MEM_SZ / 2) && !is_dirty) {
            errx(EXIT_FAILURE,
                "missing dirty bit set at gpa %08lx", gpa);
        }
        if (is_dirty && (!is_even_page || gpa >= (TEST_MEM_SZ / 2))) {
            errx(EXIT_FAILURE,
                "unexpected dirty bit set at gpa %08lx", gpa);
        }
    }

    /* Only the odd pages of the first half were left dirty. */
    uint_t remaining_dirty = legacy_clear_dirty(ctx);
    if (remaining_dirty != (TEST_PAGE_COUNT / 4)) {
        /*
         * The message previously printed TEST_PAGE_COUNT / 2, which
         * disagreed with the TEST_PAGE_COUNT / 4 value actually
         * checked above.
         */
        errx(EXIT_FAILURE,
            "expected %u pages remaining dirty, found %u",
            TEST_PAGE_COUNT / 4, remaining_dirty);
    }
}
/*
 * Entry point: create the dirty-tracking-capable test VM, give it memory,
 * run each tracking test in sequence, then report PASS.
 */
int
main(int argc, char *argv[])
{
    const char *suite_name = basename(argv[0]);

    struct vmctx *ctx = check_vmm_capability(suite_name);
    if (vm_setup_memory(ctx, TEST_MEM_SZ, VM_MMAP_ALL) != 0) {
        err(EXIT_FAILURE, "could not setup VM memory");
    }

    test_legacy(ctx);
    test_toggle_tracking(ctx);
    test_inval_args(ctx);
    test_op_get_dirty(ctx);
    test_op_set_dirty(ctx);
    test_op_reset_dirty(ctx);

    vm_destroy(ctx);
    (void) printf("%s\tPASS\n", suite_name);
    return (EXIT_SUCCESS);
}