#include <fcntl.h>
#include <dirent.h>
#define __EXPORTED_HEADERS__
#include <linux/vfio.h>
#include "iommufd_utils.h"
/* Set by setup_fault_injection() when the debugfs failslab knobs exist;
 * TEST_FAIL_NTH tests SKIP when this stays false.
 */
static bool have_fault_injection;
static int writeat(int dfd, const char *fn, const char *val)
{
size_t val_len = strlen(val);
ssize_t res;
int fd;
fd = openat(dfd, fn, O_WRONLY);
if (fd == -1)
return -1;
res = write(fd, val, val_len);
assert(res == val_len);
close(fd);
return 0;
}
/*
 * Runs before main(): size and map the shared test buffers.  PAGE_SIZE,
 * BUFFER_SIZE, buffer, mfd_buffer and mfd are globals provided by
 * iommufd_utils.h.
 *
 * NOTE(review): neither mmap() nor memfd_mmap() results are checked here;
 * presumably a failure surfaces later when a test touches the buffer --
 * confirm this is intentional.
 */
static __attribute__((constructor)) void setup_buffer(void)
{
PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
BUFFER_SIZE = 2*1024*1024;
/* anonymous shared mapping used by most map tests */
buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
/* memfd-backed mapping for the file-map tests; mfd receives the memfd */
mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
&mfd);
}
/*
 * Runs before main(): configure kernel fault injection through debugfs so
 * that failslab/fail_page_alloc can trigger on any allocation, and silence
 * the verbose reporting of every fail* mechanism.  If the failslab knob is
 * absent the kernel was built without fault injection and
 * have_fault_injection stays false, causing the fail_nth tests to SKIP.
 */
static __attribute__((constructor)) void setup_fault_injection(void)
{
	DIR *debugfs = opendir("/sys/kernel/debug/");
	struct dirent *dent;

	if (!debugfs)
		return;

	/* No failslab knob means no fault injection support in this kernel */
	if (writeat(dirfd(debugfs), "failslab/ignore-gfp-wait", "N")) {
		closedir(debugfs);	/* fix: don't leak the DIR on this path */
		return;
	}
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-wait", "N");
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-highmem", "N");

	/* Turn off verbose logging for every fail* injection point */
	while ((dent = readdir(debugfs))) {
		char fn[300];

		if (strncmp(dent->d_name, "fail", 4) != 0)
			continue;
		snprintf(fn, sizeof(fn), "%s/verbose", dent->d_name);
		writeat(dirfd(debugfs), fn, "0");
	}
	closedir(debugfs);
	have_fault_injection = true;
}
/* Tracks one TEST_FAIL_NTH run across its repeated executions. */
struct fail_nth_state {
/* open fd for this task's /proc/self/task/<pid>/fail-nth file */
int proc_fd;
/* which injection point is armed; 0 is the baseline, no-failure pass */
unsigned int iteration;
};
/*
 * Open this task's fail-nth control file and stash the fd in nth_state.
 * The fd is later written to arm the Nth-call failure and read back to
 * learn whether the armed failure was consumed.
 */
static void fail_nth_first(struct __test_metadata *_metadata,
			   struct fail_nth_state *nth_state)
{
	char path[300];

	snprintf(path, sizeof(path), "/proc/self/task/%u/fail-nth",
		 getpid());
	nth_state->proc_fd = open(path, O_RDWR);
	ASSERT_NE(-1, nth_state->proc_fd);
}
/*
 * Step the fail-nth state machine after one run of the test body.
 *
 * Returns true when the caller should re-run the test with the injection
 * point advanced by one, false once a run completed without consuming the
 * armed failure (fail-nth reads back "0\n"... see below; the sense is that
 * "0\n" means the counter was consumed, anything else means the test body
 * had fewer fault sites than the current iteration).
 *
 * NOTE(review): test_result is accepted but never read here -- confirm
 * whether it is kept for future use.
 */
static bool fail_nth_next(struct __test_metadata *_metadata,
struct fail_nth_state *nth_state,
int test_result)
{
static const char disable_nth[] = "0";
char buf[300];

/* Arbitrary upper bound to catch a test that never terminates */
ASSERT_GT(1000, nth_state->iteration);
if (nth_state->iteration != 0) {
ssize_t res;
ssize_t res2;

buf[0] = 0;
/*
 * Read back the fail-nth counter.  The pread itself can be the
 * injected failure (EFAULT); substitute "1\n" (non-zero, i.e.
 * the armed failure was not consumed by the test body).
 */
res = pread(nth_state->proc_fd, buf, sizeof(buf), 0);
if (res == -1 && errno == EFAULT) {
buf[0] = '1';
buf[1] = '\n';
res = 2;
}
/* Disarm injection; retry once if the pwrite was the failure */
res2 = pwrite(nth_state->proc_fd, disable_nth,
ARRAY_SIZE(disable_nth) - 1, 0);
if (res2 == -1 && errno == EFAULT) {
res2 = pwrite(nth_state->proc_fd, disable_nth,
ARRAY_SIZE(disable_nth) - 1, 0);
buf[0] = '1';
buf[1] = '\n';
}
ASSERT_EQ(ARRAY_SIZE(disable_nth) - 1, res2);

fflush(stdout);
ASSERT_LT(1, res);
/* Non-"0\n": the armed failure never fired, the test is exhausted */
if (res != 2 || buf[0] != '0' || buf[1] != '\n')
return false;
} else {
/* Iteration 0 was the baseline pass; nothing was armed yet */
}
nth_state->iteration++;
return true;
}
/*
 * Arm fault injection so the Nth faultable call after this point fails,
 * where N is the current iteration.  Iteration 0 (the baseline pass before
 * any injection) leaves fault injection disabled.
 */
void __fail_nth_enable(struct __test_metadata *_metadata,
		       struct fail_nth_state *nth_state)
{
	char num[300];
	size_t num_len;

	if (!nth_state->iteration)
		return;

	num_len = snprintf(num, sizeof(num), "%u", nth_state->iteration);
	ASSERT_EQ(num_len, pwrite(nth_state->proc_fd, num, num_len, 0));
}
/* Call inside a TEST_FAIL_NTH body to arm injection at the current
 * iteration; a no-op on the baseline (iteration 0) pass.
 */
#define fail_nth_enable() __fail_nth_enable(_metadata, _nth_state)

/*
 * Define a fault-injection test.  Generates a harness TEST_F() that first
 * runs test_nth_##name once with no failure armed (it must succeed), then
 * keeps re-running it while fail_nth_next() advances the armed failure one
 * call later each pass, re-running fixture teardown/setup in between.  The
 * loop ends when a pass no longer reaches the injection point, and that
 * final result must be 0.  SKIPs when the kernel lacks fault injection.
 */
#define TEST_FAIL_NTH(fixture_name, name) \
static int test_nth_##name(struct __test_metadata *_metadata, \
FIXTURE_DATA(fixture_name) *self, \
const FIXTURE_VARIANT(fixture_name) \
*variant, \
struct fail_nth_state *_nth_state); \
TEST_F(fixture_name, name) \
{ \
struct fail_nth_state nth_state = {}; \
int test_result = 0; \
\
if (!have_fault_injection) \
SKIP(return, \
"fault injection is not enabled in the kernel"); \
fail_nth_first(_metadata, &nth_state); \
ASSERT_EQ(0, test_nth_##name(_metadata, self, variant, \
&nth_state)); \
while (fail_nth_next(_metadata, &nth_state, test_result)) { \
fixture_name##_teardown(_metadata, self, variant); \
fixture_name##_setup(_metadata, self, variant); \
test_result = test_nth_##name(_metadata, self, \
variant, &nth_state); \
}; \
ASSERT_EQ(0, test_result); \
} \
static int test_nth_##name( \
struct __test_metadata __attribute__((unused)) *_metadata, \
FIXTURE_DATA(fixture_name) __attribute__((unused)) *self, \
const FIXTURE_VARIANT(fixture_name) __attribute__((unused)) \
*variant, \
struct fail_nth_state *_nth_state)
/* Per-test state; non-zero ids are live objects that teardown must clean up */
FIXTURE(basic_fail_nth)
{
/* /dev/iommu fd, -1 when not open */
int fd;
/* live access object to destroy in teardown, 0 if none */
uint32_t access_id;
/* mock device id, used together with pasid for detach in teardown */
uint32_t stdev_id;
/* attached pasid to detach in teardown, 0 if none */
uint32_t pasid;
};
/* Reset to "nothing open, no live objects" before every injection pass */
FIXTURE_SETUP(basic_fail_nth)
{
self->fd = -1;
self->access_id = 0;
self->stdev_id = 0;
self->pasid = 0;
}
/*
 * Runs after every injection pass, including failed ones: destroy a live
 * access object, detach an attached pasid, then close the iommufd.
 */
FIXTURE_TEARDOWN(basic_fail_nth)
{
int rc;

if (self->access_id) {
rc = _test_cmd_destroy_access(self->access_id);
assert(rc == 0);
}
/* detach only makes sense while the owning mock device still exists */
if (self->pasid && self->stdev_id)
_test_cmd_pasid_detach(self->fd, self->stdev_id, self->pasid);
teardown_iommufd(self->fd, _metadata);
}
/*
 * Fault-inject the core IOAS paths: open, ioas alloc, iova-ranges query,
 * allowed-iova restriction, map, same-ioas copy, and unmap.  Injection is
 * armed before open() so every step is covered.
 */
TEST_FAIL_NTH(basic_fail_nth, basic)
{
	struct iommu_iova_range ranges[10];
	uint32_t ioas_id;
	__u64 iova;

	fail_nth_enable();

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	{
		struct iommu_ioas_iova_ranges ranges_cmd = {
			.size = sizeof(ranges_cmd),
			.num_iovas = ARRAY_SIZE(ranges),
			.ioas_id = ioas_id,
			.allowed_iovas = (uintptr_t)ranges,
		};

		if (ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd))
			return -1;
	}

	{
		struct iommu_ioas_allow_iovas allow_cmd = {
			.size = sizeof(allow_cmd),
			.ioas_id = ioas_id,
			.num_iovas = 1,
			.allowed_iovas = (uintptr_t)ranges,
		};

		/* Restrict the IOAS to one window large enough for the map */
		ranges[0].start = 16*1024;
		ranges[0].last = BUFFER_SIZE + 16 * 1024 * 600 - 1;
		if (ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	{
		struct iommu_ioas_copy copy_cmd = {
			.size = sizeof(copy_cmd),
			.flags = IOMMU_IOAS_MAP_WRITEABLE |
				 IOMMU_IOAS_MAP_READABLE,
			.dst_ioas_id = ioas_id,
			.src_ioas_id = ioas_id,
			.src_iova = iova,
			.length = sizeof(ranges),
		};

		/* fix: "©_cmd" was a mangled "&copy_cmd" (HTML &copy;) */
		if (ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE,
				   NULL))
		return -1;
	/* Second unmap of the same range is best-effort, result ignored */
	_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, NULL);
	return 0;
}
/*
 * Fault-inject from mock-domain attach onward: attach a mock device to the
 * IOAS, map while the domain is attached, destroy the device, re-attach.
 */
TEST_FAIL_NTH(basic_fail_nth, map_domain)
{
uint32_t ioas_id;
__u32 stdev_id;
__u32 hwpt_id;
__u64 iova;

self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;

if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;

/* presumably shrinks internal allocations to widen fault coverage -- TODO confirm */
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;

fail_nth_enable();

if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;

if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;

if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;

/* Re-attach with the mapping already present */
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
return 0;
}
/*
 * Same shape as map_domain but maps memfd-backed memory (mfd from
 * setup_buffer) via the file-map path instead of an anonymous buffer.
 */
TEST_FAIL_NTH(basic_fail_nth, map_file_domain)
{
uint32_t ioas_id;
__u32 stdev_id;
__u32 hwpt_id;
__u64 iova;

self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;

if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;

/* presumably shrinks internal allocations to widen fault coverage -- TODO confirm */
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;

fail_nth_enable();

if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;

if (_test_ioctl_ioas_map_file(self->fd, ioas_id, mfd, 0, 262144, &iova,
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;

if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;

/* Re-attach with the file-backed mapping already present */
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
return 0;
}
/*
 * Fault-inject with two mock domains sharing one IOAS: second attach, map,
 * destroy both devices, then attach both again.
 */
TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
{
uint32_t ioas_id;
__u32 stdev_id2;
__u32 stdev_id;
__u32 hwpt_id2;
__u32 hwpt_id;
__u64 iova;

self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;

if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;

/* presumably shrinks internal allocations to widen fault coverage -- TODO confirm */
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;

/* First domain attached before injection begins */
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;

fail_nth_enable();

if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
NULL))
return -1;

if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;

if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;

if (_test_ioctl_destroy(self->fd, stdev_id2))
return -1;

if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
return -1;
if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
NULL))
return -1;
return 0;
}
/*
 * Fault-inject the access read/write paths: create an access object and
 * drive IOMMU_TEST_OP_ACCESS_RW through read, write, slow-path read and
 * slow-path write, plus one large slow-path transfer, then destroy it.
 */
TEST_FAIL_NTH(basic_fail_nth, access_rw)
{
uint64_t tmp_big[4096];
uint32_t ioas_id;
uint16_t tmp[32];
__u64 iova;

self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;

if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;

/* presumably shrinks internal allocations to widen fault coverage -- TODO confirm */
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;

if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;

fail_nth_enable();

if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id, 0))
return -1;

{
struct iommu_test_cmd access_cmd = {
.size = sizeof(access_cmd),
.op = IOMMU_TEST_OP_ACCESS_RW,
.id = self->access_id,
.access_rw = { .iova = iova,
.length = sizeof(tmp),
.uptr = (uintptr_t)tmp },
};

/* default flags: read */
if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd))
return -1;
access_cmd.access_rw.flags = MOCK_ACCESS_RW_WRITE;
if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd))
return -1;

access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH;
if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd))
return -1;
access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH |
MOCK_ACCESS_RW_WRITE;
if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd))
return -1;
}

{
struct iommu_test_cmd access_cmd = {
.size = sizeof(access_cmd),
.op = IOMMU_TEST_OP_ACCESS_RW,
.id = self->access_id,
.access_rw = { .iova = iova,
.flags = MOCK_ACCESS_RW_SLOW_PATH,
.length = sizeof(tmp_big),
.uptr = (uintptr_t)tmp_big },
};

/* larger-than-page transfer on the slow path */
if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
&access_cmd))
return -1;
}
if (_test_cmd_destroy_access(self->access_id))
return -1;
/* destroyed by hand; stop teardown from destroying it again */
self->access_id = 0;
return 0;
}
/*
 * Fault-inject access page-pinning: pin the whole buffer through an access
 * object that requires pin_pages, then unpin and destroy it.
 */
TEST_FAIL_NTH(basic_fail_nth, access_pin)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		/* fix: request now names ACCESS_PAGES to match .op (was ACCESS_RW) */
		if (ioctl(self->fd,
			  _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	/* destroyed by hand; stop teardown from destroying it again */
	self->access_id = 0;
	return 0;
}
/*
 * Same as access_pin but with a mock domain attached, so the pinned pages
 * are simultaneously mapped into an attached domain.
 */
TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		/* fix: request now names ACCESS_PAGES to match .op (was ACCESS_RW) */
		if (ioctl(self->fd,
			  _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	/* destroyed by hand; stop teardown from destroying it again */
	self->access_id = 0;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;
	return 0;
}
/*
 * Fault-inject the device/object lifecycle: mock device with PASID support,
 * hw_info, hwpt alloc/replace, nesting parent, viommu + vdevice + hw queue,
 * fault and veventq queues, and pasid attach/replace/detach.  Each step
 * depends on ids produced by earlier steps, so order must not change.
 */
TEST_FAIL_NTH(basic_fail_nth, device)
{
struct iommu_hwpt_selftest data = {
.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
};
struct iommu_test_hw_info info;
uint32_t fault_id, fault_fd;
uint32_t veventq_id, veventq_fd;
uint32_t fault_hwpt_id;
uint32_t test_hwpt_id;
uint32_t ioas_id;
uint32_t ioas_id2;
uint32_t idev_id;
uint32_t hwpt_id;
uint32_t viommu_id;
uint32_t hw_queue_id;
uint32_t vdev_id;
__u64 iova;

self->fd = open("/dev/iommu", O_RDWR);
if (self->fd == -1)
return -1;

if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
return -1;

if (_test_ioctl_ioas_alloc(self->fd, &ioas_id2))
return -1;

/* Both ioas get the same fixed-iova page for the replace test below */
iova = MOCK_APERTURE_START;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, PAGE_SIZE, &iova,
IOMMU_IOAS_MAP_FIXED_IOVA |
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id2, buffer, PAGE_SIZE, &iova,
IOMMU_IOAS_MAP_FIXED_IOVA |
IOMMU_IOAS_MAP_WRITEABLE |
IOMMU_IOAS_MAP_READABLE))
return -1;

fail_nth_enable();

/* stdev_id kept in the fixture so teardown can pasid-detach */
if (_test_cmd_mock_domain_flags(self->fd, ioas_id,
MOCK_FLAGS_DEVICE_PASID,
&self->stdev_id, NULL, &idev_id))
return -1;

if (_test_cmd_get_hw_info(self->fd, idev_id, IOMMU_HW_INFO_TYPE_DEFAULT,
&info, sizeof(info), NULL, NULL))
return -1;

if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0,
IOMMU_HWPT_ALLOC_PASID, &hwpt_id,
IOMMU_HWPT_DATA_NONE, 0, 0))
return -1;

/* Replace attachment: ioas -> other ioas -> explicit hwpt */
if (_test_cmd_mock_domain_replace(self->fd, self->stdev_id, ioas_id2, NULL))
return -1;

if (_test_cmd_mock_domain_replace(self->fd, self->stdev_id, hwpt_id, NULL))
return -1;

/* hwpt_id is re-assigned here; the first hwpt stays owned by the fd */
if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0,
IOMMU_HWPT_ALLOC_NEST_PARENT |
IOMMU_HWPT_ALLOC_PASID,
&hwpt_id,
IOMMU_HWPT_DATA_NONE, 0, 0))
return -1;

if (_test_cmd_viommu_alloc(self->fd, idev_id, hwpt_id, 0,
IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
&viommu_id))
return -1;

if (_test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, 0, &vdev_id))
return -1;

if (_test_cmd_hw_queue_alloc(self->fd, viommu_id,
IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, iova,
PAGE_SIZE, &hw_queue_id))
return -1;

if (_test_ioctl_fault_alloc(self->fd, &fault_id, &fault_fd))
return -1;
/* only the id is needed below; drop the event fd right away */
close(fault_fd);

if (_test_cmd_hwpt_alloc(self->fd, idev_id, hwpt_id, fault_id,
IOMMU_HWPT_FAULT_ID_VALID, &fault_hwpt_id,
IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data)))
return -1;

if (_test_cmd_veventq_alloc(self->fd, viommu_id,
IOMMU_VEVENTQ_TYPE_SELFTEST, &veventq_id,
&veventq_fd))
return -1;
close(veventq_fd);

if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0,
IOMMU_HWPT_ALLOC_PASID,
&test_hwpt_id,
IOMMU_HWPT_DATA_NONE, 0, 0))
return -1;

/* pasid recorded in the fixture so a failure mid-sequence still detaches */
self->pasid = 200;

if (_test_cmd_pasid_attach(self->fd, self->stdev_id,
self->pasid, hwpt_id)) {
self->pasid = 0;
return -1;
}

if (_test_cmd_pasid_replace(self->fd, self->stdev_id,
self->pasid, test_hwpt_id))
return -1;

if (_test_cmd_pasid_detach(self->fd, self->stdev_id, self->pasid))
return -1;

/* detached by hand; stop teardown from detaching again */
self->pasid = 0;

return 0;
}
TEST_HARNESS_MAIN