#include <cpuid.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/auxv.h>
#include "defines.h"
#include "kselftest_harness.h"
#include "main.h"
/* Sentinel values written into / read back from enclave memory by the tests. */
static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;

/* Entry point of __vdso_sgx_enter_enclave, resolved at runtime in setup_test_encl(). */
vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;
/*
 * SECINFO.FLAGS page-state bits passed to EACCEPT when acknowledging
 * SGX2 page modifications from within the enclave.
 */
enum sgx_secinfo_page_state {
	SGX_SECINFO_PENDING = (1 << 3),
	SGX_SECINFO_MODIFIED = (1 << 4),
	SGX_SECINFO_PR = (1 << 5),
};
/* Pointers into the vDSO image's dynamic symbol table, string table and hash table. */
struct vdso_symtab {
	Elf64_Sym *elf_symtab;
	const char *elf_symstrtab;
	Elf64_Word *elf_hashtab;
};
/*
 * Locate the dynamic table of the ELF image mapped at @addr by scanning
 * its program headers for the PT_DYNAMIC segment.
 *
 * Returns a pointer into the image, or NULL if no PT_DYNAMIC segment exists.
 */
static Elf64_Dyn *vdso_get_dyntab(void *addr)
{
	Elf64_Ehdr *ehdr = addr;
	Elf64_Phdr *phdr = (Elf64_Phdr *)((char *)addr + ehdr->e_phoff);
	Elf64_Half n;

	for (n = 0; n < ehdr->e_phnum; n++) {
		if (phdr[n].p_type == PT_DYNAMIC)
			return (Elf64_Dyn *)((char *)addr + phdr[n].p_offset);
	}

	return NULL;
}
/*
 * Look up a dynamic-table entry with tag @tag and translate its d_ptr
 * into an address inside the image mapped at @addr.
 *
 * Returns NULL if the tag is not present before the DT_NULL terminator.
 */
static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag)
{
	Elf64_Dyn *d;

	for (d = dyntab; d->d_tag != DT_NULL; d++) {
		if (d->d_tag == tag)
			return (char *)addr + d->d_un.d_ptr;
	}

	return NULL;
}
/*
 * Populate @symtab with the symbol, string and hash tables of the ELF
 * image mapped at @addr.  Returns false if any of the three required
 * dynamic entries (DT_SYMTAB, DT_STRTAB, DT_HASH) is missing.
 */
static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
{
	Elf64_Dyn *dyntab = vdso_get_dyntab(addr);

	symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB);
	if (!symtab->elf_symtab)
		return false;

	symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB);
	if (!symtab->elf_symstrtab)
		return false;

	symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH);
	if (!symtab->elf_hashtab)
		return false;

	return true;
}
/*
 * Check for SGX2 support: bit 1 of EAX in CPUID sub-leaf 0 of the SGX
 * leaf advertises the SGX2 instruction set (EAUG/EMODPE/EMODPR/...).
 * Returns non-zero when SGX2 is available.
 */
static inline int sgx2_supported(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid_count(SGX_CPUID, 0x0, eax, ebx, ecx, edx);

	return eax & 0x2;
}
/*
 * Classic System V gABI ELF hash of a symbol name, used to index the
 * vDSO's DT_HASH table in vdso_symtab_get().
 *
 * The gABI defines the algorithm over unsigned characters; cast
 * explicitly so the result does not depend on whether plain char is
 * signed on this target (sign extension would corrupt the hash for
 * bytes >= 0x80).
 */
static unsigned long elf_sym_hash(const char *name)
{
	unsigned long h = 0, high;

	while (*name) {
		h = (h << 4) + (unsigned char)*name++;
		high = h & 0xf0000000;
		if (high)
			h ^= high >> 24;
		h &= ~high;
	}

	return h;
}
/*
 * Look up @name in the vDSO symbol hash table.
 *
 * DT_HASH layout: word 0 is the bucket count, word 1 the chain count,
 * followed by the bucket array and then the chain array.  Returns the
 * matching symbol or NULL.
 */
static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
{
	Elf64_Word nbuckets = symtab->elf_hashtab[0];
	Elf64_Word *buckets = &symtab->elf_hashtab[2];
	Elf64_Word *chains = &symtab->elf_hashtab[2 + nbuckets];
	Elf64_Word idx = buckets[elf_sym_hash(name) % nbuckets];

	while (idx != STN_UNDEF) {
		Elf64_Sym *candidate = &symtab->elf_symtab[idx];

		if (strcmp(name, &symtab->elf_symstrtab[candidate->st_name]) == 0)
			return candidate;

		idx = chains[idx];
	}

	return NULL;
}
/*
 * Return the offset of the TCS page within the enclave, or -1.
 *
 * By construction the TCS is the first segment of the test enclave and
 * is mapped RW from outside, so only segment 0 can ever match.
 */
static off_t encl_get_tcs_offset(struct encl *encl)
{
	if (encl->nr_segments > 0 &&
	    encl->segment_tbl[0].prot == (PROT_READ | PROT_WRITE))
		return encl->segment_tbl[0].offset;

	return -1;
}
/*
 * Return the offset of the enclave's data segment: the first RW segment
 * after segment 0 (the TCS).  Returns -1 if none exists.
 */
static off_t encl_get_data_offset(struct encl *encl)
{
	int n;

	for (n = 1; n < encl->nr_segments; n++) {
		struct encl_segment *s = &encl->segment_tbl[n];

		if (s->prot == (PROT_READ | PROT_WRITE))
			return s->offset;
	}

	return -1;
}
/* Per-test state: the loaded enclave and the vDSO run structure. */
FIXTURE(enclave) {
	struct encl encl;
	struct sgx_enclave_run run;
};
/*
 * Load, measure, build and map the test enclave with @heap_size bytes of
 * heap, then resolve __vdso_sgx_enter_enclave from the vDSO.
 *
 * On failure the enclave is deleted, its segment table and any
 * /dev/sgx_enclave lines from /proc/self/maps are logged for debugging,
 * and false is returned.
 */
static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
			    struct __test_metadata *_metadata)
{
	Elf64_Sym *sgx_enter_enclave_sym = NULL;
	struct vdso_symtab symtab;
	struct encl_segment *seg;
	char maps_line[256];
	FILE *maps_file;
	unsigned int i;
	void *addr;

	if (!encl_load("test_encl.elf", encl, heap_size)) {
		encl_delete(encl);
		TH_LOG("Failed to load the test enclave.");
		return false;
	}

	if (!encl_measure(encl))
		goto err;

	if (!encl_build(encl))
		goto err;

	/*
	 * Map each segment at its fixed address inside the enclave range.
	 * File offset is 0 for every segment — presumably the SGX driver
	 * resolves the mapping by address, not offset; TODO confirm.
	 */
	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		addr = mmap((void *)encl->encl_base + seg->offset, seg->size,
			    seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0);
		EXPECT_NE(addr, MAP_FAILED);
		if (addr == MAP_FAILED)
			goto err;
	}

	/* Locate the vDSO image and look up the SGX entry function. */
	addr = (void *)getauxval(AT_SYSINFO_EHDR);
	if (!addr)
		goto err;

	if (!vdso_get_symtab(addr, &symtab))
		goto err;

	sgx_enter_enclave_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
	if (!sgx_enter_enclave_sym)
		goto err;

	vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value;

	return true;

err:
	/* Dump segment layout to aid debugging of the failure. */
	for (i = 0; i < encl->nr_segments; i++) {
		seg = &encl->segment_tbl[i];
		TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot);
	}

	maps_file = fopen("/proc/self/maps", "r");
	if (maps_file != NULL) {
		while (fgets(maps_line, sizeof(maps_line), maps_file) != NULL) {
			/* Strip the trailing newline from fgets(). */
			maps_line[strlen(maps_line) - 1] = '\0';

			if (strstr(maps_line, "/dev/sgx_enclave"))
				TH_LOG("%s", maps_line);
		}

		fclose(maps_file);
	}

	TH_LOG("Failed to initialize the test enclave.");

	encl_delete(encl);

	return false;
}
/* No common setup: each test builds its own enclave via setup_test_encl(). */
FIXTURE_SETUP(enclave)
{
}
/* Tear down whatever enclave the test created (safe even on partial setup). */
FIXTURE_TEARDOWN(enclave)
{
	encl_delete(&self->encl);
}
/*
 * Enter the enclave with operation @op.  When @clobbered is true, call
 * through the vDSO entry (which may clobber caller-saved state);
 * otherwise use the selftest's own assembly wrapper sgx_enter_enclave().
 */
#define ENCL_CALL(op, run, clobbered) \
	({ \
		int ret; \
		if ((clobbered)) \
			ret = vdso_sgx_enter_enclave((unsigned long)(op), 0, 0, \
						     EENTER, 0, 0, (run)); \
		else \
			ret = sgx_enter_enclave((void *)(op), NULL, 0, EENTER, NULL, NULL, \
						(run)); \
		ret; \
	})
/*
 * Expect the enclave to have exited normally (EEXIT); on failure, log
 * the exception vector, error code and faulting address for diagnosis.
 */
#define EXPECT_EEXIT(run) \
	do { \
		EXPECT_EQ((run)->function, EEXIT); \
		if ((run)->function != EEXIT) \
			TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \
			       (run)->exception_error_code, (run)->exception_addr); \
	} while (0)
/*
 * Basic round trip through the non-clobbering entry path: store MAGIC in
 * the enclave's buffer and read it back.
 */
TEST_F(enclave, unclobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
/*
 * Combine the low/high CPUID register halves of an EPC section metric
 * (base or size) into one value: bits 31:12 come from @low and bits
 * 51:32 from bits 19:0 of @high, per the SDM's SGX CPUID EPC leaf.
 */
static unsigned long sgx_calc_section_metric(unsigned int low,
					     unsigned int high)
{
	unsigned long metric = low & 0xfffff000UL;

	metric += ((unsigned long)(high & 0xfffffUL)) << 32;

	return metric;
}
/*
 * Sum the sizes of all EPC sections enumerated by the SGX CPUID leaf.
 * Sub-leaves are walked until the type field reports anything other
 * than a valid EPC section (which includes the "invalid" terminator).
 */
static unsigned long get_total_epc_mem(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned long total_size = 0;
	unsigned int type;
	int section;

	for (section = 0; ; section++) {
		__cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC, eax, ebx, ecx, edx);

		type = eax & SGX_CPUID_EPC_MASK;
		if (type != SGX_CPUID_EPC_SECTION)
			break;

		total_size += sgx_calc_section_metric(ecx, edx);
	}

	return total_size;
}
/*
 * Same round trip as unclobbered_vdso, but with a heap as large as all
 * EPC memory, forcing the kernel's EPC reclaimer to be exercised.
 */
TEST_F(enclave, unclobbered_vdso_oversubscribed)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;
	unsigned long total_mem;

	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
/*
 * Build an enclave with an EPC-sized heap, trim the whole heap
 * (SGX_IOC_ENCLAVE_MODIFY_TYPES -> PT_TRIM), EACCEPT every trimmed page
 * from inside the enclave, then remove the pages with
 * SGX_IOC_ENCLAVE_REMOVE_PAGES.  Long timeout because every step walks
 * the full EPC.
 */
TEST_F_TIMEOUT(enclave, unclobbered_vdso_oversubscribed_remove, 900)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_get_from_buf get_op;
	struct encl_op_eaccept eaccept_op;
	struct encl_op_put_to_buf put_op;
	struct encl_segment *heap;
	unsigned long total_mem;
	int ret, errno_save;
	unsigned long addr;
	unsigned long i;

	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	TH_LOG("Creating an enclave with %lu bytes heap may take a while ...",
	       total_mem);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	/*
	 * Probe for SGX2 ioctl support with a (deliberately invalid)
	 * zeroed request; skip the test if the kernel or hardware lacks it.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/* The zeroed request itself must have been rejected. */
	EXPECT_EQ(ret, -1);

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* The heap is always the last segment of the test enclave. */
	heap = &self->encl.segment_tbl[self->encl.nr_segments - 1];

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	/* Change the type of the whole heap to PT_TRIM. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = heap->offset;
	modt_ioc.length = heap->size;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
	TH_LOG("Changing type of %zd bytes to trimmed may take a while ...",
	       heap->size);
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, heap->size);

	/* EACCEPT the type change for every heap page from inside the enclave. */
	addr = self->encl.encl_base + heap->offset;

	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	TH_LOG("Entering enclave to run EACCEPT for each page of %zd bytes may take a while ...",
	       heap->size);
	for (i = 0; i < heap->size; i += 4096) {
		eaccept_op.epc_addr = addr + i;
		eaccept_op.ret = 0;

		EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

		EXPECT_EQ(self->run.exception_vector, 0);
		EXPECT_EQ(self->run.exception_error_code, 0);
		EXPECT_EQ(self->run.exception_addr, 0);
		ASSERT_EQ(eaccept_op.ret, 0);
		ASSERT_EQ(self->run.function, EEXIT);
	}

	/* Now the trimmed pages can actually be removed from the enclave. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = heap->offset;
	remove_ioc.length = heap->size;
	TH_LOG("Removing %zd bytes from enclave may take a while ...",
	       heap->size);
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, heap->size);
}
/* Same buffer round trip as unclobbered_vdso, but via the clobbering vDSO path. */
TEST_F(enclave, clobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
/*
 * User handler for clobbered_vdso_and_user_function: clears user_data
 * (which the test pre-seeds with 0xdeadbeef) to prove it was invoked,
 * then returns 0 so the vDSO call completes normally.
 */
static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
			struct sgx_enclave_run *run)
{
	run->user_data = 0;

	return 0;
}
/*
 * Buffer round trip through the vDSO with a user exit handler installed;
 * user_data going from 0xdeadbeef to 0 proves test_handler() ran.
 */
TEST_F(enclave, clobbered_vdso_and_user_function)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	self->run.user_handler = (__u64)test_handler;
	self->run.user_data = 0xdeadbeef;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
/*
 * Enter the enclave through both of its TCS pages (at encl_base and
 * encl_base + PAGE_SIZE) and verify a clean EEXIT from each.
 */
TEST_F(enclave, tcs_entry)
{
	struct encl_op_header op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	op.type = ENCL_OP_NOP;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Move to the next TCS and repeat the entry. */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}
/*
 * Verify that PTE permissions are enforced on enclave memory: after
 * mprotect()ing a data page read-only, a write from inside the enclave
 * must raise #PF (vector 14, error code 0x7); restoring RW and resuming
 * with ERESUME must let the write complete.
 */
TEST_F(enclave, pte_permissions)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	unsigned long data_start;
	int ret;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Second page of the data segment, to leave the first untouched. */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) +
		     PAGE_SIZE;

	/* Sanity check: the page is writable before mprotect(). */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Make the page read-only; a failure here only produces a diagnostic. */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ);
	if (ret)
		perror("mprotect");

	/* The write must now fault: #PF with present|write|user error code. */
	put_addr_op.value = MAGIC2;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x7);
	EXPECT_EQ(self->run.exception_addr, data_start);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE);
	if (ret)
		perror("mprotect");

	/* Resume the faulted enclave thread; the write should now succeed. */
	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0,
					 0, ERESUME, 0, 0, &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC2);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}
/*
 * Attempting to restrict permissions of a TCS page must be rejected
 * with EINVAL and no pages processed.
 */
TEST_F(enclave, tcs_permissions)
{
	struct sgx_enclave_restrict_permissions ioc;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Probe SGX2 ioctl support with an invalid (zeroed) request;
	 * skip if unsupported.
	 */
	memset(&ioc, 0, sizeof(ioc));

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
	errno_save = ret == -1 ? errno : 0;

	ASSERT_EQ(ret, -1);

	if (errno_save == ENOTTY)
		SKIP(return,
		     "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
	else if (errno_save == ENODEV)
		SKIP(return, "System does not support SGX2");

	/* Now target the TCS page itself; the kernel must refuse. */
	ioc.offset = encl_get_tcs_offset(&self->encl);
	ioc.length = PAGE_SIZE;
	ioc.permissions = SGX_SECINFO_R;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno_save, EINVAL);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, 0);
}
/*
 * Exercise EPCM permission changes on a data page:
 *
 * 1. Restrict an RW page to read-only via
 *    SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS and EACCEPT the change inside
 *    the enclave.
 * 2. Verify a write now faults with #PF carrying the SGX flag in the
 *    error code (0x8007).
 * 3. Relax the page back to RW with EMODPE from inside the enclave
 *    (entered through the second TCS, since the first thread is stuck
 *    on the fault), then ERESUME the faulted thread and verify the
 *    write completes.
 */
TEST_F(enclave, epcm_permissions)
{
	struct sgx_enclave_restrict_permissions restrict_ioc;
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	struct encl_op_emodpe emodpe_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Probe SGX2 ioctl support with an invalid (zeroed) request;
	 * skip if unsupported.
	 */
	memset(&restrict_ioc, 0, sizeof(restrict_ioc));

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
		    &restrict_ioc);
	errno_save = ret == -1 ? errno : 0;

	ASSERT_EQ(ret, -1);

	if (errno_save == ENOTTY)
		SKIP(return,
		     "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
	else if (errno_save == ENODEV)
		SKIP(return, "System does not support SGX2");

	/* Second page of the data segment. */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/* Sanity check: the page starts out writable. */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Restrict the page's EPCM permissions to read-only. */
	memset(&restrict_ioc, 0, sizeof(restrict_ioc));

	restrict_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	restrict_ioc.length = PAGE_SIZE;
	restrict_ioc.permissions = SGX_SECINFO_R;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
		    &restrict_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(restrict_ioc.result, 0);
	EXPECT_EQ(restrict_ioc.count, 4096);

	/* The enclave must EACCEPT the permission restriction. */
	eaccept_op.epc_addr = data_start;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_REG | SGX_SECINFO_PR;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* A write must now fault with the SGX bit set in the #PF error code. */
	put_addr_op.value = MAGIC2;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8007);
	EXPECT_EQ(self->run.exception_addr, data_start);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/* Enter via the second TCS to run EMODPE and restore RW. */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	emodpe_op.epc_addr = data_start;
	emodpe_op.flags = SGX_SECINFO_R | SGX_SECINFO_W;
	emodpe_op.header.type = ENCL_OP_EMODPE;

	EXPECT_EQ(ENCL_CALL(&emodpe_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Back to the first TCS to resume the faulted write. */
	self->run.tcs = self->encl.encl_base;

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
					 ERESUME, 0, 0,
					 &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC2);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}
/*
 * Dynamically add a page to an initialized enclave: touching a mapped
 * but not-yet-added page faults; after the enclave EACCEPTs the
 * pending page (triggering EAUG in the kernel), resuming the faulted
 * access succeeds and the page is usable.
 */
TEST_F(enclave, augment)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	void *addr;
	int i;

	if (!sgx2_supported())
		SKIP(return, "SGX2 not supported");

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Existing segments end at total_size; the new page goes right after. */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/* There must be room inside the enclave range for one more page. */
	EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

	addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
		    PROT_READ | PROT_WRITE | PROT_EXEC,
		    MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/* First touch faults: the page is not in the enclave yet. */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)addr;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_addr, (unsigned long)addr);

	/* Error code 0x6 (not-present write) means no EAUG support — skip. */
	if (self->run.exception_error_code == 0x6) {
		munmap(addr, PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EQ(self->run.exception_error_code, 0x8007);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/* EACCEPT the pending page via the second TCS (first is faulted). */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	eaccept_op.epc_addr = self->encl.encl_base + total_size;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Resume the faulted write; it should complete now. */
	self->run.tcs = self->encl.encl_base;

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
					 ERESUME, 0, 0,
					 &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)addr;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, PAGE_SIZE);
}
/*
 * Dynamically add a page to an initialized enclave, but let the EACCEPT
 * itself (rather than a prior data access) trigger the kernel's EAUG of
 * the new page; then verify the page is usable.
 */
TEST_F(enclave, augment_via_eaccept)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	void *addr;
	int i;

	if (!sgx2_supported())
		SKIP(return, "SGX2 not supported");

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* The new page is placed immediately after the existing segments. */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

	addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED,
		    self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/* EACCEPT of the unbacked page should fault the kernel into EAUG. */
	eaccept_op.epc_addr = self->encl.encl_base + total_size;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	/* A read fault (error code 4) at the new page means no EAUG support. */
	if (self->run.exception_vector == 14 &&
	    self->run.exception_error_code == 4 &&
	    self->run.exception_addr == self->encl.encl_base + total_size) {
		munmap(addr, PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Round trip through the newly added page. */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)addr;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)addr;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, PAGE_SIZE);
}
/*
 * Full dynamic-TCS life cycle: add three pages (stack, TCS, SSA) to a
 * running enclave, initialize and convert the middle page to a TCS,
 * enter through it, then trim, EACCEPT and remove all three pages and
 * finally re-add one as a regular page to show the space is reusable.
 */
TEST_F(enclave, tcs_create)
{
	struct encl_op_init_tcs_page init_tcs_page_op;
	struct sgx_enclave_remove_pages remove_ioc;
	struct encl_op_get_from_addr get_addr_op;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_get_from_buf get_buf_op;
	struct encl_op_put_to_buf put_buf_op;
	void *addr, *tcs, *stack_end, *ssa;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	uint64_t val_64;
	int errno_save;
	int ret, i;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
				    _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Probe SGX2 ioctl support with an invalid (zeroed) request;
	 * skip if unsupported.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/* The zeroed request itself must have been rejected. */
	EXPECT_EQ(ret, -1);

	/* The three new pages are placed right after the existing segments. */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	EXPECT_LT(total_size + 3 * PAGE_SIZE, self->encl.encl_size);

	addr = mmap((void *)self->encl.encl_base + total_size, 3 * PAGE_SIZE,
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
		    self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/* Layout: stack page, then TCS page, then SSA page. */
	stack_end = (void *)self->encl.encl_base + total_size;
	tcs = (void *)self->encl.encl_base + total_size + PAGE_SIZE;
	ssa = (void *)self->encl.encl_base + total_size + 2 * PAGE_SIZE;

	/* EACCEPT the stack page as pending regular RW (triggers EAUG). */
	eaccept_op.epc_addr = (unsigned long)stack_end;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	/* A read fault at the new page means no EAUG support — skip. */
	if (self->run.exception_vector == 14 &&
	    self->run.exception_error_code == 4 &&
	    self->run.exception_addr == (unsigned long)stack_end) {
		munmap(addr, 3 * PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Likewise for the SSA page ... */
	eaccept_op.epc_addr = (unsigned long)ssa;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* ... and the future TCS page (still a regular page at this point). */
	eaccept_op.epc_addr = (unsigned long)tcs;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Fill in the TCS fields from inside the enclave while it is still RW. */
	val_64 = encl_get_entry(&self->encl, "encl_dyn_entry");
	EXPECT_NE(val_64, 0);

	init_tcs_page_op.tcs_page = (unsigned long)tcs;
	/* SSA offset is enclave-relative, unlike the linear tcs_page address. */
	init_tcs_page_op.ssa = (unsigned long)total_size + 2 * PAGE_SIZE;
	init_tcs_page_op.entry = val_64;
	init_tcs_page_op.header.type = ENCL_OP_INIT_TCS_PAGE;

	EXPECT_EQ(ENCL_CALL(&init_tcs_page_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Convert the initialized page to PT_TCS from the host side. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = total_size + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TCS;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 4096);

	/* EACCEPT the type change from inside the enclave. */
	eaccept_op.epc_addr = (unsigned long)tcs;
	eaccept_op.flags = SGX_SECINFO_TCS | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Enter through the brand-new TCS and do a buffer round trip. */
	self->run.tcs = (unsigned long)tcs;

	put_buf_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_buf_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_buf_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_buf_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_buf_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_buf_op, &self->run, true), 0);

	EXPECT_EQ(get_buf_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Now tear the three pages down again: trim them ... */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = total_size;
	modt_ioc.length = 3 * PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 3 * PAGE_SIZE);

	/* ... EACCEPT each trim via the enclave's original TCS ... */
	self->run.tcs = self->encl.encl_base;

	eaccept_op.epc_addr = (unsigned long)stack_end;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	eaccept_op.epc_addr = (unsigned long)tcs;
	eaccept_op.ret = 0;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	eaccept_op.epc_addr = (unsigned long)ssa;
	eaccept_op.ret = 0;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* ... and remove them from the enclave. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = total_size;
	remove_ioc.length = 3 * PAGE_SIZE;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, 3 * PAGE_SIZE);

	/* Re-add the former TCS page as a regular page to prove reuse works. */
	eaccept_op.epc_addr = (unsigned long)tcs;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)tcs;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)tcs;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, 3 * PAGE_SIZE);
}
/*
 * Removing a trimmed page without the enclave having EACCEPTed the trim
 * must fail: SGX_IOC_ENCLAVE_REMOVE_PAGES returns EPERM with count 0.
 */
TEST_F(enclave, remove_added_page_no_eaccept)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct encl_op_get_from_addr get_addr_op;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_put_to_addr put_addr_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Probe SGX2 ioctl support with an invalid (zeroed) request;
	 * skip if unsupported.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/* The zeroed request itself must have been rejected. */
	EXPECT_EQ(ret, -1);

	/* Second page of the data segment; verify it is in active use. */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Trim the page, but deliberately skip the enclave's EACCEPT. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 4096);

	/* Removal without EACCEPT must be refused with EPERM. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	remove_ioc.length = PAGE_SIZE;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno_save, EPERM);
	EXPECT_EQ(remove_ioc.count, 0);
}
/*
 * Change the type of an in-use enclave page to PT_TRIM via
 * SGX_IOC_ENCLAVE_MODIFY_TYPES and then, without running EACCEPT or
 * completing the removal, attempt to access the page from within the
 * enclave. The access must fault: the page is no longer accessible once
 * marked for trimming even though it is still mapped.
 */
TEST_F(enclave, remove_added_page_invalid_access)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct sgx_enclave_modify_types ioc;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Probe for SGX2 hardware and kernel support with a zeroed (invalid)
	 * request: ENOTTY/ENODEV mean unsupported, any other failure means
	 * the ioctl() exists and merely rejected the bad parameters.
	 */
	memset(&ioc, 0, sizeof(ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during the sanity check;
	 * the command is expected to fail.
	 */
	EXPECT_EQ(ret, -1);

	/* The page targeted for trimming is the second data page. */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Sanity check that the page at @data_start is readable and writable
	 * from within the enclave before marking it for trimming.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Mark the page PT_TRIM, making it inaccessible to the enclave. */
	memset(&ioc, 0, sizeof(ioc));

	ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	ioc.length = PAGE_SIZE;
	ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, PAGE_SIZE);

	/*
	 * Re-read the page (get_addr_op.addr/header are still set from
	 * above). The access should now trigger a page fault (#PF, vector
	 * 14) on the ERESUME path; 0x8005 is the x86 #PF error code with
	 * the present, user and SGX bits set.
	 */
	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8005);
	EXPECT_EQ(self->run.exception_addr, data_start);
}
/*
 * Change the type of an in-use enclave page to PT_TRIM, have the enclave
 * EACCEPT the change, but never complete the removal with
 * SGX_IOC_ENCLAVE_REMOVE_PAGES. A subsequent access from within the
 * enclave must still fault: EACCEPT of a trimmed page does not make the
 * page accessible again.
 */
TEST_F(enclave, remove_added_page_invalid_access_after_eaccept)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct sgx_enclave_modify_types ioc;
	struct encl_op_eaccept eaccept_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Probe for SGX2 hardware and kernel support with a zeroed (invalid)
	 * request: ENOTTY/ENODEV mean unsupported, any other failure means
	 * the ioctl() exists and merely rejected the bad parameters.
	 */
	memset(&ioc, 0, sizeof(ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during the sanity check;
	 * the command is expected to fail.
	 */
	EXPECT_EQ(ret, -1);

	/* The page targeted for trimming is the second data page. */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Sanity check that the page at @data_start is readable and writable
	 * from within the enclave before marking it for trimming.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Mark the page PT_TRIM, making it inaccessible to the enclave. */
	memset(&ioc, 0, sizeof(ioc));

	ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	ioc.length = PAGE_SIZE;
	ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, PAGE_SIZE);

	/* The enclave accepts the type change (EACCEPT of PT_TRIM|MODIFIED). */
	eaccept_op.epc_addr = data_start;
	eaccept_op.ret = 0;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * Re-read the page (get_addr_op.addr/header are still set from
	 * above). The access should trigger a page fault (#PF, vector 14)
	 * on the ERESUME path; 0x8005 is the x86 #PF error code with the
	 * present, user and SGX bits set.
	 */
	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8005);
	EXPECT_EQ(self->run.exception_addr, data_start);
}
/*
 * Remove a page that was never accessed by the enclave. The full removal
 * flow — SGX_IOC_ENCLAVE_MODIFY_TYPES to PT_TRIM, EACCEPT from within the
 * enclave, then SGX_IOC_ENCLAVE_REMOVE_PAGES — must succeed even though the
 * page was never read or written after being added.
 */
TEST_F(enclave, remove_untouched_page)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_eaccept eaccept_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	/*
	 * Probe for SGX2 hardware and kernel support with a zeroed (invalid)
	 * request: ENOTTY/ENODEV mean unsupported, any other failure means
	 * the ioctl() exists and merely rejected the bad parameters.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during the sanity check;
	 * the command is expected to fail.
	 */
	EXPECT_EQ(ret, -1);

	/* SGX2 is supported by kernel and hardware, test can proceed. */
	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* The page targeted for removal is the second data page. */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/* Start removal by requesting a change of page type to PT_TRIM. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, PAGE_SIZE);

	/* The enclave accepts the type change (EACCEPT of PT_TRIM|MODIFIED). */
	eaccept_op.epc_addr = data_start;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Handshake complete: removing the page must now succeed fully. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	remove_ioc.length = PAGE_SIZE;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, PAGE_SIZE);
}
TEST_HARNESS_MAIN