#define _GNU_SOURCE
#include "kselftest_harness.h"
#include <asm-generic/mman.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/fs.h>
#include <linux/limits.h>
#include <linux/userfaultfd.h>
#include <setjmp.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>
#include "vm_util.h"
#include "../pidfd/pidfd.h"
/* Set while try_access_buf() has armed the recovery jump buffer below. */
static volatile sig_atomic_t signal_jump_set;
/* Jump target used to recover from an expected SIGSEGV. */
static sigjmp_buf signal_jmp_buf;
/* The kind of backing store a fixture variant maps from. */
enum backing_type {
ANON_BACKED,
SHMEM_BACKED,
LOCAL_FILE_BACKED,
};
/* Per-test fixture state shared by every guard region test. */
FIXTURE(guard_regions)
{
/* System page size, cached in FIXTURE_SETUP(). */
unsigned long page_size;
/* Path of the backing file (file-backed variant only). */
char path[PATH_MAX];
/* Backing object fd (shmem/file variants only). */
int fd;
};
/* Each variant runs every test against a different backing store. */
FIXTURE_VARIANT(guard_regions)
{
enum backing_type backing;
};
/* Anonymous (MAP_ANON | MAP_PRIVATE) mappings. */
FIXTURE_VARIANT_ADD(guard_regions, anon)
{
.backing = ANON_BACKED,
};
/* Shared mappings of a memfd (shmem-backed). */
FIXTURE_VARIANT_ADD(guard_regions, shmem)
{
.backing = SHMEM_BACKED,
};
/* Shared mappings of a local temporary file. */
FIXTURE_VARIANT_ADD(guard_regions, file)
{
.backing = LOCAL_FILE_BACKED,
};
/*
 * Both truly anonymous and shmem-backed variants count as "anon" here;
 * only the local-file variant does not.
 */
static bool is_anon_backed(const FIXTURE_VARIANT(guard_regions) * variant)
{
	return variant->backing == ANON_BACKED ||
	       variant->backing == SHMEM_BACKED;
}
/*
 * mmap() wrapper that picks flags, fd and offset appropriate to the
 * variant's backing: anon mappings are MAP_PRIVATE | MAP_ANON with the
 * offset ignored, while shmem/file variants map self->fd MAP_SHARED.
 */
static void *mmap_(FIXTURE_DATA(guard_regions) * self,
		   const FIXTURE_VARIANT(guard_regions) * variant,
		   void *addr, size_t length, int prot, int extra_flags,
		   off_t offset)
{
	int map_flags = extra_flags;
	int map_fd;

	if (variant->backing == ANON_BACKED) {
		map_flags |= MAP_PRIVATE | MAP_ANON;
		map_fd = -1;
		/* No backing object, so the offset is meaningless. */
		offset = 0;
	} else if (variant->backing == SHMEM_BACKED ||
		   variant->backing == LOCAL_FILE_BACKED) {
		map_flags |= MAP_SHARED;
		map_fd = self->fd;
	} else {
		ksft_exit_fail();
	}

	return mmap(addr, length, prot, map_flags, map_fd, offset);
}
/* Thin wrapper: create a userfaultfd(2) with the given flags. */
static int userfaultfd(int flags)
{
return syscall(SYS_userfaultfd, flags);
}
/*
 * SIGSEGV handler: when a test has armed the jump buffer, resume at the
 * sigsetjmp() site in try_access_buf(); otherwise return and let the
 * fault take its normal (fatal) course.
 */
static void handle_fatal(int c)
{
	if (signal_jump_set)
		siglongjmp(signal_jmp_buf, c);
}
/* Thin wrapper for process_madvise(2); returns bytes advised or -1. */
static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec,
size_t n, int advice, unsigned int flags)
{
return syscall(__NR_process_madvise, pidfd, iovec, n, advice, flags);
}
/*
 * Attempt to read *ptr (or write 'x' to it when write is set), recovering
 * from any SIGSEGV via sigsetjmp()/siglongjmp(). Returns true if the
 * access succeeded without faulting.
 */
static bool try_access_buf(char *ptr, bool write)
{
bool failed;
/* Arm the SIGSEGV handler's jump target. */
signal_jump_set = true;
/* A non-zero return here means we longjmp'd back from the handler. */
failed = sigsetjmp(signal_jmp_buf, 0) != 0;
if (!failed) {
if (write)
*ptr = 'x';
else
FORCE_READ(*ptr);
}
signal_jump_set = false;
return !failed;
}
/* Returns true if *ptr can be read without faulting. */
static bool try_read_buf(char *ptr)
{
return try_access_buf(ptr, false);
}
/* Returns true if *ptr can be written ('x' is stored) without faulting. */
static bool try_write_buf(char *ptr)
{
return try_access_buf(ptr, true);
}
/* Returns true only if *ptr can be both read and written without faulting. */
static bool try_read_write_buf(char *ptr)
{
	if (!try_read_buf(ptr))
		return false;

	return try_write_buf(ptr);
}
/*
 * Install handle_fatal() as the SIGSEGV handler. SA_NODEFER leaves the
 * signal unblocked inside the handler, which matters because we
 * siglongjmp() out of it instead of returning.
 */
static void setup_sighandler(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = &handle_fatal;
	act.sa_flags = SA_NODEFER;
	sigemptyset(&act.sa_mask);

	if (sigaction(SIGSEGV, &act, NULL))
		ksft_exit_fail_perror("sigaction");
}
/* Restore the default SIGSEGV disposition; best-effort, errors ignored. */
static void teardown_sighandler(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = SIG_DFL;
	act.sa_flags = SA_NODEFER;
	sigemptyset(&act.sa_mask);

	sigaction(SIGSEGV, &act, NULL);
}
/*
 * Create a unique temporary file whose name starts with prefix, storing
 * the resolved name in path (a PATH_MAX buffer). Exits the test run on
 * failure; otherwise returns the open fd.
 */
static int open_file(const char *prefix, char *path)
{
	int ret;

	snprintf(path, PATH_MAX, "%sguard_regions_test_file_XXXXXX", prefix);
	ret = mkstemp(path);
	if (ret < 0)
		ksft_exit_fail_perror("mkstemp");

	return ret;
}
/*
 * Fill num_pages pages at ptr so page i holds 'a' + (i % 26) in every
 * byte. The check_pattern*() helpers verify this exact layout.
 */
static void set_pattern(char *ptr, size_t num_pages, size_t page_size)
{
	size_t pg;

	for (pg = 0; pg < num_pages; pg++)
		memset(ptr + pg * page_size, 'a' + (pg % 26), page_size);
}
/*
 * Verify num_pages pages of ptr, starting pgoff pages in, still hold the
 * set_pattern() layout. Note the expected character is derived from the
 * absolute page index within the buffer ('a' + (page % 26)), not from
 * the position relative to pgoff.
 */
static bool check_pattern_offset(char *ptr, size_t num_pages, size_t page_size,
size_t pgoff)
{
	size_t pg, byte;

	for (pg = pgoff; pg < pgoff + num_pages; pg++) {
		const char expected = 'a' + (pg % 26);
		const char *page = &ptr[pg * page_size];

		for (byte = 0; byte < page_size; byte++) {
			if (page[byte] != expected)
				return false;
		}
	}

	return true;
}
/* Check the whole buffer against the set_pattern() layout. */
static bool check_pattern(char *ptr, size_t num_pages, size_t page_size)
{
return check_pattern_offset(ptr, num_pages, page_size, 0);
}
/* Return true if every one of the size bytes at buf equals chr. */
static bool is_buf_eq(char *buf, size_t size, char chr)
{
	const char *p = buf;
	const char *const end = buf + size;

	while (p < end) {
		if (*p++ != chr)
			return false;
	}

	return true;
}
/*
 * Probe whether the local filesystem implements mmap sanely enough for the
 * file-backed tests: map 10 pages, punch a one-page hole, map over the
 * hole again, and check /proc/self/maps shows a single merged VMA spanning
 * the original range. Always true for non-local-file variants.
 *
 * Fixed: the probe mappings were leaked on every path, and the procmap fd
 * was leaked on the failure paths.
 */
static bool local_fs_has_sane_mmap(FIXTURE_DATA(guard_regions) * self,
				   const FIXTURE_VARIANT(guard_regions) * variant)
{
	const unsigned long page_size = self->page_size;
	struct procmap_fd procmap;
	bool ret = false;
	char *ptr, *ptr2;

	if (variant->backing != LOCAL_FILE_BACKED)
		return true;

	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
	if (ptr == MAP_FAILED)
		return false;

	/* Punch a hole in the middle, then map the same file page back over it. */
	munmap(&ptr[5 * page_size], page_size);
	ptr2 = mmap_(self, variant, &ptr[5 * page_size], page_size, PROT_READ | PROT_WRITE,
		     MAP_FIXED, 5 * page_size);
	if (ptr2 == MAP_FAILED)
		goto out_unmap;

	if (open_self_procmap(&procmap) != 0)
		goto out_unmap;

	/* A sane implementation merges the re-mapped page back into one VMA. */
	if (find_vma_procmap(&procmap, ptr) &&
	    procmap.query.vma_start == (unsigned long)ptr &&
	    procmap.query.vma_end == (unsigned long)ptr + 10 * page_size)
		ret = true;

	close_procmap(&procmap);
out_unmap:
	/* Unmapping the full range also covers ptr2 (and tolerates holes). */
	munmap(ptr, 10 * page_size);
	return ret;
}
/*
 * Per-test setup: cache the page size, install the SIGSEGV recovery
 * handler and, for shmem/file variants, create a 100-page backing object.
 *
 * NOTE(review): for ANON_BACKED neither fd nor path is set here; the
 * teardown returns early for that variant, so they are never read.
 * Fixed: removed a stray ';' after the closing brace.
 */
FIXTURE_SETUP(guard_regions)
{
	self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
	setup_sighandler();

	switch (variant->backing) {
	case ANON_BACKED:
		/* Anonymous mappings need no backing object. */
		return;
	case LOCAL_FILE_BACKED:
		self->fd = open_file("", self->path);
		break;
	case SHMEM_BACKED:
		self->fd = memfd_create(self->path, 0);
		break;
	}

	/* Size the backing object so every test can map from it. */
	ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0);
}
/*
 * Per-test teardown: restore the default SIGSEGV handler and release the
 * backing object. Anonymous variants set up no backing state, so return
 * before touching fd/path (which setup never initialised for them).
 */
FIXTURE_TEARDOWN_PARENT(guard_regions)
{
teardown_sighandler();
if (variant->backing == ANON_BACKED)
return;
if (self->fd >= 0)
close(self->fd);
if (self->path[0] != '\0')
unlink(self->path);
}
/*
 * Sanity-check the basic guard region lifecycle: installed guards fault on
 * access, unguarded pages do not, installs/removes operate on exactly the
 * requested range, and removal is idempotent.
 */
TEST_F(guard_regions, basic)
{
	const unsigned long NUM_PAGES = 10;
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	ptr = mmap_(self, variant, NULL, NUM_PAGES * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Trivially assert we can touch the first page. */
	ASSERT_TRUE(try_read_write_buf(ptr));

	/* Guard the first page. */
	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);

	/* It must now fault... */
	ASSERT_FALSE(try_read_write_buf(ptr));
	/* ...while the rest of the mapping stays accessible. */
	for (i = 1; i < NUM_PAGES; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	/* Also guard the last page. */
	ASSERT_EQ(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
			  MADV_GUARD_INSTALL), 0);
	ASSERT_FALSE(try_read_write_buf(ptr));
	ASSERT_FALSE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));

	/*
	 * Remove each guard and check access is restored. (Changed from
	 * ASSERT_FALSE(madvise(...)) to ASSERT_EQ(..., 0) for consistency
	 * with every other madvise check in this file - same semantics.)
	 */
	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_REMOVE), 0);
	ASSERT_TRUE(try_read_write_buf(ptr));
	ASSERT_EQ(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
			  MADV_GUARD_REMOVE), 0);
	ASSERT_TRUE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));

	/* Guard the first 3 pages in one call; only those fault. */
	ASSERT_EQ(madvise(ptr, 3 * page_size, MADV_GUARD_INSTALL), 0);
	for (i = 0; i < 3; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}
	for (i = 3; i < NUM_PAGES; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	/* Removing over the whole range clears all guards... */
	ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
	for (i = 0; i < NUM_PAGES; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	/* ...and a second, redundant removal is harmless. */
	ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);

	/* try_read_write_buf() stored 'x' in every page above. */
	for (i = 0; i < NUM_PAGES * page_size; i += page_size) {
		char chr = ptr[i];

		ASSERT_EQ(chr, 'x');
	}

	ASSERT_EQ(munmap(ptr, NUM_PAGES * page_size), 0);
}
/*
 * Assert behaviour over multiple VMAs with gaps: madvise() across unmapped
 * holes fails with ENOMEM, but the guard install/remove is still applied
 * to the mapped portions it reached.
 */
TEST_F(guard_regions, multi_vma)
{
const unsigned long page_size = self->page_size;
char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3;
int i;
/* Reserve a 100 page region over which we can install VMAs. */
ptr_region = mmap_(self, variant, NULL, 100 * page_size,
PROT_NONE, 0, 0);
ASSERT_NE(ptr_region, MAP_FAILED);
/* Place a VMA of 10 pages size at the start of the region. */
ptr1 = mmap_(self, variant, ptr_region, 10 * page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr1, MAP_FAILED);
/* Place a VMA of 5 pages size 50 pages into the region. */
ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr2, MAP_FAILED);
/* Place a VMA of 20 pages size at the end of the region. */
ptr3 = mmap_(self, variant, &ptr_region[80 * page_size], 20 * page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr3, MAP_FAILED);
/* Unmap the remaining reserved gaps between the VMAs. */
ASSERT_EQ(munmap(&ptr_region[10 * page_size], 40 * page_size), 0);
ASSERT_EQ(munmap(&ptr_region[55 * page_size], 25 * page_size), 0);
/*
 * Installing across the whole region hits the unmapped gaps and fails
 * with ENOMEM...
 */
ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), -1);
ASSERT_EQ(errno, ENOMEM);
/* ...but the mapped portions were still guarded. */
for (i = 0; i < 10; i++) {
char *curr = &ptr1[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
for (i = 0; i < 5; i++) {
char *curr = &ptr2[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
for (i = 0; i < 20; i++) {
char *curr = &ptr3[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
/* The same holds for removal: ENOMEM, yet the guards are cleared. */
ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), -1);
ASSERT_EQ(errno, ENOMEM);
for (i = 0; i < 10; i++) {
char *curr = &ptr1[i * page_size];
ASSERT_TRUE(try_read_write_buf(curr));
}
for (i = 0; i < 5; i++) {
char *curr = &ptr2[i * page_size];
ASSERT_TRUE(try_read_write_buf(curr));
}
for (i = 0; i < 20; i++) {
char *curr = &ptr3[i * page_size];
ASSERT_TRUE(try_read_write_buf(curr));
}
/* Fill the gaps so the region is fully mapped again. */
ptr = mmap_(self, variant, &ptr_region[10 * page_size], 40 * page_size,
PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
ASSERT_NE(ptr, MAP_FAILED);
ptr = mmap_(self, variant, &ptr_region[55 * page_size], 25 * page_size,
PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* With no gaps, install/remove across all VMAs now succeeds. */
ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), 0);
for (i = 0; i < 100; i++) {
char *curr = &ptr_region[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), 0);
for (i = 0; i < 100; i++) {
char *curr = &ptr_region[i * page_size];
ASSERT_TRUE(try_read_write_buf(curr));
}
ASSERT_EQ(munmap(ptr_region, 100 * page_size), 0);
}
/*
 * Assert that process_madvise() can install and remove guard regions
 * across multiple discontiguous single-page ranges (one iovec entry each)
 * spanning three separate VMAs, and reports the full advised length.
 */
TEST_F(guard_regions, process_madvise)
{
	const unsigned long page_size = self->page_size;
	char *ptr_region, *ptr1, *ptr2, *ptr3;
	ssize_t count;
	struct iovec vec[6];

	/* Reserve a region to map into. */
	ptr_region = mmap_(self, variant, NULL, 100 * page_size,
			   PROT_NONE, 0, 0);
	ASSERT_NE(ptr_region, MAP_FAILED);

	/* 10 pages, one page into the reserved region. */
	ptr1 = mmap_(self, variant, &ptr_region[page_size], 10 * page_size,
		     PROT_READ | PROT_WRITE, MAP_FIXED | MAP_POPULATE, 0);
	ASSERT_NE(ptr1, MAP_FAILED);
	/* Guard the first and last page of each VMA. */
	vec[0].iov_base = ptr1;
	vec[0].iov_len = page_size;
	vec[1].iov_base = &ptr1[9 * page_size];
	vec[1].iov_len = page_size;

	/* 5 pages, 50 pages into the reserved region. */
	ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	vec[2].iov_base = ptr2;
	vec[2].iov_len = page_size;
	vec[3].iov_base = &ptr2[4 * page_size];
	vec[3].iov_len = page_size;

	/* 20 pages, 79 pages into the reserved region. */
	ptr3 = mmap_(self, variant, &ptr_region[79 * page_size], 20 * page_size,
		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
	ASSERT_NE(ptr3, MAP_FAILED);
	vec[4].iov_base = ptr3;
	vec[4].iov_len = page_size;
	vec[5].iov_base = &ptr3[19 * page_size];
	vec[5].iov_len = page_size;

	/* Drop the remaining reserved space around the three VMAs. */
	ASSERT_EQ(munmap(ptr_region, page_size), 0);
	ASSERT_EQ(munmap(&ptr_region[11 * page_size], 39 * page_size), 0);
	ASSERT_EQ(munmap(&ptr_region[55 * page_size], 24 * page_size), 0);
	ASSERT_EQ(munmap(&ptr_region[99 * page_size], page_size), 0);

	/* Install all six guards in one call. */
	count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_INSTALL, 0);
	if (count == -1 && errno == EPERM)
		SKIP(return, "No process_madvise() permissions, try running as root.\n");
	/* process_madvise() reports the number of bytes advised. */
	ASSERT_EQ(count, 6 * page_size);

	/* All six pages must now fault. */
	ASSERT_FALSE(try_read_write_buf(ptr1));
	ASSERT_FALSE(try_read_write_buf(&ptr1[9 * page_size]));
	ASSERT_FALSE(try_read_write_buf(ptr2));
	ASSERT_FALSE(try_read_write_buf(&ptr2[4 * page_size]));
	ASSERT_FALSE(try_read_write_buf(ptr3));
	ASSERT_FALSE(try_read_write_buf(&ptr3[19 * page_size]));

	/* Remove the guards the same way. */
	count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_REMOVE, 0);
	/* Fixed: this result was previously assigned but never checked. */
	ASSERT_EQ(count, 6 * page_size);

	/* All six pages are accessible again. */
	ASSERT_TRUE(try_read_write_buf(ptr1));
	ASSERT_TRUE(try_read_write_buf(&ptr1[9 * page_size]));
	ASSERT_TRUE(try_read_write_buf(ptr2));
	ASSERT_TRUE(try_read_write_buf(&ptr2[4 * page_size]));
	ASSERT_TRUE(try_read_write_buf(ptr3));
	ASSERT_TRUE(try_read_write_buf(&ptr3[19 * page_size]));

	ASSERT_EQ(munmap(ptr1, 10 * page_size), 0);
	ASSERT_EQ(munmap(ptr2, 5 * page_size), 0);
	ASSERT_EQ(munmap(ptr3, 20 * page_size), 0);
}
/*
 * Assert that munmap() destroys guard markers: fresh mappings placed over
 * previously guarded, then unmapped, ranges must be accessible.
 */
TEST_F(guard_regions, munmap)
{
const unsigned long page_size = self->page_size;
char *ptr, *ptr_new1, *ptr_new2;
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard the first and last pages. */
ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
ASSERT_EQ(madvise(&ptr[9 * page_size], page_size, MADV_GUARD_INSTALL), 0);
ASSERT_FALSE(try_read_write_buf(ptr));
ASSERT_FALSE(try_read_write_buf(&ptr[9 * page_size]));
/* Unmap both guarded pages. */
ASSERT_EQ(munmap(ptr, page_size), 0);
ASSERT_EQ(munmap(&ptr[9 * page_size], page_size), 0);
/* Map fresh memory over the two holes. */
ptr_new1 = mmap_(self, variant, ptr, page_size, PROT_READ | PROT_WRITE,
MAP_FIXED, 0);
ASSERT_NE(ptr_new1, MAP_FAILED);
ptr_new2 = mmap_(self, variant, &ptr[9 * page_size], page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new2, MAP_FAILED);
/* The new mappings must not have inherited the old guard markers. */
ASSERT_TRUE(try_read_write_buf(ptr_new1));
ASSERT_TRUE(try_read_write_buf(ptr_new2));
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that guard markers survive mprotect() permission changes, and
 * that re-installing over an existing guard is harmless.
 */
TEST_F(guard_regions, mprotect)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard the middle two pages. */
ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
MADV_GUARD_INSTALL), 0);
ASSERT_FALSE(try_read_write_buf(&ptr[5 * page_size]));
ASSERT_FALSE(try_read_write_buf(&ptr[6 * page_size]));
/* Drop to read-only; the guards must still fault on read. */
ASSERT_EQ(mprotect(&ptr[5 * page_size], 2 * page_size, PROT_READ), 0);
ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
/* Re-installing over already-guarded pages changes nothing. */
ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
MADV_GUARD_INSTALL), 0);
ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
/* Removal over the whole range restores read access everywhere. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_TRUE(try_read_buf(curr));
}
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that guard markers are preserved when a VMA is split (by munmap
 * or mprotect) and when the pieces are merged back together.
 */
TEST_F(guard_regions, split_merge)
{
const unsigned long page_size = self->page_size;
char *ptr, *ptr_new;
int i;
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard the whole range. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
/* Split the VMA by punching single-page holes at 2, 5 and 8. */
ASSERT_EQ(munmap(&ptr[2 * page_size], page_size), 0);
ASSERT_EQ(munmap(&ptr[5 * page_size], page_size), 0);
ASSERT_EQ(munmap(&ptr[8 * page_size], page_size), 0);
/* The surviving fragments must all still be guarded. */
for (i = 0; i < 2; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
for (i = 2; i < 5; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
for (i = 6; i < 8; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
for (i = 9; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
/* Fill the holes with fresh (unguarded) mappings. */
ptr_new = mmap_(self, variant, &ptr[2 * page_size], page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
ptr_new = mmap_(self, variant, &ptr[5 * page_size], page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
ptr_new = mmap_(self, variant, &ptr[8 * page_size], page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
/* Only the freshly mapped pages 2, 5, 8 are accessible. */
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
bool result = try_read_write_buf(curr);
bool expect_true = i == 2 || i == 5 || i == 8;
ASSERT_TRUE(expect_true ? result : !result);
}
/* Re-guard the whole (now merged) range. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
/* Split via mprotect instead; guards must survive the split... */
ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_buf(curr));
}
/* ...and survive the merge back to read/write. */
ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
PROT_READ | PROT_WRITE), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
/* Split again, remove guards while split, then merge. */
ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_TRUE(try_read_buf(curr));
}
ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
PROT_READ | PROT_WRITE), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_TRUE(try_read_write_buf(curr));
}
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that MADV_DONTNEED leaves guard markers in place: guarded pages
 * keep faulting, while unguarded pages are zapped (anon reads back zero,
 * shared file/shmem mappings keep their backing-store contents).
 */
TEST_F(guard_regions, dontneed)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Populate every page with 'y'. */
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
*curr = 'y';
}
/* Guard every even page. */
for (i = 0; i < 10; i += 2) {
char *curr = &ptr[i * page_size];
int res = madvise(curr, page_size, MADV_GUARD_INSTALL);
ASSERT_EQ(res, 0);
}
/* Zap the whole range. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_DONTNEED), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
bool result = try_read_buf(curr);
if (i % 2 == 0) {
/* Guarded pages still fault. */
ASSERT_FALSE(result);
} else {
ASSERT_TRUE(result);
switch (variant->backing) {
case ANON_BACKED:
/* Anon pages were zapped to zero... */
ASSERT_EQ(*curr, '\0');
break;
default:
/* ...shared mappings still see the stored data. */
ASSERT_EQ(*curr, 'y');
break;
}
}
/* Writes behave the same way: only odd pages succeed. */
result = try_write_buf(&ptr[i * page_size]);
ASSERT_TRUE(i % 2 != 0 ? result : !result);
}
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert guard region interaction with mlock(): installing over mlocked
 * memory is refused with EINVAL, while removing guards from memory that
 * was mlocked after installation succeeds.
 */
TEST_F(guard_regions, mlock)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Populate. */
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
*curr = 'y';
}
/* Lock the range: guard install must now fail with EINVAL. */
ASSERT_EQ(mlock(ptr, 10 * page_size), 0);
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1);
ASSERT_EQ(errno, EINVAL);
/* After munlock, guarding the first half succeeds. */
ASSERT_EQ(munlock(ptr, 10 * page_size), 0);
ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
bool result = try_read_write_buf(curr);
if (i < 5) {
ASSERT_FALSE(result);
} else {
ASSERT_TRUE(result);
/* try_read_write_buf() just stored 'x' here. */
ASSERT_EQ(*curr, 'x');
}
}
/* mlock()ing the unguarded half must not block guard removal. */
ASSERT_EQ(mlock(&ptr[5 * page_size], 5 * page_size), 0);
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_TRUE(try_read_write_buf(curr));
}
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that mremap(MREMAP_FIXED) moves guard markers along with the
 * mapping to its new address.
 */
TEST_F(guard_regions, mremap_move)
{
	const unsigned long page_size = self->page_size;
	char *ptr, *ptr_new;

	/* Map 5 pages. */
	ptr = mmap_(self, variant, NULL, 5 * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Guard the first and last pages and check they fault. */
	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
	ASSERT_FALSE(try_read_write_buf(ptr));
	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));

	/* Reserve a destination range to move the mapping into. */
	ptr_new = mmap_(self, variant, NULL, 5 * page_size, PROT_NONE, 0, 0);
	ASSERT_NE(ptr_new, MAP_FAILED);

	ASSERT_EQ(mremap(ptr, 5 * page_size, 5 * page_size,
			 MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new), ptr_new);

	/* The guard markers must have moved with the mapping. */
	ASSERT_FALSE(try_read_write_buf(ptr_new));
	ASSERT_FALSE(try_read_write_buf(&ptr_new[4 * page_size]));

	/*
	 * Fixed: the munmap() result was previously ignored; assert it like
	 * every other test in this file.
	 */
	ASSERT_EQ(munmap(ptr_new, 5 * page_size), 0);
}
/*
 * Assert that guard markers survive mremap() expansion, both in place and
 * when the expanded mapping is moved elsewhere.
 */
TEST_F(guard_regions, mremap_expand)
{
const unsigned long page_size = self->page_size;
char *ptr, *ptr_new;
/* Map 10 pages, then free the top 5 so in-place expansion has room. */
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);
/* Guard the first and last pages of the remaining 5 page span. */
ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
ASSERT_FALSE(try_read_write_buf(ptr));
ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
/* Expand in place: guards must be retained. */
ptr = mremap(ptr, 5 * page_size, 10 * page_size, 0);
ASSERT_NE(ptr, MAP_FAILED);
ASSERT_FALSE(try_read_write_buf(ptr));
ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
/* Reserve a destination and expand with a move: guards still retained. */
ptr_new = mmap_(self, variant, NULL, 20 * page_size, PROT_NONE, 0, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
ptr = mremap(ptr, 10 * page_size, 20 * page_size,
MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new);
ASSERT_EQ(ptr, ptr_new);
ASSERT_FALSE(try_read_write_buf(ptr));
ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
/* NOTE(review): munmap() result unchecked here, unlike most tests. */
munmap(ptr, 20 * page_size);
}
/*
 * Assert mremap() shrink behaviour: a guard at the start survives the
 * shrink, the guard at the truncated end is discarded, and re-expanding
 * does not resurrect it.
 */
TEST_F(guard_regions, mremap_shrink)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
ptr = mmap_(self, variant, NULL, 5 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard both ends of the 5 page span. */
ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
ASSERT_FALSE(try_read_write_buf(ptr));
ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
/* Shrink to 3 pages, dropping the end guard with the unmapped tail. */
ptr = mremap(ptr, 5 * page_size, 3 * page_size, MREMAP_MAYMOVE);
ASSERT_NE(ptr, MAP_FAILED);
/* Start guard survives, the rest is accessible. */
ASSERT_FALSE(try_read_write_buf(ptr));
for (i = 1; i < 3; i++) {
char *curr = &ptr[i * page_size];
ASSERT_TRUE(try_read_write_buf(curr));
}
/* Grow back to 5 pages: the dropped end guard must not reappear. */
ptr = mremap(ptr, 3 * page_size, 5 * page_size, 0);
ASSERT_NE(ptr, MAP_FAILED);
ASSERT_FALSE(try_read_write_buf(ptr));
for (i = 1; i < 5; i++) {
char *curr = &ptr[i * page_size];
ASSERT_TRUE(try_read_write_buf(curr));
}
/* NOTE(review): munmap() result unchecked here, unlike most tests. */
munmap(ptr, 5 * page_size);
}
/*
 * Assert that guard regions are inherited across fork() and that the
 * child removing them in its own address space does not affect the
 * parent's guards.
 */
TEST_F(guard_regions, fork)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	pid_t pid;
	int i;

	/* Map 10 pages and guard the first 5. */
	ptr = mmap_(self, variant, NULL, 10 * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);

	pid = fork();
	ASSERT_NE(pid, -1);
	if (!pid) {
		/* Child: the inherited guards must be in effect. */
		for (i = 0; i < 10; i++) {
			char *curr = &ptr[i * page_size];
			bool result = try_read_write_buf(curr);

			ASSERT_TRUE(i >= 5 ? result : !result);
		}

		/* Remove all guards in the child only. */
		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
		exit(0);
	}

	/* Fixed: previously the waitpid() result was ignored. */
	ASSERT_EQ(waitpid(pid, NULL, 0), pid);

	/* Parent: the child's removal must not have leaked across. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];
		bool result = try_read_write_buf(curr);

		ASSERT_TRUE(i >= 5 ? result : !result);
	}

	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert guard install/remove interaction with copy-on-write after fork():
 * the child's guard install + remove zaps its private pages (they read
 * back zero), while the parent's data is untouched.
 */
TEST_F(guard_regions, fork_cow)
{
const unsigned long page_size = self->page_size;
char *ptr;
pid_t pid;
int i;
if (variant->backing != ANON_BACKED)
SKIP(return, "CoW only supported on anon mappings");
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Populate with a byte-wise pattern. */
for (i = 0; i < 10 * page_size; i++) {
char chr = 'a' + (i % 26);
ptr[i] = chr;
}
pid = fork();
ASSERT_NE(pid, -1);
if (!pid) {
/* Child: initially sees the parent's data via CoW. */
for (i = 0; i < 10 * page_size; i++) {
char expected = 'a' + (i % 26);
char actual = ptr[i];
ASSERT_EQ(actual, expected);
}
/* Install then remove guards over the whole range. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
/* The child's pages were zapped: reads fault in zero pages. */
for (i = 0; i < 10 * page_size; i++) {
char actual = ptr[i];
ASSERT_EQ(actual, '\0');
}
exit(0);
}
waitpid(pid, NULL, 0);
/* Parent: the original pattern must be intact. */
for (i = 0; i < 10 * page_size; i++) {
char expected = 'a' + (i % 26);
char actual = ptr[i];
ASSERT_EQ(actual, expected);
}
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that MADV_WIPEONFORK clears guard regions in the child (the
 * whole range is accessible there) while leaving the parent's guards in
 * effect.
 */
TEST_F(guard_regions, fork_wipeonfork)
{
const unsigned long page_size = self->page_size;
char *ptr;
pid_t pid;
int i;
if (variant->backing != ANON_BACKED)
SKIP(return, "Wipe on fork only supported on anon mappings");
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Mark the range wipe-on-fork, then guard the first half. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_WIPEONFORK), 0);
ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
pid = fork();
ASSERT_NE(pid, -1);
if (!pid) {
/* Child: the wipe removed the guards along with the data. */
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_TRUE(try_read_write_buf(curr));
}
exit(0);
}
waitpid(pid, NULL, 0);
/* Parent: the guards on the first half remain. */
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
bool result = try_read_write_buf(curr);
ASSERT_TRUE(i >= 5 ? result : !result);
}
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that MADV_FREE does not disturb guard markers: the range stays
 * guarded both before and after the lazyfree.
 */
TEST_F(guard_regions, lazyfree)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
if (variant->backing != ANON_BACKED)
SKIP(return, "MADV_FREE only supported on anon mappings");
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard the whole range and confirm it faults. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
/* Lazyfree the range; the guards must still be in effect. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_FREE), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that MADV_POPULATE_READ/WRITE refuse to populate guarded ranges,
 * failing with EFAULT rather than faulting pages in.
 */
TEST_F(guard_regions, populate)
{
const unsigned long page_size = self->page_size;
char *ptr;
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard the whole range. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
/* Populate reads and writes both fail with EFAULT. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_READ), -1);
ASSERT_EQ(errno, EFAULT);
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE), -1);
ASSERT_EQ(errno, EFAULT);
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that MADV_COLD and MADV_PAGEOUT leave guard markers untouched:
 * the range keeps faulting after each operation.
 */
TEST_F(guard_regions, cold_pageout)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard the whole range and confirm it faults. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
/* Mark cold: guards remain. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_COLD), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
/* Page out: guards still remain. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
for (i = 0; i < 10; i++) {
char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that guard regions can be installed on a range registered with
 * userfaultfd in MISSING mode, and that guarded accesses fault via the
 * guard (SIGSEGV) with no uffd intervention required.
 */
TEST_F(guard_regions, uffd)
{
	const unsigned long page_size = self->page_size;
	int uffd;
	char *ptr;
	int i;
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = 0,
	};
	struct uffdio_register reg;
	struct uffdio_range range;

	if (!is_anon_backed(variant))
		SKIP(return, "uffd only works on anon backing");

	/* Set up the uffd, skipping gracefully where unavailable. */
	uffd = userfaultfd(0);
	if (uffd == -1) {
		switch (errno) {
		case EPERM:
			SKIP(return, "No userfaultfd permissions, try running as root.");
			break;
		case ENOSYS:
			SKIP(return, "userfaultfd is not supported/not enabled.");
			break;
		default:
			ksft_exit_fail_msg("userfaultfd failed with %s\n",
					   strerror(errno));
			break;
		}
	}

	ASSERT_NE(uffd, -1);
	ASSERT_EQ(ioctl(uffd, UFFDIO_API, &api), 0);

	/* Map 10 pages. */
	ptr = mmap_(self, variant, NULL, 10 * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Register the range with the uffd in MISSING mode. */
	range.start = (unsigned long)ptr;
	range.len = 10 * page_size;
	reg.range = range;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	/* Fixed: "&reg" had been corrupted to a '®' mojibake character. */
	ASSERT_EQ(ioctl(uffd, UFFDIO_REGISTER, &reg), 0);

	/* Guarding the registered range must succeed... */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);

	/* ...and accesses fault via the guard, not the (unhandled) uffd. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	ASSERT_EQ(ioctl(uffd, UFFDIO_UNREGISTER, &range), 0);
	close(uffd);

	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that MADV_SEQUENTIAL readahead does not repopulate guarded pages:
 * after paging the range out, faulting unguarded pages back in must not
 * drag their guarded neighbours with them, and the data survives intact.
 */
TEST_F(guard_regions, madvise_sequential)
{
	char *ptr;
	int i;
	const unsigned long page_size = self->page_size;

	if (variant->backing == ANON_BACKED)
		SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed");

	ptr = mmap_(self, variant, NULL, 10 * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Establish a pattern in the backing store. */
	set_pattern(ptr, 10, page_size);
	ASSERT_TRUE(check_pattern(ptr, 10, page_size));

	/* Mark the range as sequentially accessed. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_SEQUENTIAL), 0);

	/* Guard every even page. */
	for (i = 0; i < 10; i += 2) {
		char *ptr2 = &ptr[i * page_size];

		ASSERT_EQ(madvise(ptr2, page_size, MADV_GUARD_INSTALL), 0);
	}

	/* Page everything out. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);

	/* Guarded pages fault; unguarded pages fault back in with data. */
	for (i = 0; i < 10; i++) {
		char *chrp = &ptr[i * page_size];

		if (i % 2 == 0) {
			bool result = try_read_write_buf(chrp);

			ASSERT_FALSE(result);
		} else {
			ASSERT_EQ(*chrp, 'a' + i);
		}
	}

	/*
	 * Remove the guards and assert the full pattern survived.
	 * (Simplified from `if (!check_pattern(...)) ASSERT_TRUE(false);`.)
	 */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
	ASSERT_TRUE(check_pattern(ptr, 10, page_size));

	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that guard regions on a MAP_SHARED and a MAP_PRIVATE mapping of
 * the same file are fully independent of one another, and that guard
 * install/remove interacts correctly with CoW'd private pages.
 */
TEST_F(guard_regions, map_private)
{
const unsigned long page_size = self->page_size;
char *ptr_shared, *ptr_private;
int i;
if (variant->backing == ANON_BACKED)
SKIP(return, "MAP_PRIVATE test specific to file-backed");
ptr_shared = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr_shared, MAP_FAILED);
/* Manually mmap(), so the fixture variant's MAP_SHARED is bypassed. */
ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0);
ASSERT_NE(ptr_private, MAP_FAILED);
/* Write the pattern through the shared mapping. */
set_pattern(ptr_shared, 10, page_size);
/* Guard every even page of the SHARED mapping only. */
for (i = 0; i < 10; i += 2) {
char *ptr = &ptr_shared[i * page_size];
ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
}
/* The private mapping must be unaffected. */
for (i = 0; i < 10; i++) {
ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
}
/* Now also guard every even page of the PRIVATE mapping. */
for (i = 0; i < 10; i += 2) {
char *ptr = &ptr_private[i * page_size];
ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
}
for (i = 0; i < 10; i++) {
ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
}
/* Removing from the shared mapping leaves the private guards alone. */
ASSERT_EQ(madvise(ptr_shared, 10 * page_size, MADV_GUARD_REMOVE), 0);
for (i = 0; i < 10; i++) {
ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
}
/* And vice versa. */
ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
for (i = 0; i < 10; i++) {
ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
}
/* Both mappings still read the file's pattern. */
ASSERT_TRUE(check_pattern(ptr_shared, 10, page_size));
ASSERT_TRUE(check_pattern(ptr_private, 10, page_size));
/* CoW even pages of the private mapping with distinct data. */
for (i = 0; i < 10; i += 2) {
char *ptr = &ptr_private[i * page_size];
memset(ptr, 'a' + i, page_size);
}
/* Guard the first half of the private mapping. */
ASSERT_EQ(madvise(ptr_private, 5 * page_size, MADV_GUARD_INSTALL), 0);
for (i = 0; i < 10; i++) {
char *ptr = &ptr_private[i * page_size];
ASSERT_EQ(try_read_buf(ptr), i >= 5);
ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
}
/* After removal: CoW'd odd... sorry, even pages keep their private data. */
ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
for (i = 0; i < 10; i++) {
char *ptr = &ptr_private[i * page_size];
if (i < 5 || i % 2 == 0) {
char *ptr_s = &ptr_shared[i * page_size];
ASSERT_EQ(memcmp(ptr, ptr_s, page_size), 0);
continue;
}
ASSERT_TRUE(is_buf_eq(ptr, page_size, 'a' + i));
}
ASSERT_EQ(munmap(ptr_shared, 10 * page_size), 0);
ASSERT_EQ(munmap(ptr_private, 10 * page_size), 0);
}
/*
 * Assert that guard regions can be installed on and removed from a
 * read-only, PROT_READ mapping of a file.
 */
TEST_F(guard_regions, readonly_file)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
if (variant->backing != LOCAL_FILE_BACKED)
SKIP(return, "Read-only test specific to file-backed");
/* First write the pattern to the file via a writable mapping. */
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
set_pattern(ptr, 10, page_size);
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
/* Reopen the file read-only and map it read-only. */
ASSERT_EQ(close(self->fd), 0);
self->fd = open(self->path, O_RDONLY);
ASSERT_NE(self->fd, -1);
ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard every even page; only odd pages stay readable. */
for (i = 0; i < 10; i += 2) {
char *ptr_pg = &ptr[i * page_size];
ASSERT_EQ(madvise(ptr_pg, page_size, MADV_GUARD_INSTALL), 0);
}
for (i = 0; i < 10; i++) {
char *ptr_pg = &ptr[i * page_size];
ASSERT_EQ(try_read_buf(ptr_pg), i % 2 != 0);
}
/* Removal restores the full pattern. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
ASSERT_TRUE(check_pattern(ptr, 10, page_size));
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that fault-around does not populate guarded pages: faulting in
 * the unguarded neighbours of paged-out, guarded pages must leave the
 * guarded pages still faulting.
 */
TEST_F(guard_regions, fault_around)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
if (variant->backing == ANON_BACKED)
SKIP(return, "Fault-around test specific to file-backed");
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Write the pattern, page everything out and remap fresh. */
set_pattern(ptr, 10, page_size);
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard every even page. */
for (i = 0; i < 10; i += 2) {
char *ptr_p = &ptr[i * page_size];
ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
}
/* Fault in the odd (unguarded) pages, inviting fault-around. */
for (i = 1; i < 10; i += 2) {
char *ptr_p = &ptr[i * page_size];
ASSERT_TRUE(try_read_buf(ptr_p));
}
/* The even (guarded) pages must still fault regardless. */
for (i = 0; i < 10; i++) {
char *ptr_p = &ptr[i * page_size];
ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
}
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that truncating and re-expanding the backing file leaves guard
 * markers in the mapping intact.
 */
TEST_F(guard_regions, truncation)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
if (variant->backing == ANON_BACKED)
SKIP(return, "Truncation test specific to file-backed");
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Establish the pattern and guard every even page. */
set_pattern(ptr, 10, page_size);
for (i = 0; i < 10; i += 2) {
char *ptr_p = &ptr[i * page_size];
ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
}
for (i = 0; i < 10; i++) {
char *ptr_p = &ptr[i * page_size];
ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
}
/* Truncate the file to exactly the mapped size: guards unchanged. */
ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
for (i = 0; i < 10; i++) {
char *ptr_p = &ptr[i * page_size];
ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
}
/* Shrink below the mapping and grow back: guards still unchanged. */
ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0);
ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
for (i = 0; i < 10; i++) {
char *ptr_p = &ptr[i * page_size];
ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
}
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that punching a hole in the backing file (MADV_REMOVE) neither
 * clears installed guard regions nor makes guarded pages accessible, and
 * that after guard removal the punched range reads back as zeroes while the
 * surrounding pages retain their pattern.
 */
TEST_F(guard_regions, hole_punch)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	/*
	 * The skip message previously read "Truncation test specific to
	 * file-backed" - a copy-paste from the truncation test above.
	 */
	if (variant->backing == ANON_BACKED)
		SKIP(return, "Hole punch test specific to file-backed");

	ptr = mmap_(self, variant, NULL, 10 * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	set_pattern(ptr, 10, page_size);

	/* Guard pages [3, 7). */
	ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
			  MADV_GUARD_INSTALL), 0);
	for (i = 0; i < 10; i++) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
	}

	/* Punch a hole over the guarded range - the guards must survive. */
	ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
			  MADV_REMOVE), 0);
	for (i = 0; i < 10; i++) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
	}

	/*
	 * After removing the guards, the punched range reads as zeroes while
	 * pages [0, 3) and [7, 10) keep the original pattern.
	 */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
	ASSERT_TRUE(check_pattern(ptr, 3, page_size));
	ASSERT_TRUE(is_buf_eq(&ptr[3 * page_size], 4 * page_size, '\0'));
	ASSERT_TRUE(check_pattern_offset(ptr, 3, page_size, 7));

	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that guard regions work on a read-only mapping of a write-sealed
 * memfd, and that removing them restores visibility of the sealed contents
 * while writes remain blocked.
 */
TEST_F(guard_regions, memfd_write_seal)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	if (variant->backing != SHMEM_BACKED)
		SKIP(return, "memfd write seal test specific to shmem");

	/* Swap the fixture fd for a sealable memfd. */
	ASSERT_EQ(close(self->fd), 0);
	self->fd = memfd_create("guard_regions_memfd_seals_test",
				MFD_ALLOW_SEALING);
	ASSERT_NE(self->fd, -1);
	ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);

	/* Write a pattern, then seal the memfd against further writes. */
	ptr = mmap_(self, variant, NULL, 10 * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	set_pattern(ptr, 10, page_size);
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
	ASSERT_EQ(fcntl(self->fd, F_ADD_SEALS, F_SEAL_WRITE), 0);

	/* A read-only mapping of the sealed memfd still sees the pattern. */
	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ASSERT_TRUE(check_pattern(ptr, 10, page_size));

	/* Guard the even pages; only the odd pages remain readable. */
	for (i = 0; i < 10; i += 2)
		ASSERT_EQ(madvise(&ptr[i * page_size], page_size,
				  MADV_GUARD_INSTALL), 0);
	for (i = 0; i < 10; i++)
		ASSERT_EQ(try_read_buf(&ptr[i * page_size]), i % 2 != 0);

	/* Removal restores readability; writes stay blocked by the seal. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
	for (i = 0; i < 10; i++)
		ASSERT_FALSE(try_write_buf(&ptr[i * page_size]));

	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that guard regions interact correctly with a never-written,
 * read-only anon/shmem mapping: guarded pages fault, and after removal the
 * whole range reads as zeroes.
 */
TEST_F(guard_regions, anon_zeropage)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	if (!is_anon_backed(variant))
		SKIP(return, "anon zero page test specific to anon/shmem");

	/* Read-only mapping, never written to. */
	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Guard the even pages. */
	for (i = 0; i < 10; i += 2)
		ASSERT_EQ(madvise(&ptr[i * page_size], page_size,
				  MADV_GUARD_INSTALL), 0);

	/* Guarded pages fault; the rest read fine. */
	for (i = 0; i < 10; i++)
		ASSERT_EQ(try_read_buf(&ptr[i * page_size]), i % 2 != 0);

	/* After removal, every page is readable and reads as zeroes. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
	for (i = 0; i < 10; i++)
		ASSERT_TRUE(try_read_buf(&ptr[i * page_size]));
	ASSERT_TRUE(is_buf_eq(ptr, 10 * page_size, '\0'));

	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that /proc/self/pagemap reports guard-region pages via the
 * PM_GUARD_REGION bit - set exactly on guarded pages, clear elsewhere.
 */
TEST_F(guard_regions, pagemap)
{
	const unsigned long page_size = self->page_size;
	int proc_fd;
	char *ptr;
	int i;

	proc_fd = open("/proc/self/pagemap", O_RDONLY);
	ASSERT_NE(proc_fd, -1);

	ptr = mmap_(self, variant, NULL, 10 * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* No guard bit is set before any guards are installed. */
	for (i = 0; i < 10; i++) {
		unsigned long entry = pagemap_get_entry(proc_fd,
							&ptr[i * page_size]);

		ASSERT_EQ(entry & PM_GUARD_REGION, 0);
	}

	/* Guard the even pages. */
	for (i = 0; i < 10; i += 2)
		ASSERT_EQ(madvise(&ptr[i * page_size], page_size,
				  MADV_GUARD_INSTALL), 0);

	/* Exactly the even pages now report PM_GUARD_REGION. */
	for (i = 0; i < 10; i++) {
		unsigned long entry = pagemap_get_entry(proc_fd,
							&ptr[i * page_size]);

		ASSERT_EQ(entry & PM_GUARD_REGION,
			  i % 2 == 0 ? PM_GUARD_REGION : 0);
	}

	ASSERT_EQ(close(proc_fd), 0);
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that the PAGEMAP_SCAN ioctl on /proc/self/pagemap locates guard
 * regions via the PAGE_IS_GUARD category, returning one region per guarded
 * page.
 */
TEST_F(guard_regions, pagemap_scan)
{
	const unsigned long page_size = self->page_size;
	struct page_region pm_regs[10];
	struct pm_scan_arg pm_scan_args = {
		.size = sizeof(struct pm_scan_arg),
		.category_anyof_mask = PAGE_IS_GUARD,
		.return_mask = PAGE_IS_GUARD,
		.vec = (long)&pm_regs,
		.vec_len = ARRAY_SIZE(pm_regs),
	};
	int proc_fd, i;
	char *ptr;

	proc_fd = open("/proc/self/pagemap", O_RDONLY);
	ASSERT_NE(proc_fd, -1);

	ptr = mmap_(self, variant, NULL, 10 * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	pm_scan_args.start = (long)ptr;
	pm_scan_args.end = (long)ptr + 10 * page_size;

	/* No guard regions yet, so the scan matches nothing. */
	ASSERT_EQ(ioctl(proc_fd, PAGEMAP_SCAN, &pm_scan_args), 0);
	ASSERT_EQ(pm_scan_args.walk_end, (long)ptr + 10 * page_size);

	/*
	 * Guard the even pages. Call madvise() directly for consistency with
	 * the other tests (previously a raw __NR_madvise syscall, which is
	 * equivalent).
	 */
	for (i = 0; i < 10; i += 2) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
	}

	/* The scan should now find the five guarded (even) pages. */
	ASSERT_EQ(ioctl(proc_fd, PAGEMAP_SCAN, &pm_scan_args), 5);
	ASSERT_EQ(pm_scan_args.walk_end, (long)ptr + 10 * page_size);

	/* Each returned region is a single guarded page. */
	for (i = 0; i < 5; i++) {
		long ptr_p = (long)&ptr[2 * i * page_size];

		ASSERT_EQ(pm_regs[i].start, ptr_p);
		ASSERT_EQ(pm_regs[i].end, ptr_p + page_size);
		ASSERT_EQ(pm_regs[i].categories, PAGE_IS_GUARD);
	}

	ASSERT_EQ(close(proc_fd), 0);
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/*
 * Assert that MADV_COLLAPSE refuses to collapse a range containing guard
 * regions into huge pages, and that the guards remain effective afterwards.
 */
TEST_F(guard_regions, collapse)
{
	const unsigned long page_size = self->page_size;
	const unsigned long size = 2 * HPAGE_SIZE;
	const unsigned long num_pages = size / page_size;
	char *ptr;
	int i;

	/* Size the backing object to span the whole range. */
	if (variant->backing != ANON_BACKED)
		ASSERT_EQ(ftruncate(self->fd, size), 0);

	/* For local files, reopen the backing file read-only. */
	if (variant->backing == LOCAL_FILE_BACKED) {
		ASSERT_EQ(close(self->fd), 0);
		self->fd = open(self->path, O_RDONLY);
		ASSERT_GE(self->fd, 0);
	}

	ptr = mmap_(self, variant, NULL, size, PROT_READ, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Populate the range with small pages only. */
	ASSERT_EQ(madvise(ptr, size, MADV_NOHUGEPAGE), 0);
	ASSERT_EQ(madvise(ptr, size, MADV_POPULATE_READ), 0);

	/* Guard every even page, confirming each becomes inaccessible. */
	for (i = 0; i < num_pages; i += 2) {
		char *ptr_page = &ptr[i * page_size];

		ASSERT_EQ(madvise(ptr_page, page_size, MADV_GUARD_INSTALL), 0);
		ASSERT_FALSE(try_read_buf(ptr_page));
	}

	/* Collapse must fail while guard regions are present. */
	ASSERT_EQ(madvise(ptr, size, MADV_HUGEPAGE), 0);
	EXPECT_NE(madvise(ptr, size, MADV_COLLAPSE), 0);

	/* The guards must still be in effect after the failed collapse. */
	for (i = 0; i < num_pages; i += 2) {
		char *ptr_page = &ptr[i * page_size];

		ASSERT_FALSE(try_read_buf(ptr_page));
	}

	/* Clean up the mapping, as every other test in this file does. */
	ASSERT_EQ(munmap(ptr, size), 0);
}
TEST_F(guard_regions, smaps)
{
const unsigned long page_size = self->page_size;
struct procmap_fd procmap;
char *ptr, *ptr2;
int i;
ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
ASSERT_FALSE(check_vmflag_guard(ptr));
ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
ASSERT_TRUE(check_vmflag_guard(ptr));
ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_REMOVE), 0);
ASSERT_TRUE(check_vmflag_guard(ptr));
for (i = 0; i < 10; i++) {
ASSERT_EQ(madvise(&ptr[i * page_size], page_size, MADV_GUARD_INSTALL), 0);
ASSERT_TRUE(check_vmflag_guard(ptr));
}
ASSERT_EQ(munmap(&ptr[4 * page_size], page_size), 0);
ASSERT_TRUE(check_vmflag_guard(ptr));
ASSERT_TRUE(check_vmflag_guard(&ptr[5 * page_size]));
if (!local_fs_has_sane_mmap(self, variant)) {
TH_LOG("local filesystem does not support sane merging skipping merge test");
return;
}
ptr2 = mmap_(self, variant, &ptr[4 * page_size], page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 4 * page_size);
ASSERT_NE(ptr2, MAP_FAILED);
ASSERT_EQ(open_self_procmap(&procmap), 0);
ASSERT_TRUE(find_vma_procmap(&procmap, ptr));
ASSERT_EQ(procmap.query.vma_start, (unsigned long)ptr);
ASSERT_EQ(procmap.query.vma_end, (unsigned long)ptr + 10 * page_size);
ASSERT_EQ(close_procmap(&procmap), 0);
ASSERT_TRUE(check_vmflag_guard(ptr));
}
/* kselftest harness entry point - expands to main() running all TEST_F()s. */
TEST_HARNESS_MAIN