#define _GNU_SOURCE
#include "futextest.h"
#include "../../kselftest_harness.h"
#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/wait.h>
#define STACK_SIZE (1024 * 1024)
#define FUTEX_TIMEOUT 3
#define SLEEP_US 100
static pthread_barrier_t barrier, barrier2;
/*
 * Thin wrapper for the set_robust_list() syscall, which glibc does not
 * expose as a library function.  Registers @head (of length @len) as this
 * thread's robust futex list.  Returns 0 on success, -1 with errno set on
 * failure.
 */
static int set_robust_list(struct robust_list_head *head, size_t len)
{
	long rc = syscall(SYS_set_robust_list, head, len);

	return (int)rc;
}
/*
 * Thin wrapper for the get_robust_list() syscall.  Fetches the robust list
 * head pointer and registered length of thread @pid (0 = calling thread)
 * into *@head and *@len_ptr.  Returns 0 on success, -1 with errno set on
 * failure.
 */
static int get_robust_list(int pid, struct robust_list_head **head, size_t *len_ptr)
{
	long rc = syscall(SYS_get_robust_list, pid, head, len_ptr);

	return (int)rc;
}
/*
 * One robust lock: the futex word itself plus the intrusive list node the
 * kernel walks on thread exit.  The fixed distance between the two members
 * is what set_list() publishes as futex_offset.
 */
struct lock_struct {
	_Atomic(unsigned int) futex;
	struct robust_list list;
};
/*
 * Start a child that shares this address space (CLONE_VM, but a separate
 * task with its own robust list), running fn(arg) on a freshly mapped stack.
 *
 * Returns the child's pid, or -1 on error.  On success the stack mapping is
 * intentionally left alive for the short-lived child; on clone() failure it
 * is now unmapped instead of leaked.
 */
static int create_child(int (*fn)(void *arg), void *arg)
{
	char *stack;
	pid_t pid;

	stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
	if (stack == MAP_FAILED)
		return -1;

	/* clone() takes the initial stack pointer, i.e. the top of the mapping. */
	stack += STACK_SIZE;

	pid = clone(fn, stack, CLONE_VM | SIGCHLD, arg);
	if (pid == -1) {
		/* Fix: don't leak the stack mapping when clone() fails. */
		munmap(stack - STACK_SIZE, STACK_SIZE);
		return -1;
	}

	return pid;
}
/*
 * Initialize @head as an empty robust list (futex_offset computed from
 * struct lock_struct's layout) and register it with the kernel for the
 * calling thread.  Returns 0 on success, set_robust_list()'s result on
 * failure.
 */
static int set_list(struct robust_list_head *head)
{
	int ret;

	/*
	 * Fix: fully initialize the head BEFORE handing it to the kernel.
	 * Once set_robust_list() succeeds the kernel may walk this list on
	 * thread exit, so it must never observe uninitialized fields.
	 */
	head->futex_offset = (size_t) offsetof(struct lock_struct, futex) -
			     (size_t) offsetof(struct lock_struct, list);
	head->list.next = &head->list;	/* empty circular list */
	head->list_op_pending = NULL;

	ret = set_robust_list(head, sizeof(*head));
	if (ret)
		return ret;

	return 0;
}
/*
 * Take @lock, publishing the operation on this thread's robust list so the
 * kernel can recover the lock if we die while holding it.
 *
 * Uncontended path: CAS 0 -> tid, then append the lock to the robust list.
 * With @error_inject the append is skipped on purpose, returning with
 * list_op_pending still set, to simulate dying in the middle of a lock
 * operation.
 *
 * Contended path: set FUTEX_WAITERS and block in futex_wait() for up to
 * FUTEX_TIMEOUT seconds; the wake is expected to come from the kernel's
 * robust-list exit handling of the dead owner.
 *
 * Returns 0 on success; on the contended path, futex_wait()'s result.
 *
 * NOTE(review): the load/or/store below is not an atomic RMW; with the
 * single waiter these tests use that appears harmless — confirm before
 * reusing with multiple concurrent waiters.
 */
static int mutex_lock(struct lock_struct *lock, struct robust_list_head *head, bool error_inject)
{
	_Atomic(unsigned int) *futex = &lock->futex;
	unsigned int zero = 0;
	pid_t tid = gettid();
	int ret = -1;

	/* Tell the kernel we are about to operate on this lock. */
	head->list_op_pending = &lock->list;

	if (atomic_compare_exchange_strong(futex, &zero, tid)) {
		struct robust_list *list = &head->list;

		/*
		 * Intentionally return with list_op_pending set and the lock
		 * not yet linked: simulates death mid-acquisition.
		 */
		if (error_inject)
			return 0;

		/* Walk to the tail of the circular list... */
		while (list->next != &head->list)
			list = list->next;

		/* ...and append the freshly acquired lock. */
		list->next = &lock->list;
		lock->list.next = &head->list;

		ret = 0;
	} else {
		struct timespec to;

		to.tv_sec = FUTEX_TIMEOUT;
		to.tv_nsec = 0;

		/* Mark the futex contended so the dying owner's cleanup wakes us. */
		tid = atomic_load(futex);
		tid |= FUTEX_WAITERS;
		atomic_store(futex, tid);

		ret = futex_wait((futex_t *) futex, tid, &to, 0);
	}

	head->list_op_pending = NULL;

	return ret;
}
/*
 * Child body for test_robustness: register a robust list, acquire @arg's
 * lock, signal the parent, then exit while still holding the lock so the
 * kernel's robust-list cleanup has to release it.
 */
static int child_fn_lock(void *arg)
{
	struct lock_struct *lock = arg;
	struct robust_list_head head;
	int err;

	err = set_list(&head);
	if (err) {
		ksft_test_result_fail("set_robust_list error\n");
		return err;
	}

	err = mutex_lock(lock, &head, false);
	if (err) {
		ksft_test_result_fail("mutex_lock error\n");
		return err;
	}

	/* Let the parent know the lock is held, then die still holding it. */
	pthread_barrier_wait(&barrier);
	usleep(SLEEP_US);

	return 0;
}
/*
 * Basic robustness: a child dies holding the lock; the parent must then be
 * able to acquire it and must observe FUTEX_OWNER_DIED set by the kernel's
 * robust-list cleanup.
 */
TEST(test_robustness)
{
	struct lock_struct lock = { .futex = 0 };
	_Atomic(unsigned int) *futex = &lock.futex;
	struct robust_list_head head;
	int ret, pid, wstatus;

	ret = set_list(&head);
	ASSERT_EQ(ret, 0);

	ret = pthread_barrier_init(&barrier, NULL, 2);
	ASSERT_EQ(ret, 0);

	pid = create_child(&child_fn_lock, &lock);
	ASSERT_NE(pid, -1);

	/* Wait until the child holds the lock, then contend for it. */
	pthread_barrier_wait(&barrier);
	ret = mutex_lock(&lock, &head, false);
	ASSERT_EQ(ret, 0);

	/* Kernel must flag the futex as recovered from a dead owner. */
	ASSERT_TRUE(*futex & FUTEX_OWNER_DIED);

	wait(&wstatus);
	pthread_barrier_destroy(&barrier);

	/* Fix: report child failure explicitly instead of staying silent. */
	if (!WEXITSTATUS(wstatus))
		ksft_test_result_pass("%s\n", __func__);
	else
		ksft_test_result_fail("%s\n", __func__);
}
/*
 * The kernel only accepts a robust list registration whose length is
 * exactly sizeof(struct robust_list_head); every other length must be
 * rejected with EINVAL.
 */
TEST(test_set_robust_list_invalid_size)
{
	struct robust_list_head head;
	const size_t valid = sizeof(head);
	const size_t bad_sizes[] = { valid * 2, valid - 1, 0 };
	size_t i;
	int ret;

	/* The exact structure size is accepted... */
	ret = set_robust_list(&head, valid);
	ASSERT_EQ(ret, 0);

	/* ...and every other length fails with EINVAL. */
	for (i = 0; i < sizeof(bad_sizes) / sizeof(bad_sizes[0]); i++) {
		ret = set_robust_list(&head, bad_sizes[i]);
		ASSERT_EQ(ret, -1);
		ASSERT_EQ(errno, EINVAL);
	}

	ksft_test_result_pass("%s\n", __func__);
}
/*
 * get_robust_list(0, ...) must return whatever head the calling thread
 * most recently registered, along with the registered length — including
 * after the registration is replaced.
 */
TEST(test_get_robust_list_self)
{
	struct robust_list_head head, head2, *get_head;
	size_t head_size = sizeof(head), len_ptr;
	int ret;

	/* Register a head and read it back. */
	ret = set_robust_list(&head, head_size);
	ASSERT_EQ(ret, 0);
	ret = get_robust_list(0, &get_head, &len_ptr);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(get_head, &head);
	ASSERT_EQ(head_size, len_ptr);

	/* Re-registering must be reflected by a subsequent query. */
	ret = set_robust_list(&head2, head_size);
	ASSERT_EQ(ret, 0);
	ret = get_robust_list(0, &get_head, &len_ptr);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(get_head, &head2);
	ASSERT_EQ(head_size, len_ptr);

	ksft_test_result_pass("%s\n", __func__);
}
/*
 * Child body for test_get_robust_list_child: register @arg as this task's
 * robust list head, signal the parent that registration is done, then stay
 * alive until the parent has queried it.
 */
static int child_list(void *arg)
{
	struct robust_list_head *head = arg;

	if (set_robust_list(head, sizeof(*head))) {
		ksft_test_result_fail("set_robust_list error\n");
		return -1;
	}

	/* Registered: let the parent query us, then wait to be released. */
	pthread_barrier_wait(&barrier);
	pthread_barrier_wait(&barrier2);

	return 0;
}
/*
 * get_robust_list() with a non-zero pid must return the head that the
 * *target* task registered, not the caller's.
 */
TEST(test_get_robust_list_child)
{
	struct robust_list_head head, *get_head;
	int ret, wstatus;
	size_t len_ptr;
	pid_t tid;

	/* Fix: the first pthread_barrier_init() result was overwritten unchecked. */
	ret = pthread_barrier_init(&barrier, NULL, 2);
	ASSERT_EQ(ret, 0);
	ret = pthread_barrier_init(&barrier2, NULL, 2);
	ASSERT_EQ(ret, 0);

	tid = create_child(&child_list, &head);
	ASSERT_NE(tid, -1);

	/* Wait until the child has registered its robust list. */
	pthread_barrier_wait(&barrier);

	ret = get_robust_list(tid, &get_head, &len_ptr);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(&head, get_head);

	/* Release the child and reap it. */
	pthread_barrier_wait(&barrier2);
	wait(&wstatus);
	pthread_barrier_destroy(&barrier);
	pthread_barrier_destroy(&barrier2);

	if (!WEXITSTATUS(wstatus))
		ksft_test_result_pass("%s\n", __func__);
	else
		ksft_test_result_fail("%s\n", __func__);
}
/*
 * Child body for test_set_list_op_pending: acquire @arg's lock with error
 * injection, so we exit with list_op_pending still set and the lock never
 * linked into the robust list.
 */
static int child_fn_lock_with_error(void *arg)
{
	struct lock_struct *lock = arg;
	struct robust_list_head head;

	if (set_list(&head)) {
		ksft_test_result_fail("set_robust_list error\n");
		return -1;
	}

	if (mutex_lock(lock, &head, true)) {
		ksft_test_result_fail("mutex_lock error\n");
		return -1;
	}

	/* Signal the parent, then die mid-"lock operation". */
	pthread_barrier_wait(&barrier);
	usleep(SLEEP_US);

	return 0;
}
/*
 * A child dies with only list_op_pending pointing at the lock (the lock is
 * never linked into the robust list).  The kernel must still recover the
 * lock via list_op_pending and set FUTEX_OWNER_DIED.
 */
TEST(test_set_list_op_pending)
{
	struct lock_struct lock = { .futex = 0 };
	_Atomic(unsigned int) *futex = &lock.futex;
	struct robust_list_head head;
	int ret, wstatus;

	ret = set_list(&head);
	ASSERT_EQ(ret, 0);
	ret = pthread_barrier_init(&barrier, NULL, 2);
	ASSERT_EQ(ret, 0);

	ret = create_child(&child_fn_lock_with_error, &lock);
	ASSERT_NE(ret, -1);

	/* Wait for the child to hold the lock, then contend for it. */
	pthread_barrier_wait(&barrier);
	ret = mutex_lock(&lock, &head, false);
	ASSERT_EQ(ret, 0);

	ASSERT_TRUE(*futex & FUTEX_OWNER_DIED);

	wait(&wstatus);
	pthread_barrier_destroy(&barrier);

	if (WEXITSTATUS(wstatus))
		ksft_test_result_fail("%s\n", __func__);
	else
		ksft_test_result_pass("%s\n", __func__);
}
#define CHILD_NR 10
static int child_lock_holder(void *arg)
{
struct lock_struct *locks = arg;
struct robust_list_head head;
int i;
set_list(&head);
for (i = 0; i < CHILD_NR; i++) {
locks[i].futex = 0;
mutex_lock(&locks[i], &head, false);
}
pthread_barrier_wait(&barrier);
pthread_barrier_wait(&barrier2);
usleep(SLEEP_US);
return 0;
}
/*
 * Waiter child: block on @lock (held by child_lock_holder) and, once the
 * holder dies, verify the kernel marked the futex FUTEX_OWNER_DIED.
 *
 * NOTE(review): @head is handed to mutex_lock() without set_list(); on the
 * contended path mutex_lock() only writes head->list_op_pending, so the
 * uninitialized fields appear never to be read — confirm before any change
 * that could take the uncontended path with this head.
 */
static int child_wait_lock(void *arg)
{
	struct lock_struct *lock = arg;
	struct robust_list_head head;
	int ret;

	/* Wait until the holder owns every lock before contending. */
	pthread_barrier_wait(&barrier2);

	ret = mutex_lock(lock, &head, false);
	if (ret) {
		ksft_test_result_fail("mutex_lock error\n");
		return -1;
	}

	if (!(lock->futex & FUTEX_OWNER_DIED)) {
		ksft_test_result_fail("futex not marked with FUTEX_OWNER_DIED\n");
		return -1;
	}

	return 0;
}
/*
 * One holder child takes CHILD_NR locks and dies; CHILD_NR waiter children
 * must each be woken with FUTEX_OWNER_DIED set — exercising kernel cleanup
 * of a robust list with multiple elements.
 */
TEST(test_robust_list_multiple_elements)
{
	struct lock_struct locks[CHILD_NR];
	pid_t pids[CHILD_NR + 1];
	int i, ret, wstatus;

	ret = pthread_barrier_init(&barrier, NULL, 2);
	ASSERT_EQ(ret, 0);
	ret = pthread_barrier_init(&barrier2, NULL, CHILD_NR + 1);
	ASSERT_EQ(ret, 0);

	/* pids[0] is the holder; pids[1..CHILD_NR] are the waiters. */
	pids[0] = create_child(&child_lock_holder, locks);
	ASSERT_NE(pids[0], -1);

	/* Wait until the holder owns every lock before starting waiters. */
	pthread_barrier_wait(&barrier);

	for (i = 0; i < CHILD_NR; i++) {
		pids[i + 1] = create_child(&child_wait_lock, &locks[i]);
		ASSERT_NE(pids[i + 1], -1);
	}

	/* Fix: reap all CHILD_NR + 1 children (the last waiter was skipped). */
	ret = 0;
	for (i = 0; i < CHILD_NR + 1; i++) {
		waitpid(pids[i], &wstatus, 0);
		if (WEXITSTATUS(wstatus))
			ret = -1;
	}

	pthread_barrier_destroy(&barrier);
	pthread_barrier_destroy(&barrier2);

	if (!ret)
		ksft_test_result_pass("%s\n", __func__);
	else
		ksft_test_result_fail("%s\n", __func__);
}
/*
 * Child body for test_circular_list: register a robust list, then corrupt
 * it into a cycle (head -> a -> b -> c -> a -> ...) and exit.  The kernel's
 * exit-time robust-list walk must cope with the cycle rather than loop
 * forever.  The head is static so its storage outlives this frame's use.
 */
static int child_circular_list(void *arg)
{
	static struct robust_list_head head;
	struct lock_struct a, b, c;

	if (set_list(&head)) {
		ksft_test_result_fail("set_list error\n");
		return -1;
	}

	/* Build the cycle: c points back to a, never to the head. */
	head.list.next = &a.list;
	a.list.next = &b.list;
	b.list.next = &c.list;
	c.list.next = &a.list;

	return 0;
}
/*
 * A child exits with a circular robust list; if the kernel handles the
 * cycle gracefully the child terminates normally and the test passes.
 */
TEST(test_circular_list)
{
	int pid, wstatus;

	/* Fix: create_child() failure was not checked. */
	pid = create_child(child_circular_list, NULL);
	ASSERT_NE(pid, -1);

	wait(&wstatus);

	if (!WEXITSTATUS(wstatus))
		ksft_test_result_pass("%s\n", __func__);
	else
		ksft_test_result_fail("%s\n", __func__);
}
TEST_HARNESS_MAIN