u32 *test_args)
ut_cmd_args[i] = test_args[i];
usleep(msecs_to_usecs(test_args.migration_freq_ms));
for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) {
} while (test_args.nr_vcpus != n_done);
vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
if (test_args.migration_freq_ms) {
for (i = 0; i < test_args.nr_vcpus; i++)
if (test_args.migration_freq_ms)
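/*
 * Hypothetical sketch (not the selftest's actual code) of the polling
 * pattern the fragments above suggest: the main thread optionally sleeps
 * and migrates vCPU threads each round, then counts how many vCPUs have
 * flagged themselves done, looping until all have. NR_VCPUS,
 * vcpu_done_map and migrate_vcpu_to_random_cpu() are assumptions made
 * for illustration.
 */
#include <stdbool.h>
#include <unistd.h>

#define NR_VCPUS 4

static volatile bool vcpu_done_map[NR_VCPUS];

static void migrate_vcpu_to_random_cpu(int vcpu)
{
	(void)vcpu;	/* placeholder for sched_setaffinity() plumbing */
}

static void wait_for_vcpus(unsigned int migration_freq_ms)
{
	int i, n_done;

	do {
		if (migration_freq_ms) {
			/* Let the vCPUs run a while between migrations. */
			usleep(migration_freq_ms * 1000);
			for (i = 0; i < NR_VCPUS; i++)
				migrate_vcpu_to_random_cpu(i);
		}

		for (n_done = 0, i = 0; i < NR_VCPUS; i++) {
			if (vcpu_done_map[i])
				n_done++;
		}
	} while (n_done != NR_VCPUS);
}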
test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
if (test_args.nr_vcpus > KVM_MAX_VCPUS) {
test_args.nr_iter = atoi_positive("Number of iterations", optarg);
test_args.timer_period_ms = atoi_positive("Periodicity", optarg);
test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg);
test_args.timer_err_margin_us = atoi_non_negative("Error Margin", optarg);
test_args.counter_offset = strtol(optarg, NULL, 0);
test_args.reserved = 0;
__TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
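/*
 * Hypothetical sketch of the option parsing implied above: each flag
 * fills one test_args field, with a single checked-atoi helper standing
 * in for the selftest's atoi_positive()/atoi_non_negative(). The flag
 * letters and struct layout are assumptions, not the test's actual
 * command-line interface.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct test_args {
	int nr_vcpus;
	int nr_iter;
	int timer_period_ms;
	int migration_freq_ms;
	int timer_err_margin_us;
	long counter_offset;
};

static int atoi_checked(const char *name, const char *s, int min)
{
	int v = atoi(s);

	if (v < min) {
		fprintf(stderr, "%s: bad value '%s'\n", name, s);
		exit(1);
	}
	return v;
}

static void parse_args(int argc, char **argv, struct test_args *args)
{
	int opt;

	while ((opt = getopt(argc, argv, "n:i:p:m:e:o:")) != -1) {
		switch (opt) {
		case 'n':	/* atoi_positive() equivalent: min 1 */
			args->nr_vcpus = atoi_checked("Number of vCPUs", optarg, 1);
			break;
		case 'i':
			args->nr_iter = atoi_checked("Number of iterations", optarg, 1);
			break;
		case 'p':
			args->timer_period_ms = atoi_checked("Periodicity", optarg, 1);
			break;
		case 'm':	/* atoi_non_negative(): 0 disables migration */
			args->migration_freq_ms = atoi_checked("Frequency", optarg, 0);
			break;
		case 'e':
			args->timer_err_margin_us = atoi_checked("Error Margin", optarg, 0);
			break;
		case 'o':
			/* Base 0 accepts decimal, octal and hex offsets. */
			args->counter_offset = strtol(optarg, NULL, 0);
			break;
		}
	}
}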
struct test_args test_args = {
for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
udelay(msecs_to_usecs(test_args.timer_period_ms) +
test_args.timer_err_margin_us);
gic_init(GIC_V3, test_args.nr_vcpus);
int nr_vcpus = test_args.nr_vcpus;
if (!test_args.reserved) {
.counter_offset = test_args.counter_offset,
sync_global_to_guest(vm, test_args);
timer_set_next_cval_ms(VIRTUAL, test_args.timer_period_ms);
timer_set_next_tval_ms(VIRTUAL, test_args.timer_period_ms);
timer_set_next_cval_ms(PHYSICAL, test_args.timer_period_ms);
timer_set_next_tval_ms(PHYSICAL, test_args.timer_period_ms);
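/*
 * Hypothetical sketch of the CVAL/TVAL relationship behind the
 * timer_set_next_cval_ms()/timer_set_next_tval_ms() pairs above: CVAL is
 * an absolute compare value against the counter, TVAL a relative signed
 * downcount, so both functions below arm the same deadline. The
 * timer_freq_hz value and read_counter() stub are assumptions.
 */
#include <stdint.h>

static uint64_t timer_freq_hz = 62500000;	/* assumed CNTFRQ value */

static uint64_t read_counter(void)
{
	return 0;	/* placeholder for a CNT register read */
}

static uint64_t msec_to_cycles(uint64_t msec)
{
	return timer_freq_hz * msec / 1000;
}

static uint64_t next_cval_ms(uint64_t period_ms)
{
	/* Absolute deadline: current count plus the period in cycles. */
	return read_counter() + msec_to_cycles(period_ms);
}

static int32_t next_tval_ms(uint64_t period_ms)
{
	/* Relative deadline: just the period in cycles, as signed 32-bit. */
	return (int32_t)msec_to_cycles(period_ms);
}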
test_args.test_virtual = true;
test_args.test_physical = false;
test_args.wait_ms = atoi_positive("Wait time", optarg);
if (test_args.test_virtual) {
if (test_args.test_physical) {
uint64_t base_wait = test_args.wait_ms;
int32_t tval = (int32_t) msec_to_cycles(test_args.wait_ms);
uint64_t cval = DEF_CNT + msec_to_cycles(test_args.wait_ms);
msec_to_cycles(test_args.wait_ms));
msec_to_cycles(test_args.wait_ms));
msec_to_cycles(test_args.wait_ms));
msec_to_cycles(test_args.wait_ms));
timer_set_tval(timer, -1 * msec_to_cycles(test_args.wait_ms));
GUEST_ASSERT(timer_get_cval(timer) >= (CVAL_MAX - msec_to_cycles(test_args.wait_ms)));
msec_to_cycles(test_args.wait_ms) / 2, CTL_ENABLE);
msec_to_cycles(test_args.wait_ms);
msecs_to_usecs(test_args.wait_ms) +
struct test_args test_args = {
int32_t tval = -1 * (int32_t) msec_to_cycles(test_args.wait_ms);
cval = DEF_CNT - msec_to_cycles(test_args.wait_ms);
set_counter(timer, msec_to_cycles(test_args.wait_ms));
int32_t tval = (int32_t) msec_to_cycles(test_args.long_wait_ms);
uint64_t cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms);
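/*
 * Hypothetical sketch of the edge case the negative-TVAL fragments probe:
 * hardware derives CVAL = counter + sign-extended TVAL on a TVAL write,
 * so a negative TVAL at a low counter value wraps CVAL to just below its
 * maximum, and a negative downcount means the timer condition is already
 * met. The wait_cycles value is an assumption; the final assert mirrors
 * the GUEST_ASSERT above.
 */
#include <assert.h>
#include <stdint.h>

#define CVAL_MAX	UINT64_MAX

int main(void)
{
	uint64_t wait_cycles = 625000;	/* ~10 ms at an assumed 62.5 MHz */
	uint64_t counter = 0;		/* counter rewound, as via set_counter() */
	int32_t tval = -1 * (int32_t)wait_cycles;

	/* CVAL = counter + sign-extended TVAL wraps below CVAL_MAX. */
	uint64_t cval = counter + (int64_t)tval;

	assert(cval >= CVAL_MAX - wait_cycles);
	return 0;
}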
for (i = 0; i < test_args.iterations; i++) {
sync_global_to_guest(*vm, test_args);
test_args.test_physical = true;
test_args.test_virtual = true;
test_args.iterations =
test_args.long_wait_ms =
test_args.test_physical = true;
test_args.test_virtual = false;
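/*
 * Hypothetical sketch of the flag pattern above: the host selects which
 * timer a pass exercises by flipping test_virtual/test_physical and
 * re-publishing test_args to the guest before re-entering it.
 * publish_args() and run_guest() are assumptions standing in for
 * sync_global_to_guest() and the vCPU run loop.
 */
#include <stdbool.h>

struct test_args {
	bool test_virtual;
	bool test_physical;
};

static struct test_args test_args;

static void publish_args(void) { /* placeholder for sync_global_to_guest() */ }
static void run_guest(void)    { /* placeholder for vcpu_run() plumbing */ }

static void run_both_timers(void)
{
	/* First pass: virtual timer only. */
	test_args.test_virtual = true;
	test_args.test_physical = false;
	publish_args();
	run_guest();

	/* Second pass: physical timer only. */
	test_args.test_virtual = false;
	test_args.test_physical = true;
	publish_args();
	run_guest();
}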
static void reset_priorities(struct test_args *args)
static void test_inject_fail(struct test_args *args,
static void guest_inject(struct test_args *args,
static void guest_restore_active(struct test_args *args,
static void test_inject_preemption(struct test_args *args,
static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
static void test_injection_failure(struct test_args *args,
static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
static void guest_code(struct test_args *args)
struct test_args *test_args, bool expect_failure)
struct test_args *test_args)
kvm_irq_line_check(vm, i, 1, test_args,
kvm_irq_line_check(vm, i, 0, test_args,
kvm_irq_line_check(vm, i, level, test_args,
kvm_irq_line_check(vm, i, 1, test_args,
test_args->kvm_max_routes,
static void print_args(struct test_args *args)
struct test_args args = {
static void guest_code_asym_dir(struct test_args *args, int cpuid)
static void guest_code_group_en(struct test_args *args, int cpuid)
static void guest_code_timer_spi(struct test_args *args, int cpuid)
struct test_args args = {};
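/*
 * Hypothetical sketch of a print_args() helper like the one declared
 * above, printing fields that appear elsewhere in these fragments; each
 * test's actual struct test_args differs, so the field set here is an
 * assumption.
 */
#include <stdio.h>

struct test_args {
	int nr_vcpus;
	int nr_iter;
	int timer_period_ms;
};

static void print_args(const struct test_args *args)
{
	printf("Number of vCPUs: %d\n", args->nr_vcpus);
	printf("Iterations:      %d\n", args->nr_iter);
	printf("Timer period:    %dms\n", args->timer_period_ms);
}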
extern struct test_args test_args;
guest_code, test_args.vcpus);
test_args.vm = vm;
test_args.guest_test_virt_mem = guest_test_virt_mem;
test_args.host_page_size = host_page_size;
test_args.host_num_pages = test_mem_size / host_page_size;
test_args.large_page_size = large_page_size;
test_args.large_num_pages = test_mem_size / large_page_size;
test_args.host_pages_per_lpage = large_page_size / host_page_size;
test_args.src_type = src_type;
sync_global_to_guest(vm, test_args);
test_args.vcpus[i]);
static struct test_args test_args;
struct test_args *p = &test_args;
timer_set_next_cmp_ms(test_args.timer_period_ms, false);
us = msecs_to_usecs(test_args.timer_period_ms);
for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
timer_set_next_cmp_ms(test_args.timer_period_ms, false);
us = msecs_to_usecs(test_args.timer_period_ms);
for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
int nr_vcpus = test_args.nr_vcpus;
sync_global_to_guest(vm, test_args);
shared_data->nr_iter = test_args.nr_iter;
us = msecs_to_usecs(test_args.timer_period_ms) + test_args.timer_err_margin_us;
timer_set_next_cmp_ms(test_args.timer_period_ms, true);
for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
us = msecs_to_usecs(test_args.timer_period_ms) + test_args.timer_err_margin_us;
for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
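/*
 * Hypothetical sketch of the per-iteration guest loop above: each
 * iteration arms the next compare value one period ahead, then bounds
 * the wait by the period plus the configured error margin.
 * arm_timer_ms() and wait_for_irq_us() are assumptions standing in for
 * the timer_set_next_cmp_ms()/udelay() plumbing.
 */
#include <stdbool.h>
#include <stdint.h>

static void arm_timer_ms(uint32_t period_ms) { (void)period_ms; }
static bool wait_for_irq_us(uint64_t timeout_us) { (void)timeout_us; return true; }

static int guest_loop(uint32_t nr_iter, uint32_t period_ms,
		      uint32_t err_margin_us)
{
	uint32_t config_iter;
	uint64_t us;

	for (config_iter = 0; config_iter < nr_iter; config_iter++) {
		arm_timer_ms(period_ms);

		/* Timeout: one period plus the allowed scheduling slack. */
		us = (uint64_t)period_ms * 1000 + err_margin_us;
		if (!wait_for_irq_us(us))
			return -1;	/* interrupt missed its deadline */
	}
	return 0;
}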
const struct test_args *targs,
struct test_args targs = {
static void help(char *name, struct test_args *targs)
struct test_args *targs)
sync_global_to_guest(vm, test_args);
for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
timer_set_next_cmp_ms(test_args.timer_period_ms);
udelay(msecs_to_usecs(test_args.timer_period_ms) +
test_args.timer_err_margin_us);
int nr_vcpus = test_args.nr_vcpus;
static struct test_args targs;
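/*
 * Hypothetical sketch of the help(name, targs) pattern above: passing
 * the args struct into the usage printer lets it report the compiled-in
 * defaults. Field names follow the fragments; the flag letters are
 * assumptions.
 */
#include <stdio.h>

struct test_args {
	int nr_vcpus;
	int nr_iter;
};

static void help(const char *name, const struct test_args *targs)
{
	printf("Usage: %s [-n nr_vcpus] [-i nr_iter]\n", name);
	printf("  -n: number of vCPUs (default: %d)\n", targs->nr_vcpus);
	printf("  -i: number of iterations (default: %d)\n", targs->nr_iter);
}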