// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *      Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_DESCRIPTION("torture test facility for locking");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, acq_writer_lim, 0, "Write-acquisition time limit (jiffies).");
torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, rt_boost, 2,
                   "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
#define MAX_NESTED_LOCKS 8

static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
                 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static cpumask_var_t bind_readers; // Bind the readers to the specified set of CPUs.
static cpumask_var_t bind_writers; // Bind the writers to the specified set of CPUs.

// Parse a cpumask kernel parameter.  If there are more users later on,
// this might need to go to a more central location.
static int param_set_cpumask(const char *val, const struct kernel_param *kp)
{
        cpumask_var_t *cm_bind = kp->arg;
        int ret;
        char *s;

        if (!alloc_cpumask_var(cm_bind, GFP_KERNEL)) {
                s = "Out of memory";
                ret = -ENOMEM;
                goto out_err;
        }
        ret = cpulist_parse(val, *cm_bind);
        if (!ret)
                return ret;
        s = "Bad CPU range";
out_err:
        pr_warn("%s: %s, all CPUs set\n", kp->name, s);
        cpumask_setall(*cm_bind);
        return ret;
}
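
// The value uses the standard kernel cpulist format, so for example a
// hypothetical "bind_writers=0-3,8" would bind the writer kthreads to
// CPUs 0, 1, 2, 3, and 8.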

// Output a cpumask kernel parameter.
static int param_get_cpumask(char *buffer, const struct kernel_param *kp)
{
        cpumask_var_t *cm_bind = kp->arg;

        return sprintf(buffer, "%*pbl", cpumask_pr_args(*cm_bind));
}

static bool cpumask_nonempty(cpumask_var_t mask)
{
        return cpumask_available(mask) && !cpumask_empty(mask);
}

static const struct kernel_param_ops lt_bind_ops = {
        .set = param_set_cpumask,
        .get = param_get_cpumask,
};

module_param_cb(bind_readers, &lt_bind_ops, &bind_readers, 0444);
module_param_cb(bind_writers, &lt_bind_ops, &bind_writers, 0444);

/* Defined in kernel/torture.c. */
long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn);

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
        long n_lock_fail;
        long n_lock_acquired;
};

struct call_rcu_chain {
        struct rcu_head crc_rh;
        bool crc_stop;
};
static struct call_rcu_chain *call_rcu_chain_list;

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
        void (*init)(void);
        void (*exit)(void);
        int (*nested_lock)(int tid, u32 lockset);
        int (*writelock)(int tid);
        void (*write_delay)(struct torture_random_state *trsp);
        void (*task_boost)(struct torture_random_state *trsp);
        void (*writeunlock)(int tid);
        void (*nested_unlock)(int tid, u32 lockset);
        int (*readlock)(int tid);
        void (*read_delay)(struct torture_random_state *trsp);
        void (*readunlock)(int tid);

        unsigned long flags; /* for irq spinlocks */
        const char *name;
};
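
/*
 * A NULL ->readlock marks a write-only lock type: no reader kthreads are
 * created for it and no reader statistics are allocated.
 */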

struct lock_torture_cxt {
        int nrealwriters_stress;
        int nrealreaders_stress;
        bool debug_lock;
        bool init_called;
        atomic_t n_lock_torture_errors;
        struct lock_torture_ops *cur_ops;
        struct lock_stress_stats *lwsa; /* writer statistics */
        struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false, false,
                                       ATOMIC_INIT(0),
                                       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
        return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
        /* We want a long delay occasionally to force massive contention.  */
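        /*
         * Scaling the modulus by both the writer count and long_hold keeps
         * the expected long-hold delay per call near 1/(2000 * nwriters) ms,
         * independent of the long_hold setting itself.
         */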
        if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
                mdelay(long_hold);
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
                torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
          /* BUGGY, do not use in real life!!! */
}

static void __torture_rt_boost(struct torture_random_state *trsp)
{
        const unsigned int factor = rt_boost_factor;

        if (!rt_task(current)) {
                /*
                 * Boost priority once every rt_boost_factor operations. When
                 * the task next tries to take the lock, the rtmutex code will
                 * account for the new priority and do any corresponding
                 * PI dance.
                 */
                if (trsp && !(torture_random(trsp) %
                              (cxt.nrealwriters_stress * factor))) {
                        sched_set_fifo(current);
                } else /* common case, do nothing */
                        return;
        } else {
                /*
                 * The task will remain boosted for roughly another
                 * 2 * rt_boost_factor * nrealwriters_stress operations,
                 * then be restored to its original priority, and so forth.
                 *
                 * When @trsp is NULL, force-reset the task's priority in
                 * preparation for stopping the kthread.
                 */
                if (!trsp || !(torture_random(trsp) %
                               (cxt.nrealwriters_stress * factor * 2))) {
                        sched_set_normal(current, 0);
                } else /* common case, do nothing */
                        return;
        }
}
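
/*
 * For example, with the default rt_boost_factor of 50 and four writers,
 * each non-rt writer has roughly a 1-in-200 chance per operation of being
 * boosted, and each boosted writer roughly a 1-in-400 chance per operation
 * of being restored to SCHED_NORMAL.
 */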

static void torture_rt_boost(struct torture_random_state *trsp)
{
        if (rt_boost != 2)
                return;

        __torture_rt_boost(trsp);
}

static struct lock_torture_ops lock_busted_ops = {
        .writelock      = torture_lock_busted_write_lock,
        .write_delay    = torture_lock_busted_write_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_lock_busted_write_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
        spin_lock(&torture_spinlock);
        return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
        const unsigned long shortdelay_us = 2;
        unsigned long j;

        /* We want a short delay mostly to emulate likely code, and
         * we want a long delay occasionally to force massive contention.
         */
        if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) {
                j = jiffies;
                mdelay(long_hold);
                pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
        }
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
                udelay(shortdelay_us);
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
                torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
        spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
        .writelock      = torture_spin_lock_write_lock,
        .write_delay    = torture_spin_lock_write_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_spin_lock_write_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "spin_lock"
};

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
        unsigned long flags;

        spin_lock_irqsave(&torture_spinlock, flags);
        cxt.cur_ops->flags = flags;
        return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
        spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
        .writelock      = torture_spin_lock_write_lock_irq,
        .write_delay    = torture_spin_lock_write_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_lock_spin_write_unlock_irq,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "spin_lock_irq"
};

static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);

static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
        raw_spin_lock(&torture_raw_spinlock);
        return 0;
}

static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
        raw_spin_unlock(&torture_raw_spinlock);
}

static struct lock_torture_ops raw_spin_lock_ops = {
        .writelock      = torture_raw_spin_lock_write_lock,
        .write_delay    = torture_spin_lock_write_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_raw_spin_lock_write_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "raw_spin_lock"
};

static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
        cxt.cur_ops->flags = flags;
        return 0;
}

static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
        raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops raw_spin_lock_irq_ops = {
        .writelock      = torture_raw_spin_lock_write_lock_irq,
        .write_delay    = torture_spin_lock_write_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_raw_spin_lock_write_unlock_irq,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "raw_spin_lock_irq"
};

#ifdef CONFIG_BPF_SYSCALL

#include <asm/rqspinlock.h>
static rqspinlock_t rqspinlock;

/*
 * raw_res_spin_lock() is the BPF resilient queued spinlock and, unlike an
 * ordinary raw spinlock, can report acquisition failure (for example, on
 * detected deadlock).  This simple torture wrapper ignores the return value.
 */
static int torture_raw_res_spin_write_lock(int tid __maybe_unused)
{
        raw_res_spin_lock(&rqspinlock);
        return 0;
}

static void torture_raw_res_spin_write_unlock(int tid __maybe_unused)
{
        raw_res_spin_unlock(&rqspinlock);
}

static struct lock_torture_ops raw_res_spin_lock_ops = {
        .writelock      = torture_raw_res_spin_write_lock,
        .write_delay    = torture_spin_lock_write_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_raw_res_spin_write_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "raw_res_spin_lock"
};

static int torture_raw_res_spin_write_lock_irq(int tid __maybe_unused)
{
        unsigned long flags;

        raw_res_spin_lock_irqsave(&rqspinlock, flags);
        cxt.cur_ops->flags = flags;
        return 0;
}

static void torture_raw_res_spin_write_unlock_irq(int tid __maybe_unused)
{
        raw_res_spin_unlock_irqrestore(&rqspinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops raw_res_spin_lock_irq_ops = {
        .writelock      = torture_raw_res_spin_write_lock_irq,
        .write_delay    = torture_spin_lock_write_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_raw_res_spin_write_unlock_irq,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "raw_res_spin_lock_irq"
};

#endif

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
        write_lock(&torture_rwlock);
        return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
        const unsigned long shortdelay_us = 2;

        /* We want a short delay mostly to emulate likely code, and
         * we want a long delay occasionally to force massive contention.
         */
        if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
                mdelay(long_hold);
        else
                udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
        write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
        read_lock(&torture_rwlock);
        return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
        const unsigned long shortdelay_us = 10;

        /* We want a short delay mostly to emulate likely code, and
         * we want a long delay occasionally to force massive contention.
         */
        if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
                mdelay(long_hold);
        else
                udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
        read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
        .writelock      = torture_rwlock_write_lock,
        .write_delay    = torture_rwlock_write_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_rwlock_write_unlock,
        .readlock       = torture_rwlock_read_lock,
        .read_delay     = torture_rwlock_read_delay,
        .readunlock     = torture_rwlock_read_unlock,
        .name           = "rw_lock"
};

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
        unsigned long flags;

        write_lock_irqsave(&torture_rwlock, flags);
        cxt.cur_ops->flags = flags;
        return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
        write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
        unsigned long flags;

        read_lock_irqsave(&torture_rwlock, flags);
        cxt.cur_ops->flags = flags;
        return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
        read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
        .writelock      = torture_rwlock_write_lock_irq,
        .write_delay    = torture_rwlock_write_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_rwlock_write_unlock_irq,
        .readlock       = torture_rwlock_read_lock_irq,
        .read_delay     = torture_rwlock_read_delay,
        .readunlock     = torture_rwlock_read_unlock_irq,
        .name           = "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];

static void torture_mutex_init(void)
{
        int i;

        for (i = 0; i < MAX_NESTED_LOCKS; i++)
                __mutex_init(&torture_nested_mutexes[i], __func__,
                             &nested_mutex_keys[i]);
}

/*
 * Each bit of @lockset selects one nested mutex.  Taking the selected
 * mutexes in ascending index order here and releasing them in descending
 * order below keeps the nesting order consistent across all writers.
 */
static int torture_mutex_nested_lock(int tid __maybe_unused,
                                     u32 lockset)
{
        int i;

        for (i = 0; i < nested_locks; i++)
                if (lockset & (1 << i))
                        mutex_lock(&torture_nested_mutexes[i]);
        return 0;
}

static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
        mutex_lock(&torture_mutex);
        return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
        /* We want a long delay occasionally to force massive contention.  */
        if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
                mdelay(long_hold * 5);
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
                torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
        mutex_unlock(&torture_mutex);
}

static void torture_mutex_nested_unlock(int tid __maybe_unused,
                                        u32 lockset)
{
        int i;

        for (i = nested_locks - 1; i >= 0; i--)
                if (lockset & (1 << i))
                        mutex_unlock(&torture_nested_mutexes[i]);
}

static struct lock_torture_ops mutex_lock_ops = {
        .init           = torture_mutex_init,
        .nested_lock    = torture_mutex_nested_lock,
        .writelock      = torture_mutex_lock,
        .write_delay    = torture_mutex_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_mutex_unlock,
        .nested_unlock  = torture_mutex_nested_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "mutex_lock"
};

#include <linux/ww_mutex.h>
/*
 * The torture ww_mutexes should all belong to the same lock class as
 * torture_ww_class in order to avoid lockdep problems.  Initializing them
 * with ww_mutex_init() ensures that they do.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
        ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
        ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
        ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

        ww_acquire_ctxs = kmalloc_objs(*ww_acquire_ctxs,
                                       cxt.nrealwriters_stress);
        if (!ww_acquire_ctxs)
                VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
        kfree(ww_acquire_ctxs);
}

static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
        LIST_HEAD(list);
        struct reorder_lock {
                struct list_head link;
                struct ww_mutex *lock;
        } locks[3], *ll, *ln;
        struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

        locks[0].lock = &torture_ww_mutex_0;
        list_add(&locks[0].link, &list);

        locks[1].lock = &torture_ww_mutex_1;
        list_add(&locks[1].link, &list);

        locks[2].lock = &torture_ww_mutex_2;
        list_add(&locks[2].link, &list);

        ww_acquire_init(ctx, &torture_ww_class);

        list_for_each_entry(ll, &list, link) {
                int err;

                err = ww_mutex_lock(ll->lock, ctx);
                if (!err)
                        continue;

                /* On failure, back out every lock acquired so far. */
                ln = ll;
                list_for_each_entry_continue_reverse(ln, &list, link)
                        ww_mutex_unlock(ln->lock);

                if (err != -EDEADLK)
                        return err;

                /*
                 * -EDEADLK means that deadlock avoidance backed this
                 * context off.  Wait for the contended lock
                 * (ww_mutex_lock_slow() cannot fail), then move it to the
                 * head of the list and resume acquiring the rest.
                 */
                ww_mutex_lock_slow(ll->lock, ctx);
                list_move(&ll->link, &list);
        }

        return 0;
}

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
        struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

        ww_mutex_unlock(&torture_ww_mutex_0);
        ww_mutex_unlock(&torture_ww_mutex_1);
        ww_mutex_unlock(&torture_ww_mutex_2);
        ww_acquire_fini(ctx);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
        .init           = torture_ww_mutex_init,
        .exit           = torture_ww_mutex_exit,
        .writelock      = torture_ww_mutex_lock,
        .write_delay    = torture_mutex_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_ww_mutex_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

static void torture_rtmutex_init(void)
{
        int i;

        for (i = 0; i < MAX_NESTED_LOCKS; i++)
                __rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
                                &nested_rtmutex_keys[i]);
}

static int torture_rtmutex_nested_lock(int tid __maybe_unused,
                                       u32 lockset)
{
        int i;

        for (i = 0; i < nested_locks; i++)
                if (lockset & (1 << i))
                        rt_mutex_lock(&torture_nested_rtmutexes[i]);
        return 0;
}

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
        rt_mutex_lock(&torture_rtmutex);
        return 0;
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
        const unsigned long shortdelay_us = 2;

        /*
         * We want a short delay mostly to emulate likely code, and
         * we want a long delay occasionally to force massive contention.
         */
        if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
                mdelay(long_hold);
        if (!(torture_random(trsp) %
              (cxt.nrealwriters_stress * 200 * shortdelay_us)))
                udelay(shortdelay_us);
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
                torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
        rt_mutex_unlock(&torture_rtmutex);
}

static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
        if (!rt_boost)
                return;

        __torture_rt_boost(trsp);
}

static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
                                          u32 lockset)
{
        int i;

        for (i = nested_locks - 1; i >= 0; i--)
                if (lockset & (1 << i))
                        rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}

static struct lock_torture_ops rtmutex_lock_ops = {
        .init           = torture_rtmutex_init,
        .nested_lock    = torture_rtmutex_nested_lock,
        .writelock      = torture_rtmutex_lock,
        .write_delay    = torture_rtmutex_delay,
        .task_boost     = torture_rt_boost_rtmutex,
        .writeunlock    = torture_rtmutex_unlock,
        .nested_unlock  = torture_rtmutex_nested_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
        .readunlock     = NULL,
        .name           = "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
        down_write(&torture_rwsem);
        return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
        /* We want a long delay occasionally to force massive contention.  */
        if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
                mdelay(long_hold * 10);
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
                torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
        up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
        down_read(&torture_rwsem);
        return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
        /* We want a long delay occasionally to force massive contention.  */
        if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
                mdelay(long_hold * 2);
        else
                mdelay(long_hold / 2);
        if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
                torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
        up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
        .writelock      = torture_rwsem_down_write,
        .write_delay    = torture_rwsem_write_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_rwsem_up_write,
        .readlock       = torture_rwsem_down_read,
        .read_delay     = torture_rwsem_read_delay,
        .readunlock     = torture_rwsem_up_read,
        .name           = "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
        BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
        percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
        percpu_down_write(&pcpu_rwsem);
        return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
        percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
        percpu_down_read(&pcpu_rwsem);
        return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
        percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
        .init           = torture_percpu_rwsem_init,
        .exit           = torture_percpu_rwsem_exit,
        .writelock      = torture_percpu_rwsem_down_write,
        .write_delay    = torture_rwsem_write_delay,
        .task_boost     = torture_rt_boost,
        .writeunlock    = torture_percpu_rwsem_up_write,
        .readlock       = torture_percpu_rwsem_down_read,
        .read_delay     = torture_rwsem_read_delay,
        .readunlock     = torture_percpu_rwsem_up_read,
        .name           = "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
        unsigned long j;
        unsigned long j1;
        u32 lockset_mask;
        struct lock_stress_stats *lwsp = arg;
        DEFINE_TORTURE_RANDOM(rand);
        bool skip_main_lock;
        int tid = lwsp - cxt.lwsa; /* Index of this writer in the stats array. */

        VERBOSE_TOROUT_STRING("lock_torture_writer task started");
        if (!rt_task(current))
                set_user_nice(current, MAX_NICE);

        do {
                if ((torture_random(&rand) & 0xfffff) == 0)
                        schedule_timeout_uninterruptible(1);

                lockset_mask = torture_random(&rand);
                /*
                 * When using nested_locks, occasionally skip the main
                 * lock to avoid always serializing the lock chains on
                 * that central lock.  This creates different contention
                 * patterns (allowing for multiple disjoint blocked trees).
                 */
                skip_main_lock = (nested_locks &&
                                 !(torture_random(&rand) % 100));

                cxt.cur_ops->task_boost(&rand);
                if (cxt.cur_ops->nested_lock)
                        cxt.cur_ops->nested_lock(tid, lockset_mask);

                if (!skip_main_lock) {
                        if (acq_writer_lim > 0)
                                j = jiffies;
                        cxt.cur_ops->writelock(tid);
                        if (WARN_ON_ONCE(lock_is_write_held))
                                lwsp->n_lock_fail++;
                        lock_is_write_held = true;
                        if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
                                lwsp->n_lock_fail++; /* rare, but... */
                        if (acq_writer_lim > 0) {
                                j1 = jiffies;
                                WARN_ONCE(time_after(j1, j + acq_writer_lim),
                                          "%s: Lock acquisition took %lu jiffies.\n",
                                          __func__, j1 - j);
                        }
                        lwsp->n_lock_acquired++;

                        cxt.cur_ops->write_delay(&rand);

                        lock_is_write_held = false;
                        WRITE_ONCE(last_lock_release, jiffies);
                        cxt.cur_ops->writeunlock(tid);
                }
                if (cxt.cur_ops->nested_unlock)
                        cxt.cur_ops->nested_unlock(tid, lockset_mask);

                stutter_wait("lock_torture_writer");
        } while (!torture_must_stop());

        cxt.cur_ops->task_boost(NULL); /* reset prio */
        torture_kthread_stopping("lock_torture_writer");
        return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
        struct lock_stress_stats *lrsp = arg;
        int tid = lrsp - cxt.lrsa; /* Index of this reader in the stats array. */
        DEFINE_TORTURE_RANDOM(rand);

        VERBOSE_TOROUT_STRING("lock_torture_reader task started");
        set_user_nice(current, MAX_NICE);

        do {
                if ((torture_random(&rand) & 0xfffff) == 0)
                        schedule_timeout_uninterruptible(1);

                cxt.cur_ops->readlock(tid);
                atomic_inc(&lock_is_read_held);
                if (WARN_ON_ONCE(lock_is_write_held))
                        lrsp->n_lock_fail++; /* rare, but... */

                lrsp->n_lock_acquired++;
                cxt.cur_ops->read_delay(&rand);
                atomic_dec(&lock_is_read_held);
                cxt.cur_ops->readunlock(tid);

                stutter_wait("lock_torture_reader");
        } while (!torture_must_stop());
        torture_kthread_stopping("lock_torture_reader");
        return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
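/*
 * A writer line looks like this (illustrative numbers):
 *
 *   Writes:  Total: 94380  Max/Min: 12018/11711   Fail: 0
 *
 * "???" after Max/Min flags a suspicious imbalance (checked only when CPU
 * hotplug is disabled), and "!!!" after Fail flags acquisition failures.
 */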
static void __torture_print_stats(char *page,
                                  struct lock_stress_stats *statp, bool write)
{
        long cur;
        bool fail = false;
        int i, n_stress;
        long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
        long long sum = 0;

        n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
        for (i = 0; i < n_stress; i++) {
                if (data_race(statp[i].n_lock_fail))
                        fail = true;
                cur = data_race(statp[i].n_lock_acquired);
                sum += cur;
                if (max < cur)
                        max = cur;
                if (min > cur)
                        min = cur;
        }
        page += sprintf(page,
                        "%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
                        write ? "Writes" : "Reads ",
                        sum, max, min,
                        !onoff_interval && max / 2 > min ? "???" : "",
                        fail, fail ? "!!!" : "");
        if (fail)
                atomic_inc(&cxt.n_lock_torture_errors);
}

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
        int size = cxt.nrealwriters_stress * 200 + 8192;
        char *buf;

        if (cxt.cur_ops->readlock)
                size += cxt.nrealreaders_stress * 200 + 8192;

        buf = kmalloc(size, GFP_KERNEL);
        if (!buf) {
                pr_err("lock_torture_stats_print: Out of memory, need: %d",
                       size);
                return;
        }

        __torture_print_stats(buf, cxt.lwsa, true);
        pr_alert("%s", buf);
        kfree(buf);

        if (cxt.cur_ops->readlock) {
                buf = kmalloc(size, GFP_KERNEL);
                if (!buf) {
                        pr_err("lock_torture_stats_print: Out of memory, need: %d\n",
                               size);
                        return;
                }

                __torture_print_stats(buf, cxt.lrsa, false);
                pr_alert("%s", buf);
                kfree(buf);
        }
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
        VERBOSE_TOROUT_STRING("lock_torture_stats task started");
        do {
                schedule_timeout_interruptible(stat_interval * HZ);
                lock_torture_stats_print();
                torture_shutdown_absorb("lock_torture_stats");
        } while (!torture_must_stop());
        torture_kthread_stopping("lock_torture_stats");
        return 0;
}


static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
                                const char *tag)
{
        static cpumask_t cpumask_all;
        cpumask_t *rcmp = cpumask_nonempty(bind_readers) ? bind_readers : &cpumask_all;
        cpumask_t *wcmp = cpumask_nonempty(bind_writers) ? bind_writers : &cpumask_all;

        cpumask_setall(&cpumask_all);
        pr_alert("%s" TORTURE_FLAG
                 "--- %s%s: acq_writer_lim=%d bind_readers=%*pbl bind_writers=%*pbl call_rcu_chains=%d long_hold=%d nested_locks=%d nreaders_stress=%d nwriters_stress=%d onoff_holdoff=%d onoff_interval=%d rt_boost=%d rt_boost_factor=%d shuffle_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d verbose=%d writer_fifo=%d\n",
                 torture_type, tag, cxt.debug_lock ? " [debug]": "",
                 acq_writer_lim, cpumask_pr_args(rcmp), cpumask_pr_args(wcmp),
                 call_rcu_chains, long_hold, nested_locks, cxt.nrealreaders_stress,
                 cxt.nrealwriters_stress, onoff_holdoff, onoff_interval, rt_boost,
                 rt_boost_factor, shuffle_interval, shutdown_secs, stat_interval, stutter,
                 verbose, writer_fifo);
}

// If requested, maintain call_rcu() chains to keep a grace period always
// in flight.  These increase the probability of getting an RCU CPU stall
// warning and associated diagnostics when a locking primitive stalls.
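//
// For example, a hypothetical locktorture.call_rcu_chains=2 boot parameter
// would keep two independent self-propagating callback chains running, each
// repeatedly starting a new grace period, for the duration of the test.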

static void call_rcu_chain_cb(struct rcu_head *rhp)
{
        struct call_rcu_chain *crcp = container_of(rhp, struct call_rcu_chain, crc_rh);

        if (!smp_load_acquire(&crcp->crc_stop)) {
                (void)start_poll_synchronize_rcu(); // Start one grace period...
                call_rcu(&crcp->crc_rh, call_rcu_chain_cb); // ... and later start another.
        }
}

// Start the requested number of call_rcu() chains.
static int call_rcu_chain_init(void)
{
        int i;

        if (call_rcu_chains <= 0)
                return 0;
        call_rcu_chain_list = kzalloc_objs(*call_rcu_chain_list,
                                           call_rcu_chains);
        if (!call_rcu_chain_list)
                return -ENOMEM;
        for (i = 0; i < call_rcu_chains; i++) {
                call_rcu_chain_list[i].crc_stop = false;
                call_rcu(&call_rcu_chain_list[i].crc_rh, call_rcu_chain_cb);
        }
        return 0;
}

// Stop all of the call_rcu() chains.
static void call_rcu_chain_cleanup(void)
{
        int i;

        if (!call_rcu_chain_list)
                return;
        for (i = 0; i < call_rcu_chains; i++)
                smp_store_release(&call_rcu_chain_list[i].crc_stop, true);
        /* Wait out the final in-flight callback of every chain. */
        rcu_barrier();
        kfree(call_rcu_chain_list);
        call_rcu_chain_list = NULL;
}

static void lock_torture_cleanup(void)
{
        int i;

        if (torture_cleanup_begin())
                return;

        /*
         * Indicates early cleanup, meaning that the test has not run,
         * for example because bogus args were passed when loading the
         * module.  However, cxt.cur_ops->init() may have been invoked,
         * so in addition to performing the underlying torture-specific
         * cleanups, cxt.cur_ops->exit() will be invoked if needed.
         */
        if (!cxt.lwsa && !cxt.lrsa)
                goto end;

        if (writer_tasks) {
                for (i = 0; i < cxt.nrealwriters_stress; i++)
                        torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
                kfree(writer_tasks);
                writer_tasks = NULL;
        }

        if (reader_tasks) {
                for (i = 0; i < cxt.nrealreaders_stress; i++)
                        torture_stop_kthread(lock_torture_reader,
                                             reader_tasks[i]);
                kfree(reader_tasks);
                reader_tasks = NULL;
        }

        torture_stop_kthread(lock_torture_stats, stats_task);
        lock_torture_stats_print();  /* -After- the stats thread is stopped! */

        if (atomic_read(&cxt.n_lock_torture_errors))
                lock_torture_print_module_parms(cxt.cur_ops,
                                                "End of test: FAILURE");
        else if (torture_onoff_failures())
                lock_torture_print_module_parms(cxt.cur_ops,
                                                "End of test: LOCK_HOTPLUG");
        else
                lock_torture_print_module_parms(cxt.cur_ops,
                                                "End of test: SUCCESS");

        kfree(cxt.lwsa);
        cxt.lwsa = NULL;
        kfree(cxt.lrsa);
        cxt.lrsa = NULL;

        call_rcu_chain_cleanup();

end:
        if (cxt.init_called) {
                if (cxt.cur_ops->exit)
                        cxt.cur_ops->exit();
                cxt.init_called = false;
        }

        free_cpumask_var(bind_readers);
        free_cpumask_var(bind_writers);

        torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
        int i, j;
        int firsterr = 0;
        static struct lock_torture_ops *torture_ops[] = {
                &lock_busted_ops,
                &spin_lock_ops, &spin_lock_irq_ops,
                &raw_spin_lock_ops, &raw_spin_lock_irq_ops,
#ifdef CONFIG_BPF_SYSCALL
                &raw_res_spin_lock_ops, &raw_res_spin_lock_irq_ops,
#endif
                &rw_lock_ops, &rw_lock_irq_ops,
                &mutex_lock_ops,
                &ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
                &rtmutex_lock_ops,
#endif
                &rwsem_lock_ops,
                &percpu_rwsem_lock_ops,
        };

        if (!torture_init_begin(torture_type, verbose))
                return -EBUSY;

        /* Process args and tell the world that the torturer is on the job. */
        for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
                cxt.cur_ops = torture_ops[i];
                if (strcmp(torture_type, cxt.cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(torture_ops)) {
                pr_alert("lock-torture: invalid torture type: \"%s\"\n",
                         torture_type);
                pr_alert("lock-torture types:");
                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
                        pr_alert(" %s", torture_ops[i]->name);
                pr_alert("\n");
                firsterr = -EINVAL;
                goto unwind;
        }

        if (nwriters_stress == 0 &&
            (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
                pr_alert("lock-torture: must run at least one locking thread\n");
                firsterr = -EINVAL;
                goto unwind;
        }

        if (nwriters_stress >= 0)
                cxt.nrealwriters_stress = nwriters_stress;
        else
                cxt.nrealwriters_stress = 2 * num_online_cpus();

        if (cxt.cur_ops->init) {
                cxt.cur_ops->init();
                cxt.init_called = true;
        }

#ifdef CONFIG_DEBUG_MUTEXES
        if (str_has_prefix(torture_type, "mutex"))
                cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
        if (str_has_prefix(torture_type, "rtmutex"))
                cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
        if ((str_has_prefix(torture_type, "spin")) ||
            (str_has_prefix(torture_type, "rw_lock")))
                cxt.debug_lock = true;
#endif

        /* Initialize the statistics so that each run gets its own numbers. */
        if (nwriters_stress) {
                lock_is_write_held = false;
                cxt.lwsa = kmalloc_objs(*cxt.lwsa, cxt.nrealwriters_stress);
                if (cxt.lwsa == NULL) {
                        VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
                        firsterr = -ENOMEM;
                        goto unwind;
                }

                for (i = 0; i < cxt.nrealwriters_stress; i++) {
                        cxt.lwsa[i].n_lock_fail = 0;
                        cxt.lwsa[i].n_lock_acquired = 0;
                }
        }

        if (cxt.cur_ops->readlock) {
                if (nreaders_stress >= 0)
                        cxt.nrealreaders_stress = nreaders_stress;
                else {
                        /*
                         * By default, distribute readers and writers evenly,
                         * while still running the same total number of
                         * threads as the writer-only locks' default.
                         */
                        if (nwriters_stress < 0) /* user doesn't care */
                                cxt.nrealwriters_stress = num_online_cpus();
                        cxt.nrealreaders_stress = cxt.nrealwriters_stress;
                }

                if (nreaders_stress) {
                        cxt.lrsa = kmalloc_objs(*cxt.lrsa,
                                                cxt.nrealreaders_stress);
                        if (cxt.lrsa == NULL) {
                                VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
                                firsterr = -ENOMEM;
                                kfree(cxt.lwsa);
                                cxt.lwsa = NULL;
                                goto unwind;
                        }

                        for (i = 0; i < cxt.nrealreaders_stress; i++) {
                                cxt.lrsa[i].n_lock_fail = 0;
                                cxt.lrsa[i].n_lock_acquired = 0;
                        }
                }
        }

        firsterr = call_rcu_chain_init();
        if (torture_init_error(firsterr))
                goto unwind;

        lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

        /* Prepare torture context. */
        if (onoff_interval > 0) {
                firsterr = torture_onoff_init(onoff_holdoff * HZ,
                                              onoff_interval * HZ, NULL);
                if (torture_init_error(firsterr))
                        goto unwind;
        }
        if (shuffle_interval > 0) {
                firsterr = torture_shuffle_init(shuffle_interval);
                if (torture_init_error(firsterr))
                        goto unwind;
        }
        if (shutdown_secs > 0) {
                firsterr = torture_shutdown_init(shutdown_secs,
                                                 lock_torture_cleanup);
                if (torture_init_error(firsterr))
                        goto unwind;
        }
        if (stutter > 0) {
                firsterr = torture_stutter_init(stutter, stutter);
                if (torture_init_error(firsterr))
                        goto unwind;
        }

        if (nwriters_stress) {
                writer_tasks = kzalloc_objs(writer_tasks[0],
                                            cxt.nrealwriters_stress);
                if (writer_tasks == NULL) {
                        TOROUT_ERRSTRING("writer_tasks: Out of memory");
                        firsterr = -ENOMEM;
                        goto unwind;
                }
        }

        /* cap nested_locks to MAX_NESTED_LOCKS */
        if (nested_locks > MAX_NESTED_LOCKS)
                nested_locks = MAX_NESTED_LOCKS;

        if (cxt.cur_ops->readlock) {
                reader_tasks = kzalloc_objs(reader_tasks[0],
                                            cxt.nrealreaders_stress);
                if (reader_tasks == NULL) {
                        TOROUT_ERRSTRING("reader_tasks: Out of memory");
                        kfree(writer_tasks);
                        writer_tasks = NULL;
                        firsterr = -ENOMEM;
                        goto unwind;
                }
        }

        /*
         * Create the kthreads and start torturing (oh, those poor little locks).
         *
         * TODO: Note that we interleave writers with readers, giving writers
         * a slight advantage by creating their kthreads first. This could be
         * modified for very specific needs, or the user could even be allowed
         * to choose the policy, if ever wanted.
         */
        for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
                    j < cxt.nrealreaders_stress; i++, j++) {
                if (i >= cxt.nrealwriters_stress)
                        goto create_reader;

                /* Create writer. */
                firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
                                                     writer_tasks[i],
                                                     writer_fifo ? sched_set_fifo : NULL);
                if (torture_init_error(firsterr))
                        goto unwind;
                if (cpumask_nonempty(bind_writers))
                        torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers, true);

        create_reader:
                if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
                        continue;
                /* Create reader. */
                firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
                                                  reader_tasks[j]);
                if (torture_init_error(firsterr))
                        goto unwind;
                if (cpumask_nonempty(bind_readers))
                        torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers, true);
        }
        if (stat_interval > 0) {
                firsterr = torture_create_kthread(lock_torture_stats, NULL,
                                                  stats_task);
                if (torture_init_error(firsterr))
                        goto unwind;
        }
        torture_init_end();
        return 0;

unwind:
        torture_init_end();
        lock_torture_cleanup();
        if (shutdown_secs) {
                WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
                kernel_power_off();
        }
        return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);