__ARCH_SPIN_LOCK_UNLOCKED
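__ARCH_SPIN_LOCK_UNLOCKED is the per-architecture static initializer for an
arch_spinlock_t, the raw lock type underneath the kernel's spinlock and
rwlock wrappers. Its expansion differs by architecture; as one concrete
case, the generic qspinlock variant (include/asm-generic/qspinlock_types.h)
initializes the lock word to zero:

    #define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }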
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
# define __ARCH_SPIN_LOCK_UNLOCKED \
#define __ARCH_RW_LOCK_UNLOCKED { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
[0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
{ .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t kmmio_lock = __ARCH_SPIN_LOCK_UNLOCKED;
.wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
#define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; })
.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
.lock = __ARCH_SPIN_LOCK_UNLOCKED,
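Most of the hits above follow one pattern: a file-scope arch_spinlock_t is
statically initialized unlocked, then taken with the arch_spin_*()
primitives. Because arch_spinlock_t sits below lockdep and below the
preemption-aware spin_lock() wrappers, callers disable interrupts (and with
them preemption) themselves. A minimal sketch of that pattern, with
illustrative names that are not taken from the hits above:

    #include <linux/spinlock.h>

    /* Illustrative lock, mirroring hits such as trace_cmdline_lock. */
    static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;

    static void example_critical_section(void)
    {
            unsigned long flags;

            /*
             * Raw arch locks do no lockdep tracking and leave IRQ and
             * preemption state alone, so the caller handles both; here
             * local_irq_save() keeps the CPU from being interrupted or
             * rescheduled while the lock is held.
             */
            local_irq_save(flags);
            arch_spin_lock(&example_lock);

            /* ... critical section ... */

            arch_spin_unlock(&example_lock);
            local_irq_restore(flags);
    }

The (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED casts seen in several hits
are the runtime counterpart: outside a declaration, re-initializing a lock
by assignment needs the compound-literal form, as in
cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;.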