arch_spinlock_t
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t * lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
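Taken together, the prototypes above are the whole arch-level locking contract: initialize with __ARCH_SPIN_LOCK_UNLOCKED, pair arch_spin_lock() with arch_spin_unlock(), and treat a nonzero arch_spin_trylock() return as success. A minimal usage sketch; the demo_* names are invented for illustration:

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static unsigned long demo_count;

static void demo_inc(void)
{
        arch_spin_lock(&demo_lock);             /* spin until acquired */
        demo_count++;
        arch_spin_unlock(&demo_lock);
}

static int demo_try_inc(void)
{
        if (!arch_spin_trylock(&demo_lock))
                return 0;                       /* contended: caller retries */
        demo_count++;
        arch_spin_unlock(&demo_lock);
        return 1;
}

static int demo_is_free(void)
{
        /* value_unlocked() inspects a copied lock word, not the live lock */
        return arch_spin_value_unlocked(READ_ONCE(demo_lock));
}

Note that these raw primitives do not disable preemption or interrupts and are invisible to lockdep; that is exactly why the tracing and die-path users further down reach for them.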
extern arch_spinlock_t smp_atomic_ops_lock;
static inline void arch_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
arch_spinlock_t lock_mutex;
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
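smp_atomic_ops_lock is the classic fallback for a port whose ISA lacks atomic read-modify-write instructions: every atomic_*() operation takes one global arch spinlock with interrupts disabled. A hedged sketch of that pattern; the atomic_ops_lock()/atomic_ops_unlock() macro names are assumptions here:

#define atomic_ops_lock(flags)                          \
        do {                                            \
                local_irq_save(flags);                  \
                arch_spin_lock(&smp_atomic_ops_lock);   \
        } while (0)

#define atomic_ops_unlock(flags)                        \
        do {                                            \
                arch_spin_unlock(&smp_atomic_ops_lock); \
                local_irq_restore(flags);               \
        } while (0)

static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long flags;

        atomic_ops_lock(flags);         /* serialize all atomics system-wide */
        v->counter += i;
        atomic_ops_unlock(flags);
}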
static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
arch_spinlock_t lockval;
static inline int arch_spin_trylock(arch_spinlock_t *lock)
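The lockval snapshot above hints at a ticket lock: both predicates want one consistent read of the lock word. A hedged sketch of is_locked/is_contended on that assumption; the owner/next field names are invented:

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        arch_spinlock_t lockval = READ_ONCE(*lock);     /* one atomic snapshot */

        return lockval.owner != lockval.next;           /* a ticket is held */
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        arch_spinlock_t lockval = READ_ONCE(*lock);

        return (lockval.next - lockval.owner) > 1;      /* waiters behind holder */
}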
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static inline void arch_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
arch_spinlock_t *s = ATOMIC_HASH(l); \
arch_spinlock_t *s = ATOMIC_HASH(l); \
s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)];
_futex_spin_lock_irqsave(arch_spinlock_t *s, unsigned long *flags)
_futex_spin_unlock_irqrestore(arch_spinlock_t *s, unsigned long *flags)
arch_spinlock_t *s;
s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)];
arch_spinlock_t *s;
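The lws_lock_start lines show the futex side of the hashed-lock idea: hash the user address to one slot in a fixed array of arch spinlocks. A hedged sketch of the helpers named above; the bucket count and hash are assumptions:

static inline unsigned long _futex_hash_index(unsigned long ua)
{
        return (ua >> 2) & 0xff;        /* assumption: 256 buckets, word-aligned keys */
}

static inline void
_futex_spin_lock_irqsave(arch_spinlock_t *s, unsigned long *flags)
{
        local_irq_save(*flags);         /* raw arch locks never touch irq state */
        arch_spin_lock(s);
}

static inline void
_futex_spin_unlock_irqrestore(arch_spinlock_t *s, unsigned long *flags)
{
        arch_spin_unlock(s);
        local_irq_restore(*flags);
}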
static inline int arch_spin_is_locked(arch_spinlock_t *x)
static inline void arch_spin_lock(arch_spinlock_t *x)
static inline void arch_spin_unlock(arch_spinlock_t *x)
static inline int arch_spin_trylock(arch_spinlock_t *x)
arch_spinlock_t lock_mutex;
arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
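__atomic_hash spreads unrelated atomics across several locks so they do not all contend on a single cacheline. A hedged sketch of the array's initializer and the ATOMIC_HASH() indexing it implies; the size and the divisor are assumptions:

#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH(a) \
        (&__atomic_hash[(((unsigned long)(a)) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1)])

arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
        [0 ... ATOMIC_HASH_SIZE - 1] = __ARCH_SPIN_LOCK_UNLOCKED
};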
static inline void arch_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
void splpar_spin_yield(arch_spinlock_t *lock);
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void spin_yield(arch_spinlock_t *lock)
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t lock;
arch_spinlock_t lock;
void splpar_spin_yield(arch_spinlock_t *lock)
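splpar_spin_yield() exists because spinning is actively harmful on a shared-processor LPAR: the holder's virtual CPU may itself be preempted, so the spinner donates its timeslice instead. A hedged sketch; the holder-in-low-bits encoding and the yield_count_of()/yield_to_preempted() paravirt helpers are assumptions here:

void splpar_spin_yield(arch_spinlock_t *lock)
{
        unsigned int lock_value, holder_cpu, yield_count;

        lock_value = READ_ONCE(lock->slock);
        if (lock_value == 0)
                return;                         /* lock freed; just retry */
        holder_cpu = lock_value & 0xffff;       /* assumption: holder cpu in low bits */

        yield_count = yield_count_of(holder_cpu);
        if ((yield_count & 1) == 0)
                return;                         /* holder vcpu is running; keep spinning */
        smp_rmb();
        if (READ_ONCE(lock->slock) != lock_value)
                return;                         /* lock changed hands meanwhile */
        yield_to_preempted(holder_cpu, yield_count);    /* confer our timeslice */
}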
static arch_spinlock_t timebase_lock;
SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *)
SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *)
SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *)
SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *)
SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *)
SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t)
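The SPINLOCK_BASE_DECLARE() lines read like a generator for the arch_spin_*() entry points, one wrapper per (name, return type, argument type) triple. A hedged sketch of a plausible expansion, assuming a boot-time static key picks between queued and ticket backends (the key name is invented):

DECLARE_STATIC_KEY_TRUE(qspinlock_key);         /* assumption: set during boot */

#define SPINLOCK_BASE_DECLARE(op, type, type_lock)              \
static __always_inline type arch_spin_##op(type_lock lock)      \
{                                                               \
        if (static_branch_likely(&qspinlock_key))               \
                return queued_spin_##op(lock);                  \
        return ticket_spin_##op(lock);                          \
}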
void arch_spin_relax(arch_spinlock_t *lock);
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lp)
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
static inline void arch_spin_lock(arch_spinlock_t *lp)
static inline int arch_spin_trylock(arch_spinlock_t *lp)
static inline void arch_spin_unlock(arch_spinlock_t *lp)
arch_spinlock_t wait;
static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
void arch_spin_lock_wait(arch_spinlock_t *lp)
int arch_spin_trylock_retry(arch_spinlock_t *lp)
void arch_spin_relax(arch_spinlock_t *lp)
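arch_spin_lock_wait() and arch_spin_trylock_retry() split the lock into an inline fast path and an out-of-line slow path. A hedged sketch of that split, built only on arch_spin_trylock_once() from above; the retry budget is an assumption:

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))        /* single inline attempt */
                arch_spin_lock_wait(lp);        /* out-of-line queued wait */
}

int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        int count;

        for (count = 1000; count > 0; count--) {        /* assumed retry budget */
                if (arch_spin_trylock_once(lp))
                        return 1;
                arch_spin_relax(lp);            /* e.g. yield to the hypervisor */
        }
        return 0;
}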
static inline void arch_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t lock;
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t kmmio_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t wait_lock;
static __always_inline void ticket_spin_lock(arch_spinlock_t *lock)
static __always_inline bool ticket_spin_trylock(arch_spinlock_t *lock)
static __always_inline void ticket_spin_unlock(arch_spinlock_t *lock)
static __always_inline int ticket_spin_value_unlocked(arch_spinlock_t lock)
static __always_inline int ticket_spin_is_locked(arch_spinlock_t *lock)
arch_spinlock_t val = READ_ONCE(*lock);
static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
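The ticket_spin_*() family is the generic ticket lock: atomically take the next ticket from the high half of the lock word, then wait until the owner half catches up. A sketch modeled on that scheme; the atomic_t val member and the 16/16 split are assumptions about the lock layout:

static __always_inline void ticket_spin_lock(arch_spinlock_t *lock)
{
        u32 val = atomic_fetch_add(1 << 16, &lock->val);        /* grab a ticket */
        u16 ticket = val >> 16;

        if (ticket == (u16)val)
                return;                         /* uncontended: our turn already */

        atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL);
        smp_mb();                               /* full fence on the contended path */
}

static __always_inline void ticket_spin_unlock(arch_spinlock_t *lock)
{
        /* bump only the owner half; pick the right u16 on big-endian */
        u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
        u32 val = atomic_read(&lock->val);

        smp_store_release(ptr, (u16)val + 1);
}

static __always_inline int ticket_spin_value_unlocked(arch_spinlock_t lock)
{
        u32 val = lock.val.counter;

        return (val >> 16) == (val & 0xffff);   /* next == owner: free */
}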
arch_spinlock_t raw_lock;
static inline void arch_spin_lock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
arch_spinlock_t *l = (void *)lock;
arch_spinlock_t lock;
arch_spinlock_t *l = (void *)lock;
arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t lock;
global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t max_lock;
arch_spinlock_t lock;
.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t wakeup_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t stack_trace_max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
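The tracing and ring-buffer sites above use arch_spinlock_t deliberately: the lockdep- and tracepoint-instrumented spinlocks would recurse into the very tracer that is taking them. The price is that callers manage interrupts themselves. A hedged sketch of the resulting pattern; the names are invented:

static arch_spinlock_t trace_buf_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void trace_buf_update(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* raw arch locks leave irqs alone */
        arch_spin_lock(&trace_buf_lock);
        /* ... touch tracer state ... */
        arch_spin_unlock(&trace_buf_lock);
        local_irq_restore(flags);
}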
arch_spinlock_t *lock = lock_addr(v); \
arch_spinlock_t *lock = lock_addr(v);
arch_spinlock_t *lock = lock_addr(v);
arch_spinlock_t *lock = lock_addr(v);
arch_spinlock_t *lock = lock_addr(v);
arch_spinlock_t lock;
static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
arch_spinlock_t *lock = lock_addr(v);
arch_spinlock_t *lock = lock_addr(v);
arch_spinlock_t *lock = lock_addr(v); \
arch_spinlock_t *lock = lock_addr(v); \
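lock_addr() is the heart of the spinlock-based atomic64 fallback: hash the atomic64_t's address to one of a small array of cacheline-padded arch spinlocks, so unrelated variables rarely share a lock. A sketch modeled on the generic fallback; NR_LOCKS and the hash constants are assumptions:

#define NR_LOCKS 16

static union {
        arch_spinlock_t lock;
        char pad[L1_CACHE_BYTES];       /* one lock per cacheline */
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;

static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
{
        unsigned long addr = (unsigned long)v;

        addr >>= L1_CACHE_SHIFT;
        addr ^= (addr >> 8) ^ (addr >> 16);     /* fold in higher bits */
        return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

s64 generic_atomic64_read(const atomic64_t *v)
{
        unsigned long flags;
        arch_spinlock_t *lock = lock_addr(v);
        s64 val;

        local_irq_save(flags);
        arch_spin_lock(lock);
        val = v->counter;
        arch_spin_unlock(lock);
        local_irq_restore(flags);
        return val;
}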
static inline void arch_spin_lock(arch_spinlock_t *mutex)
static inline void arch_spin_unlock(arch_spinlock_t *mutex)
static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)