__lock: representative matches across the Linux kernel tree
/* drivers/block/zram/zram_drv.c: each zram slot-lock helper resolves a
 * pointer to the per-slot lock word (this identical line shows up once
 * per helper in the raw matches) */
unsigned long *lock = &zram->table[index].__lock;

/* the lock word itself, a member of the zram table entry */
unsigned long __lock;
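A lock word like this is normally driven with the kernel's bit-lock helpers. The sketch below is illustrative, not the zram source: the ZRAM_ENTRY_LOCK bit number and the helper names are assumptions.

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define ZRAM_ENTRY_LOCK	0	/* assumed bit number, not from the source */

/* non-blocking attempt; acquire semantics on success */
static bool slot_trylock(unsigned long *lock)
{
	return !test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock);
}

/* sleep until the bit is free, then take it (preemptible, unlike a
 * classic bit_spin_lock()) */
static void slot_lock(unsigned long *lock)
{
	wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
}

/* release the bit and wake any sleeper in slot_lock() */
static void slot_unlock(unsigned long *lock)
{
	clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
}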
/* drivers/gpu/drm/drm_gpuvm.c: vm_bo list maintenance; the __lock
 * parameter selects whether the list's internal spinlock is taken
 * (macro bodies truncated in these matches) */
#define drm_gpuvm_bo_list_add(__vm_bo, __list_name, __lock) \
	__lock ? &(__vm_bo)->vm->__list_name.lock : \

#define drm_gpuvm_bo_list_del_init(__vm_bo, __list_name, __lock) \
	__lock ? &(__vm_bo)->vm->__list_name.lock : \

#define drm_gpuvm_bo_list_del(__vm_bo, __list_name, __lock) \
	__lock ? &(__vm_bo)->vm->__list_name.lock : \
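The shape of these truncated macros is a conditional-locking helper: a boolean parameter picks either the list's internal spinlock or no lock at all. Here is a minimal sketch of that pattern with invented names (demo_vm, demo_list_add), not the real drm_gpuvm internals:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_vm {
	struct {
		struct list_head list;
		spinlock_t lock;
	} extobj;
};

/* take the per-list lock only when __lock is true; callers that already
 * hold an outer lock pass false */
#define demo_list_add(__vm, __entry, __list_name, __lock)		\
	do {								\
		spinlock_t *l = (__lock) ?				\
			&(__vm)->__list_name.lock : NULL;		\
									\
		if (l)							\
			spin_lock(l);					\
		list_add_tail((__entry), &(__vm)->__list_name.list);	\
		if (l)							\
			spin_unlock(l);					\
	} while (0)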
/* drivers/md/dm-bio-prison-v2.c: double-underscore helper called with
 * the prison lock already held by the exported wrapper */
r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
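This line shows the kernel's double-underscore convention: __lock() expects its caller to already hold the serializing lock, and the public wrapper takes it. A hedged sketch of that split, with invented names rather than the dm-bio-prison code:

#include <linux/spinlock.h>

struct demo_prison {
	spinlock_t lock;
	unsigned int nr_locked;
};

/* __ prefix: caller must hold prison->lock */
static int __demo_lock(struct demo_prison *prison)
{
	lockdep_assert_held(&prison->lock);
	return prison->nr_locked++;
}

/* public entry point: takes the lock, delegates the real work */
static int demo_lock_cell(struct demo_prison *prison)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __demo_lock(prison);
	spin_unlock_irq(&prison->lock);

	return r;
}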
/* include/linux/local_lock_internal.h, PREEMPT_RT flavor: a local_lock
 * maps onto a per-CPU spinlock_t (an rt_mutex under RT), which is why
 * ownership can be tested against current (macro bodies truncated in
 * these matches) */
#define __local_lock(__lock) \
	spin_lock((__lock)); \

#define __local_unlock(__lock) \
	spin_unlock((__lock)); \

#define __local_lock_is_locked(__lock) \
	(rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
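For context, this is how the local_lock API behind these macros is consumed; on !PREEMPT_RT builds the same calls compile down to preemption control instead of a spinlock. A usage sketch with invented per-CPU data:

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct frag_cache {
	local_lock_t lock;
	unsigned long count;
};

static DEFINE_PER_CPU(struct frag_cache, frag_cache) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void frag_cache_inc(void)
{
	/* protects only this CPU's instance; on RT this becomes the
	 * per-CPU spinlock seen in __local_lock() above */
	local_lock(&frag_cache.lock);
	__this_cpu_inc(frag_cache.count);
	local_unlock(&frag_cache.lock);
}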
/* include/linux/maple_tree.h: a maple tree with an external lock records
 * that lock's dep_map for lockdep; without lockdep the _EXT initializer
 * falls back to MTREE_INIT() */
#ifdef CONFIG_LOCKDEP
#define MTREE_INIT_EXT(name, __flags, __lock) {				\
	.ma_external_lock = &(__lock).dep_map,				\
	.ma_flags = (__flags),						\
	.ma_root = NULL,						\
}
#else
#define MTREE_INIT_EXT(name, __flags, __lock) MTREE_INIT(name, __flags)
#endif
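A hedged usage sketch: a maple tree initialized with MT_FLAGS_LOCK_EXTERN so lockdep validates accesses against the caller's spinlock rather than the tree's internal lock. demo_tree, demo_lock and demo_store are invented names:

#include <linux/maple_tree.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static struct maple_tree demo_tree = MTREE_INIT_EXT(demo_tree,
		MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN, demo_lock);

static int demo_store(unsigned long index, void *entry)
{
	MA_STATE(mas, &demo_tree, index, index);
	int err;

	/* with MT_FLAGS_LOCK_EXTERN the caller supplies all locking */
	spin_lock(&demo_lock);
	err = mas_store_gfp(&mas, entry, GFP_ATOMIC);
	spin_unlock(&demo_lock);

	return err;
}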
/* lowest-level arch spinlock: no lockdep tracking and no implicit
 * preemption handling */
static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

arch_spin_lock(&__lock);
arch_spin_unlock(&__lock);
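Because arch_spin_lock() bypasses lockdep and preemption accounting, callers must fence preemption themselves. A minimal sketch of that discipline (the critical-section body is invented):

#include <linux/preempt.h>
#include <linux/spinlock.h>

static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void demo_emergency_path(void)
{
	/* arch_spin_lock() will not disable preemption for us */
	preempt_disable();
	arch_spin_lock(&__lock);

	/* ... minimal, non-sleeping critical section ... */

	arch_spin_unlock(&__lock);
	preempt_enable();
}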
/* kernel/sched/core.c: the core-scheduling lock/unlock helpers walk all
 * SMT siblings' runqueue locks in order; the sparse annotations
 * deliberately overapproximate that set as the whole runqueues variable */
	__acquires(&runqueues.__lock) /* overapproximation */
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);

	__releases(&runqueues.__lock) /* overapproximation */
		raw_spin_unlock(&cpu_rq(t)->__lock);

/* raw_spin_rq_lock_nested(), raw_spin_rq_trylock() and sched_init()
 * reach the runqueue lock only through its __lock name */
	raw_spin_lock_nested(&rq->__lock, subclass);
	ret = raw_spin_trylock(&rq->__lock);
	raw_spin_lock_init(&rq->__lock);

/* kernel/sched/sched.h: struct rq's lock is deliberately named __lock so
 * nothing takes it directly; the rq_lockp()/__rq_lockp() accessors return
 * the shared core lock when core scheduling is enabled */
	raw_spinlock_t		__lock;

	return &rq->core->__lock;
	return &rq->__lock;

	return &rq->core->__lock;
	return &rq->__lock;

/* !CONFIG_SCHED_CORE variants always use the rq's own lock */
	return &rq->__lock;
	return &rq->__lock;
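The unusual __lock name is deliberate: it breaks any code that still writes rq->lock, funneling every locker through the accessors above. A simplified stand-in for that pattern (invented names; the real struct rq is far larger):

#include <linux/spinlock.h>

struct demo_rq {
	raw_spinlock_t	__lock;		/* never taken directly */
	struct demo_rq	*core;		/* lock owner shared by SMT siblings */
	unsigned int	core_enabled;
};

/* single choke point deciding which lock protects this runqueue */
static inline raw_spinlock_t *demo_rq_lockp(struct demo_rq *rq)
{
	if (rq->core_enabled)
		return &rq->core->__lock;

	return &rq->__lock;
}

static inline void demo_rq_lock(struct demo_rq *rq)
{
	raw_spin_lock(demo_rq_lockp(rq));
}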
/* BPF CO-RE struct flavor (the ___new suffix is stripped during
 * relocation): used to probe whether the running kernel has the renamed
 * rq->__lock field */
struct rq___new {
	raw_spinlock_t __lock;
};

	if (bpf_core_field_exists(struct rq___new, __lock))
		lock_off = offsetof(struct rq___new, __lock);
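A fuller version of the probe, assuming a matching rq___old flavor for kernels where the field was still named lock; rq___old and rq_lock_offset are invented here (BPF program context):

#include "vmlinux.h"
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>

/* flavor for older kernels, before the rename to __lock */
struct rq___old {
	raw_spinlock_t lock;
} __attribute__((preserve_access_index));

static __u64 rq_lock_offset(void)
{
	/* CO-RE resolves both checks against the running kernel's BTF */
	if (bpf_core_field_exists(struct rq___new, __lock))
		return offsetof(struct rq___new, __lock);

	return offsetof(struct rq___old, lock);
}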