mtx_owner
if (__predict_false(mtx->mtx_owner == ci))
mtx->mtx_owner = ci;
mtx->mtx_owner = NULL;
mtx->mtx_owner = NULL;
if (__predict_false(mtx->mtx_owner == ci))
mtx->mtx_owner = ci;
volatile void *mtx_owner;
if ((mtx)->mtx_owner != curcpu()) \
if ((mtx)->mtx_owner == curcpu()) \
volatile void *mtx_owner;
if (((mtx)->mtx_owner != curcpu()) && !(panicstr || db_active)) \
if (((mtx)->mtx_owner == curcpu()) && !(panicstr || db_active)) \
if (__predict_false(mtx->mtx_owner == ci))
if (__predict_false(mtx->mtx_owner == ci))
mtx->mtx_owner = ci;
mtx->mtx_owner = NULL;
mtx->mtx_owner = NULL;
mtx->mtx_owner = ci;
mtx->mtx_owner = 0;
return atomic_cas_ulong(&mtx->mtx_owner, e, v);
owner = mtx->mtx_owner;
owner = atomic_cas_ulong(&mtx->mtx_owner, self, 0);
mtx->mtx_owner = 0;
if (__predict_false(mtx_owner(mtx) == self))
mtx->mtx_owner = self;
mtx->mtx_owner = 0;
if (__predict_false(mtx->mtx_owner == ci))
owner = mtx->mtx_owner;
owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
if (__predict_false(mtx->mtx_owner != ci))
mtx->mtx_owner = NULL;
if (mtx_owner(&w_mtx) != mtx_curcpu())
((mtx_owner(mtx) == mtx_curcpu()) || panicstr || db_active)
struct cpu_info *volatile mtx_owner;
volatile unsigned long mtx_owner;
/*
 * Extract the owner from mtx->mtx_owner by masking off the low bit.
 * NOTE(review): the low bit is presumably used as an in-word flag
 * (e.g. a waiters/contention marker) set elsewhere — confirm against
 * the mtx_owner field's definition and the lock acquire/release paths.
 */
#define mtx_owner(mtx) ((mtx)->mtx_owner & ~1UL)
if (mtx_owner(mtx) != mtx_curcpu() && !(panicstr || db_active)) \
if (mtx_owner(mtx) == mtx_curcpu() && !(panicstr || db_active)) \