mutex_owner
uintptr_t owner = mutex.mutex_owner;
if ((owner = mutex.mutex_owner) == (uintptr_t)NULL) {
/*
 * Extract the owning thread from a mutex: the mutex_owner word is stored
 * as an integer and round-tripped through uintptr_t back to a ulwp_t
 * pointer.  NOTE(review): assumes mutex_owner holds a valid ulwp_t
 * address (or 0 when unowned) — confirm against the mutex implementation.
 */
#define MUTEX_OWNER(mp) ((ulwp_t *)(uintptr_t)(mp)->mutex_owner)
ultos((uint64_t)mcopy.mutex_owner, 16, buf + strlen(buf));
ultos((uint64_t)mcopy.mutex_owner, 16, buf + strlen(buf));
ASSERT(mp->mutex_owner == (uintptr_t)self);
ASSERT(mp->mutex_owner == (uintptr_t)self);
volatile uint64_t *ownerp = (volatile uint64_t *)&mp->mutex_owner;
ownerp = (volatile uint64_t *)&mp->mutex_owner;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = 0;
mp->mutex_owner = 0;
mp->mutex_owner = 0;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = 0;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = 0;
mp->mutex_owner = 0;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = 0;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = 0;
mp->mutex_owner = 0;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = (uintptr_t)self;
mp->mutex_owner = 0;
stall_mutex.mutex_owner = (uintptr_t)curthread;
udp->link_lock.mutex_owner = (uintptr_t)self;
ulwp_mutex(self, udp)->mutex_owner = (uintptr_t)self;
rw_m.mx.mutex_owner == ocb->th_p->th_unique)
if (rwlock->mutex_owner == ocb->th_p->th_unique)
si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
/*
 * Kernel mutex owner query.  Callers visible elsewhere compare the result
 * directly against curthread, so the returned pointer identifies the
 * owning thread (presumably NULL when the mutex is unheld — confirm
 * against the kernel mutex implementation).
 */
extern void *mutex_owner(const kmutex_t *);
ASSERT(mutex_owner(&hd->femh_lock) == curthread);
return ((mutex_owner(&ulp->ul_lock) != curthread) ? 0 : 1);
if (mutex_owner(&ufs_fix.uq_mutex) == curthread)
if (mutex_owner(&spa_namespace_lock) != curthread) {
if (mutex_owner(&zp->z_lock) != curthread) {
if (mutex_owner(&lofi_lock) == curthread)
mutex_owner(&domp->lock);
SOBJ_MUTEX, mutex_owner, turnstile_stay_asleep, turnstile_change_pri
mutex_owner(mp) == curthread)
while (mutex_owner(upi_lock) == owner) {
return (mutex_owner(&vmem_sleep_lock) == curthread ||
mutex_owner(&vmem_nosleep_lock) == curthread ||
mutex_owner(&vmem_pushpage_lock) == curthread ||
mutex_owner(&vmem_panic_lock) == curthread);
#define MBX_REGISTER_LOCK_OWNER(ha) mutex_owner(&ha->pha->mbx_mutex)
/*
 * Kernel mutex owner query, declared with its concrete return type
 * (struct _kthread *) rather than void *.  NOTE(review): this duplicates
 * another prototype of the same function with a void * return — the two
 * declarations should agree; verify which one the build actually uses.
 */
extern struct _kthread *mutex_owner(const kmutex_t *);
suword64_noerr(&lp->mutex_owner, un.word64);
suword32_noerr((uint32_t *)&lp->mutex_owner, un.word32[0]);
suword32_noerr((uint32_t *)&lp->mutex_owner + 1, un.word32[1]);
if (mutex_owner(&xenwatch_mutex) != curthread) {
if (mutex_owner(&xcp->excl) == curthread) {
while (mutex_owner(poll_lock) != NULL)
if (mutex_owner(&contig_list_lock) != curthread) {
if (mutex_owner(&ec_lock) != curthread) {
while (prom_cpu || mutex_owner(&prom_mutex)) {
while (prom_cpu || mutex_owner(&prom_mutex)) {
mutex_owner(&domp->lock);