LK_SHARE
if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
LK_SHARE)
if (fp || (!(x & LK_SHARE)))
/*
 * Hint that the shared-mode bit is expected set; the comparison belongs
 * inside __predict_true() so the hint covers the full boolean expression.
 */
		if (__predict_true((x & LK_SHARE) != 0)) {
/*
 * Hint that the shared-mode bit is expected set; the comparison belongs
 * inside __predict_true() so the hint covers the full boolean expression.
 */
		if (__predict_true((x & LK_SHARE) != 0)) {
(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
(v & LK_SHARE) && !LK_SHARERS(v);
else if (lk->lk_lock & LK_SHARE)
if ((x & LK_SHARE) == 0) {
if (slocked || (lk->lk_lock & LK_SHARE)) {
((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
if ((lk->lk_lock & LK_SHARE) == 0) {
if (lk->lk_lock & LK_SHARE)
else if (lk->lk_lock & LK_SHARE)
return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
LOCKSTAT_READER, (x & LK_SHARE) == 0,
(x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
(x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
(LK_SHARE | LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS | LK_WRITER_RECURSED)
/*
 * Build a shared-mode lock word: shift the sharer count (x) into its
 * field and set the LK_SHARE flag.  The inner parentheses make the
 * shift-before-OR precedence explicit rather than implicit.
 */
#define	LK_SHARERS_LOCK(x)	(((x) << LK_SHARERS_SHIFT) | LK_SHARE)