atomic_get
// Returns true when the spinlock is currently held (its lock counter is
// positive). This is only a snapshot: the atomic read does not prevent the
// state from changing immediately afterwards, so it is suitable for
// assertions/diagnostics, not for synchronization decisions.
#define B_SPINLOCK_IS_LOCKED(spinlock) (atomic_get(&(spinlock)->lock) > 0)
{ return atomic_get((int32*)&fReferenceCount); }
// Atomically reads and returns the current value of *value with full memory
// ordering. Declaration only; the implementation lives elsewhere (kernel/libroot).
extern int32 atomic_get(int32 *value);
// Returns an atomic snapshot of the current number of entries.
// The value may be stale by the time the caller uses it.
int32 EntriesCount()
{
	const int32 snapshot = atomic_get(&fEntriesCount);
	return snapshot;
}
return ((uint32)atomic_get(element) & (1u << (cpu % kArrayBits))) != 0;
return (uint32)atomic_get((int32*)&lock->count);
uint32 current = (uint32)atomic_get((int32*)&lock->count);
return atomic_get(&thread->wait.status) == 1;
return (PointerType*)atomic_get((int32*)_pointer);
pending = atomic_get(&sc->ec_sci_pending);
uint32 readIndexNew = atomic_get((int32*)&fRXRing->read_index);
atomic_get((int32*)&fRXRing->write_index));
atomic_get((int32*)&fRXRing->read_index), atomic_get((int32*)&fRXRing->write_index));
uint32 pendingSendLength = atomic_get((int32*)&fRXRing->pending_send_size);
uint32 readIndex = atomic_get((int32*)&fTXRing->read_index);
uint32 writeIndex = atomic_get((int32*)&fTXRing->write_index);
uint32 writeIndexOld = atomic_get((int32*)&fTXRing->write_index);
atomic_get((int32*)&fTXRing->read_index));
atomic_get((int32*)&fTXRing->write_index), atomic_get((int32*)&fTXRing->read_index));
&& writeIndexOld == (uint32)atomic_get((int32*)&fTXRing->read_index)) {
uint32 readIndex = atomic_get((int32*)&fRXRing->read_index);
uint32 writeIndex = atomic_get((int32*)&fRXRing->write_index);
if (atomic_get(&sIgnoreInterrupts)) {
if (atomic_get(&dev->flags) & PS2_FLAG_KEYB) {
while (!(atomic_get(&dev->parent_dev->flags) & PS2_FLAG_ENABLED)) {
flags = atomic_get(&dev->flags);
if (!(atomic_get(&dev->flags) & PS2_FLAG_KEYB)) {
if (atomic_get(&dev->flags) & PS2_FLAG_ACK) {
if (atomic_get(&dev->flags) & PS2_FLAG_NACK) {
if (atomic_get(&sKeyboardOpenCount) == 0)
if (atomic_get(&sKeyboardOpenCount) != 0)
if (atomic_get(&sKeyboardOpenCount) == 0) {
if ((int32)(addr_t)cookie == atomic_get(&fCurrentRequest))
while (atomic_get(&fCleanupCount) != 0) {
if (atomic_get(&controller->opened) != 0)
while (atomic_get(&fInsideNotify) != 0)
while (atomic_get(&fInsideNotify) != 0)
if (atomic_get(&fProcessedBuffers) <= 0)
return atomic_get(v);
if ((int32)(addr_t)cookie == atomic_get(&info->currentRequest))
while (atomic_get(&fBusyCount) != 0)
while (atomic_get(&fBusyCount) != 0)
status = atomic_get((int32 *)&sc->xl_intr_status);
statack = (uint8_t)atomic_get((int32 *)&sc->sc_statack);
status = atomic_get((int32 *)&sc->sk_intstatus);
status = atomic_get((int32 *)&sc->sc_intr_status);
intr_status = atomic_get((int32 *)&sc->sc_intr_status);
r = atomic_get((int32 *)&sc->sc_intr_status);
r = atomic_get((int32 *)&sc->sc_intr_status);
r1 = atomic_get((int32 *)&sc->sc_intr_status_1);
r2 = atomic_get((int32 *)&sc->sc_intr_status_2);
status = atomic_get((int32 *)&sc->sc_intr_status);
r = atomic_get((int32 *)&sc->sc_intr_status);
status = atomic_get(&pc->pc_intr_status);
tx_rings = atomic_get(&pc->pc_intr_tx_rings);
size_t bytesLeft = DEF_BUFFER_SIZE - atomic_get(&fWriteBufferAvail);
if (atomic_get(&(device->stop_watching))) {
return atomic_get(&fReferenceCount);
if (atomic_get(&fOpenFiles) > 0) {
if (atomic_get(&fOpenDirectories) > 0) {
if (atomic_get(&fOpenAttributeDirectories) > 0) {
if (atomic_get(&fOpenAttributes) > 0) {
if (atomic_get(&fOpenIndexDirectories) > 0) {
if (atomic_get(&fOpenQueries) > 0) {
if (atomic_get(&device->open_count) != 0)
if (atomic_get(&interface->DeviceInterface()->monitor_count) > 0)
if (atomic_get(&interface->DeviceInterface()->monitor_count) > 0)
while (atomic_get(&interface->ref_count) > 0) {
if (atomic_get(&interface->monitor_count) > 0)
inline bool IsBusy() const { return atomic_get((int32*)&fBusy) == 1 ; }
int32 max = atomic_get(&sMaxAllocatedDataHeaderCount);
int32 max = atomic_get(&sMaxAllocatedNetBufferCount);
while (atomic_get(&fQuitThread) == 0) {
int32 index = atomic_get(&fCurrentScreenshotIndex);
const int32 currentIndex = atomic_get(&fCurrentScreenshotIndex);
while (atomic_get(&fIsWatching) > 0) {
while (atomic_get(&fIsWatching) > 0) {
if (atomic_get(&fReadBufferSize) == 0) {
while (atomic_get(&fReadBufferSize) == 0 && status == B_OK) {
int32 toRead = atomic_get(&fReadBufferSize);
return (atomic_get(&fFlags) & F_HAS_DATA) != 0;
return atomic_get(&requestStatus);
return atomic_get(&canCancel) == 1;
if (atomic_get(std::addressof(it->second)) == 0) {
if (static_cast<size_t>(atomic_get(std::addressof(it->second)))
count = atomic_get(&fUseCount);
if (atomic_get(&_ReferenceCount()) > 1) {
if (fPrivateData != NULL && atomic_get(&_ReferenceCount()) > 1) {
// Atomically loads a 32-bit value through the given pointer.
// The parameter is parenthesized in the expansion so that expression
// arguments work correctly: without the parentheses,
// atomic_load_32(base + offset) would expand to
// atomic_get((int32*)base + offset) — casting only `base` and then doing
// pointer arithmetic on the cast result, which reads the wrong address.
#define atomic_load_32(ptr) atomic_get((int32*)(ptr))
&& atomic_get(&fWakeupRetry) == 0)
uint32 pos = atomic_get((int32*)&fCursorBuffer->pos);
if (atomic_get(&it->fMask->fNextMaskCount) > 0) {
return (atomic_get(&fStopped) != 0);
return (atomic_get(&fBusy) != 0);
int32 count = atomic_get(&sWorkerCount);
if (atomic_get(&fAnimationActive) == enable)
while (atomic_get(&fAnimationActive)) {
while (!locked && atomic_get(&userTimer->fSkip) == 0) {
int32 oldValue = atomic_get(value);
= atomic_get(&thread->team->debug_info.flags);
if (atomic_get(&block->ref_count) == 0 && !block->unused) {
const int32 removedCount = atomic_get(&fEntriesCount) - 1;
while (atomic_get(&fEntriesCount) != removedCount) {
while ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
while (atomic_get(&sHandOverKDLToCPU) >= 0)
int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
teamDebugFlags = atomic_get(&team->debug_info.flags);
int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0)
if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
debuggerPort = atomic_get(&sDefaultDebuggerPort);
if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
bool setPort = !(atomic_get(&thread->debug_info.flags)
int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
events = atomic_get(&event->events);
uint32 writeAvailable = atomic_get((int32*)&fWriteAvailable);
uint32 readEnd = atomic_get((int32*)&fWriteHead);
readHead = atomic_get((int32*)&fReadHead);
return fBufferSize - atomic_get((int32*)&fWriteAvailable);
return atomic_get((int32*)&fWriteAvailable);
if (atomic_get(&sStackInterfaceConsumers) > 0)
int32 count = atomic_get(&sNextHotVnodeIndex);
uint32 unusedCount = atomic_get((int32*)&sUnusedVnodes);
const int32 oldRefCount = atomic_get(&vnode->ref_count);
if ((atomic_get(&lock->count) != 0 || lock->waiters != NULL)
result = atomic_get(value);
result = atomic_get(value);
if (atomic_get(&entry->ref_count) > 0)
int32 oldCount = atomic_get(&sXsiMessageCount);
if (atomic_get(&sXsiMessageQueueCount) >= MAX_XSI_MESSAGE_QUEUE) {
bool debugSignal = (~atomic_get(&team->debug_info.flags)
if ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
int32 lastIndex = (uint32)atomic_get(&sLastIndex) % NUM_LAST_CALLERS;
return atomic_get((int32*)&lock->count);
} else if (atomic_get(&get_cpu_struct()->ici_counter)
!= atomic_get(&sBroadcastMessageCounter)) {
team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
int32 debugFlags = atomic_get(&currentThread->debug_info.flags)
int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
while (atomic_get(&cpuData.current_event_in_progress) == 1)
int32 freePages = atomic_get(&sUnreservedFreePages);
if (atomic_get(&sUnsatisfiedPageReservations) != 0) {
|| atomic_get(&sUnreservedFreePages)
if (atomic_get(&sUnreservedFreePages) > dontTouch) {
atomic_get(int32_t* ptr)
while (atomic_get(control) == STATE_SPINNING);
while (atomic_get((int32*)&barrier->mutex) != (B_USER_MUTEX_LOCKED | B_USER_MUTEX_DISABLED))
while (atomic_get((int32*)&barrier->waiter_count) < 0) {
if ((~atomic_get(&thread->flags) & kFlags) == 0)
if ((atomic_get(&thread->flags) & THREAD_CANCEL_ASYNCHRONOUS) != 0)
int32 sequence = atomic_get(&sKeyTable[key].sequence);
int32 sequence = atomic_get(&sKeyTable[key].sequence);
while (atomic_get((int32*)&onceControl->state) == STATE_SPINNING);
int32 current = atomic_get(value);
return atomic_get((int32*)value);