atomic_get_and_set
/* Atomically stores newValue into *value and returns the previous contents.
 * NOTE(review): int32 is a project typedef (presumably int32_t) — confirm
 * against the project's type headers; this line appears here out of context. */
extern int32 atomic_get_and_set(int32* value, int32 newValue);
/* Atomically reads *ptr and resets it to 0 in one operation, returning the
 * value that was read (classic "read-and-clear" built on get-and-set). */
#define atomic_readandclear_int(ptr) atomic_get_and_set((int32*)(ptr), 0)
return atomic_get_and_set(&lock->lock, 1) == 0;
return (PointerType*)atomic_get_and_set((int32*)_pointer, (int32)set);
uint32 flags = (atomic_get_and_set((int32*)eventFlags, 0)) >> 1;
flags = atomic_get_and_set((int32*)eventFlags++, 0);
if (atomic_get_and_set((int32*)&fCPUEventFlags[cpu].interrupts[VMBUS_SINT_MESSAGE].flags32, 0) == 0)
uint32 flags = atomic_get_and_set((int32*)rxFlags, 0) >> 1;
flags = atomic_get_and_set((int32*)rxFlags++, 0);
if (atomic_get_and_set(&fTransferScheduled, 1) != 0) {
if (atomic_get_and_set(&fTransferScheduled, 1) != 0) {
if (atomic_get_and_set(&fTransferScheduled, 1) != 0) {
bool wasPending = atomic_get_and_set(&fPendingDPC, 1) != 0;
return atomic_get_and_set(&fInUse, 1) != 0;
bool wasPending = atomic_get_and_set(&fPendingDPC, 1) != 0;
int32 oldValue = atomic_get_and_set(value, newValue);
return atomic_get_and_set((int32*)entry, newEntry);
return atomic_get_and_set((int32*)entry, newEntry);
return atomic_get_and_set((int32*)entry, newEntry);
const int32 oldGeneration = atomic_get_and_set(&entry->generation,
} else if (atomic_get_and_set(&gCPU[targetCPU->ID()].invoke_scheduler, true) != true) {
if (atomic_get_and_set(&lock->lock, 1) != 0) {
if (atomic_get_and_set(&lock->lock, 1) == 0)
int32 oldValue = atomic_get_and_set(&lock->lock, 1);
if (atomic_get_and_set(&lock->lock, 0) != 1)
if (atomic_get_and_set(&lock->lock, 0) != 1)
uint32 previous = atomic_get_and_set(&lock->lock, 0);
atomic_get_and_set(int32_t* ptr, int32_t value)
value = atomic_get_and_set(control, STATE_INITIALIZED);
int32 sequence = atomic_get_and_set(&sKeyTable[key].sequence,
int32 value = atomic_get_and_set((int32*)&onceControl->state,
value = atomic_get_and_set((int32*)&onceControl->state,
return atomic_get_and_set((int32*)value, newValue);