atomic_add_return
/* Convenience wrappers built on atomic_add_return(), which performs an
 * atomic read-modify-write add and yields the post-add value (see the
 * `== 1` first-reference checks at the call sites below). */
#define atomic_add(i, v) atomic_add_return((i), (v))
/* Increment *(v) and evaluate to the resulting value. */
#define atomic_inc_return(v) atomic_add_return(1, (v))
/* True when adding i makes the counter negative. */
#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
/* True when adding i brings the counter to exactly zero. */
#define atomic_add_and_test(i, v) (atomic_add_return((i), (v)) == 0)
/* True when the increment brings the counter to exactly zero. */
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
/* NOTE(review): atomic_inc_return and atomic_add_and_test are also defined
 * earlier in this file with differently spelled replacement lists; C only
 * permits redefining a macro with an identical body (C11 6.10.3p2), so one
 * copy should be removed or #undef'd first -- confirm which is intended. */
/* Increment *(p) atomically and evaluate to the resulting value.
 * Arguments are parenthesized per standard macro hygiene, matching the
 * convention used by the other wrappers in this file. */
#define atomic_inc_return(p) atomic_add_return(1, (p))
/* Add v to *(p) atomically; true when the result is exactly zero. */
#define atomic_add_and_test(v, p) (atomic_add_return((v), (p)) == 0)
if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
unsigned pool_offset = atomic_add_return(1, &start_pool);
passive_state = atomic_add_return(1, &cm_node->passive_state);
passive_state = atomic_add_return(1, &cm_node->passive_state);
/* Read the current counter value by atomically adding zero and returning
 * the result.  Argument parenthesized per standard macro hygiene, matching
 * the other wrappers in this file. */
#define atomic_read(p) atomic_add_return(0, (p))
/* Increment *(p) atomically and evaluate to the resulting value.
 * NOTE(review): duplicates an atomic_inc_return definition earlier in this
 * file; only one definition may survive per translation unit -- confirm. */
#define atomic_inc_return(p) atomic_add_return(1, (p))
if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
flight = atomic_add_return(numberRequests, ring->inflights);
flight = atomic_add_return(1, ring->inflights);
return atomic_add_return(inValue, (QatUtilsAtomic *)pAtomicVar);
if (atomic_add_return(1, &accel_dev->ref_count) == 1)
if (atomic_add_return(1, ring->inflights) > ring->max_inflights) {