atomic_long_add_return
long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
count = atomic_long_add_return(adjust, &sem->count);
return atomic_long_add_return(new - old, &user->pipe_bufs);
/*
 * local_add_return - atomically add @i to the counter @l and return the
 * new value.
 *
 * Expands to atomic_long_add_return() on the embedded atomic member
 * @l->a, so the add-and-fetch is a single atomic RMW operation.
 * NOTE(review): @l is presumably a local_t-style wrapper struct whose
 * sole member `a` is an atomic_long_t — confirm against the definition
 * in the enclosing header.
 */
#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
new_used = atomic_long_add_return(nslots, &mem->total_used);
ret = atomic_long_add_return(count - LONG_MAX - 1, &mm->futex_atomic);
count = atomic_long_add_return(adjustment, &sem->count);
long new = atomic_long_add_return(v, &iter->rlimit[type]);
long new = atomic_long_add_return(1, &iter->rlimit[type]);
WARN_ON_ONCE(atomic_long_add_return(delta,
new = atomic_long_add_return(nr_pages, &c->usage);
new = atomic_long_add_return(nr_pages, &c->usage);
atomic_long_add_return(nr, &unit->nr_deferred[shrinker_id_to_offset(shrinker->id)]);
return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);