atomic_fetchadd_long
count = atomic_fetchadd_long(&(obj->retain_count), -1) - 1;
u_long cnt = atomic_fetchadd_long(&cnt_efirt_faults, 1);
/*
 * Alias the 64-bit fetch-add primitive to the long variant.
 * NOTE(review): assumes 'long' is 64 bits wide (LP64 target) — confirm
 * this header is only compiled on 64-bit architectures.
 */
#define atomic_fetchadd_64 atomic_fetchadd_long
return i + atomic_fetchadd_long(&v->counter, i);
return atomic_fetchadd_long(&v->counter, -i) - i;
return atomic_fetchadd_long(&v->counter, 1) + 1;
return atomic_fetchadd_long(&v->counter, -1) - 1;
ftick->tk_unique = atomic_fetchadd_long(&data->ticketer, 1);
ftick->tk_unique = atomic_fetchadd_long(&data->ticketer, 1);
if (atomic_fetchadd_long(&wip->wip_numios, -1) == 1) {
left = atomic_fetchadd_long(&watchdog_ticks, -newticks);
new = atomic_fetchadd_long(limit, (long)diff) + diff;
pos = atomic_fetchadd_long(&nrecs, 1);
space = atomic_fetchadd_long(&bd->bd_bufspace, diff);
space = atomic_fetchadd_long(&bd->bd_bufspace, size);
space = atomic_fetchadd_long(&runningbufspace, -bspace);
old = atomic_fetchadd_long(&runningbufspace, space);
lnumcache = atomic_fetchadd_long(&numcache, 1) + 1;
rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
(atomic_fetchadd_long(&tcp_log_auto_ratio_cur, 1) %
atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, 1);
atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, -1);
/*
 * Provide atomic_fetchadd_64 in terms of atomic_fetchadd_long.
 * NOTE(review): valid only where sizeof(long) == 8 (LP64) — verify the
 * surrounding #ifdef guards restrict this to 64-bit builds.
 */
#define atomic_fetchadd_64 atomic_fetchadd_long
value = atomic_fetchadd_long(&pool->sp_space_used, delta) + delta;
key = atomic_fetchadd_long(&masterkey, 1);
prev = atomic_fetchadd_long(&uip->ui_vmsize, pincr);
prev = atomic_fetchadd_long(&uip->ui_vmsize, -pincr);
prev = atomic_fetchadd_long(&uip->ui_vmsize, -pdecr);
prev = atomic_fetchadd_long(&swap_reserved, pincr);
prev = atomic_fetchadd_long(&swap_reserved, -pincr);
prev = atomic_fetchadd_long(&swap_reserved, -pincr);
prev = atomic_fetchadd_long(&swap_reserved, -pdecr);
atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)