atomic_inc_32_nv
qid = atomic_inc_32_nv(&state->qcount) - 1;
if ((num = atomic_inc_32_nv(&num_threads)) > max_threads) {
nsid = (uint64_t)getpid() << 32 | atomic_inc_32_nv(&fmev_subid);
qid = atomic_inc_32_nv(&state->qcount) - 1;
i = atomic_inc_32_nv(&ioc_id);
u = atomic_inc_32_nv(&cmt->cmt_utilization);
atomic_inc_32_nv(&ctfs_minor) & L_MAXMIN32);
ap->cs_id = key.impl_id.s_id = atomic_inc_32_nv(&session_id_counter);
atomic_inc_32_nv(&objfs_minor) & L_MAXMIN32);
atomic_inc_32_nv(&sharefs_minor) & L_MAXMIN32);
mb_put_uint32le(mbp, atomic_inc_32_nv(&smbechoes));
/*
 * Generate a unique SMB file identifier: atomically increments the
 * global smb_fids counter and yields the resulting value.  The "_nv"
 * primitive returns the new (post-increment) value, so concurrent
 * callers each observe a distinct id (modulo 32-bit wraparound).
 */
#define SMB_UNIQ_FID() atomic_inc_32_nv(&smb_fids)
new_id = atomic_inc_32_nv(&tmp_id);
sp->sd_spool_num = atomic_inc_32_nv(&sv->sp_info.sp_cnt);
ncur = atomic_inc_32_nv(&ufs_cur_writes);
if (atomic_inc_32_nv(&zp->z_sync_cnt) == 1)
call_msg.rm_xid = atomic_inc_32_nv(&zs->message_id);
atomic_inc_32_nv(&ilb_nat_src_instance));
if (atomic_inc_32_nv(&oldixa->ixa_refcnt) == 2) {
if (atomic_inc_32_nv(&ipst->ips_src_generation) ==
ipha->ipha_ident = atomic_inc_32_nv(identp);
htonl(atomic_inc_32_nv(&assoc->ipsa_replay));
esph_ptr->esph_replay = htonl(atomic_inc_32_nv(&assoc->ipsa_replay));
next_tq = atomic_inc_32_nv(&sctps->sctps_recvq_tq_list_cur) %
if (atomic_inc_32_nv(&slc->slc_cnt) > slc->slc_max + 1) {
if (atomic_inc_32_nv(&tlc->tlc_cnt) > tlc->tlc_max + 1) {
/*
 * Atomically increment the 32-bit counter at _p and evaluate to the new
 * value.  The cast to (volatile uint32_t *) lets callers pass pointers
 * that are not volatile-qualified; _p is evaluated exactly once, so the
 * macro is safe with side-effecting arguments.
 */
#define mm_atomic_inc_imp(_p) atomic_inc_32_nv((volatile uint32_t *)(_p))
cnt = atomic_inc_32_nv(&irp->irp_rscn_counter); \
atomic_inc_32_nv(&taskq_cntr));
ct = atomic_inc_32_nv(&stmf_cur_ntasks);
new_hkey = atomic_inc_32_nv(&daplka_timer_hkey);
uint_t active = atomic_inc_32_nv(&state->id_rx_post_active);
(atomic_inc_32_nv(&state->id_running) != 1)) {
bufs = atomic_inc_32_nv(&state->id_rx_list.dl_bufs_outstanding);
grp->mbg_id = atomic_inc_32_nv(&mac_bcast_id);
event_count = atomic_inc_32_nv(
event_count = atomic_inc_32_nv(
event_count = atomic_inc_32_nv(&isp->sess_state_event_count);
event_count = atomic_inc_32_nv(
event_count = atomic_inc_32_nv(
event_count = atomic_inc_32_nv(
event_count = atomic_inc_32_nv(
event_count = atomic_inc_32_nv(&isp->sess_state_event_count);
event_count = atomic_inc_32_nv(
event_count = atomic_inc_32_nv(&isp->sess_state_event_count);
event_count = atomic_inc_32_nv(
event_count = atomic_inc_32_nv(
event_count = atomic_inc_32_nv(&isp->sess_state_event_count);
event_count = atomic_inc_32_nv(
idx = atomic_inc_32_nv(&mptsas_dbglog_idx) &
if (atomic_inc_32_nv(&(sp->s_nested_restarts)) > 1) {
def_instance = atomic_inc_32_nv(&anon_instance);
xid = atomic_inc_32_nv(&nlm_xid);
xid = atomic_inc_32_nv(&nlm_xid);
xid = atomic_inc_32_nv(&nlm_xid);
xid = atomic_inc_32_nv(&nlm_xid);
xid = atomic_inc_32_nv(&nlm_xid);
xid = atomic_inc_32_nv(&nlm_xid);
uint_t spcolor = atomic_inc_32_nv(&sp_current_color);
uint32_t id = atomic_inc_32_nv(&vmem_id);
vmem_populator[atomic_inc_32_nv(&vmem_populators) - 1] = vmp;
kstat_instance = atomic_inc_32_nv((uint32_t *)&rpc_kstat_instance);
/*
 * atomic_inc_32_nv: atomically increment the 32-bit value at *target and
 * return the resulting (new) value.  Matches the illumos/Solaris
 * atomic_ops(3C)/(9F) interface; the increment wraps on overflow as
 * unsigned arithmetic does.
 */
extern uint32_t atomic_inc_32_nv(volatile uint32_t *target);
atomic_inc_32_nv(&acpidev_device_unitaddr) - 1);
dhdl->aod_portid = atomic_inc_32_nv(devid) - 1;
dhdl->aod_bdnum = atomic_inc_32_nv(&acpidev_dr_boards) - 1;
dhdl->aod_bdidx = atomic_inc_32_nv(&pdhdl->aod_chidx);
dhdl->aod_bdidx = atomic_inc_32_nv(&acpidev_dr_board_index);
atomic_inc_32_nv(&acpidev_scope_unitaddr) - 1);
*kspp = kstat_create("pci_intrs", atomic_inc_32_nv(&pci_ks_inst),
atomic_inc_32_nv(&pxintr_ks_instance), "config",
if (atomic_inc_32_nv(outstanding) == 1) {
atomic_inc_32_nv(&pciintr_ks_instance), "config", "interrupts",
test->info.handle = atomic_inc_32_nv(&mem_test_sequence_id);
atomic_inc_32_nv(&n2rng->n_frs.fips_round_robin_j) %
atomic_inc_32_nv(&n2rng->n_frs.fips_round_robin_j) %
msgp->seq_num = atomic_inc_32_nv(&ldcp->dringdata_msgid);
msgp->seq_num = atomic_inc_32_nv(&ldcp->dringdata_msgid);
dmsg->seq_num = atomic_inc_32_nv(&ldcp->dringdata_msgid);