atomic_dec_return
else if (!enable && (atomic_dec_return(&emu_count) == 0))
if (atomic_dec_return(&insn_ctr_ref) == 0)
if (atomic_dec_return(&itlb_miss_ref) == 0)
if (atomic_dec_return(&dtlb_miss_ref) == 0)
if (atomic_dec_return(&num_events) == 0)
if (atomic_dec_return(&num_events) == 0)
if (!atomic_dec_return(&cpuhw->ctr_set[i]))
if (!atomic_dec_return(&cpuhw->ctr_set[rc])) {
} while (atomic_dec_return(scheduled));
if (atomic_dec_return(&num_events) == 0)
if (atomic_dec_return(&pmu->activeboxes) == 0)
if (box && box->cpu >= 0 && atomic_dec_return(&box->refcnt) == 0)
if (atomic_dec_return(&srso_nr_vms))
if (!atomic_dec_return(&kvm->arch.noncoherent_dma_count))
if (atomic_dec_return(&dev->enable_cnt) >= 0)
pm_only = atomic_dec_return(&q->pm_only);
inflight = atomic_dec_return(&rqw->inflight);
if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
inflight = atomic_dec_return(&rqw->inflight);
int ap_pending_cnt = atomic_dec_return(&device->ap_pending_cnt);
return atomic_dec_return(&peer_device->device->rs_pending_cnt);
return atomic_dec_return(&device->unacked_cnt);
int i = atomic_dec_return(&device->local_cnt);
int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
if (atomic_dec_return(&dd->irq_workers_active) == 0)
if (atomic_dec_return(&nbd->config->live_connections) == 0) {
counter = atomic_dec_return(v);
if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
if (atomic_dec_return(&cluster_usage[cluster]))
atomic_dec_return(&trng_active_devs);
atomic_dec_return(&trng_active_devs) == 0)
atomic_dec_return(&trng_active_devs) == 0)
if (atomic_dec_return(&devfreq->suspend_count) >= 1)
if (!atomic_dec_return(&first->next_pending_count)) {
if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
if (atomic_dec_return(&hive->number_devices) == 0) {
int count = atomic_dec_return(&node->kfd->compute_profile);
return atomic_dec_return(&spt->refcount);
if (atomic_dec_return(&kms->bandwidth_ref) > 0)
} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
if (atomic_dec_return(&wb_enc->wbirq_refcount) == 0)
if (atomic_dec_return(&msm_fb->prepare_count))
if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
if (atomic_dec_return(&pfdev->cycle_counter.use_count) == 0)
map_count = atomic_dec_return(&vbo->map_count);
if (!atomic_dec_return(&config_desc->active_cnt))
if (!atomic_dec_return(&id_map->perf_cs_etm_session_active))
if (atomic_dec_return(&intr->users) > 0)
if (atomic_dec_return(&mcast->refcount) <= 1)
if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
skb_out = atomic_dec_return(&qp->skb_out);
depth = atomic_dec_return(&qp->req.rd_atomic);
req_lim = atomic_dec_return(&ch->req_lim);
if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
/*
 * atomic_dec_bug() - decrement an atomic counter and crash on underflow.
 *
 * Decrements *v via atomic_dec_return() and triggers BUG_ON() if the new
 * value is negative, i.e. the counter was decremented more times than it
 * was incremented. NOTE(review): assumes v is an atomic_t-style refcount
 * that must never drop below zero — BUG_ON() here turns a refcount
 * imbalance into an immediate, loud failure rather than a silent
 * use-after-free later.
 */
#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
if (atomic_dec_return(&cache->nr_dirty) == 0)
if (atomic_dec_return(&m->pg_init_in_progress) > 0)
if (atomic_dec_return(&conf->preread_active_stripes)
if (atomic_dec_return(&conf->preread_active_stripes)
if (atomic_dec_return(&port->v4l_reader_count) == 0) {
if (atomic_dec_return(&port->v4l_reader_count) == 0) {
if (atomic_dec_return(&solo_dev->snd_users) == 0)
if (video->ece.enable && atomic_dec_return(&video->ece.clients) == 0) {
if (atomic_dec_return(&dev->num_instances) == 0)
s32 refs = atomic_dec_return(&ch->references);
s32 refs = atomic_dec_return(&part->references);
if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
atomic_dec_return(&part->nchannels_engaged) == 0) {
if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
atomic_dec_return(&part->nchannels_engaged) == 0) {
if (atomic_dec_return(&queued_msg->use_count) == 0) {
if (atomic_dec_return(&queued_msg->use_count) == 0) {
if (!atomic_dec_return(&host->shared_power_users))
while (atomic_dec_return(idle_cnt) > ES58X_TX_URBS_MAX) {
num_tasks = atomic_dec_return(&priv->opreq_count);
atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
if (!atomic_dec_return(&ar->tx_nr_pending)) {
if (atomic_dec_return(&sta_info->pending_frames) == 0)
atomic_dec_return(&sta_priv->pending_frames) == 0)
if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) {
atomic_dec_return(&sta_priv->pending_frames) == 0)
} while (atomic_dec_return(&mld_txq->tx_request));
} while (atomic_dec_return(&mvmtxq->tx_request));
atomic_dec_return(&adapter->pending_bridged_pkts);
if (atomic_dec_return(&priv->wmm_tx_pending[index]) < LOW_TX_PENDING) {
pending = atomic_dec_return(&wcid->non_aql_packets);
int tx_lock = atomic_dec_return(&wdev->tx_lock);
tx_lock = atomic_dec_return(&priv->tx_lock);
cnt = atomic_dec_return(&lport->act_rport_cnt);
cnt = atomic_dec_return(&rport->act_ctrl_cnt);
if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
if (atomic_dec_return(&dev->enable_cnt) != 0)
ret = atomic_dec_return(&hdptx->usage_count);
if (atomic_dec_return(&usbphyc->n_pll_cons) > 0)
WARN_ON(atomic_dec_return(&psy->use_cnt));
WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
if (atomic_dec_return(&rstc->deassert_count) != 0)
if (atomic_dec_return(&device->ref_count) == 0)
if (atomic_dec_return(&view->ref_count) == 0)
count = atomic_dec_return(&device->ref_count);
if (atomic_dec_return(&wka_port->refcount) != 0)
if (atomic_dec_return(&a->dis_ints_cnt) == 0)
if (atomic_dec_return(&a->disable_cnt) == 0)
atomic_dec_return(&sdev->device_blocked) > 0) {
if (atomic_dec_return(&starget->target_blocked) > 0)
if (atomic_dec_return(&shost->host_blocked) > 0)
if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
if (atomic_dec_return(&chan->ref_count) <= 0)
if (atomic_dec_return(&chan->dma->ref_count) <= 0)
if (atomic_dec_return(&dma->ref_count) == 0)
last = (atomic_dec_return(&qh->notifier_enabled) == 0);
last = (atomic_dec_return(&inst->num_notifiers) == 0);
if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
WARN_ON(atomic_dec_return(&state->refcount) < 0);
val = atomic_dec_return(&vsock->queued_replies);
a = atomic_dec_return(&cell->active);
o = atomic_dec_return(&net->nr_outstanding_calls);
if ((atomic_dec_return(&delayed_root->items) <
if (!atomic_dec_return(&mdsc->stopping_blockers) &&
if (atomic_dec_return(&cic->pending_pages))
queue_refs = atomic_dec_return(&ring->queue_refs);
if (atomic_dec_return(&gl->gl_revokes) == 0) {
v = atomic_dec_return(&kn->active);
n_accesses = atomic_dec_return(&cache->n_accesses);
n_accesses = atomic_dec_return(&cache->n_accesses);
n_accesses = atomic_dec_return(&cookie->n_accesses);
c = atomic_dec_return(&cookie->n_active);
n_accesses = atomic_dec_return(&volume->n_accesses);
n_accesses = atomic_dec_return(&volume->n_accesses);
if (atomic_dec_return(&timer->n_ops) < 0)
if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
if (unlikely(atomic_dec_return(&pde->in_use) == BIAS))
length = atomic_dec_return(&tcpSesAllocCount);
current_recv_credits = atomic_dec_return(&sc->recv_io.credits.count);
if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
current_recv_credits = atomic_dec_return(&sc->recv_io.credits.count);
if (atomic_dec_return(&tb->count) == 0)
BUG_ON(atomic_dec_return(&inode->i_readcount) < 0);
unsigned int cnt = atomic_dec_return(&entry->e_refcnt);
int ret = atomic_dec_return(&page->_refcount);
if (atomic_dec_return(&sqd->park_pending))
if (atomic_dec_return(&nbcon_cpu_emergency_cnt) == 0) {
if (!atomic_dec_return(&rtsp->rts_refctr)) {
if (!atomic_dec_return(&n_started))
if (!atomic_dec_return(&n_warmedup))
if (!atomic_dec_return(&n_cooleddown))
if (!atomic_dec_return(&n_started))
if (atomic_dec_return(&file->sm_ref) > 0)
if (atomic_dec_return(&file->tm_ref) > 0)
if (atomic_dec_return(&nna->nr) >= READ_ONCE(nna->max))
atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
if (atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
if (!atomic_dec_return(&oom_victims))
BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
if (atomic_dec_return(&brvcc->qspace) < 1) {
if (!atomic_dec_return(&jsk->skb_pending))
if (atomic_dec_return(&iucv->pendings) <= 0)
if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
!atomic_dec_return(&(skb_shinfo(skb)->dataref)))
atomic_dec_return(&genl_sk_destructing_cnt) == 0)
while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
if (atomic_dec_return(&rfkill_input_disabled) == 0)
u = atomic_dec_return(&local->active_users);
int n = atomic_dec_return(select_skb_count(skb));
int n = atomic_dec_return(select_skb_count(skb));
if (!atomic_dec_return(&block->useswcnt))
if (!atomic_dec_return(&smcibdev->lnk_cnt))
if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
if (!atomic_dec_return(&lgr_cnt))
if (!atomic_dec_return(&smcibdev->lnk_cnt))
if (atomic_dec_return(&ep->re_receiving) > 0)
val = atomic_dec_return(&vsock->queued_replies);
int r = atomic_dec_return(&bss->hold);
r = atomic_dec_return(&bss->hold);
if (atomic_dec_return(&t->tunnel_users) == 1)
return atomic_dec_return(v);
if (atomic_dec_return(&ctx->num_preparing) == 0)
if (atomic_dec_return(&ctx->num_preparing) == 0)
if (atomic_dec_return(&ctx->num_unfinished) == 0)
if (atomic_dec_return(&shared_ctx->num_preparing) == 0)
if (atomic_dec_return(&shared_ctx->num_unfinished) == 0)
if (atomic_dec_return(&shared_ctx.num_preparing) > 0) {
if (atomic_dec_return(&shared_ctx.num_unfinished) > 0)
if (atomic_dec_return(&adev->l1sen_counter) == -1)
if (!atomic_dec_return(&ep->running)) {