atomic_add_unless
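atomic_add_unless(v, a, u) atomically adds a to *v unless *v holds the forbidden value u, and returns non-zero if the add was performed. In current kernels it is a thin wrapper around atomic_fetch_add_unless(); the lines below are collected in-tree call sites. As a rough model of the semantics (a userspace sketch in C11 atomics, not the kernel's per-architecture implementation):

/*
 * Semantics sketch only: atomic_add_unless(v, a, u) atomically adds a
 * to *v unless *v == u, and returns true iff the add happened.
 * Modeled with C11 <stdatomic.h> and a compare-and-swap loop.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool model_atomic_add_unless(atomic_int *v, int a, int u)
{
	int old = atomic_load(v);

	do {
		if (old == u)	/* forbidden value: leave *v untouched */
			return false;
	} while (!atomic_compare_exchange_weak(v, &old, old + a));

	return true;		/* add took effect atomically */
}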
return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
if (!atomic_add_unless(&num_events, -1, 1)) {
if (!atomic_add_unless(&num_events, -1, 1)) {
return !!atomic_add_unless(&q->count, 1, max);
if (atomic_add_unless(&topology_poll, -1, 0))
if (!atomic_add_unless(&num_events, -1, 1)) {
int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);
if (atomic_add_unless(&uv_in_nmi, 1, 1))
if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
atomic_add_unless(&parent->power.child_count, -1, 0);
atomic_add_unless(&parent->power.child_count, -1, 0);
if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
if (!atomic_add_unless(&initialized, 1, 1))
if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
ret = atomic_add_unless(&coupled->ready_waiting_counts,
if (!atomic_add_unless(&i2c_priv->tfm_count, 1, 1))
if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
if (atomic_add_unless(&prange->queue_refcount, -1, 0)) {
atomic_add_unless(&pchild->queue_refcount, -1, 0);
if (atomic_add_unless(&ctx->win_updated, -1, 0))
if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
if (atomic_add_unless(&pt->used, -1, 1))
if (atomic_add_unless(&tl->pin_count, 1, 0))
if (atomic_add_unless(&tl->active_count, 1, 0))
if (atomic_add_unless(&tl->active_count, -1, 1))
if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
if (atomic_add_unless(&ref->count, -1, 1))
if (atomic_add_unless(&ref->count, -1, 1))
return atomic_add_unless(&ref->count, 1, 0);
if (atomic_add_unless(&vma->pages_count, 1, 0))
if (atomic_add_unless(&vma->pages_count, -1, 1))
if (atomic_add_unless(&wf->count, -1, 1))
if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
if (atomic_add_unless(&pfdev->cycle_counter.use_count, -1, 1))
if (atomic_add_unless(&st->runtime_pm_enable, 1, 1))
if (!atomic_add_unless(&listen_ctx->backlog, -1, 0))
if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah))
if (atomic_add_unless(&sh->count, -1, 1))
max = atomic_add_unless(&core->insts_count, 1,
if (!atomic_add_unless(&stat->buf_err, -1, 0) &&
atomic_add_unless(&tt->led_complete, 1, 1)) {
if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
if (atomic_add_unless(&twl->connected, 1, 1)) {
if (atomic_add_unless(&twl->connected, -1, 0)) {
if (!atomic_add_unless(&dytc_ignore_event, -1, 0))
if (!atomic_add_unless(&lpm_priv->open, 1, 1)) {
atomic_add_unless(&card->force_alloc_skb, -1, 0);
while (atomic_add_unless(&adapter->stat_miss, -1, 0))
if (atomic_add_unless(&irq_context->in_used, 1, 1))
if (!atomic_add_unless(&fusion->busy_mq_poll[queue_num], 1, 1))
if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) {
if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
if (!atomic_add_unless(&reply_q->busy, 1, 1))
!atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
if (atomic_add_unless(&priv->active, 1, 1)) {
if (atomic_add_unless(&state->refcount, 1, 0))
WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1, 0));
if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
if (!atomic_add_unless(&map->in_use, 1, 1))
if (atomic_add_unless(&vnode->cb_nr_mmap, -1, 1))
if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, 1,
if (atomic_add_unless(&inode->i_count, -1, 1))
if (atomic_add_unless(&d->ref, -1, 2))
atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0);
atomic_add_unless(&nn->nfs4_client_count, -1, 0);
if (!atomic_add_unless(&s->s_active, -1, 1)) {
if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
ret = atomic_add_unless(&page->_refcount, nr, u);
atomic_add_unless(&dev->power.usage_count, -1, 0);
if (!atomic_add_unless(&perf_sched_count, -1, 1))
if (atomic_add_unless(cnt, -1, 1))
ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
return atomic_add_unless(&hibernate_atomic, -1, 0);
if (!atomic_add_unless(&sched_core_count, -1, 1))
if (atomic_add_unless(atomic, -1, 1))
if (atomic_add_unless(atomic, -1, 1))
if (atomic_add_unless(atomic, -1, 1))
if (atomic_add_unless(atomic, -1, 1))
#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
#define batadv_atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) {
if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) {
if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
if (!atomic_add_unless(&set->nelems, 1, set->size))
!atomic_add_unless(&pn->tx_credits, -1, 0)) {
avail_allocs = atomic_add_unless(&rds_ib_allocation,
if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
atomic_add_unless(&tmp->users, 1, lim);
atomic_add_unless(&tmp->users, -1, lim);
return atomic_add_unless(v, a, u);
if (!atomic_add_unless(&priv->dmic_probed, 1, 1))
if (!atomic_add_unless(&ipc->recovering, 1, 1)) {
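The call sites above cluster into a handful of recurring idioms. A minimal sketch of each, using the same C11 CAS model as the sketch at the top of this page; every function name below is illustrative, not taken from the tree:

#include <stdatomic.h>
#include <stdbool.h>

/* Same CAS model as the sketch under the heading above. */
static bool add_unless(atomic_int *v, int a, int u)
{
	int old = atomic_load(v);

	do {
		if (old == u)
			return false;
	} while (!atomic_compare_exchange_weak(v, &old, old + a));
	return true;
}

/* "try-get": take a reference only while the object is still live
 * (the pin_count/active_count/ref->count sites: inc unless already 0). */
static bool try_get(atomic_int *refcount)
{
	return add_unless(refcount, 1, 0);
}

/* Fast-path "put": drop a reference unless it is the last one, in which
 * case the caller falls back to a locked final release (the i_count,
 * s_active, and sh->count sites: dec unless it would reach 0). */
static bool put_fast(atomic_int *refcount)
{
	return add_unless(refcount, -1, 1);
}

/* dec-not-zero: consume a pending event or credit if any remain; this is
 * exactly what the atomic_dec_not_zero() macros above expand to. */
static bool consume_pending(atomic_int *pending)
{
	return add_unless(pending, -1, 0);
}

/* Capped counter: account a new object unless the limit is reached
 * (the num_cqs/num_qps/tp_num sites: inc unless at max). */
static bool reserve_slot(atomic_int *count, int limit)
{
	return add_unless(count, 1, limit);
}

/* One-shot latch: the first caller moves 0 -> 1 and wins; concurrent
 * callers see 1 and back off (the in_nmi and reply_q->busy sites). */
static bool enter_once(atomic_int *flag)
{
	return add_unless(flag, 1, 1);
}

In every variant the return value tells the caller whether it won the race, which is why nearly all of the call sites above test the result rather than discard it.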