atomic_add_return
ATOMIC_FETCH_OPS(atomic_add_return)
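atomic_add_return(i, v) atomically adds i to *v and returns the new value; unlike plain atomic_add(), the read-modify-write is fully ordered, acting as a full memory barrier before and after the update. A minimal sketch of the semantics, assuming kernel context (demo_count is hypothetical):

#include <linux/atomic.h>

static atomic_t demo_count = ATOMIC_INIT(0);    /* hypothetical counter */

/* Returns the post-add value; the RMW implies full ordering. */
static int demo_bump(int i)
{
        return atomic_add_return(i, &demo_count);
}
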
int result = atomic_add_return(1, &smp_capture_depth);
if (atomic_add_return(1, &blkg->use_delay) == 1)
alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
if (atomic_add_return(1, &intf->nr_users) > max_users) {
if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
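The two ipmi hits above show the cap-enforcement idiom: take the slot optimistically, and if the post-increment value exceeds the limit, roll the increment back and fail. A sketch with hypothetical names (nr_users, MAX_USERS):

#include <linux/atomic.h>
#include <linux/errno.h>

#define MAX_USERS 32                            /* hypothetical cap */
static atomic_t nr_users = ATOMIC_INIT(0);      /* hypothetical counter */

static int take_user_slot(void)
{
        if (atomic_add_return(1, &nr_users) > MAX_USERS) {
                atomic_dec(&nr_users);          /* undo the optimistic take */
                return -EBUSY;
        }
        return 0;
}
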
if (atomic_add_return(1, &accel_dev->ref_count) == 1)
if (atomic_add_return(1, ring->inflights) >
if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
line->req_seqno = atomic_add_return(diff_seqno,
le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
if (atomic_add_return(1, &vblank->refcount) == 1) {
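The == 1 tests above (and the explicitly commented "0 -> 1 transition" hits further down) detect the first reference so that only one caller performs one-time setup. A sketch with hypothetical names:

#include <linux/atomic.h>

static void resource_enable(void);              /* hypothetical one-time setup */
static atomic_t users = ATOMIC_INIT(0);         /* hypothetical refcount */

static void resource_get(void)
{
        /* Only the 0 -> 1 transition runs the setup. */
        if (atomic_add_return(1, &users) == 1)
                resource_enable();
}
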
*seqno = atomic_add_return(1, &dev_priv->marker_seq);
return (u32)atomic_add_return(incrs, &sp->max_val);
seqno = atomic_add_return(1, &priv->seqno);
passive_state = atomic_add_return(1, &cm_node->passive_state);
passive_state = atomic_add_return(1, &cm_node->passive_state);
if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
atomic_add_return(
return (unsigned int)atomic_add_return(1, tx_port_affinity) %
wqe->ssn = atomic_add_return(1, &qp->ssn);
return (int)(atomic_add_return((tail - last) & queue->mask, &queue->tail) - head);
const unsigned int head = atomic_add_return(count, &queue->head);
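The queue hits above reserve space by advancing a shared index and deriving the reserved region from the returned value, so concurrent producers never hand out overlapping slots. A sketch under those assumptions (prod_idx and the power-of-two mask are hypothetical):

#include <linux/atomic.h>

static atomic_t prod_idx = ATOMIC_INIT(0);      /* hypothetical producer index */

static unsigned int reserve_slots(unsigned int count, unsigned int mask)
{
        unsigned int end = (unsigned int)atomic_add_return(count, &prod_idx);

        /* This producer owns [end - count, end); return the first slot. */
        return (end - count) & mask;
}
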
unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
errors = atomic_add_return(1, &dc->io_errors);
sectors_dirty = atomic_add_return(s,
if (atomic_add_return(-1, &factory->ref_count) <= 0)
u32 claim_number = (u32) atomic_add_return(1, &lock->increments_claimed);
if (atomic_add_return(-1, zone_count) > 0)
} else if (done && (atomic_add_return(-1, &allocator->depot->zones_to_scrub) == 0)) {
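A negative delta gives a fully ordered decrement that reports the new value, which is how the hits above spot the last reference or the final zone; when the delta is exactly 1, atomic_dec_return() is the more idiomatic spelling. A sketch with hypothetical names:

#include <linux/atomic.h>

static void resource_free(void);                /* hypothetical teardown */
static atomic_t ref_count = ATOMIC_INIT(1);     /* hypothetical */

static void resource_put(void)
{
        if (atomic_add_return(-1, &ref_count) <= 0)
                resource_free();
}
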
return atomic_add_return(1, &md->uevent_seq);
i = atomic_add_return(1, &master_dev->hw_index) - 1;
i = atomic_add_return(1, &master_dev->hw_index) - 1;
if (atomic_add_return(1, &delayed_dg_host_queue_size)
atomic_add_return(1, &instance_count));
if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx);
if (atomic_add_return(num_entries, &tx_scrq->used)
if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
if ((on && atomic_add_return(1, &drvstate->pmcount) == 1) ||
if ((on && atomic_add_return(1, &info->pmcount) == 1) ||
seqno = atomic_add_return(0x10, &intf->seqno);
req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF;
if (atomic_add_return(0, &priv->tx_lock))
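Adding zero, as in the tx_lock hit above, reads the current value through the RMW path, so the load carries the full ordering that a plain atomic_read() would not. A sketch with hypothetical names:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t tx_lock = ATOMIC_INIT(0);       /* hypothetical */

static bool tx_busy(void)
{
        /* Fully ordered read: behaves like smp_mb(); atomic_read(); smp_mb(). */
        return atomic_add_return(0, &tx_lock) != 0;
}
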
if (atomic_add_return(1, &sony_laptop_input.users) > 1)
if (atomic_add_return(1, &sony_pf_users) > 1)
if (atomic_add_return(1, &mpam_num_msc) == fw_num_msc)
used = atomic_add_return(count, &q->nr_buf_used);
atomic_add_return(1, &efct->xport->io_active_count);
atomic_add_return(1, &efct->xport->io_total_alloc);
atomic_add_return(1, &efct->xport->io_total_free);
ini_count = atomic_add_return(1, &efct->tgt_efct.initiator_count);
atomic_add_return(1, &efct->tgt_efct.ios_in_use);
atomic_add_return(1, &efct->tgt_efct.ios_in_use);
atomic_add_return(1, &xport->io_alloc_failed_count);
atomic_add_return(1, &xport->io_alloc_failed_count);
if (atomic_add_return(1, &xport->io_pending_recursing)) {
atomic_add_return(1, &xport->io_pending_count);
atomic_add_return(1, &xport->io_total_pending);
atomic_add_return(1, &xport->io_total_pending);
atomic_add_return(1, &xport->io_pending_count);
atomic_add_return(1, &xport->io_pending_count);
atomic_add_return(1, &xport->io_total_pending);
hdr.fh_seq_id = (u8)atomic_add_return(1, &hw->send_frame_seq_id);
atomic_add_return(1, &efc->els_io_alloc_failed_count);
hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
ioc_pend_data_len = atomic_add_return(data_len_blks,
tg_pend_data_len = atomic_add_return(data_len_blks,
ioc_pend_data_len = atomic_add_return(data_len_blks,
ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
return atomic_add_return(1, &fc_event_seq);
session->sid = atomic_add_return(1, &iscsi_session_nr);
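Many hits (marker_seq, packet_id, fc_event_seq, iscsi_session_nr above) use the primitive as a lock-free ID allocator: every caller gets a distinct, monotonically increasing value. A sketch with hypothetical names:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t next_id = ATOMIC_INIT(0);       /* hypothetical ID source */

static u32 alloc_id(void)
{
        /* Concurrent callers each see a distinct post-increment value. */
        return (u32)atomic_add_return(1, &next_id);
}
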
if (atomic_add_return(i - cnt, &global_page_count) >
irq_guard = atomic_add_return(1, &mux->irq_guard);
irq_guard = atomic_add_return(-1, &mux->irq_guard);
map_guard = atomic_add_return(-1, &mux->map_guard);
map_guard = atomic_add_return(1, &mux->map_guard);
map_guard = atomic_add_return(-1, &mux->map_guard);
irq_guard = atomic_add_return(1, &scc->irq_guard);
irq_guard = atomic_add_return(-1, &scc->irq_guard);
seqno = atomic_add_return(1, &epfile->seqno);
ret = atomic_add_return(nbits, &bfs->nr_locked);
val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
if (!atomic_add_return(bios, &io->pending_bios))
if (atomic_add_return(bios, &io->pending_bios))
int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
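The shared_last_ino hit above is the batched variant: one atomic operation claims LAST_INO_BATCH numbers for the local CPU, which then hands them out without touching the shared cacheline again. A sketch of the same idea with hypothetical names (a real per-CPU version would use this_cpu machinery):

#include <linux/atomic.h>

#define ID_BATCH 1024                           /* hypothetical batch size */
static atomic_t shared_next_id = ATOMIC_INIT(0);

static unsigned int alloc_id_batched(unsigned int *cache, unsigned int *left)
{
        if (*left == 0) {
                /* One shared RMW refills the whole local batch. */
                *cache = (unsigned int)atomic_add_return(ID_BATCH,
                                                         &shared_next_id) - ID_BATCH;
                *left = ID_BATCH;
        }
        (*left)--;
        return (*cache)++;
}
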
needed = atomic_add_return(total, &t->t_outstanding_credits);
needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
wanted = atomic_add_return(nblocks,
t_revokes = atomic_add_return(revokes,
if (atomic_add_return(size, sz) > KERNFS_USER_XATTR_SIZE_LIMIT) {
if (atomic_add_return(BIAS, &de->in_use) != BIAS)
msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
space_used = atomic_add_return(cilpcp->space_used + len,
val = atomic_add_return(segs, &inet_sk(sk)->inet_id);
if (atomic_add_return(len, &stream->capacity) >= BPF_STREAM_MAX_CAPACITY) {
KCSAN_EXPECT_READ_BARRIER(atomic_add_return(1, &dummy), true);
KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return(1, &dummy), true);
KCSAN_EXPECT_RW_BARRIER(atomic_add_return(1, &dummy), true);
KCSAN_CHECK_READ_BARRIER(atomic_add_return(1, &dummy));
KCSAN_CHECK_WRITE_BARRIER(atomic_add_return(1, &dummy));
KCSAN_CHECK_RW_BARRIER(atomic_add_return(1, &dummy));
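The KCSAN selftest hits above assert that atomic_add_return() satisfies read, write, and read-write barrier expectations, i.e. the fully ordered RMW can stand in for smp_mb(). A sketch of code relying on that implied barrier (names hypothetical; a reader still needs its own acquire ordering):

#include <linux/atomic.h>
#include <linux/compiler.h>

static int payload;                             /* hypothetical shared data */
static atomic_t published = ATOMIC_INIT(0);

static void publish(int value)
{
        WRITE_ONCE(payload, value);
        /* The fully ordered RMW orders the payload store before the
         * counter update; no explicit smp_mb() is needed here. */
        atomic_add_return(1, &published);
}
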
if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
key = atomic_add_return(key_offset, &sk->sk_tskey);
wanted = atomic_add_return(deferred, &netstamp_wanted);
return atomic_add_return(segs + delta, p_id) - segs;
rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
acked = atomic_add_return(call->rx_consumed - old_consumed,
return atomic_add_return(i, v);
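Architectures without a native add-return get the operation built from a compare-and-swap loop. A minimal sketch of that construction (illustrative, not the kernel's exact generated fallback):

#include <linux/atomic.h>

static inline int sketch_add_return(int i, atomic_t *v)
{
        int old = atomic_read(v);

        /* Retry until no other CPU updated *v between the read and the CAS;
         * atomic_try_cmpxchg() refreshes 'old' on failure. */
        while (!atomic_try_cmpxchg(v, &old, old + i))
                ;

        return old + i;
}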