atomic_xchg: call sites in the Linux kernel
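atomic_xchg(v, new) atomically stores new into the atomic_t pointed to by v and returns the value that was there before; because it is a value-returning read-modify-write, it also implies full memory ordering on both sides. The call sites collected below fall into a few recurring idioms: run-once guards, drain-and-reset counters and event masks, test-and-set locks, change detection, and state-machine transitions. A minimal sketch of the bare semantics (names are illustrative, not taken from any call site below):

#include <linux/atomic.h>

static atomic_t example = ATOMIC_INIT(3);

static void demo(void)
{
        int old = atomic_xchg(&example, 7);

        /*
         * example now holds 7; old holds the previous value, 3.
         * The read of the old value and the store of the new one
         * happen as one indivisible operation.
         */
}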
action = atomic_xchg(&info->message, 0);
fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);
if (atomic_xchg(&run_once, 1)) {
if (atomic_xchg(&run_once, 1)) {
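The two run_once tests above are the classic run-once guard: exactly one caller, the one whose exchange returns 0, performs the setup; every later or concurrent caller reads back 1 and skips it (the dumped, warned and ___rfd_beenhere sites further down use the same pattern for warn-once paths). A sketch, assuming an invented setup function:

#include <linux/atomic.h>

static atomic_t run_once = ATOMIC_INIT(0);

static void setup_once(void)
{
        /* Only the caller that swaps 0 -> 1 sees the old value 0. */
        if (atomic_xchg(&run_once, 1))
                return;         /* setup already done (or in progress) */

        /* ... one-time initialization ... */
}

Note the caveat visible in the comment: a loser of the race returns immediately even if the winner has not finished yet, so call sites that must wait for completion pair this guard with a completion or similar.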
addr = (u32)&atomic_xchg;
int p = atomic_xchg(&q->pending_count, 0);
if (atomic_xchg(&parms->master_cpu, 1) == 1)
if (atomic_xchg(&prd_usage, 1) == 1)
atomic_xchg(&prd_usage, 0);
int p = atomic_xchg(&q->pending_count, 0);
if (atomic_xchg(&channel->busy, 1))
if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0))
vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
sect_in = atomic_xchg(&device->rs_sect_in, 0);
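pending_count, nmi_queued, cpu_buckets and rs_sect_in above all use drain-and-reset: the consumer atomically reads the accumulated count and zeroes it in one step, so increments racing with the drain are never lost, merely deferred to the next pass. A sketch with hypothetical producer/consumer names:

#include <linux/atomic.h>

static atomic_t pending = ATOMIC_INIT(0);

/* producer side, e.g. called from an interrupt handler */
static void note_event(void)
{
        atomic_inc(&pending);
}

/* consumer side: take everything accumulated so far, leave 0 behind */
static void drain_events(void)
{
        int n = atomic_xchg(&pending, 0);

        while (n--)
                ;       /* handle one event */
}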
if (atomic_xchg(&device->state,
irq = atomic_xchg(&ld->irq, 0);
return atomic_xchg(&pdata->pwm_pin_busy, 1) ? -EBUSY : 0;
atomic_xchg(&pvr_dev->mmu_flush_cache_flags, 0);
if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
pending = atomic_xchg(&mdp4_crtc->pending, 0);
pending = atomic_xchg(&mdp5_crtc->pending, 0);
if (atomic_xchg(&ntfy->allowed, 0) == 1) {
if (atomic_xchg(&ntfy->allowed, 1) == 0) {
if (!atomic_xchg(&inth->allowed, 1)) {
if (atomic_xchg(&runl->changed, 0) && runl->func->update) {
rc = atomic_xchg(&runl->rc_pending, 0);
if (!atomic_xchg(&clk->waiting, 0))
u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);
u32 events = atomic_xchg(&sched->fw_events, 0);
if (atomic_xchg(&rdev->ih.lock, 1))
if (atomic_xchg(&rdev->ih.lock, 1))
if (atomic_xchg(&rdev->ih.lock, 1))
if (atomic_xchg(&rdev->ih.lock, 1))
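The ih.lock test above (quoted once per call site) and the earlier prd_usage, channel->busy and pwm_pin_busy sites are test-and-set try-locks: swap in 1, and if 1 comes back the flag was already held, so back off, typically with -EBUSY, rather than spin. Release exchanges (or stores) 0. A sketch:

#include <linux/atomic.h>
#include <linux/errno.h>

static atomic_t in_use = ATOMIC_INIT(0);

static int claim(void)
{
        /* old value 1 means somebody else already holds the flag */
        if (atomic_xchg(&in_use, 1))
                return -EBUSY;
        return 0;
}

static void release(void)
{
        /*
         * atomic_xchg is kept here for its implied full barrier;
         * a plain atomic_set() would give no ordering.
         */
        atomic_xchg(&in_use, 0);
}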
if (!atomic_xchg(&xe->wedged.flag, 1)) {
if (!atomic_xchg(&xe->irq.enabled, 0))
if (atomic_xchg(&f->signaling, 1)) {
if (atomic_xchg(&f->signaling, 1)) {
event = atomic_xchg(&data->regulator_events[i], 0);
atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
atomic_xchg(&txq->tx_ring.ring_full, 0)) {
if (!atomic_xchg(&txq->tx_ring.no_desc, 1)) {
if (atomic_xchg(&txq->tx_ring.no_desc, 0))
!atomic_xchg(&txq->tx_ring.ring_full, 1)) {
cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
!atomic_xchg(&cookie->fq_timer_on, 1))
unsigned int t = atomic_xchg(&acc->collector.name, 0); \
if (atomic_xchg(&dc->running, 1)) {
!atomic_xchg(&dc->has_dirty, 1)) {
switch (atomic_xchg(&pin->work_irq_change,
data = atomic_xchg(&lis3->count, 0);
if (atomic_xchg(&priv->sie_pending, 0)) {
atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
atomic_xchg(&tx->dqo_compl.free_tx_qpl_buf_head, -1);
if (atomic_xchg(&slave->link_state, link_state) != link_state)
if (atomic_xchg(&pef2256->carrier, carrier) != carrier)
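slave->link_state and pef2256->carrier above use the exchange as a change detector: publish the new value unconditionally, and do the notification work only when the returned old value differs. A sketch with an invented notifier:

#include <linux/atomic.h>

static atomic_t link_state = ATOMIC_INIT(0);

static void update_link(int new_state)
{
        /* store the new state; react only if it actually changed */
        if (atomic_xchg(&link_state, new_state) != new_state)
                ;       /* e.g. notify listeners of the link change */
}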
int rx = atomic_xchg(&wil->isr_count_rx, 0);
int tx = atomic_xchg(&wil->isr_count_tx, 0);
intstatus = atomic_xchg(&bus->intstatus, 0);
ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0);
ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback);
prev = atomic_xchg(&wdev->hif.ctrl_reg, cur);
rx = atomic_xchg(&priv->bh_rx, 0);
tx = atomic_xchg(&priv->bh_tx, 0);
term = atomic_xchg(&priv->bh_term, 0);
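intstatus, ctrl_reg and the bh_rx/bh_tx/bh_term trio above coalesce events between a hot path and a bottom-half thread: producers OR bits in (or set a flag) cheaply and possibly many times, and the thread empties the whole mask with one exchange-to-zero, acting once per batch. A sketch, with invented event bits:

#include <linux/atomic.h>

#define EVT_RX (1 << 0)
#define EVT_TX (1 << 1)

static atomic_t pending_events = ATOMIC_INIT(0);

/* producers, e.g. interrupt handlers, OR their bit in */
static void post_event(int evt)
{
        atomic_or(evt, &pending_events);
        /* ... wake the bottom-half thread ... */
}

/* the thread drains the whole mask in one atomic step */
static void handle_events(void)
{
        int events = atomic_xchg(&pending_events, 0);

        if (events & EVT_RX)
                ;       /* handle receive */
        if (events & EVT_TX)
                ;       /* handle transmit */
}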
atomic_xchg(&priv->recent_scan, 0);
if (atomic_xchg(&priv->scan.in_progress, 0)) {
if (atomic_xchg(&priv->tx_lock, 1) != 1)
atomic_xchg(&priv->tx_lock, 0); /* for recovery to work */
opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
terminating = atomic_xchg(&assoc->terminating, 1);
disconnect = atomic_xchg(&queue->connected, 0);
events = atomic_xchg(&ctrl->pending_events, 0);
long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
rc = atomic_xchg(&arm_state->ka_release_count, 0);
uc = atomic_xchg(&arm_state->ka_use_count, 0);
u32 flags = atomic_xchg(&state->poll_services[group], 0);
service_flags = atomic_xchg(&service->poll_flags, 0);
(data = atomic_xchg(&smo8800->counter, 0)));
if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
if (atomic_xchg(&hba->resetting, 1) == 0) {
io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
busy = atomic_xchg(&phba->cmf_busy, 0);
max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
if (atomic_xchg(&phba->cmf_bw_wait, 0))
if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
atomic_xchg(&devip->stopped, want_stop);
if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
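msg_pool[i].used above extends test-and-set to a slot pool: scan the array, and the first exchange that returns 0 has atomically claimed that slot, with no lock around the scan. A sketch with an invented pool:

#include <linux/atomic.h>

#define POOL_SIZE 8

static struct {
        atomic_t used;
        /* ... slot payload ... */
} pool[POOL_SIZE];

static int alloc_slot(void)
{
        int i;

        for (i = 0; i < POOL_SIZE; i++)
                if (atomic_xchg(&pool[i].used, 1) == 0)
                        return i;       /* this slot is now ours */
        return -1;                      /* pool exhausted */
}

static void free_slot(int i)
{
        atomic_xchg(&pool[i].used, 0);
}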
still_used = atomic_xchg(&buf->mem_used, 0);
srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted);
srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted);
if (atomic_xchg(&ctx->dead, 1)) {
ret = atomic_xchg(dev->dev_stat_values + index, 0);
f_released = atomic_xchg(&cache->f_released, 0);
if (atomic_xchg(&erofs_percpu_workers_initialized, 1))
if (!atomic_xchg(&erofs_percpu_workers_initialized, 0))
cache_hits = atomic_xchg(&dir_ctx->cache_hits, 0);
cache_misses = atomic_xchg(&dir_ctx->cache_misses, 0);
if (!atomic_xchg(&zone->dirty, false))
available = atomic_xchg(&sc->recv_io.credits.available, 0);
available = atomic_xchg(&sc->recv_io.credits.available, 0);
nr = atomic_xchg(io_get_user_counter(niov), 0);
u32 lost = atomic_xchg(&audit_lost, 0);
u32 actual = atomic_xchg(&audit_backlog_wait_time_actual, 0);
if (atomic_xchg(&insn_array->used, 1))
SRC = (u32) atomic_xchg(
} while (atomic_xchg(l, 1));
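The do/while just above is the spinning form of test-and-set: keep exchanging 1 until a 0 comes back. A sketch of a minimal xchg-based spinlock (real kernel code would normally use the proper spinlock API; cpu_relax() comes from the arch headers):

#include <linux/atomic.h>

static void xchg_lock(atomic_t *l)
{
        /* spin until we are the caller that swapped 0 -> 1 */
        while (atomic_xchg(l, 1))
                cpu_relax();
}

static void xchg_unlock(atomic_t *l)
{
        /* release store so the critical section cannot leak past it */
        atomic_set_release(l, 0);
}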
if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
if (atomic_xchg(&c->call_rcu_in_progress, 1)) {
(void)atomic_xchg(st, state);
atomic_xchg(&kgdb_active, cpu);
atomic_xchg(&kgdb_active, cpu);
events = atomic_xchg(&rb->poll, 0);
if (!atomic_xchg(&reset_hung_task, 0) &&
old = atomic_xchg(&lock->tail, curr);
return atomic_xchg(&sem->block, 1) == 0;
!atomic_xchg(&___rfd_beenhere, 1)) { \
if (!atomic_xchg(&dumped, 1)) {
atomic_xchg(&warned, 1)) {
os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
return atomic_xchg(&scx_enable_state_var, to);
if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force)
unsigned long cnt = atomic_xchg(&dropped_count, 0);
KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
int kmem = atomic_xchg(&memcg->kmem_stat, 0);
int slab = atomic_xchg(&pn->slab_reclaimable, 0);
int slab = atomic_xchg(&pn->slab_unreclaimable, 0);
state = atomic_xchg(&prdev->state, PAGE_REPORTING_REQUESTED);
!atomic_xchg(&krcp->work_in_progress, 1)) {
hits = atomic_xchg(&swapin_readahead_hits, 0);
if (atomic_xchg(&pgdat->kswapd_failures, 0))
if (atomic_xchg(&pool->compaction_in_progress, 1))
old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
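The con->sock_state sequence above, like op->state and twork->state earlier, is the exchange-based state machine: the new state is swapped in unconditionally, and the returned old state tells the caller which transition actually occurred, so an unexpected predecessor can be detected after the fact. A sketch with invented state names:

#include <linux/atomic.h>
#include <linux/printk.h>

enum { ST_CLOSED, ST_CONNECTING, ST_CONNECTED };

static atomic_t sock_state = ATOMIC_INIT(ST_CLOSED);

static void set_connected(void)
{
        int old_state = atomic_xchg(&sock_state, ST_CONNECTED);

        /* the old value identifies the transition we just made */
        if (old_state != ST_CONNECTING)
                pr_warn("unexpected transition from state %d\n", old_state);
}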
int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
drops = atomic_xchg(&po->tp_drops, 0);
atomic_xchg(&rx->key_distr, 0);
k = atomic_xchg(&rx->peer_rx_active, 0);
atomic_xchg(&rx->key_distr, 0);
return atomic_xchg(v, new);
if (atomic_xchg(&tascam->midi_in_active, 1) == 0) {
if (atomic_xchg(&tascam->midi_in_active, 0) == 1) {
cnt = atomic_xchg(&sample_cnt, 0);