data_race
if (!data_race(warned)) {
data_race(warned = true);
data_race(cpa_4k_install++);
if (data_race(READ_ONCE(lo->lo_state)) != Lo_bound)
data_race(READ_ONCE(lo->lo_state)) == Lo_unbound)
if (data_race(READ_ONCE(zlo->state)) == Zlo_deleting)
if (!data_race(priv->rx.initialized))
if (!data_race(priv->initialized)) {
data_race(vq->event_triggered = false);
data_race(vq->event_triggered = true);
return data_race(bg1->used > bg2->used);
return data_race(rsv->full);
max_count = data_race(delayed_refs->num_heads_ready);
if (data_race(args->start >= inode->disk_i_size) && !args->replace_extent)
if (data_race(list_empty(&sinfo->ro_bgs)))
if (data_race(inode->logged_trans) == trans->transid)
ASSERT(data_race(BTRFS_I(inode)->logged_trans) > 0);
if (data_race(BTRFS_I(inode)->logged_trans) < trans->transid)
if (data_race(inode->logged_trans) == trans->transid) {
data_reloc_bg = data_race(fs_info->data_reloc_bg);
treelog_bg = data_race(fs_info->treelog_bg);
sum += data_race(per_cpu(mg_ctime_updates, i));
sum += data_race(per_cpu(mg_fine_stamps, i));
sum += data_race(per_cpu(mg_ctime_swaps, i));
if (unlikely(data_race(dio->error)))
if (data_race(jh->b_transaction != transaction &&
if (data_race(jh->b_modified == 1)) {
if (data_race(jh->b_transaction == transaction &&
if (!data_race(journal->j_running_transaction)) {
__entry->nrefs = data_race(dqp->q_lockref.count);
data_race(*ldst++ = *lsrc++);
return data_race(READ_ONCE(con->flags));
if (data_race(mm->hiwater_rss) < _rss)
data_race(mm->hiwater_rss = _rss);
return page_type_has_type(data_race(page->page_type));
return data_race(folio->page.page_type >> 24) == PGTY_##lname; \
VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
return data_race(page->page_type >> 24) == PGTY_##lname; \
VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
curr->rseq.slice.expires = data_race(rseq_slice_ext_nsecs) + ktime_get_mono_fast_ns();
idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),
data_race(READ_ONCE(ssp->srcu_idx)),
data_race(READ_ONCE(ssp->srcu_idx_max)));
if (likely(data_race(!sk->sk_err)))
seq_printf(m, "CachedSqHead:\t%u\n", data_race(ctx->cached_sq_head));
seq_printf(m, "CachedCqTail:\t%u\n", data_race(ctx->cached_cq_tail));
WARN_ON_ONCE(!(data_race(req->flags) & REQ_F_REFCOUNT));
tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
if (data_race(nr_threads >= max_threads))
return data_race(desc->tot_count);
sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu));
if (data_race(report_filterlist.used == report_filterlist.size)) {
/*
 * KCSAN self-test helper: performs a plain (non-atomic) increment of the
 * shared test_var, but wraps it in data_race() to mark the racy access as
 * intentional — KCSAN should therefore NOT report this access.
 * noinline keeps the access attributable to this function in reports.
 * NOTE(review): test_var is presumably a file-scope global shared with a
 * concurrent accessor elsewhere in the test — confirm against the full file.
 */
static noinline void test_kernel_data_race(void) { data_race(test_var++); }
long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
if (data_race(statp[i].n_lock_fail))
cur = data_race(statp[i].n_lock_acquired);
if (data_race(prev->next) == node &&
con_dropped = data_race(READ_ONCE(con->dropped));
data_race(n_barrier_successes),
data_race(n_barrier_attempts),
data_race(n_rcu_torture_barrier_error));
pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
u0 = data_race(atomic_long_read(&sdp->srcu_ctrs[!idx].srcu_unlocks));
u1 = data_race(atomic_long_read(&sdp->srcu_ctrs[idx].srcu_unlocks));
l0 = data_race(atomic_long_read(&sdp->srcu_ctrs[!idx].srcu_locks));
l1 = data_race(atomic_long_read(&sdp->srcu_ctrs[idx].srcu_locks));
data_race(t->rcu_tasks_idle_cpu), cpu);
int i = data_race(rtp->gp_state); // Let KCSAN detect update races
rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim),
if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
if (data_race(rtpcp->urgent_gp))
if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
jiffies - data_race(rtp->gp_jiffies),
data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
".k"[!!data_race(rtp->kthread_ptr)],
tt, tf, tst, data_race(rtp->tasks_gp_seq),
j - data_race(rtp->gp_start), j - data_race(rtp->gp_jiffies),
data_race(rtp->gp_state), tasks_gp_state_getname(rtp));
data_race(rtp->percpu_enqueue_shift),
data_race(rtp->percpu_enqueue_lim),
data_race(rtp->percpu_dequeue_lim),
data_race(rtp->percpu_dequeue_gpseq));
data_race(rtp->barrier_q_seq), j - data_race(rtp->barrier_q_start),
trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
"D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
j - jiffies_start, rcu_state.expedited_sequence, data_race(rnp_root->expmask),
".T"[!!data_race(rnp_root->exp_tasks)]);
rnp->level, rnp->grplo, rnp->grphi, data_race(rnp->expmask),
".T"[!!data_race(rnp->exp_tasks)]);
__func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
if (gp_seq != data_race(rcu_state.gp_seq)) {
if (!(data_race(rnp->qsmask) & leaf_node_cpu_bit(rnp, cpu)))
data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
data_race(READ_ONCE(rcu_state.gp_flags)),
data_race(READ_ONCE(rcu_state.gp_state)),
gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
} else if (!(data_race(READ_ONCE(rdp->mynode->qsmask)) & rdp->grpmask)) {
data_race(READ_ONCE(rcu_state.gp_flags)), // Diagnostic read
data_race(READ_ONCE(gpk->__state)));
data_race(rcu_state.n_online_cpus)); // Diagnostic read
gpa = data_race(READ_ONCE(rcu_state.gp_activity));
data_race(READ_ONCE(jiffies_till_next_fqs)),
data_race(READ_ONCE(rcu_get_root()->qsmask)));
data_race(rcu_state.n_online_cpus)); // Diagnostic read
if (data_race(READ_ONCE(rnp->qsmask))) {
ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
js = j - data_race(READ_ONCE(rcu_state.gp_start));
jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
data_race(READ_ONCE(rcu_state.gp_state)),
t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
(long)data_race(READ_ONCE(rcu_state.gp_seq)),
(long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
data_race(READ_ONCE(rcu_state.gp_max)),
data_race(READ_ONCE(rcu_state.gp_flags)));
!data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
!data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
(long)data_race(READ_ONCE(rnp->gp_seq)),
(long)data_race(READ_ONCE(rnp->gp_seq_needed)),
data_race(READ_ONCE(rnp->qsmask)),
".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
data_race(READ_ONCE(rnp->n_boosts)));
cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
stats.exit += data_race(per_cpu(rseq_stats.exit, cpu));
stats.signal += data_race(per_cpu(rseq_stats.signal, cpu));
stats.slowpath += data_race(per_cpu(rseq_stats.slowpath, cpu));
stats.fastpath += data_race(per_cpu(rseq_stats.fastpath, cpu));
stats.ids += data_race(per_cpu(rseq_stats.ids, cpu));
stats.cs += data_race(per_cpu(rseq_stats.cs, cpu));
stats.clear += data_race(per_cpu(rseq_stats.clear, cpu));
stats.fixup += data_race(per_cpu(rseq_stats.fixup, cpu));
stats.s_granted += data_race(per_cpu(rseq_stats.s_granted, cpu));
stats.s_expired += data_race(per_cpu(rseq_stats.s_expired, cpu));
stats.s_revoked += data_race(per_cpu(rseq_stats.s_revoked, cpu));
stats.s_yielded += data_race(per_cpu(rseq_stats.s_yielded, cpu));
stats.s_aborted += data_race(per_cpu(rseq_stats.s_aborted, cpu));
invoked_count += data_race(per_cpu(scf_invoked_count, cpu));
if (data_race(!src->user_cpus_ptr))
return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_boot)));
return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_tai)));
sum += data_race(per_cpu(timekeeping_mg_floor_swaps, cpu));
ret = data_race(cmpxchg(p32, old32.w, new32.w)); // Overridden above.
seq_printf(m, "pool_min_free : %u\n", data_race(pool_global.stats.min_fill));
seq_printf(m, "pool_max_used : %u\n", data_race(pool_global.stats.max_used));
cpumask_copy(npresmsk, data_race(cpu_present_mask));
data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
if (data_race(stackdepot_memcmp(entries, stack->entries, size)))
seq_printf(seq, "pools: %d\n", data_race(pools_num));
seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));
data_race(d->counter++); /* no warning */
if (data_race(list_empty(&folio->_deferred_list)))
distance = addr - data_race(meta->addr + meta->size);
if (!to_report || distance > data_race(meta->addr) - addr)
if (data_race(si->flags & SWP_SYNCHRONOUS_IO))
if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
data_race(si->flags & SWP_STABLE_WRITES)) {
data_race(c->failcnt++);
if (data_race(sis->flags & SWP_FS_OPS))
else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
if (data_race(sis->flags & SWP_FS_OPS)) {
return data_race(READ_ONCE(pcpu_nr_populated)) * pcpu_nr_units;
if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
if (data_race(page->page_type >> 24) != PGTY_slab)
if (!data_race(barn->nr_empty))
if (!data_race(barn->nr_full) && !data_race(barn->nr_empty))
if (!data_race(barn->nr_full))
if (data_race(barn->nr_full) >= MAX_FULL_SHEAVES)
if (!data_race(barn->nr_empty))
if (!n || data_race(!n->nr_partial))
flc.counters = data_race(READ_ONCE(slab->counters));
if (!barn || data_race(barn->nr_full) >= MAX_FULL_SHEAVES ||
if (data_race(barn->nr_full) < MAX_FULL_SHEAVES) {
if (barn && data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) {
if (data_race(folio_batch_count(fbatch))) {
data_race(memcpy(&dest->shared, &src->shared, sizeof(dest->shared)));
return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
if (data_race(!list_empty(&folio->_deferred_list) &&
blocks = data_race(zone->free_area[order].nr_free);
seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
data_race(ts->rd->f_flags |= O_NONBLOCK);
data_race(ts->wr->f_flags |= O_NONBLOCK);
id = data_race(prog->aux->id);
if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
data_race(*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr);
data_race(*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr);
data_race(({ md5_update(ctx, key->key, keylen), 0; }));
data_race(memcpy(key->key, newkey, newkeylen));
if (data_race(po->tx_ring.pg_vec))
if (data_race(unlikely(isec->initialized != LABEL_INITIALIZED)))
if (data_race(unlikely(isec->initialized != LABEL_INITIALIZED)))
if (data_race(likely(isec->initialized == LABEL_INITIALIZED)))
if (data_race(likely(isec->initialized == LABEL_INITIALIZED)))
if (data_race(likely(isec->initialized == LABEL_INITIALIZED)))
is_write = data_race(head->write_buf != NULL);
perm = data_race(container_of(ptr, struct tomoyo_path_acl, head)->perm);
perm = data_race(container_of(ptr, struct tomoyo_path2_acl, head)->perm);
perm = data_race(container_of(ptr, struct tomoyo_path_number_acl, head)
perm = data_race(container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
perm = data_race(container_of(ptr, struct tomoyo_inet_acl, head)->perm);
perm = data_race(container_of(ptr, struct tomoyo_unix_acl, head)->perm);