smp_mb(): full SMP memory barrier; representative definitions and annotated call sites
#define smp_mb__after_spinlock() smp_mb()
#define cpu_relax() smp_mb()
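smp_mb__after_spinlock() exists because taking a spinlock is only an ACQUIRE: a store issued before the lock can still be reordered after a load issued inside the critical section. The define above supplies a full smp_mb() on architectures whose lock acquisition is not already a full barrier. A minimal userspace sketch of the same promotion, assuming invented names (waker, wakeup_pending, task_sleeping) and with atomic_thread_fence(memory_order_seq_cst) standing in for smp_mb():

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int wakeup_pending;  /* written here, read by the sleeper */
static atomic_int task_sleeping;   /* written by the sleeper, read here */

void waker(void)
{
        atomic_store_explicit(&wakeup_pending, 1, memory_order_relaxed);
        pthread_mutex_lock(&lock);              /* ACQUIRE only */
        /* smp_mb__after_spinlock(): forbid the store above from being
         * reordered after the load below (store->load ordering). */
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load_explicit(&task_sleeping, memory_order_relaxed)) {
                /* ... the sleeper is guaranteed to observe
                 * wakeup_pending == 1 on its side of the pairing ... */
        }
        pthread_mutex_unlock(&lock);
}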
smp_mb(); /* End printing "critical section" */
smp_mb(); /* This orders prodded store vs ceded load */
smp_mb(); /* paired with smp_wmb in xive_esc_irq */
smp_mb(); /* see radix__flush_tlb_mm */
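The "paired with smp_wmb" annotation above is one leg of the message-passing pattern: the writer separates its payload and flag stores with a write barrier, and the reader separates the flag and payload loads with a barrier at least as strong. A runnable userspace sketch, with payload/flag as invented names and C11 fences standing in for smp_wmb()/smp_mb():

#include <stdatomic.h>

static atomic_int payload, flag;

void writer(void)
{
        atomic_store_explicit(&payload, 42, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* smp_wmb() */
        atomic_store_explicit(&flag, 1, memory_order_relaxed);
}

int reader(void)
{
        if (!atomic_load_explicit(&flag, memory_order_relaxed))
                return -1;                              /* not ready yet */
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        /* a reader that saw flag == 1 must also see payload == 42 */
        return atomic_load_explicit(&payload, memory_order_relaxed);
}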
smp_mb(); /* clear flag *before* checking for work */
smp_mb(); /* make sure dev->attached is checked */
smp_mb(); /* finish all descriptor reads before incrementing tail */
smp_mb(); /* pairs with xe_pm_read_callback_task */
smp_mb(); /* pairs with xe_pm_write_callback_task */
smp_mb(); /* modify jump before enable thread */
smp_mb(); /* store-load barrier between "idle" and funnel queue */
smp_mb(); /* Match smp_mb in set_in_sync() */
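The "clear flag *before* checking for work" comment above is the idle-worker handshake: the worker publishes that it is about to sleep and only then re-checks the queue, while the producer publishes work and only then tests the sleep flag, so at least one side must see the other. A sketch under invented names (need_wake, queued), with seq_cst fences standing in for smp_mb():

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool need_wake;   /* worker is (about to go) idle */
static atomic_int  queued;      /* count of pending work items  */

bool worker_should_sleep(void)
{
        atomic_store_explicit(&need_wake, true, memory_order_relaxed);
        /* publish the flag *before* checking for work: smp_mb() */
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&queued, memory_order_relaxed) == 0;
}

bool producer_must_wake(void)
{
        atomic_fetch_add_explicit(&queued, 1, memory_order_relaxed);
        /* publish the work *before* testing the flag: smp_mb() */
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_exchange_explicit(&need_wake, false,
                                        memory_order_relaxed);
}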
smp_mb(); /* commit Tx queue .processed updates */
smp_mb(); /* pairs with the one in fun_tx_check_stop() */
smp_mb(); /* Make sure it is visible to the workers on datapath */
smp_mb(); /* Memory barrier before checking ring_space */
smp_mb(); /* Force any pending update before accessing. */
smp_mb(); /* Force the above update. */
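The Tx-queue comments above all instantiate the same stop/wake protocol for a producer/consumer ring: a producer that finds the ring full stops itself and re-checks for space only after a full barrier, and the completion path publishes its .processed (tail) update and re-checks the stopped flag only after a full barrier, so the queue cannot stall with both sides waiting on each other. A sketch over a hypothetical ring layout, with seq_cst fences standing in for smp_mb():

#include <stdatomic.h>
#include <stdbool.h>

struct ring {
        atomic_uint head;       /* producer index               */
        atomic_uint tail;       /* completion index (processed) */
        atomic_bool stopped;    /* producer parked, needs wake  */
        unsigned int size;
};

static unsigned int ring_space(struct ring *r)
{
        return r->size - (atomic_load(&r->head) - atomic_load(&r->tail));
}

void producer_maybe_stop(struct ring *r)
{
        if (ring_space(r))
                return;
        atomic_store(&r->stopped, true);
        /* commit the stop *before* re-reading ring_space: smp_mb() */
        atomic_thread_fence(memory_order_seq_cst);
        if (ring_space(r))              /* completions raced in */
                atomic_store(&r->stopped, false);
}

void completer(struct ring *r, unsigned int done)
{
        atomic_fetch_add(&r->tail, done);
        /* commit .processed updates *before* checking stopped: smp_mb() */
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load(&r->stopped) && ring_space(r))
                atomic_store(&r->stopped, false);       /* wake producer */
}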
smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
smp_mb(); /* ensure we change reset_pending before checking state */
smp_mb(); /* ensure we change state before checking reset_pending */
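The reset_pending/state pair above is the store-buffering litmus test in its pure form: each CPU stores to its own variable, executes a full barrier, then loads the other CPU's variable, and the paired barriers forbid the outcome where both loads miss both stores. A runnable userspace version, with seq_cst fences standing in for smp_mb():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int x, y;
static int r0, r1;

static void *cpu0(void *arg)
{
        atomic_store_explicit(&x, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        r0 = atomic_load_explicit(&y, memory_order_relaxed);
        return arg;
}

static void *cpu1(void *arg)
{
        atomic_store_explicit(&y, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        r1 = atomic_load_explicit(&x, memory_order_relaxed);
        return arg;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, cpu0, NULL);
        pthread_create(&b, NULL, cpu1, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        /* r0 == 0 && r1 == 0 is forbidden: with both fences present,
         * at least one thread must observe the other's store. */
        printf("r0=%d r1=%d\n", r0, r1);
        return 0;
}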
smp_mb(); /* in order to force CPU ordering */
smp_mb(); /* See spi_finalize_current_message()... */
smp_mb(); /* See __spi_pump_transfer_message()... */
smp_mb(); /* against acm_suspend() */
smp_mb(); // paired with __legitimize_mnt()
smp_mb(); // see mntput_no_expire() and do_umount()
#ifndef smp_mb__after_switch_mm
# define smp_mb__after_switch_mm() smp_mb()
#endif
smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */
smp_mb(); /* B matches C */
#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
smp_mb(); // Placeholder for more selective ordering
smp_mb(); // Provide ordering on noinstr-incomplete architectures.
smp_mb(); /* paired with resched_curr() */
smp_mb(); /* see mmdrop_lazy_tlb() above */
smp_mb(); /* (B) */
KCSAN_EXPECT_READ_BARRIER(smp_mb(), true);
KCSAN_EXPECT_WRITE_BARRIER(smp_mb(), true);
KCSAN_EXPECT_RW_BARRIER(smp_mb(), true);
KCSAN_CHECK_READ_BARRIER(smp_mb());
KCSAN_CHECK_WRITE_BARRIER(smp_mb());
KCSAN_CHECK_RW_BARRIER(smp_mb());
smp_mb(); /* C matches B */
smp_mb(); /* A matches D */
smp_mb(); /* See comments above and below. */
smp_mb(); /* LMM(rcuwait_has_sleeper:A) */
smp_mb(); /* Ensure update-side operation before counter increment. */
smp_mb(); /* Above access must not bleed into critical section. */
smp_mb(); /* Ensure update-side operation after counter increment. */
smp_mb(); // Read header comment above.
smp_mb(); /* Ensure counts are updated before callback is entrained. */
smp_mb(); /* Assign before wake. */
smp_mb(); /* Wake before output. */
smp_mb(); // Time check before grace-period check.
smp_mb(); // Sample jiffies after posting hrtimer.
smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
smp_mb(); /* Frees before return to avoid redoing OOM. */
smp_mb(); // Can't trust ordering if broken.
smp_mb(); // Store before wakeup.
smp_mb(); // Above write before wait.
smp_mb(); // Wake before output.
smp_mb(); /* E */ /* Pairs with B and C. */
smp_mb(); /* D */ /* Pairs with C. */
smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
smp_mb(); /* Order ->srcu_gp_seq with prior access. */
smp_mb(); /* Force ordering following return. */
smp_mb(); /* A */
smp_mb(); /* B */ /* Avoid leaking the critical section. */
smp_mb(); /* C */ /* Avoid leaking the critical section. */
smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
smp_mb(); /* Ensure others see full kthread. */
smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
smp_mb(); /* Ensure GP ends before subsequent accesses. */
smp_mb(); // Order against root rcu_node structure grace-period cleanup.
smp_mb(); /* caller's subsequent code after above check. */
smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
smp_mb(); /* Ensure that consecutive grace periods serialize. */
smp_mb(); /* Caller's modifications seen first by other CPUs. */
smp_mb(); /* All above changes before wakeup. */
smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
smp_mb(); /* matches sched_clock_init_late() */
smp_mb(); /* matches {set,clear}_sched_clock_stable() */
smp_mb(); /* Pairing determined by caller's synchronization design. */
smp_mb(); /* IPIs should be serializing but paranoid. */
smp_mb(); /* system call entry is not a mb. */
smp_mb(); /* exit from system call is not a mb. */
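The "system call entry/exit is not a mb" comments above are from sys_membarrier(): the kernel issues explicit smp_mb() calls around its expedited IPIs precisely because crossing the user/kernel boundary does not by itself order memory. The same machinery is reachable from userspace via membarrier(2); a minimal Linux-only example:

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        /* Register the process, then force every currently running
         * thread of it through a full memory barrier. */
        if (syscall(SYS_membarrier,
                    MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0))
                perror("membarrier register");
        if (syscall(SYS_membarrier,
                    MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0))
                perror("membarrier");
        puts("all running threads passed through a full barrier");
        return 0;
}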
smp_mb(); /* C */
smp_mb(); /* advised by wake_up_bit() */
smp_mb(); /* NULL cur_csd after unlock. */
smp_mb(); /* Update cur_csd before function call. */
smp_mb(); /* wake_up_bit advises this */
smp_mb(); /* mutex lock should provide enough pairing */
smp_mb(); /* Read data before setting avail bit */
#ifndef smp_mb
#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#endif
smp_mb(); /* Enforce dependency ordering from x */