/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declarations, a strange C thing */
struct task_struct;
struct mm_struct;
struct io_bitmap;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/cpuid/api.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>
#include <asm/shstk.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware.  On the other hand,
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN    0

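/* Number of hardware breakpoint (debug address) registers, DR0-DR3: */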
#define HBP_NUM 4

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN             (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN        (1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN             __alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN        0
#endif

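/*
 * Last level TLB entry counts, detected at boot:
 * lli_* = instruction TLB, lld_* = data TLB, per supported page size.
 */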
extern u16 __read_mostly tlb_lli_4k;
extern u16 __read_mostly tlb_lli_2m;
extern u16 __read_mostly tlb_lli_4m;
extern u16 __read_mostly tlb_lld_4k;
extern u16 __read_mostly tlb_lld_2m;
extern u16 __read_mostly tlb_lld_4m;
extern u16 __read_mostly tlb_lld_1g;

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 */

struct cpuinfo_topology {
        // Real APIC ID read from the local APIC
        u32                     apicid;
        // The initial APIC ID provided by CPUID
        u32                     initial_apicid;

        // Physical package ID
        u32                     pkg_id;

        // Physical die ID on AMD, relative to the package on Intel
        u32                     die_id;

        // Compute unit ID - AMD specific
        u32                     cu_id;

        // Core ID relative to the package
        u32                     core_id;

        // Logical ID mappings
        u32                     logical_pkg_id;
        u32                     logical_die_id;
        u32                     logical_core_id;

        // AMD Node ID and Nodes per Package info
        u32                     amd_node_id;

        // Cache level topology IDs
        u32                     llc_id;
        u32                     l2c_id;

        // Hardware defined CPU-type
        union {
                u32             cpu_type;
                struct {
                        // CPUID.1A.EAX[23-0]
                        u32     intel_native_model_id   :24;
                        // CPUID.1A.EAX[31-24]
                        u32     intel_type              :8;
                };
                struct {
                        // CPUID 0x80000026.EBX
                        u32     amd_num_processors      :16,
                                amd_power_eff_ranking   :8,
                                amd_native_model_id     :4,
                                amd_type                :4;
                };
        };
};

struct cpuinfo_x86 {
        union {
                /*
                 * The particular ordering (low-to-high) of (vendor,
                 * family, model) is done in case a range of models, as
                 * is usually done on AMD, needs to be compared.
                 */
                struct {
                        __u8    x86_model;
                        /* CPU family */
                        __u8    x86;
                        /* CPU vendor */
                        __u8    x86_vendor;
                        __u8    x86_reserved;
                };
                /* combined vendor, family, model */
                __u32           x86_vfm;
        };
        __u8                    x86_stepping;
#ifdef CONFIG_X86_64
        /* Number of 4K pages in DTLB/ITLB combined (in pages): */
        int                     x86_tlbsize;
#endif
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
        __u32                   vmx_capability[NVMXINTS];
#endif
        __u8                    x86_virt_bits;
        __u8                    x86_phys_bits;
        /* Max extended CPUID function supported: */
        __u32                   extended_cpuid_level;
        /* Maximum supported CPUID level, -1=no CPUID: */
        int                     cpuid_level;
        /*
         * Align to the size of unsigned long because the x86_capability
         * array is passed to bitops, which require that alignment. Use an
         * unnamed union to enforce that the array is aligned to the size
         * of unsigned long.
         */
        union {
                __u32           x86_capability[NCAPINTS + NBUGINTS];
                unsigned long   x86_capability_alignment;
        };
        char                    x86_vendor_id[16];
        char                    x86_model_id[64];
        struct cpuinfo_topology topo;
        /* in KB - valid for CPUs which support this call: */
        unsigned int            x86_cache_size;
        int                     x86_cache_alignment;    /* In bytes */
        /* Cache QoS architectural values, valid only on the BSP: */
        int                     x86_cache_max_rmid;     /* max index */
        int                     x86_cache_occ_scale;    /* scale to bytes */
        int                     x86_cache_mbm_width_offset;
        int                     x86_power;
        unsigned long           loops_per_jiffy;
        /* protected processor identification number */
        u64                     ppin;
        u16                     x86_clflush_size;
        /* number of cores as seen by the OS: */
        u16                     booted_cores;
        /* Index into per_cpu list: */
        u16                     cpu_index;
        /*  Is SMT active on this core? */
        bool                    smt_active;
        u32                     microcode;
        /* Address space bits used by the cache internally */
        u8                      x86_cache_bits;
        unsigned                initialized : 1;
} __randomize_layout;

#define X86_VENDOR_INTEL        0
#define X86_VENDOR_CYRIX        1
#define X86_VENDOR_AMD          2
#define X86_VENDOR_UMC          3
#define X86_VENDOR_CENTAUR      5
#define X86_VENDOR_TRANSMETA    7
#define X86_VENDOR_NSC          8
#define X86_VENDOR_HYGON        9
#define X86_VENDOR_ZHAOXIN      10
#define X86_VENDOR_VORTEX       11
#define X86_VENDOR_NUM          12

#define X86_VENDOR_UNKNOWN      0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86       boot_cpu_data;
extern struct cpuinfo_x86       new_cpu_data;

extern __u32                    cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32                    cpu_caps_set[NCAPINTS + NBUGINTS];

DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)           per_cpu(cpu_info, cpu)
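/* E.g. cpu_data(cpu).x86_vfm or cpu_data(cpu).topo.pkg_id */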

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()       (boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long long l1tf_pfn_limit(void)
{
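        /*
         * Example with illustrative numbers: x86_cache_bits == 36 and
         * PAGE_SHIFT == 12 give BIT_ULL(23), i.e. a limit of 2^23 PFNs
         * (32 GB of physical address space).
         */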
        return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}

void init_cpu_devs(void);
void get_cpu_vendor(struct cpuinfo_x86 *c);
extern void early_cpu_init(void);
extern void identify_secondary_cpu(unsigned int cpu);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
        return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
        return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
        write_cr3(__sme_pa(pgdir));
}

/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
        unsigned short          back_link, __blh;
        unsigned long           sp0;
        unsigned short          ss0, __ss0h;
        unsigned long           sp1;

        /*
         * We don't use ring 1, so ss1 is a convenient scratch space in
         * the same cacheline as sp0.  We use ss1 to cache the value in
         * MSR_IA32_SYSENTER_CS.  When we context switch
         * MSR_IA32_SYSENTER_CS, we first check if the new value being
         * written matches ss1; if it does not, we wrmsr the new value
         * and update ss1.
         *
         * The only reason we context switch MSR_IA32_SYSENTER_CS is
         * that we set it to zero in vm86 tasks to avoid corrupting the
         * stack if we were to go through the sysenter path from vm86
         * mode.
         */
        unsigned short          ss1;    /* MSR_IA32_SYSENTER_CS */

        unsigned short          __ss1h;
        unsigned long           sp2;
        unsigned short          ss2, __ss2h;
        unsigned long           __cr3;
        unsigned long           ip;
        unsigned long           flags;
        unsigned long           ax;
        unsigned long           cx;
        unsigned long           dx;
        unsigned long           bx;
        unsigned long           sp;
        unsigned long           bp;
        unsigned long           si;
        unsigned long           di;
        unsigned short          es, __esh;
        unsigned short          cs, __csh;
        unsigned short          ss, __ssh;
        unsigned short          ds, __dsh;
        unsigned short          fs, __fsh;
        unsigned short          gs, __gsh;
        unsigned short          ldt, __ldth;
        unsigned short          trace;
        unsigned short          io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
        u32                     reserved1;
        u64                     sp0;
        u64                     sp1;

        /*
         * Since Linux does not use ring 2, the 'sp2' slot is unused by
         * hardware.  entry_SYSCALL_64 uses it as scratch space to stash
         * the user RSP value.
         */
        u64                     sp2;

        u64                     reserved2;
        u64                     ist[7];
        u32                     reserved3;
        u32                     reserved4;
        u16                     reserved5;
        u16                     io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS                  65536
#define IO_BITMAP_BYTES                 (IO_BITMAP_BITS / BITS_PER_BYTE)
#define IO_BITMAP_LONGS                 (IO_BITMAP_BYTES / sizeof(long))
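/* E.g. on 64-bit: 65536 bits = 8192 bytes = 1024 longs. */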

#define IO_BITMAP_OFFSET_VALID_MAP                              \
        (offsetof(struct tss_struct, io_bitmap.bitmap) -        \
         offsetof(struct tss_struct, x86_tss))

#define IO_BITMAP_OFFSET_VALID_ALL                              \
        (offsetof(struct tss_struct, io_bitmap.mapall) -        \
         offsetof(struct tss_struct, x86_tss))
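
/*
 * Both offsets are relative to the start of x86_tss, which is the base
 * the CPU adds io_bitmap_base to when it checks I/O permissions.
 */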

#ifdef CONFIG_X86_IOPL_IOPERM
/*
 * sizeof(unsigned long) coming from an extra "long" at the end of the
 * iobitmap. The limit is inclusive, i.e. the last valid byte.
 */
# define __KERNEL_TSS_LIMIT     \
        (IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
         sizeof(unsigned long) - 1)
#else
# define __KERNEL_TSS_LIMIT     \
        (offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
#endif

/* Base offset outside of TSS_LIMIT so unprivileged IO causes #GP */
#define IO_BITMAP_OFFSET_INVALID        (__KERNEL_TSS_LIMIT + 1)

struct entry_stack {
        char    stack[PAGE_SIZE];
};

struct entry_stack_page {
        struct entry_stack stack;
} __aligned(PAGE_SIZE);

/*
 * All IO bitmap related data stored in the TSS:
 */
struct x86_io_bitmap {
        /* The sequence number of the last active bitmap. */
        u64                     prev_sequence;

        /*
         * Store the dirty size of the last io bitmap offender. The next
         * one will have to do the cleanup as the switch out to a non io
         * bitmap user will just set x86_tss.io_bitmap_base to a value
         * outside of the TSS limit. So for sane tasks there is no need to
         * actually touch the io_bitmap at all.
         */
        unsigned int            prev_max;

        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long           bitmap[IO_BITMAP_LONGS + 1];

        /*
         * Special I/O bitmap to emulate IOPL(3). All bytes zero,
         * except the additional byte at the end.
         */
        unsigned long           mapall[IO_BITMAP_LONGS + 1];
};

struct tss_struct {
        /*
         * The fixed hardware portion.  This must not cross a page boundary
         * at risk of violating the SDM's advice and potentially triggering
         * errata.
         */
        struct x86_hw_tss       x86_tss;

        struct x86_io_bitmap    io_bitmap;
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
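/*
 * Accessed via the this_cpu_*() helpers, e.g.
 * this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0) in native_load_sp0() below.
 */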

/* Per CPU interrupt stacks */
struct irq_stack {
        char            stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);

DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, hardirq_stack_ptr);
#ifdef CONFIG_X86_64
DECLARE_PER_CPU_CACHE_HOT(bool, hardirq_stack_inuse);
#else
DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, softirq_stack_ptr);
#endif

DECLARE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack);
/* const-qualified alias provided by the linker. */
DECLARE_PER_CPU_CACHE_HOT(const unsigned long __percpu_seg_override,
                          const_cpu_current_top_of_stack);

#ifdef CONFIG_X86_64
static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
#ifdef CONFIG_SMP
        return per_cpu_offset(cpu);
#else
        return 0;
#endif
}

extern asmlinkage void entry_SYSCALL32_ignore(void);

/* Save actual FS/GS selectors and bases to current->thread */
void current_save_fsgs(void);
#endif  /* X86_64 */

struct perf_event;

struct thread_struct {
        /* Cached TLS descriptors: */
        struct desc_struct      tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
        unsigned long           sp0;
#endif
        unsigned long           sp;
#ifdef CONFIG_X86_32
        unsigned long           sysenter_cs;
#else
        unsigned short          es;
        unsigned short          ds;
        unsigned short          fsindex;
        unsigned short          gsindex;
#endif

#ifdef CONFIG_X86_64
        unsigned long           fsbase;
        unsigned long           gsbase;
#else
        /*
         * XXX: this could presumably be unsigned short.  Alternatively,
         * 32-bit kernels could be taught to use fsindex instead.
         */
        unsigned long fs;
        unsigned long gs;
#endif

        /* Save middle states of ptrace breakpoints */
        struct perf_event       *ptrace_bps[HBP_NUM];
        /* Debug status used for traps, single steps, etc... */
        unsigned long           virtual_dr6;
        /* Keep track of the exact dr7 value set by the user */
        unsigned long           ptrace_dr7;
        /* Fault info: */
        unsigned long           cr2;
        unsigned long           trap_nr;
        unsigned long           error_code;
#ifdef CONFIG_VM86
        /* Virtual 86 mode info */
        struct vm86             *vm86;
#endif
        /* IO permissions: */
        struct io_bitmap        *io_bitmap;

        /*
         * IOPL. Privilege level dependent I/O permission which is
         * emulated via the I/O bitmap to prevent user space from disabling
         * interrupts.
         */
        unsigned long           iopl_emul;

        unsigned int            iopl_warn:1;

        /*
         * Protection Keys Register for Userspace.  Loaded immediately on
         * context switch. Store it in thread_struct to avoid a lookup in
         * the task's FPU xstate buffer. This value is only valid when a
         * task is scheduled out. For 'current' the authoritative source of
         * PKRU is the hardware itself.
         */
        u32                     pkru;

#ifdef CONFIG_X86_USER_SHADOW_STACK
        unsigned long           features;
        unsigned long           features_locked;

        struct thread_shstk     shstk;
#endif
};

#ifdef CONFIG_X86_DEBUG_FPU
extern struct fpu *x86_task_fpu(struct task_struct *task);
#else
# define x86_task_fpu(task)     ((struct fpu *)((void *)(task) + sizeof(*(task))))
#endif
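
/*
 * The FPU state is placed right after the task_struct, hence the sizeof()
 * arithmetic above; e.g. x86_task_fpu(current) returns the current task's
 * FPU state.
 */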

extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size);

static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
        fpu_thread_struct_whitelist(offset, size);
}

static inline void
native_load_sp0(unsigned long sp0)
{
        this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

static __always_inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
        asm volatile("swapgs" ::: "memory");
#endif
}

static __always_inline unsigned long current_top_of_stack(void)
{
        /*
         *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
         *  and around vm86 mode and sp0 on x86_64 is special because of the
         *  entry trampoline.
         */
        if (IS_ENABLED(CONFIG_USE_X86_SEG_SUPPORT))
                return this_cpu_read_const(const_cpu_current_top_of_stack);

        return this_cpu_read_stable(cpu_current_top_of_stack);
}

static __always_inline bool on_thread_stack(void)
{
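        /*
         * Unsigned comparison: a stack pointer outside the THREAD_SIZE
         * range below the top of the stack makes the subtraction wrap or
         * exceed THREAD_SIZE, so pointers above the top are rejected too.
         */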
        return (unsigned long)(current_top_of_stack() -
                               current_stack_pointer) < THREAD_SIZE;
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline void load_sp0(unsigned long sp0)
{
        native_load_sp0(sp0);
}

#endif /* CONFIG_PARAVIRT_XXL */

unsigned long __get_wchan(struct task_struct *p);

extern void select_idle_routine(void);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long            boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
                         IDLE_POLL};

extern void enable_sep_cpu(void);


/* Defined in head.S */
extern struct desc_ptr          early_gdt_descr;

extern void switch_gdt_and_percpu_base(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void cpu_init(void);
extern void cpu_init_exception_handling(bool boot_cpu);
extern void cpu_init_replace_early_idt(void);
extern void cr4_init(void);

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int                      bootloader_type;
extern int                      bootloader_version;

extern char                     ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW

#ifdef CONFIG_X86_32
# define BASE_PREFETCH          ""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH          "prefetcht0 %1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
        alternative_input(BASE_PREFETCH, "prefetchnta %1",
                          X86_FEATURE_XMM,
                          "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static __always_inline void prefetchw(const void *x)
{
        alternative_input(BASE_PREFETCH, "prefetchw %1",
                          X86_FEATURE_3DNOWPREFETCH,
                          "m" (*(const char *)x));
}

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
                           TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({                                                                      \
        unsigned long __ptr = (unsigned long)task_stack_page(task);     \
        __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;             \
        ((struct pt_regs *)__ptr) - 1;                                  \
})
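
/*
 * I.e. the saved user-mode pt_regs sit at the top of the kernel stack,
 * just below TOP_OF_KERNEL_STACK_PADDING, and task_top_of_stack()
 * points right past them.
 */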

#ifdef CONFIG_X86_32
#define INIT_THREAD  {                                                    \
        .sp0                    = TOP_OF_INIT_STACK,                      \
        .sysenter_cs            = __KERNEL_CS,                            \
}

#else
extern unsigned long __top_init_kernel_stack[];

#define INIT_THREAD {                                                   \
        .sp     = (unsigned long)&__top_init_kernel_stack,              \
}

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
                                               unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmaps.
 */
#define __TASK_UNMAPPED_BASE(task_size) (PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE              __TASK_UNMAPPED_BASE(TASK_SIZE_LOW)
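/* E.g. with a 3 GiB user address space the base lands at roughly 1 GiB. */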

#define KSTK_EIP(task)          (task_pt_regs(task)->ip)
#define KSTK_ESP(task)          (task_pt_regs(task)->sp)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)        get_tsc_mode((adr))
#define SET_TSC_CTL(val)        set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);
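/* GET_TSC_CTL()/SET_TSC_CTL() above back the PR_GET_TSC/PR_SET_TSC prctls. */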

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

static inline u32 per_cpu_llc_id(unsigned int cpu)
{
        return per_cpu(cpu_info.topo.llc_id, cpu);
}

static inline u32 per_cpu_l2c_id(unsigned int cpu)
{
        return per_cpu(cpu_info.topo.l2c_id, cpu);
}

#ifdef CONFIG_CPU_SUP_AMD
/*
 * Issue a DIV 0/1 insn to clear any division data from previous DIV
 * operations.
 */
static __always_inline void amd_clear_divider(void)
{
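        /* EDX:EAX = 0 divided by 1 (held in a register): a no-op division. */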
        asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
                     :: "a" (0), "d" (0), "r" (1));
}

extern void amd_check_microcode(void);
#else
static inline void amd_clear_divider(void)              { }
static inline void amd_check_microcode(void)            { }
#endif

extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(const char *what, void *begin, void *end);

void default_idle(void);
#ifdef  CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void __noreturn stop_this_cpu(void *dummy);
void microcode_check(struct cpuinfo_x86 *prev_info);
void store_cpu_caps(struct cpuinfo_x86 *info);

DECLARE_PER_CPU(bool, cache_state_incoherent);

enum l1tf_mitigations {
        L1TF_MITIGATION_OFF,
        L1TF_MITIGATION_AUTO,
        L1TF_MITIGATION_FLUSH_NOWARN,
        L1TF_MITIGATION_FLUSH,
        L1TF_MITIGATION_FLUSH_NOSMT,
        L1TF_MITIGATION_FULL,
        L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
        MDS_MITIGATION_OFF,
        MDS_MITIGATION_AUTO,
        MDS_MITIGATION_FULL,
        MDS_MITIGATION_VMWERV,
};

extern bool gds_ucode_mitigated(void);

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions.  WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE.  The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are full serializing instructions themselves and
 * do not require this barrier.  This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
        alternative("mfence; lfence", "", ALT_NOT(X86_FEATURE_APIC_MSRS_FENCE));
}

#endif /* _ASM_X86_PROCESSOR_H */