/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#ifdef CONFIG_RELOCATABLE
#define KERNEL_LINK_ADDR        UL(0)
#else
#define KERNEL_LINK_ADDR        _AC(CONFIG_PHYS_RAM_BASE, UL)
#endif
#define KERN_VIRT_SIZE          (UL(-1))
#else

#define ADDRESS_SPACE_END       (UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR        (ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR        PAGE_OFFSET
#endif

/* Number of entries in the page global directory */
#define PTRS_PER_PGD    (PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE    (PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE          ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)

#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END      PAGE_OFFSET
#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
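
/*
 * A worked example, assuming Sv39 with 4 KiB pages and 8-byte pgd
 * entries: PTRS_PER_PGD = 512 and PGDIR_SIZE = 1 GiB, so the kernel
 * half of the address space is 256 GiB, KERN_VIRT_SIZE is 128 GiB,
 * and the 64 GiB vmalloc area ends at PAGE_OFFSET.
 */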

#define BPF_JIT_REGION_SIZE     (SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START    (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END      (MODULES_END)
#else
#define BPF_JIT_REGION_START    (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END      (VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR    (KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR           (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END             (PFN_ALIGN((unsigned long)&_start))
#else
#define MODULES_VADDR           VMALLOC_START
#define MODULES_END             VMALLOC_END
#endif
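
/*
 * Placing modules and the BPF JIT within 2 GiB below the kernel keeps
 * them within reach of 32-bit PC-relative addressing (auipc-based
 * sequences reach +/- 2 GiB), avoiding long-jump trampolines to kernel
 * symbols.
 */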

/*
 * Roughly size the vmemmap space to be large enough to hold the
 * struct pages needed to map half the virtual address space, then
 * position vmemmap directly below the VMALLOC region.
 */
#define VA_BITS_SV32 32
#ifdef CONFIG_64BIT
#define VA_BITS_SV39 39
#define VA_BITS_SV48 48
#define VA_BITS_SV57 57

#define VA_BITS         (pgtable_l5_enabled ? \
                                VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
#else
#define VA_BITS         VA_BITS_SV32
#endif

#define VMEMMAP_SHIFT \
        (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE    BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END     VMALLOC_START
#define VMEMMAP_START   (VMALLOC_START - VMEMMAP_SIZE)

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if the
 * kernel is configured with CONFIG_SPARSEMEM_VMEMMAP.
 */
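/*
 * Under SPARSEMEM_VMEMMAP, pfn_to_page(pfn) reduces to &vmemmap[pfn];
 * biasing by vmemmap_start_pfn makes indexing by absolute PFN land at
 * VMEMMAP_START for the first mapped PFN even when RAM does not start
 * at PFN 0.
 */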
#define vmemmap         ((struct page *)VMEMMAP_START - vmemmap_start_pfn)

#define PCI_IO_SIZE      SZ_16M
#define PCI_IO_END       VMEMMAP_START
#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP      PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE     PMD_SIZE
#define FIX_FDT_SIZE     (MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE     (PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE     PGDIR_SIZE
#define FIX_FDT_SIZE     MAX_FDT_SIZE
#define FIXADDR_SIZE     (PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
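
/*
 * Resulting 64-bit layout, low to high (as derived from the
 * definitions above): fixmap < PCI I/O < vmemmap < vmalloc <
 * PAGE_OFFSET (linear map), with modules, the BPF JIT and the kernel
 * image in the top 2 GiB of the address space.
 */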

#endif

#ifndef __ASSEMBLER__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>

#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>

#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({                                                      \
        extern char _sdata[], _start[], _end[];                                 \
        uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR                       \
                                + (uintptr_t)&_sdata - (uintptr_t)&_start;      \
        uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR                         \
                                + (uintptr_t)&_end - (uintptr_t)&_start;        \
        uintptr_t __a = (uintptr_t)(addr);                                      \
        (__a >= __rom_start_data && __a < __rom_end_data) ?                     \
                __a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a;            \
        })
#else
#define XIP_FIXUP(addr)         (addr)
#endif /* CONFIG_XIP_KERNEL */
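
/*
 * XIP_FIXUP() adjusts the address of a writable object during early boot:
 * if it falls inside the ROM copy of .data (which is copied to RAM at
 * boot), it is rebased onto CONFIG_PHYS_RAM_BASE so the RAM copy is used
 * instead of the read-only flash original.
 */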

struct pt_alloc_ops {
        pte_t *(*get_pte_virt)(phys_addr_t pa);
        phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
        pmd_t *(*get_pmd_virt)(phys_addr_t pa);
        phys_addr_t (*alloc_pmd)(uintptr_t va);
        pud_t *(*get_pud_virt)(phys_addr_t pa);
        phys_addr_t (*alloc_pud)(uintptr_t va);
        p4d_t *(*get_p4d_virt)(phys_addr_t pa);
        phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __meminitdata;
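
/*
 * These hooks exist because page tables must be created before the normal
 * allocators are up: mm/init.c swaps in different implementations as boot
 * progresses (early fixmap-based helpers, then memblock, then the buddy
 * allocator), so the same mapping code runs at every stage.
 */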

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE      (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE               __pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ               __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE              __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC               __pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC          __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC         __pgprot(_PAGE_BASE | _PAGE_READ |      \
                                         _PAGE_EXEC | _PAGE_WRITE)
#define PAGE_SHADOWSTACK        __pgprot(_PAGE_BASE | _PAGE_WRITE)

#define PAGE_COPY               PAGE_READ
#define PAGE_COPY_EXEC          PAGE_READ_EXEC
#define PAGE_SHARED             PAGE_WRITE
#define PAGE_SHARED_EXEC        PAGE_WRITE_EXEC
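
/*
 * Private (copy-on-write) mappings deliberately alias to the read-only
 * protections: the first write faults, and the fault handler copies the
 * page before making it writable.
 */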

#define _PAGE_KERNEL            (_PAGE_READ \
                                | _PAGE_WRITE \
                                | _PAGE_PRESENT \
                                | _PAGE_ACCESSED \
                                | _PAGE_DIRTY \
                                | _PAGE_GLOBAL)

#define PAGE_KERNEL             __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ        __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC        __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC   __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
                                         | _PAGE_EXEC)

#define PAGE_TABLE              __pgprot(_PAGE_TABLE)

#define _PAGE_KERNEL_NC ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_NOCACHE)
#define _PAGE_IOREMAP   ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO          __pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
        /*
         * Checking for _PAGE_LEAF is needed too because, when splitting
         * a THP, split_huge_page() temporarily clears the present bit;
         * in this situation, pmd_present() and pmd_trans_huge() still
         * need to return true.
         */
        return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
        return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
        return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
        return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf        pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
        return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        WRITE_ONCE(*pmdp, pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
        unsigned long prot_val = pgprot_val(prot);

        ALT_THEAD_PMA(prot_val);

        return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
        return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
        return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
        return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
        return __pte(pud_val(pud));
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT

static __always_inline bool has_svnapot(void)
{
        return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
}

static inline unsigned long pte_napot(pte_t pte)
{
        return pte_val(pte) & _PAGE_NAPOT;
}

static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
{
        int pos = order - 1 + _PAGE_PFN_SHIFT;
        unsigned long napot_bit = BIT(pos);
        unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

        return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
}
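
/*
 * For example, with 4 KiB base pages, pte_mknapot(pte, 4) marks a 64 KiB
 * NAPOT range: the four low PFN bits are replaced by 0b1000 and
 * _PAGE_NAPOT is set, which is the Svnapot encoding for that size.
 */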

#else

static __always_inline bool has_svnapot(void) { return false; }

static inline unsigned long pte_napot(pte_t pte)
{
        return 0;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
        unsigned long res  = __page_val_to_pfn(pte_val(pte));

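        /*
         * In a NAPOT mapping the low-order PFN bits encode the region size,
         * with the base PFN aligned below them; res & (res - 1) clears the
         * lowest set bit and thus recovers the naturally aligned base PFN.
         */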
        if (has_svnapot() && pte_napot(pte))
                res = res & (res - 1UL);

        return res;
}

#define pte_page(x)     pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
        unsigned long prot_val = pgprot_val(prot);

        ALT_THEAD_PMA(prot_val);

        return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

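/*
 * Recover the protection bits by XORing the pte with one rebuilt from its
 * PFN alone: whatever differs is, by construction, not part of the PFN
 * field.
 */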
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);

        return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

static inline int pte_present(pte_t pte)
{
        return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
        if (pte_val(a) & _PAGE_PRESENT)
                return true;

        if ((pte_val(a) & _PAGE_PROT_NONE) &&
            atomic_read(&mm->tlb_flush_pending))
                return true;

        return false;
}

static inline int pte_none(pte_t pte)
{
        return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
        return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
        return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
        return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
        return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
        return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
        return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte((pte_val(pte) & ~(_PAGE_WRITE)) | (_PAGE_READ));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define pgtable_supports_uffd_wp()      \
        riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B)

static inline bool pte_uffd_wp(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_UFFD_WP);
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
        return pte_wrprotect(__pte(pte_val(pte) | _PAGE_UFFD_WP));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_UFFD_WP));
}

static inline bool pte_swp_uffd_wp(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_SWP_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

/* static inline pte_t pte_mkread(pte_t pte) */

struct vm_area_struct;
pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma);
#define pte_mkwrite pte_mkwrite

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_WRITE);
}

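/*
 * Shadow stack pages use the otherwise-reserved write-only encoding
 * XWR = 010: clear all of _PAGE_LEAF, then set _PAGE_WRITE alone.
 */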
static inline pte_t pte_mkwrite_shstk(pte_t pte)
{
        return __pte((pte_val(pte) & ~(_PAGE_LEAF)) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pgtable_supports_soft_dirty()                           \
        (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) &&                   \
         riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B))

static inline bool pte_soft_dirty(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_SOFT_DIRTY));
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_SWP_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define pte_leaf_size(pte)      (pte_napot(pte) ?                               \
                                        napot_cont_size(napot_cont_order(pte)) :\
                                        PAGE_SIZE)
#endif

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
        return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
        return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        unsigned long newprot_val = pgprot_val(newprot);

        ALT_THEAD_PMA(newprot_val);

        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
                struct vm_area_struct *vma, unsigned long address,
                pte_t *ptep, unsigned int nr)
{
        /*
         * Svvptc guarantees that the new valid pte will be visible within
         * a bounded timeframe, so when the uarch does not cache invalid
         * entries, we don't have to do anything.
         */
        if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC))
                return;

        /*
         * The kernel assumes that TLBs don't cache invalid entries, but
         * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
         * cache flush; it is necessary even after writing invalid entries.
         * Relying on flush_tlb_fix_spurious_fault would suffice, but
         * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
         */
        while (nr--)
                local_flush_tlb_page(address + nr * PAGE_SIZE);
}
#define update_mmu_cache(vma, addr, ptep) \
        update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
        update_mmu_cache_range(NULL, vma, addr, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmdp)
{
        pte_t *ptep = (pte_t *)pmdp;

        update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        WRITE_ONCE(*ptep, pteval);
}

void flush_icache_pte(struct mm_struct *mm, pte_t pte);

static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
        if (pte_present(pteval) && pte_exec(pteval))
                flush_icache_pte(mm, pteval);

        set_pte(ptep, pteval);
}

#define PFN_PTE_SHIFT           _PAGE_PFN_SHIFT

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
                pte_t *ptep, pte_t pteval, unsigned int nr)
{
        page_table_check_ptes_set(mm, addr, ptep, pteval, nr);

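        /* Each successive pte maps the next page: step the PFN field by one. */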
        for (;;) {
                __set_pte_at(mm, ptep, pteval);
                if (--nr == 0)
                        break;
                ptep++;
                pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
        }
}
#define set_ptes set_ptes

static inline void pte_clear(struct mm_struct *mm,
        unsigned long addr, pte_t *ptep)
{
        __set_pte_at(mm, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS       /* defined in mm/pgtable.c */
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                                 pte_t *ptep, pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG   /* defined in mm/pgtable.c */
extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
                                     pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long address, pte_t *ptep)
{
#ifdef CONFIG_SMP
        pte_t pte = __pte(xchg(&ptep->pte, 0));
#else
        pte_t pte = *ptep;

        set_pte(ptep, __pte(0));
#endif

        page_table_check_pte_clear(mm, address, pte);

        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pte_t *ptep)
{
        pte_t read_pte = READ_ONCE(*ptep);
        /*
         * ptep_set_wrprotect() can be called for shadow stack ranges too.
         * Shadow stack memory is XWR = 010, so simply clearing _PAGE_WRITE
         * would produce the encoding 000, which is invalid when V = 1.
         * That would only cause a page fault, but we don't want this bogus
         * configuration to land in the page tables, so set _PAGE_READ too.
         */
        atomic_long_set((atomic_long_t *)ptep,
                        ((pte_val(read_pte) & ~(unsigned long)_PAGE_WRITE) | _PAGE_READ));
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
{
        /*
         * This comment is borrowed from x86, but applies equally to RISC-V:
         *
         * Clearing the accessed bit without a TLB flush
         * doesn't cause data corruption. [ It could cause incorrect
         * page aging and the (mistaken) reclaim of hot pages, but the
         * chance of that should be relatively low. ]
         *
         * So as a performance optimization don't flush the TLB when
         * clearing the accessed bit, it will eventually be flushed by
         * a context switch or a VM operation anyway. [ In the rare
         * event of it not getting flushed for a long time the delay
         * shouldn't really matter because there's no real memory
         * pressure for swapout to react to. ]
         */
        return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_nx pgprot_nx
static inline pgprot_t pgprot_nx(pgprot_t _prot)
{
        return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot &= ~_PAGE_MTMASK;
        prot |= _PAGE_IO;

        return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot &= ~_PAGE_MTMASK;
        prot |= _PAGE_NOCACHE;

        return __pgprot(prot);
}

#define pgprot_dmacoherent pgprot_writecombine

/*
 * Both Svade and Svadu control the hardware behavior when the PTE A/D bits
 * need to be set. By default, the M-mode firmware enables the hardware
 * updating scheme when only Svadu is present in the DT.
 */
#define arch_has_hw_pte_young arch_has_hw_pte_young
static inline bool arch_has_hw_pte_young(void)
{
        return riscv_has_extension_unlikely(RISCV_ISA_EXT_SVADU);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
        return __pmd(pte_val(pte));
}

static inline pud_t pte_pud(pte_t pte)
{
        return __pud(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
        return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
        return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
        return pte_pgprot(pmd_pte(pmd));
}

#define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
{
        return pte_pgprot(pud_pte(pud));
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
        return pte_write(pmd_pte(pmd));
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
        return pte_write(pud_pte(pud));
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
        return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
        return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
        return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
#define pmd_mkwrite pmd_mkwrite

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
        return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite_shstk(pmd_t pmd)
{
        return __pmd((pmd_val(pmd) & ~(_PAGE_LEAF)) | _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static inline bool pmd_special(pmd_t pmd)
{
        return pte_special(pmd_pte(pmd));
}

static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
        return pte_pmd(pte_mkspecial(pmd_pte(pmd)));
}
#endif

#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
static inline bool pud_special(pud_t pud)
{
        return pte_special(pud_pte(pud));
}

static inline pud_t pud_mkspecial(pud_t pud)
{
        return pte_pud(pte_mkspecial(pud_pte(pud)));
}
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline bool pmd_uffd_wp(pmd_t pmd)
{
        return pte_uffd_wp(pmd_pte(pmd));
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
        return pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)));
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
        return pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)));
}

static inline bool pmd_swp_uffd_wp(pmd_t pmd)
{
        return pte_swp_uffd_wp(pmd_pte(pmd));
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
        return pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)));
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
        return pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pmd_soft_dirty(pmd_t pmd)
{
        return pte_soft_dirty(pmd_pte(pmd));
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
        return pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
        return pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline bool pmd_swp_soft_dirty(pmd_t pmd)
{
        return pte_swp_soft_dirty(pmd_pte(pmd));
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
        return pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)));
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
        return pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)));
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                                pmd_t *pmdp, pmd_t pmd)
{
        page_table_check_pmd_set(mm, addr, pmdp, pmd);
        return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
                                pud_t *pudp, pud_t pud)
{
        page_table_check_pud_set(mm, addr, pudp, pud);
        return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
{
        return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd, unsigned long addr)
{
        return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud, unsigned long addr)
{
        return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pmd_t *pmdp,
                                        pmd_t entry, int dirty)
{
        return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                        unsigned long address, pmd_t *pmdp)
{
        return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                        unsigned long address, pmd_t *pmdp)
{
#ifdef CONFIG_SMP
        pmd_t pmd = __pmd(xchg(&pmdp->pmd, 0));
#else
        pmd_t pmd = *pmdp;

        pmd_clear(pmdp);
#endif

        page_table_check_pmd_clear(mm, address, pmd);

        return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                        unsigned long address, pmd_t *pmdp)
{
        ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
                                unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
        page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
        return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}

#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp);

static inline pud_t pud_wrprotect(pud_t pud)
{
        return pte_pud(pte_wrprotect(pud_pte(pud)));
}

static inline int pud_trans_huge(pud_t pud)
{
        return pud_leaf(pud);
}

static inline int pud_dirty(pud_t pud)
{
        return pte_dirty(pud_pte(pud));
}

static inline pud_t pud_mkyoung(pud_t pud)
{
        return pte_pud(pte_mkyoung(pud_pte(pud)));
}

static inline pud_t pud_mkold(pud_t pud)
{
        return pte_pud(pte_mkold(pud_pte(pud)));
}

static inline pud_t pud_mkdirty(pud_t pud)
{
        return pte_pud(pte_mkdirty(pud_pte(pud)));
}

static inline pud_t pud_mkclean(pud_t pud)
{
        return pte_pud(pte_mkclean(pud_pte(pud)));
}

static inline pud_t pud_mkwrite(pud_t pud)
{
        return pte_pud(pte_mkwrite_novma(pud_pte(pud)));
}

static inline pud_t pud_mkhuge(pud_t pud)
{
        return pud;
}

static inline int pudp_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pud_t *pudp,
                                        pud_t entry, int dirty)
{
        return ptep_set_access_flags(vma, address, (pte_t *)pudp, pud_pte(entry), dirty);
}

static inline int pudp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address, pud_t *pudp)
{
        return ptep_test_and_clear_young(vma, address, (pte_t *)pudp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long address,  pud_t *pudp)
{
#ifdef CONFIG_SMP
        pud_t pud = __pud(xchg(&pudp->pud, 0));
#else
        pud_t pud = *pudp;

        pud_clear(pudp);
#endif

        page_table_check_pud_clear(mm, address, pud);

        return pud;
}

static inline int pud_young(pud_t pud)
{
        return pte_young(pud_pte(pud));
}

static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
                                        unsigned long address, pud_t *pudp)
{
        pte_t *ptep = (pte_t *)pudp;

        update_mmu_cache(vma, address, ptep);
}

static inline pud_t pudp_establish(struct vm_area_struct *vma,
                                   unsigned long address, pud_t *pudp, pud_t pud)
{
        page_table_check_pud_set(vma->vm_mm, address, pudp, pud);
        return __pud(atomic_long_xchg((atomic_long_t *)pudp, pud_val(pud)));
}

static inline pud_t pud_mkinvalid(pud_t pud)
{
        return __pud(pud_val(pud) & ~(_PAGE_PRESENT | _PAGE_PROT_NONE));
}

extern pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
                             pud_t *pudp);

static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
        return pte_pud(pte_modify(pud_pte(pud), newprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *      bit            0:       _PAGE_PRESENT (zero)
 *      bit       1 to 2:       (zero)
 *      bit            3:       _PAGE_SWP_SOFT_DIRTY
 *      bit            4:       _PAGE_SWP_UFFD_WP
 *      bit            5:       _PAGE_PROT_NONE (zero)
 *      bit            6:       exclusive marker
 *      bits      7 to 11:      swap type
 *      bits 12 to XLEN-1:      swap offset
 */
#define __SWP_TYPE_SHIFT        7
#define __SWP_TYPE_BITS         5
#define __SWP_TYPE_MASK         ((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT      (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()   \
        BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)   (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
        { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
          ((offset) << __SWP_OFFSET_SHIFT) })
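
/*
 * For example, __swp_entry(3, 0x1234) produces (3 << 7) | (0x1234 << 12):
 * the type sits in bits 7-11 and the offset from bit 12 up, leaving the
 * _PAGE_PRESENT and _PAGE_PROT_NONE bits clear as required above.
 */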

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })

static inline bool pte_swp_exclusive(pte_t pte)
{
        return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START (-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START FIXADDR_START
#endif

/*
 * Task size is:
 * -        0x9fc00000  (~2.5GB) for RV32.
 * -      0x4000000000  ( 256GB) for RV64 using SV39 mmu
 * -    0x800000000000  ( 128TB) for RV64 using SV48 mmu
 * - 0x100000000000000  (  64PB) for RV64 using SV57 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since the "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64 bits, must have bits
 * 63–48 all equal to bit 47, or else a page-fault exception will occur."
 * Similarly for SV57, bits 63–57 must be equal to bit 56.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64    (PGDIR_SIZE * PTRS_PER_PGD / 2)
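
/* e.g. with Sv39: 1 GiB * 512 / 2 = 256 GiB = 0x4000000000, as listed above. */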

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32    (_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE       (is_compat_task() ? \
                         TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE       TASK_SIZE_64
#endif

#else
#define TASK_SIZE       FIXADDR_START
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED             __pgprot(0)
#define PAGE_KERNEL             __pgprot(0)
#define swapper_pg_dir          NULL
#define TASK_SIZE               _AC(-1, UL)
#define VMALLOC_START           _AC(0, UL)
#define VMALLOC_END             TASK_SIZE

#endif /* !CONFIG_MMU */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va    (*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa    (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va    _dtb_early_va
#define dtb_early_pa    _dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 * TLB flush will be required as a result of the "set". For example, use
 * in scenarios where it is known ahead of time that the routine is
 * setting non-present entries, or re-setting an existing entry to the
 * same value. Otherwise, use the typical "set" helpers and flush the
 * TLB.
 */
#define set_p4d_safe(p4dp, p4d) \
({ \
        WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
        set_p4d(p4dp, p4d); \
})

#define set_pgd_safe(pgdp, pgd) \
({ \
        WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
        set_pgd(pgdp, pgd); \
})
#endif /* !__ASSEMBLER__ */

#endif /* _ASM_RISCV_PGTABLE_H */