#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/mmu_context.h>
#include <linux/swap.h>
#include <linux/leafops.h>
#include <asm/tlbflush.h>
#include "internal.h"
static int real_depth(int depth)
{
if (depth == 3 && PTRS_PER_PMD == 1)
depth = 2;
if (depth == 2 && PTRS_PER_PUD == 1)
depth = 1;
if (depth == 1 && PTRS_PER_P4D == 1)
depth = 0;
return depth;
}
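
/*
 * Handle each PTE in [addr, end). If ->install_pte is set and the PTE is
 * none, ask the caller for a fresh entry to install; otherwise hand the PTE
 * to ->pte_entry. Installation only makes sense for VMA-based walks, hence
 * the WARN_ON_ONCE(walk->no_vma) guarding the MMU cache update.
 */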
static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
const struct mm_walk_ops *ops = walk->ops;
int err = 0;
for (;;) {
if (ops->install_pte && pte_none(ptep_get(pte))) {
pte_t new_pte;
err = ops->install_pte(addr, addr + PAGE_SIZE, &new_pte,
walk);
if (err)
break;
set_pte_at(walk->mm, addr, pte, new_pte);
if (!WARN_ON_ONCE(walk->no_vma))
update_mmu_cache(walk->vma, addr, pte);
} else {
err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
if (err)
break;
}
if (addr >= end - PAGE_SIZE)
break;
addr += PAGE_SIZE;
pte++;
}
return err;
}
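
/*
 * Map, and for userspace walks lock, the PTE table spanning [addr, end) and
 * hand it to walk_pte_range_inner(). Kernel tables (init_mm, or addresses
 * at or above TASK_SIZE during a no_vma walk) need neither mapping nor lock.
 * If the table could not be mapped because it changed under us, request a
 * retry via ACTION_AGAIN.
 */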
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
pte_t *pte;
int err = 0;
spinlock_t *ptl;
if (walk->no_vma) {
if (walk->mm == &init_mm || addr >= TASK_SIZE)
pte = pte_offset_kernel(pmd, addr);
else
pte = pte_offset_map(pmd, addr);
if (pte) {
err = walk_pte_range_inner(pte, addr, end, walk);
if (walk->mm != &init_mm && addr < TASK_SIZE)
pte_unmap(pte);
}
} else {
pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
if (pte) {
err = walk_pte_range_inner(pte, addr, end, walk);
pte_unmap_unlock(pte, ptl);
}
}
if (!pte)
walk->action = ACTION_AGAIN;
return err;
}
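
/*
 * Walk the PMD entries covering [addr, end) below @pud, first re-validating
 * that the PUD still points to a PMD table (it may have changed under us, in
 * which case ACTION_AGAIN asks the caller to retry). Holes either get a PTE
 * table allocated (when installing) or are reported via ->pte_hole.
 * ->pmd_entry may steer the walk through walk->action; this implies that
 * each ->pmd_entry() handler has to deal with all "bad" entries itself.
 */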
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
pud_t pudval = pudp_get(pud);
pmd_t *pmd;
unsigned long next;
const struct mm_walk_ops *ops = walk->ops;
bool has_handler = ops->pte_entry;
bool has_install = ops->install_pte;
int err = 0;
int depth = real_depth(3);
if (!pud_present(pudval) || pud_leaf(pudval)) {
walk->action = ACTION_AGAIN;
return 0;
}
pmd = pmd_offset(pud, addr);
do {
again:
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd)) {
if (has_install)
err = __pte_alloc(walk->mm, pmd);
else if (ops->pte_hole)
err = ops->pte_hole(addr, next, depth, walk);
if (err)
break;
if (!has_install)
continue;
}
walk->action = ACTION_SUBTREE;
if (ops->pmd_entry)
err = ops->pmd_entry(pmd, addr, next, walk);
if (err)
break;
if (walk->action == ACTION_AGAIN)
goto again;
if (walk->action == ACTION_CONTINUE)
continue;
if (!has_handler) {
if (!has_install)
continue;
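			/*
			 * We are only installing, so avoid needlessly
			 * splitting a present huge page.
			 */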
if (pmd_present(*pmd) && pmd_trans_huge(*pmd))
continue;
}
if (walk->vma)
split_huge_pmd(walk->vma, pmd, addr);
else if (pmd_leaf(*pmd) || !pmd_present(*pmd))
continue;
err = walk_pte_range(pmd, addr, next, walk);
if (err)
break;
if (walk->action == ACTION_AGAIN)
goto again;
} while (pmd++, addr = next, addr != end);
return err;
}
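
/*
 * Walk the PUD entries covering [addr, end) below @p4d. As at the PMD level,
 * holes are either populated (when installing) or reported via ->pte_hole,
 * ->pud_entry may redirect the walk through walk->action, and a huge PUD is
 * split before descending unless there is nothing to do below it.
 */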
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
pud_t *pud;
unsigned long next;
const struct mm_walk_ops *ops = walk->ops;
bool has_handler = ops->pmd_entry || ops->pte_entry;
bool has_install = ops->install_pte;
int err = 0;
int depth = real_depth(2);
pud = pud_offset(p4d, addr);
do {
again:
next = pud_addr_end(addr, end);
if (pud_none(*pud)) {
if (has_install)
err = __pmd_alloc(walk->mm, pud, addr);
else if (ops->pte_hole)
err = ops->pte_hole(addr, next, depth, walk);
if (err)
break;
if (!has_install)
continue;
}
walk->action = ACTION_SUBTREE;
if (ops->pud_entry)
err = ops->pud_entry(pud, addr, next, walk);
if (err)
break;
if (walk->action == ACTION_AGAIN)
goto again;
if (walk->action == ACTION_CONTINUE)
continue;
if (!has_handler) {
if (!has_install)
continue;
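			/*
			 * We are only installing, so avoid needlessly
			 * splitting a present huge page.
			 */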
if (pud_present(*pud) && pud_trans_huge(*pud))
continue;
}
if (walk->vma)
split_huge_pud(walk->vma, pud, addr);
else if (pud_leaf(*pud) || !pud_present(*pud))
continue;
err = walk_pmd_range(pud, addr, next, walk);
if (err)
break;
if (walk->action == ACTION_AGAIN)
goto again;
} while (pud++, addr = next, addr != end);
return err;
}
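
/*
 * Walk the P4D entries covering [addr, end) below @pgd, descending into
 * walk_pud_range() only when a lower-level callback or PTE installation
 * makes the descent worthwhile.
 */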
static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
p4d_t *p4d;
unsigned long next;
const struct mm_walk_ops *ops = walk->ops;
bool has_handler = ops->pud_entry || ops->pmd_entry || ops->pte_entry;
bool has_install = ops->install_pte;
int err = 0;
int depth = real_depth(1);
p4d = p4d_offset(pgd, addr);
do {
next = p4d_addr_end(addr, end);
if (p4d_none_or_clear_bad(p4d)) {
if (has_install)
err = __pud_alloc(walk->mm, p4d, addr);
else if (ops->pte_hole)
err = ops->pte_hole(addr, next, depth, walk);
if (err)
break;
if (!has_install)
continue;
}
if (ops->p4d_entry) {
err = ops->p4d_entry(p4d, addr, next, walk);
if (err)
break;
}
if (has_handler || has_install)
err = walk_pud_range(p4d, addr, next, walk);
if (err)
break;
} while (p4d++, addr = next, addr != end);
return err;
}
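
/*
 * Top level of the walk: iterate over the PGD entries covering [addr, end),
 * using either the caller-supplied table (walk->pgd) or the one belonging to
 * walk->mm.
 */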
static int walk_pgd_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
pgd_t *pgd;
unsigned long next;
const struct mm_walk_ops *ops = walk->ops;
bool has_handler = ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
ops->pte_entry;
bool has_install = ops->install_pte;
int err = 0;
if (walk->pgd)
pgd = walk->pgd + pgd_index(addr);
else
pgd = pgd_offset(walk->mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd)) {
if (has_install)
err = __p4d_alloc(walk->mm, pgd, addr);
else if (ops->pte_hole)
err = ops->pte_hole(addr, next, 0, walk);
if (err)
break;
if (!has_install)
continue;
}
if (ops->pgd_entry) {
err = ops->pgd_entry(pgd, addr, next, walk);
if (err)
break;
}
if (has_handler || has_install)
err = walk_p4d_range(pgd, addr, next, walk);
if (err)
break;
} while (pgd++, addr = next, addr != end);
return err;
}
#ifdef CONFIG_HUGETLB_PAGE
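/*
 * Clamp the end of the current walk step to the end of the huge page
 * containing @addr, or to @end if that comes first.
 */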
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
unsigned long end)
{
unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
return min(boundary, end);
}
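
/*
 * Walk a hugetlb VMA in huge-page-sized steps under the hugetlb VMA lock
 * held for read. Populated page tables go to ->hugetlb_entry; missing ones
 * are reported to ->pte_hole with depth -1, since hugetlb entries do not map
 * onto the usual page table levels.
 */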
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
struct hstate *h = hstate_vma(vma);
unsigned long next;
unsigned long hmask = huge_page_mask(h);
unsigned long sz = huge_page_size(h);
pte_t *pte;
const struct mm_walk_ops *ops = walk->ops;
int err = 0;
hugetlb_vma_lock_read(vma);
do {
next = hugetlb_entry_end(h, addr, end);
pte = hugetlb_walk(vma, addr & hmask, sz);
if (pte)
err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
else if (ops->pte_hole)
err = ops->pte_hole(addr, next, -1, walk);
if (err)
break;
} while (addr = next, addr != end);
hugetlb_vma_unlock_read(vma);
return err;
}
#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */
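
/*
 * Decide whether we really walk over the current VMA on [@start, @end) or
 * skip it via the returned value. Return 0 to walk it, 1 to skip it, and a
 * negative error to abort the walk entirely. VM_PFNMAP mappings have no
 * struct pages backing them, so by default they are skipped, after giving
 * ->pte_hole a chance to record the range as a hole.
 */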
static int walk_page_test(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
const struct mm_walk_ops *ops = walk->ops;
if (ops->test_walk)
return ops->test_walk(start, end, walk);
if (vma->vm_flags & VM_PFNMAP) {
int err = 1;
if (ops->pte_hole)
err = ops->pte_hole(start, end, -1, walk);
return err ? err : 1;
}
return 0;
}
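
/*
 * Walk the page tables of a single VMA, bracketing the traversal with the
 * optional ->pre_vma and ->post_vma hooks and dispatching hugetlb VMAs to
 * walk_hugetlb_range(). PTE installation into hugetlb mappings is not
 * supported.
 */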
static int __walk_page_range(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
int err = 0;
struct vm_area_struct *vma = walk->vma;
const struct mm_walk_ops *ops = walk->ops;
bool is_hugetlb = is_vm_hugetlb_page(vma);
if (ops->install_pte && is_hugetlb)
return -EINVAL;
if (ops->pre_vma) {
err = ops->pre_vma(start, end, walk);
if (err)
return err;
}
if (is_hugetlb) {
if (ops->hugetlb_entry)
err = walk_hugetlb_range(start, end, walk);
} else
err = walk_pgd_range(start, end, walk);
if (ops->post_vma)
ops->post_vma(walk);
return err;
}
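
/*
 * Assert that the mmap lock is held in the mode the caller promised via
 * ops->walk_lock; walks relying on the per-VMA lock alone are checked in
 * process_vma_walk_lock() instead.
 */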
static inline void process_mm_walk_lock(struct mm_struct *mm,
enum page_walk_lock walk_lock)
{
if (walk_lock == PGWALK_RDLOCK)
mmap_assert_locked(mm);
else if (walk_lock != PGWALK_VMA_RDLOCK_VERIFY)
mmap_assert_write_locked(mm);
}
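
/*
 * Acquire or verify the per-VMA lock according to ops->walk_lock. Without
 * CONFIG_PER_VMA_LOCK the mmap lock assertions above already cover every
 * mode.
 */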
static inline void process_vma_walk_lock(struct vm_area_struct *vma,
enum page_walk_lock walk_lock)
{
#ifdef CONFIG_PER_VMA_LOCK
switch (walk_lock) {
case PGWALK_WRLOCK:
vma_start_write(vma);
break;
case PGWALK_WRLOCK_VERIFY:
vma_assert_write_locked(vma);
break;
case PGWALK_VMA_RDLOCK_VERIFY:
vma_assert_locked(vma);
break;
case PGWALK_RDLOCK:
break;
}
#endif
}
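
/*
 * Walk the page tables of @mm within [@start, @end), calling the callbacks
 * in @ops for each populated entry and for holes. Unlike walk_page_range(),
 * no restriction is placed on @ops, so PTE installation is permitted; hence
 * the "unsafe" suffix. Ranges outside any VMA are reported to ->pte_hole
 * with a depth of -1, and each VMA is vetted via walk_page_test() before
 * descending into it.
 */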
int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private)
{
int err = 0;
unsigned long next;
struct vm_area_struct *vma;
struct mm_walk walk = {
.ops = ops,
.mm = mm,
.private = private,
};
if (start >= end)
return -EINVAL;
if (!walk.mm)
return -EINVAL;
process_mm_walk_lock(walk.mm, ops->walk_lock);
vma = find_vma(walk.mm, start);
do {
if (!vma) {
walk.vma = NULL;
next = end;
if (ops->pte_hole)
err = ops->pte_hole(start, next, -1, &walk);
} else if (start < vma->vm_start) {
walk.vma = NULL;
next = min(end, vma->vm_start);
if (ops->pte_hole)
err = ops->pte_hole(start, next, -1, &walk);
} else {
process_vma_walk_lock(vma, ops->walk_lock);
walk.vma = vma;
next = min(end, vma->vm_end);
vma = find_vma(mm, vma->vm_end);
err = walk_page_test(start, next, &walk);
if (err > 0) {
err = 0;
continue;
}
if (err < 0)
break;
err = __walk_page_range(start, next, &walk);
}
if (err)
break;
} while (start = next, start < end);
return err;
}
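
/*
 * The installation of PTEs is solely under the control of memory management
 * logic, and subject to enough subtle locking, security and cache
 * considerations that it is refused everywhere except the _unsafe entry
 * points in this file.
 */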
static bool check_ops_safe(const struct mm_walk_ops *ops)
{
if (ops->install_pte)
return false;
return true;
}
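
/*
 * walk_page_range - walk the page tables of @mm within [@start, @end),
 * invoking the callbacks in @ops for each populated entry and each hole.
 * Callback sets that install PTEs are rejected; only the _unsafe entry
 * points in this file permit that.
 *
 * A minimal usage sketch (the callback and counter are hypothetical, and the
 * mmap read lock is assumed to be held, matching PGWALK_RDLOCK):
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		if (pte_present(ptep_get(pte)))
 *			(*(unsigned long *)walk->private)++;
 *		return 0;
 *	}
 *
 *	static const struct mm_walk_ops count_ops = {
 *		.pte_entry	= count_pte,
 *		.walk_lock	= PGWALK_RDLOCK,
 *	};
 *
 *	unsigned long count = 0;
 *
 *	walk_page_range(mm, start, end, &count_ops, &count);
 */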
int walk_page_range(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private)
{
if (!check_ops_safe(ops))
return -EINVAL;
return walk_page_range_mm_unsafe(mm, start, end, ops, private);
}
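
/*
 * Walk kernel page tables within [@start, @end). Kernel intermediate page
 * tables are usually not freed, so the init_mm mmap read lock is sufficient.
 * There are exceptions, e.g. memory hot-remove, where the caller must take
 * additional steps to keep the tables from being freed during the walk.
 */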
int walk_kernel_page_table_range(unsigned long start, unsigned long end,
const struct mm_walk_ops *ops, pgd_t *pgd, void *private)
{
mmap_assert_locked(&init_mm);
return walk_kernel_page_table_range_lockless(start, end, ops, pgd,
private);
}
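
/*
 * As above, but without any locking assertion. Only for callers that can
 * otherwise guarantee the page tables being walked are not freed or modified
 * concurrently, e.g. during early boot.
 */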
int walk_kernel_page_table_range_lockless(unsigned long start, unsigned long end,
const struct mm_walk_ops *ops, pgd_t *pgd, void *private)
{
struct mm_walk walk = {
.ops = ops,
.mm = &init_mm,
.pgd = pgd,
.private = private,
.no_vma = true
};
if (start >= end)
return -EINVAL;
if (!check_ops_safe(ops))
return -EINVAL;
return walk_pgd_range(start, end, &walk);
}
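
/*
 * walk_page_range_debug - walk a range of page tables that need not be
 * backed by a VMA, for debugging dumpers such as ptdump. Since "unusual"
 * entries may be encountered, PTEs are not locked for the ->pte_entry
 * callback. Kernel ranges (@mm == &init_mm) are handed to
 * walk_kernel_page_table_range(); user ranges require the mmap write lock
 * so the tables cannot change underneath the walk.
 */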
int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
pgd_t *pgd, void *private)
{
struct mm_walk walk = {
.ops = ops,
.mm = mm,
.pgd = pgd,
.private = private,
.no_vma = true
};
if (mm == &init_mm)
return walk_kernel_page_table_range(start, end, ops,
pgd, private);
if (start >= end || !walk.mm)
return -EINVAL;
if (!check_ops_safe(ops))
return -EINVAL;
mmap_assert_write_locked(mm);
return walk_pgd_range(start, end, &walk);
}
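
/*
 * Walk the page tables covering [@start, @end) within a single VMA, with no
 * restriction on @ops (PTE installation is permitted, hence "unsafe"). The
 * range must lie entirely within @vma.
 */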
int walk_page_range_vma_unsafe(struct vm_area_struct *vma, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops, void *private)
{
struct mm_walk walk = {
.ops = ops,
.mm = vma->vm_mm,
.vma = vma,
.private = private,
};
if (start >= end || !walk.mm)
return -EINVAL;
if (start < vma->vm_start || end > vma->vm_end)
return -EINVAL;
process_mm_walk_lock(walk.mm, ops->walk_lock);
process_vma_walk_lock(vma, ops->walk_lock);
return __walk_page_range(start, end, &walk);
}
int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private)
{
if (!check_ops_safe(ops))
return -EINVAL;
return walk_page_range_vma_unsafe(vma, start, end, ops, private);
}
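
/*
 * walk_page_vma - walk the page tables covering the whole of @vma, subject
 * to the same restrictions on @ops as walk_page_range().
 */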
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
void *private)
{
struct mm_walk walk = {
.ops = ops,
.mm = vma->vm_mm,
.vma = vma,
.private = private,
};
if (!walk.mm)
return -EINVAL;
if (!check_ops_safe(ops))
return -EINVAL;
process_mm_walk_lock(walk.mm, ops->walk_lock);
process_vma_walk_lock(vma, ops->walk_lock);
return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}
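
/*
 * walk_page_mapping - walk all memory areas mapped into @mapping, clipping
 * each per-VMA walk to the page offset range [@first_index, @first_index +
 * @nr). The mmap lock cannot be required here, since @mapping may be mapped
 * by multiple processes; instead @mapping->i_mmap_rwsem must be held. This
 * also means callers cannot rely on a VMA's vm_flags remaining constant
 * across the walk, except for immutable flags.
 */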
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
pgoff_t nr, const struct mm_walk_ops *ops,
void *private)
{
struct mm_walk walk = {
.ops = ops,
.private = private,
};
struct vm_area_struct *vma;
pgoff_t vba, vea, cba, cea;
unsigned long start_addr, end_addr;
int err = 0;
if (!check_ops_safe(ops))
return -EINVAL;
lockdep_assert_held(&mapping->i_mmap_rwsem);
vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
first_index + nr - 1) {
vba = vma->vm_pgoff;
vea = vba + vma_pages(vma);
cba = first_index;
cba = max(cba, vba);
cea = first_index + nr;
cea = min(cea, vea);
start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
if (start_addr >= end_addr)
continue;
walk.vma = vma;
walk.mm = vma->vm_mm;
err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
if (err > 0) {
err = 0;
break;
} else if (err < 0)
break;
err = __walk_page_range(start_addr, end_addr, &walk);
if (err)
break;
}
return err;
}
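
/*
 * folio_walk_start - walk the page tables of @vma to the folio mapped at
 * @addr, filling @fw with the entry and level found. @flags selects whether
 * folios that would normally stay hidden (zeropages, and with FW_MIGRATION
 * set, migration entries) are also returned; for those, fw->page is left
 * NULL. Expects the mmap lock to be held. On success the page table lock of
 * the level reached is held and the folio is returned; release it with
 * folio_walk_end(). Returns NULL if nothing suitable was mapped at @addr.
 */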
struct folio *folio_walk_start(struct folio_walk *fw,
struct vm_area_struct *vma, unsigned long addr,
folio_walk_flags_t flags)
{
unsigned long entry_size;
bool expose_page = true;
struct page *page;
pud_t *pudp, pud;
pmd_t *pmdp, pmd;
pte_t *ptep, pte;
spinlock_t *ptl;
pgd_t *pgdp;
p4d_t *p4dp;
mmap_assert_locked(vma->vm_mm);
vma_pgtable_walk_begin(vma);
if (WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end))
goto not_found;
pgdp = pgd_offset(vma->vm_mm, addr);
if (pgd_none_or_clear_bad(pgdp))
goto not_found;
p4dp = p4d_offset(pgdp, addr);
if (p4d_none_or_clear_bad(p4dp))
goto not_found;
pudp = pud_offset(p4dp, addr);
pud = pudp_get(pudp);
if (pud_none(pud))
goto not_found;
if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) &&
(!pud_present(pud) || pud_leaf(pud))) {
ptl = pud_lock(vma->vm_mm, pudp);
pud = pudp_get(pudp);
entry_size = PUD_SIZE;
fw->level = FW_LEVEL_PUD;
fw->pudp = pudp;
fw->pud = pud;
if (pud_none(pud)) {
spin_unlock(ptl);
goto not_found;
} else if (pud_present(pud) && !pud_leaf(pud)) {
spin_unlock(ptl);
goto pmd_table;
} else if (pud_present(pud)) {
page = vm_normal_page_pud(vma, addr, pud);
if (page)
goto found;
}
spin_unlock(ptl);
goto not_found;
}
pmd_table:
VM_WARN_ON_ONCE(!pud_present(pud) || pud_leaf(pud));
pmdp = pmd_offset(pudp, addr);
pmd = pmdp_get_lockless(pmdp);
if (pmd_none(pmd))
goto not_found;
if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) &&
(!pmd_present(pmd) || pmd_leaf(pmd))) {
ptl = pmd_lock(vma->vm_mm, pmdp);
pmd = pmdp_get(pmdp);
entry_size = PMD_SIZE;
fw->level = FW_LEVEL_PMD;
fw->pmdp = pmdp;
fw->pmd = pmd;
if (pmd_none(pmd)) {
spin_unlock(ptl);
goto not_found;
} else if (pmd_present(pmd) && !pmd_leaf(pmd)) {
spin_unlock(ptl);
goto pte_table;
} else if (pmd_present(pmd)) {
page = vm_normal_page_pmd(vma, addr, pmd);
if (page) {
goto found;
} else if ((flags & FW_ZEROPAGE) &&
is_huge_zero_pmd(pmd)) {
page = pfn_to_page(pmd_pfn(pmd));
expose_page = false;
goto found;
}
} else if ((flags & FW_MIGRATION) &&
pmd_is_migration_entry(pmd)) {
const softleaf_t entry = softleaf_from_pmd(pmd);
page = softleaf_to_page(entry);
expose_page = false;
goto found;
}
spin_unlock(ptl);
goto not_found;
}
pte_table:
VM_WARN_ON_ONCE(!pmd_present(pmd) || pmd_leaf(pmd));
ptep = pte_offset_map_lock(vma->vm_mm, pmdp, addr, &ptl);
if (!ptep)
goto not_found;
pte = ptep_get(ptep);
entry_size = PAGE_SIZE;
fw->level = FW_LEVEL_PTE;
fw->ptep = ptep;
fw->pte = pte;
if (pte_present(pte)) {
page = vm_normal_page(vma, addr, pte);
if (page)
goto found;
if ((flags & FW_ZEROPAGE) &&
is_zero_pfn(pte_pfn(pte))) {
page = pfn_to_page(pte_pfn(pte));
expose_page = false;
goto found;
}
} else if (!pte_none(pte)) {
const softleaf_t entry = softleaf_from_pte(pte);
if ((flags & FW_MIGRATION) && softleaf_is_migration(entry)) {
page = softleaf_to_page(entry);
expose_page = false;
goto found;
}
}
pte_unmap_unlock(ptep, ptl);
not_found:
vma_pgtable_walk_end(vma);
return NULL;
found:
if (expose_page)
fw->page = page + ((addr & (entry_size - 1)) >> PAGE_SHIFT);
else
fw->page = NULL;
fw->ptl = ptl;
return page_folio(page);
}