#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/leafops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include "internal.h"
enum mremap_type {
MREMAP_INVALID,
MREMAP_NO_RESIZE,
MREMAP_SHRINK,
MREMAP_EXPAND,
};
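/*
 * State for a single mremap() operation. The leading fields mirror the
 * user-supplied arguments (addr/old_len/new_len/flags/new_addr); uf and the
 * uf_unmap lists carry userfaultfd completion state. The remainder is derived
 * state: the VMA being operated on, the absolute length delta, whether an
 * mlock()'d expansion must be populated afterwards, the resize
 * classification, whether the mmap write lock is still held, the pages
 * charged under VM_ACCOUNT, and whether a VMA iterator must be invalidated
 * because of an unmap.
 */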
struct vma_remap_struct {
unsigned long addr;
unsigned long old_len;
unsigned long new_len;
const unsigned long flags;
unsigned long new_addr;
struct vm_userfaultfd_ctx *uf;
struct list_head *uf_unmap_early;
struct list_head *uf_unmap;
struct vm_area_struct *vma;
unsigned long delta;
bool populate_expand;
enum mremap_type remap_type;
bool mmap_locked;
unsigned long charged;
bool vmi_needs_invalidate;
};
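/* Walk to the PUD entry covering @addr in @mm, or return NULL if absent. */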
static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pgd = pgd_offset(mm, addr);
if (pgd_none_or_clear_bad(pgd))
return NULL;
p4d = p4d_offset(pgd, addr);
if (p4d_none_or_clear_bad(p4d))
return NULL;
pud = pud_offset(p4d, addr);
if (pud_none_or_clear_bad(pud))
return NULL;
return pud;
}
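/* Walk to the PMD entry covering @addr in @mm, or return NULL if absent. */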
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
pud_t *pud;
pmd_t *pmd;
pud = get_old_pud(mm, addr);
if (!pud)
return NULL;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return NULL;
return pmd;
}
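/* Allocate intermediate levels as needed and return the destination PUD. */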
static pud_t *alloc_new_pud(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
p4d_t *p4d;
pgd = pgd_offset(mm, addr);
p4d = p4d_alloc(mm, pgd, addr);
if (!p4d)
return NULL;
return pud_alloc(mm, p4d, addr);
}
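/* Allocate intermediate levels as needed and return the destination PMD. */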
static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
pud_t *pud;
pmd_t *pmd;
pud = alloc_new_pud(mm, addr);
if (!pud)
return NULL;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return NULL;
VM_BUG_ON(pmd_trans_huge(*pmd));
return pmd;
}
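/*
 * Take the rmap locks (file rmap lock first, then anon) to exclude rmap
 * walkers while page table entries are being moved.
 */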
static void take_rmap_locks(struct vm_area_struct *vma)
{
if (vma->vm_file)
i_mmap_lock_write(vma->vm_file->f_mapping);
if (vma->anon_vma)
anon_vma_lock_write(vma->anon_vma);
}
static void drop_rmap_locks(struct vm_area_struct *vma)
{
if (vma->anon_vma)
anon_vma_unlock_write(vma->anon_vma);
if (vma->vm_file)
i_mmap_unlock_write(vma->vm_file->f_mapping);
}
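/* Set the soft-dirty bit so userspace can notice that the PTE was moved. */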
static pte_t move_soft_dirty_pte(pte_t pte)
{
if (pte_none(pte))
return pte;
if (pgtable_supports_soft_dirty()) {
if (pte_present(pte))
pte = pte_mksoft_dirty(pte);
else
pte = pte_swp_mksoft_dirty(pte);
}
return pte;
}
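/*
 * How many consecutive PTEs, all mapping the same large folio, can be moved
 * as a single batch? Returns 1 when no batching is possible.
 */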
static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep, pte_t pte, int max_nr)
{
struct folio *folio;
if (max_nr == 1)
return 1;
if (pte_batch_hint(ptep, pte) == 1)
return 1;
folio = vm_normal_folio(vma, addr, pte);
if (!folio || !folio_test_large(folio))
return 1;
return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr, FPB_RESPECT_WRITE);
}
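/*
 * Move the PTEs covering @extent bytes from @old_pmd to @new_pmd. Returns 0
 * on success, or -EAGAIN if a PTE table could not be mapped and the caller
 * should retry.
 */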
static int move_ptes(struct pagetable_move_control *pmc,
unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
{
struct vm_area_struct *vma = pmc->old;
bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
struct mm_struct *mm = vma->vm_mm;
pte_t *old_ptep, *new_ptep;
pte_t old_pte, pte;
pmd_t dummy_pmdval;
spinlock_t *old_ptl, *new_ptl;
bool force_flush = false;
unsigned long old_addr = pmc->old_addr;
unsigned long new_addr = pmc->new_addr;
unsigned long old_end = old_addr + extent;
unsigned long len = old_end - old_addr;
int max_nr_ptes;
int nr_ptes;
int err = 0;
if (pmc->need_rmap_locks)
take_rmap_locks(vma);
old_ptep = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
if (!old_ptep) {
err = -EAGAIN;
goto out;
}
new_ptep = pte_offset_map_rw_nolock(mm, new_pmd, new_addr, &dummy_pmdval,
&new_ptl);
if (!new_ptep) {
pte_unmap_unlock(old_ptep, old_ptl);
err = -EAGAIN;
goto out;
}
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
flush_tlb_batched_pending(vma->vm_mm);
lazy_mmu_mode_enable();
for (; old_addr < old_end; old_ptep += nr_ptes, old_addr += nr_ptes * PAGE_SIZE,
new_ptep += nr_ptes, new_addr += nr_ptes * PAGE_SIZE) {
VM_WARN_ON_ONCE(!pte_none(*new_ptep));
nr_ptes = 1;
max_nr_ptes = (old_end - old_addr) >> PAGE_SHIFT;
old_pte = ptep_get(old_ptep);
if (pte_none(old_pte))
continue;
if (pte_present(old_pte)) {
nr_ptes = mremap_folio_pte_batch(vma, old_addr, old_ptep,
old_pte, max_nr_ptes);
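			/*
			 * We are moving a valid PTE, so make sure the TLB is
			 * flushed before the page table locks are dropped:
			 * otherwise the old mapping could still be reached
			 * through a stale TLB entry.
			 */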
force_flush = true;
}
pte = get_and_clear_ptes(mm, old_addr, old_ptep, nr_ptes);
pte = move_pte(pte, old_addr, new_addr);
pte = move_soft_dirty_pte(pte);
if (need_clear_uffd_wp && pte_is_uffd_wp_marker(pte))
pte_clear(mm, new_addr, new_ptep);
else {
if (need_clear_uffd_wp) {
if (pte_present(pte))
pte = pte_clear_uffd_wp(pte);
else
pte = pte_swp_clear_uffd_wp(pte);
}
set_ptes(mm, new_addr, new_ptep, pte, nr_ptes);
}
}
lazy_mmu_mode_disable();
if (force_flush)
flush_tlb_range(vma, old_end - len, old_end);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
pte_unmap(new_ptep - 1);
pte_unmap_unlock(old_ptep - 1, old_ptl);
out:
if (pmc->need_rmap_locks)
drop_rmap_locks(vma);
return err;
}
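/* Architectures may veto whole-page-table (PMD/PUD level) moves. */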
#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif
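/*
 * If userfaultfd is registered without remap-event support on either VMA, we
 * must not move page tables wholesale: each PTE has to be handled
 * individually (e.g. to clear uffd-wp state on move).
 */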
static inline bool uffd_supports_page_table_move(struct pagetable_move_control *pmc)
{
return !vma_has_uffd_without_event_remap(pmc->old) &&
!vma_has_uffd_without_event_remap(pmc->new);
}
#ifdef CONFIG_HAVE_MOVE_PMD
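/*
 * Speed up mremap() by moving an entire PMD worth of page table entries at
 * once. Returns true on success, false if the caller must fall back to
 * moving individual PTEs.
 */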
static bool move_normal_pmd(struct pagetable_move_control *pmc,
pmd_t *old_pmd, pmd_t *new_pmd)
{
spinlock_t *old_ptl, *new_ptl;
struct vm_area_struct *vma = pmc->old;
struct mm_struct *mm = vma->vm_mm;
bool res = false;
pmd_t pmd;
if (!arch_supports_page_table_move())
return false;
if (!uffd_supports_page_table_move(pmc))
return false;
if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
return false;
old_ptl = pmd_lock(mm, old_pmd);
new_ptl = pmd_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pmd = *old_pmd;
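	/* Did we race with a concurrent collapse to a leaf entry (e.g. THP)? */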
if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
goto out_unlock;
pmd_clear(old_pmd);
res = true;
VM_BUG_ON(!pmd_none(*new_pmd));
pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE);
out_unlock:
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
spin_unlock(old_ptl);
return res;
}
#else
static inline bool move_normal_pmd(struct pagetable_move_control *pmc,
pmd_t *old_pmd, pmd_t *new_pmd)
{
return false;
}
#endif
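/* As move_normal_pmd(), but for an entire PUD worth of page tables. */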
#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct pagetable_move_control *pmc,
pud_t *old_pud, pud_t *new_pud)
{
spinlock_t *old_ptl, *new_ptl;
struct vm_area_struct *vma = pmc->old;
struct mm_struct *mm = vma->vm_mm;
pud_t pud;
if (!arch_supports_page_table_move())
return false;
if (!uffd_supports_page_table_move(pmc))
return false;
if (WARN_ON_ONCE(!pud_none(*new_pud)))
return false;
old_ptl = pud_lock(mm, old_pud);
new_ptl = pud_lockptr(mm, new_pud);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pud = *old_pud;
pud_clear(old_pud);
VM_BUG_ON(!pud_none(*new_pud));
pud_populate(mm, new_pud, pud_pgtable(pud));
flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
spin_unlock(old_ptl);
return true;
}
#else
static inline bool move_normal_pud(struct pagetable_move_control *pmc,
pud_t *old_pud, pud_t *new_pud)
{
return false;
}
#endif
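/* Move a huge (leaf) PUD entry to the new address. */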
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static bool move_huge_pud(struct pagetable_move_control *pmc,
pud_t *old_pud, pud_t *new_pud)
{
spinlock_t *old_ptl, *new_ptl;
struct vm_area_struct *vma = pmc->old;
struct mm_struct *mm = vma->vm_mm;
pud_t pud;
if (WARN_ON_ONCE(!pud_none(*new_pud)))
return false;
old_ptl = pud_lock(mm, old_pud);
new_ptl = pud_lockptr(mm, new_pud);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pud = *old_pud;
pud_clear(old_pud);
VM_BUG_ON(!pud_none(*new_pud));
set_pud_at(mm, pmc->new_addr, new_pud, pud);
flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
spin_unlock(old_ptl);
return true;
}
#else
static bool move_huge_pud(struct pagetable_move_control *pmc,
pud_t *old_pud, pud_t *new_pud)
{
WARN_ON_ONCE(1);
return false;
}
#endif
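/* Which kind of page table entry is being moved? */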
enum pgt_entry {
NORMAL_PMD,
HPAGE_PMD,
NORMAL_PUD,
HPAGE_PUD,
};
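/*
 * Return the extent that can be moved for the given entry size, bounded by
 * the next boundary of that size in both the source and the destination, and
 * by the end of the source range.
 */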
static __always_inline unsigned long get_extent(enum pgt_entry entry,
struct pagetable_move_control *pmc)
{
unsigned long next, extent, mask, size;
unsigned long old_addr = pmc->old_addr;
unsigned long old_end = pmc->old_end;
unsigned long new_addr = pmc->new_addr;
switch (entry) {
case HPAGE_PMD:
case NORMAL_PMD:
mask = PMD_MASK;
size = PMD_SIZE;
break;
case HPAGE_PUD:
case NORMAL_PUD:
mask = PUD_MASK;
size = PUD_SIZE;
break;
default:
BUILD_BUG();
break;
}
next = (old_addr + size) & mask;
extent = next - old_addr;
if (extent > old_end - old_addr)
extent = old_end - old_addr;
next = (new_addr + size) & mask;
if (extent > next - new_addr)
extent = next - new_addr;
return extent;
}
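/*
 * Should move_pgt_entry() acquire the rmap locks? This is either expressed in
 * the PMC, or overridden in the case of normal, larger page tables.
 */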
static bool should_take_rmap_locks(struct pagetable_move_control *pmc,
enum pgt_entry entry)
{
switch (entry) {
case NORMAL_PMD:
case NORMAL_PUD:
return true;
default:
return pmc->need_rmap_locks;
}
}
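/*
 * Attempt to speed up the move by moving a whole entry at the level
 * corresponding to @entry. Returns true if the move succeeded, else false.
 */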
static bool move_pgt_entry(struct pagetable_move_control *pmc,
enum pgt_entry entry, void *old_entry, void *new_entry)
{
bool moved = false;
bool need_rmap_locks = should_take_rmap_locks(pmc, entry);
if (need_rmap_locks)
take_rmap_locks(pmc->old);
switch (entry) {
case NORMAL_PMD:
moved = move_normal_pmd(pmc, old_entry, new_entry);
break;
case NORMAL_PUD:
moved = move_normal_pud(pmc, old_entry, new_entry);
break;
case HPAGE_PMD:
moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry,
new_entry);
break;
case HPAGE_PUD:
moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
move_huge_pud(pmc, old_entry, new_entry);
break;
default:
WARN_ON_ONCE(1);
break;
}
if (need_rmap_locks)
drop_rmap_locks(pmc->old);
return moved;
}
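/*
 * Is it safe to align @addr_to_align down to a page table boundary? It is,
 * provided no other mapping would become covered: the address must be at the
 * start of the VMA (or merely within it, for the stack case), and no other
 * VMA may intersect the span from the aligned address to the VMA start.
 */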
static bool can_align_down(struct pagetable_move_control *pmc,
struct vm_area_struct *vma, unsigned long addr_to_align,
unsigned long mask)
{
unsigned long addr_masked = addr_to_align & mask;
if (!pmc->for_stack && vma->vm_start != addr_to_align)
return false;
if (pmc->for_stack && addr_masked >= vma->vm_start)
return true;
return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
}
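/*
 * Can both the source and destination addresses be realigned down to the
 * enclosing page table boundary, so that whole tables can be moved?
 */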
static bool can_realign_addr(struct pagetable_move_control *pmc,
unsigned long pagetable_mask)
{
unsigned long align_mask = ~pagetable_mask;
unsigned long old_align = pmc->old_addr & align_mask;
unsigned long new_align = pmc->new_addr & align_mask;
unsigned long pagetable_size = align_mask + 1;
unsigned long old_align_next = pagetable_size - old_align;
if (pmc->len_in < old_align_next)
return false;
if (old_align == 0)
return false;
if (old_align != new_align)
return false;
if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) ||
!can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask))
return false;
return true;
}
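/*
 * Opportunistically realign addresses to the specified page table boundary
 * for a faster copy. Only done when the source and destination share the same
 * misalignment and aligning down is safe on both sides.
 */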
static void try_realign_addr(struct pagetable_move_control *pmc,
unsigned long pagetable_mask)
{
if (!can_realign_addr(pmc, pagetable_mask))
return;
pmc->old_addr &= pagetable_mask;
pmc->new_addr &= pagetable_mask;
}
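/* Is the page table move complete? */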
static bool pmc_done(struct pagetable_move_control *pmc)
{
return pmc->old_addr >= pmc->old_end;
}
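/* Advance both the source and destination cursors by @extent bytes. */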
static void pmc_next(struct pagetable_move_control *pmc, unsigned long extent)
{
pmc->old_addr += extent;
pmc->new_addr += extent;
}
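/*
 * How many bytes have been moved so far? Returns 0 rather than a negative
 * value if the addresses were realigned but nothing was moved.
 */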
static unsigned long pmc_progress(struct pagetable_move_control *pmc)
{
unsigned long orig_old_addr = pmc->old_end - pmc->len_in;
unsigned long old_addr = pmc->old_addr;
return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr;
}
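/*
 * Move the page tables for the range described by @pmc from the old to the
 * new address, moving at the largest granularity possible (PUD, then PMD,
 * then individual PTEs). Returns the number of bytes moved, which may be less
 * than requested if an allocation fails part-way.
 */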
unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
unsigned long extent;
struct mmu_notifier_range range;
pmd_t *old_pmd, *new_pmd;
pud_t *old_pud, *new_pud;
struct mm_struct *mm = pmc->old->vm_mm;
if (!pmc->len_in)
return 0;
if (is_vm_hugetlb_page(pmc->old))
return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr,
pmc->new_addr, pmc->len_in);
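	/*
	 * If possible, realign the addresses down to a PMD boundary so that
	 * whole page tables can be moved rather than individual PTEs.
	 */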
try_realign_addr(pmc, PMD_MASK);
flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end);
mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm,
pmc->old_addr, pmc->old_end);
mmu_notifier_invalidate_range_start(&range);
for (; !pmc_done(pmc); pmc_next(pmc, extent)) {
cond_resched();
extent = get_extent(NORMAL_PUD, pmc);
old_pud = get_old_pud(mm, pmc->old_addr);
if (!old_pud)
continue;
new_pud = alloc_new_pud(mm, pmc->new_addr);
if (!new_pud)
break;
if (pud_trans_huge(*old_pud)) {
if (extent == HPAGE_PUD_SIZE) {
move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud);
continue;
}
} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
if (move_pgt_entry(pmc, NORMAL_PUD, old_pud, new_pud))
continue;
}
extent = get_extent(NORMAL_PMD, pmc);
old_pmd = get_old_pmd(mm, pmc->old_addr);
if (!old_pmd)
continue;
new_pmd = alloc_new_pmd(mm, pmc->new_addr);
if (!new_pmd)
break;
again:
if (pmd_is_huge(*old_pmd)) {
if (extent == HPAGE_PMD_SIZE &&
move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd))
continue;
split_huge_pmd(pmc->old, old_pmd, pmc->old_addr);
} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
extent == PMD_SIZE) {
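			/*
			 * The extent is PMD-sized: try to speed up the move by
			 * moving the whole page table.
			 */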
if (move_pgt_entry(pmc, NORMAL_PMD, old_pmd, new_pmd))
continue;
}
if (pmd_none(*old_pmd))
continue;
if (pte_alloc(pmc->new->vm_mm, new_pmd))
break;
if (move_ptes(pmc, extent, old_pmd, new_pmd) < 0)
goto again;
}
mmu_notifier_invalidate_range_end(&range);
return pmc_progress(pmc);
}
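/* Set vrm->delta to the absolute difference between old and new lengths. */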
static void vrm_set_delta(struct vma_remap_struct *vrm)
{
vrm->delta = abs_diff(vrm->old_len, vrm->new_len);
}
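/* Classify the remap as a shrink, an expansion, or no resize at all. */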
static enum mremap_type vrm_remap_type(struct vma_remap_struct *vrm)
{
if (vrm->delta == 0)
return MREMAP_NO_RESIZE;
if (vrm->old_len > vrm->new_len)
return MREMAP_SHRINK;
return MREMAP_EXPAND;
}
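/* Would moving the range to vrm->new_addr overlap the old range? */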
static bool vrm_overlaps(struct vma_remap_struct *vrm)
{
unsigned long start_old = vrm->addr;
unsigned long start_new = vrm->new_addr;
unsigned long end_old = vrm->addr + vrm->old_len;
unsigned long end_new = vrm->new_addr + vrm->new_len;
if (end_old > start_new && end_new > start_old)
return true;
return false;
}
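/* MREMAP_FIXED and MREMAP_DONTUNMAP both imply a destination address. */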
static bool vrm_implies_new_addr(struct vma_remap_struct *vrm)
{
return vrm->flags & (MREMAP_FIXED | MREMAP_DONTUNMAP);
}
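/*
 * Find an unmapped area for vrm->new_addr. With MREMAP_FIXED this behaves
 * like a MAP_FIXED mmap(); otherwise any user-supplied address is only a
 * hint. Returns 0 on success, with vrm->new_addr updated, or an error.
 */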
static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm)
{
struct vm_area_struct *vma = vrm->vma;
unsigned long map_flags = 0;
pgoff_t internal_pgoff = (vrm->addr - vma->vm_start) >> PAGE_SHIFT;
pgoff_t pgoff = vma->vm_pgoff + internal_pgoff;
unsigned long new_addr = vrm_implies_new_addr(vrm) ? vrm->new_addr : 0;
unsigned long res;
if (vrm->flags & MREMAP_FIXED)
map_flags |= MAP_FIXED;
if (vma->vm_flags & VM_MAYSHARE)
map_flags |= MAP_SHARED;
res = get_unmapped_area(vma->vm_file, new_addr, vrm->new_len, pgoff,
map_flags);
if (IS_ERR_VALUE(res))
return res;
vrm->new_addr = res;
return 0;
}
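/*
 * If the VMA is accounted, charge the pages the operation will add: the whole
 * new length for MREMAP_DONTUNMAP, otherwise just the delta. Returns false if
 * there is insufficient memory to charge.
 */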
static bool vrm_calc_charge(struct vma_remap_struct *vrm)
{
unsigned long charged;
if (!(vrm->vma->vm_flags & VM_ACCOUNT))
return true;
if (vrm->flags & MREMAP_DONTUNMAP)
charged = vrm->new_len >> PAGE_SHIFT;
else
charged = vrm->delta >> PAGE_SHIFT;
if (security_vm_enough_memory_mm(current->mm, charged))
return false;
vrm->charged = charged;
return true;
}
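/* Undo any accounting charge taken by vrm_calc_charge(). */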
static void vrm_uncharge(struct vma_remap_struct *vrm)
{
if (!(vrm->vma->vm_flags & VM_ACCOUNT))
return;
vm_unacct_memory(vrm->charged);
vrm->charged = 0;
}
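/*
 * Account @bytes of newly mapped memory in the mm's VM statistics, including
 * locked_vm if the VMA is mlock()'d.
 */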
static void vrm_stat_account(struct vma_remap_struct *vrm,
unsigned long bytes)
{
unsigned long pages = bytes >> PAGE_SHIFT;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = vrm->vma;
vm_stat_account(mm, vma->vm_flags, pages);
if (vma->vm_flags & VM_LOCKED)
mm->locked_vm += pages;
}
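/*
 * Checks performed before moving a VMA: ensure map_count headroom for the
 * splits the move may require, let the driver veto splitting via
 * vm_ops->may_split(), and unmerge any KSM pages in the moved range.
 */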
static unsigned long prep_move_vma(struct vma_remap_struct *vrm)
{
unsigned long err = 0;
struct vm_area_struct *vma = vrm->vma;
unsigned long old_addr = vrm->addr;
unsigned long old_len = vrm->old_len;
vm_flags_t dummy = vma->vm_flags;
if (current->mm->map_count >= sysctl_max_map_count - 3)
return -ENOMEM;
if (vma->vm_ops && vma->vm_ops->may_split) {
if (vma->vm_start != old_addr)
err = vma->vm_ops->may_split(vma, old_addr);
if (!err && vma->vm_end != old_addr + old_len)
err = vma->vm_ops->may_split(vma, old_addr + old_len);
if (err)
return err;
}
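	/*
	 * Advise KSM to break any KSM pages in the area to be moved: it would
	 * be confusing if they were to turn up at the new location, where they
	 * happen to coincide with different KSM pages recently unmapped. But
	 * leave vma->vm_flags as it was, so KSM can come around to merge on
	 * vma and new_vma afterwards.
	 */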
err = ksm_madvise(vma, old_addr, old_addr + old_len,
MADV_UNMERGEABLE, &dummy);
if (err)
return err;
return 0;
}
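/*
 * Unmap the source range of the move. VM_ACCOUNT is temporarily cleared so
 * that the unmap does not unaccount memory now attributed to the destination,
 * and is restored on any split-off VMAs left before or after the range.
 */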
static void unmap_source_vma(struct vma_remap_struct *vrm)
{
struct mm_struct *mm = current->mm;
unsigned long addr = vrm->addr;
unsigned long len = vrm->old_len;
struct vm_area_struct *vma = vrm->vma;
VMA_ITERATOR(vmi, mm, addr);
int err;
unsigned long vm_start;
unsigned long vm_end;
bool accountable_move = (vma->vm_flags & VM_ACCOUNT) &&
!(vrm->flags & MREMAP_DONTUNMAP);
if (accountable_move) {
vm_flags_clear(vma, VM_ACCOUNT);
vm_start = vma->vm_start;
vm_end = vma->vm_end;
}
err = do_vmi_munmap(&vmi, mm, addr, len, vrm->uf_unmap, false);
vrm->vma = NULL;
vrm->vmi_needs_invalidate = true;
if (err) {
vm_acct_memory(len >> PAGE_SHIFT);
return;
}
if (accountable_move) {
unsigned long end = addr + len;
if (vm_start < addr) {
struct vm_area_struct *prev = vma_prev(&vmi);
vm_flags_set(prev, VM_ACCOUNT);
}
if (vm_end > end) {
struct vm_area_struct *next = vma_next(&vmi);
vm_flags_set(next, VM_ACCOUNT);
}
}
}
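/*
 * Copy vrm->vma over to vrm->new_addr, possibly adjusting its size, and move
 * its page tables across. If moving the page tables fails part-way, the move
 * is reversed and the partially populated destination is unmapped instead of
 * the source.
 */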
static int copy_vma_and_data(struct vma_remap_struct *vrm,
struct vm_area_struct **new_vma_ptr)
{
unsigned long internal_offset = vrm->addr - vrm->vma->vm_start;
unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT;
unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff;
unsigned long moved_len;
struct vm_area_struct *vma = vrm->vma;
struct vm_area_struct *new_vma;
int err = 0;
PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len);
new_vma = copy_vma(&vma, vrm->new_addr, vrm->new_len, new_pgoff,
&pmc.need_rmap_locks);
if (!new_vma) {
vrm_uncharge(vrm);
*new_vma_ptr = NULL;
return -ENOMEM;
}
if (vma != vrm->vma)
vrm->vmi_needs_invalidate = true;
vrm->vma = vma;
pmc.old = vma;
pmc.new = new_vma;
moved_len = move_page_tables(&pmc);
if (moved_len < vrm->old_len)
err = -ENOMEM;
else if (vma->vm_ops && vma->vm_ops->mremap)
err = vma->vm_ops->mremap(new_vma);
if (unlikely(err)) {
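		/*
		 * On error, move the entries back from the new area to the
		 * old, which will succeed since the page tables are still
		 * there, and arrange for the new area to be unmapped instead
		 * of the old one.
		 */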
PAGETABLE_MOVE(pmc_revert, new_vma, vma, vrm->new_addr,
vrm->addr, moved_len);
pmc_revert.need_rmap_locks = true;
move_page_tables(&pmc_revert);
vrm->vma = new_vma;
vrm->old_len = vrm->new_len;
vrm->addr = vrm->new_addr;
} else {
mremap_userfaultfd_prep(new_vma, vrm->uf);
}
fixup_hugetlb_reservations(vma);
*new_vma_ptr = new_vma;
return err;
}
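/*
 * Complete an MREMAP_DONTUNMAP move: the old mapping is kept but loses its
 * VM_LOCKED flags, and its anon_vma links are dropped if the entire VMA was
 * moved.
 */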
static void dontunmap_complete(struct vma_remap_struct *vrm,
struct vm_area_struct *new_vma)
{
unsigned long start = vrm->addr;
unsigned long end = vrm->addr + vrm->old_len;
unsigned long old_start = vrm->vma->vm_start;
unsigned long old_end = vrm->vma->vm_end;
vm_flags_clear(vrm->vma, VM_LOCKED_MASK);
if (new_vma != vrm->vma && start == old_start && end == old_end)
unlink_anon_vmas(vrm->vma);
}
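/*
 * Move the VMA described by @vrm to its new location, unmapping the source
 * unless MREMAP_DONTUNMAP was specified. Returns the new address on success,
 * or a negative error value.
 */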
static unsigned long move_vma(struct vma_remap_struct *vrm)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *new_vma;
unsigned long hiwater_vm;
int err;
err = prep_move_vma(vrm);
if (err)
return err;
if (!vrm_calc_charge(vrm))
return -ENOMEM;
vma_start_write(vrm->vma);
err = copy_vma_and_data(vrm, &new_vma);
if (err && !new_vma)
return err;
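	/*
	 * total_vm is briefly raised artificially, since both the source and
	 * destination are accounted until the source is unmapped, so save
	 * hiwater_vm and restore it afterwards to avoid inflating the high
	 * watermark.
	 */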
hiwater_vm = mm->hiwater_vm;
vrm_stat_account(vrm, vrm->new_len);
if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP)))
dontunmap_complete(vrm, new_vma);
else
unmap_source_vma(vrm);
mm->hiwater_vm = hiwater_vm;
return err ? (unsigned long)err : vrm->new_addr;
}
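/*
 * Unmap the tail of the VMA to shrink it in place, optionally dropping the
 * mmap write lock in the process. If the lock is retained, vrm->vma is looked
 * up again, as the unmap may have invalidated it.
 */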
static unsigned long shrink_vma(struct vma_remap_struct *vrm,
bool drop_lock)
{
struct mm_struct *mm = current->mm;
unsigned long unmap_start = vrm->addr + vrm->new_len;
unsigned long unmap_bytes = vrm->delta;
unsigned long res;
VMA_ITERATOR(vmi, mm, unmap_start);
VM_BUG_ON(vrm->remap_type != MREMAP_SHRINK);
res = do_vmi_munmap(&vmi, mm, unmap_start, unmap_bytes,
vrm->uf_unmap, drop_lock);
vrm->vma = NULL;
if (res)
return res;
if (drop_lock) {
vrm->mmap_locked = false;
} else {
vrm->vma = vma_lookup(mm, vrm->addr);
if (!vrm->vma)
return -EFAULT;
}
return 0;
}
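/*
 * Remap to a fixed destination: unmap anything occupying the target range
 * first when MREMAP_FIXED, shrink in place if needed, then move the VMA.
 * Returns the new address, or an error.
 */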
static unsigned long mremap_to(struct vma_remap_struct *vrm)
{
struct mm_struct *mm = current->mm;
unsigned long err;
if (vrm->flags & MREMAP_FIXED) {
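		/*
		 * Unmap whatever currently occupies the destination range;
		 * do_munmap() also rejects a sealed destination.
		 */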
err = do_munmap(mm, vrm->new_addr, vrm->new_len,
vrm->uf_unmap_early);
vrm->vma = NULL;
vrm->vmi_needs_invalidate = true;
if (err)
return err;
vrm->vma = vma_lookup(mm, vrm->addr);
if (!vrm->vma)
return -EFAULT;
}
if (vrm->remap_type == MREMAP_SHRINK) {
err = shrink_vma(vrm, false);
if (err)
return err;
vrm->old_len = vrm->new_len;
}
if (vrm->flags & MREMAP_DONTUNMAP) {
vm_flags_t vm_flags = vrm->vma->vm_flags;
unsigned long pages = vrm->old_len >> PAGE_SHIFT;
if (!may_expand_vm(mm, vm_flags, pages))
return -ENOMEM;
}
err = vrm_set_new_addr(vrm);
if (err)
return err;
return move_vma(vrm);
}
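/* Can the VMA be extended in place by @delta bytes? */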
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
unsigned long end = vma->vm_end + delta;
if (end < vma->vm_end)
return 0;
if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
return 0;
if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
0, MAP_FIXED) & ~PAGE_MASK)
return 0;
return 1;
}
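/* Determine whether we are actually able to expand the VMA in-place. */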
static bool vrm_can_expand_in_place(struct vma_remap_struct *vrm)
{
unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr;
if (suffix_bytes != vrm->old_len)
return false;
if (!vma_expandable(vrm->vma, vrm->delta))
return false;
return true;
}
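/*
 * Expand the VMA in place by vrm->delta bytes, charging for the growth and
 * merging with an adjacent VMA where possible.
 */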
static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = vrm->vma;
VMA_ITERATOR(vmi, mm, vma->vm_end);
if (!vrm_calc_charge(vrm))
return -ENOMEM;
vma = vma_merge_extend(&vmi, vma, vrm->delta);
if (!vma) {
vrm_uncharge(vrm);
return -ENOMEM;
}
vrm->vma = vma;
vrm_stat_account(vrm, vrm->delta);
return 0;
}
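/*
 * Align the remap lengths to the huge page size and validate that a hugetlb
 * remap is properly aligned and does not attempt to expand the mapping.
 */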
static bool align_hugetlb(struct vma_remap_struct *vrm)
{
struct hstate *h __maybe_unused = hstate_vma(vrm->vma);
vrm->old_len = ALIGN(vrm->old_len, huge_page_size(h));
vrm->new_len = ALIGN(vrm->new_len, huge_page_size(h));
if (vrm->addr & ~huge_page_mask(h))
return false;
if (vrm->new_addr & ~huge_page_mask(h))
return false;
if (vrm->new_len > vrm->old_len)
return false;
return true;
}
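/*
 * Expand the VMA, preferably in place; otherwise, if MREMAP_MAYMOVE permits,
 * find a new location and move it there.
 */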
static unsigned long expand_vma(struct vma_remap_struct *vrm)
{
unsigned long err;
if (vrm_can_expand_in_place(vrm)) {
err = expand_vma_in_place(vrm);
if (err)
return err;
return vrm->addr;
}
if (!(vrm->flags & MREMAP_MAYMOVE))
return -ENOMEM;
err = vrm_set_new_addr(vrm);
if (err)
return err;
return move_vma(vrm);
}
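/*
 * Handle an in-place mremap(): a no-op, shrink, or expansion, depending on
 * the computed remap type.
 */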
static unsigned long mremap_at(struct vma_remap_struct *vrm)
{
unsigned long res;
switch (vrm->remap_type) {
case MREMAP_INVALID:
break;
case MREMAP_NO_RESIZE:
return vrm->addr;
case MREMAP_SHRINK:
res = shrink_vma(vrm, true);
if (res)
return res;
return vrm->addr;
case MREMAP_EXPAND:
return expand_vma(vrm);
}
WARN_ON_ONCE(1);
return -EINVAL;
}
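/* Will this remap establish mappings at a new address? */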
static bool vrm_will_map_new(struct vma_remap_struct *vrm)
{
if (vrm->remap_type == MREMAP_EXPAND)
return true;
if (vrm_implies_new_addr(vrm))
return true;
return false;
}
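/* Is this a pure move: MREMAP_FIXED with no change in length? */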
static bool vrm_move_only(struct vma_remap_struct *vrm)
{
if (!(vrm->flags & MREMAP_FIXED))
return false;
if (vrm->old_len != vrm->new_len)
return false;
return true;
}
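/* Notify userfaultfd of unmaps and of the remap's completion or failure. */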
static void notify_uffd(struct vma_remap_struct *vrm, bool failed)
{
struct mm_struct *mm = current->mm;
userfaultfd_unmap_complete(mm, vrm->uf_unmap_early);
if (failed)
mremap_userfaultfd_fail(vrm->uf);
else
mremap_userfaultfd_complete(vrm->uf, vrm->addr,
vrm->new_addr, vrm->old_len);
userfaultfd_unmap_complete(mm, vrm->uf_unmap);
}
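/*
 * May this VMA take part in a multi-VMA move? uffd-armed VMAs cannot, as
 * notification requires the mmap lock to be dropped, and files with a custom
 * get_unmapped_area callback (other than the known-good shmem, hugetlb and
 * THP implementations) might not honour the fixed placement.
 */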
static bool vma_multi_allowed(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
if (userfaultfd_armed(vma))
return false;
if (!file || !file->f_op->get_unmapped_area)
return true;
if (vma_is_shmem(vma))
return true;
if (is_vm_hugetlb_page(vma))
return true;
if (file->f_op->get_unmapped_area == thp_get_unmapped_area)
return true;
return false;
}
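/*
 * Validate the remap against the target VMA and finish deriving vrm state:
 * check sealing and hugetlb alignment, compute the delta and remap type, and
 * for operations that will map new ranges, verify expandability, locking and
 * accounting limits.
 */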
static int check_prep_vma(struct vma_remap_struct *vrm)
{
struct vm_area_struct *vma = vrm->vma;
struct mm_struct *mm = current->mm;
unsigned long addr = vrm->addr;
unsigned long old_len, new_len, pgoff;
if (!vma)
return -EFAULT;
if (vma_is_sealed(vma))
return -EPERM;
if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm))
return -EINVAL;
vrm_set_delta(vrm);
vrm->remap_type = vrm_remap_type(vrm);
if (!vrm_implies_new_addr(vrm))
vrm->new_addr = addr;
if (!vrm_will_map_new(vrm))
return 0;
old_len = vrm->old_len;
new_len = vrm->new_len;
if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n",
current->comm, current->pid);
return -EINVAL;
}
if ((vrm->flags & MREMAP_DONTUNMAP) &&
(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
return -EINVAL;
if (vrm->remap_type == MREMAP_SHRINK)
old_len = new_len;
if (old_len > vma->vm_end - addr)
return -EFAULT;
if (new_len == old_len)
return 0;
if (vma->vm_flags & VM_LOCKED)
vrm->populate_expand = true;
pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
return -EINVAL;
if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
return -EFAULT;
if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, vrm->delta))
return -EAGAIN;
if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT))
return -ENOMEM;
return 0;
}
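/*
 * Are the parameters passed to mremap() valid? Returns 0 if so, otherwise an
 * error code.
 */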
static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
{
unsigned long addr = vrm->addr;
unsigned long flags = vrm->flags;
if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
return -EINVAL;
if (offset_in_page(addr))
return -EINVAL;
if (!vrm->new_len)
return -EINVAL;
if (vrm->new_len > TASK_SIZE)
return -EINVAL;
if (!vrm_implies_new_addr(vrm))
return 0;
if (vrm->new_addr > TASK_SIZE - vrm->new_len)
return -EINVAL;
if (offset_in_page(vrm->new_addr))
return -EINVAL;
if (!(flags & MREMAP_MAYMOVE))
return -EINVAL;
if (flags & MREMAP_DONTUNMAP && vrm->old_len != vrm->new_len)
return -EINVAL;
if (vrm_overlaps(vrm))
return -EINVAL;
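	/*
	 * move_vma() needs headroom below the map_count threshold, otherwise
	 * later splits may fail with -ENOMEM; the worst case is a VMA being
	 * split into three parts before unmapping.
	 */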
if ((current->mm->map_count + 2) >= sysctl_max_map_count - 3)
return -ENOMEM;
return 0;
}
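/*
 * An mremap() move may span multiple VMAs: move each VMA in the range in
 * turn, preserving the gaps between them at the destination. A VMA that does
 * not permit multi-VMA moves is only acceptable if it is the sole VMA
 * covering the whole range.
 */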
static unsigned long remap_move(struct vma_remap_struct *vrm)
{
struct vm_area_struct *vma;
unsigned long start = vrm->addr;
unsigned long end = vrm->addr + vrm->old_len;
unsigned long new_addr = vrm->new_addr;
unsigned long target_addr = new_addr;
unsigned long res = -EFAULT;
unsigned long last_end;
bool seen_vma = false;
VMA_ITERATOR(vmi, current->mm, start);
for_each_vma_range(vmi, vma, end) {
unsigned long addr = max(vma->vm_start, start);
unsigned long len = min(end, vma->vm_end) - addr;
unsigned long offset, res_vma;
bool multi_allowed;
if (!seen_vma && start < vma->vm_start)
return -EFAULT;
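		/* Preserve any gap from the previous VMA at the destination. */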
offset = seen_vma ? vma->vm_start - last_end : 0;
last_end = vma->vm_end;
vrm->vma = vma;
vrm->addr = addr;
vrm->new_addr = target_addr + offset;
vrm->old_len = vrm->new_len = len;
multi_allowed = vma_multi_allowed(vma);
if (!multi_allowed) {
if (seen_vma)
return -EFAULT;
if (vma->vm_end < end)
return -EFAULT;
}
res_vma = check_prep_vma(vrm);
if (!res_vma)
res_vma = mremap_to(vrm);
if (IS_ERR_VALUE(res_vma))
return res_vma;
if (!seen_vma) {
VM_WARN_ON_ONCE(multi_allowed && res_vma != new_addr);
res = res_vma;
}
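		/* The mmap lock is only dropped on shrink; a pure move never expands. */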
VM_WARN_ON_ONCE(!vrm->mmap_locked);
VM_WARN_ON_ONCE(vrm->populate_expand);
if (vrm->vmi_needs_invalidate) {
vma_iter_invalidate(&vmi);
vrm->vmi_needs_invalidate = false;
}
seen_vma = true;
target_addr = res_vma + vrm->new_len;
}
return res;
}
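/*
 * Expand (or shrink) an existing mapping, potentially moving it at the same
 * time, controlled by the MREMAP_MAYMOVE flag and available VM space.
 */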
static unsigned long do_mremap(struct vma_remap_struct *vrm)
{
struct mm_struct *mm = current->mm;
unsigned long res;
bool failed;
vrm->old_len = PAGE_ALIGN(vrm->old_len);
vrm->new_len = PAGE_ALIGN(vrm->new_len);
res = check_mremap_params(vrm);
if (res)
return res;
if (mmap_write_lock_killable(mm))
return -EINTR;
vrm->mmap_locked = true;
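	/* A pure move (MREMAP_FIXED, equal lengths) may span multiple VMAs. */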
if (vrm_move_only(vrm)) {
res = remap_move(vrm);
} else {
vrm->vma = vma_lookup(current->mm, vrm->addr);
res = check_prep_vma(vrm);
if (res)
goto out;
res = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);
}
out:
failed = IS_ERR_VALUE(res);
if (vrm->mmap_locked)
mmap_write_unlock(mm);
if (!failed && vrm->populate_expand)
mm_populate(vrm->new_addr + vrm->old_len, vrm->delta);
notify_uffd(vrm, failed);
return res;
}
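/*
 * The mremap(2) entry point: untag the address and hand off to do_mremap()
 * with fresh userfaultfd completion state.
 */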
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
unsigned long, new_len, unsigned long, flags,
unsigned long, new_addr)
{
struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
LIST_HEAD(uf_unmap_early);
LIST_HEAD(uf_unmap);
struct vma_remap_struct vrm = {
.addr = untagged_addr(addr),
.old_len = old_len,
.new_len = new_len,
.flags = flags,
.new_addr = new_addr,
.uf = &uf,
.uf_unmap_early = &uf_unmap_early,
.uf_unmap = &uf_unmap,
.remap_type = MREMAP_INVALID,
};
return do_mremap(&vrm);
}