#ifndef __MM_VMA_H
#define __MM_VMA_H
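
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */

/*
 * State threaded through vma_prepare()/vma_complete() while locking and
 * adjusting one or more VMAs.
 */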
struct vma_prepare {
struct vm_area_struct *vma;
struct vm_area_struct *adj_next;
struct file *file;
struct address_space *mapping;
struct anon_vma *anon_vma;
struct vm_area_struct *insert;
struct vm_area_struct *remove;
struct vm_area_struct *remove2;
bool skip_vma_uprobe :1;
};
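
/*
 * Batches unlinking of file-backed VMAs from their address_space so the
 * relevant locks need only be taken once per batch of up to eight VMAs.
 */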
struct unlink_vma_file_batch {
int count;
struct vm_area_struct *vmas[8];
};
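
/* Describes a VMA munmap operation. */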
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;	/* The first vma to munmap */
	struct vm_area_struct *prev;	/* vma before the munmap area */
	struct vm_area_struct *next;	/* vma after the munmap area */
	struct list_head *uf;		/* Userfaultfd list_head */
	unsigned long start;		/* Aligned start addr (inclusive) */
	unsigned long end;		/* Aligned end addr (exclusive) */
	unsigned long unmap_start;	/* Unmap PTE start */
	unsigned long unmap_end;	/* Unmap PTE end */
	int vma_count;			/* Number of vmas that will be removed */
	bool unlock;			/* Unlock after the munmap */
	bool clear_ptes;		/* If there are outstanding PTEs to be cleared */

	unsigned long nr_pages;		/* Number of pages being removed */
	unsigned long locked_vm;	/* Number of locked pages */
	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};
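
/* Tracks the progress and outcome of a VMA merge operation. */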
enum vma_merge_state {
VMA_MERGE_START,
VMA_MERGE_ERROR_NOMEM,
VMA_MERGE_NOMERGE,
VMA_MERGE_SUCCESS,
};
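
/*
 * Describes a VMA merge operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the operation, so no guarantees are
 * made to the contents of this structure after a merge operation has
 * completed.
 */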
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	/*
	 * Adjacent VMAs, any of which may be NULL if not present:
	 *
	 * |------|--------|------|
	 * | prev | middle | next |
	 * |------|--------|------|
	 *
	 * middle may not yet exist when a new VMA is proposed for merge.
	 */
	struct vm_area_struct *prev;
	struct vm_area_struct *middle;
	struct vm_area_struct *next;
	/* The VMA the operation ultimately targets, set during the merge. */
	struct vm_area_struct *target;
	/* Proposed range and attributes of the VMA being merged. */
	unsigned long start;
	unsigned long end;
	pgoff_t pgoff;
	vm_flags_t vm_flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;
	/* The VMA this one was copied from, if any (see copy_vma()). */
	struct vm_area_struct *copied_from;

	/* Flags which callers may set to modify merge behaviour: */
	bool just_expand :1;		/* Expand only, no full merge (see do_brk_flags()). */
	bool give_up_on_oom :1;		/* On OOM, abort rather than unwind. */
	bool skip_vma_uprobe :1;	/* Skip uprobe bookkeeping. */

	/* Internal flags set during the merge operation: */
	bool __adjust_middle_start :1;	/* Move middle's start to our end. */
	bool __adjust_next_start :1;	/* Move next's start to our end. */
	bool __remove_middle :1;	/* middle is deleted by the merge. */
	bool __remove_next :1;		/* next is deleted by the merge. */
};
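
/*
 * Describes an unmap operation: the range of virtual addresses to unmap,
 * limits within which page tables may be freed, and how the maple tree
 * walk should be bounded and reset afterwards.
 */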
struct unmap_desc {
	struct ma_state *mas;		/* Maple tree state for the walk */
	struct vm_area_struct *first;	/* First VMA to unmap */
	unsigned long pg_start;		/* Lowest addr for page table freeing */
	unsigned long pg_end;		/* Highest addr for page table freeing */
	unsigned long vma_start;	/* First addr to unmap */
	unsigned long vma_end;		/* Last addr to unmap (exclusive) */
	unsigned long tree_end;		/* Limit of the maple tree walk */
	unsigned long tree_reset;	/* Addr to reset the tree walk to */
	bool mm_wr_locked;		/* True if the mmap write lock is held */
};
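
/*
 * Initialise @unmap for tearing down every VMA in the tree and freeing all
 * user page tables, as is done when an entire address space is torn down
 * (the mmap lock need not be write-locked at that point).
 */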
static inline void unmap_all_init(struct unmap_desc *unmap,
struct vma_iterator *vmi, struct vm_area_struct *vma)
{
unmap->mas = &vmi->mas;
unmap->first = vma;
unmap->pg_start = FIRST_USER_ADDRESS;
unmap->pg_end = USER_PGTABLES_CEILING;
unmap->vma_start = 0;
unmap->vma_end = ULONG_MAX;
unmap->tree_end = ULONG_MAX;
unmap->tree_reset = vma->vm_end;
unmap->mm_wr_locked = false;
}
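
/*
 * Re-initialise @unmap for the page table teardown stage: rewind the tree
 * walk to the reset point and widen the limits to the full user page table
 * range.
 */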
static inline void unmap_pgtable_init(struct unmap_desc *unmap,
struct vma_iterator *vmi)
{
vma_iter_set(vmi, unmap->tree_reset);
unmap->vma_start = FIRST_USER_ADDRESS;
unmap->vma_end = USER_PGTABLES_CEILING;
unmap->tree_end = USER_PGTABLES_CEILING;
}
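
/*
 * Declare an on-stack unmap_desc for unmapping [_vma_start, _vma_end), with
 * page table freeing bounded by the VMAs (if any) adjacent to the range.
 * The caller must hold the mmap write lock; note that the macro arguments
 * are evaluated more than once.
 */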
#define UNMAP_STATE(name, _vmi, _vma, _vma_start, _vma_end, _prev, _next) \
struct unmap_desc name = { \
.mas = &(_vmi)->mas, \
.first = _vma, \
.pg_start = _prev ? ((struct vm_area_struct *)_prev)->vm_end : \
FIRST_USER_ADDRESS, \
.pg_end = _next ? ((struct vm_area_struct *)_next)->vm_start : \
USER_PGTABLES_CEILING, \
.vma_start = _vma_start, \
.vma_end = _vma_end, \
.tree_end = _next ? \
((struct vm_area_struct *)_next)->vm_start : \
USER_PGTABLES_CEILING, \
.tree_reset = _vma->vm_end, \
.mm_wr_locked = true, \
}
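
/* Did the merge operation fail with an out-of-memory condition? */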
static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
return vmg->state == VMA_MERGE_ERROR_NOMEM;
}
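
/* Assumes addr >= vma->vm_start. */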
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
unsigned long addr)
{
return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
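
/*
 * Declare a vma_merge_struct for merging a range not yet backed by a VMA.
 * An illustrative (not prescriptive) sketch of its use:
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
 *	vma = vma_merge_new_range(&vmg);
 */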
#define VMG_STATE(name, mm_, vmi_, start_, end_, vm_flags_, pgoff_) \
struct vma_merge_struct name = { \
.mm = mm_, \
.vmi = vmi_, \
.start = start_, \
.end = end_, \
.vm_flags = vm_flags_, \
.pgoff = pgoff_, \
.state = VMA_MERGE_START, \
}
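
/*
 * Declare a vma_merge_struct for modifying an existing VMA, vma_, over the
 * range [start_, end_), inheriting that VMA's attributes.
 */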
#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_) \
struct vma_merge_struct name = { \
.mm = vma_->vm_mm, \
.vmi = vmi_, \
.prev = prev_, \
.middle = vma_, \
.next = NULL, \
.start = start_, \
.end = end_, \
.vm_flags = vma_->vm_flags, \
.pgoff = vma_pgoff_offset(vma_, start_), \
.file = vma_->vm_file, \
.anon_vma = vma_->anon_vma, \
.policy = vma_policy(vma_), \
.uffd_ctx = vma_->vm_userfaultfd_ctx, \
.anon_name = anon_vma_name(vma_), \
.state = VMA_MERGE_START, \
}
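
/*
 * Exhaustively validate the mm's VMAs against the maple tree; compiles away
 * unless CONFIG_DEBUG_VM_MAPLE_TREE is enabled.
 */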
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif
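
/* Expand or shrink an existing VMA in place, updating the maple tree. */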
__must_check int vma_expand(struct vma_merge_struct *vmg);
__must_check int vma_shrink(struct vma_iterator *vmi,
struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff);
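
/*
 * Store @vma over its range in the maple tree, invalidating the iterator
 * first if it no longer overlaps vma->vm_start. Marks the VMA attached on
 * success; returns -ENOMEM if node allocation fails.
 */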
static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
struct vm_area_struct *vma, gfp_t gfp)
{
if (vmi->mas.status != ma_start &&
((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
vma_iter_invalidate(vmi);
__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
mas_store_gfp(&vmi->mas, vma, gfp);
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
vma_mark_attached(vma);
return 0;
}
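
/*
 * Apply the (possibly modified) fields of @desc back to @vma after the
 * file's .mmap_prepare() hook has been given the chance to update them.
 */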
static inline void set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc)
{
	/* Mutable fields. Populated with initial state. */
	vma->vm_pgoff = desc->pgoff;
	if (desc->vm_file != vma->vm_file)
		vma_set_file(vma, desc->vm_file);
	/* vm_flags cannot be assigned directly; write it via its helper. */
	vm_flags_init(vma, desc->vm_flags);
	vma->vm_page_prot = desc->page_prot;

	/* User-defined fields. */
	vma->vm_ops = desc->vm_ops;
	vma->vm_private_data = desc->private_data;
}
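
/* Entry points for unmapping a range of VMAs (munmap() and friends). */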
int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct mm_struct *mm, unsigned long start,
unsigned long end, struct list_head *uf, bool unlock);
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
void remove_vma(struct vm_area_struct *vma);
void unmap_region(struct unmap_desc *unmap);
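
/*
 * The vma_modify_*() helpers split and/or merge VMAs as necessary so that
 * [start, end) carries the requested attribute, returning the resulting VMA
 * or an ERR_PTR() on failure.
 */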
__must_check struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
struct vm_area_struct *prev, struct vm_area_struct *vma,
unsigned long start, unsigned long end,
vm_flags_t *vm_flags_ptr);
__must_check struct vm_area_struct *vma_modify_name(struct vma_iterator *vmi,
struct vm_area_struct *prev, struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct anon_vma_name *new_name);
__must_check struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi,
struct vm_area_struct *prev, struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct mempolicy *new_pol);
__must_check struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi,
struct vm_area_struct *prev, struct vm_area_struct *vma,
unsigned long start, unsigned long end, vm_flags_t vm_flags,
struct vm_userfaultfd_ctx new_ctx, bool give_up_on_oom);
__must_check struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);
__must_check struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
struct vm_area_struct *vma, unsigned long delta);
void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);
void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);
void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
struct vm_area_struct *vma);
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
unsigned long addr, unsigned long len, pgoff_t pgoff,
bool *need_rmap_locks);
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);
bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);
unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
struct list_head *uf);
int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
unsigned long addr, unsigned long request, unsigned long flags);
unsigned long unmapped_area(struct vm_unmapped_area_info *info);
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
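
/*
 * Check whether individual PTEs may need to be made writable manually, when
 * this cannot be decided for the mapping as a whole. For private mappings
 * that is the case whenever write permission exists, as COW must be handled.
 */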
static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_SHARED)
return vma_wants_writenotify(vma, vma->vm_page_prot);
return !!(vma->vm_flags & VM_WRITE);
}
#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, vm_flags_t vm_flags)
{
return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif
static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
unsigned long min)
{
return mas_prev(&vmi->mas, min);
}
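
/* Classify mappings for the mm's exec_vm/stack_vm/data_vm counters. */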
static inline bool is_exec_mapping(vm_flags_t flags)
{
return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}
static inline bool is_stack_mapping(vm_flags_t flags)
{
return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}
static inline bool is_data_mapping(vm_flags_t flags)
{
return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
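
/*
 * Configure the iterator for [index, last): the maple tree stores inclusive
 * ranges, hence the "last - 1".
 */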
static inline void vma_iter_config(struct vma_iterator *vmi,
unsigned long index, unsigned long last)
{
__mas_set_range(&vmi->mas, index, last - 1);
}
static inline void vma_iter_reset(struct vma_iterator *vmi)
{
mas_reset(&vmi->mas);
}
static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
return mas_prev_range(&vmi->mas, min);
}
static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
return mas_next_range(&vmi->mas, max);
}
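
/* Find the lowest/highest gap of at least @size within [min, max). */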
static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
unsigned long max, unsigned long size)
{
return mas_empty_area(&vmi->mas, min, max - 1, size);
}
static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
unsigned long max, unsigned long size)
{
return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
struct vm_area_struct *vma)
{
return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}
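
/* Erase the entry in the current range, using preallocated nodes. */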
static inline void vma_iter_clear(struct vma_iterator *vmi)
{
mas_store_prealloc(&vmi->mas, NULL);
}
static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
return mas_walk(&vmi->mas);
}
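
/* Store a VMA over an existing entry, using preallocated maple tree nodes. */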
static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
struct vm_area_struct *vma)
{
vma_assert_attached(vma);
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
vmi->mas.index > vma->vm_start)) {
pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
vmi->mas.index, vma->vm_start, vma->vm_start,
vma->vm_end, vmi->mas.index, vmi->mas.last);
}
if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
vmi->mas.last < vma->vm_start)) {
pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
vmi->mas.index, vmi->mas.last);
}
#endif
if (vmi->mas.status != ma_start &&
((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
vma_iter_invalidate(vmi);
__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
mas_store_prealloc(&vmi->mas, vma);
}
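
/* Store a new VMA, marking it attached before insertion into the tree. */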
static inline void vma_iter_store_new(struct vma_iterator *vmi,
struct vm_area_struct *vma)
{
vma_mark_attached(vma);
vma_iter_store_overwrite(vmi, vma);
}
static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
return vmi->mas.index;
}
static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
return vmi->mas.last + 1;
}
static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
return mas_prev_range(&vmi->mas, 0);
}
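
/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or to index 0 if there is no previous VMA.
 */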
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
struct vm_area_struct **pprev)
{
struct vm_area_struct *next = vma_next(vmi);
struct vm_area_struct *prev = vma_prev(vmi);
if (prev)
vma_iter_next_range(vmi);
if (pprev)
*pprev = prev;
return next;
}
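
/* VM_SEALED (mseal(2)) is only defined on 64-bit architectures. */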
#ifdef CONFIG_64BIT
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
return (vma->vm_flags & VM_SEALED);
}
#else
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
return false;
}
#endif
#if defined(CONFIG_STACK_GROWSUP)
int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif
int expand_downwards(struct vm_area_struct *vma, unsigned long address);
int __vm_munmap(unsigned long start, size_t len, bool unlock);
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma);
void __init vma_state_init(void);
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig);
void vm_area_free(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,
unsigned long *top_mem_p);
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
#endif
#endif	/* __MM_VMA_H */