#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>
#include <linux/fsnotify.h>
#include <linux/page_idle.h>
#include <linux/uaccess.h>
#include <kunit/visibility.h>
#include "internal.h"
#include "swap.h"
void kfree_const(const void *x)
{
if (!is_kernel_rodata((unsigned long)x))
kfree(x);
}
EXPORT_SYMBOL(kfree_const);
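
/**
 * __kmemdup_nul - Create a NUL-terminated string from @s, which might be unterminated.
 * @s: The data to copy
 * @len: The size of the data, not including the NUL terminator
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */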
static __always_inline char *__kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
char *buf;
buf = kmalloc_track_caller(len + 1, gfp);
if (!buf)
return NULL;
memcpy(buf, s, len);
buf[len] = '\0';
return buf;
}
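
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */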
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
return s ? __kmemdup_nul(s, strlen(s), gfp) : NULL;
}
EXPORT_SYMBOL(kstrdup);
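
/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const
 * and must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section, otherwise fall back
 * to kstrdup.
 */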
const char *kstrdup_const(const char *s, gfp_t gfp)
{
if (is_kernel_rodata((unsigned long)s))
return s;
return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
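
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */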
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
return s ? __kmemdup_nul(s, strnlen(s, max), gfp) : NULL;
}
EXPORT_SYMBOL(kstrndup);
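
/**
 * kmemdup_noprof - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */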
void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
{
void *p;
p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
if (p)
memcpy(p, src, len);
return p;
}
EXPORT_SYMBOL(kmemdup_noprof);
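
/**
 * kmemdup_array - duplicate a given array.
 *
 * @src: array to duplicate.
 * @count: number of elements to duplicate from array.
 * @element_size: size of each element of array.
 * @gfp: GFP mask to use.
 *
 * Return: duplicated array of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */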
void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);
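
/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may be not physically contiguous. Use kvfree() to free.
 */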
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
void *p;
p = kvmalloc(len, gfp);
if (p)
memcpy(p, src, len);
return p;
}
EXPORT_SYMBOL(kvmemdup);
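
/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */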
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
return s ? __kmemdup_nul(s, len, gfp) : NULL;
}
EXPORT_SYMBOL(kmemdup_nul);
static kmem_buckets *user_buckets __ro_after_init;
static int __init init_user_buckets(void)
{
user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);
return 0;
}
subsys_initcall(init_user_buckets);
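
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */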
void *memdup_user(const void __user *src, size_t len)
{
void *p;
p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
if (!p)
return ERR_PTR(-ENOMEM);
if (copy_from_user(p, src, len)) {
kfree(p);
return ERR_PTR(-EFAULT);
}
return p;
}
EXPORT_SYMBOL(memdup_user);
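
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may be not
 * physically contiguous. Use kvfree() to free.
 */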
void *vmemdup_user(const void __user *src, size_t len)
{
void *p;
p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
if (!p)
return ERR_PTR(-ENOMEM);
if (copy_from_user(p, src, len)) {
kvfree(p);
return ERR_PTR(-EFAULT);
}
return p;
}
EXPORT_SYMBOL(vmemdup_user);
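
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */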
char *strndup_user(const char __user *s, long n)
{
char *p;
long length;
length = strnlen_user(s, n);
if (!length)
return ERR_PTR(-EFAULT);
if (length > n)
return ERR_PTR(-EINVAL);
p = memdup_user(s, length);
if (IS_ERR(p))
return p;
p[length - 1] = '\0';
return p;
}
EXPORT_SYMBOL(strndup_user);
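
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */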
void *memdup_user_nul(const void __user *src, size_t len)
{
char *p;
p = kmem_buckets_alloc_track_caller(user_buckets, len + 1, GFP_USER | __GFP_NOWARN);
if (!p)
return ERR_PTR(-ENOMEM);
if (copy_from_user(p, src, len)) {
kfree(p);
return ERR_PTR(-EFAULT);
}
p[len] = '\0';
return p;
}
EXPORT_SYMBOL(memdup_user_nul);
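
/* Check if the vma is being used as a stack by this task */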
int vma_is_stack_for_current(const struct vm_area_struct *vma)
{
struct task_struct * __maybe_unused t = current;
return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
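
/*
 * Change backing file, only valid to use during initial VMA setup.
 */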
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
get_file(file);
swap(vma->vm_file, file);
fput(file);
}
EXPORT_SYMBOL(vma_set_file);
#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
#endif
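
/**
 * randomize_stack_top() - Generate a random stack top
 * @stack_top: The maximum address of the stack
 *
 * If the task has PF_RANDOMIZE set, shift the page-aligned stack top by a
 * random, page-aligned offset bounded by STACK_RND_MASK; the offset is
 * added for upward-growing stacks and subtracted otherwise.
 *
 * Return: The randomized stack top, or the page-aligned @stack_top when
 * randomization is disabled.
 */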
unsigned long randomize_stack_top(unsigned long stack_top)
{
unsigned long random_variable = 0;
if (current->flags & PF_RANDOMIZE) {
random_variable = get_random_long();
random_variable &= STACK_RND_MASK;
random_variable <<= PAGE_SHIFT;
}
#ifdef CONFIG_STACK_GROWSUP
return PAGE_ALIGN(stack_top) + random_variable;
#else
return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
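
/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */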
unsigned long randomize_page(unsigned long start, unsigned long range)
{
if (!PAGE_ALIGNED(start)) {
range -= PAGE_ALIGN(start) - start;
start = PAGE_ALIGN(start);
}
if (start > ULONG_MAX - range)
range = ULONG_MAX - start;
range >>= PAGE_SHIFT;
if (range == 0)
return start;
return start + (get_random_long() % range << PAGE_SHIFT);
}
#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
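	/* Is the current task 32bit ? */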
if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
return randomize_page(mm->brk, SZ_32M);
return randomize_page(mm->brk, SZ_1G);
}
unsigned long arch_mmap_rnd(void)
{
unsigned long rnd;
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
if (is_compat_task())
rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
else
#endif
rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
return rnd << PAGE_SHIFT;
}
static int mmap_is_legacy(const struct rlimit *rlim_stack)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
if (rlim_stack->rlim_cur == RLIM_INFINITY &&
!IS_ENABLED(CONFIG_STACK_GROWSUP))
return 1;
return sysctl_legacy_va_layout;
}
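
/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */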
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)
static unsigned long mmap_base(const unsigned long rnd, const struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
unsigned long gap = rlim_stack->rlim_cur;
unsigned long pad = stack_guard_gap;
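	/* Account for stack randomization if necessary */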
if (current->flags & PF_RANDOMIZE)
pad += (STACK_RND_MASK << PAGE_SHIFT);
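	/* Values close to RLIM_INFINITY can overflow. */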
if (gap + pad > gap)
gap += pad;
if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
gap = MIN_GAP;
else if (gap > MAX_GAP)
gap = MAX_GAP;
return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}
void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
unsigned long random_factor = 0UL;
if (current->flags & PF_RANDOMIZE)
random_factor = arch_mmap_rnd();
if (mmap_is_legacy(rlim_stack)) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
mm_flags_clear(MMF_TOPDOWN, mm);
} else {
mm->mmap_base = mmap_base(random_factor, rlim_stack);
mm_flags_set(MMF_TOPDOWN, mm);
}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
mm->mmap_base = TASK_UNMAPPED_BASE;
mm_flags_clear(MMF_TOPDOWN, mm);
}
#endif
#ifdef CONFIG_MMU
EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
#endif
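
/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if RLIMIT_MEMLOCK should not be checked
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */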
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
const struct task_struct *task, bool bypass_rlim)
{
unsigned long locked_vm, limit;
int ret = 0;
mmap_assert_write_locked(mm);
locked_vm = mm->locked_vm;
if (inc) {
if (!bypass_rlim) {
limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked_vm + pages > limit)
ret = -ENOMEM;
}
if (!ret)
mm->locked_vm = locked_vm + pages;
} else {
WARN_ON_ONCE(pages > locked_vm);
mm->locked_vm = locked_vm - pages;
}
pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
(void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
ret ? " - exceeded" : "");
return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);
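
/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */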
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
int ret;
if (pages == 0 || !mm)
return 0;
mmap_write_lock(mm);
ret = __account_locked_vm(mm, pages, inc, current,
capable(CAP_IPC_LOCK));
mmap_write_unlock(mm);
return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long pgoff)
{
loff_t off = (loff_t)pgoff << PAGE_SHIFT;
unsigned long ret;
struct mm_struct *mm = current->mm;
unsigned long populate;
LIST_HEAD(uf);
ret = security_mmap_file(file, prot, flag);
if (!ret)
ret = fsnotify_mmap_perm(file, prot, off, len);
if (!ret) {
if (mmap_write_lock_killable(mm))
return -EINTR;
ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
&uf);
mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(ret, populate);
}
return ret;
}
unsigned long vm_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset)
{
if (unlikely(offset + PAGE_ALIGN(len) < offset))
return -EINVAL;
if (unlikely(offset_in_page(offset)))
return -EINVAL;
return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
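
/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */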
void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
size_t bytes;
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);
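
/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */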
void *vmalloc_array_noprof(size_t n, size_t size)
{
return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);
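
/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */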
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);
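
/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */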
void *vcalloc_noprof(size_t n, size_t size)
{
return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);
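
/*
 * Return the anon_vma of @folio, or NULL if @folio is not anonymous.
 */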
struct anon_vma *folio_anon_vma(const struct folio *folio)
{
unsigned long mapping = (unsigned long)folio->mapping;
if ((mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
return NULL;
return (void *)(mapping - FOLIO_MAPPING_ANON);
}
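
/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */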
struct address_space *folio_mapping(const struct folio *folio)
{
struct address_space *mapping;
if (unlikely(folio_test_slab(folio)))
return NULL;
if (unlikely(folio_test_swapcache(folio)))
return swap_address_space(folio->swap);
mapping = folio->mapping;
if ((unsigned long)mapping & FOLIO_MAPPING_FLAGS)
return NULL;
return mapping;
}
EXPORT_SYMBOL(folio_mapping);
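
/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: The folio to copy to.
 * @src: The folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */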
void folio_copy(struct folio *dst, struct folio *src)
{
long i = 0;
long nr = folio_nr_pages(src);
for (;;) {
copy_highpage(folio_page(dst, i), folio_page(src, i));
if (++i == nr)
break;
cond_resched();
}
}
EXPORT_SYMBOL(folio_copy);
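
/**
 * folio_mc_copy - Copy the contents of one folio to another.
 * @dst: The folio to copy to.
 * @src: The folio to copy from.
 *
 * Like folio_copy(), but copies with machine-check handling.
 *
 * Return: 0 on success, -EHWPOISON if copying a page hits a machine check.
 */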
int folio_mc_copy(struct folio *dst, struct folio *src)
{
long nr = folio_nr_pages(src);
long i = 0;
for (;;) {
if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
return -EHWPOISON;
if (++i == nr)
break;
cond_resched();
}
return 0;
}
EXPORT_SYMBOL(folio_mc_copy);
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
static int sysctl_overcommit_ratio __read_mostly = 50;
static unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
#ifdef CONFIG_SYSCTL
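
/*
 * vm.overcommit_ratio and vm.overcommit_kbytes are mutually exclusive:
 * writing one of them via sysctl clears the other.
 */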
static int overcommit_ratio_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret == 0 && write)
sysctl_overcommit_kbytes = 0;
return ret;
}
static void sync_overcommit_as(struct work_struct *dummy)
{
percpu_counter_sync(&vm_committed_as);
}
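
/*
 * The deviation of sync_overcommit_as could be big with loose policy
 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to 0 and
 * avoid racing with concurrent updaters, so the switch is done in the
 * following order:
 *	1. changing the batch
 *	2. sync percpu count on each CPU
 *	3. switch the policy
 */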
static int overcommit_policy_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
int new_policy = -1;
int ret;
if (write) {
t = *table;
t.data = &new_policy;
ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
if (ret || new_policy == -1)
return ret;
mm_compute_batch(new_policy);
if (new_policy == OVERCOMMIT_NEVER)
schedule_on_each_cpu(sync_overcommit_as);
sysctl_overcommit_memory = new_policy;
} else {
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
return ret;
}
static int overcommit_kbytes_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write)
sysctl_overcommit_ratio = 0;
return ret;
}
static const struct ctl_table util_sysctl_table[] = {
{
.procname = "overcommit_memory",
.data = &sysctl_overcommit_memory,
.maxlen = sizeof(sysctl_overcommit_memory),
.mode = 0644,
.proc_handler = overcommit_policy_handler,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
{
.procname = "overcommit_ratio",
.data = &sysctl_overcommit_ratio,
.maxlen = sizeof(sysctl_overcommit_ratio),
.mode = 0644,
.proc_handler = overcommit_ratio_handler,
},
{
.procname = "overcommit_kbytes",
.data = &sysctl_overcommit_kbytes,
.maxlen = sizeof(sysctl_overcommit_kbytes),
.mode = 0644,
.proc_handler = overcommit_kbytes_handler,
},
{
.procname = "user_reserve_kbytes",
.data = &sysctl_user_reserve_kbytes,
.maxlen = sizeof(sysctl_user_reserve_kbytes),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "admin_reserve_kbytes",
.data = &sysctl_admin_reserve_kbytes,
.maxlen = sizeof(sysctl_admin_reserve_kbytes),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
};
static int __init init_vm_util_sysctls(void)
{
register_sysctl_init("vm", util_sysctl_table);
return 0;
}
subsys_initcall(init_vm_util_sysctls);
#endif
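
/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */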
unsigned long vm_commit_limit(void)
{
unsigned long allowed;
if (sysctl_overcommit_kbytes)
allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
else
allowed = ((totalram_pages() - hugetlb_total_pages())
* sysctl_overcommit_ratio / 100);
allowed += total_swap_pages;
return allowed;
}
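
/*
 * Make sure vm_committed_as in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */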
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
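
/*
 * The global memory commitment made in the system can be a metric that
 * drives ballooning decisions when Linux deals with hypervisors and
 * balloon drivers, so the precise (summed) counter value is returned
 * here rather than the cheaper approximate per-cpu read.
 */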
unsigned long vm_memory_committed(void)
{
return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
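
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */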
int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin)
{
long allowed;
unsigned long bytes_failed;
vm_acct_memory(pages);
if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
return 0;
if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
if (pages > totalram_pages() + total_swap_pages)
goto error;
return 0;
}
allowed = vm_commit_limit();
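	/*
	 * Reserve some for root
	 */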
if (!cap_sys_admin)
allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
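	/*
	 * Don't let a single process grow so big a user can't recover
	 */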
if (mm) {
long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
allowed -= min_t(long, mm->total_vm / 32, reserve);
}
if (percpu_counter_read_positive(&vm_committed_as) < allowed)
return 0;
error:
bytes_failed = pages << PAGE_SHIFT;
pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
__func__, current->pid, current->comm, bytes_failed);
vm_unacct_memory(pages);
return -ENOMEM;
}
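
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */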
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
int res = 0;
unsigned int len;
struct mm_struct *mm = get_task_mm(task);
unsigned long arg_start, arg_end, env_start, env_end;
if (!mm)
goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shouldn't happen */
spin_lock(&mm->arg_lock);
arg_start = mm->arg_start;
arg_end = mm->arg_end;
env_start = mm->env_start;
env_end = mm->env_end;
spin_unlock(&mm->arg_lock);
len = arg_end - arg_start;
if (len > buflen)
len = buflen;
res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
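	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */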
if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
len = strnlen(buffer, res);
if (len < res) {
res = len;
} else {
len = env_end - env_start;
if (len > buflen - res)
len = buflen - res;
res += access_process_vm(task, env_start,
buffer+res, len,
FOLL_FORCE);
res = strnlen(buffer, res);
}
}
out_mm:
mmput(mm);
out:
return res;
}
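
/*
 * Default (weak) implementation: map both pages and compare their contents
 * byte by byte. Architectures may provide an optimised override.
 */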
int __weak memcmp_pages(struct page *page1, struct page *page2)
{
char *addr1, *addr2;
int ret;
addr1 = kmap_local_page(page1);
addr2 = kmap_local_page(page2);
ret = memcmp(addr1, addr2, PAGE_SIZE);
kunmap_local(addr2);
kunmap_local(addr1);
return ret;
}
#ifdef CONFIG_PRINTK
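
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */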
void mem_dump_obj(void *object)
{
const char *type;
if (kmem_dump_obj(object))
return;
if (vmalloc_dump_obj(object))
return;
if (is_vmalloc_addr(object))
type = "vmalloc memory";
else if (virt_addr_valid(object))
type = "non-slab/vmalloc memory";
else if (object == NULL)
type = "NULL pointer";
else if (object == ZERO_SIZE_PTR)
type = "zero-size pointer";
else
type = "non-paged memory";
pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif
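
/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */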
static DECLARE_RWSEM(page_offline_rwsem);
void page_offline_freeze(void)
{
down_read(&page_offline_rwsem);
}
void page_offline_thaw(void)
{
up_read(&page_offline_rwsem);
}
void page_offline_begin(void)
{
down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);
void page_offline_end(void)
{
up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);
#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
long i, nr = folio_nr_pages(folio);
for (i = 0; i < nr; i++)
flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif
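
/*
 * Bridge the mmap_prepare() hook to callers that still operate on an
 * already-constructed VMA: build a struct vm_area_desc from @vma, invoke
 * @f_op->mmap_prepare(), then write the (possibly modified) descriptor
 * back into the VMA and complete any requested action.
 */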
int __compat_vma_mmap(const struct file_operations *f_op,
struct file *file, struct vm_area_struct *vma)
{
struct vm_area_desc desc = {
.mm = vma->vm_mm,
.file = file,
.start = vma->vm_start,
.end = vma->vm_end,
.pgoff = vma->vm_pgoff,
.vm_file = vma->vm_file,
.vma_flags = vma->flags,
.page_prot = vma->vm_page_prot,
.action.type = MMAP_NOTHING,
};
int err;
err = f_op->mmap_prepare(&desc);
if (err)
return err;
mmap_action_prepare(&desc.action, &desc);
set_vma_from_desc(vma, &desc);
return mmap_action_complete(&desc.action, vma);
}
EXPORT_SYMBOL(__compat_vma_mmap);
int compat_vma_mmap(struct file *file, struct vm_area_struct *vma)
{
return __compat_vma_mmap(file->f_op, file, vma);
}
EXPORT_SYMBOL(compat_vma_mmap);
static void set_ps_flags(struct page_snapshot *ps, const struct folio *folio,
const struct page *page)
{
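	/*
	 * Only the first page of a high-order buddy page has PageBuddy() set.
	 * So we have to check manually whether this page is part of a high-
	 * order buddy page.
	 */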
if (PageBuddy(page))
ps->flags |= PAGE_SNAPSHOT_PG_BUDDY;
else if (page_count(page) == 0 && is_free_buddy_page(page))
ps->flags |= PAGE_SNAPSHOT_PG_BUDDY;
if (folio_test_idle(folio))
ps->flags |= PAGE_SNAPSHOT_PG_IDLE;
}
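
/**
 * snapshot_page() - Create a snapshot of a struct page
 * @ps: Pointer to a struct page_snapshot to store the page snapshot
 * @page: The page to snapshot
 *
 * Create a snapshot of the page and store both its struct page and struct
 * folio representations in @ps.
 *
 * A snapshot is marked as "faithful" if the compound state of @page was
 * stable and allowed safe reconstruction of the folio representation. In
 * rare cases where this is not possible (e.g. due to a racy folio split),
 * snapshot_page() falls back to treating @page as a single page and the
 * snapshot is marked as "unfaithful".
 */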
void snapshot_page(struct page_snapshot *ps, const struct page *page)
{
unsigned long head, nr_pages = 1;
struct folio *foliop;
int loops = 5;
ps->pfn = page_to_pfn(page);
ps->flags = PAGE_SNAPSHOT_FAITHFUL;
again:
memset(&ps->folio_snapshot, 0, sizeof(struct folio));
memcpy(&ps->page_snapshot, page, sizeof(*page));
head = ps->page_snapshot.compound_head;
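	/*
	 * Bit 0 of compound_head is set for tail pages; the remaining bits
	 * point at the head page of the folio.
	 */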
if ((head & 1) == 0) {
ps->idx = 0;
foliop = (struct folio *)&ps->page_snapshot;
if (!folio_test_large(foliop)) {
set_ps_flags(ps, page_folio(page), page);
memcpy(&ps->folio_snapshot, foliop,
sizeof(struct page));
return;
}
foliop = (struct folio *)page;
} else {
foliop = (struct folio *)(head - 1);
ps->idx = folio_page_idx(foliop, page);
}
if (ps->idx < MAX_FOLIO_NR_PAGES) {
memcpy(&ps->folio_snapshot, foliop, 2 * sizeof(struct page));
nr_pages = folio_nr_pages(&ps->folio_snapshot);
if (nr_pages > 1)
memcpy(&ps->folio_snapshot.__page_2, &foliop->__page_2,
sizeof(struct page));
set_ps_flags(ps, foliop, page);
}
if (ps->idx > nr_pages) {
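		/*
		 * The only way to reach here is a racy folio split. Retry a
		 * few times, then give up and fall back to treating the page
		 * as a single (unfaithful) page snapshot.
		 */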
if (loops-- > 0)
goto again;
clear_compound_head(&ps->page_snapshot);
foliop = (struct folio *)&ps->page_snapshot;
memcpy(&ps->folio_snapshot, foliop, sizeof(struct page));
ps->flags = 0;
ps->idx = 0;
}
}
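
/*
 * Finalise an mmap action: on error, tear down the partially set up VMA and
 * give the caller's error hook a chance to translate the error code; on
 * success, invoke the success hook if one was supplied.
 */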
static int mmap_action_finish(struct mmap_action *action,
const struct vm_area_struct *vma, int err)
{
if (err) {
const size_t len = vma_pages(vma) << PAGE_SHIFT;
do_munmap(current->mm, vma->vm_start, len, NULL);
if (action->error_hook) {
err = action->error_hook(err);
VM_WARN_ON_ONCE(!err);
}
return err;
}
if (action->success_hook)
return action->success_hook(vma);
return 0;
}
#ifdef CONFIG_MMU
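
/*
 * Perform any preparation the requested action needs before the VMA is
 * established (e.g. priming the descriptor for a PFN remap).
 */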
void mmap_action_prepare(struct mmap_action *action,
struct vm_area_desc *desc)
{
switch (action->type) {
case MMAP_NOTHING:
break;
case MMAP_REMAP_PFN:
remap_pfn_range_prepare(desc, action->remap.start_pfn);
break;
case MMAP_IO_REMAP_PFN:
io_remap_pfn_range_prepare(desc, action->remap.start_pfn,
action->remap.size);
break;
}
}
EXPORT_SYMBOL(mmap_action_prepare);
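
/*
 * Execute the requested action now that the VMA is established, then
 * finalise via mmap_action_finish().
 */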
int mmap_action_complete(struct mmap_action *action,
struct vm_area_struct *vma)
{
int err = 0;
switch (action->type) {
case MMAP_NOTHING:
break;
case MMAP_REMAP_PFN:
err = remap_pfn_range_complete(vma, action->remap.start,
action->remap.start_pfn, action->remap.size,
action->remap.pgprot);
break;
case MMAP_IO_REMAP_PFN:
err = io_remap_pfn_range_complete(vma, action->remap.start,
action->remap.start_pfn, action->remap.size,
action->remap.pgprot);
break;
}
return mmap_action_finish(action, vma, err);
}
EXPORT_SYMBOL(mmap_action_complete);
#else
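
/*
 * Without an MMU, PFN remapping actions are not supported: warn and, at
 * completion time, fail with -EINVAL.
 */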
void mmap_action_prepare(struct mmap_action *action,
struct vm_area_desc *desc)
{
switch (action->type) {
case MMAP_NOTHING:
break;
case MMAP_REMAP_PFN:
case MMAP_IO_REMAP_PFN:
WARN_ON_ONCE(1);
break;
}
}
EXPORT_SYMBOL(mmap_action_prepare);
int mmap_action_complete(struct mmap_action *action,
struct vm_area_struct *vma)
{
int err = 0;
switch (action->type) {
case MMAP_NOTHING:
break;
case MMAP_REMAP_PFN:
case MMAP_IO_REMAP_PFN:
WARN_ON_ONCE(1);
err = -EINVAL;
break;
}
return mmap_action_finish(action, vma, err);
}
EXPORT_SYMBOL(mmap_action_complete);
#endif
#ifdef CONFIG_MMU
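
/**
 * folio_pte_batch - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @max_nr: The maximum number of table entries to consider.
 *
 * This is a simplified variant of folio_pte_batch_flags().
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
 * pages of the same large folio in a single VMA and a single page table.
 *
 * Return: the number of table entries in the batch.
 */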
unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
unsigned int max_nr)
{
return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr, 0);
}
#endif
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
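
/**
 * page_range_contiguous - test whether the page range is contiguous
 * @page: the start of the page range.
 * @nr_pages: the number of pages in the range.
 *
 * Test whether the page range is contiguous, such that they can be iterated
 * naively, corresponding to iterating a contiguous PFN range.
 *
 * This function should primarily only be used for debug checks, or when
 * working with page ranges that are not naturally contiguous (e.g., pages
 * within a folio are).
 *
 * Return: true if contiguous, otherwise false.
 */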
bool page_range_contiguous(const struct page *page, unsigned long nr_pages)
{
const unsigned long start_pfn = page_to_pfn(page);
const unsigned long end_pfn = start_pfn + nr_pages;
unsigned long pfn;
for (pfn = ALIGN(start_pfn, PAGES_PER_SECTION);
pfn < end_pfn; pfn += PAGES_PER_SECTION)
if (unlikely(page + (pfn - start_pfn) != pfn_to_page(pfn)))
return false;
return true;
}
EXPORT_SYMBOL(page_range_contiguous);
#endif