#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/pagevec.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmstat.h>
#include <linux/fault-inject.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/psi.h>
#include <linux/khugepaged.h>
#include <linux/delayacct.h>
#include <linux/cacheinfo.h>
#include <linux/pgalloc_tag.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"
typedef int __bitwise fpi_t;
#define FPI_NONE ((__force fpi_t)0)
#define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0))
#define FPI_TO_TAIL ((__force fpi_t)BIT(1))
#define FPI_TRYLOCK ((__force fpi_t)BIT(2))
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
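/*
 * On SMP and PREEMPT_RT, spin_trylock() on the pcp lock is sufficient
 * protection, so prepare/finish are no-ops.  On UP, spin_trylock() always
 * succeeds, so interrupts must be disabled to prevent re-entrancy from
 * interrupt context.
 */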
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline void __pcp_trylock_noop(unsigned long *flags) { }
#define pcp_trylock_prepare(flags) __pcp_trylock_noop(&(flags))
#define pcp_trylock_finish(flags) __pcp_trylock_noop(&(flags))
#else
#define pcp_trylock_prepare(flags) local_irq_save(flags)
#define pcp_trylock_finish(flags) local_irq_restore(flags)
#endif
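/*
 * Locking a pcp requires a per-CPU lookup followed by a spinlock.  To avoid
 * a migration causing the wrong pcp to be locked, pin the task to the CPU
 * for the lookup+lock: preempt_disable() on !PREEMPT_RT because it is
 * cheaper, migrate_disable() on PREEMPT_RT so a high-priority task can
 * still preempt the allocator.
 */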
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin() preempt_disable()
#define pcpu_task_unpin() preempt_enable()
#else
#define pcpu_task_pin() migrate_disable()
#define pcpu_task_unpin() migrate_enable()
#endif
#define pcpu_spin_trylock(type, member, ptr) \
({ \
type *_ret; \
pcpu_task_pin(); \
_ret = this_cpu_ptr(ptr); \
if (!spin_trylock(&_ret->member)) { \
pcpu_task_unpin(); \
_ret = NULL; \
} \
_ret; \
})
#define pcpu_spin_unlock(member, ptr) \
({ \
spin_unlock(&ptr->member); \
pcpu_task_unpin(); \
})
#define pcp_spin_trylock(ptr, UP_flags) \
({ \
struct per_cpu_pages *__ret; \
pcp_trylock_prepare(UP_flags); \
__ret = pcpu_spin_trylock(struct per_cpu_pages, lock, ptr); \
if (!__ret) \
pcp_trylock_finish(UP_flags); \
__ret; \
})
#define pcp_spin_unlock(ptr, UP_flags) \
({ \
pcpu_spin_unlock(lock, ptr); \
pcp_trylock_finish(UP_flags); \
})
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline void __flags_noop(unsigned long *flags) { }
#define pcp_spin_lock_maybe_irqsave(ptr, flags) \
({ \
__flags_noop(&(flags)); \
spin_lock(&(ptr)->lock); \
})
#define pcp_spin_unlock_maybe_irqrestore(ptr, flags) \
({ \
spin_unlock(&(ptr)->lock); \
__flags_noop(&(flags)); \
})
#else
#define pcp_spin_lock_maybe_irqsave(ptr, flags) \
spin_lock_irqsave(&(ptr)->lock, flags)
#define pcp_spin_unlock_maybe_irqrestore(ptr, flags) \
spin_unlock_irqrestore(&(ptr)->lock, flags)
#endif
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif
DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
DEFINE_PER_CPU(int, _numa_mem_);
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif
static DEFINE_MUTEX(pcpu_drain_mutex);
#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
[N_POSSIBLE] = NODE_MASK_ALL,
[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
[N_MEMORY] = { { [0] = 1UL } },
[N_CPU] = { { [0] = 1UL } },
#endif
};
EXPORT_SYMBOL(node_states);
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif
static void __free_pages_ok(struct page *page, unsigned int order,
fpi_t fpi_flags);
static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
[ZONE_DMA32] = 256,
#endif
[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
[ZONE_HIGHMEM] = 0,
#endif
[ZONE_MOVABLE] = 0,
};
char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
"DMA32",
#endif
"Normal",
#ifdef CONFIG_HIGHMEM
"HighMem",
#endif
"Movable",
#ifdef CONFIG_ZONE_DEVICE
"Device",
#endif
};
const char * const migratetype_names[MIGRATE_TYPES] = {
"Unmovable",
"Movable",
"Reclaimable",
"HighAtomic",
#ifdef CONFIG_CMA
"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
"Isolate",
#endif
};
int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
static int watermark_boost_factor __read_mostly = 15000;
static int watermark_scale_factor = 10;
int defrag_mode;
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
static bool page_contains_unaccepted(struct page *page, unsigned int order);
static bool cond_accept_memory(struct zone *zone, unsigned int order,
int alloc_flags);
static bool __free_unaccepted(struct page *page);
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DEFINE_STATIC_KEY_TRUE(deferred_pages);
static inline bool deferred_pages_enabled(void)
{
return static_branch_unlikely(&deferred_pages);
}
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
return deferred_grow_zone(zone, order);
}
#else
static inline bool deferred_pages_enabled(void)
{
return false;
}
static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
{
return false;
}
#endif
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
return section_to_usemap(__pfn_to_section(pfn));
#else
return page_zone(page)->pageblock_flags;
#endif
}
static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
pfn &= (PAGES_PER_SECTION-1);
#else
pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif
return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}
static __always_inline bool is_standalone_pb_bit(enum pageblock_bits pb_bit)
{
return pb_bit >= PB_compact_skip && pb_bit < __NR_PAGEBLOCK_BITS;
}
static __always_inline void
get_pfnblock_bitmap_bitidx(const struct page *page, unsigned long pfn,
unsigned long **bitmap_word, unsigned long *bitidx)
{
unsigned long *bitmap;
unsigned long word_bitidx;
#ifdef CONFIG_MEMORY_ISOLATION
BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8);
#else
BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
#endif
BUILD_BUG_ON(__MIGRATE_TYPE_END > MIGRATETYPE_MASK);
VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
bitmap = get_pageblock_bitmap(page, pfn);
*bitidx = pfn_to_bitidx(page, pfn);
word_bitidx = *bitidx / BITS_PER_LONG;
*bitidx &= (BITS_PER_LONG - 1);
*bitmap_word = &bitmap[word_bitidx];
}
static unsigned long __get_pfnblock_flags_mask(const struct page *page,
unsigned long pfn,
unsigned long mask)
{
unsigned long *bitmap_word;
unsigned long bitidx;
unsigned long word;
get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
word = READ_ONCE(*bitmap_word);
return (word >> bitidx) & mask;
}
bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
enum pageblock_bits pb_bit)
{
unsigned long *bitmap_word;
unsigned long bitidx;
if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
return false;
get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
return test_bit(bitidx + pb_bit, bitmap_word);
}
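/*
 * get_pfnblock_migratetype - return the migratetype of a pageblock
 * @page: the page within the block of interest
 * @pfn: the target page frame number
 *
 * An isolated pageblock reports MIGRATE_ISOLATE regardless of the
 * migratetype stored in the remaining bits.
 */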
__always_inline enum migratetype
get_pfnblock_migratetype(const struct page *page, unsigned long pfn)
{
unsigned long mask = MIGRATETYPE_AND_ISO_MASK;
unsigned long flags;
flags = __get_pfnblock_flags_mask(page, pfn, mask);
#ifdef CONFIG_MEMORY_ISOLATION
if (flags & BIT(PB_migrate_isolate))
return MIGRATE_ISOLATE;
#endif
return flags & MIGRATETYPE_MASK;
}
static void __set_pfnblock_flags_mask(struct page *page, unsigned long pfn,
unsigned long flags, unsigned long mask)
{
unsigned long *bitmap_word;
unsigned long bitidx;
unsigned long word;
get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
mask <<= bitidx;
flags <<= bitidx;
word = READ_ONCE(*bitmap_word);
do {
} while (!try_cmpxchg(bitmap_word, &word, (word & ~mask) | flags));
}
void set_pfnblock_bit(const struct page *page, unsigned long pfn,
enum pageblock_bits pb_bit)
{
unsigned long *bitmap_word;
unsigned long bitidx;
if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
return;
get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
set_bit(bitidx + pb_bit, bitmap_word);
}
void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
enum pageblock_bits pb_bit)
{
unsigned long *bitmap_word;
unsigned long bitidx;
if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
return;
get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
clear_bit(bitidx + pb_bit, bitmap_word);
}
static void set_pageblock_migratetype(struct page *page,
enum migratetype migratetype)
{
if (unlikely(page_group_by_mobility_disabled &&
migratetype < MIGRATE_PCPTYPES))
migratetype = MIGRATE_UNMOVABLE;
#ifdef CONFIG_MEMORY_ISOLATION
if (migratetype == MIGRATE_ISOLATE) {
VM_WARN_ONCE(1,
"Use set_pageblock_isolate() for pageblock isolation");
return;
}
VM_WARN_ONCE(get_pageblock_isolate(page),
"Use clear_pageblock_isolate() to unisolate pageblock");
#endif
__set_pfnblock_flags_mask(page, page_to_pfn(page),
(unsigned long)migratetype,
MIGRATETYPE_AND_ISO_MASK);
}
void __meminit init_pageblock_migratetype(struct page *page,
enum migratetype migratetype,
bool isolate)
{
unsigned long flags;
if (unlikely(page_group_by_mobility_disabled &&
migratetype < MIGRATE_PCPTYPES))
migratetype = MIGRATE_UNMOVABLE;
flags = migratetype;
#ifdef CONFIG_MEMORY_ISOLATION
if (migratetype == MIGRATE_ISOLATE) {
		VM_WARN_ONCE(1,
			     "Set isolate=true to isolate pageblock with a migratetype");
return;
}
if (isolate)
flags |= BIT(PB_migrate_isolate);
#endif
__set_pfnblock_flags_mask(page, page_to_pfn(page), flags,
MIGRATETYPE_AND_ISO_MASK);
}
#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
int ret;
unsigned seq;
unsigned long pfn = page_to_pfn(page);
unsigned long sp, start_pfn;
do {
seq = zone_span_seqbegin(zone);
start_pfn = zone->zone_start_pfn;
sp = zone->spanned_pages;
ret = !zone_spans_pfn(zone, pfn);
} while (zone_span_seqretry(zone, seq));
if (ret)
pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
pfn, zone_to_nid(zone), zone->name,
start_pfn, start_pfn + sp);
return ret;
}
static bool __maybe_unused bad_range(struct zone *zone, struct page *page)
{
if (page_outside_zone_boundaries(zone, page))
return true;
if (zone != page_zone(page))
return true;
return false;
}
#else
static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page)
{
return false;
}
#endif
static void bad_page(struct page *page, const char *reason)
{
static unsigned long resume;
static unsigned long nr_shown;
static unsigned long nr_unshown;
if (nr_shown == 60) {
if (time_before(jiffies, resume)) {
nr_unshown++;
goto out;
}
if (nr_unshown) {
pr_alert(
"BUG: Bad page state: %lu messages suppressed\n",
nr_unshown);
nr_unshown = 0;
}
nr_shown = 0;
}
if (nr_shown++ == 0)
resume = jiffies + 60 * HZ;
pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
current->comm, page_to_pfn(page));
dump_page(page, reason);
print_modules();
dump_stack();
out:
if (PageBuddy(page))
__ClearPageBuddy(page);
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
static inline unsigned int order_to_pindex(int migratetype, int order)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool movable;
if (order > PAGE_ALLOC_COSTLY_ORDER) {
VM_BUG_ON(order != HPAGE_PMD_ORDER);
movable = migratetype == MIGRATE_MOVABLE;
return NR_LOWORDER_PCP_LISTS + movable;
}
#else
VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif
return (MIGRATE_PCPTYPES * order) + migratetype;
}
static inline int pindex_to_order(unsigned int pindex)
{
int order = pindex / MIGRATE_PCPTYPES;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (pindex >= NR_LOWORDER_PCP_LISTS)
order = HPAGE_PMD_ORDER;
#else
VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif
return order;
}
static inline bool pcp_allowed_order(unsigned int order)
{
if (order <= PAGE_ALLOC_COSTLY_ORDER)
return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (order == HPAGE_PMD_ORDER)
return true;
#endif
return false;
}
void prep_compound_page(struct page *page, unsigned int order)
{
int i;
int nr_pages = 1 << order;
__SetPageHead(page);
for (i = 1; i < nr_pages; i++)
prep_compound_tail(page, i);
prep_compound_head(page, order);
}
static inline void set_buddy_order(struct page *page, unsigned int order)
{
set_page_private(page, order);
__SetPageBuddy(page);
}
#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
struct capture_control *capc = current->capture_control;
return unlikely(capc) &&
!(current->flags & PF_KTHREAD) &&
!capc->page &&
capc->cc->zone == zone ? capc : NULL;
}
static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
int order, int migratetype)
{
if (!capc || order != capc->cc->order)
return false;
if (is_migrate_cma(migratetype) ||
is_migrate_isolate(migratetype))
return false;
if (order < pageblock_order && migratetype == MIGRATE_MOVABLE &&
capc->cc->migratetype != MIGRATE_MOVABLE)
return false;
if (migratetype != capc->cc->migratetype)
trace_mm_page_alloc_extfrag(page, capc->cc->order, order,
capc->cc->migratetype, migratetype);
capc->page = page;
return true;
}
#else
static inline struct capture_control *task_capc(struct zone *zone)
{
return NULL;
}
static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
int order, int migratetype)
{
return false;
}
#endif
static inline void account_freepages(struct zone *zone, int nr_pages,
int migratetype)
{
lockdep_assert_held(&zone->lock);
if (is_migrate_isolate(migratetype))
return;
__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
if (is_migrate_cma(migratetype))
__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
else if (migratetype == MIGRATE_HIGHATOMIC)
WRITE_ONCE(zone->nr_free_highatomic,
zone->nr_free_highatomic + nr_pages);
}
static inline void __add_to_free_list(struct page *page, struct zone *zone,
unsigned int order, int migratetype,
bool tail)
{
struct free_area *area = &zone->free_area[order];
int nr_pages = 1 << order;
VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
"page type is %d, passed migratetype is %d (nr=%d)\n",
get_pageblock_migratetype(page), migratetype, nr_pages);
if (tail)
list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
else
list_add(&page->buddy_list, &area->free_list[migratetype]);
area->nr_free++;
if (order >= pageblock_order && !is_migrate_isolate(migratetype))
__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
}
static inline void move_to_free_list(struct page *page, struct zone *zone,
unsigned int order, int old_mt, int new_mt)
{
struct free_area *area = &zone->free_area[order];
int nr_pages = 1 << order;
VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt,
"page type is %d, passed migratetype is %d (nr=%d)\n",
get_pageblock_migratetype(page), old_mt, nr_pages);
list_move_tail(&page->buddy_list, &area->free_list[new_mt]);
account_freepages(zone, -nr_pages, old_mt);
account_freepages(zone, nr_pages, new_mt);
if (order >= pageblock_order &&
is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) {
if (!is_migrate_isolate(old_mt))
nr_pages = -nr_pages;
__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
}
}
static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
unsigned int order, int migratetype)
{
int nr_pages = 1 << order;
VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
"page type is %d, passed migratetype is %d (nr=%d)\n",
get_pageblock_migratetype(page), migratetype, nr_pages);
if (page_reported(page))
__ClearPageReported(page);
list_del(&page->buddy_list);
__ClearPageBuddy(page);
set_page_private(page, 0);
zone->free_area[order].nr_free--;
if (order >= pageblock_order && !is_migrate_isolate(migratetype))
__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages);
}
static inline void del_page_from_free_list(struct page *page, struct zone *zone,
unsigned int order, int migratetype)
{
__del_page_from_free_list(page, zone, order, migratetype);
account_freepages(zone, -(1 << order), migratetype);
}
static inline struct page *get_page_from_free_area(struct free_area *area,
int migratetype)
{
return list_first_entry_or_null(&area->free_list[migratetype],
struct page, buddy_list);
}
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
struct page *page, unsigned int order)
{
unsigned long higher_page_pfn;
struct page *higher_page;
if (order >= MAX_PAGE_ORDER - 1)
return false;
higher_page_pfn = buddy_pfn & pfn;
higher_page = page + (higher_page_pfn - pfn);
return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
NULL) != NULL;
}
static void change_pageblock_range(struct page *pageblock_page,
int start_order, int migratetype)
{
int nr_pageblocks = 1 << (start_order - pageblock_order);
while (nr_pageblocks--) {
set_pageblock_migratetype(pageblock_page, migratetype);
pageblock_page += pageblock_nr_pages;
}
}
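/*
 * Freeing function for a buddy system allocator.  Mark the page free at the
 * current order, then repeatedly look for a free buddy of the same size and
 * merge with it, moving up one order each time, until MAX_PAGE_ORDER is
 * reached, no buddy is found, or compaction captures the page.  Finally put
 * the merged page on the matching free list, at the head or tail depending
 * on the FPI flags, shuffling, and whether a further merge looks likely.
 */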
static inline void __free_one_page(struct page *page,
unsigned long pfn,
struct zone *zone, unsigned int order,
int migratetype, fpi_t fpi_flags)
{
struct capture_control *capc = task_capc(zone);
unsigned long buddy_pfn = 0;
unsigned long combined_pfn;
struct page *buddy;
bool to_tail;
VM_BUG_ON(!zone_is_initialized(zone));
VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page);
VM_BUG_ON(migratetype == -1);
VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
account_freepages(zone, 1 << order, migratetype);
while (order < MAX_PAGE_ORDER) {
int buddy_mt = migratetype;
if (compaction_capture(capc, page, order, migratetype)) {
account_freepages(zone, -(1 << order), migratetype);
return;
}
buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
if (!buddy)
goto done_merging;
if (unlikely(order >= pageblock_order)) {
buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);
if (migratetype != buddy_mt &&
(!migratetype_is_mergeable(migratetype) ||
!migratetype_is_mergeable(buddy_mt)))
goto done_merging;
}
if (page_is_guard(buddy))
clear_page_guard(zone, buddy, order);
else
__del_page_from_free_list(buddy, zone, order, buddy_mt);
if (unlikely(buddy_mt != migratetype)) {
change_pageblock_range(buddy, order, migratetype);
}
combined_pfn = buddy_pfn & pfn;
page = page + (combined_pfn - pfn);
pfn = combined_pfn;
order++;
}
done_merging:
set_buddy_order(page, order);
if (fpi_flags & FPI_TO_TAIL)
to_tail = true;
else if (is_shuffle_order(order))
to_tail = shuffle_pick_tail();
else
to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
__add_to_free_list(page, zone, order, migratetype, to_tail);
if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
page_reporting_notify_free(order);
}
static inline bool page_expected_state(struct page *page,
unsigned long check_flags)
{
if (unlikely(atomic_read(&page->_mapcount) != -1))
return false;
if (unlikely((unsigned long)page->mapping |
page_ref_count(page) |
#ifdef CONFIG_MEMCG
page->memcg_data |
#endif
page_pool_page_is_pp(page) |
(page->flags.f & check_flags)))
return false;
return true;
}
static const char *page_bad_reason(struct page *page, unsigned long flags)
{
const char *bad_reason = NULL;
if (unlikely(atomic_read(&page->_mapcount) != -1))
bad_reason = "nonzero mapcount";
if (unlikely(page->mapping != NULL))
bad_reason = "non-NULL mapping";
if (unlikely(page_ref_count(page) != 0))
bad_reason = "nonzero _refcount";
if (unlikely(page->flags.f & flags)) {
if (flags == PAGE_FLAGS_CHECK_AT_PREP)
bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
else
bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
}
#ifdef CONFIG_MEMCG
if (unlikely(page->memcg_data))
bad_reason = "page still charged to cgroup";
#endif
if (unlikely(page_pool_page_is_pp(page)))
bad_reason = "page_pool leak";
return bad_reason;
}
static inline bool free_page_is_bad(struct page *page)
{
if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
return false;
bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
return true;
}
static inline bool is_check_pages_enabled(void)
{
return static_branch_unlikely(&check_pages_enabled);
}
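/*
 * Sanity-check the fields that tail pages of a compound page overlay
 * (mapcounts, deferred list, hugetlb metadata) before the page is freed,
 * then clear ->mapping and the compound_head link.  Returns non-zero if the
 * tail page was found in a bad state.
 */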
static int free_tail_page_prepare(struct page *head_page, struct page *page)
{
struct folio *folio = (struct folio *)head_page;
int ret = 1;
BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
if (!is_check_pages_enabled()) {
ret = 0;
goto out;
}
switch (page - head_page) {
case 1:
if (unlikely(folio_large_mapcount(folio))) {
bad_page(page, "nonzero large_mapcount");
goto out;
}
if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) &&
unlikely(atomic_read(&folio->_nr_pages_mapped))) {
bad_page(page, "nonzero nr_pages_mapped");
goto out;
}
if (IS_ENABLED(CONFIG_MM_ID)) {
if (unlikely(folio->_mm_id_mapcount[0] != -1)) {
bad_page(page, "nonzero mm mapcount 0");
goto out;
}
if (unlikely(folio->_mm_id_mapcount[1] != -1)) {
bad_page(page, "nonzero mm mapcount 1");
goto out;
}
}
if (IS_ENABLED(CONFIG_64BIT)) {
if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
bad_page(page, "nonzero entire_mapcount");
goto out;
}
if (unlikely(atomic_read(&folio->_pincount))) {
bad_page(page, "nonzero pincount");
goto out;
}
}
break;
case 2:
if (unlikely(!list_empty(&folio->_deferred_list))) {
bad_page(page, "on deferred list");
goto out;
}
if (!IS_ENABLED(CONFIG_64BIT)) {
if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
bad_page(page, "nonzero entire_mapcount");
goto out;
}
if (unlikely(atomic_read(&folio->_pincount))) {
bad_page(page, "nonzero pincount");
goto out;
}
}
break;
case 3:
if (IS_ENABLED(CONFIG_HUGETLB_PAGE))
break;
fallthrough;
default:
if (page->mapping != TAIL_MAPPING) {
bad_page(page, "corrupted mapping in tail page");
goto out;
}
break;
}
if (unlikely(!PageTail(page))) {
bad_page(page, "PageTail not set");
goto out;
}
if (unlikely(compound_head(page) != head_page)) {
bad_page(page, "compound_head not consistent");
goto out;
}
ret = 0;
out:
page->mapping = NULL;
clear_compound_head(page);
return ret;
}
static inline bool should_skip_kasan_poison(struct page *page)
{
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
return deferred_pages_enabled();
return page_kasan_tag(page) == KASAN_TAG_KERNEL;
}
static void kernel_init_pages(struct page *page, int numpages)
{
int i;
kasan_disable_current();
for (i = 0; i < numpages; i++)
clear_highpage_kasan_tagged(page + i);
kasan_enable_current();
}
#ifdef CONFIG_MEM_ALLOC_PROFILING
void __clear_page_tag_ref(struct page *page)
{
union pgtag_ref_handle handle;
union codetag_ref ref;
if (get_page_tag_ref(page, &ref, &handle)) {
set_codetag_empty(&ref);
update_page_tag_ref(handle, &ref);
put_page_tag_ref(handle);
}
}
static noinline
void __pgalloc_tag_add(struct page *page, struct task_struct *task,
unsigned int nr)
{
union pgtag_ref_handle handle;
union codetag_ref ref;
if (get_page_tag_ref(page, &ref, &handle)) {
alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
update_page_tag_ref(handle, &ref);
put_page_tag_ref(handle);
}
}
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
unsigned int nr)
{
if (mem_alloc_profiling_enabled())
__pgalloc_tag_add(page, task, nr);
}
static noinline
void __pgalloc_tag_sub(struct page *page, unsigned int nr)
{
union pgtag_ref_handle handle;
union codetag_ref ref;
if (get_page_tag_ref(page, &ref, &handle)) {
alloc_tag_sub(&ref, PAGE_SIZE * nr);
update_page_tag_ref(handle, &ref);
put_page_tag_ref(handle);
}
}
static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
{
if (mem_alloc_profiling_enabled())
__pgalloc_tag_sub(page, nr);
}
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
{
if (tag)
this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
}
#else
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
unsigned int nr) {}
static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
#endif
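/*
 * Prepare a page for freeing back to the allocator: uncharge kmem, clear
 * mlock state, validate the head and tail pages, reset flags and owner
 * tracking, then poison/zero the contents as configured.  Returns false if
 * the page must not be placed on a free list (bad state or hwpoison).
 */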
__always_inline bool __free_pages_prepare(struct page *page,
unsigned int order, fpi_t fpi_flags)
{
int bad = 0;
bool skip_kasan_poison = should_skip_kasan_poison(page);
bool init = want_init_on_free();
bool compound = PageCompound(page);
struct folio *folio = page_folio(page);
VM_BUG_ON_PAGE(PageTail(page), page);
trace_mm_page_free(page, order);
kmsan_free_page(page, order);
if (memcg_kmem_online() && PageMemcgKmem(page))
__memcg_kmem_uncharge_page(page, order);
if (unlikely(folio_test_mlocked(folio))) {
long nr_pages = folio_nr_pages(folio);
__folio_clear_mlocked(folio);
zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
}
if (unlikely(PageHWPoison(page)) && !order) {
reset_page_owner(page, order);
page_table_check_free(page, order);
pgalloc_tag_sub(page, 1 << order);
clear_page_tag_ref(page);
return false;
}
VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
if (unlikely(order)) {
int i;
if (compound) {
page[1].flags.f &= ~PAGE_FLAGS_SECOND;
#ifdef NR_PAGES_IN_LARGE_FOLIO
folio->_nr_pages = 0;
#endif
}
for (i = 1; i < (1 << order); i++) {
if (compound)
bad += free_tail_page_prepare(page, page + i);
if (is_check_pages_enabled()) {
if (free_page_is_bad(page + i)) {
bad++;
continue;
}
}
(page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
}
}
if (folio_test_anon(folio)) {
mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
folio->mapping = NULL;
}
if (unlikely(page_has_type(page)))
page->page_type = UINT_MAX;
if (is_check_pages_enabled()) {
if (free_page_is_bad(page))
bad++;
if (bad)
return false;
}
page_cpupid_reset_last(page);
page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
page->private = 0;
reset_page_owner(page, order);
page_table_check_free(page, order);
pgalloc_tag_sub(page, 1 << order);
if (!PageHighMem(page) && !(fpi_flags & FPI_TRYLOCK)) {
debug_check_no_locks_freed(page_address(page),
PAGE_SIZE << order);
debug_check_no_obj_freed(page_address(page),
PAGE_SIZE << order);
}
kernel_poison_pages(page, 1 << order);
if (!skip_kasan_poison) {
kasan_poison_pages(page, order, init);
if (kasan_has_integrated_init())
init = false;
}
if (init)
kernel_init_pages(page, 1 << order);
arch_free_page(page, order);
debug_pagealloc_unmap_pages(page, 1 << order);
return true;
}
bool free_pages_prepare(struct page *page, unsigned int order)
{
return __free_pages_prepare(page, order, FPI_NONE);
}
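/*
 * Free a number of pages from the PCP lists back to the buddy allocator.
 * All pages are assumed to be in the same zone; count is clamped to
 * pcp->count, and the requested pindex is drained first.
 */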
static void free_pcppages_bulk(struct zone *zone, int count,
struct per_cpu_pages *pcp,
int pindex)
{
unsigned long flags;
unsigned int order;
struct page *page;
count = min(pcp->count, count);
pindex = pindex - 1;
spin_lock_irqsave(&zone->lock, flags);
while (count > 0) {
struct list_head *list;
int nr_pages;
do {
if (++pindex > NR_PCP_LISTS - 1)
pindex = 0;
list = &pcp->lists[pindex];
} while (list_empty(list));
order = pindex_to_order(pindex);
nr_pages = 1 << order;
do {
unsigned long pfn;
int mt;
page = list_last_entry(list, struct page, pcp_list);
pfn = page_to_pfn(page);
mt = get_pfnblock_migratetype(page, pfn);
list_del(&page->pcp_list);
count -= nr_pages;
pcp->count -= nr_pages;
__free_one_page(page, pfn, zone, order, mt, FPI_NONE);
trace_mm_page_pcpu_drain(page, order, mt);
} while (count > 0 && !list_empty(list));
}
spin_unlock_irqrestore(&zone->lock, flags);
}
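/* Split a multi-block free page into its individual pageblocks. */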
static void split_large_buddy(struct zone *zone, struct page *page,
unsigned long pfn, int order, fpi_t fpi)
{
unsigned long end = pfn + (1 << order);
VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order));
VM_WARN_ON_ONCE(PageBuddy(page));
if (order > pageblock_order)
order = pageblock_order;
do {
int mt = get_pfnblock_migratetype(page, pfn);
__free_one_page(page, pfn, zone, order, mt, fpi);
pfn += 1 << order;
if (pfn == end)
break;
page = pfn_to_page(pfn);
} while (1);
}
static void add_page_to_zone_llist(struct zone *zone, struct page *page,
unsigned int order)
{
page->private = order;
llist_add(&page->pcp_llist, &zone->trylock_free_pages);
}
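/*
 * Free one page directly to the zone's free lists.  With FPI_TRYLOCK the
 * zone lock is only tried; on contention the page is deferred onto the
 * zone's trylock_free_pages llist and flushed by a later caller that does
 * acquire the lock.
 */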
static void free_one_page(struct zone *zone, struct page *page,
unsigned long pfn, unsigned int order,
fpi_t fpi_flags)
{
struct llist_head *llhead;
unsigned long flags;
if (unlikely(fpi_flags & FPI_TRYLOCK)) {
if (!spin_trylock_irqsave(&zone->lock, flags)) {
add_page_to_zone_llist(zone, page, order);
return;
}
} else {
spin_lock_irqsave(&zone->lock, flags);
}
llhead = &zone->trylock_free_pages;
if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK))) {
struct llist_node *llnode;
struct page *p, *tmp;
llnode = llist_del_all(llhead);
llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) {
unsigned int p_order = p->private;
split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags);
__count_vm_events(PGFREE, 1 << p_order);
}
}
split_large_buddy(zone, page, pfn, order, fpi_flags);
spin_unlock_irqrestore(&zone->lock, flags);
__count_vm_events(PGFREE, 1 << order);
}
static void __free_pages_ok(struct page *page, unsigned int order,
fpi_t fpi_flags)
{
unsigned long pfn = page_to_pfn(page);
struct zone *zone = page_zone(page);
if (__free_pages_prepare(page, order, fpi_flags))
free_one_page(zone, page, pfn, order, fpi_flags);
}
void __meminit __free_pages_core(struct page *page, unsigned int order,
enum meminit_context context)
{
unsigned int nr_pages = 1 << order;
struct page *p = page;
unsigned int loop;
if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
unlikely(context == MEMINIT_HOTPLUG)) {
for (loop = 0; loop < nr_pages; loop++, p++) {
VM_WARN_ON_ONCE(PageReserved(p));
__ClearPageOffline(p);
set_page_count(p, 0);
}
adjust_managed_page_count(page, nr_pages);
} else {
for (loop = 0; loop < nr_pages; loop++, p++) {
__ClearPageReserved(p);
set_page_count(p, 0);
}
atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
}
if (page_contains_unaccepted(page, order)) {
if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
return;
accept_memory(page_to_phys(page), PAGE_SIZE << order);
}
__free_pages_ok(page, order, FPI_TO_TAIL);
}
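/*
 * Check that the whole (or subset of a) pageblock given by the interval
 * [start_pfn, end_pfn) is valid and lies within the same zone, before the
 * migrate/free scanners walk it.  Return the start page on success.
 */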
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
unsigned long end_pfn, struct zone *zone)
{
struct page *start_page;
struct page *end_page;
end_pfn--;
if (!pfn_valid(end_pfn))
return NULL;
start_page = pfn_to_online_page(start_pfn);
if (!start_page)
return NULL;
if (page_zone(start_page) != zone)
return NULL;
end_page = pfn_to_page(end_pfn);
if (page_zone_id(start_page) != page_zone_id(end_page))
return NULL;
return start_page;
}
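/*
 * Subdivide a higher-order free block: split off and return the unused
 * halves to the free lists at successively smaller orders until only the
 * requested (low) order remains allocated.  Returns the number of pages
 * added back to the free lists.
 */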
static inline unsigned int expand(struct zone *zone, struct page *page, int low,
int high, int migratetype)
{
unsigned int size = 1 << high;
unsigned int nr_added = 0;
while (high > low) {
high--;
size >>= 1;
VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
if (set_page_guard(zone, &page[size], high))
continue;
__add_to_free_list(&page[size], zone, high, migratetype, false);
set_buddy_order(&page[size], high);
nr_added += size;
}
return nr_added;
}
static __always_inline void page_del_and_expand(struct zone *zone,
struct page *page, int low,
int high, int migratetype)
{
int nr_pages = 1 << high;
__del_page_from_free_list(page, zone, high, migratetype);
nr_pages -= expand(zone, page, low, high, migratetype);
account_freepages(zone, -nr_pages, migratetype);
}
static void check_new_page_bad(struct page *page)
{
if (unlikely(PageHWPoison(page))) {
if (PageBuddy(page))
__ClearPageBuddy(page);
return;
}
bad_page(page,
page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
}
static bool check_new_page(struct page *page)
{
if (likely(page_expected_state(page,
PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
return false;
check_new_page_bad(page);
return true;
}
static inline bool check_new_pages(struct page *page, unsigned int order)
{
if (is_check_pages_enabled()) {
for (int i = 0; i < (1 << order); i++) {
struct page *p = page + i;
if (check_new_page(p))
return true;
}
}
return false;
}
static inline bool should_skip_kasan_unpoison(gfp_t flags)
{
if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
IS_ENABLED(CONFIG_KASAN_SW_TAGS))
return false;
if (!kasan_hw_tags_enabled())
return true;
return flags & __GFP_SKIP_KASAN;
}
static inline bool should_skip_init(gfp_t flags)
{
if (!kasan_hw_tags_enabled())
return false;
return (flags & __GFP_SKIP_ZERO);
}
inline void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags)
{
bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
!should_skip_init(gfp_flags);
bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
int i;
set_page_private(page, 0);
arch_alloc_page(page, order);
debug_pagealloc_map_pages(page, 1 << order);
kernel_unpoison_pages(page, 1 << order);
if (zero_tags)
init = !tag_clear_highpages(page, 1 << order);
if (!should_skip_kasan_unpoison(gfp_flags) &&
kasan_unpoison_pages(page, order, init)) {
if (kasan_has_integrated_init())
init = false;
} else {
for (i = 0; i != 1 << order; ++i)
page_kasan_tag_reset(page + i);
}
if (init)
kernel_init_pages(page, 1 << order);
set_page_owner(page, order, gfp_flags);
page_table_check_alloc(page, order);
pgalloc_tag_add(page, current, 1 << order);
}
static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
unsigned int alloc_flags)
{
post_alloc_hook(page, order, gfp_flags);
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
if (alloc_flags & ALLOC_NO_WATERMARKS)
set_page_pfmemalloc(page);
else
clear_page_pfmemalloc(page);
}
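/*
 * Go through the free lists for the given migratetype and remove the
 * smallest available page from the freelists, splitting it as necessary.
 */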
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
int migratetype)
{
unsigned int current_order;
struct free_area *area;
struct page *page;
for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
area = &(zone->free_area[current_order]);
page = get_page_from_free_area(area, migratetype);
if (!page)
continue;
page_del_and_expand(zone, page, order, current_order,
migratetype);
trace_mm_page_alloc_zone_locked(page, order, migratetype,
pcp_allowed_order(order) &&
migratetype < MIGRATE_PCPTYPES);
return page;
}
return NULL;
}
static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = {
[MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
[MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
};
#ifdef CONFIG_CMA
static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
unsigned int order)
{
return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
unsigned int order) { return NULL; }
#endif
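/*
 * Move all free pages of a pageblock from one migratetype free list to
 * another.  The caller must hold the zone lock; returns the number of base
 * pages moved.
 */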
static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
int old_mt, int new_mt)
{
struct page *page;
unsigned long pfn, end_pfn;
unsigned int order;
int pages_moved = 0;
VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
end_pfn = pageblock_end_pfn(start_pfn);
for (pfn = start_pfn; pfn < end_pfn;) {
page = pfn_to_page(pfn);
if (!PageBuddy(page)) {
pfn++;
continue;
}
VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
VM_BUG_ON_PAGE(page_zone(page) != zone, page);
order = buddy_order(page);
move_to_free_list(page, zone, order, old_mt, new_mt);
pfn += 1 << order;
pages_moved += 1 << order;
}
return pages_moved;
}
static bool prep_move_freepages_block(struct zone *zone, struct page *page,
unsigned long *start_pfn,
int *num_free, int *num_movable)
{
unsigned long pfn, start, end;
pfn = page_to_pfn(page);
start = pageblock_start_pfn(pfn);
end = pageblock_end_pfn(pfn);
if (!zone_spans_pfn(zone, start))
return false;
if (!zone_spans_pfn(zone, end - 1))
return false;
*start_pfn = start;
if (num_free) {
*num_free = 0;
*num_movable = 0;
for (pfn = start; pfn < end;) {
page = pfn_to_page(pfn);
if (PageBuddy(page)) {
int nr = 1 << buddy_order(page);
*num_free += nr;
pfn += nr;
continue;
}
if (PageLRU(page) || page_has_movable_ops(page))
(*num_movable)++;
pfn++;
}
}
return true;
}
static int move_freepages_block(struct zone *zone, struct page *page,
int old_mt, int new_mt)
{
unsigned long start_pfn;
int res;
if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
return -1;
res = __move_freepages_block(zone, start_pfn, old_mt, new_mt);
set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
return res;
}
#ifdef CONFIG_MEMORY_ISOLATION
static unsigned long find_large_buddy(unsigned long start_pfn)
{
int order = start_pfn ? __ffs(start_pfn) : MAX_PAGE_ORDER;
struct page *page;
unsigned long pfn = start_pfn;
while (!PageBuddy(page = pfn_to_page(pfn))) {
if (++order > MAX_PAGE_ORDER)
return start_pfn;
pfn &= ~0UL << order;
}
if (pfn + (1 << buddy_order(page)) > start_pfn)
return pfn;
return start_pfn;
}
static inline void toggle_pageblock_isolate(struct page *page, bool isolate)
{
if (isolate)
set_pageblock_isolate(page);
else
clear_pageblock_isolate(page);
}
static bool __move_freepages_block_isolate(struct zone *zone,
struct page *page, bool isolate)
{
unsigned long start_pfn, buddy_pfn;
int from_mt;
int to_mt;
struct page *buddy;
if (isolate == get_pageblock_isolate(page)) {
VM_WARN_ONCE(1, "%s a pageblock that is already in that state",
isolate ? "Isolate" : "Unisolate");
return false;
}
if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
return false;
if (pageblock_order == MAX_PAGE_ORDER)
goto move;
buddy_pfn = find_large_buddy(start_pfn);
buddy = pfn_to_page(buddy_pfn);
if (PageBuddy(buddy) && buddy_order(buddy) > pageblock_order) {
int order = buddy_order(buddy);
del_page_from_free_list(buddy, zone, order,
get_pfnblock_migratetype(buddy, buddy_pfn));
toggle_pageblock_isolate(page, isolate);
split_large_buddy(zone, buddy, buddy_pfn, order, FPI_NONE);
return true;
}
move:
if (isolate) {
from_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page),
MIGRATETYPE_MASK);
to_mt = MIGRATE_ISOLATE;
} else {
from_mt = MIGRATE_ISOLATE;
to_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page),
MIGRATETYPE_MASK);
}
__move_freepages_block(zone, start_pfn, from_mt, to_mt);
toggle_pageblock_isolate(pfn_to_page(start_pfn), isolate);
return true;
}
bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page)
{
return __move_freepages_block_isolate(zone, page, true);
}
bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page)
{
return __move_freepages_block_isolate(zone, page, false);
}
#endif
static inline bool boost_watermark(struct zone *zone)
{
unsigned long max_boost;
if (!watermark_boost_factor)
return false;
if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
return false;
max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
watermark_boost_factor, 10000);
if (!max_boost)
return false;
max_boost = max(pageblock_nr_pages, max_boost);
zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
max_boost);
return true;
}
static bool should_try_claim_block(unsigned int order, int start_mt)
{
if (order >= pageblock_order)
return true;
if (order >= pageblock_order / 2)
return true;
if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE)
return true;
if (page_group_by_mobility_disabled)
return true;
return false;
}
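/*
 * Check whether a free area has a suitable fallback migratetype for the
 * requested order.  Returns the fallback migratetype, -1 if none is
 * available, or -2 if claiming a whole block is not worthwhile.
 */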
int find_suitable_fallback(struct free_area *area, unsigned int order,
int migratetype, bool claimable)
{
int i;
if (claimable && !should_try_claim_block(order, migratetype))
return -2;
if (area->nr_free == 0)
return -1;
for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
int fallback_mt = fallbacks[migratetype][i];
if (!free_area_empty(area, fallback_mt))
return fallback_mt;
}
return -1;
}
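/*
 * Try to claim a whole pageblock (or larger range) for the requested
 * migratetype when falling back.  The block is converted if enough of its
 * pages are free or of compatible mobility; otherwise NULL is returned and
 * the caller may steal a single page instead.
 */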
static struct page *
try_to_claim_block(struct zone *zone, struct page *page,
int current_order, int order, int start_type,
int block_type, unsigned int alloc_flags)
{
int free_pages, movable_pages, alike_pages;
unsigned long start_pfn;
if (current_order >= pageblock_order) {
unsigned int nr_added;
del_page_from_free_list(page, zone, current_order, block_type);
change_pageblock_range(page, current_order, start_type);
nr_added = expand(zone, page, order, current_order, start_type);
account_freepages(zone, nr_added, start_type);
return page;
}
if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
&movable_pages))
return NULL;
if (start_type == MIGRATE_MOVABLE) {
alike_pages = movable_pages;
} else {
if (block_type == MIGRATE_MOVABLE)
alike_pages = pageblock_nr_pages
- (free_pages + movable_pages);
else
alike_pages = 0;
}
if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
page_group_by_mobility_disabled) {
__move_freepages_block(zone, start_pfn, block_type, start_type);
set_pageblock_migratetype(pfn_to_page(start_pfn), start_type);
return __rmqueue_smallest(zone, order, start_type);
}
return NULL;
}
static __always_inline struct page *
__rmqueue_claim(struct zone *zone, int order, int start_migratetype,
unsigned int alloc_flags)
{
struct free_area *area;
int current_order;
int min_order = order;
struct page *page;
int fallback_mt;
if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
min_order = pageblock_order;
for (current_order = MAX_PAGE_ORDER; current_order >= min_order;
--current_order) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
start_migratetype, true);
if (fallback_mt == -1)
continue;
if (fallback_mt == -2)
break;
page = get_page_from_free_area(area, fallback_mt);
page = try_to_claim_block(zone, page, current_order, order,
start_migratetype, fallback_mt,
alloc_flags);
if (page) {
trace_mm_page_alloc_extfrag(page, order, current_order,
start_migratetype, fallback_mt);
return page;
}
}
return NULL;
}
static __always_inline struct page *
__rmqueue_steal(struct zone *zone, int order, int start_migratetype)
{
struct free_area *area;
int current_order;
struct page *page;
int fallback_mt;
for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
start_migratetype, false);
if (fallback_mt == -1)
continue;
page = get_page_from_free_area(area, fallback_mt);
page_del_and_expand(zone, page, order, current_order, fallback_mt);
trace_mm_page_alloc_extfrag(page, order, current_order,
start_migratetype, fallback_mt);
return page;
}
return NULL;
}
enum rmqueue_mode {
RMQUEUE_NORMAL,
RMQUEUE_CMA,
RMQUEUE_CLAIM,
RMQUEUE_STEAL,
};
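/*
 * Do the hard work of removing an element from the buddy allocator: try the
 * preferred migratetype first, then CMA, then claiming a foreign block, and
 * finally stealing a single page.  *mode remembers how far the previous call
 * had to go, so rmqueue_bulk() can skip straight there on the next call.
 */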
static __always_inline struct page *
__rmqueue(struct zone *zone, unsigned int order, int migratetype,
unsigned int alloc_flags, enum rmqueue_mode *mode)
{
struct page *page;
if (IS_ENABLED(CONFIG_CMA)) {
if (alloc_flags & ALLOC_CMA &&
zone_page_state(zone, NR_FREE_CMA_PAGES) >
zone_page_state(zone, NR_FREE_PAGES) / 2) {
page = __rmqueue_cma_fallback(zone, order);
if (page)
return page;
}
}
switch (*mode) {
case RMQUEUE_NORMAL:
page = __rmqueue_smallest(zone, order, migratetype);
if (page)
return page;
fallthrough;
case RMQUEUE_CMA:
if (alloc_flags & ALLOC_CMA) {
page = __rmqueue_cma_fallback(zone, order);
if (page) {
*mode = RMQUEUE_CMA;
return page;
}
}
fallthrough;
case RMQUEUE_CLAIM:
page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
if (page) {
*mode = RMQUEUE_NORMAL;
return page;
}
fallthrough;
case RMQUEUE_STEAL:
if (!(alloc_flags & ALLOC_NOFRAGMENT)) {
page = __rmqueue_steal(zone, order, migratetype);
if (page) {
*mode = RMQUEUE_STEAL;
return page;
}
}
}
return NULL;
}
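/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the zone lock, and add them to the supplied pcp list.
 * Returns the number of new pages which were placed on the list.
 */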
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
int migratetype, unsigned int alloc_flags)
{
enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
unsigned long flags;
int i;
if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
if (!spin_trylock_irqsave(&zone->lock, flags))
return 0;
} else {
spin_lock_irqsave(&zone->lock, flags);
}
for (i = 0; i < count; ++i) {
struct page *page = __rmqueue(zone, order, migratetype,
alloc_flags, &rmqm);
if (unlikely(page == NULL))
break;
list_add_tail(&page->pcp_list, list);
}
spin_unlock_irqrestore(&zone->lock, flags);
return i;
}
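/*
 * Called from the vmstat counter updater to periodically decay the pcp high
 * limit and drain the excess pages in batches.  Returns whether there is
 * more work to do.
 */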
bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
{
int high_min, to_drain, to_drain_batched, batch;
unsigned long UP_flags;
bool todo = false;
high_min = READ_ONCE(pcp->high_min);
batch = READ_ONCE(pcp->batch);
if (pcp->high > high_min) {
pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
pcp->high - (pcp->high >> 3), high_min);
if (pcp->high > high_min)
todo = true;
}
to_drain = pcp->count - pcp->high;
while (to_drain > 0) {
to_drain_batched = min(to_drain, batch);
pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
free_pcppages_bulk(zone, to_drain_batched, pcp, 0);
pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
todo = true;
to_drain -= to_drain_batched;
}
return todo;
}
#ifdef CONFIG_NUMA
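/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have expired.
 */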
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long UP_flags;
int to_drain, batch;
batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch);
if (to_drain > 0) {
pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
free_pcppages_bulk(zone, to_drain, pcp, 0);
pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
}
}
#endif
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
unsigned long UP_flags;
int count;
do {
pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
count = pcp->count;
if (count) {
int to_drain = min(count,
pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
free_pcppages_bulk(zone, to_drain, pcp, 0);
count -= to_drain;
}
pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
} while (count);
}
static void drain_pages(unsigned int cpu)
{
struct zone *zone;
for_each_populated_zone(zone) {
drain_pages_zone(cpu, zone);
}
}
void drain_local_pages(struct zone *zone)
{
int cpu = smp_processor_id();
if (zone)
drain_pages_zone(cpu, zone);
else
drain_pages(cpu);
}
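/*
 * Drain the per-cpu pagesets of all CPUs that currently hold pages.  When
 * zone is non-NULL only that zone's pagesets are drained; with
 * force_all_cpus every online CPU is drained regardless of its pcp count.
 */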
static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
{
int cpu;
static cpumask_t cpus_with_pcps;
if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
if (!zone)
return;
mutex_lock(&pcpu_drain_mutex);
}
for_each_online_cpu(cpu) {
struct per_cpu_pages *pcp;
struct zone *z;
bool has_pcps = false;
if (force_all_cpus) {
has_pcps = true;
} else if (zone) {
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
if (pcp->count)
has_pcps = true;
} else {
for_each_populated_zone(z) {
pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
if (pcp->count) {
has_pcps = true;
break;
}
}
}
if (has_pcps)
cpumask_set_cpu(cpu, &cpus_with_pcps);
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
for_each_cpu(cpu, &cpus_with_pcps) {
if (zone)
drain_pages_zone(cpu, zone);
else
drain_pages(cpu);
}
mutex_unlock(&pcpu_drain_mutex);
}
void drain_all_pages(struct zone *zone)
{
__drain_all_pages(zone, false);
}
static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high)
{
int min_nr_free, max_nr_free;
if (unlikely(free_high))
return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX);
if (unlikely(high < batch))
return 1;
min_nr_free = batch;
max_nr_free = high - batch;
batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free);
return batch;
}
static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
int batch, bool free_high)
{
int high, high_min, high_max;
high_min = READ_ONCE(pcp->high_min);
high_max = READ_ONCE(pcp->high_max);
high = pcp->high = clamp(pcp->high, high_min, high_max);
if (unlikely(!high))
return 0;
if (unlikely(free_high)) {
pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
high_min);
return 0;
}
if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) {
int free_count = max_t(int, pcp->free_count, batch);
pcp->high = max(high - free_count, high_min);
return min(batch << 2, pcp->high);
}
if (high_min == high_max)
return high;
if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) {
int free_count = max_t(int, pcp->free_count, batch);
pcp->high = max(high - free_count, high_min);
high = max(pcp->count, high_min);
} else if (pcp->count >= high) {
int need_high = pcp->free_count + batch;
if (pcp->high < need_high)
pcp->high = clamp(need_high, high_min, high_max);
}
return high;
}
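/*
 * Commit a page to the current CPU's pcp list and, unless FPI_TRYLOCK is
 * set, trim the list back below the high watermark in batches.  Returns
 * false if the pcp lock had to be dropped and could not be re-taken on the
 * same CPU, in which case the caller must not unlock it again.
 */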
static bool free_frozen_page_commit(struct zone *zone,
struct per_cpu_pages *pcp, struct page *page, int migratetype,
unsigned int order, fpi_t fpi_flags, unsigned long *UP_flags)
{
int high, batch;
int to_free, to_free_batched;
int pindex;
int cpu = smp_processor_id();
	bool ret = true;
bool free_high = false;
pcp->alloc_factor >>= 1;
__count_vm_events(PGFREE, 1 << order);
pindex = order_to_pindex(migratetype, order);
list_add(&page->pcp_list, &pcp->lists[pindex]);
pcp->count += 1 << order;
batch = READ_ONCE(pcp->batch);
if (order && order <= PAGE_ALLOC_COSTLY_ORDER) {
free_high = (pcp->free_count >= (batch + pcp->high_min / 2) &&
(pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) &&
(!(pcp->flags & PCPF_FREE_HIGH_BATCH) ||
pcp->count >= batch));
pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER;
} else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) {
pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER;
}
if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX))
pcp->free_count += (1 << order);
if (unlikely(fpi_flags & FPI_TRYLOCK)) {
return true;
}
high = nr_pcp_high(pcp, zone, batch, free_high);
if (pcp->count < high)
return true;
to_free = nr_pcp_free(pcp, batch, high, free_high);
while (to_free > 0 && pcp->count > 0) {
to_free_batched = min(to_free, batch);
free_pcppages_bulk(zone, to_free_batched, pcp, pindex);
to_free -= to_free_batched;
if (to_free == 0 || pcp->count == 0)
break;
pcp_spin_unlock(pcp, *UP_flags);
pcp = pcp_spin_trylock(zone->per_cpu_pageset, *UP_flags);
if (!pcp) {
ret = false;
break;
}
if (smp_processor_id() != cpu) {
pcp_spin_unlock(pcp, *UP_flags);
ret = false;
break;
}
}
if (test_bit(ZONE_BELOW_HIGH, &zone->flags) &&
zone_watermark_ok(zone, 0, high_wmark_pages(zone),
ZONE_MOVABLE, 0)) {
struct pglist_data *pgdat = zone->zone_pgdat;
clear_bit(ZONE_BELOW_HIGH, &zone->flags);
if (kswapd_test_hopeless(pgdat) &&
next_memory_node(pgdat->node_id) < MAX_NUMNODES)
kswapd_clear_hopeless(pgdat, KSWAPD_CLEAR_HOPELESS_PCP);
}
return ret;
}
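/*
 * Free a page whose reference count has already dropped to zero, preferring
 * the per-cpu lists for pcp-eligible orders and falling back to a direct
 * buddy free for isolated pageblocks or when the pcp lock is contended.
 */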
static void __free_frozen_pages(struct page *page, unsigned int order,
fpi_t fpi_flags)
{
unsigned long UP_flags;
struct per_cpu_pages *pcp;
struct zone *zone;
unsigned long pfn = page_to_pfn(page);
int migratetype;
if (!pcp_allowed_order(order)) {
__free_pages_ok(page, order, fpi_flags);
return;
}
if (!__free_pages_prepare(page, order, fpi_flags))
return;
zone = page_zone(page);
migratetype = get_pfnblock_migratetype(page, pfn);
if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
if (unlikely(is_migrate_isolate(migratetype))) {
free_one_page(zone, page, pfn, order, fpi_flags);
return;
}
migratetype = MIGRATE_MOVABLE;
}
if (unlikely((fpi_flags & FPI_TRYLOCK) && IS_ENABLED(CONFIG_PREEMPT_RT)
&& (in_nmi() || in_hardirq()))) {
add_page_to_zone_llist(zone, page, order);
return;
}
pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
if (pcp) {
if (!free_frozen_page_commit(zone, pcp, page, migratetype,
order, fpi_flags, &UP_flags))
return;
pcp_spin_unlock(pcp, UP_flags);
} else {
free_one_page(zone, page, pfn, order, fpi_flags);
}
}
void free_frozen_pages(struct page *page, unsigned int order)
{
__free_frozen_pages(page, order, FPI_NONE);
}
void free_frozen_pages_nolock(struct page *page, unsigned int order)
{
__free_frozen_pages(page, order, FPI_TRYLOCK);
}
void free_unref_folios(struct folio_batch *folios)
{
unsigned long UP_flags;
struct per_cpu_pages *pcp = NULL;
struct zone *locked_zone = NULL;
int i, j;
for (i = 0, j = 0; i < folios->nr; i++) {
struct folio *folio = folios->folios[i];
unsigned long pfn = folio_pfn(folio);
unsigned int order = folio_order(folio);
if (!__free_pages_prepare(&folio->page, order, FPI_NONE))
continue;
if (!pcp_allowed_order(order)) {
free_one_page(folio_zone(folio), &folio->page,
pfn, order, FPI_NONE);
continue;
}
folio->private = (void *)(unsigned long)order;
if (j != i)
folios->folios[j] = folio;
j++;
}
folios->nr = j;
for (i = 0; i < folios->nr; i++) {
struct folio *folio = folios->folios[i];
struct zone *zone = folio_zone(folio);
unsigned long pfn = folio_pfn(folio);
unsigned int order = (unsigned long)folio->private;
int migratetype;
folio->private = NULL;
migratetype = get_pfnblock_migratetype(&folio->page, pfn);
if (zone != locked_zone ||
is_migrate_isolate(migratetype)) {
if (pcp) {
pcp_spin_unlock(pcp, UP_flags);
locked_zone = NULL;
pcp = NULL;
}
if (is_migrate_isolate(migratetype)) {
free_one_page(zone, &folio->page, pfn,
order, FPI_NONE);
continue;
}
pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
if (unlikely(!pcp)) {
free_one_page(zone, &folio->page, pfn,
order, FPI_NONE);
continue;
}
locked_zone = zone;
}
if (unlikely(migratetype >= MIGRATE_PCPTYPES))
migratetype = MIGRATE_MOVABLE;
trace_mm_page_free_batched(&folio->page);
if (!free_frozen_page_commit(zone, pcp, &folio->page,
migratetype, order, FPI_NONE, &UP_flags)) {
pcp = NULL;
locked_zone = NULL;
}
}
if (pcp)
pcp_spin_unlock(pcp, UP_flags);
folio_batch_reinit(folios);
}
static void __split_page(struct page *page, unsigned int order)
{
VM_WARN_ON_PAGE(PageCompound(page), page);
split_page_owner(page, order, 0);
pgalloc_tag_split(page_folio(page), order, 0);
split_page_memcg(page, order);
}
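/*
 * split_page takes a non-compound higher-order page and splits it into
 * n (1 << order) sub-pages.  Each sub-page must be freed individually.
 */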
void split_page(struct page *page, unsigned int order)
{
int i;
VM_WARN_ON_PAGE(!page_count(page), page);
for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
__split_page(page, order);
}
EXPORT_SYMBOL_GPL(split_page);
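/*
 * Remove a free page from the buddy lists on behalf of compaction or
 * isolation, obeying the zone watermarks, and mark surrounding pageblocks
 * MIGRATE_MOVABLE if the isolated page covers at least half a pageblock.
 * Returns the number of base pages isolated, or 0 on failure.
 */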
int __isolate_free_page(struct page *page, unsigned int order)
{
struct zone *zone = page_zone(page);
int mt = get_pageblock_migratetype(page);
if (!is_migrate_isolate(mt)) {
unsigned long watermark;
watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
return 0;
}
del_page_from_free_list(page, zone, order, mt);
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
for (; page < endpage; page += pageblock_nr_pages) {
int mt = get_pageblock_migratetype(page);
if (migratetype_is_mergeable(mt))
move_freepages_block(zone, page, mt,
MIGRATE_MOVABLE);
}
}
return 1UL << order;
}
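/*
 * Return an isolated page to the buddy allocator.  The zone lock must be
 * held; the page goes to the tail of the free list and skips the page
 * reporting notifier.
 */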
void __putback_isolated_page(struct page *page, unsigned int order, int mt)
{
struct zone *zone = page_zone(page);
lockdep_assert_held(&zone->lock);
__free_one_page(page, page_to_pfn(page), zone, order, mt,
FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
}
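/*
 * Update NUMA hit/miss/foreign and local/other statistics for an allocation
 * satisfied from zone z while preferred_zone was requested.
 */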
static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
long nr_account)
{
#ifdef CONFIG_NUMA
enum numa_stat_item local_stat = NUMA_LOCAL;
if (!static_branch_likely(&vm_numa_stat_key))
return;
if (zone_to_nid(z) != numa_node_id())
local_stat = NUMA_OTHER;
if (zone_to_nid(z) == zone_to_nid(preferred_zone))
__count_numa_events(z, NUMA_HIT, nr_account);
else {
__count_numa_events(z, NUMA_MISS, nr_account);
__count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
}
__count_numa_events(z, local_stat, nr_account);
#endif
}
static __always_inline
struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
unsigned int order, unsigned int alloc_flags,
int migratetype)
{
struct page *page;
unsigned long flags;
do {
page = NULL;
if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
if (!spin_trylock_irqsave(&zone->lock, flags))
return NULL;
} else {
spin_lock_irqsave(&zone->lock, flags);
}
if (alloc_flags & ALLOC_HIGHATOMIC)
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
if (!page) {
enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK)))
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
if (!page) {
spin_unlock_irqrestore(&zone->lock, flags);
return NULL;
}
}
spin_unlock_irqrestore(&zone->lock, flags);
} while (check_new_pages(page, order));
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone, 1);
return page;
}
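/*
 * Work out how many pages to bulk-allocate from the buddy allocator when
 * refilling a pcp list: scale the batch up for repeated order-0 allocations
 * without intervening frees, and cap it so pcp->count does not overshoot
 * the high watermark.
 */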
static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order)
{
int high, base_batch, batch, max_nr_alloc;
int high_max, high_min;
base_batch = READ_ONCE(pcp->batch);
high_min = READ_ONCE(pcp->high_min);
high_max = READ_ONCE(pcp->high_max);
high = pcp->high = clamp(pcp->high, high_min, high_max);
if (unlikely(high < base_batch))
return 1;
if (order)
batch = base_batch;
else
batch = (base_batch << pcp->alloc_factor);
if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags))
high = pcp->high = min(high + batch, high_max);
if (!order) {
max_nr_alloc = max(high - pcp->count - base_batch, base_batch);
if (batch <= max_nr_alloc &&
pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX)
pcp->alloc_factor++;
batch = min(batch, max_nr_alloc);
}
if (batch > 1)
batch = max(batch >> order, 2);
return batch;
}
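/*
 * Remove a page from a per-cpu free list, refilling the list from the buddy
 * allocator in batches when it is empty. Called with the pcp lock held;
 * returns NULL if the zone cannot supply more pages.
 */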
static inline
struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
int migratetype,
unsigned int alloc_flags,
struct per_cpu_pages *pcp,
struct list_head *list)
{
struct page *page;
do {
if (list_empty(list)) {
int batch = nr_pcp_alloc(pcp, zone, order);
int alloced;
alloced = rmqueue_bulk(zone, order,
batch, list,
migratetype, alloc_flags);
pcp->count += alloced << order;
if (unlikely(list_empty(list)))
return NULL;
}
page = list_first_entry(list, struct page, pcp_list);
list_del(&page->pcp_list);
pcp->count -= 1 << order;
} while (check_new_pages(page, order));
return page;
}
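/*
 * Allocation fast path via the local per-cpu page lists. Returns NULL, so
 * the caller falls back to the buddy lists, if the pcp lock cannot be taken
 * or the lists cannot be refilled.
 */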
static struct page *rmqueue_pcplist(struct zone *preferred_zone,
struct zone *zone, unsigned int order,
int migratetype, unsigned int alloc_flags)
{
struct per_cpu_pages *pcp;
struct list_head *list;
struct page *page;
unsigned long UP_flags;
pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
if (!pcp)
return NULL;
pcp->free_count >>= 1;
list = &pcp->lists[order_to_pindex(migratetype, order)];
page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
pcp_spin_unlock(pcp, UP_flags);
if (page) {
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone, 1);
}
return page;
}
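/*
 * rmqueue() is the common entry point for allocating a single (possibly
 * high-order) page from a zone: use the per-cpu lists when the order
 * qualifies, otherwise go straight to the buddy allocator. If the zone's
 * watermark was boosted by fragmentation events, clear the flag and wake
 * kswapd so reclaim can run before further fallbacks occur.
 */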
__no_sanitize_memory
static inline
struct page *rmqueue(struct zone *preferred_zone,
struct zone *zone, unsigned int order,
gfp_t gfp_flags, unsigned int alloc_flags,
int migratetype)
{
struct page *page;
if (likely(pcp_allowed_order(order))) {
page = rmqueue_pcplist(preferred_zone, zone, order,
migratetype, alloc_flags);
if (likely(page))
goto out;
}
page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
migratetype);
out:
if ((alloc_flags & ALLOC_KSWAPD) &&
unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
wakeup_kswapd(zone, 0, 0, zone_idx(zone));
}
VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
return page;
}
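/*
 * Reserve the pageblock backing a high-order atomic allocation as
 * MIGRATE_HIGHATOMIC so that later atomic allocations are less likely to
 * fail. The reserve is capped at roughly 1% of the zone's managed pages.
 */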
static void reserve_highatomic_pageblock(struct page *page, int order,
struct zone *zone)
{
int mt;
unsigned long max_managed, flags;
if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
return;
max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
if (zone->nr_reserved_highatomic >= max_managed)
return;
spin_lock_irqsave(&zone->lock, flags);
if (zone->nr_reserved_highatomic >= max_managed)
goto out_unlock;
mt = get_pageblock_migratetype(page);
if (!migratetype_is_mergeable(mt))
goto out_unlock;
if (order < pageblock_order) {
if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
goto out_unlock;
zone->nr_reserved_highatomic += pageblock_nr_pages;
} else {
change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
zone->nr_reserved_highatomic += 1 << order;
}
out_unlock:
spin_unlock_irqrestore(&zone->lock, flags);
}
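/*
 * Release highatomic pageblock reservations when memory pressure makes them
 * counter-productive. Unless @force is set, each zone keeps at least one
 * reserved pageblock. Returns true if a block was released, in which case a
 * failing allocation may be worth retrying.
 */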
static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
bool force)
{
struct zonelist *zonelist = ac->zonelist;
unsigned long flags;
struct zoneref *z;
struct zone *zone;
struct page *page;
int order;
int ret;
for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
ac->nodemask) {
if (!force && zone->nr_reserved_highatomic <=
pageblock_nr_pages)
continue;
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct free_area *area = &(zone->free_area[order]);
unsigned long size;
page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
if (!page)
continue;
size = max(pageblock_nr_pages, 1UL << order);
if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic))
size = zone->nr_reserved_highatomic;
zone->nr_reserved_highatomic -= size;
if (order < pageblock_order)
ret = move_freepages_block(zone, page,
MIGRATE_HIGHATOMIC,
ac->migratetype);
else {
move_to_free_list(page, zone, order,
MIGRATE_HIGHATOMIC,
ac->migratetype);
change_pageblock_range(page, order,
ac->migratetype);
ret = 1;
}
WARN_ON_ONCE(ret == -1);
if (ret > 0) {
spin_unlock_irqrestore(&zone->lock, flags);
return ret;
}
}
spin_unlock_irqrestore(&zone->lock, flags);
}
return false;
}
static inline long __zone_watermark_unusable_free(struct zone *z,
unsigned int order, unsigned int alloc_flags)
{
long unusable_free = (1 << order) - 1;
if (likely(!(alloc_flags & ALLOC_RESERVES)))
unusable_free += READ_ONCE(z->nr_free_highatomic);
#ifdef CONFIG_CMA
if (!(alloc_flags & ALLOC_CMA))
unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
#endif
return unusable_free;
}
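/*
 * Core watermark check: return true if, after discounting reserved and
 * otherwise unusable pages, the zone still has @mark plus the lowmem reserve
 * free pages available and, for high-order requests, at least one suitably
 * sized free page the caller is allowed to use.
 */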
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int highest_zoneidx, unsigned int alloc_flags,
long free_pages)
{
long min = mark;
int o;
free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
if (unlikely(alloc_flags & ALLOC_RESERVES)) {
if (alloc_flags & ALLOC_MIN_RESERVE) {
min -= min / 2;
if (alloc_flags & ALLOC_NON_BLOCK)
min -= min / 4;
}
if (alloc_flags & ALLOC_OOM)
min -= min / 2;
}
if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
return false;
if (!order)
return true;
for (o = order; o < NR_PAGE_ORDERS; o++) {
struct free_area *area = &z->free_area[o];
int mt;
if (!area->nr_free)
continue;
for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
if (!free_area_empty(area, mt))
return true;
}
#ifdef CONFIG_CMA
if ((alloc_flags & ALLOC_CMA) &&
!free_area_empty(area, MIGRATE_CMA)) {
return true;
}
#endif
if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
!free_area_empty(area, MIGRATE_HIGHATOMIC)) {
return true;
}
}
return false;
}
bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int highest_zoneidx, unsigned int alloc_flags)
{
return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
zone_page_state(z, NR_FREE_PAGES));
}
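/*
 * Cheaper watermark check for the allocator fast path: an order-0 request
 * can pass on a quick free-page comparison, falling back to the full check
 * and, for ALLOC_MIN_RESERVE requests against WMARK_MIN, to a final check
 * against the unboosted min watermark before failing.
 */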
static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags, gfp_t gfp_mask)
{
long free_pages;
free_pages = zone_page_state(z, NR_FREE_PAGES);
if (!order) {
long usable_free;
long reserved;
usable_free = free_pages;
reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
usable_free -= min(usable_free, reserved);
if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
return true;
}
if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
free_pages))
return true;
if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost
&& ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
mark = z->_watermark[WMARK_MIN];
return __zone_watermark_ok(z, order, mark, highest_zoneidx,
alloc_flags, free_pages);
}
return false;
}
#ifdef CONFIG_NUMA
int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
node_reclaim_distance;
}
#else
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
return true;
}
#endif
static inline unsigned int
alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
{
unsigned int alloc_flags;
alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
if (defrag_mode) {
alloc_flags |= ALLOC_NOFRAGMENT;
return alloc_flags;
}
#ifdef CONFIG_ZONE_DMA32
if (!zone)
return alloc_flags;
if (zone_idx(zone) != ZONE_NORMAL)
return alloc_flags;
BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
if (nr_online_nodes > 1 && !populated_zone(--zone))
return alloc_flags;
alloc_flags |= ALLOC_NOFRAGMENT;
#endif
return alloc_flags;
}
static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
unsigned int alloc_flags)
{
#ifdef CONFIG_CMA
if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
alloc_flags |= ALLOC_CMA;
#endif
return alloc_flags;
}
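/*
 * get_page_from_freelist() scans the zonelist and allocates from the first
 * zone that satisfies the cpuset, dirty-limit and watermark constraints.
 * Zones below their watermark may still be used after accepting unaccepted
 * memory, growing a deferred-init zone or performing node reclaim. The
 * ALLOC_NOFRAGMENT restriction and the preference for nodes whose kswapd is
 * currently idle are relaxed and the scan retried before giving up.
 */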
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
const struct alloc_context *ac)
{
struct zoneref *z;
struct zone *zone;
struct pglist_data *last_pgdat = NULL;
bool last_pgdat_dirty_ok = false;
bool no_fallback;
bool skip_kswapd_nodes = nr_online_nodes > 1;
bool skipped_kswapd_nodes = false;
retry:
no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
z = ac->preferred_zoneref;
for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
ac->nodemask) {
struct page *page;
unsigned long mark;
if (cpusets_enabled() &&
(alloc_flags & ALLOC_CPUSET) &&
!__cpuset_zone_allowed(zone, gfp_mask))
continue;
if (ac->spread_dirty_pages) {
if (last_pgdat != zone->zone_pgdat) {
last_pgdat = zone->zone_pgdat;
last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
}
if (!last_pgdat_dirty_ok)
continue;
}
if (no_fallback && !defrag_mode && nr_online_nodes > 1 &&
zone != zonelist_zone(ac->preferred_zoneref)) {
int local_nid;
local_nid = zonelist_node_idx(ac->preferred_zoneref);
if (zone_to_nid(zone) != local_nid) {
alloc_flags &= ~ALLOC_NOFRAGMENT;
goto retry;
}
}
if (skip_kswapd_nodes &&
!waitqueue_active(&zone->zone_pgdat->kswapd_wait)) {
skipped_kswapd_nodes = true;
continue;
}
cond_accept_memory(zone, order, alloc_flags);
if (test_bit(ZONE_BELOW_HIGH, &zone->flags))
goto check_alloc_wmark;
mark = high_wmark_pages(zone);
if (zone_watermark_fast(zone, order, mark,
ac->highest_zoneidx, alloc_flags,
gfp_mask))
goto try_this_zone;
else
set_bit(ZONE_BELOW_HIGH, &zone->flags);
check_alloc_wmark:
mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
if (!zone_watermark_fast(zone, order, mark,
ac->highest_zoneidx, alloc_flags,
gfp_mask)) {
int ret;
if (cond_accept_memory(zone, order, alloc_flags))
goto try_this_zone;
if (deferred_pages_enabled()) {
if (_deferred_grow_zone(zone, order))
goto try_this_zone;
}
BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
if (alloc_flags & ALLOC_NO_WATERMARKS)
goto try_this_zone;
if (!node_reclaim_enabled() ||
!zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
continue;
ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
switch (ret) {
case NODE_RECLAIM_NOSCAN:
continue;
case NODE_RECLAIM_FULL:
continue;
default:
if (zone_watermark_ok(zone, order, mark,
ac->highest_zoneidx, alloc_flags))
goto try_this_zone;
continue;
}
}
try_this_zone:
page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
gfp_mask, alloc_flags, ac->migratetype);
if (page) {
prep_new_page(page, order, gfp_mask, alloc_flags);
if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
reserve_highatomic_pageblock(page, order, zone);
return page;
} else {
if (cond_accept_memory(zone, order, alloc_flags))
goto try_this_zone;
if (deferred_pages_enabled()) {
if (_deferred_grow_zone(zone, order))
goto try_this_zone;
}
}
}
if (skip_kswapd_nodes && skipped_kswapd_nodes) {
skip_kswapd_nodes = false;
goto retry;
}
if (no_fallback && !defrag_mode) {
alloc_flags &= ~ALLOC_NOFRAGMENT;
goto retry;
}
return NULL;
}
static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
{
unsigned int filter = SHOW_MEM_FILTER_NODES;
if (!(gfp_mask & __GFP_NOMEMALLOC))
if (tsk_is_oom_victim(current) ||
(current->flags & (PF_MEMALLOC | PF_EXITING)))
filter &= ~SHOW_MEM_FILTER_NODES;
if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
filter &= ~SHOW_MEM_FILTER_NODES;
__show_mem(filter, nodemask, gfp_zone(gfp_mask));
mem_cgroup_show_protected_memory(NULL);
}
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
if ((gfp_mask & __GFP_NOWARN) ||
!__ratelimit(&nopage_rs) ||
((gfp_mask & __GFP_DMA) && !has_managed_dma()))
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
current->comm, &vaf, gfp_mask, &gfp_mask,
nodemask_pr_args(nodemask));
va_end(args);
cpuset_print_current_mems_allowed();
pr_cont("\n");
dump_stack();
warn_alloc_show_mem(gfp_mask, nodemask);
}
static inline struct page *
__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
unsigned int alloc_flags,
const struct alloc_context *ac)
{
struct page *page;
page = get_page_from_freelist(gfp_mask, order,
alloc_flags|ALLOC_CPUSET, ac);
if (!page)
page = get_page_from_freelist(gfp_mask, order,
alloc_flags, ac);
return page;
}
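/*
 * Last-ditch allocation attempt around the OOM killer. A final
 * high-watermark allocation is tried under oom_lock; if that fails and the
 * context permits it, out_of_memory() is invoked and *did_some_progress
 * tells the caller whether a retry is worthwhile.
 */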
static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
const struct alloc_context *ac, unsigned long *did_some_progress)
{
struct oom_control oc = {
.zonelist = ac->zonelist,
.nodemask = ac->nodemask,
.memcg = NULL,
.gfp_mask = gfp_mask,
.order = order,
};
struct page *page;
*did_some_progress = 0;
if (!mutex_trylock(&oom_lock)) {
*did_some_progress = 1;
schedule_timeout_uninterruptible(1);
return NULL;
}
page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
~__GFP_DIRECT_RECLAIM, order,
ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
if (page)
goto out;
if (current->flags & PF_DUMPCORE)
goto out;
if (order > PAGE_ALLOC_COSTLY_ORDER)
goto out;
if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
goto out;
if (ac->highest_zoneidx < ZONE_NORMAL)
goto out;
if (pm_suspended_storage())
goto out;
if (out_of_memory(&oc) ||
WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
*did_some_progress = 1;
if (gfp_mask & __GFP_NOFAIL)
page = __alloc_pages_cpuset_fallback(gfp_mask, order,
ALLOC_NO_WATERMARKS, ac);
}
out:
mutex_unlock(&oom_lock);
return page;
}
#define MAX_COMPACT_RETRIES 16
#ifdef CONFIG_COMPACTION
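/*
 * Run direct compaction for a high-order request and, if it produced or
 * exposed a suitable page, return it freshly prepared. Compaction runs with
 * reclaim disabled and is accounted as a memory stall.
 */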
static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
unsigned int alloc_flags, const struct alloc_context *ac,
enum compact_priority prio, enum compact_result *compact_result)
{
struct page *page = NULL;
unsigned long pflags;
unsigned int noreclaim_flag;
if (!order)
return NULL;
psi_memstall_enter(&pflags);
delayacct_compact_start();
noreclaim_flag = memalloc_noreclaim_save();
*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
prio, &page);
memalloc_noreclaim_restore(noreclaim_flag);
psi_memstall_leave(&pflags);
delayacct_compact_end();
if (*compact_result == COMPACT_SKIPPED)
return NULL;
count_vm_event(COMPACTSTALL);
if (page)
prep_new_page(page, order, gfp_mask, alloc_flags);
if (!page)
page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
if (page) {
struct zone *zone = page_zone(page);
zone->compact_blockskip_flush = false;
compaction_defer_reset(zone, order, true);
count_vm_event(COMPACTSUCCESS);
return page;
}
count_vm_event(COMPACTFAIL);
cond_resched();
return NULL;
}
static inline bool
should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
enum compact_result compact_result,
enum compact_priority *compact_priority,
int *compaction_retries)
{
int max_retries = MAX_COMPACT_RETRIES;
int min_priority;
bool ret = false;
int retries = *compaction_retries;
enum compact_priority priority = *compact_priority;
if (!order)
return false;
if (fatal_signal_pending(current))
return false;
if (compact_result == COMPACT_SKIPPED) {
ret = compaction_zonelist_suitable(ac, order, alloc_flags);
goto out;
}
if (compact_result == COMPACT_SUCCESS) {
if (order > PAGE_ALLOC_COSTLY_ORDER)
max_retries /= 4;
if (++(*compaction_retries) <= max_retries) {
ret = true;
goto out;
}
}
min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
if (*compact_priority > min_priority) {
(*compact_priority)--;
*compaction_retries = 0;
ret = true;
}
out:
trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
return ret;
}
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
unsigned int alloc_flags, const struct alloc_context *ac,
enum compact_priority prio, enum compact_result *compact_result)
{
*compact_result = COMPACT_SKIPPED;
return NULL;
}
static inline bool
should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
enum compact_result compact_result,
enum compact_priority *compact_priority,
int *compaction_retries)
{
struct zone *zone;
struct zoneref *z;
if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
return false;
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
ac->highest_zoneidx, ac->nodemask) {
if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
ac->highest_zoneidx, alloc_flags))
return true;
}
return false;
}
#endif
#ifdef CONFIG_LOCKDEP
static struct lockdep_map __fs_reclaim_map =
STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
static bool __need_reclaim(gfp_t gfp_mask)
{
if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
return false;
if (current->flags & PF_MEMALLOC)
return false;
if (gfp_mask & __GFP_NOLOCKDEP)
return false;
return true;
}
void __fs_reclaim_acquire(unsigned long ip)
{
lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
}
void __fs_reclaim_release(unsigned long ip)
{
lock_release(&__fs_reclaim_map, ip);
}
void fs_reclaim_acquire(gfp_t gfp_mask)
{
gfp_mask = current_gfp_context(gfp_mask);
if (__need_reclaim(gfp_mask)) {
if (gfp_mask & __GFP_FS)
__fs_reclaim_acquire(_RET_IP_);
#ifdef CONFIG_MMU_NOTIFIER
lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#endif
}
}
EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
void fs_reclaim_release(gfp_t gfp_mask)
{
gfp_mask = current_gfp_context(gfp_mask);
if (__need_reclaim(gfp_mask)) {
if (gfp_mask & __GFP_FS)
__fs_reclaim_release(_RET_IP_);
}
}
EXPORT_SYMBOL_GPL(fs_reclaim_release);
#endif
static DEFINE_SEQLOCK(zonelist_update_seq);
static unsigned int zonelist_iter_begin(void)
{
if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
return read_seqbegin(&zonelist_update_seq);
return 0;
}
static unsigned int check_retry_zonelist(unsigned int seq)
{
if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
return read_seqretry(&zonelist_update_seq, seq);
return seq;
}
static unsigned long
__perform_reclaim(gfp_t gfp_mask, unsigned int order,
const struct alloc_context *ac)
{
unsigned int noreclaim_flag;
unsigned long progress;
cond_resched();
cpuset_memory_pressure_bump();
fs_reclaim_acquire(gfp_mask);
noreclaim_flag = memalloc_noreclaim_save();
progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
ac->nodemask);
memalloc_noreclaim_restore(noreclaim_flag);
fs_reclaim_release(gfp_mask);
cond_resched();
return progress;
}
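/*
 * Perform direct reclaim and then retry the allocation. If the first retry
 * fails, part of the highatomic reserve is released and the per-cpu lists
 * are drained before one more attempt, since reclaimed memory may be stuck
 * there.
 */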
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
unsigned int alloc_flags, const struct alloc_context *ac,
unsigned long *did_some_progress)
{
struct page *page = NULL;
unsigned long pflags;
bool drained = false;
psi_memstall_enter(&pflags);
*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
if (unlikely(!(*did_some_progress)))
goto out;
retry:
page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
if (!page && !drained) {
unreserve_highatomic_pageblock(ac, false);
drain_all_pages(NULL);
drained = true;
goto retry;
}
out:
psi_memstall_leave(&pflags);
return page;
}
static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
const struct alloc_context *ac)
{
struct zoneref *z;
struct zone *zone;
pg_data_t *last_pgdat = NULL;
enum zone_type highest_zoneidx = ac->highest_zoneidx;
unsigned int reclaim_order;
if (defrag_mode)
reclaim_order = max(order, pageblock_order);
else
reclaim_order = order;
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
ac->nodemask) {
if (!managed_zone(zone))
continue;
if (last_pgdat == zone->zone_pgdat)
continue;
wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx);
last_pgdat = zone->zone_pgdat;
}
}
static inline unsigned int
gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
{
unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
alloc_flags |= (__force int)
(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
if (!(gfp_mask & __GFP_NOMEMALLOC)) {
alloc_flags |= ALLOC_NON_BLOCK;
if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE))
alloc_flags |= ALLOC_HIGHATOMIC;
}
if (alloc_flags & ALLOC_MIN_RESERVE)
alloc_flags &= ~ALLOC_CPUSET;
} else if (unlikely(rt_or_dl_task(current)) && in_task())
alloc_flags |= ALLOC_MIN_RESERVE;
alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
if (defrag_mode)
alloc_flags |= ALLOC_NOFRAGMENT;
return alloc_flags;
}
static bool oom_reserves_allowed(struct task_struct *tsk)
{
if (!tsk_is_oom_victim(tsk))
return false;
if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
return false;
return true;
}
static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
{
if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
return 0;
if (gfp_mask & __GFP_MEMALLOC)
return ALLOC_NO_WATERMARKS;
if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
return ALLOC_NO_WATERMARKS;
if (!in_interrupt()) {
if (current->flags & PF_MEMALLOC)
return ALLOC_NO_WATERMARKS;
else if (oom_reserves_allowed(current))
return ALLOC_OOM;
}
return 0;
}
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
return !!__gfp_pfmemalloc_flags(gfp_mask);
}
static inline bool
should_reclaim_retry(gfp_t gfp_mask, unsigned order,
struct alloc_context *ac, int alloc_flags,
bool did_some_progress, int *no_progress_loops)
{
struct zone *zone;
struct zoneref *z;
bool ret = false;
if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
*no_progress_loops = 0;
else
(*no_progress_loops)++;
if (*no_progress_loops > MAX_RECLAIM_RETRIES)
goto out;
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
ac->highest_zoneidx, ac->nodemask) {
unsigned long available;
unsigned long reclaimable;
unsigned long min_wmark = min_wmark_pages(zone);
bool wmark;
if (cpusets_enabled() &&
(alloc_flags & ALLOC_CPUSET) &&
!__cpuset_zone_allowed(zone, gfp_mask))
continue;
available = reclaimable = zone_reclaimable_pages(zone);
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
wmark = __zone_watermark_ok(zone, order, min_wmark,
ac->highest_zoneidx, alloc_flags, available);
trace_reclaim_retry_zone(z, order, reclaimable,
available, min_wmark, *no_progress_loops, wmark);
if (wmark) {
ret = true;
break;
}
}
if (current->flags & PF_WQ_WORKER)
schedule_timeout_uninterruptible(1);
else
cond_resched();
out:
if (!ret)
return unreserve_highatomic_pageblock(ac, true);
return ret;
}
static inline bool
check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
{
if (cpusets_enabled() && ac->nodemask &&
!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
ac->nodemask = NULL;
return true;
}
if (read_mems_allowed_retry(cpuset_mems_cookie))
return true;
return false;
}
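/*
 * The allocator slow path: wake kswapd, retry with progressively weaker
 * constraints (dropping the nodemask for reserve-eligible requests),
 * alternate direct compaction and direct reclaim, and finally fall back to
 * the OOM killer. Costly and non-movable high-order requests attempt
 * compaction first. __GFP_NOFAIL requests loop with access to the minimum
 * reserves instead of failing.
 */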
static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct alloc_context *ac)
{
bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
bool can_compact = can_direct_reclaim && gfp_compaction_allowed(gfp_mask);
bool nofail = gfp_mask & __GFP_NOFAIL;
const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
struct page *page = NULL;
unsigned int alloc_flags;
unsigned long did_some_progress;
enum compact_priority compact_priority;
enum compact_result compact_result;
int compaction_retries;
int no_progress_loops;
unsigned int cpuset_mems_cookie;
unsigned int zonelist_iter_cookie;
int reserve_flags;
bool compact_first = false;
bool can_retry_reserves = true;
if (unlikely(nofail)) {
WARN_ON_ONCE(!can_direct_reclaim);
WARN_ON_ONCE(current->flags & PF_MEMALLOC);
}
restart:
compaction_retries = 0;
no_progress_loops = 0;
compact_result = COMPACT_SKIPPED;
compact_priority = DEF_COMPACT_PRIORITY;
cpuset_mems_cookie = read_mems_allowed_begin();
zonelist_iter_cookie = zonelist_iter_begin();
if (can_compact && (costly_order || (order > 0 &&
ac->migratetype != MIGRATE_MOVABLE))) {
compact_first = true;
compact_priority = INIT_COMPACT_PRIORITY;
}
alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
ac->highest_zoneidx, ac->nodemask);
if (!zonelist_zone(ac->preferred_zoneref))
goto nopage;
if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
struct zoneref *z = first_zones_zonelist(ac->zonelist,
ac->highest_zoneidx,
&cpuset_current_mems_allowed);
if (!zonelist_zone(z))
goto nopage;
}
retry:
if (alloc_flags & ALLOC_KSWAPD)
wake_all_kswapds(order, gfp_mask, ac);
page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
if (page)
goto got_pg;
reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
if (reserve_flags)
alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
(alloc_flags & ALLOC_KSWAPD);
if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
ac->nodemask = NULL;
ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
ac->highest_zoneidx, ac->nodemask);
if (can_retry_reserves) {
can_retry_reserves = false;
goto retry;
}
}
if (!can_direct_reclaim)
goto nopage;
if (current->flags & PF_MEMALLOC)
goto nopage;
if (!compact_first) {
page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags,
ac, &did_some_progress);
if (page)
goto got_pg;
}
page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
compact_priority, &compact_result);
if (page)
goto got_pg;
if (compact_first) {
if (gfp_has_flags(gfp_mask, __GFP_NORETRY | __GFP_THISNODE))
goto nopage;
if (!(gfp_mask & __GFP_NORETRY))
compact_priority = DEF_COMPACT_PRIORITY;
compact_first = false;
goto retry;
}
if (gfp_mask & __GFP_NORETRY)
goto nopage;
if (costly_order && (!can_compact ||
!(gfp_mask & __GFP_RETRY_MAYFAIL)))
goto nopage;
if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
check_retry_zonelist(zonelist_iter_cookie))
goto restart;
if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
did_some_progress > 0, &no_progress_loops))
goto retry;
if (did_some_progress > 0 && can_compact &&
should_compact_retry(ac, order, alloc_flags,
compact_result, &compact_priority,
&compaction_retries))
goto retry;
if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) {
alloc_flags &= ~ALLOC_NOFRAGMENT;
goto retry;
}
if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
check_retry_zonelist(zonelist_iter_cookie))
goto restart;
page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
if (page)
goto got_pg;
if (tsk_is_oom_victim(current) &&
(alloc_flags & ALLOC_OOM ||
(gfp_mask & __GFP_NOMEMALLOC)))
goto nopage;
if (did_some_progress) {
no_progress_loops = 0;
goto retry;
}
nopage:
if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
check_retry_zonelist(zonelist_iter_cookie))
goto restart;
if (unlikely(nofail)) {
if (!can_direct_reclaim)
goto fail;
page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
if (page)
goto got_pg;
cond_resched();
goto retry;
}
fail:
warn_alloc(gfp_mask, ac->nodemask,
"page allocation failure: order:%u", order);
got_pg:
return page;
}
static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
int preferred_nid, nodemask_t *nodemask,
struct alloc_context *ac, gfp_t *alloc_gfp,
unsigned int *alloc_flags)
{
ac->highest_zoneidx = gfp_zone(gfp_mask);
ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
ac->nodemask = nodemask;
ac->migratetype = gfp_migratetype(gfp_mask);
if (cpusets_enabled()) {
*alloc_gfp |= __GFP_HARDWALL;
if (in_task() && !ac->nodemask)
ac->nodemask = &cpuset_current_mems_allowed;
else
*alloc_flags |= ALLOC_CPUSET;
}
might_alloc(gfp_mask);
if (!(*alloc_flags & ALLOC_TRYLOCK) &&
should_fail_alloc_page(gfp_mask, order))
return false;
*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
ac->highest_zoneidx, ac->nodemask);
return true;
}
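/*
 * Allocate up to @nr_pages order-0 pages and fill the NULL slots of
 * @page_array, preferring @preferred_nid. Only the per-cpu lists of a single
 * eligible zone are used; anything that cannot be served that way falls back
 * to a single-page allocation. Returns the number of populated entries in
 * the array.
 */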
unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
struct page **page_array)
{
struct page *page;
unsigned long UP_flags;
struct zone *zone;
struct zoneref *z;
struct per_cpu_pages *pcp;
struct list_head *pcp_list;
struct alloc_context ac;
gfp_t alloc_gfp;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
int nr_populated = 0, nr_account = 0;
while (nr_populated < nr_pages && page_array[nr_populated])
nr_populated++;
if (unlikely(nr_pages <= 0))
goto out;
if (unlikely(nr_pages - nr_populated == 0))
goto out;
if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
goto failed;
if (nr_pages - nr_populated == 1)
goto failed;
#ifdef CONFIG_PAGE_OWNER
if (static_branch_unlikely(&page_owner_inited))
goto failed;
#endif
gfp &= gfp_allowed_mask;
alloc_gfp = gfp;
if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
goto out;
gfp = alloc_gfp;
z = ac.preferred_zoneref;
for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
unsigned long mark;
if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
!__cpuset_zone_allowed(zone, gfp)) {
continue;
}
if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
goto failed;
}
cond_accept_memory(zone, 0, alloc_flags);
retry_this_zone:
mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
if (zone_watermark_fast(zone, 0, mark,
zonelist_zone_idx(ac.preferred_zoneref),
alloc_flags, gfp)) {
break;
}
if (cond_accept_memory(zone, 0, alloc_flags))
goto retry_this_zone;
if (deferred_pages_enabled()) {
if (_deferred_grow_zone(zone, 0))
goto retry_this_zone;
}
}
if (unlikely(!zone))
goto failed;
pcp = pcp_spin_trylock(zone->per_cpu_pageset, UP_flags);
if (!pcp)
goto failed;
pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
while (nr_populated < nr_pages) {
if (page_array[nr_populated]) {
nr_populated++;
continue;
}
page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
pcp, pcp_list);
if (unlikely(!page)) {
if (!nr_account) {
pcp_spin_unlock(pcp, UP_flags);
goto failed;
}
break;
}
nr_account++;
prep_new_page(page, 0, gfp, 0);
set_page_refcounted(page);
page_array[nr_populated++] = page;
}
pcp_spin_unlock(pcp, UP_flags);
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
out:
return nr_populated;
failed:
page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
if (page)
page_array[nr_populated++] = page;
goto out;
}
EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
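/*
 * Common entry point for page allocation: try the fast path against the low
 * watermark first and enter the slow path only when that fails. The returned
 * pages are "frozen" (their refcount is not yet initialised) and, when
 * __GFP_ACCOUNT is set, are charged to the current memcg.
 */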
struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
int preferred_nid, nodemask_t *nodemask)
{
struct page *page;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
gfp_t alloc_gfp;
struct alloc_context ac = { };
if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
return NULL;
gfp &= gfp_allowed_mask;
gfp = current_gfp_context(gfp);
alloc_gfp = gfp;
if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
&alloc_gfp, &alloc_flags))
return NULL;
alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
if (likely(page))
goto out;
alloc_gfp = gfp;
ac.spread_dirty_pages = false;
ac.nodemask = nodemask;
page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
out:
if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
free_frozen_pages(page, order);
page = NULL;
}
trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
kmsan_alloc_page(page, order, alloc_gfp);
return page;
}
EXPORT_SYMBOL(__alloc_frozen_pages_noprof);
struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
int preferred_nid, nodemask_t *nodemask)
{
struct page *page;
page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask);
if (page)
set_page_refcounted(page);
return page;
}
EXPORT_SYMBOL(__alloc_pages_noprof);
struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t *nodemask)
{
struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order,
preferred_nid, nodemask);
return page_rmappable_folio(page);
}
EXPORT_SYMBOL(__folio_alloc_noprof);
unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order)
{
struct page *page;
page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order);
if (!page)
return 0;
return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(get_free_pages_noprof);
unsigned long get_zeroed_page_noprof(gfp_t gfp_mask)
{
return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page_noprof);
static void ___free_pages(struct page *page, unsigned int order,
fpi_t fpi_flags)
{
int head = PageHead(page);
struct alloc_tag *tag = pgalloc_tag_get(page);
if (put_page_testzero(page))
__free_frozen_pages(page, order, fpi_flags);
else if (!head) {
pgalloc_tag_sub_pages(tag, (1 << order) - 1);
while (order-- > 0) {
clear_page_tag_ref(page + (1 << order));
__free_frozen_pages(page + (1 << order), order,
fpi_flags);
}
}
}
void __free_pages(struct page *page, unsigned int order)
{
___free_pages(page, order, FPI_NONE);
}
EXPORT_SYMBOL(__free_pages);
void free_pages_nolock(struct page *page, unsigned int order)
{
___free_pages(page, order, FPI_TRYLOCK);
}
void free_pages(unsigned long addr, unsigned int order)
{
if (addr != 0) {
VM_BUG_ON(!virt_addr_valid((void *)addr));
__free_pages(virt_to_page((void *)addr), order);
}
}
EXPORT_SYMBOL(free_pages);
static void *make_alloc_exact(unsigned long addr, unsigned int order,
size_t size)
{
if (addr) {
unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
struct page *page = virt_to_page((void *)addr);
struct page *last = page + nr;
__split_page(page, order);
while (page < --last)
set_page_refcounted(last);
last = page + (1UL << order);
for (page += nr; page < last; page++)
__free_pages_ok(page, 0, FPI_TO_TAIL);
}
return (void *)addr;
}
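/*
 * alloc_pages_exact_noprof() allocates an exact number of physically
 * contiguous bytes: a power-of-two block is allocated, split into individual
 * pages, and the pages beyond the requested size are freed again. The result
 * must be released with free_pages_exact().
 */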
void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask)
{
unsigned int order = get_order(size);
unsigned long addr;
if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
addr = get_free_pages_noprof(gfp_mask, order);
return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact_noprof);
void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask)
{
unsigned int order = get_order(size);
struct page *p;
if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
p = alloc_pages_node_noprof(nid, gfp_mask, order);
if (!p)
return NULL;
return make_alloc_exact((unsigned long)page_address(p), order, size);
}
void free_pages_exact(void *virt, size_t size)
{
unsigned long addr = (unsigned long)virt;
unsigned long end = addr + PAGE_ALIGN(size);
while (addr < end) {
free_page(addr);
addr += PAGE_SIZE;
}
}
EXPORT_SYMBOL(free_pages_exact);
static unsigned long nr_free_zone_pages(int offset)
{
struct zoneref *z;
struct zone *zone;
unsigned long sum = 0;
struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
for_each_zone_zonelist(zone, z, zonelist, offset) {
unsigned long size = zone_managed_pages(zone);
unsigned long high = high_wmark_pages(zone);
if (size > high)
sum += size - high;
}
return sum;
}
unsigned long nr_free_buffer_pages(void)
{
return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
zoneref->zone_idx = zone_idx(zone);
}
static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
{
struct zone *zone;
enum zone_type zone_type = MAX_NR_ZONES;
int nr_zones = 0;
do {
zone_type--;
zone = pgdat->node_zones + zone_type;
if (populated_zone(zone)) {
zoneref_set_zone(zone, &zonerefs[nr_zones++]);
check_highest_zone(zone_type);
}
} while (zone_type);
return nr_zones;
}
#ifdef CONFIG_NUMA
static int __parse_numa_zonelist_order(char *s)
{
if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
return -EINVAL;
}
return 0;
}
static char numa_zonelist_order[] = "Node";
#define NUMA_ZONELIST_ORDER_LEN 16
static int numa_zonelist_order_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
if (write)
return __parse_numa_zonelist_order(buffer);
return proc_dostring(table, write, buffer, length, ppos);
}
static int node_load[MAX_NUMNODES];
int find_next_best_node(int node, nodemask_t *used_node_mask)
{
int n, val;
int min_val = INT_MAX;
int best_node = NUMA_NO_NODE;
if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) {
node_set(node, *used_node_mask);
return node;
}
for_each_node_state(n, N_MEMORY) {
if (node_isset(n, *used_node_mask))
continue;
val = node_distance(node, n);
val += (n < node);
if (!cpumask_empty(cpumask_of_node(n)))
val += PENALTY_FOR_NODE_WITH_CPUS;
val *= MAX_NUMNODES;
val += node_load[n];
if (val < min_val) {
min_val = val;
best_node = n;
}
}
if (best_node >= 0)
node_set(best_node, *used_node_mask);
return best_node;
}
static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
unsigned nr_nodes)
{
struct zoneref *zonerefs;
int i;
zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
for (i = 0; i < nr_nodes; i++) {
int nr_zones;
pg_data_t *node = NODE_DATA(node_order[i]);
nr_zones = build_zonerefs_node(node, zonerefs);
zonerefs += nr_zones;
}
zonerefs->zone = NULL;
zonerefs->zone_idx = 0;
}
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
struct zoneref *zonerefs;
int nr_zones;
zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
nr_zones = build_zonerefs_node(pgdat, zonerefs);
zonerefs += nr_zones;
zonerefs->zone = NULL;
zonerefs->zone_idx = 0;
}
static void build_zonelists(pg_data_t *pgdat)
{
static int node_order[MAX_NUMNODES];
int node, nr_nodes = 0;
nodemask_t used_mask = NODE_MASK_NONE;
int local_node, prev_node;
local_node = pgdat->node_id;
prev_node = local_node;
memset(node_order, 0, sizeof(node_order));
while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
if (node_distance(local_node, node) !=
node_distance(local_node, prev_node))
node_load[node] += 1;
node_order[nr_nodes++] = node;
prev_node = node;
}
build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
build_thisnode_zonelists(pgdat);
pr_info("Fallback order for Node %d: ", local_node);
for (node = 0; node < nr_nodes; node++)
pr_cont("%d ", node_order[node]);
pr_cont("\n");
}
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node)
{
struct zoneref *z;
z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
gfp_zone(GFP_KERNEL),
NULL);
return zonelist_node_idx(z);
}
#endif
static void setup_min_unmapped_ratio(void);
static void setup_min_slab_ratio(void);
#else
static void build_zonelists(pg_data_t *pgdat)
{
struct zoneref *zonerefs;
int nr_zones;
zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
nr_zones = build_zonerefs_node(pgdat, zonerefs);
zonerefs += nr_zones;
zonerefs->zone = NULL;
zonerefs->zone_idx = 0;
}
#endif
static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
#define BOOT_PAGESET_HIGH 0
#define BOOT_PAGESET_BATCH 1
static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
static void __build_all_zonelists(void *data)
{
int nid;
int __maybe_unused cpu;
pg_data_t *self = data;
unsigned long flags;
write_seqlock_irqsave(&zonelist_update_seq, flags);
printk_deferred_enter();
#ifdef CONFIG_NUMA
memset(node_load, 0, sizeof(node_load));
#endif
if (self && !node_online(self->node_id)) {
build_zonelists(self);
} else {
for_each_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
build_zonelists(pgdat);
}
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
for_each_online_cpu(cpu)
set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
#endif
}
printk_deferred_exit();
write_sequnlock_irqrestore(&zonelist_update_seq, flags);
}
static noinline void __init
build_all_zonelists_init(void)
{
int cpu;
__build_all_zonelists(NULL);
for_each_possible_cpu(cpu)
per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
mminit_verify_zonelist();
cpuset_init_current_mems_allowed();
}
void __ref build_all_zonelists(pg_data_t *pgdat)
{
unsigned long vm_total_pages;
if (system_state == SYSTEM_BOOTING) {
build_all_zonelists_init();
} else {
__build_all_zonelists(pgdat);
}
vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
page_group_by_mobility_disabled = 1;
else
page_group_by_mobility_disabled = 0;
pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
nr_online_nodes,
str_off_on(page_group_by_mobility_disabled),
vm_total_pages);
#ifdef CONFIG_NUMA
pr_info("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}
static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
int batch;
batch = min(zone_managed_pages(zone) >> 12, SZ_256K / PAGE_SIZE);
if (batch <= 1)
return 1;
batch = rounddown_pow_of_two(batch + batch/2) - 1;
return batch;
#else
return 1;
#endif
}
static int percpu_pagelist_high_fraction;
static int zone_highsize(struct zone *zone, int batch, int cpu_online,
int high_fraction)
{
#ifdef CONFIG_MMU
int high;
int nr_split_cpus;
unsigned long total_pages;
if (!high_fraction) {
total_pages = low_wmark_pages(zone);
} else {
total_pages = zone_managed_pages(zone) / high_fraction;
}
nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
if (!nr_split_cpus)
nr_split_cpus = num_online_cpus();
high = total_pages / nr_split_cpus;
high = max(high, batch << 2);
return high;
#else
return 0;
#endif
}
static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min,
unsigned long high_max, unsigned long batch)
{
WRITE_ONCE(pcp->batch, batch);
WRITE_ONCE(pcp->high_min, high_min);
WRITE_ONCE(pcp->high_max, high_max);
}
static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
{
int pindex;
memset(pcp, 0, sizeof(*pcp));
memset(pzstats, 0, sizeof(*pzstats));
spin_lock_init(&pcp->lock);
for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
INIT_LIST_HEAD(&pcp->lists[pindex]);
pcp->high_min = BOOT_PAGESET_HIGH;
pcp->high_max = BOOT_PAGESET_HIGH;
pcp->batch = BOOT_PAGESET_BATCH;
}
static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min,
unsigned long high_max, unsigned long batch)
{
struct per_cpu_pages *pcp;
int cpu;
for_each_possible_cpu(cpu) {
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
pageset_update(pcp, high_min, high_max, batch);
}
}
static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
{
int new_high_min, new_high_max, new_batch;
new_batch = zone_batchsize(zone);
if (percpu_pagelist_high_fraction) {
new_high_min = zone_highsize(zone, new_batch, cpu_online,
percpu_pagelist_high_fraction);
new_high_max = new_high_min;
} else {
new_high_min = zone_highsize(zone, new_batch, cpu_online, 0);
new_high_max = zone_highsize(zone, new_batch, cpu_online,
MIN_PERCPU_PAGELIST_HIGH_FRACTION);
}
if (zone->pageset_high_min == new_high_min &&
zone->pageset_high_max == new_high_max &&
zone->pageset_batch == new_batch)
return;
zone->pageset_high_min = new_high_min;
zone->pageset_high_max = new_high_max;
zone->pageset_batch = new_batch;
__zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max,
new_batch);
}
void __meminit setup_zone_pageset(struct zone *zone)
{
int cpu;
if (sizeof(struct per_cpu_zonestat) > 0)
zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
for_each_possible_cpu(cpu) {
struct per_cpu_pages *pcp;
struct per_cpu_zonestat *pzstats;
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
per_cpu_pages_init(pcp, pzstats);
}
zone_set_pageset_high_and_batch(zone, 0);
}
static void zone_pcp_update(struct zone *zone, int cpu_online)
{
mutex_lock(&pcp_batch_high_lock);
zone_set_pageset_high_and_batch(zone, cpu_online);
mutex_unlock(&pcp_batch_high_lock);
}
static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
{
struct per_cpu_pages *pcp;
struct cpu_cacheinfo *cci;
unsigned long UP_flags;
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
cci = get_cpu_cacheinfo(cpu);
pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
pcp->flags |= PCPF_FREE_HIGH_BATCH;
else
pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
}
void setup_pcp_cacheinfo(unsigned int cpu)
{
struct zone *zone;
for_each_populated_zone(zone)
zone_pcp_update_cacheinfo(zone, cpu);
}
void __init setup_per_cpu_pageset(void)
{
struct pglist_data *pgdat;
struct zone *zone;
int __maybe_unused cpu;
for_each_populated_zone(zone)
setup_zone_pageset(zone);
#ifdef CONFIG_NUMA
for_each_possible_cpu(cpu) {
struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
memset(pzstats->vm_numa_event, 0,
sizeof(pzstats->vm_numa_event));
}
#endif
for_each_online_pgdat(pgdat)
pgdat->per_cpu_nodestats =
alloc_percpu(struct per_cpu_nodestat);
}
__meminit void zone_pcp_init(struct zone *zone)
{
zone->per_cpu_pageset = &boot_pageset;
zone->per_cpu_zonestats = &boot_zonestats;
zone->pageset_high_min = BOOT_PAGESET_HIGH;
zone->pageset_high_max = BOOT_PAGESET_HIGH;
zone->pageset_batch = BOOT_PAGESET_BATCH;
if (populated_zone(zone))
pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
zone->present_pages, zone_batchsize(zone));
}
static void setup_per_zone_lowmem_reserve(void);
void adjust_managed_page_count(struct page *page, long count)
{
atomic_long_add(count, &page_zone(page)->managed_pages);
totalram_pages_add(count);
setup_per_zone_lowmem_reserve();
}
EXPORT_SYMBOL(adjust_managed_page_count);
unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
{
void *pos;
unsigned long pages = 0;
start = (void *)PAGE_ALIGN((unsigned long)start);
end = (void *)((unsigned long)end & PAGE_MASK);
for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
struct page *page = virt_to_page(pos);
void *direct_map_addr;
direct_map_addr = page_address(page);
direct_map_addr = kasan_reset_tag(direct_map_addr);
if ((unsigned int)poison <= 0xFF)
memset(direct_map_addr, poison, PAGE_SIZE);
free_reserved_page(page);
}
if (pages && s)
pr_info("Freeing %s memory: %ldK\n", s, K(pages));
return pages;
}
void free_reserved_page(struct page *page)
{
clear_page_tag_ref(page);
ClearPageReserved(page);
init_page_count(page);
__free_page(page);
adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL(free_reserved_page);
static int page_alloc_cpu_dead(unsigned int cpu)
{
struct zone *zone;
lru_add_drain_cpu(cpu);
mlock_drain_remote(cpu);
drain_pages(cpu);
vm_events_fold_cpu(cpu);
cpu_vm_stats_fold(cpu);
for_each_populated_zone(zone)
zone_pcp_update(zone, 0);
return 0;
}
static int page_alloc_cpu_online(unsigned int cpu)
{
struct zone *zone;
for_each_populated_zone(zone)
zone_pcp_update(zone, 1);
return 0;
}
void __init page_alloc_init_cpuhp(void)
{
int ret;
ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
"mm/page_alloc:pcp",
page_alloc_cpu_online,
page_alloc_cpu_dead);
WARN_ON(ret < 0);
}
static void calculate_totalreserve_pages(void)
{
struct pglist_data *pgdat;
unsigned long reserve_pages = 0;
enum zone_type i, j;
for_each_online_pgdat(pgdat) {
pgdat->totalreserve_pages = 0;
for (i = 0; i < MAX_NR_ZONES; i++) {
struct zone *zone = pgdat->node_zones + i;
long max = 0;
unsigned long managed_pages = zone_managed_pages(zone);
for (j = MAX_NR_ZONES - 1; j > i; j--) {
if (!zone->lowmem_reserve[j])
continue;
max = zone->lowmem_reserve[j];
break;
}
max += high_wmark_pages(zone);
max = min_t(unsigned long, max, managed_pages);
pgdat->totalreserve_pages += max;
reserve_pages += max;
}
}
totalreserve_pages = reserve_pages;
trace_mm_calculate_totalreserve_pages(totalreserve_pages);
}
static void setup_per_zone_lowmem_reserve(void)
{
struct pglist_data *pgdat;
enum zone_type i, j;
for_each_online_pgdat(pgdat) {
for (i = 0; i < MAX_NR_ZONES - 1; i++) {
struct zone *zone = &pgdat->node_zones[i];
int ratio = sysctl_lowmem_reserve_ratio[i];
bool clear = !ratio || !zone_managed_pages(zone);
unsigned long managed_pages = 0;
for (j = i + 1; j < MAX_NR_ZONES; j++) {
struct zone *upper_zone = &pgdat->node_zones[j];
managed_pages += zone_managed_pages(upper_zone);
if (clear)
zone->lowmem_reserve[j] = 0;
else
zone->lowmem_reserve[j] = managed_pages / ratio;
trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone,
zone->lowmem_reserve[j]);
}
}
}
calculate_totalreserve_pages();
}
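/*
 * Recompute the per-zone min/low/high/promo watermarks from min_free_kbytes
 * and watermark_scale_factor. min_free_kbytes is distributed across
 * non-highmem zones in proportion to their managed pages; highmem and
 * movable zones get only a small min watermark since their pages do not
 * serve kernel allocations.
 */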
static void __setup_per_zone_wmarks(void)
{
unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
unsigned long lowmem_pages = 0;
struct zone *zone;
unsigned long flags;
for_each_zone(zone) {
if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
lowmem_pages += zone_managed_pages(zone);
}
for_each_zone(zone) {
u64 tmp;
spin_lock_irqsave(&zone->lock, flags);
tmp = (u64)pages_min * zone_managed_pages(zone);
tmp = div64_ul(tmp, lowmem_pages);
if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
unsigned long min_pages;
min_pages = zone_managed_pages(zone) / 1024;
min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
zone->_watermark[WMARK_MIN] = min_pages;
} else {
zone->_watermark[WMARK_MIN] = tmp;
}
tmp = max_t(u64, tmp >> 2,
mult_frac(zone_managed_pages(zone),
watermark_scale_factor, 10000));
zone->watermark_boost = 0;
zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
trace_mm_setup_per_zone_wmarks(zone);
spin_unlock_irqrestore(&zone->lock, flags);
}
calculate_totalreserve_pages();
}
void setup_per_zone_wmarks(void)
{
struct zone *zone;
static DEFINE_SPINLOCK(lock);
spin_lock(&lock);
__setup_per_zone_wmarks();
spin_unlock(&lock);
for_each_zone(zone)
zone_pcp_update(zone, 0);
}
void calculate_min_free_kbytes(void)
{
unsigned long lowmem_kbytes;
int new_min_free_kbytes;
lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
if (new_min_free_kbytes > user_min_free_kbytes)
min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
else
pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
new_min_free_kbytes, user_min_free_kbytes);
}
int __meminit init_per_zone_wmark_min(void)
{
calculate_min_free_kbytes();
setup_per_zone_wmarks();
refresh_zone_stat_thresholds();
setup_per_zone_lowmem_reserve();
#ifdef CONFIG_NUMA
setup_min_unmapped_ratio();
setup_min_slab_ratio();
#endif
khugepaged_min_free_kbytes_update();
return 0;
}
postcore_initcall(init_per_zone_wmark_min)
static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int rc;
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;
if (write) {
user_min_free_kbytes = min_free_kbytes;
setup_per_zone_wmarks();
}
return 0;
}
static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int rc;
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;
if (write)
setup_per_zone_wmarks();
return 0;
}
#ifdef CONFIG_NUMA
static void setup_min_unmapped_ratio(void)
{
pg_data_t *pgdat;
struct zone *zone;
for_each_online_pgdat(pgdat)
pgdat->min_unmapped_pages = 0;
for_each_zone(zone)
zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
sysctl_min_unmapped_ratio) / 100;
}
static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int rc;
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;
setup_min_unmapped_ratio();
return 0;
}
static void setup_min_slab_ratio(void)
{
pg_data_t *pgdat;
struct zone *zone;
for_each_online_pgdat(pgdat)
pgdat->min_slab_pages = 0;
for_each_zone(zone)
zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
sysctl_min_slab_ratio) / 100;
}
static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int rc;
rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;
setup_min_slab_ratio();
return 0;
}
#endif
static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table,
int write, void *buffer, size_t *length, loff_t *ppos)
{
int i;
proc_dointvec_minmax(table, write, buffer, length, ppos);
for (i = 0; i < MAX_NR_ZONES; i++) {
if (sysctl_lowmem_reserve_ratio[i] < 1)
sysctl_lowmem_reserve_ratio[i] = 0;
}
setup_per_zone_lowmem_reserve();
return 0;
}
static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table,
int write, void *buffer, size_t *length, loff_t *ppos)
{
struct zone *zone;
int old_percpu_pagelist_high_fraction;
int ret;
if (!write)
return proc_dointvec_minmax(table, write, buffer, length, ppos);
mutex_lock(&pcp_batch_high_lock);
old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (ret < 0)
goto out;
if (percpu_pagelist_high_fraction &&
percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
ret = -EINVAL;
goto out;
}
if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
goto out;
for_each_populated_zone(zone)
zone_set_pageset_high_and_batch(zone, 0);
out:
mutex_unlock(&pcp_batch_high_lock);
return ret;
}
static const struct ctl_table page_alloc_sysctl_table[] = {
{
.procname = "min_free_kbytes",
.data = &min_free_kbytes,
.maxlen = sizeof(min_free_kbytes),
.mode = 0644,
.proc_handler = min_free_kbytes_sysctl_handler,
.extra1 = SYSCTL_ZERO,
},
{
.procname = "watermark_boost_factor",
.data = &watermark_boost_factor,
.maxlen = sizeof(watermark_boost_factor),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
},
{
.procname = "watermark_scale_factor",
.data = &watermark_scale_factor,
.maxlen = sizeof(watermark_scale_factor),
.mode = 0644,
.proc_handler = watermark_scale_factor_sysctl_handler,
.extra1 = SYSCTL_ONE,
.extra2 = SYSCTL_THREE_THOUSAND,
},
{
.procname = "defrag_mode",
.data = &defrag_mode,
.maxlen = sizeof(defrag_mode),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "percpu_pagelist_high_fraction",
.data = &percpu_pagelist_high_fraction,
.maxlen = sizeof(percpu_pagelist_high_fraction),
.mode = 0644,
.proc_handler = percpu_pagelist_high_fraction_sysctl_handler,
.extra1 = SYSCTL_ZERO,
},
{
.procname = "lowmem_reserve_ratio",
.data = &sysctl_lowmem_reserve_ratio,
.maxlen = sizeof(sysctl_lowmem_reserve_ratio),
.mode = 0644,
.proc_handler = lowmem_reserve_ratio_sysctl_handler,
},
#ifdef CONFIG_NUMA
{
.procname = "numa_zonelist_order",
.data = &numa_zonelist_order,
.maxlen = NUMA_ZONELIST_ORDER_LEN,
.mode = 0644,
.proc_handler = numa_zonelist_order_handler,
},
{
.procname = "min_unmapped_ratio",
.data = &sysctl_min_unmapped_ratio,
.maxlen = sizeof(sysctl_min_unmapped_ratio),
.mode = 0644,
.proc_handler = sysctl_min_unmapped_ratio_sysctl_handler,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE_HUNDRED,
},
{
.procname = "min_slab_ratio",
.data = &sysctl_min_slab_ratio,
.maxlen = sizeof(sysctl_min_slab_ratio),
.mode = 0644,
.proc_handler = sysctl_min_slab_ratio_sysctl_handler,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE_HUNDRED,
},
#endif
};
void __init page_alloc_sysctl_init(void)
{
register_sysctl_init("vm", page_alloc_sysctl_table);
}
#ifdef CONFIG_CONTIG_ALLOC
static void alloc_contig_dump_pages(struct list_head *page_list)
{
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
struct page *page;
dump_stack();
list_for_each_entry(page, page_list, lru)
dump_page(page, "migration failure");
}
}
static int __alloc_contig_migrate_range(struct compact_control *cc,
unsigned long start, unsigned long end)
{
unsigned int nr_reclaimed;
unsigned long pfn = start;
unsigned int tries = 0;
int ret = 0;
struct migration_target_control mtc = {
.nid = zone_to_nid(cc->zone),
.gfp_mask = cc->gfp_mask,
.reason = MR_CONTIG_RANGE,
};
lru_cache_disable();
while (pfn < end || !list_empty(&cc->migratepages)) {
if (fatal_signal_pending(current)) {
ret = -EINTR;
break;
}
if (list_empty(&cc->migratepages)) {
cc->nr_migratepages = 0;
ret = isolate_migratepages_range(cc, pfn, end);
if (ret && ret != -EAGAIN)
break;
pfn = cc->migrate_pfn;
tries = 0;
} else if (++tries == 5) {
ret = -EBUSY;
break;
}
nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
&cc->migratepages);
cc->nr_migratepages -= nr_reclaimed;
ret = migrate_pages(&cc->migratepages, alloc_migration_target,
NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
if (ret == -ENOMEM)
break;
}
lru_cache_enable();
if (ret < 0) {
if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
alloc_contig_dump_pages(&cc->migratepages);
putback_movable_pages(&cc->migratepages);
}
return (ret < 0) ? ret : 0;
}
static void split_free_frozen_pages(struct list_head *list, gfp_t gfp_mask)
{
int order;
for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct page *page, *next;
int nr_pages = 1 << order;
list_for_each_entry_safe(page, next, &list[order], lru) {
int i;
post_alloc_hook(page, order, gfp_mask);
if (!order)
continue;
__split_page(page, order);
list_del(&page->lru);
for (i = 0; i < nr_pages; i++)
list_add_tail(&page[i].lru, &list[0]);
}
}
}
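/*
 * Sanity-check the caller's gfp_mask and derive the mask used internally for
 * compaction and migration. Placement hints are ignored; anything outside
 * the listed reclaim and action modifiers is rejected with -EINVAL.
 */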
static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
{
const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
__GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO |
__GFP_SKIP_KASAN;
const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE |
__GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE);
if (gfp_mask & ~(reclaim_mask | action_mask))
return -EINVAL;
*gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) |
__GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
return 0;
}
static void __free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages)
{
for (; nr_pages--; pfn++)
free_frozen_pages(pfn_to_page(pfn), 0);
}
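/*
 * alloc_contig_frozen_range_noprof - try to allocate the given PFN range
 * @start: start PFN to allocate
 * @end: one-past-the-last PFN to allocate
 * @alloc_flags: allocation information
 * @gfp_mask: GFP mask
 *
 * The range does not have to be pageblock aligned, but it must belong to a
 * single zone. The covering pageblocks are isolated, in-use pages are
 * migrated away, and the resulting free pages are claimed with a frozen
 * (zero) reference count. Returns 0 on success or a negative errno; on
 * failure the range is released again, although pages may already have been
 * migrated out of it.
 */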
int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
acr_flags_t alloc_flags, gfp_t gfp_mask)
{
const unsigned int order = ilog2(end - start);
unsigned long outer_start, outer_end;
int ret = 0;
struct compact_control cc = {
.nr_migratepages = 0,
.order = -1,
.zone = page_zone(pfn_to_page(start)),
.mode = MIGRATE_SYNC,
.ignore_skip_hint = true,
.no_set_skip_hint = true,
.alloc_contig = true,
};
enum pb_isolate_mode mode = (alloc_flags & ACR_FLAGS_CMA) ?
PB_ISOLATE_MODE_CMA_ALLOC :
PB_ISOLATE_MODE_OTHER;
INIT_LIST_HEAD(&cc.migratepages);

if (WARN_ON_ONCE((gfp_mask & __GFP_COMP) && order > MAX_FOLIO_ORDER))
return -EINVAL;
gfp_mask = current_gfp_context(gfp_mask);
if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
return -EINVAL;
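/*
 * Mark the covering pageblocks MIGRATE_ISOLATE so the allocator stops
 * handing out pages from the range while it is being migrated and claimed.
 */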
ret = start_isolate_page_range(start, end, mode);
if (ret)
goto done;
drain_all_pages(cc.zone);
ret = __alloc_contig_migrate_range(&cc, start, end);
if (ret && ret != -EBUSY)
goto done;
ret = replace_free_hugepage_folios(start, end);
if (ret)
goto done;
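/*
 * start may sit in the middle of a larger free buddy block; back up to the
 * head of that block so isolate_freepages_range() sees whole free pages.
 */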
outer_start = find_large_buddy(start);
if (test_pages_isolated(outer_start, end, mode)) {
ret = -EBUSY;
goto done;
}
outer_end = isolate_freepages_range(&cc, outer_start, end);
if (!outer_end) {
ret = -EBUSY;
goto done;
}
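/*
 * [outer_start, outer_end) was grabbed but only [start, end) was requested.
 * For non-compound allocations, split everything to order-0 and give back
 * the head and tail excess; a compound allocation must match the requested
 * power-of-two range exactly.
 */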
if (!(gfp_mask & __GFP_COMP)) {
split_free_frozen_pages(cc.freepages, gfp_mask);
if (start != outer_start)
__free_contig_frozen_range(outer_start, start - outer_start);
if (end != outer_end)
__free_contig_frozen_range(end, outer_end - end);
} else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) {
struct page *head = pfn_to_page(start);
check_new_pages(head, order);
prep_new_page(head, order, gfp_mask, 0);
} else {
ret = -EINVAL;
WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",
start, end, outer_start, outer_end);
}
done:
undo_isolate_page_range(start, end);
return ret;
}
EXPORT_SYMBOL(alloc_contig_frozen_range_noprof);
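/*
 * Refcounted front end: allocate the frozen range and then set every page's
 * reference count to one. Compound (__GFP_COMP) requests must use the frozen
 * variant directly.
 */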
int alloc_contig_range_noprof(unsigned long start, unsigned long end,
acr_flags_t alloc_flags, gfp_t gfp_mask)
{
int ret;
if (WARN_ON(gfp_mask & __GFP_COMP))
return -EINVAL;
ret = alloc_contig_frozen_range_noprof(start, end, alloc_flags, gfp_mask);
if (!ret)
set_pages_refcounted(pfn_to_page(start), end - start);
return ret;
}
EXPORT_SYMBOL(alloc_contig_range_noprof);
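/*
 * Quick check whether [start_pfn, start_pfn + nr_pages) looks allocatable as
 * a contiguous range: every PFN must be online, in the given zone and not
 * obviously unmovable. HugeTLB folios are either skipped (reported via
 * @skipped_hugetlb) or tolerated only if they are below MAX_FOLIO_ORDER and
 * strictly smaller than the requested range.
 */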
static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
unsigned long nr_pages, bool skip_hugetlb,
bool *skipped_hugetlb)
{
unsigned long end_pfn = start_pfn + nr_pages;
struct page *page;
while (start_pfn < end_pfn) {
unsigned long step = 1;
page = pfn_to_online_page(start_pfn);
if (!page)
return false;
if (page_zone(page) != z)
return false;
if (page_is_unmovable(z, page, PB_ISOLATE_MODE_OTHER, &step))
return false;
if (PageHuge(page)) {
unsigned int order;
if (skip_hugetlb) {
*skipped_hugetlb = true;
return false;
}
page = compound_head(page);
order = compound_order(page);
if ((order >= MAX_FOLIO_ORDER) ||
(nr_pages <= (1 << order)))
return false;
}
start_pfn += step;
}
return true;
}
static bool zone_spans_last_pfn(const struct zone *zone,
unsigned long start_pfn, unsigned long nr_pages)
{
unsigned long last_pfn = start_pfn + nr_pages - 1;
return zone_spans_pfn(zone, last_pfn);
}
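/*
 * alloc_contig_frozen_pages_noprof - allocate nr_pages contiguous frozen
 * pages from anywhere in the zones permitted by @gfp_mask, @nid and
 * @nodemask
 *
 * Walks the zonelist and, under the zone lock, scans nr_pages-aligned
 * candidate ranges with pfn_range_valid_contig(); the lock is dropped for
 * the actual alloc_contig_frozen_range_noprof() attempt. Ranges containing
 * HugeTLB folios are skipped on the first pass and only considered in a
 * second pass if nothing else worked. Returns the first page of the range,
 * or NULL on failure.
 */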
struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages,
gfp_t gfp_mask, int nid, nodemask_t *nodemask)
{
unsigned long ret, pfn, flags;
struct zonelist *zonelist;
struct zone *zone;
struct zoneref *z;
bool skip_hugetlb = true;
bool skipped_hugetlb = false;
retry:
zonelist = node_zonelist(nid, gfp_mask);
for_each_zone_zonelist_nodemask(zone, z, zonelist,
gfp_zone(gfp_mask), nodemask) {
spin_lock_irqsave(&zone->lock, flags);
pfn = ALIGN(zone->zone_start_pfn, nr_pages);
while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
if (pfn_range_valid_contig(zone, pfn, nr_pages,
skip_hugetlb,
&skipped_hugetlb)) {
spin_unlock_irqrestore(&zone->lock, flags);
ret = alloc_contig_frozen_range_noprof(pfn,
pfn + nr_pages,
ACR_FLAGS_NONE,
gfp_mask);
if (!ret)
return pfn_to_page(pfn);
spin_lock_irqsave(&zone->lock, flags);
}
pfn += nr_pages;
}
spin_unlock_irqrestore(&zone->lock, flags);
}
if (skip_hugetlb && skipped_hugetlb) {
skip_hugetlb = false;
goto retry;
}
return NULL;
}
EXPORT_SYMBOL(alloc_contig_frozen_pages_noprof);
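/*
 * Refcounted wrapper around alloc_contig_frozen_pages_noprof(); compound
 * (__GFP_COMP) requests are rejected here and must use the frozen variant.
 */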
struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{
struct page *page;
if (WARN_ON(gfp_mask & __GFP_COMP))
return NULL;
page = alloc_contig_frozen_pages_noprof(nr_pages, gfp_mask, nid,
nodemask);
if (page)
set_pages_refcounted(page, nr_pages);
return page;
}
EXPORT_SYMBOL(alloc_contig_pages_noprof);
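/*
 * Free a frozen range obtained from alloc_contig_frozen_range_noprof() or
 * alloc_contig_frozen_pages_noprof(). A compound allocation is freed as one
 * high-order page; anything else is freed page by page.
 */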
void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages)
{
struct page *first_page = pfn_to_page(pfn);
const unsigned int order = ilog2(nr_pages);
if (WARN_ON_ONCE(first_page != compound_head(first_page)))
return;
if (PageHead(first_page)) {
WARN_ON_ONCE(order != compound_order(first_page));
free_frozen_pages(first_page, order);
return;
}
__free_contig_frozen_range(pfn, nr_pages);
}
EXPORT_SYMBOL(free_contig_frozen_range);
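/*
 * Free a refcounted range obtained from alloc_contig_range_noprof() or
 * alloc_contig_pages_noprof(), dropping one reference per page.
 */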
void free_contig_range(unsigned long pfn, unsigned long nr_pages)
{
if (WARN_ON_ONCE(PageHead(pfn_to_page(pfn))))
return;
for (; nr_pages--; pfn++)
__free_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL(free_contig_range);
#endif
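/*
 * Disable per-cpu page lists for the zone: force high to zero and batch to
 * one, then drain whatever is currently cached. pcp_batch_high_lock is held
 * until the matching zone_pcp_enable() call.
 */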
void zone_pcp_disable(struct zone *zone)
{
mutex_lock(&pcp_batch_high_lock);
__zone_set_pageset_high_and_batch(zone, 0, 0, 1);
__drain_all_pages(zone, true);
}
void zone_pcp_enable(struct zone *zone)
{
__zone_set_pageset_high_and_batch(zone, zone->pageset_high_min,
zone->pageset_high_max, zone->pageset_batch);
mutex_unlock(&pcp_batch_high_lock);
}
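/*
 * Tear the zone's per-cpu pagesets back down to the boot-time ones, folding
 * any per-cpu vmstat deltas into the zone counters first.
 */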
void zone_pcp_reset(struct zone *zone)
{
int cpu;
struct per_cpu_zonestat *pzstats;
if (zone->per_cpu_pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
drain_zonestat(zone, pzstats);
}
free_percpu(zone->per_cpu_pageset);
zone->per_cpu_pageset = &boot_pageset;
if (zone->per_cpu_zonestats != &boot_zonestats) {
free_percpu(zone->per_cpu_zonestats);
zone->per_cpu_zonestats = &boot_zonestats;
}
}
}
#ifdef CONFIG_MEMORY_HOTREMOVE
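/*
 * All pages in the range must already be isolated and free (or HWPoisoned or
 * PageOffline) when this is called. Remaining free pages are pulled off the
 * buddy lists so the memory can go offline. Returns the number of pages that
 * were not already offline.
 */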
unsigned long __offline_isolated_pages(unsigned long start_pfn,
unsigned long end_pfn)
{
unsigned long already_offline = 0, flags;
unsigned long pfn = start_pfn;
struct page *page;
struct zone *zone;
unsigned int order;
offline_mem_sections(pfn, end_pfn);
zone = page_zone(pfn_to_page(pfn));
spin_lock_irqsave(&zone->lock, flags);
while (pfn < end_pfn) {
page = pfn_to_page(pfn);
if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
pfn++;
continue;
}
if (PageOffline(page)) {
BUG_ON(page_count(page));
BUG_ON(PageBuddy(page));
already_offline++;
pfn++;
continue;
}
BUG_ON(page_count(page));
BUG_ON(!PageBuddy(page));
VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE);
order = buddy_order(page);
del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE);
pfn += (1 << order);
}
spin_unlock_irqrestore(&zone->lock, flags);
return end_pfn - start_pfn - already_offline;
}
#endif
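/*
 * Check whether the page is free and part of a buddy block. No zone lock is
 * taken, so the result is inherently racy and only a hint.
 */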
bool is_free_buddy_page(const struct page *page)
{
unsigned long pfn = page_to_pfn(page);
unsigned int order;
for (order = 0; order < NR_PAGE_ORDERS; order++) {
const struct page *head = page - (pfn & ((1 << order) - 1));
if (PageBuddy(head) &&
buddy_order_unsafe(head) >= order)
break;
}
return order <= MAX_PAGE_ORDER;
}
EXPORT_SYMBOL(is_free_buddy_page);
#ifdef CONFIG_MEMORY_FAILURE
static inline void add_to_free_list(struct page *page, struct zone *zone,
unsigned int order, int migratetype,
bool tail)
{
__add_to_free_list(page, zone, order, migratetype, tail);
account_freepages(zone, 1 << order, migratetype);
}
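/*
 * Break a high-order buddy block down into smaller blocks, putting every
 * piece that does not contain @target back on the free lists, so that only
 * the page containing @target stays out of the allocator.
 */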
static void break_down_buddy_pages(struct zone *zone, struct page *page,
struct page *target, int low, int high,
int migratetype)
{
unsigned long size = 1 << high;
struct page *current_buddy;
while (high > low) {
high--;
size >>= 1;
if (target >= &page[size]) {
current_buddy = page;
page = page + size;
} else {
current_buddy = page + size;
}
if (set_page_guard(zone, current_buddy, high))
continue;
add_to_free_list(current_buddy, zone, high, migratetype, false);
set_buddy_order(current_buddy, high);
}
}
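/*
 * Take a page that is about to be marked HWPoison off the buddy allocator:
 * find the free block containing it, remove that block from the free lists
 * and give back every sub-block except the poisoned page itself.
 */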
bool take_page_off_buddy(struct page *page)
{
struct zone *zone = page_zone(page);
unsigned long pfn = page_to_pfn(page);
unsigned long flags;
unsigned int order;
bool ret = false;
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct page *page_head = page - (pfn & ((1 << order) - 1));
int page_order = buddy_order(page_head);
if (PageBuddy(page_head) && page_order >= order) {
unsigned long pfn_head = page_to_pfn(page_head);
int migratetype = get_pfnblock_migratetype(page_head,
pfn_head);
del_page_from_free_list(page_head, zone, page_order,
migratetype);
break_down_buddy_pages(zone, page_head, page, 0,
page_order, migratetype);
SetPageHWPoisonTakenOff(page);
ret = true;
break;
}
if (page_count(page_head) > 0)
break;
}
spin_unlock_irqrestore(&zone->lock, flags);
return ret;
}
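/*
 * Undo take_page_off_buddy(): clear the taken-off state and return the page
 * to the buddy allocator once its last reference is dropped.
 */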
bool put_page_back_buddy(struct page *page)
{
struct zone *zone = page_zone(page);
unsigned long flags;
bool ret = false;
spin_lock_irqsave(&zone->lock, flags);
if (put_page_testzero(page)) {
unsigned long pfn = page_to_pfn(page);
int migratetype = get_pfnblock_migratetype(page, pfn);
ClearPageHWPoisonTakenOff(page);
__free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
if (TestClearPageHWPoison(page)) {
ret = true;
}
}
spin_unlock_irqrestore(&zone->lock, flags);
return ret;
}
#endif
bool has_managed_zone(enum zone_type zone)
{
struct pglist_data *pgdat;
for_each_online_pgdat(pgdat) {
if (managed_zone(&pgdat->node_zones[zone]))
return true;
}
return false;
}
#ifdef CONFIG_UNACCEPTED_MEMORY
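/*
 * "accept_memory=lazy" (the default) defers acceptance of unaccepted memory
 * until it is actually needed; "accept_memory=eager" disables the deferral,
 * so pages are accepted as they are first handed to the page allocator.
 */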
static bool lazy_accept = true;
static int __init accept_memory_parse(char *p)
{
if (!strcmp(p, "lazy")) {
lazy_accept = true;
return 0;
} else if (!strcmp(p, "eager")) {
lazy_accept = false;
return 0;
} else {
return -EINVAL;
}
}
early_param("accept_memory", accept_memory_parse);
static bool page_contains_unaccepted(struct page *page, unsigned int order)
{
phys_addr_t start = page_to_phys(page);
return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
}
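/*
 * Accept one MAX_PAGE_ORDER block: remove it from the zone's unaccepted list
 * and counters, drop the zone lock before the potentially slow acceptance,
 * then hand the block to the buddy allocator.
 */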
static void __accept_page(struct zone *zone, unsigned long *flags,
struct page *page)
{
list_del(&page->lru);
account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
__ClearPageUnaccepted(page);
spin_unlock_irqrestore(&zone->lock, *flags);
accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
__free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
}
void accept_page(struct page *page)
{
struct zone *zone = page_zone(page);
unsigned long flags;
spin_lock_irqsave(&zone->lock, flags);
if (!PageUnaccepted(page)) {
spin_unlock_irqrestore(&zone->lock, flags);
return;
}
__accept_page(zone, &flags, page);
}
static bool try_to_accept_memory_one(struct zone *zone)
{
unsigned long flags;
struct page *page;
spin_lock_irqsave(&zone->lock, flags);
page = list_first_entry_or_null(&zone->unaccepted_pages,
struct page, lru);
if (!page) {
spin_unlock_irqrestore(&zone->lock, flags);
return false;
}
__accept_page(zone, &flags, page);
return true;
}
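/*
 * Accept just enough unaccepted memory to bring the zone back over its promo
 * watermark (or a single block if that watermark is zero). Skipped entirely
 * for ALLOC_TRYLOCK allocations, which must not block on acceptance.
 */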
static bool cond_accept_memory(struct zone *zone, unsigned int order,
int alloc_flags)
{
long to_accept, wmark;
bool ret = false;
if (list_empty(&zone->unaccepted_pages))
return false;
if (alloc_flags & ALLOC_TRYLOCK)
return false;
wmark = promo_wmark_pages(zone);
if (!wmark)
return try_to_accept_memory_one(zone);
to_accept = wmark -
(zone_page_state(zone, NR_FREE_PAGES) -
__zone_watermark_unusable_free(zone, order, 0) -
zone_page_state(zone, NR_UNACCEPTED));
while (to_accept > 0) {
if (!try_to_accept_memory_one(zone))
break;
ret = true;
to_accept -= MAX_ORDER_NR_PAGES;
}
return ret;
}
static bool __free_unaccepted(struct page *page)
{
struct zone *zone = page_zone(page);
unsigned long flags;
if (!lazy_accept)
return false;
spin_lock_irqsave(&zone->lock, flags);
list_add_tail(&page->lru, &zone->unaccepted_pages);
account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
__SetPageUnaccepted(page);
spin_unlock_irqrestore(&zone->lock, flags);
return true;
}
#else
static bool page_contains_unaccepted(struct page *page, unsigned int order)
{
return false;
}
static bool cond_accept_memory(struct zone *zone, unsigned int order,
int alloc_flags)
{
return false;
}
static bool __free_unaccepted(struct page *page)
{
BUILD_BUG();
return false;
}
#endif
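/*
 * Opportunistic, trylock-only allocation intended to be safe from any
 * context (on PREEMPT_RT it bails out in NMI and hardirq context). It never
 * sleeps and returns NULL rather than contending for the per-cpu or zone
 * locks. Only __GFP_ACCOUNT may be passed in @gfp_flags, and the returned
 * page has a frozen (zero) reference count.
 */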
struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
{
gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC | __GFP_COMP
| gfp_flags;
unsigned int alloc_flags = ALLOC_TRYLOCK;
struct alloc_context ac = { };
struct page *page;
VM_WARN_ON_ONCE(gfp_flags & ~__GFP_ACCOUNT);
if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
return NULL;
if (!pcp_allowed_order(order))
return NULL;
if (deferred_pages_enabled())
return NULL;
if (nid == NUMA_NO_NODE)
nid = numa_node_id();
prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac,
&alloc_gfp, &alloc_flags);
page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
if (memcg_kmem_online() && page && (gfp_flags & __GFP_ACCOUNT) &&
unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
__free_frozen_pages(page, order, FPI_TRYLOCK);
page = NULL;
}
trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
kmsan_alloc_page(page, order, alloc_gfp);
return page;
}
struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
{
struct page *page;
page = alloc_frozen_pages_nolock_noprof(gfp_flags, nid, order);
if (page)
set_page_refcounted(page);
return page;
}
EXPORT_SYMBOL_GPL(alloc_pages_nolock_noprof);