#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>
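/* How many pages do we try to swap or page in/out together? As a power of 2 */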
int page_cluster;
static const int page_cluster_max = 31;
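
/*
 * Per-CPU caches of folios that are pending a move on or between LRU lists.
 * Batches up to and including lru_activate are protected by "lock", which
 * only disables preemption; batches at or after "lock_irq" must be filled
 * with interrupts disabled as well.
 */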
struct cpu_fbatches {
local_lock_t lock;
struct folio_batch lru_add;
struct folio_batch lru_deactivate_file;
struct folio_batch lru_deactivate;
struct folio_batch lru_lazyfree;
#ifdef CONFIG_SMP
struct folio_batch lru_activate;
#endif
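	/* Protecting the following batches which require disabling interrupts */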
local_lock_t lock_irq;
struct folio_batch lru_move_tail;
};
static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
.lock = INIT_LOCAL_LOCK(lock),
.lock_irq = INIT_LOCAL_LOCK(lock_irq),
};
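
/*
 * Pull a folio off its LRU list prior to its release.  The lruvec lock is
 * taken (or switched) via folio_lruvec_relock_irqsave() only if the folio
 * is actually on an LRU; the caller must drop *lruvecp afterwards.
 */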
static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
unsigned long *flagsp)
{
if (folio_test_lru(folio)) {
folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
lruvec_del_folio(*lruvecp, folio);
__folio_clear_lru_flags(folio);
}
}
static void page_cache_release(struct folio *folio)
{
struct lruvec *lruvec = NULL;
unsigned long flags;
__page_cache_release(folio, &lruvec, &flags);
if (lruvec)
unlock_page_lruvec_irqrestore(lruvec, flags);
}
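
/**
 * __folio_put - Free a folio whose reference count has dropped to zero.
 * @folio: The folio.
 *
 * Zone-device and hugetlb folios take their dedicated free paths; any other
 * folio is removed from the LRU, uncharged from its memcg and handed back
 * to the page allocator.
 */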
void __folio_put(struct folio *folio)
{
if (unlikely(folio_is_zone_device(folio))) {
free_zone_device_folio(folio);
return;
}
if (folio_test_hugetlb(folio)) {
free_huge_folio(folio);
return;
}
page_cache_release(folio);
folio_unqueue_deferred_split(folio);
mem_cgroup_uncharge(folio);
free_frozen_pages(&folio->page, folio_order(folio));
}
EXPORT_SYMBOL(__folio_put);
typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
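
/*
 * Add a folio to an LRU list, rescuing or culling it across the
 * evictable/unevictable boundary as needed.  Runs with the lruvec lock held;
 * the folio's LRU flag is set by the caller once the move is complete.
 */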
static void lru_add(struct lruvec *lruvec, struct folio *folio)
{
int was_unevictable = folio_test_clear_unevictable(folio);
long nr_pages = folio_nr_pages(folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
if (folio_evictable(folio)) {
if (was_unevictable)
__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
} else {
folio_clear_active(folio);
folio_set_unevictable(folio);
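		/*
		 * folio->mlock_count = !!folio_test_mlocked(folio)?
		 * But that leaves __mlock_folio() in doubt whether another
		 * actor has already counted the mlock or not.  Err on the
		 * safe side, underestimate: let page reclaim fix it, rather
		 * than leaving a folio on the unevictable LRU indefinitely.
		 */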
folio->mlock_count = 0;
if (!was_unevictable)
__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
}
lruvec_add_folio(lruvec, folio);
trace_mm_lru_insertion(folio);
}
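
/*
 * Flush a folio batch: run move_fn on each folio under its lruvec lock,
 * then drop the batch's folio references.  Except for lru_add, a folio
 * whose LRU flag cannot be cleared is skipped; holding the flag clear
 * blocks memcg migration while the folio moves between LRU lists.
 */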
static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
{
int i;
struct lruvec *lruvec = NULL;
unsigned long flags = 0;
for (i = 0; i < folio_batch_count(fbatch); i++) {
struct folio *folio = fbatch->folios[i];
if (move_fn != lru_add && !folio_test_clear_lru(folio))
continue;
folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
move_fn(lruvec, folio);
folio_set_lru(folio);
}
if (lruvec)
unlock_page_lruvec_irqrestore(lruvec, flags);
folios_put(fbatch);
}
static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
struct folio *folio, move_fn_t move_fn, bool disable_irq)
{
unsigned long flags;
folio_get(folio);
if (disable_irq)
local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
else
local_lock(&cpu_fbatches.lock);
if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
!folio_may_be_lru_cached(folio) || lru_cache_disabled())
folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
if (disable_irq)
local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
else
local_unlock(&cpu_fbatches.lock);
}
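
/*
 * Whether a batch needs IRQs disabled is encoded in its position within
 * struct cpu_fbatches: members at or beyond lock_irq are IRQ-protected,
 * which the offsetof() comparison below resolves at compile time.
 */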
#define folio_batch_add_and_move(folio, op) \
__folio_batch_add_and_move( \
&cpu_fbatches.op, \
folio, \
op, \
offsetof(struct cpu_fbatches, op) >= \
offsetof(struct cpu_fbatches, lock_irq) \
)
static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
{
if (folio_test_unevictable(folio))
return;
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
lruvec_add_folio_tail(lruvec, folio);
__count_vm_events(PGROTATED, folio_nr_pages(folio));
}
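
/*
 * Writeback is about to end against a folio which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it
 * to the tail of the inactive list.
 *
 * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
 */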
void folio_rotate_reclaimable(struct folio *folio)
{
if (folio_test_locked(folio) || folio_test_dirty(folio) ||
folio_test_unevictable(folio) || !folio_test_lru(folio))
return;
folio_batch_add_and_move(folio, lru_move_tail);
}
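
/*
 * Note the cost of one pageout or rotation event against this lruvec and
 * all of its ancestors, decaying the running averages as they grow.
 * Called with the lruvec lock held; the lock is dropped before returning.
 */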
void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
unsigned int nr_io, unsigned int nr_rotated)
__releases(lruvec->lru_lock)
{
unsigned long cost;
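	/*
	 * Reflect the relative cost of incurring IO and spending CPU
	 * time on rotations.  This doesn't attempt to measure IO cost
	 * explicitly, but 1:SWAP_CLUSTER_MAX is a reasonable estimate.
	 */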
cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated;
if (!cost) {
spin_unlock_irq(&lruvec->lru_lock);
return;
}
for (;;) {
unsigned long lrusize;
if (file)
lruvec->file_cost += cost;
else
lruvec->anon_cost += cost;
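		/*
		 * Decay previous events
		 *
		 * Because workloads change over time (and to avoid
		 * overflow) we keep these statistics as a floating
		 * average, which ends up weighing recent refaults
		 * more than old ones.
		 */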
lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
lruvec_page_state(lruvec, NR_ACTIVE_FILE);
if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
lruvec->file_cost /= 2;
lruvec->anon_cost /= 2;
}
spin_unlock_irq(&lruvec->lru_lock);
lruvec = parent_lruvec(lruvec);
if (!lruvec)
break;
spin_lock_irq(&lruvec->lru_lock);
}
}
void lru_note_cost_refault(struct folio *folio)
{
struct lruvec *lruvec;
lruvec = folio_lruvec_lock_irq(folio);
lru_note_cost_unlock_irq(lruvec, folio_is_file_lru(folio),
folio_nr_pages(folio), 0);
}
static void lru_activate(struct lruvec *lruvec, struct folio *folio)
{
long nr_pages = folio_nr_pages(folio);
if (folio_test_active(folio) || folio_test_unevictable(folio))
return;
lruvec_del_folio(lruvec, folio);
folio_set_active(folio);
lruvec_add_folio(lruvec, folio);
trace_mm_lru_activate(folio);
__count_vm_events(PGACTIVATE, nr_pages);
count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
}
#ifdef CONFIG_SMP
static void folio_activate_drain(int cpu)
{
struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu);
if (folio_batch_count(fbatch))
folio_batch_move_lru(fbatch, lru_activate);
}
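
/*
 * Queue an LRU folio for promotion to the active list.  The move itself is
 * batched and performed when the per-CPU lru_activate batch is drained.
 */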
void folio_activate(struct folio *folio)
{
if (folio_test_active(folio) || folio_test_unevictable(folio) ||
!folio_test_lru(folio))
return;
folio_batch_add_and_move(folio, lru_activate);
}
#else
static inline void folio_activate_drain(int cpu)
{
}
void folio_activate(struct folio *folio)
{
struct lruvec *lruvec;
if (!folio_test_clear_lru(folio))
return;
lruvec = folio_lruvec_lock_irq(folio);
lru_activate(lruvec, folio);
unlock_page_lruvec_irq(lruvec);
folio_set_lru(folio);
}
#endif
static void __lru_cache_activate_folio(struct folio *folio)
{
struct folio_batch *fbatch;
int i;
local_lock(&cpu_fbatches.lock);
fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
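	/*
	 * Search backwards on the optimistic assumption that the folio being
	 * activated has just been added to this batch.  Note that only
	 * the local batch is examined as a !LRU folio could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * batch that is currently being drained.
	 */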
for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
struct folio *batch_folio = fbatch->folios[i];
if (batch_folio == folio) {
folio_set_active(folio);
break;
}
}
local_unlock(&cpu_fbatches.lock);
}
#ifdef CONFIG_LRU_GEN
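/*
 * With generational LRU, accesses are tracked in a small reference counter
 * held in folio->flags (LRU_REFS_MASK): the first access sets PG_referenced,
 * later ones increment the counter, and once it saturates the folio is
 * marked PG_workingset.
 */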
static void lru_gen_inc_refs(struct folio *folio)
{
unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
if (folio_test_unevictable(folio))
return;
if (!folio_test_referenced(folio)) {
set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
return;
}
do {
if ((old_flags & LRU_REFS_MASK) == LRU_REFS_MASK) {
if (!folio_test_workingset(folio))
folio_set_workingset(folio);
return;
}
new_flags = old_flags + BIT(LRU_REFS_PGOFF);
} while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
}
static bool lru_gen_clear_refs(struct folio *folio)
{
struct lru_gen_folio *lrugen;
int gen = folio_lru_gen(folio);
int type = folio_is_file_lru(folio);
if (gen < 0)
return true;
set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS | BIT(PG_workingset), 0);
lrugen = &folio_lruvec(folio)->lrugen;
return gen == lru_gen_from_seq(READ_ONCE(lrugen->min_seq[type]));
}
#else
static void lru_gen_inc_refs(struct folio *folio)
{
}
static bool lru_gen_clear_refs(struct folio *folio)
{
return false;
}
#endif
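
/*
 * Mark a folio as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated folio is not yet visible, so safe for non-atomic ops,
 * __folio_set_referenced() may be substituted for folio_mark_accessed().
 */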
void folio_mark_accessed(struct folio *folio)
{
if (folio_test_dropbehind(folio))
return;
if (lru_gen_enabled()) {
lru_gen_inc_refs(folio);
return;
}
if (!folio_test_referenced(folio)) {
folio_set_referenced(folio);
} else if (folio_test_unevictable(folio)) {
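		/*
		 * Unevictable folios are on the "LRU_UNEVICTABLE" list. But,
		 * this list is never rotated or maintained, so marking an
		 * unevictable folio accessed has no effect.
		 */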
} else if (!folio_test_active(folio)) {
if (folio_test_lru(folio))
folio_activate(folio);
else
__lru_cache_activate_folio(folio);
folio_clear_referenced(folio);
workingset_activation(folio);
}
if (folio_test_idle(folio))
folio_clear_idle(folio);
}
EXPORT_SYMBOL(folio_mark_accessed);
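
/**
 * folio_add_lru - Add a folio to an LRU list.
 * @folio: The folio to be added to the LRU.
 *
 * Queue the folio for addition to the LRU.  The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * folio_batch is drained.  This gives a chance for the caller of
 * folio_add_lru() to have the folio added to the active list using
 * folio_mark_accessed().
 */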
void folio_add_lru(struct folio *folio)
{
VM_BUG_ON_FOLIO(folio_test_active(folio) &&
folio_test_unevictable(folio), folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
folio_set_active(folio);
folio_batch_add_and_move(folio, lru_add);
}
EXPORT_SYMBOL(folio_add_lru);
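
/**
 * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
 * @folio: The folio to be added to the LRU.
 * @vma: VMA in which the folio is mapped.
 *
 * If the VMA is mlock()ed, the folio goes onto the unevictable list via
 * mlock_new_folio(); otherwise it is queued like any other new folio.
 */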
void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
{
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
mlock_new_folio(folio);
else
folio_add_lru(folio);
}
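
/*
 * If the folio cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the folio isn't mapped and dirty/writeback, the folio
 * could be reclaimed asap using the reclaim flag.
 */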
static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
{
bool active = folio_test_active(folio) || lru_gen_enabled();
long nr_pages = folio_nr_pages(folio);
if (folio_test_unevictable(folio))
return;
if (folio_mapped(folio))
return;
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
folio_clear_referenced(folio);
if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
lruvec_add_folio(lruvec, folio);
folio_set_reclaim(folio);
} else {
lruvec_add_folio_tail(lruvec, folio);
__count_vm_events(PGROTATED, nr_pages);
}
if (active) {
__count_vm_events(PGDEACTIVATE, nr_pages);
count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
nr_pages);
}
}
static void lru_deactivate(struct lruvec *lruvec, struct folio *folio)
{
long nr_pages = folio_nr_pages(folio);
if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
return;
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
folio_clear_referenced(folio);
lruvec_add_folio(lruvec, folio);
__count_vm_events(PGDEACTIVATE, nr_pages);
count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
}
static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
{
long nr_pages = folio_nr_pages(folio);
if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
folio_test_swapcache(folio) || folio_test_unevictable(folio))
return;
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
if (lru_gen_enabled())
lru_gen_clear_refs(folio);
else
folio_clear_referenced(folio);
folio_clear_swapbacked(folio);
lruvec_add_folio(lruvec, folio);
__count_vm_events(PGLAZYFREE, nr_pages);
count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
}
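
/*
 * Drain folios out of the cpu's folio batches.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */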
void lru_add_drain_cpu(int cpu)
{
struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
struct folio_batch *fbatch = &fbatches->lru_add;
if (folio_batch_count(fbatch))
folio_batch_move_lru(fbatch, lru_add);
fbatch = &fbatches->lru_move_tail;
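	/* Disabling interrupts below acts as a compiler barrier. */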
if (data_race(folio_batch_count(fbatch))) {
unsigned long flags;
local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
folio_batch_move_lru(fbatch, lru_move_tail);
local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
}
fbatch = &fbatches->lru_deactivate_file;
if (folio_batch_count(fbatch))
folio_batch_move_lru(fbatch, lru_deactivate_file);
fbatch = &fbatches->lru_deactivate;
if (folio_batch_count(fbatch))
folio_batch_move_lru(fbatch, lru_deactivate);
fbatch = &fbatches->lru_lazyfree;
if (folio_batch_count(fbatch))
folio_batch_move_lru(fbatch, lru_lazyfree);
folio_activate_drain(cpu);
}
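
/**
 * deactivate_file_folio() - Deactivate a file folio.
 * @folio: Folio to deactivate.
 *
 * This function hints to the VM that @folio is a good reclaim candidate,
 * for example if its invalidation fails due to the folio being dirty
 * or under writeback.
 *
 * Context: Caller holds a reference on the folio.
 */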
void deactivate_file_folio(struct folio *folio)
{
if (folio_test_unevictable(folio) || !folio_test_lru(folio))
return;
if (lru_gen_enabled() && lru_gen_clear_refs(folio))
return;
folio_batch_add_and_move(folio, lru_deactivate_file);
}
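
/*
 * folio_deactivate - deactivate a folio
 * @folio: folio to deactivate
 *
 * folio_deactivate() moves @folio to the inactive list if @folio was on the
 * active list and was not unevictable.  This is done to accelerate the
 * reclaim of @folio.
 */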
void folio_deactivate(struct folio *folio)
{
if (folio_test_unevictable(folio) || !folio_test_lru(folio))
return;
if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio))
return;
folio_batch_add_and_move(folio, lru_deactivate);
}
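
/*
 * folio_mark_lazyfree - make an anon folio lazyfree
 * @folio: folio to deactivate
 *
 * folio_mark_lazyfree() moves @folio to the inactive file list.
 * This is done to accelerate the reclaim of @folio.
 */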
void folio_mark_lazyfree(struct folio *folio)
{
if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
!folio_test_lru(folio) ||
folio_test_swapcache(folio) || folio_test_unevictable(folio))
return;
folio_batch_add_and_move(folio, lru_lazyfree);
}
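
/*
 * Drain the local CPU's folio batches, then its pending mlock batch.
 */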
void lru_add_drain(void)
{
local_lock(&cpu_fbatches.lock);
lru_add_drain_cpu(smp_processor_id());
local_unlock(&cpu_fbatches.lock);
mlock_drain_local();
}
static void lru_add_and_bh_lrus_drain(void)
{
local_lock(&cpu_fbatches.lock);
lru_add_drain_cpu(smp_processor_id());
local_unlock(&cpu_fbatches.lock);
invalidate_bh_lrus_cpu();
mlock_drain_local();
}
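
/*
 * Drain the local CPU's folio batches and free the local pcp pages of @zone,
 * all within one preemption-disabled section.
 */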
void lru_add_drain_cpu_zone(struct zone *zone)
{
local_lock(&cpu_fbatches.lock);
lru_add_drain_cpu(smp_processor_id());
drain_local_pages(zone);
local_unlock(&cpu_fbatches.lock);
mlock_drain_local();
}
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
lru_add_and_bh_lrus_drain();
}
static bool cpu_needs_drain(unsigned int cpu)
{
struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
return folio_batch_count(&fbatches->lru_add) ||
folio_batch_count(&fbatches->lru_move_tail) ||
folio_batch_count(&fbatches->lru_deactivate_file) ||
folio_batch_count(&fbatches->lru_deactivate) ||
folio_batch_count(&fbatches->lru_lazyfree) ||
folio_batch_count(&fbatches->lru_activate) ||
need_mlock_drain(cpu) ||
has_bh_in_lru(cpu, NULL);
}
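
/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */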
static inline void __lru_add_drain_all(bool force_all_cpus)
{
static unsigned int lru_drain_gen;
static struct cpumask has_work;
static DEFINE_MUTEX(lock);
unsigned cpu, this_gen;
if (WARN_ON(!mm_percpu_wq))
return;
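	/*
	 * Guarantee folio_batch counter stores visible by this CPU
	 * are visible to other CPUs before loading the current drain
	 * generation which is incremented below.
	 */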
smp_mb();
this_gen = smp_load_acquire(&lru_drain_gen);
lru_add_drain();
mutex_lock(&lock);
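	/*
	 * Exit early if a newer generation, i.e. a concurrent
	 * lru_add_drain_all(), was already scheduled for draining while we
	 * waited for the mutex; its flush covers our batches too.
	 */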
if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
goto done;
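	/*
	 * Store the new drain generation before loading the folio_batch
	 * counters in cpu_needs_drain(); pairs with the smp_load_acquire()
	 * and smp_mb() above.
	 */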
WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
smp_mb();
cpumask_clear(&has_work);
for_each_online_cpu(cpu) {
struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
if (cpu_needs_drain(cpu)) {
INIT_WORK(work, lru_add_drain_per_cpu);
queue_work_on(cpu, mm_percpu_wq, work);
__cpumask_set_cpu(cpu, &has_work);
}
}
for_each_cpu(cpu, &has_work)
flush_work(&per_cpu(lru_add_drain_work, cpu));
done:
mutex_unlock(&lock);
}
void lru_add_drain_all(void)
{
__lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
lru_add_drain();
}
#endif
atomic_t lru_disable_count = ATOMIC_INIT(0);
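
/*
 * lru_cache_disable() needs to be called before we start compiling
 * a list of folios to be migrated using folio_isolate_lru().
 * It drains folios on LRU cache and then disables it on all cpus until
 * lru_cache_enable() is called.
 *
 * Must be paired with a call to lru_cache_enable().
 */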
void lru_cache_disable(void)
{
atomic_inc(&lru_disable_count);
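	/*
	 * Readers of lru_disable_count are protected by either disabling
	 * preemption or rcu_read_lock:
	 *
	 * preempt_disable, local_irq_disable  [bh_lru_lock()]
	 * rcu_read_lock		       [rt_spin_lock CONFIG_PREEMPT_RT]
	 * preempt_disable		       [local_lock !CONFIG_PREEMPT_RT]
	 *
	 * Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on
	 * preempt_disable() regions of code.  So any CPU which sees
	 * lru_cache_disabled = true will have exited the critical
	 * section when synchronize_rcu() returns.
	 */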
synchronize_rcu_expedited();
#ifdef CONFIG_SMP
__lru_add_drain_all(true);
#else
lru_add_and_bh_lrus_drain();
#endif
}
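
/**
 * folios_put_refs - Reduce the reference count on a batch of folios.
 * @folios: The folios.
 * @refs: The number of refs to subtract from each folio.
 *
 * Like folio_put(), but for a batch of folios.  This is more efficient
 * than writing the loop yourself as it will optimise the locks which need
 * to be taken if the folios are freed.  The folios batch is returned
 * empty and ready to be reused for another batch; there is no need to
 * reinitialise it.  If @refs is NULL, we subtract one from each
 * folio refcount.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */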
void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
{
int i, j;
struct lruvec *lruvec = NULL;
unsigned long flags = 0;
for (i = 0, j = 0; i < folios->nr; i++) {
struct folio *folio = folios->folios[i];
unsigned int nr_refs = refs ? refs[i] : 1;
if (is_huge_zero_folio(folio))
continue;
if (folio_is_zone_device(folio)) {
if (lruvec) {
unlock_page_lruvec_irqrestore(lruvec, flags);
lruvec = NULL;
}
if (folio_ref_sub_and_test(folio, nr_refs))
free_zone_device_folio(folio);
continue;
}
if (!folio_ref_sub_and_test(folio, nr_refs))
continue;
if (folio_test_hugetlb(folio)) {
if (lruvec) {
unlock_page_lruvec_irqrestore(lruvec, flags);
lruvec = NULL;
}
free_huge_folio(folio);
continue;
}
folio_unqueue_deferred_split(folio);
__page_cache_release(folio, &lruvec, &flags);
if (j != i)
folios->folios[j] = folio;
j++;
}
if (lruvec)
unlock_page_lruvec_irqrestore(lruvec, flags);
if (!j) {
folio_batch_reinit(folios);
return;
}
folios->nr = j;
mem_cgroup_uncharge_folios(folios);
free_unref_folios(folios);
}
EXPORT_SYMBOL(folios_put_refs);
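
/**
 * release_pages - batched put_page()
 * @arg: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @arg.  If it
 * fell to zero, remove the page from the LRU and free it.
 *
 * Note that the argument can be an array of pages, encoded pages,
 * or folio pointers.  We ignore any encoded bits, and turn any of
 * them into just a folio that gets free'd.
 */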
void release_pages(release_pages_arg arg, int nr)
{
struct folio_batch fbatch;
int refs[PAGEVEC_SIZE];
struct encoded_page **encoded = arg.encoded_pages;
int i;
folio_batch_init(&fbatch);
for (i = 0; i < nr; i++) {
struct folio *folio = page_folio(encoded_page_ptr(encoded[i]));
refs[fbatch.nr] = 1;
if (unlikely(encoded_page_flags(encoded[i]) &
ENCODED_PAGE_BIT_NR_PAGES_NEXT))
refs[fbatch.nr] = encoded_nr_pages(encoded[++i]);
if (folio_batch_add(&fbatch, folio) > 0)
continue;
folios_put_refs(&fbatch, refs);
}
if (fbatch.nr)
folios_put_refs(&fbatch, refs);
}
EXPORT_SYMBOL(release_pages);
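
/*
 * The folios which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those folios may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __folio_batch_release() will drain those queues here.
 * folio_batch_move_lru() calls folios_put() directly to avoid
 * mutual recursion.
 */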
void __folio_batch_release(struct folio_batch *fbatch)
{
if (!fbatch->percpu_pvec_drained) {
lru_add_drain();
fbatch->percpu_pvec_drained = true;
}
folios_put(fbatch);
}
EXPORT_SYMBOL(__folio_batch_release);
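
/**
 * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
 * @fbatch: The batch to prune
 *
 * find_get_entries() fills a batch with both folios and shadow/swap/DAX
 * entries.  This function prunes all the non-folio entries from @fbatch
 * without leaving holes, so that it can be passed on to folio-only batch
 * operations.
 */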
void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
{
unsigned int i, j;
for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
struct folio *folio = fbatch->folios[i];
if (!xa_is_value(folio))
fbatch->folios[j++] = folio;
}
fbatch->nr = j;
}
static const struct ctl_table swap_sysctl_table[] = {
{
.procname = "page-cluster",
.data = &page_cluster,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = (void *)&page_cluster_max,
}
};
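
/*
 * Perform any setup for the swap system.
 */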
void __init swap_setup(void)
{
unsigned long megs = PAGES_TO_MB(totalram_pages());
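	/* Use a smaller cluster for small-memory machines */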
if (megs < 16)
page_cluster = 2;
else
page_cluster = 3;
register_sysctl_init("vm", swap_sysctl_table);
}