#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/migrate.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/oom.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
#include <linux/psi.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/khugepaged.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <linux/mmu_notifier.h>
#include <linux/parser.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include <linux/swapops.h>
#include <linux/sched/sysctl.h>
#include "internal.h"
#include "swap.h"
#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
struct scan_control {
	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;
	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;
	/*
	 * The memory cgroup that hit its limit and is the primary target
	 * of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;
	/* Scan pressure balancing between anon and file LRUs */
	unsigned long anon_cost;
	unsigned long file_cost;
	/* Swappiness value for proactive reclaim. Always use sc_swappiness()! */
	int *proactive_swappiness;
	/* Can active folios be deactivated as part of reclaim? */
#define DEACTIVATE_ANON 1
#define DEACTIVATE_FILE 2
	unsigned int may_deactivate:2;
	unsigned int force_deactivate:1;
	unsigned int skipped_deactivate:1;
	/* Writepage batching in laptop mode; RECLAIM_WRITE */
	unsigned int may_writepage:1;
	/* Can mapped folios be reclaimed? */
	unsigned int may_unmap:1;
	/* Can folios be swapped as part of reclaim? */
	unsigned int may_swap:1;
	/* Not allow cache_trim_mode to be turned on as part of reclaim? */
	unsigned int no_cache_trim_mode:1;
	/* Has cache_trim_mode failed at least once? */
	unsigned int cache_trim_mode_failed:1;
	/* Proactive reclaim invoked by userspace through memory.reclaim */
	unsigned int proactive:1;
	/*
	 * Cgroup memory below memory.low is protected as long as we
	 * don't threaten to OOM. If any cgroup is reclaimed at reduced
	 * force or passed over entirely due to its memory.low setting
	 * (memcg_low_skipped), and nothing is reclaimed as a result,
	 * then go back for one more cycle that reclaims the protected
	 * memory (memcg_low_reclaim) to avert OOM.
	 */
	unsigned int memcg_low_reclaim:1;
	unsigned int memcg_low_skipped:1;
	/* Shared cgroup tree walk failed, rescan the whole tree */
	unsigned int memcg_full_walk:1;
	unsigned int hibernation_mode:1;
	/* One of the zones is ready for compaction */
	unsigned int compaction_ready:1;
	/* There is easily reclaimable cold cache in the current node */
	unsigned int cache_trim_mode:1;
	/* The file folios on the current node are dangerously low */
	unsigned int file_is_tiny:1;
	/* Always discard instead of demoting to lower tier memory */
	unsigned int no_demotion:1;
	/* Allocation order */
	s8 order;
	/* Scan (total_size >> priority) pages at once */
	s8 priority;
	/* The highest zone to isolate folios for reclaim from */
	s8 reclaim_idx;
	/* This context's GFP mask */
	gfp_t gfp_mask;
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;
	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;
	struct {
		unsigned int dirty;
		unsigned int unqueued_dirty;
		unsigned int congested;
		unsigned int writeback;
		unsigned int immediate;
		unsigned int file_taken;
		unsigned int taken;
	} nr;
	/* for recording the reclaimed slab by now */
	struct reclaim_state reclaim_state;
};
#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_folio(_folio, _base, _field) \
do { \
if ((_folio)->lru.prev != _base) { \
struct folio *prev; \
\
prev = lru_to_folio(&(_folio->lru)); \
prefetchw(&prev->_field); \
} \
} while (0)
#else
#define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0)
#endif
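/*
 * From 0 .. MAX_SWAPPINESS. Higher means more swappy.
 */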
int vm_swappiness = 60;
#ifdef CONFIG_MEMCG
static bool cgroup_reclaim(struct scan_control *sc)
{
return sc->target_mem_cgroup;
}
static bool root_reclaim(struct scan_control *sc)
{
return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
}
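/*
 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
 *
 * The normal page dirty throttling mechanism in balance_dirty_pages() is
 * completely broken with the legacy memcg and direct stalling in
 * shrink_folio_list() is used for throttling instead, which lacks all the
 * niceties such as fairness, adaptive pausing, bandwidth proportional
 * allocation and configurability.
 *
 * This function tests whether the vmscan currently in progress can assume
 * that the normal dirty throttling mechanism is operational.
 */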
static bool writeback_throttling_sane(struct scan_control *sc)
{
if (!cgroup_reclaim(sc))
return true;
#ifdef CONFIG_CGROUP_WRITEBACK
if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
return true;
#endif
return false;
}
static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
{
if (sc->proactive && sc->proactive_swappiness)
return *sc->proactive_swappiness;
return mem_cgroup_swappiness(memcg);
}
#else
static bool cgroup_reclaim(struct scan_control *sc)
{
return false;
}
static bool root_reclaim(struct scan_control *sc)
{
return true;
}
static bool writeback_throttling_sane(struct scan_control *sc)
{
return true;
}
static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
{
return READ_ONCE(vm_swappiness);
}
#endif
#define for_each_managed_zone_pgdat(zone, pgdat, idx, highidx) \
for ((idx) = 0, (zone) = (pgdat)->node_zones; \
(idx) <= (highidx); \
(idx)++, (zone)++) \
if (!managed_zone(zone)) \
continue; \
else
static void set_task_reclaim_state(struct task_struct *task,
struct reclaim_state *rs)
{
WARN_ON_ONCE(rs && task->reclaim_state);
WARN_ON_ONCE(!rs && !task->reclaim_state);
task->reclaim_state = rs;
}
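/*
 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
 * scan_control->nr_reclaimed.
 */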
static void flush_reclaim_state(struct scan_control *sc)
{
if (current->reclaim_state && root_reclaim(sc)) {
sc->nr_reclaimed += current->reclaim_state->reclaimed;
current->reclaim_state->reclaimed = 0;
}
}
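/*
 * Returns true if reclaim on @nid may demote folios to a lower memory
 * tier: demotion must be enabled globally and by the scan control, and
 * at least one allowed target node must remain after filtering the
 * node's demotion targets against @memcg.
 */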
static bool can_demote(int nid, struct scan_control *sc,
struct mem_cgroup *memcg)
{
struct pglist_data *pgdat = NODE_DATA(nid);
nodemask_t allowed_mask;
if (!pgdat || !numa_demotion_enabled)
return false;
if (sc && sc->no_demotion)
return false;
node_get_allowed_targets(pgdat, &allowed_mask);
if (nodes_empty(allowed_mask))
return false;
mem_cgroup_node_filter_allowed(memcg, &allowed_mask);
return !nodes_empty(allowed_mask);
}
static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
int nid,
struct scan_control *sc)
{
if (memcg == NULL) {
if (get_nr_swap_pages() > 0)
return true;
} else {
if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
return true;
}
return can_demote(nid, sc, memcg);
}
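/*
 * This misses isolated folios which are not accounted for to save counters.
 * As the data only determines if reclaim or compaction continues, it is
 * not expected that isolated folios will be a dominating factor.
 */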
unsigned long zone_reclaimable_pages(struct zone *zone)
{
unsigned long nr;
nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
return nr;
}
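/**
 * lruvec_lru_size -  Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
 * @lru: lru to use
 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
 */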
static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
int zone_idx)
{
unsigned long size = 0;
int zid;
struct zone *zone;
for_each_managed_zone_pgdat(zone, lruvec_pgdat(lruvec), zid, zone_idx) {
if (!mem_cgroup_disabled())
size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
else
size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
}
return size;
}
static unsigned long drop_slab_node(int nid)
{
unsigned long freed = 0;
struct mem_cgroup *memcg = NULL;
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
return freed;
}
void drop_slab(void)
{
int nid;
int shift = 0;
unsigned long freed;
do {
freed = 0;
for_each_online_node(nid) {
if (fatal_signal_pending(current))
return;
freed += drop_slab_node(nid);
}
} while ((freed >> shift++) > 1);
}
#define CHECK_RECLAIMER_OFFSET(type) \
do { \
BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \
PGDEMOTE_##type - PGDEMOTE_KSWAPD); \
BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \
PGSCAN_##type - PGSCAN_KSWAPD); \
} while (0)
static int reclaimer_offset(struct scan_control *sc)
{
CHECK_RECLAIMER_OFFSET(DIRECT);
CHECK_RECLAIMER_OFFSET(KHUGEPAGED);
CHECK_RECLAIMER_OFFSET(PROACTIVE);
if (current_is_kswapd())
return 0;
if (current_is_khugepaged())
return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD;
if (sc->proactive)
return PGSTEAL_PROACTIVE - PGSTEAL_KSWAPD;
return PGSTEAL_DIRECT - PGSTEAL_KSWAPD;
}
static void handle_write_error(struct address_space *mapping,
struct folio *folio, int error)
{
folio_lock(folio);
if (folio_mapping(folio) == mapping)
mapping_set_error(mapping, error);
folio_unlock(folio);
}
static bool skip_throttle_noprogress(pg_data_t *pgdat)
{
int reclaimable = 0, write_pending = 0;
int i;
struct zone *zone;
if (kswapd_test_hopeless(pgdat))
return true;
for_each_managed_zone_pgdat(zone, pgdat, i, MAX_NR_ZONES - 1) {
reclaimable += zone_reclaimable_pages(zone);
write_pending += zone_page_state_snapshot(zone,
NR_ZONE_WRITE_PENDING);
}
if (2 * write_pending <= reclaimable)
return true;
return false;
}
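/*
 * Throttle a task during reclaim when progress depends on work happening
 * elsewhere, e.g. folios being written back or too many folios isolated.
 * The sleep length is bounded per @reason; writeback waiters are woken
 * early by __acct_reclaim_writeback() once enough folios are cleaned.
 */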
void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
{
wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
long timeout, ret;
DEFINE_WAIT(wait);
if (!current_is_kswapd() &&
current->flags & (PF_USER_WORKER|PF_KTHREAD)) {
cond_resched();
return;
}
	switch (reason) {
case VMSCAN_THROTTLE_WRITEBACK:
timeout = HZ/10;
if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {
WRITE_ONCE(pgdat->nr_reclaim_start,
node_page_state(pgdat, NR_THROTTLED_WRITTEN));
}
break;
case VMSCAN_THROTTLE_CONGESTED:
fallthrough;
case VMSCAN_THROTTLE_NOPROGRESS:
if (skip_throttle_noprogress(pgdat)) {
cond_resched();
return;
}
timeout = 1;
break;
case VMSCAN_THROTTLE_ISOLATED:
timeout = HZ/50;
break;
default:
WARN_ON_ONCE(1);
timeout = HZ;
break;
}
prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
ret = schedule_timeout(timeout);
finish_wait(wqh, &wait);
if (reason == VMSCAN_THROTTLE_WRITEBACK)
atomic_dec(&pgdat->nr_writeback_throttled);
trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout),
jiffies_to_usecs(timeout - ret),
reason);
}
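/*
 * Account for folios written if tasks are throttled waiting on dirty
 * folios to clean. If enough folios have been cleaned since throttling
 * started then wakeup the throttled tasks.
 */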
void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
int nr_throttled)
{
unsigned long nr_written;
node_stat_add_folio(folio, NR_THROTTLED_WRITTEN);
nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) -
READ_ONCE(pgdat->nr_reclaim_start);
if (nr_written > SWAP_CLUSTER_MAX * nr_throttled)
wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]);
}
/* possible outcome of pageout() */
typedef enum {
	/* failed to write folio out, folio is locked */
	PAGE_KEEP,
	/* move folio to the active list, folio is locked */
	PAGE_ACTIVATE,
	/* folio has been sent to disk, folio is unlocked */
	PAGE_SUCCESS,
	/* folio is clean and locked */
	PAGE_CLEAN,
} pageout_t;
static pageout_t writeout(struct folio *folio, struct address_space *mapping,
struct swap_iocb **plug, struct list_head *folio_list)
{
int res;
folio_set_reclaim(folio);
if (shmem_mapping(mapping))
res = shmem_writeout(folio, plug, folio_list);
else
res = swap_writeout(folio, plug);
if (res < 0)
handle_write_error(mapping, folio, res);
if (res == AOP_WRITEPAGE_ACTIVATE) {
folio_clear_reclaim(folio);
return PAGE_ACTIVATE;
}
if (!folio_test_writeback(folio))
folio_clear_reclaim(folio);
trace_mm_vmscan_write_folio(folio);
node_stat_add_folio(folio, NR_VMSCAN_WRITE);
return PAGE_SUCCESS;
}
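/*
 * pageout() is called by shrink_folio_list() for each dirty folio.
 * Only anon and shmem folios are written out here; other dirty file
 * folios are returned as PAGE_ACTIVATE and left to the flusher threads.
 */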
static pageout_t pageout(struct folio *folio, struct address_space *mapping,
struct swap_iocb **plug, struct list_head *folio_list)
{
if (folio_ref_count(folio) != 1 + folio_nr_pages(folio) || !mapping)
return PAGE_KEEP;
if (!shmem_mapping(mapping) && !folio_test_anon(folio))
return PAGE_ACTIVATE;
if (!folio_clear_dirty_for_io(folio))
return PAGE_CLEAN;
return writeout(folio, mapping, plug, folio_list);
}
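/*
 * Same as remove_mapping, but if the folio is removed from the mapping, it
 * gets returned with a refcount of 0.
 */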
static int __remove_mapping(struct address_space *mapping, struct folio *folio,
bool reclaimed, struct mem_cgroup *target_memcg)
{
int refcount;
void *shadow = NULL;
struct swap_cluster_info *ci;
BUG_ON(!folio_test_locked(folio));
BUG_ON(mapping != folio_mapping(folio));
if (folio_test_swapcache(folio)) {
ci = swap_cluster_get_and_lock_irq(folio);
} else {
spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
}
refcount = 1 + folio_nr_pages(folio);
if (!folio_ref_freeze(folio, refcount))
goto cannot_free;
if (unlikely(folio_test_dirty(folio))) {
folio_ref_unfreeze(folio, refcount);
goto cannot_free;
}
if (folio_test_swapcache(folio)) {
swp_entry_t swap = folio->swap;
if (reclaimed && !mapping_exiting(mapping))
shadow = workingset_eviction(folio, target_memcg);
memcg1_swapout(folio, swap);
__swap_cache_del_folio(ci, folio, swap, shadow);
swap_cluster_unlock_irq(ci);
} else {
void (*free_folio)(struct folio *);
free_folio = mapping->a_ops->free_folio;
if (reclaimed && folio_is_file_lru(folio) &&
!mapping_exiting(mapping) && !dax_mapping(mapping))
shadow = workingset_eviction(folio, target_memcg);
__filemap_remove_folio(folio, shadow);
xa_unlock_irq(&mapping->i_pages);
if (mapping_shrinkable(mapping))
inode_lru_list_add(mapping->host);
spin_unlock(&mapping->host->i_lock);
if (free_folio)
free_folio(folio);
}
return 1;
cannot_free:
if (folio_test_swapcache(folio)) {
swap_cluster_unlock_irq(ci);
} else {
xa_unlock_irq(&mapping->i_pages);
spin_unlock(&mapping->host->i_lock);
}
return 0;
}
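/**
 * remove_mapping() - Attempt to remove a folio from its mapping.
 * @mapping: The address space.
 * @folio: The folio to remove.
 *
 * If the folio is dirty, under writeback or if someone else has a ref
 * on it, removal will fail.
 * Return: The number of pages removed from the mapping.  0 if the folio
 * could not be removed.
 * Context: The caller should have a single refcount on the folio and
 * hold its lock.
 */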
long remove_mapping(struct address_space *mapping, struct folio *folio)
{
if (__remove_mapping(mapping, folio, false, NULL)) {
folio_ref_unfreeze(folio, 1);
return folio_nr_pages(folio);
}
return 0;
}
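/**
 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
 * @folio: Folio to be returned to an LRU list.
 *
 * Add previously isolated @folio to appropriate LRU list.
 * The folio may still be unevictable for other reasons.
 *
 * Context: lru_lock must not be held, interrupts must be enabled.
 */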
void folio_putback_lru(struct folio *folio)
{
folio_add_lru(folio);
folio_put(folio);
}
enum folio_references {
FOLIOREF_RECLAIM,
FOLIOREF_RECLAIM_CLEAN,
FOLIOREF_KEEP,
FOLIOREF_ACTIVATE,
};
#ifdef CONFIG_LRU_GEN
static bool lru_gen_set_refs(struct folio *folio)
{
if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
return false;
}
set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_workingset));
return true;
}
#else
static bool lru_gen_set_refs(struct folio *folio)
{
return false;
}
#endif
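/*
 * Decide the fate of a folio under reclaim based on its rmap references:
 * activate it, keep it on the inactive list for another pass, reclaim it,
 * or reclaim it only if it is already clean (FOLIOREF_RECLAIM_CLEAN).
 */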
static enum folio_references folio_check_references(struct folio *folio,
struct scan_control *sc)
{
int referenced_ptes, referenced_folio;
vm_flags_t vm_flags;
referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
&vm_flags);
if (vm_flags & VM_LOCKED)
return FOLIOREF_ACTIVATE;
if (referenced_ptes == -1)
return FOLIOREF_KEEP;
if (lru_gen_enabled()) {
if (!referenced_ptes)
return FOLIOREF_RECLAIM;
return lru_gen_set_refs(folio) ? FOLIOREF_ACTIVATE : FOLIOREF_KEEP;
}
referenced_folio = folio_test_clear_referenced(folio);
if (referenced_ptes) {
folio_set_referenced(folio);
if (referenced_folio || referenced_ptes > 1)
return FOLIOREF_ACTIVATE;
if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio))
return FOLIOREF_ACTIVATE;
return FOLIOREF_KEEP;
}
if (referenced_folio && folio_is_file_lru(folio))
return FOLIOREF_RECLAIM_CLEAN;
return FOLIOREF_RECLAIM;
}
static void folio_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback)
{
struct address_space *mapping;
if (!folio_is_file_lru(folio) ||
(folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
*dirty = false;
*writeback = false;
return;
}
*dirty = folio_test_dirty(folio);
*writeback = folio_test_writeback(folio);
if (!folio_test_private(folio))
return;
mapping = folio_mapping(folio);
if (mapping && mapping->a_ops->is_dirty_writeback)
mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
}
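/*
 * Allocate a destination folio for demotion: try the designated target
 * node first with __GFP_THISNODE, then fall back to any node in the
 * caller's allowed mask.
 */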
static struct folio *alloc_demote_folio(struct folio *src,
unsigned long private)
{
struct folio *dst;
nodemask_t *allowed_mask;
struct migration_target_control *mtc;
mtc = (struct migration_target_control *)private;
allowed_mask = mtc->nmask;
mtc->nmask = NULL;
mtc->gfp_mask |= __GFP_THISNODE;
dst = alloc_migration_target(src, (unsigned long)mtc);
if (dst)
return dst;
mtc->gfp_mask &= ~__GFP_THISNODE;
mtc->nmask = allowed_mask;
return alloc_migration_target(src, (unsigned long)mtc);
}
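/*
 * Take folios on @demote_folios and attempt to demote them to another node.
 * Folios which are not demoted are left on @demote_folios.
 */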
static unsigned int demote_folio_list(struct list_head *demote_folios,
struct pglist_data *pgdat,
struct mem_cgroup *memcg)
{
int target_nid;
unsigned int nr_succeeded;
nodemask_t allowed_mask;
struct migration_target_control mtc = {
.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
__GFP_NOMEMALLOC | GFP_NOWAIT,
.nmask = &allowed_mask,
.reason = MR_DEMOTION,
};
if (list_empty(demote_folios))
return 0;
node_get_allowed_targets(pgdat, &allowed_mask);
mem_cgroup_node_filter_allowed(memcg, &allowed_mask);
if (nodes_empty(allowed_mask))
return 0;
target_nid = next_demotion_node(pgdat->node_id, &allowed_mask);
if (target_nid == NUMA_NO_NODE)
return 0;
mtc.nid = target_nid;
migrate_pages(demote_folios, alloc_demote_folio, NULL,
(unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
&nr_succeeded);
return nr_succeeded;
}
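/*
 * May the reclaim context enter the filesystem? A swap-cache folio may be
 * written with only __GFP_IO, provided its backing device does not use
 * SWP_FS_OPS (which would re-enter the filesystem).
 */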
static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
{
if (gfp_mask & __GFP_FS)
return true;
if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO))
return false;
return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
}
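/*
 * shrink_folio_list() returns the number of reclaimed pages
 */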
static unsigned int shrink_folio_list(struct list_head *folio_list,
struct pglist_data *pgdat, struct scan_control *sc,
struct reclaim_stat *stat, bool ignore_references,
struct mem_cgroup *memcg)
{
struct folio_batch free_folios;
LIST_HEAD(ret_folios);
LIST_HEAD(demote_folios);
unsigned int nr_reclaimed = 0, nr_demoted = 0;
unsigned int pgactivate = 0;
bool do_demote_pass;
struct swap_iocb *plug = NULL;
folio_batch_init(&free_folios);
memset(stat, 0, sizeof(*stat));
cond_resched();
do_demote_pass = can_demote(pgdat->node_id, sc, memcg);
retry:
while (!list_empty(folio_list)) {
struct address_space *mapping;
struct folio *folio;
enum folio_references references = FOLIOREF_RECLAIM;
bool dirty, writeback;
unsigned int nr_pages;
cond_resched();
folio = lru_to_folio(folio_list);
list_del(&folio->lru);
if (!folio_trylock(folio))
goto keep;
if (folio_contain_hwpoisoned_page(folio)) {
if (folio_test_large(folio))
goto keep_locked;
unmap_poisoned_folio(folio, folio_pfn(folio), false);
folio_unlock(folio);
folio_put(folio);
continue;
}
VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
nr_pages = folio_nr_pages(folio);
sc->nr_scanned += nr_pages;
if (unlikely(!folio_evictable(folio)))
goto activate_locked;
if (!sc->may_unmap && folio_mapped(folio))
goto keep_locked;
folio_check_dirty_writeback(folio, &dirty, &writeback);
if (dirty || writeback)
stat->nr_dirty += nr_pages;
if (dirty && !writeback)
stat->nr_unqueued_dirty += nr_pages;
if (writeback && folio_test_reclaim(folio))
stat->nr_congested += nr_pages;
if (folio_test_writeback(folio)) {
mapping = folio_mapping(folio);
if (current_is_kswapd() &&
folio_test_reclaim(folio) &&
test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
stat->nr_immediate += nr_pages;
goto activate_locked;
} else if (writeback_throttling_sane(sc) ||
!folio_test_reclaim(folio) ||
!may_enter_fs(folio, sc->gfp_mask) ||
(mapping &&
mapping_writeback_may_deadlock_on_reclaim(mapping))) {
folio_set_reclaim(folio);
stat->nr_writeback += nr_pages;
goto activate_locked;
} else {
folio_unlock(folio);
folio_wait_writeback(folio);
list_add_tail(&folio->lru, folio_list);
continue;
}
}
if (!ignore_references)
references = folio_check_references(folio, sc);
switch (references) {
case FOLIOREF_ACTIVATE:
goto activate_locked;
case FOLIOREF_KEEP:
stat->nr_ref_keep += nr_pages;
goto keep_locked;
		case FOLIOREF_RECLAIM:
		case FOLIOREF_RECLAIM_CLEAN:
			; /* try to reclaim the folio below */
}
if (do_demote_pass &&
(thp_migration_supported() || !folio_test_large(folio))) {
list_add(&folio->lru, &demote_folios);
folio_unlock(folio);
continue;
}
if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
!folio_test_swapcache(folio)) {
if (!(sc->gfp_mask & __GFP_IO))
goto keep_locked;
if (folio_maybe_dma_pinned(folio))
goto keep_locked;
if (folio_test_large(folio)) {
if (folio_expected_ref_count(folio) !=
folio_ref_count(folio) - 1)
goto activate_locked;
if (data_race(!list_empty(&folio->_deferred_list) &&
folio_test_partially_mapped(folio)) &&
split_folio_to_list(folio, folio_list))
goto activate_locked;
}
if (folio_alloc_swap(folio)) {
int __maybe_unused order = folio_order(folio);
if (!folio_test_large(folio))
goto activate_locked_split;
if (split_folio_to_list(folio, folio_list))
goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (nr_pages >= HPAGE_PMD_NR) {
count_memcg_folio_events(folio,
THP_SWPOUT_FALLBACK, 1);
count_vm_event(THP_SWPOUT_FALLBACK);
}
#endif
count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
if (folio_alloc_swap(folio))
goto activate_locked_split;
}
folio_mark_dirty(folio);
}
		/*
		 * If the folio was split above, its tail pages will make
		 * their own pass through this function and be accounted then.
		 */
		if ((nr_pages > 1) && !folio_test_large(folio)) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}
if (folio_mapped(folio)) {
enum ttu_flags flags = TTU_BATCH_FLUSH;
bool was_swapbacked = folio_test_swapbacked(folio);
if (folio_test_pmd_mappable(folio))
flags |= TTU_SPLIT_HUGE_PMD;
if (folio_test_large(folio))
flags |= TTU_SYNC;
try_to_unmap(folio, flags);
if (folio_mapped(folio)) {
stat->nr_unmap_fail += nr_pages;
if (!was_swapbacked &&
folio_test_swapbacked(folio))
stat->nr_lazyfree_fail += nr_pages;
goto activate_locked;
}
}
if (folio_maybe_dma_pinned(folio))
goto activate_locked;
mapping = folio_mapping(folio);
if (folio_test_dirty(folio)) {
if (folio_is_file_lru(folio)) {
node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
nr_pages);
if (!folio_test_reclaim(folio))
folio_set_reclaim(folio);
goto activate_locked;
}
if (references == FOLIOREF_RECLAIM_CLEAN)
goto keep_locked;
if (!may_enter_fs(folio, sc->gfp_mask))
goto keep_locked;
if (!sc->may_writepage)
goto keep_locked;
try_to_unmap_flush_dirty();
switch (pageout(folio, mapping, &plug, folio_list)) {
case PAGE_KEEP:
goto keep_locked;
case PAGE_ACTIVATE:
if (nr_pages > 1 && !folio_test_large(folio)) {
sc->nr_scanned -= (nr_pages - 1);
nr_pages = 1;
}
goto activate_locked;
case PAGE_SUCCESS:
if (nr_pages > 1 && !folio_test_large(folio)) {
sc->nr_scanned -= (nr_pages - 1);
nr_pages = 1;
}
stat->nr_pageout += nr_pages;
if (folio_test_writeback(folio))
goto keep;
if (folio_test_dirty(folio))
goto keep;
if (!folio_trylock(folio))
goto keep;
if (folio_test_dirty(folio) ||
folio_test_writeback(folio))
goto keep_locked;
mapping = folio_mapping(folio);
fallthrough;
			case PAGE_CLEAN:
				; /* try to free the folio below */
}
}
if (folio_needs_release(folio)) {
if (!filemap_release_folio(folio, sc->gfp_mask))
goto activate_locked;
if (!mapping && folio_ref_count(folio) == 1) {
folio_unlock(folio);
if (folio_put_testzero(folio))
goto free_it;
else {
nr_reclaimed += nr_pages;
continue;
}
}
}
if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
if (!folio_ref_freeze(folio, 1))
goto keep_locked;
count_vm_events(PGLAZYFREED, nr_pages);
count_memcg_folio_events(folio, PGLAZYFREED, nr_pages);
} else if (!mapping || !__remove_mapping(mapping, folio, true,
sc->target_mem_cgroup))
goto keep_locked;
folio_unlock(folio);
free_it:
nr_reclaimed += nr_pages;
folio_unqueue_deferred_split(folio);
if (folio_batch_add(&free_folios, folio) == 0) {
mem_cgroup_uncharge_folios(&free_folios);
try_to_unmap_flush();
free_unref_folios(&free_folios);
}
continue;
activate_locked_split:
if (nr_pages > 1) {
sc->nr_scanned -= (nr_pages - 1);
nr_pages = 1;
}
activate_locked:
if (folio_test_swapcache(folio) &&
(mem_cgroup_swap_full(folio) || folio_test_mlocked(folio)))
folio_free_swap(folio);
VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
if (!folio_test_mlocked(folio)) {
int type = folio_is_file_lru(folio);
folio_set_active(folio);
stat->nr_activate[type] += nr_pages;
count_memcg_folio_events(folio, PGACTIVATE, nr_pages);
}
keep_locked:
folio_unlock(folio);
keep:
list_add(&folio->lru, &ret_folios);
VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
folio_test_unevictable(folio), folio);
}
nr_demoted = demote_folio_list(&demote_folios, pgdat, memcg);
nr_reclaimed += nr_demoted;
stat->nr_demoted += nr_demoted;
if (!list_empty(&demote_folios)) {
list_splice_init(&demote_folios, folio_list);
if (!sc->proactive) {
do_demote_pass = false;
goto retry;
}
}
pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
mem_cgroup_uncharge_folios(&free_folios);
try_to_unmap_flush();
free_unref_folios(&free_folios);
list_splice(&ret_folios, folio_list);
count_vm_events(PGACTIVATE, pgactivate);
if (plug)
swap_write_unplug(plug);
return nr_reclaimed;
}
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
struct list_head *folio_list)
{
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.may_unmap = 1,
};
struct reclaim_stat stat;
unsigned int nr_reclaimed;
struct folio *folio, *next;
LIST_HEAD(clean_folios);
unsigned int noreclaim_flag;
list_for_each_entry_safe(folio, next, folio_list, lru) {
if (page_has_movable_ops(&folio->page))
continue;
if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
!folio_test_dirty(folio) && !folio_test_unevictable(folio)) {
folio_clear_active(folio);
list_move(&folio->lru, &clean_folios);
}
}
noreclaim_flag = memalloc_noreclaim_save();
nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc,
&stat, true, NULL);
memalloc_noreclaim_restore(noreclaim_flag);
list_splice(&clean_folios, folio_list);
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
-(long)nr_reclaimed);
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
stat.nr_lazyfree_fail);
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
-(long)stat.nr_lazyfree_fail);
return nr_reclaimed;
}
static __always_inline void update_lru_sizes(struct lruvec *lruvec,
enum lru_list lru, unsigned long *nr_zone_taken)
{
int zid;
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
if (!nr_zone_taken[zid])
continue;
update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
}
}
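/*
 * Isolate folios from the LRU into @dst, scanning up to @nr_to_scan pages.
 *
 * lruvec->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of folios
 * and working on them outside the LRU lock; the caller must hold it.
 *
 * Folios from zones above sc->reclaim_idx are skipped and returned to
 * the head of the source list.  Returns the number of pages moved to
 * @dst, with *nr_scanned set to the number of pages looked at.
 */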
static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
struct lruvec *lruvec, struct list_head *dst,
unsigned long *nr_scanned, struct scan_control *sc,
enum lru_list lru)
{
struct list_head *src = &lruvec->lists[lru];
unsigned long nr_taken = 0;
unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
unsigned long skipped = 0, total_scan = 0, scan = 0;
unsigned long nr_pages;
unsigned long max_nr_skipped = 0;
LIST_HEAD(folios_skipped);
while (scan < nr_to_scan && !list_empty(src)) {
struct list_head *move_to = src;
struct folio *folio;
folio = lru_to_folio(src);
prefetchw_prev_lru_folio(folio, src, flags);
nr_pages = folio_nr_pages(folio);
total_scan += nr_pages;
if (max_nr_skipped < SWAP_CLUSTER_MAX_SKIPPED &&
(folio_zonenum(folio) > sc->reclaim_idx)) {
nr_skipped[folio_zonenum(folio)] += nr_pages;
move_to = &folios_skipped;
max_nr_skipped++;
goto move;
}
scan += nr_pages;
if (!folio_test_lru(folio))
goto move;
if (!sc->may_unmap && folio_mapped(folio))
goto move;
if (unlikely(!folio_try_get(folio)))
goto move;
if (!folio_test_clear_lru(folio)) {
folio_put(folio);
goto move;
}
nr_taken += nr_pages;
nr_zone_taken[folio_zonenum(folio)] += nr_pages;
move_to = dst;
move:
list_move(&folio->lru, move_to);
}
if (!list_empty(&folios_skipped)) {
int zid;
list_splice(&folios_skipped, src);
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
if (!nr_skipped[zid])
continue;
__count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
skipped += nr_skipped[zid];
}
}
*nr_scanned = total_scan;
trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
total_scan, skipped, nr_taken, lru);
update_lru_sizes(lruvec, lru, nr_zone_taken);
return nr_taken;
}
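/**
 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
 * @folio: Folio to isolate from its LRU list.
 *
 * Isolate a @folio from an LRU list and adjust the vmstat statistic
 * corresponding to whatever LRU list the folio was on.
 *
 * The folio will have its LRU flag cleared.  If it was found on the
 * active list, it will have the Active flag set.  If it was found on the
 * unevictable list, it will have the Unevictable flag set.  These flags
 * may need to be cleared by the caller before letting the page go.
 *
 * Context:
 * (1) Must be called with an elevated refcount on the folio. This is a
 *     fundamental difference from isolate_lru_folios() (which is called
 *     without a stable reference).
 * (2) The lru_lock must not be held.
 * (3) Interrupts must be enabled.
 *
 * Return: true if the folio was removed from an LRU list.
 * false if the folio was not on an LRU list.
 */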
bool folio_isolate_lru(struct folio *folio)
{
bool ret = false;
VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio);
if (folio_test_clear_lru(folio)) {
struct lruvec *lruvec;
folio_get(folio);
lruvec = folio_lruvec_lock_irq(folio);
lruvec_del_folio(lruvec, folio);
unlock_page_lruvec_irq(lruvec);
ret = true;
}
return ret;
}
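/*
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
 * then get rescheduled. When there are massive number of tasks doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
 * the LRU list will go small and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
 */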
static bool too_many_isolated(struct pglist_data *pgdat, int file,
struct scan_control *sc)
{
unsigned long inactive, isolated;
bool too_many;
if (current_is_kswapd())
return false;
if (!writeback_throttling_sane(sc))
return false;
if (file) {
inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
} else {
inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
}
if (gfp_has_io_fs(sc->gfp_mask))
inactive >>= 3;
too_many = isolated > inactive;
if (!too_many)
wake_throttle_isolated(pgdat);
return too_many;
}
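/*
 * move_folios_to_lru() moves folios from private @list to appropriate LRU
 * list. Folios that drop to a zero refcount here are batched and freed
 * directly.
 *
 * Returns the number of pages moved to the given lruvec.
 */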
static unsigned int move_folios_to_lru(struct lruvec *lruvec,
struct list_head *list)
{
int nr_pages, nr_moved = 0;
struct folio_batch free_folios;
folio_batch_init(&free_folios);
while (!list_empty(list)) {
struct folio *folio = lru_to_folio(list);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
list_del(&folio->lru);
if (unlikely(!folio_evictable(folio))) {
spin_unlock_irq(&lruvec->lru_lock);
folio_putback_lru(folio);
spin_lock_irq(&lruvec->lru_lock);
continue;
}
folio_set_lru(folio);
if (unlikely(folio_put_testzero(folio))) {
__folio_clear_lru_flags(folio);
folio_unqueue_deferred_split(folio);
if (folio_batch_add(&free_folios, folio) == 0) {
spin_unlock_irq(&lruvec->lru_lock);
mem_cgroup_uncharge_folios(&free_folios);
free_unref_folios(&free_folios);
spin_lock_irq(&lruvec->lru_lock);
}
continue;
}
VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
lruvec_add_folio(lruvec, folio);
nr_pages = folio_nr_pages(folio);
nr_moved += nr_pages;
if (folio_test_active(folio))
workingset_age_nonresident(lruvec, nr_pages);
}
if (free_folios.nr) {
spin_unlock_irq(&lruvec->lru_lock);
mem_cgroup_uncharge_folios(&free_folios);
free_unref_folios(&free_folios);
spin_lock_irq(&lruvec->lru_lock);
}
return nr_moved;
}
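/*
 * If a kernel thread (such as nfsd for loop-back mounts) services a backing
 * device by writing to the page cache it sets PF_LOCAL_THROTTLE; such a
 * task must not be throttled by reclaim, all others may be.
 */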
static int current_may_throttle(void)
{
return !(current->flags & PF_LOCAL_THROTTLE);
}
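/*
 * shrink_inactive_list() is a helper for shrink_node().  It returns the
 * number of reclaimed pages.
 */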
static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
struct lruvec *lruvec, struct scan_control *sc,
enum lru_list lru)
{
LIST_HEAD(folio_list);
unsigned long nr_scanned;
unsigned int nr_reclaimed = 0;
unsigned long nr_taken;
struct reclaim_stat stat;
bool file = is_file_lru(lru);
enum vm_event_item item;
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
bool stalled = false;
while (unlikely(too_many_isolated(pgdat, file, sc))) {
if (stalled)
return 0;
stalled = true;
reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);
if (fatal_signal_pending(current))
return SWAP_CLUSTER_MAX;
}
lru_add_drain();
spin_lock_irq(&lruvec->lru_lock);
nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
&nr_scanned, sc, lru);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
item = PGSCAN_KSWAPD + reclaimer_offset(sc);
if (!cgroup_reclaim(sc))
__count_vm_events(item, nr_scanned);
count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
__count_vm_events(PGSCAN_ANON + file, nr_scanned);
spin_unlock_irq(&lruvec->lru_lock);
if (nr_taken == 0)
return 0;
nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false,
lruvec_memcg(lruvec));
spin_lock_irq(&lruvec->lru_lock);
move_folios_to_lru(lruvec, &folio_list);
mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
stat.nr_demoted);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
if (!cgroup_reclaim(sc))
__count_vm_events(item, nr_reclaimed);
count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
nr_scanned - nr_reclaimed);
if (stat.nr_unqueued_dirty == nr_taken) {
wakeup_flusher_threads(WB_REASON_VMSCAN);
if (!writeback_throttling_sane(sc))
reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
}
sc->nr.dirty += stat.nr_dirty;
sc->nr.congested += stat.nr_congested;
sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
sc->nr.writeback += stat.nr_writeback;
sc->nr.immediate += stat.nr_immediate;
sc->nr.taken += nr_taken;
if (file)
sc->nr.file_taken += nr_taken;
trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
nr_scanned, nr_reclaimed, &stat, sc->priority, file);
return nr_reclaimed;
}
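/*
 * shrink_active_list() moves folios from the active LRU to the inactive LRU.
 *
 * We move them the other way if the folio is referenced by one or more
 * processes.
 *
 * If the folios are mostly unmapped, the processing is fast and it is
 * appropriate to hold lru_lock across the whole operation.  But if
 * the folios are mapped, the processing is slow (folio_referenced()), so
 * we should drop lru_lock around each folio.  It's impossible to balance
 * this, so instead, we remove the folios from the LRU while processing them.
 * It is safe to rely on the active flag against the non-LRU folios in here
 * because nobody will play with that bit on a non-LRU folio.
 *
 * The downside is that we have to touch folio->_refcount against each folio.
 * But we had to alter folio->flags anyway.
 */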
static void shrink_active_list(unsigned long nr_to_scan,
struct lruvec *lruvec,
struct scan_control *sc,
enum lru_list lru)
{
unsigned long nr_taken;
unsigned long nr_scanned;
vm_flags_t vm_flags;
LIST_HEAD(l_hold);
LIST_HEAD(l_active);
LIST_HEAD(l_inactive);
	unsigned int nr_deactivate, nr_activate;
	unsigned int nr_rotated = 0;
bool file = is_file_lru(lru);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
lru_add_drain();
spin_lock_irq(&lruvec->lru_lock);
nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold,
&nr_scanned, sc, lru);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
if (!cgroup_reclaim(sc))
__count_vm_events(PGREFILL, nr_scanned);
count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
spin_unlock_irq(&lruvec->lru_lock);
while (!list_empty(&l_hold)) {
struct folio *folio;
cond_resched();
folio = lru_to_folio(&l_hold);
list_del(&folio->lru);
if (unlikely(!folio_evictable(folio))) {
folio_putback_lru(folio);
continue;
}
if (unlikely(buffer_heads_over_limit)) {
if (folio_needs_release(folio) &&
folio_trylock(folio)) {
filemap_release_folio(folio, 0);
folio_unlock(folio);
}
}
if (folio_referenced(folio, 0, sc->target_mem_cgroup,
&vm_flags) != 0) {
if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) {
nr_rotated += folio_nr_pages(folio);
list_add(&folio->lru, &l_active);
continue;
}
}
folio_clear_active(folio);
folio_set_workingset(folio);
list_add(&folio->lru, &l_inactive);
}
spin_lock_irq(&lruvec->lru_lock);
nr_activate = move_folios_to_lru(lruvec, &l_active);
nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
__count_vm_events(PGDEACTIVATE, nr_deactivate);
count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
lru_note_cost_unlock_irq(lruvec, file, 0, nr_rotated);
trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
nr_deactivate, nr_rotated, sc->priority, file);
}
static unsigned int reclaim_folio_list(struct list_head *folio_list,
struct pglist_data *pgdat)
{
struct reclaim_stat stat;
unsigned int nr_reclaimed;
struct folio *folio;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.may_writepage = 1,
.may_unmap = 1,
.may_swap = 1,
.no_demotion = 1,
};
nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &stat, true, NULL);
while (!list_empty(folio_list)) {
folio = lru_to_folio(folio_list);
list_del(&folio->lru);
folio_putback_lru(folio);
}
trace_mm_vmscan_reclaim_pages(pgdat->node_id, sc.nr_scanned, nr_reclaimed, &stat);
return nr_reclaimed;
}
unsigned long reclaim_pages(struct list_head *folio_list)
{
int nid;
unsigned int nr_reclaimed = 0;
LIST_HEAD(node_folio_list);
unsigned int noreclaim_flag;
if (list_empty(folio_list))
return nr_reclaimed;
noreclaim_flag = memalloc_noreclaim_save();
nid = folio_nid(lru_to_folio(folio_list));
do {
struct folio *folio = lru_to_folio(folio_list);
if (nid == folio_nid(folio)) {
folio_clear_active(folio);
list_move(&folio->lru, &node_folio_list);
continue;
}
nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
nid = folio_nid(lru_to_folio(folio_list));
} while (!list_empty(folio_list));
nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
memalloc_noreclaim_restore(noreclaim_flag);
return nr_reclaimed;
}
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
struct lruvec *lruvec, struct scan_control *sc)
{
if (is_active_lru(lru)) {
if (sc->may_deactivate & (1 << is_file_lru(lru)))
shrink_active_list(nr_to_scan, lruvec, sc, lru);
else
sc->skipped_deactivate = 1;
return 0;
}
return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
}
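/*
 * The inactive anon list should be small enough that the VM never has
 * to do too much work.
 *
 * The inactive file list should be small enough to leave most memory
 * to the established workingset on the scan-resistant active list,
 * but large enough to avoid thrashing the aggregate readahead window.
 *
 * Both inactive lists should also be large enough that each inactive
 * folio has a chance to be referenced again before it is reclaimed.
 *
 * If that fails and refaulting is observed, the inactive list grows.
 *
 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE folios
 * on this LRU, maintained by the pageout code. An inactive_ratio
 * of 3 means 3:1 or 25% of the folios are kept on the inactive list.
 *
 * total     target    max
 * memory    ratio     inactive
 * -------------------------------------
 *   10MB       1         5MB
 *  100MB       1        50MB
 *    1GB       3       250MB
 *   10GB      10       0.9GB
 *  100GB      31         3GB
 *    1TB     101        10GB
 *   10TB     320        32GB
 */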
static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
{
enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
unsigned long inactive, active;
unsigned long inactive_ratio;
unsigned long gb;
inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
gb = (inactive + active) >> (30 - PAGE_SHIFT);
if (gb)
inactive_ratio = int_sqrt(10 * gb);
else
inactive_ratio = 1;
return inactive * inactive_ratio < active;
}
enum scan_balance {
	SCAN_EQUAL,	/* Scan both anon and file LRUs in equal amounts */
	SCAN_FRACT,	/* Scan in fractions set by the pressure balance */
	SCAN_ANON,	/* Scan only the anonymous LRUs */
	SCAN_FILE,	/* Scan only the file LRUs */
};
static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
{
unsigned long file;
struct lruvec *target_lruvec;
if (lru_gen_enabled())
return;
target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
mem_cgroup_flush_stats_ratelimited(sc->target_mem_cgroup);
spin_lock_irq(&target_lruvec->lru_lock);
sc->anon_cost = target_lruvec->anon_cost;
sc->file_cost = target_lruvec->file_cost;
spin_unlock_irq(&target_lruvec->lru_lock);
if (!sc->force_deactivate) {
unsigned long refaults;
refaults = lruvec_page_state(target_lruvec,
WORKINGSET_ACTIVATE_ANON);
if (refaults != target_lruvec->refaults[WORKINGSET_ANON] ||
inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
sc->may_deactivate |= DEACTIVATE_ANON;
else
sc->may_deactivate &= ~DEACTIVATE_ANON;
refaults = lruvec_page_state(target_lruvec,
WORKINGSET_ACTIVATE_FILE);
if (refaults != target_lruvec->refaults[WORKINGSET_FILE] ||
inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
sc->may_deactivate |= DEACTIVATE_FILE;
else
sc->may_deactivate &= ~DEACTIVATE_FILE;
} else
sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE) &&
!sc->no_cache_trim_mode)
sc->cache_trim_mode = 1;
else
sc->cache_trim_mode = 0;
if (!cgroup_reclaim(sc)) {
unsigned long total_high_wmark = 0;
unsigned long free, anon;
int z;
struct zone *zone;
free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
file = node_page_state(pgdat, NR_ACTIVE_FILE) +
node_page_state(pgdat, NR_INACTIVE_FILE);
for_each_managed_zone_pgdat(zone, pgdat, z, MAX_NR_ZONES - 1) {
total_high_wmark += high_wmark_pages(zone);
}
anon = node_page_state(pgdat, NR_INACTIVE_ANON);
sc->file_is_tiny =
file + free <= total_high_wmark &&
!(sc->may_deactivate & DEACTIVATE_ANON) &&
anon >> sc->priority;
}
}
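/*
 * Calculate the pressure balance between anon and file pages.
 *
 * The amount of pressure we put on each LRU is inversely
 * proportional to the cost of reclaiming each list, as
 * determined by the share of pages that are refaulting, times
 * the relative IO cost of bringing back a swapped out
 * anonymous page vs reloading a filesystem page (swappiness).
 *
 * Although we limit that influence to ensure no list gets
 * left behind completely: at least a third of the pressure is
 * applied, even if it looks like it was already balanced.
 */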
static inline void calculate_pressure_balance(struct scan_control *sc,
int swappiness, u64 *fraction, u64 *denominator)
{
unsigned long anon_cost, file_cost, total_cost;
unsigned long ap, fp;
total_cost = sc->anon_cost + sc->file_cost;
anon_cost = total_cost + sc->anon_cost;
file_cost = total_cost + sc->file_cost;
total_cost = anon_cost + file_cost;
ap = swappiness * (total_cost + 1);
ap /= anon_cost + 1;
fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1);
fp /= file_cost + 1;
fraction[WORKINGSET_ANON] = ap;
fraction[WORKINGSET_FILE] = fp;
*denominator = ap + fp;
}
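/*
 * Scale a cgroup's reclaim pressure by proportioning its current
 * usage to its memory.low or memory.min setting.
 *
 * This is important, as otherwise scanning aggression becomes extremely
 * binary -- from nothing as we approach the memory protection
 * threshold, to totally nominal as we exceed it.
 */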
static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
struct scan_control *sc, unsigned long scan)
{
unsigned long min, low, usage;
mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low, &usage);
if (min || low) {
unsigned long protection;
if (!sc->memcg_low_reclaim && low > min) {
protection = low;
sc->memcg_low_skipped = 1;
} else {
protection = min;
}
usage = max(usage, protection);
scan -= scan * protection / (usage + 1);
scan = max(scan, SWAP_CLUSTER_MAX);
}
return scan;
}
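/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.
 *
 * nr[0] = anon inactive folios to scan; nr[1] = anon active folios to scan
 * nr[2] = file inactive folios to scan; nr[3] = file active folios to scan
 */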
static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
unsigned long *nr)
{
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
int swappiness = sc_swappiness(sc, memcg);
u64 fraction[ANON_AND_FILE];
u64 denominator = 0;
enum scan_balance scan_balance;
enum lru_list lru;
if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
scan_balance = SCAN_FILE;
goto out;
}
if (cgroup_reclaim(sc) && !swappiness) {
scan_balance = SCAN_FILE;
goto out;
}
if (swappiness == SWAPPINESS_ANON_ONLY) {
WARN_ON_ONCE(!sc->proactive);
scan_balance = SCAN_ANON;
goto out;
}
if (!sc->priority && swappiness) {
scan_balance = SCAN_EQUAL;
goto out;
}
if (sc->file_is_tiny) {
scan_balance = SCAN_ANON;
goto out;
}
if (sc->cache_trim_mode) {
scan_balance = SCAN_FILE;
goto out;
}
scan_balance = SCAN_FRACT;
calculate_pressure_balance(sc, swappiness, fraction, &denominator);
out:
for_each_evictable_lru(lru) {
bool file = is_file_lru(lru);
unsigned long lruvec_size;
unsigned long scan;
lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
scan = apply_proportional_protection(memcg, sc, lruvec_size);
scan >>= sc->priority;
if (!scan && !mem_cgroup_online(memcg))
scan = min(lruvec_size, SWAP_CLUSTER_MAX);
switch (scan_balance) {
case SCAN_EQUAL:
break;
case SCAN_FRACT:
scan = mem_cgroup_online(memcg) ?
div64_u64(scan * fraction[file], denominator) :
DIV64_U64_ROUND_UP(scan * fraction[file],
denominator);
break;
case SCAN_FILE:
case SCAN_ANON:
if ((scan_balance == SCAN_FILE) != file)
scan = 0;
break;
default:
BUG();
}
nr[lru] = scan;
}
}
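/*
 * Anonymous LRU management is a waste if there is
 * ultimately no way to reclaim the memory.
 */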
static bool can_age_anon_pages(struct lruvec *lruvec,
struct scan_control *sc)
{
if (total_swap_pages > 0)
return true;
return can_demote(lruvec_pgdat(lruvec)->node_id, sc,
lruvec_memcg(lruvec));
}
#ifdef CONFIG_LRU_GEN
#ifdef CONFIG_LRU_GEN_ENABLED
DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS);
#define get_cap(cap) static_branch_likely(&lru_gen_caps[cap])
#else
DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
#define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap])
#endif
static bool should_walk_mmu(void)
{
return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK);
}
static bool should_clear_pmd_young(void)
{
return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG);
}
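/******************************************************************************
 *                          shorthand helpers
 ******************************************************************************/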
#define DEFINE_MAX_SEQ(lruvec) \
unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
#define DEFINE_MIN_SEQ(lruvec) \
unsigned long min_seq[ANON_AND_FILE] = { \
READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
}
#define min_type(swappiness) (!(swappiness))
#define max_type(swappiness) ((swappiness) < SWAPPINESS_ANON_ONLY)
#define evictable_min_seq(min_seq, swappiness) \
min((min_seq)[min_type(swappiness)], (min_seq)[max_type(swappiness)])
#define for_each_gen_type_zone(gen, type, zone) \
for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
#define for_each_evictable_type(type, swappiness) \
for ((type) = min_type(swappiness); (type) <= max_type(swappiness); (type)++)
#define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS)
#define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS)
static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
{
struct pglist_data *pgdat = NODE_DATA(nid);
#ifdef CONFIG_MEMCG
if (memcg) {
struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
if (!lruvec->pgdat)
lruvec->pgdat = pgdat;
return lruvec;
}
#endif
VM_WARN_ON_ONCE(!mem_cgroup_disabled());
return &pgdat->__lruvec;
}
static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
{
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
if (!sc->may_swap)
return 0;
if (!can_demote(pgdat->node_id, sc, memcg) &&
mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
return 0;
return sc_swappiness(sc, memcg);
}
static int get_nr_gens(struct lruvec *lruvec, int type)
{
return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
}
static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
{
int type;
for (type = 0; type < ANON_AND_FILE; type++) {
int n = get_nr_gens(lruvec, type);
if (n < MIN_NR_GENS || n > MAX_NR_GENS)
return false;
}
return true;
}
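/******************************************************************************
 *                          Bloom filters
 ******************************************************************************/

/*
 * Bloom filters with m=1<<15, k=2 and the false positive rates of ~1/5 when
 * n=10,000 and ~1/2 when n=30,000, respectively; see:
 * https://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives
 *
 * In order to reduce the cost of walking page tables, Bloom filters are used
 * to record populated branches that can be skipped on subsequent walks, at
 * the cost of occasionally walking an unpopulated branch (false positive).
 */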
#define BLOOM_FILTER_SHIFT 15
static inline int filter_gen_from_seq(unsigned long seq)
{
return seq % NR_BLOOM_FILTERS;
}
static void get_item_key(void *item, int *key)
{
u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2);
BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32));
key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1);
key[1] = hash >> BLOOM_FILTER_SHIFT;
}
static bool test_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq,
void *item)
{
int key[2];
unsigned long *filter;
int gen = filter_gen_from_seq(seq);
filter = READ_ONCE(mm_state->filters[gen]);
if (!filter)
return true;
get_item_key(item, key);
return test_bit(key[0], filter) && test_bit(key[1], filter);
}
static void update_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq,
void *item)
{
int key[2];
unsigned long *filter;
int gen = filter_gen_from_seq(seq);
filter = READ_ONCE(mm_state->filters[gen]);
if (!filter)
return;
get_item_key(item, key);
if (!test_bit(key[0], filter))
set_bit(key[0], filter);
if (!test_bit(key[1], filter))
set_bit(key[1], filter);
}
static void reset_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq)
{
unsigned long *filter;
int gen = filter_gen_from_seq(seq);
filter = mm_state->filters[gen];
if (filter) {
bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
return;
}
filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT),
__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
WRITE_ONCE(mm_state->filters[gen], filter);
}
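/******************************************************************************
 *                          mm_struct list
 ******************************************************************************/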
#ifdef CONFIG_LRU_GEN_WALKS_MMU
static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
{
static struct lru_gen_mm_list mm_list = {
.fifo = LIST_HEAD_INIT(mm_list.fifo),
.lock = __SPIN_LOCK_UNLOCKED(mm_list.lock),
};
#ifdef CONFIG_MEMCG
if (memcg)
return &memcg->mm_list;
#endif
VM_WARN_ON_ONCE(!mem_cgroup_disabled());
return &mm_list;
}
static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
{
return &lruvec->mm_state;
}
static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
{
int key;
struct mm_struct *mm;
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec);
mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap))
return NULL;
clear_bit(key, &mm->lru_gen.bitmap);
return mmget_not_zero(mm) ? mm : NULL;
}
void lru_gen_add_mm(struct mm_struct *mm)
{
int nid;
struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list));
#ifdef CONFIG_MEMCG
VM_WARN_ON_ONCE(mm->lru_gen.memcg);
mm->lru_gen.memcg = memcg;
#endif
spin_lock(&mm_list->lock);
for_each_node_state(nid, N_MEMORY) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
if (mm_state->tail == &mm_list->fifo)
mm_state->tail = &mm->lru_gen.list;
}
list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
spin_unlock(&mm_list->lock);
}
void lru_gen_del_mm(struct mm_struct *mm)
{
int nid;
struct lru_gen_mm_list *mm_list;
struct mem_cgroup *memcg = NULL;
if (list_empty(&mm->lru_gen.list))
return;
#ifdef CONFIG_MEMCG
memcg = mm->lru_gen.memcg;
#endif
mm_list = get_mm_list(memcg);
spin_lock(&mm_list->lock);
for_each_node(nid) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
if (mm_state->head == &mm->lru_gen.list)
mm_state->head = mm_state->head->prev;
if (mm_state->tail == &mm->lru_gen.list)
mm_state->tail = mm_state->tail->next;
}
list_del_init(&mm->lru_gen.list);
spin_unlock(&mm_list->lock);
#ifdef CONFIG_MEMCG
mem_cgroup_put(mm->lru_gen.memcg);
mm->lru_gen.memcg = NULL;
#endif
}
#ifdef CONFIG_MEMCG
void lru_gen_migrate_mm(struct mm_struct *mm)
{
struct mem_cgroup *memcg;
struct task_struct *task = rcu_dereference_protected(mm->owner, true);
VM_WARN_ON_ONCE(task->mm != mm);
lockdep_assert_held(&task->alloc_lock);
if (mem_cgroup_disabled())
return;
if (!mm->lru_gen.memcg)
return;
rcu_read_lock();
memcg = mem_cgroup_from_task(task);
rcu_read_unlock();
if (memcg == mm->lru_gen.memcg)
return;
VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
lru_gen_del_mm(mm);
lru_gen_add_mm(mm);
}
#endif
#else
static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
{
return NULL;
}
static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
{
return NULL;
}
static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
{
return NULL;
}
#endif
static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last)
{
int i;
int hist;
struct lruvec *lruvec = walk->lruvec;
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
hist = lru_hist_from_seq(walk->seq);
for (i = 0; i < NR_MM_STATS; i++) {
WRITE_ONCE(mm_state->stats[hist][i],
mm_state->stats[hist][i] + walk->mm_stats[i]);
walk->mm_stats[i] = 0;
}
if (NR_HIST_GENS > 1 && last) {
hist = lru_hist_from_seq(walk->seq + 1);
for (i = 0; i < NR_MM_STATS; i++)
WRITE_ONCE(mm_state->stats[hist][i], 0);
}
}
static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **iter)
{
bool first = false;
bool last = false;
struct mm_struct *mm = NULL;
struct lruvec *lruvec = walk->lruvec;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
spin_lock(&mm_list->lock);
VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq);
if (walk->seq <= mm_state->seq)
goto done;
if (!mm_state->head)
mm_state->head = &mm_list->fifo;
if (mm_state->head == &mm_list->fifo)
first = true;
do {
mm_state->head = mm_state->head->next;
if (mm_state->head == &mm_list->fifo) {
WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
last = true;
break;
}
if (!mm_state->tail || mm_state->tail == mm_state->head) {
mm_state->tail = mm_state->head->next;
walk->force_scan = true;
}
} while (!(mm = get_next_mm(walk)));
done:
if (*iter || last)
reset_mm_stats(walk, last);
spin_unlock(&mm_list->lock);
if (mm && first)
reset_bloom_filter(mm_state, walk->seq + 1);
if (*iter)
mmput_async(*iter);
*iter = mm;
return last;
}
static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long seq)
{
bool success = false;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
spin_lock(&mm_list->lock);
VM_WARN_ON_ONCE(mm_state->seq + 1 < seq);
if (seq > mm_state->seq) {
mm_state->head = NULL;
mm_state->tail = NULL;
WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
success = true;
}
spin_unlock(&mm_list->lock);
return success;
}
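/******************************************************************************
 *                          PID controller
 ******************************************************************************/

/*
 * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
 *
 * The P term is refaulted/(evicted+protected) from a tier in the generation
 * currently being evicted; the I term is the exponential moving average of
 * the P term over the generations previously evicted, using the smoothing
 * factor 1/2; the D term isn't supported.
 *
 * The setpoint (SP) is always the first tier of one type; the process
 * variable (PV) is either any tier of the other type or any other tier of
 * the same type.
 *
 * The error is the difference between the SP and the PV; the correction is
 * to turn off protection when SP>PV or turn on protection when SP<PV.
 */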
struct ctrl_pos {
unsigned long refaulted;
unsigned long total;
int gain;
};
static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
struct ctrl_pos *pos)
{
int i;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
pos->gain = gain;
pos->refaulted = pos->total = 0;
for (i = tier % MAX_NR_TIERS; i <= min(tier, MAX_NR_TIERS - 1); i++) {
pos->refaulted += lrugen->avg_refaulted[type][i] +
atomic_long_read(&lrugen->refaulted[hist][type][i]);
pos->total += lrugen->avg_total[type][i] +
lrugen->protected[hist][type][i] +
atomic_long_read(&lrugen->evicted[hist][type][i]);
}
}
static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
{
int hist, tier;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
lockdep_assert_held(&lruvec->lru_lock);
if (!carryover && !clear)
return;
hist = lru_hist_from_seq(seq);
for (tier = 0; tier < MAX_NR_TIERS; tier++) {
if (carryover) {
unsigned long sum;
sum = lrugen->avg_refaulted[type][tier] +
atomic_long_read(&lrugen->refaulted[hist][type][tier]);
WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
sum = lrugen->avg_total[type][tier] +
lrugen->protected[hist][type][tier] +
atomic_long_read(&lrugen->evicted[hist][type][tier]);
WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
}
if (clear) {
atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
WRITE_ONCE(lrugen->protected[hist][type][tier], 0);
}
}
}
static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
{
return pv->refaulted < MIN_LRU_BATCH ||
pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
(sp->refaulted + 1) * pv->total * pv->gain;
}
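/******************************************************************************
 *                          the aging
 ******************************************************************************/

/* promote folios accessed through page tables */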
static int folio_update_gen(struct folio *folio, int gen)
{
unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
return -1;
}
do {
if (!(old_flags & LRU_GEN_MASK))
return -1;
new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
new_flags |= ((gen + 1UL) << LRU_GEN_PGOFF) | BIT(PG_workingset);
} while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
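/* protect folios accessed multiple times through file descriptors */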
static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
int type = folio_is_file_lru(folio);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio);
do {
new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
if (new_gen >= 0 && new_gen != old_gen)
return new_gen;
new_gen = (old_gen + 1) % MAX_NR_GENS;
new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
if (reclaiming)
new_flags |= BIT(PG_reclaim);
} while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
lru_gen_update_size(lruvec, folio, old_gen, new_gen);
return new_gen;
}
static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio,
int old_gen, int new_gen)
{
int type = folio_is_file_lru(folio);
int zone = folio_zonenum(folio);
int delta = folio_nr_pages(folio);
VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS);
VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS);
walk->batched++;
walk->nr_pages[old_gen][type][zone] -= delta;
walk->nr_pages[new_gen][type][zone] += delta;
}
static void reset_batch_size(struct lru_gen_mm_walk *walk)
{
int gen, type, zone;
struct lruvec *lruvec = walk->lruvec;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
walk->batched = 0;
for_each_gen_type_zone(gen, type, zone) {
enum lru_list lru = type * LRU_INACTIVE_FILE;
int delta = walk->nr_pages[gen][type][zone];
if (!delta)
continue;
walk->nr_pages[gen][type][zone] = 0;
WRITE_ONCE(lrugen->nr_pages[gen][type][zone],
lrugen->nr_pages[gen][type][zone] + delta);
if (lru_gen_is_active(lruvec, gen))
lru += LRU_ACTIVE;
__update_lru_size(lruvec, lru, zone, delta);
}
}
static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args)
{
struct address_space *mapping;
struct vm_area_struct *vma = args->vma;
struct lru_gen_mm_walk *walk = args->private;
if (!vma_is_accessible(vma))
return true;
if (is_vm_hugetlb_page(vma))
return true;
if (!vma_has_recency(vma))
return true;
if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL))
return true;
if (vma == get_gate_vma(vma->vm_mm))
return true;
if (vma_is_anonymous(vma))
return !walk->swappiness;
if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
return true;
mapping = vma->vm_file->f_mapping;
if (mapping_unevictable(mapping))
return true;
if (shmem_mapping(mapping))
return !walk->swappiness;
if (walk->swappiness > MAX_SWAPPINESS)
return true;
return !mapping->a_ops->read_folio;
}
static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args,
unsigned long *vm_start, unsigned long *vm_end)
{
unsigned long start = round_up(*vm_end, size);
unsigned long end = (start | ~mask) + 1;
VMA_ITERATOR(vmi, args->mm, start);
VM_WARN_ON_ONCE(mask & size);
VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask));
for_each_vma(vmi, args->vma) {
if (end && end <= args->vma->vm_start)
return false;
if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args))
continue;
*vm_start = max(start, args->vma->vm_start);
*vm_end = min(end - 1, args->vma->vm_end - 1) + 1;
return true;
}
return false;
}
static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr,
struct pglist_data *pgdat)
{
unsigned long pfn = pte_pfn(pte);
VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
if (!pte_present(pte) || is_zero_pfn(pfn))
return -1;
if (WARN_ON_ONCE(pte_special(pte)))
return -1;
if (!pte_young(pte) && !mm_has_notifiers(vma->vm_mm))
return -1;
if (WARN_ON_ONCE(!pfn_valid(pfn)))
return -1;
if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
return -1;
return pfn;
}
static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr,
struct pglist_data *pgdat)
{
unsigned long pfn = pmd_pfn(pmd);
VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
return -1;
if (!pmd_young(pmd) && !mm_has_notifiers(vma->vm_mm))
return -1;
if (WARN_ON_ONCE(!pfn_valid(pfn)))
return -1;
if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
return -1;
return pfn;
}
static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
struct pglist_data *pgdat)
{
struct folio *folio = pfn_folio(pfn);
if (folio_lru_gen(folio) < 0)
return NULL;
if (folio_nid(folio) != pgdat->node_id)
return NULL;
if (folio_memcg(folio) != memcg)
return NULL;
return folio;
}
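/* suitable if the average number of young PTEs per cacheline is >=1 */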
static bool suitable_to_scan(int total, int young)
{
int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8);
return young * n >= total;
}
static void walk_update_folio(struct lru_gen_mm_walk *walk, struct folio *folio,
int new_gen, bool dirty)
{
int old_gen;
if (!folio)
return;
if (dirty && !folio_test_dirty(folio) &&
!(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
!folio_test_swapcache(folio)))
folio_mark_dirty(folio);
if (walk) {
old_gen = folio_update_gen(folio, new_gen);
if (old_gen >= 0 && old_gen != new_gen)
update_batch_size(walk, folio, old_gen, new_gen);
} else if (lru_gen_set_refs(folio)) {
old_gen = folio_lru_gen(folio);
if (old_gen >= 0 && old_gen != new_gen)
folio_activate(folio);
}
}
static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
struct mm_walk *args)
{
int i;
bool dirty;
pte_t *pte;
spinlock_t *ptl;
unsigned long addr;
int total = 0;
int young = 0;
struct folio *last = NULL;
struct lru_gen_mm_walk *walk = args->private;
struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
DEFINE_MAX_SEQ(walk->lruvec);
int gen = lru_gen_from_seq(max_seq);
pmd_t pmdval;
pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, &ptl);
if (!pte)
return false;
if (!spin_trylock(ptl)) {
pte_unmap(pte);
return true;
}
if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
pte_unmap_unlock(pte, ptl);
return false;
}
lazy_mmu_mode_enable();
restart:
for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
unsigned long pfn;
struct folio *folio;
pte_t ptent = ptep_get(pte + i);
total++;
walk->mm_stats[MM_LEAF_TOTAL]++;
pfn = get_pte_pfn(ptent, args->vma, addr, pgdat);
if (pfn == -1)
continue;
folio = get_pfn_folio(pfn, memcg, pgdat);
if (!folio)
continue;
if (!ptep_clear_young_notify(args->vma, addr, pte + i))
continue;
if (last != folio) {
walk_update_folio(walk, last, gen, dirty);
last = folio;
dirty = false;
}
if (pte_dirty(ptent))
dirty = true;
young++;
walk->mm_stats[MM_LEAF_YOUNG]++;
}
walk_update_folio(walk, last, gen, dirty);
last = NULL;
if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
goto restart;
lazy_mmu_mode_disable();
pte_unmap_unlock(pte, ptl);
return suitable_to_scan(total, young);
}
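/*
 * Batch PMD entries of interest in @bitmap, anchored at @*first; when the
 * batch window is exceeded, or on the final flush (addr == -1), take the PMD
 * lock once and clear the accessed bit on the batched entries, updating the
 * generation of the folios mapped by huge PMDs.
 */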
static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
{
int i;
bool dirty;
pmd_t *pmd;
spinlock_t *ptl;
struct folio *last = NULL;
struct lru_gen_mm_walk *walk = args->private;
struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
DEFINE_MAX_SEQ(walk->lruvec);
int gen = lru_gen_from_seq(max_seq);
VM_WARN_ON_ONCE(pud_leaf(*pud));
if (*first == -1) {
*first = addr;
bitmap_zero(bitmap, MIN_LRU_BATCH);
return;
}
i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first);
if (i && i <= MIN_LRU_BATCH) {
__set_bit(i - 1, bitmap);
return;
}
pmd = pmd_offset(pud, *first);
ptl = pmd_lockptr(args->mm, pmd);
if (!spin_trylock(ptl))
goto done;
lazy_mmu_mode_enable();
do {
unsigned long pfn;
struct folio *folio;
addr = i ? (*first & PMD_MASK) + i * PMD_SIZE : *first;
if (!pmd_present(pmd[i]))
goto next;
if (!pmd_trans_huge(pmd[i])) {
if (!walk->force_scan && should_clear_pmd_young() &&
!mm_has_notifiers(args->mm))
pmdp_test_and_clear_young(vma, addr, pmd + i);
goto next;
}
pfn = get_pmd_pfn(pmd[i], vma, addr, pgdat);
if (pfn == -1)
goto next;
folio = get_pfn_folio(pfn, memcg, pgdat);
if (!folio)
goto next;
if (!pmdp_clear_young_notify(vma, addr, pmd + i))
goto next;
if (last != folio) {
walk_update_folio(walk, last, gen, dirty);
last = folio;
dirty = false;
}
if (pmd_dirty(pmd[i]))
dirty = true;
walk->mm_stats[MM_LEAF_YOUNG]++;
next:
i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
} while (i <= MIN_LRU_BATCH);
walk_update_folio(walk, last, gen, dirty);
lazy_mmu_mode_disable();
spin_unlock(ptl);
done:
*first = -1;
}
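/*
 * Walk the PMD entries covering [start, end): queue entries whose accessed
 * bit must be cleared under the PMD lock via walk_pmd_range_locked(), and
 * descend into a PTE table only if the Bloom filter says it contained young
 * PTEs during the previous walk.
 */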
static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
struct mm_walk *args)
{
int i;
pmd_t *pmd;
unsigned long next;
unsigned long addr;
struct vm_area_struct *vma;
DECLARE_BITMAP(bitmap, MIN_LRU_BATCH);
unsigned long first = -1;
struct lru_gen_mm_walk *walk = args->private;
struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec);
VM_WARN_ON_ONCE(pud_leaf(*pud));
pmd = pmd_offset(pud, start & PUD_MASK);
restart:
vma = args->vma;
for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) {
pmd_t val = pmdp_get_lockless(pmd + i);
next = pmd_addr_end(addr, end);
if (!pmd_present(val) || is_huge_zero_pmd(val)) {
walk->mm_stats[MM_LEAF_TOTAL]++;
continue;
}
if (pmd_trans_huge(val)) {
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
unsigned long pfn = get_pmd_pfn(val, vma, addr, pgdat);
walk->mm_stats[MM_LEAF_TOTAL]++;
if (pfn != -1)
walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
continue;
}
if (!walk->force_scan && should_clear_pmd_young() &&
!mm_has_notifiers(args->mm)) {
if (!pmd_young(val))
continue;
walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
}
if (!walk->force_scan && !test_bloom_filter(mm_state, walk->seq, pmd + i))
continue;
walk->mm_stats[MM_NONLEAF_FOUND]++;
if (!walk_pte_range(&val, addr, next, args))
continue;
walk->mm_stats[MM_NONLEAF_ADDED]++;
update_bloom_filter(mm_state, walk->seq + 1, pmd + i);
}
walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first);
if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end))
goto restart;
}
static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
struct mm_walk *args)
{
int i;
pud_t *pud;
unsigned long addr;
unsigned long next;
struct lru_gen_mm_walk *walk = args->private;
VM_WARN_ON_ONCE(p4d_leaf(*p4d));
pud = pud_offset(p4d, start & P4D_MASK);
restart:
for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
pud_t val = pudp_get(pud + i);
next = pud_addr_end(addr, end);
if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
continue;
walk_pmd_range(&val, addr, next, args);
if (need_resched() || walk->batched >= MAX_LRU_BATCH) {
end = (addr | ~PUD_MASK) + 1;
goto done;
}
}
if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end))
goto restart;
end = round_up(end, P4D_SIZE);
done:
if (!end || !args->vma)
return 1;
walk->next_addr = max(end, args->vma->vm_start);
return -EAGAIN;
}
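/*
 * Walk one mm_struct from walk->next_addr onward. walk_pud_range() returns
 * -EAGAIN after each batch so the mmap lock is dropped and batched LRU
 * updates are flushed between rounds; the walk aborts if another thread has
 * already advanced max_seq.
 */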
static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
{
static const struct mm_walk_ops mm_walk_ops = {
.test_walk = should_skip_vma,
.p4d_entry = walk_pud_range,
.walk_lock = PGWALK_RDLOCK,
};
int err;
struct lruvec *lruvec = walk->lruvec;
walk->next_addr = FIRST_USER_ADDRESS;
do {
DEFINE_MAX_SEQ(lruvec);
err = -EBUSY;
if (walk->seq != max_seq)
break;
if (mmap_read_trylock(mm)) {
err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk);
mmap_read_unlock(mm);
}
if (walk->batched) {
spin_lock_irq(&lruvec->lru_lock);
reset_batch_size(walk);
spin_unlock_irq(&lruvec->lru_lock);
}
cond_resched();
} while (err == -EAGAIN);
}
static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc)
{
struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
if (pgdat && current_is_kswapd()) {
VM_WARN_ON_ONCE(walk);
walk = &pgdat->mm_walk;
} else if (!walk && force_alloc) {
VM_WARN_ON_ONCE(current_is_kswapd());
walk = kzalloc_obj(*walk,
__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
}
current->reclaim_state->mm_walk = walk;
return walk;
}
static void clear_mm_walk(void)
{
struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages)));
VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats)));
current->reclaim_state->mm_walk = NULL;
if (!current_is_kswapd())
kfree(walk);
}
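/*
 * Force min_seq[type] forward by one: folios still in the oldest generation
 * are promoted by one generation, up to MAX_LRU_BATCH at a time, with the
 * ones protected by their tiers accounted for in the current histogram.
 */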
static bool inc_min_seq(struct lruvec *lruvec, int type, int swappiness)
{
int zone;
int remaining = MAX_LRU_BATCH;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
if (type && (swappiness == SWAPPINESS_ANON_ONLY))
goto done;
if (!type && !swappiness)
goto done;
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
struct list_head *head = &lrugen->folios[old_gen][type][zone];
while (!list_empty(head)) {
struct folio *folio = lru_to_folio(head);
int refs = folio_lru_refs(folio);
bool workingset = folio_test_workingset(folio);
VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
new_gen = folio_inc_gen(lruvec, folio, false);
list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
if (refs + workingset != BIT(LRU_REFS_WIDTH) + 1) {
int tier = lru_tier_from_refs(refs, workingset);
int delta = folio_nr_pages(folio);
WRITE_ONCE(lrugen->protected[hist][type][tier],
lrugen->protected[hist][type][tier] + delta);
}
if (!--remaining)
return false;
}
}
done:
reset_ctrl_pos(lruvec, type, true);
WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
return true;
}
static bool try_to_inc_min_seq(struct lruvec *lruvec, int swappiness)
{
int gen, type, zone;
bool success = false;
bool seq_inc_flag = false;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
DEFINE_MIN_SEQ(lruvec);
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
for_each_evictable_type(type, swappiness) {
while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) {
gen = lru_gen_from_seq(min_seq[type]);
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
if (!list_empty(&lrugen->folios[gen][type][zone]))
goto next;
}
min_seq[type]++;
seq_inc_flag = true;
}
next:
;
}
if (!seq_inc_flag)
return success;
if (swappiness && swappiness <= MAX_SWAPPINESS) {
unsigned long seq = lrugen->max_seq - MIN_NR_GENS;
if (min_seq[LRU_GEN_ANON] > seq && min_seq[LRU_GEN_FILE] < seq)
min_seq[LRU_GEN_ANON] = seq;
else if (min_seq[LRU_GEN_FILE] > seq && min_seq[LRU_GEN_ANON] < seq)
min_seq[LRU_GEN_FILE] = seq;
}
for_each_evictable_type(type, swappiness) {
if (min_seq[type] <= lrugen->min_seq[type])
continue;
reset_ctrl_pos(lruvec, type, true);
WRITE_ONCE(lrugen->min_seq[type], min_seq[type]);
success = true;
}
return success;
}
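/*
 * Create a new (youngest) generation: make room by forcing min_seq forward if
 * all MAX_NR_GENS generations are in use, adjust the conventional
 * active/inactive LRU size counters for the generation slot being reused, and
 * publish the new max_seq.
 */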
static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness)
{
bool success;
int prev, next;
int type, zone;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
restart:
if (seq < READ_ONCE(lrugen->max_seq))
return false;
spin_lock_irq(&lruvec->lru_lock);
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
success = seq == lrugen->max_seq;
if (!success)
goto unlock;
for (type = 0; type < ANON_AND_FILE; type++) {
if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
continue;
if (inc_min_seq(lruvec, type, swappiness))
continue;
spin_unlock_irq(&lruvec->lru_lock);
cond_resched();
goto restart;
}
prev = lru_gen_from_seq(lrugen->max_seq - 1);
next = lru_gen_from_seq(lrugen->max_seq + 1);
for (type = 0; type < ANON_AND_FILE; type++) {
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
enum lru_list lru = type * LRU_INACTIVE_FILE;
long delta = lrugen->nr_pages[prev][type][zone] -
lrugen->nr_pages[next][type][zone];
if (!delta)
continue;
__update_lru_size(lruvec, lru, zone, delta);
__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta);
}
}
for (type = 0; type < ANON_AND_FILE; type++)
reset_ctrl_pos(lruvec, type, false);
WRITE_ONCE(lrugen->timestamps[next], jiffies);
smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
unlock:
spin_unlock_irq(&lruvec->lru_lock);
return success;
}
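/*
 * Age the lruvec up to @seq: walk the page tables of the mm_structs on its
 * mm_list when the MMU walk is enabled and a walk context is available,
 * otherwise fall back to iterate_mm_list_nowalk(); finish by calling
 * inc_max_seq().
 */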
static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq,
int swappiness, bool force_scan)
{
bool success;
struct lru_gen_mm_walk *walk;
struct mm_struct *mm = NULL;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq));
if (!mm_state)
return inc_max_seq(lruvec, seq, swappiness);
if (seq <= READ_ONCE(mm_state->seq))
return false;
if (!should_walk_mmu()) {
success = iterate_mm_list_nowalk(lruvec, seq);
goto done;
}
walk = set_mm_walk(NULL, true);
if (!walk) {
success = iterate_mm_list_nowalk(lruvec, seq);
goto done;
}
walk->lruvec = lruvec;
walk->seq = seq;
walk->swappiness = swappiness;
walk->force_scan = force_scan;
do {
success = iterate_mm_list(walk, &mm);
if (mm)
walk_mm(mm, walk);
} while (mm);
done:
if (success) {
success = inc_max_seq(lruvec, seq, swappiness);
WARN_ON_ONCE(!success);
}
return success;
}
static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc)
{
int priority;
unsigned long reclaimable;
if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH)
return;
reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE);
if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON);
priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1);
sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
}
static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
{
int gen, type, zone;
unsigned long total = 0;
int swappiness = get_swappiness(lruvec, sc);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
DEFINE_MIN_SEQ(lruvec);
for_each_evictable_type(type, swappiness) {
unsigned long seq;
for (seq = min_seq[type]; seq <= max_seq; seq++) {
gen = lru_gen_from_seq(seq);
for (zone = 0; zone < MAX_NR_ZONES; zone++)
total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
}
}
return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
}
static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc,
unsigned long min_ttl)
{
int gen;
unsigned long birth;
int swappiness = get_swappiness(lruvec, sc);
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MIN_SEQ(lruvec);
if (mem_cgroup_below_min(NULL, memcg))
return false;
if (!lruvec_is_sizable(lruvec, sc))
return false;
gen = lru_gen_from_seq(evictable_min_seq(min_seq, swappiness));
birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
return time_is_before_jiffies(birth + min_ttl);
}
static unsigned long lru_gen_min_ttl __read_mostly;
static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
{
struct mem_cgroup *memcg;
unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
bool reclaimable = !min_ttl;
VM_WARN_ON_ONCE(!current_is_kswapd());
set_initial_priority(pgdat, sc);
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
mem_cgroup_calculate_protection(NULL, memcg);
if (!reclaimable)
reclaimable = lruvec_is_reclaimable(lruvec, sc, min_ttl);
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
if (!reclaimable && mutex_trylock(&oom_lock)) {
struct oom_control oc = {
.gfp_mask = sc->gfp_mask,
};
out_of_memory(&oc);
mutex_unlock(&oom_lock);
}
}
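/*
 * Exploit spatial locality from the rmap side: given a young PTE found while
 * shrinking, scan up to MIN_LRU_BATCH nearby PTEs in the same table, clear
 * their accessed bits and update the generations of the folios they map, so
 * eviction does not have to wait for the next aging walk to notice them.
 */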
bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
int i;
bool dirty;
unsigned long start;
unsigned long end;
struct lru_gen_mm_walk *walk;
struct folio *last = NULL;
int young = 1;
pte_t *pte = pvmw->pte;
unsigned long addr = pvmw->address;
struct vm_area_struct *vma = pvmw->vma;
struct folio *folio = pfn_folio(pvmw->pfn);
struct mem_cgroup *memcg = folio_memcg(folio);
struct pglist_data *pgdat = folio_pgdat(folio);
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
DEFINE_MAX_SEQ(lruvec);
int gen = lru_gen_from_seq(max_seq);
lockdep_assert_held(pvmw->ptl);
VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio);
if (!ptep_clear_young_notify(vma, addr, pte))
return false;
if (spin_is_contended(pvmw->ptl))
return true;
if (vma->vm_flags & VM_SPECIAL)
return true;
walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
start = max(addr & PMD_MASK, vma->vm_start);
end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1;
if (end - start == PAGE_SIZE)
return true;
if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
end = start + MIN_LRU_BATCH * PAGE_SIZE;
else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2)
start = end - MIN_LRU_BATCH * PAGE_SIZE;
else {
start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2;
end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2;
}
}
lazy_mmu_mode_enable();
pte -= (addr - start) / PAGE_SIZE;
for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
unsigned long pfn;
pte_t ptent = ptep_get(pte + i);
pfn = get_pte_pfn(ptent, vma, addr, pgdat);
if (pfn == -1)
continue;
folio = get_pfn_folio(pfn, memcg, pgdat);
if (!folio)
continue;
if (!ptep_clear_young_notify(vma, addr, pte + i))
continue;
if (last != folio) {
walk_update_folio(walk, last, gen, dirty);
last = folio;
dirty = false;
}
if (pte_dirty(ptent))
dirty = true;
young++;
}
walk_update_folio(walk, last, gen, dirty);
lazy_mmu_mode_disable();
if (mm_state && suitable_to_scan(i, young))
update_bloom_filter(mm_state, max_seq, pvmw->pmd);
return true;
}
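/*
 * Operations on the per-node memcg LRU, an approximate FIFO of memcgs that
 * global reclaim walks to decide which lruvec to shrink next.
 */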
enum {
MEMCG_LRU_NOP,
MEMCG_LRU_HEAD,
MEMCG_LRU_TAIL,
MEMCG_LRU_OLD,
MEMCG_LRU_YOUNG,
};
static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
{
int seg;
int old, new;
unsigned long flags;
int bin = get_random_u32_below(MEMCG_NR_BINS);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);
VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
seg = 0;
new = old = lruvec->lrugen.gen;
if (op == MEMCG_LRU_HEAD)
seg = MEMCG_LRU_HEAD;
else if (op == MEMCG_LRU_TAIL)
seg = MEMCG_LRU_TAIL;
else if (op == MEMCG_LRU_OLD)
new = get_memcg_gen(pgdat->memcg_lru.seq);
else if (op == MEMCG_LRU_YOUNG)
new = get_memcg_gen(pgdat->memcg_lru.seq + 1);
else
VM_WARN_ON_ONCE(true);
WRITE_ONCE(lruvec->lrugen.seg, seg);
WRITE_ONCE(lruvec->lrugen.gen, new);
hlist_nulls_del_rcu(&lruvec->lrugen.list);
if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD)
hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
else
hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
pgdat->memcg_lru.nr_memcgs[old]--;
pgdat->memcg_lru.nr_memcgs[new]++;
if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
}
#ifdef CONFIG_MEMCG
void lru_gen_online_memcg(struct mem_cgroup *memcg)
{
int gen;
int nid;
int bin = get_random_u32_below(MEMCG_NR_BINS);
for_each_node(nid) {
struct pglist_data *pgdat = NODE_DATA(nid);
struct lruvec *lruvec = get_lruvec(memcg, nid);
spin_lock_irq(&pgdat->memcg_lru.lock);
VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
gen = get_memcg_gen(pgdat->memcg_lru.seq);
lruvec->lrugen.gen = gen;
hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);
pgdat->memcg_lru.nr_memcgs[gen]++;
spin_unlock_irq(&pgdat->memcg_lru.lock);
}
}
void lru_gen_offline_memcg(struct mem_cgroup *memcg)
{
int nid;
for_each_node(nid) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD);
}
}
void lru_gen_release_memcg(struct mem_cgroup *memcg)
{
int gen;
int nid;
for_each_node(nid) {
struct pglist_data *pgdat = NODE_DATA(nid);
struct lruvec *lruvec = get_lruvec(memcg, nid);
spin_lock_irq(&pgdat->memcg_lru.lock);
if (hlist_nulls_unhashed(&lruvec->lrugen.list))
goto unlock;
gen = lruvec->lrugen.gen;
hlist_nulls_del_init_rcu(&lruvec->lrugen.list);
pgdat->memcg_lru.nr_memcgs[gen]--;
if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
unlock:
spin_unlock_irq(&pgdat->memcg_lru.lock);
}
}
void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
{
struct lruvec *lruvec = get_lruvec(memcg, nid);
if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD)
lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
}
#endif
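/*
 * Decide what to do with a folio met during eviction without isolating it:
 * cull unevictable folios, and keep promoted, tier-protected, wrong-zone and
 * dirty/writeback folios on their lists. Returns false only for folios the
 * caller should try to isolate.
 */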
static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc,
int tier_idx)
{
bool success;
bool dirty, writeback;
int gen = folio_lru_gen(folio);
int type = folio_is_file_lru(folio);
int zone = folio_zonenum(folio);
int delta = folio_nr_pages(folio);
int refs = folio_lru_refs(folio);
bool workingset = folio_test_workingset(folio);
int tier = lru_tier_from_refs(refs, workingset);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
if (!folio_evictable(folio)) {
success = lru_gen_del_folio(lruvec, folio, true);
VM_WARN_ON_ONCE_FOLIO(!success, folio);
folio_set_unevictable(folio);
lruvec_add_folio(lruvec, folio);
__count_vm_events(UNEVICTABLE_PGCULLED, delta);
return true;
}
if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
}
if (tier > tier_idx || refs + workingset == BIT(LRU_REFS_WIDTH) + 1) {
gen = folio_inc_gen(lruvec, folio, false);
list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
if (refs + workingset != BIT(LRU_REFS_WIDTH) + 1) {
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
WRITE_ONCE(lrugen->protected[hist][type][tier],
lrugen->protected[hist][type][tier] + delta);
}
return true;
}
if (zone > sc->reclaim_idx) {
gen = folio_inc_gen(lruvec, folio, false);
list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
}
dirty = folio_test_dirty(folio);
writeback = folio_test_writeback(folio);
if (type == LRU_GEN_FILE && dirty) {
sc->nr.file_taken += delta;
if (!writeback)
sc->nr.unqueued_dirty += delta;
}
if (writeback || (type == LRU_GEN_FILE && dirty)) {
gen = folio_inc_gen(lruvec, folio, true);
list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
}
return false;
}
static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc)
{
bool success;
if (!(sc->gfp_mask & __GFP_IO) &&
(folio_test_dirty(folio) ||
(folio_test_anon(folio) && !folio_test_swapcache(folio))))
return false;
if (!folio_try_get(folio))
return false;
if (!folio_test_clear_lru(folio)) {
folio_put(folio);
return false;
}
if (!folio_test_referenced(folio))
set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0);
folio_clear_reclaim(folio);
success = lru_gen_del_folio(lruvec, folio, true);
VM_WARN_ON_ONCE_FOLIO(!success, folio);
return true;
}
static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
struct scan_control *sc, int type, int tier,
struct list_head *list)
{
int i;
int gen;
enum vm_event_item item;
int sorted = 0;
int scanned = 0;
int isolated = 0;
int skipped = 0;
int scan_batch = min(nr_to_scan, MAX_LRU_BATCH);
int remaining = scan_batch;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
VM_WARN_ON_ONCE(!list_empty(list));
if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
return 0;
gen = lru_gen_from_seq(lrugen->min_seq[type]);
for (i = MAX_NR_ZONES; i > 0; i--) {
LIST_HEAD(moved);
int skipped_zone = 0;
int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES;
struct list_head *head = &lrugen->folios[gen][type][zone];
while (!list_empty(head)) {
struct folio *folio = lru_to_folio(head);
int delta = folio_nr_pages(folio);
VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
scanned += delta;
if (sort_folio(lruvec, folio, sc, tier))
sorted += delta;
else if (isolate_folio(lruvec, folio, sc)) {
list_add(&folio->lru, list);
isolated += delta;
} else {
list_move(&folio->lru, &moved);
skipped_zone += delta;
}
if (!--remaining || max(isolated, skipped_zone) >= MIN_LRU_BATCH)
break;
}
if (skipped_zone) {
list_splice(&moved, head);
__count_zid_vm_events(PGSCAN_SKIP, zone, skipped_zone);
skipped += skipped_zone;
}
if (!remaining || isolated >= MIN_LRU_BATCH)
break;
}
item = PGSCAN_KSWAPD + reclaimer_offset(sc);
if (!cgroup_reclaim(sc)) {
__count_vm_events(item, isolated);
__count_vm_events(PGREFILL, sorted);
}
count_memcg_events(memcg, item, isolated);
count_memcg_events(memcg, PGREFILL, sorted);
__count_vm_events(PGSCAN_ANON + type, isolated);
trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, scan_batch,
scanned, skipped, isolated,
type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
if (type == LRU_GEN_FILE)
sc->nr.file_taken += isolated;
return isolated || !remaining ? scanned : 0;
}
static int get_tier_idx(struct lruvec *lruvec, int type)
{
int tier;
struct ctrl_pos sp, pv;
read_ctrl_pos(lruvec, type, 0, 2, &sp);
for (tier = 1; tier < MAX_NR_TIERS; tier++) {
read_ctrl_pos(lruvec, type, tier, 3, &pv);
if (!positive_ctrl_err(&sp, &pv))
break;
}
return tier - 1;
}
static int get_type_to_scan(struct lruvec *lruvec, int swappiness)
{
struct ctrl_pos sp, pv;
if (swappiness <= MIN_SWAPPINESS + 1)
return LRU_GEN_FILE;
if (swappiness >= MAX_SWAPPINESS)
return LRU_GEN_ANON;
read_ctrl_pos(lruvec, LRU_GEN_ANON, MAX_NR_TIERS, swappiness, &sp);
read_ctrl_pos(lruvec, LRU_GEN_FILE, MAX_NR_TIERS, MAX_SWAPPINESS - swappiness, &pv);
return positive_ctrl_err(&sp, &pv);
}
static int isolate_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
struct scan_control *sc, int swappiness,
int *type_scanned, struct list_head *list)
{
int i;
int type = get_type_to_scan(lruvec, swappiness);
for_each_evictable_type(i, swappiness) {
int scanned;
int tier = get_tier_idx(lruvec, type);
*type_scanned = type;
scanned = scan_folios(nr_to_scan, lruvec, sc, type, tier, list);
if (scanned)
return scanned;
type = !type;
}
return 0;
}
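/*
 * Isolate a batch of folios from the oldest generations, run them through
 * shrink_folio_list() and put back whatever was not reclaimed; clean,
 * unmapped folios that came back are retried once.
 */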
static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
struct scan_control *sc, int swappiness)
{
int type;
int scanned;
int reclaimed;
LIST_HEAD(list);
LIST_HEAD(clean);
struct folio *folio;
struct folio *next;
enum vm_event_item item;
struct reclaim_stat stat;
struct lru_gen_mm_walk *walk;
bool skip_retry = false;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
spin_lock_irq(&lruvec->lru_lock);
scanned = isolate_folios(nr_to_scan, lruvec, sc, swappiness, &type, &list);
scanned += try_to_inc_min_seq(lruvec, swappiness);
if (evictable_min_seq(lrugen->min_seq, swappiness) + MIN_NR_GENS > lrugen->max_seq)
scanned = 0;
spin_unlock_irq(&lruvec->lru_lock);
if (list_empty(&list))
return scanned;
retry:
reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false, memcg);
sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
sc->nr_reclaimed += reclaimed;
trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
scanned, reclaimed, &stat, sc->priority,
type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
list_for_each_entry_safe_reverse(folio, next, &list, lru) {
DEFINE_MIN_SEQ(lruvec);
if (!folio_evictable(folio)) {
list_del(&folio->lru);
folio_putback_lru(folio);
continue;
}
if (!skip_retry && !folio_test_active(folio) && !folio_mapped(folio) &&
!folio_test_dirty(folio) && !folio_test_writeback(folio)) {
list_move(&folio->lru, &clean);
continue;
}
if (lru_gen_folio_seq(lruvec, folio, false) == min_seq[type])
set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_active));
}
spin_lock_irq(&lruvec->lru_lock);
move_folios_to_lru(lruvec, &list);
walk = current->reclaim_state->mm_walk;
if (walk && walk->batched) {
walk->lruvec = lruvec;
reset_batch_size(walk);
}
mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
stat.nr_demoted);
item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
if (!cgroup_reclaim(sc))
__count_vm_events(item, reclaimed);
count_memcg_events(memcg, item, reclaimed);
__count_vm_events(PGSTEAL_ANON + type, reclaimed);
spin_unlock_irq(&lruvec->lru_lock);
list_splice_init(&clean, &list);
if (!list_empty(&list)) {
skip_retry = true;
goto retry;
}
return scanned;
}
static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
int swappiness, unsigned long *nr_to_scan)
{
int gen, type, zone;
unsigned long size = 0;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
DEFINE_MIN_SEQ(lruvec);
*nr_to_scan = 0;
if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
return true;
for_each_evictable_type(type, swappiness) {
unsigned long seq;
for (seq = min_seq[type]; seq <= max_seq; seq++) {
gen = lru_gen_from_seq(seq);
for (zone = 0; zone < MAX_NR_ZONES; zone++)
size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
}
}
*nr_to_scan = size;
return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq;
}
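/*
 * Decide how many folios to scan: -1 means stop scanning this lruvec, 0 means
 * aging was needed but did not complete, and a positive value is the scan
 * target, scaled by the reclaim priority unless the memcg is offline.
 */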
static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
{
bool success;
unsigned long nr_to_scan;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
return -1;
success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
if (nr_to_scan && !mem_cgroup_online(memcg))
return nr_to_scan;
nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
if (!success || sc->priority == DEF_PRIORITY)
return nr_to_scan >> sc->priority;
return try_to_inc_max_seq(lruvec, max_seq, swappiness, false) ? -1 : 0;
}
static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
{
int i;
enum zone_watermarks mark;
if (!root_reclaim(sc))
return false;
if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order)))
return true;
if (!current_is_kswapd() || sc->order)
return false;
mark = sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ?
WMARK_PROMO : WMARK_HIGH;
for (i = 0; i <= sc->reclaim_idx; i++) {
struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
unsigned long size = wmark_pages(zone, mark) + MIN_LRU_BATCH;
if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0))
return false;
}
return true;
}
static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
long nr_to_scan;
unsigned long scanned = 0;
int swappiness = get_swappiness(lruvec, sc);
while (true) {
int delta;
nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
if (nr_to_scan <= 0)
break;
delta = evict_folios(nr_to_scan, lruvec, sc, swappiness);
if (!delta)
break;
scanned += delta;
if (scanned >= nr_to_scan)
break;
if (should_abort_scan(lruvec, sc))
break;
cond_resched();
}
if (sc->nr.unqueued_dirty && sc->nr.unqueued_dirty == sc->nr.file_taken)
wakeup_flusher_threads(WB_REASON_VMSCAN);
return nr_to_scan < 0;
}
static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
{
bool success;
unsigned long scanned = sc->nr_scanned;
unsigned long reclaimed = sc->nr_reclaimed;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
if (mem_cgroup_below_min(NULL, memcg))
return MEMCG_LRU_YOUNG;
if (mem_cgroup_below_low(NULL, memcg)) {
if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL)
return MEMCG_LRU_TAIL;
memcg_memory_event(memcg, MEMCG_LOW);
}
success = try_to_shrink_lruvec(lruvec, sc);
shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
if (!sc->proactive)
vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
sc->nr_reclaimed - reclaimed);
flush_reclaim_state(sc);
if (success && mem_cgroup_online(memcg))
return MEMCG_LRU_YOUNG;
if (!success && lruvec_is_sizable(lruvec, sc))
return 0;
return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ?
MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
}
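/*
 * Global reclaim for MGLRU: walk the per-node memcg LRU one bin at a time,
 * shrinking one memcg's lruvec at a time and rotating it according to the
 * result; restart when racing with lru_gen_rotate_memcg().
 */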
static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
{
int op;
int gen;
int bin;
int first_bin;
struct lruvec *lruvec;
struct lru_gen_folio *lrugen;
struct mem_cgroup *memcg;
struct hlist_nulls_node *pos;
gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
bin = first_bin = get_random_u32_below(MEMCG_NR_BINS);
restart:
op = 0;
memcg = NULL;
rcu_read_lock();
hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) {
if (op) {
lru_gen_rotate_memcg(lruvec, op);
op = 0;
}
mem_cgroup_put(memcg);
memcg = NULL;
if (gen != READ_ONCE(lrugen->gen))
continue;
lruvec = container_of(lrugen, struct lruvec, lrugen);
memcg = lruvec_memcg(lruvec);
if (!mem_cgroup_tryget(memcg)) {
lru_gen_release_memcg(memcg);
memcg = NULL;
continue;
}
rcu_read_unlock();
op = shrink_one(lruvec, sc);
rcu_read_lock();
if (should_abort_scan(lruvec, sc))
break;
}
rcu_read_unlock();
if (op)
lru_gen_rotate_memcg(lruvec, op);
mem_cgroup_put(memcg);
if (!is_a_nulls(pos))
return;
if (gen != get_nulls_value(pos))
goto restart;
bin = get_memcg_bin(bin + 1);
if (bin != first_bin)
goto restart;
}
static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
struct blk_plug plug;
VM_WARN_ON_ONCE(root_reclaim(sc));
VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap);
lru_add_drain();
blk_start_plug(&plug);
set_mm_walk(NULL, sc->proactive);
if (try_to_shrink_lruvec(lruvec, sc))
lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
clear_mm_walk();
blk_finish_plug(&plug);
}
static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
{
struct blk_plug plug;
unsigned long reclaimed = sc->nr_reclaimed;
VM_WARN_ON_ONCE(!root_reclaim(sc));
if (!sc->may_writepage || !sc->may_unmap)
goto done;
lru_add_drain();
blk_start_plug(&plug);
set_mm_walk(pgdat, sc->proactive);
set_initial_priority(pgdat, sc);
if (current_is_kswapd())
sc->nr_reclaimed = 0;
if (mem_cgroup_disabled())
shrink_one(&pgdat->__lruvec, sc);
else
shrink_many(pgdat, sc);
if (current_is_kswapd())
sc->nr_reclaimed += reclaimed;
clear_mm_walk();
blk_finish_plug(&plug);
done:
if (sc->nr_reclaimed > reclaimed)
kswapd_try_clear_hopeless(pgdat, sc->order, sc->reclaim_idx);
}
static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
{
struct lru_gen_folio *lrugen = &lruvec->lrugen;
if (lrugen->enabled) {
enum lru_list lru;
for_each_evictable_lru(lru) {
if (!list_empty(&lruvec->lists[lru]))
return false;
}
} else {
int gen, type, zone;
for_each_gen_type_zone(gen, type, zone) {
if (!list_empty(&lrugen->folios[gen][type][zone]))
return false;
}
}
return true;
}
static bool fill_evictable(struct lruvec *lruvec)
{
enum lru_list lru;
int remaining = MAX_LRU_BATCH;
for_each_evictable_lru(lru) {
int type = is_file_lru(lru);
bool active = is_active_lru(lru);
struct list_head *head = &lruvec->lists[lru];
while (!list_empty(head)) {
bool success;
struct folio *folio = lru_to_folio(head);
VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio);
VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio);
lruvec_del_folio(lruvec, folio);
success = lru_gen_add_folio(lruvec, folio, false);
VM_WARN_ON_ONCE(!success);
if (!--remaining)
return false;
}
}
return true;
}
static bool drain_evictable(struct lruvec *lruvec)
{
int gen, type, zone;
int remaining = MAX_LRU_BATCH;
for_each_gen_type_zone(gen, type, zone) {
struct list_head *head = &lruvec->lrugen.folios[gen][type][zone];
while (!list_empty(head)) {
bool success;
struct folio *folio = lru_to_folio(head);
VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
success = lru_gen_del_folio(lruvec, folio, false);
VM_WARN_ON_ONCE(!success);
lruvec_add_folio(lruvec, folio);
if (!--remaining)
return false;
}
}
return true;
}
static void lru_gen_change_state(bool enabled)
{
static DEFINE_MUTEX(state_mutex);
struct mem_cgroup *memcg;
cgroup_lock();
cpus_read_lock();
get_online_mems();
mutex_lock(&state_mutex);
if (enabled == lru_gen_enabled())
goto unlock;
if (enabled)
static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
else
static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
int nid;
for_each_node(nid) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
spin_lock_irq(&lruvec->lru_lock);
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
VM_WARN_ON_ONCE(!state_is_valid(lruvec));
lruvec->lrugen.enabled = enabled;
while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
spin_unlock_irq(&lruvec->lru_lock);
cond_resched();
spin_lock_irq(&lruvec->lru_lock);
}
spin_unlock_irq(&lruvec->lru_lock);
}
cond_resched();
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
unlock:
mutex_unlock(&state_mutex);
put_online_mems();
cpus_read_unlock();
cgroup_unlock();
}
static ssize_t min_ttl_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl)));
}
static ssize_t min_ttl_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t len)
{
unsigned int msecs;
if (kstrtouint(buf, 0, &msecs))
return -EINVAL;
WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs));
return len;
}
static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR_RW(min_ttl_ms);
static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
unsigned int caps = 0;
if (get_cap(LRU_GEN_CORE))
caps |= BIT(LRU_GEN_CORE);
if (should_walk_mmu())
caps |= BIT(LRU_GEN_MM_WALK);
if (should_clear_pmd_young())
caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
return sysfs_emit(buf, "0x%04x\n", caps);
}
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t len)
{
int i;
unsigned int caps;
if (tolower(*buf) == 'n')
caps = 0;
else if (tolower(*buf) == 'y')
caps = -1;
else if (kstrtouint(buf, 0, &caps))
return -EINVAL;
for (i = 0; i < NR_LRU_GEN_CAPS; i++) {
bool enabled = caps & BIT(i);
if (i == LRU_GEN_CORE)
lru_gen_change_state(enabled);
else if (enabled)
static_branch_enable(&lru_gen_caps[i]);
else
static_branch_disable(&lru_gen_caps[i]);
}
return len;
}
static struct kobj_attribute lru_gen_enabled_attr = __ATTR_RW(enabled);
static struct attribute *lru_gen_attrs[] = {
&lru_gen_min_ttl_attr.attr,
&lru_gen_enabled_attr.attr,
NULL
};
static const struct attribute_group lru_gen_attr_group = {
.name = "lru_gen",
.attrs = lru_gen_attrs,
};
static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
{
struct mem_cgroup *memcg;
loff_t nr_to_skip = *pos;
m->private = kvmalloc(PATH_MAX, GFP_KERNEL);
if (!m->private)
return ERR_PTR(-ENOMEM);
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
int nid;
for_each_node_state(nid, N_MEMORY) {
if (!nr_to_skip--)
return get_lruvec(memcg, nid);
}
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
return NULL;
}
static void lru_gen_seq_stop(struct seq_file *m, void *v)
{
if (!IS_ERR_OR_NULL(v))
mem_cgroup_iter_break(NULL, lruvec_memcg(v));
kvfree(m->private);
m->private = NULL;
}
static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
int nid = lruvec_pgdat(v)->node_id;
struct mem_cgroup *memcg = lruvec_memcg(v);
++*pos;
nid = next_memory_node(nid);
if (nid == MAX_NUMNODES) {
memcg = mem_cgroup_iter(NULL, memcg, NULL);
if (!memcg)
return NULL;
nid = first_memory_node;
}
return get_lruvec(memcg, nid);
}
static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
unsigned long max_seq, unsigned long *min_seq,
unsigned long seq)
{
int i;
int type, tier;
int hist = lru_hist_from_seq(seq);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
for (tier = 0; tier < MAX_NR_TIERS; tier++) {
seq_printf(m, " %10d", tier);
for (type = 0; type < ANON_AND_FILE; type++) {
const char *s = "xxx";
unsigned long n[3] = {};
if (seq == max_seq) {
s = "RTx";
n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
} else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
s = "rep";
n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
n[2] = READ_ONCE(lrugen->protected[hist][type][tier]);
}
for (i = 0; i < 3; i++)
seq_printf(m, " %10lu%c", n[i], s[i]);
}
seq_putc(m, '\n');
}
if (!mm_state)
return;
seq_puts(m, " ");
for (i = 0; i < NR_MM_STATS; i++) {
const char *s = "xxxx";
unsigned long n = 0;
if (seq == max_seq && NR_HIST_GENS == 1) {
s = "TYFA";
n = READ_ONCE(mm_state->stats[hist][i]);
} else if (seq != max_seq && NR_HIST_GENS > 1) {
s = "tyfa";
n = READ_ONCE(mm_state->stats[hist][i]);
}
seq_printf(m, " %10lu%c", n, s[i]);
}
seq_putc(m, '\n');
}
static int lru_gen_seq_show(struct seq_file *m, void *v)
{
unsigned long seq;
bool full = debugfs_get_aux_num(m->file);
struct lruvec *lruvec = v;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
int nid = lruvec_pgdat(lruvec)->node_id;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
DEFINE_MIN_SEQ(lruvec);
if (nid == first_memory_node) {
const char *path = memcg ? m->private : "";
#ifdef CONFIG_MEMCG
if (memcg)
cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
#endif
seq_printf(m, "memcg %llu %s\n", mem_cgroup_id(memcg), path);
}
seq_printf(m, " node %5d\n", nid);
if (!full)
seq = evictable_min_seq(min_seq, MAX_SWAPPINESS / 2);
else if (max_seq >= MAX_NR_GENS)
seq = max_seq - MAX_NR_GENS + 1;
else
seq = 0;
for (; seq <= max_seq; seq++) {
int type, zone;
int gen = lru_gen_from_seq(seq);
unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth));
for (type = 0; type < ANON_AND_FILE; type++) {
unsigned long size = 0;
char mark = full && seq < min_seq[type] ? 'x' : ' ';
for (zone = 0; zone < MAX_NR_ZONES; zone++)
size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
seq_printf(m, " %10lu%c", size, mark);
}
seq_putc(m, '\n');
if (full)
lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
}
return 0;
}
static const struct seq_operations lru_gen_seq_ops = {
.start = lru_gen_seq_start,
.stop = lru_gen_seq_stop,
.next = lru_gen_seq_next,
.show = lru_gen_seq_show,
};
static int run_aging(struct lruvec *lruvec, unsigned long seq,
int swappiness, bool force_scan)
{
DEFINE_MAX_SEQ(lruvec);
if (seq > max_seq)
return -EINVAL;
return try_to_inc_max_seq(lruvec, max_seq, swappiness, force_scan) ? 0 : -EEXIST;
}
static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
int swappiness, unsigned long nr_to_reclaim)
{
DEFINE_MAX_SEQ(lruvec);
if (seq + MIN_NR_GENS > max_seq)
return -EINVAL;
sc->nr_reclaimed = 0;
while (!signal_pending(current)) {
DEFINE_MIN_SEQ(lruvec);
if (seq < evictable_min_seq(min_seq, swappiness))
return 0;
if (sc->nr_reclaimed >= nr_to_reclaim)
return 0;
if (!evict_folios(nr_to_reclaim - sc->nr_reclaimed, lruvec, sc,
swappiness))
return 0;
cond_resched();
}
return -EINTR;
}
static int run_cmd(char cmd, u64 memcg_id, int nid, unsigned long seq,
struct scan_control *sc, int swappiness, unsigned long opt)
{
struct lruvec *lruvec;
int err = -EINVAL;
struct mem_cgroup *memcg = NULL;
if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
return -EINVAL;
if (!mem_cgroup_disabled()) {
memcg = mem_cgroup_get_from_id(memcg_id);
if (!memcg)
return -EINVAL;
}
if (memcg_id != mem_cgroup_id(memcg))
goto done;
sc->target_mem_cgroup = memcg;
lruvec = get_lruvec(memcg, nid);
if (swappiness < MIN_SWAPPINESS)
swappiness = get_swappiness(lruvec, sc);
else if (swappiness > SWAPPINESS_ANON_ONLY)
goto done;
switch (cmd) {
case '+':
err = run_aging(lruvec, seq, swappiness, opt);
break;
case '-':
err = run_eviction(lruvec, seq, sc, swappiness, opt);
break;
}
done:
mem_cgroup_put(memcg);
return err;
}
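/*
 * Parse commands written to the lru_gen debugfs file. Each command is
 * "<+|-> memcg_id node_id seq [swappiness [opt]]": '+' runs aging up to @seq
 * (opt selects force_scan) and '-' runs eviction (opt is the number of folios
 * to reclaim); "max" selects anon-only swappiness.
 */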
static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
size_t len, loff_t *pos)
{
void *buf;
char *cur, *next;
unsigned int flags;
struct blk_plug plug;
int err = -EINVAL;
struct scan_control sc = {
.may_writepage = true,
.may_unmap = true,
.may_swap = true,
.reclaim_idx = MAX_NR_ZONES - 1,
.gfp_mask = GFP_KERNEL,
.proactive = true,
};
buf = kvmalloc(len + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (copy_from_user(buf, src, len)) {
kvfree(buf);
return -EFAULT;
}
set_task_reclaim_state(current, &sc.reclaim_state);
flags = memalloc_noreclaim_save();
blk_start_plug(&plug);
if (!set_mm_walk(NULL, true)) {
err = -ENOMEM;
goto done;
}
next = buf;
next[len] = '\0';
while ((cur = strsep(&next, ",;\n"))) {
int n;
int end;
char cmd, swap_string[5];
u64 memcg_id;
unsigned int nid;
unsigned long seq;
unsigned int swappiness;
unsigned long opt = -1;
cur = skip_spaces(cur);
if (!*cur)
continue;
n = sscanf(cur, "%c %llu %u %lu %n %4s %n %lu %n", &cmd, &memcg_id, &nid,
&seq, &end, swap_string, &end, &opt, &end);
if (n < 4 || cur[end]) {
err = -EINVAL;
break;
}
if (n == 4) {
swappiness = -1;
} else if (!strcmp("max", swap_string)) {
swappiness = SWAPPINESS_ANON_ONLY;
} else {
err = kstrtouint(swap_string, 0, &swappiness);
if (err)
break;
}
err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
if (err)
break;
}
done:
clear_mm_walk();
blk_finish_plug(&plug);
memalloc_noreclaim_restore(flags);
set_task_reclaim_state(current, NULL);
kvfree(buf);
return err ? : len;
}
static int lru_gen_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &lru_gen_seq_ops);
}
static const struct file_operations lru_gen_rw_fops = {
.open = lru_gen_seq_open,
.read = seq_read,
.write = lru_gen_seq_write,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations lru_gen_ro_fops = {
.open = lru_gen_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
void lru_gen_init_pgdat(struct pglist_data *pgdat)
{
int i, j;
spin_lock_init(&pgdat->memcg_lru.lock);
for (i = 0; i < MEMCG_NR_GENS; i++) {
for (j = 0; j < MEMCG_NR_BINS; j++)
INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
}
}
void lru_gen_init_lruvec(struct lruvec *lruvec)
{
int i;
int gen, type, zone;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
lrugen->max_seq = MIN_NR_GENS + 1;
lrugen->enabled = lru_gen_enabled();
for (i = 0; i <= MIN_NR_GENS + 1; i++)
lrugen->timestamps[i] = jiffies;
for_each_gen_type_zone(gen, type, zone)
INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);
if (mm_state)
mm_state->seq = MIN_NR_GENS;
}
#ifdef CONFIG_MEMCG
void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
if (!mm_list)
return;
INIT_LIST_HEAD(&mm_list->fifo);
spin_lock_init(&mm_list->lock);
}
void lru_gen_exit_memcg(struct mem_cgroup *memcg)
{
int i;
int nid;
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo));
for_each_node(nid) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
sizeof(lruvec->lrugen.nr_pages)));
lruvec->lrugen.list.next = LIST_POISON1;
if (!mm_state)
continue;
for (i = 0; i < NR_BLOOM_FILTERS; i++) {
bitmap_free(mm_state->filters[i]);
mm_state->filters[i] = NULL;
}
}
}
#endif
static int __init init_lru_gen(void)
{
BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
pr_err("lru_gen: failed to create sysfs group\n");
debugfs_create_file_aux_num("lru_gen", 0644, NULL, NULL, false,
&lru_gen_rw_fops);
debugfs_create_file_aux_num("lru_gen_full", 0444, NULL, NULL, true,
&lru_gen_ro_fops);
return 0;
};
late_initcall(init_lru_gen);
#else
static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
{
BUILD_BUG();
}
static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
BUILD_BUG();
}
static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
{
BUILD_BUG();
}
#endif
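/*
 * Classic active/inactive reclaim for one lruvec; with MGLRU enabled,
 * non-root reclaim is redirected to lru_gen_shrink_lruvec(). Once the reclaim
 * target is met, the remaining per-LRU scan counts are rebalanced so anon and
 * file end up scanned in proportion to their original targets.
 */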
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
unsigned long nr[NR_LRU_LISTS];
unsigned long targets[NR_LRU_LISTS];
unsigned long nr_to_scan;
enum lru_list lru;
unsigned long nr_reclaimed = 0;
unsigned long nr_to_reclaim = sc->nr_to_reclaim;
bool proportional_reclaim;
struct blk_plug plug;
if (lru_gen_enabled() && !root_reclaim(sc)) {
lru_gen_shrink_lruvec(lruvec, sc);
return;
}
get_scan_count(lruvec, sc, nr);
memcpy(targets, nr, sizeof(nr));
proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
sc->priority == DEF_PRIORITY);
blk_start_plug(&plug);
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
nr[LRU_INACTIVE_FILE]) {
unsigned long nr_anon, nr_file, percentage;
unsigned long nr_scanned;
for_each_evictable_lru(lru) {
if (nr[lru]) {
nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
nr[lru] -= nr_to_scan;
nr_reclaimed += shrink_list(lru, nr_to_scan,
lruvec, sc);
}
}
cond_resched();
if (nr_reclaimed < nr_to_reclaim || proportional_reclaim)
continue;
nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
if (!nr_file || !nr_anon)
break;
if (nr_file > nr_anon) {
unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
targets[LRU_ACTIVE_ANON] + 1;
lru = LRU_BASE;
percentage = nr_anon * 100 / scan_target;
} else {
unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
targets[LRU_ACTIVE_FILE] + 1;
lru = LRU_FILE;
percentage = nr_file * 100 / scan_target;
}
nr[lru] = 0;
nr[lru + LRU_ACTIVE] = 0;
lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
nr_scanned = targets[lru] - nr[lru];
nr[lru] = targets[lru] * (100 - percentage) / 100;
nr[lru] -= min(nr[lru], nr_scanned);
lru += LRU_ACTIVE;
nr_scanned = targets[lru] - nr[lru];
nr[lru] = targets[lru] * (100 - percentage) / 100;
nr[lru] -= min(nr[lru], nr_scanned);
}
blk_finish_plug(&plug);
sc->nr_reclaimed += nr_reclaimed;
if (can_age_anon_pages(lruvec, sc) &&
inactive_is_low(lruvec, LRU_INACTIVE_ANON))
shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
sc, LRU_ACTIVE_ANON);
}
static bool in_reclaim_compaction(struct scan_control *sc)
{
if (gfp_compaction_allowed(sc->gfp_mask) && sc->order &&
(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
sc->priority < DEF_PRIORITY - 2))
return true;
return false;
}
static inline bool should_continue_reclaim(struct pglist_data *pgdat,
unsigned long nr_reclaimed,
struct scan_control *sc)
{
unsigned long pages_for_compaction;
unsigned long inactive_lru_pages;
int z;
struct zone *zone;
if (!in_reclaim_compaction(sc))
return false;
if (!nr_reclaimed)
return false;
for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) {
unsigned long watermark = min_wmark_pages(zone);
if (zone_watermark_ok(zone, sc->order, watermark,
sc->reclaim_idx, 0))
return false;
if (compaction_suitable(zone, sc->order, watermark,
sc->reclaim_idx))
return false;
}
pages_for_compaction = compact_gap(sc->order);
inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
return inactive_lru_pages > pages_for_compaction;
}
static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
{
struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
struct mem_cgroup_reclaim_cookie reclaim = {
.pgdat = pgdat,
};
struct mem_cgroup_reclaim_cookie *partial = &reclaim;
struct mem_cgroup *memcg;
if (current_is_kswapd() || sc->memcg_full_walk)
partial = NULL;
memcg = mem_cgroup_iter(target_memcg, NULL, partial);
do {
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
unsigned long reclaimed;
unsigned long scanned;
cond_resched();
mem_cgroup_calculate_protection(target_memcg, memcg);
if (mem_cgroup_below_min(target_memcg, memcg)) {
continue;
} else if (mem_cgroup_below_low(target_memcg, memcg)) {
if (!sc->memcg_low_reclaim) {
sc->memcg_low_skipped = 1;
continue;
}
memcg_memory_event(memcg, MEMCG_LOW);
}
reclaimed = sc->nr_reclaimed;
scanned = sc->nr_scanned;
shrink_lruvec(lruvec, sc);
shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
sc->priority);
if (!sc->proactive)
vmpressure(sc->gfp_mask, memcg, false,
sc->nr_scanned - scanned,
sc->nr_reclaimed - reclaimed);
if (partial && sc->nr_reclaimed >= sc->nr_to_reclaim) {
mem_cgroup_iter_break(target_memcg, memcg);
break;
}
} while ((memcg = mem_cgroup_iter(target_memcg, memcg, partial)));
}
static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
{
unsigned long nr_reclaimed, nr_scanned, nr_node_reclaimed;
struct lruvec *target_lruvec;
bool reclaimable = false;
if (lru_gen_enabled() && root_reclaim(sc)) {
memset(&sc->nr, 0, sizeof(sc->nr));
lru_gen_shrink_node(pgdat, sc);
return;
}
target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
again:
memset(&sc->nr, 0, sizeof(sc->nr));
nr_reclaimed = sc->nr_reclaimed;
nr_scanned = sc->nr_scanned;
prepare_scan_control(pgdat, sc);
shrink_node_memcgs(pgdat, sc);
flush_reclaim_state(sc);
nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed;
if (!sc->proactive)
vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
sc->nr_scanned - nr_scanned, nr_node_reclaimed);
if (nr_node_reclaimed)
reclaimable = true;
if (current_is_kswapd()) {
if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
set_bit(PGDAT_WRITEBACK, &pgdat->flags);
if (sc->nr.immediate)
reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
}
if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) {
if (cgroup_reclaim(sc) && writeback_throttling_sane(sc))
set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags);
if (current_is_kswapd())
set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags);
}
if (!current_is_kswapd() && current_may_throttle() &&
!sc->hibernation_mode &&
(test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) ||
test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags)))
reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);
if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc))
goto again;
if (reclaimable)
kswapd_try_clear_hopeless(pgdat, sc->order, sc->reclaim_idx);
else if (sc->cache_trim_mode)
sc->cache_trim_mode_failed = 1;
}
static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
{
unsigned long watermark;
if (!gfp_compaction_allowed(sc->gfp_mask))
return false;
if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
sc->reclaim_idx, 0))
return true;
watermark = high_wmark_pages(zone);
if (compaction_suitable(zone, sc->order, watermark, sc->reclaim_idx))
return true;
return false;
}
static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
{
if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) {
wait_queue_head_t *wqh;
wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS];
if (waitqueue_active(wqh))
wake_up(wqh);
return;
}
if (current_is_kswapd() || cgroup_reclaim(sc))
return;
if (sc->priority == 1 && !sc->nr_reclaimed)
reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS);
}
static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
unsigned long nr_soft_reclaimed;
unsigned long nr_soft_scanned;
gfp_t orig_mask;
pg_data_t *last_pgdat = NULL;
pg_data_t *first_pgdat = NULL;
orig_mask = sc->gfp_mask;
if (buffer_heads_over_limit) {
sc->gfp_mask |= __GFP_HIGHMEM;
sc->reclaim_idx = gfp_zone(sc->gfp_mask);
}
for_each_zone_zonelist_nodemask(zone, z, zonelist,
sc->reclaim_idx, sc->nodemask) {
if (!cgroup_reclaim(sc)) {
if (!cpuset_zone_allowed(zone,
GFP_KERNEL | __GFP_HARDWALL))
continue;
if (IS_ENABLED(CONFIG_COMPACTION) &&
sc->order > PAGE_ALLOC_COSTLY_ORDER &&
compaction_ready(zone, sc)) {
sc->compaction_ready = true;
continue;
}
if (zone->zone_pgdat == last_pgdat)
continue;
nr_soft_scanned = 0;
nr_soft_reclaimed = memcg1_soft_limit_reclaim(zone->zone_pgdat,
sc->order, sc->gfp_mask,
&nr_soft_scanned);
sc->nr_reclaimed += nr_soft_reclaimed;
sc->nr_scanned += nr_soft_scanned;
}
if (!first_pgdat)
first_pgdat = zone->zone_pgdat;
if (zone->zone_pgdat == last_pgdat)
continue;
last_pgdat = zone->zone_pgdat;
shrink_node(zone->zone_pgdat, sc);
}
if (first_pgdat)
consider_reclaim_throttle(first_pgdat, sc);
sc->gfp_mask = orig_mask;
}
static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
{
struct lruvec *target_lruvec;
unsigned long refaults;
if (lru_gen_enabled())
return;
target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
target_lruvec->refaults[WORKINGSET_ANON] = refaults;
refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
target_lruvec->refaults[WORKINGSET_FILE] = refaults;
}
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct scan_control *sc)
{
int initial_priority = sc->priority;
pg_data_t *last_pgdat;
struct zoneref *z;
struct zone *zone;
retry:
delayacct_freepages_start();
if (!cgroup_reclaim(sc))
__count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
do {
if (!sc->proactive)
vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
sc->priority);
sc->nr_scanned = 0;
shrink_zones(zonelist, sc);
if (sc->nr_reclaimed >= sc->nr_to_reclaim)
break;
if (sc->compaction_ready)
break;
} while (--sc->priority >= 0);
last_pgdat = NULL;
for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
sc->nodemask) {
if (zone->zone_pgdat == last_pgdat)
continue;
last_pgdat = zone->zone_pgdat;
snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
if (cgroup_reclaim(sc)) {
struct lruvec *lruvec;
lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
zone->zone_pgdat);
clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
}
}
delayacct_freepages_end();
if (sc->nr_reclaimed)
return sc->nr_reclaimed;
if (sc->compaction_ready)
return 1;
if (!sc->memcg_full_walk) {
sc->priority = initial_priority;
sc->memcg_full_walk = 1;
goto retry;
}
if (sc->skipped_deactivate) {
sc->priority = initial_priority;
sc->force_deactivate = 1;
sc->skipped_deactivate = 0;
goto retry;
}
if (sc->memcg_low_skipped) {
sc->priority = initial_priority;
sc->force_deactivate = 0;
sc->memcg_low_reclaim = 1;
sc->memcg_low_skipped = 0;
goto retry;
}
return 0;
}
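/*
 * Direct reclaim is allowed while free pages in the node's zones up to
 * ZONE_NORMAL stay above half of their combined min watermarks (the
 * pfmemalloc reserve); otherwise kswapd is woken and callers get throttled.
 * Always allowed once kswapd has given the node up as hopeless.
 */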
static bool allow_direct_reclaim(pg_data_t *pgdat)
{
struct zone *zone;
unsigned long pfmemalloc_reserve = 0;
unsigned long free_pages = 0;
int i;
bool wmark_ok;
if (kswapd_test_hopeless(pgdat))
return true;
for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
if (!zone_reclaimable_pages(zone) && !zone_page_state_snapshot(zone, NR_FREE_PAGES))
continue;
pfmemalloc_reserve += min_wmark_pages(zone);
free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES);
}
if (!pfmemalloc_reserve)
return true;
wmark_ok = free_pages > pfmemalloc_reserve / 2;
if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL);
wake_up_interruptible(&pgdat->kswapd_wait);
}
return wmark_ok;
}
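/*
 * Throttle a direct reclaimer whose allocation targets a node that is
 * below its pfmemalloc reserve, sleeping on pfmemalloc_wait until kswapd
 * restores the reserve. Kernel threads and tasks with a fatal signal
 * pending are never throttled. Returns true if a fatal signal arrived
 * while throttled, in which case reclaim should not be attempted.
 */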
static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
nodemask_t *nodemask)
{
struct zoneref *z;
struct zone *zone;
pg_data_t *pgdat = NULL;
if (current->flags & PF_KTHREAD)
goto out;
if (fatal_signal_pending(current))
goto out;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
gfp_zone(gfp_mask), nodemask) {
if (zone_idx(zone) > ZONE_NORMAL)
continue;
pgdat = zone->zone_pgdat;
if (allow_direct_reclaim(pgdat))
goto out;
break;
}
if (!pgdat)
goto out;
count_vm_event(PGSCAN_DIRECT_THROTTLE);
if (!(gfp_mask & __GFP_FS))
wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
allow_direct_reclaim(pgdat), HZ);
else
wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
allow_direct_reclaim(pgdat));
if (fatal_signal_pending(current))
return true;
out:
return false;
}
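/*
 * Direct reclaim entry point for the page allocator: set up the scan
 * control, throttle the caller if needed, and run do_try_to_free_pages()
 * against the given zonelist.
 */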
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *nodemask)
{
unsigned long nr_reclaimed;
struct scan_control sc = {
.nr_to_reclaim = SWAP_CLUSTER_MAX,
.gfp_mask = current_gfp_context(gfp_mask),
.reclaim_idx = gfp_zone(gfp_mask),
.order = order,
.nodemask = nodemask,
.priority = DEF_PRIORITY,
.may_writepage = 1,
.may_unmap = 1,
.may_swap = 1,
};
BUILD_BUG_ON(MAX_PAGE_ORDER >= S8_MAX);
BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
return 1;
set_task_reclaim_state(current, &sc.reclaim_state);
trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
set_task_reclaim_state(current, NULL);
return nr_reclaimed;
}
#ifdef CONFIG_MEMCG
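/*
 * Reclaim from one memcg's lruvec on a single node, used by the memcg v1
 * soft limit reclaim path. Returns the number of reclaimed pages and
 * reports the number of scanned pages through @nr_scanned.
 */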
unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
gfp_t gfp_mask, bool noswap,
pg_data_t *pgdat,
unsigned long *nr_scanned)
{
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
struct scan_control sc = {
.nr_to_reclaim = SWAP_CLUSTER_MAX,
.target_mem_cgroup = memcg,
.may_writepage = 1,
.may_unmap = 1,
.reclaim_idx = MAX_NR_ZONES - 1,
.may_swap = !noswap,
};
WARN_ON_ONCE(!current->reclaim_state);
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
sc.gfp_mask);
shrink_lruvec(lruvec, &sc);
trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
*nr_scanned = sc.nr_scanned;
return sc.nr_reclaimed;
}
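/*
 * Reclaim pages on behalf of a memory cgroup, for limit enforcement or
 * proactive reclaim. Runs with PF_MEMALLOC set to avoid reclaim
 * recursion and walks the local node's zonelist to spread pressure
 * across nodes.
 */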
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
unsigned int reclaim_options,
int *swappiness)
{
unsigned long nr_reclaimed;
unsigned int noreclaim_flag;
struct scan_control sc = {
.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
.proactive_swappiness = swappiness,
.gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
.reclaim_idx = MAX_NR_ZONES - 1,
.target_mem_cgroup = memcg,
.priority = DEF_PRIORITY,
.may_writepage = 1,
.may_unmap = 1,
.may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
.proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
};
struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
set_task_reclaim_state(current, &sc.reclaim_state);
trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
noreclaim_flag = memalloc_noreclaim_save();
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
memalloc_noreclaim_restore(noreclaim_flag);
trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
set_task_reclaim_state(current, NULL);
return nr_reclaimed;
}
#else
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
unsigned int reclaim_options,
int *swappiness)
{
return 0;
}
#endif
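/*
 * Age the anonymous LRUs for kswapd so that anon pages gather referenced
 * information before memory pressure forces swapping. Hands off to
 * lru_gen_age_node() when the multi-gen LRU is enabled.
 */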
static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
{
struct mem_cgroup *memcg;
struct lruvec *lruvec;
if (lru_gen_enabled()) {
lru_gen_age_node(pgdat, sc);
return;
}
lruvec = mem_cgroup_lruvec(NULL, pgdat);
if (!can_age_anon_pages(lruvec, sc))
return;
if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
return;
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
lruvec = mem_cgroup_lruvec(memcg, pgdat);
shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
sc, LRU_ACTIVE_ANON);
memcg = mem_cgroup_iter(NULL, memcg, NULL);
} while (memcg);
}
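/*
 * Returns true if any managed zone up to highest_zoneidx still has a
 * non-zero watermark boost.
 */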
static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
{
int i;
struct zone *zone;
for (i = highest_zoneidx; i >= 0; i--) {
zone = pgdat->node_zones + i;
if (!managed_zone(zone))
continue;
if (zone->watermark_boost)
return true;
}
return false;
}
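/*
 * Returns true if any managed zone up to highest_zoneidx meets its high
 * (or promo, with memory tiering) watermark for the given order, or if
 * the node has no managed zones in that range at all.
 */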
static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
{
int i;
unsigned long mark = -1;
struct zone *zone;
for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
enum zone_stat_item item;
unsigned long free_pages;
if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
mark = promo_wmark_pages(zone);
else
mark = high_wmark_pages(zone);
if (defrag_mode && order)
item = NR_FREE_PAGES_BLOCKS;
else
item = NR_FREE_PAGES;
free_pages = zone_page_state(zone, item);
if (zone->percpu_drift_mark && free_pages < zone->percpu_drift_mark)
free_pages = zone_page_state_snapshot(zone, item);
if (__zone_watermark_ok(zone, order, mark, highest_zoneidx,
0, free_pages))
return true;
}
if (mark == -1)
return true;
return false;
}
static void clear_pgdat_congested(pg_data_t *pgdat)
{
struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags);
clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
}
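/*
 * Decide whether kswapd may sleep: wake any throttled direct reclaimers
 * first, then allow sleeping if reclaim has been declared hopeless or
 * the node is balanced for the pending order.
 */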
static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
int highest_zoneidx)
{
if (waitqueue_active(&pgdat->pfmemalloc_wait))
wake_up_all(&pgdat->pfmemalloc_wait);
if (kswapd_test_hopeless(pgdat))
return true;
if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
clear_pgdat_congested(pgdat);
return true;
}
return false;
}
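/*
 * One round of node reclaim on behalf of kswapd. The reclaim target is
 * proportional to the high watermarks of the eligible zones. Returns
 * true if enough pages were scanned or reclaimed that the caller need
 * not raise the reclaim priority.
 */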
static bool kswapd_shrink_node(pg_data_t *pgdat,
struct scan_control *sc)
{
struct zone *zone;
int z;
unsigned long nr_reclaimed = sc->nr_reclaimed;
sc->nr_to_reclaim = 0;
for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) {
sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
}
shrink_node(pgdat, sc);
if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
sc->order = 0;
return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim;
}
static inline void
update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active)
{
int i;
struct zone *zone;
for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
if (active)
set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
else
clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
}
}
static inline void
set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
{
update_reclaim_active(pgdat, highest_zoneidx, true);
}
static inline void
clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
{
update_reclaim_active(pgdat, highest_zoneidx, false);
}
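/*
 * Reclaim the node until it is balanced for the requested order,
 * dropping the scan priority as needed. Handles watermark boost reclaim
 * separately and wakes kcompactd once boosted watermarks have been
 * restored. Returns the order kswapd finished reclaiming at.
 */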
static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
{
int i;
unsigned long nr_soft_reclaimed;
unsigned long nr_soft_scanned;
unsigned long pflags;
unsigned long nr_boost_reclaim;
unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
bool boosted;
struct zone *zone;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.order = order,
.may_unmap = 1,
};
set_task_reclaim_state(current, &sc.reclaim_state);
psi_memstall_enter(&pflags);
__fs_reclaim_acquire(_THIS_IP_);
count_vm_event(PAGEOUTRUN);
nr_boost_reclaim = 0;
for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
nr_boost_reclaim += zone->watermark_boost;
zone_boosts[i] = zone->watermark_boost;
}
boosted = nr_boost_reclaim;
restart:
set_reclaim_active(pgdat, highest_zoneidx);
sc.priority = DEF_PRIORITY;
do {
unsigned long nr_reclaimed = sc.nr_reclaimed;
bool raise_priority = true;
bool balanced;
bool ret;
bool was_frozen;
sc.reclaim_idx = highest_zoneidx;
if (buffer_heads_over_limit) {
for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
zone = pgdat->node_zones + i;
if (!managed_zone(zone))
continue;
sc.reclaim_idx = i;
break;
}
}
balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
if (!balanced && nr_boost_reclaim) {
nr_boost_reclaim = 0;
goto restart;
}
if (!nr_boost_reclaim && balanced)
goto out;
if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
raise_priority = false;
sc.may_writepage = !nr_boost_reclaim;
sc.may_swap = !nr_boost_reclaim;
kswapd_age_node(pgdat, &sc);
sc.nr_scanned = 0;
nr_soft_scanned = 0;
nr_soft_reclaimed = memcg1_soft_limit_reclaim(pgdat, sc.order,
sc.gfp_mask, &nr_soft_scanned);
sc.nr_reclaimed += nr_soft_reclaimed;
if (kswapd_shrink_node(pgdat, &sc))
raise_priority = false;
if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
allow_direct_reclaim(pgdat))
wake_up_all(&pgdat->pfmemalloc_wait);
__fs_reclaim_release(_THIS_IP_);
ret = kthread_freezable_should_stop(&was_frozen);
__fs_reclaim_acquire(_THIS_IP_);
if (was_frozen || ret)
break;
nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
if (nr_boost_reclaim && !nr_reclaimed)
break;
if (raise_priority || !nr_reclaimed)
sc.priority--;
} while (sc.priority >= 1);
if (!sc.nr_reclaimed && sc.priority < 1 &&
!sc.no_cache_trim_mode && sc.cache_trim_mode_failed) {
sc.no_cache_trim_mode = 1;
goto restart;
}
if (!sc.nr_reclaimed && !boosted) {
int fail_cnt = atomic_inc_return(&pgdat->kswapd_failures);
trace_mm_vmscan_kswapd_reclaim_fail(pgdat->node_id, fail_cnt);
}
out:
clear_reclaim_active(pgdat, highest_zoneidx);
if (boosted) {
unsigned long flags;
for (i = 0; i <= highest_zoneidx; i++) {
if (!zone_boosts[i])
continue;
zone = pgdat->node_zones + i;
spin_lock_irqsave(&zone->lock, flags);
zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
spin_unlock_irqrestore(&zone->lock, flags);
}
wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
}
snapshot_refaults(NULL, pgdat);
__fs_reclaim_release(_THIS_IP_);
psi_memstall_leave(&pflags);
set_task_reclaim_state(current, NULL);
return sc.order;
}
static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat,
enum zone_type prev_highest_zoneidx)
{
enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx;
}
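/*
 * kswapd sleep path: take a short (HZ/10) nap to confirm the node stays
 * balanced, then sleep fully until the next wakeup. kcompactd is woken
 * before the nap, and the per-cpu vmstat thresholds are switched to
 * their normal values for the long sleep and back to pressure values on
 * wakeup.
 */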
static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
unsigned int highest_zoneidx)
{
long remaining = 0;
DEFINE_WAIT(wait);
if (freezing(current) || kthread_should_stop())
return;
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
reset_isolation_suitable(pgdat);
wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);
remaining = schedule_timeout(HZ/10);
if (remaining) {
WRITE_ONCE(pgdat->kswapd_highest_zoneidx,
kswapd_highest_zoneidx(pgdat,
highest_zoneidx));
if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
}
finish_wait(&pgdat->kswapd_wait, &wait);
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
}
if (!remaining &&
prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
if (!kthread_should_stop())
schedule();
set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
} else {
if (remaining)
count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
else
count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
}
finish_wait(&pgdat->kswapd_wait, &wait);
}
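/*
 * The background pageout daemon, one kernel thread per NUMA node. Sleeps
 * until woken by wakeup_kswapd() and then balances the node for the
 * recorded order and zone index via balance_pgdat().
 */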
static int kswapd(void *p)
{
unsigned int alloc_order, reclaim_order;
unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
pg_data_t *pgdat = (pg_data_t *)p;
struct task_struct *tsk = current;
tsk->flags |= PF_MEMALLOC | PF_KSWAPD;
set_freezable();
WRITE_ONCE(pgdat->kswapd_order, 0);
WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
atomic_set(&pgdat->nr_writeback_throttled, 0);
for ( ; ; ) {
bool was_frozen;
alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
highest_zoneidx = kswapd_highest_zoneidx(pgdat,
highest_zoneidx);
kswapd_try_sleep:
kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
highest_zoneidx);
alloc_order = READ_ONCE(pgdat->kswapd_order);
highest_zoneidx = kswapd_highest_zoneidx(pgdat,
highest_zoneidx);
WRITE_ONCE(pgdat->kswapd_order, 0);
WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
if (kthread_freezable_should_stop(&was_frozen))
break;
if (was_frozen)
continue;
trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
alloc_order);
reclaim_order = balance_pgdat(pgdat, alloc_order,
highest_zoneidx);
if (reclaim_order < alloc_order)
goto kswapd_try_sleep;
}
tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD);
return 0;
}
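/*
 * Called by the page allocator when free memory runs low. Records the
 * requested order and zone index and wakes kswapd, unless the node is
 * already balanced (and not boosted) or reclaim has been declared
 * hopeless; in that case kcompactd may be woken instead for callers that
 * cannot enter direct reclaim.
 */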
void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
enum zone_type highest_zoneidx)
{
pg_data_t *pgdat;
enum zone_type curr_idx;
if (!managed_zone(zone))
return;
if (!cpuset_zone_allowed(zone, gfp_flags))
return;
pgdat = zone->zone_pgdat;
curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx)
WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);
if (READ_ONCE(pgdat->kswapd_order) < order)
WRITE_ONCE(pgdat->kswapd_order, order);
if (!waitqueue_active(&pgdat->kswapd_wait))
return;
if (kswapd_test_hopeless(pgdat) ||
(pgdat_balanced(pgdat, order, highest_zoneidx) &&
!pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
wakeup_kcompactd(pgdat, order, highest_zoneidx);
return;
}
trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
gfp_flags);
wake_up_interruptible(&pgdat->kswapd_wait);
}
void kswapd_clear_hopeless(pg_data_t *pgdat, enum kswapd_clear_hopeless_reason reason)
{
if (atomic_xchg(&pgdat->kswapd_failures, 0))
trace_mm_vmscan_kswapd_clear_hopeless(pgdat->node_id, reason);
}
void kswapd_try_clear_hopeless(struct pglist_data *pgdat,
unsigned int order, int highest_zoneidx)
{
if (pgdat_balanced(pgdat, order, highest_zoneidx))
kswapd_clear_hopeless(pgdat, current_is_kswapd() ?
KSWAPD_CLEAR_HOPELESS_KSWAPD : KSWAPD_CLEAR_HOPELESS_DIRECT);
}
bool kswapd_test_hopeless(pg_data_t *pgdat)
{
return atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES;
}
#ifdef CONFIG_HIBERNATION
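/*
 * Try to free `nr_to_reclaim' pages of memory, system-wide, for
 * hibernation. Returns the number of reclaimed pages.
 */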
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
struct scan_control sc = {
.nr_to_reclaim = nr_to_reclaim,
.gfp_mask = GFP_HIGHUSER_MOVABLE,
.reclaim_idx = MAX_NR_ZONES - 1,
.priority = DEF_PRIORITY,
.may_writepage = 1,
.may_unmap = 1,
.may_swap = 1,
.hibernation_mode = 1,
};
struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
unsigned long nr_reclaimed;
unsigned int noreclaim_flag;
fs_reclaim_acquire(sc.gfp_mask);
noreclaim_flag = memalloc_noreclaim_save();
set_task_reclaim_state(current, &sc.reclaim_state);
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
set_task_reclaim_state(current, NULL);
memalloc_noreclaim_restore(noreclaim_flag);
fs_reclaim_release(sc.gfp_mask);
return nr_reclaimed;
}
#endif
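/*
 * Start the kswapd thread for a node with memory, if it is not already
 * running.
 */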
void __meminit kswapd_run(int nid)
{
pg_data_t *pgdat = NODE_DATA(nid);
pgdat_kswapd_lock(pgdat);
if (!pgdat->kswapd) {
pgdat->kswapd = kthread_create_on_node(kswapd, pgdat, nid, "kswapd%d", nid);
if (IS_ERR(pgdat->kswapd)) {
pr_err("Failed to start kswapd on node %d, ret=%pe\n",
nid, pgdat->kswapd);
BUG_ON(system_state < SYSTEM_RUNNING);
pgdat->kswapd = NULL;
} else {
wake_up_process(pgdat->kswapd);
}
}
pgdat_kswapd_unlock(pgdat);
}
void __meminit kswapd_stop(int nid)
{
pg_data_t *pgdat = NODE_DATA(nid);
struct task_struct *kswapd;
pgdat_kswapd_lock(pgdat);
kswapd = pgdat->kswapd;
if (kswapd) {
kthread_stop(kswapd);
pgdat->kswapd = NULL;
}
pgdat_kswapd_unlock(pgdat);
}
static const struct ctl_table vmscan_sysctl_table[] = {
{
.procname = "swappiness",
.data = &vm_swappiness,
.maxlen = sizeof(vm_swappiness),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO_HUNDRED,
},
#ifdef CONFIG_NUMA
{
.procname = "zone_reclaim_mode",
.data = &node_reclaim_mode,
.maxlen = sizeof(node_reclaim_mode),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
}
#endif
};
static int __init kswapd_init(void)
{
int nid;
swap_setup();
for_each_node_state(nid, N_MEMORY)
kswapd_run(nid);
register_sysctl_init("vm", vmscan_sysctl_table);
return 0;
}
module_init(kswapd_init)
#ifdef CONFIG_NUMA
int node_reclaim_mode __read_mostly;
#define NODE_RECLAIM_PRIORITY 4
int sysctl_min_unmapped_ratio = 1;
int sysctl_min_slab_ratio = 5;
static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
{
unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
node_page_state(pgdat, NR_ACTIVE_FILE);
return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}
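/*
 * Estimate how much of the node's page cache is reclaimable by node
 * reclaim: all file pages when RECLAIM_UNMAP is set, otherwise only
 * unmapped file pages, minus dirty pages when RECLAIM_WRITE is not set.
 */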
static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
{
unsigned long nr_pagecache_reclaimable;
unsigned long delta = 0;
if (node_reclaim_mode & RECLAIM_UNMAP)
nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
else
nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
if (!(node_reclaim_mode & RECLAIM_WRITE))
delta += node_page_state(pgdat, NR_FILE_DIRTY);
if (unlikely(delta > nr_pagecache_reclaimable))
delta = nr_pagecache_reclaimable;
return nr_pagecache_reclaimable - delta;
}
static unsigned long __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask,
unsigned long nr_pages,
struct scan_control *sc)
{
struct task_struct *p = current;
unsigned int noreclaim_flag;
unsigned long pflags;
trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, sc->order,
sc->gfp_mask);
cond_resched();
psi_memstall_enter(&pflags);
delayacct_freepages_start();
fs_reclaim_acquire(sc->gfp_mask);
noreclaim_flag = memalloc_noreclaim_save();
set_task_reclaim_state(p, &sc->reclaim_state);
if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages ||
node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) {
do {
shrink_node(pgdat, sc);
} while (sc->nr_reclaimed < nr_pages && --sc->priority >= 0);
}
set_task_reclaim_state(p, NULL);
memalloc_noreclaim_restore(noreclaim_flag);
fs_reclaim_release(sc->gfp_mask);
delayacct_freepages_end();
psi_memstall_leave(&pflags);
trace_mm_vmscan_node_reclaim_end(sc->nr_reclaimed);
return sc->nr_reclaimed;
}
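/*
 * Node reclaim entry point, used when node_reclaim_mode is enabled.
 * Bails out when little memory is reclaimable, the context cannot block,
 * the node is remote and has its own CPUs, or another reclaim already
 * holds the node; otherwise reclaims up to 1 << order pages and reports
 * whether that target was met.
 */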
int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
int ret;
const unsigned long nr_pages = 1 << order;
struct scan_control sc = {
.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
.gfp_mask = current_gfp_context(gfp_mask),
.order = order,
.priority = NODE_RECLAIM_PRIORITY,
.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
.may_swap = 1,
.reclaim_idx = gfp_zone(gfp_mask),
};
if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
pgdat->min_slab_pages)
return NODE_RECLAIM_FULL;
if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
return NODE_RECLAIM_NOSCAN;
if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
return NODE_RECLAIM_NOSCAN;
if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
return NODE_RECLAIM_NOSCAN;
ret = __node_reclaim(pgdat, gfp_mask, nr_pages, &sc) >= nr_pages;
clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
if (ret)
count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS);
else
count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
return ret;
}
#else
static unsigned long __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask,
unsigned long nr_pages,
struct scan_control *sc)
{
return 0;
}
#endif
enum {
MEMORY_RECLAIM_SWAPPINESS = 0,
MEMORY_RECLAIM_SWAPPINESS_MAX,
MEMORY_RECLAIM_NULL,
};
static const match_table_t tokens = {
{ MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"},
{ MEMORY_RECLAIM_SWAPPINESS_MAX, "swappiness=max"},
{ MEMORY_RECLAIM_NULL, NULL },
};
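/*
 * Parse a proactive reclaim request ("<size> [swappiness=<n>|swappiness=max]")
 * and reclaim the requested amount in batches, from either a memcg or a
 * single node, but never both.
 */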
int user_proactive_reclaim(char *buf,
struct mem_cgroup *memcg, pg_data_t *pgdat)
{
unsigned int nr_retries = MAX_RECLAIM_RETRIES;
unsigned long nr_to_reclaim, nr_reclaimed = 0;
int swappiness = -1;
char *old_buf, *start;
substring_t args[MAX_OPT_ARGS];
gfp_t gfp_mask = GFP_KERNEL;
if (!buf || (!memcg && !pgdat) || (memcg && pgdat))
return -EINVAL;
buf = strstrip(buf);
old_buf = buf;
nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
if (buf == old_buf)
return -EINVAL;
buf = strstrip(buf);
while ((start = strsep(&buf, " ")) != NULL) {
if (!strlen(start))
continue;
switch (match_token(start, tokens, args)) {
case MEMORY_RECLAIM_SWAPPINESS:
if (match_int(&args[0], &swappiness))
return -EINVAL;
if (swappiness < MIN_SWAPPINESS ||
swappiness > MAX_SWAPPINESS)
return -EINVAL;
break;
case MEMORY_RECLAIM_SWAPPINESS_MAX:
swappiness = SWAPPINESS_ANON_ONLY;
break;
default:
return -EINVAL;
}
}
while (nr_reclaimed < nr_to_reclaim) {
unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
unsigned long reclaimed;
if (signal_pending(current))
return -EINTR;
if (!nr_retries)
lru_add_drain_all();
if (memcg) {
unsigned int reclaim_options;
reclaim_options = MEMCG_RECLAIM_MAY_SWAP |
MEMCG_RECLAIM_PROACTIVE;
reclaimed = try_to_free_mem_cgroup_pages(memcg,
batch_size, gfp_mask,
reclaim_options,
swappiness == -1 ? NULL : &swappiness);
} else {
struct scan_control sc = {
.gfp_mask = current_gfp_context(gfp_mask),
.reclaim_idx = gfp_zone(gfp_mask),
.proactive_swappiness = swappiness == -1 ? NULL : &swappiness,
.priority = DEF_PRIORITY,
.may_writepage = 1,
.nr_to_reclaim = max(batch_size, SWAP_CLUSTER_MAX),
.may_unmap = 1,
.may_swap = 1,
.proactive = 1,
};
if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED,
&pgdat->flags))
return -EBUSY;
reclaimed = __node_reclaim(pgdat, gfp_mask,
batch_size, &sc);
clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
}
if (!reclaimed && !nr_retries--)
return -EAGAIN;
nr_reclaimed += reclaimed;
}
return 0;
}
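/*
 * Rescue folios on the unevictable LRU that have become evictable again,
 * moving them back to the appropriate LRU list and counting the
 * UNEVICTABLE_PGSCANNED and UNEVICTABLE_PGRESCUED events.
 */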
void check_move_unevictable_folios(struct folio_batch *fbatch)
{
struct lruvec *lruvec = NULL;
int pgscanned = 0;
int pgrescued = 0;
int i;
for (i = 0; i < fbatch->nr; i++) {
struct folio *folio = fbatch->folios[i];
int nr_pages = folio_nr_pages(folio);
pgscanned += nr_pages;
if (!folio_test_clear_lru(folio))
continue;
lruvec = folio_lruvec_relock_irq(folio, lruvec);
if (folio_evictable(folio) && folio_test_unevictable(folio)) {
lruvec_del_folio(lruvec, folio);
folio_clear_unevictable(folio);
lruvec_add_folio(lruvec, folio);
pgrescued += nr_pages;
}
folio_set_lru(folio);
}
if (lruvec) {
__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
unlock_page_lruvec_irq(lruvec);
} else if (pgscanned) {
count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
}
}
EXPORT_SYMBOL_GPL(check_move_unevictable_folios);
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t reclaim_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret, nid = dev->id;
ret = user_proactive_reclaim((char *)buf, NULL, NODE_DATA(nid));
return ret ? -EAGAIN : count;
}
static DEVICE_ATTR_WO(reclaim);
int reclaim_register_node(struct node *node)
{
return device_create_file(&node->dev, &dev_attr_reclaim);
}
void reclaim_unregister_node(struct node *node)
{
return device_remove_file(&node->dev, &dev_attr_reclaim);
}
#endif