nr_to_scan
NULL, sc->nr_to_scan);
unsigned int nr_to_scan,
while (freed < nr_to_scan) {
} while (sc->nr_scanned < sc->nr_to_scan);
sc->nr_to_scan,
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
sc->nr_to_scan - sc->nr_scanned,
unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
long nr = sc->nr_to_scan;
trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
.nr_to_scan = nr_to_scan,
TP_PROTO(u32 nr_to_scan, u32 purged, u32 evicted,
TP_ARGS(nr_to_scan, purged, evicted, active_purged, active_evicted),
__field(u32, nr_to_scan)
__entry->nr_to_scan = nr_to_scan;
__entry->nr_to_scan, __entry->purged, __entry->evicted,
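/*
 * Everything above consumes sc->nr_to_scan from struct shrink_control.
 * A minimal sketch of that contract against the current
 * shrinker_alloc()/shrinker_register() API; my_cache, my_obj and the
 * callback names are hypothetical, not taken from the code above.
 */
#include <linux/shrinker.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	struct list_head lru;
};

struct my_cache {
	spinlock_t lock;
	struct list_head lru;
	unsigned long nr_objects;
};

static unsigned long my_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct my_cache *cache = shrink->private_data;

	/* A return of 0 (or SHRINK_EMPTY) tells vmscan to skip us. */
	return READ_ONCE(cache->nr_objects);
}

static unsigned long my_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct my_cache *cache = shrink->private_data;
	unsigned long freed = 0;

	spin_lock(&cache->lock);
	/* Free at most sc->nr_to_scan objects, oldest first. */
	while (freed < sc->nr_to_scan && !list_empty(&cache->lru)) {
		struct my_obj *obj = list_first_entry(&cache->lru,
						      struct my_obj, lru);

		list_del(&obj->lru);
		cache->nr_objects--;
		kfree(obj);
		freed++;
	}
	spin_unlock(&cache->lock);

	return freed;	/* objects actually freed, not merely scanned */
}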
if (freed >= sc->nr_to_scan)
while (num_freed < sc->nr_to_scan &&
.nr_to_scan = TTM_SHRINKER_BATCH,
unsigned long nr_to_scan, bool can_backup)
force = (nr_to_scan > shrinker->purgeable_pages && can_backup);
unsigned long nr_to_scan, nr_scanned = 0, freed = 0;
nr_to_scan = sc->nr_to_scan;
runtime_pm = xe_shrinker_runtime_pm_get(shrinker, false, nr_to_scan, can_backup);
if (purgeable && nr_scanned < nr_to_scan) {
nr_to_scan, &nr_scanned);
if (nr_scanned >= nr_to_scan || !can_backup)
nr_to_scan, &nr_scanned);
unsigned long i, nr = sc->nr_to_scan;
sc.nr_to_scan = strtoul_or_return(buf);
atomic_long_add(sc->nr_to_scan, &c->need_shrink);
return sc->nr_to_scan;
count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
while (ret < sc->nr_to_scan &&
deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);
return shrink_free_pages(vb, sc->nr_to_scan);
long nr_to_scan;
if (ctx->scanned >= ctx->nr_to_scan)
if (ctx->scanned >= ctx->nr_to_scan || btrfs_fs_closing(fs_info))
ctx.nr_to_scan = atomic64_read(&fs_info->em_shrinker_nr_to_scan);
while (ctx.scanned < ctx.nr_to_scan && !btrfs_fs_closing(fs_info)) {
void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0)
void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan);
const long nr_to_scan = min_t(unsigned long, LONG_MAX, sc->nr_to_scan);
btrfs_free_extent_maps(fs_info, nr_to_scan);
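/*
 * The btrfs fragments above do not free anything from the shrinker
 * callback itself: atomic64_cmpxchg() publishes nr_to_scan once, and a
 * background worker consumes it. A hedged sketch of that hand-off
 * (the my_* names are hypothetical):
 */
#include <linux/atomic.h>
#include <linux/shrinker.h>
#include <linux/workqueue.h>

static atomic64_t my_nr_to_scan = ATOMIC64_INIT(0);
static struct work_struct my_shrink_work;

static unsigned long my_scan(struct shrinker *shrink,
			     struct shrink_control *sc)
{
	/* A previous request is still pending; don't queue another. */
	if (atomic64_cmpxchg(&my_nr_to_scan, 0, sc->nr_to_scan) != 0)
		return 0;

	schedule_work(&my_shrink_work);
	/* Claim the whole request; the worker does the actual freeing. */
	return sc->nr_to_scan;
}

static void my_shrink_worker(struct work_struct *work)
{
	long nr_to_scan = atomic64_read(&my_nr_to_scan);
	long scanned = 0;

	while (scanned < nr_to_scan)
		scanned++;	/* evict one object per iteration here */

	/* Clear the slot so the next shrinker call can queue work. */
	atomic64_set(&my_nr_to_scan, 0);
}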
unsigned long nr_to_scan;
if (!lwc->nr_to_scan)
--lwc->nr_to_scan;
lwc.nr_to_scan = CEPH_CAPS_PER_RELEASE * 2;
if (!lwc.nr_to_scan) /* more invalid leases */
if (lwc.nr_to_scan < CEPH_CAPS_PER_RELEASE)
lwc.nr_to_scan = CEPH_CAPS_PER_RELEASE;
if (!lwc.nr_to_scan) /* more to check */
unsigned long nr = sc->nr_to_scan;
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
if (nr_to_scan <= 0)
nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);
trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
int nr_to_scan = sc->nr_to_scan;
trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);
nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
int *nr_to_scan, int *nr_shrunk)
while (*nr_to_scan > 0) {
(*nr_to_scan)--;
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);
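/*
 * ext4 threads nr_to_scan through its helpers by pointer so each level
 * charges examined objects against one shared budget: every object
 * looked at decrements *nr_to_scan, whether or not it was freeable.
 * A self-contained sketch of that convention (my_* names hypothetical):
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_ext {
	struct list_head node;
	bool busy;
};

static int my_reclaim_extents(struct list_head *lru, int *nr_to_scan)
{
	struct my_ext *ext, *tmp;
	int nr_shrunk = 0;

	list_for_each_entry_safe(ext, tmp, lru, node) {
		if (*nr_to_scan <= 0)
			break;
		(*nr_to_scan)--;	/* examined counts against the budget */
		if (ext->busy)
			continue;	/* scanned but not freeable */
		list_del(&ext->node);
		kfree(ext);
		nr_shrunk++;		/* actually freed */
	}
	return nr_shrunk;
}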
unsigned long nr = sc->nr_to_scan;
return gfs2_scan_glock_lru(sc->nr_to_scan);
unsigned long *nr_to_scan)
(*nr_to_scan) -= min(*nr_to_scan, freed);
if (*nr_to_scan == 0)
if (*nr_to_scan && journal->j_shrink_transaction)
unsigned long nr_to_scan = sc->nr_to_scan;
trace_jbd2_shrink_scan_enter(journal, sc->nr_to_scan, count);
nr_shrunk = jbd2_journal_shrink_checkpoint_list(journal, &nr_to_scan);
trace_jbd2_shrink_scan_exit(journal, nr_to_scan, nr_shrunk, count);
trace_jbd2_shrink_count(journal, sc->nr_to_scan, count);
unsigned long nr_to_scan)
while (nr_to_scan-- && !list_empty(&cache->c_list)) {
return mb_cache_shrink(cache, sc->nr_to_scan);
unsigned long nr_to_scan);
nfs_do_access_cache_scan(unsigned int nr_to_scan)
if (nr_to_scan-- == 0)
int nr_to_scan = sc->nr_to_scan;
return nfs_do_access_cache_scan(nr_to_scan);
unsigned int nr_to_scan;
nr_to_scan = 100;
if (diff < nr_to_scan)
nr_to_scan = diff;
nfs_do_access_cache_scan(nr_to_scan);
if (scanned >= sc->nr_to_scan) {
if (freed > sc->nr_to_scan)
while (!list_empty(&free_dquots) && sc->nr_to_scan) {
sc->nr_to_scan--;
dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
sc->nr_to_scan = dentries + 1;
sc->nr_to_scan = inodes + 1;
sc->nr_to_scan = fs_objects + 1;
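/*
 * The super_cache_scan() lines above split a single nr_to_scan budget
 * across the dentry, inode and fs-private LRUs in proportion to their
 * object counts, then add 1 so a nonzero budget never rounds to zero.
 * The arithmetic in isolation (function name hypothetical):
 */
#include <linux/math.h>

static void my_split_budget(unsigned long nr_to_scan,
			    unsigned long dentries, unsigned long inodes,
			    unsigned long fs_objects,
			    unsigned long *d_scan, unsigned long *i_scan,
			    unsigned long *f_scan)
{
	/* +1 keeps the divisor nonzero even if all three counts are 0. */
	unsigned long total_objects = dentries + inodes + fs_objects + 1;

	/* mult_frac(x, n, d) is x * n / d without intermediate overflow. */
	*d_scan = mult_frac(nr_to_scan, dentries, total_objects) + 1;
	*i_scan = mult_frac(nr_to_scan, inodes, total_objects) + 1;
	*f_scan = mult_frac(nr_to_scan, fs_objects, total_objects) + 1;
}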
unsigned long nr = sc->nr_to_scan;
unsigned long nr_to_scan)
.icw_scan_limit = min_t(unsigned long, LONG_MAX, nr_to_scan),
long xfs_reclaim_inodes_nr(struct xfs_mount *mp, unsigned long nr_to_scan);
return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
__field(unsigned long, nr_to_scan)
__entry->nr_to_scan = sc->nr_to_scan;
__entry->nr_to_scan,
unsigned int nr_to_scan,
unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
unsigned long nr_to_scan;
__field( long, nr_to_scan )
__entry->nr_to_scan = \
__entry->nr_to_scan, __entry->nr,
TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
TP_ARGS(sb, nr_to_scan, cache_cnt),
__field( int, nr_to_scan )
__entry->nr_to_scan = nr_to_scan;
__entry->nr_to_scan, __entry->cache_cnt)
TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
TP_ARGS(sb, nr_to_scan, cache_cnt)
TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
TP_ARGS(sb, nr_to_scan, cache_cnt)
TP_PROTO(journal_t *journal, unsigned long nr_to_scan,
TP_ARGS(journal, nr_to_scan, count),
__field(unsigned long, nr_to_scan)
__entry->nr_to_scan = nr_to_scan;
__entry->nr_to_scan, __entry->count)
TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count),
TP_ARGS(journal, nr_to_scan, count)
TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count),
TP_ARGS(journal, nr_to_scan, count)
TP_PROTO(journal_t *journal, unsigned long nr_to_scan,
TP_ARGS(journal, nr_to_scan, nr_shrunk, count),
__field(unsigned long, nr_to_scan)
__entry->nr_to_scan = nr_to_scan;
__entry->nr_to_scan, __entry->nr_shrunk,
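/*
 * The ext4 and jbd2 trace definitions above all follow the standard
 * TRACE_EVENT() shape: TP_PROTO/TP_ARGS declare the call, __field plus
 * TP_fast_assign capture nr_to_scan into the ring buffer, and
 * TP_printk formats it. A minimal sketch (hypothetical my_shrink
 * event; the usual trace/events/*.h header boilerplate is elided):
 */
#include <linux/tracepoint.h>

TRACE_EVENT(my_shrink,
	TP_PROTO(unsigned long nr_to_scan, unsigned long count),
	TP_ARGS(nr_to_scan, count),
	TP_STRUCT__entry(
		__field(unsigned long, nr_to_scan)
		__field(unsigned long, count)
	),
	TP_fast_assign(
		__entry->nr_to_scan = nr_to_scan;
		__entry->count = count;
	),
	TP_printk("nr_to_scan %lu count %lu",
		  __entry->nr_to_scan, __entry->count)
);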
sc->nr_to_scan -= _count;
if (sc->nr_to_scan <= 0)
if (!--sc->nr_to_scan)
if (sc->nr_to_scan && !list_empty(&ds_queue->split_queue)) {
unsigned long batch = sc ? sc->nr_to_scan : 128;
unsigned long nr_to_scan = min(batch_size, total_scan);
shrinkctl->nr_to_scan = nr_to_scan;
shrinkctl->nr_scanned = nr_to_scan;
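/*
 * The two shrinkctl lines above are the core batching step: the walk
 * hands ->scan_objects() at most batch_size objects per call until the
 * budget is spent. A hedged sketch of that loop (my_walk_batches is a
 * hypothetical name for illustration):
 */
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/shrinker.h>

static unsigned long my_walk_batches(struct shrinker *shrinker,
				     struct shrink_control *shrinkctl,
				     unsigned long total_scan,
				     unsigned long batch_size)
{
	unsigned long freed = 0;

	while (total_scan) {
		unsigned long nr_to_scan = min(batch_size, total_scan);
		unsigned long ret;

		shrinkctl->nr_to_scan = nr_to_scan;
		shrinkctl->nr_scanned = nr_to_scan;

		ret = shrinker->scan_objects(shrinker, shrinkctl);
		if (ret == SHRINK_STOP)
			break;
		freed += ret;

		/* The callback may report scanning fewer than asked. */
		total_scan -= shrinkctl->nr_scanned;
		cond_resched();
	}
	return freed;
}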
unsigned long nr_to_scan = 0, read_len;
if (sscanf(kbuf, "%llu %d %lu", &id, &nid, &nr_to_scan) != 3)
if (nr_to_scan == 0)
sc.nr_to_scan = nr_to_scan;
sc.nr_scanned = nr_to_scan;
sc->nr_to_scan -= count;
if (sc->nr_to_scan <= 0)
static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
while (scan < nr_to_scan && !list_empty(src)) {
trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
static void shrink_active_list(unsigned long nr_to_scan,
nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold,
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
shrink_active_list(nr_to_scan, lruvec, sc, lru);
return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
int scan_batch = min(nr_to_scan, MAX_LRU_BATCH);
static int isolate_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
scanned = scan_folios(nr_to_scan, lruvec, sc, type, tier, list);
static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
scanned = isolate_folios(nr_to_scan, lruvec, sc, swappiness, &type, &list);
int swappiness, unsigned long *nr_to_scan)
*nr_to_scan = 0;
*nr_to_scan = size;
unsigned long nr_to_scan;
success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
if (nr_to_scan && !mem_cgroup_online(memcg))
return nr_to_scan;
nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
return nr_to_scan >> sc->priority;
long nr_to_scan;
nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
if (nr_to_scan <= 0)
delta = evict_folios(nr_to_scan, lruvec, sc, swappiness);
if (scanned >= nr_to_scan)
return nr_to_scan < 0;
unsigned long nr_to_scan;
nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
nr[lru] -= nr_to_scan;
nr_reclaimed += shrink_list(lru, nr_to_scan,
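/*
 * get_nr_to_scan() above ends with "nr_to_scan >> sc->priority": the
 * scan target shrinks geometrically as reclaim priority relaxes, and
 * only a full-pressure pass (priority 0) scans the whole size. That
 * scaling step in isolation (function name hypothetical):
 */
static unsigned long my_scale_by_priority(unsigned long nr_to_scan,
					  int priority)
{
	/* priority runs from DEF_PRIORITY (12) down to 0 under pressure */
	return nr_to_scan >> priority;
}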
rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
if (nr_to_scan-- == 0)
rpcauth_cache_do_shrink(int nr_to_scan)
freed = rpcauth_prune_expired(&free, nr_to_scan);
return rpcauth_cache_do_shrink(sc->nr_to_scan);
unsigned int nr_to_scan;
nr_to_scan = 100;
if (diff < nr_to_scan)
nr_to_scan = diff;
rpcauth_cache_do_shrink(nr_to_scan);
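/*
 * Both the NFS access-cache and SUNRPC credential-cache paths above
 * pick an internal budget of 100 and clamp it by the number of entries
 * over the soft limit. The same clamp with min_t() (hypothetical
 * my_gc_budget name):
 */
#include <linux/minmax.h>

static unsigned int my_gc_budget(unsigned long nr_entries,
				 unsigned long soft_limit)
{
	unsigned long diff;

	if (nr_entries <= soft_limit)
		return 0;	/* under the limit: nothing to trim */

	diff = nr_entries - soft_limit;
	return min_t(unsigned long, diff, 100);
}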