arch/x86/kernel/cpu/sgx/encl.c
1003
struct mem_cgroup *memcg = NULL;
arch/x86/kernel/cpu/sgx/encl.c
1025
memcg = get_mem_cgroup_from_mm(encl_mm->mm);
arch/x86/kernel/cpu/sgx/encl.c
1040
if (!memcg)
arch/x86/kernel/cpu/sgx/encl.c
1043
return memcg;
arch/x86/kernel/cpu/sgx/encl.c
1067
struct mem_cgroup *memcg = set_active_memcg(encl_memcg);
arch/x86/kernel/cpu/sgx/encl.c
1072
set_active_memcg(memcg);
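
The six encl.c hits above trace one pattern: SGX looks up the memcg of an mm attached to the enclave via get_mem_cgroup_from_mm(), and later brackets backing-store allocations with set_active_memcg() so the pages are charged to that cgroup rather than to the reclaimer. A minimal sketch of that bracket, assuming the caller already holds a memcg reference; the function name and the alloc_page() call are illustrative, not SGX's actual code:

static struct page *alloc_page_charged_to(struct mem_cgroup *charge_memcg)
{
        struct mem_cgroup *old_memcg;
        struct page *page;

        /* redirect __GFP_ACCOUNT charging to charge_memcg for this window */
        old_memcg = set_active_memcg(charge_memcg);
        page = alloc_page(GFP_KERNEL | __GFP_ACCOUNT);
        set_active_memcg(old_memcg);

        return page;
}
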
drivers/tty/sysrq.c
391
.memcg = NULL,
fs/buffer.c
923
struct mem_cgroup *memcg, *old_memcg;
fs/buffer.c
926
memcg = folio_memcg(folio);
fs/buffer.c
927
old_memcg = set_active_memcg(memcg);
fs/notify/fanotify/fanotify.c
843
old_memcg = set_active_memcg(group->memcg);
fs/notify/fanotify/fanotify_user.c
1710
group->memcg = get_mem_cgroup_from_mm(current->mm);
fs/notify/group.c
27
mem_cgroup_put(group->memcg);
fs/notify/inotify/inotify_fsnotify.c
95
old_memcg = set_active_memcg(group->memcg);
fs/notify/inotify/inotify_user.c
676
group->memcg = get_mem_cgroup_from_mm(current->mm);
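
Taken together, the fsnotify hits sketch a lifecycle: the group pins the creating task's memcg at init, each event allocation is charged to it by temporarily making it the active memcg, and the reference is dropped when the group is destroyed. A condensed sketch under those assumptions; only group->memcg and the helpers named in the hits come from the source, the function names and event cache are illustrative:

static void group_init_memcg_sketch(struct fsnotify_group *group)
{
        /* remember who to charge: the task that created the group */
        group->memcg = get_mem_cgroup_from_mm(current->mm);
}

static void *group_alloc_event_sketch(struct fsnotify_group *group,
                                      struct kmem_cache *cachep)
{
        struct mem_cgroup *old_memcg;
        void *event;

        /* charge the event to the listener's memcg, not the producer's */
        old_memcg = set_active_memcg(group->memcg);
        event = kmem_cache_alloc(cachep, GFP_KERNEL_ACCOUNT);
        set_active_memcg(old_memcg);
        return event;
}

static void group_free_memcg_sketch(struct fsnotify_group *group)
{
        /* drop the reference taken at creation (fs/notify/group.c hit) */
        mem_cgroup_put(group->memcg);
}
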
include/linux/backing-dev.h
146
void wb_memcg_offline(struct mem_cgroup *memcg);
include/linux/backing-dev.h
352
static inline void wb_memcg_offline(struct mem_cgroup *memcg)
include/linux/bpf.h
2663
struct mem_cgroup *memcg);
include/linux/bpf.h
2696
struct mem_cgroup *memcg)
include/linux/fsnotify_backend.h
252
struct mem_cgroup *memcg; /* memcg to charge allocations */
include/linux/huge_mm.h
419
void reparent_deferred_split_queue(struct mem_cgroup *memcg);
include/linux/huge_mm.h
653
static inline void reparent_deferred_split_queue(struct mem_cgroup *memcg) {}
include/linux/list_lru.h
116
struct mem_cgroup *memcg);
include/linux/list_lru.h
156
struct mem_cgroup *memcg);
include/linux/list_lru.h
184
int nid, struct mem_cgroup *memcg);
include/linux/list_lru.h
190
return list_lru_count_one(lru, sc->nid, sc->memcg);
include/linux/list_lru.h
82
int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
include/linux/list_lru.h
84
void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);
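
The list_lru.h hits show the memcg-aware side of the API: add/del and walk take a mem_cgroup, and the shrink_control handed to a shrinker already carries the target node and memcg (the line-190 hit forwards sc->nid and sc->memcg). A sketch of a count callback for such a shrinker, assuming a list_lru named my_lru set up with list_lru_init_memcg() and a shrinker registered as memcg aware:

static struct list_lru my_lru;  /* assumed: initialized with list_lru_init_memcg() */

static unsigned long my_count_objects_sketch(struct shrinker *shrink,
                                             struct shrink_control *sc)
{
        /* counts only the objects owned by sc->memcg on node sc->nid */
        return list_lru_shrink_count(&my_lru, sc);
}
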
include/linux/memcontrol.h
1002
__memcg_memory_event(memcg, event, true);
include/linux/memcontrol.h
1008
struct mem_cgroup *memcg;
include/linux/memcontrol.h
1014
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
include/linux/memcontrol.h
1015
if (likely(memcg))
include/linux/memcontrol.h
1016
memcg_memory_event(memcg, event);
include/linux/memcontrol.h
1026
struct mem_cgroup *memcg;
include/linux/memcontrol.h
1033
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
include/linux/memcontrol.h
1034
if (!memcg)
include/linux/memcontrol.h
1035
memcg = root_mem_cgroup;
include/linux/memcontrol.h
1036
id = cgroup_id(memcg->css.cgroup);
include/linux/memcontrol.h
1085
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1095
static inline void memcg_memory_event(struct mem_cgroup *memcg,
include/linux/memcontrol.h
1106
struct mem_cgroup *memcg,
include/linux/memcontrol.h
1115
struct mem_cgroup *memcg)
include/linux/memcontrol.h
1120
struct mem_cgroup *memcg)
include/linux/memcontrol.h
1125
struct mem_cgroup *memcg)
include/linux/memcontrol.h
1131
struct mem_cgroup *memcg)
include/linux/memcontrol.h
1170
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
include/linux/memcontrol.h
1187
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1193
struct mem_cgroup *memcg)
include/linux/memcontrol.h
1227
static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1232
static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1237
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1279
static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
include/linux/memcontrol.h
1284
static inline unsigned short mem_cgroup_private_id(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1296
static inline u64 mem_cgroup_id(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1316
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1328
static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1334
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
include/linux/memcontrol.h
1339
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1353
static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1357
static inline void mod_memcg_state(struct mem_cgroup *memcg,
include/linux/memcontrol.h
1368
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
include/linux/memcontrol.h
1373
static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
include/linux/memcontrol.h
1400
static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1404
static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1416
static inline void count_memcg_events(struct mem_cgroup *memcg,
include/linux/memcontrol.h
1471
struct mem_cgroup *memcg;
include/linux/memcontrol.h
1473
memcg = lruvec_memcg(lruvec);
include/linux/memcontrol.h
1474
if (!memcg)
include/linux/memcontrol.h
1476
memcg = parent_mem_cgroup(memcg);
include/linux/memcontrol.h
1477
if (!memcg)
include/linux/memcontrol.h
1479
return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
include/linux/memcontrol.h
1547
struct mem_cgroup *memcg;
include/linux/memcontrol.h
1552
memcg = folio_memcg(folio);
include/linux/memcontrol.h
1553
if (unlikely(memcg && &memcg->css != wb->memcg_css))
include/linux/memcontrol.h
1598
static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1603
write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags);
include/linux/memcontrol.h
1604
memcg->socket_pressure = val;
include/linux/memcontrol.h
1605
write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags);
include/linux/memcontrol.h
1608
static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1614
seq = read_seqbegin(&memcg->socket_pressure_seqlock);
include/linux/memcontrol.h
1615
val = memcg->socket_pressure;
include/linux/memcontrol.h
1616
} while (read_seqretry(&memcg->socket_pressure_seqlock, seq));
include/linux/memcontrol.h
1621
static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1623
WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
include/linux/memcontrol.h
1626
static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1628
return READ_ONCE(memcg->socket_pressure);
include/linux/memcontrol.h
1632
int alloc_shrinker_info(struct mem_cgroup *memcg);
include/linux/memcontrol.h
1633
void free_shrinker_info(struct mem_cgroup *memcg);
include/linux/memcontrol.h
1634
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
include/linux/memcontrol.h
1635
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
include/linux/memcontrol.h
1668
static inline void set_shrinker_bit(struct mem_cgroup *memcg,
include/linux/memcontrol.h
1739
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1741
return memcg ? memcg->kmemcg_id : -1;
include/linux/memcontrol.h
1750
struct mem_cgroup *memcg;
include/linux/memcontrol.h
1756
memcg = obj_cgroup_memcg(objcg);
include/linux/memcontrol.h
1757
count_memcg_events(memcg, idx, count);
include/linux/memcontrol.h
176
struct mem_cgroup *memcg;
include/linux/memcontrol.h
1761
void mem_cgroup_node_filter_allowed(struct mem_cgroup *memcg, nodemask_t *mask);
include/linux/memcontrol.h
1763
void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);
include/linux/memcontrol.h
1765
static inline bool memcg_is_dying(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1767
return memcg ? css_is_dying(&memcg->css) : false;
include/linux/memcontrol.h
1811
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1832
static inline void mem_cgroup_node_filter_allowed(struct mem_cgroup *memcg,
include/linux/memcontrol.h
1837
static inline void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1841
static inline bool memcg_is_dying(struct mem_cgroup *memcg)
include/linux/memcontrol.h
1851
bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
include/linux/memcontrol.h
1865
static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
include/linux/memcontrol.h
382
return READ_ONCE(objcg->memcg);
include/linux/memcontrol.h
514
struct mem_cgroup *memcg;
include/linux/memcontrol.h
518
memcg = obj_cgroup_memcg(objcg);
include/linux/memcontrol.h
519
if (unlikely(!css_tryget(&memcg->css)))
include/linux/memcontrol.h
523
return memcg;
include/linux/memcontrol.h
546
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
include/linux/memcontrol.h
548
return (memcg == root_mem_cgroup);
include/linux/memcontrol.h
557
struct mem_cgroup *memcg,
include/linux/memcontrol.h
567
*usage = page_counter_read(&memcg->memory);
include/linux/memcontrol.h
601
if (root == memcg)
include/linux/memcontrol.h
604
*min = READ_ONCE(memcg->memory.emin);
include/linux/memcontrol.h
605
*low = READ_ONCE(memcg->memory.elow);
include/linux/memcontrol.h
609
struct mem_cgroup *memcg);
include/linux/memcontrol.h
612
struct mem_cgroup *memcg)
include/linux/memcontrol.h
619
return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
include/linux/memcontrol.h
620
memcg == target;
include/linux/memcontrol.h
624
struct mem_cgroup *memcg)
include/linux/memcontrol.h
626
if (mem_cgroup_unprotected(target, memcg))
include/linux/memcontrol.h
629
return READ_ONCE(memcg->memory.elow) >=
include/linux/memcontrol.h
630
page_counter_read(&memcg->memory);
include/linux/memcontrol.h
634
struct mem_cgroup *memcg)
include/linux/memcontrol.h
636
if (mem_cgroup_unprotected(target, memcg))
include/linux/memcontrol.h
639
return READ_ONCE(memcg->memory.emin) >=
include/linux/memcontrol.h
640
page_counter_read(&memcg->memory);
include/linux/memcontrol.h
707
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
include/linux/memcontrol.h
718
if (!memcg)
include/linux/memcontrol.h
719
memcg = root_mem_cgroup;
include/linux/memcontrol.h
721
mz = memcg->nodeinfo[pgdat->node_id];
include/linux/memcontrol.h
742
struct mem_cgroup *memcg = folio_memcg(folio);
include/linux/memcontrol.h
744
VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
include/linux/memcontrol.h
745
return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
include/linux/memcontrol.h
797
static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
include/linux/memcontrol.h
799
return !memcg || css_tryget(&memcg->css);
include/linux/memcontrol.h
802
static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
include/linux/memcontrol.h
804
return !memcg || css_tryget_online(&memcg->css);
include/linux/memcontrol.h
807
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
include/linux/memcontrol.h
809
if (memcg)
include/linux/memcontrol.h
810
css_put(&memcg->css);
include/linux/memcontrol.h
820
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
include/linux/memcontrol.h
823
static inline unsigned short mem_cgroup_private_id(struct mem_cgroup *memcg)
include/linux/memcontrol.h
828
return memcg->id.id;
include/linux/memcontrol.h
832
static inline u64 mem_cgroup_id(struct mem_cgroup *memcg)
include/linux/memcontrol.h
834
return memcg ? cgroup_id(memcg->css.cgroup) : 0;
include/linux/memcontrol.h
852
return mz->memcg;
include/linux/memcontrol.h
861
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
include/linux/memcontrol.h
863
return mem_cgroup_from_css(memcg->css.parent);
include/linux/memcontrol.h
866
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
include/linux/memcontrol.h
869
if (root == memcg)
include/linux/memcontrol.h
871
return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
include/linux/memcontrol.h
875
struct mem_cgroup *memcg)
include/linux/memcontrol.h
883
match = mem_cgroup_is_descendant(task_memcg, memcg);
include/linux/memcontrol.h
891
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
include/linux/memcontrol.h
895
return css_is_online(&memcg->css);
include/linux/memcontrol.h
90
struct mem_cgroup *memcg; /* Back pointer, we cannot */
include/linux/memcontrol.h
919
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
include/linux/memcontrol.h
921
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
include/linux/memcontrol.h
924
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
include/linux/memcontrol.h
928
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
include/linux/memcontrol.h
931
void mod_memcg_state(struct mem_cgroup *memcg,
include/linux/memcontrol.h
937
struct mem_cgroup *memcg;
include/linux/memcontrol.h
943
memcg = folio_memcg(page_folio(page));
include/linux/memcontrol.h
944
if (memcg)
include/linux/memcontrol.h
945
mod_memcg_state(memcg, idx, val);
include/linux/memcontrol.h
949
unsigned long memcg_events(struct mem_cgroup *memcg, int event);
include/linux/memcontrol.h
950
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
include/linux/memcontrol.h
951
unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
include/linux/memcontrol.h
958
void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
include/linux/memcontrol.h
959
void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
include/linux/memcontrol.h
963
void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
include/linux/memcontrol.h
969
struct mem_cgroup *memcg = folio_memcg(folio);
include/linux/memcontrol.h
971
if (memcg)
include/linux/memcontrol.h
972
count_memcg_events(memcg, idx, nr);
include/linux/memcontrol.h
978
struct mem_cgroup *memcg;
include/linux/memcontrol.h
984
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
include/linux/memcontrol.h
985
if (likely(memcg))
include/linux/memcontrol.h
986
count_memcg_events(memcg, idx, count);
include/linux/memcontrol.h
996
void __memcg_memory_event(struct mem_cgroup *memcg,
include/linux/memcontrol.h
999
static inline void memcg_memory_event(struct mem_cgroup *memcg,
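
Among the memcontrol.h hits, the group around lines 514-523 shows how a stable mem_cgroup reference is taken from an obj_cgroup: resolve obj_cgroup_memcg() under RCU and retry css_tryget() until it pins a still-live css. A rough reconstruction from those lines; the retry label and the RCU bracket are inferred, so details may differ from the exact source:

static inline struct mem_cgroup *get_memcg_from_objcg_sketch(struct obj_cgroup *objcg)
{
        struct mem_cgroup *memcg;

        rcu_read_lock();
retry:
        memcg = obj_cgroup_memcg(objcg);        /* may point at an ancestor after reparenting */
        if (unlikely(!css_tryget(&memcg->css)))
                goto retry;
        rcu_read_unlock();

        return memcg;   /* caller pairs this with css_put()/mem_cgroup_put() */
}
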
include/linux/mm_types.h
1367
struct mem_cgroup *memcg;
include/linux/mm_types.h
1459
mm->lru_gen.memcg = NULL;
include/linux/mmzone.h
621
void lru_gen_init_memcg(struct mem_cgroup *memcg);
include/linux/mmzone.h
622
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
include/linux/mmzone.h
623
void lru_gen_online_memcg(struct mem_cgroup *memcg);
include/linux/mmzone.h
624
void lru_gen_offline_memcg(struct mem_cgroup *memcg);
include/linux/mmzone.h
625
void lru_gen_release_memcg(struct mem_cgroup *memcg);
include/linux/mmzone.h
626
void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
include/linux/mmzone.h
643
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
include/linux/mmzone.h
647
static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
include/linux/mmzone.h
651
static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
include/linux/mmzone.h
655
static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
include/linux/mmzone.h
659
static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
include/linux/mmzone.h
663
static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
include/linux/oom.h
36
struct mem_cgroup *memcg;
include/linux/rmap.h
846
struct mem_cgroup *memcg, vm_flags_t *vm_flags);
include/linux/rmap.h
978
struct mem_cgroup *memcg,
include/linux/sched/mm.h
492
set_active_memcg(struct mem_cgroup *memcg)
include/linux/sched/mm.h
498
this_cpu_write(int_active_memcg, memcg);
include/linux/sched/mm.h
501
current->active_memcg = memcg;
include/linux/sched/mm.h
508
set_active_memcg(struct mem_cgroup *memcg)
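
The sched/mm.h hits define set_active_memcg(): line 498 writes a per-cpu slot for interrupt context, line 501 writes current->active_memcg for task context, and the second declaration at line 508 is the stub for builds without memcg. Roughly, as a reconstruction from those lines (the in_task() split is inferred):

static inline struct mem_cgroup *
set_active_memcg_sketch(struct mem_cgroup *memcg)
{
        struct mem_cgroup *old;

        if (!in_task()) {
                old = this_cpu_read(int_active_memcg);
                this_cpu_write(int_active_memcg, memcg);        /* line 498 */
        } else {
                old = current->active_memcg;
                current->active_memcg = memcg;                  /* line 501 */
        }

        /* callers save and restore: old = set_active_memcg(new); ...; set_active_memcg(old); */
        return old;
}
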
include/linux/shrinker.h
55
struct mem_cgroup *memcg;
include/linux/swap.h
383
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
include/linux/swap.h
562
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
include/linux/swap.h
569
if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
include/linux/swap.h
572
return READ_ONCE(memcg->swappiness);
include/linux/swap.h
613
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
include/linux/swap.h
627
static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
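
The swap.h hits at lines 562-572 give the shape of the per-cgroup swappiness lookup; the unmatched lines in between are elided, so this is an approximation rather than the exact body: disabled or root cgroups fall back to the global vm_swappiness, everything else reads the per-cgroup value.

static inline int mem_cgroup_swappiness_sketch(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
                return READ_ONCE(vm_swappiness);        /* global /proc/sys/vm/swappiness */

        return READ_ONCE(memcg->swappiness);            /* per-cgroup override */
}
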
include/linux/vmpressure.h
33
extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
include/linux/vmpressure.h
35
extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
include/linux/vmpressure.h
39
extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
include/linux/vmpressure.h
41
extern int vmpressure_register_event(struct mem_cgroup *memcg,
include/linux/vmpressure.h
44
extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
include/linux/vmpressure.h
47
static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
include/linux/vmpressure.h
49
static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,
include/linux/zswap.h
33
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
include/linux/zswap.h
58
static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
include/net/sock.h
2666
struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
include/net/sock.h
2670
return !!memcg->tcpmem_pressure;
include/net/sock.h
2675
mem_cgroup_get_socket_pressure(memcg))) {
include/net/sock.h
2680
} while ((memcg = parent_mem_cgroup(memcg)));
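
The sock.h hits outline the socket-pressure check: resolve the socket's memcg, short-circuit to the v1 tcpmem_pressure flag, otherwise walk up the hierarchy comparing each level's pressure timestamp via mem_cgroup_get_socket_pressure(). An approximate reconstruction; the time comparison and the v1/v2 split are inferred and do not appear among the hits:

static inline bool under_socket_pressure_sketch(const struct sock *sk)
{
        struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);

        if (!memcg)
                return false;

        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return !!memcg->tcpmem_pressure;        /* cgroup v1 tcpmem accounting */

        do {
                /* socket_pressure holds a jiffies deadline set by reclaim */
                if (time_before64(get_jiffies_64(),
                                  mem_cgroup_get_socket_pressure(memcg)))
                        return true;
        } while ((memcg = parent_mem_cgroup(memcg)));

        return false;
}
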
include/trace/events/memcg.h
14
TP_PROTO(struct mem_cgroup *memcg, int item, int val),
include/trace/events/memcg.h
16
TP_ARGS(memcg, item, val),
include/trace/events/memcg.h
25
__entry->id = cgroup_id(memcg->css.cgroup);
include/trace/events/memcg.h
3
#define TRACE_SYSTEM memcg
include/trace/events/memcg.h
36
TP_PROTO(struct mem_cgroup *memcg, int item, int val),
include/trace/events/memcg.h
38
TP_ARGS(memcg, item, val)
include/trace/events/memcg.h
43
TP_PROTO(struct mem_cgroup *memcg, int item, int val),
include/trace/events/memcg.h
45
TP_ARGS(memcg, item, val)
include/trace/events/memcg.h
50
TP_PROTO(struct mem_cgroup *memcg, int item, unsigned long val),
include/trace/events/memcg.h
52
TP_ARGS(memcg, item, val),
include/trace/events/memcg.h
61
__entry->id = cgroup_id(memcg->css.cgroup);
include/trace/events/memcg.h
72
TP_PROTO(struct mem_cgroup *memcg, int item, unsigned long val),
include/trace/events/memcg.h
74
TP_ARGS(memcg, item, val)
include/trace/events/memcg.h
79
TP_PROTO(struct mem_cgroup *memcg, s64 stats_updates,
include/trace/events/memcg.h
82
TP_ARGS(memcg, stats_updates, force, needs_flush),
include/trace/events/memcg.h
92
__entry->id = cgroup_id(memcg->css.cgroup);
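
The trace/events/memcg.h hits show TRACE_SYSTEM memcg and event classes whose prototypes take (memcg, item, val) and record cgroup_id(memcg->css.cgroup). A sketch of what such an event class looks like; the field list, class name, and print format are illustrative assumptions:

DECLARE_EVENT_CLASS(memcg_stat_class_sketch,

        TP_PROTO(struct mem_cgroup *memcg, int item, int val),

        TP_ARGS(memcg, item, val),

        TP_STRUCT__entry(
                __field(u64, id)
                __field(int, item)
                __field(int, val)
        ),

        TP_fast_assign(
                __entry->id = cgroup_id(memcg->css.cgroup);
                __entry->item = item;
                __entry->val = val;
        ),

        TP_printk("memcg_id=%llu item=%d val=%d",
                  __entry->id, __entry->item, __entry->val)
);
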
kernel/bpf/memalloc.c
1001
struct mem_cgroup *memcg, *old_memcg;
kernel/bpf/memalloc.c
1003
memcg = get_memcg(c);
kernel/bpf/memalloc.c
1004
old_memcg = set_active_memcg(memcg);
kernel/bpf/memalloc.c
1009
mem_cgroup_put(memcg);
kernel/bpf/memalloc.c
209
struct mem_cgroup *memcg = NULL, *old_memcg;
kernel/bpf/memalloc.c
240
memcg = get_memcg(c);
kernel/bpf/memalloc.c
241
old_memcg = set_active_memcg(memcg);
kernel/bpf/memalloc.c
254
mem_cgroup_put(memcg);
kernel/bpf/syscall.c
528
struct mem_cgroup *memcg, *old_memcg;
kernel/bpf/syscall.c
531
bpf_map_memcg_enter(map, &old_memcg, &memcg);
kernel/bpf/syscall.c
533
bpf_map_memcg_exit(old_memcg, memcg);
kernel/bpf/syscall.c
541
struct mem_cgroup *memcg, *old_memcg;
kernel/bpf/syscall.c
544
bpf_map_memcg_enter(map, &old_memcg, &memcg);
kernel/bpf/syscall.c
546
bpf_map_memcg_exit(old_memcg, memcg);
kernel/bpf/syscall.c
553
struct mem_cgroup *memcg, *old_memcg;
kernel/bpf/syscall.c
556
bpf_map_memcg_enter(map, &old_memcg, &memcg);
kernel/bpf/syscall.c
558
bpf_map_memcg_exit(old_memcg, memcg);
kernel/bpf/syscall.c
566
struct mem_cgroup *memcg, *old_memcg;
kernel/bpf/syscall.c
569
bpf_map_memcg_enter(map, &old_memcg, &memcg);
kernel/bpf/syscall.c
571
bpf_map_memcg_exit(old_memcg, memcg);
kernel/bpf/syscall.c
579
struct mem_cgroup *memcg, *old_memcg;
kernel/bpf/syscall.c
582
bpf_map_memcg_enter(map, &old_memcg, &memcg);
kernel/bpf/syscall.c
584
bpf_map_memcg_exit(old_memcg, memcg);
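
Every kernel/bpf/syscall.c hit repeats one bracket: bpf_map_memcg_enter() before the allocation, bpf_map_memcg_exit() after, so the memory is charged to the map's owning memcg. A sketch of one such wrapper; the kvmalloc_node() call and GFP flags in the middle are illustrative, since the allocator line itself is not among the hits:

static void *map_area_alloc_sketch(struct bpf_map *map, size_t size, int node)
{
        struct mem_cgroup *memcg, *old_memcg;
        void *ptr;

        bpf_map_memcg_enter(map, &old_memcg, &memcg);   /* pin the map's memcg, make it active */
        ptr = kvmalloc_node(size, GFP_USER | __GFP_ACCOUNT, node);
        bpf_map_memcg_exit(old_memcg, memcg);           /* restore the old memcg, drop the pin */

        return ptr;
}
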
mm/backing-dev.c
668
struct mem_cgroup *memcg;
mm/backing-dev.c
675
memcg = mem_cgroup_from_css(memcg_css);
mm/backing-dev.c
677
memcg_cgwb_list = &memcg->cgwb_list;
mm/backing-dev.c
931
void wb_memcg_offline(struct mem_cgroup *memcg)
mm/backing-dev.c
933
struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
mm/bpf_memcontrol.c
113
__bpf_kfunc unsigned long bpf_mem_cgroup_usage(struct mem_cgroup *memcg)
mm/bpf_memcontrol.c
115
return page_counter_read(&memcg->memory) * PAGE_SIZE;
mm/bpf_memcontrol.c
125
__bpf_kfunc unsigned long bpf_mem_cgroup_memory_events(struct mem_cgroup *memcg,
mm/bpf_memcontrol.c
131
return atomic_long_read(&memcg->memory_events[event]);
mm/bpf_memcontrol.c
143
__bpf_kfunc unsigned long bpf_mem_cgroup_page_state(struct mem_cgroup *memcg, int idx)
mm/bpf_memcontrol.c
148
return memcg_page_state_output(memcg, idx);
mm/bpf_memcontrol.c
157
__bpf_kfunc void bpf_mem_cgroup_flush_stats(struct mem_cgroup *memcg)
mm/bpf_memcontrol.c
159
mem_cgroup_flush_stats(memcg);
mm/bpf_memcontrol.c
47
struct mem_cgroup *memcg = NULL;
mm/bpf_memcontrol.c
63
memcg = container_of(css, struct mem_cgroup, css);
mm/bpf_memcontrol.c
68
return memcg;
mm/bpf_memcontrol.c
78
__bpf_kfunc void bpf_put_mem_cgroup(struct mem_cgroup *memcg)
mm/bpf_memcontrol.c
80
css_put(&memcg->css);
mm/bpf_memcontrol.c
92
__bpf_kfunc unsigned long bpf_mem_cgroup_vm_events(struct mem_cgroup *memcg,
mm/bpf_memcontrol.c
98
return memcg_events(memcg, event);
mm/damon/core.c
2120
struct mem_cgroup *memcg;
mm/damon/core.c
2125
memcg = mem_cgroup_get_from_id(goal->memcg_id);
mm/damon/core.c
2126
if (!memcg) {
mm/damon/core.c
2133
mem_cgroup_flush_stats(memcg);
mm/damon/core.c
2134
lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
mm/damon/core.c
2140
mem_cgroup_put(memcg);
mm/damon/ops-common.c
261
struct mem_cgroup *memcg;
mm/damon/ops-common.c
273
memcg = folio_memcg_check(folio);
mm/damon/ops-common.c
274
if (!memcg)
mm/damon/ops-common.c
277
matched = filter->memcg_id == mem_cgroup_id(memcg);
mm/damon/sysfs-schemes.c
2488
static bool damon_sysfs_memcg_path_eq(struct mem_cgroup *memcg,
mm/damon/sysfs-schemes.c
2492
cgroup_path(memcg->css.cgroup, memcg_path_buf, PATH_MAX);
mm/damon/sysfs-schemes.c
2501
struct mem_cgroup *memcg;
mm/damon/sysfs-schemes.c
2512
for (memcg = mem_cgroup_iter(NULL, NULL, NULL); memcg;
mm/damon/sysfs-schemes.c
2513
memcg = mem_cgroup_iter(NULL, memcg, NULL)) {
mm/damon/sysfs-schemes.c
2515
if (!mem_cgroup_online(memcg))
mm/damon/sysfs-schemes.c
2517
if (damon_sysfs_memcg_path_eq(memcg, path, memcg_path)) {
mm/damon/sysfs-schemes.c
2518
*id = mem_cgroup_id(memcg);
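
The sysfs-schemes.c hits at lines 2512-2518 show the standard way to visit every memory cgroup: seed mem_cgroup_iter() with NULL and keep feeding the previous position back in. A sketch of that walk; the loop body that prints the id is illustrative:

static void walk_all_memcgs_sketch(void)
{
        struct mem_cgroup *memcg;

        for (memcg = mem_cgroup_iter(NULL, NULL, NULL); memcg;
             memcg = mem_cgroup_iter(NULL, memcg, NULL)) {
                if (!mem_cgroup_online(memcg))
                        continue;
                pr_info("memcg id %llu\n",
                        (unsigned long long)mem_cgroup_id(memcg));
        }
        /* breaking out of this loop early would require mem_cgroup_iter_break() */
}
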
mm/huge_memory.c
1102
static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
mm/huge_memory.c
1104
return memcg ? &memcg->deferred_split_queue : split_queue_node(nid);
mm/huge_memory.c
1114
static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
mm/huge_memory.c
1120
static struct deferred_split *split_queue_lock(int nid, struct mem_cgroup *memcg)
mm/huge_memory.c
1125
queue = memcg_split_queue(nid, memcg);
mm/huge_memory.c
1132
if (unlikely(memcg_is_dying(memcg))) {
mm/huge_memory.c
1134
memcg = parent_mem_cgroup(memcg);
mm/huge_memory.c
1142
split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags)
mm/huge_memory.c
1147
queue = memcg_split_queue(nid, memcg);
mm/huge_memory.c
1149
if (unlikely(memcg_is_dying(memcg))) {
mm/huge_memory.c
1151
memcg = parent_mem_cgroup(memcg);
mm/huge_memory.c
4355
struct mem_cgroup *memcg;
mm/huge_memory.c
4357
memcg = folio_split_queue_memcg(folio, ds_queue);
mm/huge_memory.c
4360
if (memcg)
mm/huge_memory.c
4361
set_shrinker_bit(memcg, folio_nid(folio),
mm/huge_memory.c
4374
if (sc->memcg)
mm/huge_memory.c
4375
ds_queue = &sc->memcg->deferred_split_queue;
mm/huge_memory.c
4419
ds_queue = split_queue_lock_irqsave(sc->nid, sc->memcg, &flags);
mm/huge_memory.c
4500
void reparent_deferred_split_queue(struct mem_cgroup *memcg)
mm/huge_memory.c
4502
struct mem_cgroup *parent = parent_mem_cgroup(memcg);
mm/huge_memory.c
4503
struct deferred_split *ds_queue = &memcg->deferred_split_queue;
mm/internal.h
1662
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
mm/internal.h
619
struct mem_cgroup *memcg, pg_data_t *pgdat);
mm/list_lru.c
100
VM_WARN_ON(!css_is_dying(&memcg->css));
mm/list_lru.c
101
memcg = parent_mem_cgroup(memcg);
mm/list_lru.c
138
lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
mm/list_lru.c
162
struct mem_cgroup *memcg)
mm/list_lru.c
167
l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);
mm/list_lru.c
174
set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
mm/list_lru.c
202
struct mem_cgroup *memcg)
mm/list_lru.c
206
l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);
mm/list_lru.c
253
int nid, struct mem_cgroup *memcg)
mm/list_lru.c
259
l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
mm/list_lru.c
280
__list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
mm/list_lru.c
290
l = lock_list_lru_of_memcg(lru, nid, memcg, irq_off, true);
mm/list_lru.c
337
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
mm/list_lru.c
341
return __list_lru_walk_one(lru, nid, memcg, isolate,
mm/list_lru.c
347
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
mm/list_lru.c
351
return __list_lru_walk_one(lru, nid, memcg, isolate,
mm/list_lru.c
367
struct mem_cgroup *memcg;
mm/list_lru.c
372
memcg = mem_cgroup_from_private_id(index);
mm/list_lru.c
373
if (!mem_cgroup_tryget(memcg)) {
mm/list_lru.c
378
isolated += __list_lru_walk_one(lru, nid, memcg,
mm/list_lru.c
381
mem_cgroup_put(memcg);
mm/list_lru.c
467
void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
mm/list_lru.c
475
XA_STATE(xas, &lru->xa, memcg->kmemcg_id);
mm/list_lru.c
505
static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
mm/list_lru.c
508
int idx = memcg->kmemcg_id;
mm/list_lru.c
513
int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
mm/list_lru.c
521
if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
mm/list_lru.c
535
pos = memcg;
mm/list_lru.c
557
} while (pos != memcg && !css_is_dying(&pos->css));
mm/list_lru.c
80
lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
mm/list_lru.c
87
l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
mm/memcontrol-v1.c
1005
struct mem_cgroup *memcg = event->memcg;
mm/memcontrol-v1.c
1009
event->unregister_event(memcg, event->eventfd);
mm/memcontrol-v1.c
1016
css_put(&memcg->css);
mm/memcontrol-v1.c
1029
struct mem_cgroup *memcg = event->memcg;
mm/memcontrol-v1.c
1042
spin_lock(&memcg->event_list_lock);
mm/memcontrol-v1.c
1051
spin_unlock(&memcg->event_list_lock);
mm/memcontrol-v1.c
1079
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol-v1.c
1116
event->memcg = memcg;
mm/memcontrol-v1.c
1195
ret = event->register_event(memcg, event->eventfd, buf);
mm/memcontrol-v1.c
1201
spin_lock_irq(&memcg->event_list_lock);
mm/memcontrol-v1.c
1202
list_add(&event->list, &memcg->event_list);
mm/memcontrol-v1.c
1203
spin_unlock_irq(&memcg->event_list_lock);
mm/memcontrol-v1.c
1215
void memcg1_memcg_init(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
1217
INIT_LIST_HEAD(&memcg->oom_notify);
mm/memcontrol-v1.c
1218
mutex_init(&memcg->thresholds_lock);
mm/memcontrol-v1.c
1219
INIT_LIST_HEAD(&memcg->event_list);
mm/memcontrol-v1.c
1220
spin_lock_init(&memcg->event_list_lock);
mm/memcontrol-v1.c
1223
void memcg1_css_offline(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
1232
spin_lock_irq(&memcg->event_list_lock);
mm/memcontrol-v1.c
1233
list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
mm/memcontrol-v1.c
1237
spin_unlock_irq(&memcg->event_list_lock);
mm/memcontrol-v1.c
1244
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
1250
for_each_mem_cgroup_tree(iter, memcg) {
mm/memcontrol-v1.c
1257
mem_cgroup_iter_break(memcg, iter);
mm/memcontrol-v1.c
1268
for_each_mem_cgroup_tree(iter, memcg) {
mm/memcontrol-v1.c
1270
mem_cgroup_iter_break(memcg, iter);
mm/memcontrol-v1.c
1283
static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
1289
for_each_mem_cgroup_tree(iter, memcg)
mm/memcontrol-v1.c
1294
static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
1299
for_each_mem_cgroup_tree(iter, memcg)
mm/memcontrol-v1.c
1304
static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
1313
for_each_mem_cgroup_tree(iter, memcg)
mm/memcontrol-v1.c
1322
struct mem_cgroup *memcg;
mm/memcontrol-v1.c
1334
oom_wait_memcg = oom_wait_info->memcg;
mm/memcontrol-v1.c
1342
void memcg1_oom_recover(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
1352
if (memcg && memcg->under_oom)
mm/memcontrol-v1.c
1353
__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
mm/memcontrol-v1.c
1375
struct mem_cgroup *memcg = current->memcg_in_oom;
mm/memcontrol-v1.c
1380
if (!memcg)
mm/memcontrol-v1.c
1386
owait.memcg = memcg;
mm/memcontrol-v1.c
1393
mem_cgroup_mark_under_oom(memcg);
mm/memcontrol-v1.c
1395
locked = mem_cgroup_oom_trylock(memcg);
mm/memcontrol-v1.c
1398
mem_cgroup_oom_notify(memcg);
mm/memcontrol-v1.c
1401
mem_cgroup_unmark_under_oom(memcg);
mm/memcontrol-v1.c
1405
mem_cgroup_oom_unlock(memcg);
mm/memcontrol-v1.c
1408
css_put(&memcg->css);
mm/memcontrol-v1.c
1413
bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked)
mm/memcontrol-v1.c
1433
if (READ_ONCE(memcg->oom_kill_disable)) {
mm/memcontrol-v1.c
1435
css_get(&memcg->css);
mm/memcontrol-v1.c
1436
current->memcg_in_oom = memcg;
mm/memcontrol-v1.c
1441
mem_cgroup_mark_under_oom(memcg);
mm/memcontrol-v1.c
1443
*locked = mem_cgroup_oom_trylock(memcg);
mm/memcontrol-v1.c
1446
mem_cgroup_oom_notify(memcg);
mm/memcontrol-v1.c
1448
mem_cgroup_unmark_under_oom(memcg);
mm/memcontrol-v1.c
1453
void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked)
mm/memcontrol-v1.c
1456
mem_cgroup_oom_unlock(memcg);
mm/memcontrol-v1.c
1461
static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
1468
struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
mm/memcontrol-v1.c
1481
limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
mm/memcontrol-v1.c
1482
max <= memcg->memsw.max;
mm/memcontrol-v1.c
1497
drain_all_stock(memcg);
mm/memcontrol-v1.c
1502
if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
mm/memcontrol-v1.c
1510
memcg1_oom_recover(memcg);
mm/memcontrol-v1.c
1520
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
1527
drain_all_stock(memcg);
mm/memcontrol-v1.c
1530
while (nr_retries && page_counter_read(&memcg->memory)) {
mm/memcontrol-v1.c
1534
if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
mm/memcontrol-v1.c
1546
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol-v1.c
1548
if (mem_cgroup_is_root(memcg))
mm/memcontrol-v1.c
1550
return mem_cgroup_force_empty(memcg) ?: nbytes;
mm/memcontrol-v1.c
1575
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol-v1.c
1580
counter = &memcg->memory;
mm/memcontrol-v1.c
1583
counter = &memcg->memsw;
mm/memcontrol-v1.c
1586
counter = &memcg->kmem;
mm/memcontrol-v1.c
1589
counter = &memcg->tcpmem;
mm/memcontrol-v1.c
1597
if (counter == &memcg->memory)
mm/memcontrol-v1.c
1598
return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
mm/memcontrol-v1.c
1599
if (counter == &memcg->memsw)
mm/memcontrol-v1.c
1600
return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
mm/memcontrol-v1.c
1609
return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
mm/memcontrol-v1.c
1625
static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
mm/memcontrol-v1.c
1631
ret = page_counter_set_max(&memcg->tcpmem, max);
mm/memcontrol-v1.c
1635
if (!memcg->tcpmem_active) {
mm/memcontrol-v1.c
1653
memcg->tcpmem_active = true;
mm/memcontrol-v1.c
1667
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol-v1.c
167
static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
1678
if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
mm/memcontrol-v1.c
1684
ret = mem_cgroup_resize_max(memcg, nr_pages, false);
mm/memcontrol-v1.c
1687
ret = mem_cgroup_resize_max(memcg, nr_pages, true);
mm/memcontrol-v1.c
169
unsigned long nr_pages = page_counter_read(&memcg->memory);
mm/memcontrol-v1.c
170
unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
mm/memcontrol-v1.c
1700
ret = memcg_update_tcp_max(memcg, nr_pages);
mm/memcontrol-v1.c
1711
WRITE_ONCE(memcg->soft_limit, nr_pages);
mm/memcontrol-v1.c
1722
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol-v1.c
1727
counter = &memcg->memory;
mm/memcontrol-v1.c
1730
counter = &memcg->memsw;
mm/memcontrol-v1.c
1733
counter = &memcg->kmem;
mm/memcontrol-v1.c
1736
counter = &memcg->tcpmem;
mm/memcontrol-v1.c
1762
static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
1765
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
mm/memcontrol-v1.c
1782
static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
179
static void memcg1_update_tree(struct mem_cgroup *memcg, int nid)
mm/memcontrol-v1.c
1793
nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
mm/memcontrol-v1.c
1795
nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
mm/memcontrol-v1.c
1815
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
mm/memcontrol-v1.c
1817
mem_cgroup_flush_stats(memcg);
mm/memcontrol-v1.c
1821
mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
mm/memcontrol-v1.c
1825
mem_cgroup_node_nr_lru_pages(memcg, nid,
mm/memcontrol-v1.c
1833
mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
mm/memcontrol-v1.c
1837
mem_cgroup_node_nr_lru_pages(memcg, nid,
mm/memcontrol-v1.c
186
if (soft_limit_excess(memcg))
mm/memcontrol-v1.c
187
lru_gen_soft_reclaim(memcg, nid);
mm/memcontrol-v1.c
1890
void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
mm/memcontrol-v1.c
1898
mem_cgroup_flush_stats(memcg);
mm/memcontrol-v1.c
1903
nr = memcg_page_state_local_output(memcg, memcg1_stats[i]);
mm/memcontrol-v1.c
1909
memcg_events_local(memcg, memcg1_events[i]));
mm/memcontrol-v1.c
1913
memcg_page_state_local(memcg, NR_LRU_BASE + i) *
mm/memcontrol-v1.c
1918
for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
mm/memcontrol-v1.c
1930
nr = memcg_page_state_output(memcg, memcg1_stats[i]);
mm/memcontrol-v1.c
1938
(u64)memcg_events(memcg, memcg1_events[i]));
mm/memcontrol-v1.c
1942
(u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
mm/memcontrol-v1.c
1953
mz = memcg->nodeinfo[pgdat->node_id];
mm/memcontrol-v1.c
1967
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol-v1.c
1969
return mem_cgroup_swappiness(memcg);
mm/memcontrol-v1.c
1975
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol-v1.c
198
for (; memcg; memcg = parent_mem_cgroup(memcg)) {
mm/memcontrol-v1.c
1980
if (!mem_cgroup_is_root(memcg)) {
mm/memcontrol-v1.c
1983
WRITE_ONCE(memcg->swappiness, val);
mm/memcontrol-v1.c
199
mz = memcg->nodeinfo[nid];
mm/memcontrol-v1.c
1992
struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
mm/memcontrol-v1.c
1994
seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
mm/memcontrol-v1.c
1995
seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
mm/memcontrol-v1.c
1997
atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
mm/memcontrol-v1.c
200
excess = soft_limit_excess(memcg);
mm/memcontrol-v1.c
2004
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol-v1.c
2011
if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
mm/memcontrol-v1.c
2014
WRITE_ONCE(memcg->oom_kill_disable, val);
mm/memcontrol-v1.c
2016
memcg1_oom_recover(memcg);
mm/memcontrol-v1.c
2187
void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages)
mm/memcontrol-v1.c
2191
page_counter_charge(&memcg->kmem, nr_pages);
mm/memcontrol-v1.c
2193
page_counter_uncharge(&memcg->kmem, -nr_pages);
mm/memcontrol-v1.c
2197
bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
mm/memcontrol-v1.c
2202
if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
mm/memcontrol-v1.c
2203
memcg->tcpmem_pressure = 0;
mm/memcontrol-v1.c
2206
memcg->tcpmem_pressure = 1;
mm/memcontrol-v1.c
2208
page_counter_charge(&memcg->tcpmem, nr_pages);
mm/memcontrol-v1.c
2214
bool memcg1_alloc_events(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
2216
memcg->events_percpu = alloc_percpu_gfp(struct memcg1_events_percpu,
mm/memcontrol-v1.c
2218
return !!memcg->events_percpu;
mm/memcontrol-v1.c
222
void memcg1_remove_from_trees(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
2221
void memcg1_free_events(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
2223
free_percpu(memcg->events_percpu);
mm/memcontrol-v1.c
229
mz = memcg->nodeinfo[nid];
mm/memcontrol-v1.c
254
if (!soft_limit_excess(mz->memcg) ||
mm/memcontrol-v1.c
255
!css_tryget(&mz->memcg->css))
mm/memcontrol-v1.c
362
reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
mm/memcontrol-v1.c
375
excess = soft_limit_excess(mz->memcg);
mm/memcontrol-v1.c
387
css_put(&mz->memcg->css);
mm/memcontrol-v1.c
400
css_put(&next_mz->memcg->css);
mm/memcontrol-v1.c
430
static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
mm/memcontrol-v1.c
434
if (mem_cgroup_is_root(memcg)) {
mm/memcontrol-v1.c
445
val = page_counter_read(&memcg->memory);
mm/memcontrol-v1.c
447
val = page_counter_read(&memcg->memsw);
mm/memcontrol-v1.c
452
static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
mm/memcontrol-v1.c
460
t = rcu_dereference(memcg->thresholds.primary);
mm/memcontrol-v1.c
462
t = rcu_dereference(memcg->memsw_thresholds.primary);
mm/memcontrol-v1.c
467
usage = mem_cgroup_usage(memcg, swap);
mm/memcontrol-v1.c
503
static void mem_cgroup_threshold(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
505
while (memcg) {
mm/memcontrol-v1.c
506
__mem_cgroup_threshold(memcg, false);
mm/memcontrol-v1.c
508
__mem_cgroup_threshold(memcg, true);
mm/memcontrol-v1.c
510
memcg = parent_mem_cgroup(memcg);
mm/memcontrol-v1.c
533
static void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
mm/memcontrol-v1.c
537
count_memcg_events(memcg, PGPGIN, 1);
mm/memcontrol-v1.c
539
count_memcg_events(memcg, PGPGOUT, 1);
mm/memcontrol-v1.c
543
__this_cpu_add(memcg->events_percpu->nr_page_events, nr_pages);
mm/memcontrol-v1.c
549
static bool memcg1_event_ratelimit(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
554
val = __this_cpu_read(memcg->events_percpu->nr_page_events);
mm/memcontrol-v1.c
555
next = __this_cpu_read(memcg->events_percpu->targets[target]);
mm/memcontrol-v1.c
56
struct mem_cgroup *memcg;
mm/memcontrol-v1.c
568
__this_cpu_write(memcg->events_percpu->targets[target], next);
mm/memcontrol-v1.c
578
static void memcg1_check_events(struct mem_cgroup *memcg, int nid)
mm/memcontrol-v1.c
584
if (unlikely(memcg1_event_ratelimit(memcg,
mm/memcontrol-v1.c
588
do_softlimit = memcg1_event_ratelimit(memcg,
mm/memcontrol-v1.c
590
mem_cgroup_threshold(memcg);
mm/memcontrol-v1.c
592
memcg1_update_tree(memcg, nid);
mm/memcontrol-v1.c
596
void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
mm/memcontrol-v1.c
601
memcg1_charge_statistics(memcg, folio_nr_pages(folio));
mm/memcontrol-v1.c
602
memcg1_check_events(memcg, folio_nid(folio));
mm/memcontrol-v1.c
615
struct mem_cgroup *memcg, *swap_memcg;
mm/memcontrol-v1.c
627
memcg = folio_memcg(folio);
mm/memcontrol-v1.c
629
VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
mm/memcontrol-v1.c
630
if (!memcg)
mm/memcontrol-v1.c
638
swap_memcg = mem_cgroup_private_id_get_online(memcg);
mm/memcontrol-v1.c
650
if (!mem_cgroup_is_root(memcg))
mm/memcontrol-v1.c
651
page_counter_uncharge(&memcg->memory, nr_entries);
mm/memcontrol-v1.c
653
if (memcg != swap_memcg) {
mm/memcontrol-v1.c
656
page_counter_uncharge(&memcg->memsw, nr_entries);
mm/memcontrol-v1.c
667
memcg1_charge_statistics(memcg, -folio_nr_pages(folio));
mm/memcontrol-v1.c
669
memcg1_check_events(memcg, folio_nid(folio));
mm/memcontrol-v1.c
671
css_put(&memcg->css);
mm/memcontrol-v1.c
70
int (*register_event)(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
708
void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
mm/memcontrol-v1.c
714
count_memcg_events(memcg, PGPGOUT, pgpgout);
mm/memcontrol-v1.c
715
__this_cpu_add(memcg->events_percpu->nr_page_events, nr_memory);
mm/memcontrol-v1.c
716
memcg1_check_events(memcg, nid);
mm/memcontrol-v1.c
734
static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
740
list_for_each_entry(ev, &memcg->oom_notify, list)
mm/memcontrol-v1.c
747
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
mm/memcontrol-v1.c
751
for_each_mem_cgroup_tree(iter, memcg)
mm/memcontrol-v1.c
755
static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
768
mutex_lock(&memcg->thresholds_lock);
mm/memcontrol-v1.c
77
void (*unregister_event)(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
771
thresholds = &memcg->thresholds;
mm/memcontrol-v1.c
772
usage = mem_cgroup_usage(memcg, false);
mm/memcontrol-v1.c
774
thresholds = &memcg->memsw_thresholds;
mm/memcontrol-v1.c
775
usage = mem_cgroup_usage(memcg, true);
mm/memcontrol-v1.c
781
__mem_cgroup_threshold(memcg, type == _MEMSWAP);
mm/memcontrol-v1.c
830
mutex_unlock(&memcg->thresholds_lock);
mm/memcontrol-v1.c
835
static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
838
return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
mm/memcontrol-v1.c
841
static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
844
return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
mm/memcontrol-v1.c
847
static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
855
mutex_lock(&memcg->thresholds_lock);
mm/memcontrol-v1.c
858
thresholds = &memcg->thresholds;
mm/memcontrol-v1.c
859
usage = mem_cgroup_usage(memcg, false);
mm/memcontrol-v1.c
861
thresholds = &memcg->memsw_thresholds;
mm/memcontrol-v1.c
862
usage = mem_cgroup_usage(memcg, true);
mm/memcontrol-v1.c
870
__mem_cgroup_threshold(memcg, type == _MEMSWAP);
mm/memcontrol-v1.c
929
mutex_unlock(&memcg->thresholds_lock);
mm/memcontrol-v1.c
932
static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
935
return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
mm/memcontrol-v1.c
938
static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
941
return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
mm/memcontrol-v1.c
944
static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
956
list_add(&event->list, &memcg->oom_notify);
mm/memcontrol-v1.c
959
if (memcg->under_oom)
mm/memcontrol-v1.c
966
static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
mm/memcontrol-v1.c
973
list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
mm/memcontrol-v1.h
100
static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg) {}
mm/memcontrol-v1.h
101
static inline void memcg1_css_offline(struct mem_cgroup *memcg) {}
mm/memcontrol-v1.h
103
static inline bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked) { return true; }
mm/memcontrol-v1.h
104
static inline void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked) {}
mm/memcontrol-v1.h
105
static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {}
mm/memcontrol-v1.h
108
struct mem_cgroup *memcg) {}
mm/memcontrol-v1.h
110
static inline void memcg1_uncharge_batch(struct mem_cgroup *memcg,
mm/memcontrol-v1.h
114
static inline void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) {}
mm/memcontrol-v1.h
116
static inline void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages) {}
mm/memcontrol-v1.h
117
static inline bool memcg1_tcpmem_active(struct mem_cgroup *memcg) { return false; }
mm/memcontrol-v1.h
118
static inline bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
mm/memcontrol-v1.h
120
static inline void memcg1_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) {}
mm/memcontrol-v1.h
27
unsigned long memcg_events(struct mem_cgroup *memcg, int event);
mm/memcontrol-v1.h
30
void mem_cgroup_private_id_get_many(struct mem_cgroup *memcg, unsigned int n);
mm/memcontrol-v1.h
31
struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg);
mm/memcontrol-v1.h
42
unsigned long memcg_events_local(struct mem_cgroup *memcg, int event);
mm/memcontrol-v1.h
43
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx);
mm/memcontrol-v1.h
44
unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
mm/memcontrol-v1.h
45
bool memcg1_alloc_events(struct mem_cgroup *memcg);
mm/memcontrol-v1.h
46
void memcg1_free_events(struct mem_cgroup *memcg);
mm/memcontrol-v1.h
48
void memcg1_memcg_init(struct mem_cgroup *memcg);
mm/memcontrol-v1.h
49
void memcg1_remove_from_trees(struct mem_cgroup *memcg);
mm/memcontrol-v1.h
51
static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
mm/memcontrol-v1.h
53
WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
mm/memcontrol-v1.h
57
void memcg1_css_offline(struct mem_cgroup *memcg);
mm/memcontrol-v1.h
67
bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
mm/memcontrol-v1.h
68
void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
mm/memcontrol-v1.h
69
void memcg1_oom_recover(struct mem_cgroup *memcg);
mm/memcontrol-v1.h
71
void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
mm/memcontrol-v1.h
72
void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
mm/memcontrol-v1.h
75
void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
mm/memcontrol-v1.h
77
void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages);
mm/memcontrol-v1.h
78
static inline bool memcg1_tcpmem_active(struct mem_cgroup *memcg)
mm/memcontrol-v1.h
80
return memcg->tcpmem_active;
mm/memcontrol-v1.h
82
bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
mm/memcontrol-v1.h
84
static inline void memcg1_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
mm/memcontrol-v1.h
86
page_counter_uncharge(&memcg->tcpmem, nr_pages);
mm/memcontrol-v1.h
95
static inline bool memcg1_alloc_events(struct mem_cgroup *memcg) { return true; }
mm/memcontrol-v1.h
96
static inline void memcg1_free_events(struct mem_cgroup *memcg) {}
mm/memcontrol-v1.h
98
static inline void memcg1_memcg_init(struct mem_cgroup *memcg) {}
mm/memcontrol-v1.h
99
static inline void memcg1_remove_from_trees(struct mem_cgroup *memcg) {}
mm/memcontrol.c
1001
return memcg;
mm/memcontrol.c
1138
struct mem_cgroup *memcg = dead_memcg;
mm/memcontrol.c
1142
__invalidate_reclaim_iterators(memcg, dead_memcg);
mm/memcontrol.c
1143
last = memcg;
mm/memcontrol.c
1144
} while ((memcg = parent_mem_cgroup(memcg)));
mm/memcontrol.c
115
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
mm/memcontrol.c
117
if (!memcg)
mm/memcontrol.c
1170
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
mm/memcontrol.c
1176
BUG_ON(mem_cgroup_is_root(memcg));
mm/memcontrol.c
1178
for_each_mem_cgroup_tree(iter, memcg) {
mm/memcontrol.c
118
memcg = root_mem_cgroup;
mm/memcontrol.c
119
return &memcg->vmpressure;
mm/memcontrol.c
1190
mem_cgroup_iter_break(memcg, iter);
mm/memcontrol.c
1199
struct mem_cgroup *memcg;
mm/memcontrol.c
1204
memcg = folio_memcg(folio);
mm/memcontrol.c
1206
if (!memcg)
mm/memcontrol.c
1209
VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
mm/memcontrol.c
1325
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
mm/memcontrol.c
1331
count = page_counter_read(&memcg->memory);
mm/memcontrol.c
1332
limit = READ_ONCE(memcg->memory.max);
mm/memcontrol.c
1337
count = page_counter_read(&memcg->memsw);
mm/memcontrol.c
1338
limit = READ_ONCE(memcg->memsw.max);
mm/memcontrol.c
138
static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
mm/memcontrol.c
1455
unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
mm/memcontrol.c
1457
return memcg_page_state(memcg, item) *
mm/memcontrol.c
1462
unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
mm/memcontrol.c
1464
return memcg_page_state_local(memcg, item) *
mm/memcontrol.c
1481
static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
mm/memcontrol.c
1495
mem_cgroup_flush_stats(memcg);
mm/memcontrol.c
1505
size = memcg_page_state_output(memcg, memory_stats[i].idx);
mm/memcontrol.c
1509
size += memcg_page_state_output(memcg,
mm/memcontrol.c
1517
memcg_events(memcg, PGSCAN_KSWAPD) +
mm/memcontrol.c
1518
memcg_events(memcg, PGSCAN_DIRECT) +
mm/memcontrol.c
1519
memcg_events(memcg, PGSCAN_PROACTIVE) +
mm/memcontrol.c
1520
memcg_events(memcg, PGSCAN_KHUGEPAGED));
mm/memcontrol.c
1522
memcg_events(memcg, PGSTEAL_KSWAPD) +
mm/memcontrol.c
1523
memcg_events(memcg, PGSTEAL_DIRECT) +
mm/memcontrol.c
1524
memcg_events(memcg, PGSTEAL_PROACTIVE) +
mm/memcontrol.c
1525
memcg_events(memcg, PGSTEAL_KHUGEPAGED));
mm/memcontrol.c
1535
memcg_events(memcg, memcg_vm_event_stat[i]));
mm/memcontrol.c
1539
static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
mm/memcontrol.c
1542
memcg_stat_format(memcg, s);
mm/memcontrol.c
1544
memcg1_stat_format(memcg, s);
mm/memcontrol.c
1558
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
mm/memcontrol.c
1562
if (memcg) {
mm/memcontrol.c
1564
pr_cont_cgroup_path(memcg->css.cgroup);
mm/memcontrol.c
1579
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
mm/memcontrol.c
1589
memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]);
mm/memcontrol.c
1591
memory_failcnt = memcg->memory.failcnt;
mm/memcontrol.c
1594
K((u64)page_counter_read(&memcg->memory)),
mm/memcontrol.c
1595
K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt);
mm/memcontrol.c
1598
K((u64)page_counter_read(&memcg->swap)),
mm/memcontrol.c
1599
K((u64)READ_ONCE(memcg->swap.max)),
mm/memcontrol.c
1600
atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
mm/memcontrol.c
1604
K((u64)page_counter_read(&memcg->memsw)),
mm/memcontrol.c
1605
K((u64)memcg->memsw.max), memcg->memsw.failcnt);
mm/memcontrol.c
1607
K((u64)page_counter_read(&memcg->kmem)),
mm/memcontrol.c
1608
K((u64)memcg->kmem.max), memcg->kmem.failcnt);
mm/memcontrol.c
1613
pr_cont_cgroup_path(memcg->css.cgroup);
mm/memcontrol.c
1616
memory_stat_format(memcg, &s);
mm/memcontrol.c
1623
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
mm/memcontrol.c
1625
unsigned long max = READ_ONCE(memcg->memory.max);
mm/memcontrol.c
1628
if (mem_cgroup_swappiness(memcg)) {
mm/memcontrol.c
1630
unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
mm/memcontrol.c
1635
if (mem_cgroup_swappiness(memcg))
mm/memcontrol.c
1636
max += min(READ_ONCE(memcg->swap.max),
mm/memcontrol.c
1642
void __memcg_memory_event(struct mem_cgroup *memcg,
mm/memcontrol.c
1651
atomic_long_inc(&memcg->memory_events_local[event]);
mm/memcontrol.c
1653
cgroup_file_notify(&memcg->events_local_file);
mm/memcontrol.c
1656
atomic_long_inc(&memcg->memory_events[event]);
mm/memcontrol.c
1659
cgroup_file_notify(&memcg->swap_events_file);
mm/memcontrol.c
1661
cgroup_file_notify(&memcg->events_file);
mm/memcontrol.c
1668
} while ((memcg = parent_mem_cgroup(memcg)) &&
mm/memcontrol.c
1669
!mem_cgroup_is_root(memcg));
mm/memcontrol.c
1673
static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
mm/memcontrol.c
1679
.memcg = memcg,
mm/memcontrol.c
1688
if (mem_cgroup_margin(memcg) >= (1 << order))
mm/memcontrol.c
1706
static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
mm/memcontrol.c
1713
memcg_memory_event(memcg, MEMCG_OOM);
mm/memcontrol.c
1715
if (!memcg1_oom_prepare(memcg, &locked))
mm/memcontrol.c
1718
ret = mem_cgroup_out_of_memory(memcg, mask, order);
mm/memcontrol.c
172
struct mem_cgroup *memcg;
mm/memcontrol.c
1720
memcg1_oom_finish(memcg, locked);
mm/memcontrol.c
1739
struct mem_cgroup *memcg;
mm/memcontrol.c
174
memcg = get_mem_cgroup_from_objcg(objcg);
mm/memcontrol.c
1749
memcg = mem_cgroup_from_task(victim);
mm/memcontrol.c
175
mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
mm/memcontrol.c
1750
if (mem_cgroup_is_root(memcg))
mm/memcontrol.c
1758
if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
mm/memcontrol.c
176
memcg1_account_kmem(memcg, -nr_pages);
mm/memcontrol.c
1766
for (; memcg; memcg = parent_mem_cgroup(memcg)) {
mm/memcontrol.c
1767
if (READ_ONCE(memcg->oom_group))
mm/memcontrol.c
1768
oom_group = memcg;
mm/memcontrol.c
177
if (!mem_cgroup_is_root(memcg))
mm/memcontrol.c
1770
if (memcg == oom_domain)
mm/memcontrol.c
178
memcg_uncharge(memcg, nr_pages);
mm/memcontrol.c
1782
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
mm/memcontrol.c
1785
pr_cont_cgroup_path(memcg->css.cgroup);
mm/memcontrol.c
179
mem_cgroup_put(memcg);
mm/memcontrol.c
1841
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
mm/memcontrol.c
1855
if (memcg != READ_ONCE(stock->cached[i]))
mm/memcontrol.c
1871
static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
mm/memcontrol.c
1873
page_counter_uncharge(&memcg->memory, nr_pages);
mm/memcontrol.c
1875
page_counter_uncharge(&memcg->memsw, nr_pages);
mm/memcontrol.c
1939
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
mm/memcontrol.c
1955
VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
mm/memcontrol.c
1963
memcg_uncharge(memcg, nr_pages);
mm/memcontrol.c
1972
if (memcg == READ_ONCE(stock->cached[i])) {
mm/memcontrol.c
1988
css_get(&memcg->css);
mm/memcontrol.c
1989
WRITE_ONCE(stock->cached[i], memcg);
mm/memcontrol.c
1999
struct mem_cgroup *memcg;
mm/memcontrol.c
2005
memcg = READ_ONCE(stock->cached[i]);
mm/memcontrol.c
2006
if (!memcg)
mm/memcontrol.c
2010
mem_cgroup_is_descendant(memcg, root_memcg)) {
mm/memcontrol.c
2088
static unsigned long reclaim_high(struct mem_cgroup *memcg,
mm/memcontrol.c
209
static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
mm/memcontrol.c
2097
if (page_counter_read(&memcg->memory) <=
mm/memcontrol.c
2098
READ_ONCE(memcg->memory.high))
mm/memcontrol.c
2101
memcg_memory_event(memcg, MEMCG_HIGH);
mm/memcontrol.c
2104
nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
mm/memcontrol.c
2109
} while ((memcg = parent_mem_cgroup(memcg)) &&
mm/memcontrol.c
2110
!mem_cgroup_is_root(memcg));
mm/memcontrol.c
2117
struct mem_cgroup *memcg;
mm/memcontrol.c
2119
memcg = container_of(work, struct mem_cgroup, high_work);
mm/memcontrol.c
2120
reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
mm/memcontrol.c
214
objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
mm/memcontrol.c
219
list_add(&objcg->list, &memcg->objcg_list);
mm/memcontrol.c
2194
static u64 mem_find_max_overage(struct mem_cgroup *memcg)
mm/memcontrol.c
2199
overage = calculate_overage(page_counter_read(&memcg->memory),
mm/memcontrol.c
2200
READ_ONCE(memcg->memory.high));
mm/memcontrol.c
2202
} while ((memcg = parent_mem_cgroup(memcg)) &&
mm/memcontrol.c
2203
!mem_cgroup_is_root(memcg));
mm/memcontrol.c
2208
static u64 swap_find_max_overage(struct mem_cgroup *memcg)
mm/memcontrol.c
221
list_for_each_entry(iter, &memcg->objcg_list, list)
mm/memcontrol.c
2213
overage = calculate_overage(page_counter_read(&memcg->swap),
mm/memcontrol.c
2214
READ_ONCE(memcg->swap.high));
mm/memcontrol.c
2216
memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
mm/memcontrol.c
2218
} while ((memcg = parent_mem_cgroup(memcg)) &&
mm/memcontrol.c
2219
!mem_cgroup_is_root(memcg));
mm/memcontrol.c
222
WRITE_ONCE(iter->memcg, parent);
mm/memcontrol.c
2228
static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
mm/memcontrol.c
224
list_splice(&memcg->objcg_list, &parent->objcg_list);
mm/memcontrol.c
2272
struct mem_cgroup *memcg;
mm/memcontrol.c
2275
memcg = get_mem_cgroup_from_mm(current->mm);
mm/memcontrol.c
2299
nr_reclaimed = reclaim_high(memcg,
mm/memcontrol.c
2307
penalty_jiffies = calculate_high_delay(memcg, nr_pages,
mm/memcontrol.c
2308
mem_find_max_overage(memcg));
mm/memcontrol.c
2310
penalty_jiffies += calculate_high_delay(memcg, nr_pages,
mm/memcontrol.c
2311
swap_find_max_overage(memcg));
mm/memcontrol.c
2352
css_put(&memcg->css);
mm/memcontrol.c
2355
static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
mm/memcontrol.c
2371
if (consume_stock(memcg, nr_pages))
mm/memcontrol.c
2379
page_counter_try_charge(&memcg->memsw, batch, &counter)) {
mm/memcontrol.c
2380
if (page_counter_try_charge(&memcg->memory, batch, &counter))
mm/memcontrol.c
2383
page_counter_uncharge(&memcg->memsw, batch);
mm/memcontrol.c
2484
page_counter_charge(&memcg->memory, nr_pages);
mm/memcontrol.c
2486
page_counter_charge(&memcg->memsw, nr_pages);
mm/memcontrol.c
2492
refill_stock(memcg, batch - nr_pages);
mm/memcontrol.c
2506
mem_high = page_counter_read(&memcg->memory) >
mm/memcontrol.c
2507
READ_ONCE(memcg->memory.high);
mm/memcontrol.c
2508
swap_high = page_counter_read(&memcg->swap) >
mm/memcontrol.c
2509
READ_ONCE(memcg->swap.high);
mm/memcontrol.c
2514
schedule_work(&memcg->high_work);
mm/memcontrol.c
2534
} while ((memcg = parent_mem_cgroup(memcg)));
mm/memcontrol.c
2550
static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
mm/memcontrol.c
2553
if (mem_cgroup_is_root(memcg))
mm/memcontrol.c
2556
return try_charge_memcg(memcg, gfp_mask, nr_pages);
mm/memcontrol.c
2559
static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
mm/memcontrol.c
256
struct mem_cgroup *memcg = folio_memcg(folio);
mm/memcontrol.c
2569
folio->memcg_data = (unsigned long)memcg;
mm/memcontrol.c
2573
static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
mm/memcontrol.c
258
if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
mm/memcontrol.c
2580
lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/memcontrol.c
2583
struct mem_cgroup_per_node *pn = memcg->nodeinfo[pgdat->node_id];
mm/memcontrol.c
2586
css_rstat_updated(&memcg->css, smp_processor_id());
mm/memcontrol.c
259
memcg = root_mem_cgroup;
mm/memcontrol.c
2594
static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
mm/memcontrol.c
2600
lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/memcontrol.c
2609
struct mem_cgroup *memcg;
mm/memcontrol.c
261
return &memcg->css;
mm/memcontrol.c
2612
memcg = obj_cgroup_memcg(objcg);
mm/memcontrol.c
2613
account_slab_nmi_safe(memcg, pgdat, idx, nr);
mm/memcontrol.c
2669
static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
mm/memcontrol.c
2673
for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
mm/memcontrol.c
2674
objcg = rcu_dereference(memcg->objcg);
mm/memcontrol.c
2684
struct mem_cgroup *memcg;
mm/memcontrol.c
2719
memcg = mem_cgroup_from_task(current);
mm/memcontrol.c
2720
objcg = __get_obj_cgroup_from_memcg(memcg);
mm/memcontrol.c
2735
struct mem_cgroup *memcg;
mm/memcontrol.c
2742
memcg = current->active_memcg;
mm/memcontrol.c
2743
if (unlikely(memcg))
mm/memcontrol.c
2756
memcg = this_cpu_read(int_active_memcg);
mm/memcontrol.c
2757
if (unlikely(memcg))
mm/memcontrol.c
2764
for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
mm/memcontrol.c
2771
objcg = rcu_dereference_check(memcg->objcg, 1);
mm/memcontrol.c
279
struct mem_cgroup *memcg;
mm/memcontrol.c
2790
struct mem_cgroup *memcg;
mm/memcontrol.c
2793
memcg = __folio_memcg(folio);
mm/memcontrol.c
2794
if (memcg)
mm/memcontrol.c
2795
objcg = __get_obj_cgroup_from_memcg(memcg);
mm/memcontrol.c
2804
static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
mm/memcontrol.c
2807
mod_memcg_state(memcg, MEMCG_KMEM, val);
mm/memcontrol.c
2810
css_rstat_updated(&memcg->css, smp_processor_id());
mm/memcontrol.c
2811
atomic_add(val, &memcg->kmem_stat);
mm/memcontrol.c
2815
static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
mm/memcontrol.c
2817
mod_memcg_state(memcg, MEMCG_KMEM, val);
mm/memcontrol.c
2829
struct mem_cgroup *memcg;
mm/memcontrol.c
2831
memcg = get_mem_cgroup_from_objcg(objcg);
mm/memcontrol.c
2833
account_kmem_nmi_safe(memcg, -nr_pages);
mm/memcontrol.c
2834
memcg1_account_kmem(memcg, -nr_pages);
mm/memcontrol.c
2835
if (!mem_cgroup_is_root(memcg))
mm/memcontrol.c
2836
refill_stock(memcg, nr_pages);
mm/memcontrol.c
2838
css_put(&memcg->css);
mm/memcontrol.c
284
memcg = folio_memcg_check(page_folio(page));
mm/memcontrol.c
2852
struct mem_cgroup *memcg;
mm/memcontrol.c
2855
memcg = get_mem_cgroup_from_objcg(objcg);
mm/memcontrol.c
2857
ret = try_charge_memcg(memcg, gfp, nr_pages);
mm/memcontrol.c
286
while (memcg && !css_is_online(&memcg->css))
mm/memcontrol.c
2861
account_kmem_nmi_safe(memcg, nr_pages);
mm/memcontrol.c
2862
memcg1_account_kmem(memcg, nr_pages);
mm/memcontrol.c
2864
css_put(&memcg->css);
mm/memcontrol.c
287
memcg = parent_mem_cgroup(memcg);
mm/memcontrol.c
288
if (memcg)
mm/memcontrol.c
289
ino = cgroup_ino(memcg->css.cgroup);
mm/memcontrol.c
3013
struct mem_cgroup *memcg;
mm/memcontrol.c
3015
memcg = get_mem_cgroup_from_objcg(old);
mm/memcontrol.c
3017
mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
mm/memcontrol.c
3018
memcg1_account_kmem(memcg, -nr_pages);
mm/memcontrol.c
3019
if (!mem_cgroup_is_root(memcg))
mm/memcontrol.c
3020
memcg_uncharge(memcg, nr_pages);
mm/memcontrol.c
3022
css_put(&memcg->css);
mm/memcontrol.c
3066
struct mem_cgroup *memcg;
mm/memcontrol.c
3071
memcg = obj_cgroup_memcg(objcg);
mm/memcontrol.c
3072
if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
mm/memcontrol.c
3217
struct mem_cgroup *memcg;
mm/memcontrol.c
3219
memcg = get_mem_cgroup_from_objcg(objcg);
mm/memcontrol.c
3220
ret = memcg_list_lru_alloc(memcg, lru, flags);
mm/memcontrol.c
3221
css_put(&memcg->css);
mm/memcontrol.c
3318
static int memcg_online_kmem(struct mem_cgroup *memcg)
mm/memcontrol.c
3325
if (unlikely(mem_cgroup_is_root(memcg)))
mm/memcontrol.c
3332
objcg->memcg = memcg;
mm/memcontrol.c
3333
rcu_assign_pointer(memcg->objcg, objcg);
mm/memcontrol.c
3335
memcg->orig_objcg = objcg;
mm/memcontrol.c
3339
memcg->kmemcg_id = memcg->id.id;
mm/memcontrol.c
3344
static void memcg_offline_kmem(struct mem_cgroup *memcg)
mm/memcontrol.c
3351
if (unlikely(mem_cgroup_is_root(memcg)))
mm/memcontrol.c
3354
parent = parent_mem_cgroup(memcg);
mm/memcontrol.c
3358
memcg_reparent_list_lrus(memcg, parent);
mm/memcontrol.c
3364
memcg_reparent_objcgs(memcg, parent);
mm/memcontrol.c
3371
static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
mm/memcontrol.c
3373
return wb_domain_init(&memcg->cgwb_domain, gfp);
mm/memcontrol.c
3376
static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
mm/memcontrol.c
3378
wb_domain_exit(&memcg->cgwb_domain);
mm/memcontrol.c
3381
static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
mm/memcontrol.c
3383
wb_domain_size_changed(&memcg->cgwb_domain);
mm/memcontrol.c
3388
struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
mm/memcontrol.c
3390
if (!memcg->css.parent)
mm/memcontrol.c
3393
return &memcg->cgwb_domain;
mm/memcontrol.c
3418
struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
mm/memcontrol.c
3421
mem_cgroup_flush_stats_ratelimited(memcg);
mm/memcontrol.c
3423
*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
mm/memcontrol.c
3424
*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
mm/memcontrol.c
3425
*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
mm/memcontrol.c
3426
memcg_page_state(memcg, NR_ACTIVE_FILE);
mm/memcontrol.c
3429
while ((parent = parent_mem_cgroup(memcg))) {
mm/memcontrol.c
3430
unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
mm/memcontrol.c
3431
READ_ONCE(memcg->memory.high));
mm/memcontrol.c
3432
unsigned long used = page_counter_read(&memcg->memory);
mm/memcontrol.c
3435
memcg = parent;
mm/memcontrol.c
3486
struct mem_cgroup *memcg = folio_memcg(folio);
mm/memcontrol.c
3501
frn = &memcg->cgwb_frn[i];
mm/memcontrol.c
3528
frn = &memcg->cgwb_frn[oldest];
mm/memcontrol.c
3538
struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
mm/memcontrol.c
3544
struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
mm/memcontrol.c
3565
static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
mm/memcontrol.c
3570
static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
mm/memcontrol.c
3574
static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
mm/memcontrol.c
3607
static void mem_cgroup_private_id_remove(struct mem_cgroup *memcg)
mm/memcontrol.c
3609
if (memcg->id.id > 0) {
mm/memcontrol.c
3610
xa_erase(&mem_cgroup_private_ids, memcg->id.id);
mm/memcontrol.c
3611
memcg->id.id = 0;
mm/memcontrol.c
3615
void __maybe_unused mem_cgroup_private_id_get_many(struct mem_cgroup *memcg,
mm/memcontrol.c
3618
refcount_add(n, &memcg->id.ref);
mm/memcontrol.c
3621
static void mem_cgroup_private_id_put_many(struct mem_cgroup *memcg, unsigned int n)
mm/memcontrol.c
3623
if (refcount_sub_and_test(n, &memcg->id.ref)) {
mm/memcontrol.c
3624
mem_cgroup_private_id_remove(memcg);
mm/memcontrol.c
3627
css_put(&memcg->css);
mm/memcontrol.c
3631
static inline void mem_cgroup_private_id_put(struct mem_cgroup *memcg)
mm/memcontrol.c
3633
mem_cgroup_private_id_put_many(memcg, 1);
mm/memcontrol.c
3636
struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg)
mm/memcontrol.c
3638
while (!refcount_inc_not_zero(&memcg->id.ref)) {
mm/memcontrol.c
3643
if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
mm/memcontrol.c
3647
memcg = parent_mem_cgroup(memcg);
mm/memcontrol.c
3648
if (!memcg)
mm/memcontrol.c
3649
memcg = root_mem_cgroup;
mm/memcontrol.c
3651
return memcg;
mm/memcontrol.c
3670
struct mem_cgroup *memcg = NULL;
mm/memcontrol.c
3678
memcg = container_of(css, struct mem_cgroup, css);
mm/memcontrol.c
3682
return memcg;
mm/memcontrol.c
3695
static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
mm/memcontrol.c
3715
pn->memcg = memcg;
mm/memcontrol.c
3717
memcg->nodeinfo[node] = pn;
mm/memcontrol.c
3724
static void __mem_cgroup_free(struct mem_cgroup *memcg)
mm/memcontrol.c
3728
obj_cgroup_put(memcg->orig_objcg);
mm/memcontrol.c
3731
free_mem_cgroup_per_node_info(memcg->nodeinfo[node]);
mm/memcontrol.c
3732
memcg1_free_events(memcg);
mm/memcontrol.c
3733
kfree(memcg->vmstats);
mm/memcontrol.c
3734
free_percpu(memcg->vmstats_percpu);
mm/memcontrol.c
3735
kfree(memcg);
mm/memcontrol.c
3738
static void mem_cgroup_free(struct mem_cgroup *memcg)
mm/memcontrol.c
3740
lru_gen_exit_memcg(memcg);
mm/memcontrol.c
3741
memcg_wb_domain_exit(memcg);
mm/memcontrol.c
3742
__mem_cgroup_free(memcg);
mm/memcontrol.c
3749
struct mem_cgroup *memcg;
mm/memcontrol.c
3754
memcg = kmem_cache_zalloc(memcg_cachep, GFP_KERNEL);
mm/memcontrol.c
3755
if (!memcg)
mm/memcontrol.c
3758
error = xa_alloc(&mem_cgroup_private_ids, &memcg->id.id, NULL,
mm/memcontrol.c
3764
memcg->vmstats = kzalloc_obj(struct memcg_vmstats, GFP_KERNEL_ACCOUNT);
mm/memcontrol.c
3765
if (!memcg->vmstats)
mm/memcontrol.c
3768
memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
mm/memcontrol.c
3770
if (!memcg->vmstats_percpu)
mm/memcontrol.c
3773
if (!memcg1_alloc_events(memcg))
mm/memcontrol.c
3779
statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
mm/memcontrol.c
3781
statc->vmstats = memcg->vmstats;
mm/memcontrol.c
3785
if (!alloc_mem_cgroup_per_node_info(memcg, node))
mm/memcontrol.c
3788
if (memcg_wb_domain_init(memcg, GFP_KERNEL))
mm/memcontrol.c
3791
INIT_WORK(&memcg->high_work, high_work_func);
mm/memcontrol.c
3792
vmpressure_init(&memcg->vmpressure);
mm/memcontrol.c
3793
INIT_LIST_HEAD(&memcg->memory_peaks);
mm/memcontrol.c
3794
INIT_LIST_HEAD(&memcg->swap_peaks);
mm/memcontrol.c
3795
spin_lock_init(&memcg->peaks_lock);
mm/memcontrol.c
3796
memcg->socket_pressure = get_jiffies_64();
mm/memcontrol.c
3798
seqlock_init(&memcg->socket_pressure_seqlock);
mm/memcontrol.c
3800
memcg1_memcg_init(memcg);
mm/memcontrol.c
3801
memcg->kmemcg_id = -1;
mm/memcontrol.c
3802
INIT_LIST_HEAD(&memcg->objcg_list);
mm/memcontrol.c
3804
INIT_LIST_HEAD(&memcg->cgwb_list);
mm/memcontrol.c
3806
memcg->cgwb_frn[i].done =
mm/memcontrol.c
3810
spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
mm/memcontrol.c
3811
INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
mm/memcontrol.c
3812
memcg->deferred_split_queue.split_queue_len = 0;
mm/memcontrol.c
3814
lru_gen_init_memcg(memcg);
mm/memcontrol.c
3815
return memcg;
mm/memcontrol.c
3817
mem_cgroup_private_id_remove(memcg);
mm/memcontrol.c
3818
__mem_cgroup_free(memcg);
mm/memcontrol.c
3826
struct mem_cgroup *memcg, *old_memcg;
mm/memcontrol.c
3830
memcg = mem_cgroup_alloc(parent);
mm/memcontrol.c
3832
if (IS_ERR(memcg))
mm/memcontrol.c
3833
return ERR_CAST(memcg);
mm/memcontrol.c
3835
page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
mm/memcontrol.c
3836
memcg1_soft_limit_reset(memcg);
mm/memcontrol.c
3838
memcg->zswap_max = PAGE_COUNTER_MAX;
mm/memcontrol.c
3839
WRITE_ONCE(memcg->zswap_writeback, true);
mm/memcontrol.c
3841
page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
mm/memcontrol.c
3843
WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
mm/memcontrol.c
3845
page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl);
mm/memcontrol.c
3846
page_counter_init(&memcg->swap, &parent->swap, false);
mm/memcontrol.c
3848
memcg->memory.track_failcnt = !memcg_on_dfl;
mm/memcontrol.c
3849
WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
mm/memcontrol.c
3850
page_counter_init(&memcg->kmem, &parent->kmem, false);
mm/memcontrol.c
3851
page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
mm/memcontrol.c
3856
page_counter_init(&memcg->memory, NULL, true);
mm/memcontrol.c
3857
page_counter_init(&memcg->swap, NULL, false);
mm/memcontrol.c
3859
page_counter_init(&memcg->kmem, NULL, false);
mm/memcontrol.c
3860
page_counter_init(&memcg->tcpmem, NULL, false);
mm/memcontrol.c
3862
root_mem_cgroup = memcg;
mm/memcontrol.c
3863
return &memcg->css;
mm/memcontrol.c
3872
return &memcg->css;
mm/memcontrol.c
3877
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol.c
3879
if (memcg_online_kmem(memcg))
mm/memcontrol.c
3887
if (alloc_shrinker_info(memcg))
mm/memcontrol.c
3890
if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
mm/memcontrol.c
3893
lru_gen_online_memcg(memcg);
mm/memcontrol.c
3896
refcount_set(&memcg->id.ref, 1);
mm/memcontrol.c
3909
xa_store(&mem_cgroup_private_ids, memcg->id.id, memcg, GFP_KERNEL);
mm/memcontrol.c
3913
memcg_offline_kmem(memcg);
mm/memcontrol.c
3915
mem_cgroup_private_id_remove(memcg);
mm/memcontrol.c
3921
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol.c
3923
memcg1_css_offline(memcg);
mm/memcontrol.c
3925
page_counter_set_min(&memcg->memory, 0);
mm/memcontrol.c
3926
page_counter_set_low(&memcg->memory, 0);
mm/memcontrol.c
3928
zswap_memcg_offline_cleanup(memcg);
mm/memcontrol.c
3930
memcg_offline_kmem(memcg);
mm/memcontrol.c
3931
reparent_deferred_split_queue(memcg);
mm/memcontrol.c
3932
reparent_shrinker_deferred(memcg);
mm/memcontrol.c
3933
wb_memcg_offline(memcg);
mm/memcontrol.c
3934
lru_gen_offline_memcg(memcg);
mm/memcontrol.c
3936
drain_all_stock(memcg);
mm/memcontrol.c
3938
mem_cgroup_private_id_put(memcg);
mm/memcontrol.c
3943
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol.c
3945
invalidate_reclaim_iterators(memcg);
mm/memcontrol.c
3946
lru_gen_release_memcg(memcg);
mm/memcontrol.c
3951
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol.c
3956
wb_wait_for_completion(&memcg->cgwb_frn[i].done);
mm/memcontrol.c
3961
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
mm/memcontrol.c
3967
vmpressure_cleanup(&memcg->vmpressure);
mm/memcontrol.c
3968
cancel_work_sync(&memcg->high_work);
mm/memcontrol.c
3969
memcg1_remove_from_trees(memcg);
mm/memcontrol.c
3970
free_shrinker_info(memcg);
mm/memcontrol.c
3971
mem_cgroup_free(memcg);
mm/memcontrol.c
3989
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol.c
3991
page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
mm/memcontrol.c
3992
page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
mm/memcontrol.c
3994
page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
mm/memcontrol.c
3995
page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
mm/memcontrol.c
3997
page_counter_set_min(&memcg->memory, 0);
mm/memcontrol.c
3998
page_counter_set_low(&memcg->memory, 0);
mm/memcontrol.c
3999
page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
mm/memcontrol.c
4000
memcg1_soft_limit_reset(memcg);
mm/memcontrol.c
4001
page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
mm/memcontrol.c
4002
memcg_wb_domain_size_changed(memcg);
mm/memcontrol.c
4059
static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
mm/memcontrol.c
4064
if (atomic_read(&memcg->kmem_stat)) {
mm/memcontrol.c
4065
int kmem = atomic_xchg(&memcg->kmem_stat, 0);
mm/memcontrol.c
4068
memcg->vmstats->state[index] += kmem;
mm/memcontrol.c
4074
struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
mm/memcontrol.c
4100
static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
mm/memcontrol.c
4107
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol.c
4108
struct mem_cgroup *parent = parent_mem_cgroup(memcg);
mm/memcontrol.c
4113
flush_nmi_stats(memcg, parent, cpu);
mm/memcontrol.c
4115
statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
mm/memcontrol.c
4118
.aggregate = memcg->vmstats->state,
mm/memcontrol.c
4119
.local = memcg->vmstats->state_local,
mm/memcontrol.c
4120
.pending = memcg->vmstats->state_pending,
mm/memcontrol.c
4129
.aggregate = memcg->vmstats->events,
mm/memcontrol.c
4130
.local = memcg->vmstats->events_local,
mm/memcontrol.c
4131
.pending = memcg->vmstats->events_pending,
mm/memcontrol.c
4140
struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
mm/memcontrol.c
4164
if (atomic_read(&memcg->vmstats->stats_updates))
mm/memcontrol.c
4165
atomic_set(&memcg->vmstats->stats_updates, 0);
mm/memcontrol.c
4248
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol.c
4250
return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
mm/memcontrol.c
4272
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
mm/memcontrol.c
4274
return peak_show(sf, v, &memcg->memory);
mm/memcontrol.c
4287
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
4294
spin_lock(&memcg->peaks_lock);
mm/memcontrol.c
4296
spin_unlock(&memcg->peaks_lock);
mm/memcontrol.c
4305
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
4308
spin_lock(&memcg->peaks_lock);
mm/memcontrol.c
4322
spin_unlock(&memcg->peaks_lock);
mm/memcontrol.c
4330
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
4332
return peak_write(of, buf, nbytes, off, &memcg->memory,
mm/memcontrol.c
4333
&memcg->memory_peaks);
mm/memcontrol.c
4347
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
4356
page_counter_set_min(&memcg->memory, min);
mm/memcontrol.c
4370
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
4379
page_counter_set_low(&memcg->memory, low);
mm/memcontrol.c
4393
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
4404
page_counter_set_high(&memcg->memory, high);
mm/memcontrol.c
4410
unsigned long nr_pages = page_counter_read(&memcg->memory);
mm/memcontrol.c
4420
drain_all_stock(memcg);
mm/memcontrol.c
4425
reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
mm/memcontrol.c
4432
memcg_wb_domain_size_changed(memcg);
mm/memcontrol.c
4445
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
4456
xchg(&memcg->memory.max, max);
mm/memcontrol.c
4462
unsigned long nr_pages = page_counter_read(&memcg->memory);
mm/memcontrol.c
4471
drain_all_stock(memcg);
mm/memcontrol.c
4477
if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
mm/memcontrol.c
4483
memcg_memory_event(memcg, MEMCG_OOM);
mm/memcontrol.c
4484
if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
mm/memcontrol.c
4489
memcg_wb_domain_size_changed(memcg);
mm/memcontrol.c
4513
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
mm/memcontrol.c
4515
__memory_events_show(m, memcg->memory_events);
mm/memcontrol.c
4521
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
mm/memcontrol.c
4523
__memory_events_show(m, memcg->memory_events_local);
mm/memcontrol.c
4529
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
mm/memcontrol.c
4536
memory_stat_format(memcg, &s);
mm/memcontrol.c
4553
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
mm/memcontrol.c
4555
mem_cgroup_flush_stats(memcg);
mm/memcontrol.c
4568
lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
mm/memcontrol.c
4582
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
mm/memcontrol.c
4584
seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
mm/memcontrol.c
4592
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
4606
WRITE_ONCE(memcg->oom_group, oom_group);
mm/memcontrol.c
4614
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
4617
ret = user_proactive_reclaim(buf, memcg, NULL);
mm/memcontrol.c
4725
struct mem_cgroup *memcg)
mm/memcontrol.c
4736
page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
mm/memcontrol.c
4739
static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
mm/memcontrol.c
4744
ret = try_charge(memcg, gfp, folio_nr_pages(folio));
mm/memcontrol.c
4748
css_get(&memcg->css);
mm/memcontrol.c
4749
commit_charge(folio, memcg);
mm/memcontrol.c
4750
memcg1_commit_charge(folio, memcg);
mm/memcontrol.c
4757
struct mem_cgroup *memcg;
mm/memcontrol.c
4760
memcg = get_mem_cgroup_from_mm(mm);
mm/memcontrol.c
4761
ret = charge_memcg(folio, memcg, gfp);
mm/memcontrol.c
4762
css_put(&memcg->css);
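A minimal sketch of the lookup/put discipline that the __mem_cgroup_charge() and get_mem_cgroup_from_mm() entries above follow: every successful lookup returns a referenced memcg that must later be released with css_put() (or mem_cgroup_put()). The function name below is hypothetical and the body is illustrative only, not a copy of the listed code.

#include <linux/memcontrol.h>
#include <linux/mm_types.h>
#include <linux/printk.h>

static void example_inspect_mm_memcg(struct mm_struct *mm)
{
	/* Returns a referenced memcg; falls back to root_mem_cgroup. */
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);

	pr_info("memcg memory usage: %lu pages\n",
		page_counter_read(&memcg->memory));

	/* Pair the lookup with a css_put(), as the charge path above does. */
	css_put(&memcg->css);
}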
mm/memcontrol.c
4781
struct mem_cgroup *memcg = get_mem_cgroup_from_current();
mm/memcontrol.c
4790
!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
mm/memcontrol.c
4793
if (charge_memcg(folio, memcg, gfp))
mm/memcontrol.c
4797
mem_cgroup_put(memcg);
mm/memcontrol.c
4816
struct mem_cgroup *memcg;
mm/memcontrol.c
4825
memcg = mem_cgroup_from_private_id(id);
mm/memcontrol.c
4826
if (!memcg || !css_tryget_online(&memcg->css))
mm/memcontrol.c
4827
memcg = get_mem_cgroup_from_mm(mm);
mm/memcontrol.c
4830
ret = charge_memcg(folio, memcg, gfp);
mm/memcontrol.c
4832
css_put(&memcg->css);
mm/memcontrol.c
4837
struct mem_cgroup *memcg;
mm/memcontrol.c
4852
memcg_uncharge(ug->memcg, ug->nr_memory);
mm/memcontrol.c
4854
mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
mm/memcontrol.c
4855
memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
mm/memcontrol.c
4857
memcg1_oom_recover(ug->memcg);
mm/memcontrol.c
4860
memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
mm/memcontrol.c
4863
css_put(&ug->memcg->css);
mm/memcontrol.c
4869
struct mem_cgroup *memcg;
mm/memcontrol.c
4885
memcg = get_mem_cgroup_from_objcg(objcg);
mm/memcontrol.c
4887
memcg = __folio_memcg(folio);
mm/memcontrol.c
4890
if (!memcg)
mm/memcontrol.c
4893
if (ug->memcg != memcg) {
mm/memcontrol.c
4894
if (ug->memcg) {
mm/memcontrol.c
4898
ug->memcg = memcg;
mm/memcontrol.c
4902
css_get(&memcg->css);
mm/memcontrol.c
4915
if (!mem_cgroup_is_root(memcg))
mm/memcontrol.c
4923
css_put(&memcg->css);
mm/memcontrol.c
4947
if (ug.memcg)
mm/memcontrol.c
4963
struct mem_cgroup *memcg;
mm/memcontrol.c
4978
memcg = folio_memcg(old);
mm/memcontrol.c
4979
VM_WARN_ON_ONCE_FOLIO(!memcg, old);
mm/memcontrol.c
4980
if (!memcg)
mm/memcontrol.c
4984
if (!mem_cgroup_is_root(memcg)) {
mm/memcontrol.c
4985
page_counter_charge(&memcg->memory, nr_pages);
mm/memcontrol.c
4987
page_counter_charge(&memcg->memsw, nr_pages);
mm/memcontrol.c
4990
css_get(&memcg->css);
mm/memcontrol.c
4991
commit_charge(new, memcg);
mm/memcontrol.c
4992
memcg1_commit_charge(new, memcg);
mm/memcontrol.c
5008
struct mem_cgroup *memcg;
mm/memcontrol.c
5019
memcg = folio_memcg(old);
mm/memcontrol.c
5025
VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
mm/memcontrol.c
5026
if (!memcg)
mm/memcontrol.c
5030
commit_charge(new, memcg);
mm/memcontrol.c
5042
struct mem_cgroup *memcg;
mm/memcontrol.c
5052
memcg = mem_cgroup_from_task(current);
mm/memcontrol.c
5053
if (mem_cgroup_is_root(memcg))
mm/memcontrol.c
5055
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
mm/memcontrol.c
5057
if (css_tryget(&memcg->css))
mm/memcontrol.c
5058
sk->sk_memcg = memcg;
mm/memcontrol.c
5065
struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
mm/memcontrol.c
5067
if (memcg)
mm/memcontrol.c
5068
css_put(&memcg->css);
mm/memcontrol.c
5073
struct mem_cgroup *memcg;
mm/memcontrol.c
5080
memcg = mem_cgroup_from_sk(sk);
mm/memcontrol.c
5081
if (memcg)
mm/memcontrol.c
5082
css_get(&memcg->css);
mm/memcontrol.c
5099
struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
mm/memcontrol.c
5102
return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
mm/memcontrol.c
5104
if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) {
mm/memcontrol.c
5105
mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
mm/memcontrol.c
5119
struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
mm/memcontrol.c
5122
memcg1_uncharge_skmem(memcg, nr_pages);
mm/memcontrol.c
5126
mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
mm/memcontrol.c
5128
refill_stock(memcg, nr_pages);
mm/memcontrol.c
5212
struct mem_cgroup *memcg;
mm/memcontrol.c
5217
memcg = folio_memcg(folio);
mm/memcontrol.c
5219
VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
mm/memcontrol.c
5220
if (!memcg)
mm/memcontrol.c
5224
memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
mm/memcontrol.c
5228
memcg = mem_cgroup_private_id_get_online(memcg);
mm/memcontrol.c
5230
if (!mem_cgroup_is_root(memcg) &&
mm/memcontrol.c
5231
!page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
mm/memcontrol.c
5232
memcg_memory_event(memcg, MEMCG_SWAP_MAX);
mm/memcontrol.c
5233
memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
mm/memcontrol.c
5234
mem_cgroup_private_id_put(memcg);
mm/memcontrol.c
5240
mem_cgroup_private_id_get_many(memcg, nr_pages - 1);
mm/memcontrol.c
5241
mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
mm/memcontrol.c
5243
swap_cgroup_record(folio, mem_cgroup_private_id(memcg), entry);
mm/memcontrol.c
5255
struct mem_cgroup *memcg;
mm/memcontrol.c
5260
memcg = mem_cgroup_from_private_id(id);
mm/memcontrol.c
5261
if (memcg) {
mm/memcontrol.c
5262
if (!mem_cgroup_is_root(memcg)) {
mm/memcontrol.c
5264
page_counter_uncharge(&memcg->memsw, nr_pages);
mm/memcontrol.c
5266
page_counter_uncharge(&memcg->swap, nr_pages);
mm/memcontrol.c
5268
mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
mm/memcontrol.c
5269
mem_cgroup_private_id_put_many(memcg, nr_pages);
mm/memcontrol.c
5274
long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
mm/memcontrol.c
5280
for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
mm/memcontrol.c
5282
READ_ONCE(memcg->swap.max) -
mm/memcontrol.c
5283
page_counter_read(&memcg->swap));
mm/memcontrol.c
5289
struct mem_cgroup *memcg;
mm/memcontrol.c
5298
memcg = folio_memcg(folio);
mm/memcontrol.c
5299
if (!memcg)
mm/memcontrol.c
5302
for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
mm/memcontrol.c
5303
unsigned long usage = page_counter_read(&memcg->swap);
mm/memcontrol.c
5305
if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
mm/memcontrol.c
5306
usage * 2 >= READ_ONCE(memcg->swap.max))
mm/memcontrol.c
5329
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol.c
5331
return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
mm/memcontrol.c
5336
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
mm/memcontrol.c
5338
return peak_show(sf, v, &memcg->swap);
mm/memcontrol.c
5344
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
5346
return peak_write(of, buf, nbytes, off, &memcg->swap,
mm/memcontrol.c
5347
&memcg->swap_peaks);
mm/memcontrol.c
5359
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
5368
page_counter_set_high(&memcg->swap, high);
mm/memcontrol.c
5382
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
5391
xchg(&memcg->swap.max, max);
mm/memcontrol.c
5398
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
mm/memcontrol.c
5401
atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
mm/memcontrol.c
5403
atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
mm/memcontrol.c
5405
atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
mm/memcontrol.c
5460
struct mem_cgroup *memcg, *original_memcg;
mm/memcontrol.c
5467
for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
mm/memcontrol.c
5468
memcg = parent_mem_cgroup(memcg)) {
mm/memcontrol.c
5469
unsigned long max = READ_ONCE(memcg->zswap_max);
mm/memcontrol.c
5480
__mem_cgroup_flush_stats(memcg, true);
mm/memcontrol.c
5481
pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
mm/memcontrol.c
5501
struct mem_cgroup *memcg;
mm/memcontrol.c
5513
memcg = obj_cgroup_memcg(objcg);
mm/memcontrol.c
5514
mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
mm/memcontrol.c
5515
mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
mm/memcontrol.c
5528
struct mem_cgroup *memcg;
mm/memcontrol.c
5536
memcg = obj_cgroup_memcg(objcg);
mm/memcontrol.c
5537
mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
mm/memcontrol.c
5538
mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
mm/memcontrol.c
5542
bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
mm/memcontrol.c
5548
for (; memcg; memcg = parent_mem_cgroup(memcg))
mm/memcontrol.c
5549
if (!READ_ONCE(memcg->zswap_writeback))
mm/memcontrol.c
5558
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
mm/memcontrol.c
5560
mem_cgroup_flush_stats(memcg);
mm/memcontrol.c
5561
return memcg_page_state(memcg, MEMCG_ZSWAP_B);
mm/memcontrol.c
5573
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
5582
xchg(&memcg->zswap_max, max);
mm/memcontrol.c
5589
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
mm/memcontrol.c
5591
seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
mm/memcontrol.c
5598
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
mm/memcontrol.c
5608
WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
mm/memcontrol.c
5651
void mem_cgroup_node_filter_allowed(struct mem_cgroup *memcg, nodemask_t *mask)
mm/memcontrol.c
5655
if (!memcg)
mm/memcontrol.c
5664
cpuset_nodes_allowed(memcg->css.cgroup, &allowed);
mm/memcontrol.c
5668
void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
mm/memcontrol.c
567
static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
mm/memcontrol.c
5673
if (!memcg)
mm/memcontrol.c
5674
memcg = root_mem_cgroup;
mm/memcontrol.c
5677
K(atomic_long_read(&memcg->memory.children_min_usage)),
mm/memcontrol.c
5678
K(atomic_long_read(&memcg->memory.children_low_usage)));
mm/memcontrol.c
577
css_rstat_updated(&memcg->css, cpu);
mm/memcontrol.c
578
statc_pcpu = memcg->vmstats_percpu;
mm/memcontrol.c
599
static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
mm/memcontrol.c
601
bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);
mm/memcontrol.c
603
trace_memcg_flush_stats(memcg, atomic_read(&memcg->vmstats->stats_updates),
mm/memcontrol.c
609
if (mem_cgroup_is_root(memcg))
mm/memcontrol.c
612
css_rstat_flush(&memcg->css);
mm/memcontrol.c
624
void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
mm/memcontrol.c
629
if (!memcg)
mm/memcontrol.c
630
memcg = root_mem_cgroup;
mm/memcontrol.c
632
__mem_cgroup_flush_stats(memcg, false);
mm/memcontrol.c
635
void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
mm/memcontrol.c
639
mem_cgroup_flush_stats(memcg);
mm/memcontrol.c
652
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
mm/memcontrol.c
660
x = READ_ONCE(memcg->vmstats->state[i]);
mm/memcontrol.c
698
void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
mm/memcontrol.c
712
this_cpu_add(memcg->vmstats_percpu->state[i], val);
mm/memcontrol.c
714
memcg_rstat_updated(memcg, val, cpu);
mm/memcontrol.c
715
trace_mod_memcg_state(memcg, idx, val);
mm/memcontrol.c
722
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
mm/memcontrol.c
730
x = READ_ONCE(memcg->vmstats->state_local[i]);
mm/memcontrol.c
744
struct mem_cgroup *memcg;
mm/memcontrol.c
752
memcg = pn->memcg;
mm/memcontrol.c
757
this_cpu_add(memcg->vmstats_percpu->state[i], val);
mm/memcontrol.c
763
memcg_rstat_updated(memcg, val, cpu);
mm/memcontrol.c
764
trace_mod_memcg_lruvec_state(memcg, idx, val);
mm/memcontrol.c
793
struct mem_cgroup *memcg;
mm/memcontrol.c
798
memcg = folio_memcg(folio);
mm/memcontrol.c
800
if (!memcg) {
mm/memcontrol.c
806
lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/memcontrol.c
815
struct mem_cgroup *memcg;
mm/memcontrol.c
819
memcg = mem_cgroup_from_virt(p);
mm/memcontrol.c
827
if (!memcg) {
mm/memcontrol.c
830
lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/memcontrol.c
842
void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
mm/memcontrol.c
856
this_cpu_add(memcg->vmstats_percpu->events[i], count);
mm/memcontrol.c
857
memcg_rstat_updated(memcg, count, cpu);
mm/memcontrol.c
858
trace_count_memcg_events(memcg, idx, count);
mm/memcontrol.c
863
unsigned long memcg_events(struct mem_cgroup *memcg, int event)
mm/memcontrol.c
870
return READ_ONCE(memcg->vmstats->events[i]);
mm/memcontrol.c
882
unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
mm/memcontrol.c
889
return READ_ONCE(memcg->vmstats->events_local[i]);
mm/memcontrol.c
928
struct mem_cgroup *memcg;
mm/memcontrol.c
943
memcg = active_memcg();
mm/memcontrol.c
944
if (unlikely(memcg)) {
mm/memcontrol.c
946
css_get(&memcg->css);
mm/memcontrol.c
947
return memcg;
mm/memcontrol.c
956
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
mm/memcontrol.c
957
if (unlikely(!memcg))
mm/memcontrol.c
958
memcg = root_mem_cgroup;
mm/memcontrol.c
959
} while (!css_tryget(&memcg->css));
mm/memcontrol.c
961
return memcg;
mm/memcontrol.c
970
struct mem_cgroup *memcg;
mm/memcontrol.c
977
memcg = mem_cgroup_from_task(current);
mm/memcontrol.c
978
if (!css_tryget(&memcg->css)) {
mm/memcontrol.c
983
return memcg;
mm/memcontrol.c
992
struct mem_cgroup *memcg = folio_memcg(folio);
mm/memcontrol.c
998
if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
mm/memcontrol.c
999
memcg = root_mem_cgroup;
mm/migrate.c
2728
struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
mm/migrate.c
2729
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/migrate.c
2739
count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
mm/migrate.c
2745
mem_cgroup_put(memcg);
mm/migrate.c
673
struct mem_cgroup *memcg;
mm/migrate.c
675
memcg = folio_memcg(folio);
mm/migrate.c
676
old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
mm/migrate.c
677
new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
mm/oom_kill.c
1056
oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
mm/oom_kill.c
261
oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
mm/oom_kill.c
370
mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
mm/oom_kill.c
431
mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
mm/oom_kill.c
454
mem_cgroup_print_oom_context(oc->memcg, victim);
mm/oom_kill.c
469
mem_cgroup_print_oom_meminfo(oc->memcg);
mm/oom_kill.c
475
mem_cgroup_show_protected_memory(oc->memcg);
mm/oom_kill.c
74
return oc->memcg != NULL;
mm/page_alloc.c
4076
.memcg = NULL,
mm/page_io.c
308
struct mem_cgroup *memcg;
mm/page_io.c
310
memcg = folio_memcg(folio);
mm/page_io.c
311
if (!memcg)
mm/page_io.c
315
css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
mm/page_owner.c
516
struct mem_cgroup *memcg;
mm/page_owner.c
529
memcg = page_memcg_check(page);
mm/page_owner.c
530
if (!memcg)
mm/page_owner.c
533
online = css_is_online(&memcg->css);
mm/page_owner.c
534
cgroup_name(memcg->css.cgroup, name, sizeof(name));
mm/rmap.c
1026
struct mem_cgroup *memcg = pra->memcg;
mm/rmap.c
1041
if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
mm/rmap.c
1060
struct mem_cgroup *memcg, vm_flags_t *vm_flags)
mm/rmap.c
1065
.memcg = memcg,
mm/rmap.c
911
struct mem_cgroup *memcg;
mm/shrinker.c
102
free_shrinker_info(memcg);
mm/shrinker.c
106
static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
mm/shrinker.c
109
return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
mm/shrinker.c
113
static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size,
mm/shrinker.c
121
pn = memcg->nodeinfo[nid];
mm/shrinker.c
122
old = shrinker_info_protected(memcg, nid);
mm/shrinker.c
155
struct mem_cgroup *memcg;
mm/shrinker.c
165
memcg = mem_cgroup_iter(NULL, NULL, NULL);
mm/shrinker.c
167
ret = expand_one_shrinker_info(memcg, new_size, old_size,
mm/shrinker.c
170
mem_cgroup_iter_break(NULL, memcg);
mm/shrinker.c
173
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
mm/shrinker.c
196
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
mm/shrinker.c
198
if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
mm/shrinker.c
203
info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
mm/shrinker.c
253
struct mem_cgroup *memcg)
mm/shrinker.c
260
info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
mm/shrinker.c
269
struct mem_cgroup *memcg)
mm/shrinker.c
276
info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
mm/shrinker.c
285
void reparent_shrinker_deferred(struct mem_cgroup *memcg)
mm/shrinker.c
293
parent = parent_mem_cgroup(memcg);
mm/shrinker.c
300
child_info = shrinker_info_protected(memcg, nid);
mm/shrinker.c
324
struct mem_cgroup *memcg)
mm/shrinker.c
330
struct mem_cgroup *memcg)
mm/shrinker.c
344
if (sc->memcg &&
mm/shrinker.c
347
sc->memcg);
mm/shrinker.c
361
if (sc->memcg &&
mm/shrinker.c
364
sc->memcg);
mm/shrinker.c
470
struct mem_cgroup *memcg, int priority)
mm/shrinker.c
476
if (!mem_cgroup_online(memcg))
mm/shrinker.c
516
info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
mm/shrinker.c
531
.memcg = memcg,
mm/shrinker.c
573
set_shrinker_bit(memcg, nid, shrinker_id);
mm/shrinker.c
588
struct mem_cgroup *memcg, int priority)
mm/shrinker.c
614
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
mm/shrinker.c
62
void free_shrinker_info(struct mem_cgroup *memcg)
mm/shrinker.c
627
if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
mm/shrinker.c
628
return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
mm/shrinker.c
656
.memcg = memcg,
mm/shrinker.c
69
pn = memcg->nodeinfo[nid];
mm/shrinker.c
77
int alloc_shrinker_info(struct mem_cgroup *memcg)
mm/shrinker.c
94
rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
mm/shrinker_debug.c
114
struct mem_cgroup *memcg = NULL;
mm/shrinker_debug.c
133
memcg = mem_cgroup_get_from_id(id);
mm/shrinker_debug.c
134
if (!memcg)
mm/shrinker_debug.c
137
if (!mem_cgroup_online(memcg)) {
mm/shrinker_debug.c
138
mem_cgroup_put(memcg);
mm/shrinker_debug.c
146
sc.memcg = memcg;
mm/shrinker_debug.c
152
mem_cgroup_put(memcg);
mm/shrinker_debug.c
19
struct mem_cgroup *memcg,
mm/shrinker_debug.c
30
.memcg = memcg,
mm/shrinker_debug.c
51
struct mem_cgroup *memcg;
mm/shrinker_debug.c
64
memcg = mem_cgroup_iter(NULL, NULL, NULL);
mm/shrinker_debug.c
66
if (memcg && !mem_cgroup_online(memcg))
mm/shrinker_debug.c
70
memcg_aware ? memcg : NULL,
mm/shrinker_debug.c
73
seq_printf(m, "%llu", mem_cgroup_id(memcg));
mm/shrinker_debug.c
80
mem_cgroup_iter_break(NULL, memcg);
mm/shrinker_debug.c
85
mem_cgroup_iter_break(NULL, memcg);
mm/shrinker_debug.c
89
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
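The mm/shrinker.c, mm/shrinker_debug.c and mm/vmscan.c entries all walk the memcg hierarchy with the same iterator idiom. A minimal sketch of that loop, assuming an arbitrary per-memcg body; an early exit has to go through mem_cgroup_iter_break() so the iterator's reference is dropped. Illustrative only, not any one of the listed call sites.

#include <linux/memcontrol.h>

static void example_walk_all_memcgs(void)
{
	/* A NULL root means "start from the root of the whole hierarchy". */
	struct mem_cgroup *memcg = mem_cgroup_iter(NULL, NULL, NULL);

	do {
		/* ... per-memcg work would go here ... */

		if (0 /* e.g. an allocation failure or a fatal signal */) {
			/* Bailing out early must release the iterator ref. */
			mem_cgroup_iter_break(NULL, memcg);
			break;
		}
	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
}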
mm/vmpressure.c
239
void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
mm/vmpressure.c
255
vmpr = memcg_to_vmpressure(memcg);
mm/vmpressure.c
295
if (!memcg || mem_cgroup_is_root(memcg))
mm/vmpressure.c
319
mem_cgroup_set_socket_pressure(memcg);
mm/vmpressure.c
335
void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
mm/vmpressure.c
351
vmpressure(gfp, memcg, true, vmpressure_win, 0);
mm/vmpressure.c
374
int vmpressure_register_event(struct mem_cgroup *memcg,
mm/vmpressure.c
377
struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
mm/vmpressure.c
435
void vmpressure_unregister_event(struct mem_cgroup *memcg,
mm/vmpressure.c
438
struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
mm/vmpressure.c
77
struct mem_cgroup *memcg = vmpressure_to_memcg(vmpr);
mm/vmpressure.c
79
memcg = parent_mem_cgroup(memcg);
mm/vmpressure.c
80
if (!memcg)
mm/vmpressure.c
82
return memcg_to_vmpressure(memcg);
mm/vmscan.c
1023
struct mem_cgroup *memcg)
mm/vmscan.c
1045
mem_cgroup_node_filter_allowed(memcg, &allowed_mask);
mm/vmscan.c
1086
struct mem_cgroup *memcg)
mm/vmscan.c
1099
do_demote_pass = can_demote(pgdat->node_id, sc, memcg);
mm/vmscan.c
1576
nr_demoted = demote_folio_list(&demote_folios, pgdat, memcg);
mm/vmscan.c
244
static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
mm/vmscan.c
2458
static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
mm/vmscan.c
2463
mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low, &usage);
mm/vmscan.c
248
return mem_cgroup_swappiness(memcg);
mm/vmscan.c
2531
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
mm/vmscan.c
2532
int swappiness = sc_swappiness(sc, memcg);
mm/vmscan.c
2539
if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
mm/vmscan.c
2601
scan = apply_proportional_protection(memcg, sc, lruvec_size);
mm/vmscan.c
2608
if (!scan && !mem_cgroup_online(memcg))
mm/vmscan.c
2623
scan = mem_cgroup_online(memcg) ?
mm/vmscan.c
266
static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
mm/vmscan.c
2710
static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
mm/vmscan.c
2715
if (memcg) {
mm/vmscan.c
2716
struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
mm/vmscan.c
2732
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
mm/vmscan.c
2738
if (!can_demote(pgdat->node_id, sc, memcg) &&
mm/vmscan.c
2739
mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
mm/vmscan.c
2742
return sc_swappiness(sc, memcg);
mm/vmscan.c
2864
static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
mm/vmscan.c
2872
if (memcg)
mm/vmscan.c
2873
return &memcg->mm_list;
mm/vmscan.c
2906
struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
mm/vmscan.c
2907
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
mm/vmscan.c
2911
VM_WARN_ON_ONCE(mm->lru_gen.memcg);
mm/vmscan.c
2912
mm->lru_gen.memcg = memcg;
mm/vmscan.c
2917
struct lruvec *lruvec = get_lruvec(memcg, nid);
mm/vmscan.c
2934
struct mem_cgroup *memcg = NULL;
mm/vmscan.c
2940
memcg = mm->lru_gen.memcg;
mm/vmscan.c
2942
mm_list = get_mm_list(memcg);
mm/vmscan.c
2947
struct lruvec *lruvec = get_lruvec(memcg, nid);
mm/vmscan.c
2964
mem_cgroup_put(mm->lru_gen.memcg);
mm/vmscan.c
2965
mm->lru_gen.memcg = NULL;
mm/vmscan.c
2972
struct mem_cgroup *memcg;
mm/vmscan.c
2983
if (!mm->lru_gen.memcg)
mm/vmscan.c
2987
memcg = mem_cgroup_from_task(task);
mm/vmscan.c
2989
if (memcg == mm->lru_gen.memcg)
mm/vmscan.c
3001
static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
mm/vmscan.c
3049
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
mm/vmscan.c
3050
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
mm/vmscan.c
3110
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
mm/vmscan.c
3111
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
mm/vmscan.c
344
struct mem_cgroup *memcg)
mm/vmscan.c
3444
static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
mm/vmscan.c
3455
if (folio_memcg(folio) != memcg)
mm/vmscan.c
3505
struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
mm/vmscan.c
3539
folio = get_pfn_folio(pfn, memcg, pgdat);
mm/vmscan.c
3581
struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
mm/vmscan.c
359
mem_cgroup_node_filter_allowed(memcg, &allowed_mask);
mm/vmscan.c
363
static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
mm/vmscan.c
3630
folio = get_pfn_folio(pfn, memcg, pgdat);
mm/vmscan.c
367
if (memcg == NULL) {
mm/vmscan.c
376
if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
mm/vmscan.c
385
return can_demote(nid, sc, memcg);
mm/vmscan.c
4111
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
mm/vmscan.c
4127
return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
mm/vmscan.c
4136
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
mm/vmscan.c
4139
if (mem_cgroup_below_min(NULL, memcg))
mm/vmscan.c
4156
struct mem_cgroup *memcg;
mm/vmscan.c
4164
memcg = mem_cgroup_iter(NULL, NULL, NULL);
mm/vmscan.c
4166
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/vmscan.c
4168
mem_cgroup_calculate_protection(NULL, memcg);
mm/vmscan.c
4172
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
mm/vmscan.c
4214
struct mem_cgroup *memcg = folio_memcg(folio);
mm/vmscan.c
4216
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/vmscan.c
4266
folio = get_pfn_folio(pfn, memcg, pgdat);
mm/vmscan.c
431
struct mem_cgroup *memcg = NULL;
mm/vmscan.c
433
memcg = mem_cgroup_iter(NULL, NULL, NULL);
mm/vmscan.c
435
freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
mm/vmscan.c
4358
void lru_gen_online_memcg(struct mem_cgroup *memcg)
mm/vmscan.c
436
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
mm/vmscan.c
4366
struct lruvec *lruvec = get_lruvec(memcg, nid);
mm/vmscan.c
4383
void lru_gen_offline_memcg(struct mem_cgroup *memcg)
mm/vmscan.c
4388
struct lruvec *lruvec = get_lruvec(memcg, nid);
mm/vmscan.c
4394
void lru_gen_release_memcg(struct mem_cgroup *memcg)
mm/vmscan.c
4401
struct lruvec *lruvec = get_lruvec(memcg, nid);
mm/vmscan.c
4420
void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
mm/vmscan.c
4422
struct lruvec *lruvec = get_lruvec(memcg, nid);
mm/vmscan.c
4554
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
mm/vmscan.c
4609
count_memcg_events(memcg, item, isolated);
mm/vmscan.c
4610
count_memcg_events(memcg, PGREFILL, sorted);
mm/vmscan.c
4701
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
mm/vmscan.c
4718
reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false, memcg);
mm/vmscan.c
4762
count_memcg_events(memcg, item, reclaimed);
mm/vmscan.c
4815
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
mm/vmscan.c
4818
if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
mm/vmscan.c
4824
if (nr_to_scan && !mem_cgroup_online(memcg))
mm/vmscan.c
4827
nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
mm/vmscan.c
4911
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
mm/vmscan.c
4915
if (mem_cgroup_below_min(NULL, memcg))
mm/vmscan.c
4918
if (mem_cgroup_below_low(NULL, memcg)) {
mm/vmscan.c
4923
memcg_memory_event(memcg, MEMCG_LOW);
mm/vmscan.c
4928
shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
mm/vmscan.c
4931
vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
mm/vmscan.c
4936
if (success && mem_cgroup_online(memcg))
mm/vmscan.c
4955
struct mem_cgroup *memcg;
mm/vmscan.c
4962
memcg = NULL;
mm/vmscan.c
4972
mem_cgroup_put(memcg);
mm/vmscan.c
4973
memcg = NULL;
mm/vmscan.c
4979
memcg = lruvec_memcg(lruvec);
mm/vmscan.c
4981
if (!mem_cgroup_tryget(memcg)) {
mm/vmscan.c
4982
lru_gen_release_memcg(memcg);
mm/vmscan.c
4983
memcg = NULL;
mm/vmscan.c
5002
mem_cgroup_put(memcg);
mm/vmscan.c
5171
struct mem_cgroup *memcg;
mm/vmscan.c
5186
memcg = mem_cgroup_iter(NULL, NULL, NULL);
mm/vmscan.c
5191
struct lruvec *lruvec = get_lruvec(memcg, nid);
mm/vmscan.c
5210
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
mm/vmscan.c
5306
struct mem_cgroup *memcg;
mm/vmscan.c
5313
memcg = mem_cgroup_iter(NULL, NULL, NULL);
mm/vmscan.c
5319
return get_lruvec(memcg, nid);
mm/vmscan.c
5321
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
mm/vmscan.c
5338
struct mem_cgroup *memcg = lruvec_memcg(v);
mm/vmscan.c
5344
memcg = mem_cgroup_iter(NULL, memcg, NULL);
mm/vmscan.c
5345
if (!memcg)
mm/vmscan.c
5351
return get_lruvec(memcg, nid);
mm/vmscan.c
5416
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
mm/vmscan.c
5421
const char *path = memcg ? m->private : "";
mm/vmscan.c
5424
if (memcg)
mm/vmscan.c
5425
cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
mm/vmscan.c
5427
seq_printf(m, "memcg %llu %s\n", mem_cgroup_id(memcg), path);
mm/vmscan.c
5517
struct mem_cgroup *memcg = NULL;
mm/vmscan.c
5523
memcg = mem_cgroup_get_from_id(memcg_id);
mm/vmscan.c
5524
if (!memcg)
mm/vmscan.c
5528
if (memcg_id != mem_cgroup_id(memcg))
mm/vmscan.c
5531
sc->target_mem_cgroup = memcg;
mm/vmscan.c
5532
lruvec = get_lruvec(memcg, nid);
mm/vmscan.c
5548
mem_cgroup_put(memcg);
mm/vmscan.c
5696
void lru_gen_init_memcg(struct mem_cgroup *memcg)
mm/vmscan.c
5698
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
mm/vmscan.c
5707
void lru_gen_exit_memcg(struct mem_cgroup *memcg)
mm/vmscan.c
5711
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
mm/vmscan.c
5716
struct lruvec *lruvec = get_lruvec(memcg, nid);
mm/vmscan.c
5967
struct mem_cgroup *memcg;
mm/vmscan.c
5981
memcg = mem_cgroup_iter(target_memcg, NULL, partial);
mm/vmscan.c
5983
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/vmscan.c
5995
mem_cgroup_calculate_protection(target_memcg, memcg);
mm/vmscan.c
5997
if (mem_cgroup_below_min(target_memcg, memcg)) {
mm/vmscan.c
6003
} else if (mem_cgroup_below_low(target_memcg, memcg)) {
mm/vmscan.c
6014
memcg_memory_event(memcg, MEMCG_LOW);
mm/vmscan.c
6022
shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
mm/vmscan.c
6027
vmpressure(sc->gfp_mask, memcg, false,
mm/vmscan.c
6033
mem_cgroup_iter_break(target_memcg, memcg);
mm/vmscan.c
6036
} while ((memcg = mem_cgroup_iter(target_memcg, memcg, partial)));
mm/vmscan.c
6612
unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
mm/vmscan.c
6617
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/vmscan.c
6620
.target_mem_cgroup = memcg,
mm/vmscan.c
6651
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
mm/vmscan.c
6665
.target_mem_cgroup = memcg,
mm/vmscan.c
6692
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
mm/vmscan.c
6704
struct mem_cgroup *memcg;
mm/vmscan.c
6719
memcg = mem_cgroup_iter(NULL, NULL, NULL);
mm/vmscan.c
6721
lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/vmscan.c
6724
memcg = mem_cgroup_iter(NULL, memcg, NULL);
mm/vmscan.c
6725
} while (memcg);
mm/vmscan.c
7748
struct mem_cgroup *memcg, pg_data_t *pgdat)
mm/vmscan.c
7757
if (!buf || (!memcg && !pgdat) || (memcg && pgdat))
mm/vmscan.c
7803
if (memcg) {
mm/vmscan.c
7808
reclaimed = try_to_free_mem_cgroup_pages(memcg,
mm/workingset.c
244
struct mem_cgroup *memcg = folio_memcg(folio);
mm/workingset.c
249
lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/workingset.c
257
return pack_shadow(mem_cgroup_private_id(memcg), pgdat, token, workingset);
mm/workingset.c
269
struct mem_cgroup *memcg;
mm/workingset.c
274
memcg = mem_cgroup_from_private_id(memcg_id);
mm/workingset.c
275
*lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/workingset.c
538
struct mem_cgroup *memcg;
mm/workingset.c
560
memcg = folio_memcg(folio);
mm/workingset.c
562
lruvec = mem_cgroup_lruvec(memcg, pgdat);
mm/workingset.c
674
if (sc->memcg) {
mm/workingset.c
678
mem_cgroup_flush_stats_ratelimited(sc->memcg);
mm/workingset.c
679
lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
mm/zswap.c
1188
!mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
mm/zswap.c
1205
struct mem_cgroup *memcg = sc->memcg;
mm/zswap.c
1206
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
mm/zswap.c
1212
if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
mm/zswap.c
1232
mem_cgroup_flush_stats(memcg);
mm/zswap.c
1233
nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
mm/zswap.c
1234
nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
mm/zswap.c
1290
static int shrink_memcg(struct mem_cgroup *memcg)
mm/zswap.c
1294
if (!mem_cgroup_zswap_writeback_enabled(memcg))
mm/zswap.c
1301
if (memcg && !mem_cgroup_online(memcg))
mm/zswap.c
1307
shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
mm/zswap.c
1320
struct mem_cgroup *memcg;
mm/zswap.c
1366
memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
mm/zswap.c
1367
zswap_next_shrink = memcg;
mm/zswap.c
1368
} while (memcg && !mem_cgroup_tryget_online(memcg));
mm/zswap.c
1371
if (!memcg) {
mm/zswap.c
1383
ret = shrink_memcg(memcg);
mm/zswap.c
1385
mem_cgroup_put(memcg);
mm/zswap.c
1493
struct mem_cgroup *memcg = NULL;
mm/zswap.c
1506
memcg = get_mem_cgroup_from_objcg(objcg);
mm/zswap.c
1507
if (shrink_memcg(memcg)) {
mm/zswap.c
1508
mem_cgroup_put(memcg);
mm/zswap.c
1511
mem_cgroup_put(memcg);
mm/zswap.c
1522
memcg = get_mem_cgroup_from_objcg(objcg);
mm/zswap.c
1523
if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
mm/zswap.c
1524
mem_cgroup_put(memcg);
mm/zswap.c
1527
mem_cgroup_put(memcg);
mm/zswap.c
625
struct mem_cgroup *memcg;
mm/zswap.c
639
memcg = mem_cgroup_from_entry(entry);
mm/zswap.c
641
list_lru_add(list_lru, &entry->lru, nid, memcg);
mm/zswap.c
648
struct mem_cgroup *memcg;
mm/zswap.c
651
memcg = mem_cgroup_from_entry(entry);
mm/zswap.c
653
list_lru_del(list_lru, &entry->lru, nid, memcg);
mm/zswap.c
682
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
mm/zswap.c
686
if (zswap_next_shrink == memcg) {
tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
17
struct mem_cgroup *memcg;
tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
23
memcg = bpf_get_mem_cgroup(css);
tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
24
if (!memcg)
tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
27
bpf_mem_cgroup_flush_stats(memcg);
tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
29
memcg_query.nr_anon_mapped = bpf_mem_cgroup_page_state(memcg, NR_ANON_MAPPED);
tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
30
memcg_query.nr_shmem = bpf_mem_cgroup_page_state(memcg, NR_SHMEM);
tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
31
memcg_query.nr_file_pages = bpf_mem_cgroup_page_state(memcg, NR_FILE_PAGES);
tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
32
memcg_query.nr_file_mapped = bpf_mem_cgroup_page_state(memcg, NR_FILE_MAPPED);
tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
33
memcg_query.memcg_kmem = bpf_mem_cgroup_page_state(memcg, MEMCG_KMEM);
tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
34
memcg_query.pgfault = bpf_mem_cgroup_vm_events(memcg, PGFAULT);
tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
36
bpf_put_mem_cgroup(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
1028
char *memcg;
tools/testing/selftests/cgroup/test_memcontrol.c
1039
memcg = cg_name(root, "memcg_test");
tools/testing/selftests/cgroup/test_memcontrol.c
1040
if (!memcg)
tools/testing/selftests/cgroup/test_memcontrol.c
1043
if (cg_create(memcg))
tools/testing/selftests/cgroup/test_memcontrol.c
1046
if (cg_read_long(memcg, "memory.swap.current")) {
tools/testing/selftests/cgroup/test_memcontrol.c
1051
swap_peak_fd = cg_open(memcg, "memory.swap.peak",
tools/testing/selftests/cgroup/test_memcontrol.c
1073
mem_peak_fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
tools/testing/selftests/cgroup/test_memcontrol.c
1078
if (cg_read_long(memcg, "memory.swap.peak"))
tools/testing/selftests/cgroup/test_memcontrol.c
1093
if (cg_read_long(memcg, "memory.peak"))
tools/testing/selftests/cgroup/test_memcontrol.c
1106
if (cg_read_strcmp(memcg, "memory.max", "max\n"))
tools/testing/selftests/cgroup/test_memcontrol.c
1109
if (cg_read_strcmp(memcg, "memory.swap.max", "max\n"))
tools/testing/selftests/cgroup/test_memcontrol.c
1112
if (cg_write(memcg, "memory.swap.max", "30M"))
tools/testing/selftests/cgroup/test_memcontrol.c
1115
if (cg_write(memcg, "memory.max", "30M"))
tools/testing/selftests/cgroup/test_memcontrol.c
1119
if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
tools/testing/selftests/cgroup/test_memcontrol.c
1122
if (cg_read_key_long(memcg, "memory.events", "oom ") != 1)
tools/testing/selftests/cgroup/test_memcontrol.c
1125
if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1)
tools/testing/selftests/cgroup/test_memcontrol.c
1128
peak = cg_read_long(memcg, "memory.peak");
tools/testing/selftests/cgroup/test_memcontrol.c
1132
peak = cg_read_long(memcg, "memory.swap.peak");
tools/testing/selftests/cgroup/test_memcontrol.c
1148
peak_reset = cg_write(memcg, "memory.swap.peak", (char *)reset_string);
tools/testing/selftests/cgroup/test_memcontrol.c
1152
peak_reset = cg_write(memcg, "memory.peak", (char *)reset_string);
tools/testing/selftests/cgroup/test_memcontrol.c
1177
if (cg_read_long(memcg, "memory.peak") < MB(29))
tools/testing/selftests/cgroup/test_memcontrol.c
1180
if (cg_read_long(memcg, "memory.swap.peak") < MB(29))
tools/testing/selftests/cgroup/test_memcontrol.c
1183
if (cg_run(memcg, alloc_anon_50M_check_swap, (void *)MB(30)))
tools/testing/selftests/cgroup/test_memcontrol.c
1186
max = cg_read_key_long(memcg, "memory.events", "max ");
tools/testing/selftests/cgroup/test_memcontrol.c
1190
peak = cg_read_long(memcg, "memory.peak");
tools/testing/selftests/cgroup/test_memcontrol.c
1194
peak = cg_read_long(memcg, "memory.swap.peak");
tools/testing/selftests/cgroup/test_memcontrol.c
1213
cg_destroy(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
1214
free(memcg);
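
The test_memcontrol.c lines above (source lines 1028-1214) follow the common selftest skeleton: build a child cgroup name with cg_name(), create it, cap memory.max and memory.swap.max, over-allocate to force an OOM kill, then check memory.events and the memory.peak / memory.swap.peak values before destroying the cgroup. A condensed, hedged sketch of that flow, using the cg_* helpers from the selftest's cgroup_util.h (helper behavior is taken from their use in the listing; error-path cleanup is simplified):

    /* Condensed sketch of the flow above; not the full selftest. */
    #include "../kselftest.h"
    #include "cgroup_util.h"

    static int sketch_peak_test(const char *root)
    {
            int ret = KSFT_FAIL;
            char *memcg;

            memcg = cg_name(root, "memcg_test");      /* "<root>/memcg_test" */
            if (!memcg)
                    return ret;
            if (cg_create(memcg))
                    goto cleanup;

            /* Cap memory and swap, then over-allocate to force an OOM kill. */
            if (cg_write(memcg, "memory.max", "30M") ||
                cg_write(memcg, "memory.swap.max", "30M"))
                    goto cleanup;
            if (!cg_run(memcg, alloc_anon, (void *)MB(100)))  /* must fail (OOM) */
                    goto cleanup;

            /* Peak usage should have approached the 30M limit. */
            if (cg_read_long(memcg, "memory.peak") < MB(29))
                    goto cleanup;

            ret = KSFT_PASS;
    cleanup:
            cg_destroy(memcg);
            free(memcg);
            return ret;
    }
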
tools/testing/selftests/cgroup/test_memcontrol.c
1227
char *memcg;
tools/testing/selftests/cgroup/test_memcontrol.c
1229
memcg = cg_name(root, "memcg_test");
tools/testing/selftests/cgroup/test_memcontrol.c
1230
if (!memcg)
tools/testing/selftests/cgroup/test_memcontrol.c
1233
if (cg_create(memcg))
tools/testing/selftests/cgroup/test_memcontrol.c
1236
if (cg_write(memcg, "memory.max", "30M"))
tools/testing/selftests/cgroup/test_memcontrol.c
1239
if (cg_write(memcg, "memory.swap.max", "0"))
tools/testing/selftests/cgroup/test_memcontrol.c
1242
if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
tools/testing/selftests/cgroup/test_memcontrol.c
1245
if (cg_read_strcmp(memcg, "cgroup.procs", ""))
tools/testing/selftests/cgroup/test_memcontrol.c
1248
if (cg_read_key_long(memcg, "memory.events", "oom ") != 1)
tools/testing/selftests/cgroup/test_memcontrol.c
1251
if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1)
tools/testing/selftests/cgroup/test_memcontrol.c
1257
cg_destroy(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
1258
free(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
1388
char *memcg;
tools/testing/selftests/cgroup/test_memcontrol.c
1391
memcg = cg_name(root, "memcg_test");
tools/testing/selftests/cgroup/test_memcontrol.c
1392
if (!memcg)
tools/testing/selftests/cgroup/test_memcontrol.c
1395
if (cg_create(memcg))
tools/testing/selftests/cgroup/test_memcontrol.c
1406
pid = cg_run_nowait(memcg, tcp_server, &args);
tools/testing/selftests/cgroup/test_memcontrol.c
1428
if (tcp_client(memcg, port) != KSFT_PASS)
tools/testing/selftests/cgroup/test_memcontrol.c
1435
if (cg_read_long(memcg, "memory.current") < 0)
tools/testing/selftests/cgroup/test_memcontrol.c
1450
sock_post = cg_read_key_long_poll(memcg, "memory.stat", "sock ", 0,
tools/testing/selftests/cgroup/test_memcontrol.c
1459
cg_destroy(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
1460
free(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
1602
char *memcg;
tools/testing/selftests/cgroup/test_memcontrol.c
1605
memcg = cg_name(root, "memcg_test_0");
tools/testing/selftests/cgroup/test_memcontrol.c
1607
if (!memcg)
tools/testing/selftests/cgroup/test_memcontrol.c
1610
if (cg_create(memcg))
tools/testing/selftests/cgroup/test_memcontrol.c
1613
if (cg_write(memcg, "memory.max", "50M"))
tools/testing/selftests/cgroup/test_memcontrol.c
1616
if (cg_write(memcg, "memory.swap.max", "0"))
tools/testing/selftests/cgroup/test_memcontrol.c
1619
if (cg_write(memcg, "memory.oom.group", "1"))
tools/testing/selftests/cgroup/test_memcontrol.c
1622
safe_pid = cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
tools/testing/selftests/cgroup/test_memcontrol.c
1626
cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
tools/testing/selftests/cgroup/test_memcontrol.c
1627
if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
tools/testing/selftests/cgroup/test_memcontrol.c
1630
if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3)
tools/testing/selftests/cgroup/test_memcontrol.c
1639
if (memcg)
tools/testing/selftests/cgroup/test_memcontrol.c
1640
cg_destroy(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
1641
free(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
250
char *memcg;
tools/testing/selftests/cgroup/test_memcontrol.c
255
memcg = cg_name(root, "memcg_test");
tools/testing/selftests/cgroup/test_memcontrol.c
256
if (!memcg)
tools/testing/selftests/cgroup/test_memcontrol.c
259
if (cg_create(memcg))
tools/testing/selftests/cgroup/test_memcontrol.c
262
current = cg_read_long(memcg, "memory.current");
tools/testing/selftests/cgroup/test_memcontrol.c
266
peak = cg_read_long(memcg, "memory.peak");
tools/testing/selftests/cgroup/test_memcontrol.c
270
if (cg_run(memcg, alloc_anon_50M_check, NULL))
tools/testing/selftests/cgroup/test_memcontrol.c
273
peak = cg_read_long(memcg, "memory.peak");
tools/testing/selftests/cgroup/test_memcontrol.c
282
peak_fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
tools/testing/selftests/cgroup/test_memcontrol.c
303
peak_fd2 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
tools/testing/selftests/cgroup/test_memcontrol.c
308
peak_fd3 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
tools/testing/selftests/cgroup/test_memcontrol.c
329
peak = cg_read_long(memcg, "memory.peak");
tools/testing/selftests/cgroup/test_memcontrol.c
337
peak_fd4 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
tools/testing/selftests/cgroup/test_memcontrol.c
350
if (cg_run(memcg, alloc_pagecache_50M_check, NULL))
tools/testing/selftests/cgroup/test_memcontrol.c
353
peak = cg_read_long(memcg, "memory.peak");
tools/testing/selftests/cgroup/test_memcontrol.c
384
cg_destroy(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
385
free(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
443
static bool reclaim_until(const char *memcg, long goal);
tools/testing/selftests/cgroup/test_memcontrol.c
705
char *memcg;
tools/testing/selftests/cgroup/test_memcontrol.c
708
memcg = cg_name(root, "memcg_test");
tools/testing/selftests/cgroup/test_memcontrol.c
709
if (!memcg)
tools/testing/selftests/cgroup/test_memcontrol.c
712
if (cg_create(memcg))
tools/testing/selftests/cgroup/test_memcontrol.c
715
if (cg_read_strcmp(memcg, "memory.high", "max\n"))
tools/testing/selftests/cgroup/test_memcontrol.c
718
if (cg_write(memcg, "memory.swap.max", "0"))
tools/testing/selftests/cgroup/test_memcontrol.c
721
if (cg_write(memcg, "memory.high", "30M"))
tools/testing/selftests/cgroup/test_memcontrol.c
724
if (cg_run(memcg, alloc_anon, (void *)MB(31)))
tools/testing/selftests/cgroup/test_memcontrol.c
727
if (!cg_run(memcg, alloc_pagecache_50M_check, NULL))
tools/testing/selftests/cgroup/test_memcontrol.c
730
if (cg_run(memcg, alloc_pagecache_max_30M, NULL))
tools/testing/selftests/cgroup/test_memcontrol.c
733
high = cg_read_key_long(memcg, "memory.events", "high ");
tools/testing/selftests/cgroup/test_memcontrol.c
740
cg_destroy(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
741
free(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
768
char *memcg;
tools/testing/selftests/cgroup/test_memcontrol.c
772
memcg = cg_name(root, "memcg_test");
tools/testing/selftests/cgroup/test_memcontrol.c
773
if (!memcg)
tools/testing/selftests/cgroup/test_memcontrol.c
776
if (cg_create(memcg))
tools/testing/selftests/cgroup/test_memcontrol.c
779
pre_high = cg_read_key_long(memcg, "memory.events", "high ");
tools/testing/selftests/cgroup/test_memcontrol.c
780
pre_max = cg_read_key_long(memcg, "memory.events", "max ");
tools/testing/selftests/cgroup/test_memcontrol.c
784
if (cg_write(memcg, "memory.swap.max", "0"))
tools/testing/selftests/cgroup/test_memcontrol.c
787
if (cg_write(memcg, "memory.high", "30M"))
tools/testing/selftests/cgroup/test_memcontrol.c
790
if (cg_write(memcg, "memory.max", "140M"))
tools/testing/selftests/cgroup/test_memcontrol.c
793
fd = memcg_prepare_for_wait(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
797
pid = cg_run_nowait(memcg, alloc_anon_mlock, (void *)MB(200));
tools/testing/selftests/cgroup/test_memcontrol.c
803
post_high = cg_read_key_long(memcg, "memory.events", "high ");
tools/testing/selftests/cgroup/test_memcontrol.c
804
post_max = cg_read_key_long(memcg, "memory.events", "max ");
tools/testing/selftests/cgroup/test_memcontrol.c
816
cg_destroy(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
817
free(memcg);
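
The test above (source lines 768-817) arms a notification on the cgroup with memcg_prepare_for_wait() before starting an mlocked over-allocation, then compares the "high" and "max" counters in memory.events before and after. As an illustration only, one way such a wait can be built on inotify is sketched below; this is not claimed to be memcg_prepare_for_wait()'s implementation, and the file/flag choices are assumptions.

    /* Illustrative only: block until memory.events is modified by the kernel. */
    #include <sys/inotify.h>
    #include <limits.h>
    #include <stdio.h>
    #include <unistd.h>

    static int wait_for_memory_events(const char *memcg)
    {
            char path[PATH_MAX];
            char buf[sizeof(struct inotify_event) + NAME_MAX + 1];
            int ifd, wd;

            ifd = inotify_init1(IN_CLOEXEC);
            if (ifd < 0)
                    return -1;

            snprintf(path, sizeof(path), "%s/memory.events", memcg);
            wd = inotify_add_watch(ifd, path, IN_MODIFY);
            if (wd < 0) {
                    close(ifd);
                    return -1;
            }

            /* Returns once the kernel bumps a counter in memory.events. */
            if (read(ifd, buf, sizeof(buf)) < 0) {
                    close(ifd);
                    return -1;
            }
            close(ifd);
            return 0;
    }
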
tools/testing/selftests/cgroup/test_memcontrol.c
830
char *memcg;
tools/testing/selftests/cgroup/test_memcontrol.c
833
memcg = cg_name(root, "memcg_test");
tools/testing/selftests/cgroup/test_memcontrol.c
834
if (!memcg)
tools/testing/selftests/cgroup/test_memcontrol.c
837
if (cg_create(memcg))
tools/testing/selftests/cgroup/test_memcontrol.c
840
if (cg_read_strcmp(memcg, "memory.max", "max\n"))
tools/testing/selftests/cgroup/test_memcontrol.c
843
if (cg_write(memcg, "memory.swap.max", "0"))
tools/testing/selftests/cgroup/test_memcontrol.c
846
if (cg_write(memcg, "memory.max", "30M"))
tools/testing/selftests/cgroup/test_memcontrol.c
850
if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
tools/testing/selftests/cgroup/test_memcontrol.c
853
if (cg_run(memcg, alloc_pagecache_max_30M, NULL))
tools/testing/selftests/cgroup/test_memcontrol.c
856
current = cg_read_long(memcg, "memory.current");
tools/testing/selftests/cgroup/test_memcontrol.c
860
max = cg_read_key_long(memcg, "memory.events", "max ");
tools/testing/selftests/cgroup/test_memcontrol.c
867
cg_destroy(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
868
free(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
888
static bool reclaim_until(const char *memcg, long goal)
tools/testing/selftests/cgroup/test_memcontrol.c
896
current = cg_read_long(memcg, "memory.current");
tools/testing/selftests/cgroup/test_memcontrol.c
906
err = cg_write(memcg, "memory.reclaim", buf);
tools/testing/selftests/cgroup/test_memcontrol.c
924
char *memcg;
tools/testing/selftests/cgroup/test_memcontrol.c
927
memcg = cg_name(root, "memcg_test");
tools/testing/selftests/cgroup/test_memcontrol.c
928
if (!memcg)
tools/testing/selftests/cgroup/test_memcontrol.c
931
if (cg_create(memcg))
tools/testing/selftests/cgroup/test_memcontrol.c
934
current = cg_read_long(memcg, "memory.current");
tools/testing/selftests/cgroup/test_memcontrol.c
942
cg_run_nowait(memcg, alloc_pagecache_50M_noexit, (void *)(long)fd);
tools/testing/selftests/cgroup/test_memcontrol.c
949
cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(50));
tools/testing/selftests/cgroup/test_memcontrol.c
959
while (!values_close(cg_read_long(memcg, "memory.current"),
tools/testing/selftests/cgroup/test_memcontrol.c
976
if (!reclaim_until(memcg, MB(30)))
tools/testing/selftests/cgroup/test_memcontrol.c
981
cg_destroy(memcg);
tools/testing/selftests/cgroup/test_memcontrol.c
982
free(memcg);
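
reclaim_until(), declared at source line 443 and used in the test above, repeatedly reads memory.current and writes the remaining overshoot into memory.reclaim until usage comes down to the goal. A hedged reconstruction of that loop from the calls shown above; the retry count, the values_close() tolerance, and the treatment of a failed write are assumptions.

    /* Hedged reconstruction, not the selftest's exact reclaim_until(). */
    static bool reclaim_until_sketch(const char *memcg, long goal)
    {
            char buf[64];
            long current;
            int retries;

            for (retries = 5; retries > 0; retries--) {
                    current = cg_read_long(memcg, "memory.current");
                    if (current < goal || values_close(current, goal, 3))
                            return true;

                    /* Ask the kernel to reclaim the overshoot, in bytes. */
                    snprintf(buf, sizeof(buf), "%ld", current - goal);
                    if (cg_write(memcg, "memory.reclaim", buf))
                            return false;
            }
            return false;
    }
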
tools/testing/selftests/kvm/include/lru_gen_util.h
43
void lru_gen_read_memcg_stats(struct memcg_stats *stats, const char *memcg);
tools/testing/selftests/kvm/include/lru_gen_util.h
46
void lru_gen_do_aging(struct memcg_stats *stats, const char *memcg);
tools/testing/selftests/kvm/lib/lru_gen_util.c
201
void lru_gen_read_memcg_stats(struct memcg_stats *stats, const char *memcg)
tools/testing/selftests/kvm/lib/lru_gen_util.c
209
.name = memcg,
tools/testing/selftests/kvm/lib/lru_gen_util.c
232
memcg);
tools/testing/selftests/kvm/lib/lru_gen_util.c
237
print_memcg_stats(stats, memcg);
tools/testing/selftests/kvm/lib/lru_gen_util.c
302
void lru_gen_do_aging(struct memcg_stats *stats, const char *memcg)
tools/testing/selftests/kvm/lib/lru_gen_util.c
309
lru_gen_read_memcg_stats(stats, memcg);
tools/testing/selftests/kvm/lib/lru_gen_util.c
325
lru_gen_read_memcg_stats(stats, memcg);
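
The lru_gen_util entries above give the KVM selftests two helpers keyed by memcg name: lru_gen_read_memcg_stats() fills a struct memcg_stats (presumably by parsing the MGLRU debugfs interface for that memcg), and lru_gen_do_aging() re-reads the stats around an aging request. A hedged usage sketch follows; "memcg_test" is a hypothetical cgroup name and the fields of struct memcg_stats are not reproduced here.

    /* Hedged usage sketch of the helpers listed above. */
    #include "lru_gen_util.h"

    static void sample_lru_gen(void)
    {
            struct memcg_stats stats;

            /* Parse this memcg's current MGLRU generation stats. */
            lru_gen_read_memcg_stats(&stats, "memcg_test");

            /* Request aging for the memcg and refresh the parsed stats. */
            lru_gen_do_aging(&stats, "memcg_test");
    }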