Symbol: cma
arch/arm/mm/dma-mapping.c
  540: bool allowblock, cma;
  573: cma = allowblock ? dev_get_cma_area(dev) : NULL;
  575: if (cma)

arch/powerpc/include/asm/kvm_host.h
  267: int cma;

arch/powerpc/kernel/fadump.c
  67: static struct cma *fadump_cma;

arch/powerpc/kvm/book3s_64_mmu_hv.c
  75: int cma = 0;
  87: cma = 1;
  103: if (cma)
  112: info->cma = cma;
  185: if (info->cma)

arch/powerpc/kvm/book3s_hv_builtin.c
  47: static struct cma *kvm_cma;

arch/riscv/kernel/kexec_elf.c
  98: kbuf.cma = NULL;

arch/s390/mm/init.c
  221: static int s390_cma_check_range(struct cma *cma, void *data)
  227: if (cma_intersects(cma, mem_data->start, mem_data->end))

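The two arch/s390/mm/init.c hits above show the callback pattern behind
cma_for_each_area(). A minimal sketch of that pattern, assuming a
hypothetical range-check payload; the struct and function names below are
illustrative, not taken from the kernel:

  /* Sketch only: iterate all CMA areas and test a physical range. */
  #include <linux/cma.h>

  struct range_check {
          unsigned long start;    /* start of the range to test */
          unsigned long end;      /* end of the range, mirroring the s390 caller */
  };

  static int check_one_area(struct cma *cma, void *data)
  {
          struct range_check *rc = data;

          /* A non-zero return value stops the iteration early. */
          return cma_intersects(cma, rc->start, rc->end);
  }

  static bool range_overlaps_any_cma(unsigned long start, unsigned long end)
  {
          struct range_check rc = { .start = start, .end = end };

          return cma_for_each_area(check_one_area, &rc) != 0;
  }
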
drivers/dma-buf/heaps/cma_heap.c
  33: static struct cma *dma_areas[MAX_CMA_AREAS] __initdata;
  36: int __init dma_heap_cma_register_heap(struct cma *cma)
  41: dma_areas[dma_areas_num++] = cma;
  48: struct cma *cma;
  278: cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
  322: cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
  378: cma_release(cma_heap->cma, cma_pages, pagecount);
  389: static int __init __add_cma_heap(struct cma *cma, const char *name)
  397: cma_heap->cma = cma;
  416: struct cma *default_cma = dev_get_cma_area(NULL);
  427: struct cma *cma = dma_areas[i];
  429: ret = __add_cma_heap(cma, cma_get_name(cma));
  431: pr_warn("Failed to add CMA heap %s", cma_get_name(cma));

drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
  23: bool cma;

drivers/gpu/drm/nouveau/nouveau_bo.c
  1274: reg->bus.is_iomem = !drm->agp.cma;

drivers/gpu/drm/nouveau/nouveau_drv.h
  231: bool cma;

drivers/gpu/drm/nouveau/nouveau_ttm.c
  299: drm->agp.cma = pci->agp.cma;

drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
  135: pci->agp.cma = info.cant_use_aperture;

drivers/s390/char/vmcp.c
  41: static struct cma *vmcp_cma;

include/linux/cma.h
  23: struct cma;
  26: extern phys_addr_t cma_get_base(const struct cma *cma);
  27: extern unsigned long cma_get_size(const struct cma *cma);
  28: extern const char *cma_get_name(const struct cma *cma);
  33: bool fixed, const char *name, struct cma **res_cma,
  38: bool fixed, const char *name, struct cma **res_cma)
  45: const char *name, struct cma **res_cma, int nid);
  49: struct cma **res_cma);
  50: extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
  52: extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
  54: struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
  56: struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order);
  57: bool cma_release_frozen(struct cma *cma, const struct page *pages,
  60: extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
  61: extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
  63: extern void cma_reserve_pages_on_error(struct cma *cma);

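The include/linux/cma.h hits above are the consumer-facing API. A minimal
sketch of the basic cma_alloc()/cma_release() pairing; my_cma is a
hypothetical pointer that a real driver would obtain from one of the
reservation calls (cma_declare_contiguous*() or cma_init_reserved_mem())
at boot:

  /* Sketch only: allocate and free pages from a known CMA area. */
  #include <linux/cma.h>
  #include <linux/printk.h>

  static struct cma *my_cma;      /* hypothetical, set up at boot */

  static struct page *grab_buffer(unsigned long count)
  {
          /* align order 0 (page granularity); false = warn on failure */
          return cma_alloc(my_cma, count, 0, false);
  }

  static void put_buffer(struct page *pages, unsigned long count)
  {
          /* cma_release() returns false if pages are not from this area. */
          if (!cma_release(my_cma, pages, count))
                  pr_warn("pages were not allocated from this CMA area\n");
  }
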
include/linux/device.h
  622: struct cma *cma_area; /* contiguous memory area for dma

include/linux/dma-buf/heaps/cma.h
  5: struct cma;
  8: int dma_heap_cma_register_heap(struct cma *cma);
  10: static inline int dma_heap_cma_register_heap(struct cma *cma)

include/linux/dma-map-ops.h
  13: struct cma;
  94: extern struct cma *dma_contiguous_default_area;
  96: static inline struct cma *dev_get_cma_area(struct device *dev)
  105: phys_addr_t limit, struct cma **res_cma, bool fixed);
  116: static inline struct cma *dev_get_cma_area(struct device *dev)
  124: phys_addr_t base, phys_addr_t limit, struct cma **res_cma,

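The include/linux/dma-map-ops.h hits above declare the reservation entry
point: the fragments at lines 105 and 124 match the tail of the
dma_contiguous_reserve_area() declaration and its stub. A minimal sketch
of a boot-time reservation, assuming the usual full signature; the 64 MiB
size, the zero base/limit (meaning "place it anywhere"), and my_area are
illustrative:

  /* Sketch only: carve out a CMA area early in boot (arch setup code). */
  #include <linux/dma-map-ops.h>
  #include <linux/printk.h>
  #include <linux/sizes.h>

  static struct cma *my_area;     /* hypothetical */

  static void __init my_reserve(void)
  {
          /* base = 0, limit = 0: let memblock pick any suitable range */
          if (dma_contiguous_reserve_area(SZ_64M, 0, 0, &my_area, false))
                  pr_warn("CMA reservation failed\n");
  }
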
include/linux/hugetlb.h
  660: HPAGEFLAG(Cma, cma)
  689: struct cma;
  695: struct cma *cma;

include/linux/kexec.h
  194: struct page *cma;

include/trace/events/cma.h
  3: #define TRACE_SYSTEM cma

kernel/crash_reserve.c
  488: struct cma *res;

kernel/dma/contiguous.c
  56: struct cma *dma_contiguous_default_area;
  106: static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
  108: static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
  176: struct cma **cma;
  186: cma = &dma_contiguous_pernuma_area[nid];
  189: 0, false, name, cma, nid);
  197: cma = &dma_contiguous_numa_area[nid];
  200: name, cma, nid);
  296: phys_addr_t limit, struct cma **res_cma,
  350: static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
  354: return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
  388: struct cma *cma = dma_contiguous_pernuma_area[nid];
  391: if (cma) {
  392: page = cma_alloc_aligned(cma, size, gfp);
  397: cma = dma_contiguous_numa_area[nid];
  398: if (cma) {
  399: page = cma_alloc_aligned(cma, size, gfp);
  482: struct cma *cma;
  494: err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
  501: dma_contiguous_default_area = cma;
  504: rmem->priv = cma;
  509: err = dma_heap_cma_register_heap(cma);
  515: RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);

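kernel/dma/contiguous.c also implements the per-device wrappers used
elsewhere in this listing (kernel/kexec_core.c calls
dma_release_from_contiguous() below). A minimal sketch of that pairing,
assuming the usual dma_alloc_from_contiguous()/dma_release_from_contiguous()
signatures; the helper names and the fallback free path are illustrative:

  /* Sketch only: device-scoped contiguous allocation with fallback free. */
  #include <linux/dma-map-ops.h>
  #include <linux/gfp.h>

  static struct page *grab_contig(struct device *dev, size_t nr_pages)
  {
          /* A NULL dev falls back to the default area; warn on failure. */
          return dma_alloc_from_contiguous(dev, nr_pages, 0, false);
  }

  static void put_contig(struct device *dev, struct page *pages, size_t nr_pages)
  {
          /* Returns false if the pages did not come from a CMA area. */
          if (!dma_release_from_contiguous(dev, pages, nr_pages))
                  __free_pages(pages, get_order(nr_pages << PAGE_SHIFT));
  }
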
kernel/dma/pool.c
  60: struct cma *cma;
  62: cma = dev_get_cma_area(NULL);
  63: if (!cma)
  66: size = cma_get_size(cma);
  71: end = cma_get_base(cma) + size - 1;

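The kernel/dma/pool.c hits above show the standard query pattern: resolve
the default area with dev_get_cma_area(NULL), then read its bounds through
the accessors from include/linux/cma.h. A minimal sketch of the same
pattern; the function name and pr_info() wording are illustrative:

  /* Sketch only: report the default CMA area's physical extent. */
  #include <linux/cma.h>
  #include <linux/dma-map-ops.h>
  #include <linux/printk.h>

  static void report_default_cma(void)
  {
          struct cma *cma = dev_get_cma_area(NULL); /* NULL: default area */
          phys_addr_t base, end;

          if (!cma)
                  return;         /* no default area was reserved */

          base = cma_get_base(cma);
          end = base + cma_get_size(cma) - 1;
          pr_info("cma %s: %pa-%pa\n", cma_get_name(cma), &base, &end);
  }
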
kernel/kexec_core.c
  563: struct page *cma = image->segment_cma[i];
  566: if (!cma)
  569: arch_kexec_pre_free_pages(page_address(cma), nr_pages);
  570: dma_release_from_contiguous(NULL, cma, nr_pages);
  745: struct page *cma = image->segment_cma[idx];
  746: char *ptr = page_address(cma);
  963: struct page *cma;
  967: cma = image->segment_cma[idx];
  968: if (cma)
  969: return page_address(cma);

kernel/kexec_file.c
  700: kbuf->cma = p;
  783: kbuf->cma = NULL;
  796: kbuf->image->segment_cma[kbuf->image->nr_segments] = kbuf->cma;

mm/cma.c
  37: struct cma cma_areas[MAX_CMA_AREAS];
  40: phys_addr_t cma_get_base(const struct cma *cma)
  42: WARN_ON_ONCE(cma->nranges != 1);
  43: return PFN_PHYS(cma->ranges[0].base_pfn);
  46: unsigned long cma_get_size(const struct cma *cma)
  48: return cma->count << PAGE_SHIFT;
  51: const char *cma_get_name(const struct cma *cma)
  53: return cma->name;
  56: static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
  59: if (align_order <= cma->order_per_bit)
  61: return (1UL << (align_order - cma->order_per_bit)) - 1;
  68: static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
  73: >> cma->order_per_bit;
  76: static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
  79: return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
  82: static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr,
  88: bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit;
  89: bitmap_count = cma_bitmap_pages_to_bits(cma, count);
  91: spin_lock_irqsave(&cma->lock, flags);
  93: cma->available_count += count;
  94: spin_unlock_irqrestore(&cma->lock, flags);
  102: bool cma_validate_zones(struct cma *cma)
  115: valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags);
  116: if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags))
  119: for (r = 0; r < cma->nranges; r++) {
  120: cmr = &cma->ranges[r];
  129: if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) {
  130: set_bit(CMA_ZONES_INVALID, &cma->flags);
  135: set_bit(CMA_ZONES_VALID, &cma->flags);
  140: static void __init cma_activate_area(struct cma *cma)
  147: for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
  148: cmr = &cma->ranges[allocrange];
  150: cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr),
  156: if (!cma_validate_zones(cma))
  159: for (r = 0; r < cma->nranges; r++) {
  160: cmr = &cma->ranges[r];
  163: bitmap_count = cma_bitmap_pages_to_bits(cma, count);
  172: spin_lock_init(&cma->lock);
  174: mutex_init(&cma->alloc_mutex);
  177: INIT_HLIST_HEAD(&cma->mem_head);
  178: spin_lock_init(&cma->mem_head_lock);
  180: set_bit(CMA_ACTIVATED, &cma->flags);
  186: bitmap_free(cma->ranges[r].bitmap);
  189: if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) {
  191: cmr = &cma->ranges[r];
  197: totalcma_pages -= cma->count;
  198: cma->available_count = cma->count = 0;
  199: pr_err("CMA area %s could not be activated\n", cma->name);
  213: void __init cma_reserve_pages_on_error(struct cma *cma)
  215: set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);
  220: struct cma **res_cma)
  222: struct cma *cma;
  233: cma = &cma_areas[cma_area_count];
  237: strscpy(cma->name, name);
  239: snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);
  241: cma->available_count = cma->count = size >> PAGE_SHIFT;
  242: cma->order_per_bit = order_per_bit;
  243: *res_cma = cma;
  244: totalcma_pages += cma->count;
  249: static void __init cma_drop_area(struct cma *cma)
  251: totalcma_pages -= cma->count;
  270: struct cma **res_cma)
  272: struct cma *cma;
  292: ret = cma_new_area(name, size, order_per_bit, &cma);
  296: cma->ranges[0].base_pfn = PFN_DOWN(base);
  297: cma->ranges[0].early_pfn = PFN_DOWN(base);
  298: cma->ranges[0].count = cma->count;
  299: cma->nranges = 1;
  300: cma->nid = NUMA_NO_NODE;
  302: *res_cma = cma;
  433: bool fixed, const char *name, struct cma **res_cma,
  530: const char *name, struct cma **res_cma, int nid)
  541: struct cma *cma;
  571: ret = cma_new_area(name, total_size, order_per_bit, &cma);
  636: cma_drop_area(cma);
  677: cmrp = &cma->ranges[nr++];
  694: cma_drop_area(cma);
  699: cma->nranges = nr;
  700: cma->nid = nid;
  701: *res_cma = cma;
  736: bool fixed, const char *name, struct cma **res_cma,
  753: static void cma_debug_show_areas(struct cma *cma)
  761: spin_lock_irq(&cma->lock);
  763: for (r = 0; r < cma->nranges; r++) {
  764: cmr = &cma->ranges[r];
  766: nbits = cma_bitmap_maxno(cma, cmr);
  770: nr_part = (end - start) << cma->order_per_bit;
  775: pr_cont("=> %lu free of %lu total pages\n", cma->available_count,
  776: cma->count);
  777: spin_unlock_irq(&cma->lock);
  780: static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
  789: mask = cma_bitmap_aligned_mask(cma, align);
  790: offset = cma_bitmap_aligned_offset(cma, cmr, align);
  791: bitmap_maxno = cma_bitmap_maxno(cma, cmr);
  792: bitmap_count = cma_bitmap_pages_to_bits(cma, count);
  798: spin_lock_irq(&cma->lock);
  803: if (count > cma->available_count) {
  804: spin_unlock_irq(&cma->lock);
  811: spin_unlock_irq(&cma->lock);
  815: pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
  824: spin_unlock_irq(&cma->lock);
  826: __func__, cma->name, pfn, pfn + count - 1);
  831: cma->available_count -= count;
  837: spin_unlock_irq(&cma->lock);
  839: mutex_lock(&cma->alloc_mutex);
  841: mutex_unlock(&cma->alloc_mutex);
  845: cma_clear_bitmap(cma, cmr, pfn, count);
  852: trace_cma_alloc_busy_retry(cma->name, pfn, page, count, align);
  860: static struct page *__cma_alloc_frozen(struct cma *cma,
  866: const char *name = cma ? cma->name : NULL;
  868: if (!cma || !cma->count)
  872: (void *)cma, cma->name, count, align);
  877: trace_cma_alloc_start(name, count, cma->available_count, cma->count, align);
  879: for (r = 0; r < cma->nranges; r++) {
  882: ret = cma_range_alloc(cma, &cma->ranges[r], count, align,
  900: __func__, cma->name, count, ret);
  901: cma_debug_show_areas(cma);
  909: cma_sysfs_account_success_pages(cma, count);
  912: cma_sysfs_account_fail_pages(cma, count);
  918: struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
  923: return __cma_alloc_frozen(cma, count, align, gfp);
  926: struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order)
  930: return __cma_alloc_frozen(cma, 1 << order, order, gfp);
  943: struct page *cma_alloc(struct cma *cma, unsigned long count,
  948: page = cma_alloc_frozen(cma, count, align, no_warn);
  955: static struct cma_memrange *find_cma_memrange(struct cma *cma,
  964: if (!cma || !pages || count > cma->count)
  969: for (r = 0; r < cma->nranges; r++) {
  970: cmr = &cma->ranges[r];
  980: if (r == cma->nranges) {
  989: static void __cma_release_frozen(struct cma *cma, struct cma_memrange *cmr,
  997: cma_clear_bitmap(cma, cmr, pfn, count);
  998: cma_sysfs_account_release_pages(cma, count);
  999: trace_cma_release(cma->name, pfn, pages, count);
  1012: bool cma_release(struct cma *cma, const struct page *pages,
  1019: cmr = find_cma_memrange(cma, pages, count);
  1029: __cma_release_frozen(cma, cmr, pages, count);
  1034: bool cma_release_frozen(struct cma *cma, const struct page *pages,
  1039: cmr = find_cma_memrange(cma, pages, count);
  1043: __cma_release_frozen(cma, cmr, pages, count);
  1048: int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
  1062: bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end)
  1068: for (r = 0; r < cma->nranges; r++) {
  1069: cmr = &cma->ranges[r];
  1105: void __init *cma_reserve_early(struct cma *cma, unsigned long size)
  1112: if (!cma || !cma->count)
  1117: if (test_bit(CMA_ACTIVATED, &cma->flags))
  1123: if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit)))
  1128: if (size > cma->available_count)
  1131: for (r = 0; r < cma->nranges; r++) {
  1132: cmr = &cma->ranges[r];
  1137: cma->available_count -= size;

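The mm/cma.c fragments above (cma_bitmap_aligned_mask(),
cma_bitmap_pages_to_bits(), and the pfn conversions at lines 88 and 815)
all hinge on order_per_bit: one bitmap bit covers (1 << order_per_bit)
pages. A standalone sketch of that arithmetic with illustrative values:

  /* Sketch only: userspace demo of the CMA bitmap granularity math. */
  #include <stdio.h>

  #define ALIGN(x, a)     (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

  int main(void)
  {
          unsigned long order_per_bit = 2;  /* 1 bit == 4 pages (illustrative) */
          unsigned long base_pfn = 0x80000; /* hypothetical range start */

          /* cma_bitmap_pages_to_bits(): round a 10-page request to whole bits */
          unsigned long bits = ALIGN(10, 1UL << order_per_bit) >> order_per_bit;

          /* cma_bitmap_aligned_mask(): mask for an order-3 aligned request */
          unsigned long align_order = 3;
          unsigned long mask = (align_order <= order_per_bit) ? 0 :
                          (1UL << (align_order - order_per_bit)) - 1;

          /* bitmap index <-> pfn, as in the alloc/release paths above */
          unsigned long bitmap_no = 5;
          unsigned long pfn = base_pfn + (bitmap_no << order_per_bit);

          printf("bits=%lu mask=%#lx pfn=%#lx\n", bits, mask, pfn);
          return 0;   /* prints: bits=3 mask=0x1 pfn=0x80014 */
  }
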
mm/cma.h
  10: struct cma *cma;
  74: extern struct cma cma_areas[MAX_CMA_AREAS];
  77: static inline unsigned long cma_bitmap_maxno(struct cma *cma,
  80: return cmr->count >> cma->order_per_bit;
  84: void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages);
  85: void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages);
  86: void cma_sysfs_account_release_pages(struct cma *cma, unsigned long nr_pages);
  88: static inline void cma_sysfs_account_success_pages(struct cma *cma,
  90: static inline void cma_sysfs_account_fail_pages(struct cma *cma,
  92: static inline void cma_sysfs_account_release_pages(struct cma *cma,

mm/cma_debug.c
  36: struct cma *cma = data;
  38: spin_lock_irq(&cma->lock);
  39: *val = cma->count - cma->available_count;
  40: spin_unlock_irq(&cma->lock);
  48: struct cma *cma = data;
  55: spin_lock_irq(&cma->lock);
  56: for (r = 0; r < cma->nranges; r++) {
  57: cmr = &cma->ranges[r];
  58: bitmap_maxno = cma_bitmap_maxno(cma, cmr);
  62: spin_unlock_irq(&cma->lock);
  63: *val = (u64)maxchunk << cma->order_per_bit;
  69: static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
  71: spin_lock(&cma->mem_head_lock);
  72: hlist_add_head(&mem->node, &cma->mem_head);
  73: spin_unlock(&cma->mem_head_lock);
  76: static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
  80: spin_lock(&cma->mem_head_lock);
  81: if (!hlist_empty(&cma->mem_head)) {
  82: mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
  85: spin_unlock(&cma->mem_head_lock);
  90: static int cma_free_mem(struct cma *cma, int count)
  95: mem = cma_get_entry_from_list(cma);
  100: cma_release(cma, mem->p, mem->n);
  103: } else if (cma->order_per_bit == 0) {
  104: cma_release(cma, mem->p, count);
  108: cma_add_to_cma_mem_list(cma, mem);
  111: cma_add_to_cma_mem_list(cma, mem);
  123: struct cma *cma = data;
  125: return cma_free_mem(cma, pages);
  129: static int cma_alloc_mem(struct cma *cma, int count)
  138: p = cma_alloc(cma, count, 0, false);
  147: cma_add_to_cma_mem_list(cma, mem);
  155: struct cma *cma = data;
  157: return cma_alloc_mem(cma, pages);
  161: static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
  168: tmp = debugfs_create_dir(cma->name, root_dentry);
  170: debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
  171: debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
  172: debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops);
  174: &cma->order_per_bit, &cma_debugfs_fops);
  175: debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);
  176: debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);
  179: for (r = 0; r < cma->nranges; r++) {
  180: cmr = &cma->ranges[r];
  187: DIV_ROUND_UP(cma_bitmap_maxno(cma, cmr),

mm/cma_sysfs.c
  17: void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages)
  19: atomic64_add(nr_pages, &cma->nr_pages_succeeded);
  22: void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages)
  24: atomic64_add(nr_pages, &cma->nr_pages_failed);
  27: void cma_sysfs_account_release_pages(struct cma *cma, unsigned long nr_pages)
  29: atomic64_add(nr_pages, &cma->nr_pages_released);
  32: static inline struct cma *cma_from_kobj(struct kobject *kobj)
  34: return container_of(kobj, struct cma_kobject, kobj)->cma;
  40: struct cma *cma = cma_from_kobj(kobj);
  43: atomic64_read(&cma->nr_pages_succeeded));
  50: struct cma *cma = cma_from_kobj(kobj);
  52: return sysfs_emit(buf, "%llu\n", atomic64_read(&cma->nr_pages_failed));
  59: struct cma *cma = cma_from_kobj(kobj);
  61: return sysfs_emit(buf, "%llu\n", atomic64_read(&cma->nr_pages_released));
  68: struct cma *cma = cma_from_kobj(kobj);
  70: return sysfs_emit(buf, "%lu\n", cma->count);
  77: struct cma *cma = cma_from_kobj(kobj);
  79: return sysfs_emit(buf, "%lu\n", cma->available_count);
  85: struct cma *cma = cma_from_kobj(kobj);
  86: struct cma_kobject *cma_kobj = cma->cma_kobj;
  89: cma->cma_kobj = NULL;
  100: ATTRIBUTE_GROUPS(cma);
  112: struct cma *cma;
  126: cma = &cma_areas[i];
  127: cma->cma_kobj = cma_kobj;
  128: cma_kobj->cma = cma;
  130: cma_kobj_root, "%s", cma->name);
  140: cma = &cma_areas[i];
  141: kobject_put(&cma->cma_kobj->kobj);

mm/hugetlb.c
  3109: m->cma = NULL;
  3290: valid = cma_validate_zones(m->cma);
  3992: bool cma;
  3997: cma = folio_test_hugetlb_cma(folio);
  4015: if (cma)

mm/hugetlb_cma.c
  16: static struct cma *hugetlb_cma[MAX_NUMNODES] __ro_after_init;
  62: struct cma *cma;
  66: cma = hugetlb_cma[*nid];
  67: m = cma_reserve_early(cma, huge_page_size(h));
  73: cma = hugetlb_cma[node];
  74: if (!cma || node == *nid)
  76: m = cma_reserve_early(cma, huge_page_size(h));
  86: m->cma = cma;

mm/internal.h
  1011: struct cma;
  1014: bool cma_validate_zones(struct cma *cma);
  1015: void *cma_reserve_early(struct cma *cma, unsigned long size);
  1018: static inline bool cma_validate_zones(struct cma *cma)
  1022: static inline void *cma_reserve_early(struct cma *cma, unsigned long size)