cma
bool allowblock, cma;
cma = allowblock ? dev_get_cma_area(dev) : NULL;
if (cma)
int cma;
static struct cma *fadump_cma;
if (cma)
info->cma = cma;
if (info->cma)
int cma = 0;
cma = 1;
static struct cma *kvm_cma;
kbuf.cma = NULL;
static int s390_cma_check_range(struct cma *cma, void *data)
if (cma_intersects(cma, mem_data->start, mem_data->end))
cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
static struct cma *dma_areas[MAX_CMA_AREAS] __initdata;
int __init dma_heap_cma_register_heap(struct cma *cma)
cma_release(cma_heap->cma, cma_pages, pagecount);
static int __init __add_cma_heap(struct cma *cma, const char *name)
cma_heap->cma = cma;
dma_areas[dma_areas_num++] = cma;
struct cma *default_cma = dev_get_cma_area(NULL);
struct cma *cma = dma_areas[i];
ret = __add_cma_heap(cma, cma_get_name(cma));
pr_warn("Failed to add CMA heap %s", cma_get_name(cma));
struct cma *cma;
bool cma;
reg->bus.is_iomem = !drm->agp.cma;
bool cma;
drm->agp.cma = pci->agp.cma;
pci->agp.cma = info.cant_use_aperture;
static struct cma *vmcp_cma;
struct cma;
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
extern const char *cma_get_name(const struct cma *cma);
bool fixed, const char *name, struct cma **res_cma,
bool fixed, const char *name, struct cma **res_cma)
const char *name, struct cma **res_cma, int nid);
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order);
bool cma_release_frozen(struct cma *cma, const struct page *pages,
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
extern void cma_reserve_pages_on_error(struct cma *cma);
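/*
 * Hedged usage sketch of the cma_alloc()/cma_release() API declared above
 * (not taken from the matches themselves): grab and return a physically
 * contiguous range. The wrapper names and the alignment choice (order 0)
 * are illustrative assumptions.
 */
#include <linux/bug.h>
#include <linux/cma.h>
#include <linux/mm_types.h>

static struct page *example_cma_get(struct cma *cma, unsigned long nr_pages)
{
	/* align = 0: page alignment only; no_warn = false: warn on failure */
	return cma_alloc(cma, nr_pages, 0, false);
}

static void example_cma_put(struct cma *cma, struct page *pages,
			    unsigned long nr_pages)
{
	/* cma_release() returns false if the range does not belong to @cma */
	WARN_ON_ONCE(!cma_release(cma, pages, nr_pages));
}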
struct cma *cma_area; /* contiguous memory area for dma allocations */
static inline int dma_heap_cma_register_heap(struct cma *cma)
struct cma;
int dma_heap_cma_register_heap(struct cma *cma);
phys_addr_t limit, struct cma **res_cma, bool fixed);
static inline struct cma *dev_get_cma_area(struct device *dev)
phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
struct cma;
extern struct cma *dma_contiguous_default_area;
static inline struct cma *dev_get_cma_area(struct device *dev)
HPAGEFLAG(Cma, cma)
struct cma;
struct cma *cma;
struct page *cma;
#define TRACE_SYSTEM cma
struct cma *res;
static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
struct cma **cma;
cma = &dma_contiguous_pernuma_area[nid];
0, false, name, cma, nid);
cma = &dma_contiguous_numa_area[nid];
name, cma, nid);
phys_addr_t limit, struct cma **res_cma,
static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
struct cma *cma = dma_contiguous_pernuma_area[nid];
if (cma) {
page = cma_alloc_aligned(cma, size, gfp);
cma = dma_contiguous_numa_area[nid];
if (cma) {
page = cma_alloc_aligned(cma, size, gfp);
struct cma *cma;
err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
dma_contiguous_default_area = cma;
rmem->priv = cma;
err = dma_heap_cma_register_heap(cma);
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
struct cma *dma_contiguous_default_area;
struct cma *cma;
cma = dev_get_cma_area(NULL);
if (!cma)
size = cma_get_size(cma);
end = cma_get_base(cma) + size - 1;
struct page *cma = image->segment_cma[i];
if (!cma)
arch_kexec_pre_free_pages(page_address(cma), nr_pages);
dma_release_from_contiguous(NULL, cma, nr_pages);
struct page *cma = image->segment_cma[idx];
char *ptr = page_address(cma);
struct page *cma;
cma = image->segment_cma[idx];
if (cma)
return page_address(cma);
kbuf->cma = p;
kbuf->cma = NULL;
kbuf->image->segment_cma[kbuf->image->nr_segments] = kbuf->cma;
bool cma_release(struct cma *cma, const struct page *pages,
cmr = find_cma_memrange(cma, pages, count);
bool cma_validate_zones(struct cma *cma)
__cma_release_frozen(cma, cmr, pages, count);
bool cma_release_frozen(struct cma *cma, const struct page *pages,
cmr = find_cma_memrange(cma, pages, count);
__cma_release_frozen(cma, cmr, pages, count);
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end)
for (r = 0; r < cma->nranges; r++) {
cmr = &cma->ranges[r];
void __init *cma_reserve_early(struct cma *cma, unsigned long size)
if (!cma || !cma->count)
if (test_bit(CMA_ACTIVATED, &cma->flags))
if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit)))
if (size > cma->available_count)
for (r = 0; r < cma->nranges; r++) {
cmr = &cma->ranges[r];
cma->available_count -= size;
valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags);
if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags))
for (r = 0; r < cma->nranges; r++) {
cmr = &cma->ranges[r];
if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) {
set_bit(CMA_ZONES_INVALID, &cma->flags);
set_bit(CMA_ZONES_VALID, &cma->flags);
static void __init cma_activate_area(struct cma *cma)
for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
cmr = &cma->ranges[allocrange];
cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr),
if (!cma_validate_zones(cma))
for (r = 0; r < cma->nranges; r++) {
cmr = &cma->ranges[r];
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
spin_lock_init(&cma->lock);
mutex_init(&cma->alloc_mutex);
INIT_HLIST_HEAD(&cma->mem_head);
spin_lock_init(&cma->mem_head_lock);
set_bit(CMA_ACTIVATED, &cma->flags);
bitmap_free(cma->ranges[r].bitmap);
if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) {
cmr = &cma->ranges[r];
totalcma_pages -= cma->count;
cma->available_count = cma->count = 0;
pr_err("CMA area %s could not be activated\n", cma->name);
void __init cma_reserve_pages_on_error(struct cma *cma)
set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);
struct cma **res_cma)
struct cma *cma;
cma = &cma_areas[cma_area_count];
strscpy(cma->name, name);
snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);
cma->available_count = cma->count = size >> PAGE_SHIFT;
cma->order_per_bit = order_per_bit;
*res_cma = cma;
totalcma_pages += cma->count;
static void __init cma_drop_area(struct cma *cma)
totalcma_pages -= cma->count;
struct cma **res_cma)
struct cma *cma;
ret = cma_new_area(name, size, order_per_bit, &cma);
cma->ranges[0].base_pfn = PFN_DOWN(base);
cma->ranges[0].early_pfn = PFN_DOWN(base);
cma->ranges[0].count = cma->count;
cma->nranges = 1;
cma->nid = NUMA_NO_NODE;
*res_cma = cma;
struct cma cma_areas[MAX_CMA_AREAS];
phys_addr_t cma_get_base(const struct cma *cma)
WARN_ON_ONCE(cma->nranges != 1);
return PFN_PHYS(cma->ranges[0].base_pfn);
bool fixed, const char *name, struct cma **res_cma,
unsigned long cma_get_size(const struct cma *cma)
return cma->count << PAGE_SHIFT;
const char *cma_get_name(const struct cma *cma)
return cma->name;
const char *name, struct cma **res_cma, int nid)
struct cma *cma;
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
ret = cma_new_area(name, total_size, order_per_bit, &cma);
if (align_order <= cma->order_per_bit)
return (1UL << (align_order - cma->order_per_bit)) - 1;
cma_drop_area(cma);
cmrp = &cma->ranges[nr++];
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
cma_drop_area(cma);
cma->nranges = nr;
cma->nid = nid;
*res_cma = cma;
>> cma->order_per_bit;
bool fixed, const char *name, struct cma **res_cma,
static void cma_debug_show_areas(struct cma *cma)
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
spin_lock_irq(&cma->lock);
for (r = 0; r < cma->nranges; r++) {
cmr = &cma->ranges[r];
nbits = cma_bitmap_maxno(cma, cmr);
nr_part = (end - start) << cma->order_per_bit;
pr_cont("=> %lu free of %lu total pages\n", cma->available_count,
cma->count);
spin_unlock_irq(&cma->lock);
static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
mask = cma_bitmap_aligned_mask(cma, align);
return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
offset = cma_bitmap_aligned_offset(cma, cmr, align);
bitmap_maxno = cma_bitmap_maxno(cma, cmr);
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
spin_lock_irq(&cma->lock);
if (count > cma->available_count) {
spin_unlock_irq(&cma->lock);
spin_unlock_irq(&cma->lock);
pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr,
spin_unlock_irq(&cma->lock);
__func__, cma->name, pfn, pfn + count - 1);
cma->available_count -= count;
spin_unlock_irq(&cma->lock);
mutex_lock(&cma->alloc_mutex);
mutex_unlock(&cma->alloc_mutex);
cma_clear_bitmap(cma, cmr, pfn, count);
trace_cma_alloc_busy_retry(cma->name, pfn, page, count, align);
static struct page *__cma_alloc_frozen(struct cma *cma,
const char *name = cma ? cma->name : NULL;
if (!cma || !cma->count)
(void *)cma, cma->name, count, align);
trace_cma_alloc_start(name, count, cma->available_count, cma->count, align);
for (r = 0; r < cma->nranges; r++) {
bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit;
ret = cma_range_alloc(cma, &cma->ranges[r], count, align,
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
__func__, cma->name, count, ret);
cma_debug_show_areas(cma);
cma_sysfs_account_success_pages(cma, count);
spin_lock_irqsave(&cma->lock, flags);
cma_sysfs_account_fail_pages(cma, count);
struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
return __cma_alloc_frozen(cma, count, align, gfp);
struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order)
cma->available_count += count;
return __cma_alloc_frozen(cma, 1 << order, order, gfp);
spin_unlock_irqrestore(&cma->lock, flags);
struct page *cma_alloc(struct cma *cma, unsigned long count,
page = cma_alloc_frozen(cma, count, align, no_warn);
static struct cma_memrange *find_cma_memrange(struct cma *cma,
if (!cma || !pages || count > cma->count)
for (r = 0; r < cma->nranges; r++) {
cmr = &cma->ranges[r];
if (r == cma->nranges) {
static void __cma_release_frozen(struct cma *cma, struct cma_memrange *cmr,
cma_clear_bitmap(cma, cmr, pfn, count);
cma_sysfs_account_release_pages(cma, count);
trace_cma_release(cma->name, pfn, pages, count);
struct cma *cma;
extern struct cma cma_areas[MAX_CMA_AREAS];
static inline unsigned long cma_bitmap_maxno(struct cma *cma,
return cmr->count >> cma->order_per_bit;
void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages);
void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages);
void cma_sysfs_account_release_pages(struct cma *cma, unsigned long nr_pages);
static inline void cma_sysfs_account_success_pages(struct cma *cma,
static inline void cma_sysfs_account_fail_pages(struct cma *cma,
static inline void cma_sysfs_account_release_pages(struct cma *cma,
cma_release(cma, mem->p, mem->n);
} else if (cma->order_per_bit == 0) {
cma_release(cma, mem->p, count);
cma_add_to_cma_mem_list(cma, mem);
cma_add_to_cma_mem_list(cma, mem);
struct cma *cma = data;
return cma_free_mem(cma, pages);
static int cma_alloc_mem(struct cma *cma, int count)
p = cma_alloc(cma, count, 0, false);
cma_add_to_cma_mem_list(cma, mem);
struct cma *cma = data;
return cma_alloc_mem(cma, pages);
static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
tmp = debugfs_create_dir(cma->name, root_dentry);
debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops);
&cma->order_per_bit, &cma_debugfs_fops);
debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);
debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);
for (r = 0; r < cma->nranges; r++) {
cmr = &cma->ranges[r];
DIV_ROUND_UP(cma_bitmap_maxno(cma, cmr),
struct cma *cma = data;
spin_lock_irq(&cma->lock);
*val = cma->count - cma->available_count;
spin_unlock_irq(&cma->lock);
struct cma *cma = data;
spin_lock_irq(&cma->lock);
for (r = 0; r < cma->nranges; r++) {
cmr = &cma->ranges[r];
bitmap_maxno = cma_bitmap_maxno(cma, cmr);
spin_unlock_irq(&cma->lock);
*val = (u64)maxchunk << cma->order_per_bit;
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
spin_lock(&cma->mem_head_lock);
hlist_add_head(&mem->node, &cma->mem_head);
spin_unlock(&cma->mem_head_lock);
static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
spin_lock(&cma->mem_head_lock);
if (!hlist_empty(&cma->mem_head)) {
mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
spin_unlock(&cma->mem_head_lock);
static int cma_free_mem(struct cma *cma, int count)
mem = cma_get_entry_from_list(cma);
ATTRIBUTE_GROUPS(cma);
struct cma *cma;
cma = &cma_areas[i];
cma->cma_kobj = cma_kobj;
cma_kobj->cma = cma;
cma_kobj_root, "%s", cma->name);
cma = &cma_areas[i];
kobject_put(&cma->cma_kobj->kobj);
void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages)
atomic64_add(nr_pages, &cma->nr_pages_succeeded);
void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages)
atomic64_add(nr_pages, &cma->nr_pages_failed);
void cma_sysfs_account_release_pages(struct cma *cma, unsigned long nr_pages)
atomic64_add(nr_pages, &cma->nr_pages_released);
static inline struct cma *cma_from_kobj(struct kobject *kobj)
return container_of(kobj, struct cma_kobject, kobj)->cma;
struct cma *cma = cma_from_kobj(kobj);
atomic64_read(&cma->nr_pages_succeeded));
struct cma *cma = cma_from_kobj(kobj);
return sysfs_emit(buf, "%llu\n", atomic64_read(&cma->nr_pages_failed));
struct cma *cma = cma_from_kobj(kobj);
return sysfs_emit(buf, "%llu\n", atomic64_read(&cma->nr_pages_released));
struct cma *cma = cma_from_kobj(kobj);
return sysfs_emit(buf, "%lu\n", cma->count);
struct cma *cma = cma_from_kobj(kobj);
return sysfs_emit(buf, "%lu\n", cma->available_count);
struct cma *cma = cma_from_kobj(kobj);
struct cma_kobject *cma_kobj = cma->cma_kobj;
cma->cma_kobj = NULL;
m->cma = NULL;
valid = cma_validate_zones(m->cma);
bool cma;
cma = folio_test_hugetlb_cma(folio);
if (cma)
static struct cma *hugetlb_cma[MAX_NUMNODES] __ro_after_init;
struct cma *cma;
cma = hugetlb_cma[*nid];
m = cma_reserve_early(cma, huge_page_size(h));
cma = hugetlb_cma[node];
if (!cma || node == *nid)
m = cma_reserve_early(cma, huge_page_size(h));
m->cma = cma;
struct cma;
bool cma_validate_zones(struct cma *cma);
void *cma_reserve_early(struct cma *cma, unsigned long size);
static inline bool cma_validate_zones(struct cma *cma)
static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
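/*
 * Hedged sketch of the early-boot helper declared above: carve one chunk
 * out of a not-yet-activated CMA area, the way the hugetlb bootmem path
 * above does. The caller name and warning text are illustrative
 * assumptions; cma_reserve_early() itself is an mm-internal helper.
 */
#include <linux/cma.h>
#include <linux/init.h>
#include <linux/printk.h>

static void __init example_reserve_early_chunk(struct cma *cma,
					       unsigned long bytes)
{
	void *chunk;

	/* Only valid before the area is activated; size must honour order_per_bit. */
	chunk = cma_reserve_early(cma, bytes);
	if (!chunk) {
		pr_warn("early reservation of %lu bytes from %s failed\n",
			bytes, cma_get_name(cma));
		return;
	}
	/* chunk points into the reserved range and is usable during early boot. */
}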