drivers/dma-buf/heaps/system_heap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *      Andrew F. Davis <afd@ti.com>
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

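/* Per-buffer state, shared by every attachment of the exported dma-buf. */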
struct system_heap_buffer {
        struct dma_heap *heap;
        struct list_head attachments;
        struct mutex lock;
        unsigned long len;
        struct sg_table sg_table;
        int vmap_cnt;
        void *vaddr;
};

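/* Per-device attachment state: a private copy of the buffer's scatterlist. */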
struct dma_heap_attachment {
        struct device *dev;
        struct sg_table table;
        struct list_head list;
        bool mapped;
};

#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
                                | __GFP_NORETRY) & ~__GFP_RECLAIM) \
                                | __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)

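/*
 * Duplicate the buffer's scatterlist for a new user. Only the table
 * entries are copied; the pages they point at are shared.
 */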
static int dup_sg_table(struct sg_table *from, struct sg_table *to)
{
        struct scatterlist *sg, *new_sg;
        int ret, i;

        ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL);
        if (ret)
                return ret;

        new_sg = to->sgl;
        for_each_sgtable_sg(from, sg, i) {
                sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
                new_sg = sg_next(new_sg);
        }

        return 0;
}

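/*
 * Give each attaching device a private copy of the scatterlist and
 * track it on the buffer, so begin/end_cpu_access can sync every
 * attachment that currently has a DMA mapping.
 */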
static int system_heap_attach(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attachment)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;
        int ret;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        ret = dup_sg_table(&buffer->sg_table, &a->table);
        if (ret) {
                kfree(a);
                return ret;
        }

        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);
        a->mapped = false;

        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

static void system_heap_detach(struct dma_buf *dmabuf,
                               struct dma_buf_attachment *attachment)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a = attachment->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);

        sg_free_table(&a->table);
        kfree(a);
}

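/* Map the attachment's private scatterlist for DMA by this device. */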
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
                                                enum dma_data_direction direction)
{
        struct dma_heap_attachment *a = attachment->priv;
        struct sg_table *table = &a->table;
        int ret;

        ret = dma_map_sgtable(attachment->dev, table, direction, 0);
        if (ret)
                return ERR_PTR(ret);

        a->mapped = true;
        return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                      struct sg_table *table,
                                      enum dma_data_direction direction)
{
        struct dma_heap_attachment *a = attachment->priv;

        a->mapped = false;
        dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

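/*
 * Before the CPU reads the buffer, invalidate any kernel vmap alias
 * and sync every DMA-mapped attachment for the CPU, so pending device
 * writes become visible.
 */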
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                                enum dma_data_direction direction)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;

        mutex_lock(&buffer->lock);

        if (buffer->vmap_cnt)
                invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

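/*
 * Flush CPU writes back to memory and resync every DMA-mapped
 * attachment before device access resumes.
 */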
static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                              enum dma_data_direction direction)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;

        mutex_lock(&buffer->lock);

        if (buffer->vmap_cnt)
                flush_kernel_vmap_range(buffer->vaddr, buffer->len);

        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_device(a->dev, &a->table, direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

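/*
 * Map the buffer into userspace. The first loop finds the scatterlist
 * entry that contains vma->vm_pgoff; the second remaps each entry's
 * physically contiguous pages with one remap_pfn_range() call per
 * entry rather than one call per page.
 */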
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct sg_table *table = &buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long pgoff = vma->vm_pgoff;
        struct scatterlist *sg;
        int i, ret;

        for_each_sgtable_sg(table, sg, i) {
                unsigned long n = sg->length >> PAGE_SHIFT;

                if (pgoff < n)
                        break;
                pgoff -= n;
        }

        for (; sg && addr < vma->vm_end; sg = sg_next(sg)) {
                unsigned long n = (sg->length >> PAGE_SHIFT) - pgoff;
                struct page *page = sg_page(sg) + pgoff;
                unsigned long size = n << PAGE_SHIFT;

                if (addr + size > vma->vm_end)
                        size = vma->vm_end - addr;

                ret = remap_pfn_range(vma, addr, page_to_pfn(page),
                                size, vma->vm_page_prot);
                if (ret)
                        return ret;

                addr += size;
                pgoff = 0;
        }

        return 0;
}

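/*
 * Collect the buffer's pages into a flat array and vmap() them into
 * one contiguous kernel virtual range.
 */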
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
        struct sg_table *table = &buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;
        struct sg_page_iter piter;
        void *vaddr;

        if (!pages)
                return ERR_PTR(-ENOMEM);

        for_each_sgtable_page(table, &piter, 0) {
                WARN_ON(tmp - pages >= npages);
                *tmp++ = sg_page_iter_page(&piter);
        }

        vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
        vfree(pages);

        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

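/* Kernel mappings are refcounted; nested vmaps reuse the existing vaddr. */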
static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        void *vaddr;
        int ret = 0;

        mutex_lock(&buffer->lock);
        if (buffer->vmap_cnt) {
                buffer->vmap_cnt++;
                iosys_map_set_vaddr(map, buffer->vaddr);
                goto out;
        }

        vaddr = system_heap_do_vmap(buffer);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                goto out;
        }

        buffer->vaddr = vaddr;
        buffer->vmap_cnt++;
        iosys_map_set_vaddr(map, buffer->vaddr);
out:
        mutex_unlock(&buffer->lock);

        return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
        struct system_heap_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        if (!--buffer->vmap_cnt) {
                vunmap(buffer->vaddr);
                buffer->vaddr = NULL;
        }
        mutex_unlock(&buffer->lock);
        iosys_map_clear(map);
}

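/*
 * No references remain: free each backing page at its compound order
 * (high-order pages were allocated with __GFP_COMP), then the table
 * and the buffer itself.
 */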
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct sg_table *table;
        struct scatterlist *sg;
        int i;

        table = &buffer->sg_table;
        for_each_sgtable_sg(table, sg, i) {
                struct page *page = sg_page(sg);

                __free_pages(page, compound_order(page));
        }
        sg_free_table(table);
        kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
        .attach = system_heap_attach,
        .detach = system_heap_detach,
        .map_dma_buf = system_heap_map_dma_buf,
        .unmap_dma_buf = system_heap_unmap_dma_buf,
        .begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
        .end_cpu_access = system_heap_dma_buf_end_cpu_access,
        .mmap = system_heap_mmap,
        .vmap = system_heap_vmap,
        .vunmap = system_heap_vunmap,
        .release = system_heap_dma_buf_release,
};

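/*
 * Walk the orders largest-first and return the biggest allocation that
 * fits both the remaining size and max_order. HIGH_ORDER_GFP clears
 * __GFP_RECLAIM and sets __GFP_NORETRY, so a failed high-order attempt
 * fails fast and we simply drop to the next smaller order.
 */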
static struct page *alloc_largest_available(unsigned long size,
                                            unsigned int max_order)
{
        struct page *page;
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                if (size <  (PAGE_SIZE << orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;
                page = alloc_pages(order_flags[i], orders[i]);
                if (!page)
                        continue;
                return page;
        }
        return NULL;
}

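/*
 * Allocate the backing pages largest-first, chain them into an
 * sg_table and export the result as a dma-buf. Capping max_order at
 * the order actually obtained means an order that just failed is
 * never retried for the rest of the buffer.
 */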
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
                                            unsigned long len,
                                            u32 fd_flags,
                                            u64 heap_flags)
{
        struct system_heap_buffer *buffer;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        unsigned long size_remaining = len;
        unsigned int max_order = orders[0];
        struct dma_buf *dmabuf;
        struct sg_table *table;
        struct scatterlist *sg;
        struct list_head pages;
        struct page *page, *tmp_page;
        int i, ret = -ENOMEM;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        buffer->heap = heap;
        buffer->len = len;

        INIT_LIST_HEAD(&pages);
        i = 0;
        while (size_remaining > 0) {
                /*
                 * Avoid trying to allocate memory if the process
                 * has been killed by SIGKILL
                 */
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        goto free_buffer;
                }

                page = alloc_largest_available(size_remaining, max_order);
                if (!page)
                        goto free_buffer;

                list_add_tail(&page->lru, &pages);
                size_remaining -= page_size(page);
                max_order = compound_order(page);
                i++;
        }

        table = &buffer->sg_table;
        if (sg_alloc_table(table, i, GFP_KERNEL))
                goto free_buffer;

        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
                sg_set_page(sg, page, page_size(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }

        /* create the dmabuf */
        exp_info.exp_name = dma_heap_get_name(heap);
        exp_info.ops = &system_heap_buf_ops;
        exp_info.size = buffer->len;
        exp_info.flags = fd_flags;
        exp_info.priv = buffer;
        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto free_pages;
        }
        return dmabuf;

free_pages:
        for_each_sgtable_sg(table, sg, i) {
                struct page *p = sg_page(sg);

                __free_pages(p, compound_order(p));
        }
        sg_free_table(table);
free_buffer:
        list_for_each_entry_safe(page, tmp_page, &pages, lru)
                __free_pages(page, compound_order(page));
        kfree(buffer);

        return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
        .allocate = system_heap_allocate,
};

static int __init system_heap_create(void)
{
        struct dma_heap_export_info exp_info;
        struct dma_heap *sys_heap;

        exp_info.name = "system";
        exp_info.ops = &system_heap_ops;
        exp_info.priv = NULL;

        sys_heap = dma_heap_add(&exp_info);
        if (IS_ERR(sys_heap))
                return PTR_ERR(sys_heap);

        return 0;
}
module_init(system_heap_create);
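
/*
 * A minimal userspace sketch (not part of this driver; error handling
 * omitted and buffer_size is a placeholder). The heap appears as
 * /dev/dma_heap/system, and an allocation is a single
 * DMA_HEAP_IOCTL_ALLOC with a struct dma_heap_allocation_data from the
 * <linux/dma-heap.h> UAPI header:
 *
 *      int heap_fd = open("/dev/dma_heap/system", O_RDWR | O_CLOEXEC);
 *      struct dma_heap_allocation_data data = {
 *              .len = buffer_size,
 *              .fd_flags = O_RDWR | O_CLOEXEC,
 *      };
 *      ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *
 * On success, data.fd holds the exported dma-buf, ready to be mmap()ed
 * or passed to an importing driver.
 */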