#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <drm/drm_drv.h>
#include <drm/drm_pagemap.h>

/* Number of PAGE_SIZE pages in a folio of the given order. */
#define NR_PAGES(order) (1U << (order))
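
/**
 * struct drm_pagemap_zdd - GPU SVM zone device data
 * @refcount: Reference count for the zdd
 * @devmem_allocation: device memory allocation
 * @device_private_page_owner: Device private pages owner
 *
 * This structure serves as a generic wrapper installed in
 * page->zone_device_data. It provides infrastructure for looking up a device
 * memory allocation upon CPU page fault and asynchronously releasing device
 * memory once the CPU has no page references. Asynchronous release is useful
 * because CPU page references can be dropped in IRQ contexts, while releasing
 * device memory likely requires sleeping locks.
 */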
struct drm_pagemap_zdd {
struct kref refcount;
struct drm_pagemap_devmem *devmem_allocation;
void *device_private_page_owner;
};
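
/**
 * drm_pagemap_zdd_alloc() - Allocate a zdd structure.
 * @device_private_page_owner: Device private pages owner
 *
 * Allocates and initializes a new zdd structure with a reference count of one.
 *
 * Return: Pointer to the allocated zdd on success, NULL on failure.
 */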
static struct drm_pagemap_zdd *
drm_pagemap_zdd_alloc(void *device_private_page_owner)
{
struct drm_pagemap_zdd *zdd;
zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
if (!zdd)
return NULL;
kref_init(&zdd->refcount);
zdd->devmem_allocation = NULL;
zdd->device_private_page_owner = device_private_page_owner;
return zdd;
}
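
/**
 * drm_pagemap_zdd_get() - Get a reference to a zdd structure.
 * @zdd: Pointer to the zdd structure.
 *
 * Return: Pointer to the zdd structure.
 */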
static struct drm_pagemap_zdd *drm_pagemap_zdd_get(struct drm_pagemap_zdd *zdd)
{
kref_get(&zdd->refcount);
return zdd;
}
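
/**
 * drm_pagemap_zdd_destroy() - Destroy a zdd structure.
 * @ref: Pointer to the embedded reference count.
 *
 * Signals that the device memory allocation, if any, is detached, releases it
 * through the driver's devmem_release() callback and frees the zdd structure.
 */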
static void drm_pagemap_zdd_destroy(struct kref *ref)
{
struct drm_pagemap_zdd *zdd =
container_of(ref, struct drm_pagemap_zdd, refcount);
struct drm_pagemap_devmem *devmem = zdd->devmem_allocation;
if (devmem) {
complete_all(&devmem->detached);
if (devmem->ops->devmem_release)
devmem->ops->devmem_release(devmem);
}
kfree(zdd);
}
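
/**
 * drm_pagemap_zdd_put() - Put a zdd reference.
 * @zdd: Pointer to the zdd structure.
 */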
static void drm_pagemap_zdd_put(struct drm_pagemap_zdd *zdd)
{
kref_put(&zdd->refcount, drm_pagemap_zdd_destroy);
}
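
/**
 * drm_pagemap_migration_unlock_put_page() - Put a migration page
 * @page: Pointer to the page to put
 *
 * Unlocks and puts a page.
 */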
static void drm_pagemap_migration_unlock_put_page(struct page *page)
{
unlock_page(page);
put_page(page);
}
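
/**
 * drm_pagemap_migration_unlock_put_pages() - Put migration pages
 * @npages: Number of pages
 * @migrate_pfn: Array of migrate page frame numbers
 *
 * Unlocks and puts an array of pages, clearing each array entry afterwards.
 */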
static void drm_pagemap_migration_unlock_put_pages(unsigned long npages,
unsigned long *migrate_pfn)
{
unsigned long i;
for (i = 0; i < npages; ++i) {
struct page *page;
if (!migrate_pfn[i])
continue;
page = migrate_pfn_to_page(migrate_pfn[i]);
drm_pagemap_migration_unlock_put_page(page);
migrate_pfn[i] = 0;
}
}
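
/**
 * drm_pagemap_get_devmem_page() - Get a reference to a device memory page
 * @page: Pointer to the page
 * @zdd: Pointer to the zdd whose reference the page's zone device data holds
 */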
static void drm_pagemap_get_devmem_page(struct page *page,
struct drm_pagemap_zdd *zdd)
{
page->zone_device_data = drm_pagemap_zdd_get(zdd);
zone_device_page_init(page);
}
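
/**
 * drm_pagemap_migrate_map_pages() - Map migration pages for a device
 * @dev: The device for which the pages should be mapped
 * @pagemap_addr: Array to store DMA information for the mapped pages
 * @migrate_pfn: Array of migrate page frame numbers to map
 * @npages: Number of pages to map
 * @dir: Direction of data transfer (e.g. DMA_TO_DEVICE)
 *
 * Maps pages of memory for migration usage. Iterates over each page frame
 * number provided in @migrate_pfn, maps the corresponding folio, and stores
 * the DMA address and order in the provided @pagemap_addr array, advancing by
 * the number of pages in the folio.
 *
 * Return: 0 on success, -EFAULT if an error occurs during mapping.
 */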
static int drm_pagemap_migrate_map_pages(struct device *dev,
struct drm_pagemap_addr *pagemap_addr,
unsigned long *migrate_pfn,
unsigned long npages,
enum dma_data_direction dir)
{
unsigned long i;
for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
dma_addr_t dma_addr;
struct folio *folio;
unsigned int order = 0;
if (!page)
goto next;
if (WARN_ON_ONCE(is_zone_device_page(page)))
return -EFAULT;
folio = page_folio(page);
order = folio_order(folio);
dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
if (dma_mapping_error(dev, dma_addr))
return -EFAULT;
pagemap_addr[i] =
drm_pagemap_addr_encode(dma_addr,
DRM_INTERCONNECT_SYSTEM,
order, dir);
next:
i += NR_PAGES(order);
}
return 0;
}
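
/**
 * drm_pagemap_migrate_unmap_pages() - Unmap pages mapped for migration
 * @dev: The device for which the pages were mapped
 * @pagemap_addr: Array of DMA information corresponding to the mapped pages
 * @npages: Number of pages to unmap
 * @dir: Direction of data transfer (e.g. DMA_TO_DEVICE)
 *
 * Unmaps previously mapped pages, skipping entries that were never mapped.
 */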
static void drm_pagemap_migrate_unmap_pages(struct device *dev,
struct drm_pagemap_addr *pagemap_addr,
unsigned long npages,
enum dma_data_direction dir)
{
unsigned long i;
for (i = 0; i < npages;) {
if (!pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
goto next;
dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE << pagemap_addr[i].order, dir);
next:
i += NR_PAGES(pagemap_addr[i].order);
}
}
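
/* Number of PAGE_SIZE pages spanned by the range [start, end). */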
static unsigned long
npages_in_range(unsigned long start, unsigned long end)
{
return (end - start) >> PAGE_SHIFT;
}
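
/**
 * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
 * @devmem_allocation: The device memory allocation to migrate to
 * @mm: Pointer to the struct mm_struct
 * @start: Start of the virtual address range to migrate
 * @end: End of the virtual address range to migrate
 * @timeslice_ms: The time requested for the migrated pages to be present in
 * @mm before a fault may migrate them back to RAM
 * @pgmap_owner: The device private page owner, installed in the zone device
 * data of the migrated pages
 *
 * Migrates the specified virtual address range to device memory. The range
 * must be fully contained within a single anonymous VMA, and the caller must
 * hold the mmap lock in at least read mode. On success, ownership of
 * @devmem_allocation passes to the zone device data, and the allocation is
 * released through the driver's devmem_release() callback once the CPU holds
 * no more page references.
 *
 * Return: %0 on success, negative error code on failure.
 */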
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned long timeslice_ms,
void *pgmap_owner)
{
const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
struct migrate_vma migrate = {
.start = start,
.end = end,
.pgmap_owner = pgmap_owner,
.flags = MIGRATE_VMA_SELECT_SYSTEM,
};
unsigned long i, npages = npages_in_range(start, end);
struct vm_area_struct *vas;
struct drm_pagemap_zdd *zdd = NULL;
struct page **pages;
struct drm_pagemap_addr *pagemap_addr;
void *buf;
int err;
mmap_assert_locked(mm);
if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
!ops->copy_to_ram)
return -EOPNOTSUPP;
vas = vma_lookup(mm, start);
if (!vas) {
err = -ENOENT;
goto err_out;
}
if (end > vas->vm_end || start < vas->vm_start) {
err = -EINVAL;
goto err_out;
}
if (!vma_is_anonymous(vas)) {
err = -EBUSY;
goto err_out;
}
buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
zdd = drm_pagemap_zdd_alloc(pgmap_owner);
if (!zdd) {
err = -ENOMEM;
goto err_free;
}
migrate.vma = vas;
migrate.src = buf;
migrate.dst = migrate.src + npages;
err = migrate_vma_setup(&migrate);
if (err)
goto err_free;
if (!migrate.cpages) {
err = -EFAULT;
goto err_free;
}
if (migrate.cpages != npages) {
err = -EBUSY;
goto err_finalize;
}
err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
if (err)
goto err_finalize;
err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
migrate.src, npages, DMA_TO_DEVICE);
if (err)
goto err_finalize;
for (i = 0; i < npages; ++i) {
struct page *page = pfn_to_page(migrate.dst[i]);
pages[i] = page;
migrate.dst[i] = migrate_pfn(migrate.dst[i]);
drm_pagemap_get_devmem_page(page, zdd);
}
err = ops->copy_to_devmem(pages, pagemap_addr, npages,
devmem_allocation->pre_migrate_fence);
if (err)
goto err_finalize;
dma_fence_put(devmem_allocation->pre_migrate_fence);
devmem_allocation->pre_migrate_fence = NULL;
devmem_allocation->timeslice_expiration = get_jiffies_64() +
msecs_to_jiffies(timeslice_ms);
zdd->devmem_allocation = devmem_allocation;
err_finalize:
if (err)
drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
migrate_vma_pages(&migrate);
migrate_vma_finalize(&migrate);
drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
DMA_TO_DEVICE);
err_free:
if (zdd)
drm_pagemap_zdd_put(zdd);
kvfree(buf);
err_out:
return err;
}
EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem);
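
/**
 * drm_pagemap_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
 * @vas: Pointer to the VM area structure, can be NULL
 * @fault_page: Fault page whose zone device data the source pages must match,
 * can be NULL
 * @npages: Number of pages to populate
 * @mpages: Incremented by the number of base pages locked for migration
 * @src_mpfn: Source array of migrate PFNs
 * @mpfn: Destination array of migrate PFNs to populate
 * @addr: Start address for PFN allocation
 *
 * Allocates and locks destination folios for migration to RAM, matching the
 * order of the source folios. If @vas is non-NULL the folios are allocated
 * for that VMA; otherwise plain folios are allocated, which is the eviction
 * case. On allocation failure all folios populated so far are released.
 *
 * Return: 0 on success, negative error code on failure.
 */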
static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
struct page *fault_page,
unsigned long npages,
unsigned long *mpages,
unsigned long *src_mpfn,
unsigned long *mpfn,
unsigned long addr)
{
unsigned long i;
for (i = 0; i < npages;) {
struct page *page = NULL, *src_page;
struct folio *folio;
unsigned int order = 0;
if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
goto next;
src_page = migrate_pfn_to_page(src_mpfn[i]);
if (!src_page)
goto next;
if (fault_page) {
if (src_page->zone_device_data !=
fault_page->zone_device_data)
goto next;
}
order = folio_order(page_folio(src_page));
if (vas)
folio = vma_alloc_folio(GFP_HIGHUSER, order, vas, addr);
else
folio = folio_alloc(GFP_HIGHUSER, order);
if (!folio)
goto free_pages;
page = folio_page(folio, 0);
mpfn[i] = migrate_pfn(page_to_pfn(page));
next:
if (page)
addr += page_size(page);
else
addr += PAGE_SIZE;
i += NR_PAGES(order);
}
for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(mpfn[i]);
unsigned int order = 0;
if (!page)
goto next_lock;
WARN_ON_ONCE(!folio_trylock(page_folio(page)));
order = folio_order(page_folio(page));
*mpages += NR_PAGES(order);
next_lock:
i += NR_PAGES(order);
}
return 0;
free_pages:
	for (i = 0; i < npages;) {
		struct page *page = migrate_pfn_to_page(mpfn[i]);
		unsigned int order = 0;

		if (!page)
			goto next_put;

		/* Read the folio order before dropping the last reference. */
		order = folio_order(page_folio(page));
		put_page(page);
		mpfn[i] = 0;
next_put:
		i += NR_PAGES(order);
	}
return -ENOMEM;
}
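
/**
 * drm_pagemap_evict_to_ram() - Evict a device memory allocation to RAM
 * @devmem_allocation: Pointer to the device memory allocation
 *
 * Similar to __drm_pagemap_migrate_to_ram() but does not require the mmap
 * lock; migration is done via the migrate_device_* functions. The whole
 * allocation is evicted, retrying a limited number of times until it is
 * fully detached.
 *
 * Return: %0 on success, negative error code on failure.
 */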
int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
{
const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
unsigned long npages, mpages = 0;
struct page **pages;
unsigned long *src, *dst;
struct drm_pagemap_addr *pagemap_addr;
void *buf;
	unsigned long i;
	int err = 0;
unsigned int retry_count = 2;
npages = devmem_allocation->size >> PAGE_SHIFT;
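
	/*
	 * Not all device pages may migrate on the first pass (e.g. if they
	 * are busy or raced against); retry until the allocation is fully
	 * detached, giving up after a limited number of attempts.
	 */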
retry:
if (!mmget_not_zero(devmem_allocation->mm))
return -EFAULT;
buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
src = buf;
dst = buf + (sizeof(*src) * npages);
pagemap_addr = buf + (2 * sizeof(*src) * npages);
pages = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;
err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
if (err)
goto err_free;
err = migrate_device_pfns(src, npages);
if (err)
goto err_free;
err = drm_pagemap_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
src, dst, 0);
if (err || !mpages)
goto err_finalize;
err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
dst, npages, DMA_FROM_DEVICE);
if (err)
goto err_finalize;
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(src[i]);
err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
goto err_finalize;
err_finalize:
if (err)
drm_pagemap_migration_unlock_put_pages(npages, dst);
migrate_device_pages(src, dst, npages);
migrate_device_finalize(src, dst, npages);
drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
DMA_FROM_DEVICE);
err_free:
kvfree(buf);
err_out:
mmput_async(devmem_allocation->mm);
if (completion_done(&devmem_allocation->detached))
return 0;
if (retry_count--) {
cond_resched();
goto retry;
}
return err ?: -EBUSY;
}
EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram);
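
/**
 * __drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (internal)
 * @vas: Pointer to the VM area structure
 * @device_private_page_owner: Device private pages owner
 * @page: Pointer to the faulting page (can be NULL)
 * @fault_addr: Fault address
 * @size: Size of migration
 *
 * Migrates the virtual address range containing @fault_addr back to RAM,
 * performing the necessary setup and invoking the driver-specific
 * copy_to_ram() operation.
 *
 * Return: %0 on success, negative error code on failure.
 */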
static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
void *device_private_page_owner,
struct page *page,
unsigned long fault_addr,
unsigned long size)
{
struct migrate_vma migrate = {
.vma = vas,
.pgmap_owner = device_private_page_owner,
.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
MIGRATE_VMA_SELECT_DEVICE_COHERENT,
.fault_page = page,
};
struct drm_pagemap_zdd *zdd;
const struct drm_pagemap_devmem_ops *ops;
struct device *dev = NULL;
unsigned long npages, mpages = 0;
struct page **pages;
struct drm_pagemap_addr *pagemap_addr;
unsigned long start, end;
void *buf;
	unsigned long i;
	int err = 0;
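
	/*
	 * Respect the timeslice granted when the range was migrated to device
	 * memory: refuse to migrate back until it has expired, avoiding
	 * migration ping-pong under contention.
	 */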
if (page) {
zdd = page->zone_device_data;
if (time_before64(get_jiffies_64(),
zdd->devmem_allocation->timeslice_expiration))
return 0;
}
start = ALIGN_DOWN(fault_addr, size);
end = ALIGN(fault_addr + 1, size);
if (start < vas->vm_start)
start = vas->vm_start;
if (end > vas->vm_end)
end = vas->vm_end;
migrate.start = start;
migrate.end = end;
npages = npages_in_range(start, end);
buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
migrate.vma = vas;
migrate.src = buf;
migrate.dst = migrate.src + npages;
err = migrate_vma_setup(&migrate);
if (err)
goto err_free;
if (!migrate.cpages)
goto err_free;
if (!page) {
for (i = 0; i < npages; ++i) {
if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
continue;
page = migrate_pfn_to_page(migrate.src[i]);
break;
}
if (!page)
goto err_finalize;
}
zdd = page->zone_device_data;
ops = zdd->devmem_allocation->ops;
dev = zdd->devmem_allocation->dev;
err = drm_pagemap_migrate_populate_ram_pfn(vas, page, npages, &mpages,
migrate.src, migrate.dst,
start);
if (err)
goto err_finalize;
err = drm_pagemap_migrate_map_pages(dev, pagemap_addr, migrate.dst, npages,
DMA_FROM_DEVICE);
if (err)
goto err_finalize;
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(migrate.src[i]);
err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
goto err_finalize;
err_finalize:
if (err)
drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
migrate_vma_pages(&migrate);
migrate_vma_finalize(&migrate);
if (dev)
drm_pagemap_migrate_unmap_pages(dev, pagemap_addr, npages,
DMA_FROM_DEVICE);
err_free:
kvfree(buf);
err_out:
return err;
}
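
/**
 * drm_pagemap_page_free() - Put zone device data associated with a page
 * @page: Pointer to the page
 *
 * Called by the core when the last CPU reference to a device private page is
 * dropped.
 */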
static void drm_pagemap_page_free(struct page *page)
{
drm_pagemap_zdd_put(page->zone_device_data);
}
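
/**
 * drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (page fault handler)
 * @vmf: Pointer to the fault information structure
 *
 * Migrates the virtual range containing the faulting address back to RAM.
 *
 * Return: VM_FAULT_SIGBUS on failure, 0 on success.
 */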
static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
{
struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
int err;
err = __drm_pagemap_migrate_to_ram(vmf->vma,
zdd->device_private_page_owner,
vmf->page, vmf->address,
zdd->devmem_allocation->size);
return err ? VM_FAULT_SIGBUS : 0;
}
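
/* dev_pagemap_ops shared by all drm_pagemap device private pages. */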
static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
.page_free = drm_pagemap_page_free,
.migrate_to_ram = drm_pagemap_migrate_to_ram,
};
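
/**
 * drm_pagemap_pagemap_ops_get() - Retrieve the device page map operations
 *
 * Return: Pointer to the shared dev_pagemap operations structure.
 */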
const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void)
{
return &drm_pagemap_pagemap_ops;
}
EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
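
/**
 * drm_pagemap_devmem_init() - Initialize a drm_pagemap device memory allocation
 * @devmem_allocation: The struct to initialize
 * @dev: Pointer to the device structure the device memory allocation belongs to
 * @mm: Pointer to the mm_struct for the address space
 * @ops: Pointer to the operations structure for the device memory allocation
 * @dpagemap: The struct drm_pagemap we're allocating from
 * @size: Size of the device memory allocation
 * @pre_migrate_fence: Fence to be consumed before migration may start, or NULL
 */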
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
struct device *dev, struct mm_struct *mm,
const struct drm_pagemap_devmem_ops *ops,
struct drm_pagemap *dpagemap, size_t size,
struct dma_fence *pre_migrate_fence)
{
init_completion(&devmem_allocation->detached);
devmem_allocation->dev = dev;
devmem_allocation->mm = mm;
devmem_allocation->ops = ops;
devmem_allocation->dpagemap = dpagemap;
devmem_allocation->size = size;
devmem_allocation->pre_migrate_fence = pre_migrate_fence;
}
EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
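
/**
 * drm_pagemap_page_to_dpagemap() - Return the drm_pagemap of a page
 * @page: The struct page
 *
 * Return: A pointer to the struct drm_pagemap of a device private page that
 * was populated from the struct drm_pagemap. If the page was *not* populated
 * from a struct drm_pagemap, the result is undefined.
 */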
struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
{
struct drm_pagemap_zdd *zdd = page->zone_device_data;
return zdd->devmem_allocation->dpagemap;
}
EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap);
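
/**
 * drm_pagemap_populate_mm() - Populate a virtual range with device memory pages
 * @dpagemap: Pointer to the drm_pagemap managing the device memory
 * @start: Start of the virtual range to populate
 * @end: End of the virtual range to populate
 * @mm: Pointer to the virtual address space
 * @timeslice_ms: The time requested for the migrated pages to be present in
 * @mm before being allowed to be migrated back
 *
 * Attempts to populate a virtual range with device memory pages, clearing
 * them or migrating data to them as needed, by calling into the dpagemap's
 * populate_mm() operation, which must be implemented. The mm reference and
 * mmap read lock are taken and released around the call.
 *
 * Return: 0 on success, negative error code on error.
 */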
int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long start, unsigned long end,
struct mm_struct *mm,
unsigned long timeslice_ms)
{
int err;
if (!mmget_not_zero(mm))
return -EFAULT;
mmap_read_lock(mm);
err = dpagemap->ops->populate_mm(dpagemap, start, end, mm,
timeslice_ms);
mmap_read_unlock(mm);
mmput(mm);
return err;
}
EXPORT_SYMBOL(drm_pagemap_populate_mm);