#include <linux/export.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>
#include "ttm_module.h"
#ifdef CONFIG_FAULT_INJECTION
#include <linux/fault-inject.h>
static DECLARE_FAULT_ATTR(backup_fault_inject);
#else
#define should_fail(...) false
#endif
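/*
 * Per-allocation DMA bookkeeping. On Linux, addr and vaddr track a
 * coherent allocation from dma_alloc_attrs() (the low bits of vaddr
 * encode the allocation order). On OpenBSD, dmat/map/seg hold the
 * bus_dma(9) tag, map and segment backing the allocation instead.
 */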
struct ttm_pool_dma {
dma_addr_t addr;
unsigned long vaddr;
bus_dma_tag_t dmat;
bus_dmamap_t map;
bus_dma_segment_t seg;
};
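/*
 * Cursor for an in-progress allocation: the next page and DMA-address
 * slots to fill, the number of pages still needed, the caching mode
 * requested by the TT, and the divide between pages that already carry
 * that caching and trailing pages still to be transitioned.
 */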
struct ttm_pool_alloc_state {
struct vm_page **pages;
struct vm_page **caching_divide;
dma_addr_t *dma_addr;
pgoff_t remaining_pages;
enum ttm_caching tt_caching;
unsigned int *orders;
};
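/*
 * State carried across an interruptible restore from backup: the huge
 * page currently being refilled, its DMA address and caching, progress
 * counters, and a snapshot of the allocation state to resume from.
 */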
struct ttm_pool_tt_restore {
struct ttm_pool *pool;
struct ttm_pool_alloc_state snapshot_alloc;
struct vm_page *alloced_page;
dma_addr_t first_dma;
pgoff_t alloced_pages;
pgoff_t restored_pages;
enum ttm_caching page_caching;
unsigned int order;
};
static unsigned long page_pool_size;
MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);
static atomic_long_t allocated_pages;
static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker *mm_shrinker;
static DECLARE_RWSEM(pool_shrink_rwsem);
#ifdef __linux__
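/*
 * Allocate pages of size 1 << order. Plain pages record their order in
 * page->private; use_dma_alloc pools use a coherent DMA allocation and
 * stash the bookkeeping struct there instead.
 */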
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
unsigned int order)
{
unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
struct ttm_pool_dma *dma;
struct page *p;
void *vaddr;
if (order)
gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
__GFP_THISNODE;
if (!pool->use_dma_alloc) {
p = alloc_pages_node(pool->nid, gfp_flags, order);
if (p)
p->private = order;
return p;
}
dma = kmalloc(sizeof(*dma), GFP_KERNEL);
if (!dma)
return NULL;
if (order)
attr |= DMA_ATTR_NO_WARN;
vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
&dma->addr, gfp_flags, attr);
if (!vaddr)
goto error_free;
if (is_vmalloc_addr(vaddr))
p = vmalloc_to_page(vaddr);
else
p = virt_to_page(vaddr);
dma->vaddr = (unsigned long)vaddr | order;
p->private = (unsigned long)dma;
return p;
error_free:
kfree(dma);
return NULL;
}
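/*
 * Reset any write-combined/uncached attribute, then hand the pages
 * back to the page allocator or release the coherent DMA allocation.
 */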
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
unsigned int order, struct page *p)
{
unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
struct ttm_pool_dma *dma;
void *vaddr;
#ifdef CONFIG_X86
if (caching != ttm_cached && !PageHighMem(p))
set_pages_wb(p, 1 << order);
#endif
if (!pool || !pool->use_dma_alloc) {
__free_pages(p, order);
return;
}
if (order)
attr |= DMA_ATTR_NO_WARN;
dma = (void *)p->private;
vaddr = (void *)(dma->vaddr & PAGE_MASK);
dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
attr);
kfree(dma);
}
#else
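/*
 * OpenBSD variant: back the allocation with bus_dma(9) and stash the
 * bookkeeping pointer in the first page's objt.rbt_parent field
 * (repurposed here), mirroring what the Linux path keeps in
 * page->private.
 */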
static struct vm_page *ttm_pool_alloc_page(struct ttm_pool *pool,
gfp_t gfp_flags, unsigned int order,
bus_dma_tag_t dmat)
{
struct ttm_pool_dma *dma;
struct vm_page *p;
struct uvm_constraint_range *constraint = &no_constraint;
int flags = (gfp_flags & M_NOWAIT) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
int dmaflags = BUS_DMA_64BIT;
int nsegs;
if (pool->use_dma32) {
constraint = &dma_constraint;
dmaflags &= ~BUS_DMA_64BIT;
}
dma = kmalloc(sizeof(*dma), GFP_KERNEL);
if (!dma)
return NULL;
if (bus_dmamap_create(dmat, (1ULL << order) * PAGE_SIZE, 1,
(1ULL << order) * PAGE_SIZE, 0, flags | dmaflags, &dma->map))
goto error_free;
#ifdef bus_dmamem_alloc_range
if (bus_dmamem_alloc_range(dmat, (1ULL << order) * PAGE_SIZE,
PAGE_SIZE, 0, &dma->seg, 1, &nsegs, flags | BUS_DMA_ZERO,
constraint->ucr_low, constraint->ucr_high)) {
bus_dmamap_destroy(dmat, dma->map);
goto error_free;
}
#else
if (bus_dmamem_alloc(dmat, (1ULL << order) * PAGE_SIZE,
PAGE_SIZE, 0, &dma->seg, 1, &nsegs, flags | BUS_DMA_ZERO)) {
bus_dmamap_destroy(dmat, dma->map);
goto error_free;
}
#endif
if (bus_dmamap_load_raw(dmat, dma->map, &dma->seg, 1,
(1ULL << order) * PAGE_SIZE, flags)) {
bus_dmamem_free(dmat, &dma->seg, 1);
bus_dmamap_destroy(dmat, dma->map);
goto error_free;
}
dma->dmat = dmat;
dma->addr = dma->map->dm_segs[0].ds_addr;
#ifndef __sparc64__
p = PHYS_TO_VM_PAGE(dma->seg.ds_addr);
#else
p = TAILQ_FIRST((struct pglist *)dma->seg._ds_mlist);
#endif
p->objt.rbt_parent = (struct rb_entry *)dma;
return p;
error_free:
kfree(dma);
return NULL;
}
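/* Unload, free and destroy the bus_dma(9) state backing the pages */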
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
unsigned int order, struct vm_page *p)
{
struct ttm_pool_dma *dma;
#ifdef CONFIG_X86
if (caching != ttm_cached && !PageHighMem(p))
set_pages_wb(p, 1 << order);
#endif
dma = (struct ttm_pool_dma *)p->objt.rbt_parent;
bus_dmamap_unload(dma->dmat, dma->map);
bus_dmamem_free(dma->dmat, &dma->seg, 1);
bus_dmamap_destroy(dma->dmat, dma->map);
kfree(dma);
}
#endif
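/*
 * Transition the pages accumulated since the last caching divide to
 * the caching mode the TT asked for (x86 only; elsewhere pages stay
 * cached).
 */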
static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc)
{
#ifdef CONFIG_X86
unsigned int num_pages = alloc->pages - alloc->caching_divide;
if (!num_pages)
return 0;
switch (alloc->tt_caching) {
case ttm_cached:
break;
case ttm_write_combined:
return set_pages_array_wc(alloc->caching_divide, num_pages);
case ttm_uncached:
return set_pages_array_uc(alloc->caching_divide, num_pages);
}
#endif
alloc->caching_divide = alloc->pages;
return 0;
}
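/*
 * DMA address lookup/mapping for one (compound) page. On Linux a
 * coherent allocation already knows its address while other pages are
 * mapped with dma_map_page(); on OpenBSD every pool page is backed by
 * a loaded bus_dma map, so the address is simply read back.
 */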
#ifdef __linux__
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
struct vm_page *p, dma_addr_t *dma_addr)
{
dma_addr_t addr;
if (pool->use_dma_alloc) {
struct ttm_pool_dma *dma = (void *)p->private;
addr = dma->addr;
} else {
size_t size = (1ULL << order) * PAGE_SIZE;
addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(pool->dev, addr))
return -EFAULT;
}
*dma_addr = addr;
return 0;
}
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
unsigned int num_pages)
{
if (pool->use_dma_alloc)
return;
dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
DMA_BIDIRECTIONAL);
}
#else
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
struct vm_page *p, dma_addr_t *dma_addr)
{
struct ttm_pool_dma *dma;
dma = (struct ttm_pool_dma *)p->objt.rbt_parent;
*dma_addr = dma->addr;
return 0;
}
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
unsigned int num_pages)
{
}
#endif
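/* Zero the pages and give them back to the pool type for reuse */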
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct vm_page *p)
{
unsigned int i, num_pages = 1 << pt->order;
struct ttm_pool_type_lru *entry;
for (i = 0; i < num_pages; ++i) {
#ifdef notyet
if (PageHighMem(p))
clear_highpage(p + i);
else
#endif
pmap_zero_page(p + i);
}
entry = malloc(sizeof(struct ttm_pool_type_lru), M_DRM, M_WAITOK);
entry->pg = p;
spin_lock(&pt->lock);
LIST_INSERT_HEAD(&pt->lru, entry, entries);
spin_unlock(&pt->lock);
atomic_long_add(1 << pt->order, &allocated_pages);
}
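/* Take pages from a specific pool type, return NULL when nothing is available */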
static struct vm_page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
struct vm_page *p = NULL;
struct ttm_pool_type_lru *entry;
spin_lock(&pt->lock);
if (!LIST_EMPTY(&pt->lru)) {
entry = LIST_FIRST(&pt->lru);
p = entry->pg;
atomic_long_sub(1 << pt->order, &allocated_pages);
LIST_REMOVE(entry, entries);
free(entry, M_DRM, sizeof(struct ttm_pool_type_lru));
}
spin_unlock(&pt->lock);
return p;
}
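/* Initialize and add a pool type to the global shrinker list */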
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
enum ttm_caching caching, unsigned int order)
{
pt->pool = pool;
pt->caching = caching;
pt->order = order;
mtx_init(&pt->lock, IPL_NONE);
INIT_LIST_HEAD(&pt->pages);
LIST_INIT(&pt->lru);
spin_lock(&shrinker_lock);
list_add_tail(&pt->shrinker_list, &shrinker_list);
spin_unlock(&shrinker_lock);
}
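/* Remove a pool type from the global shrinker list and free all pages */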
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
struct vm_page *p;
struct ttm_pool_type_lru *entry;
spin_lock(&shrinker_lock);
list_del(&pt->shrinker_list);
spin_unlock(&shrinker_lock);
while ((p = ttm_pool_type_take(pt)))
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
while (!LIST_EMPTY(&pt->lru)) {
entry = LIST_FIRST(&pt->lru);
LIST_REMOVE(entry, entries);
free(entry, M_DRM, sizeof(struct ttm_pool_type_lru));
}
}
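/* Return the pool type to use for the given caching and order */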
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
if (pool->use_dma_alloc)
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
if (pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];
if (pool->use_dma32)
return &global_dma32_write_combined[order];
return &global_write_combined[order];
case ttm_uncached:
if (pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];
if (pool->use_dma32)
return &global_dma32_uncached[order];
return &global_uncached[order];
default:
break;
}
#endif
return NULL;
}
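/*
 * Free one batch of pages from the pool type at the head of the
 * shrinker list, rotating that type to the tail so types are shrunk
 * round-robin.
 */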
static unsigned int ttm_pool_shrink(void)
{
struct ttm_pool_type *pt;
unsigned int num_pages;
struct vm_page *p;
down_read(&pool_shrink_rwsem);
spin_lock(&shrinker_lock);
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
list_move_tail(&pt->shrinker_list, &shrinker_list);
spin_unlock(&shrinker_lock);
p = ttm_pool_type_take(pt);
if (p) {
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
num_pages = 1 << pt->order;
} else {
num_pages = 0;
}
up_read(&pool_shrink_rwsem);
return num_pages;
}
#ifdef notyet
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct vm_page *p)
{
if (pool->use_dma_alloc) {
struct ttm_pool_dma *dma = (void *)p->private;
return dma->vaddr & ~LINUX_PAGE_MASK;
}
return p->private;
}
#endif
static void ttm_pool_split_for_swap(struct ttm_pool *pool, struct vm_page *p)
{
STUB();
#ifdef notyet
unsigned int order = ttm_pool_page_order(pool, p);
pgoff_t nr;
if (!order)
return;
split_page(p, order);
nr = 1UL << order;
while (nr--)
(p++)->private = 0;
#endif
}
static bool ttm_pool_restore_valid(const struct ttm_pool_tt_restore *restore)
{
return restore && restore->restored_pages < (1UL << restore->order);
}
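/*
 * Unmap a page's DMA address if needed and either recycle the page
 * into a matching pool type or free it; returns the number of base
 * pages handled.
 */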
static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct vm_page *page,
const dma_addr_t *dma_addr, enum ttm_caching caching,
unsigned int tt_order)
{
struct ttm_pool_type *pt = NULL;
unsigned int order;
pgoff_t nr;
if (pool) {
#ifdef __linux__
order = ttm_pool_page_order(pool, page);
#else
order = tt_order;
#endif
nr = (1UL << order);
if (dma_addr)
ttm_pool_unmap(pool, *dma_addr, nr);
pt = ttm_pool_select_type(pool, caching, order);
} else {
#ifdef __linux__
order = page->private;
#else
order = tt_order;
#endif
nr = (1UL << order);
}
if (pt)
ttm_pool_type_give(pt, page);
else
ttm_pool_free_page(pool, caching, order, page);
return nr;
}
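/*
 * Commit a contiguous allocation to the output arrays: record each
 * page and its order and fan the base DMA address out per page.
 */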
static void ttm_pool_allocated_page_commit(struct vm_page *allocated,
dma_addr_t first_dma,
struct ttm_pool_alloc_state *alloc,
pgoff_t nr)
{
pgoff_t i;
unsigned int order = order_base_2(nr);
for (i = 0; i < nr; ++i) {
*alloc->pages++ = allocated++;
*alloc->orders++ = order;
}
alloc->remaining_pages -= nr;
if (!alloc->dma_addr)
return;
for (i = 0; i < nr; ++i) {
*alloc->dma_addr++ = first_dma;
first_dma += PAGE_SIZE;
}
}
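/*
 * Copy backed-up content into the freshly allocated huge page, one
 * base page at a time, dropping backup handles as they are consumed.
 * Stubbed out on OpenBSD.
 */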
static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
struct file *backup,
const struct ttm_operation_ctx *ctx,
struct ttm_pool_alloc_state *alloc)
{
STUB();
return -ENOSYS;
#ifdef notyet
pgoff_t i, nr = 1UL << restore->order;
struct vm_page **first_page = alloc->pages;
struct vm_page *p;
int ret = 0;
for (i = restore->restored_pages; i < nr; ++i) {
p = first_page[i];
if (ttm_backup_page_ptr_is_handle(p)) {
unsigned long handle = ttm_backup_page_ptr_to_handle(p);
if (IS_ENABLED(CONFIG_FAULT_INJECTION) && ctx->interruptible &&
should_fail(&backup_fault_inject, 1)) {
ret = -EINTR;
break;
}
if (handle == 0) {
restore->restored_pages++;
continue;
}
ret = ttm_backup_copy_page(backup, restore->alloced_page + i,
handle, ctx->interruptible);
if (ret)
break;
ttm_backup_drop(backup, handle);
} else if (p) {
ttm_pool_split_for_swap(restore->pool, p);
copy_highpage(restore->alloced_page + i, p);
__free_pages(p, 0);
}
restore->restored_pages++;
first_page[i] = ttm_backup_handle_to_page_ptr(0);
}
if (ret) {
if (!restore->restored_pages) {
dma_addr_t *dma_addr = alloc->dma_addr ? &restore->first_dma : NULL;
ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
dma_addr, restore->page_caching,
restore->order);
restore->restored_pages = nr;
}
return ret;
}
ttm_pool_allocated_page_commit(restore->alloced_page, restore->first_dma,
alloc, nr);
if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page))
alloc->caching_divide = alloc->pages;
restore->snapshot_alloc = *alloc;
restore->alloced_pages += nr;
return 0;
#endif
}
static void
ttm_pool_page_allocated_restore(struct ttm_pool *pool, unsigned int order,
struct vm_page *p,
enum ttm_caching page_caching,
dma_addr_t first_dma,
struct ttm_pool_tt_restore *restore,
const struct ttm_pool_alloc_state *alloc)
{
restore->pool = pool;
restore->order = order;
restore->restored_pages = 0;
restore->page_caching = page_caching;
restore->first_dma = first_dma;
restore->alloced_page = p;
restore->snapshot_alloc = *alloc;
}
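/*
 * Handle a page freshly taken from a pool or the allocator: apply any
 * deferred caching transition, map it for DMA if requested, and either
 * commit it to the allocation or park it in the restore state.
 */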
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
struct vm_page *p, enum ttm_caching page_caching,
struct ttm_pool_alloc_state *alloc,
struct ttm_pool_tt_restore *restore)
{
bool caching_consistent;
dma_addr_t first_dma = 0; /* only meaningful when alloc->dma_addr is set */
int r = 0;
caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p);
if (caching_consistent) {
r = ttm_pool_apply_caching(alloc);
if (r)
return r;
}
if (alloc->dma_addr) {
r = ttm_pool_map(pool, order, p, &first_dma);
if (r)
return r;
}
if (restore) {
ttm_pool_page_allocated_restore(pool, order, p, page_caching,
first_dma, restore, alloc);
} else {
ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order);
if (caching_consistent)
alloc->caching_divide = alloc->pages;
}
return 0;
}
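/*
 * Free (or recycle) the pages in the range [start_page, end_page),
 * dropping backup handles for pages that were swapped out.
 */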
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
enum ttm_caching caching,
pgoff_t start_page, pgoff_t end_page)
{
struct vm_page **pages = &tt->pages[start_page];
struct file *backup = tt->backup;
pgoff_t i, nr;
for (i = start_page; i < end_page; i += nr, pages += nr) {
struct vm_page *p = *pages;
nr = 1;
if (ttm_backup_page_ptr_is_handle(p)) {
unsigned long handle = ttm_backup_page_ptr_to_handle(p);
if (handle != 0)
ttm_backup_drop(backup, handle);
} else if (p) {
dma_addr_t *dma_addr = tt->dma_address ?
tt->dma_address + i : NULL;
nr = ttm_pool_unmap_and_free(pool, p, dma_addr, caching, tt->orders[i]);
}
}
}
static void ttm_pool_alloc_state_init(const struct ttm_tt *tt,
struct ttm_pool_alloc_state *alloc)
{
alloc->pages = tt->pages;
alloc->caching_divide = tt->pages;
alloc->dma_addr = tt->dma_address;
alloc->remaining_pages = tt->num_pages;
alloc->tt_caching = tt->caching;
alloc->orders = tt->orders;
}
static unsigned int ttm_pool_alloc_find_order(unsigned int highest,
const struct ttm_pool_alloc_state *alloc)
{
return min_t(unsigned int, highest, __fls(alloc->remaining_pages));
}
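/*
 * Core allocation loop: try the largest useful order first, preferring
 * recycled pages with the requested caching, then fresh (cached)
 * pages, falling back to smaller orders until everything is allocated
 * or allocation fails. Also drives restore commits when repopulating a
 * backed-up TT.
 */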
static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
const struct ttm_operation_ctx *ctx,
struct ttm_pool_alloc_state *alloc,
struct ttm_pool_tt_restore *restore)
{
enum ttm_caching page_caching;
gfp_t gfp_flags = GFP_USER;
pgoff_t caching_divide;
unsigned int order;
bool allow_pools;
struct vm_page *p;
int r;
WARN_ON(!alloc->remaining_pages || ttm_tt_is_populated(tt));
#ifdef __linux__
WARN_ON(alloc->dma_addr && !pool->dev);
#endif
if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
if (ctx->gfp_retry_mayfail)
gfp_flags |= __GFP_RETRY_MAYFAIL;
if (pool->use_dma32)
gfp_flags |= GFP_DMA32;
else
gfp_flags |= GFP_HIGHUSER;
page_caching = tt->caching;
allow_pools = true;
for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc);
alloc->remaining_pages;
order = ttm_pool_alloc_find_order(order, alloc)) {
struct ttm_pool_type *pt;
p = NULL;
pt = ttm_pool_select_type(pool, page_caching, order);
if (pt && allow_pools)
p = ttm_pool_type_take(pt);
if (!p) {
page_caching = ttm_cached;
allow_pools = false;
p = ttm_pool_alloc_page(pool, gfp_flags, order, tt->dmat);
}
if (!p) {
if (order) {
--order;
page_caching = tt->caching;
allow_pools = true;
continue;
}
r = -ENOMEM;
goto error_free_all;
}
r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc,
restore);
if (r)
goto error_free_page;
if (ttm_pool_restore_valid(restore)) {
r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc);
if (r)
goto error_free_all;
}
}
r = ttm_pool_apply_caching(alloc);
if (r)
goto error_free_all;
kfree(tt->restore);
tt->restore = NULL;
return 0;
error_free_page:
ttm_pool_free_page(pool, page_caching, order, p);
error_free_all:
if (tt->restore)
return r;
caching_divide = alloc->caching_divide - tt->pages;
ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
ttm_pool_free_range(pool, tt, ttm_cached, caching_divide,
tt->num_pages - alloc->remaining_pages);
return r;
}
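/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */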
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
struct ttm_operation_ctx *ctx)
{
struct ttm_pool_alloc_state alloc;
if (WARN_ON(ttm_tt_is_backed_up(tt)))
return -EINVAL;
ttm_pool_alloc_state_init(tt, &alloc);
return __ttm_pool_alloc(pool, tt, ctx, &alloc, NULL);
}
EXPORT_SYMBOL(ttm_pool_alloc);
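/**
 * ttm_pool_restore_and_alloc - Fill a ttm_tt, restoring previously
 * backed-up content
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and read backed-up content back in.
 * An interrupted restore can be resumed by calling this function again,
 * or aborted with ttm_pool_drop_backed_up().
 *
 * Returns: 0 on success, negative error code otherwise.
 */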
int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
const struct ttm_operation_ctx *ctx)
{
struct ttm_pool_alloc_state alloc;
if (WARN_ON(!ttm_tt_is_backed_up(tt)))
return -EINVAL;
if (!tt->restore) {
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
ttm_pool_alloc_state_init(tt, &alloc);
if (ctx->gfp_retry_mayfail)
gfp |= __GFP_RETRY_MAYFAIL;
tt->restore = kzalloc(sizeof(*tt->restore), gfp);
if (!tt->restore)
return -ENOMEM;
tt->restore->snapshot_alloc = alloc;
tt->restore->pool = pool;
tt->restore->restored_pages = 1;
} else {
struct ttm_pool_tt_restore *restore = tt->restore;
int ret;
alloc = restore->snapshot_alloc;
if (ttm_pool_restore_valid(tt->restore)) {
ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc);
if (ret)
return ret;
}
if (!alloc.remaining_pages)
return 0;
}
return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore);
}
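/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them, then shrink the
 * pools above the page_pool_size limit.
 */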
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
while (atomic_long_read(&allocated_pages) > page_pool_size)
ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);
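/**
 * ttm_pool_drop_backed_up - Release the content of a backed-up ttm_tt
 *
 * @tt: ttm_tt object whose content to release
 *
 * Drop handles to backed-up content, abort any pending restore and free
 * the pages already brought back in.
 */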
void ttm_pool_drop_backed_up(struct ttm_tt *tt)
{
struct ttm_pool_tt_restore *restore;
pgoff_t start_page = 0;
WARN_ON(!ttm_tt_is_backed_up(tt));
restore = tt->restore;
if (ttm_pool_restore_valid(restore)) {
dma_addr_t *dma_addr = tt->dma_address ? &restore->first_dma : NULL;
ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
dma_addr, restore->page_caching, restore->order);
restore->restored_pages = 1UL << restore->order;
}
if (restore) {
pgoff_t mid = restore->snapshot_alloc.caching_divide - tt->pages;
start_page = restore->alloced_pages;
WARN_ON(mid > start_page);
ttm_pool_free_range(restore->pool, tt, tt->caching,
0, mid);
ttm_pool_free_range(restore->pool, tt, ttm_cached,
mid, restore->alloced_pages);
kfree(restore);
tt->restore = NULL;
}
ttm_pool_free_range(NULL, tt, ttm_cached, start_page, tt->num_pages);
}
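/**
 * ttm_pool_backup - Back up or purge a ttm_tt object
 *
 * @pool: The pool used when allocating the ttm_tt.
 * @tt: The ttm_tt to back up.
 * @flags: Flags governing the backup behaviour.
 *
 * If @flags->purge is set, all pages are freed directly to the system;
 * otherwise each page is exchanged for a backup handle that a later
 * ttm_pool_restore_and_alloc() can read back in.
 *
 * Returns: Number of pages backed up or freed, or negative error code.
 */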
long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
const struct ttm_backup_flags *flags)
{
struct file *backup = tt->backup;
struct vm_page *page;
unsigned long handle;
gfp_t alloc_gfp;
gfp_t gfp;
int ret = 0;
pgoff_t shrunken = 0;
pgoff_t i, num_pages;
if (WARN_ON(ttm_tt_is_backed_up(tt)))
return -EINVAL;
if ((!ttm_backup_bytes_avail() && !flags->purge) ||
pool->use_dma_alloc || ttm_tt_is_backed_up(tt))
return -EBUSY;
#ifdef CONFIG_X86
if (tt->caching != ttm_cached)
set_pages_array_wb(tt->pages, tt->num_pages);
#endif
if (tt->dma_address || flags->purge) {
for (i = 0; i < tt->num_pages; i += num_pages) {
unsigned int order;
page = tt->pages[i];
if (unlikely(!page)) {
num_pages = 1;
continue;
}
#ifdef __linux__
order = ttm_pool_page_order(pool, page);
#else
order = tt->orders[i];
#endif
num_pages = 1UL << order;
if (tt->dma_address)
ttm_pool_unmap(pool, tt->dma_address[i],
num_pages);
if (flags->purge) {
shrunken += num_pages;
#ifdef __linux__
page->private = 0;
#endif
__free_pages(page, order);
memset(tt->pages + i, 0,
num_pages * sizeof(*tt->pages));
}
}
}
if (flags->purge)
return shrunken;
if (pool->use_dma32)
gfp = GFP_DMA32;
else
gfp = GFP_HIGHUSER;
alloc_gfp = GFP_KERNEL | __GFP_HIGH | __GFP_NOWARN | __GFP_RETRY_MAYFAIL;
num_pages = tt->num_pages;
if (IS_ENABLED(CONFIG_FAULT_INJECTION) && should_fail(&backup_fault_inject, 1))
num_pages = DIV_ROUND_UP(num_pages, 2);
for (i = 0; i < num_pages; ++i) {
s64 shandle;
page = tt->pages[i];
if (unlikely(!page))
continue;
ttm_pool_split_for_swap(pool, page);
shandle = ttm_backup_backup_page(backup, page, flags->writeback, i,
gfp, alloc_gfp);
if (shandle < 0) {
ret = shandle;
break;
}
handle = shandle;
tt->pages[i] = ttm_backup_handle_to_page_ptr(handle);
#ifdef notyet
put_page(page);
#else
STUB();
#endif
shrunken++;
}
return shrunken ? shrunken : ret;
}
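/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device used for DMA allocations and mappings
 * @nid: NUMA node to use
 * @use_dma_alloc: true if coherent DMA allocations should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types. A driver pairs this with
 * ttm_pool_fini(), e.g. (sketch only):
 *
 *	ttm_pool_init(&pool, dev, NUMA_NO_NODE, true, false);
 *	...allocate and free ttm_tt objects...
 *	ttm_pool_fini(&pool);
 */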
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
int nid, bool use_dma_alloc, bool use_dma32)
{
unsigned int i, j;
WARN_ON(!dev && use_dma_alloc);
pool->dev = dev;
pool->nid = nid;
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (j = 0; j < NR_PAGE_ORDERS; ++j) {
struct ttm_pool_type *pt;
pt = ttm_pool_select_type(pool, i, j);
if (pt != &pool->caching[i].orders[j])
continue;
ttm_pool_type_init(pt, pool, i, j);
}
}
}
EXPORT_SYMBOL(ttm_pool_init);
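/*
 * Wait for any in-flight shrinker invocation to complete; taking the
 * write side of pool_shrink_rwsem drains all readers, similar to RCU.
 */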
static void ttm_pool_synchronize_shrinkers(void)
{
down_write(&pool_shrink_rwsem);
up_write(&pool_shrink_rwsem);
}
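/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool, unregister the pool types from the global
 * shrinker and wait for running shrinkers to finish.
 */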
void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (j = 0; j < NR_PAGE_ORDERS; ++j) {
struct ttm_pool_type *pt;
pt = ttm_pool_select_type(pool, i, j);
if (pt != &pool->caching[i].orders[j])
continue;
ttm_pool_type_fini(pt);
}
}
ttm_pool_synchronize_shrinkers();
}
EXPORT_SYMBOL(ttm_pool_fini);
#define TTM_SHRINKER_BATCH ((1 << (MAX_PAGE_ORDER / 2)) * NR_PAGE_ORDERS)
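/* As long as pages are available make sure to release at least one */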
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
unsigned long num_freed = 0;
do
num_freed += ttm_pool_shrink();
while (num_freed < sc->nr_to_scan &&
atomic_long_read(&allocated_pages));
sc->nr_scanned = num_freed;
return num_freed ?: SHRINK_STOP;
}
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
struct shrink_control *sc)
{
#ifdef notyet
unsigned long num_pages = atomic_long_read(&allocated_pages);
return num_pages ? num_pages : SHRINK_EMPTY;
#else
STUB();
unsigned long num_pages = atomic_long_read(&allocated_pages);
return num_pages ? num_pages : 0;
#endif
}
#ifdef CONFIG_DEBUG_FS
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
unsigned int count = 0;
struct ttm_pool_type_lru *entry;
spin_lock(&pt->lock);
LIST_FOREACH(entry, &pt->lru, entries)
++count;
spin_unlock(&pt->lock);
return count;
}
static void ttm_pool_debugfs_header(struct seq_file *m)
{
unsigned int i;
seq_puts(m, "\t ");
for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " ---%2u---", i);
seq_puts(m, "\n");
}
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
struct seq_file *m)
{
unsigned int i;
for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
seq_puts(m, "\n");
}
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
atomic_long_read(&allocated_pages), page_pool_size);
}
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
ttm_pool_debugfs_header(m);
spin_lock(&shrinker_lock);
seq_puts(m, "wc\t:");
ttm_pool_debugfs_orders(global_write_combined, m);
seq_puts(m, "uc\t:");
ttm_pool_debugfs_orders(global_uncached, m);
seq_puts(m, "wc 32\t:");
ttm_pool_debugfs_orders(global_dma32_write_combined, m);
seq_puts(m, "uc 32\t:");
ttm_pool_debugfs_orders(global_dma32_uncached, m);
spin_unlock(&shrinker_lock);
ttm_pool_debugfs_footer(m);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);
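/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */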
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
unsigned int i;
if (!pool->use_dma_alloc && pool->nid == NUMA_NO_NODE) {
seq_puts(m, "unused\n");
return 0;
}
ttm_pool_debugfs_header(m);
spin_lock(&shrinker_lock);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
if (!ttm_pool_select_type(pool, i, 0))
continue;
if (pool->use_dma_alloc)
seq_puts(m, "DMA ");
else
seq_printf(m, "N%d ", pool->nid);
switch (i) {
case ttm_cached:
seq_puts(m, "\t:");
break;
case ttm_write_combined:
seq_puts(m, "wc\t:");
break;
case ttm_uncached:
seq_puts(m, "uc\t:");
break;
}
ttm_pool_debugfs_orders(pool->caching[i].orders, m);
}
spin_unlock(&shrinker_lock);
ttm_pool_debugfs_footer(m);
return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
struct shrink_control sc = {
.gfp_mask = GFP_NOFS,
.nr_to_scan = TTM_SHRINKER_BATCH,
};
unsigned long count;
fs_reclaim_acquire(GFP_KERNEL);
count = ttm_pool_shrinker_count(mm_shrinker, &sc);
seq_printf(m, "%lu/%lu\n", count,
ttm_pool_shrinker_scan(mm_shrinker, &sc));
fs_reclaim_release(GFP_KERNEL);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);
#endif
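/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */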
int ttm_pool_mgr_init(unsigned long num_pages)
{
unsigned int i;
if (!page_pool_size)
page_pool_size = num_pages;
mtx_init(&shrinker_lock, IPL_NONE);
INIT_LIST_HEAD(&shrinker_list);
for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_dma32_uncached[i], NULL,
ttm_uncached, i);
}
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
&ttm_pool_debugfs_globals_fops);
debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
&ttm_pool_debugfs_shrink_fops);
#ifdef CONFIG_FAULT_INJECTION
fault_create_debugfs_attr("backup_fault_inject", ttm_debugfs_root,
&backup_fault_inject);
#endif
#endif
mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
if (!mm_shrinker)
return -ENOMEM;
mm_shrinker->count_objects = ttm_pool_shrinker_count;
mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
mm_shrinker->batch = TTM_SHRINKER_BATCH;
mm_shrinker->seeks = 1;
shrinker_register(mm_shrinker);
return 0;
}
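/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */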
void ttm_pool_mgr_fini(void)
{
unsigned int i;
for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);
ttm_pool_type_fini(&global_dma32_write_combined[i]);
ttm_pool_type_fini(&global_dma32_uncached[i]);
}
shrinker_free(mm_shrinker);
WARN_ON(!list_empty(&shrinker_list));
}