#include <linux/slab.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_pagemap.h>
#include <drm/drm_pagemap_util.h>
#include <drm/drm_print.h>
/**
 * struct drm_pagemap_cache - Lookup cache holding a single drm_pagemap
 * @lookup_mutex: Serializes lookup and (re-)insertion of the cached pagemap.
 * @lock: Protects @dpagemap.
 * @shrinker: The shrinker this cache's idle pagemaps are queued on.
 * @dpagemap: The cached drm_pagemap, or NULL if the cache is empty.
 * @queued: Completed once the cached pagemap has been added to the
 *	shrinker list (see drm_pagemap_shrinker_add()); lets lookups wait
 *	for a pagemap whose refcount already dropped to zero.
 */
struct drm_pagemap_cache {
	struct mutex lookup_mutex;
	spinlock_t lock;
	struct drm_pagemap_shrinker *shrinker;
	struct drm_pagemap *dpagemap;
	struct completion queued;
};
/**
 * struct drm_pagemap_shrinker - Shrinker for idle drm_pagemaps
 * @drm: The drm device this shrinker belongs to.
 * @lock: Protects @dpagemaps.
 * @dpagemaps: List of idle pagemaps eligible for shrinking, linked
 *	through drm_pagemap::shrink_link.
 * @num_dpagemaps: Number of entries on @dpagemaps, read locklessly by
 *	the shrinker count callback.
 * @shrink: The registered core shrinker object.
 */
struct drm_pagemap_shrinker {
	struct drm_device *drm;
	spinlock_t lock;
	struct list_head dpagemaps;
	atomic_t num_dpagemaps;
	struct shrinker *shrink;
};
static bool drm_pagemap_shrinker_cancel(struct drm_pagemap *dpagemap);
/*
 * devm teardown action for a drm_pagemap_cache.
 *
 * Detaches any cached pagemap. If it could still be taken off the
 * shrinker list, destroy it here; if drm_pagemap_shrinker_cancel()
 * returns false the pagemap is no longer on the list — presumably
 * already claimed by the shrinker scan — so it must not be destroyed
 * here as well.
 */
static void drm_pagemap_cache_fini(void *arg)
{
	struct drm_pagemap_cache *cache = arg;
	struct drm_pagemap *dpagemap;

	drm_dbg(cache->shrinker->drm, "Destroying dpagemap cache.\n");
	spin_lock(&cache->lock);
	dpagemap = cache->dpagemap;
	cache->dpagemap = NULL;
	/* Only destroy if we managed to remove it from the shrinker list. */
	if (dpagemap && !drm_pagemap_shrinker_cancel(dpagemap))
		dpagemap = NULL;
	spin_unlock(&cache->lock);

	if (dpagemap)
		drm_pagemap_destroy(dpagemap, false);

	mutex_destroy(&cache->lookup_mutex);
	kfree(cache);
}
/**
 * drm_pagemap_cache_create_devm() - Create a drm_pagemap cache
 * @shrinker: The shrinker that idle pagemaps of this cache are queued on.
 *
 * Allocates and initializes a cache whose teardown
 * (drm_pagemap_cache_fini()) is tied to the underlying struct device
 * via devm.
 *
 * Return: Pointer to the new cache, or an ERR_PTR() on failure.
 */
struct drm_pagemap_cache *drm_pagemap_cache_create_devm(struct drm_pagemap_shrinker *shrinker)
{
	struct drm_pagemap_cache *cache = kzalloc_obj(*cache);
	int ret;

	if (!cache)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cache->lock);
	mutex_init(&cache->lookup_mutex);
	init_completion(&cache->queued);
	cache->shrinker = shrinker;

	/* On failure this invokes the fini action, freeing @cache. */
	ret = devm_add_action_or_reset(shrinker->drm->dev, drm_pagemap_cache_fini, cache);
	if (ret)
		return ERR_PTR(ret);

	return cache;
}
EXPORT_SYMBOL(drm_pagemap_cache_create_devm);
/**
 * drm_pagemap_cache_lock_lookup() - Lock the cache lookup mutex
 * @cache: The cache whose lookup mutex to lock.
 *
 * Return: %0 on success, negative error code if the wait was interrupted.
 */
int drm_pagemap_cache_lock_lookup(struct drm_pagemap_cache *cache)
{
	return mutex_lock_interruptible(&cache->lookup_mutex);
}
EXPORT_SYMBOL(drm_pagemap_cache_lock_lookup);
/**
 * drm_pagemap_cache_unlock_lookup() - Unlock the cache lookup mutex
 * @cache: The cache whose lookup mutex to unlock.
 */
void drm_pagemap_cache_unlock_lookup(struct drm_pagemap_cache *cache)
{
	mutex_unlock(&cache->lookup_mutex);
}
EXPORT_SYMBOL(drm_pagemap_cache_unlock_lookup);
/**
 * drm_pagemap_get_from_cache() - Look up and reference the cached drm_pagemap
 * @cache: The cache to look up in.
 *
 * Context: Must be called with the cache's lookup mutex held.
 *
 * If the cached pagemap still has references, an additional reference
 * is taken and the pagemap is returned. If its refcount has dropped to
 * zero, wait (interruptibly) until it has been queued on the shrinker
 * list, then try to reclaim it from the shrinker and reinitialize it
 * for reuse.
 *
 * Return: A referenced drm_pagemap, NULL if the cache is empty or the
 * shrinker claimed the pagemap first, or an ERR_PTR() on interrupted
 * wait or failed reinitialization.
 */
struct drm_pagemap *drm_pagemap_get_from_cache(struct drm_pagemap_cache *cache)
{
	struct drm_pagemap *dpagemap;
	int err;

	lockdep_assert_held(&cache->lookup_mutex);

retry:
	spin_lock(&cache->lock);
	dpagemap = cache->dpagemap;

	/* Fast path: the cached pagemap is still alive. */
	if (drm_pagemap_get_unless_zero(dpagemap)) {
		spin_unlock(&cache->lock);
		return dpagemap;
	}

	/* Nothing cached. */
	if (!dpagemap) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/*
	 * Refcount is zero, but the pagemap may not yet be queued on the
	 * shrinker list. Wait for that to happen, then retry from the top.
	 */
	if (!try_wait_for_completion(&cache->queued)) {
		spin_unlock(&cache->lock);
		err = wait_for_completion_interruptible(&cache->queued);
		if (err)
			return ERR_PTR(err);
		goto retry;
	}

	if (drm_pagemap_shrinker_cancel(dpagemap)) {
		/* We won the race with the shrinker; reinit for reuse. */
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);
		err = drm_pagemap_reinit(dpagemap);
		if (err) {
			drm_pagemap_destroy(dpagemap, false);
			return ERR_PTR(err);
		}
		drm_pagemap_cache_set_pagemap(cache, dpagemap);
	} else {
		/*
		 * The shrinker got to it first and owns the destruction;
		 * drop it from the cache and report an empty cache.
		 */
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);
		dpagemap = NULL;
	}

	return dpagemap;
}
EXPORT_SYMBOL(drm_pagemap_get_from_cache);
/**
 * drm_pagemap_cache_set_pagemap() - Install a drm_pagemap in a cache
 * @cache: The cache.
 * @dpagemap: The pagemap to install.
 *
 * Context: Must be called with the cache's lookup mutex held. The cache
 * slot must currently be empty (warns otherwise).
 *
 * Points the cache at @dpagemap and re-arms the @queued completion so
 * that lookups wait until the pagemap has been put on the shrinker list
 * once its refcount drops to zero.
 */
void drm_pagemap_cache_set_pagemap(struct drm_pagemap_cache *cache, struct drm_pagemap *dpagemap)
{
	struct drm_device *drm = dpagemap->drm;

	lockdep_assert_held(&cache->lookup_mutex);
	spin_lock(&cache->lock);
	dpagemap->cache = cache;
	swap(cache->dpagemap, dpagemap);
	reinit_completion(&cache->queued);
	spin_unlock(&cache->lock);
	/* After the swap, @dpagemap holds the previous slot value - must be NULL. */
	drm_WARN_ON(drm, !!dpagemap);
}
EXPORT_SYMBOL(drm_pagemap_cache_set_pagemap);
/**
 * drm_pagemap_get_from_cache_if_active() - Reference the cached pagemap if alive
 * @cache: The cache to look up in.
 *
 * Unlike drm_pagemap_get_from_cache(), this never waits for, nor
 * reclaims from, the shrinker: a reference is taken only if the cached
 * pagemap's refcount has not dropped to zero.
 *
 * Return: A referenced drm_pagemap, or NULL.
 */
struct drm_pagemap *drm_pagemap_get_from_cache_if_active(struct drm_pagemap_cache *cache)
{
	struct drm_pagemap *pagemap;

	spin_lock(&cache->lock);
	pagemap = drm_pagemap_get_unless_zero(cache->dpagemap);
	spin_unlock(&cache->lock);

	return pagemap;
}
EXPORT_SYMBOL(drm_pagemap_get_from_cache_if_active);
/*
 * Try to remove @dpagemap from its shrinker's list.
 *
 * Return: true if this caller removed it, false if it was not on the
 * list (never added, or already taken off elsewhere).
 */
static bool drm_pagemap_shrinker_cancel(struct drm_pagemap *dpagemap)
{
	struct drm_pagemap_shrinker *shrinker = dpagemap->cache->shrinker;
	bool removed = false;

	spin_lock(&shrinker->lock);
	if (!list_empty(&dpagemap->shrink_link)) {
		list_del_init(&dpagemap->shrink_link);
		atomic_dec(&shrinker->num_dpagemaps);
		removed = true;
	}
	spin_unlock(&shrinker->lock);

	return removed;
}
#ifdef CONFIG_PROVE_LOCKING
/**
 * drm_pagemap_shrinker_might_lock() - Lockdep annotation for the shrinker lock
 * @dpagemap: The pagemap whose cache's shrinker lock may be taken.
 *
 * Tells lockdep that code paths involving @dpagemap may take the
 * shrinker lock, without actually taking it. No-op if the device is
 * gone or the pagemap has no cache.
 */
void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap)
{
	int idx;

	if (drm_dev_enter(dpagemap->drm, &idx)) {
		struct drm_pagemap_cache *cache = dpagemap->cache;

		if (cache)
			might_lock(&cache->shrinker->lock);
		drm_dev_exit(idx);
	}
}
#endif
void drm_pagemap_shrinker_add(struct drm_pagemap *dpagemap)
{
struct drm_pagemap_cache *cache;
struct drm_pagemap_shrinker *shrinker;
int idx;
if (!drm_dev_enter(dpagemap->drm, &idx))
goto out_no_cache;
cache = dpagemap->cache;
if (!cache) {
drm_dev_exit(idx);
goto out_no_cache;
}
shrinker = cache->shrinker;
spin_lock(&shrinker->lock);
list_add_tail(&dpagemap->shrink_link, &shrinker->dpagemaps);
atomic_inc(&shrinker->num_dpagemaps);
spin_unlock(&shrinker->lock);
complete_all(&cache->queued);
drm_dev_exit(idx);
return;
out_no_cache:
drm_pagemap_destroy(dpagemap, true);
}
/* Shrinker count callback: report the number of queued dpagemaps. */
static unsigned long
drm_pagemap_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct drm_pagemap_shrinker *shrinker = shrink->private_data;
	unsigned long nr = atomic_read(&shrinker->num_dpagemaps);

	return nr ? nr : SHRINK_EMPTY;
}
/*
 * Shrinker scan callback: pop queued dpagemaps off the list and destroy
 * them until the scan target is met or the list is empty. The shrinker
 * lock is dropped around each destroy; presumably drm_pagemap_destroy()
 * may block - TODO(review): confirm.
 */
static unsigned long
drm_pagemap_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct drm_pagemap_shrinker *shrinker = shrink->private_data;
	struct drm_pagemap *dpagemap;
	struct drm_pagemap_cache *cache;
	unsigned long nr_freed = 0;

	sc->nr_scanned = 0;
	spin_lock(&shrinker->lock);
	do {
		dpagemap = list_first_entry_or_null(&shrinker->dpagemaps, typeof(*dpagemap),
						    shrink_link);
		if (!dpagemap)
			break;

		atomic_dec(&shrinker->num_dpagemaps);
		/* list_del_init() lets shrinker_cancel() see it's gone. */
		list_del_init(&dpagemap->shrink_link);
		spin_unlock(&shrinker->lock);

		sc->nr_scanned++;
		nr_freed++;

		/* Detach from the cache so lookups see an empty slot. */
		cache = dpagemap->cache;
		spin_lock(&cache->lock);
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);

		drm_dbg(dpagemap->drm, "Shrinking dpagemap %p.\n", dpagemap);
		drm_pagemap_destroy(dpagemap, true);
		spin_lock(&shrinker->lock);
	} while (sc->nr_scanned < sc->nr_to_scan);
	spin_unlock(&shrinker->lock);

	return sc->nr_scanned ? nr_freed : SHRINK_STOP;
}
/*
 * devm teardown action for a drm_pagemap_shrinker.
 *
 * Warns if dpagemaps are still queued at teardown, then unregisters and
 * frees the core shrinker and the wrapper structure.
 */
static void drm_pagemap_shrinker_fini(void *arg)
{
	struct drm_pagemap_shrinker *shrinker = arg;

	drm_dbg(shrinker->drm, "Destroying dpagemap shrinker.\n");
	drm_WARN_ON(shrinker->drm, !!atomic_read(&shrinker->num_dpagemaps));
	shrinker_free(shrinker->shrink);
	kfree(shrinker);
}
/**
 * drm_pagemap_shrinker_create_devm() - Create and register a drm_pagemap shrinker
 * @drm: The drm device the shrinker belongs to.
 *
 * Allocates the wrapper and the core shrinker, wires up the count and
 * scan callbacks, registers the shrinker, and ties teardown
 * (drm_pagemap_shrinker_fini()) to the underlying struct device via
 * devm.
 *
 * Return: Pointer to the new shrinker wrapper, or an ERR_PTR() on failure.
 */
struct drm_pagemap_shrinker *drm_pagemap_shrinker_create_devm(struct drm_device *drm)
{
	struct drm_pagemap_shrinker *shrinker;
	struct shrinker *shrink;
	int ret;

	shrinker = kzalloc_obj(*shrinker);
	if (!shrinker)
		return ERR_PTR(-ENOMEM);

	shrink = shrinker_alloc(0, "drm-drm_pagemap:%s", drm->unique);
	if (!shrink) {
		kfree(shrinker);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&shrinker->dpagemaps);
	spin_lock_init(&shrinker->lock);
	shrinker->shrink = shrink;
	shrinker->drm = drm;

	shrink->private_data = shrinker;
	shrink->count_objects = drm_pagemap_shrinker_count;
	shrink->scan_objects = drm_pagemap_shrinker_scan;
	shrinker_register(shrink);

	/* On failure this invokes the fini action, freeing everything. */
	ret = devm_add_action_or_reset(drm->dev, drm_pagemap_shrinker_fini, shrinker);
	if (ret)
		return ERR_PTR(ret);

	return shrinker;
}
EXPORT_SYMBOL(drm_pagemap_shrinker_create_devm);
/**
 * struct drm_pagemap_owner - Refcounted owner token shared by a group of peers
 * @kref: Reference count; one reference per peer in the group.
 */
struct drm_pagemap_owner {
	struct kref kref;
};
/* kref release callback: free the owner once the last peer is gone. */
static void drm_pagemap_owner_release(struct kref *kref)
{
	struct drm_pagemap_owner *owner =
		container_of(kref, struct drm_pagemap_owner, kref);

	kfree(owner);
}
/**
 * drm_pagemap_release_owner() - Drop a peer's reference on its owner
 * @peer: The peer leaving its owner group.
 *
 * Removes @peer from the peer list and releases its owner reference,
 * freeing the owner when the last reference drops. No-op when the peer
 * was never registered on a list.
 */
void drm_pagemap_release_owner(struct drm_pagemap_peer *peer)
{
	struct drm_pagemap_owner_list *owner_list = peer->list;

	if (owner_list) {
		mutex_lock(&owner_list->lock);
		list_del(&peer->link);
		kref_put(&peer->owner->kref, drm_pagemap_owner_release);
		peer->owner = NULL;
		mutex_unlock(&owner_list->lock);
	}
}
EXPORT_SYMBOL(drm_pagemap_release_owner);
/* Callback deciding whether two peers share a usable interconnect. */
typedef bool (*interconnect_fn)(struct drm_pagemap_peer *peer1, struct drm_pagemap_peer *peer2);

/**
 * drm_pagemap_acquire_owner() - Assign a pagemap owner to a peer
 * @peer: The peer that needs an owner.
 * @owner_list: List of all registered peers. Peers sharing an owner are
 *	kept adjacent on the list, and this function preserves that
 *	grouping invariant.
 * @has_interconnect: Callback returning whether two peers are connected.
 *
 * Walks @owner_list looking for an existing owner group in which every
 * peer has an interconnect with @peer. If one is found, @peer joins
 * that group and takes a reference on its owner; otherwise a new owner
 * is allocated for @peer alone.
 *
 * Return: %0 on success, %-ENOMEM on allocation failure.
 */
int drm_pagemap_acquire_owner(struct drm_pagemap_peer *peer,
			      struct drm_pagemap_owner_list *owner_list,
			      interconnect_fn has_interconnect)
{
	struct drm_pagemap_peer *cur_peer;
	struct drm_pagemap_owner *owner = NULL;
	bool interconnect = false;

	mutex_lock(&owner_list->lock);
	might_alloc(GFP_KERNEL);
	list_for_each_entry(cur_peer, &owner_list->peers, link) {
		if (cur_peer->owner != owner) {
			/* Entering a new group; stop if the previous one fully matched. */
			if (owner && interconnect)
				break;
			owner = cur_peer->owner;
			interconnect = true;
		}
		/* One non-connected peer disqualifies the whole group. */
		if (interconnect && !has_interconnect(peer, cur_peer))
			interconnect = false;
	}

	if (!interconnect) {
		/* No fully connected group found; start a new one. */
		owner = kmalloc_obj(*owner);
		if (!owner) {
			mutex_unlock(&owner_list->lock);
			return -ENOMEM;
		}
		kref_init(&owner->kref);
		list_add_tail(&peer->link, &owner_list->peers);
	} else {
		/*
		 * Insert just before @cur_peer, i.e. at the tail of the
		 * matched group. If the loop ran to completion, the
		 * iterator sits on the list head, so this appends to the
		 * list tail - still the end of the matched (last) group.
		 */
		kref_get(&owner->kref);
		list_add_tail(&peer->link, &cur_peer->link);
	}
	peer->owner = owner;
	peer->list = owner_list;
	mutex_unlock(&owner_list->lock);

	return 0;
}
EXPORT_SYMBOL(drm_pagemap_acquire_owner);