#include "amdgpu_ids.h"
#include <linux/idr.h>
#include <linux/dma-fence-array.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
static DEFINE_IDR(amdgpu_pasid_idr);
static DEFINE_SPINLOCK(amdgpu_pasid_idr_lock);
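
/* Helper to free a PASID from a dma_fence callback */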
struct amdgpu_pasid_cb {
struct dma_fence_cb cb;
u32 pasid;
};
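
/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID in the range [1, 1 << bits) from the global IDR.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0,
 * %-ENOSPC if the range is exhausted and %-ENOMEM on allocation failure.
 */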
int amdgpu_pasid_alloc(unsigned int bits)
{
int pasid;
	if (bits == 0)
		return -EINVAL;

	/* Cap at 31 bits so that 1U << bits below cannot overflow */
	bits = min(bits, 31U);
spin_lock(&amdgpu_pasid_idr_lock);
pasid = idr_alloc_cyclic(&amdgpu_pasid_idr, NULL, 1,
1U << bits, GFP_ATOMIC);
spin_unlock(&amdgpu_pasid_idr_lock);
if (pasid >= 0)
trace_amdgpu_pasid_allocated(pasid);
return pasid;
}
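
/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */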
void amdgpu_pasid_free(u32 pasid)
{
trace_amdgpu_pasid_freed(pasid);
spin_lock(&amdgpu_pasid_idr_lock);
idr_remove(&amdgpu_pasid_idr, pasid);
spin_unlock(&amdgpu_pasid_idr_lock);
}
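
/* Free the PASID once the fence it is attached to has signaled */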
static void amdgpu_pasid_free_cb(struct dma_fence *fence,
struct dma_fence_cb *_cb)
{
struct amdgpu_pasid_cb *cb =
container_of(_cb, struct amdgpu_pasid_cb, cb);
amdgpu_pasid_free(cb->pasid);
dma_fence_put(fence);
kfree(cb);
}
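
/**
 * amdgpu_pasid_free_delayed - free PASID when fences signal
 * @resv: reservation object with the fences to wait for
 * @pasid: PASID to free
 *
 * Free the PASID only after all the fences in @resv are signaled.
 */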
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
u32 pasid)
{
struct amdgpu_pasid_cb *cb;
struct dma_fence *fence;
int r;
r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
if (r)
goto fallback;
if (!fence) {
amdgpu_pasid_free(pasid);
return;
}
	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM, just block for the fence */
		dma_fence_wait(fence, false);
dma_fence_put(fence);
amdgpu_pasid_free(pasid);
} else {
cb->pasid = pasid;
if (dma_fence_add_callback(fence, &cb->cb,
amdgpu_pasid_free_cb))
amdgpu_pasid_free_cb(fence, &cb->cb);
}
return;
fallback:
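	/* Not enough memory for the delayed delete, as last resort
	 * block for all the fences to complete.
	 */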
dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
false, MAX_SCHEDULE_TIMEOUT);
amdgpu_pasid_free(pasid);
}
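
/*
 * VMID manager
 *
 * VMIDs are a per VMHUB identifier for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 * @adev: amdgpu device structure
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */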
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id)
{
return id->current_gpu_reset_count !=
atomic_read(&adev->gpu_reset_counter);
}
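
/* Check if the job needs a GDS/GWS/OA switch compared to what the ID holds */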
static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
struct amdgpu_job *job)
{
return id->gds_base != job->gds_base ||
id->gds_size != job->gds_size ||
id->gws_base != job->gws_base ||
id->gws_size != job->gws_size ||
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size;
}
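
/* Check if the ID still points to the job's page directory and needs no GDS switch */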
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
struct amdgpu_job *job)
{
return id->pd_gpu_addr == job->vm_pd_addr &&
!amdgpu_vmid_gds_switch_needed(id, job);
}
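
/**
 * amdgpu_vmid_grab_idle - grab an idle VMID
 * @ring: ring we want to submit a job to
 * @idle: resulting idle VMID
 * @fence: fence to wait for if no ID could be grabbed
 *
 * Try to find an idle VMID. If none is idle, *idle is set to NULL and
 * *fence is set to the fence to wait for before trying again.
 */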
static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
struct amdgpu_vmid **idle,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
if (!dma_fence_is_signaled(ring->vmid_wait)) {
*fence = dma_fence_get(ring->vmid_wait);
return 0;
}
list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) {
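		/* Don't use per engine and per process VMID at the same time */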
struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
NULL : ring;
*fence = amdgpu_sync_peek_fence(&(*idle)->active, r);
if (!(*fence))
return 0;
}
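	/*
	 * No idle VMID found: remember the blocking fence so the next attempt
	 * waits for it. One reference goes to ring->vmid_wait, one to the
	 * fence returned to the caller.
	 */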
*idle = NULL;
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = dma_fence_get(*fence);
dma_fence_get(*fence);
return 0;
}
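
/**
 * amdgpu_vmid_grab_reserved - try to assign the reserved VMID
 * @vm: VM to allocate an ID for
 * @ring: ring we want to submit a job to
 * @job: job which wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if the ID could not be grabbed
 *
 * Try to assign the VMID reserved for this VM.
 */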
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_vmid **id,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
uint64_t fence_context = adev->fence_context + ring->idx;
bool needs_flush = vm->use_cpu_for_update;
uint64_t updates = amdgpu_vm_tlb_seq(vm);
int r;
*id = vm->reserved_vmid[vmhub];
if ((*id)->owner != vm->immediate.fence_context ||
!amdgpu_vmid_compatible(*id, job) ||
(*id)->flushed_updates < updates ||
!(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
!dma_fence_is_signaled((*id)->last_flush)))
needs_flush = true;
if ((*id)->owner != vm->immediate.fence_context ||
(!adev->vm_manager.concurrent_flush && needs_flush)) {
struct dma_fence *tmp;
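
		/* Don't use per engine and per process VMID at the same time */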
if (adev->vm_manager.concurrent_flush)
ring = NULL;
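		/* To prevent one context being starved by another one */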
(*id)->pd_gpu_addr = 0;
tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
if (tmp) {
*id = NULL;
*fence = dma_fence_get(tmp);
return 0;
}
}
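	/* Good, we can use this VMID. Remember this submission as user of the VMID */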
r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
GFP_ATOMIC);
if (r)
return r;
job->vm_needs_flush = needs_flush;
job->spm_update_needed = true;
return 0;
}
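
/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 * @vm: VM to allocate an ID for
 * @ring: ring we want to submit a job to
 * @job: job which wants to use the VMID
 * @id: resulting VMID, NULL if none could be reused
 *
 * Try to reuse a VMID for this submission.
 */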
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_vmid **id)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
uint64_t updates = amdgpu_vm_tlb_seq(vm);
int r;
job->vm_needs_flush = vm->use_cpu_for_update;
list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
bool needs_flush = vm->use_cpu_for_update;
if ((*id)->owner != vm->immediate.fence_context)
continue;
if (!amdgpu_vmid_compatible(*id, job))
continue;
if (!(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
!dma_fence_is_signaled((*id)->last_flush)))
needs_flush = true;
if ((*id)->flushed_updates < updates)
needs_flush = true;
if (needs_flush && !adev->vm_manager.concurrent_flush)
continue;
r = amdgpu_sync_fence(&(*id)->active,
&job->base.s_fence->finished,
GFP_ATOMIC);
if (r)
return r;
job->vm_needs_flush |= needs_flush;
return 0;
}
*id = NULL;
return 0;
}
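
/**
 * amdgpu_vmid_grab - allocate the next free VMID
 * @vm: VM to allocate an ID for
 * @ring: ring we want to submit a job to
 * @job: job which wants to use the VMID
 * @fence: fence to wait for if no ID could be grabbed
 *
 * Allocate an ID for the VM, adding fences to the sync object as necessary.
 */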
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *idle = NULL;
struct amdgpu_vmid *id = NULL;
int r = 0;
mutex_lock(&id_mgr->lock);
r = amdgpu_vmid_grab_idle(ring, &idle, fence);
if (r || !idle)
goto error;
if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
if (r || !id)
goto error;
} else {
r = amdgpu_vmid_grab_used(vm, ring, job, &id);
if (r)
goto error;
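		/* Still no ID to use? Then use the idle one found earlier */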
if (!id) {
id = idle;
r = amdgpu_sync_fence(&id->active,
&job->base.s_fence->finished,
GFP_ATOMIC);
if (r)
goto error;
job->vm_needs_flush = true;
}
list_move_tail(&id->list, &id_mgr->ids_lru);
}
job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
if (job->vm_needs_flush) {
id->flushed_updates = amdgpu_vm_tlb_seq(vm);
dma_fence_put(id->last_flush);
id->last_flush = NULL;
}
job->vmid = id - id_mgr->ids;
job->pasid = vm->pasid;
id->gds_base = job->gds_base;
id->gds_size = job->gds_size;
id->gws_base = job->gws_base;
id->gws_size = job->gws_size;
id->oa_base = job->oa_base;
id->oa_size = job->oa_size;
id->pd_gpu_addr = job->vm_pd_addr;
id->owner = vm->immediate.fence_context;
trace_amdgpu_vm_grab_id(vm, ring, job);
error:
mutex_unlock(&id_mgr->lock);
return r;
}
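
/**
 * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
 * @vm: the VM to check
 * @vmhub: the VMHUB which will be used
 *
 * Returns: True if the VM will use a reserved VMID.
 */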
bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
{
return vm->reserved_vmid[vmhub];
}
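
/**
 * amdgpu_vmid_alloc_reserved - reserve a VMID for exclusive use by a VM
 * @adev: amdgpu device structure
 * @vm: the VM which should get the reserved VMID
 * @vmhub: the VMHUB which should be used
 *
 * Take the first VMID off the LRU and reserve it for @vm. Only one
 * reserved VMID is available per VMHUB.
 */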
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *id;
int r = 0;
mutex_lock(&id_mgr->lock);
if (vm->reserved_vmid[vmhub])
goto unlock;
if (id_mgr->reserved_vmid) {
r = -ENOENT;
goto unlock;
}
id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
list_del_init(&id->list);
vm->reserved_vmid[vmhub] = id;
id_mgr->reserved_vmid = true;
mutex_unlock(&id_mgr->lock);
return 0;
unlock:
mutex_unlock(&id_mgr->lock);
return r;
}
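
/**
 * amdgpu_vmid_free_reserved - give the reserved VMID back to the LRU
 * @adev: amdgpu device structure
 * @vm: the VM which holds the reservation
 * @vmhub: the VMHUB which was used
 */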
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
mutex_lock(&id_mgr->lock);
if (vm->reserved_vmid[vmhub]) {
list_add(&vm->reserved_vmid[vmhub]->list,
&id_mgr->ids_lru);
vm->reserved_vmid[vmhub] = NULL;
id_mgr->reserved_vmid = false;
}
mutex_unlock(&id_mgr->lock);
}
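
/**
 * amdgpu_vmid_reset - reset a VMID to zero
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: VMID number to reset
 *
 * Reset saved GDS, GWS and OA state to force a switch on next use.
 */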
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
unsigned vmid)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *id = &id_mgr->ids[vmid];
mutex_lock(&id_mgr->lock);
id->owner = 0;
id->gds_base = 0;
id->gds_size = 0;
id->gws_base = 0;
id->gws_size = 0;
id->oa_base = 0;
id->oa_size = 0;
mutex_unlock(&id_mgr->lock);
}
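
/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */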
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
unsigned i, j;
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmid_mgr *id_mgr =
&adev->vm_manager.id_mgr[i];
for (j = 1; j < id_mgr->num_ids; ++j)
amdgpu_vmid_reset(adev, i, j);
}
}
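
/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 * @adev: amdgpu device structure
 *
 * Initialize the VMID manager structures for all VMHUBs.
 */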
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
unsigned i, j;
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmid_mgr *id_mgr =
&adev->vm_manager.id_mgr[i];
mutex_init(&id_mgr->lock);
INIT_LIST_HEAD(&id_mgr->ids_lru);
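
		/*
		 * Manage only the VMIDs not used by the KFD. On GC < 10 the
		 * SDMA submits through the MMHUB as well, so both hubs are
		 * limited to first_kfd_vmid; on newer chips the MM hubs are
		 * not used by the KFD and can use all 16 VMIDs.
		 */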
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
else if (AMDGPU_IS_MMHUB0(i) ||
AMDGPU_IS_MMHUB1(i))
id_mgr->num_ids = 16;
else
id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
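		/* Skip over VMID 0, since it is the system VM */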
for (j = 1; j < id_mgr->num_ids; ++j) {
amdgpu_vmid_reset(adev, i, j);
amdgpu_sync_create(&id_mgr->ids[j].active);
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
}
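
/**
 * amdgpu_vmid_mgr_fini - cleanup the VMID manager
 * @adev: amdgpu device structure
 *
 * Free the resources held by the VMID manager.
 */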
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
unsigned i, j;
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmid_mgr *id_mgr =
&adev->vm_manager.id_mgr[i];
mutex_destroy(&id_mgr->lock);
for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
struct amdgpu_vmid *id = &id_mgr->ids[j];
amdgpu_sync_free(&id->active);
dma_fence_put(id->last_flush);
dma_fence_put(id->pasid_mapping);
}
}
}
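
/* Tear down the global PASID allocator */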
void amdgpu_pasid_mgr_cleanup(void)
{
spin_lock(&amdgpu_pasid_idr_lock);
idr_destroy(&amdgpu_pasid_idr);
spin_unlock(&amdgpu_pasid_idr_lock);
}