#include "xe_assert.h"
#include "xe_dep_job_types.h"
#include "xe_dep_scheduler.h"
#include "xe_exec_queue.h"
#include "xe_gt_printk.h"
#include "xe_gt_types.h"
#include "xe_page_reclaim.h"
#include "xe_tlb_inval.h"
#include "xe_tlb_inval_job.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_vm.h"
/**
 * struct xe_tlb_inval_job - TLB invalidation job
 *
 * Reference-counted job which issues a ranged TLB invalidation once its
 * dependencies have been resolved by the dependency scheduler.
 */
struct xe_tlb_inval_job {
/** @dep: base generic dependency job, run via &dep_job_ops */
struct xe_dep_job dep;
/** @tlb_inval: TLB invalidation client the invalidation is issued through */
struct xe_tlb_inval *tlb_inval;
/** @q: exec queue issuing the invalidation; reference held for job lifetime */
struct xe_exec_queue *q;
/** @vm: VM being invalidated (usm.asid read at run time); reference held */
struct xe_vm *vm;
/** @prl: page-reclaim list, copied in by xe_tlb_inval_job_add_page_reclaim() */
struct xe_page_reclaim_list prl;
/** @refcount: reference count, released via xe_tlb_inval_job_destroy() */
struct kref refcount;
/**
 * @fence: preallocated invalidation fence (&xe_tlb_inval_fence.base);
 * initialized/armed only in xe_tlb_inval_job_push()
 */
struct dma_fence *fence;
/** @start: start address of the invalidation range */
u64 start;
/** @end: end address of the invalidation range */
u64 end;
/** @type: XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT or XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT */
int type;
/** @fence_armed: @fence was initialized in push; selects teardown path */
bool fence_armed;
};
/**
 * xe_tlb_inval_job_run() - Run a TLB invalidation job
 * @dep_job: generic dependency job embedded in the TLB invalidation job
 *
 * Invoked by the dependency scheduler once all of the job's dependencies
 * have signaled. If the job carries a valid page-reclaim list, a PRL BO is
 * created first; that step is best-effort — on failure the invalidation
 * simply proceeds without the reclaim suballocation. The ranged TLB
 * invalidation is then issued for the job's VM asid.
 *
 * Return: the job's invalidation fence.
 */
static struct dma_fence *xe_tlb_inval_job_run(struct xe_dep_job *dep_job)
{
struct xe_tlb_inval_job *job =
container_of(dep_job, typeof(*job), dep);
struct xe_tlb_inval_fence *ifence =
container_of(job->fence, typeof(*ifence), base);
struct drm_suballoc *prl_sa = NULL;

if (xe_page_reclaim_list_valid(&job->prl)) {
prl_sa = xe_page_reclaim_create_prl_bo(job->tlb_inval, &job->prl, ifence);
/* Best effort: fall back to plain invalidation without a PRL BO */
if (IS_ERR(prl_sa))
prl_sa = NULL;
}

xe_tlb_inval_range(job->tlb_inval, ifence, job->start,
job->end, job->vm->usm.asid, prl_sa);

return job->fence;
}
static void xe_tlb_inval_job_free(struct xe_dep_job *dep_job)
{
struct xe_tlb_inval_job *job =
container_of(dep_job, typeof(*job), dep);
xe_tlb_inval_job_put(job);
}
/* Dependency-scheduler callbacks for TLB invalidation jobs */
static const struct xe_dep_job_ops dep_job_ops = {
.run_job = xe_tlb_inval_job_run,
.free_job = xe_tlb_inval_job_free,
};
/**
 * xe_tlb_inval_job_create() - Create a TLB invalidation job
 * @q: exec queue the invalidation is issued on behalf of
 * @tlb_inval: TLB invalidation client to issue the invalidation through
 * @dep_scheduler: dependency scheduler whose entity the job is queued on
 * @vm: VM to invalidate (its usm.asid is read at run time)
 * @start: start address of the invalidation range
 * @end: end address of the invalidation range
 * @type: XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT or XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT
 *
 * Allocates the job with a single reference (dropped via
 * xe_tlb_inval_job_put()), takes references on @q and @vm, preallocates the
 * invalidation fence, initializes the underlying drm_sched job, and takes a
 * runtime PM reference that is released when the job is destroyed.
 *
 * Return: new job pointer on success, ERR_PTR() on failure.
 */
struct xe_tlb_inval_job *
xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
struct xe_dep_scheduler *dep_scheduler,
struct xe_vm *vm, u64 start, u64 end, int type)
{
struct xe_tlb_inval_job *job;
struct drm_sched_entity *entity =
xe_dep_scheduler_entity(dep_scheduler);
struct xe_tlb_inval_fence *ifence;
int err;

xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);

job = kmalloc_obj(*job);
if (!job)
return ERR_PTR(-ENOMEM);

job->q = q;
job->vm = vm;
job->tlb_inval = tlb_inval;
job->start = start;
job->end = end;
job->fence_armed = false;
xe_page_reclaim_list_init(&job->prl);
job->dep.ops = &dep_job_ops;
job->type = type;
kref_init(&job->refcount);
/* Hold @q and @vm until xe_tlb_inval_job_destroy() */
xe_exec_queue_get(q);
xe_vm_get(vm);

/* Fence preallocated here; armed later in xe_tlb_inval_job_push() */
ifence = kmalloc_obj(*ifence);
if (!ifence) {
err = -ENOMEM;
goto err_job;
}
job->fence = &ifence->base;

err = drm_sched_job_init(&job->dep.drm, entity, 1, NULL,
q->xef ? q->xef->drm->client_id : 0);
if (err)
goto err_fence;

/* Released in xe_tlb_inval_job_destroy() */
xe_pm_runtime_get_noresume(gt_to_xe(q->gt));

return job;

err_fence:
kfree(ifence);
err_job:
/* refcount never shared on this path — unwind directly, no kref_put() */
xe_vm_put(vm);
xe_exec_queue_put(q);
kfree(job);

return ERR_PTR(err);
}
/**
 * xe_tlb_inval_job_add_page_reclaim() - Attach a page-reclaim list to a job
 * @job: TLB invalidation job
 * @prl: page-reclaim list to attach (copied by value)
 *
 * Copies @prl into the job and takes a reference on its entries; the
 * reference is dropped in xe_tlb_inval_job_destroy(). Warns if the device
 * lacks page-reclaim hardware assist — callers are expected to only attach
 * a PRL when the feature is present.
 */
void xe_tlb_inval_job_add_page_reclaim(struct xe_tlb_inval_job *job,
struct xe_page_reclaim_list *prl)
{
struct xe_device *xe = gt_to_xe(job->q->gt);

xe_gt_WARN_ON(job->q->gt, !xe->info.has_page_reclaim_hw_assist);

job->prl = *prl;
xe_page_reclaim_entries_get(job->prl.entries);
}
/**
 * xe_tlb_inval_job_destroy() - Destroy a TLB invalidation job (kref release)
 * @ref: the job's embedded refcount
 *
 * Releases everything acquired in xe_tlb_inval_job_create() /
 * xe_tlb_inval_job_add_page_reclaim(). An unarmed fence was never
 * initialized as a dma_fence, so it is plain-kfree'd; an armed fence is
 * released via dma_fence_put() instead.
 */
static void xe_tlb_inval_job_destroy(struct kref *ref)
{
struct xe_tlb_inval_job *job = container_of(ref, typeof(*job),
refcount);
struct xe_tlb_inval_fence *ifence =
container_of(job->fence, typeof(*ifence), base);
/* Cache before kfree(job) — puts below happen after the job is freed */
struct xe_exec_queue *q = job->q;
struct xe_device *xe = gt_to_xe(q->gt);
struct xe_vm *vm = job->vm;

xe_page_reclaim_entries_put(job->prl.entries);

if (!job->fence_armed)
kfree(ifence);
else
/* Armed fence has its own reference count */
dma_fence_put(job->fence);

drm_sched_job_cleanup(&job->dep.drm);
kfree(job);
xe_vm_put(vm);
xe_exec_queue_put(q);
xe_pm_runtime_put(xe);
}
/**
 * xe_tlb_inval_job_alloc_dep() - Preallocate the job's dependency slot
 * @job: TLB invalidation job
 *
 * Stores the stub fence in dependency slot 0 while allocation is still
 * allowed (may sleep), so that xe_tlb_inval_job_push() can later replace it
 * with the real fence using xa_store(GFP_ATOMIC) without allocating.
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_tlb_inval_job_alloc_dep(struct xe_tlb_inval_job *job)
{
	/* Slot 0 must still be empty at this point */
	xe_assert(gt_to_xe(job->q->gt),
		  !xa_load(&job->dep.drm.dependencies, 0));
	might_alloc(GFP_KERNEL);

	return drm_sched_job_add_dependency(&job->dep.drm,
					    dma_fence_get_stub());
}
/**
 * xe_tlb_inval_job_push() - Arm and queue a TLB invalidation job
 * @job: TLB invalidation job
 * @m: migrate engine used to serialize against migration jobs
 * @fence: fence the invalidation must wait on
 *
 * If @fence is unsignaled, it replaces the stub fence preallocated in
 * xe_tlb_inval_job_alloc_dep() (GFP_ATOMIC store cannot fail because the
 * slot was preallocated). Takes an extra job reference for the scheduler
 * (dropped in xe_tlb_inval_job_free()), then, under the migrate job lock,
 * initializes the invalidation fence, arms and pushes the drm_sched job,
 * and records it as the queue's last TLB invalidation fence for @job->type.
 *
 * Return: the scheduler's finished fence for the job; a reference is taken
 * for the caller.
 */
struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job,
struct xe_migrate *m,
struct dma_fence *fence)
{
struct xe_tlb_inval_fence *ifence =
container_of(job->fence, typeof(*ifence), base);

if (!dma_fence_is_signaled(fence)) {
void *ptr;

/* Slot 0 must still hold the preallocated stub */
xe_assert(gt_to_xe(job->q->gt),
xa_load(&job->dep.drm.dependencies, 0) ==
dma_fence_get_stub());

/* Reference consumed by the dependency array */
dma_fence_get(fence);
/* GFP_ATOMIC is safe: the slot was preallocated, no allocation here */
ptr = xa_store(&job->dep.drm.dependencies, 0, fence,
GFP_ATOMIC);
xe_assert(gt_to_xe(job->q->gt), !xa_is_err(ptr));
}

/* Scheduler's reference, dropped in xe_tlb_inval_job_free() */
xe_tlb_inval_job_get(job);
job->fence_armed = true;

xe_migrate_job_lock(m, job->q);

xe_tlb_inval_fence_init(job->tlb_inval, ifence, false);
/* Run callback's reference to job->fence */
dma_fence_get(job->fence);

drm_sched_job_arm(&job->dep.drm);
/* Caller's reference to the finished fence */
dma_fence_get(&job->dep.drm.s_fence->finished);
drm_sched_entity_push_job(&job->dep.drm);

xe_exec_queue_tlb_inval_last_fence_set(job->q, job->vm,
&job->dep.drm.s_fence->finished,
job->type);

xe_migrate_job_unlock(m, job->q);

return &job->dep.drm.s_fence->finished;
}
/**
 * xe_tlb_inval_job_get() - Take a reference on a TLB invalidation job
 * @job: TLB invalidation job (must be non-NULL with a live reference)
 */
void xe_tlb_inval_job_get(struct xe_tlb_inval_job *job)
{
kref_get(&job->refcount);
}
/**
 * xe_tlb_inval_job_put() - Drop a reference on a TLB invalidation job
 * @job: TLB invalidation job; NULL and ERR_PTR values are tolerated (no-op)
 *
 * The final reference triggers xe_tlb_inval_job_destroy().
 */
void xe_tlb_inval_job_put(struct xe_tlb_inval_job *job)
{
	if (IS_ERR_OR_NULL(job))
		return;

	kref_put(&job->refcount, xe_tlb_inval_job_destroy);
}