sched_job
amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
struct amdgpu_job *job = to_amdgpu_job(sched_job);
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
job = to_amdgpu_job(sched_job);
/*
 * to_amdgpu_job() - recover the containing struct amdgpu_job from the
 * embedded drm_sched_job base member.
 *
 * Safe only for sched_job pointers that actually point at the `base`
 * field of a struct amdgpu_job (i.e. jobs submitted by this driver);
 * container_of() performs no runtime type check.
 */
#define to_amdgpu_job(sched_job) \
container_of((sched_job), struct amdgpu_job, base)
TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
__string(ring, sched_job->base.sched->name)
struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
void (*free_job)(struct drm_sched_job *sched_job);
void (*cancel_job)(struct drm_sched_job *sched_job);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
__entry->fence_context = sched_job->s_fence->finished.context;
__entry->fence_seqno = sched_job->s_fence->finished.seqno;
TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
__entry->fence_context = sched_job->s_fence->finished.context;
__entry->fence_seqno = sched_job->s_fence->finished.seqno;
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity),
__string(name, sched_job->sched->name)
__string(dev, dev_name(sched_job->sched->dev))
&sched_job->sched->credit_count);
__entry->fence_context = sched_job->s_fence->finished.context;
__entry->fence_seqno = sched_job->s_fence->finished.seqno;
__entry->client_id = sched_job->s_fence->drm_client_id;
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity)
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity)
struct drm_sched_job *sched_job)
trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
struct drm_sched_job *sched_job;
sched_job = drm_sched_entity_queue_peek(entity);
if (!sched_job)
drm_sched_job_dependency(sched_job, entity))) {
if (drm_sched_entity_add_dependency_cb(entity, sched_job))
dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
dma_fence_get(&sched_job->s_fence->finished));
sched_job->entity = NULL;
return sched_job;
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
struct drm_sched_entity *entity = sched_job->entity;
trace_drm_sched_job_queue(sched_job, entity);
xa_for_each(&sched_job->dependencies, index, entry)
trace_drm_sched_job_add_dep(sched_job, entry);
sched_job->submit_ts = submit_ts = ktime_get();
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
struct drm_sched_job *sched_job;
sched_job = drm_sched_entity_pop_job(entity);
if (!sched_job) {
s_fence = sched_job->s_fence;
atomic_add(sched_job->credits, &sched->credit_count);
drm_sched_job_begin(sched_job);
trace_drm_sched_job_run(sched_job, entity);
fence = sched->ops->run_job(sched_job);
r = dma_fence_add_callback(fence, &sched_job->cb,
drm_sched_job_done(sched_job, fence->error);
drm_sched_job_done(sched_job, IS_ERR(fence) ?
pjob *p = sched_job(cf, ca);
pjob *p = sched_job(cf, ca);