a5xx_gpu: Adreno A5xx support in the MSM DRM driver
(fragments from drivers/gpu/drm/msm/adreno/: a5xx_gpu.c, a5xx_gpu.h, a5xx_power.c, a5xx_preempt.c)
/*
 * Drop the cached PM4 and PFP microcode BOs and clear the pointers so the
 * next microcode load re-creates them from scratch.
 */
if (a5xx_gpu->pm4_bo) {
        msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm);
        drm_gem_object_put(a5xx_gpu->pm4_bo);
        a5xx_gpu->pm4_bo = NULL;
}

if (a5xx_gpu->pfp_bo) {
        msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm);
        drm_gem_object_put(a5xx_gpu->pfp_bo);
        a5xx_gpu->pfp_bo = NULL;
}
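The unpin, put, clear sequence above repeats for every firmware BO in the
driver. A helper would make the ownership rule explicit; this is only a
sketch (the name a5xx_free_fw_bo is invented here, and it is not compilable
outside the kernel tree), built from the same two calls the driver already
uses:

/* Hypothetical helper, not part of the driver: release one firmware BO. */
static void a5xx_free_fw_bo(struct msm_gpu *gpu, struct drm_gem_object **bo)
{
        if (!*bo)
                return;

        msm_gem_unpin_iova(*bo, gpu->vm);       /* drop the GPU mapping */
        drm_gem_object_put(*bo);                /* drop our reference */
        *bo = NULL;                             /* force re-creation on next load */
}

Each call site would then collapse to a5xx_free_fw_bo(gpu, &a5xx_gpu->pm4_bo);
and so on for the other firmware BOs.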
/*
 * a5xx_destroy(): final teardown. Preemption state goes first, then every
 * BO the driver may have pinned, then the adreno core and the wrapper.
 */
static void a5xx_destroy(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        a5xx_preempt_fini(gpu);

        if (a5xx_gpu->pm4_bo) {
                msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm);
                drm_gem_object_put(a5xx_gpu->pm4_bo);
        }

        if (a5xx_gpu->pfp_bo) {
                msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm);
                drm_gem_object_put(a5xx_gpu->pfp_bo);
        }

        if (a5xx_gpu->gpmu_bo) {
                msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->vm);
                drm_gem_object_put(a5xx_gpu->gpmu_bo);
        }

        if (a5xx_gpu->shadow_bo) {
                msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->vm);
                drm_gem_object_put(a5xx_gpu->shadow_bo);
        }

        adreno_gpu_cleanup(adreno_gpu);
        kfree(a5xx_gpu);
}
/*
 * Submit path: remember the newest seqno queued on each ring; the
 * preemption code compares it against the completed fence later.
 */
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        if (ring != a5xx_gpu->cur_ring) {
                /* ... */
        }

        a5xx_gpu->last_seqno[ring->id] = submit->seqno;
/* HW init: clear every ring's rptr shadow slot before restarting the CP */
        if (a5xx_gpu->has_whereami)
                a5xx_gpu->shadow[i] = 0;

/*
 * End of submit: give the CP the submitting ring's preemption record
 * address, so a context switch has somewhere to save state.
 */
        OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
        OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
/* a5xx_active_ring(): report which ring currently owns the CP */
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        return a5xx_gpu->cur_ring;

/*
 * get_rptr(): trust the CP-maintained shadow when the microcode supports
 * it, instead of paying for a register read.
 */
        if (a5xx_gpu->has_whereami)
                return a5xx_gpu->shadow[ring->id];
/* a5xx_gpu_init(): allocate the wrapper and wire up the embedded bases */
        struct a5xx_gpu *a5xx_gpu = NULL;

        a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
        if (!a5xx_gpu)
                return ERR_PTR(-ENOMEM);

        adreno_gpu = &a5xx_gpu->base;

        /* Default leakage value consumed by the GPMU limits-management code */
        a5xx_gpu->lm_leakage = 0x4E001A;

        /* Error path: unwind everything through the common destructor */
        a5xx_destroy(&(a5xx_gpu->base.base));
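The &a5xx_gpu->base.base chain works because struct a5xx_gpu physically
embeds struct adreno_gpu, which in turn embeds struct msm_gpu, and
to_a5xx_gpu() is just container_of() in the other direction. A standalone
sketch of the same downcast, with simplified stand-in structs (only the
nesting mirrors the driver):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the real msm/adreno structs */
struct msm_gpu { int id; };
struct adreno_gpu { struct msm_gpu base; };
struct a5xx_gpu { struct adreno_gpu base; unsigned lm_leakage; };

/* Same shape as the kernel's container_of() */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)

int main(void)
{
        struct a5xx_gpu gpu = { .lm_leakage = 0x4E001A };
        struct adreno_gpu *adreno_gpu = &gpu.base;      /* upcast: take the member */

        /* Downcast: recover the wrapper from the embedded base pointer */
        printf("0x%X\n", to_a5xx_gpu(adreno_gpu)->lm_leakage);  /* 0x4E001A */
        return 0;
}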
        /* Same seqno bookkeeping in the second submit path */
        a5xx_gpu->last_seqno[ring->id] = submit->seqno;

/* HW init: ask the CP to publish its read pointer into the shadow BO */
        if (a5xx_gpu->has_whereami) {
                OUT_PKT7(ring, CP_WHERE_AM_I, 2);
                OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
                OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
        }
/* a5xx_preempt_start(): program where the CP must save this ring's context */
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
        OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
        OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
/*
 * a5xx_ucode_check_version(): inspect the loaded PFP image and, when the
 * microcode is new enough to implement CP_WHERE_AM_I, enable the rptr
 * shadow path.
 */
static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
                struct drm_gem_object *obj)
{
        /* ... version check ... */
                a5xx_gpu->has_whereami = true;
static int a5xx_ucode_load(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        int ret;

        /* Create the PM4 BO once; later calls reuse the cached copy */
        if (!a5xx_gpu->pm4_bo) {
                a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
                        adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);

                if (IS_ERR(a5xx_gpu->pm4_bo)) {
                        ret = PTR_ERR(a5xx_gpu->pm4_bo);
                        a5xx_gpu->pm4_bo = NULL;
                        return ret;
                }

                msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
        }

        /* Same dance for the prefetch-parser microcode */
        if (!a5xx_gpu->pfp_bo) {
                a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu,
                        adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova);

                if (IS_ERR(a5xx_gpu->pfp_bo)) {
                        ret = PTR_ERR(a5xx_gpu->pfp_bo);
                        a5xx_gpu->pfp_bo = NULL;
                        return ret;
                }

                msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
        }

        /* The PFP version decides whether the rptr shadow can be used */
        a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
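The IS_ERR()/PTR_ERR() pattern above works because kernel allocators return
either a valid pointer or a small negative errno encoded in the top page of
the address space. A minimal userspace re-implementation of the convention,
just to make the encoding visible (MAX_ERRNO and the casts mirror
include/linux/err.h):

#include <stdio.h>

#define MAX_ERRNO 4095  /* errno values live in the last page of addresses */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        int x = 0;
        void *ok = &x;                  /* an ordinary pointer */
        void *bad = ERR_PTR(-12);       /* -ENOMEM encoded as a pointer */

        printf("ok:  IS_ERR=%d\n", IS_ERR(ok));                         /* 0 */
        printf("bad: IS_ERR=%d err=%ld\n", IS_ERR(bad), PTR_ERR(bad));  /* 1, -12 */
        return 0;
}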
        /*
         * a5xx_flush(): only poke the hardware wptr when this ring owns the
         * CP and no preemption is mid-flight; otherwise the switch code
         * restores the wptr itself via update_wptr().
         */
        if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
                gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
        /* Lazily allocate one u32 shadow slot per ring for CP_WHERE_AM_I */
        if (a5xx_gpu->has_whereami) {
                if (!a5xx_gpu->shadow_bo) {
                        a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
                                sizeof(u32) * gpu->nr_rings,
                                MSM_BO_WC | MSM_BO_MAP_PRIV,
                                gpu->vm, &a5xx_gpu->shadow_bo,
                                &a5xx_gpu->shadow_iova);

                        if (IS_ERR(a5xx_gpu->shadow))
                                return PTR_ERR(a5xx_gpu->shadow);

                        msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
                }
        }
/* HW init: point the CP at the pinned microcode images */
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
        gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);

        /* Route CP rptr writeback into ring 0's shadow slot */
        if (a5xx_gpu->shadow_bo) {
                gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
                        shadowptr(a5xx_gpu, gpu->rb[0]));
        }
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)

/* iova of ring N's u32 slot inside the shadow BO */
#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \
                ((ring)->id * sizeof(uint32_t)))

/* True while a context switch is anywhere between START and completion */
static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
{
        int preempt_state = atomic_read(&a5xx_gpu->preempt_state);

        return !(preempt_state == PREEMPT_NONE ||
                 preempt_state == PREEMPT_ABORT);
}
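shadowptr() is plain pointer arithmetic: one u32 slot per ring, indexed by
ring id. A standalone expansion with an invented base iova, just to show
the layout:

#include <stdint.h>
#include <stdio.h>

struct toy_ring { int id; };    /* stand-in for struct msm_ringbuffer */

/* Same arithmetic as the macro above */
static uint64_t shadowptr(uint64_t shadow_iova, const struct toy_ring *ring)
{
        return shadow_iova + ring->id * sizeof(uint32_t);
}

int main(void)
{
        uint64_t shadow_iova = 0x10000000;      /* assumed value for the demo */
        struct toy_ring rb[4] = { {0}, {1}, {2}, {3} };

        for (int i = 0; i < 4; i++)     /* prints ...000, ...004, ...008, ...00c */
                printf("ring %d -> 0x%llx\n", i,
                       (unsigned long long)shadowptr(shadow_iova, &rb[i]));
        return 0;
}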
/* Power setup: hand the GPMU the part-specific leakage estimate */
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
/* a5xx_gpmu_init(): nothing to do until GPMU firmware has been staged */
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        if (!a5xx_gpu->gpmu_dwords)
                return 0;

        /* Kick an indirect buffer that feeds the GPMU its microcode */
        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
        OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
        OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
        OUT_RING(ring, a5xx_gpu->gpmu_dwords);
/* a5xx_gpmu_ucode_init(): stage the GPMU firmware once per boot */
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        if (a5xx_gpu->gpmu_bo)
                return;

        /* drm, bosize, ptr and dwords come from earlier in the function */
        ptr = msm_gem_kernel_new(drm, bosize, MSM_BO_WC | MSM_BO_GPU_READONLY,
                gpu->vm, &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);

        msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");

        /* ... copy the firmware dwords into ptr ... */

        msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
        a5xx_gpu->gpmu_dwords = dwords;
/* Move the preempt state machine old -> new; false if another path won */
static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
                enum preempt_state old, enum preempt_state new)
{
        enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
                old, new);

        return (cur == old);
}

/* a5xx_preempt_trigger(), abridged: */
        spin_lock_irqsave(&a5xx_gpu->preempt_start_lock, flags);

        /* Only one preemption may be in flight at a time */
        if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
                goto out;

        /* No other busy ring, or already on it: abort the attempt */
        if (!ring || (a5xx_gpu->cur_ring == ring)) {
                set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
                update_wptr(gpu, a5xx_gpu->cur_ring);
                set_preempt_state(a5xx_gpu, PREEMPT_NONE);
                goto out;
        }

        /* Record the incoming ring's wptr and where its context lives */
        a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
        gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
                a5xx_gpu->preempt_iova[ring->id]);
        a5xx_gpu->next_ring = ring;

        /* Watchdog: declare the preemption faulted if the CP never answers */
        mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
        set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
out:
        spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
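The whole preemption flow is a lock-assisted state machine: NONE -> START ->
TRIGGERED -> PENDING -> back to NONE, with ABORT and FAULTED as side exits,
and atomic_cmpxchg() guaranteeing that exactly one path takes each
transition. A compilable userspace sketch of the same idea (state names
mirror the driver; everything around them is invented for the demo):

#include <stdatomic.h>
#include <stdio.h>

enum preempt_state { PREEMPT_NONE, PREEMPT_START, PREEMPT_TRIGGERED,
                     PREEMPT_PENDING, PREEMPT_FAULTED, PREEMPT_ABORT };

static _Atomic int preempt_state = PREEMPT_NONE;

/* Succeeds for exactly one caller when several race on the same edge */
static int try_preempt_state(int old, int new)
{
        return atomic_compare_exchange_strong(&preempt_state, &old, new);
}

int main(void)
{
        /* Two racing "triggers": only the first claims NONE -> START */
        printf("first:  %d\n", try_preempt_state(PREEMPT_NONE, PREEMPT_START)); /* 1 */
        printf("second: %d\n", try_preempt_state(PREEMPT_NONE, PREEMPT_START)); /* 0 */

        /* "IRQ" path: TRIGGERED -> PENDING only fires once the trigger ran */
        printf("irq:    %d\n", try_preempt_state(PREEMPT_TRIGGERED, PREEMPT_PENDING)); /* 0 */
        return 0;
}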
/* a5xx_preempt_irq(): the CP finished (or botched) the context switch */
        if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
                return;

        /* The CP answered, so the stuck-preemption watchdog can stand down */
        timer_delete(&a5xx_gpu->preempt_timer);

        /* A still-busy CONTEXT_SWITCH_CNTL means the switch faulted */
        if (gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL)) {
                set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
                return;
        }

        /* Success: promote the incoming ring and resume it */
        a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
        a5xx_gpu->next_ring = NULL;
        update_wptr(gpu, a5xx_gpu->cur_ring);
        set_preempt_state(a5xx_gpu, PREEMPT_NONE);
/* a5xx_preempt_hw_init(): reset every ring's preemption record */
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        int i;

        /* The CP always comes out of reset running ring 0 */
        a5xx_gpu->cur_ring = gpu->rb[0];

        for (i = 0; i < gpu->nr_rings; i++) {
                a5xx_gpu->preempt[i]->data = 0;
                a5xx_gpu->preempt[i]->info = 0;
                a5xx_gpu->preempt[i]->wptr = 0;
                a5xx_gpu->preempt[i]->rptr = 0;
                a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
                a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]);
        }

        set_preempt_state(a5xx_gpu, PREEMPT_NONE);
/* Publish a transition unconditionally (vs. the cmpxchg in try_preempt_state) */
static inline void set_preempt_state(struct a5xx_gpu *gpu,
                enum preempt_state new)
{
        /* Barriers so other cores and the IRQ handler see the new state */
        smp_mb__before_atomic();
        atomic_set(&gpu->preempt_state, new);
        smp_mb__after_atomic();
}

/* preempt_init_ring(): hook up one ring's freshly allocated record */
static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
                struct msm_ringbuffer *ring)
{
        struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;

        /* bo, counters_bo, iova and ptr come from the allocations above */
        a5xx_gpu->preempt_bo[ring->id] = bo;
        a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo;
        a5xx_gpu->preempt_iova[ring->id] = iova;
        a5xx_gpu->preempt[ring->id] = ptr;
/* a5xx_preempt_fini(): drop every per-ring record and counters BO */
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        for (i = 0; i < gpu->nr_rings; i++) {
                msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->vm);
                msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->vm);
        }
/* a5xx_preempt_init(): one record per ring, or fall back to a single ring */
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        for (i = 0; i < gpu->nr_rings; i++) {
                if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) {
                        /* No records means no preemption: single ring only */
                        a5xx_preempt_fini(gpu);
                        gpu->nr_rings = 1;
                        return;
                }
        }

        spin_lock_init(&a5xx_gpu->preempt_start_lock);
        timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
/*
 * get_next_ring() helper: the active ring can still look busy after its
 * last submit retired, because the CP's rptr shadow lags behind; if the
 * newest submitted seqno has signalled, treat the ring as empty anyway.
 */
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
        if (!empty && ring == a5xx_gpu->cur_ring)
                empty = ring->memptrs->fence == a5xx_gpu->last_seqno[i];
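Why the extra fence check matters: wptr == rptr only says the CP has caught
up with the driver's writes, and the shadow rptr can lag even when all real
work has finished. A toy model of the two-level check (field names follow
the driver, all values invented for the demo):

#include <stdbool.h>
#include <stdio.h>

struct toy_ring {
        unsigned wptr, rptr;    /* driver-written vs. CP-reported position */
        unsigned fence;         /* last seqno the GPU has completed */
        unsigned last_seqno;    /* last seqno the driver submitted */
};

static bool ring_empty(const struct toy_ring *r, bool is_current)
{
        bool empty = (r->wptr == r->rptr);

        /* The rptr shadow of the current ring may simply be lagging */
        if (!empty && is_current)
                empty = (r->fence == r->last_seqno);

        return empty;
}

int main(void)
{
        /* rptr lags wptr, but the last submission already signalled */
        struct toy_ring r = { .wptr = 120, .rptr = 96,
                              .fence = 42, .last_seqno = 42 };

        printf("as current ring: empty=%d\n", ring_empty(&r, true));    /* 1 */
        printf("as other ring:   empty=%d\n", ring_empty(&r, false));   /* 0 */
        return 0;
}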
/* a5xx_preempt_timer(): the CP never acknowledged the context switch */
static void a5xx_preempt_timer(struct timer_list *t)
{
        struct a5xx_gpu *a5xx_gpu = timer_container_of(a5xx_gpu, t,
                preempt_timer);
        struct msm_gpu *gpu = &a5xx_gpu->base.base;

        /* Nothing to do if the completion IRQ beat us to the transition */
        if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
                return;

        /* Park in FAULTED and let the recovery worker reset the GPU */
        kthread_queue_work(gpu->worker, &gpu->recover_work);
}