batch_addr — usage cross-reference (drivers/gpu/drm/i915 and drivers/gpu/drm/xe)
/* i915: gen7 render-clear batch construction (apparently gen7_renderclear.c),
 * where batch_addr() resolves a batch chunk to its GGTT address. */
*cs++ = batch_addr(state) + dst_offset;

/* STATE_BASE_ADDRESS: the five base addresses, each tagged with the
 * modify bit so the hardware latches the new value. */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; /* general */
*cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY; /* surface */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; /* dynamic */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; /* indirect */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; /* instruction */
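/*
 * For orientation, a sketch of the helper behind those hits, assuming the
 * gen7_renderclear.c shape: batch_addr() maps a chunk of the render-clear
 * batch to the GGTT offset of its backing vma, which is why it can be ORed
 * with BASE_ADDRESS_MODIFY above. Fields are abbreviated, not upstream's
 * full definition.
 */
struct batch_chunk {
	struct i915_vma *vma;	/* backing vma of the whole batch */
	/* ... offset/start/end bookkeeping elided ... */
};

static u32 batch_addr(const struct batch_chunk *bc)
{
	/* GGTT addresses fit in 32 bits on the gen7 platforms this serves */
	return i915_vma_offset(bc->vma);
}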
/* i915 command parser (check_bbstart): the jump target of a recursive
 * MI_BATCH_BUFFER_START is rebased against the user batch's address. */
u64 batch_addr,
jump_offset = jump_target - batch_addr;
/* i915 command parser: the caller computes canonical GPU addresses for
 * both the user batch and its validated shadow copy, then passes them
 * down to check_bbstart(). */
u64 batch_addr, shadow_addr;
batch_addr = gen8_canonical_addr(i915_vma_offset(batch) + batch_offset);
batch_addr, shadow_addr,
batch_addr,
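/*
 * gen8_canonical_addr() is i915's helper for producing the hardware's
 * canonical (sign-extended) form of a 48-bit GPU address; reproduced here
 * as a self-contained sketch so the arithmetic above is clear:
 */
#include <linux/bitops.h>

#define GEN8_HIGH_ADDRESS_BIT 47

static inline u64 gen8_canonical_addr(u64 address)
{
	/* replicate bit 47 through bits 63:48, as the hardware expects */
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}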
/* xe: xe_migrate.c caches the migrate-VM offset of the batch buffer, once
 * for the default context and once for the USM (fault-capable) context. */
u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
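/*
 * A loudly hypothetical sketch of what that conversion accomplishes: the
 * migrate VM identity-maps VRAM at a fixed virtual offset, so a device
 * physical address becomes a VM offset by rebasing it. The constant and
 * parameter names below are illustrative, not xe's actual definitions.
 */
#define IDENTITY_BASE_SKETCH	(256ULL << 30)	/* hypothetical 256 GiB mark */

static u64 vram_ofs_sketch(u64 vram_addr, u64 vram_dpa_base)
{
	return (vram_addr - vram_dpa_base) + IDENTITY_BASE_SKETCH;
}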
/* xe PXP: a protected-content packet submitted as a single-address job;
 * xe_sched_job_create() takes a per-width array, hence &batch_addr. */
static int pxp_pkt_submit(struct xe_exec_queue *q, u64 batch_addr)
job = xe_sched_job_create(q, &batch_addr);
/* xe_ring_ops.c: emit_bb_start() appends an MI_BATCH_BUFFER_START to the
 * ring dwords; the 64-bit batch address is split across two dwords. */
static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
dw[i++] = lower_32_bits(batch_addr);
dw[i++] = upper_32_bits(batch_addr);
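/*
 * The split uses the standard kernel helpers; their actual definitions
 * (linux/wordpart.h) explain the dword order above. The double shift in
 * upper_32_bits() avoids undefined behavior when n is only 32 bits wide.
 */
#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))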
/* xe_ring_ops.c: the per-engine-class emit paths all funnel the job's
 * batch address into emit_bb_start(). */
u64 batch_addr, u32 *head, u32 seqno)
i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
u64 batch_addr, u32 *head, u32 seqno)
i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
u64 batch_addr, u32 *head,
i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
/* Migration jobs carry two batches; BIT(8) is the address-space bit of
 * MI_BATCH_BUFFER_START, selecting PPGTT rather than GGTT. */
i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i);
i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i);
/* xe_sched_job: parallel submissions store one batch address per queue
 * width; the sites below read them back, and the last one fills them in
 * at job creation. */
job->ptrs[0].batch_addr,
job->ptrs[i].batch_addr,
job->ptrs[i].batch_addr,
job->ptrs[i].batch_addr,
job->ptrs[i].batch_addr = batch_addr[i];
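/*
 * Sketch of that assignment in context, assuming xe's convention that a
 * parallel exec queue of width N supplies N batch addresses:
 */
for (i = 0; i < q->width; ++i)
	job->ptrs[i].batch_addr = batch_addr[i];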
/* xe devcoredump: addresses are uncanonicalized (sign extension stripped)
 * before being captured into the snapshot and printed. */
snapshot->batch_addr[i] =
xe_device_uncanonicalize_addr(xe, job->ptrs[i].batch_addr);
drm_printf(p, "batch_addr[%u]: 0x%016llx\n", i, snapshot->batch_addr[i]);
/* Declarations: the job-create API takes an array of batch addresses,
 * stored per width in the job and as a flexible array in the snapshot. */
u64 *batch_addr)
u64 *batch_addr);
u64 batch_addr;
u64 batch_addr[] __counted_by(batch_addr_len);
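/*
 * __counted_by() ties a flexible array to a length member in the same
 * struct, enabling compiler and UBSAN bounds checking; a minimal sketch
 * of the required shape (field set abbreviated, not the real snapshot
 * struct):
 */
struct snapshot_sketch {
	u32 batch_addr_len;				/* element count */
	u64 batch_addr[] __counted_by(batch_addr_len);
};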
/* xe_trace.h: the scheduler tracepoints record only the first batch
 * address (ptrs[0]). */
__field(u64, batch_addr)
__entry->batch_addr = (u64)job->ptrs[0].batch_addr;
__entry->batch_addr, __entry->guc_state,
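/*
 * How those three fragments slot into a ftrace TRACE_EVENT; the event
 * name and field set here are illustrative, not xe's full definition:
 */
TRACE_EVENT(sketch_sched_job,
	    TP_PROTO(struct xe_sched_job *job),
	    TP_ARGS(job),

	    TP_STRUCT__entry(
		    __field(u64, batch_addr)
	    ),

	    TP_fast_assign(
		    __entry->batch_addr = (u64)job->ptrs[0].batch_addr;
	    ),

	    TP_printk("batch_addr=0x%016llx", __entry->batch_addr)
);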