drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c:905:long workload[1];
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c:923:workload[0] = hwmgr->workload_setting[index];
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c:928:workload[0] = hwmgr->workload_setting[index];
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c:938:hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c:275:long workload[1];
drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c:300:workload[0] = hwmgr->workload_setting[index];
drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c:302:if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode)
drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c:303:hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h:1755:#define WORKLOAD_MAP(profile, workload) \
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h:1756:[profile] = {1, (workload)}
drivers/gpu/drm/i915/gvt/cmd_parser.c:1024:s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
drivers/gpu/drm/i915/gvt/cmd_parser.c:1235:s->workload->pending_events);
drivers/gpu/drm/i915/gvt/cmd_parser.c:1242:s->workload->pending_events);
drivers/gpu/drm/i915/gvt/cmd_parser.c:1758:s->workload->pending_events);
drivers/gpu/drm/i915/gvt/cmd_parser.c:1829:s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
drivers/gpu/drm/i915/gvt/cmd_parser.c:1845:s->engine->name, s->workload);
drivers/gpu/drm/i915/gvt/cmd_parser.c:1857:s->engine->name, s->workload);
drivers/gpu/drm/i915/gvt/cmd_parser.c:1891:s->engine->name, s->workload);
drivers/gpu/drm/i915/gvt/cmd_parser.c:1912:s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
drivers/gpu/drm/i915/gvt/cmd_parser.c:1974:list_add(&bb->list, &s->workload->shadow_bb);
drivers/gpu/drm/i915/gvt/cmd_parser.c:2752:s->engine->name, s->workload);
drivers/gpu/drm/i915/gvt/cmd_parser.c:2760:s->workload, info->name);
drivers/gpu/drm/i915/gvt/cmd_parser.c:2845:static int scan_workload(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/cmd_parser.c:2852:if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
drivers/gpu/drm/i915/gvt/cmd_parser.c:2855:gma_head = workload->rb_start + workload->rb_head;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2856:gma_tail = workload->rb_start + workload->rb_tail;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2860:s.vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2861:s.engine = workload->engine;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2862:s.ring_start = workload->rb_start;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2863:s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
drivers/gpu/drm/i915/gvt/cmd_parser.c:2866:s.rb_va = workload->shadow_ring_buffer_va;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2867:s.workload = workload;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2870:if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
drivers/gpu/drm/i915/gvt/cmd_parser.c:2877:ret = command_scan(&s, workload->rb_head, workload->rb_tail,
drivers/gpu/drm/i915/gvt/cmd_parser.c:2878:workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
drivers/gpu/drm/i915/gvt/cmd_parser.c:2890:struct intel_vgpu_workload *workload = container_of(wa_ctx,
drivers/gpu/drm/i915/gvt/cmd_parser.c:2907:s.vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2908:s.engine = workload->engine;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2914:s.workload = workload;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2927:static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/cmd_parser.c:2929:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2935:guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
drivers/gpu/drm/i915/gvt/cmd_parser.c:2938:workload->rb_len = (workload->rb_tail + guest_rb_size -
drivers/gpu/drm/i915/gvt/cmd_parser.c:2939:workload->rb_head) % guest_rb_size;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2941:gma_head = workload->rb_start + workload->rb_head;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2942:gma_tail = workload->rb_start + workload->rb_tail;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2943:gma_top = workload->rb_start + guest_rb_size;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2945:if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
drivers/gpu/drm/i915/gvt/cmd_parser.c:2949:p = krealloc(s->ring_scan_buffer[workload->engine->id],
drivers/gpu/drm/i915/gvt/cmd_parser.c:2950:workload->rb_len, GFP_KERNEL);
drivers/gpu/drm/i915/gvt/cmd_parser.c:2955:s->ring_scan_buffer[workload->engine->id] = p;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2956:s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2959:shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];
drivers/gpu/drm/i915/gvt/cmd_parser.c:2962:workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2973:gma_head = workload->rb_start;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2986:int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/cmd_parser.c:2989:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/cmd_parser.c:2991:ret = shadow_workload_ring_buffer(workload);
drivers/gpu/drm/i915/gvt/cmd_parser.c:2997:ret = scan_workload(workload);
drivers/gpu/drm/i915/gvt/cmd_parser.c:3009:struct intel_vgpu_workload *workload = container_of(wa_ctx,
drivers/gpu/drm/i915/gvt/cmd_parser.c:3012:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/cmd_parser.c:3017:obj = i915_gem_object_create_shmem(workload->engine->i915,
drivers/gpu/drm/i915/gvt/cmd_parser.c:3039:ret = copy_gma_to_hva(workload->vgpu,
drivers/gpu/drm/i915/gvt/cmd_parser.c:3040:workload->vgpu->gtt.ggtt_mm,
drivers/gpu/drm/i915/gvt/cmd_parser.c:3081:struct intel_vgpu_workload *workload = container_of(wa_ctx,
drivers/gpu/drm/i915/gvt/cmd_parser.c:3084:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/cmd_parser.c:3146:s.workload = NULL;
drivers/gpu/drm/i915/gvt/cmd_parser.c:3166:int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/cmd_parser.c:3168:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/cmd_parser.c:3171:int ring_id = workload->engine->id;
drivers/gpu/drm/i915/gvt/cmd_parser.c:3177:ctx_size = workload->engine->context_size - PAGE_SIZE;
drivers/gpu/drm/i915/gvt/cmd_parser.c:3191:s.vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/cmd_parser.c:3192:s.engine = workload->engine;
drivers/gpu/drm/i915/gvt/cmd_parser.c:3198:s.workload = workload;
drivers/gpu/drm/i915/gvt/cmd_parser.c:522:struct intel_vgpu_workload *workload;
drivers/gpu/drm/i915/gvt/cmd_parser.c:860:u32 base = s->workload->engine->mmio_base;
drivers/gpu/drm/i915/gvt/cmd_parser.c:868:struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
drivers/gpu/drm/i915/gvt/cmd_parser.c:884:&s->workload->lri_shadow_mm);
drivers/gpu/drm/i915/gvt/cmd_parser.h:50:int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
drivers/gpu/drm/i915/gvt/cmd_parser.h:56:int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload);
drivers/gpu/drm/i915/gvt/execlist.c:370:static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/execlist.c:372:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/execlist.c:377:if (!workload->emulate_schedule_in)
drivers/gpu/drm/i915/gvt/execlist.c:380:ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
drivers/gpu/drm/i915/gvt/execlist.c:381:ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
drivers/gpu/drm/i915/gvt/execlist.c:383:ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],
drivers/gpu/drm/i915/gvt/execlist.c:392:static int complete_execlist_workload(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/execlist.c:394:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/execlist.c:397:&s->execlist[workload->engine->id];
drivers/gpu/drm/i915/gvt/execlist.c:399:struct list_head *next = workload_q_head(vgpu, workload->engine)->next;
drivers/gpu/drm/i915/gvt/execlist.c:404:workload, workload->status);
drivers/gpu/drm/i915/gvt/execlist.c:406:if (workload->status || vgpu->resetting_eng & workload->engine->mask)
drivers/gpu/drm/i915/gvt/execlist.c:409:if (!list_empty(workload_q_head(vgpu, workload->engine))) {
drivers/gpu/drm/i915/gvt/execlist.c:414:this_desc = &workload->ctx_desc;
drivers/gpu/drm/i915/gvt/execlist.c:425:ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
drivers/gpu/drm/i915/gvt/execlist.c:436:struct intel_vgpu_workload *workload = NULL;
drivers/gpu/drm/i915/gvt/execlist.c:438:workload = intel_vgpu_create_workload(vgpu, engine, desc);
drivers/gpu/drm/i915/gvt/execlist.c:439:if (IS_ERR(workload))
drivers/gpu/drm/i915/gvt/execlist.c:440:return PTR_ERR(workload);
drivers/gpu/drm/i915/gvt/execlist.c:442:workload->prepare = prepare_execlist_workload;
drivers/gpu/drm/i915/gvt/execlist.c:443:workload->complete = complete_execlist_workload;
drivers/gpu/drm/i915/gvt/execlist.c:444:workload->emulate_schedule_in = emulate_schedule_in;
drivers/gpu/drm/i915/gvt/execlist.c:447:workload->elsp_dwords = s->execlist[engine->id].elsp_dwords;
drivers/gpu/drm/i915/gvt/execlist.c:449:gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
drivers/gpu/drm/i915/gvt/execlist.c:452:intel_vgpu_queue_workload(workload);
drivers/gpu/drm/i915/gvt/gvt.h:563:int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1025:intel_gvt_write_gpa(vgpu, workload->ring_context_gpa +
drivers/gpu/drm/i915/gvt/scheduler.c:1026:RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
drivers/gpu/drm/i915/gvt/scheduler.c:1030:if (!list_empty(&workload->lri_shadow_mm)) {
drivers/gpu/drm/i915/gvt/scheduler.c:1031:struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
drivers/gpu/drm/i915/gvt/scheduler.c:1035:update_guest_pdps(vgpu, workload->ring_context_gpa,
drivers/gpu/drm/i915/gvt/scheduler.c:1040:intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \
drivers/gpu/drm/i915/gvt/scheduler.c:1049:workload->ring_context_gpa +
drivers/gpu/drm/i915/gvt/scheduler.c:1078:struct intel_vgpu_workload *workload =
drivers/gpu/drm/i915/gvt/scheduler.c:1080:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:1082:struct i915_request *rq = workload->req;
drivers/gpu/drm/i915/gvt/scheduler.c:109:if (workload->engine->id != RCS0)
drivers/gpu/drm/i915/gvt/scheduler.c:1093:wait_event(workload->shadow_ctx_status_wq,
drivers/gpu/drm/i915/gvt/scheduler.c:1094:!atomic_read(&workload->shadow_ctx_active));
drivers/gpu/drm/i915/gvt/scheduler.c:1101:if (likely(workload->status == -EINPROGRESS)) {
drivers/gpu/drm/i915/gvt/scheduler.c:1102:if (workload->req->fence.error == -EIO)
drivers/gpu/drm/i915/gvt/scheduler.c:1103:workload->status = -EIO;
drivers/gpu/drm/i915/gvt/scheduler.c:1105:workload->status = 0;
drivers/gpu/drm/i915/gvt/scheduler.c:1108:if (!workload->status &&
drivers/gpu/drm/i915/gvt/scheduler.c:1110:update_guest_context(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1112:for_each_set_bit(event, workload->pending_events,
drivers/gpu/drm/i915/gvt/scheduler.c:1117:i915_request_put(fetch_and_zero(&workload->req));
drivers/gpu/drm/i915/gvt/scheduler.c:1121:ring_id, workload, workload->status);
drivers/gpu/drm/i915/gvt/scheduler.c:1125:list_del_init(&workload->list);
drivers/gpu/drm/i915/gvt/scheduler.c:1127:if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
drivers/gpu/drm/i915/gvt/scheduler.c:113:workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
drivers/gpu/drm/i915/gvt/scheduler.c:1144:workload->complete(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1146:intel_vgpu_shadow_mm_unpin(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1147:intel_vgpu_destroy_workload(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:115:for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
drivers/gpu/drm/i915/gvt/scheduler.c:1165:struct intel_vgpu_workload *workload = NULL;
drivers/gpu/drm/i915/gvt/scheduler.c:1177:workload = pick_next_workload(gvt, engine);
drivers/gpu/drm/i915/gvt/scheduler.c:1178:if (workload)
drivers/gpu/drm/i915/gvt/scheduler.c:118:workload->flex_mmio[i] = reg_state[state_offset + 1];
drivers/gpu/drm/i915/gvt/scheduler.c:1185:if (!workload)
drivers/gpu/drm/i915/gvt/scheduler.c:1189:engine->name, workload,
drivers/gpu/drm/i915/gvt/scheduler.c:1190:workload->vgpu->id);
drivers/gpu/drm/i915/gvt/scheduler.c:1195:engine->name, workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1206:update_vreg_in_ctx(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1208:ret = dispatch_workload(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1211:vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:1217:engine->name, workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1218:i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
drivers/gpu/drm/i915/gvt/scheduler.c:1222:workload, workload->status);
drivers/gpu/drm/i915/gvt/scheduler.c:123:reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
drivers/gpu/drm/i915/gvt/scheduler.c:125:for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
drivers/gpu/drm/i915/gvt/scheduler.c:130:reg_state[state_offset + 1] = workload->flex_mmio[i];
drivers/gpu/drm/i915/gvt/scheduler.c:135:static int populate_shadow_context(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:137:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:139:struct intel_context *ctx = workload->req->context;
drivers/gpu/drm/i915/gvt/scheduler.c:149:int ring_id = workload->engine->id;
drivers/gpu/drm/i915/gvt/scheduler.c:1526:void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:1528:struct intel_vgpu_submission *s = &workload->vgpu->submission;
drivers/gpu/drm/i915/gvt/scheduler.c:1530:intel_context_unpin(s->shadow[workload->engine->id]);
drivers/gpu/drm/i915/gvt/scheduler.c:1531:release_shadow_batch_buffer(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1532:release_shadow_wa_ctx(&workload->wa_ctx);
drivers/gpu/drm/i915/gvt/scheduler.c:1534:if (!list_empty(&workload->lri_shadow_mm)) {
drivers/gpu/drm/i915/gvt/scheduler.c:1536:list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
drivers/gpu/drm/i915/gvt/scheduler.c:1543:GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
drivers/gpu/drm/i915/gvt/scheduler.c:1544:if (workload->shadow_mm)
drivers/gpu/drm/i915/gvt/scheduler.c:1545:intel_vgpu_mm_put(workload->shadow_mm);
drivers/gpu/drm/i915/gvt/scheduler.c:1547:kmem_cache_free(s->workloads, workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1554:struct intel_vgpu_workload *workload;
drivers/gpu/drm/i915/gvt/scheduler.c:1556:workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
drivers/gpu/drm/i915/gvt/scheduler.c:1557:if (!workload)
drivers/gpu/drm/i915/gvt/scheduler.c:1560:INIT_LIST_HEAD(&workload->list);
drivers/gpu/drm/i915/gvt/scheduler.c:1561:INIT_LIST_HEAD(&workload->shadow_bb);
drivers/gpu/drm/i915/gvt/scheduler.c:1562:INIT_LIST_HEAD(&workload->lri_shadow_mm);
drivers/gpu/drm/i915/gvt/scheduler.c:1564:init_waitqueue_head(&workload->shadow_ctx_status_wq);
drivers/gpu/drm/i915/gvt/scheduler.c:1565:atomic_set(&workload->shadow_ctx_active, 0);
drivers/gpu/drm/i915/gvt/scheduler.c:1567:workload->status = -EINPROGRESS;
drivers/gpu/drm/i915/gvt/scheduler.c:1568:workload->vgpu = vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:1570:return workload;
drivers/gpu/drm/i915/gvt/scheduler.c:1589:static int prepare_mm(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:159:sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
drivers/gpu/drm/i915/gvt/scheduler.c:1591:struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
drivers/gpu/drm/i915/gvt/scheduler.c:1593:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:1609:read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
drivers/gpu/drm/i915/gvt/scheduler.c:161:intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
drivers/gpu/drm/i915/gvt/scheduler.c:1611:mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
drivers/gpu/drm/i915/gvt/scheduler.c:1615:workload->shadow_mm = mm;
drivers/gpu/drm/i915/gvt/scheduler.c:164:intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
drivers/gpu/drm/i915/gvt/scheduler.c:1643:struct intel_vgpu_workload *workload = NULL;
drivers/gpu/drm/i915/gvt/scheduler.c:1699:workload = alloc_workload(vgpu);
drivers/gpu/drm/i915/gvt/scheduler.c:1700:if (IS_ERR(workload))
drivers/gpu/drm/i915/gvt/scheduler.c:1701:return workload;
drivers/gpu/drm/i915/gvt/scheduler.c:1703:workload->engine = engine;
drivers/gpu/drm/i915/gvt/scheduler.c:1704:workload->ctx_desc = *desc;
drivers/gpu/drm/i915/gvt/scheduler.c:1705:workload->ring_context_gpa = ring_context_gpa;
drivers/gpu/drm/i915/gvt/scheduler.c:1706:workload->rb_head = head;
drivers/gpu/drm/i915/gvt/scheduler.c:1707:workload->guest_rb_head = guest_head;
drivers/gpu/drm/i915/gvt/scheduler.c:1708:workload->rb_tail = tail;
drivers/gpu/drm/i915/gvt/scheduler.c:1709:workload->rb_start = start;
drivers/gpu/drm/i915/gvt/scheduler.c:1710:workload->rb_ctl = ctl;
drivers/gpu/drm/i915/gvt/scheduler.c:1718:workload->wa_ctx.indirect_ctx.guest_gma =
drivers/gpu/drm/i915/gvt/scheduler.c:1720:workload->wa_ctx.indirect_ctx.size =
drivers/gpu/drm/i915/gvt/scheduler.c:1724:if (workload->wa_ctx.indirect_ctx.size != 0) {
drivers/gpu/drm/i915/gvt/scheduler.c:1726:workload->wa_ctx.indirect_ctx.guest_gma,
drivers/gpu/drm/i915/gvt/scheduler.c:1727:workload->wa_ctx.indirect_ctx.size)) {
drivers/gpu/drm/i915/gvt/scheduler.c:1729:workload->wa_ctx.indirect_ctx.guest_gma);
drivers/gpu/drm/i915/gvt/scheduler.c:173:if (workload->engine->id == RCS0) {
drivers/gpu/drm/i915/gvt/scheduler.c:1730:kmem_cache_free(s->workloads, workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1735:workload->wa_ctx.per_ctx.guest_gma =
drivers/gpu/drm/i915/gvt/scheduler.c:1737:workload->wa_ctx.per_ctx.valid = per_ctx & 1;
drivers/gpu/drm/i915/gvt/scheduler.c:1738:if (workload->wa_ctx.per_ctx.valid) {
drivers/gpu/drm/i915/gvt/scheduler.c:1740:workload->wa_ctx.per_ctx.guest_gma,
drivers/gpu/drm/i915/gvt/scheduler.c:1743:workload->wa_ctx.per_ctx.guest_gma);
drivers/gpu/drm/i915/gvt/scheduler.c:1744:kmem_cache_free(s->workloads, workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1751:workload, engine->name, head, tail, start, ctl);
drivers/gpu/drm/i915/gvt/scheduler.c:1753:ret = prepare_mm(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1755:kmem_cache_free(s->workloads, workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1766:ret = intel_gvt_scan_and_shadow_workload(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:177:} else if (workload->engine->id == BCS0)
drivers/gpu/drm/i915/gvt/scheduler.c:1772:intel_vgpu_destroy_workload(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1778:intel_vgpu_destroy_workload(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:1782:return workload;
drivers/gpu/drm/i915/gvt/scheduler.c:1789:void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:179:workload->ring_context_gpa +
drivers/gpu/drm/i915/gvt/scheduler.c:1791:list_add_tail(&workload->list,
drivers/gpu/drm/i915/gvt/scheduler.c:1792:workload_q_head(workload->vgpu, workload->engine));
drivers/gpu/drm/i915/gvt/scheduler.c:1793:intel_gvt_kick_schedule(workload->vgpu->gvt);
drivers/gpu/drm/i915/gvt/scheduler.c:1794:wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
drivers/gpu/drm/i915/gvt/scheduler.c:190:workload->ring_context_gpa +
drivers/gpu/drm/i915/gvt/scheduler.c:196:sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
drivers/gpu/drm/i915/gvt/scheduler.c:199:workload->engine->name, workload->ctx_desc.lrca,
drivers/gpu/drm/i915/gvt/scheduler.c:200:workload->ctx_desc.context_id,
drivers/gpu/drm/i915/gvt/scheduler.c:201:workload->ring_context_gpa);
drivers/gpu/drm/i915/gvt/scheduler.c:211:workload->ctx_desc.lrca) &&
drivers/gpu/drm/i915/gvt/scheduler.c:213:workload->ring_context_gpa))
drivers/gpu/drm/i915/gvt/scheduler.c:216:s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
drivers/gpu/drm/i915/gvt/scheduler.c:217:s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
drivers/gpu/drm/i915/gvt/scheduler.c:223:context_page_num = workload->engine->context_size;
drivers/gpu/drm/i915/gvt/scheduler.c:226:if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
drivers/gpu/drm/i915/gvt/scheduler.c:235:(u32)((workload->ctx_desc.lrca + i) <<
drivers/gpu/drm/i915/gvt/scheduler.c:261:ret = intel_gvt_scan_engine_context(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:302:struct intel_vgpu_workload *workload;
drivers/gpu/drm/i915/gvt/scheduler.c:319:workload = scheduler->current_workload[ring_id];
drivers/gpu/drm/i915/gvt/scheduler.c:320:if (unlikely(!workload))
drivers/gpu/drm/i915/gvt/scheduler.c:326:if (workload->vgpu != scheduler->engine_owner[ring_id]) {
drivers/gpu/drm/i915/gvt/scheduler.c:329:workload->vgpu, rq->engine);
drivers/gpu/drm/i915/gvt/scheduler.c:330:scheduler->engine_owner[ring_id] = workload->vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:333:ring_id, workload->vgpu->id);
drivers/gpu/drm/i915/gvt/scheduler.c:335:atomic_set(&workload->shadow_ctx_active, 1);
drivers/gpu/drm/i915/gvt/scheduler.c:338:save_ring_hw_state(workload->vgpu, rq->engine);
drivers/gpu/drm/i915/gvt/scheduler.c:339:atomic_set(&workload->shadow_ctx_active, 0);
drivers/gpu/drm/i915/gvt/scheduler.c:342:save_ring_hw_state(workload->vgpu, rq->engine);
drivers/gpu/drm/i915/gvt/scheduler.c:348:wake_up(&workload->shadow_ctx_status_wq);
drivers/gpu/drm/i915/gvt/scheduler.c:354:struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:363:desc |= (u64)workload->ctx_desc.addressing_mode <<
drivers/gpu/drm/i915/gvt/scheduler.c:369:static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:371:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:372:struct i915_request *req = workload->req;
drivers/gpu/drm/i915/gvt/scheduler.c:399:cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
drivers/gpu/drm/i915/gvt/scheduler.c:402:workload->rb_len);
drivers/gpu/drm/i915/gvt/scheduler.c:406:shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
drivers/gpu/drm/i915/gvt/scheduler.c:409:workload->shadow_ring_buffer_va = cs;
drivers/gpu/drm/i915/gvt/scheduler.c:412:workload->rb_len);
drivers/gpu/drm/i915/gvt/scheduler.c:414:cs += workload->rb_len / sizeof(u32);
drivers/gpu/drm/i915/gvt/scheduler.c:415:intel_ring_advance(workload->req, cs);
drivers/gpu/drm/i915/gvt/scheduler.c:442:static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
drivers/gpu/drm/i915/gvt/scheduler.c:445:struct intel_vgpu_mm *mm = workload->shadow_mm;
drivers/gpu/drm/i915/gvt/scheduler.c:467:intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:469:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:473:if (workload->req)
drivers/gpu/drm/i915/gvt/scheduler.c:476:rq = i915_request_create(s->shadow[workload->engine->id]);
drivers/gpu/drm/i915/gvt/scheduler.c:482:workload->req = i915_request_get(rq);
drivers/gpu/drm/i915/gvt/scheduler.c:494:int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:496:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:502:if (workload->shadow)
drivers/gpu/drm/i915/gvt/scheduler.c:505:if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
drivers/gpu/drm/i915/gvt/scheduler.c:506:shadow_context_descriptor_update(s->shadow[workload->engine->id],
drivers/gpu/drm/i915/gvt/scheduler.c:507:workload);
drivers/gpu/drm/i915/gvt/scheduler.c:509:ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:513:if (workload->engine->id == RCS0 &&
drivers/gpu/drm/i915/gvt/scheduler.c:514:workload->wa_ctx.indirect_ctx.size) {
drivers/gpu/drm/i915/gvt/scheduler.c:515:ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
drivers/gpu/drm/i915/gvt/scheduler.c:520:workload->shadow = true;
drivers/gpu/drm/i915/gvt/scheduler.c:524:release_shadow_wa_ctx(&workload->wa_ctx);
drivers/gpu/drm/i915/gvt/scheduler.c:528:static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
drivers/gpu/drm/i915/gvt/scheduler.c:530:static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:532:struct intel_gvt *gvt = workload->vgpu->gvt;
drivers/gpu/drm/i915/gvt/scheduler.c:538:list_for_each_entry(bb, &workload->shadow_bb, list) {
drivers/gpu/drm/i915/gvt/scheduler.c:548:bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
drivers/gpu/drm/i915/gvt/scheduler.c:582:ret = i915_vma_move_to_active(bb->vma, workload->req,
drivers/gpu/drm/i915/gvt/scheduler.c:595:release_shadow_batch_buffer(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:601:struct intel_vgpu_workload *workload =
drivers/gpu/drm/i915/gvt/scheduler.c:603:struct i915_request *rq = workload->req;
drivers/gpu/drm/i915/gvt/scheduler.c:659:static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:661:vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
drivers/gpu/drm/i915/gvt/scheduler.c:662:workload->rb_start;
drivers/gpu/drm/i915/gvt/scheduler.c:665:static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:669:if (list_empty(&workload->shadow_bb))
drivers/gpu/drm/i915/gvt/scheduler.c:672:bb = list_first_entry(&workload->shadow_bb,
drivers/gpu/drm/i915/gvt/scheduler.c:675:list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
drivers/gpu/drm/i915/gvt/scheduler.c:693:intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:695:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:699:ret = intel_vgpu_pin_mm(workload->shadow_mm);
drivers/gpu/drm/i915/gvt/scheduler.c:705:if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
drivers/gpu/drm/i915/gvt/scheduler.c:706:!workload->shadow_mm->ppgtt_mm.shadowed) {
drivers/gpu/drm/i915/gvt/scheduler.c:707:intel_vgpu_unpin_mm(workload->shadow_mm);
drivers/gpu/drm/i915/gvt/scheduler.c:71:static void update_shadow_pdps(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:712:if (!list_empty(&workload->lri_shadow_mm)) {
drivers/gpu/drm/i915/gvt/scheduler.c:713:list_for_each_entry(m, &workload->lri_shadow_mm,
drivers/gpu/drm/i915/gvt/scheduler.c:718:&workload->lri_shadow_mm,
drivers/gpu/drm/i915/gvt/scheduler.c:728:intel_vgpu_unpin_mm(workload->shadow_mm);
drivers/gpu/drm/i915/gvt/scheduler.c:734:intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:738:if (!list_empty(&workload->lri_shadow_mm)) {
drivers/gpu/drm/i915/gvt/scheduler.c:739:list_for_each_entry(m, &workload->lri_shadow_mm,
drivers/gpu/drm/i915/gvt/scheduler.c:74:struct intel_context *ctx = workload->req->context;
drivers/gpu/drm/i915/gvt/scheduler.c:743:intel_vgpu_unpin_mm(workload->shadow_mm);
drivers/gpu/drm/i915/gvt/scheduler.c:746:static int prepare_workload(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:748:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:752:ret = intel_vgpu_shadow_mm_pin(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:758:update_shadow_pdps(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:76:if (WARN_ON(!workload->shadow_mm))
drivers/gpu/drm/i915/gvt/scheduler.c:760:set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
drivers/gpu/drm/i915/gvt/scheduler.c:762:ret = intel_vgpu_sync_oos_pages(workload->vgpu);
drivers/gpu/drm/i915/gvt/scheduler.c:768:ret = intel_vgpu_flush_post_shadow(workload->vgpu);
drivers/gpu/drm/i915/gvt/scheduler.c:774:ret = copy_workload_to_ring_buffer(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:780:ret = prepare_shadow_batch_buffer(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:786:ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
drivers/gpu/drm/i915/gvt/scheduler.c:79:if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
drivers/gpu/drm/i915/gvt/scheduler.c:792:if (workload->prepare) {
drivers/gpu/drm/i915/gvt/scheduler.c:793:ret = workload->prepare(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:800:release_shadow_wa_ctx(&workload->wa_ctx);
drivers/gpu/drm/i915/gvt/scheduler.c:802:release_shadow_batch_buffer(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:804:intel_vgpu_shadow_mm_unpin(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:808:static int dispatch_workload(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:810:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:815:workload->engine->name, workload);
drivers/gpu/drm/i915/gvt/scheduler.c:819:ret = intel_gvt_workload_req_alloc(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:823:ret = intel_gvt_scan_and_shadow_workload(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:827:ret = populate_shadow_context(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:829:release_shadow_wa_ctx(&workload->wa_ctx);
drivers/gpu/drm/i915/gvt/scheduler.c:833:ret = prepare_workload(workload);
drivers/gpu/drm/i915/gvt/scheduler.c:839:rq = fetch_and_zero(&workload->req);
drivers/gpu/drm/i915/gvt/scheduler.c:84:(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
drivers/gpu/drm/i915/gvt/scheduler.c:843:if (!IS_ERR_OR_NULL(workload->req)) {
drivers/gpu/drm/i915/gvt/scheduler.c:845:workload->engine->name, workload->req);
drivers/gpu/drm/i915/gvt/scheduler.c:846:i915_request_add(workload->req);
drivers/gpu/drm/i915/gvt/scheduler.c:847:workload->dispatched = true;
drivers/gpu/drm/i915/gvt/scheduler.c:851:workload->status = ret;
drivers/gpu/drm/i915/gvt/scheduler.c:860:struct intel_vgpu_workload *workload = NULL;
drivers/gpu/drm/i915/gvt/scheduler.c:888:workload = scheduler->current_workload[engine->id];
drivers/gpu/drm/i915/gvt/scheduler.c:890:engine->name, workload);
drivers/gpu/drm/i915/gvt/scheduler.c:905:workload = scheduler->current_workload[engine->id];
drivers/gpu/drm/i915/gvt/scheduler.c:907:gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
drivers/gpu/drm/i915/gvt/scheduler.c:909:atomic_inc(&workload->vgpu->submission.running_workload_num);
drivers/gpu/drm/i915/gvt/scheduler.c:912:return workload;
drivers/gpu/drm/i915/gvt/scheduler.c:92:static void sr_oa_regs(struct intel_vgpu_workload *workload,
drivers/gpu/drm/i915/gvt/scheduler.c:945:static void update_guest_context(struct intel_vgpu_workload *workload)
drivers/gpu/drm/i915/gvt/scheduler.c:947:struct i915_request *rq = workload->req;
drivers/gpu/drm/i915/gvt/scheduler.c:948:struct intel_vgpu *vgpu = workload->vgpu;
drivers/gpu/drm/i915/gvt/scheduler.c:95:struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
drivers/gpu/drm/i915/gvt/scheduler.c:950:struct intel_context *ctx = workload->req->context;
drivers/gpu/drm/i915/gvt/scheduler.c:962:workload->ctx_desc.lrca);
drivers/gpu/drm/i915/gvt/scheduler.c:966:head = workload->rb_head;
drivers/gpu/drm/i915/gvt/scheduler.c:967:tail = workload->rb_tail;
drivers/gpu/drm/i915/gvt/scheduler.c:968:wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
drivers/gpu/drm/i915/gvt/scheduler.c:998:(u32)((workload->ctx_desc.lrca + i) <<
drivers/gpu/drm/i915/gvt/scheduler.h:137:void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
drivers/gpu/drm/i915/gvt/scheduler.h:164:void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
drivers/gpu/drm/i915/gvt/trace.h:231:void *workload, const char *cmd_name),
drivers/gpu/drm/i915/gvt/trace.h:234:buf_addr_type, workload, cmd_name),
drivers/gpu/drm/i915/gvt/trace.h:243:__field(void*, workload)
drivers/gpu/drm/i915/gvt/trace.h:255:__entry->workload = workload;
drivers/gpu/drm/i915/gvt/trace.h:271:__entry->workload)
kernel/trace/trace_osnoise.c:1260:int workload = test_bit(OSN_WORKLOAD, &osnoise_options);
kernel/trace/trace_osnoise.c:1262:if ((p->pid != osn_var->pid) || !workload)
kernel/trace/trace_osnoise.c:1265:if ((n->pid != osn_var->pid) || !workload)
tools/perf/bench/find-bit-bench.c:82:workload(bit);
tools/perf/bench/find-bit-bench.c:97:workload(bit);
tools/perf/builtin-record.c:1874:thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
tools/perf/builtin-record.c:2669:rec->evlist->workload.pid,
tools/perf/builtin-record.c:2689:rec->evlist->workload.pid,
tools/perf/builtin-record.c:2905:kill(rec->evlist->workload.pid, SIGTERM);
tools/perf/builtin-stat.c:794:child_pid = evsel_list->workload.pid;
tools/perf/builtin-trace.c:4479:workload_pid = evlist->workload.pid;
tools/perf/tests/builtin-test.c:163:#define workloads__for_each(workload) \
tools/perf/tests/builtin-test.c:164:for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)
tools/perf/tests/builtin-test.c:788:const char *workload = NULL;
tools/perf/tests/builtin-test.c:800:OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
tools/perf/tests/builtin-test.c:828:if (workload)
tools/perf/tests/builtin-test.c:829:return run_workload(workload, argc, argv);
tools/perf/tests/event-times.c:53:waitpid(evlist->workload.pid, NULL, 0);
tools/perf/tests/perf-record.c:129:err = sched__get_first_possible_cpu(evlist->workload.pid, cpu_mask);
tools/perf/tests/perf-record.c:142:if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
tools/perf/tests/perf-record.c:231:if ((pid_t)sample.pid != evlist->workload.pid) {
tools/perf/tests/perf-record.c:233:name, evlist->workload.pid, sample.pid);
tools/perf/tests/perf-record.c:237:if ((pid_t)sample.tid != evlist->workload.pid) {
tools/perf/tests/perf-record.c:239:name, evlist->workload.pid, sample.tid);
tools/perf/tests/perf-record.c:248:(pid_t)event->comm.pid != evlist->workload.pid) {
tools/perf/util/bpf_counter.c:444:} else if (target->pid || evsel->evlist->workload.pid != -1) {
tools/perf/util/bpf_lock_contention.c:347:if (target__none(target) && evlist->workload.pid > 0) {
tools/perf/util/bpf_lock_contention.c:348:u32 pid = evlist->workload.pid;
tools/perf/util/evlist.c:1473:evlist->workload.cork_fd = -1;
tools/perf/util/evlist.c:1485:evlist->workload.pid = fork();
tools/perf/util/evlist.c:1486:if (evlist->workload.pid < 0) {
tools/perf/util/evlist.c:1491:if (!evlist->workload.pid) {
tools/perf/util/evlist.c:1562:perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
tools/perf/util/evlist.c:1576:evlist->workload.cork_fd = go_pipe[1];
tools/perf/util/evlist.c:1591:if (evlist->workload.cork_fd >= 0) {
tools/perf/util/evlist.c:1597:ret = write(evlist->workload.cork_fd, &bf, 1);
tools/perf/util/evlist.c:1601:close(evlist->workload.cork_fd);
tools/perf/util/evlist.c:1602:evlist->workload.cork_fd = -1;
tools/perf/util/evlist.c:1613:if (evlist->workload.cork_fd >= 0) {
tools/perf/util/evlist.c:1614:close(evlist->workload.cork_fd);
tools/perf/util/evlist.c:1615:evlist->workload.cork_fd = -1;
tools/perf/util/evlist.c:1616:waitpid(evlist->workload.pid, &status, WNOHANG);
tools/perf/util/evlist.c:81:evlist->workload.pid = -1;
tools/perf/util/evlist.h:71:} workload;
tools/perf/util/intel-tpebs.c:176:workload_pid = t->evsel->evlist->workload.pid;