arch/powerpc/include/asm/book3s/64/kup.h
197
#include <linux/sched.h>
arch/x86/events/core.c
1001
return sched.state.unassigned;
arch/x86/events/core.c
838
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
arch/x86/events/core.c
843
memset(sched, 0, sizeof(*sched));
arch/x86/events/core.c
844
sched->max_events = num;
arch/x86/events/core.c
845
sched->max_weight = wmax;
arch/x86/events/core.c
846
sched->max_gp = gpmax;
arch/x86/events/core.c
847
sched->constraints = constraints;
arch/x86/events/core.c
854
sched->state.event = idx; /* start with min weight */
arch/x86/events/core.c
855
sched->state.weight = wmin;
arch/x86/events/core.c
856
sched->state.unassigned = num;
arch/x86/events/core.c
859
static void perf_sched_save_state(struct perf_sched *sched)
arch/x86/events/core.c
861
if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
arch/x86/events/core.c
864
sched->saved[sched->saved_states] = sched->state;
arch/x86/events/core.c
865
sched->saved_states++;
arch/x86/events/core.c
868
static bool perf_sched_restore_state(struct perf_sched *sched)
arch/x86/events/core.c
870
if (!sched->saved_states)
arch/x86/events/core.c
873
sched->saved_states--;
arch/x86/events/core.c
874
sched->state = sched->saved[sched->saved_states];
arch/x86/events/core.c
878
sched->state.used &= ~BIT_ULL(sched->state.counter);
arch/x86/events/core.c
881
sched->state.counter++;
arch/x86/events/core.c
890
static bool __perf_sched_find_counter(struct perf_sched *sched)
arch/x86/events/core.c
895
if (!sched->state.unassigned)
arch/x86/events/core.c
898
if (sched->state.event >= sched->max_events)
arch/x86/events/core.c
901
c = sched->constraints[sched->state.event];
arch/x86/events/core.c
908
if (sched->state.used & mask)
arch/x86/events/core.c
911
sched->state.used |= mask;
arch/x86/events/core.c
917
idx = sched->state.counter;
arch/x86/events/core.c
924
if (sched->state.used & mask)
arch/x86/events/core.c
927
if (sched->state.nr_gp++ >= sched->max_gp)
arch/x86/events/core.c
930
sched->state.used |= mask;
arch/x86/events/core.c
937
sched->state.counter = idx;
arch/x86/events/core.c
940
perf_sched_save_state(sched);
arch/x86/events/core.c
945
static bool perf_sched_find_counter(struct perf_sched *sched)
arch/x86/events/core.c
947
while (!__perf_sched_find_counter(sched)) {
arch/x86/events/core.c
948
if (!perf_sched_restore_state(sched))
arch/x86/events/core.c
959
static bool perf_sched_next_event(struct perf_sched *sched)
arch/x86/events/core.c
963
if (!sched->state.unassigned || !--sched->state.unassigned)
arch/x86/events/core.c
968
sched->state.event++;
arch/x86/events/core.c
969
if (sched->state.event >= sched->max_events) {
arch/x86/events/core.c
971
sched->state.event = 0;
arch/x86/events/core.c
972
sched->state.weight++;
arch/x86/events/core.c
973
if (sched->state.weight > sched->max_weight)
arch/x86/events/core.c
976
c = sched->constraints[sched->state.event];
arch/x86/events/core.c
977
} while (c->weight != sched->state.weight);
arch/x86/events/core.c
979
sched->state.counter = 0; /* start with first counter */
arch/x86/events/core.c
990
struct perf_sched sched;
arch/x86/events/core.c
992
perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);
arch/x86/events/core.c
995
if (!perf_sched_find_counter(&sched))
arch/x86/events/core.c
998
assign[sched.state.event] = sched.state.counter;
arch/x86/events/core.c
999
} while (perf_sched_next_event(&sched));
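
The arch/x86/events/core.c hits above trace perf's counter-assignment pass: events are visited from minimum to maximum constraint weight, each is placed on the first free counter its constraint mask allows, and a small stack of saved states (SCHED_STATES_MAX) lets the walk backtrack when a later event cannot be placed. Below is a standalone sketch of that loop under simplified assumptions — no fixed-counter or max_gp handling, and illustrative types rather than the kernel's struct perf_sched:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SAVED_MAX 2                     /* cf. SCHED_STATES_MAX */

struct sched_state {
	int      event, counter, weight, unassigned;
	uint64_t used;                  /* bitmask of taken counters */
};

struct sched {
	int                nr, wmax;
	const uint64_t    *mask;        /* per-event allowed counters */
	const int         *weight;      /* per-event constraint weight */
	struct sched_state state, saved[SAVED_MAX];
	int                nsaved;
};

/* Place the current event on a free counter; checkpoint on success,
 * pop a checkpoint and retry the next counter on a dead end. */
static bool sched_find_counter(struct sched *s)
{
	for (;;) {
		uint64_t avail = s->mask[s->state.event] & ~s->state.used;

		for (int idx = s->state.counter; idx < 64; idx++) {
			if (avail & (1ULL << idx)) {
				s->state.counter = idx;
				s->state.used |= 1ULL << idx;
				if (s->nsaved < SAVED_MAX)  /* save_state */
					s->saved[s->nsaved++] = s->state;
				return true;
			}
		}
		if (!s->nsaved)                 /* nothing to restore */
			return false;
		s->state = s->saved[--s->nsaved];   /* restore_state */
		s->state.used &= ~(1ULL << s->state.counter);
		s->state.counter++;     /* retry with the next counter */
	}
}

/* Advance to the next event of the current weight class, bumping the
 * weight and wrapping when the class is exhausted (min weight first). */
static bool sched_next_event(struct sched *s)
{
	if (!s->state.unassigned || !--s->state.unassigned)
		return false;
	do {
		if (++s->state.event >= s->nr) {
			s->state.event = 0;
			if (++s->state.weight > s->wmax)
				return false;
		}
	} while (s->weight[s->state.event] != s->state.weight);
	s->state.counter = 0;           /* start with first counter */
	return true;
}

int main(void)
{
	/* event 1 can only use counter 0; events 0 and 2 can use 0-3 */
	const uint64_t mask[]   = { 0xf, 0x1, 0xf };
	const int      weight[] = { 4, 1, 4 };
	int assign[3] = { -1, -1, -1 };
	struct sched s = {
		.nr = 3, .wmax = 4, .mask = mask, .weight = weight,
		.state = { .weight = 1, .unassigned = 3 }, /* start at wmin */
	};

	while (weight[s.state.event] != s.state.weight)
		s.state.event++;        /* first min-weight event */
	do {
		if (!sched_find_counter(&s))
			break;
		assign[s.state.event] = s.state.counter;
	} while (sched_next_event(&s));

	for (int i = 0; i < 3; i++)
		printf("event %d -> counter %d\n", i, assign[i]);
	return 0;
}

Scheduling min-weight events first matters: event 1 above can only ever live on counter 0, so placing it before the flexible events avoids a backtrack in the common case.
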
arch/xtensa/include/asm/bitops.h
216
#include <asm-generic/bitops/sched.h>
crypto/fcrypt.c
223
#define F_ENCRYPT(R, L, sched) \
crypto/fcrypt.c
226
u.l = sched ^ R; \
crypto/fcrypt.c
242
F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
crypto/fcrypt.c
243
F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
crypto/fcrypt.c
244
F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
crypto/fcrypt.c
245
F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
crypto/fcrypt.c
246
F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
crypto/fcrypt.c
247
F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
crypto/fcrypt.c
248
F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
crypto/fcrypt.c
249
F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
crypto/fcrypt.c
250
F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
crypto/fcrypt.c
251
F_ENCRYPT(X.l, X.r, ctx->sched[0x9]);
crypto/fcrypt.c
252
F_ENCRYPT(X.r, X.l, ctx->sched[0xa]);
crypto/fcrypt.c
253
F_ENCRYPT(X.l, X.r, ctx->sched[0xb]);
crypto/fcrypt.c
254
F_ENCRYPT(X.r, X.l, ctx->sched[0xc]);
crypto/fcrypt.c
255
F_ENCRYPT(X.l, X.r, ctx->sched[0xd]);
crypto/fcrypt.c
256
F_ENCRYPT(X.r, X.l, ctx->sched[0xe]);
crypto/fcrypt.c
257
F_ENCRYPT(X.l, X.r, ctx->sched[0xf]);
crypto/fcrypt.c
274
F_ENCRYPT(X.l, X.r, ctx->sched[0xf]);
crypto/fcrypt.c
275
F_ENCRYPT(X.r, X.l, ctx->sched[0xe]);
crypto/fcrypt.c
276
F_ENCRYPT(X.l, X.r, ctx->sched[0xd]);
crypto/fcrypt.c
277
F_ENCRYPT(X.r, X.l, ctx->sched[0xc]);
crypto/fcrypt.c
278
F_ENCRYPT(X.l, X.r, ctx->sched[0xb]);
crypto/fcrypt.c
279
F_ENCRYPT(X.r, X.l, ctx->sched[0xa]);
crypto/fcrypt.c
280
F_ENCRYPT(X.l, X.r, ctx->sched[0x9]);
crypto/fcrypt.c
281
F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
crypto/fcrypt.c
282
F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
crypto/fcrypt.c
283
F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
crypto/fcrypt.c
284
F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
crypto/fcrypt.c
285
F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
crypto/fcrypt.c
286
F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
crypto/fcrypt.c
287
F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
crypto/fcrypt.c
288
F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
crypto/fcrypt.c
289
F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
crypto/fcrypt.c
328
ctx->sched[0x0] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
329
ctx->sched[0x1] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
330
ctx->sched[0x2] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
331
ctx->sched[0x3] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
332
ctx->sched[0x4] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
333
ctx->sched[0x5] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
334
ctx->sched[0x6] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
335
ctx->sched[0x7] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
336
ctx->sched[0x8] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
337
ctx->sched[0x9] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
338
ctx->sched[0xa] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
339
ctx->sched[0xb] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
340
ctx->sched[0xc] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
341
ctx->sched[0xd] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
342
ctx->sched[0xe] = cpu_to_be32(k); ror56_64(k, 11);
crypto/fcrypt.c
343
ctx->sched[0xf] = cpu_to_be32(k);
crypto/fcrypt.c
369
ctx->sched[0x0] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
370
ctx->sched[0x1] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
371
ctx->sched[0x2] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
372
ctx->sched[0x3] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
373
ctx->sched[0x4] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
374
ctx->sched[0x5] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
375
ctx->sched[0x6] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
376
ctx->sched[0x7] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
377
ctx->sched[0x8] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
378
ctx->sched[0x9] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
379
ctx->sched[0xa] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
380
ctx->sched[0xb] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
381
ctx->sched[0xc] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
382
ctx->sched[0xd] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
383
ctx->sched[0xe] = cpu_to_be32(lo); ror56(hi, lo, 11);
crypto/fcrypt.c
384
ctx->sched[0xf] = cpu_to_be32(lo);
crypto/fcrypt.c
54
__be32 sched[ROUNDS];
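
The crypto/fcrypt.c hits show the cipher's shape: a 16-entry key schedule sched[0x0]..sched[0xf] derived by repeatedly rotating a 56-bit key right by 11 bits, applied in ascending order by encrypt and in descending order by decrypt — the standard Feistel property that the same network with a reversed schedule is its own inverse. A toy model of that structure follows; the round function is an illustrative stand-in, not fcrypt's S-box based F():

#include <stdint.h>
#include <stdio.h>

#define ROUNDS 16

/* Toy round function; the real F() mixes through four S-boxes. */
static uint32_t toy_f(uint32_t half, uint32_t subkey)
{
	uint32_t x = half ^ subkey;     /* cf. u.l = sched ^ R */

	x ^= (x << 7) | (x >> 25);
	return x * 0x9e3779b9u;
}

/* Derive 16 subkeys from a 56-bit key, rotating right by 11 bits
 * between rounds (cf. ror56_64(k, 11) in the setkey hits above). */
static void setkey(uint32_t sched[ROUNDS], uint64_t k56)
{
	for (int i = 0; i < ROUNDS; i++) {
		sched[i] = (uint32_t)k56;
		k56 = ((k56 >> 11) | (k56 << 45)) & ((1ULL << 56) - 1);
	}
}

/* One Feistel network: decryption is the same walk with the key
 * schedule indexed in reverse, exactly the 0xf..0x0 order above. */
static void feistel(uint32_t *l, uint32_t *r,
		    const uint32_t sched[ROUNDS], int decrypt)
{
	uint32_t t;

	for (int i = 0; i < ROUNDS; i++) {
		uint32_t k = sched[decrypt ? ROUNDS - 1 - i : i];

		t = *l ^ toy_f(*r, k);
		*l = *r;
		*r = t;
	}
	t = *l; *l = *r; *r = t;        /* undo the final half-swap */
}

int main(void)
{
	uint32_t sched[ROUNDS], l = 0x01234567, r = 0x89abcdef;

	setkey(sched, 0xdeadbeefcafeULL);
	feistel(&l, &r, sched, 0);
	printf("ct: %08x %08x\n", l, r);
	feistel(&l, &r, sched, 1);
	printf("pt: %08x %08x\n", l, r); /* round-trips to the input */
	return 0;
}

The unrolled F_ENCRYPT(X.r, X.l, ...) / F_ENCRYPT(X.l, X.r, ...) pairs in fcrypt.c alternate which half is updated instead of swapping explicitly; the loop-with-swap form above is structurally equivalent.
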
drivers/accel/amdxdna/aie2_ctx.c
54
drm_sched_stop(&hwctx->priv->sched, bad_job);
drivers/accel/amdxdna/aie2_ctx.c
544
struct drm_gpu_scheduler *sched;
drivers/accel/amdxdna/aie2_ctx.c
56
drm_sched_start(&hwctx->priv->sched, 0);
drivers/accel/amdxdna/aie2_ctx.c
593
sched = &priv->sched;
drivers/accel/amdxdna/aie2_ctx.c
600
ret = drm_sched_init(sched, &args);
drivers/accel/amdxdna/aie2_ctx.c
607
&sched, 1, NULL);
drivers/accel/amdxdna/aie2_ctx.c
658
drm_sched_fini(&priv->sched);
drivers/accel/amdxdna/aie2_ctx.c
684
drm_sched_stop(&hwctx->priv->sched, NULL);
drivers/accel/amdxdna/aie2_ctx.c
686
drm_sched_start(&hwctx->priv->sched, 0);
drivers/accel/amdxdna/aie2_ctx.c
697
drm_sched_fini(&hwctx->priv->sched);
drivers/accel/amdxdna/aie2_pci.h
140
struct drm_gpu_scheduler sched;
drivers/accel/ethosu/ethosu_device.h
182
struct drm_gpu_scheduler sched;
drivers/accel/ethosu/ethosu_job.c
211
drm_sched_fault(&dev->sched);
drivers/accel/ethosu/ethosu_job.c
273
drm_sched_stop(&dev->sched, bad);
drivers/accel/ethosu/ethosu_job.c
283
drm_sched_start(&dev->sched, 0);
drivers/accel/ethosu/ethosu_job.c
331
ret = drm_sched_init(&edev->sched, &args);
drivers/accel/ethosu/ethosu_job.c
340
drm_sched_fini(&edev->sched);
drivers/accel/ethosu/ethosu_job.c
346
drm_sched_fini(&dev->sched);
drivers/accel/ethosu/ethosu_job.c
352
struct drm_gpu_scheduler *sched = &dev->sched;
drivers/accel/ethosu/ethosu_job.c
357
&sched, 1, NULL);
drivers/accel/rocket/rocket_core.h
55
struct drm_gpu_scheduler sched;
drivers/accel/rocket/rocket_job.c
275
struct drm_gpu_scheduler *sched)
drivers/accel/rocket/rocket_job.c
280
if (&rdev->cores[core].sched == sched)
drivers/accel/rocket/rocket_job.c
291
struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
drivers/accel/rocket/rocket_job.c
356
drm_sched_stop(&core->sched, bad);
drivers/accel/rocket/rocket_job.c
379
drm_sched_start(&core->sched, 0);
drivers/accel/rocket/rocket_job.c
386
struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
drivers/accel/rocket/rocket_job.c
473
ret = drm_sched_init(&core->sched, &args);
drivers/accel/rocket/rocket_job.c
482
drm_sched_fini(&core->sched);
drivers/accel/rocket/rocket_job.c
490
drm_sched_fini(&core->sched);
drivers/accel/rocket/rocket_job.c
505
scheds[core] = &rdev->cores[core].sched;
drivers/accel/rocket/rocket_job.c
528
if (atomic_read(&core->sched.credit_count))
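
The drivers/accel hits (amdxdna, ethosu, rocket) all follow one pattern: struct drm_gpu_scheduler is embedded directly in the per-context or per-core object, so a scheduler callback that only receives the scheduler pointer can recover its owner with container_of (cf. rocket_job.c's sched_to_core above). A minimal self-contained sketch of that recovery, with stand-in types rather than the DRM structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gpu_scheduler {                  /* stand-in for drm_gpu_scheduler */
	const char *name;
};

struct core {
	int                  id;
	struct gpu_scheduler sched;     /* embedded, not a pointer */
};

/* Mirrors the sched_to_core() idea: scheduler -> owning core. */
static struct core *sched_to_core(struct gpu_scheduler *sched)
{
	return container_of(sched, struct core, sched);
}

int main(void)
{
	struct core c = { .id = 3, .sched = { .name = "core3" } };
	struct gpu_scheduler *s = &c.sched; /* what a job callback sees */

	printf("core %d owns scheduler %s\n", sched_to_core(s)->id, s->name);
	return 0;
}

Embedding (rather than allocating the scheduler separately) ties the scheduler's lifetime to its core and makes the back-pointer free.
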
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
829
if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
297
drm_sched_stop(&ring->sched, NULL);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
302
drm_sched_start(&ring->sched, 0);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1111
struct drm_gpu_scheduler *sched = entity->rq->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1112
struct amdgpu_ring *ring = to_amdgpu_ring(sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1208
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1242
sched = p->gang_leader->base.entity->rq->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1252
if (!s_fence || s_fence->sched != sched) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
207
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
229
scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
246
sched = drm_sched_pick_best(scheds, num_scheds);
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
247
scheds = &sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
833
scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1663
drm_sched_wqueue_stop(&ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1679
drm_sched_wqueue_start(&ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1835
static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1840
spin_lock(&sched->job_list_lock);
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1841
list_for_each_entry(s_job, &sched->pending_list, list) {
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1842
fence = sched->ops->run_job(s_job);
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1845
spin_unlock(&sched->job_list_lock);
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1855
struct drm_gpu_scheduler *sched = &ring->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1872
spin_lock(&sched->job_list_lock);
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1873
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1877
sched->ops->free_job(s_job);
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1885
spin_unlock(&sched->job_list_lock);
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1919
drm_sched_wqueue_stop(&ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1942
amdgpu_ib_preempt_job_recovery(&ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1953
drm_sched_wqueue_start(&ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
354
coredump->ring = to_amdgpu_ring(s_job->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2956
r = drm_sched_init(&ring->sched, &args);
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
3162
adev->mman.buffer_funcs_ring->sched.ready)
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
4062
if (adev->mman.buffer_funcs_ring->sched.ready)
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
5219
if (adev->mman.buffer_funcs_ring->sched.ready)
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
5954
if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
6333
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
6336
amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
6417
drm_sched_start(&ring->sched, 0);
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2846
if (ring && ring->sched.ready) {
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
616
if (ring->sched.ops)
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
617
drm_sched_fini(&ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
1075
if (adev->mes.ring[0].sched.ready)
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
1151
if (adev->mes.ring[0].sched.ready) {
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
1249
if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready)
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
1568
struct drm_gpu_scheduler *sched = &ring->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
1579
&sched, 1, NULL);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
1638
if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2479
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2481
ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2499
if (ring->sched.ready)
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2549
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2551
ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2570
if (ring->sched.ready)
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
526
if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
578
if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
718
!ring->sched.ready) {
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
787
if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
873
if (adev->mes.ring[MES_PIPE_INST(xcc_inst, 0)].sched.ready) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
177
if (!ring->sched.ready) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
442
if (!ring->sched.ready || !ring->funcs->test_ib)
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
467
ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
100
__func__, s_job->sched->name);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
119
s_job->sched->name);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
124
job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
138
s_job->sched->name);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
140
drm_sched_wqueue_stop(&ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
144
drm_sched_wqueue_start(&ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
147
ring->sched.name);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
154
dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
179
drm_sched_suspend_timeout(&ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
378
job->base.sched = &ring->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
392
struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
429
struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
483
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
490
for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
491
struct drm_sched_rq *rq = sched->sched_rq[i];
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
506
list_for_each_entry(s_job, &sched->pending_list, list) {
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
90
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
110
return to_amdgpu_ring(job->base.entity->rq->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
132
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
375
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
377
ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
397
if (ring->sched.ready)
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
451
if (adev->gfx.gfx_ring[i].sched.ready &&
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
466
if (adev->gfx.compute_ring[i].sched.ready &&
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
481
if (adev->sdma.instance[i].ring.sched.ready &&
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
499
if (adev->uvd.inst[i].ring.sched.ready &&
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
509
if (adev->vce.ring[i].sched.ready &&
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
522
if (adev->uvd.inst[i].ring_enc[j].sched.ready &&
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
535
if (adev->vcn.inst[i].ring_dec.sched.ready &&
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
549
if (adev->vcn.inst[i].ring_enc[j].sched.ready &&
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
565
if (adev->jpeg.inst[i].ring_dec[j].sched.ready &&
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
574
if (adev->vpe.ring.sched.ready &&
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
326
rst_ctxt->job->base.sched->name);
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
401
adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
402
&ring->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
422
ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
771
ring->sched.ready = !r;
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
862
if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
112
struct drm_gpu_scheduler *sched[AMDGPU_MAX_HWIP_RINGS];
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
309
struct drm_gpu_scheduler sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
69
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
381
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
383
ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
387
page->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
389
page->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
420
if (ring->sched.ready)
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
426
if (page->sched.ready)
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
571
drm_sched_wqueue_stop(&gfx_ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
574
drm_sched_wqueue_stop(&page_ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
604
drm_sched_wqueue_start(&gfx_ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
607
drm_sched_wqueue_start(&page_ring->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
335
if (s_fence->sched == &ring->sched) {
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
73
ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
156
__entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
159
to_amdgpu_ring(job->base.entity->rq->sched));
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
174
__string(ring, to_amdgpu_ring(job->base.sched)->name)
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
197
__string(ring, to_amdgpu_ring(job->base.sched)->name)
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
549
__string(ring, sched_job->base.sched->name)
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2304
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2307
sched = &ring->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2309
DRM_SCHED_PRIORITY_KERNEL, &sched,
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2319
DRM_SCHED_PRIORITY_NORMAL, &sched,
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2329
DRM_SCHED_PRIORITY_NORMAL, &sched,
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2408
if (!ring->sched.ready) {
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
408
struct drm_gpu_scheduler *sched = &ring->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
412
&sched, 1, NULL);
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
281
struct drm_gpu_scheduler *sched = &ring->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
285
&sched, 1, NULL);
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1413
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1415
ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1433
if (ring->sched.ready)
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1501
drm_sched_wqueue_stop(&vinst->ring_dec.sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1503
drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1525
drm_sched_wqueue_start(&vinst->ring_dec.sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1527
drm_sched_wqueue_start(&vinst->ring_enc[i].sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
1208
adev->mes.ring[0].sched.ready = false;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
113
ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
114
sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
468
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
474
sched = entity->entity.rq->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
475
if (drm_sched_wqueue_ready(sched)) {
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
476
ring = to_amdgpu_ring(entity->entity.rq->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
511
xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
579
.sched[(*num_gpu_sched)++] = &ring->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
600
if (!ring || !ring->sched.ready || ring->no_scheduler)
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
1180
drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
1355
&adev->sdma.instance[i].ring.sched;
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
6641
adev->gfx.kiq[0].ring.sched.ready = false;
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
9358
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
9367
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
422
if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
6640
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
6649
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
2805
adev->gfx.kiq[0].ring.sched.ready = enable;
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
3431
adev->gfx.kiq[0].ring.sched.ready = true;
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
357
if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
5006
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
5015
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
157
if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
1917
adev->gfx.kiq[xcc_id].ring.sched.ready = enable;
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
2441
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
3728
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
3739
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
3358
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
4815
drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
4822
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1489
if (!ring->sched.ready)
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
4286
adev->gfx.kiq[0].ring.sched.ready = false;
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
6620
drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
6628
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
3476
adev->gfx.kiq[0].ring.sched.ready = false;
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
4647
if (!ring->sched.ready)
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
6269
drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
6277
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
501
if (!adev->gfx.compute_ring[0].sched.ready ||
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
502
!adev->gfx.compute_ring[1].sched.ready)
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
645
if (!adev->gfx.compute_ring[0].sched.ready)
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
1747
adev->gfx.kiq[xcc_id].ring.sched.ready = false;
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
3346
drm_sched_fault(&ring->sched);
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
258
if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
246
if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
317
if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
352
if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready &&
drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c
334
if (((adev->gfx.kiq[inst].ring.sched.ready ||
drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c
335
adev->mes.ring[MES_PIPE_INST(inst, 0)].sched.ready) &&
drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c
372
if (adev->enable_uni_mes && adev->mes.ring[0].sched.ready &&
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
835
if (adev->gfx.kiq[inst].ring.sched.ready &&
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
196
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
387
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
266
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
1627
if (adev->mes.ring[0].sched.ready) {
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
1629
adev->mes.ring[0].sched.ready = false;
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
1647
if (adev->mes.ring[0].sched.ready)
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
1695
adev->gfx.kiq[0].ring.sched.ready = false;
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
1696
adev->mes.ring[0].sched.ready = true;
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
1461
kiq_ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
1725
adev->mes.ring[0].sched.ready = false;
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
1804
if (adev->mes.ring[0].sched.ready) {
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
1812
adev->mes.ring[0].sched.ready = false;
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
1825
if (adev->mes.ring[0].sched.ready)
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
1880
adev->gfx.kiq[0].ring.sched.ready = false;
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
1881
adev->mes.ring[0].sched.ready = true;
drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
1387
kiq_ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
1680
adev->mes.ring[MES_PIPE_INST(xcc_id, 0)].sched.ready = false;
drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
1768
if (adev->mes.ring[inst].sched.ready) {
drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
1776
adev->mes.ring[inst].sched.ready = false;
drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
1828
if (adev->mes.ring[MES_PIPE_INST(xcc_id, 0)].sched.ready)
drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
1885
adev->gfx.kiq[xcc_id].ring.sched.ready = false;
drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
1886
adev->mes.ring[MES_PIPE_INST(xcc_id, 0)].sched.ready = true;
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
1081
drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
1250
&adev->sdma.instance[i].ring.sched;
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
1419
drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
1692
&adev->sdma.instance[i].ring.sched;
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
2143
drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
2628
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
2634
sched = &adev->sdma.instance[i].page.sched;
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
2636
sched = &adev->sdma.instance[i].ring.sched;
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
2637
adev->vm_manager.vm_pte_scheds[i] = sched;
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
1839
drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
2329
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
2335
sched = &adev->sdma.instance[i].page.sched;
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
2337
sched = &adev->sdma.instance[i].ring.sched;
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
2338
adev->vm_manager.vm_pte_scheds[i] = sched;
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
2070
&adev->sdma.instance[i].ring.sched;
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
2074
&adev->sdma.instance[i].ring.sched;
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
1910
&adev->sdma.instance[i].ring.sched;
drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
1860
&adev->sdma.instance[i].ring.sched;
drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
611
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
620
ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/sdma_v7_1.c
1770
&adev->sdma.instance[i].ring.sched;
drivers/gpu/drm/amd/amdgpu/sdma_v7_1.c
593
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/sdma_v7_1.c
602
ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/si_dma.c
848
&adev->sdma.instance[i].ring.sched;
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
300
ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
472
adev->vcn.inst[j].ring_enc[0].sched.ready = true;
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
473
adev->vcn.inst[j].ring_enc[1].sched.ready = false;
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
474
adev->vcn.inst[j].ring_enc[2].sched.ready = false;
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
475
adev->vcn.inst[j].ring_dec.sched.ready = true;
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1901
[AMDGPU_RING_PRIO_DEFAULT].sched;
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
383
ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
390
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
396
ring->sched.ready = false;
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
403
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
1818
[AMDGPU_RING_PRIO_0].sched;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
337
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
1649
drm_sched_wqueue_stop(&ring->sched);
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
1686
drm_sched_wqueue_start(&ring->sched);
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
341
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
1313
drm_sched_wqueue_stop(&ring->sched);
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
1349
drm_sched_wqueue_start(&ring->sched);
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
313
ring->sched.ready = true;
drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
304
vpe->ring.sched.ready = false;
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
582
if (ring && ring->sched.ready)
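
Across the amdgpu hits (and the etnaviv, panfrost, imagination and nouveau groups below) the timeout path repeats one three-step shape: drm_sched_stop() to park the scheduler and isolate the bad job, a hardware reset, then drm_sched_start() to resume submission — with sched.ready flags gating submissions while a ring is down. The sketch below models only that shape; the function bodies are illustrative stand-ins, not the DRM API:

#include <stdbool.h>
#include <stdio.h>

struct gpu {
	bool wedged;
};

static void sched_stop(struct gpu *g)
{
	(void)g;
	puts("stop: park workers, pick off the bad job");
}

static bool gpu_reset(struct gpu *g)
{
	g->wedged = false;              /* pretend the reset worked */
	return true;
}

static void sched_start(struct gpu *g)
{
	(void)g;
	puts("start: requeue pending jobs, unpark");
}

/* Shape of a timedout_job handler: stop -> reset -> start. */
static void timedout_job(struct gpu *g)
{
	sched_stop(g);                  /* cf. drm_sched_stop(sched, bad) */
	if (gpu_reset(g))
		sched_start(g);         /* cf. drm_sched_start(sched, 0)  */
	else
		puts("reset failed: scheduler stays stopped");
}

int main(void)
{
	struct gpu g = { .wedged = true };

	timedout_job(&g);
	return 0;
}
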
drivers/gpu/drm/etnaviv/etnaviv_drv.c
87
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/etnaviv/etnaviv_drv.c
90
sched = &gpu->sched;
drivers/gpu/drm/etnaviv/etnaviv_drv.c
92
DRM_SCHED_PRIORITY_NORMAL, &sched,
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1598
drm_sched_fault(&gpu->sched);
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1990
if (atomic_read(&gpu->sched.credit_count))
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
118
struct drm_gpu_scheduler sched;
drivers/gpu/drm/etnaviv/etnaviv_sched.c
153
return drm_sched_init(&gpu->sched, &args);
drivers/gpu/drm/etnaviv/etnaviv_sched.c
158
drm_sched_fini(&gpu->sched);
drivers/gpu/drm/etnaviv/etnaviv_sched.c
77
drm_sched_stop(&gpu->sched, sched_job);
drivers/gpu/drm/etnaviv/etnaviv_sched.c
86
drm_sched_resubmit_jobs(&gpu->sched);
drivers/gpu/drm/etnaviv/etnaviv_sched.c
88
drm_sched_start(&gpu->sched, 0);
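
One idiom recurs in nearly every drm_sched_entity_init() call in this listing ("&sched, 1, NULL"): the entity takes an array of candidate schedulers it may be balanced across (cf. drm_sched_pick_best in the amdgpu_ctx.c hits), and a single-queue driver passes a one-element array holding its lone scheduler pointer. A stand-in sketch of that calling shape — entity_init below is illustrative, not the DRM signature:

#include <stdio.h>

struct scheduler {                      /* stand-in type */
	const char *name;
};

/* Takes (list, count), like the sched_list/num_sched_list tail of
 * drm_sched_entity_init(). */
static void entity_init(struct scheduler **list, unsigned int num)
{
	for (unsigned int i = 0; i < num; i++)
		printf("entity may run on %s\n", list[i]->name);
}

int main(void)
{
	struct scheduler s = { .name = "gpu" };
	struct scheduler *sched = &s;   /* cf. sched = &gpu->sched */

	entity_init(&sched, 1);         /* cf. &sched, 1, NULL     */
	return 0;
}
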
drivers/gpu/drm/i915/gem/i915_gem_context.c
1618
ctx->sched = pc->sched;
drivers/gpu/drm/i915/gem/i915_gem_context.c
2081
ctx->sched.priority = args->value;
drivers/gpu/drm/i915/gem/i915_gem_context.c
2087
if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
drivers/gpu/drm/i915/gem/i915_gem_context.c
2566
args->value = ctx->sched.priority;
drivers/gpu/drm/i915/gem/i915_gem_context.c
299
pc->sched.priority = I915_PRIORITY_NORMAL;
drivers/gpu/drm/i915/gem/i915_gem_context.c
928
pc->sched.priority = args->value;
drivers/gpu/drm/i915/gem/i915_gem_context.c
987
if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
drivers/gpu/drm/i915/gem/i915_gem_context_types.h
200
struct i915_sched_attr sched;
drivers/gpu/drm/i915/gem/i915_gem_context_types.h
375
struct i915_sched_attr sched;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3086
attr = eb->gem_context->sched;
drivers/gpu/drm/i915/gt/intel_context.c
563
sched.link) {
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2324
list_for_each_entry(rq, requests, sched.link) {
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2503
sched.link) {
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
115
rq->sched.attr.priority);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
188
rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
196
if (rq->sched.attr.priority >= attr.priority)
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
198
if (rq->sched.attr.priority >= attr.priority)
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
295
GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
44
if (rq && rq->sched.attr.priority >= I915_PRIORITY_BARRIER &&
drivers/gpu/drm/i915/gt/intel_engine_pm.c
222
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/intel_engine_user.c
100
u8 sched;
drivers/gpu/drm/i915/gt/intel_engine_user.c
128
enabled |= BIT(map[i].sched);
drivers/gpu/drm/i915/gt/intel_engine_user.c
130
disabled |= BIT(map[i].sched);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1087
list_move_tail(&rq->sched.link, pl);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1091
container_of(p->waiter, typeof(*w), sched);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1113
list_move_tail(&w->sched.link, &list);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1116
rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1166
if (!list_is_last_rcu(&rq->sched.link,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1324
last->sched.attr.priority,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2064
list_move_tail(&rq->sched.link,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2071
container_of(p->waiter, typeof(*w), sched);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2089
list_move_tail(&w->sched.link, &list);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2092
rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2137
container_of(p->signaler, typeof(*s), sched);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2164
list_move_tail(&rq->sched.link,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2172
container_of(p->waiter, typeof(*w), sched);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2187
list_move_tail(&w->sched.link, &list);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2190
rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2550
GEM_BUG_ON(!list_empty(&rq->sched.link));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2551
list_add_tail(&rq->sched.link,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2586
list_add_tail(&request->sched.link,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2593
GEM_BUG_ON(list_empty(&request->sched.link));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
264
return READ_ONCE(rq->sched.attr.priority);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3174
list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3194
list_for_each_entry(rq, &sched_engine->hold, sched.link)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3280
list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3300
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3350
inflight->sched.attr.priority);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
337
if (!list_is_last(&rq->sched.link, &engine->sched_engine->requests) &&
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
338
rq_prio(list_next_entry(rq, sched.link)) > last_prio)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
383
sched.link) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
385
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3920
list_move_tail(&rq->sched.link, virtual_queue(ve));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
399
list_move(&rq->sched.link, pl);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4092
list_for_each_entry(rq, &sched_engine->requests, sched.link) {
drivers/gpu/drm/i915/gt/intel_ring_submission.c
1139
list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
1145
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
397
list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
drivers/gpu/drm/i915/gt/intel_ring_submission.c
467
list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
drivers/gpu/drm/i915/gt/intel_ring_submission.c
654
sched.link)
drivers/gpu/drm/i915/gt/mock_engine.c
259
list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
drivers/gpu/drm/i915/gt/mock_engine.c
280
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/mock_engine.c
305
list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
drivers/gpu/drm/i915/gt/selftest_context.c
28
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1484
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1548
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1556
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1747
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1752
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1851
ctx_lo->sched.priority = 1;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1973
b.ctx->sched.priority = I915_PRIORITY_MAX;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2843
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2885
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3389
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3394
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3511
ctx->sched.priority = prio;
drivers/gpu/drm/i915/gt/selftest_execlists.c
393
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_execlists.c
440
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
986
h.ctx->sched.priority = 1024;
drivers/gpu/drm/i915/gt/selftest_lrc.c
104
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_lrc.c
1257
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_lrc.c
558
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1835
sched.link) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1839
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1850
list_add(&rq->sched.link, pl);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1968
list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2006
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2205
GEM_BUG_ON(!list_empty(&rq->sched.link));
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2206
list_add_tail(&rq->sched.link,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3731
list_move_tail(&rq->sched.link, &ce->guc_state.requests);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3766
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3866
prio = ctx->sched.priority;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5422
list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
799
return rq->sched.attr.priority;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
961
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/i915_gpu_error.c
1422
erq->sched_attr = request->sched.attr;
drivers/gpu/drm/i915/i915_gpu_error.c
1473
e->sched_attr = ctx->sched;
drivers/gpu/drm/i915/i915_request.c
1019
GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
drivers/gpu/drm/i915/i915_request.c
1020
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
drivers/gpu/drm/i915/i915_request.c
1141
return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
drivers/gpu/drm/i915/i915_request.c
1224
if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
drivers/gpu/drm/i915/i915_request.c
1241
to->sched.semaphores |= mask;
drivers/gpu/drm/i915/i915_request.c
1325
err = i915_sched_node_add_dependency(&to->sched,
drivers/gpu/drm/i915/i915_request.c
1326
&from->sched,
drivers/gpu/drm/i915/i915_request.c
1346
rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
drivers/gpu/drm/i915/i915_request.c
1482
ret = i915_sched_node_add_dependency(&to->sched,
drivers/gpu/drm/i915/i915_request.c
1483
&from->sched,
drivers/gpu/drm/i915/i915_request.c
1651
__i915_sched_node_add_dependency(&rq->sched,
drivers/gpu/drm/i915/i915_request.c
1652
&prev->sched,
drivers/gpu/drm/i915/i915_request.c
1706
__i915_sched_node_add_dependency(&rq->sched,
drivers/gpu/drm/i915/i915_request.c
1707
&prev->sched,
drivers/gpu/drm/i915/i915_request.c
1861
attr = ctx->sched;
drivers/gpu/drm/i915/i915_request.c
2222
x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
drivers/gpu/drm/i915/i915_request.c
414
i915_sched_node_fini(&rq->sched);
drivers/gpu/drm/i915/i915_request.c
633
list_del_init(&request->sched.link);
drivers/gpu/drm/i915/i915_request.c
659
if (request->sched.semaphores &&
drivers/gpu/drm/i915/i915_request.c
661
engine->saturated |= request->sched.semaphores;
drivers/gpu/drm/i915/i915_request.c
738
if (request->sched.semaphores && __i915_request_has_started(request))
drivers/gpu/drm/i915/i915_request.c
739
request->sched.semaphores = 0;
drivers/gpu/drm/i915/i915_request.c
879
i915_sched_node_init(&rq->sched);
drivers/gpu/drm/i915/i915_request.c
972
i915_sched_node_reinit(&rq->sched);
drivers/gpu/drm/i915/i915_request.h
270
struct i915_sched_node sched;
drivers/gpu/drm/i915/i915_request.h
604
return !list_empty(&rq->sched.link);
drivers/gpu/drm/i915/i915_scheduler.c
21
return container_of(node, const struct i915_request, sched);
drivers/gpu/drm/i915/i915_scheduler.c
242
sched);
drivers/gpu/drm/i915/i915_scheduler.c
292
__i915_schedule(&rq->sched, attr);
drivers/gpu/drm/i915/i915_scheduler.h
20
list_for_each_entry(it, &(plist)->requests, sched.link)
drivers/gpu/drm/i915/i915_scheduler.h
23
list_for_each_entry_safe(it, n, &(plist)->requests, sched.link)
drivers/gpu/drm/i915/i915_scheduler_types.h
86
&(rq__)->sched.waiters_list, \
drivers/gpu/drm/i915/i915_scheduler_types.h
91
&(rq__)->sched.signalers_list, \
drivers/gpu/drm/i915/i915_trace.h
361
__entry->prio = rq->sched.attr.priority;
drivers/gpu/drm/i915/selftests/i915_request.c
2522
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
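
The i915 hits are a different "sched": each request embeds an i915_sched_node carrying its priority attribute (rq->sched.attr.priority) plus an intrusive list link (rq->sched.link) that threads the request through per-engine queues without any allocation, as the list_for_each_entry(..., sched.link) loops above show. A toy model of that layout, with simplified list and node types standing in for the i915 ones:

#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *prev, *next;
};

static void list_init(struct list_head *h)
{
	h->prev = h->next = h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct sched_node {                     /* stand-in for i915_sched_node */
	int priority;                   /* cf. rq->sched.attr.priority  */
	struct list_head link;          /* cf. rq->sched.link           */
};

struct request {
	int seqno;
	struct sched_node sched;        /* embedded, as in i915_request */
};

#define to_request(p) \
	((struct request *)((char *)(p) - offsetof(struct request, sched.link)))

int main(void)
{
	struct request rqs[] = {
		{ 1, { 0,    { 0, 0 } } },
		{ 2, { 1023, { 0, 0 } } },
		{ 3, { -1,   { 0, 0 } } },
	};
	struct list_head queue;
	struct request *best = NULL;

	list_init(&queue);
	for (int i = 0; i < 3; i++)
		list_add_tail(&rqs[i].sched.link, &queue);

	/* Walk the queue picking the highest priority, the way the
	 * rq_prio() comparisons above do. */
	for (struct list_head *p = queue.next; p != &queue; p = p->next) {
		struct request *rq = to_request(p);

		if (!best || rq->sched.priority > best->sched.priority)
			best = rq;
	}
	printf("run request %d (prio %d)\n", best->seqno, best->sched.priority);
	return 0;
}
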
drivers/gpu/drm/imagination/pvr_queue.c
1166
struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler);
drivers/gpu/drm/imagination/pvr_queue.c
1239
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/imagination/pvr_queue.c
1277
sched = &queue->scheduler;
drivers/gpu/drm/imagination/pvr_queue.c
1317
&sched, 1, &ctx->faulty);
drivers/gpu/drm/imagination/pvr_queue.c
493
return frag_job->base.sched->ops->prepare_job(&frag_job->base, &queue->entity);
drivers/gpu/drm/imagination/pvr_queue.c
613
struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler);
drivers/gpu/drm/imagination/pvr_queue.c
755
struct pvr_queue *queue = container_of(job->base.sched,
drivers/gpu/drm/imagination/pvr_queue.c
811
struct drm_gpu_scheduler *sched = s_job->sched;
drivers/gpu/drm/imagination/pvr_queue.c
812
struct pvr_queue *queue = container_of(sched, struct pvr_queue, scheduler);
drivers/gpu/drm/imagination/pvr_queue.c
817
dev_err(sched->dev, "Job timeout\n");
drivers/gpu/drm/imagination/pvr_queue.c
833
drm_sched_stop(sched, s_job);
drivers/gpu/drm/imagination/pvr_queue.c
836
list_for_each_entry(job, &sched->pending_list, base.list) {
drivers/gpu/drm/imagination/pvr_queue.c
855
drm_sched_start(sched, 0);
drivers/gpu/drm/imagination/pvr_queue.c
899
sched_fence->sched->ops == &pvr_queue_sched_ops)
drivers/gpu/drm/lima/lima_sched.c
110
static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/lima/lima_sched.c
112
return container_of(sched, struct lima_sched_pipe, base);
drivers/gpu/drm/lima/lima_sched.c
163
struct drm_gpu_scheduler *sched = &pipe->base;
drivers/gpu/drm/lima/lima_sched.c
166
&sched, 1, NULL);
drivers/gpu/drm/lima/lima_sched.c
209
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
drivers/gpu/drm/lima/lima_sched.c
276
struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
drivers/gpu/drm/lima/lima_sched.c
405
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
drivers/gpu/drm/lima/lima_sched.c
478
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
drivers/gpu/drm/lima/lima_trace.h
19
__string(pipe, task->base.sched->name)
drivers/gpu/drm/msm/adreno/adreno_device.c
344
struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
drivers/gpu/drm/msm/adreno/adreno_device.c
346
drm_sched_wqueue_stop(sched);
drivers/gpu/drm/msm/adreno/adreno_device.c
355
struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
drivers/gpu/drm/msm/adreno/adreno_device.c
357
drm_sched_wqueue_start(sched);
drivers/gpu/drm/msm/msm_gem.h
76
struct drm_gpu_scheduler sched;
drivers/gpu/drm/msm/msm_gem_vma.c
842
ret = drm_sched_init(&vm->sched, &args);
drivers/gpu/drm/msm/msm_gem_vma.c
912
drm_sched_stop(&vm->sched, NULL);
drivers/gpu/drm/msm/msm_gem_vma.c
913
drm_sched_fini(&vm->sched);
drivers/gpu/drm/msm/msm_ringbuffer.c
111
ret = drm_sched_init(&ring->sched, &args);
drivers/gpu/drm/msm/msm_ringbuffer.c
136
drm_sched_fini(&ring->sched);
drivers/gpu/drm/msm/msm_ringbuffer.h
56
struct drm_gpu_scheduler sched;
drivers/gpu/drm/msm/msm_submitqueue.c
151
struct drm_gpu_scheduler *sched = &ring->sched;
drivers/gpu/drm/msm/msm_submitqueue.c
156
ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
drivers/gpu/drm/msm/msm_submitqueue.c
220
struct drm_gpu_scheduler *sched = &to_msm_vm(msm_context_vm(drm, ctx))->sched;
drivers/gpu/drm/msm/msm_submitqueue.c
225
&sched, 1, NULL);
drivers/gpu/drm/nouveau/nouveau_abi16.c
178
if (chan->sched)
drivers/gpu/drm/nouveau/nouveau_abi16.c
179
drm_sched_entity_fini(&chan->sched->entity);
drivers/gpu/drm/nouveau/nouveau_abi16.c
184
if (chan->sched)
drivers/gpu/drm/nouveau/nouveau_abi16.c
185
nouveau_sched_destroy(&chan->sched);
drivers/gpu/drm/nouveau/nouveau_abi16.c
418
ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
drivers/gpu/drm/nouveau/nouveau_abi16.h
29
struct nouveau_sched *sched;
drivers/gpu/drm/nouveau/nouveau_drm.c
219
if (cli->sched)
drivers/gpu/drm/nouveau/nouveau_drm.c
220
nouveau_sched_destroy(&cli->sched);
drivers/gpu/drm/nouveau/nouveau_drm.c
316
ret = nouveau_sched_create(&cli->sched, drm, NULL, 1);
drivers/gpu/drm/nouveau/nouveau_drv.h
105
struct nouveau_sched *sched;
drivers/gpu/drm/nouveau/nouveau_exec.c
241
args.sched = __args->sched;
drivers/gpu/drm/nouveau/nouveau_exec.c
396
args.sched = chan16->sched;
drivers/gpu/drm/nouveau/nouveau_exec.h
11
struct nouveau_sched *sched;
drivers/gpu/drm/nouveau/nouveau_sched.c
120
struct nouveau_sched *sched = job->sched;
drivers/gpu/drm/nouveau/nouveau_sched.c
122
spin_lock(&sched->job.list.lock);
drivers/gpu/drm/nouveau/nouveau_sched.c
124
spin_unlock(&sched->job.list.lock);
drivers/gpu/drm/nouveau/nouveau_sched.c
126
wake_up(&sched->job.wq);
drivers/gpu/drm/nouveau/nouveau_sched.c
275
struct nouveau_sched *sched = job->sched;
drivers/gpu/drm/nouveau/nouveau_sched.c
29
struct nouveau_sched *sched = args->sched;
drivers/gpu/drm/nouveau/nouveau_sched.c
295
mutex_lock(&sched->mutex);
drivers/gpu/drm/nouveau/nouveau_sched.c
307
spin_lock(&sched->job.list.lock);
drivers/gpu/drm/nouveau/nouveau_sched.c
308
list_add(&job->entry, &sched->job.list.head);
drivers/gpu/drm/nouveau/nouveau_sched.c
309
spin_unlock(&sched->job.list.lock);
drivers/gpu/drm/nouveau/nouveau_sched.c
328
mutex_unlock(&sched->mutex);
drivers/gpu/drm/nouveau/nouveau_sched.c
338
mutex_unlock(&sched->mutex);
drivers/gpu/drm/nouveau/nouveau_sched.c
36
job->sched = sched;
drivers/gpu/drm/nouveau/nouveau_sched.c
370
struct drm_gpu_scheduler *sched = sched_job->sched;
drivers/gpu/drm/nouveau/nouveau_sched.c
374
drm_sched_stop(sched, sched_job);
drivers/gpu/drm/nouveau/nouveau_sched.c
381
drm_sched_start(sched, 0);
drivers/gpu/drm/nouveau/nouveau_sched.c
401
nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
drivers/gpu/drm/nouveau/nouveau_sched.c
404
struct drm_gpu_scheduler *drm_sched = &sched->base;
drivers/gpu/drm/nouveau/nouveau_sched.c
405
struct drm_sched_entity *entity = &sched->entity;
drivers/gpu/drm/nouveau/nouveau_sched.c
422
sched->wq = wq;
drivers/gpu/drm/nouveau/nouveau_sched.c
446
mutex_init(&sched->mutex);
drivers/gpu/drm/nouveau/nouveau_sched.c
447
spin_lock_init(&sched->job.list.lock);
drivers/gpu/drm/nouveau/nouveau_sched.c
448
INIT_LIST_HEAD(&sched->job.list.head);
drivers/gpu/drm/nouveau/nouveau_sched.c
449
init_waitqueue_head(&sched->job.wq);
drivers/gpu/drm/nouveau/nouveau_sched.c
456
if (sched->wq)
drivers/gpu/drm/nouveau/nouveau_sched.c
457
destroy_workqueue(sched->wq);
drivers/gpu/drm/nouveau/nouveau_sched.c
465
struct nouveau_sched *sched;
drivers/gpu/drm/nouveau/nouveau_sched.c
468
sched = kzalloc_obj(*sched);
drivers/gpu/drm/nouveau/nouveau_sched.c
469
if (!sched)
drivers/gpu/drm/nouveau/nouveau_sched.c
472
ret = nouveau_sched_init(sched, drm, wq, credit_limit);
drivers/gpu/drm/nouveau/nouveau_sched.c
474
kfree(sched);
drivers/gpu/drm/nouveau/nouveau_sched.c
478
*psched = sched;
drivers/gpu/drm/nouveau/nouveau_sched.c
484
nouveau_sched_job_list_empty(struct nouveau_sched *sched)
drivers/gpu/drm/nouveau/nouveau_sched.c
488
spin_lock(&sched->job.list.lock);
drivers/gpu/drm/nouveau/nouveau_sched.c
489
empty = list_empty(&sched->job.list.head);
drivers/gpu/drm/nouveau/nouveau_sched.c
490
spin_unlock(&sched->job.list.lock);
drivers/gpu/drm/nouveau/nouveau_sched.c
496
nouveau_sched_fini(struct nouveau_sched *sched)
drivers/gpu/drm/nouveau/nouveau_sched.c
498
struct drm_gpu_scheduler *drm_sched = &sched->base;
drivers/gpu/drm/nouveau/nouveau_sched.c
499
struct drm_sched_entity *entity = &sched->entity;
drivers/gpu/drm/nouveau/nouveau_sched.c
501
wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched));
drivers/gpu/drm/nouveau/nouveau_sched.c
509
if (sched->wq)
drivers/gpu/drm/nouveau/nouveau_sched.c
510
destroy_workqueue(sched->wq);
drivers/gpu/drm/nouveau/nouveau_sched.c
516
struct nouveau_sched *sched = *psched;
drivers/gpu/drm/nouveau/nouveau_sched.c
518
nouveau_sched_fini(sched);
drivers/gpu/drm/nouveau/nouveau_sched.c
519
kfree(sched);
drivers/gpu/drm/nouveau/nouveau_sched.c
87
ret = drm_sched_job_init(&job->base, &sched->entity,
drivers/gpu/drm/nouveau/nouveau_sched.h
29
struct nouveau_sched *sched;
drivers/gpu/drm/nouveau/nouveau_sched.h
53
struct nouveau_sched *sched;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1084
struct nouveau_sched *sched = job->sched;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1090
spin_lock(&sched->job.list.lock);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1091
list_for_each_entry(__job, &sched->job.list.head, entry) {
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1101
spin_unlock(&sched->job.list.lock);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1109
spin_unlock(&sched->job.list.lock);
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1686
args.sched = __args->sched;
drivers/gpu/drm/nouveau/nouveau_uvmm.c
1814
args.sched = cli->sched;
drivers/gpu/drm/nouveau/nouveau_uvmm.h
56
struct nouveau_sched *sched;
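
nouveau wraps the DRM scheduler in its own struct nouveau_sched (a drm_gpu_scheduler base, an entity, a spinlock-protected in-flight job list, and a waitqueue), and teardown waits for that list to drain before freeing anything: wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched)) in the nouveau_sched_fini hit above. A userspace model of that drain-before-free pattern using pthreads (build with -lpthread); names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct sched {
	pthread_mutex_t lock;           /* cf. sched->job.list.lock */
	pthread_cond_t  wq;             /* cf. sched->job.wq        */
	int             jobs;           /* stand-in for job.list    */
};

static void job_done(struct sched *s)
{
	pthread_mutex_lock(&s->lock);
	s->jobs--;
	pthread_mutex_unlock(&s->lock);
	pthread_cond_signal(&s->wq);    /* cf. wake_up(&sched->job.wq) */
}

static void *worker(void *arg)
{
	struct sched *s = arg;

	for (int i = 0; i < 3; i++) {
		usleep(1000);           /* pretend to run a job */
		job_done(s);
	}
	return NULL;
}

static void sched_fini(struct sched *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->jobs)                 /* cf. wait_event(job.wq, empty) */
		pthread_cond_wait(&s->wq, &s->lock);
	pthread_mutex_unlock(&s->lock);
	puts("all jobs retired; safe to free the scheduler");
}

int main(void)
{
	struct sched s = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 3
	};
	pthread_t t;

	pthread_create(&t, NULL, worker, &s);
	sched_fini(&s);
	pthread_join(&t, NULL);
	return 0;
}
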
drivers/gpu/drm/panfrost/panfrost_job.c
1067
struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
drivers/gpu/drm/panfrost/panfrost_job.c
1070
&sched, 1, NULL);
drivers/gpu/drm/panfrost/panfrost_job.c
36
struct drm_gpu_scheduler sched;
drivers/gpu/drm/panfrost/panfrost_job.c
502
drm_sched_fault(&pfdev->js->queue[js].sched);
drivers/gpu/drm/panfrost/panfrost_job.c
682
drm_sched_stop(&pfdev->js->queue[i].sched, bad);
drivers/gpu/drm/panfrost/panfrost_job.c
747
drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
drivers/gpu/drm/panfrost/panfrost_job.c
752
drm_sched_start(&pfdev->js->queue[i].sched, 0);
drivers/gpu/drm/panfrost/panfrost_job.c
901
ret = drm_sched_init(&js->queue[j].sched, &args);
drivers/gpu/drm/panfrost/panfrost_job.c
915
drm_sched_fini(&js->queue[j].sched);
drivers/gpu/drm/panfrost/panfrost_job.c
929
drm_sched_fini(&js->queue[j].sched);
drivers/gpu/drm/panfrost/panfrost_job.c
977
if (atomic_read(&js->queue[i].sched.credit_count))
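
etnaviv_gpu.c and panfrost_job.c both gate their idle/suspend checks on sched.credit_count — presumably relying on the scheduler's count of credits held by in-flight jobs, where nonzero means the queue is still busy. A minimal model of that busy check with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int credit_count;         /* cf. sched.credit_count */

static int device_is_idle(void)
{
	/* any held credits == work still in flight */
	return atomic_load(&credit_count) == 0;
}

int main(void)
{
	atomic_fetch_add(&credit_count, 1);     /* job submitted */
	printf("idle? %d\n", device_is_idle());
	atomic_fetch_sub(&credit_count, 1);     /* job completed */
	printf("idle? %d\n", device_is_idle());
	return 0;
}
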
drivers/gpu/drm/panthor/panthor_mmu.c
1894
drm_sched_fini(&vm->sched);
drivers/gpu/drm/panthor/panthor_mmu.c
239
struct drm_gpu_scheduler sched;
drivers/gpu/drm/panthor/panthor_mmu.c
2411
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/panthor/panthor_mmu.c
2479
ret = drm_sched_init(&vm->sched, &sched_args);
drivers/gpu/drm/panthor/panthor_mmu.c
2483
sched = &vm->sched;
drivers/gpu/drm/panthor/panthor_mmu.c
2484
ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL);
drivers/gpu/drm/panthor/panthor_mmu.c
2510
drm_sched_fini(&vm->sched);
drivers/gpu/drm/panthor/panthor_mmu.c
814
drm_sched_stop(&vm->sched, NULL);
drivers/gpu/drm/panthor/panthor_mmu.c
819
drm_sched_start(&vm->sched, 0);
drivers/gpu/drm/panthor/panthor_sched.c
1480
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
1481
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
drivers/gpu/drm/panthor/panthor_sched.c
1487
lockdep_assert_held(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
1505
cancel_delayed_work(&sched->tick_work);
drivers/gpu/drm/panthor/panthor_sched.c
1507
sched_queue_delayed_work(sched, tick, 0);
drivers/gpu/drm/panthor/panthor_sched.c
1526
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
1527
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
drivers/gpu/drm/panthor/panthor_sched.c
1535
lockdep_assert_held(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
1578
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
1585
mutex_lock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
1599
mutex_unlock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
1626
sched_queue_delayed_work(sched, tick, 0);
drivers/gpu/drm/panthor/panthor_sched.c
1630
mutex_lock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
1645
mutex_unlock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
1681
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
1682
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
drivers/gpu/drm/panthor/panthor_sched.c
1685
lockdep_assert_held(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
1696
if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
drivers/gpu/drm/panthor/panthor_sched.c
1732
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
1734
lockdep_assert_held(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
1736
sched->might_have_idle_groups = true;
drivers/gpu/drm/panthor/panthor_sched.c
1742
sched_queue_delayed_work(sched, tick, 0);
drivers/gpu/drm/panthor/panthor_sched.c
1762
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
1763
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
drivers/gpu/drm/panthor/panthor_sched.c
1766
lockdep_assert_held(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
1778
sched_queue_delayed_work(sched, tick, 0);
drivers/gpu/drm/panthor/panthor_sched.c
1873
struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
drivers/gpu/drm/panthor/panthor_sched.c
1875
u32 events = atomic_xchg(&sched->fw_events, 0);
drivers/gpu/drm/panthor/panthor_sched.c
1876
struct panthor_device *ptdev = sched->ptdev;
drivers/gpu/drm/panthor/panthor_sched.c
1878
mutex_lock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
1892
mutex_unlock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
1952
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
1955
lockdep_assert_held(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
2020
tick_ctx_is_full(const struct panthor_scheduler *sched,
drivers/gpu/drm/panthor/panthor_sched.c
2023
return ctx->group_count == sched->csg_slot_count;
drivers/gpu/drm/panthor/panthor_sched.c
2027
tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
drivers/gpu/drm/panthor/panthor_sched.c
2035
if (tick_ctx_is_full(sched, ctx))
drivers/gpu/drm/panthor/panthor_sched.c
2052
if (i == ctx->as_count && ctx->as_count == sched->as_slot_count)
drivers/gpu/drm/panthor/panthor_sched.c
2073
if (tick_ctx_is_full(sched, ctx))
drivers/gpu/drm/panthor/panthor_sched.c
2079
tick_ctx_insert_old_group(struct panthor_scheduler *sched,
drivers/gpu/drm/panthor/panthor_sched.c
2083
struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
drivers/gpu/drm/panthor/panthor_sched.c
2090
struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
drivers/gpu/drm/panthor/panthor_sched.c
2105
tick_ctx_init(struct panthor_scheduler *sched,
drivers/gpu/drm/panthor/panthor_sched.c
2108
struct panthor_device *ptdev = sched->ptdev;
drivers/gpu/drm/panthor/panthor_sched.c
2122
for (i = 0; i < sched->csg_slot_count; i++) {
drivers/gpu/drm/panthor/panthor_sched.c
2123
struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
drivers/gpu/drm/panthor/panthor_sched.c
2144
tick_ctx_insert_old_group(sched, ctx, group);
drivers/gpu/drm/panthor/panthor_sched.c
2216
tick_ctx_cleanup(struct panthor_scheduler *sched,
drivers/gpu/drm/panthor/panthor_sched.c
2219
struct panthor_device *ptdev = sched->ptdev;
drivers/gpu/drm/panthor/panthor_sched.c
2240
&sched->groups.idle[group->priority] :
drivers/gpu/drm/panthor/panthor_sched.c
2241
&sched->groups.runnable[group->priority]);
drivers/gpu/drm/panthor/panthor_sched.c
2260
&sched->groups.idle[group->priority] :
drivers/gpu/drm/panthor/panthor_sched.c
2261
&sched->groups.runnable[group->priority]);
drivers/gpu/drm/panthor/panthor_sched.c
2269
tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx)
drivers/gpu/drm/panthor/panthor_sched.c
2272
struct panthor_device *ptdev = sched->ptdev;
drivers/gpu/drm/panthor/panthor_sched.c
2290
csg_slot = &sched->csg_slots[csg_id];
drivers/gpu/drm/panthor/panthor_sched.c
2306
csg_slot = &sched->csg_slots[csg_id];
drivers/gpu/drm/panthor/panthor_sched.c
2344
for (i = 0; i < sched->csg_slot_count; i++) {
drivers/gpu/drm/panthor/panthor_sched.c
2345
if (!sched->csg_slots[i].group)
drivers/gpu/drm/panthor/panthor_sched.c
2368
csg_slot = &sched->csg_slots[csg_id];
drivers/gpu/drm/panthor/panthor_sched.c
2413
list_move_tail(&group->run_node, &sched->groups.idle[prio]);
drivers/gpu/drm/panthor/panthor_sched.c
2415
list_move_tail(&group->run_node, &sched->groups.runnable[prio]);
drivers/gpu/drm/panthor/panthor_sched.c
2420
sched->used_csg_slot_count = ctx->group_count;
drivers/gpu/drm/panthor/panthor_sched.c
2421
sched->might_have_idle_groups = ctx->idle_group_count > 0;
drivers/gpu/drm/panthor/panthor_sched.c
2425
tick_ctx_update_resched_target(struct panthor_scheduler *sched,
drivers/gpu/drm/panthor/panthor_sched.c
2433
resched_target = sched->last_tick + sched->tick_period;
drivers/gpu/drm/panthor/panthor_sched.c
2435
if (time_before64(sched->resched_target, sched->last_tick) ||
drivers/gpu/drm/panthor/panthor_sched.c
2436
time_before64(resched_target, sched->resched_target))
drivers/gpu/drm/panthor/panthor_sched.c
2437
sched->resched_target = resched_target;
drivers/gpu/drm/panthor/panthor_sched.c
2439
return sched->resched_target - sched->last_tick;
drivers/gpu/drm/panthor/panthor_sched.c
2442
sched->resched_target = U64_MAX;
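Stitched together, the tick_ctx_update_resched_target() fragments above compute how long to wait before the next scheduler tick; a hedged reconstruction (the control flow between the quoted lines is assumed, not indexed):

	/* The next tick is nominally one tick_period after the last one.
	 * Keep whichever of the already-pending target and the new one
	 * fires first; U64_MAX (set on the other branch) means no tick
	 * is currently scheduled. */
	u64 resched_target = sched->last_tick + sched->tick_period;

	if (time_before64(sched->resched_target, sched->last_tick) ||
	    time_before64(resched_target, sched->resched_target))
		sched->resched_target = resched_target;

	return sched->resched_target - sched->last_tick; /* delay in jiffies */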
drivers/gpu/drm/panthor/panthor_sched.c
2448
struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
drivers/gpu/drm/panthor/panthor_sched.c
2450
struct panthor_device *ptdev = sched->ptdev;
drivers/gpu/drm/panthor/panthor_sched.c
2452
u64 resched_target = sched->resched_target;
drivers/gpu/drm/panthor/panthor_sched.c
2467
resched_target = sched->last_tick + sched->tick_period;
drivers/gpu/drm/panthor/panthor_sched.c
2474
mutex_lock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
2475
if (panthor_device_reset_is_pending(sched->ptdev))
drivers/gpu/drm/panthor/panthor_sched.c
2478
tick_ctx_init(sched, &ctx);
drivers/gpu/drm/panthor/panthor_sched.c
2488
prio >= 0 && !tick_ctx_is_full(sched, &ctx);
drivers/gpu/drm/panthor/panthor_sched.c
2490
tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
drivers/gpu/drm/panthor/panthor_sched.c
2493
tick_ctx_pick_groups_from_list(sched, &ctx,
drivers/gpu/drm/panthor/panthor_sched.c
2494
&sched->groups.runnable[prio],
drivers/gpu/drm/panthor/panthor_sched.c
2502
prio >= 0 && !tick_ctx_is_full(sched, &ctx);
drivers/gpu/drm/panthor/panthor_sched.c
2513
tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
drivers/gpu/drm/panthor/panthor_sched.c
2516
tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
drivers/gpu/drm/panthor/panthor_sched.c
2524
tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
drivers/gpu/drm/panthor/panthor_sched.c
2531
prio >= 0 && !tick_ctx_is_full(sched, &ctx);
drivers/gpu/drm/panthor/panthor_sched.c
2534
tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true);
drivers/gpu/drm/panthor/panthor_sched.c
2535
tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio],
drivers/gpu/drm/panthor/panthor_sched.c
2539
tick_ctx_apply(sched, &ctx);
drivers/gpu/drm/panthor/panthor_sched.c
2544
panthor_devfreq_record_idle(sched->ptdev);
drivers/gpu/drm/panthor/panthor_sched.c
2545
if (sched->pm.has_ref) {
drivers/gpu/drm/panthor/panthor_sched.c
2547
sched->pm.has_ref = false;
drivers/gpu/drm/panthor/panthor_sched.c
2550
panthor_devfreq_record_busy(sched->ptdev);
drivers/gpu/drm/panthor/panthor_sched.c
2551
if (!sched->pm.has_ref) {
drivers/gpu/drm/panthor/panthor_sched.c
2553
sched->pm.has_ref = true;
drivers/gpu/drm/panthor/panthor_sched.c
2557
sched->last_tick = now;
drivers/gpu/drm/panthor/panthor_sched.c
2558
resched_delay = tick_ctx_update_resched_target(sched, &ctx);
drivers/gpu/drm/panthor/panthor_sched.c
2563
sched_queue_delayed_work(sched, tick, resched_delay);
drivers/gpu/drm/panthor/panthor_sched.c
2566
tick_ctx_cleanup(sched, &ctx);
drivers/gpu/drm/panthor/panthor_sched.c
2569
mutex_unlock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
2608
struct panthor_scheduler *sched = container_of(work,
drivers/gpu/drm/panthor/panthor_sched.c
2614
mutex_lock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
2615
list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) {
drivers/gpu/drm/panthor/panthor_sched.c
2636
&sched->groups.runnable[group->priority]);
drivers/gpu/drm/panthor/panthor_sched.c
2645
mutex_unlock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
2648
sched_queue_delayed_work(sched, tick, 0);
drivers/gpu/drm/panthor/panthor_sched.c
2653
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
2656
drm_WARN_ON(&ptdev->base, sched->resched_target != U64_MAX);
drivers/gpu/drm/panthor/panthor_sched.c
2662
sched->resched_target = sched->last_tick + sched->tick_period;
drivers/gpu/drm/panthor/panthor_sched.c
2663
if (sched->used_csg_slot_count == sched->csg_slot_count &&
drivers/gpu/drm/panthor/panthor_sched.c
2664
time_before64(now, sched->resched_target))
drivers/gpu/drm/panthor/panthor_sched.c
2665
delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
drivers/gpu/drm/panthor/panthor_sched.c
2669
sched_queue_delayed_work(sched, tick, delay_jiffies);
drivers/gpu/drm/panthor/panthor_sched.c
2675
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
2676
struct list_head *queue = &sched->groups.runnable[group->priority];
drivers/gpu/drm/panthor/panthor_sched.c
2690
if (atomic_read(&sched->reset.in_progress))
drivers/gpu/drm/panthor/panthor_sched.c
2698
sched_queue_delayed_work(sched, tick, 0);
drivers/gpu/drm/panthor/panthor_sched.c
2705
if (sched->might_have_idle_groups) {
drivers/gpu/drm/panthor/panthor_sched.c
2706
sched_queue_delayed_work(sched, tick, 0);
drivers/gpu/drm/panthor/panthor_sched.c
2711
if (sched->resched_target != U64_MAX) {
drivers/gpu/drm/panthor/panthor_sched.c
2713
if (sched->used_csg_slot_count < sched->csg_slot_count)
drivers/gpu/drm/panthor/panthor_sched.c
2714
sched_queue_delayed_work(sched, tick, 0);
drivers/gpu/drm/panthor/panthor_sched.c
2746
struct panthor_scheduler *sched = group->ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
2748
lockdep_assert_held(&sched->reset.lock);
drivers/gpu/drm/panthor/panthor_sched.c
2754
list_move_tail(&group->run_node, &sched->reset.stopped_groups);
drivers/gpu/drm/panthor/panthor_sched.c
2759
struct panthor_scheduler *sched = group->ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
2769
&sched->groups.idle[group->priority] :
drivers/gpu/drm/panthor/panthor_sched.c
2770
&sched->groups.runnable[group->priority]);
drivers/gpu/drm/panthor/panthor_sched.c
2812
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
2817
mutex_lock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
2819
for (i = 0; i < sched->csg_slot_count; i++) {
drivers/gpu/drm/panthor/panthor_sched.c
2820
struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
drivers/gpu/drm/panthor/panthor_sched.c
2842
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
drivers/gpu/drm/panthor/panthor_sched.c
2866
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
drivers/gpu/drm/panthor/panthor_sched.c
2900
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
drivers/gpu/drm/panthor/panthor_sched.c
2911
for (i = 0; i < sched->csg_slot_count; i++) {
drivers/gpu/drm/panthor/panthor_sched.c
2912
struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
drivers/gpu/drm/panthor/panthor_sched.c
2929
&sched->groups.idle[group->priority]);
drivers/gpu/drm/panthor/panthor_sched.c
2939
mutex_unlock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
2944
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
2948
mutex_lock(&sched->reset.lock);
drivers/gpu/drm/panthor/panthor_sched.c
2949
atomic_set(&sched->reset.in_progress, true);
drivers/gpu/drm/panthor/panthor_sched.c
2954
cancel_work_sync(&sched->sync_upd_work);
drivers/gpu/drm/panthor/panthor_sched.c
2955
cancel_delayed_work_sync(&sched->tick_work);
drivers/gpu/drm/panthor/panthor_sched.c
2962
for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) {
drivers/gpu/drm/panthor/panthor_sched.c
2963
list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node)
drivers/gpu/drm/panthor/panthor_sched.c
2967
for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) {
drivers/gpu/drm/panthor/panthor_sched.c
2968
list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node)
drivers/gpu/drm/panthor/panthor_sched.c
2972
mutex_unlock(&sched->reset.lock);
drivers/gpu/drm/panthor/panthor_sched.c
2977
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
2980
mutex_lock(&sched->reset.lock);
drivers/gpu/drm/panthor/panthor_sched.c
2982
list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
drivers/gpu/drm/panthor/panthor_sched.c
2995
atomic_set(&sched->reset.in_progress, false);
drivers/gpu/drm/panthor/panthor_sched.c
2996
mutex_unlock(&sched->reset.lock);
drivers/gpu/drm/panthor/panthor_sched.c
3000
sched_queue_delayed_work(sched, tick, 0);
drivers/gpu/drm/panthor/panthor_sched.c
3001
sched_queue_work(sched, sync_upd);
drivers/gpu/drm/panthor/panthor_sched.c
3173
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
3185
params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
drivers/gpu/drm/panthor/panthor_sched.c
3302
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
3321
mutex_lock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
3365
sched->resched_target == U64_MAX;
drivers/gpu/drm/panthor/panthor_sched.c
3374
if (!sched->pm.has_ref &&
drivers/gpu/drm/panthor/panthor_sched.c
3377
sched->pm.has_ref = true;
drivers/gpu/drm/panthor/panthor_sched.c
3380
panthor_devfreq_record_busy(sched->ptdev);
drivers/gpu/drm/panthor/panthor_sched.c
3390
mutex_unlock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
3403
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
3409
drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
drivers/gpu/drm/panthor/panthor_sched.c
3413
mutex_lock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
3426
mutex_unlock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
3641
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
3747
mutex_lock(&sched->reset.lock);
drivers/gpu/drm/panthor/panthor_sched.c
3748
if (atomic_read(&sched->reset.in_progress)) {
drivers/gpu/drm/panthor/panthor_sched.c
3751
mutex_lock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
3753
&sched->groups.idle[group->priority]);
drivers/gpu/drm/panthor/panthor_sched.c
3754
mutex_unlock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
3756
mutex_unlock(&sched->reset.lock);
drivers/gpu/drm/panthor/panthor_sched.c
3779
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
3789
mutex_lock(&sched->reset.lock);
drivers/gpu/drm/panthor/panthor_sched.c
3790
mutex_lock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
3793
sched_queue_delayed_work(sched, tick, 0);
drivers/gpu/drm/panthor/panthor_sched.c
3794
} else if (!atomic_read(&sched->reset.in_progress)) {
drivers/gpu/drm/panthor/panthor_sched.c
3802
mutex_unlock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
3803
mutex_unlock(&sched->reset.lock);
drivers/gpu/drm/panthor/panthor_sched.c
3826
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
3838
mutex_lock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
3847
mutex_unlock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
4052
struct panthor_scheduler *sched = ptdev->scheduler;
drivers/gpu/drm/panthor/panthor_sched.c
4054
disable_delayed_work_sync(&sched->tick_work);
drivers/gpu/drm/panthor/panthor_sched.c
4055
disable_work_sync(&sched->fw_events_work);
drivers/gpu/drm/panthor/panthor_sched.c
4056
disable_work_sync(&sched->sync_upd_work);
drivers/gpu/drm/panthor/panthor_sched.c
4058
mutex_lock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
4059
if (sched->pm.has_ref) {
drivers/gpu/drm/panthor/panthor_sched.c
4061
sched->pm.has_ref = false;
drivers/gpu/drm/panthor/panthor_sched.c
4063
mutex_unlock(&sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
4068
struct panthor_scheduler *sched = res;
drivers/gpu/drm/panthor/panthor_sched.c
4071
if (!sched || !sched->csg_slot_count)
drivers/gpu/drm/panthor/panthor_sched.c
4074
if (sched->wq)
drivers/gpu/drm/panthor/panthor_sched.c
4075
destroy_workqueue(sched->wq);
drivers/gpu/drm/panthor/panthor_sched.c
4077
if (sched->heap_alloc_wq)
drivers/gpu/drm/panthor/panthor_sched.c
4078
destroy_workqueue(sched->heap_alloc_wq);
drivers/gpu/drm/panthor/panthor_sched.c
4081
drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
drivers/gpu/drm/panthor/panthor_sched.c
4082
drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
drivers/gpu/drm/panthor/panthor_sched.c
4085
drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
drivers/gpu/drm/panthor/panthor_sched.c
4093
struct panthor_scheduler *sched;
drivers/gpu/drm/panthor/panthor_sched.c
4097
sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
drivers/gpu/drm/panthor/panthor_sched.c
4098
if (!sched)
drivers/gpu/drm/panthor/panthor_sched.c
4126
sched->ptdev = ptdev;
drivers/gpu/drm/panthor/panthor_sched.c
4127
sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
drivers/gpu/drm/panthor/panthor_sched.c
4128
sched->csg_slot_count = num_groups;
drivers/gpu/drm/panthor/panthor_sched.c
4129
sched->cs_slot_count = csg_iface->control->stream_num;
drivers/gpu/drm/panthor/panthor_sched.c
4130
sched->as_slot_count = gpu_as_count;
drivers/gpu/drm/panthor/panthor_sched.c
4131
ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
drivers/gpu/drm/panthor/panthor_sched.c
4132
ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
drivers/gpu/drm/panthor/panthor_sched.c
4133
ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;
drivers/gpu/drm/panthor/panthor_sched.c
4135
sched->last_tick = 0;
drivers/gpu/drm/panthor/panthor_sched.c
4136
sched->resched_target = U64_MAX;
drivers/gpu/drm/panthor/panthor_sched.c
4137
sched->tick_period = msecs_to_jiffies(10);
drivers/gpu/drm/panthor/panthor_sched.c
4138
INIT_DELAYED_WORK(&sched->tick_work, tick_work);
drivers/gpu/drm/panthor/panthor_sched.c
4139
INIT_WORK(&sched->sync_upd_work, sync_upd_work);
drivers/gpu/drm/panthor/panthor_sched.c
4140
INIT_WORK(&sched->fw_events_work, process_fw_events_work);
drivers/gpu/drm/panthor/panthor_sched.c
4142
ret = drmm_mutex_init(&ptdev->base, &sched->lock);
drivers/gpu/drm/panthor/panthor_sched.c
4147
INIT_LIST_HEAD(&sched->groups.runnable[prio]);
drivers/gpu/drm/panthor/panthor_sched.c
4148
INIT_LIST_HEAD(&sched->groups.idle[prio]);
drivers/gpu/drm/panthor/panthor_sched.c
4150
INIT_LIST_HEAD(&sched->groups.waiting);
drivers/gpu/drm/panthor/panthor_sched.c
4152
ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
drivers/gpu/drm/panthor/panthor_sched.c
4156
INIT_LIST_HEAD(&sched->reset.stopped_groups);
drivers/gpu/drm/panthor/panthor_sched.c
4173
sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
drivers/gpu/drm/panthor/panthor_sched.c
4174
sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
drivers/gpu/drm/panthor/panthor_sched.c
4175
if (!sched->wq || !sched->heap_alloc_wq) {
drivers/gpu/drm/panthor/panthor_sched.c
4176
panthor_sched_fini(&ptdev->base, sched);
drivers/gpu/drm/panthor/panthor_sched.c
4181
ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
drivers/gpu/drm/panthor/panthor_sched.c
4185
ptdev->scheduler = sched;
drivers/gpu/drm/panthor/panthor_sched.c
745
#define sched_queue_work(sched, wname) \
drivers/gpu/drm/panthor/panthor_sched.c
747
if (!atomic_read(&(sched)->reset.in_progress) && \
drivers/gpu/drm/panthor/panthor_sched.c
748
!panthor_device_reset_is_pending((sched)->ptdev)) \
drivers/gpu/drm/panthor/panthor_sched.c
749
queue_work((sched)->wq, &(sched)->wname ## _work); \
drivers/gpu/drm/panthor/panthor_sched.c
761
#define sched_queue_delayed_work(sched, wname, delay) \
drivers/gpu/drm/panthor/panthor_sched.c
763
if (!atomic_read(&sched->reset.in_progress) && \
drivers/gpu/drm/panthor/panthor_sched.c
764
!panthor_device_reset_is_pending((sched)->ptdev)) \
drivers/gpu/drm/panthor/panthor_sched.c
765
mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
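The two panthor helper macros above are indexed one line at a time; as a hedged reconstruction (the do/while (0) wrapper and line ordering are assumptions, not quoted lines), the reset-guarded queueing pattern they implement looks like this, with sched_queue_work() differing only in calling queue_work() instead of mod_delayed_work():

#define sched_queue_delayed_work_sketch(sched, wname, delay) \
	do { \
		/* Never kick scheduler work while a reset runs or is pending. */ \
		if (!atomic_read(&(sched)->reset.in_progress) && \
		    !panthor_device_reset_is_pending((sched)->ptdev)) \
			/* "wname ## _work" pastes e.g. tick into tick_work. */ \
			mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
	} while (0)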
drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
61
__string(name, sched_job->sched->name)
drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
64
__string(dev, dev_name(sched_job->sched->dev))
drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
74
&sched_job->sched->credit_count);
drivers/gpu/drm/scheduler/sched_entity.c
215
job->sched->ops->free_job(job);
drivers/gpu/drm/scheduler/sched_entity.c
285
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/scheduler/sched_entity.c
292
sched = entity->rq->sched;
drivers/gpu/drm/scheduler/sched_entity.c
300
sched->job_scheduled,
drivers/gpu/drm/scheduler/sched_entity.c
304
wait_event_killable(sched->job_scheduled,
drivers/gpu/drm/scheduler/sched_entity.c
375
drm_sched_wakeup(entity->rq->sched);
drivers/gpu/drm/scheduler/sched_entity.c
402
struct drm_gpu_scheduler *sched = entity->rq->sched;
drivers/gpu/drm/scheduler/sched_entity.c
418
if (!fence->error && s_fence && s_fence->sched == sched &&
drivers/gpu/drm/scheduler/sched_entity.c
458
if (job->sched->ops->prepare_job)
drivers/gpu/drm/scheduler/sched_entity.c
459
return job->sched->ops->prepare_job(job, entity);
drivers/gpu/drm/scheduler/sched_entity.c
528
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/scheduler/sched_entity.c
554
sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
drivers/gpu/drm/scheduler/sched_entity.c
555
rq = sched ? sched->sched_rq[entity->priority] : NULL;
drivers/gpu/drm/scheduler/sched_entity.c
591
atomic_inc(entity->rq->sched->score);
drivers/gpu/drm/scheduler/sched_entity.c
604
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/scheduler/sched_entity.c
617
sched = rq->sched;
drivers/gpu/drm/scheduler/sched_entity.c
628
drm_sched_wakeup(sched);
drivers/gpu/drm/scheduler/sched_fence.c
118
if (!WARN_ON_ONCE(fence->sched))
drivers/gpu/drm/scheduler/sched_fence.c
230
fence->sched = entity->rq->sched;
drivers/gpu/drm/scheduler/sched_fence.c
95
return (const char *)fence->sched->name;
drivers/gpu/drm/scheduler/sched_internal.h
13
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
drivers/gpu/drm/scheduler/sched_main.c
100
WARN_ON(check_sub_overflow(sched->credit_limit,
drivers/gpu/drm/scheduler/sched_main.c
101
atomic_read(&sched->credit_count),
drivers/gpu/drm/scheduler/sched_main.c
1081
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
1083
drm_sched_run_job_queue(sched);
drivers/gpu/drm/scheduler/sched_main.c
1098
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
1105
for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
drivers/gpu/drm/scheduler/sched_main.c
1107
drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
drivers/gpu/drm/scheduler/sched_main.c
1108
drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
drivers/gpu/drm/scheduler/sched_main.c
1129
drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more)
drivers/gpu/drm/scheduler/sched_main.c
1133
spin_lock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
1135
job = list_first_entry_or_null(&sched->pending_list,
drivers/gpu/drm/scheduler/sched_main.c
1142
cancel_delayed_work(&sched->work_tdr);
drivers/gpu/drm/scheduler/sched_main.c
1145
next = list_first_entry_or_null(&sched->pending_list,
drivers/gpu/drm/scheduler/sched_main.c
115
static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
drivers/gpu/drm/scheduler/sched_main.c
1157
drm_sched_start_timeout(sched);
drivers/gpu/drm/scheduler/sched_main.c
1163
spin_unlock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
1180
struct drm_gpu_scheduler *sched, *picked_sched = NULL;
drivers/gpu/drm/scheduler/sched_main.c
1185
sched = sched_list[i];
drivers/gpu/drm/scheduler/sched_main.c
1187
if (!sched->ready) {
drivers/gpu/drm/scheduler/sched_main.c
1189
sched->name);
drivers/gpu/drm/scheduler/sched_main.c
1193
num_score = atomic_read(sched->score);
drivers/gpu/drm/scheduler/sched_main.c
1196
picked_sched = sched;
drivers/gpu/drm/scheduler/sched_main.c
1211
struct drm_gpu_scheduler *sched =
drivers/gpu/drm/scheduler/sched_main.c
1216
job = drm_sched_get_finished_job(sched, &have_more);
drivers/gpu/drm/scheduler/sched_main.c
1218
sched->ops->free_job(job);
drivers/gpu/drm/scheduler/sched_main.c
1220
drm_sched_run_free_queue(sched);
drivers/gpu/drm/scheduler/sched_main.c
1223
drm_sched_run_job_queue(sched);
drivers/gpu/drm/scheduler/sched_main.c
1233
struct drm_gpu_scheduler *sched =
drivers/gpu/drm/scheduler/sched_main.c
1242
entity = drm_sched_select_entity(sched);
drivers/gpu/drm/scheduler/sched_main.c
1254
drm_sched_run_job_queue(sched);
drivers/gpu/drm/scheduler/sched_main.c
1260
atomic_add(sched_job->credits, &sched->credit_count);
drivers/gpu/drm/scheduler/sched_main.c
1268
fence = sched->ops->run_job(sched_job);
drivers/gpu/drm/scheduler/sched_main.c
127
if (s_job->credits > sched->credit_limit) {
drivers/gpu/drm/scheduler/sched_main.c
1278
DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
drivers/gpu/drm/scheduler/sched_main.c
128
dev_WARN(sched->dev,
drivers/gpu/drm/scheduler/sched_main.c
1286
wake_up(&sched->job_scheduled);
drivers/gpu/drm/scheduler/sched_main.c
1287
drm_sched_run_job_queue(sched);
drivers/gpu/drm/scheduler/sched_main.c
130
s_job->credits = sched->credit_limit;
drivers/gpu/drm/scheduler/sched_main.c
1317
int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
drivers/gpu/drm/scheduler/sched_main.c
1321
sched->ops = args->ops;
drivers/gpu/drm/scheduler/sched_main.c
1322
sched->credit_limit = args->credit_limit;
drivers/gpu/drm/scheduler/sched_main.c
1323
sched->name = args->name;
drivers/gpu/drm/scheduler/sched_main.c
1324
sched->timeout = args->timeout;
drivers/gpu/drm/scheduler/sched_main.c
1325
sched->hang_limit = args->hang_limit;
drivers/gpu/drm/scheduler/sched_main.c
1326
sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_percpu_wq;
drivers/gpu/drm/scheduler/sched_main.c
1327
sched->score = args->score ? args->score : &sched->_score;
drivers/gpu/drm/scheduler/sched_main.c
1328
sched->dev = args->dev;
drivers/gpu/drm/scheduler/sched_main.c
133
return drm_sched_available_credits(sched) >= s_job->credits;
drivers/gpu/drm/scheduler/sched_main.c
1333
dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
drivers/gpu/drm/scheduler/sched_main.c
1336
} else if (sched->sched_rq) {
drivers/gpu/drm/scheduler/sched_main.c
1341
dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
drivers/gpu/drm/scheduler/sched_main.c
1346
sched->submit_wq = args->submit_wq;
drivers/gpu/drm/scheduler/sched_main.c
1347
sched->own_submit_wq = false;
drivers/gpu/drm/scheduler/sched_main.c
1349
sched->submit_wq = drm_sched_alloc_wq(args->name);
drivers/gpu/drm/scheduler/sched_main.c
1350
if (!sched->submit_wq)
drivers/gpu/drm/scheduler/sched_main.c
1353
sched->own_submit_wq = true;
drivers/gpu/drm/scheduler/sched_main.c
1356
sched->sched_rq = kmalloc_objs(*sched->sched_rq, args->num_rqs,
drivers/gpu/drm/scheduler/sched_main.c
1358
if (!sched->sched_rq)
drivers/gpu/drm/scheduler/sched_main.c
1360
sched->num_rqs = args->num_rqs;
drivers/gpu/drm/scheduler/sched_main.c
1361
for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
drivers/gpu/drm/scheduler/sched_main.c
1362
sched->sched_rq[i] = kzalloc_obj(*sched->sched_rq[i]);
drivers/gpu/drm/scheduler/sched_main.c
1363
if (!sched->sched_rq[i])
drivers/gpu/drm/scheduler/sched_main.c
1365
drm_sched_rq_init(sched, sched->sched_rq[i]);
drivers/gpu/drm/scheduler/sched_main.c
1368
init_waitqueue_head(&sched->job_scheduled);
drivers/gpu/drm/scheduler/sched_main.c
1369
INIT_LIST_HEAD(&sched->pending_list);
drivers/gpu/drm/scheduler/sched_main.c
1370
spin_lock_init(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
1371
atomic_set(&sched->credit_count, 0);
drivers/gpu/drm/scheduler/sched_main.c
1372
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
drivers/gpu/drm/scheduler/sched_main.c
1373
INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
drivers/gpu/drm/scheduler/sched_main.c
1374
INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
drivers/gpu/drm/scheduler/sched_main.c
1375
atomic_set(&sched->_score, 0);
drivers/gpu/drm/scheduler/sched_main.c
1376
atomic64_set(&sched->job_id_count, 0);
drivers/gpu/drm/scheduler/sched_main.c
1377
sched->pause_submit = false;
drivers/gpu/drm/scheduler/sched_main.c
1379
sched->ready = true;
drivers/gpu/drm/scheduler/sched_main.c
1383
kfree(sched->sched_rq[i]);
drivers/gpu/drm/scheduler/sched_main.c
1385
kfree(sched->sched_rq);
drivers/gpu/drm/scheduler/sched_main.c
1386
sched->sched_rq = NULL;
drivers/gpu/drm/scheduler/sched_main.c
1388
if (sched->own_submit_wq)
drivers/gpu/drm/scheduler/sched_main.c
1389
destroy_workqueue(sched->submit_wq);
drivers/gpu/drm/scheduler/sched_main.c
1390
dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
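The drm_sched_init() hits above show the core scheduler copying its whole configuration out of a struct drm_sched_init_args. A hedged caller-side sketch; my_ops, my_dev, my_sched and the numeric values are illustrative placeholders, not taken from any driver in this index:

	struct drm_sched_init_args args = {
		.ops = &my_ops,			/* driver's drm_sched_backend_ops */
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = 64,		/* max in-flight job credits */
		.hang_limit = 0,
		.timeout = msecs_to_jiffies(500), /* TDR delay */
		.name = "my-sched",
		.dev = my_dev,
		/* .submit_wq, .timeout_wq and .score may stay NULL; the
		 * assignments above show drm_sched_init() falling back to an
		 * own workqueue, system_percpu_wq and &sched->_score. */
	};
	int ret = drm_sched_init(&my_sched, &args);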
drivers/gpu/drm/scheduler/sched_main.c
1395
static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
1400
list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) {
drivers/gpu/drm/scheduler/sched_main.c
1401
sched->ops->cancel_job(job);
drivers/gpu/drm/scheduler/sched_main.c
1403
sched->ops->free_job(job);
drivers/gpu/drm/scheduler/sched_main.c
1420
void drm_sched_fini(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
1425
drm_sched_wqueue_stop(sched);
drivers/gpu/drm/scheduler/sched_main.c
1427
for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
drivers/gpu/drm/scheduler/sched_main.c
1428
struct drm_sched_rq *rq = sched->sched_rq[i];
drivers/gpu/drm/scheduler/sched_main.c
1458
dev_warn(sched->dev, "Tearing down scheduler with active entities!\n");
drivers/gpu/drm/scheduler/sched_main.c
1462
kfree(sched->sched_rq[i]);
drivers/gpu/drm/scheduler/sched_main.c
1466
wake_up_all(&sched->job_scheduled);
drivers/gpu/drm/scheduler/sched_main.c
1469
cancel_delayed_work_sync(&sched->work_tdr);
drivers/gpu/drm/scheduler/sched_main.c
1472
if (sched->ops->cancel_job)
drivers/gpu/drm/scheduler/sched_main.c
1473
drm_sched_cancel_remaining_jobs(sched);
drivers/gpu/drm/scheduler/sched_main.c
1475
if (sched->own_submit_wq)
drivers/gpu/drm/scheduler/sched_main.c
1476
destroy_workqueue(sched->submit_wq);
drivers/gpu/drm/scheduler/sched_main.c
1477
sched->ready = false;
drivers/gpu/drm/scheduler/sched_main.c
1478
kfree(sched->sched_rq);
drivers/gpu/drm/scheduler/sched_main.c
1479
sched->sched_rq = NULL;
drivers/gpu/drm/scheduler/sched_main.c
1481
if (!list_empty(&sched->pending_list))
drivers/gpu/drm/scheduler/sched_main.c
1482
dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n");
drivers/gpu/drm/scheduler/sched_main.c
1500
struct drm_gpu_scheduler *sched = bad->sched;
drivers/gpu/drm/scheduler/sched_main.c
1509
for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
drivers/gpu/drm/scheduler/sched_main.c
1510
struct drm_sched_rq *rq = sched->sched_rq[i];
drivers/gpu/drm/scheduler/sched_main.c
1536
bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
1538
return sched->ready;
drivers/gpu/drm/scheduler/sched_main.c
1549
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
1551
WRITE_ONCE(sched->pause_submit, true);
drivers/gpu/drm/scheduler/sched_main.c
1552
cancel_work_sync(&sched->work_run_job);
drivers/gpu/drm/scheduler/sched_main.c
1553
cancel_work_sync(&sched->work_free_job);
drivers/gpu/drm/scheduler/sched_main.c
1566
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
1568
WRITE_ONCE(sched->pause_submit, false);
drivers/gpu/drm/scheduler/sched_main.c
1569
queue_work(sched->submit_wq, &sched->work_run_job);
drivers/gpu/drm/scheduler/sched_main.c
1570
queue_work(sched->submit_wq, &sched->work_free_job);
drivers/gpu/drm/scheduler/sched_main.c
1580
bool drm_sched_is_stopped(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
1582
return READ_ONCE(sched->pause_submit);
drivers/gpu/drm/scheduler/sched_main.c
1600
WARN_ON(!drm_sched_is_stopped(job->sched));
drivers/gpu/drm/scheduler/sched_main.c
182
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
drivers/gpu/drm/scheduler/sched_main.c
189
rq->sched = sched;
drivers/gpu/drm/scheduler/sched_main.c
209
atomic_inc(rq->sched->score);
drivers/gpu/drm/scheduler/sched_main.c
231
atomic_dec(rq->sched->score);
drivers/gpu/drm/scheduler/sched_main.c
256
drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
drivers/gpu/drm/scheduler/sched_main.c
284
if (!drm_sched_can_queue(sched, entity)) {
drivers/gpu/drm/scheduler/sched_main.c
313
drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
drivers/gpu/drm/scheduler/sched_main.c
327
if (!drm_sched_can_queue(sched, entity)) {
drivers/gpu/drm/scheduler/sched_main.c
345
static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
347
if (!drm_sched_is_stopped(sched))
drivers/gpu/drm/scheduler/sched_main.c
348
queue_work(sched->submit_wq, &sched->work_run_job);
drivers/gpu/drm/scheduler/sched_main.c
355
static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
357
if (!drm_sched_is_stopped(sched))
drivers/gpu/drm/scheduler/sched_main.c
358
queue_work(sched->submit_wq, &sched->work_free_job);
drivers/gpu/drm/scheduler/sched_main.c
371
struct drm_gpu_scheduler *sched = s_fence->sched;
drivers/gpu/drm/scheduler/sched_main.c
373
atomic_sub(s_job->credits, &sched->credit_count);
drivers/gpu/drm/scheduler/sched_main.c
374
atomic_dec(sched->score);
drivers/gpu/drm/scheduler/sched_main.c
381
drm_sched_run_free_queue(sched);
drivers/gpu/drm/scheduler/sched_main.c
403
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
405
lockdep_assert_held(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
407
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
drivers/gpu/drm/scheduler/sched_main.c
408
!list_empty(&sched->pending_list))
drivers/gpu/drm/scheduler/sched_main.c
409
mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
drivers/gpu/drm/scheduler/sched_main.c
412
static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
414
spin_lock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
415
drm_sched_start_timeout(sched);
drivers/gpu/drm/scheduler/sched_main.c
416
spin_unlock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
426
void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
428
spin_lock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
429
sched->timeout = 0;
drivers/gpu/drm/scheduler/sched_main.c
430
drm_sched_start_timeout(sched);
drivers/gpu/drm/scheduler/sched_main.c
431
spin_unlock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
442
void drm_sched_fault(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
444
if (sched->timeout_wq)
drivers/gpu/drm/scheduler/sched_main.c
445
mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
drivers/gpu/drm/scheduler/sched_main.c
461
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
465
sched_timeout = sched->work_tdr.timer.expires;
drivers/gpu/drm/scheduler/sched_main.c
471
if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
drivers/gpu/drm/scheduler/sched_main.c
475
return sched->timeout;
drivers/gpu/drm/scheduler/sched_main.c
487
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
drivers/gpu/drm/scheduler/sched_main.c
490
spin_lock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
492
if (list_empty(&sched->pending_list))
drivers/gpu/drm/scheduler/sched_main.c
493
cancel_delayed_work(&sched->work_tdr);
drivers/gpu/drm/scheduler/sched_main.c
495
mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
drivers/gpu/drm/scheduler/sched_main.c
497
spin_unlock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
503
struct drm_gpu_scheduler *sched = s_job->sched;
drivers/gpu/drm/scheduler/sched_main.c
505
spin_lock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
506
list_add_tail(&s_job->list, &sched->pending_list);
drivers/gpu/drm/scheduler/sched_main.c
507
drm_sched_start_timeout(sched);
drivers/gpu/drm/scheduler/sched_main.c
508
spin_unlock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
523
static void drm_sched_job_reinsert_on_false_timeout(struct drm_gpu_scheduler *sched,
drivers/gpu/drm/scheduler/sched_main.c
526
spin_lock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
527
list_add(&job->list, &sched->pending_list);
drivers/gpu/drm/scheduler/sched_main.c
533
drm_sched_run_free_queue(sched);
drivers/gpu/drm/scheduler/sched_main.c
534
spin_unlock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
539
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/scheduler/sched_main.c
543
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
drivers/gpu/drm/scheduler/sched_main.c
546
spin_lock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
547
job = list_first_entry_or_null(&sched->pending_list,
drivers/gpu/drm/scheduler/sched_main.c
558
spin_unlock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
560
status = job->sched->ops->timedout_job(job);
drivers/gpu/drm/scheduler/sched_main.c
566
if (sched->free_guilty) {
drivers/gpu/drm/scheduler/sched_main.c
567
job->sched->ops->free_job(job);
drivers/gpu/drm/scheduler/sched_main.c
568
sched->free_guilty = false;
drivers/gpu/drm/scheduler/sched_main.c
572
drm_sched_job_reinsert_on_false_timeout(sched, job);
drivers/gpu/drm/scheduler/sched_main.c
574
spin_unlock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
578
drm_sched_start_timeout_unlocked(sched);
drivers/gpu/drm/scheduler/sched_main.c
600
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
drivers/gpu/drm/scheduler/sched_main.c
604
drm_sched_wqueue_stop(sched);
drivers/gpu/drm/scheduler/sched_main.c
613
if (bad && bad->sched == sched)
drivers/gpu/drm/scheduler/sched_main.c
618
list_add(&bad->list, &sched->pending_list);
drivers/gpu/drm/scheduler/sched_main.c
627
list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
drivers/gpu/drm/scheduler/sched_main.c
634
atomic_sub(s_job->credits, &sched->credit_count);
drivers/gpu/drm/scheduler/sched_main.c
640
spin_lock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
642
spin_unlock(&sched->job_list_lock);
drivers/gpu/drm/scheduler/sched_main.c
658
sched->ops->free_job(s_job);
drivers/gpu/drm/scheduler/sched_main.c
660
sched->free_guilty = true;
drivers/gpu/drm/scheduler/sched_main.c
670
cancel_delayed_work(&sched->work_tdr);
drivers/gpu/drm/scheduler/sched_main.c
689
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
drivers/gpu/drm/scheduler/sched_main.c
698
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
drivers/gpu/drm/scheduler/sched_main.c
701
atomic_add(s_job->credits, &sched->credit_count);
drivers/gpu/drm/scheduler/sched_main.c
713
drm_sched_start_timeout_unlocked(sched);
drivers/gpu/drm/scheduler/sched_main.c
714
drm_sched_wqueue_start(sched);
drivers/gpu/drm/scheduler/sched_main.c
737
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/sched_main.c
744
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
drivers/gpu/drm/scheduler/sched_main.c
747
if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
drivers/gpu/drm/scheduler/sched_main.c
755
fence = sched->ops->run_job(s_job);
drivers/gpu/drm/scheduler/sched_main.c
810
dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
drivers/gpu/drm/scheduler/sched_main.c
860
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/scheduler/sched_main.c
865
sched = entity->rq->sched;
drivers/gpu/drm/scheduler/sched_main.c
867
job->sched = sched;
drivers/gpu/drm/scheduler/sched_main.c
96
static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
145
return (const char *)job->base.sched->name;
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
166
struct drm_mock_scheduler *sched =
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
167
drm_sched_to_mock_sched(sched_job->sched);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
172
&sched->lock,
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
173
sched->hw_timeline.context,
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
174
atomic_inc_return(&sched->hw_timeline.next_seqno));
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
178
spin_lock_irq(&sched->lock);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
182
if (!list_empty(&sched->job_list)) {
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
184
list_last_entry(&sched->job_list, typeof(*prev),
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
195
list_add_tail(&job->link, &sched->job_list);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
198
spin_unlock_irq(&sched->lock);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
217
struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
226
spin_lock_irqsave(&sched->lock, flags);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
233
spin_unlock_irqrestore(&sched->lock, flags);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
25
struct drm_mock_scheduler *sched)
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
254
struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
260
spin_lock_irqsave(&sched->lock, flags);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
266
spin_unlock_irqrestore(&sched->lock, flags);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
299
struct drm_mock_scheduler *sched;
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
302
sched = kunit_kzalloc(test, sizeof(*sched), GFP_KERNEL);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
303
KUNIT_ASSERT_NOT_NULL(test, sched);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
305
ret = drm_sched_init(&sched->base, &args);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
308
sched->test = test;
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
309
sched->hw_timeline.context = dma_fence_context_alloc(1);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
310
atomic_set(&sched->hw_timeline.next_seqno, 0);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
311
INIT_LIST_HEAD(&sched->job_list);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
312
spin_lock_init(&sched->lock);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
314
return sched;
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
324
void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
326
drm_sched_fini(&sched->base);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
34
drm_sched = &sched->base;
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
343
unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
351
spin_lock_irqsave(&sched->lock, flags);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
352
if (WARN_ON_ONCE(sched->hw_timeline.cur_seqno + num <
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
353
sched->hw_timeline.cur_seqno))
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
355
sched->hw_timeline.cur_seqno += num;
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
356
list_for_each_entry_safe(job, next, &sched->job_list, link) {
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
357
if (sched->hw_timeline.cur_seqno < job->hw_fence.seqno)
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
364
spin_unlock_irqrestore(&sched->lock, flags);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
60
struct drm_mock_scheduler *sched =
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
61
drm_sched_to_mock_sched(job->base.sched);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
63
lockdep_assert_held(&sched->lock);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
76
struct drm_mock_scheduler *sched =
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
77
drm_sched_to_mock_sched(job->base.sched);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
83
spin_lock_irqsave(&sched->lock, flags);
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
84
list_for_each_entry_safe(job, next, &sched->job_list, link) {
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
91
sched->hw_timeline.cur_seqno = job->hw_fence.seqno;
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
94
spin_unlock_irqrestore(&sched->lock, flags);
drivers/gpu/drm/scheduler/tests/sched_tests.h
114
drm_sched_to_mock_sched(struct drm_gpu_scheduler *sched)
drivers/gpu/drm/scheduler/tests/sched_tests.h
116
return container_of(sched, struct drm_mock_scheduler, base);
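drm_sched_to_mock_sched() above is an instance of the kernel's embed-and-downcast idiom, which several schedulers in this index (drm_mock_scheduler, xe_gpu_scheduler) rely on. A minimal generic sketch; my_scheduler and to_my_scheduler are illustrative names:

	struct my_scheduler {
		struct drm_gpu_scheduler base;	/* embedded, never a pointer */
		spinlock_t lock;
	};

	static inline struct my_scheduler *
	to_my_scheduler(struct drm_gpu_scheduler *sched)
	{
		/* container_of() subtracts offsetof(struct my_scheduler, base),
		 * recovering the wrapper from the core scheduler pointer that
		 * drm_sched callbacks hand back. */
		return container_of(sched, struct my_scheduler, base);
	}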
drivers/gpu/drm/scheduler/tests/sched_tests.h
133
void drm_mock_sched_fini(struct drm_mock_scheduler *sched);
drivers/gpu/drm/scheduler/tests/sched_tests.h
134
unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
drivers/gpu/drm/scheduler/tests/sched_tests.h
140
struct drm_mock_scheduler *sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
121
struct drm_mock_scheduler *sched = test->priv;
drivers/gpu/drm/scheduler/tests/tests_basic.c
134
sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
157
struct drm_mock_scheduler *sched = test->priv;
drivers/gpu/drm/scheduler/tests/tests_basic.c
172
sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
212
struct drm_mock_scheduler *sched;
drivers/gpu/drm/scheduler/tests/tests_basic.c
221
sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
drivers/gpu/drm/scheduler/tests/tests_basic.c
223
sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
233
drm_mock_sched_fini(sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
252
struct drm_mock_scheduler *sched = test->priv;
drivers/gpu/drm/scheduler/tests/tests_basic.c
26
struct drm_mock_scheduler *sched = test->priv;
drivers/gpu/drm/scheduler/tests/tests_basic.c
265
sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
28
drm_mock_sched_fini(sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
292
struct drm_mock_scheduler *sched = test->priv;
drivers/gpu/drm/scheduler/tests/tests_basic.c
306
sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
323
i = drm_mock_sched_advance(sched, 1);
drivers/gpu/drm/scheduler/tests/tests_basic.c
348
struct drm_mock_scheduler *sched = test->priv;
drivers/gpu/drm/scheduler/tests/tests_basic.c
364
entity[p] = drm_mock_sched_entity_new(test, p, sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
383
struct drm_mock_scheduler *sched = test->priv;
drivers/gpu/drm/scheduler/tests/tests_basic.c
40
struct drm_mock_scheduler *sched = test->priv;
drivers/gpu/drm/scheduler/tests/tests_basic.c
400
entity[p] = drm_mock_sched_entity_new(test, p, sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
439
struct drm_mock_scheduler *sched[3];
drivers/gpu/drm/scheduler/tests/tests_basic.c
456
for (i = 0; i < ARRAY_SIZE(sched); i++)
drivers/gpu/drm/scheduler/tests/tests_basic.c
457
sched[i] = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
drivers/gpu/drm/scheduler/tests/tests_basic.c
462
sched[i % ARRAY_SIZE(sched)]);
drivers/gpu/drm/scheduler/tests/tests_basic.c
478
cur_sched %= ARRAY_SIZE(sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
479
modify = &sched[cur_sched]->base;
drivers/gpu/drm/scheduler/tests/tests_basic.c
487
for (i = 0; i < ARRAY_SIZE(sched); i++)
drivers/gpu/drm/scheduler/tests/tests_basic.c
488
drm_mock_sched_fini(sched[i]);
drivers/gpu/drm/scheduler/tests/tests_basic.c
504
struct drm_mock_scheduler *sched;
drivers/gpu/drm/scheduler/tests/tests_basic.c
513
sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
drivers/gpu/drm/scheduler/tests/tests_basic.c
514
sched->base.credit_limit = 1;
drivers/gpu/drm/scheduler/tests/tests_basic.c
518
sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
53
sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
532
i = drm_mock_sched_advance(sched, 1);
drivers/gpu/drm/scheduler/tests/tests_basic.c
538
i = drm_mock_sched_advance(sched, 1);
drivers/gpu/drm/scheduler/tests/tests_basic.c
545
drm_mock_sched_fini(sched);
drivers/gpu/drm/scheduler/tests/tests_basic.c
64
i = drm_mock_sched_advance(sched, 1);
drivers/gpu/drm/v3d/v3d_drv.c
133
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/v3d/v3d_drv.c
143
sched = &v3d->queue[i].sched;
drivers/gpu/drm/v3d/v3d_drv.c
145
DRM_SCHED_PRIORITY_NORMAL, &sched,
drivers/gpu/drm/v3d/v3d_drv.h
54
struct drm_gpu_scheduler sched;
drivers/gpu/drm/v3d/v3d_sched.c
738
drm_sched_stop(&v3d->queue[i].sched, sched_job);
drivers/gpu/drm/v3d/v3d_sched.c
753
drm_sched_resubmit_jobs(&v3d->queue[i].sched);
drivers/gpu/drm/v3d/v3d_sched.c
757
drm_sched_start(&v3d->queue[i].sched, 0);
drivers/gpu/drm/v3d/v3d_sched.c
881
return drm_sched_init(&v3d->queue[queue].sched, &args);
drivers/gpu/drm/v3d/v3d_sched.c
931
if (v3d->queue[q].sched.ready)
drivers/gpu/drm/v3d/v3d_sched.c
932
drm_sched_fini(&v3d->queue[q].sched);
drivers/gpu/drm/xe/xe_dep_scheduler.c
107
drm_sched_fini(&dep_scheduler->sched);
drivers/gpu/drm/xe/xe_dep_scheduler.c
124
drm_sched_fini(&dep_scheduler->sched);
drivers/gpu/drm/xe/xe_dep_scheduler.c
31
struct drm_gpu_scheduler sched;
drivers/gpu/drm/xe/xe_dep_scheduler.c
77
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/xe/xe_dep_scheduler.c
93
err = drm_sched_init(&dep_scheduler->sched, &args);
drivers/gpu/drm/xe/xe_dep_scheduler.c
97
sched = &dep_scheduler->sched;
drivers/gpu/drm/xe/xe_dep_scheduler.c
98
err = drm_sched_entity_init(&dep_scheduler->entity, 0, &sched, 1, NULL);
drivers/gpu/drm/xe/xe_execlist.c
337
struct drm_gpu_scheduler *sched;
drivers/gpu/drm/xe/xe_execlist.c
361
err = drm_sched_init(&exl->sched, &args);
drivers/gpu/drm/xe/xe_execlist.c
365
sched = &exl->sched;
drivers/gpu/drm/xe/xe_execlist.c
366
err = drm_sched_entity_init(&exl->entity, 0, &sched, 1, NULL);
drivers/gpu/drm/xe/xe_execlist.c
381
drm_sched_fini(&exl->sched);
drivers/gpu/drm/xe/xe_execlist.c
392
drm_sched_fini(&exl->sched);
drivers/gpu/drm/xe/xe_execlist_types.h
37
struct drm_gpu_scheduler sched;
drivers/gpu/drm/xe/xe_gpu_scheduler.c
10
if (!drm_sched_is_stopped(&sched->base))
drivers/gpu/drm/xe/xe_gpu_scheduler.c
101
drm_sched_wqueue_stop(&sched->base);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
102
cancel_work_sync(&sched->work_process_msg);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
105
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.c
107
drm_sched_resume_timeout(&sched->base, sched->base.timeout);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
11
queue_work(sched->base.submit_wq, &sched->work_process_msg);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
110
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
drivers/gpu/drm/xe/xe_gpu_scheduler.c
113
xe_sched_msg_lock(sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
114
xe_sched_add_msg_locked(sched, msg);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
115
xe_sched_msg_unlock(sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
118
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
drivers/gpu/drm/xe/xe_gpu_scheduler.c
121
lockdep_assert_held(&sched->msg_lock);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
123
list_add_tail(&msg->link, &sched->msgs);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
124
xe_sched_process_msg_queue(sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
132
void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
drivers/gpu/drm/xe/xe_gpu_scheduler.c
135
lockdep_assert_held(&sched->msg_lock);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
137
list_add(&msg->link, &sched->msgs);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
138
xe_sched_process_msg_queue(sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
14
static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.c
18
xe_sched_msg_lock(sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
19
msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
21
xe_sched_process_msg_queue(sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
22
xe_sched_msg_unlock(sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
26
xe_sched_get_msg(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.c
30
xe_sched_msg_lock(sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
31
msg = list_first_entry_or_null(&sched->msgs,
drivers/gpu/drm/xe/xe_gpu_scheduler.c
35
xe_sched_msg_unlock(sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
42
struct xe_gpu_scheduler *sched =
drivers/gpu/drm/xe/xe_gpu_scheduler.c
46
if (drm_sched_is_stopped(&sched->base))
drivers/gpu/drm/xe/xe_gpu_scheduler.c
49
msg = xe_sched_get_msg(sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
51
sched->ops->process_msg(msg);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
53
xe_sched_process_msg_queue_if_ready(sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
57
int xe_sched_init(struct xe_gpu_scheduler *sched,
drivers/gpu/drm/xe/xe_gpu_scheduler.c
79
sched->ops = xe_ops;
drivers/gpu/drm/xe/xe_gpu_scheduler.c
8
static void xe_sched_process_msg_queue(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.c
80
spin_lock_init(&sched->msg_lock);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
81
INIT_LIST_HEAD(&sched->msgs);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
82
INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
84
return drm_sched_init(&sched->base, &args);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
87
void xe_sched_fini(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.c
89
xe_sched_submission_stop(sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
90
drm_sched_fini(&sched->base);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
93
void xe_sched_submission_start(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.c
95
drm_sched_wqueue_start(&sched->base);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
96
queue_work(sched->base.submit_wq, &sched->work_process_msg);
drivers/gpu/drm/xe/xe_gpu_scheduler.c
99
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.h
12
int xe_sched_init(struct xe_gpu_scheduler *sched,
drivers/gpu/drm/xe/xe_gpu_scheduler.h
20
void xe_sched_fini(struct xe_gpu_scheduler *sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.h
22
void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.h
23
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.h
25
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);
drivers/gpu/drm/xe/xe_gpu_scheduler.h
27
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
drivers/gpu/drm/xe/xe_gpu_scheduler.h
29
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
drivers/gpu/drm/xe/xe_gpu_scheduler.h
31
void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
drivers/gpu/drm/xe/xe_gpu_scheduler.h
34
static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.h
36
spin_lock(&sched->msg_lock);
drivers/gpu/drm/xe/xe_gpu_scheduler.h
39
static inline void xe_sched_msg_unlock(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.h
41
spin_unlock(&sched->msg_lock);
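Taken together, the xe_gpu_scheduler hits sketch a small message queue layered on top of the scheduler's submit workqueue. A hedged composite of the posting path (the function name is illustrative; the individual calls and fields are the ones indexed above):

	static void xe_sched_post_msg_sketch(struct xe_gpu_scheduler *sched,
					     struct xe_sched_msg *msg)
	{
		xe_sched_msg_lock(sched);	/* spin_lock(&sched->msg_lock) */
		list_add_tail(&msg->link, &sched->msgs);
		/* xe_sched_process_msg_queue(): kick the processor on the same
		 * workqueue the scheduler's run/free work items run on. */
		queue_work(sched->base.submit_wq, &sched->work_process_msg);
		xe_sched_msg_unlock(sched);
	}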
drivers/gpu/drm/xe/xe_gpu_scheduler.h
44
static inline void xe_sched_stop(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.h
46
drm_sched_stop(&sched->base, NULL);
drivers/gpu/drm/xe/xe_gpu_scheduler.h
49
static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.h
51
drm_sched_tdr_queue_imm(&sched->base);
drivers/gpu/drm/xe/xe_gpu_scheduler.h
54
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.h
59
drm_sched_for_each_pending_job(s_job, &sched->base, NULL) {
drivers/gpu/drm/xe/xe_gpu_scheduler.h
62
sched->base.ops->run_job(s_job);
drivers/gpu/drm/xe/xe_gpu_scheduler.h
79
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.h
83
drm_sched_for_each_pending_job(job, &sched->base, NULL)
drivers/gpu/drm/xe/xe_gpu_scheduler.h
92
struct xe_gpu_scheduler *sched)
drivers/gpu/drm/xe/xe_gpu_scheduler.h
95
(struct drm_gpu_scheduler **)&sched,
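
A note on the shape these xe_gpu_scheduler entries trace: the struct wraps a drm_gpu_scheduler (the ->base member) and bolts on a spinlock-protected message list that a work item drains through ops->process_msg(). A minimal userspace sketch of that wrapper shape, with a pthread mutex standing in for msg_lock and every name here (toy_sched, toy_sched_add_msg, ...) invented for illustration:

    /* build: cc -pthread toy.c */
    #include <pthread.h>
    #include <stdio.h>

    struct msg { struct msg *next; int payload; };

    struct toy_sched {
            /* base scheduler state would live here (->base) */
            pthread_mutex_t msg_lock;   /* stands in for spin_lock(&sched->msg_lock) */
            struct msg *msgs;           /* stands in for the msgs list_head */
    };

    static void toy_sched_add_msg(struct toy_sched *s, struct msg *m)
    {
            pthread_mutex_lock(&s->msg_lock);
            m->next = s->msgs;          /* the add_msg_head variant: push at the front */
            s->msgs = m;
            pthread_mutex_unlock(&s->msg_lock);
    }

    static void toy_sched_process_msgs(struct toy_sched *s)
    {
            pthread_mutex_lock(&s->msg_lock);
            while (s->msgs) {
                    struct msg *m = s->msgs;

                    s->msgs = m->next;
                    pthread_mutex_unlock(&s->msg_lock);
                    printf("process msg %d\n", m->payload); /* ops->process_msg() slot */
                    pthread_mutex_lock(&s->msg_lock);
            }
            pthread_mutex_unlock(&s->msg_lock);
    }

    int main(void)
    {
            struct toy_sched s = { .msg_lock = PTHREAD_MUTEX_INITIALIZER };
            struct msg m = { .payload = 42 };

            toy_sched_add_msg(&s, &m);
            toy_sched_process_msgs(&s);
            return 0;
    }
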
drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
26
struct xe_gpu_scheduler sched;
drivers/gpu/drm/xe/xe_guc_submit.c
1239
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
1242
xe_sched_submission_start(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
1244
xe_sched_tdr_queue_imm(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
1414
xe_sched_tdr_queue_imm(&q->guc->sched);
drivers/gpu/drm/xe/xe_guc_submit.c
1446
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
1467
xe_sched_submission_stop(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
1566
xe_sched_tdr_queue_imm(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
1608
drm_sched_for_each_pending_job(tmp_job, &sched->base, NULL)
drivers/gpu/drm/xe/xe_guc_submit.c
1611
xe_sched_submission_start(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
1630
xe_sched_submission_start(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
1642
xe_sched_fini(&ge->sched);
drivers/gpu/drm/xe/xe_guc_submit.c
1670
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
drivers/gpu/drm/xe/xe_guc_submit.c
1925
struct xe_gpu_scheduler *sched;
drivers/gpu/drm/xe/xe_guc_submit.c
1957
submit_wq = primary->guc->sched.base.submit_wq;
drivers/gpu/drm/xe/xe_guc_submit.c
1960
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
drivers/gpu/drm/xe/xe_guc_submit.c
1967
sched = &ge->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
1968
err = xe_sched_entity_init(&ge->entity, sched);
drivers/gpu/drm/xe/xe_guc_submit.c
1981
xe_sched_stop(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2011
xe_sched_fini(&ge->sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2037
xe_sched_add_msg_head(&q->guc->sched, msg);
drivers/gpu/drm/xe/xe_guc_submit.c
2039
xe_sched_add_msg_locked(&q->guc->sched, msg);
drivers/gpu/drm/xe/xe_guc_submit.c
2041
xe_sched_add_msg(&q->guc->sched, msg);
drivers/gpu/drm/xe/xe_guc_submit.c
2158
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2164
xe_sched_msg_lock(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2167
xe_sched_msg_unlock(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2217
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2223
xe_sched_msg_lock(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2225
xe_sched_msg_unlock(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2260
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2264
xe_sched_submission_stop(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2288
struct xe_sched_job *job = xe_sched_first_pending_job(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2445
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2452
xe_sched_submission_stop(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2453
cancel_delayed_work_sync(&sched->base.work_tdr);
drivers/gpu/drm/xe/xe_guc_submit.c
2471
job = xe_sched_first_pending_job(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2495
xe_sched_submission_stop(&q->guc->sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2524
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2527
struct xe_sched_job *job = xe_sched_first_pending_job(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2546
xe_sched_resubmit_jobs(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2549
xe_sched_submission_start(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2550
xe_sched_submission_resume_tdr(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2579
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2584
drm_sched_for_each_pending_job(s_job, &sched->base, NULL) {
drivers/gpu/drm/xe/xe_guc_submit.c
2625
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2638
xe_sched_msg_lock(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2640
xe_sched_msg_unlock(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2653
xe_sched_msg_lock(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2655
xe_sched_msg_unlock(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2663
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2668
xe_sched_resubmit_jobs(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2670
xe_sched_submission_start(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2673
xe_sched_submission_resume_tdr(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2687
xe_sched_submission_start(&q->guc->sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2709
!drm_sched_is_stopped(&q->guc->sched.base))
drivers/gpu/drm/xe/xe_guc_submit.c
2728
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2734
xe_sched_submission_start(sched);
drivers/gpu/drm/xe/xe_guc_submit.c
3163
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
3178
snapshot->sched_timeout = sched->base.timeout;
drivers/gpu/drm/xe/xe_guc_submit.c
575
xe_sched_tdr_queue_imm(&q->guc->sched);
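
Read in sequence, the xe_guc_submit.c hits around GT reset suggest a fixed recovery order: stop submission, cancel the pending TDR work, replay the pending jobs, restart submission, then resume the TDR. A trivial runnable mock of just that ordering; the bodies are placeholder prints, not driver code:

    #include <stdio.h>

    static void submission_stop(void)  { puts("stop submission"); }
    static void cancel_tdr(void)       { puts("cancel timeout work"); }
    static void resubmit_jobs(void)    { puts("replay pending jobs"); }
    static void submission_start(void) { puts("restart submission"); }
    static void resume_tdr(void)       { puts("resume timeout detection"); }

    int main(void)
    {
            /* Quiesce and silence the watchdog before touching the
             * pending list; re-arm it only once submission is live. */
            submission_stop();
            cancel_tdr();
            resubmit_jobs();
            submission_start();
            resume_tdr();
            return 0;
    }
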
drivers/infiniband/hw/cxgb4/cm.c
145
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
drivers/infiniband/hw/cxgb4/cm.c
4398
sched(dev, skb);
drivers/infiniband/hw/cxgb4/cm.c
4432
sched(dev, skb);
drivers/infiniband/hw/cxgb4/cm.c
4441
[CPL_ACT_ESTABLISH] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4442
[CPL_ACT_OPEN_RPL] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4443
[CPL_RX_DATA] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4444
[CPL_ABORT_RPL_RSS] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4445
[CPL_ABORT_RPL] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4446
[CPL_PASS_OPEN_RPL] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4447
[CPL_CLOSE_LISTSRV_RPL] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4448
[CPL_PASS_ACCEPT_REQ] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4449
[CPL_PASS_ESTABLISH] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4450
[CPL_PEER_CLOSE] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4451
[CPL_CLOSE_CON_RPL] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4453
[CPL_RDMA_TERMINATE] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4454
[CPL_FW4_ACK] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4456
[CPL_GET_TCB_RPL] = sched,
drivers/infiniband/hw/cxgb4/cm.c
4458
[CPL_RX_PKT] = sched
drivers/infiniband/hw/cxgb4/cm.c
530
sched(ep->com.dev, skb);
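
The cm.c hits are the classic CPL dispatch-table idiom: an opcode-indexed array of handlers in which many opcodes share one sched() routine that defers the skb to process context. The same idiom self-contained (opcode set abbreviated, defer_to_worker a stand-in for sched):

    #include <stdio.h>

    enum { CPL_ACT_ESTABLISH, CPL_RX_DATA, CPL_FW4_ACK, CPL_NUM };

    typedef int (*cpl_handler)(int opcode);

    /* Many opcodes share a single handler that only queues the work. */
    static int defer_to_worker(int opcode)
    {
            printf("queue opcode %d for process context\n", opcode);
            return 0;
    }

    static const cpl_handler handlers[CPL_NUM] = {
            [CPL_ACT_ESTABLISH] = defer_to_worker,
            [CPL_RX_DATA]       = defer_to_worker,
            [CPL_FW4_ACK]       = defer_to_worker,
    };

    int main(void)
    {
            return handlers[CPL_RX_DATA](CPL_RX_DATA);
    }
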
drivers/input/joystick/sidewinder.c
120
int timeout, bitout, sched, i, kick, start, strobe;
drivers/input/joystick/sidewinder.c
130
sched = 0;
drivers/input/joystick/sidewinder.c
150
sched--;
drivers/input/joystick/sidewinder.c
163
sched = kick; /* Schedule second trigger */
drivers/input/joystick/sidewinder.c
168
if (pending && sched < 0 && (i > -SW_END)) { /* Second trigger time */
drivers/input/serio/hil_mlc.c
745
goto sched;
drivers/input/serio/hil_mlc.c
748
goto sched;
drivers/input/serio/hil_mlc.c
751
sched:
drivers/net/ethernet/chelsio/cxgb/sge.c
1308
struct sched *s = from_tasklet(s, t, sched_tsk);
drivers/net/ethernet/chelsio/cxgb/sge.c
264
struct sched *tx_sched;
drivers/net/ethernet/chelsio/cxgb/sge.c
277
struct sched *s = sge->tx_sched;
drivers/net/ethernet/chelsio/cxgb/sge.c
293
struct sched *s = sge->tx_sched;
drivers/net/ethernet/chelsio/cxgb/sge.c
338
struct sched *s = sge->tx_sched;
drivers/net/ethernet/chelsio/cxgb/sge.c
353
struct sched *s = sge->tx_sched;
drivers/net/ethernet/chelsio/cxgb/sge.c
366
struct sched *s;
drivers/net/ethernet/chelsio/cxgb/sge.c
369
s = kzalloc_obj(struct sched);
drivers/net/ethernet/chelsio/cxgb/sge.c
393
struct sched *s = sge->tx_sched;
drivers/net/ethernet/chelsio/cxgb/sge.c
428
struct sched *s = sge->tx_sched;
drivers/net/ethernet/chelsio/cxgb3/common.h
731
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
101
uint8_t sched;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
798
char *buf, int sched)
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
805
addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
809
if (sched & 1)
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
824
const char *buf, size_t len, int sched)
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
841
ret = t3_config_sched(adap, val, sched);
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
848
#define TM_ATTR(name, sched) \
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
852
return tm_attr_show(d, buf, sched); \
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
857
return tm_attr_store(d, buf, len, sched); \
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
939
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
955
req->sched = sched;
drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
889
__u8 sched;
drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
2972
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
2998
A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
3000
if (sched & 1)
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2063
void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
10284
void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
10290
addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
10292
if (sched & 1)
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
10304
addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
10306
if (sched & 1)
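
The cxgb3 and cxgb4 rate-limit hits repeat one addressing trick: two schedulers are packed per 32-bit register, so the register address is BASE - sched/2, and the sched & 1 test selects which half of the register the field sits in (assumed here to be the upper 16 bits for odd indices). A sketch under that assumption, with an invented base address:

    #include <stdio.h>

    #define RATE_LIMIT_BASE 0x7f0u  /* placeholder, not the real TP register offset */

    /* Two schedulers per register, registers stepping downward:
     * even index in bits 0..15, odd index assumed in bits 16..31. */
    static void sched_reg(int sched, unsigned int *addr, int *shift)
    {
            *addr  = RATE_LIMIT_BASE - sched / 2;
            *shift = (sched & 1) ? 16 : 0;
    }

    int main(void)
    {
            for (int s = 0; s < 4; s++) {
                    unsigned int addr;
                    int shift;

                    sched_reg(s, &addr, &shift);
                    printf("sched %d -> reg 0x%x, shift %d\n", s, addr, shift);
            }
            return 0;
    }
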
drivers/net/ethernet/intel/ice/ice_lag.c
1064
pvf_teid = le32_to_cpu(pv_ctx->sched.vsi_node[0]->info.node_teid);
drivers/net/ethernet/intel/ice/ice_lag.c
1068
svf_teid = le32_to_cpu(sv_ctx->sched.vsi_node[0]->info.node_teid);
drivers/net/ethernet/intel/ice/ice_lag.c
1108
q_node = ice_sched_find_node_by_teid(src_ctx->sched.vsi_node[0],
drivers/net/ethernet/intel/ice/ice_lag.c
1249
if (!ctx->sched.vsi_node[tc])
drivers/net/ethernet/intel/ice/ice_lag.c
1253
teid = ctx->sched.vsi_node[tc]->info.node_teid;
drivers/net/ethernet/intel/ice/ice_lag.c
1255
parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
drivers/net/ethernet/intel/ice/ice_lag.c
1305
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
drivers/net/ethernet/intel/ice/ice_lag.c
2469
if (!ctx->sched.vsi_node[tc])
drivers/net/ethernet/intel/ice/ice_lag.c
2473
teid = ctx->sched.vsi_node[tc]->info.node_teid;
drivers/net/ethernet/intel/ice/ice_lag.c
2475
parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
drivers/net/ethernet/intel/ice/ice_lag.c
2523
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
drivers/net/ethernet/intel/ice/ice_lag.c
651
if (!ctx->sched.vsi_node[tc])
drivers/net/ethernet/intel/ice/ice_lag.c
662
teid = ctx->sched.vsi_node[tc]->info.node_teid;
drivers/net/ethernet/intel/ice/ice_lag.c
664
parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
drivers/net/ethernet/intel/ice/ice_lag.c
718
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
drivers/net/ethernet/intel/ice/ice_sched.c
1540
vsi_node = vsi_ctx->sched.vsi_node[tc];
drivers/net/ethernet/intel/ice/ice_sched.c
1923
prev_numqs = vsi_ctx->sched.max_lanq[tc];
drivers/net/ethernet/intel/ice/ice_sched.c
1925
prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
drivers/net/ethernet/intel/ice/ice_sched.c
1963
vsi_ctx->sched.max_lanq[tc] = new_numqs;
drivers/net/ethernet/intel/ice/ice_sched.c
1965
vsi_ctx->sched.max_rdmaq[tc] = new_numqs;
drivers/net/ethernet/intel/ice/ice_sched.c
2024
vsi_ctx->sched.vsi_node[tc] = vsi_node;
drivers/net/ethernet/intel/ice/ice_sched.c
2030
vsi_ctx->sched.max_lanq[tc] = 0;
drivers/net/ethernet/intel/ice/ice_sched.c
2031
vsi_ctx->sched.max_rdmaq[tc] = 0;
drivers/net/ethernet/intel/ice/ice_sched.c
2189
vsi_ctx->sched.vsi_node[i] = NULL;
drivers/net/ethernet/intel/ice/ice_sched.c
2192
vsi_ctx->sched.max_lanq[i] = 0;
drivers/net/ethernet/intel/ice/ice_sched.c
2194
vsi_ctx->sched.max_rdmaq[i] = 0;
drivers/net/ethernet/intel/ice/ice_sched.c
3172
ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
drivers/net/ethernet/intel/ice/ice_sched.c
3175
ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
drivers/net/ethernet/intel/ice/ice_sched.c
3178
ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
drivers/net/ethernet/intel/ice/ice_sched.c
4035
node = vsi_ctx->sched.vsi_node[tc];
drivers/net/ethernet/intel/ice/ice_switch.h
45
struct ice_sched_vsi_info sched;
drivers/net/ethernet/intel/ice/virt/virtchnl.c
459
cfg->shaper.committed = vsi_ctx->sched.bw_t_info[i].cir_bw.bw;
drivers/net/ethernet/intel/ice/virt/virtchnl.c
460
cfg->shaper.peak = vsi_ctx->sched.bw_t_info[i].eir_bw.bw;
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
3801
u8 sched = *(u8 *)(inbox->buf + 64);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
3804
port = (sched >> 6 & 1) + 1;
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
688
u8 sched = *(u8 *)(inbox->buf + 64);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
694
port = (sched >> 6 & 1) + 1;
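
The mlx4 resource-tracker hits decode a 1-based port number from bit 6 of the QP context's sched byte; worked out:

    #include <stdio.h>

    /* Ports are 1-based, so extract bit 6 and add one. */
    static int sched_to_port(unsigned char sched)
    {
            return ((sched >> 6) & 1) + 1;
    }

    int main(void)
    {
            printf("%d %d\n", sched_to_port(0x00), sched_to_port(0x40)); /* 1 2 */
            return 0;
    }
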
drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
262
static u32 sparx5_dsm_cp_cal(u32 *sched)
drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
267
if (sched[idx] != SPX5_DSM_CAL_EMPTY) {
drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
268
tmp = sched[idx];
drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
269
sched[idx] = SPX5_DSM_CAL_EMPTY;
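
sparx5_dsm_cp_cal consumes calendar slots by reading a non-empty entry and writing SPX5_DSM_CAL_EMPTY back. A simplified pop-first-occupied variant of that scan (CAL_EMPTY and the length are placeholders):

    #include <stdio.h>

    #define CAL_LEN   8
    #define CAL_EMPTY 0xffffffffu   /* stands in for SPX5_DSM_CAL_EMPTY */

    /* Return the first occupied slot and leave it empty. */
    static unsigned int cal_pop(unsigned int *sched)
    {
            for (int idx = 0; idx < CAL_LEN; idx++) {
                    if (sched[idx] != CAL_EMPTY) {
                            unsigned int tmp = sched[idx];

                            sched[idx] = CAL_EMPTY;
                            return tmp;
                    }
            }
            return CAL_EMPTY;
    }

    int main(void)
    {
            unsigned int cal[CAL_LEN] = { CAL_EMPTY, 5, CAL_EMPTY, 7,
                                          CAL_EMPTY, CAL_EMPTY, CAL_EMPTY, CAL_EMPTY };

            printf("%u\n", cal_pop(cal));   /* 5 */
            return 0;
    }
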
drivers/net/wireless/ath/ath9k/ath9k.h
1009
struct ath_chanctx_sched sched;
drivers/net/wireless/ath/ath9k/channel.c
1043
struct ath_softc *sc = timer_container_of(sc, t, sched.timer);
drivers/net/wireless/ath/ath9k/channel.c
1163
switch (sc->sched.state) {
drivers/net/wireless/ath/ath9k/channel.c
1173
sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
drivers/net/wireless/ath/ath9k/channel.c
1282
if (!sc->sched.offchannel_pending)
drivers/net/wireless/ath/ath9k/channel.c
1283
sc->sched.offchannel_duration = 0;
drivers/net/wireless/ath/ath9k/channel.c
1285
if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE)
drivers/net/wireless/ath/ath9k/channel.c
1286
sc->sched.state = ATH_CHANCTX_STATE_IDLE;
drivers/net/wireless/ath/ath9k/channel.c
1298
sc->sched.channel_switch_time =
drivers/net/wireless/ath/ath9k/channel.c
1359
timer_setup(&sc->sched.timer, ath_chanctx_timer, 0);
drivers/net/wireless/ath/ath9k/channel.c
1557
timer_delete_sync(&sc->sched.timer);
drivers/net/wireless/ath/ath9k/channel.c
256
if (likely(sc->sched.channel_switch_time))
drivers/net/wireless/ath/ath9k/channel.c
258
usecs_to_jiffies(sc->sched.channel_switch_time);
drivers/net/wireless/ath/ath9k/channel.c
306
ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time);
drivers/net/wireless/ath/ath9k/channel.c
377
mod_timer(&sc->sched.timer, jiffies + timeout);
drivers/net/wireless/ath/ath9k/channel.c
394
if (ctx->active && sc->sched.extend_absence) {
drivers/net/wireless/ath/ath9k/channel.c
396
sc->sched.extend_absence = false;
drivers/net/wireless/ath/ath9k/channel.c
403
if (ctx->active && sc->sched.beacon_miss >= 2) {
drivers/net/wireless/ath/ath9k/channel.c
405
sc->sched.extend_absence = true;
drivers/net/wireless/ath/ath9k/channel.c
418
avp->offchannel_duration = sc->sched.offchannel_duration;
drivers/net/wireless/ath/ath9k/channel.c
446
if (sc->sched.extend_absence)
drivers/net/wireless/ath/ath9k/channel.c
448
sc->sched.channel_switch_time;
drivers/net/wireless/ath/ath9k/channel.c
452
sc->sched.channel_switch_time;
drivers/net/wireless/ath/ath9k/channel.c
455
sc->sched.extend_absence)
drivers/net/wireless/ath/ath9k/channel.c
479
avp->noa_duration = duration + sc->sched.channel_switch_time;
drivers/net/wireless/ath/ath9k/channel.c
508
chanctx_state_string(sc->sched.state),
drivers/net/wireless/ath/ath9k/channel.c
530
if (sc->sched.offchannel_pending && !sc->sched.wait_switch) {
drivers/net/wireless/ath/ath9k/channel.c
531
sc->sched.offchannel_pending = false;
drivers/net/wireless/ath/ath9k/channel.c
533
sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
drivers/net/wireless/ath/ath9k/channel.c
539
if (ctx->active && sc->sched.state == ATH_CHANCTX_STATE_IDLE) {
drivers/net/wireless/ath/ath9k/channel.c
541
sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
drivers/net/wireless/ath/ath9k/channel.c
547
if (sc->sched.state == ATH_CHANCTX_STATE_WAIT_FOR_TIMER) {
drivers/net/wireless/ath/ath9k/channel.c
548
sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
drivers/net/wireless/ath/ath9k/channel.c
553
if (sc->sched.mgd_prepare_tx)
drivers/net/wireless/ath/ath9k/channel.c
554
sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
drivers/net/wireless/ath/ath9k/channel.c
563
sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) {
drivers/net/wireless/ath/ath9k/channel.c
571
if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
drivers/net/wireless/ath/ath9k/channel.c
576
sc->sched.beacon_pending = true;
drivers/net/wireless/ath/ath9k/channel.c
577
sc->sched.next_tbtt = REG_READ(ah, AR_NEXT_TBTT_TIMER);
drivers/net/wireless/ath/ath9k/channel.c
583
tsf_time = sc->sched.next_tbtt + beacon_int / 4;
drivers/net/wireless/ath/ath9k/channel.c
584
sc->sched.switch_start_time = tsf_time;
drivers/net/wireless/ath/ath9k/channel.c
585
sc->cur_chan->last_beacon = sc->sched.next_tbtt;
drivers/net/wireless/ath/ath9k/channel.c
605
if (sc->sched.mgd_prepare_tx) {
drivers/net/wireless/ath/ath9k/channel.c
621
(!avp->noa_duration || sc->sched.force_noa_update))
drivers/net/wireless/ath/ath9k/channel.c
625
if (ctx->active && sc->sched.force_noa_update)
drivers/net/wireless/ath/ath9k/channel.c
626
sc->sched.force_noa_update = false;
drivers/net/wireless/ath/ath9k/channel.c
630
if (!sc->sched.beacon_pending) {
drivers/net/wireless/ath/ath9k/channel.c
636
sc->sched.beacon_pending = false;
drivers/net/wireless/ath/ath9k/channel.c
638
if (sc->sched.mgd_prepare_tx) {
drivers/net/wireless/ath/ath9k/channel.c
639
sc->sched.mgd_prepare_tx = false;
drivers/net/wireless/ath/ath9k/channel.c
646
if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
drivers/net/wireless/ath/ath9k/channel.c
652
sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER;
drivers/net/wireless/ath/ath9k/channel.c
653
ath_chanctx_setup_timer(sc, sc->sched.switch_start_time);
drivers/net/wireless/ath/ath9k/channel.c
656
if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_TIMER)
drivers/net/wireless/ath/ath9k/channel.c
660
sc->sched.beacon_pending)
drivers/net/wireless/ath/ath9k/channel.c
661
sc->sched.beacon_miss++;
drivers/net/wireless/ath/ath9k/channel.c
666
sc->sched.state = ATH_CHANCTX_STATE_SWITCH;
drivers/net/wireless/ath/ath9k/channel.c
674
sc->sched.beacon_pending = false;
drivers/net/wireless/ath/ath9k/channel.c
675
sc->sched.beacon_miss = 0;
drivers/net/wireless/ath/ath9k/channel.c
677
if (sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
drivers/net/wireless/ath/ath9k/channel.c
678
!sc->sched.beacon_adjust ||
drivers/net/wireless/ath/ath9k/channel.c
687
tsf_time = sc->sched.switch_start_time;
drivers/net/wireless/ath/ath9k/channel.c
692
sc->sched.beacon_adjust = false;
drivers/net/wireless/ath/ath9k/channel.c
696
if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE ||
drivers/net/wireless/ath/ath9k/channel.c
703
sc->sched.state = ATH_CHANCTX_STATE_IDLE;
drivers/net/wireless/ath/ath9k/channel.c
707
sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
drivers/net/wireless/ath/ath9k/channel.c
721
sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER;
drivers/net/wireless/ath/ath9k/channel.c
722
sc->sched.wait_switch = false;
drivers/net/wireless/ath/ath9k/channel.c
726
if (sc->sched.extend_absence) {
drivers/net/wireless/ath/ath9k/channel.c
727
sc->sched.beacon_miss = 0;
drivers/net/wireless/ath/ath9k/channel.c
731
tsf_time -= sc->sched.channel_switch_time;
drivers/net/wireless/ath/ath9k/channel.c
733
sc->sched.switch_start_time = tsf_time;
drivers/net/wireless/ath/ath9k/channel.c
736
sc->sched.beacon_pending = true;
drivers/net/wireless/ath/ath9k/channel.c
737
sc->sched.beacon_adjust = true;
drivers/net/wireless/ath/ath9k/channel.c
751
sc->sched.state = ATH_CHANCTX_STATE_IDLE;
drivers/net/wireless/ath/ath9k/channel.c
756
sc->sched.state = ATH_CHANCTX_STATE_IDLE;
drivers/net/wireless/ath/ath9k/channel.c
775
if (sc->sched.beacon_pending)
drivers/net/wireless/ath/ath9k/channel.c
808
sc->sched.offchannel_pending = true;
drivers/net/wireless/ath/ath9k/channel.c
809
sc->sched.wait_switch = true;
drivers/net/wireless/ath/ath9k/channel.c
810
sc->sched.offchannel_duration =
drivers/net/wireless/ath/ath9k/channel.c
812
sc->sched.channel_switch_time;
drivers/net/wireless/ath/ath9k/channel.c
828
sc->sched.offchannel_duration =
drivers/net/wireless/ath/ath9k/channel.c
830
sc->sched.channel_switch_time;
drivers/net/wireless/ath/ath9k/channel.c
836
sc->sched.offchannel_duration);
drivers/net/wireless/ath/ath9k/channel.c
922
sc->sched.offchannel_pending = false;
drivers/net/wireless/ath/ath9k/channel.c
923
sc->sched.wait_switch = false;
drivers/net/wireless/ath/ath9k/channel.c
978
sc->sched.force_noa_update = true;
drivers/net/wireless/ath/ath9k/main.c
2716
sc->sched.mgd_prepare_tx = true;
drivers/net/wireless/ath/ath9k/main.c
2732
sc->sched.mgd_prepare_tx = false;
drivers/net/wireless/ath/ath9k/main.c
2745
sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE;
drivers/net/wireless/ath/ath9k/main.c
511
bool sched = false;
drivers/net/wireless/ath/ath9k/main.c
553
sched = true;
drivers/net/wireless/ath/ath9k/main.c
594
if (sched) {
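
The ath9k channel.c hits all poke one state machine, sc->sched.state, whose values appear in the listing: IDLE, WAIT_FOR_BEACON, WAIT_FOR_TIMER, SWITCH, FORCE_ACTIVE. One transition, a sent beacon arming the switch timer, as an illustrative guess distilled from those lines:

    #include <stdio.h>

    enum chanctx_state { IDLE, WAIT_FOR_BEACON, WAIT_FOR_TIMER, SWITCH, FORCE_ACTIVE };

    /* After the beacon goes out, the channel switch is driven by a
     * timer rather than by the next beacon. */
    static enum chanctx_state on_beacon_sent(enum chanctx_state s)
    {
            if (s != WAIT_FOR_BEACON)
                    return s;               /* nothing pending */
            return WAIT_FOR_TIMER;          /* then arm the switch timer */
    }

    int main(void)
    {
            printf("%d\n", on_beacon_sent(WAIT_FOR_BEACON) == WAIT_FOR_TIMER); /* 1 */
            return 0;
    }
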
drivers/net/wireless/mediatek/mt76/mt76.h
706
} sched;
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
540
seq_printf(s, "pse_data_quota\t%d\n", sdio->sched.pse_data_quota);
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
541
seq_printf(s, "ple_data_quota\t%d\n", sdio->sched.ple_data_quota);
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
542
seq_printf(s, "pse_mcu_quota\t%d\n", sdio->sched.pse_mcu_quota);
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
543
seq_printf(s, "sched_deficit\t%d\n", sdio->sched.deficit);
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
24
sdio->sched.pse_data_quota = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP,
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
26
sdio->sched.pse_mcu_quota = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP,
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
28
sdio->sched.ple_data_quota = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP,
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
30
sdio->sched.pse_page_size = MT_PSE_PAGE_SZ;
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
33
sdio->sched.deficit = txdwcnt << 2;
drivers/net/wireless/mediatek/mt76/mt7915/coredump.c
202
mt7915_memcpy_fromio(dev, dump->sched, base + 0xc + y * 12,
drivers/net/wireless/mediatek/mt76/mt7915/coredump.c
203
sizeof(dump->sched));
drivers/net/wireless/mediatek/mt76/mt7915/coredump.h
45
} sched[60];
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
1249
iter->sched ? 's' : 'u',
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2202
if (!iter || !iter->sched || iter->start_tsf > duration) {
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2214
if (!iter_next->sched ||
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2350
flow->sched = true;
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
130
u8 sched:1;
drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
256
seq_printf(s, "pse_data_quota\t%d\n", sdio->sched.pse_data_quota);
drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
257
seq_printf(s, "ple_data_quota\t%d\n", sdio->sched.ple_data_quota);
drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
258
seq_printf(s, "pse_mcu_quota\t%d\n", sdio->sched.pse_mcu_quota);
drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
259
seq_printf(s, "sched_deficit\t%d\n", sdio->sched.deficit);
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
515
sdio->sched.pse_data_quota = le32_to_cpu(tx_res->pse_data_quota);
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
518
sdio->sched.pse_mcu_quota =
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
519
sdio->sched.pse_mcu_quota ? sdio->pse_mcu_quota_max : sdio->pse_mcu_quota_max - 1;
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
520
sdio->sched.ple_data_quota = le32_to_cpu(tx_res->ple_data_quota);
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
521
sdio->sched.pse_page_size = le16_to_cpu(tx_res->pse_page_size);
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
522
sdio->sched.deficit = tx_res->pp_padding;
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
811
iter->sched ? 's' : 'u',
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
3121
if (!iter || !iter->sched || iter->start_tsf > duration) {
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
3133
if (!iter_next->sched ||
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
3270
flow->sched = true;
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
214
u8 sched:1;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
193
pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit,
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
194
sdio->sched.pse_page_size);
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
200
if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz)
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
203
if (sdio->sched.pse_data_quota < *pse_size + pse_sz ||
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
204
sdio->sched.ple_data_quota < *ple_size + 1)
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
219
sdio->sched.pse_mcu_quota -= pse_size;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
221
sdio->sched.pse_data_quota -= pse_size;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
222
sdio->sched.ple_data_quota -= ple_size;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
48
sdio->sched.pse_mcu_quota += pse_mcu_quota;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
50
sdio->sched.pse_mcu_quota > sdio->pse_mcu_quota_max) {
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
51
sdio->sched.pse_mcu_quota = sdio->pse_mcu_quota_max;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
53
sdio->sched.pse_data_quota += pse_data_quota;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
54
sdio->sched.ple_data_quota += ple_data_quota;
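
All the mt76 SDIO hits implement one accounting scheme: a frame costs DIV_ROUND_UP(buf_sz + deficit, pse_page_size) PSE pages plus one PLE buffer, both pools are checked before committing, and firmware credits top the pools back up. The claim side, runnable (field names follow the listing, the numbers are invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct quota {
            int pse_data_quota;     /* data pages left in the PSE pool */
            int ple_data_quota;     /* buffers left in the PLE pool */
            int pse_page_size;      /* bytes per PSE page */
            int deficit;            /* fixed per-frame padding cost */
    };

    /* Claim room for one buf_sz-byte frame: compute the page cost,
     * test both pools, and only then subtract. */
    static bool claim(struct quota *q, int buf_sz)
    {
            int pse_sz = DIV_ROUND_UP(buf_sz + q->deficit, q->pse_page_size);

            if (q->pse_data_quota < pse_sz || q->ple_data_quota < 1)
                    return false;

            q->pse_data_quota -= pse_sz;
            q->ple_data_quota -= 1;
            return true;
    }

    int main(void)
    {
            struct quota q = { .pse_data_quota = 10, .ple_data_quota = 2,
                               .pse_page_size = 128, .deficit = 8 };

            printf("%d\n", claim(&q, 1000));        /* needs 8 pages: 1 */
            printf("%d\n", claim(&q, 1000));        /* only 2 left: 0 */
            return 0;
    }
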
drivers/slimbus/core.c
268
mutex_init(&ctrl->sched.m_reconf);
drivers/slimbus/core.c
269
init_completion(&ctrl->sched.pause_comp);
drivers/slimbus/core.c
490
if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
drivers/slimbus/core.c
492
ctrl->sched.clk_state, ret);
drivers/slimbus/messaging.c
121
if (ctrl->sched.clk_state == SLIM_CLK_ENTERING_PAUSE &&
drivers/slimbus/messaging.c
129
if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
drivers/slimbus/messaging.c
131
ctrl->sched.clk_state, ret);
drivers/slimbus/sched.c
112
sched->clk_state = SLIM_CLK_ACTIVE;
drivers/slimbus/sched.c
114
sched->clk_state = SLIM_CLK_PAUSED;
drivers/slimbus/sched.c
115
complete(&sched->pause_comp);
drivers/slimbus/sched.c
117
mutex_unlock(&sched->m_reconf);
drivers/slimbus/sched.c
29
struct slim_sched *sched = &ctrl->sched;
drivers/slimbus/sched.c
38
mutex_lock(&sched->m_reconf);
drivers/slimbus/sched.c
40
if (sched->clk_state == SLIM_CLK_ACTIVE) {
drivers/slimbus/sched.c
41
mutex_unlock(&sched->m_reconf);
drivers/slimbus/sched.c
49
ret = wait_for_completion_timeout(&sched->pause_comp,
drivers/slimbus/sched.c
52
mutex_unlock(&sched->m_reconf);
drivers/slimbus/sched.c
63
if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
drivers/slimbus/sched.c
66
sched->clk_state = SLIM_CLK_ACTIVE;
drivers/slimbus/sched.c
67
mutex_unlock(&sched->m_reconf);
drivers/slimbus/sched.c
73
if (ctrl->sched.clk_state == SLIM_CLK_PAUSED) {
drivers/slimbus/sched.c
74
mutex_unlock(&sched->m_reconf);
drivers/slimbus/sched.c
83
mutex_unlock(&sched->m_reconf);
drivers/slimbus/sched.c
89
sched->clk_state = SLIM_CLK_ENTERING_PAUSE;
drivers/slimbus/slimbus.h
413
struct slim_sched sched;
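
The slimbus entries sketch clock-pause handshaking: the requester sets clk_state to SLIM_CLK_ENTERING_PAUSE under m_reconf and waits on pause_comp; the completion side settles the state to PAUSED on success or back to ACTIVE on failure. The settle step as a tiny pure function (locking and the completion omitted):

    #include <stdio.h>

    enum clk_state { CLK_ACTIVE, CLK_ENTERING_PAUSE, CLK_PAUSED };

    /* Resolve a pause attempt; a no-op unless a pause is in flight. */
    static enum clk_state finish_pause(enum clk_state s, int ok)
    {
            if (s != CLK_ENTERING_PAUSE)
                    return s;
            return ok ? CLK_PAUSED : CLK_ACTIVE;  /* then complete(&pause_comp) */
    }

    int main(void)
    {
            printf("%d\n", finish_pause(CLK_ENTERING_PAUSE, 1) == CLK_PAUSED); /* 1 */
            return 0;
    }
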
drivers/usb/fotg210/fotg210-hcd.c
4072
struct fotg210_iso_sched *sched;
drivers/usb/fotg210/fotg210-hcd.c
4075
sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
drivers/usb/fotg210/fotg210-hcd.c
4076
if (unlikely(sched == NULL))
drivers/usb/fotg210/fotg210-hcd.c
4079
itd_sched_init(fotg210, sched, stream, urb);
drivers/usb/fotg210/fotg210-hcd.c
4082
num_itds = 1 + (sched->span + 7) / 8;
drivers/usb/fotg210/fotg210-hcd.c
4108
iso_sched_free(stream, sched);
drivers/usb/fotg210/fotg210-hcd.c
4116
list_add(&itd->itd_list, &sched->td_list);
drivers/usb/fotg210/fotg210-hcd.c
4121
urb->hcpriv = sched;
drivers/usb/fotg210/fotg210-hcd.c
4160
struct fotg210_iso_sched *sched = urb->hcpriv;
drivers/usb/fotg210/fotg210-hcd.c
4163
span = sched->span;
drivers/usb/fotg210/fotg210-hcd.c
4270
iso_sched_free(stream, sched);
drivers/usb/host/ehci-sched.c
1243
struct ehci_iso_sched *sched;
drivers/usb/host/ehci-sched.c
1246
sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
drivers/usb/host/ehci-sched.c
1247
if (unlikely(sched == NULL))
drivers/usb/host/ehci-sched.c
1250
itd_sched_init(ehci, sched, stream, urb);
drivers/usb/host/ehci-sched.c
1253
num_itds = 1 + (sched->span + 7) / 8;
drivers/usb/host/ehci-sched.c
1279
iso_sched_free(stream, sched);
drivers/usb/host/ehci-sched.c
1288
list_add(&itd->itd_list, &sched->td_list);
drivers/usb/host/ehci-sched.c
1293
urb->hcpriv = sched;
drivers/usb/host/ehci-sched.c
1387
struct ehci_iso_sched *sched,
drivers/usb/host/ehci-sched.c
1480
struct ehci_iso_sched *sched = urb->hcpriv;
drivers/usb/host/ehci-sched.c
1485
span = sched->span;
drivers/usb/host/ehci-sched.c
1523
sched, tt))
drivers/usb/host/ehci-sched.c
1624
iso_sched_free(stream, sched);
drivers/usb/host/ehci-sched.c
1625
sched = NULL;
drivers/usb/host/ehci-sched.c
1629
if (sched)
drivers/usb/host/ehci-sched.c
1630
sched->first_packet = urb->error_count;
drivers/usb/host/ehci-sched.c
1656
iso_sched_free(stream, sched);
drivers/usb/host/ehci-sched.c
2150
struct ehci_iso_sched *sched = urb->hcpriv;
drivers/usb/host/ehci-sched.c
2168
for (packet = sched->first_packet, sitd = NULL;
drivers/usb/host/ehci-sched.c
2173
BUG_ON(list_empty(&sched->td_list));
drivers/usb/host/ehci-sched.c
2177
sitd = list_entry(sched->td_list.next,
drivers/usb/host/ehci-sched.c
2183
sitd_patch(ehci, stream, sitd, sched, packet);
drivers/usb/host/ehci-sched.c
2192
iso_sched_free(stream, sched);
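
Both isochronous paths (fotg210 and ehci-sched) budget descriptors identically: num_itds = 1 + (span + 7) / 8, a ceiling division of the schedule span in uframes by the 8 uframes one iTD covers, plus one spare for a transfer that starts partway through a frame. Worked out:

    #include <stdio.h>

    /* One iTD spans up to 8 uframes; keep one extra for the overhang. */
    static int itds_for_span(int span_uframes)
    {
            return 1 + (span_uframes + 7) / 8;
    }

    int main(void)
    {
            printf("%d %d %d\n", itds_for_span(1),   /* 2 */
                                 itds_for_span(8),   /* 2 */
                                 itds_for_span(9));  /* 3 */
            return 0;
    }
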
include/asm-generic/vmlinux.lds.h
624
*(.sched.text) \
include/drm/gpu_scheduler.h
252
struct drm_gpu_scheduler *sched;
include/drm/gpu_scheduler.h
299
struct drm_gpu_scheduler *sched;
include/drm/gpu_scheduler.h
355
struct drm_gpu_scheduler *sched;
include/drm/gpu_scheduler.h
632
int drm_sched_init(struct drm_gpu_scheduler *sched,
include/drm/gpu_scheduler.h
635
void drm_sched_fini(struct drm_gpu_scheduler *sched);
include/drm/gpu_scheduler.h
637
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
include/drm/gpu_scheduler.h
638
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
include/drm/gpu_scheduler.h
640
void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
include/drm/gpu_scheduler.h
641
bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
include/drm/gpu_scheduler.h
642
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
include/drm/gpu_scheduler.h
643
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
include/drm/gpu_scheduler.h
644
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
include/drm/gpu_scheduler.h
645
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
include/drm/gpu_scheduler.h
646
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
include/drm/gpu_scheduler.h
647
void drm_sched_fault(struct drm_gpu_scheduler *sched);
include/drm/gpu_scheduler.h
648
bool drm_sched_is_stopped(struct drm_gpu_scheduler *sched);
include/drm/gpu_scheduler.h
708
struct drm_gpu_scheduler *sched;
include/drm/gpu_scheduler.h
713
__drm_sched_pending_job_iter_begin(struct drm_gpu_scheduler *sched)
include/drm/gpu_scheduler.h
716
.sched = sched,
include/drm/gpu_scheduler.h
719
WARN_ON(!drm_sched_is_stopped(sched));
include/drm/gpu_scheduler.h
727
WARN_ON(!drm_sched_is_stopped(iter.sched));
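
The iterator hits pair drm_sched_for_each_pending_job with WARN_ON(!drm_sched_is_stopped(...)): walking the pending list is only legitimate while the scheduler is stopped, so the check lives inside the iterator itself. That guard-in-the-iterator idiom generically (assert standing in for WARN_ON; every type here is invented):

    #include <assert.h>
    #include <stdio.h>

    struct job { struct job *next; int id; };

    struct toy_sched {
            int stopped;            /* drm_sched_is_stopped() analogue */
            struct job *pending;    /* pending-job list */
    };

    /* Refuse (loudly) to iterate a live scheduler. */
    #define for_each_pending_job(j, s) \
            for (assert((s)->stopped), (j) = (s)->pending; (j); (j) = (j)->next)

    int main(void)
    {
            struct job j2 = { NULL, 2 }, j1 = { &j2, 1 };
            struct toy_sched s = { .stopped = 1, .pending = &j1 };
            struct job *j;

            for_each_pending_job(j, &s)
                    printf("job %d\n", j->id);
            return 0;
    }
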
include/net/ip_vs.h
1523
struct ip_vs_scheduler *sched);
include/net/sctp/stream_sched.h
45
enum sctp_sched_type sched);
include/net/sctp/stream_sched.h
57
void sctp_sched_ops_register(enum sctp_sched_type sched,
include/net/sctp/structs.h
1072
const struct sctp_sched_ops *sched;
include/trace/events/sched.h
3
#define TRACE_SYSTEM sched
include/trace/events/xdp.h
175
int sched, struct xdp_cpumap_stats *xdp_stats),
include/trace/events/xdp.h
177
TP_ARGS(map_id, processed, drops, sched, xdp_stats),
include/trace/events/xdp.h
185
__field(int, sched)
include/trace/events/xdp.h
197
__entry->sched = sched;
include/trace/events/xdp.h
211
__entry->sched,
kernel/bpf/cpumap.c
306
unsigned int kmem_alloc_drops = 0, sched = 0;
kernel/bpf/cpumap.c
319
sched = 1;
kernel/bpf/cpumap.c
326
sched = cond_resched();
kernel/bpf/cpumap.c
390
sched, &stats);
net/ipv6/ip6_flowlabel.c
147
unsigned long sched = 0;
net/ipv6/ip6_flowlabel.c
169
if (!sched || time_before(ttd, sched))
net/ipv6/ip6_flowlabel.c
170
sched = ttd;
net/ipv6/ip6_flowlabel.c
175
if (!sched && atomic_read(&fl_size))
net/ipv6/ip6_flowlabel.c
176
sched = now + FL_MAX_LINGER;
net/ipv6/ip6_flowlabel.c
177
if (sched) {
net/ipv6/ip6_flowlabel.c
178
mod_timer(&ip6_fl_gc_timer, sched);
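
The flow-label GC rearms itself with the earliest time-to-die it saw, falling back to now + FL_MAX_LINGER while labels remain. The same selection in plain C (a plain < standing in for the wrap-safe time_before()):

    #include <stdio.h>

    #define MAX_LINGER 60

    /* Earliest time-to-die wins; if nothing is expiring but labels
     * remain, fall back to a fixed linger from now.  0 = no rearm. */
    static unsigned long next_gc(const unsigned long *ttd, int n,
                                 unsigned long now, int entries_left)
    {
            unsigned long sched = 0;

            for (int i = 0; i < n; i++)
                    if (!sched || ttd[i] < sched)
                            sched = ttd[i];
            if (!sched && entries_left)
                    sched = now + MAX_LINGER;
            return sched;
    }

    int main(void)
    {
            unsigned long ttd[] = { 130, 110, 140 };

            printf("%lu\n", next_gc(ttd, 3, 100, 1));       /* 110 */
            printf("%lu\n", next_gc(ttd, 0, 100, 1));       /* 160 */
            return 0;
    }
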
net/mptcp/ctrl.c
116
struct mptcp_sched_ops *sched;
net/mptcp/ctrl.c
120
sched = mptcp_sched_find(name);
net/mptcp/ctrl.c
121
if (sched)
net/mptcp/protocol.c
3545
mptcp_init_sched(msk, mptcp_sk(sk)->sched);
net/mptcp/protocol.h
342
struct mptcp_sched_ops *sched;
net/mptcp/protocol.h
808
int mptcp_validate_scheduler(struct mptcp_sched_ops *sched);
net/mptcp/protocol.h
809
int mptcp_register_scheduler(struct mptcp_sched_ops *sched);
net/mptcp/protocol.h
810
void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched);
net/mptcp/protocol.h
813
struct mptcp_sched_ops *sched);
net/mptcp/sched.c
104
if (mptcp_sched_find(sched->name)) {
net/mptcp/sched.c
108
list_add_tail_rcu(&sched->list, &mptcp_sched_list);
net/mptcp/sched.c
111
pr_debug("%s registered\n", sched->name);
net/mptcp/sched.c
115
void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
net/mptcp/sched.c
117
if (sched == &mptcp_sched_default)
net/mptcp/sched.c
121
list_del_rcu(&sched->list);
net/mptcp/sched.c
131
struct mptcp_sched_ops *sched)
net/mptcp/sched.c
133
if (!sched)
net/mptcp/sched.c
134
sched = &mptcp_sched_default;
net/mptcp/sched.c
136
if (!bpf_try_module_get(sched, sched->owner))
net/mptcp/sched.c
139
msk->sched = sched;
net/mptcp/sched.c
140
if (msk->sched->init)
net/mptcp/sched.c
141
msk->sched->init(msk);
net/mptcp/sched.c
143
pr_debug("sched=%s\n", msk->sched->name);
net/mptcp/sched.c
150
struct mptcp_sched_ops *sched = msk->sched;
net/mptcp/sched.c
152
if (!sched)
net/mptcp/sched.c
155
msk->sched = NULL;
net/mptcp/sched.c
156
if (sched->release)
net/mptcp/sched.c
157
sched->release(msk);
net/mptcp/sched.c
159
bpf_module_put(sched, sched->owner);
net/mptcp/sched.c
190
if (msk->sched == &mptcp_sched_default || !msk->sched)
net/mptcp/sched.c
192
return msk->sched->get_send(msk);
net/mptcp/sched.c
210
if (msk->sched == &mptcp_sched_default || !msk->sched)
net/mptcp/sched.c
212
if (msk->sched->get_retrans)
net/mptcp/sched.c
213
return msk->sched->get_retrans(msk);
net/mptcp/sched.c
214
return msk->sched->get_send(msk);
net/mptcp/sched.c
53
struct mptcp_sched_ops *sched, *ret = NULL;
net/mptcp/sched.c
55
list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
net/mptcp/sched.c
56
if (!strcmp(sched->name, name)) {
net/mptcp/sched.c
57
ret = sched;
net/mptcp/sched.c
70
struct mptcp_sched_ops *sched;
net/mptcp/sched.c
74
list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
net/mptcp/sched.c
77
offs == 0 ? "" : " ", sched->name);
net/mptcp/sched.c
85
int mptcp_validate_scheduler(struct mptcp_sched_ops *sched)
net/mptcp/sched.c
87
if (!sched->get_send) {
net/mptcp/sched.c
88
pr_err("%s does not implement required ops\n", sched->name);
net/mptcp/sched.c
95
int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
net/mptcp/sched.c
99
ret = mptcp_validate_scheduler(sched);
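
The mptcp scheduler hits form a classic ops registry: validate that the required get_send hook exists, reject duplicate names, link into a list, and look up by strcmp. The shape in a self-contained sketch (a bare singly linked list in place of the RCU list):

    #include <stdio.h>
    #include <string.h>

    struct sched_ops {
            const char *name;
            int (*get_send)(void);          /* the one required hook */
            struct sched_ops *next;
    };

    static struct sched_ops *registry;

    static struct sched_ops *sched_find(const char *name)
    {
            for (struct sched_ops *s = registry; s; s = s->next)
                    if (!strcmp(s->name, name))
                            return s;
            return NULL;
    }

    /* Validate the required hook, reject duplicate names, then link. */
    static int sched_register(struct sched_ops *s)
    {
            if (!s->get_send)
                    return -1;
            if (sched_find(s->name))
                    return -1;
            s->next = registry;
            registry = s;
            return 0;
    }

    static int dummy_get_send(void) { return 0; }

    int main(void)
    {
            static struct sched_ops rr = { "rr", dummy_get_send, NULL };

            printf("%d\n", sched_register(&rr));    /* 0 */
            printf("%d\n", sched_register(&rr));    /* -1: duplicate */
            return 0;
    }
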
net/netfilter/ipvs/ip_vs_core.c
341
struct ip_vs_scheduler *sched;
net/netfilter/ipvs/ip_vs_core.c
348
sched = rcu_dereference(svc->scheduler);
net/netfilter/ipvs/ip_vs_core.c
349
if (sched) {
net/netfilter/ipvs/ip_vs_core.c
352
dest = sched->schedule(svc, skb, iph);
net/netfilter/ipvs/ip_vs_core.c
446
struct ip_vs_scheduler *sched;
net/netfilter/ipvs/ip_vs_core.c
523
sched = rcu_dereference(svc->scheduler);
net/netfilter/ipvs/ip_vs_core.c
524
if (sched) {
net/netfilter/ipvs/ip_vs_core.c
527
dest = sched->schedule(svc, skb, iph);
net/netfilter/ipvs/ip_vs_ctl.c
1042
sched = rcu_dereference_protected(svc->scheduler, 1);
net/netfilter/ipvs/ip_vs_ctl.c
1043
if (sched && sched->add_dest)
net/netfilter/ipvs/ip_vs_ctl.c
1044
sched->add_dest(svc, dest);
net/netfilter/ipvs/ip_vs_ctl.c
1046
sched = rcu_dereference_protected(svc->scheduler, 1);
net/netfilter/ipvs/ip_vs_ctl.c
1047
if (sched && sched->upd_dest)
net/netfilter/ipvs/ip_vs_ctl.c
1048
sched->upd_dest(svc, dest);
net/netfilter/ipvs/ip_vs_ctl.c
1290
struct ip_vs_scheduler *sched;
net/netfilter/ipvs/ip_vs_ctl.c
1292
sched = rcu_dereference_protected(svc->scheduler, 1);
net/netfilter/ipvs/ip_vs_ctl.c
1293
if (sched && sched->del_dest)
net/netfilter/ipvs/ip_vs_ctl.c
1294
sched->del_dest(svc, dest);
net/netfilter/ipvs/ip_vs_ctl.c
1371
struct ip_vs_scheduler *sched = NULL;
net/netfilter/ipvs/ip_vs_ctl.c
1382
sched = ip_vs_scheduler_get(u->sched_name);
net/netfilter/ipvs/ip_vs_ctl.c
1383
if (!sched) {
net/netfilter/ipvs/ip_vs_ctl.c
1451
if (sched) {
net/netfilter/ipvs/ip_vs_ctl.c
1452
ret = ip_vs_bind_scheduler(svc, sched);
net/netfilter/ipvs/ip_vs_ctl.c
1455
sched = NULL;
net/netfilter/ipvs/ip_vs_ctl.c
1500
ip_vs_unbind_scheduler(svc, sched);
net/netfilter/ipvs/ip_vs_ctl.c
1503
ip_vs_scheduler_put(sched);
net/netfilter/ipvs/ip_vs_ctl.c
1519
struct ip_vs_scheduler *sched = NULL, *old_sched;
net/netfilter/ipvs/ip_vs_ctl.c
1528
sched = ip_vs_scheduler_get(u->sched_name);
net/netfilter/ipvs/ip_vs_ctl.c
1529
if (!sched) {
net/netfilter/ipvs/ip_vs_ctl.c
1535
old_sched = sched;
net/netfilter/ipvs/ip_vs_ctl.c
1560
if (sched != old_sched) {
net/netfilter/ipvs/ip_vs_ctl.c
1568
if (sched) {
net/netfilter/ipvs/ip_vs_ctl.c
1569
ret = ip_vs_bind_scheduler(svc, sched);
net/netfilter/ipvs/ip_vs_ctl.c
1571
ip_vs_scheduler_put(sched);
net/netfilter/ipvs/ip_vs_ctl.c
2412
struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
net/netfilter/ipvs/ip_vs_ctl.c
2413
char *sched_name = sched ? sched->name : "none";
net/netfilter/ipvs/ip_vs_ctl.c
2844
struct ip_vs_scheduler *sched;
net/netfilter/ipvs/ip_vs_ctl.c
2848
sched = rcu_dereference_protected(src->scheduler, 1);
net/netfilter/ipvs/ip_vs_ctl.c
2849
sched_name = sched ? sched->name : "none";
net/netfilter/ipvs/ip_vs_ctl.c
3318
struct ip_vs_scheduler *sched;
net/netfilter/ipvs/ip_vs_ctl.c
3342
sched = rcu_dereference_protected(svc->scheduler, 1);
net/netfilter/ipvs/ip_vs_ctl.c
3343
sched_name = sched ? sched->name : "none";
net/netfilter/ipvs/ip_vs_ctl.c
971
struct ip_vs_scheduler *sched;
net/netfilter/ipvs/ip_vs_sched.c
101
module_put(sched->module);
net/netfilter/ipvs/ip_vs_sched.c
114
struct ip_vs_scheduler *sched;
net/netfilter/ipvs/ip_vs_sched.c
119
sched = ip_vs_sched_getbyname(sched_name);
net/netfilter/ipvs/ip_vs_sched.c
124
if (sched == NULL) {
net/netfilter/ipvs/ip_vs_sched.c
126
sched = ip_vs_sched_getbyname(sched_name);
net/netfilter/ipvs/ip_vs_sched.c
129
return sched;
net/netfilter/ipvs/ip_vs_sched.c
144
struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
net/netfilter/ipvs/ip_vs_sched.c
145
char *sched_name = sched ? sched->name : "none";
net/netfilter/ipvs/ip_vs_sched.c
168
struct ip_vs_scheduler *sched;
net/netfilter/ipvs/ip_vs_sched.c
198
list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
net/netfilter/ipvs/ip_vs_sched.c
199
if (strcmp(scheduler->name, sched->name) == 0) {
net/netfilter/ipvs/ip_vs_sched.c
60
struct ip_vs_scheduler *sched)
net/netfilter/ipvs/ip_vs_sched.c
69
if (sched->done_service)
net/netfilter/ipvs/ip_vs_sched.c
70
sched->done_service(svc);
net/netfilter/ipvs/ip_vs_sched.c
80
struct ip_vs_scheduler *sched;
net/netfilter/ipvs/ip_vs_sched.c
86
list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
net/netfilter/ipvs/ip_vs_sched.c
90
if (sched->module && !try_module_get(sched->module)) {
net/netfilter/ipvs/ip_vs_sched.c
96
if (strcmp(sched_name, sched->name)==0) {
net/netfilter/ipvs/ip_vs_sched.c
99
return sched;
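
ip_vs_sched_getbyname adds one wrinkle to the registry pattern: each candidate's module is pinned before the name compare and released again on mismatch, so a match is returned with a reference already held. Sketch with a plain counter in place of the module refcount:

    #include <stdio.h>
    #include <string.h>

    struct scheduler {
            const char *name;
            int refcnt;             /* stands in for the module refcount */
            struct scheduler *next;
    };

    /* Pin each candidate before the compare; drop the pin on mismatch,
     * so a hit is returned with its reference already held. */
    static struct scheduler *sched_get(struct scheduler *list, const char *name)
    {
            for (struct scheduler *s = list; s; s = s->next) {
                    s->refcnt++;                    /* try_module_get() */
                    if (!strcmp(name, s->name))
                            return s;
                    s->refcnt--;                    /* module_put() */
            }
            return NULL;
    }

    int main(void)
    {
            struct scheduler wrr = { "wrr", 0, NULL };
            struct scheduler rr  = { "rr",  0, &wrr };
            struct scheduler *s  = sched_get(&rr, "wrr");

            printf("%s ref=%d\n", s->name, s->refcnt);      /* wrr ref=1 */
            return 0;
    }
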
net/sched/act_gate.c
263
struct tcf_gate_params *sched,
net/sched/act_gate.c
293
list_add_tail(&entry->list, &sched->entries);
net/sched/act_gate.c
297
sched->num_entries = i;
net/sched/act_gate.c
302
release_entry_list(&sched->entries);
net/sched/sch_hfsc.c
1058
cl->sched = q;
net/sched/sch_hfsc.c
121
struct hfsc_sched *sched; /* scheduler data */
net/sched/sch_hfsc.c
1419
q->root.sched = q;
net/sched/sch_hfsc.c
192
struct rb_node **p = &cl->sched->eligible.rb_node;
net/sched/sch_hfsc.c
205
rb_insert_color(&cl->el_node, &cl->sched->eligible);
net/sched/sch_hfsc.c
212
rb_erase(&cl->el_node, &cl->sched->eligible);
net/sched/sch_taprio.c
1088
struct sched_gate_list *sched,
net/sched/sch_taprio.c
1118
list_add_tail(&entry->list, &sched->entries);
net/sched/sch_taprio.c
1122
sched->num_entries = i;
net/sched/sch_taprio.c
120
struct sched_gate_list *sched)
net/sched/sch_taprio.c
1207
struct sched_gate_list *sched,
net/sched/sch_taprio.c
1214
base = sched_base_time(sched);
net/sched/sch_taprio.c
1222
cycle = sched->cycle_time;
net/sched/sch_taprio.c
1241
struct sched_gate_list *sched, ktime_t base)
net/sched/sch_taprio.c
1249
first = list_first_entry(&sched->entries,
net/sched/sch_taprio.c
1252
cycle = sched->cycle_time;
net/sched/sch_taprio.c
1255
sched->cycle_end_time = ktime_add_ns(base, cycle);
net/sched/sch_taprio.c
1258
taprio_set_budgets(q, sched, first);
net/sched/sch_taprio.c
1261
if (first->gate_duration[tc] == sched->cycle_time)
net/sched/sch_taprio.c
127
list_for_each_entry(entry, &sched->entries, list) {
net/sched/sch_taprio.c
1364
struct sched_gate_list *sched, ktime_t base)
net/sched/sch_taprio.c
1369
list_for_each_entry(entry, &sched->entries, list) {
net/sched/sch_taprio.c
1458
struct sched_gate_list *sched,
net/sched/sch_taprio.c
1465
offload->base_time = sched->base_time;
net/sched/sch_taprio.c
1466
offload->cycle_time = sched->cycle_time;
net/sched/sch_taprio.c
1467
offload->cycle_time_extension = sched->cycle_time_extension;
net/sched/sch_taprio.c
1469
list_for_each_entry(entry, &sched->entries, list) {
net/sched/sch_taprio.c
150
cur = list_next_entry_circular(cur, &sched->entries, list);
net/sched/sch_taprio.c
1516
struct sched_gate_list *sched,
net/sched/sch_taprio.c
1543
offload = taprio_offload_alloc(sched->num_entries);
net/sched/sch_taprio.c
1553
taprio_sched_to_offload(dev, sched, offload, &caps);
net/sched/sch_taprio.c
159
sched->max_open_gate_duration[tc] < entry->gate_duration[tc])
net/sched/sch_taprio.c
160
sched->max_open_gate_duration[tc] = entry->gate_duration[tc];
net/sched/sch_taprio.c
170
static ktime_t sched_base_time(const struct sched_gate_list *sched)
net/sched/sch_taprio.c
172
if (!sched)
net/sched/sch_taprio.c
175
return ns_to_ktime(sched->base_time);
net/sched/sch_taprio.c
198
struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
net/sched/sch_taprio.c
201
list_for_each_entry_safe(entry, n, &sched->entries, list) {
net/sched/sch_taprio.c
206
kfree(sched);
net/sched/sch_taprio.c
224
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
net/sched/sch_taprio.c
2283
struct sched_gate_list *sched)
net/sched/sch_taprio.c
229
time_since_sched_start = ktime_sub(time, sched->base_time);
net/sched/sch_taprio.c
2297
sched->max_sdu[tc]))
net/sched/sch_taprio.c
230
div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);
net/sched/sch_taprio.c
235
static ktime_t get_interval_end_time(struct sched_gate_list *sched,
net/sched/sch_taprio.c
240
s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
net/sched/sch_taprio.c
243
cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
net/sched/sch_taprio.c
245
cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);
net/sched/sch_taprio.c
249
else if (admin && admin != sched &&
net/sched/sch_taprio.c
272
struct sched_gate_list *sched,
net/sched/sch_taprio.c
288
if (sched->max_open_gate_duration[tc] == sched->cycle_time) {
net/sched/sch_taprio.c
293
max_frm_len = duration_to_length(q, sched->max_open_gate_duration[tc]);
net/sched/sch_taprio.c
310
sched->max_frm_len[tc] = max_sdu + dev->hard_header_len;
net/sched/sch_taprio.c
311
sched->max_sdu[tc] = max_sdu;
net/sched/sch_taprio.c
313
sched->max_frm_len[tc] = U32_MAX; /* never oversized */
net/sched/sch_taprio.c
314
sched->max_sdu[tc] = 0;
net/sched/sch_taprio.c
325
struct sched_gate_list *sched,
net/sched/sch_taprio.c
347
if (!sched)
net/sched/sch_taprio.c
350
cycle = sched->cycle_time;
net/sched/sch_taprio.c
351
cycle_elapsed = get_cycle_time_elapsed(sched, time);
net/sched/sch_taprio.c
355
list_for_each_entry(entry, &sched->entries, list) {
net/sched/sch_taprio.c
357
curr_intv_end = get_interval_end_time(sched, admin, entry,
net/sched/sch_taprio.c
403
struct sched_gate_list *sched, *admin;
net/sched/sch_taprio.c
408
sched = rcu_dereference(q->oper_sched);
net/sched/sch_taprio.c
411
entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
net/sched/sch_taprio.c
472
struct sched_gate_list *sched, *admin;
net/sched/sch_taprio.c
486
sched = rcu_dereference(q->oper_sched);
net/sched/sch_taprio.c
488
switch_schedules(q, &admin, &sched);
net/sched/sch_taprio.c
491
if (!sched || ktime_before(minimum_time, sched->base_time)) {
net/sched/sch_taprio.c
502
entry = find_entry_to_transmit(skb, sch, sched, admin,
net/sched/sch_taprio.c
515
if (admin && admin != sched &&
net/sched/sch_taprio.c
517
sched = admin;
net/sched/sch_taprio.c
529
entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
net/sched/sch_taprio.c
545
struct sched_gate_list *sched;
net/sched/sch_taprio.c
553
sched = rcu_dereference(q->oper_sched);
net/sched/sch_taprio.c
554
if (sched && skb->len > sched->max_frm_len[tc])
net/sched/sch_taprio.c
663
struct sched_gate_list *sched,
net/sched/sch_taprio.c
672
if (entry->gate_duration[tc] == sched->cycle_time)
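
Much of the taprio arithmetic above reduces to one expression: the position inside the current cycle is (now - base_time) mod cycle_time (get_cycle_time_elapsed uses div_s64_rem for the 64-bit remainder). Worked example:

    #include <stdint.h>
    #include <stdio.h>

    /* Offset into the current cycle: (now - base) mod cycle_time. */
    static int64_t cycle_elapsed(int64_t now_ns, int64_t base_ns, int64_t cycle_ns)
    {
            return (now_ns - base_ns) % cycle_ns;
    }

    int main(void)
    {
            /* 1 ms cycle started at t=0: t=2.5 ms is 0.5 ms into it. */
            printf("%lld\n", (long long)cycle_elapsed(2500000, 0, 1000000));
            return 0;
    }
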
net/sctp/outqueue.c
389
q->sched->unsched_all(&asoc->stream);
net/sctp/outqueue.c
417
q->sched->sched_all(&asoc->stream);
net/sctp/outqueue.c
76
return q->sched->dequeue(q);
net/sctp/sm_sideeffect.c
1121
asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
net/sctp/stream.c
133
const struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
net/sctp/stream.c
145
sched->unsched_all(stream);
net/sctp/stream.c
147
sched->sched_all(stream);
net/sctp/stream.c
185
const struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
net/sctp/stream.c
188
sched->unsched_all(stream);
net/sctp/stream.c
210
const struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
net/sctp/stream.c
212
sched->unsched_all(stream);
net/sctp/stream.c
221
sched->sched_all(stream);
net/sctp/stream.c
57
const struct sctp_sched_ops *sched;
net/sctp/stream.c
62
sched = sctp_sched_ops_from_stream(stream);
net/sctp/stream.c
63
sched->free_sid(stream, sid);
net/sctp/stream_sched.c
116
void sctp_sched_ops_register(enum sctp_sched_type sched,
net/sctp/stream_sched.c
119
sctp_sched_ops[sched] = sched_ops;
net/sctp/stream_sched.c
133
const struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
net/sctp/stream_sched.c
137
sched->unsched_all(stream);
net/sctp/stream_sched.c
142
sched->free_sid(stream, i);
net/sctp/stream_sched.c
149
enum sctp_sched_type sched)
net/sctp/stream_sched.c
151
const struct sctp_sched_ops *old = asoc->outqueue.sched;
net/sctp/stream_sched.c
157
if (sched > SCTP_SS_MAX)
net/sctp/stream_sched.c
160
n = sctp_sched_ops[sched];
net/sctp/stream_sched.c
167
asoc->outqueue.sched = n;
net/sctp/stream_sched.c
190
asoc->outqueue.sched = &sctp_sched_fcfs; /* Always safe */
net/sctp/stream_sched.c
200
if (asoc->outqueue.sched == sctp_sched_ops[i])
net/sctp/stream_sched.c
220
return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp);
net/sctp/stream_sched.c
232
return asoc->outqueue.sched->get(&asoc->stream, sid, value);
net/sctp/stream_sched.c
253
q->sched->dequeue_done(q, ch);
net/sctp/stream_sched.c
266
const struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
net/sctp/stream_sched.c
270
return sched->init_sid(stream, sid, gfp);
net/sctp/stream_sched.c
279
return asoc->outqueue.sched;
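
SCTP keeps its stream schedulers in an enum-indexed ops table, bounds-checked with sched > SCTP_SS_MAX before indexing. A minimal sketch of that lookup (names shortened):

    #include <stdio.h>

    enum ss_type { SS_FCFS, SS_PRIO, SS_RR, SS_MAX = SS_RR };

    struct ss_ops { const char *name; };

    static const struct ss_ops fcfs_ops = { "fcfs" };
    static const struct ss_ops prio_ops = { "prio" };
    static const struct ss_ops rr_ops   = { "rr" };

    static const struct ss_ops *ss_table[SS_MAX + 1] = {
            [SS_FCFS] = &fcfs_ops,
            [SS_PRIO] = &prio_ops,
            [SS_RR]   = &rr_ops,
    };

    /* Same bounds check as the listing: reject anything past SS_MAX
     * before indexing the ops table. */
    static const struct ss_ops *ss_lookup(enum ss_type sched)
    {
            if (sched > SS_MAX)
                    return NULL;
            return ss_table[sched];
    }

    int main(void)
    {
            printf("%s\n", ss_lookup(SS_PRIO)->name);       /* prio */
            return 0;
    }
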
net/unix/af_unix.c
1606
int sched;
net/unix/af_unix.c
1611
sched = !sock_flag(other, SOCK_DEAD) &&
net/unix/af_unix.c
1617
if (sched)
samples/bpf/xdp_sample.bpf.c
157
unsigned int drops, int sched, struct xdp_cpumap_stats *xdp_stats)
samples/bpf/xdp_sample.bpf.c
175
if (sched)
tools/perf/builtin-sched.c
1039
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
tools/perf/builtin-sched.c
1049
__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
tools/perf/builtin-sched.c
1136
static int latency_switch_event(struct perf_sched *sched,
tools/perf/builtin-sched.c
1152
timestamp0 = sched->cpu_last_switched[cpu];
tools/perf/builtin-sched.c
1153
sched->cpu_last_switched[cpu] = timestamp;
tools/perf/builtin-sched.c
1169
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
tools/perf/builtin-sched.c
1171
if (thread_atoms_insert(sched, sched_out))
tools/perf/builtin-sched.c
1173
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
tools/perf/builtin-sched.c
1182
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
tools/perf/builtin-sched.c
1184
if (thread_atoms_insert(sched, sched_in))
tools/perf/builtin-sched.c
1186
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
tools/perf/builtin-sched.c
1206
static int latency_runtime_event(struct perf_sched *sched,
tools/perf/builtin-sched.c
1214
struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
tools/perf/builtin-sched.c
1223
if (thread_atoms_insert(sched, thread))
tools/perf/builtin-sched.c
1225
atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
tools/perf/builtin-sched.c
1241
static int latency_wakeup_event(struct perf_sched *sched,
tools/perf/builtin-sched.c
1256
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
tools/perf/builtin-sched.c
1258
if (thread_atoms_insert(sched, wakee))
tools/perf/builtin-sched.c
1260
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
tools/perf/builtin-sched.c
1284
if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
tools/perf/builtin-sched.c
1287
sched->nr_timestamps++;
tools/perf/builtin-sched.c
1289
sched->nr_unordered_timestamps++;
tools/perf/builtin-sched.c
1302
static int latency_migrate_task_event(struct perf_sched *sched,
tools/perf/builtin-sched.c
1317
if (sched->profile_cpu == -1)
tools/perf/builtin-sched.c
132
int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
tools/perf/builtin-sched.c
1323
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
tools/perf/builtin-sched.c
1325
if (thread_atoms_insert(sched, migrant))
tools/perf/builtin-sched.c
1327
register_pid(sched, thread__tid(migrant), thread__comm_str(migrant));
tools/perf/builtin-sched.c
1328
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
tools/perf/builtin-sched.c
1342
sched->nr_timestamps++;
tools/perf/builtin-sched.c
1345
sched->nr_unordered_timestamps++;
tools/perf/builtin-sched.c
135
int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
tools/perf/builtin-sched.c
1352
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
tools/perf/builtin-sched.c
1367
sched->all_runtime += work_list->total_runtime;
tools/perf/builtin-sched.c
1368
sched->all_count += work_list->nb_atoms;
tools/perf/builtin-sched.c
138
int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
tools/perf/builtin-sched.c
142
int (*fork_event)(struct perf_sched *sched, union perf_event *event,
tools/perf/builtin-sched.c
145
int (*migrate_task_event)(struct perf_sched *sched,
tools/perf/builtin-sched.c
1500
static void perf_sched__sort_lat(struct perf_sched *sched)
tools/perf/builtin-sched.c
1503
struct rb_root_cached *root = &sched->atom_root;
tools/perf/builtin-sched.c
1513
__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
tools/perf/builtin-sched.c
1515
if (root == &sched->atom_root) {
tools/perf/builtin-sched.c
1516
root = &sched->merged_atom_root;
tools/perf/builtin-sched.c
1526
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
tools/perf/builtin-sched.c
1528
if (sched->tp_handler->wakeup_event)
tools/perf/builtin-sched.c
1529
return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
tools/perf/builtin-sched.c
1548
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
tools/perf/builtin-sched.c
1553
if (!sched->map.color_pids || !thread || thread__priv(thread))
tools/perf/builtin-sched.c
1556
if (thread_map__has(sched->map.color_pids, tid))
tools/perf/builtin-sched.c
1563
static bool sched_match_task(struct perf_sched *sched, const char *comm_str)
tools/perf/builtin-sched.c
1565
bool fuzzy_match = sched->map.fuzzy;
tools/perf/builtin-sched.c
1566
struct strlist *task_names = sched->map.task_names;
tools/perf/builtin-sched.c
1579
static void print_sched_map(struct perf_sched *sched, struct perf_cpu this_cpu, int cpus_nr,
tools/perf/builtin-sched.c
1584
.cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
tools/perf/builtin-sched.c
1586
struct thread *curr_thread = sched->curr_thread[cpu.cpu];
tools/perf/builtin-sched.c
1587
struct thread *curr_out_thread = sched->curr_out_thread[cpu.cpu];
tools/perf/builtin-sched.c
1597
if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
tools/perf/builtin-sched.c
1605
thread_to_check = sched_out ? sched->curr_out_thread[cpu.cpu] :
tools/perf/builtin-sched.c
1606
sched->curr_thread[cpu.cpu];
tools/perf/builtin-sched.c
1617
curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
tools/perf/builtin-sched.c
1629
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
tools/perf/builtin-sched.c
1652
if (this_cpu.cpu > sched->max_cpu.cpu)
tools/perf/builtin-sched.c
1653
sched->max_cpu = this_cpu;
tools/perf/builtin-sched.c
1655
if (sched->map.comp) {
tools/perf/builtin-sched.c
1656
cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
tools/perf/builtin-sched.c
1657
if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
tools/perf/builtin-sched.c
1658
sched->map.comp_cpus[cpus_nr++] = this_cpu;
tools/perf/builtin-sched.c
1662
cpus_nr = sched->max_cpu.cpu;
tools/perf/builtin-sched.c
1664
timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
tools/perf/builtin-sched.c
1665
sched->cpu_last_switched[this_cpu.cpu] = timestamp;
tools/perf/builtin-sched.c
1676
sched_in = map__findnew_thread(sched, machine, -1, next_pid);
tools/perf/builtin-sched.c
1677
sched_out = map__findnew_thread(sched, machine, -1, prev_pid);
tools/perf/builtin-sched.c
1685
thread__put(sched->curr_thread[this_cpu.cpu]);
tools/perf/builtin-sched.c
1686
thread__put(sched->curr_out_thread[this_cpu.cpu]);
tools/perf/builtin-sched.c
1688
sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
tools/perf/builtin-sched.c
1689
sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out);
tools/perf/builtin-sched.c
1703
} else if (!sched->map.task_name || sched_match_task(sched, str)) {
tools/perf/builtin-sched.c
1704
tr->shortname[0] = sched->next_shortname1;
tools/perf/builtin-sched.c
1705
tr->shortname[1] = sched->next_shortname2;
tools/perf/builtin-sched.c
1707
if (sched->next_shortname1 < 'Z') {
tools/perf/builtin-sched.c
1708
sched->next_shortname1++;
tools/perf/builtin-sched.c
1710
sched->next_shortname1 = 'A';
tools/perf/builtin-sched.c
1711
if (sched->next_shortname2 < '9')
tools/perf/builtin-sched.c
1712
sched->next_shortname2++;
tools/perf/builtin-sched.c
1714
sched->next_shortname2 = '0';
tools/perf/builtin-sched.c
1723
if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
tools/perf/builtin-sched.c
1732
if (sched->map.task_name && !sched_match_task(sched, str)) {
tools/perf/builtin-sched.c
1733
if (!sched_match_task(sched, thread__comm_str(sched_out)))
tools/perf/builtin-sched.c
1740
if (!(sched->map.task_name && !sched_match_task(sched, str)))
tools/perf/builtin-sched.c
1746
print_sched_map(sched, this_cpu, cpus_nr, color, false);
tools/perf/builtin-sched.c
1761
if (sched->map.comp && new_cpu)
tools/perf/builtin-sched.c
1770
if (sched->map.task_name) {
tools/perf/builtin-sched.c
1771
tr = thread__get_runtime(sched->curr_out_thread[this_cpu.cpu]);
tools/perf/builtin-sched.c
1779
print_sched_map(sched, this_cpu, cpus_nr, color, true);
tools/perf/builtin-sched.c
1798
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
tools/perf/builtin-sched.c
1803
if (sched->curr_pid[this_cpu] != (u32)-1) {
tools/perf/builtin-sched.c
1808
if (sched->curr_pid[this_cpu] != prev_pid)
tools/perf/builtin-sched.c
1809
sched->nr_context_switch_bugs++;
tools/perf/builtin-sched.c
1812
if (sched->tp_handler->switch_event)
tools/perf/builtin-sched.c
1813
err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
tools/perf/builtin-sched.c
1815
sched->curr_pid[this_cpu] = next_pid;
tools/perf/builtin-sched.c
1824
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
tools/perf/builtin-sched.c
1826
if (sched->tp_handler->runtime_event)
tools/perf/builtin-sched.c
1827
return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
tools/perf/builtin-sched.c
1837
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
tools/perf/builtin-sched.c
1843
if (sched->tp_handler->fork_event)
tools/perf/builtin-sched.c
1844
return sched->tp_handler->fork_event(sched, event, machine);
tools/perf/builtin-sched.c
1854
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
tools/perf/builtin-sched.c
1856
if (sched->tp_handler->migrate_task_event)
tools/perf/builtin-sched.c
1857
return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
tools/perf/builtin-sched.c
1914
static int perf_sched__read_events(struct perf_sched *sched)
tools/perf/builtin-sched.c
1928
.force = sched->force,
tools/perf/builtin-sched.c
1932
session = perf_session__new(&data, &sched->tool);
tools/perf/builtin-sched.c
1954
sched->nr_events = session->evlist->stats.nr_events[0];
tools/perf/builtin-sched.c
1955
sched->nr_lost_events = session->evlist->stats.total_lost;
tools/perf/builtin-sched.c
1956
sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
tools/perf/builtin-sched.c
2087
static void timehist_header(struct perf_sched *sched)
tools/perf/builtin-sched.c
2089
u32 ncpus = sched->max_cpu.cpu + 1;
tools/perf/builtin-sched.c
2094
if (sched->show_cpu_visual) {
tools/perf/builtin-sched.c
2106
if (sched->show_prio)
tools/perf/builtin-sched.c
2111
if (sched->pre_migrations)
tools/perf/builtin-sched.c
2114
if (sched->show_state)
tools/perf/builtin-sched.c
2124
if (sched->show_cpu_visual)
tools/perf/builtin-sched.c
2129
if (sched->show_prio)
tools/perf/builtin-sched.c
2134
if (sched->pre_migrations)
tools/perf/builtin-sched.c
2144
if (sched->show_cpu_visual)
tools/perf/builtin-sched.c
2149
if (sched->show_prio)
tools/perf/builtin-sched.c
2154
if (sched->pre_migrations)
tools/perf/builtin-sched.c
2157
if (sched->show_state)
tools/perf/builtin-sched.c
2163
static void timehist_print_sample(struct perf_sched *sched,
tools/perf/builtin-sched.c
2173
u32 max_cpus = sched->max_cpu.cpu + 1;
tools/perf/builtin-sched.c
2184
if (sched->show_cpu_visual) {
tools/perf/builtin-sched.c
2207
if (sched->show_prio)
tools/perf/builtin-sched.c
2215
if (sched->pre_migrations)
tools/perf/builtin-sched.c
2218
if (sched->show_state)
tools/perf/builtin-sched.c
2221
if (sched->show_next) {
tools/perf/builtin-sched.c
2226
if (sched->show_wakeups && !sched->show_next)
tools/perf/builtin-sched.c
2232
if (sched->show_callchain)
tools/perf/builtin-sched.c
2327
static void save_task_callchain(struct perf_sched *sched,
tools/perf/builtin-sched.c
2342
if (!sched->show_callchain || sample->callchain == NULL) {
tools/perf/builtin-sched.c
2350
NULL, NULL, sched->max_stack + 2) != 0) {
tools/perf/builtin-sched.c
2486
static void save_idle_callchain(struct perf_sched *sched,
tools/perf/builtin-sched.c
2492
if (!sched->show_callchain || sample->callchain == NULL)
tools/perf/builtin-sched.c
2502
static struct thread *timehist_get_thread(struct perf_sched *sched,
tools/perf/builtin-sched.c
2523
save_task_callchain(sched, sample, evsel, machine);
tools/perf/builtin-sched.c
2524
if (sched->idle_hist) {
tools/perf/builtin-sched.c
2543
save_idle_callchain(sched, itr, sample);
tools/perf/builtin-sched.c
2550
static bool timehist_skip_sample(struct perf_sched *sched,
tools/perf/builtin-sched.c
2561
sched->skipped_samples++;
tools/perf/builtin-sched.c
2564
if (sched->prio_str) {
tools/perf/builtin-sched.c
2577
if (prio != -1 && !test_bit(prio, sched->prio_bitmap)) {
tools/perf/builtin-sched.c
2579
sched->skipped_samples++;
tools/perf/builtin-sched.c
2583
if (sched->idle_hist) {
tools/perf/builtin-sched.c
2594
static void timehist_print_wakeup_event(struct perf_sched *sched,
tools/perf/builtin-sched.c
2608
if (timehist_skip_sample(sched, thread, evsel, sample) &&
tools/perf/builtin-sched.c
2609
timehist_skip_sample(sched, awakened, evsel, sample)) {
tools/perf/builtin-sched.c
2616
if (sched->show_cpu_visual)
tools/perf/builtin-sched.c
2617
printf(" %*s ", sched->max_cpu.cpu + 1, "");
tools/perf/builtin-sched.c
2646
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
tools/perf/builtin-sched.c
2666
if (sched->show_wakeups &&
tools/perf/builtin-sched.c
2667
!perf_time__skip_sample(&sched->ptime, sample->time))
tools/perf/builtin-sched.c
2668
timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
tools/perf/builtin-sched.c
2674
static void timehist_print_migration_event(struct perf_sched *sched,
tools/perf/builtin-sched.c
2685
if (sched->summary_only)
tools/perf/builtin-sched.c
2688
max_cpus = sched->max_cpu.cpu + 1;
tools/perf/builtin-sched.c
2696
if (timehist_skip_sample(sched, thread, evsel, sample) &&
tools/perf/builtin-sched.c
2697
timehist_skip_sample(sched, migrated, evsel, sample)) {
tools/perf/builtin-sched.c
2705
if (sched->show_cpu_visual) {
tools/perf/builtin-sched.c
2735
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
tools/perf/builtin-sched.c
2755
if (sched->show_migrations) {
tools/perf/builtin-sched.c
2756
timehist_print_migration_event(sched, evsel, sample,
tools/perf/builtin-sched.c
2794
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
tools/perf/builtin-sched.c
2795
struct perf_time_interval *ptime = &sched->ptime;
tools/perf/builtin-sched.c
2811
if (sched->show_prio || sched->prio_str)
tools/perf/builtin-sched.c
2814
thread = timehist_get_thread(sched, sample, machine, evsel);
tools/perf/builtin-sched.c
2820
if (timehist_skip_sample(sched, thread, evsel, sample))
tools/perf/builtin-sched.c
2859
if (!sched->idle_hist || thread__tid(thread) == 0) {
tools/perf/builtin-sched.c
2863
if (sched->idle_hist) {
tools/perf/builtin-sched.c
2893
if (!sched->summary_only)
tools/perf/builtin-sched.c
2894
timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
tools/perf/builtin-sched.c
2898
if (sched->hist_time.start == 0 && t >= ptime->start)
tools/perf/builtin-sched.c
2899
sched->hist_time.start = t;
tools/perf/builtin-sched.c
2901
sched->hist_time.end = t;
tools/perf/builtin-sched.c
2992
struct perf_sched *sched;
tools/perf/builtin-sched.c
3012
if (stats->sched->show_state)
tools/perf/builtin-sched.c
307
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
tools/perf/builtin-sched.c
3075
static void timehist_print_summary(struct perf_sched *sched,
tools/perf/builtin-sched.c
3084
u64 hist_time = sched->hist_time.end - sched->hist_time.start;
tools/perf/builtin-sched.c
3087
totals.sched = sched;
tools/perf/builtin-sched.c
3089
if (sched->idle_hist) {
tools/perf/builtin-sched.c
3093
} else if (sched->show_state) {
tools/perf/builtin-sched.c
3104
sched->show_state ? "(msec)" : "%");
tools/perf/builtin-sched.c
3113
if (sched->skipped_samples && !sched->idle_hist)
tools/perf/builtin-sched.c
313
} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
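The burn_nsecs() fragments (builtin-sched.c:307-338) busy-wait for a requested duration, ending early by the calibrated per-call overhead so replayed runtimes do not drift long over many iterations. A hedged standalone version; get_nsecs() here is a plain CLOCK_MONOTONIC read, and run_overhead stands in for sched->run_measurement_overhead, which the calibration pass at 326/338 would fill in:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint64_t run_overhead;       /* ns, from a calibration pass */

    static uint64_t get_nsecs(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    /* Spin for `nsecs`, stopping early by the measured overhead so
     * back-to-back burns do not accumulate drift. */
    static void burn_nsecs(uint64_t nsecs)
    {
        uint64_t t0 = get_nsecs(), t1;

        do {
            t1 = get_nsecs();
        } while (t1 + run_overhead < t0 + nsecs);
    }

    int main(void)
    {
        uint64_t t0 = get_nsecs();

        burn_nsecs(1000000);            /* burn ~1 ms */
        printf("burned %lu ns\n", (unsigned long)(get_nsecs() - t0));
        return 0;
    }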
tools/perf/builtin-sched.c
3135
if (sched->idle_hist && sched->show_callchain) {
tools/perf/builtin-sched.c
3175
printf(" (x %d)\n", sched->max_cpu.cpu);
tools/perf/builtin-sched.c
3190
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
tools/perf/builtin-sched.c
3196
if (this_cpu.cpu > sched->max_cpu.cpu)
tools/perf/builtin-sched.c
3197
sched->max_cpu = this_cpu;
tools/perf/builtin-sched.c
3208
static int timehist_check_attr(struct perf_sched *sched,
tools/perf/builtin-sched.c
3222
if (sched->show_callchain &&
tools/perf/builtin-sched.c
3226
sched->show_callchain = 0;
tools/perf/builtin-sched.c
3234
static int timehist_parse_prio_str(struct perf_sched *sched)
tools/perf/builtin-sched.c
3238
const char *str = sched->prio_str;
tools/perf/builtin-sched.c
326
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
tools/perf/builtin-sched.c
3264
__set_bit(start_prio, sched->prio_bitmap);
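timehist_parse_prio_str() (builtin-sched.c:3234-3264) turns the --prio string into a bitmap that timehist_skip_sample() later probes with test_bit() (line 2577). The name start_prio suggests range syntax is accepted; the sketch below is only a guess at that shape, with MAX_PRIO, parse_prio_str() and the "0,10-12" syntax as assumptions:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_PRIO 140                /* assumed bound on kernel priorities */

    static unsigned char prio_bitmap[(MAX_PRIO + 7) / 8];

    static void set_prio(int p)  { prio_bitmap[p / 8] |= 1u << (p % 8); }
    static int  test_prio(int p) { return prio_bitmap[p / 8] & (1u << (p % 8)); }

    /* Parse "0,10-12"-style input into the bitmap; samples whose
     * priority bit is clear would then be skipped. */
    static int parse_prio_str(const char *s)
    {
        char *dup = strdup(s), *save = NULL;

        for (char *tok = strtok_r(dup, ",", &save); tok;
             tok = strtok_r(NULL, ",", &save)) {
            int lo, hi, n = sscanf(tok, "%d-%d", &lo, &hi);

            if (n == 1)                 /* single value, not a range */
                hi = lo;
            if (n < 1 || lo < 0 || hi < lo || hi >= MAX_PRIO) {
                free(dup);
                return -1;              /* malformed token */
            }
            while (lo <= hi)
                set_prio(lo++);
        }
        free(dup);
        return 0;
    }

    int main(void)
    {
        if (parse_prio_str("0,10-12") == 0)
            for (int p = 0; p < 16; p++)
                if (test_prio(p))
                    printf("prio %d selected\n", p);
        return 0;
    }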
tools/perf/builtin-sched.c
3275
static int perf_sched__timehist(struct perf_sched *sched)
tools/perf/builtin-sched.c
3289
.force = sched->force,
tools/perf/builtin-sched.c
3300
sched->tool.sample = perf_timehist__process_sample;
tools/perf/builtin-sched.c
3301
sched->tool.mmap = perf_event__process_mmap;
tools/perf/builtin-sched.c
3302
sched->tool.comm = perf_event__process_comm;
tools/perf/builtin-sched.c
3303
sched->tool.exit = perf_event__process_exit;
tools/perf/builtin-sched.c
3304
sched->tool.fork = perf_event__process_fork;
tools/perf/builtin-sched.c
3305
sched->tool.lost = process_lost;
tools/perf/builtin-sched.c
3306
sched->tool.attr = perf_event__process_attr;
tools/perf/builtin-sched.c
3307
sched->tool.tracing_data = perf_event__process_tracing_data;
tools/perf/builtin-sched.c
3308
sched->tool.build_id = perf_event__process_build_id;
tools/perf/builtin-sched.c
3310
sched->tool.ordering_requires_timestamps = true;
tools/perf/builtin-sched.c
3312
symbol_conf.use_callchain = sched->show_callchain;
tools/perf/builtin-sched.c
3314
session = perf_session__new(&data, &sched->tool);
tools/perf/builtin-sched.c
3329
if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
tools/perf/builtin-sched.c
333
burn_nsecs(sched, 0);
tools/perf/builtin-sched.c
3335
if (timehist_check_attr(sched, evlist) != 0)
tools/perf/builtin-sched.c
3338
if (timehist_parse_prio_str(sched) != 0) {
tools/perf/builtin-sched.c
3361
if ((sched->show_migrations || sched->pre_migrations) &&
tools/perf/builtin-sched.c
3366
sched->max_cpu.cpu = env->nr_cpus_online;
tools/perf/builtin-sched.c
3367
if (sched->max_cpu.cpu == 0)
tools/perf/builtin-sched.c
3368
sched->max_cpu.cpu = 4;
tools/perf/builtin-sched.c
3369
if (init_idle_threads(sched->max_cpu.cpu))
tools/perf/builtin-sched.c
3373
if (sched->summary_only)
tools/perf/builtin-sched.c
3374
sched->summary = sched->summary_only;
tools/perf/builtin-sched.c
3376
if (!sched->summary_only)
tools/perf/builtin-sched.c
3377
timehist_header(sched);
tools/perf/builtin-sched.c
338
sched->run_measurement_overhead = min_delta;
tools/perf/builtin-sched.c
3385
sched->nr_events = evlist->stats.nr_events[0];
tools/perf/builtin-sched.c
3386
sched->nr_lost_events = evlist->stats.total_lost;
tools/perf/builtin-sched.c
3387
sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
tools/perf/builtin-sched.c
3389
if (sched->summary)
tools/perf/builtin-sched.c
3390
timehist_print_summary(sched, session);
tools/perf/builtin-sched.c
3400
static void print_bad_events(struct perf_sched *sched)
tools/perf/builtin-sched.c
3402
if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
tools/perf/builtin-sched.c
3404
(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
tools/perf/builtin-sched.c
3405
sched->nr_unordered_timestamps, sched->nr_timestamps);
tools/perf/builtin-sched.c
3407
if (sched->nr_lost_events && sched->nr_events) {
tools/perf/builtin-sched.c
3409
(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
tools/perf/builtin-sched.c
3410
sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
tools/perf/builtin-sched.c
3412
if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
tools/perf/builtin-sched.c
3414
(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
tools/perf/builtin-sched.c
3415
sched->nr_context_switch_bugs, sched->nr_timestamps);
tools/perf/builtin-sched.c
3416
if (sched->nr_lost_events)
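print_bad_events() (builtin-sched.c:3400-3416) prints each ratio only when both the counter and its denominator are nonzero, which avoids a 0/0 and keeps clean runs quiet. A standalone sketch of that guarded-percentage style; the message wording is approximate, not perf's exact output:

    #include <stdio.h>

    struct bad_stats {                  /* mirrors the counters above */
        unsigned long nr_unordered_timestamps, nr_timestamps;
        unsigned long nr_lost_events, nr_events, nr_lost_chunks;
    };

    static void print_bad(const struct bad_stats *s)
    {
        if (s->nr_unordered_timestamps && s->nr_timestamps)
            printf("  INFO: %.3f%% unordered timestamps (%lu out of %lu)\n",
                   (double)s->nr_unordered_timestamps /
                   (double)s->nr_timestamps * 100.0,
                   s->nr_unordered_timestamps, s->nr_timestamps);

        if (s->nr_lost_events && s->nr_events)
            printf("  INFO: %.3f%% lost events (%lu out of %lu, in %lu chunks)\n",
                   (double)s->nr_lost_events / (double)s->nr_events * 100.0,
                   s->nr_lost_events, s->nr_events, s->nr_lost_chunks);
    }

    int main(void)
    {
        struct bad_stats s = { .nr_lost_events = 3, .nr_events = 1000,
                               .nr_lost_chunks = 1 };

        print_bad(&s);                  /* zeroed counters print nothing */
        return 0;
    }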
tools/perf/builtin-sched.c
343
static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
tools/perf/builtin-sched.c
3463
static void perf_sched__merge_lat(struct perf_sched *sched)
tools/perf/builtin-sched.c
3468
if (sched->skip_merge)
tools/perf/builtin-sched.c
3471
while ((node = rb_first_cached(&sched->atom_root))) {
tools/perf/builtin-sched.c
3472
rb_erase_cached(node, &sched->atom_root);
tools/perf/builtin-sched.c
3474
__merge_work_atoms(&sched->merged_atom_root, data);
tools/perf/builtin-sched.c
3478
static int setup_cpus_switch_event(struct perf_sched *sched)
tools/perf/builtin-sched.c
3482
sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
tools/perf/builtin-sched.c
3483
if (!sched->cpu_last_switched)
tools/perf/builtin-sched.c
3486
sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
tools/perf/builtin-sched.c
3487
if (!sched->curr_pid) {
tools/perf/builtin-sched.c
3488
zfree(&sched->cpu_last_switched);
tools/perf/builtin-sched.c
3493
sched->curr_pid[i] = -1;
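setup_cpus_switch_event() (builtin-sched.c:3478-3493) allocates the per-CPU switch state: zeroed last-switch timestamps, a curr_pid array poisoned to -1 meaning "no task seen yet", and rollback of the first allocation if the second fails. A sketch of the same shape, with plain libc calls standing in for zfree():

    #include <stdlib.h>

    #define MAX_CPUS 4096               /* stand-in for perf's MAX_CPUS */

    struct cpu_state {
        unsigned long long *cpu_last_switched; /* zeroed timestamps */
        int *curr_pid;                         /* -1 = no task seen yet */
    };

    static int cpu_state_init(struct cpu_state *st)
    {
        st->cpu_last_switched = calloc(MAX_CPUS, sizeof(*st->cpu_last_switched));
        if (!st->cpu_last_switched)
            return -1;

        st->curr_pid = malloc(MAX_CPUS * sizeof(*st->curr_pid));
        if (!st->curr_pid) {            /* roll back the first allocation */
            free(st->cpu_last_switched);
            st->cpu_last_switched = NULL;
            return -1;
        }

        for (int i = 0; i < MAX_CPUS; i++)
            st->curr_pid[i] = -1;       /* poison: no pid observed yet */
        return 0;
    }

    static void cpu_state_exit(struct cpu_state *st)
    {
        free(st->curr_pid);             /* zfree() frees and clears in one step */
        free(st->cpu_last_switched);
        st->curr_pid = NULL;
        st->cpu_last_switched = NULL;
    }

    int main(void)
    {
        struct cpu_state st;

        if (cpu_state_init(&st))
            return 1;
        cpu_state_exit(&st);
        return 0;
    }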
tools/perf/builtin-sched.c
3498
static void free_cpus_switch_event(struct perf_sched *sched)
tools/perf/builtin-sched.c
3500
zfree(&sched->curr_pid);
tools/perf/builtin-sched.c
3501
zfree(&sched->cpu_last_switched);
tools/perf/builtin-sched.c
3504
static int perf_sched__lat(struct perf_sched *sched)
tools/perf/builtin-sched.c
3511
if (setup_cpus_switch_event(sched))
tools/perf/builtin-sched.c
3514
if (perf_sched__read_events(sched))
tools/perf/builtin-sched.c
3517
perf_sched__merge_lat(sched);
tools/perf/builtin-sched.c
3518
perf_sched__sort_lat(sched);
tools/perf/builtin-sched.c
3524
next = rb_first_cached(&sched->sorted_atom_root);
tools/perf/builtin-sched.c
3530
output_lat_thread(sched, work_list);
tools/perf/builtin-sched.c
3536
(double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
tools/perf/builtin-sched.c
3540
print_bad_events(sched);
tools/perf/builtin-sched.c
3545
while ((next = rb_first_cached(&sched->sorted_atom_root))) {
tools/perf/builtin-sched.c
3549
rb_erase_cached(next, &sched->sorted_atom_root);
tools/perf/builtin-sched.c
3553
free_cpus_switch_event(sched);
tools/perf/builtin-sched.c
3557
static int setup_map_cpus(struct perf_sched *sched)
tools/perf/builtin-sched.c
3559
sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
tools/perf/builtin-sched.c
356
sched->sleep_measurement_overhead = min_delta;
tools/perf/builtin-sched.c
3561
if (sched->map.comp) {
tools/perf/builtin-sched.c
3562
sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
tools/perf/builtin-sched.c
3563
if (!sched->map.comp_cpus)
tools/perf/builtin-sched.c
3567
if (sched->map.cpus_str) {
tools/perf/builtin-sched.c
3568
sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
tools/perf/builtin-sched.c
3569
if (!sched->map.cpus) {
tools/perf/builtin-sched.c
3570
pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
tools/perf/builtin-sched.c
3571
zfree(&sched->map.comp_cpus);
tools/perf/builtin-sched.c
3579
static int setup_color_pids(struct perf_sched *sched)
tools/perf/builtin-sched.c
3583
if (!sched->map.color_pids_str)
tools/perf/builtin-sched.c
3586
map = thread_map__new_by_tid_str(sched->map.color_pids_str);
tools/perf/builtin-sched.c
3588
pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
tools/perf/builtin-sched.c
3592
sched->map.color_pids = map;
tools/perf/builtin-sched.c
3596
static int setup_color_cpus(struct perf_sched *sched)
tools/perf/builtin-sched.c
3600
if (!sched->map.color_cpus_str)
tools/perf/builtin-sched.c
3603
map = perf_cpu_map__new(sched->map.color_cpus_str);
tools/perf/builtin-sched.c
3605
pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
tools/perf/builtin-sched.c
3609
sched->map.color_cpus = map;
tools/perf/builtin-sched.c
3613
static int perf_sched__map(struct perf_sched *sched)
tools/perf/builtin-sched.c
3617
sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
tools/perf/builtin-sched.c
3618
if (!sched->curr_thread)
tools/perf/builtin-sched.c
3621
sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread)));
tools/perf/builtin-sched.c
3622
if (!sched->curr_out_thread)
tools/perf/builtin-sched.c
3625
if (setup_cpus_switch_event(sched))
tools/perf/builtin-sched.c
3628
if (setup_map_cpus(sched))
tools/perf/builtin-sched.c
3631
if (setup_color_pids(sched))
tools/perf/builtin-sched.c
3634
if (setup_color_cpus(sched))
tools/perf/builtin-sched.c
3638
if (perf_sched__read_events(sched))
tools/perf/builtin-sched.c
3642
print_bad_events(sched);
tools/perf/builtin-sched.c
3645
perf_cpu_map__put(sched->map.color_cpus);
tools/perf/builtin-sched.c
3648
perf_thread_map__put(sched->map.color_pids);
tools/perf/builtin-sched.c
3651
zfree(&sched->map.comp_cpus);
tools/perf/builtin-sched.c
3652
perf_cpu_map__put(sched->map.cpus);
tools/perf/builtin-sched.c
3655
free_cpus_switch_event(sched);
tools/perf/builtin-sched.c
3659
thread__put(sched->curr_out_thread[i]);
tools/perf/builtin-sched.c
3660
zfree(&sched->curr_out_thread);
tools/perf/builtin-sched.c
3664
thread__put(sched->curr_thread[i]);
tools/perf/builtin-sched.c
3665
zfree(&sched->curr_thread);
tools/perf/builtin-sched.c
3669
static int perf_sched__replay(struct perf_sched *sched)
tools/perf/builtin-sched.c
3674
mutex_init(&sched->start_work_mutex);
tools/perf/builtin-sched.c
3675
mutex_init(&sched->work_done_wait_mutex);
tools/perf/builtin-sched.c
3677
ret = setup_cpus_switch_event(sched);
tools/perf/builtin-sched.c
3681
calibrate_run_measurement_overhead(sched);
tools/perf/builtin-sched.c
3682
calibrate_sleep_measurement_overhead(sched);
tools/perf/builtin-sched.c
3684
test_calibrations(sched);
tools/perf/builtin-sched.c
3686
ret = perf_sched__read_events(sched);
tools/perf/builtin-sched.c
3690
printf("nr_run_events: %ld\n", sched->nr_run_events);
tools/perf/builtin-sched.c
3691
printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
tools/perf/builtin-sched.c
3692
printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
tools/perf/builtin-sched.c
3694
if (sched->targetless_wakeups)
tools/perf/builtin-sched.c
3695
printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
tools/perf/builtin-sched.c
3696
if (sched->multitarget_wakeups)
tools/perf/builtin-sched.c
3697
printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
tools/perf/builtin-sched.c
3698
if (sched->nr_run_events_optimized)
tools/perf/builtin-sched.c
3700
sched->nr_run_events_optimized);
tools/perf/builtin-sched.c
3702
print_task_traces(sched);
tools/perf/builtin-sched.c
3703
add_cross_task_wakeups(sched);
tools/perf/builtin-sched.c
3705
sched->thread_funcs_exit = false;
tools/perf/builtin-sched.c
3706
create_tasks(sched);
tools/perf/builtin-sched.c
3708
if (sched->replay_repeat == 0)
tools/perf/builtin-sched.c
3709
sched->replay_repeat = UINT_MAX;
tools/perf/builtin-sched.c
3711
for (i = 0; i < sched->replay_repeat; i++)
tools/perf/builtin-sched.c
3712
run_one_test(sched);
tools/perf/builtin-sched.c
3714
sched->thread_funcs_exit = true;
tools/perf/builtin-sched.c
3715
destroy_tasks(sched);
tools/perf/builtin-sched.c
3718
free_cpus_switch_event(sched);
tools/perf/builtin-sched.c
3721
mutex_destroy(&sched->start_work_mutex);
tools/perf/builtin-sched.c
3722
mutex_destroy(&sched->work_done_wait_mutex);
tools/perf/builtin-sched.c
3726
static void setup_sorting(struct perf_sched *sched, const struct option *options,
tools/perf/builtin-sched.c
3729
char *tmp, *tok, *str = strdup(sched->sort_order);
tools/perf/builtin-sched.c
3733
if (sort_dimension__add(tok, &sched->sort_list) < 0) {
tools/perf/builtin-sched.c
3741
sort_dimension__add("pid", &sched->cmp_pid);
tools/perf/builtin-sched.c
3749
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
tools/perf/builtin-sched.c
3751
if (perf_data__write(sched->data, event, event->header.size) <= 0) {
tools/perf/builtin-sched.c
3756
sched->session->header.data_size += event->header.size;
tools/perf/builtin-sched.c
3807
static int perf_sched__schedstat_record(struct perf_sched *sched,
tools/perf/builtin-sched.c
3829
session = perf_session__new(&data, &sched->tool);
tools/perf/builtin-sched.c
3838
sched->session = session;
tools/perf/builtin-sched.c
3839
sched->data = &data;
tools/perf/builtin-sched.c
389
static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
tools/perf/builtin-sched.c
3892
err = perf_event__synthesize_schedstat(&(sched->tool),
tools/perf/builtin-sched.c
3914
err = perf_event__synthesize_schedstat(&(sched->tool),
tools/perf/builtin-sched.c
399
sched->nr_run_events_optimized++;
tools/perf/builtin-sched.c
409
sched->nr_run_events++;
tools/perf/builtin-sched.c
412
static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
tools/perf/builtin-sched.c
423
sched->targetless_wakeups++;
tools/perf/builtin-sched.c
427
sched->multitarget_wakeups++;
tools/perf/builtin-sched.c
435
sched->nr_wakeup_events++;
tools/perf/builtin-sched.c
438
static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
tools/perf/builtin-sched.c
445
sched->nr_sleep_events++;
tools/perf/builtin-sched.c
448
static struct task_desc *register_pid(struct perf_sched *sched,
tools/perf/builtin-sched.c
4483
static int perf_sched__schedstat_report(struct perf_sched *sched)
tools/perf/builtin-sched.c
4494
sched->tool.schedstat_cpu = perf_sched__process_schedstat;
tools/perf/builtin-sched.c
4495
sched->tool.schedstat_domain = perf_sched__process_schedstat;
tools/perf/builtin-sched.c
4497
session = perf_session__new(&data, &sched->tool);
tools/perf/builtin-sched.c
4535
static int perf_sched__schedstat_diff(struct perf_sched *sched,
tools/perf/builtin-sched.c
454
if (sched->pid_to_task == NULL) {
tools/perf/builtin-sched.c
4563
sched->tool.schedstat_cpu = perf_sched__process_schedstat;
tools/perf/builtin-sched.c
4564
sched->tool.schedstat_domain = perf_sched__process_schedstat;
tools/perf/builtin-sched.c
4568
session[0] = perf_session__new(&data[0], &sched->tool);
tools/perf/builtin-sched.c
457
BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
tools/perf/builtin-sched.c
4585
session[1] = perf_session__new(&data[1], &sched->tool);
tools/perf/builtin-sched.c
460
BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
tools/perf/builtin-sched.c
463
sched->pid_to_task[pid_max++] = NULL;
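register_pid() (builtin-sched.c:454-485) keeps a pid-indexed task table that starts at pid_max entries and grows on demand, NULL-filling the newly exposed tail so unseen pids read as empty. A standalone sketch; the 1024 seed and slot_for() are assumptions, and perf sizes the table from the system pid_max and BUG_ON()s allocation failure rather than checking:

    #include <stdio.h>
    #include <stdlib.h>

    struct task_desc { long nr; };      /* stub */

    static struct task_desc **pid_to_task;
    static long pid_max = 1024;         /* assumed seed size */

    static struct task_desc **slot_for(long pid)
    {
        if (!pid_to_task)
            pid_to_task = calloc(pid_max, sizeof(*pid_to_task));
        if (pid >= pid_max) {
            pid_to_task = realloc(pid_to_task,
                                  (pid + 1) * sizeof(*pid_to_task));
            while (pid >= pid_max)      /* NULL-fill the newly exposed tail */
                pid_to_task[pid_max++] = NULL;
        }
        return &pid_to_task[pid];       /* perf BUG_ON()s allocation failure */
    }

    int main(void)
    {
        struct task_desc t = { .nr = 1 };

        *slot_for(5000) = &t;           /* grows the table past the seed size */
        printf("%ld %p\n", (*slot_for(5000))->nr, (void *)*slot_for(4999));
        return 0;
    }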
tools/perf/builtin-sched.c
4637
static int perf_sched__schedstat_live(struct perf_sched *sched,
tools/perf/builtin-sched.c
466
task = sched->pid_to_task[pid];
tools/perf/builtin-sched.c
4680
err = perf_event__synthesize_schedstat(&(sched->tool),
tools/perf/builtin-sched.c
4702
err = perf_event__synthesize_schedstat(&(sched->tool),
tools/perf/builtin-sched.c
473
task->nr = sched->nr_tasks;
tools/perf/builtin-sched.c
479
add_sched_event_sleep(sched, task, 0);
tools/perf/builtin-sched.c
481
sched->pid_to_task[pid] = task;
tools/perf/builtin-sched.c
482
sched->nr_tasks++;
tools/perf/builtin-sched.c
4822
struct perf_sched sched = {
tools/perf/builtin-sched.c
4823
.cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
tools/perf/builtin-sched.c
4824
.sort_list = LIST_HEAD_INIT(sched.sort_list),
tools/perf/builtin-sched.c
483
sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
tools/perf/builtin-sched.c
484
BUG_ON(!sched->tasks);
tools/perf/builtin-sched.c
4841
OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
tools/perf/builtin-sched.c
4845
OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
tools/perf/builtin-sched.c
4847
OPT_INTEGER('C', "CPU", &sched.profile_cpu,
tools/perf/builtin-sched.c
4849
OPT_BOOLEAN('p', "pids", &sched.skip_merge,
tools/perf/builtin-sched.c
485
sched->tasks[task->nr] = task;
tools/perf/builtin-sched.c
4854
OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
tools/perf/builtin-sched.c
4859
OPT_BOOLEAN(0, "compact", &sched.map.comp,
tools/perf/builtin-sched.c
4861
OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
tools/perf/builtin-sched.c
4863
OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
tools/perf/builtin-sched.c
4865
OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
tools/perf/builtin-sched.c
4867
OPT_STRING(0, "task-name", &sched.map.task_name, "task",
tools/perf/builtin-sched.c
4869
OPT_BOOLEAN(0, "fuzzy-name", &sched.map.fuzzy,
tools/perf/builtin-sched.c
4878
OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
tools/perf/builtin-sched.c
488
printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
tools/perf/builtin-sched.c
4880
OPT_UINTEGER(0, "max-stack", &sched.max_stack,
tools/perf/builtin-sched.c
4884
OPT_BOOLEAN('s', "summary", &sched.summary_only,
tools/perf/builtin-sched.c
4886
OPT_BOOLEAN('S', "with-summary", &sched.summary,
tools/perf/builtin-sched.c
4888
OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
tools/perf/builtin-sched.c
4889
OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
tools/perf/builtin-sched.c
4890
OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
tools/perf/builtin-sched.c
4891
OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
tools/perf/builtin-sched.c
4892
OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
tools/perf/builtin-sched.c
4893
OPT_STRING(0, "time", &sched.time_str, "str",
tools/perf/builtin-sched.c
4895
OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
tools/perf/builtin-sched.c
4901
OPT_BOOLEAN(0, "show-prio", &sched.show_prio, "Show task priority"),
tools/perf/builtin-sched.c
4902
OPT_STRING(0, "prio", &sched.prio_str, "prio",
tools/perf/builtin-sched.c
4904
OPT_BOOLEAN('P', "pre-migrations", &sched.pre_migrations, "Show pre-migration wait time"),
tools/perf/builtin-sched.c
494
static void print_task_traces(struct perf_sched *sched)
tools/perf/builtin-sched.c
4960
perf_tool__init(&sched.tool, /*ordered_events=*/true);
tools/perf/builtin-sched.c
4961
sched.tool.sample = perf_sched__process_tracepoint_sample;
tools/perf/builtin-sched.c
4962
sched.tool.comm = perf_sched__process_comm;
tools/perf/builtin-sched.c
4963
sched.tool.namespaces = perf_event__process_namespaces;
tools/perf/builtin-sched.c
4964
sched.tool.lost = perf_event__process_lost;
tools/perf/builtin-sched.c
4965
sched.tool.fork = perf_sched__process_fork_event;
tools/perf/builtin-sched.c
4982
sched.tp_handler = &lat_ops;
tools/perf/builtin-sched.c
4988
setup_sorting(&sched, latency_options, latency_usage);
tools/perf/builtin-sched.c
4989
ret = perf_sched__lat(&sched);
tools/perf/builtin-sched.c
499
for (i = 0; i < sched->nr_tasks; i++) {
tools/perf/builtin-sched.c
4996
if (sched.map.task_name) {
tools/perf/builtin-sched.c
4997
sched.map.task_names = strlist__new(sched.map.task_name, NULL);
tools/perf/builtin-sched.c
4998
if (sched.map.task_names == NULL) {
tools/perf/builtin-sched.c
500
task = sched->tasks[i];
tools/perf/builtin-sched.c
5005
sched.tp_handler = &map_ops;
tools/perf/builtin-sched.c
5006
setup_sorting(&sched, latency_options, latency_usage);
tools/perf/builtin-sched.c
5007
ret = perf_sched__map(&sched);
tools/perf/builtin-sched.c
5009
sched.tp_handler = &replay_ops;
tools/perf/builtin-sched.c
5015
ret = perf_sched__replay(&sched);
tools/perf/builtin-sched.c
5023
if ((sched.show_wakeups || sched.show_next) &&
tools/perf/builtin-sched.c
5024
sched.summary_only) {
tools/perf/builtin-sched.c
5027
if (sched.show_wakeups)
tools/perf/builtin-sched.c
5029
if (sched.show_next)
tools/perf/builtin-sched.c
5036
ret = perf_sched__timehist(&sched);
tools/perf/builtin-sched.c
5049
return perf_sched__schedstat_record(&sched, argc, argv);
tools/perf/builtin-sched.c
5054
return perf_sched__schedstat_report(&sched);
tools/perf/builtin-sched.c
5059
return perf_sched__schedstat_diff(&sched, argc, argv);
tools/perf/builtin-sched.c
506
static void add_cross_task_wakeups(struct perf_sched *sched)
tools/perf/builtin-sched.c
5061
return perf_sched__schedstat_live(&sched, argc, argv);
tools/perf/builtin-sched.c
511
for (i = 0; i < sched->nr_tasks; i++) {
tools/perf/builtin-sched.c
512
task1 = sched->tasks[i];
tools/perf/builtin-sched.c
514
if (j == sched->nr_tasks)
tools/perf/builtin-sched.c
516
task2 = sched->tasks[j];
tools/perf/builtin-sched.c
517
add_sched_event_wakeup(sched, task1, 0, task2);
tools/perf/builtin-sched.c
521
static void perf_sched__process_event(struct perf_sched *sched,
tools/perf/builtin-sched.c
528
burn_nsecs(sched, atom->duration);
tools/perf/builtin-sched.c
560
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
tools/perf/builtin-sched.c
579
if (sched->force) {
tools/perf/builtin-sched.c
581
limit.rlim_cur += sched->nr_tasks - cur_task;
tools/perf/builtin-sched.c
615
struct perf_sched *sched;
tools/perf/builtin-sched.c
623
struct perf_sched *sched = parms->sched;
tools/perf/builtin-sched.c
636
while (!sched->thread_funcs_exit) {
tools/perf/builtin-sched.c
639
mutex_lock(&sched->start_work_mutex);
tools/perf/builtin-sched.c
640
mutex_unlock(&sched->start_work_mutex);
tools/perf/builtin-sched.c
646
perf_sched__process_event(sched, this_task->atoms[i]);
tools/perf/builtin-sched.c
654
mutex_lock(&sched->work_done_wait_mutex);
tools/perf/builtin-sched.c
655
mutex_unlock(&sched->work_done_wait_mutex);
tools/perf/builtin-sched.c
660
static void create_tasks(struct perf_sched *sched)
tools/perf/builtin-sched.c
661
EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
tools/perf/builtin-sched.c
662
EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)
tools/perf/builtin-sched.c
674
mutex_lock(&sched->start_work_mutex);
tools/perf/builtin-sched.c
675
mutex_lock(&sched->work_done_wait_mutex);
tools/perf/builtin-sched.c
676
for (i = 0; i < sched->nr_tasks; i++) {
tools/perf/builtin-sched.c
679
parms->task = task = sched->tasks[i];
tools/perf/builtin-sched.c
680
parms->sched = sched;
tools/perf/builtin-sched.c
681
parms->fd = self_open_counters(sched, i);
tools/perf/builtin-sched.c
690
static void destroy_tasks(struct perf_sched *sched)
tools/perf/builtin-sched.c
691
UNLOCK_FUNCTION(sched->start_work_mutex)
tools/perf/builtin-sched.c
692
UNLOCK_FUNCTION(sched->work_done_wait_mutex)
tools/perf/builtin-sched.c
698
mutex_unlock(&sched->start_work_mutex);
tools/perf/builtin-sched.c
699
mutex_unlock(&sched->work_done_wait_mutex);
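create_tasks() and thread_func() (builtin-sched.c:636-699) use mutexes as gates: the parent takes start_work_mutex and work_done_wait_mutex before spawning, and each worker's lock/unlock pair on a gate blocks until the parent releases it. A minimal pthread sketch of one such gate; perf wraps its own mutex type and drives two gates per iteration:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t start_gate = PTHREAD_MUTEX_INITIALIZER;

    static void *worker(void *arg)
    {
        pthread_mutex_lock(&start_gate);   /* parks until the parent releases */
        pthread_mutex_unlock(&start_gate);
        printf("worker %ld running\n", (long)arg);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[2];

        pthread_mutex_lock(&start_gate);   /* close the gate before spawning */
        for (long i = 0; i < 2; i++)
            pthread_create(&t[i], NULL, worker, (void *)i);

        /* ... set up shared state while the workers are parked ... */
        pthread_mutex_unlock(&start_gate); /* release everyone at once */

        for (int i = 0; i < 2; i++)
            pthread_join(t[i], NULL);
        return 0;
    }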
tools/perf/builtin-sched.c
701
for (i = 0; i < sched->nr_tasks; i++) {
tools/perf/builtin-sched.c
702
task = sched->tasks[i];
tools/perf/builtin-sched.c
710
static void wait_for_tasks(struct perf_sched *sched)
tools/perf/builtin-sched.c
711
EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
tools/perf/builtin-sched.c
712
EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
tools/perf/builtin-sched.c
718
sched->start_time = get_nsecs();
tools/perf/builtin-sched.c
719
sched->cpu_usage = 0;
tools/perf/builtin-sched.c
720
mutex_unlock(&sched->work_done_wait_mutex);
tools/perf/builtin-sched.c
722
for (i = 0; i < sched->nr_tasks; i++) {
tools/perf/builtin-sched.c
723
task = sched->tasks[i];
tools/perf/builtin-sched.c
728
mutex_lock(&sched->work_done_wait_mutex);
tools/perf/builtin-sched.c
732
mutex_unlock(&sched->start_work_mutex);
tools/perf/builtin-sched.c
734
for (i = 0; i < sched->nr_tasks; i++) {
tools/perf/builtin-sched.c
735
task = sched->tasks[i];
tools/perf/builtin-sched.c
739
sched->cpu_usage += task->cpu_usage;
tools/perf/builtin-sched.c
744
if (!sched->runavg_cpu_usage)
tools/perf/builtin-sched.c
745
sched->runavg_cpu_usage = sched->cpu_usage;
tools/perf/builtin-sched.c
746
sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
tools/perf/builtin-sched.c
748
sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
tools/perf/builtin-sched.c
749
if (!sched->runavg_parent_cpu_usage)
tools/perf/builtin-sched.c
750
sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
tools/perf/builtin-sched.c
751
sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
tools/perf/builtin-sched.c
752
sched->parent_cpu_usage)/sched->replay_repeat;
tools/perf/builtin-sched.c
754
mutex_lock(&sched->start_work_mutex);
tools/perf/builtin-sched.c
756
for (i = 0; i < sched->nr_tasks; i++) {
tools/perf/builtin-sched.c
757
task = sched->tasks[i];
tools/perf/builtin-sched.c
762
static void run_one_test(struct perf_sched *sched)
tools/perf/builtin-sched.c
763
EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
tools/perf/builtin-sched.c
764
EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
tools/perf/builtin-sched.c
769
wait_for_tasks(sched);
tools/perf/builtin-sched.c
773
sched->sum_runtime += delta;
tools/perf/builtin-sched.c
774
sched->nr_runs++;
tools/perf/builtin-sched.c
776
avg_delta = sched->sum_runtime / sched->nr_runs;
tools/perf/builtin-sched.c
781
sched->sum_fluct += fluct;
tools/perf/builtin-sched.c
782
if (!sched->run_avg)
tools/perf/builtin-sched.c
783
sched->run_avg = delta;
tools/perf/builtin-sched.c
784
sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
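run_one_test() and wait_for_tasks() (builtin-sched.c:744-784) smooth run_avg, runavg_cpu_usage and runavg_parent_cpu_usage with the same recurrence: seed with the first sample, then avg' = (avg*(n-1) + sample)/n, an exponentially weighted moving average with smoothing factor 1/n, where n is replay_repeat. As a small helper:

    #include <stdio.h>

    /* Seeding with the first sample and then applying the recurrence
     * gives the same result as perf's seed-then-average sequence. */
    static unsigned long long
    update_runavg(unsigned long long avg, unsigned long long sample,
                  unsigned long long n)
    {
        if (!avg)
            return sample;
        return (avg * (n - 1) + sample) / n;
    }

    int main(void)
    {
        unsigned long long avg = 0, samples[] = { 100, 110, 90, 105 };

        for (int i = 0; i < 4; i++)
            printf("avg = %llu\n", avg = update_runavg(avg, samples[i], 10));
        return 0;
    }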
tools/perf/builtin-sched.c
786
printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);
tools/perf/builtin-sched.c
788
printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);
tools/perf/builtin-sched.c
791
(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);
tools/perf/builtin-sched.c
799
(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
tools/perf/builtin-sched.c
800
(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
tools/perf/builtin-sched.c
805
if (sched->nr_sleep_corrections)
tools/perf/builtin-sched.c
806
printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
tools/perf/builtin-sched.c
807
sched->nr_sleep_corrections = 0;
tools/perf/builtin-sched.c
810
static void test_calibrations(struct perf_sched *sched)
tools/perf/builtin-sched.c
815
burn_nsecs(sched, NSEC_PER_MSEC);
tools/perf/builtin-sched.c
828
replay_wakeup_event(struct perf_sched *sched,
tools/perf/builtin-sched.c
842
waker = register_pid(sched, sample->tid, "<unknown>");
tools/perf/builtin-sched.c
843
wakee = register_pid(sched, pid, comm);
tools/perf/builtin-sched.c
845
add_sched_event_wakeup(sched, waker, sample->time, wakee);
tools/perf/builtin-sched.c
849
static int replay_switch_event(struct perf_sched *sched,
tools/perf/builtin-sched.c
869
timestamp0 = sched->cpu_last_switched[cpu];
tools/perf/builtin-sched.c
883
prev = register_pid(sched, prev_pid, prev_comm);
tools/perf/builtin-sched.c
884
next = register_pid(sched, next_pid, next_comm);
tools/perf/builtin-sched.c
886
sched->cpu_last_switched[cpu] = timestamp;
tools/perf/builtin-sched.c
888
add_sched_event_run(sched, prev, timestamp, delta);
tools/perf/builtin-sched.c
889
add_sched_event_sleep(sched, prev, timestamp);
tools/perf/builtin-sched.c
894
static int replay_fork_event(struct perf_sched *sched,
tools/perf/builtin-sched.c
917
register_pid(sched, thread__tid(parent), thread__comm_str(parent));
tools/perf/builtin-sched.c
918
register_pid(sched, thread__tid(child), thread__comm_str(child));
tools/perf/builtin-trace.c
207
bool sched;
tools/perf/builtin-trace.c
4421
if (trace->sched &&
tools/perf/builtin-trace.c
4936
if (trace->sched)
tools/perf/builtin-trace.c
4997
if (trace->sched)
tools/perf/builtin-trace.c
5428
OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
tools/perf/util/machine.c
3275
if (!machine->sched.text_start) {
tools/perf/util/machine.c
3281
machine->sched.text_start = 1;
tools/perf/util/machine.c
3285
machine->sched.text_start = map__unmap_ip(kmap, sym->start);
tools/perf/util/machine.c
3289
machine->sched.text_end = map__unmap_ip(kmap, sym->start);
tools/perf/util/machine.c
3310
if (machine->sched.text_start == 1)
tools/perf/util/machine.c
3314
if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
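The machine->sched.text_start fragments (util/machine.c:3275-3314) lazily resolve the scheduler text range and reuse the field as a tri-state: 0 means not resolved yet, 1 means the lookup failed, anything else is a real start address. A standalone sketch of that sentinel pattern; resolve_range() fakes the symbol lookup perf performs:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long long text_start, text_end;

    static void resolve_range(void)
    {
        /* pretend the symbols resolved to [0x1000, 0x2000) */
        text_start = 0x1000;
        text_end   = 0x2000;
    }

    static bool in_sched_text(unsigned long long addr)
    {
        if (!text_start) {
            text_start = 1;             /* sentinel: assume failure... */
            resolve_range();            /* ...until the lookup succeeds */
        }
        if (text_start == 1)            /* resolution failed earlier */
            return false;
        return text_start <= addr && addr < text_end;
    }

    int main(void)
    {
        printf("%d %d\n", in_sched_text(0x1800), in_sched_text(0x42));
        return 0;
    }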
tools/perf/util/machine.h
52
} sched, lock, traceiter, trace;
tools/testing/selftests/bpf/progs/xdp_features.c
240
unsigned int drops, int sched, struct xdp_cpumap_stats *xdp_stats)
tools/testing/selftests/x86/fsgsbase.c
599
for (int sched = 0; sched < 2; sched++) {
tools/testing/selftests/x86/fsgsbase.c
600
mov_0_gs(0, !!sched);
tools/testing/selftests/x86/fsgsbase.c
601
mov_0_gs(1, !!sched);
tools/testing/selftests/x86/fsgsbase.c
602
mov_0_gs(0x200000000, !!sched);