Symbol: worker — cross-references across the kernel tree; each entry gives the file path, the source line number within that file, and the matching line of code.
arch/x86/kvm/i8254.c
215
kthread_queue_work(pit->worker, &pit->expired);
arch/x86/kvm/i8254.c
276
kthread_queue_work(pt->worker, &pt->expired);
arch/x86/kvm/i8254.c
753
pit->worker = kthread_run_worker(0, "kvm-pit/%d", pid_nr);
arch/x86/kvm/i8254.c
754
if (IS_ERR(pit->worker))
arch/x86/kvm/i8254.c
796
kthread_destroy_worker(pit->worker);
arch/x86/kvm/i8254.c
813
kthread_destroy_worker(pit->worker);
arch/x86/kvm/i8254.h
51
struct kthread_worker *worker;
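The KVM PIT hits above trace the full kthread_worker lifecycle: create the worker thread, queue work onto it, tear it down. A minimal sketch of that pattern, assuming the kthread_run_worker() signature shown in these hits; the my_dev structure and function names are hypothetical, not from the tree:

#include <linux/kthread.h>

/* hypothetical driver state, not from the tree */
struct my_dev {
	struct kthread_worker *worker;
	struct kthread_work expired;
};

static void my_dev_work_fn(struct kthread_work *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, expired);
	/* deferred processing runs on the dedicated kernel thread */
}

static int my_dev_start(struct my_dev *dev)
{
	/* assumed signature, per the hits above */
	dev->worker = kthread_run_worker(0, "my-dev");
	if (IS_ERR(dev->worker))
		return PTR_ERR(dev->worker);
	kthread_init_work(&dev->expired, my_dev_work_fn);
	return 0;
}

/* hot path:  kthread_queue_work(dev->worker, &dev->expired); */
/* teardown:  kthread_destroy_worker(dev->worker); */

The same create/queue/destroy triple recurs in the drm, msm, wave5, vdpa_sim and erofs hits below.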
drivers/block/drbd/drbd_int.h
670
struct drbd_thread worker;
drivers/block/drbd/drbd_int.h
731
struct work_struct worker;
drivers/block/drbd/drbd_main.c
2209
struct work_struct worker;
drivers/block/drbd/drbd_main.c
2217
struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
drivers/block/drbd/drbd_main.c
2280
queue_work(retry.wq, &retry.worker);
drivers/block/drbd/drbd_main.c
2502
connection->worker.reset_cpu_mask = 1;
drivers/block/drbd/drbd_main.c
2588
drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
drivers/block/drbd/drbd_main.c
2589
connection->worker.connection = connection;
drivers/block/drbd/drbd_main.c
2644
INIT_WORK(&device->submit.worker, do_submit);
drivers/block/drbd/drbd_main.c
2871
INIT_WORK(&retry.worker, do_retry);
drivers/block/drbd/drbd_main.c
3482
D_ASSERT(device, current == peer_device->connection->worker.task);
drivers/block/drbd/drbd_main.c
3529
D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
drivers/block/drbd/drbd_nl.c
1366
drbd_thread_start(&connection->worker);
drivers/block/drbd/drbd_nl.c
1382
drbd_thread_stop(&connection->worker);
drivers/block/drbd/drbd_nl.c
368
if (current == connection->worker.task)
drivers/block/drbd/drbd_nl.c
397
if (current == connection->worker.task)
drivers/block/drbd/drbd_nl.c
4375
if (get_t_state(&connection->worker) == RUNNING)
drivers/block/drbd/drbd_nl.c
4437
drbd_thread_stop(&connection->worker);
drivers/block/drbd/drbd_req.c
1192
queue_work(device->submit.wq, &device->submit.worker);
drivers/block/drbd/drbd_req.c
1514
struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
drivers/block/drbd/drbd_state.c
1529
D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
drivers/block/drbd/drbd_state.c
625
D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
drivers/block/drbd/drbd_worker.c
1768
if (current == connection->worker.task) {
drivers/block/drbd/drbd_worker.c
2127
if (get_t_state(&connection->worker) != RUNNING)
drivers/block/loop.c
1946
static void loop_process_work(struct loop_worker *worker,
drivers/block/loop.c
1971
if (worker && !work_pending(&worker->work)) {
drivers/block/loop.c
1972
worker->last_ran_at = jiffies;
drivers/block/loop.c
1973
list_add_tail(&worker->idle_list, &lo->idle_worker_list);
drivers/block/loop.c
1982
struct loop_worker *worker =
drivers/block/loop.c
1984
loop_process_work(worker, &worker->cmd_list, worker->lo);
drivers/block/loop.c
799
struct loop_worker *cur_worker, *worker = NULL;
drivers/block/loop.c
814
worker = cur_worker;
drivers/block/loop.c
822
if (worker)
drivers/block/loop.c
825
worker = kzalloc_obj(struct loop_worker, GFP_NOWAIT);
drivers/block/loop.c
830
if (!worker) {
drivers/block/loop.c
838
worker->blkcg_css = cmd->blkcg_css;
drivers/block/loop.c
839
css_get(worker->blkcg_css);
drivers/block/loop.c
840
INIT_WORK(&worker->work, loop_workfn);
drivers/block/loop.c
841
INIT_LIST_HEAD(&worker->cmd_list);
drivers/block/loop.c
842
INIT_LIST_HEAD(&worker->idle_list);
drivers/block/loop.c
843
worker->lo = lo;
drivers/block/loop.c
844
rb_link_node(&worker->rb_node, parent, node);
drivers/block/loop.c
845
rb_insert_color(&worker->rb_node, &lo->worker_tree);
drivers/block/loop.c
847
if (worker) {
drivers/block/loop.c
853
if (!list_empty(&worker->idle_list))
drivers/block/loop.c
854
list_del_init(&worker->idle_list);
drivers/block/loop.c
855
work = &worker->work;
drivers/block/loop.c
856
cmd_list = &worker->cmd_list;
drivers/block/loop.c
873
struct loop_worker *pos, *worker;
drivers/block/loop.c
876
list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
drivers/block/loop.c
879
time_is_after_jiffies(worker->last_ran_at +
drivers/block/loop.c
882
list_del(&worker->idle_list);
drivers/block/loop.c
883
rb_erase(&worker->rb_node, &lo->worker_tree);
drivers/block/loop.c
884
css_put(worker->blkcg_css);
drivers/block/loop.c
885
kfree(worker);
drivers/crypto/caam/caamrng.c
143
worker);
drivers/crypto/caam/caamrng.c
161
schedule_work(&ctx->worker);
drivers/crypto/caam/caamrng.c
170
flush_work(&ctx->worker);
drivers/crypto/caam/caamrng.c
241
INIT_WORK(&ctx->worker, caam_rng_worker);
drivers/crypto/caam/caamrng.c
43
struct work_struct worker;
drivers/gpio/gpio-mpsse.c
370
struct mpsse_worker *worker, *worker_tmp;
drivers/gpio/gpio-mpsse.c
374
list_for_each_entry_safe(worker, worker_tmp,
drivers/gpio/gpio-mpsse.c
377
if (worker == my_worker)
drivers/gpio/gpio-mpsse.c
380
list_del(&worker->list);
drivers/gpio/gpio-mpsse.c
383
atomic_set(&worker->cancelled, 1);
drivers/gpio/gpio-mpsse.c
385
INIT_LIST_HEAD(&worker->destroy);
drivers/gpio/gpio-mpsse.c
386
list_add(&worker->destroy, &destructors);
drivers/gpio/gpio-mpsse.c
390
list_for_each_entry_safe(worker, worker_tmp,
drivers/gpio/gpio-mpsse.c
392
list_del(&worker->destroy);
drivers/gpio/gpio-mpsse.c
393
cancel_work_sync(&worker->work);
drivers/gpio/gpio-mpsse.c
394
kfree(worker);
drivers/gpio/gpio-mpsse.c
493
struct mpsse_worker *worker;
drivers/gpio/gpio-mpsse.c
505
list_for_each_entry(worker, &priv->workers, list)
drivers/gpio/gpio-mpsse.c
506
atomic_set(&worker->cancelled, 1);
drivers/gpio/gpio-mpsse.c
511
struct mpsse_worker *worker;
drivers/gpio/gpio-mpsse.c
521
worker = kzalloc(sizeof(*worker), GFP_NOWAIT);
drivers/gpio/gpio-mpsse.c
522
if (!worker)
drivers/gpio/gpio-mpsse.c
525
worker->priv = priv;
drivers/gpio/gpio-mpsse.c
526
INIT_LIST_HEAD(&worker->list);
drivers/gpio/gpio-mpsse.c
527
INIT_WORK(&worker->work, gpio_mpsse_poll);
drivers/gpio/gpio-mpsse.c
528
schedule_work(&worker->work);
drivers/gpio/gpio-mpsse.c
531
list_add(&worker->list, &priv->workers);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2372
struct send_exception_work_handler_workarea worker;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2374
INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2376
worker.p = p;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2377
worker.queue_id = queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2378
worker.error_reason = error_reason;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2380
schedule_work(&worker.work);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2381
flush_work(&worker.work);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2382
destroy_work_on_stack(&worker.work);
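The kfd_process.c hits above show the on-stack work_struct pattern for running a handler synchronously in workqueue context; all four steps (init, schedule, flush, destroy) appear in the listing. A minimal sketch under the same API; struct onstack_ctx and its names are hypothetical:

#include <linux/workqueue.h>

struct onstack_ctx {
	struct work_struct work;
	int arg;			/* data handed to the handler */
};

static void onstack_handler(struct work_struct *w)
{
	struct onstack_ctx *ctx = container_of(w, struct onstack_ctx, work);
	/* ctx lives on the caller's stack; the flush below keeps it alive */
}

static void run_on_workqueue(int arg)
{
	struct onstack_ctx ctx = { .arg = arg };

	INIT_WORK_ONSTACK(&ctx.work, onstack_handler);
	schedule_work(&ctx.work);
	flush_work(&ctx.work);		/* wait before the stack frame is gone */
	destroy_work_on_stack(&ctx.work);
}

dm-thin's pool_work entries below use the same INIT_WORK_ONSTACK/destroy_work_on_stack pairing.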
drivers/gpu/drm/drm_flip_work.c
104
struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
drivers/gpu/drm/drm_flip_work.c
144
INIT_WORK(&work->worker, flip_worker);
drivers/gpu/drm/drm_flip_work.c
98
queue_work(wq, &work->worker);
drivers/gpu/drm/drm_internal.h
122
kthread_flush_worker(vblank->worker);
drivers/gpu/drm/drm_internal.h
127
if (vblank->worker)
drivers/gpu/drm/drm_internal.h
128
kthread_destroy_worker(vblank->worker);
drivers/gpu/drm/drm_vblank_work.c
152
ret = kthread_queue_work(vblank->worker, &work->base);
drivers/gpu/drm/drm_vblank_work.c
255
kthread_flush_worker(vblank->worker);
drivers/gpu/drm/drm_vblank_work.c
278
struct kthread_worker *worker;
drivers/gpu/drm/drm_vblank_work.c
282
worker = kthread_run_worker(0, "card%d-crtc%d",
drivers/gpu/drm/drm_vblank_work.c
285
if (IS_ERR(worker))
drivers/gpu/drm/drm_vblank_work.c
286
return PTR_ERR(worker);
drivers/gpu/drm/drm_vblank_work.c
288
vblank->worker = worker;
drivers/gpu/drm/drm_vblank_work.c
290
sched_set_fifo(worker->task);
drivers/gpu/drm/drm_vblank_work.c
62
kthread_queue_work(vblank->worker, &work->base);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
184
struct kthread_worker *worker;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
367
struct kthread_worker *worker;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
372
worker = kthread_run_worker(0, "igt/parallel:%s",
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
374
if (IS_ERR(worker)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
375
err = PTR_ERR(worker);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
379
data[n].worker = worker;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
395
kthread_queue_work(data[n].worker, &data[n].work);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
422
if (data[n].worker)
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
423
kthread_destroy_worker(data[n].worker);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3560
struct kthread_worker *worker[I915_NUM_ENGINES] = {};
drivers/gpu/drm/i915/gt/selftest_execlists.c
3580
worker[id] = kthread_run_worker(0, "igt/smoke:%d", id);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3581
if (IS_ERR(worker[id])) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
3582
err = PTR_ERR(worker[id]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3587
kthread_queue_work(worker[id], &arg[id].work);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3592
if (IS_ERR_OR_NULL(worker[id]))
drivers/gpu/drm/i915/gt/selftest_execlists.c
3601
kthread_destroy_worker(worker[id]);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1014
struct kthread_worker *worker;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1028
worker = kthread_run_worker(0, "igt/%s",
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1030
if (IS_ERR(worker)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1031
err = PTR_ERR(worker);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1037
threads[tmp].worker = worker;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1040
kthread_queue_work(threads[tmp].worker,
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1191
if (!threads[tmp].worker)
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1204
kthread_destroy_worker(threads[tmp].worker);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
854
struct kthread_worker *worker;
drivers/gpu/drm/i915/gt/selftest_slpc.c
20
struct kthread_worker *worker;
drivers/gpu/drm/i915/gt/selftest_slpc.c
507
threads[i].worker = kthread_run_worker(0, "igt/slpc_parallel:%d", gt->info.id);
drivers/gpu/drm/i915/gt/selftest_slpc.c
509
if (IS_ERR(threads[i].worker)) {
drivers/gpu/drm/i915/gt/selftest_slpc.c
510
ret = PTR_ERR(threads[i].worker);
drivers/gpu/drm/i915/gt/selftest_slpc.c
516
kthread_queue_work(threads[i].worker, &threads[i].work);
drivers/gpu/drm/i915/gt/selftest_slpc.c
522
if (IS_ERR_OR_NULL(threads[i].worker))
drivers/gpu/drm/i915/gt/selftest_slpc.c
531
kthread_destroy_worker(threads[i].worker);
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
1202
container_of(w, struct intel_guc_ct, requests.worker);
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
1241
queue_work(system_unbound_wq, &ct->requests.worker);
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
151
INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
84
struct work_struct worker; /* handler for incoming requests */
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1747
flush_work(&guc->ct.requests.worker);
drivers/gpu/drm/i915/selftests/i915_request.c
1459
struct kthread_worker *worker;
drivers/gpu/drm/i915/selftests/i915_request.c
1647
struct kthread_worker *worker;
drivers/gpu/drm/i915/selftests/i915_request.c
1649
worker = kthread_run_worker(0, "igt/parallel:%s",
drivers/gpu/drm/i915/selftests/i915_request.c
1651
if (IS_ERR(worker)) {
drivers/gpu/drm/i915/selftests/i915_request.c
1652
err = PTR_ERR(worker);
drivers/gpu/drm/i915/selftests/i915_request.c
1656
threads[idx].worker = worker;
drivers/gpu/drm/i915/selftests/i915_request.c
1661
kthread_queue_work(worker, &threads[idx].work);
drivers/gpu/drm/i915/selftests/i915_request.c
1669
if (!threads[idx].worker)
drivers/gpu/drm/i915/selftests/i915_request.c
1677
kthread_destroy_worker(threads[idx++].worker);
drivers/gpu/drm/i915/selftests/i915_request.c
1806
struct kthread_worker *worker;
drivers/gpu/drm/i915/selftests/i915_request.c
1808
worker = kthread_run_worker(0, "igt/%d.%d", idx, n);
drivers/gpu/drm/i915/selftests/i915_request.c
1809
if (IS_ERR(worker)) {
drivers/gpu/drm/i915/selftests/i915_request.c
1810
ret = PTR_ERR(worker);
drivers/gpu/drm/i915/selftests/i915_request.c
1814
threads[i].worker = worker;
drivers/gpu/drm/i915/selftests/i915_request.c
1819
kthread_queue_work(worker, &threads[i].work);
drivers/gpu/drm/i915/selftests/i915_request.c
1836
if (!threads[i].worker)
drivers/gpu/drm/i915/selftests/i915_request.c
1845
kthread_destroy_worker(threads[i].worker);
drivers/gpu/drm/i915/selftests/i915_request.c
2957
struct kthread_worker *worker;
drivers/gpu/drm/i915/selftests/i915_request.c
304
struct kthread_worker *worker;
drivers/gpu/drm/i915/selftests/i915_request.c
3215
struct kthread_worker *worker;
drivers/gpu/drm/i915/selftests/i915_request.c
3221
worker = kthread_run_worker(0, "igt:%s",
drivers/gpu/drm/i915/selftests/i915_request.c
3223
if (IS_ERR(worker)) {
drivers/gpu/drm/i915/selftests/i915_request.c
3224
err = PTR_ERR(worker);
drivers/gpu/drm/i915/selftests/i915_request.c
3228
engines[idx].worker = worker;
drivers/gpu/drm/i915/selftests/i915_request.c
3234
kthread_queue_work(worker, &engines[idx].work);
drivers/gpu/drm/i915/selftests/i915_request.c
3242
if (!engines[idx].worker)
drivers/gpu/drm/i915/selftests/i915_request.c
3252
kthread_destroy_worker(engines[idx].worker);
drivers/gpu/drm/i915/selftests/i915_request.c
494
struct kthread_worker *worker;
drivers/gpu/drm/i915/selftests/i915_request.c
496
worker = kthread_run_worker(0, "igt/%d", n);
drivers/gpu/drm/i915/selftests/i915_request.c
497
if (IS_ERR(worker)) {
drivers/gpu/drm/i915/selftests/i915_request.c
498
ret = PTR_ERR(worker);
drivers/gpu/drm/i915/selftests/i915_request.c
503
threads[n].worker = worker;
drivers/gpu/drm/i915/selftests/i915_request.c
510
kthread_queue_work(worker, &threads[n].work);
drivers/gpu/drm/i915/selftests/i915_request.c
524
kthread_destroy_worker(threads[n].worker);
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
1260
kthread_queue_work(gpu->worker, &gpu->recover_work);
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
199
kthread_queue_work(gpu->worker, &gpu->recover_work);
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
91
kthread_queue_work(gpu->worker, &gpu->recover_work);
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
34
kthread_queue_work(gpu->worker, &gpu->recover_work);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1911
kthread_queue_work(gpu->worker, &gpu->recover_work);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1931
kthread_queue_work(gpu->worker, &gpu->recover_work);
drivers/gpu/drm/msm/adreno/a6xx_preempt.c
181
kthread_queue_work(gpu->worker, &gpu->recover_work);
drivers/gpu/drm/msm/adreno/a6xx_preempt.c
99
kthread_queue_work(gpu->worker, &gpu->recover_work);
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
1045
kthread_queue_work(gpu->worker, &gpu->recover_work);
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
1065
kthread_queue_work(gpu->worker, &gpu->recover_work);
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
949
kthread_queue_work(gpu->worker, &gpu->recover_work);
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
751
kthread_queue_work(priv->kms->event_thread[crtc_id].worker, &fevent->work);
drivers/gpu/drm/msm/msm_atomic.c
118
timer->worker = kthread_run_worker(0, "atomic-worker-%d", crtc_idx);
drivers/gpu/drm/msm/msm_atomic.c
119
if (IS_ERR(timer->worker)) {
drivers/gpu/drm/msm/msm_atomic.c
120
int ret = PTR_ERR(timer->worker);
drivers/gpu/drm/msm/msm_atomic.c
121
timer->worker = NULL;
drivers/gpu/drm/msm/msm_atomic.c
124
sched_set_fifo(timer->worker->task);
drivers/gpu/drm/msm/msm_atomic.c
126
msm_hrtimer_work_init(&timer->work, timer->worker,
drivers/gpu/drm/msm/msm_atomic.c
135
if (timer->worker)
drivers/gpu/drm/msm/msm_atomic.c
136
kthread_destroy_worker(timer->worker);
drivers/gpu/drm/msm/msm_drv.h
489
struct kthread_worker *worker;
drivers/gpu/drm/msm/msm_drv.h
496
struct kthread_worker *worker,
drivers/gpu/drm/msm/msm_fence.c
156
kthread_queue_work(fctx2gpu(fctx)->worker,
drivers/gpu/drm/msm/msm_fence.c
24
kthread_queue_work(fctx2gpu(fctx)->worker, &fctx->deadline_work);
drivers/gpu/drm/msm/msm_gpu.c
1144
if (gpu->worker) {
drivers/gpu/drm/msm/msm_gpu.c
1145
kthread_destroy_worker(gpu->worker);
drivers/gpu/drm/msm/msm_gpu.c
664
kthread_queue_work(gpu->worker, &gpu->recover_work);
drivers/gpu/drm/msm/msm_gpu.c
874
kthread_queue_work(gpu->worker, &gpu->retire_work);
drivers/gpu/drm/msm/msm_gpu.c
992
gpu->worker = kthread_run_worker(0, "gpu-worker");
drivers/gpu/drm/msm/msm_gpu.c
993
if (IS_ERR(gpu->worker)) {
drivers/gpu/drm/msm/msm_gpu.c
994
ret = PTR_ERR(gpu->worker);
drivers/gpu/drm/msm/msm_gpu.c
995
gpu->worker = NULL;
drivers/gpu/drm/msm/msm_gpu.c
999
sched_set_fifo_low(gpu->worker->task);
drivers/gpu/drm/msm/msm_gpu.h
255
struct kthread_worker *worker;
drivers/gpu/drm/msm/msm_gpu_devfreq.c
199
msm_hrtimer_work_init(&df->boost_work, gpu->worker, msm_devfreq_boost_work,
drivers/gpu/drm/msm/msm_gpu_devfreq.c
201
msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work,
drivers/gpu/drm/msm/msm_io_utils.c
120
kthread_queue_work(work->worker, &work->work);
drivers/gpu/drm/msm/msm_io_utils.c
133
struct kthread_worker *worker,
drivers/gpu/drm/msm/msm_io_utils.c
139
work->worker = worker;
drivers/gpu/drm/msm/msm_kms.c
247
if (kms->event_thread[i].worker)
drivers/gpu/drm/msm/msm_kms.c
248
kthread_destroy_worker(kms->event_thread[i].worker);
drivers/gpu/drm/msm/msm_kms.c
309
ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
drivers/gpu/drm/msm/msm_kms.c
310
if (IS_ERR(ev_thread->worker)) {
drivers/gpu/drm/msm/msm_kms.c
311
ret = PTR_ERR(ev_thread->worker);
drivers/gpu/drm/msm/msm_kms.c
313
ev_thread->worker = NULL;
drivers/gpu/drm/msm/msm_kms.c
317
sched_set_fifo(ev_thread->worker->task);
drivers/gpu/drm/msm/msm_kms.h
127
struct kthread_worker *worker;
drivers/gpu/drm/msm/msm_kms.h
135
struct kthread_worker *worker;
drivers/gpu/drm/nouveau/nouveau_drm.c
177
list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
drivers/gpu/drm/nouveau/nouveau_drm.c
200
list_add_tail(&work->head, &cli->worker);
drivers/gpu/drm/nouveau/nouveau_drm.c
217
WARN_ON(!list_empty(&cli->worker));
drivers/gpu/drm/nouveau/nouveau_drm.c
260
INIT_LIST_HEAD(&cli->worker);
drivers/gpu/drm/nouveau/nouveau_drv.h
115
struct list_head worker;
drivers/gpu/drm/xe/xe_gt.c
655
if (disable_work_sync(&gt->reset.worker))
drivers/gpu/drm/xe/xe_gt.c
673
INIT_WORK(&gt->reset.worker, gt_reset_worker);
drivers/gpu/drm/xe/xe_gt.c
868
struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
drivers/gpu/drm/xe/xe_gt.c
945
if (!queue_work(gt->ordered_wq, &gt->reset.worker))
drivers/gpu/drm/xe/xe_gt.h
85
return flush_work(&gt->reset.worker);
drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
2087
struct xe_gt *gt = container_of(w, struct xe_gt, sriov.pf.control.worker);
drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
2096
cancel_work_sync(&gt->sriov.pf.control.worker);
drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
2131
INIT_WORK(&gt->sriov.pf.control.worker, control_worker_func);
drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
362
queue_work(xe->sriov.wq, &gt->sriov.pf.control.worker);
drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
128
struct work_struct worker;
drivers/gpu/drm/xe/xe_gt_sriov_vf.c
1400
queue_work(gt->ordered_wq, &gt->sriov.vf.migration.worker);
drivers/gpu/drm/xe/xe_gt_sriov_vf.c
1406
sriov.vf.migration.worker);
drivers/gpu/drm/xe/xe_gt_sriov_vf.c
1419
cancel_work_sync(&gt->sriov.vf.migration.worker);
drivers/gpu/drm/xe/xe_gt_sriov_vf.c
1443
INIT_WORK(&gt->sriov.vf.migration.worker, migration_worker_func);
drivers/gpu/drm/xe/xe_gt_sriov_vf.c
859
started = queue_work(gt->ordered_wq, &gt->sriov.vf.migration.worker);
drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
50
struct work_struct worker;
drivers/gpu/drm/xe/xe_gt_types.h
199
struct work_struct worker;
drivers/gpu/drm/xe/xe_guc_ct.c
2169
queue_work(system_unbound_wq, &(ct)->dead.worker);
drivers/gpu/drm/xe/xe_guc_ct.c
2205
struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
drivers/gpu/drm/xe/xe_guc_ct.c
647
queue_work(system_unbound_wq, &ct->dead.worker);
drivers/gpu/drm/xe/xe_guc_ct.c
88
cancel_work_sync(&ct->dead.worker);
drivers/gpu/drm/xe/xe_guc_ct.c
94
INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
drivers/gpu/drm/xe/xe_guc_ct_types.h
102
struct work_struct worker;
drivers/gpu/drm/xe/xe_guc_relay.c
354
INIT_WORK(&relay->worker, relays_worker_fn);
drivers/gpu/drm/xe/xe_guc_relay.c
791
queue_work(relay_to_xe(relay)->sriov.wq, &relay->worker);
drivers/gpu/drm/xe/xe_guc_relay.c
796
struct xe_guc_relay *relay = container_of(w, struct xe_guc_relay, worker);
drivers/gpu/drm/xe/xe_guc_relay_types.h
22
struct work_struct worker;
drivers/gpu/drm/xe/xe_pagefault.c
253
container_of(w, typeof(*pf_queue), worker);
drivers/gpu/drm/xe/xe_pagefault.c
322
INIT_WORK(&pf_queue->worker, xe_pagefault_queue_work);
drivers/gpu/drm/xe/xe_pagefault.c
441
queue_work(xe->usm.pf_wq, &pf_queue->worker);
drivers/gpu/drm/xe/xe_pagefault_types.h
133
struct work_struct worker;
drivers/gpu/drm/xe/xe_sync.c
101
INIT_WORK(&ufence->worker, user_fence_worker);
drivers/gpu/drm/xe/xe_sync.c
102
queue_work(ufence->xe->ordered_wq, &ufence->worker);
drivers/gpu/drm/xe/xe_sync.c
26
struct work_struct worker;
drivers/gpu/drm/xe/xe_sync.c
78
struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
drivers/hid/hid-bigbenff.c
185
struct work_struct worker;
drivers/hid/hid-bigbenff.c
194
schedule_work(&bigben->worker);
drivers/hid/hid-bigbenff.c
201
struct bigben_device, worker);
drivers/hid/hid-bigbenff.c
365
cancel_work_sync(&bigben->worker);
drivers/hid/hid-bigbenff.c
414
INIT_WORK(&bigben->worker, bigben_worker);
drivers/hid/hid-wiimote-core.c
101
schedule_work(&wdata->queue.worker);
drivers/hid/hid-wiimote-core.c
1748
INIT_WORK(&wdata->queue.worker, wiimote_queue_worker);
drivers/hid/hid-wiimote-core.c
1782
cancel_work_sync(&wdata->queue.worker);
drivers/hid/hid-wiimote-core.c
44
worker);
drivers/hid/hid-wiimote.h
110
struct work_struct worker;
drivers/hid/uhid.c
540
schedule_work(&uhid->worker);
drivers/hid/uhid.c
584
cancel_work_sync(&uhid->worker);
drivers/hid/uhid.c
63
struct work_struct worker;
drivers/hid/uhid.c
649
INIT_WORK(&uhid->worker, uhid_device_add_worker);
drivers/hid/uhid.c
70
struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
drivers/i2c/i2c-slave-testunit.c
127
queue_delayed_work(system_long_wq, &tu->worker,
drivers/i2c/i2c-slave-testunit.c
170
struct testunit_data *tu = container_of(work, struct testunit_data, worker.work);
drivers/i2c/i2c-slave-testunit.c
247
INIT_DELAYED_WORK(&tu->worker, i2c_slave_testunit_work);
drivers/i2c/i2c-slave-testunit.c
268
cancel_delayed_work_sync(&tu->worker);
drivers/i2c/i2c-slave-testunit.c
50
struct delayed_work worker;
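The i2c-slave-testunit hits above are the standard delayed_work pattern; note that container_of() goes through worker.work because delayed_work embeds a work_struct. A sketch with hypothetical names:

#include <linux/workqueue.h>

struct my_unit {
	struct delayed_work dwork;
};

static void my_unit_fn(struct work_struct *work)
{
	/* delayed_work embeds a work_struct, hence the .work member */
	struct my_unit *tu = container_of(work, struct my_unit, dwork.work);
	/* ... */
}

/* setup:    INIT_DELAYED_WORK(&tu->dwork, my_unit_fn);
 * schedule: queue_delayed_work(system_long_wq, &tu->dwork, HZ);
 * teardown: cancel_delayed_work_sync(&tu->dwork);
 */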
drivers/macintosh/ams/ams-core.c
185
INIT_WORK(&ams_info.worker, ams_worker);
drivers/macintosh/ams/ams-core.c
216
flush_work(&ams_info.worker);
drivers/macintosh/ams/ams-core.c
65
schedule_work(&ams_info.worker);
drivers/macintosh/ams/ams.h
37
struct work_struct worker;
drivers/md/dm-clone-target.c
1276
struct clone *clone = container_of(work, typeof(*clone), worker);
drivers/md/dm-clone-target.c
131
struct work_struct worker;
drivers/md/dm-clone-target.c
1872
INIT_WORK(&clone->worker, do_worker);
drivers/md/dm-clone-target.c
251
queue_work(clone->wq, &clone->worker);
drivers/md/dm-delay.c
174
if (dc->worker)
drivers/md/dm-delay.c
175
kthread_stop(dc->worker);
drivers/md/dm-delay.c
284
dc->worker = kthread_run(&flush_worker_fn, dc, "dm-delay-flush-worker");
drivers/md/dm-delay.c
285
if (IS_ERR(dc->worker)) {
drivers/md/dm-delay.c
286
ret = PTR_ERR(dc->worker);
drivers/md/dm-delay.c
287
dc->worker = NULL;
drivers/md/dm-delay.c
337
wake_up_process(dc->worker);
drivers/md/dm-delay.c
39
struct task_struct *worker;
drivers/md/dm-delay.c
71
return !!dc->worker;
drivers/md/dm-era-target.c
1184
struct work_struct worker;
drivers/md/dm-era-target.c
1242
queue_work(era->wq, &era->worker);
drivers/md/dm-era-target.c
1358
struct era *era = container_of(ws, struct era, worker);
drivers/md/dm-era-target.c
1538
INIT_WORK(&era->worker, do_work);
drivers/md/dm-thin.c
2416
struct pool *pool = container_of(ws, struct pool, worker);
drivers/md/dm-thin.c
2463
struct work_struct worker;
drivers/md/dm-thin.c
2469
return container_of(ws, struct pool_work, worker);
drivers/md/dm-thin.c
2480
INIT_WORK_ONSTACK(&pw->worker, fn);
drivers/md/dm-thin.c
2482
queue_work(pool->wq, &pw->worker);
drivers/md/dm-thin.c
2484
destroy_work_on_stack(&pw->worker);
drivers/md/dm-thin.c
253
struct work_struct worker;
drivers/md/dm-thin.c
2994
INIT_WORK(&pool->worker, do_worker);
drivers/md/dm-thin.c
438
queue_work(pool->wq, &pool->worker);
drivers/md/raid5.c
6664
struct r5worker *worker,
drivers/md/raid5.c
6715
struct r5worker *worker = container_of(work, struct r5worker, work);
drivers/md/raid5.c
6716
struct r5worker_group *group = worker->group;
drivers/md/raid5.c
6731
released = release_stripe_list(conf, worker->temp_inactive_list);
drivers/md/raid5.c
6733
batch_size = handle_active_stripes(conf, group_id, worker,
drivers/md/raid5.c
6734
worker->temp_inactive_list);
drivers/md/raid5.c
6735
worker->working = false;
drivers/md/raid5.c
7288
struct r5worker *worker = group->workers + j;
drivers/md/raid5.c
7289
worker->group = group;
drivers/md/raid5.c
7290
INIT_WORK(&worker->work, raid5_do_work);
drivers/md/raid5.c
7293
INIT_LIST_HEAD(worker->temp_inactive_list + k);
drivers/media/platform/chips-media/wave5/wave5-vpu.c
154
kthread_queue_work(dev->worker, &dev->work);
drivers/media/platform/chips-media/wave5/wave5-vpu.c
345
dev->worker = kthread_run_worker(0, "vpu_irq_thread");
drivers/media/platform/chips-media/wave5/wave5-vpu.c
346
if (IS_ERR(dev->worker)) {
drivers/media/platform/chips-media/wave5/wave5-vpu.c
348
ret = PTR_ERR(dev->worker);
drivers/media/platform/chips-media/wave5/wave5-vpu.c
412
kthread_destroy_worker(dev->worker);
drivers/media/platform/chips-media/wave5/wave5-vpu.c
445
kthread_destroy_worker(dev->worker);
drivers/media/platform/chips-media/wave5/wave5-vpuapi.h
766
struct kthread_worker *worker;
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
202
INIT_WORK(&ctx->decode_work, ctx->dev->vdec_pdata->worker);
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
114
void (*worker)(struct work_struct *work);
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c
617
.worker = mtk_vdec_worker,
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
926
.worker = mtk_vdec_worker,
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
944
.worker = mtk_vdec_worker,
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
961
.worker = mtk_vdec_worker,
drivers/media/usb/hdpvr/hdpvr-core.c
425
flush_work(&dev->worker);
drivers/media/usb/hdpvr/hdpvr-video.c
1135
flush_work(&dev->worker);
drivers/media/usb/hdpvr/hdpvr-video.c
1173
INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
drivers/media/usb/hdpvr/hdpvr-video.c
252
worker);
drivers/media/usb/hdpvr/hdpvr-video.c
316
schedule_work(&dev->worker);
drivers/media/usb/hdpvr/hdpvr-video.c
348
flush_work(&dev->worker);
drivers/media/usb/hdpvr/hdpvr.h
106
struct work_struct worker;
drivers/net/wireguard/device.h
30
struct multicore_worker __percpu *worker;
drivers/net/wireguard/queueing.c
13
struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
drivers/net/wireguard/queueing.c
15
if (!worker)
drivers/net/wireguard/queueing.c
19
per_cpu_ptr(worker, cpu)->ptr = ptr;
drivers/net/wireguard/queueing.c
20
INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
drivers/net/wireguard/queueing.c
22
return worker;
drivers/net/wireguard/queueing.c
35
queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
drivers/net/wireguard/queueing.c
36
if (!queue->worker) {
drivers/net/wireguard/queueing.c
45
free_percpu(queue->worker);
drivers/net/wireguard/queueing.h
171
queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
drivers/net/wireguard/receive.c
571
&per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
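The wireguard hits above allocate one work item per possible CPU and queue each on its own CPU. A sketch of that per-CPU worker allocation, assuming a structure shaped like wireguard's multicore_worker; the mc_worker name is hypothetical:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

struct mc_worker {
	void *ptr;			/* back-pointer handed to the handler */
	struct work_struct work;
};

static struct mc_worker __percpu *mc_worker_alloc(work_func_t fn, void *ptr)
{
	int cpu;
	struct mc_worker __percpu *worker = alloc_percpu(struct mc_worker);

	if (!worker)
		return NULL;
	for_each_possible_cpu(cpu) {
		per_cpu_ptr(worker, cpu)->ptr = ptr;
		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, fn);
	}
	return worker;
}

/* queue:  queue_work_on(cpu, wq, &per_cpu_ptr(worker, cpu)->work); */
/* free:   free_percpu(worker); */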
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
2195
static void brcmf_fws_dequeue_worker(struct work_struct *worker)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
2205
fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
772
static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
777
msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
drivers/net/wireless/mediatek/mt76/sdio.c
491
static void mt76s_tx_status_data(struct mt76_worker *worker)
drivers/net/wireless/mediatek/mt76/sdio.c
498
sdio = container_of(worker, struct mt76_sdio, stat_worker);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1271
if (md_ctrl->txq[i].worker) {
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1272
destroy_workqueue(md_ctrl->txq[i].worker);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1273
md_ctrl->txq[i].worker = NULL;
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1278
if (md_ctrl->rxq[i].worker) {
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1279
destroy_workqueue(md_ctrl->rxq[i].worker);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1280
md_ctrl->rxq[i].worker = NULL;
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1314
md_ctrl->txq[i].worker =
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1318
if (!md_ctrl->txq[i].worker)
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1328
md_ctrl->rxq[i].worker =
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1332
if (!md_ctrl->rxq[i].worker)
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
246
queue_work(queue->worker, &queue->cldma_work);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
352
queue_work(queue->worker, &queue->cldma_work);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
576
queue_work(md_ctrl->txq[i].worker,
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
601
queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
drivers/net/wwan/t7xx/t7xx_hif_cldma.h
92
struct workqueue_struct *worker;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
135
struct workqueue_struct *worker;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
177
queue_work(dpmaif_ctrl->txq[txq->index].worker,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
531
queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
619
txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker",
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
622
if (!txq->worker)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
633
if (txq->worker)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
634
destroy_workqueue(txq->worker);
drivers/platform/chrome/cros_ec_spi.c
707
static void cros_ec_spi_high_pri_release(void *worker)
drivers/platform/chrome/cros_ec_spi.c
709
kthread_destroy_worker(worker);
drivers/platform/olpc/olpc-ec.c
105
schedule_work(&ec->worker);
drivers/platform/olpc/olpc-ec.c
123
schedule_work(&ec->worker);
drivers/platform/olpc/olpc-ec.c
36
struct work_struct worker;
drivers/platform/olpc/olpc-ec.c
416
INIT_WORK(&ec->worker, olpc_ec_worker);
drivers/platform/olpc/olpc-ec.c
79
struct olpc_ec_priv *ec = container_of(w, struct olpc_ec_priv, worker);
drivers/s390/block/dasd_alias.c
145
INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
drivers/s390/block/dasd_alias.c
253
cancel_work_sync(&lcu->suc_data.worker);
drivers/s390/block/dasd_alias.c
903
worker);
drivers/s390/block/dasd_alias.c
965
if (!schedule_work(&lcu->suc_data.worker))
drivers/s390/block/dasd_eckd.c
101
struct work_struct worker;
drivers/s390/block/dasd_eckd.c
114
struct work_struct worker;
drivers/s390/block/dasd_eckd.c
1439
data = container_of(work, struct pe_handler_work_data, worker);
drivers/s390/block/dasd_eckd.c
1480
INIT_WORK(&data->worker, do_pe_handler_work);
drivers/s390/block/dasd_eckd.c
1485
schedule_work(&data->worker);
drivers/s390/block/dasd_eckd.c
1701
data = container_of(work, struct ext_pool_exhaust_work_data, worker);
drivers/s390/block/dasd_eckd.c
1726
INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
drivers/s390/block/dasd_eckd.c
1737
schedule_work(&data->worker);
drivers/s390/block/dasd_eckd.c
6738
data = container_of(work, struct check_attention_work_data, worker);
drivers/s390/block/dasd_eckd.c
6770
INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
drivers/s390/block/dasd_eckd.c
6774
schedule_work(&data->worker);
drivers/s390/block/dasd_eckd.c
94
struct work_struct worker;
drivers/s390/block/dasd_eckd.h
629
struct work_struct worker;
drivers/s390/net/qeth_l2_main.c
1299
struct work_struct worker;
drivers/s390/net/qeth_l2_main.c
1308
container_of(work, struct qeth_bridge_state_data, worker);
drivers/s390/net/qeth_l2_main.c
1356
INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
drivers/s390/net/qeth_l2_main.c
1362
queue_work(card->event_wq, &data->worker);
drivers/vdpa/vdpa_sim/vdpa_sim.c
232
vdpasim->worker = kthread_run_worker(0, "vDPA sim worker: %s",
drivers/vdpa/vdpa_sim/vdpa_sim.c
234
if (IS_ERR(vdpasim->worker))
drivers/vdpa/vdpa_sim/vdpa_sim.c
286
kthread_queue_work(vdpasim->worker, &vdpasim->work);
drivers/vdpa/vdpa_sim/vdpa_sim.c
72
kthread_queue_work(vdpasim->worker, work);
drivers/vdpa/vdpa_sim/vdpa_sim.c
746
kthread_destroy_worker(vdpasim->worker);
drivers/vdpa/vdpa_sim/vdpa_sim.h
60
struct kthread_worker *worker;
drivers/vhost/vhost.c
1014
struct vhost_worker *worker;
drivers/vhost/vhost.c
1070
worker = rcu_dereference_check(vq->worker,
drivers/vhost/vhost.c
1072
if (!worker) {
drivers/vhost/vhost.c
1078
ring_worker.worker_id = worker->id;
drivers/vhost/vhost.c
1095
struct vhost_worker *worker;
drivers/vhost/vhost.c
1117
worker = vhost_worker_create(dev);
drivers/vhost/vhost.c
1118
if (!worker) {
drivers/vhost/vhost.c
1124
__vhost_vq_attach_worker(dev->vqs[i], worker);
drivers/vhost/vhost.c
244
static void vhost_worker_queue(struct vhost_worker *worker,
drivers/vhost/vhost.c
252
llist_add(&work->node, &worker->work_list);
drivers/vhost/vhost.c
253
worker->ops->wakeup(worker);
drivers/vhost/vhost.c
259
struct vhost_worker *worker;
drivers/vhost/vhost.c
263
worker = rcu_dereference(vq->worker);
drivers/vhost/vhost.c
264
if (worker) {
drivers/vhost/vhost.c
266
vhost_worker_queue(worker, work);
drivers/vhost/vhost.c
280
static void __vhost_worker_flush(struct vhost_worker *worker)
drivers/vhost/vhost.c
284
if (!worker->attachment_cnt || worker->killed)
drivers/vhost/vhost.c
290
vhost_worker_queue(worker, &flush.work);
drivers/vhost/vhost.c
295
mutex_unlock(&worker->mutex);
drivers/vhost/vhost.c
297
mutex_lock(&worker->mutex);
drivers/vhost/vhost.c
300
static void vhost_worker_flush(struct vhost_worker *worker)
drivers/vhost/vhost.c
302
mutex_lock(&worker->mutex);
drivers/vhost/vhost.c
303
__vhost_worker_flush(worker);
drivers/vhost/vhost.c
304
mutex_unlock(&worker->mutex);
drivers/vhost/vhost.c
309
struct vhost_worker *worker;
drivers/vhost/vhost.c
312
xa_for_each(&dev->worker_xa, i, worker)
drivers/vhost/vhost.c
313
vhost_worker_flush(worker);
drivers/vhost/vhost.c
320
struct vhost_worker *worker;
drivers/vhost/vhost.c
324
worker = rcu_dereference(vq->worker);
drivers/vhost/vhost.c
325
if (worker && !llist_empty(&worker->work_list))
drivers/vhost/vhost.c
395
rcu_assign_pointer(vq->worker, NULL);
drivers/vhost/vhost.c
402
struct vhost_worker *worker = data;
drivers/vhost/vhost.c
404
struct vhost_dev *dev = worker->dev;
drivers/vhost/vhost.c
417
node = llist_del_all(&worker->work_list);
drivers/vhost/vhost.c
427
kcov_remote_start_common(worker->kcov_handle);
drivers/vhost/vhost.c
440
struct vhost_worker *worker = data;
drivers/vhost/vhost.c
444
node = llist_del_all(&worker->work_list);
drivers/vhost/vhost.c
453
kcov_remote_start_common(worker->kcov_handle);
drivers/vhost/vhost.c
465
struct vhost_worker *worker = data;
drivers/vhost/vhost.c
466
struct vhost_dev *dev = worker->dev;
drivers/vhost/vhost.c
470
mutex_lock(&worker->mutex);
drivers/vhost/vhost.c
471
worker->killed = true;
drivers/vhost/vhost.c
477
if (worker ==
drivers/vhost/vhost.c
478
rcu_dereference_check(vq->worker,
drivers/vhost/vhost.c
480
rcu_assign_pointer(vq->worker, NULL);
drivers/vhost/vhost.c
486
worker->attachment_cnt -= attach_cnt;
drivers/vhost/vhost.c
493
vhost_run_work_list(worker);
drivers/vhost/vhost.c
494
mutex_unlock(&worker->mutex);
drivers/vhost/vhost.c
647
static int vhost_attach_task_to_cgroups(struct vhost_worker *worker)
drivers/vhost/vhost.c
655
vhost_worker_queue(worker, &attach.work);
drivers/vhost/vhost.c
657
mutex_lock(&worker->mutex);
drivers/vhost/vhost.c
663
saved_cnt = worker->attachment_cnt;
drivers/vhost/vhost.c
664
worker->attachment_cnt = INT_MAX;
drivers/vhost/vhost.c
665
__vhost_worker_flush(worker);
drivers/vhost/vhost.c
666
worker->attachment_cnt = saved_cnt;
drivers/vhost/vhost.c
668
mutex_unlock(&worker->mutex);
drivers/vhost/vhost.c
711
struct vhost_worker *worker)
drivers/vhost/vhost.c
713
if (!worker)
drivers/vhost/vhost.c
716
WARN_ON(!llist_empty(&worker->work_list));
drivers/vhost/vhost.c
717
xa_erase(&dev->worker_xa, worker->id);
drivers/vhost/vhost.c
718
worker->ops->stop(worker);
drivers/vhost/vhost.c
719
kfree(worker);
drivers/vhost/vhost.c
724
struct vhost_worker *worker;
drivers/vhost/vhost.c
731
rcu_assign_pointer(dev->vqs[i]->worker, NULL);
drivers/vhost/vhost.c
736
xa_for_each(&dev->worker_xa, i, worker)
drivers/vhost/vhost.c
737
vhost_worker_destroy(dev, worker);
drivers/vhost/vhost.c
741
static void vhost_task_wakeup(struct vhost_worker *worker)
drivers/vhost/vhost.c
743
return vhost_task_wake(worker->vtsk);
drivers/vhost/vhost.c
746
static void vhost_kthread_wakeup(struct vhost_worker *worker)
drivers/vhost/vhost.c
748
wake_up_process(worker->kthread_task);
drivers/vhost/vhost.c
751
static void vhost_task_do_stop(struct vhost_worker *worker)
drivers/vhost/vhost.c
753
return vhost_task_stop(worker->vtsk);
drivers/vhost/vhost.c
756
static void vhost_kthread_do_stop(struct vhost_worker *worker)
drivers/vhost/vhost.c
758
kthread_stop(worker->kthread_task);
drivers/vhost/vhost.c
761
static int vhost_task_worker_create(struct vhost_worker *worker,
drivers/vhost/vhost.c
769
worker, name);
drivers/vhost/vhost.c
773
worker->vtsk = vtsk;
drivers/vhost/vhost.c
775
ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
drivers/vhost/vhost.c
777
vhost_task_do_stop(worker);
drivers/vhost/vhost.c
780
worker->id = id;
drivers/vhost/vhost.c
784
static int vhost_kthread_worker_create(struct vhost_worker *worker,
drivers/vhost/vhost.c
791
task = kthread_create(vhost_run_work_kthread_list, worker, "%s", name);
drivers/vhost/vhost.c
795
worker->kthread_task = task;
drivers/vhost/vhost.c
797
ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
drivers/vhost/vhost.c
801
ret = vhost_attach_task_to_cgroups(worker);
drivers/vhost/vhost.c
805
worker->id = id;
drivers/vhost/vhost.c
811
vhost_kthread_do_stop(worker);
drivers/vhost/vhost.c
829
struct vhost_worker *worker;
drivers/vhost/vhost.c
835
worker = kzalloc_obj(*worker, GFP_KERNEL_ACCOUNT);
drivers/vhost/vhost.c
836
if (!worker)
drivers/vhost/vhost.c
839
worker->dev = dev;
drivers/vhost/vhost.c
840
worker->ops = ops;
drivers/vhost/vhost.c
843
mutex_init(&worker->mutex);
drivers/vhost/vhost.c
844
init_llist_head(&worker->work_list);
drivers/vhost/vhost.c
845
worker->kcov_handle = kcov_common_handle();
drivers/vhost/vhost.c
846
ret = ops->create(worker, dev, name);
drivers/vhost/vhost.c
850
return worker;
drivers/vhost/vhost.c
853
kfree(worker);
drivers/vhost/vhost.c
859
struct vhost_worker *worker)
drivers/vhost/vhost.c
863
mutex_lock(&worker->mutex);
drivers/vhost/vhost.c
864
if (worker->killed) {
drivers/vhost/vhost.c
865
mutex_unlock(&worker->mutex);
drivers/vhost/vhost.c
871
old_worker = rcu_dereference_check(vq->worker,
drivers/vhost/vhost.c
873
rcu_assign_pointer(vq->worker, worker);
drivers/vhost/vhost.c
874
worker->attachment_cnt++;
drivers/vhost/vhost.c
878
mutex_unlock(&worker->mutex);
drivers/vhost/vhost.c
882
mutex_unlock(&worker->mutex);
drivers/vhost/vhost.c
932
struct vhost_worker *worker;
drivers/vhost/vhost.c
937
worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
drivers/vhost/vhost.c
938
if (!worker || worker->id != info->worker_id)
drivers/vhost/vhost.c
941
__vhost_vq_attach_worker(vq, worker);
drivers/vhost/vhost.c
949
struct vhost_worker *worker;
drivers/vhost/vhost.c
951
worker = vhost_worker_create(dev);
drivers/vhost/vhost.c
952
if (!worker)
drivers/vhost/vhost.c
955
info->worker_id = worker->id;
drivers/vhost/vhost.c
964
struct vhost_worker *worker;
drivers/vhost/vhost.c
966
worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
drivers/vhost/vhost.c
967
if (!worker || worker->id != info->worker_id)
drivers/vhost/vhost.c
970
mutex_lock(&worker->mutex);
drivers/vhost/vhost.c
971
if (worker->attachment_cnt || worker->killed) {
drivers/vhost/vhost.c
972
mutex_unlock(&worker->mutex);
drivers/vhost/vhost.c
980
__vhost_worker_flush(worker);
drivers/vhost/vhost.c
981
mutex_unlock(&worker->mutex);
drivers/vhost/vhost.c
983
vhost_worker_destroy(dev, worker);
drivers/vhost/vhost.h
34
int (*create)(struct vhost_worker *worker, struct vhost_dev *dev,
drivers/vhost/vhost.h
36
void (*stop)(struct vhost_worker *worker);
drivers/vhost/vhost.h
37
void (*wakeup)(struct vhost_worker *worker);
drivers/vhost/vhost.h
96
struct vhost_worker __rcu *worker;
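vhost publishes each virtqueue's worker through an __rcu pointer: readers queue under rcu_read_lock() and tolerate NULL, writers swap with rcu_assign_pointer(). A generic, self-contained stub of that pattern; the stub_* names stand in for the vhost internals, not reproduce them:

#include <linux/llist.h>
#include <linux/rcupdate.h>

struct stub_worker {
	struct llist_head work_list;
};

struct stub_vq {
	struct stub_worker __rcu *worker;
};

/* readers may observe NULL while a worker is being detached */
static bool stub_vq_queue(struct stub_vq *vq, struct llist_node *node)
{
	struct stub_worker *w;
	bool queued = false;

	rcu_read_lock();
	w = rcu_dereference(vq->worker);
	if (w) {
		llist_add(node, &w->work_list);
		queued = true;
	}
	rcu_read_unlock();
	return queued;
}

/* writers publish with rcu_assign_pointer(vq->worker, w); detaching sets it
 * to NULL and waits (e.g. synchronize_rcu()) before freeing the worker */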
fs/erofs/zdata.c
1461
struct kthread_worker *worker;
fs/erofs/zdata.c
1464
worker = rcu_dereference(
fs/erofs/zdata.c
1466
if (!worker) {
fs/erofs/zdata.c
1470
kthread_queue_work(worker, &io->u.kthread_work);
fs/erofs/zdata.c
296
struct kthread_worker *worker;
fs/erofs/zdata.c
300
worker = rcu_dereference_protected(
fs/erofs/zdata.c
303
if (worker)
fs/erofs/zdata.c
304
kthread_destroy_worker(worker);
fs/erofs/zdata.c
311
struct kthread_worker *worker =
fs/erofs/zdata.c
314
if (IS_ERR(worker))
fs/erofs/zdata.c
315
return worker;
fs/erofs/zdata.c
317
sched_set_fifo_low(worker->task);
fs/erofs/zdata.c
318
return worker;
fs/erofs/zdata.c
323
struct kthread_worker *worker;
fs/erofs/zdata.c
332
worker = erofs_init_percpu_worker(cpu);
fs/erofs/zdata.c
333
if (!IS_ERR(worker))
fs/erofs/zdata.c
334
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
fs/erofs/zdata.c
345
struct kthread_worker *worker, *old;
fs/erofs/zdata.c
347
worker = erofs_init_percpu_worker(cpu);
fs/erofs/zdata.c
348
if (IS_ERR(worker))
fs/erofs/zdata.c
349
return PTR_ERR(worker);
fs/erofs/zdata.c
355
rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
fs/erofs/zdata.c
358
kthread_destroy_worker(worker);
fs/erofs/zdata.c
364
struct kthread_worker *worker;
fs/erofs/zdata.c
367
worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
fs/erofs/zdata.c
373
if (worker)
fs/erofs/zdata.c
374
kthread_destroy_worker(worker);
include/drm/drm_flip_work.h
65
struct work_struct worker;
include/drm/drm_vblank.h
266
struct kthread_worker *worker;
include/linux/devm-helpers.h
48
work_func_t worker)
include/linux/devm-helpers.h
50
INIT_DELAYED_WORK(w, worker);
include/linux/devm-helpers.h
73
work_func_t worker)
include/linux/devm-helpers.h
75
INIT_WORK(w, worker);
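devm-helpers.h wraps INIT_WORK/INIT_DELAYED_WORK with a devm action so the work is cancelled automatically on driver unbind. A hypothetical probe() using devm_work_autocancel(); my_priv, my_work_fn and the platform_device framing are illustrative:

#include <linux/devm-helpers.h>
#include <linux/platform_device.h>

struct my_priv {
	struct work_struct work;
};

static void my_work_fn(struct work_struct *work)
{
	/* ... */
}

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	/* cancel_work_sync() runs automatically when the device unbinds */
	return devm_work_autocancel(&pdev->dev, &priv->work, my_work_fn);
}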
include/linux/kthread.h
156
struct kthread_worker *worker;
include/linux/kthread.h
184
extern void __kthread_init_worker(struct kthread_worker *worker,
include/linux/kthread.h
187
#define kthread_init_worker(worker) \
include/linux/kthread.h
190
__kthread_init_worker((worker), "("#worker")->lock", &__key); \
include/linux/kthread.h
263
bool kthread_queue_work(struct kthread_worker *worker,
include/linux/kthread.h
266
bool kthread_queue_delayed_work(struct kthread_worker *worker,
include/linux/kthread.h
270
bool kthread_mod_delayed_work(struct kthread_worker *worker,
include/linux/kthread.h
275
void kthread_flush_worker(struct kthread_worker *worker);
include/linux/kthread.h
280
void kthread_destroy_worker(struct kthread_worker *worker);
include/trace/events/sched.h
66
TP_PROTO(struct kthread_worker *worker,
include/trace/events/sched.h
69
TP_ARGS(worker, work),
include/trace/events/sched.h
74
__field( void *, worker)
include/trace/events/sched.h
80
__entry->worker = worker;
include/trace/events/sched.h
84
__entry->work, __entry->function, __entry->worker)
io_uring/io-wq.c
1087
static bool __io_wq_worker_cancel(struct io_worker *worker,
io_uring/io-wq.c
1093
__set_notify_signal(worker->task);
io_uring/io-wq.c
1100
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
io_uring/io-wq.c
1108
raw_spin_lock(&worker->lock);
io_uring/io-wq.c
1109
if (__io_wq_worker_cancel(worker, match, worker->cur_work))
io_uring/io-wq.c
1111
raw_spin_unlock(&worker->lock);
io_uring/io-wq.c
1307
struct io_worker *worker;
io_uring/io-wq.c
1311
worker = container_of(cb, struct io_worker, create_work);
io_uring/io-wq.c
1312
return worker->wq == data;
io_uring/io-wq.c
1325
struct io_worker *worker;
io_uring/io-wq.c
1327
worker = container_of(cb, struct io_worker, create_work);
io_uring/io-wq.c
1328
io_worker_cancel_cb(worker);
io_uring/io-wq.c
1334
kfree(worker);
io_uring/io-wq.c
1407
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
io_uring/io-wq.c
1412
cpumask_set_cpu(od->cpu, worker->wq->cpu_mask);
io_uring/io-wq.c
1414
cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask);
io_uring/io-wq.c
148
static void io_wq_dec_running(struct io_worker *worker);
io_uring/io-wq.c
165
static bool io_worker_get(struct io_worker *worker)
io_uring/io-wq.c
167
return refcount_inc_not_zero(&worker->ref);
io_uring/io-wq.c
170
static void io_worker_release(struct io_worker *worker)
io_uring/io-wq.c
172
if (refcount_dec_and_test(&worker->ref))
io_uring/io-wq.c
173
complete(&worker->ref_done);
io_uring/io-wq.c
187
static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
io_uring/io-wq.c
189
return worker->acct;
io_uring/io-wq.c
200
struct io_worker *worker = current->worker_private;
io_uring/io-wq.c
205
return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
io_uring/io-wq.c
208
static void io_worker_cancel_cb(struct io_worker *worker)
io_uring/io-wq.c
210
struct io_wq_acct *acct = io_wq_get_acct(worker);
io_uring/io-wq.c
211
struct io_wq *wq = worker->wq;
io_uring/io-wq.c
218
clear_bit_unlock(0, &worker->create_state);
io_uring/io-wq.c
219
io_worker_release(worker);
io_uring/io-wq.c
224
struct io_worker *worker;
io_uring/io-wq.c
228
worker = container_of(cb, struct io_worker, create_work);
io_uring/io-wq.c
229
return worker == data;
io_uring/io-wq.c
232
static void io_worker_exit(struct io_worker *worker)
io_uring/io-wq.c
234
struct io_wq *wq = worker->wq;
io_uring/io-wq.c
235
struct io_wq_acct *acct = io_wq_get_acct(worker);
io_uring/io-wq.c
239
io_task_worker_match, worker);
io_uring/io-wq.c
243
io_worker_cancel_cb(worker);
io_uring/io-wq.c
246
io_worker_release(worker);
io_uring/io-wq.c
247
wait_for_completion(&worker->ref_done);
io_uring/io-wq.c
250
if (test_bit(IO_WORKER_F_FREE, &worker->flags))
io_uring/io-wq.c
251
hlist_nulls_del_rcu(&worker->nulls_node);
io_uring/io-wq.c
252
list_del_rcu(&worker->all_list);
io_uring/io-wq.c
254
io_wq_dec_running(worker);
io_uring/io-wq.c
262
kfree_rcu(worker, rcu);
io_uring/io-wq.c
296
struct io_worker *worker;
io_uring/io-wq.c
303
hlist_nulls_for_each_entry_rcu(worker, n, &acct->free_list, nulls_node) {
io_uring/io-wq.c
304
if (!io_worker_get(worker))
io_uring/io-wq.c
311
wake_up_process(worker->task);
io_uring/io-wq.c
312
io_worker_release(worker);
io_uring/io-wq.c
344
static void io_wq_inc_running(struct io_worker *worker)
io_uring/io-wq.c
346
struct io_wq_acct *acct = io_wq_get_acct(worker);
io_uring/io-wq.c
353
struct io_worker *worker;
io_uring/io-wq.c
359
worker = container_of(cb, struct io_worker, create_work);
io_uring/io-wq.c
360
wq = worker->wq;
io_uring/io-wq.c
361
acct = worker->acct;
io_uring/io-wq.c
383
clear_bit_unlock(0, &worker->create_state);
io_uring/io-wq.c
384
io_worker_release(worker);
io_uring/io-wq.c
387
static bool io_queue_worker_create(struct io_worker *worker,
io_uring/io-wq.c
391
struct io_wq *wq = worker->wq;
io_uring/io-wq.c
396
if (!io_worker_get(worker))
io_uring/io-wq.c
404
if (test_bit(0, &worker->create_state) ||
io_uring/io-wq.c
405
test_and_set_bit_lock(0, &worker->create_state))
io_uring/io-wq.c
409
init_task_work(&worker->create_work, func);
io_uring/io-wq.c
410
if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
io_uring/io-wq.c
423
clear_bit_unlock(0, &worker->create_state);
io_uring/io-wq.c
425
io_worker_release(worker);
io_uring/io-wq.c
456
static void io_wq_dec_running(struct io_worker *worker)
io_uring/io-wq.c
458
struct io_wq_acct *acct = io_wq_get_acct(worker);
io_uring/io-wq.c
459
struct io_wq *wq = worker->wq;
io_uring/io-wq.c
461
if (!test_bit(IO_WORKER_F_UP, &worker->flags))
io_uring/io-wq.c
466
if (!worker->cur_work)
io_uring/io-wq.c
470
if (io_wq_hash_defer(worker->cur_work, acct)) {
io_uring/io-wq.c
478
io_queue_worker_create(worker, acct, create_worker_cb);
io_uring/io-wq.c
485
static void __io_worker_busy(struct io_wq_acct *acct, struct io_worker *worker)
io_uring/io-wq.c
487
if (test_bit(IO_WORKER_F_FREE, &worker->flags)) {
io_uring/io-wq.c
488
clear_bit(IO_WORKER_F_FREE, &worker->flags);
io_uring/io-wq.c
490
hlist_nulls_del_init_rcu(&worker->nulls_node);
io_uring/io-wq.c
498
static void __io_worker_idle(struct io_wq_acct *acct, struct io_worker *worker)
io_uring/io-wq.c
501
if (!test_bit(IO_WORKER_F_FREE, &worker->flags)) {
io_uring/io-wq.c
502
set_bit(IO_WORKER_F_FREE, &worker->flags);
io_uring/io-wq.c
503
hlist_nulls_add_head_rcu(&worker->nulls_node, &acct->free_list);
io_uring/io-wq.c
582
static void io_assign_current_work(struct io_worker *worker,
io_uring/io-wq.c
590
raw_spin_lock(&worker->lock);
io_uring/io-wq.c
591
worker->cur_work = work;
io_uring/io-wq.c
592
raw_spin_unlock(&worker->lock);
io_uring/io-wq.c
599
struct io_worker *worker)
io_uring/io-wq.c
602
struct io_wq *wq = worker->wq;
io_uring/io-wq.c
624
raw_spin_lock(&worker->lock);
io_uring/io-wq.c
625
worker->cur_work = work;
io_uring/io-wq.c
626
raw_spin_unlock(&worker->lock);
io_uring/io-wq.c
634
__io_worker_busy(acct, worker);
io_uring/io-wq.c
636
io_assign_current_work(worker, work);
io_uring/io-wq.c
653
io_assign_current_work(worker, NULL);
io_uring/io-wq.c
661
io_assign_current_work(worker, work);
io_uring/io-wq.c
684
struct io_worker *worker = data;
io_uring/io-wq.c
685
struct io_wq_acct *acct = io_wq_get_acct(worker);
io_uring/io-wq.c
686
struct io_wq *wq = worker->wq;
io_uring/io-wq.c
690
set_mask_bits(&worker->flags, 0,
io_uring/io-wq.c
706
io_worker_handle_work(acct, worker);
io_uring/io-wq.c
724
__io_worker_idle(acct, worker);
io_uring/io-wq.c
744
io_worker_handle_work(acct, worker);
io_uring/io-wq.c
746
io_worker_exit(worker);
io_uring/io-wq.c
755
struct io_worker *worker = tsk->worker_private;
io_uring/io-wq.c
757
if (!worker)
io_uring/io-wq.c
759
if (!test_bit(IO_WORKER_F_UP, &worker->flags))
io_uring/io-wq.c
761
if (test_bit(IO_WORKER_F_RUNNING, &worker->flags))
io_uring/io-wq.c
763
set_bit(IO_WORKER_F_RUNNING, &worker->flags);
io_uring/io-wq.c
764
io_wq_inc_running(worker);
io_uring/io-wq.c
773
struct io_worker *worker = tsk->worker_private;
io_uring/io-wq.c
775
if (!worker)
io_uring/io-wq.c
777
if (!test_bit(IO_WORKER_F_UP, &worker->flags))
io_uring/io-wq.c
779
if (!test_bit(IO_WORKER_F_RUNNING, &worker->flags))
io_uring/io-wq.c
782
clear_bit(IO_WORKER_F_RUNNING, &worker->flags);
io_uring/io-wq.c
783
io_wq_dec_running(worker);
io_uring/io-wq.c
786
static void io_init_new_worker(struct io_wq *wq, struct io_wq_acct *acct, struct io_worker *worker,
io_uring/io-wq.c
789
tsk->worker_private = worker;
io_uring/io-wq.c
790
worker->task = tsk;
io_uring/io-wq.c
794
hlist_nulls_add_head_rcu(&worker->nulls_node, &acct->free_list);
io_uring/io-wq.c
795
list_add_tail_rcu(&worker->all_list, &acct->all_list);
io_uring/io-wq.c
796
set_bit(IO_WORKER_F_FREE, &worker->flags);
io_uring/io-wq.c
806
static inline bool io_should_retry_thread(struct io_worker *worker, long err)
io_uring/io-wq.c
815
worker->init_retries++;
io_uring/io-wq.c
818
return worker->init_retries <= WORKER_INIT_LIMIT;
io_uring/io-wq.c
829
static void queue_create_worker_retry(struct io_worker *worker)
io_uring/io-wq.c
837
schedule_delayed_work(&worker->work,
io_uring/io-wq.c
838
msecs_to_jiffies(worker->init_retries * 5));
io_uring/io-wq.c
843
struct io_worker *worker;
io_uring/io-wq.c
848
worker = container_of(cb, struct io_worker, create_work);
io_uring/io-wq.c
849
clear_bit_unlock(0, &worker->create_state);
io_uring/io-wq.c
850
wq = worker->wq;
io_uring/io-wq.c
851
acct = io_wq_get_acct(worker);
io_uring/io-wq.c
852
tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
io_uring/io-wq.c
854
io_init_new_worker(wq, acct, worker, tsk);
io_uring/io-wq.c
855
io_worker_release(worker);
io_uring/io-wq.c
857
} else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
io_uring/io-wq.c
874
kfree(worker);
io_uring/io-wq.c
879
io_worker_release(worker);
io_uring/io-wq.c
880
queue_create_worker_retry(worker);
io_uring/io-wq.c
885
struct io_worker *worker = container_of(work, struct io_worker,
io_uring/io-wq.c
887
struct io_wq_acct *acct = io_wq_get_acct(worker);
io_uring/io-wq.c
889
if (!io_queue_worker_create(worker, acct, create_worker_cont))
io_uring/io-wq.c
890
kfree(worker);
io_uring/io-wq.c
895
struct io_worker *worker;
io_uring/io-wq.c
900
worker = kzalloc_obj(*worker);
io_uring/io-wq.c
901
if (!worker) {
io_uring/io-wq.c
911
refcount_set(&worker->ref, 1);
io_uring/io-wq.c
912
worker->wq = wq;
io_uring/io-wq.c
913
worker->acct = acct;
io_uring/io-wq.c
914
raw_spin_lock_init(&worker->lock);
io_uring/io-wq.c
915
init_completion(&worker->ref_done);
io_uring/io-wq.c
917
tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
io_uring/io-wq.c
919
io_init_new_worker(wq, acct, worker, tsk);
io_uring/io-wq.c
920
} else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
io_uring/io-wq.c
921
kfree(worker);
io_uring/io-wq.c
924
INIT_DELAYED_WORK(&worker->work, io_workqueue_create);
io_uring/io-wq.c
925
queue_create_worker_retry(worker);
io_uring/io-wq.c
939
struct io_worker *worker;
io_uring/io-wq.c
942
list_for_each_entry_rcu(worker, &acct->all_list, all_list) {
io_uring/io-wq.c
943
if (io_worker_get(worker)) {
io_uring/io-wq.c
945
if (worker->task)
io_uring/io-wq.c
946
ret = func(worker, data);
io_uring/io-wq.c
947
io_worker_release(worker);
io_uring/io-wq.c
965
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
io_uring/io-wq.c
967
__set_notify_signal(worker->task);
io_uring/io-wq.c
968
wake_up_process(worker->task);
kernel/async.c
344
struct worker *worker = current_wq_worker();
kernel/async.c
346
return worker && worker->current_func == async_run_entry_fn;
kernel/bpf/helpers.c
1122
struct irq_work worker;
kernel/bpf/helpers.c
1281
struct bpf_async_cb *cb = container_of(work, struct bpf_async_cb, worker);
kernel/bpf/helpers.c
1292
cb->worker = IRQ_WORK_INIT(worker_for_call_rcu);
kernel/bpf/helpers.c
1293
irq_work_queue(&cb->worker);
kernel/bpf/helpers.c
1349
cb->worker = IRQ_WORK_INIT(bpf_async_irq_worker);
kernel/bpf/helpers.c
1470
irq_work_queue(&cb->worker);
kernel/bpf/helpers.c
1649
struct bpf_async_cb *cb = container_of(work, struct bpf_async_cb, worker);
kernel/kthread.c
1005
raw_spin_lock_irq(&worker->lock);
kernel/kthread.c
1006
worker->task = NULL;
kernel/kthread.c
1007
raw_spin_unlock_irq(&worker->lock);
kernel/kthread.c
1012
raw_spin_lock_irq(&worker->lock);
kernel/kthread.c
1013
if (!list_empty(&worker->work_list)) {
kernel/kthread.c
1014
work = list_first_entry(&worker->work_list,
kernel/kthread.c
1018
worker->current_work = work;
kernel/kthread.c
1019
raw_spin_unlock_irq(&worker->lock);
kernel/kthread.c
1052
struct kthread_worker *worker;
kernel/kthread.c
1055
worker = kzalloc_obj(*worker);
kernel/kthread.c
1056
if (!worker)
kernel/kthread.c
1059
kthread_init_worker(worker);
kernel/kthread.c
1061
task = __kthread_create_on_node(kthread_worker_fn, worker,
kernel/kthread.c
1066
worker->flags = flags;
kernel/kthread.c
1067
worker->task = task;
kernel/kthread.c
1069
return worker;
kernel/kthread.c
1072
kfree(worker);
kernel/kthread.c
1089
struct kthread_worker *worker;
kernel/kthread.c
1093
worker = __kthread_create_worker_on_node(flags, node, namefmt, args);
kernel/kthread.c
1096
return worker;
kernel/kthread.c
1140
struct kthread_worker *worker;
kernel/kthread.c
1142
worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu);
kernel/kthread.c
1143
if (!IS_ERR(worker))
kernel/kthread.c
1144
kthread_bind(worker->task, cpu);
kernel/kthread.c
1146
return worker;
kernel/kthread.c
1155
static inline bool queuing_blocked(struct kthread_worker *worker,
kernel/kthread.c
1158
lockdep_assert_held(&worker->lock);
kernel/kthread.c
1163
static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
kernel/kthread.c
1166
lockdep_assert_held(&worker->lock);
kernel/kthread.c
1169
WARN_ON_ONCE(work->worker && work->worker != worker);
kernel/kthread.c
1173
static void kthread_insert_work(struct kthread_worker *worker,
kernel/kthread.c
1177
kthread_insert_work_sanity_check(worker, work);
kernel/kthread.c
1179
trace_sched_kthread_work_queue_work(worker, work);
kernel/kthread.c
1182
work->worker = worker;
kernel/kthread.c
1183
if (!worker->current_work && likely(worker->task))
kernel/kthread.c
1184
wake_up_process(worker->task);
kernel/kthread.c
1199
bool kthread_queue_work(struct kthread_worker *worker,
kernel/kthread.c
1205
raw_spin_lock_irqsave(&worker->lock, flags);
kernel/kthread.c
1206
if (!queuing_blocked(worker, work)) {
kernel/kthread.c
1207
kthread_insert_work(worker, work, &worker->work_list);
kernel/kthread.c
1210
raw_spin_unlock_irqrestore(&worker->lock, flags);
kernel/kthread.c
1228
struct kthread_worker *worker = work->worker;
kernel/kthread.c
1235
if (WARN_ON_ONCE(!worker))
kernel/kthread.c
1238
raw_spin_lock_irqsave(&worker->lock, flags);
kernel/kthread.c
1240
WARN_ON_ONCE(work->worker != worker);
kernel/kthread.c
1246
kthread_insert_work(worker, work, &worker->work_list);
kernel/kthread.c
1248
raw_spin_unlock_irqrestore(&worker->lock, flags);
kernel/kthread.c
1252
static void __kthread_queue_delayed_work(struct kthread_worker *worker,
kernel/kthread.c
1268
kthread_insert_work(worker, work, &worker->work_list);
kernel/kthread.c
1273
kthread_insert_work_sanity_check(worker, work);
kernel/kthread.c
1275
list_add(&work->node, &worker->delayed_work_list);
kernel/kthread.c
1276
work->worker = worker;
kernel/kthread.c
1296
bool kthread_queue_delayed_work(struct kthread_worker *worker,
kernel/kthread.c
1304
raw_spin_lock_irqsave(&worker->lock, flags);
kernel/kthread.c
1306
if (!queuing_blocked(worker, work)) {
kernel/kthread.c
1307
__kthread_queue_delayed_work(worker, dwork, delay);
kernel/kthread.c
1311
raw_spin_unlock_irqrestore(&worker->lock, flags);
kernel/kthread.c
1340
struct kthread_worker *worker;
kernel/kthread.c
1343
worker = work->worker;
kernel/kthread.c
1344
if (!worker)
kernel/kthread.c
1347
raw_spin_lock_irq(&worker->lock);
kernel/kthread.c
1349
WARN_ON_ONCE(work->worker != worker);
kernel/kthread.c
1352
kthread_insert_work(worker, &fwork.work, work->node.next);
kernel/kthread.c
1353
else if (worker->current_work == work)
kernel/kthread.c
1354
kthread_insert_work(worker, &fwork.work,
kernel/kthread.c
1355
worker->work_list.next);
kernel/kthread.c
1359
raw_spin_unlock_irq(&worker->lock);
kernel/kthread.c
1378
struct kthread_worker *worker = work->worker;
kernel/kthread.c
1387
raw_spin_unlock_irqrestore(&worker->lock, *flags);
kernel/kthread.c
1389
raw_spin_lock_irqsave(&worker->lock, *flags);
kernel/kthread.c
1443
bool kthread_mod_delayed_work(struct kthread_worker *worker,
kernel/kthread.c
1451
raw_spin_lock_irqsave(&worker->lock, flags);
kernel/kthread.c
1454
if (!work->worker) {
kernel/kthread.c
1460
WARN_ON_ONCE(work->worker != worker);
kernel/kthread.c
1483
__kthread_queue_delayed_work(worker, dwork, delay);
kernel/kthread.c
1485
raw_spin_unlock_irqrestore(&worker->lock, flags);
kernel/kthread.c
1492
struct kthread_worker *worker = work->worker;
kernel/kthread.c
1496
if (!worker)
kernel/kthread.c
1499
raw_spin_lock_irqsave(&worker->lock, flags);
kernel/kthread.c
1501
WARN_ON_ONCE(work->worker != worker);
kernel/kthread.c
1508
if (worker->current_work != work)
kernel/kthread.c
1516
raw_spin_unlock_irqrestore(&worker->lock, flags);
kernel/kthread.c
1518
raw_spin_lock_irqsave(&worker->lock, flags);
kernel/kthread.c
1522
raw_spin_unlock_irqrestore(&worker->lock, flags);
kernel/kthread.c
1571
void kthread_flush_worker(struct kthread_worker *worker)
kernel/kthread.c
1578
kthread_queue_work(worker, &fwork.work);
kernel/kthread.c
1595
void kthread_destroy_worker(struct kthread_worker *worker)
kernel/kthread.c
1599
task = worker->task;
kernel/kthread.c
1603
kthread_flush_worker(worker);
kernel/kthread.c
1605
WARN_ON(!list_empty(&worker->delayed_work_list));
kernel/kthread.c
1606
WARN_ON(!list_empty(&worker->work_list));
kernel/kthread.c
1607
kfree(worker);
kernel/kthread.c
958
void __kthread_init_worker(struct kthread_worker *worker,
kernel/kthread.c
962
memset(worker, 0, sizeof(struct kthread_worker));
kernel/kthread.c
963
raw_spin_lock_init(&worker->lock);
kernel/kthread.c
964
lockdep_set_class_and_name(&worker->lock, key, name);
kernel/kthread.c
965
INIT_LIST_HEAD(&worker->work_list);
kernel/kthread.c
966
INIT_LIST_HEAD(&worker->delayed_work_list);
kernel/kthread.c
987
struct kthread_worker *worker = worker_ptr;
kernel/kthread.c
994
WARN_ON(worker->task && worker->task != current);
kernel/kthread.c
995
worker->task = current;
kernel/kthread.c
997
if (worker->flags & KTW_FREEZABLE)
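Taken together, the kernel/kthread.c lines above implement the create/queue/flush/destroy lifecycle of a kthread_worker. A minimal consumer sketch, assuming the kthread_run_worker() creation helper this tree uses (the spelling of that helper varies across kernel versions; the demo_* names are illustrative):

#include <linux/kthread.h>
#include <linux/err.h>

static void demo_work_fn(struct kthread_work *work)
{
	/* Runs on the dedicated worker thread, one item at a time. */
}

static DEFINE_KTHREAD_WORK(demo_work, demo_work_fn);

static int demo(void)
{
	struct kthread_worker *worker;

	worker = kthread_run_worker(0, "demo-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_queue_work(worker, &demo_work);
	kthread_flush_worker(worker);	/* wait for demo_work to finish */
	kthread_destroy_worker(worker);	/* flushes once more, then reaps the thread */
	return 0;
}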
kernel/sched/cpufreq_schedutil.c
34
struct kthread_worker worker;
kernel/sched/cpufreq_schedutil.c
574
kthread_queue_work(&sg_policy->worker, &sg_policy->work);
kernel/sched/cpufreq_schedutil.c
680
kthread_init_worker(&sg_policy->worker);
kernel/sched/cpufreq_schedutil.c
681
thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
kernel/sched/cpufreq_schedutil.c
716
kthread_flush_worker(&sg_policy->worker);
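cpufreq_schedutil shows the other way to get a kthread worker: embed the struct, initialize it in place, and hand the exported kthread_worker_fn to a thread you create yourself. A sketch of that embedded variant (the demo_* names are illustrative):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

struct demo_policy {
	struct kthread_worker worker;
	struct task_struct *thread;
};

static int demo_policy_init(struct demo_policy *p)
{
	kthread_init_worker(&p->worker);
	p->thread = kthread_create(kthread_worker_fn, &p->worker, "demo:worker");
	if (IS_ERR(p->thread))
		return PTR_ERR(p->thread);
	wake_up_process(p->thread);	/* worker loop starts processing queued work */
	return 0;
}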
kernel/workqueue.c
1000
unsigned int oflags = worker->flags;
kernel/workqueue.c
1004
worker->flags &= ~flags;
kernel/workqueue.c
1012
if (!(worker->flags & WORKER_NOT_RUNNING))
kernel/workqueue.c
1017
static struct worker *first_idle_worker(struct worker_pool *pool)
kernel/workqueue.c
1022
return list_first_entry(&pool->idle_list, struct worker, entry);
kernel/workqueue.c
1035
static void worker_enter_idle(struct worker *worker)
kernel/workqueue.c
1037
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
1039
if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
kernel/workqueue.c
1040
WARN_ON_ONCE(!list_empty(&worker->entry) &&
kernel/workqueue.c
1041
(worker->hentry.next || worker->hentry.pprev)))
kernel/workqueue.c
1045
worker->flags |= WORKER_IDLE;
kernel/workqueue.c
1047
worker->last_active = jiffies;
kernel/workqueue.c
1050
list_add(&worker->entry, &pool->idle_list);
kernel/workqueue.c
1068
static void worker_leave_idle(struct worker *worker)
kernel/workqueue.c
1070
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
1072
if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
kernel/workqueue.c
1074
worker_clr_flags(worker, WORKER_IDLE);
kernel/workqueue.c
1076
list_del_init(&worker->entry);
kernel/workqueue.c
1112
static struct worker *find_worker_executing_work(struct worker_pool *pool,
kernel/workqueue.c
1115
struct worker *worker;
kernel/workqueue.c
1117
hash_for_each_possible(pool->busy_hash, worker, hentry,
kernel/workqueue.c
1119
if (worker->current_work == work &&
kernel/workqueue.c
1120
worker->current_func == work->func)
kernel/workqueue.c
1121
return worker;
kernel/workqueue.c
1186
static bool assign_work(struct work_struct *work, struct worker *worker,
kernel/workqueue.c
1189
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
1190
struct worker *collision;
kernel/workqueue.c
1197
WARN_ON_ONCE(worker->rescue_wq);
kernel/workqueue.c
1218
move_linked_works(work, &worker->scheduled, nextp);
kernel/workqueue.c
1254
struct worker *worker = first_idle_worker(pool);
kernel/workqueue.c
1259
if (!need_more_worker(pool) || !worker)
kernel/workqueue.c
1267
p = worker->task;
kernel/workqueue.c
1406
struct worker *worker = kthread_data(task);
kernel/workqueue.c
1408
if (!READ_ONCE(worker->sleeping))
kernel/workqueue.c
1418
if (!(worker->flags & WORKER_NOT_RUNNING))
kernel/workqueue.c
1419
worker->pool->nr_running++;
kernel/workqueue.c
1426
worker->current_at = worker->task->se.sum_exec_runtime;
kernel/workqueue.c
1428
WRITE_ONCE(worker->sleeping, 0);
kernel/workqueue.c
1440
struct worker *worker = kthread_data(task);
kernel/workqueue.c
1448
if (worker->flags & WORKER_NOT_RUNNING)
kernel/workqueue.c
1451
pool = worker->pool;
kernel/workqueue.c
1454
if (READ_ONCE(worker->sleeping))
kernel/workqueue.c
1457
WRITE_ONCE(worker->sleeping, 1);
kernel/workqueue.c
1465
if (worker->flags & WORKER_NOT_RUNNING) {
kernel/workqueue.c
1472
worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;
kernel/workqueue.c
1486
struct worker *worker = kthread_data(task);
kernel/workqueue.c
1487
struct pool_workqueue *pwq = worker->current_pwq;
kernel/workqueue.c
1488
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
1510
if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
kernel/workqueue.c
1511
worker->task->se.sum_exec_runtime - worker->current_at <
kernel/workqueue.c
1517
worker_set_flags(worker, WORKER_CPU_INTENSIVE);
kernel/workqueue.c
1518
wq_cpu_intensive_report(worker->current_func);
kernel/workqueue.c
1553
struct worker *worker = kthread_data(task);
kernel/workqueue.c
1555
return worker->last_func;
kernel/workqueue.c
219
struct worker *manager; /* L: purely informational */
kernel/workqueue.c
2213
struct worker *worker;
kernel/workqueue.c
2215
worker = current_wq_worker();
kernel/workqueue.c
2220
return worker && worker->current_pwq->wq == wq;
kernel/workqueue.c
2298
struct worker *worker;
kernel/workqueue.c
2302
worker = find_worker_executing_work(last_pool, work);
kernel/workqueue.c
2304
if (worker && worker->current_pwq->wq == wq) {
kernel/workqueue.c
2305
pwq = worker->current_pwq;
kernel/workqueue.c
2656
static struct worker *alloc_worker(int node)
kernel/workqueue.c
2658
struct worker *worker;
kernel/workqueue.c
2660
worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
kernel/workqueue.c
2661
if (worker) {
kernel/workqueue.c
2662
INIT_LIST_HEAD(&worker->entry);
kernel/workqueue.c
2663
INIT_LIST_HEAD(&worker->scheduled);
kernel/workqueue.c
2664
INIT_LIST_HEAD(&worker->node);
kernel/workqueue.c
2666
worker->flags = WORKER_PREP;
kernel/workqueue.c
2668
return worker;
kernel/workqueue.c
2688
static void worker_attach_to_pool(struct worker *worker,
kernel/workqueue.c
2699
worker->flags |= WORKER_UNBOUND;
kernel/workqueue.c
2702
kthread_set_per_cpu(worker->task, pool->cpu);
kernel/workqueue.c
2705
if (worker->rescue_wq)
kernel/workqueue.c
2706
set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));
kernel/workqueue.c
2708
list_add_tail(&worker->node, &pool->workers);
kernel/workqueue.c
2709
worker->pool = pool;
kernel/workqueue.c
2714
static void unbind_worker(struct worker *worker)
kernel/workqueue.c
2718
kthread_set_per_cpu(worker->task, -1);
kernel/workqueue.c
2720
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
kernel/workqueue.c
2722
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
kernel/workqueue.c
2726
static void detach_worker(struct worker *worker)
kernel/workqueue.c
2730
unbind_worker(worker);
kernel/workqueue.c
2731
list_del(&worker->node);
kernel/workqueue.c
2742
static void worker_detach_from_pool(struct worker *worker)
kernel/workqueue.c
2744
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
2750
detach_worker(worker);
kernel/workqueue.c
2751
worker->pool = NULL;
kernel/workqueue.c
2755
worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
kernel/workqueue.c
2758
static int format_worker_id(char *buf, size_t size, struct worker *worker,
kernel/workqueue.c
2761
if (worker->rescue_wq)
kernel/workqueue.c
2763
worker->rescue_wq->name);
kernel/workqueue.c
2768
pool->cpu, worker->id,
kernel/workqueue.c
2772
pool->id, worker->id);
kernel/workqueue.c
2790
static struct worker *create_worker(struct worker_pool *pool)
kernel/workqueue.c
2792
struct worker *worker;
kernel/workqueue.c
2803
worker = alloc_worker(pool->node);
kernel/workqueue.c
2804
if (!worker) {
kernel/workqueue.c
2809
worker->id = id;
kernel/workqueue.c
2814
format_worker_id(id_buf, sizeof(id_buf), worker, pool);
kernel/workqueue.c
2815
worker->task = kthread_create_on_node(worker_thread, worker,
kernel/workqueue.c
2817
if (IS_ERR(worker->task)) {
kernel/workqueue.c
2818
if (PTR_ERR(worker->task) == -EINTR) {
kernel/workqueue.c
2823
worker->task);
kernel/workqueue.c
2828
set_user_nice(worker->task, pool->attrs->nice);
kernel/workqueue.c
2829
kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
kernel/workqueue.c
2833
worker_attach_to_pool(worker, pool);
kernel/workqueue.c
2838
worker->pool->nr_workers++;
kernel/workqueue.c
2839
worker_enter_idle(worker);
kernel/workqueue.c
2846
if (worker->task)
kernel/workqueue.c
2847
wake_up_process(worker->task);
kernel/workqueue.c
2851
return worker;
kernel/workqueue.c
2855
kfree(worker);
kernel/workqueue.c
2861
struct worker *worker;
kernel/workqueue.c
2863
list_for_each_entry(worker, cull_list, entry)
kernel/workqueue.c
2864
detach_worker(worker);
kernel/workqueue.c
2869
struct worker *worker, *tmp;
kernel/workqueue.c
2871
list_for_each_entry_safe(worker, tmp, cull_list, entry) {
kernel/workqueue.c
2872
list_del_init(&worker->entry);
kernel/workqueue.c
2873
kthread_stop_put(worker->task);
kernel/workqueue.c
2874
kfree(worker);
kernel/workqueue.c
2889
static void set_worker_dying(struct worker *worker, struct list_head *list)
kernel/workqueue.c
2891
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
2897
if (WARN_ON(worker->current_work) ||
kernel/workqueue.c
2898
WARN_ON(!list_empty(&worker->scheduled)) ||
kernel/workqueue.c
2899
WARN_ON(!(worker->flags & WORKER_IDLE)))
kernel/workqueue.c
2905
worker->flags |= WORKER_DIE;
kernel/workqueue.c
2907
list_move(&worker->entry, list);
kernel/workqueue.c
2910
get_task_struct(worker->task);
kernel/workqueue.c
2934
struct worker *worker;
kernel/workqueue.c
2938
worker = list_last_entry(&pool->idle_list, struct worker, entry);
kernel/workqueue.c
2939
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
kernel/workqueue.c
2977
struct worker *worker;
kernel/workqueue.c
2980
worker = list_last_entry(&pool->idle_list, struct worker, entry);
kernel/workqueue.c
2981
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
kernel/workqueue.c
2988
set_worker_dying(worker, &cull_list);
kernel/workqueue.c
3142
static bool manage_workers(struct worker *worker)
kernel/workqueue.c
3144
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
3150
pool->manager = worker;
kernel/workqueue.c
3174
static void process_one_work(struct worker *worker, struct work_struct *work)
kernel/workqueue.c
3179
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
3201
hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
kernel/workqueue.c
3202
worker->current_work = work;
kernel/workqueue.c
3203
worker->current_func = work->func;
kernel/workqueue.c
3204
worker->current_pwq = pwq;
kernel/workqueue.c
3205
if (worker->task)
kernel/workqueue.c
3206
worker->current_at = worker->task->se.sum_exec_runtime;
kernel/workqueue.c
3207
worker->current_start = jiffies;
kernel/workqueue.c
3209
worker->current_color = get_work_color(work_data);
kernel/workqueue.c
3215
strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
kernel/workqueue.c
3226
worker_set_flags(worker, WORKER_CPU_INTENSIVE);
kernel/workqueue.c
3276
worker->current_func(work);
kernel/workqueue.c
3281
trace_workqueue_execute_end(work, worker->current_func);
kernel/workqueue.c
3287
if (unlikely((worker->task && in_atomic()) ||
kernel/workqueue.c
3295
worker->current_func);
kernel/workqueue.c
3308
if (worker->task)
kernel/workqueue.c
3320
worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
kernel/workqueue.c
3323
worker->last_func = worker->current_func;
kernel/workqueue.c
3326
hash_del(&worker->hentry);
kernel/workqueue.c
3327
worker->current_work = NULL;
kernel/workqueue.c
3328
worker->current_func = NULL;
kernel/workqueue.c
3329
worker->current_pwq = NULL;
kernel/workqueue.c
3330
worker->current_color = INT_MAX;
kernel/workqueue.c
3348
static void process_scheduled_works(struct worker *worker)
kernel/workqueue.c
3353
while ((work = list_first_entry_or_null(&worker->scheduled,
kernel/workqueue.c
3356
worker->pool->last_progress_ts = jiffies;
kernel/workqueue.c
3359
process_one_work(worker, work);
kernel/workqueue.c
3387
struct worker *worker = __worker;
kernel/workqueue.c
3388
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
3396
if (unlikely(worker->flags & WORKER_DIE)) {
kernel/workqueue.c
3403
worker->pool = NULL;
kernel/workqueue.c
3404
ida_free(&pool->worker_ida, worker->id);
kernel/workqueue.c
3408
worker_leave_idle(worker);
kernel/workqueue.c
3415
if (unlikely(!may_start_working(pool)) && manage_workers(worker))
kernel/workqueue.c
3423
WARN_ON_ONCE(!list_empty(&worker->scheduled));
kernel/workqueue.c
3432
worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
kernel/workqueue.c
3439
if (assign_work(work, worker, NULL))
kernel/workqueue.c
3440
process_scheduled_works(worker);
kernel/workqueue.c
3443
worker_set_flags(worker, WORKER_PREP);
kernel/workqueue.c
3452
worker_enter_idle(worker);
kernel/workqueue.c
3459
static bool assign_rescuer_work(struct pool_workqueue *pwq, struct worker *rescuer)
kernel/workqueue.c
353
struct worker *rescuer; /* MD: rescue worker */
kernel/workqueue.c
3538
struct worker *rescuer = __rescuer;
kernel/workqueue.c
3636
static void bh_worker(struct worker *worker)
kernel/workqueue.c
3638
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
3644
worker_leave_idle(worker);
kernel/workqueue.c
3653
WARN_ON_ONCE(!list_empty(&worker->scheduled));
kernel/workqueue.c
3654
worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
kernel/workqueue.c
3661
if (assign_work(work, worker, NULL))
kernel/workqueue.c
3662
process_scheduled_works(worker);
kernel/workqueue.c
3666
worker_set_flags(worker, WORKER_PREP);
kernel/workqueue.c
3668
worker_enter_idle(worker);
kernel/workqueue.c
3690
bh_worker(list_first_entry(&pool->workers, struct worker, node));
kernel/workqueue.c
3717
bh_worker(list_first_entry(&pool->workers, struct worker, node));
kernel/workqueue.c
3790
struct worker *worker;
kernel/workqueue.c
3795
worker = current_wq_worker();
kernel/workqueue.c
3801
WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
kernel/workqueue.c
3804
worker->current_pwq->wq->name, worker->current_func,
kernel/workqueue.c
3846
struct work_struct *target, struct worker *worker)
kernel/workqueue.c
3878
if (worker) {
kernel/workqueue.c
3879
head = worker->scheduled.next;
kernel/workqueue.c
3880
work_color = worker->current_color;
kernel/workqueue.c
4229
struct worker *worker = NULL;
kernel/workqueue.c
4248
worker = find_worker_executing_work(pool, work);
kernel/workqueue.c
4249
if (!worker)
kernel/workqueue.c
4251
pwq = worker->current_pwq;
kernel/workqueue.c
4257
insert_wq_barrier(pwq, barr, work, worker);
kernel/workqueue.c
5016
struct worker *worker;
kernel/workqueue.c
5062
while ((worker = first_idle_worker(pool)))
kernel/workqueue.c
5063
set_worker_dying(worker, &cull_list);
kernel/workqueue.c
5651
struct worker *rescuer;
kernel/workqueue.c
584
#define for_each_pool_worker(worker, pool) \
kernel/workqueue.c
585
list_for_each_entry((worker), &(pool)->workers, node) \
kernel/workqueue.c
6096
struct worker *worker = current_wq_worker();
kernel/workqueue.c
6098
return worker ? worker->current_work : NULL;
kernel/workqueue.c
6112
struct worker *worker = current_wq_worker();
kernel/workqueue.c
6114
return worker && worker->rescue_wq;
kernel/workqueue.c
6201
struct worker *worker = current_wq_worker();
kernel/workqueue.c
6204
if (worker) {
kernel/workqueue.c
6206
vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
kernel/workqueue.c
6232
struct worker *worker;
kernel/workqueue.c
6241
worker = kthread_probe_data(task);
kernel/workqueue.c
6247
copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
kernel/workqueue.c
6248
copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
kernel/workqueue.c
6251
copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
kernel/workqueue.c
6274
static void pr_cont_worker_id(struct worker *worker)
kernel/workqueue.c
6276
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
6282
pr_cont("%d%s", task_pid_nr(worker->task),
kernel/workqueue.c
6283
worker->rescue_wq ? "(RESCUER)" : "");
kernel/workqueue.c
6335
struct worker *worker;
kernel/workqueue.c
6346
hash_for_each(pool->busy_hash, bkt, worker, hentry) {
kernel/workqueue.c
6347
if (worker->current_pwq == pwq) {
kernel/workqueue.c
6356
hash_for_each(pool->busy_hash, bkt, worker, hentry) {
kernel/workqueue.c
6357
if (worker->current_pwq != pwq)
kernel/workqueue.c
6361
pr_cont_worker_id(worker);
kernel/workqueue.c
6362
pr_cont(":%ps", worker->current_func);
kernel/workqueue.c
6364
jiffies_to_msecs(jiffies - worker->current_start) / 1000);
kernel/workqueue.c
6365
list_for_each_entry(work, &worker->scheduled, entry)
kernel/workqueue.c
6457
struct worker *worker;
kernel/workqueue.c
6482
list_for_each_entry(worker, &pool->idle_list, entry) {
kernel/workqueue.c
6484
pr_cont_worker_id(worker);
kernel/workqueue.c
6554
struct worker *worker = kthread_data(task);
kernel/workqueue.c
6555
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
6558
off = format_worker_id(buf, size, worker, pool);
kernel/workqueue.c
6567
if (worker->desc[0] != '\0') {
kernel/workqueue.c
6568
if (worker->current_work)
kernel/workqueue.c
6570
worker->desc);
kernel/workqueue.c
6573
worker->desc);
kernel/workqueue.c
6604
struct worker *worker;
kernel/workqueue.c
6618
for_each_pool_worker(worker, pool)
kernel/workqueue.c
6619
worker->flags |= WORKER_UNBOUND;
kernel/workqueue.c
6642
for_each_pool_worker(worker, pool)
kernel/workqueue.c
6643
unbind_worker(worker);
kernel/workqueue.c
6657
struct worker *worker;
kernel/workqueue.c
6668
for_each_pool_worker(worker, pool) {
kernel/workqueue.c
6669
kthread_set_per_cpu(worker->task, pool->cpu);
kernel/workqueue.c
6670
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
kernel/workqueue.c
6678
for_each_pool_worker(worker, pool) {
kernel/workqueue.c
6679
unsigned int worker_flags = worker->flags;
kernel/workqueue.c
6699
WRITE_ONCE(worker->flags, worker_flags);
kernel/workqueue.c
6718
struct worker *worker;
kernel/workqueue.c
6729
for_each_pool_worker(worker, pool)
kernel/workqueue.c
6730
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
kernel/workqueue.c
7004
struct worker *worker;
kernel/workqueue.c
7018
for_each_pool_worker(worker, pool)
kernel/workqueue.c
7019
unbind_worker(worker);
kernel/workqueue.c
7592
struct worker *worker;
kernel/workqueue.c
7598
hash_for_each(pool->busy_hash, bkt, worker, hentry) {
kernel/workqueue.c
7607
sched_show_task(worker->task);
kernel/workqueue.c
975
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
kernel/workqueue.c
977
struct worker_pool *pool = worker->pool;
kernel/workqueue.c
983
!(worker->flags & WORKER_NOT_RUNNING)) {
kernel/workqueue.c
987
worker->flags |= flags;
kernel/workqueue.c
997
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
kernel/workqueue.c
999
struct worker_pool *pool = worker->pool;
kernel/workqueue_internal.h
68
static inline struct worker *current_wq_worker(void)
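struct worker in kernel/workqueue.c (and its accessors in workqueue_internal.h) is never seen by drivers; they reach it only through the work_struct API, which is all it takes to exercise the idle/busy machinery above. A minimal sketch (the demo_* names are illustrative):

#include <linux/workqueue.h>

static void demo_wq_fn(struct work_struct *work)
{
	/* process_one_work() invokes this on whichever pool worker picked it up. */
}

static DECLARE_WORK(demo_work, demo_wq_fn);

static void demo_kick(void)
{
	/* Wakes an idle struct worker in the per-CPU pool if one exists. */
	queue_work(system_wq, &demo_work);
}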
lib/test_objpool.c
165
item->worker(item, 1);
lib/test_objpool.c
200
void (*worker)(struct ot_item *, int))
lib/test_objpool.c
205
item->worker = worker;
lib/test_objpool.c
231
item->worker(item, 0);
lib/test_objpool.c
66
void (*worker)(struct ot_item *item, int irq);
samples/seccomp/user-trap.c
208
pid_t worker = 0, tracer = 0;
samples/seccomp/user-trap.c
215
worker = fork();
samples/seccomp/user-trap.c
216
if (worker < 0) {
samples/seccomp/user-trap.c
221
if (worker == 0) {
samples/seccomp/user-trap.c
347
if (waitpid(worker, &status, 0) != worker) {
samples/seccomp/user-trap.c
372
if (worker > 0)
samples/seccomp/user-trap.c
373
kill(worker, SIGKILL);
tools/perf/bench/epoll-ctl.c
132
static inline void do_epoll_op(struct worker *w, int op, int fd)
tools/perf/bench/epoll-ctl.c
160
static inline void do_random_epoll_op(struct worker *w)
tools/perf/bench/epoll-ctl.c
174
struct worker *w = (struct worker *) arg;
tools/perf/bench/epoll-ctl.c
204
static void init_fdmaps(struct worker *w, int pct)
tools/perf/bench/epoll-ctl.c
223
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
tools/perf/bench/epoll-ctl.c
241
struct worker *w = &worker[i];
tools/perf/bench/epoll-ctl.c
277
(void *)(struct worker *) w);
tools/perf/bench/epoll-ctl.c
317
struct worker *worker = NULL;
tools/perf/bench/epoll-ctl.c
352
worker = calloc(nthreads, sizeof(*worker));
tools/perf/bench/epoll-ctl.c
353
if (!worker)
tools/perf/bench/epoll-ctl.c
379
do_threads(worker, cpu);
tools/perf/bench/epoll-ctl.c
392
ret = pthread_join(worker[i].thread, NULL);
tools/perf/bench/epoll-ctl.c
406
t[j] = worker[i].ops[j];
tools/perf/bench/epoll-ctl.c
412
worker[i].tid, &worker[i].fdmap[0],
tools/perf/bench/epoll-ctl.c
416
worker[i].tid, &worker[i].fdmap[0],
tools/perf/bench/epoll-ctl.c
417
&worker[i].fdmap[nfds-1],
tools/perf/bench/epoll-ctl.c
426
free(worker[i].fdmap);
tools/perf/bench/epoll-ctl.c
428
free(worker);
tools/perf/bench/epoll-wait.c
186
struct worker *w = (struct worker *) arg;
tools/perf/bench/epoll-wait.c
240
static void nest_epollfd(struct worker *w)
tools/perf/bench/epoll-wait.c
292
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
tools/perf/bench/epoll-wait.c
318
struct worker *w = &worker[i];
tools/perf/bench/epoll-wait.c
366
(void *)(struct worker *) w);
tools/perf/bench/epoll-wait.c
382
struct worker *worker = p;
tools/perf/bench/epoll-wait.c
394
shuffle((void *)worker, nthreads, sizeof(*worker));
tools/perf/bench/epoll-wait.c
398
struct worker *w = &worker[i];
tools/perf/bench/epoll-wait.c
421
struct worker *w1 = (struct worker *) p1;
tools/perf/bench/epoll-wait.c
422
struct worker *w2 = (struct worker *) p2;
tools/perf/bench/epoll-wait.c
436
struct worker *worker = NULL;
tools/perf/bench/epoll-wait.c
476
worker = calloc(nthreads, sizeof(*worker));
tools/perf/bench/epoll-wait.c
477
if (!worker) {
tools/perf/bench/epoll-wait.c
502
do_threads(worker, cpu);
tools/perf/bench/epoll-wait.c
516
(void *)(struct worker *) worker);
tools/perf/bench/epoll-wait.c
537
qsort(worker, nthreads, sizeof(struct worker), cmpworker);
tools/perf/bench/epoll-wait.c
541
worker[i].ops / bench__runtime.tv_sec : 0;
tools/perf/bench/epoll-wait.c
547
worker[i].tid, &worker[i].fdmap[0], t);
tools/perf/bench/epoll-wait.c
550
worker[i].tid, &worker[i].fdmap[0],
tools/perf/bench/epoll-wait.c
551
&worker[i].fdmap[nfds-1], t);
tools/perf/bench/epoll-wait.c
559
free(worker[i].fdmap);
tools/perf/bench/epoll-wait.c
561
free(worker);
tools/perf/bench/futex-hash.c
134
struct worker *worker = NULL;
tools/perf/bench/futex-hash.c
162
worker = calloc(params.nthreads, sizeof(*worker));
tools/perf/bench/futex-hash.c
163
if (!worker)
tools/perf/bench/futex-hash.c
188
worker[i].tid = i;
tools/perf/bench/futex-hash.c
189
worker[i].futex = calloc(params.nfutexes, sizeof(*worker[i].futex));
tools/perf/bench/futex-hash.c
190
if (!worker[i].futex)
tools/perf/bench/futex-hash.c
201
ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
tools/perf/bench/futex-hash.c
202
(void *)(struct worker *) &worker[i]);
tools/perf/bench/futex-hash.c
222
ret = pthread_join(worker[i].thread, NULL);
tools/perf/bench/futex-hash.c
234
worker[i].ops / bench__runtime.tv_sec : 0;
tools/perf/bench/futex-hash.c
239
worker[i].tid, &worker[i].futex[0], t);
tools/perf/bench/futex-hash.c
242
worker[i].tid, &worker[i].futex[0],
tools/perf/bench/futex-hash.c
243
&worker[i].futex[params.nfutexes-1], t);
tools/perf/bench/futex-hash.c
246
zfree(&worker[i].futex);
tools/perf/bench/futex-hash.c
251
free(worker);
tools/perf/bench/futex-hash.c
76
struct worker *w = (struct worker *) arg;
tools/perf/bench/futex-lock-pi.c
124
static void create_threads(struct worker *w, struct perf_cpu_map *cpu)
tools/perf/bench/futex-lock-pi.c
141
worker[i].tid = i;
tools/perf/bench/futex-lock-pi.c
144
worker[i].futex = calloc(1, sizeof(u_int32_t));
tools/perf/bench/futex-lock-pi.c
145
if (!worker[i].futex)
tools/perf/bench/futex-lock-pi.c
148
worker[i].futex = &global_futex;
tools/perf/bench/futex-lock-pi.c
158
if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {
tools/perf/bench/futex-lock-pi.c
195
worker = calloc(params.nthreads, sizeof(*worker));
tools/perf/bench/futex-lock-pi.c
196
if (!worker)
tools/perf/bench/futex-lock-pi.c
214
create_threads(worker, cpu);
tools/perf/bench/futex-lock-pi.c
226
ret = pthread_join(worker[i].thread, NULL);
tools/perf/bench/futex-lock-pi.c
238
worker[i].ops / bench__runtime.tv_sec : 0;
tools/perf/bench/futex-lock-pi.c
243
worker[i].tid, worker[i].futex, t);
tools/perf/bench/futex-lock-pi.c
246
zfree(&worker[i].futex);
tools/perf/bench/futex-lock-pi.c
251
free(worker);
tools/perf/bench/futex-lock-pi.c
35
static struct worker *worker;
tools/perf/bench/futex-lock-pi.c
87
struct worker *w = (struct worker *) arg;
tools/perf/bench/futex-requeue.c
197
worker = calloc(params.nthreads, sizeof(*worker));
tools/perf/bench/futex-requeue.c
198
if (!worker)
tools/perf/bench/futex-requeue.c
228
block_threads(worker, cpu);
tools/perf/bench/futex-requeue.c
300
ret = pthread_join(worker[i], NULL);
tools/perf/bench/futex-requeue.c
313
free(worker);
tools/perf/bench/futex-requeue.c
36
static pthread_t *worker;
tools/perf/bench/futex-wake-parallel.c
118
if (pthread_create(&td[i].worker, &thread_attr,
tools/perf/bench/futex-wake-parallel.c
126
if (pthread_join(td[i].worker, NULL))
tools/perf/bench/futex-wake-parallel.c
41
pthread_t worker;
tools/perf/bench/futex-wake.c
172
worker = calloc(params.nthreads, sizeof(*worker));
tools/perf/bench/futex-wake.c
173
if (!worker)
tools/perf/bench/futex-wake.c
195
block_threads(worker, cpu);
tools/perf/bench/futex-wake.c
224
ret = pthread_join(worker[i], NULL);
tools/perf/bench/futex-wake.c
238
free(worker);
tools/perf/bench/futex-wake.c
36
static pthread_t *worker;
tools/perf/bench/sched-messaging.c
150
static void create_thread_worker(union messaging_worker *worker,
tools/perf/bench/sched-messaging.c
164
ret = pthread_create(&worker->thread, &attr, func, ctx);
tools/perf/bench/sched-messaging.c
171
static void create_process_worker(union messaging_worker *worker,
tools/perf/bench/sched-messaging.c
175
worker->pid = fork();
tools/perf/bench/sched-messaging.c
177
if (worker->pid == -1) {
tools/perf/bench/sched-messaging.c
179
} else if (worker->pid == 0) {
tools/perf/bench/sched-messaging.c
185
static void create_worker(union messaging_worker *worker,
tools/perf/bench/sched-messaging.c
189
return create_process_worker(worker, ctx, func);
tools/perf/bench/sched-messaging.c
191
return create_thread_worker(worker, ctx, func);
tools/perf/bench/sched-messaging.c
194
static void reap_worker(union messaging_worker *worker)
tools/perf/bench/sched-messaging.c
205
pthread_join(worker->thread, &thread_status);
tools/perf/bench/sched-messaging.c
210
static unsigned int group(union messaging_worker *worker,
tools/perf/bench/sched-messaging.c
241
create_worker(worker + i, ctx, (void *)receiver);
tools/perf/bench/sched-messaging.c
254
create_worker(worker + num_fds + i, snd_ctx, (void *)sender);
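The tools/perf/bench files above all follow one userspace shape: calloc() an array of struct worker, start one pthread per slot, join them, and report per-worker ops. Reduced to a standalone sketch (build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct worker {
	pthread_t thread;
	int tid;
	unsigned long ops;
};

static void *workerfn(void *arg)
{
	struct worker *w = arg;

	w->ops++;	/* stand-in for the real benchmark loop */
	return NULL;
}

int main(void)
{
	const int nthreads = 4;
	struct worker *worker = calloc(nthreads, sizeof(*worker));

	if (!worker)
		return 1;
	for (int i = 0; i < nthreads; i++) {
		worker[i].tid = i;
		if (pthread_create(&worker[i].thread, NULL, workerfn, &worker[i]))
			return 1;
	}
	for (int i = 0; i < nthreads; i++) {
		pthread_join(worker[i].thread, NULL);
		printf("[thread %2d] ops: %lu\n", worker[i].tid, worker[i].ops);
	}
	free(worker);
	return 0;
}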
tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c
50
err = pthread_create(threads + i, NULL, worker, NULL);
tools/testing/selftests/coredump/coredump_socket_protocol_test.c
1505
pid_t worker = fork();
tools/testing/selftests/coredump/coredump_socket_protocol_test.c
1506
if (worker == 0) {
tools/testing/selftests/coredump/coredump_socket_protocol_test.c
1510
worker_pids[n_conns] = worker;
tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
104
pthread_create(&thread[i], NULL, &worker, NULL);
tools/testing/selftests/sched_ext/cyclic_kick_wait.c
45
struct worker_ctx *worker = arg;
tools/testing/selftests/sched_ext/cyclic_kick_wait.c
49
CPU_SET(worker->cpu, &mask);
tools/testing/selftests/sched_ext/cyclic_kick_wait.c
54
while (!worker->stop) {
tools/testing/selftests/sched_ext/cyclic_kick_wait.c
56
worker->iters++;
tools/testing/selftests/sched_ext/cyclic_kick_wait.c
62
static int join_worker(struct worker_ctx *worker)
tools/testing/selftests/sched_ext/cyclic_kick_wait.c
68
if (!worker->started)
tools/testing/selftests/sched_ext/cyclic_kick_wait.c
75
err = pthread_timedjoin_np(worker->tid, &ret, &ts);
tools/testing/selftests/sched_ext/cyclic_kick_wait.c
77
pthread_detach(worker->tid);
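join_worker() in the sched_ext selftest above relies on pthread_timedjoin_np() so a wedged worker cannot hang the test forever. The GNU-extension pattern in isolation (the demo helper is illustrative and needs _GNU_SOURCE):

#define _GNU_SOURCE
#include <pthread.h>
#include <time.h>
#include <errno.h>

/* Returns 0 on join, ETIMEDOUT if the thread is still running after @secs. */
static int demo_join_timeout(pthread_t tid, int secs)
{
	struct timespec ts;
	void *ret;

	clock_gettime(CLOCK_REALTIME, &ts);	/* timedjoin takes absolute CLOCK_REALTIME */
	ts.tv_sec += secs;
	return pthread_timedjoin_np(tid, &ret, &ts);
}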