arch/alpha/include/asm/core_t2.h
497
unsigned long r0, r1, work;
arch/alpha/include/asm/core_t2.h
501
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
arch/alpha/include/asm/core_t2.h
502
r0 = *(vuip)(work);
arch/alpha/include/asm/core_t2.h
503
r1 = *(vuip)(work + (4 << 5));
arch/alpha/include/asm/core_t2.h
545
unsigned long work;
arch/alpha/include/asm/core_t2.h
549
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
arch/alpha/include/asm/core_t2.h
550
*(vuip)work = b;
arch/alpha/include/asm/core_t2.h
551
*(vuip)(work + (4 << 5)) = b >> 32;
arch/arc/include/asm/entry-arcv2.h
157
; Saving pt_regs->sp correctly requires some extra work due to the way
arch/arm64/kvm/pmu-emul.c
468
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
arch/arm64/kvm/pmu-emul.c
472
vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
arch/mips/cavium-octeon/executive/cvmx-helper.c
793
struct cvmx_wqe *work;
arch/mips/cavium-octeon/executive/cvmx-helper.c
929
work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
arch/mips/cavium-octeon/executive/cvmx-helper.c
931
} while ((work == NULL) && (retry_cnt > 0));
arch/mips/cavium-octeon/executive/cvmx-helper.c
938
if (work)
arch/mips/cavium-octeon/executive/cvmx-helper.c
939
cvmx_helper_free_packet_data(work);
arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
63
4: # core-16057 work around
arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
66
5: # No core-16057 work around
arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
53
struct delayed_work work;
arch/mips/include/asm/octeon/cvmx-helper-util.h
126
static inline void cvmx_helper_free_packet_data(struct cvmx_wqe *work)
arch/mips/include/asm/octeon/cvmx-helper-util.h
133
number_buffers = work->word2.s.bufs;
arch/mips/include/asm/octeon/cvmx-helper-util.h
136
buffer_ptr = work->packet_ptr;
arch/mips/include/asm/octeon/cvmx-helper-util.h
147
if (cvmx_ptr_to_phys(work) == start_of_buffer) {
arch/mips/include/asm/octeon/cvmx-wqe.h
598
static inline int cvmx_wqe_get_port(struct cvmx_wqe *work)
arch/mips/include/asm/octeon/cvmx-wqe.h
603
port = work->word2.s_cn68xx.port;
arch/mips/include/asm/octeon/cvmx-wqe.h
605
port = work->word1.cn38xx.ipprt;
arch/mips/include/asm/octeon/cvmx-wqe.h
610
static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port)
arch/mips/include/asm/octeon/cvmx-wqe.h
613
work->word2.s_cn68xx.port = port;
arch/mips/include/asm/octeon/cvmx-wqe.h
615
work->word1.cn38xx.ipprt = port;
arch/mips/include/asm/octeon/cvmx-wqe.h
618
static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work)
arch/mips/include/asm/octeon/cvmx-wqe.h
623
grp = work->word1.cn68xx.grp;
arch/mips/include/asm/octeon/cvmx-wqe.h
625
grp = work->word1.cn38xx.grp;
arch/mips/include/asm/octeon/cvmx-wqe.h
630
static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp)
arch/mips/include/asm/octeon/cvmx-wqe.h
633
work->word1.cn68xx.grp = grp;
arch/mips/include/asm/octeon/cvmx-wqe.h
635
work->word1.cn38xx.grp = grp;
arch/mips/include/asm/octeon/cvmx-wqe.h
638
static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work)
arch/mips/include/asm/octeon/cvmx-wqe.h
643
qos = work->word1.cn68xx.qos;
arch/mips/include/asm/octeon/cvmx-wqe.h
645
qos = work->word1.cn38xx.qos;
arch/mips/include/asm/octeon/cvmx-wqe.h
650
static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos)
arch/mips/include/asm/octeon/cvmx-wqe.h
653
work->word1.cn68xx.qos = qos;
arch/mips/include/asm/octeon/cvmx-wqe.h
655
work->word1.cn38xx.qos = qos;
arch/mips/loongson2ef/lemote-2f/pm.c
84
static void yeeloong_lid_update_task(struct work_struct *work)
arch/powerpc/kernel/mce.c
284
static void machine_process_ue_event(struct work_struct *work)
arch/powerpc/kernel/mce.c
31
static void machine_process_ue_event(struct work_struct *work);
arch/powerpc/kernel/tau_6xx.c
158
static void tau_work_func(struct work_struct *work)
arch/powerpc/kernel/tau_6xx.c
163
queue_work(tau_workq, work);
arch/powerpc/kvm/book3s_64_mmu_hv.c
1412
static void resize_hpt_prepare_work(struct work_struct *work)
arch/powerpc/kvm/book3s_64_mmu_hv.c
1414
struct kvm_resize_hpt *resize = container_of(work,
arch/powerpc/kvm/book3s_64_mmu_hv.c
1416
work);
arch/powerpc/kvm/book3s_64_mmu_hv.c
1506
INIT_WORK(&resize->work, resize_hpt_prepare_work);
arch/powerpc/kvm/book3s_64_mmu_hv.c
1509
schedule_work(&resize->work);
arch/powerpc/kvm/book3s_64_mmu_hv.c
54
struct work_struct work;
arch/powerpc/platforms/85xx/sgy_cts1000.c
34
static void gpio_halt_wfn(struct work_struct *work)
arch/powerpc/platforms/powermac/backlight.c
134
static void pmac_backlight_set_legacy_worker(struct work_struct *work)
arch/powerpc/platforms/powermac/backlight.c
22
static void pmac_backlight_key_worker(struct work_struct *work);
arch/powerpc/platforms/powermac/backlight.c
23
static void pmac_backlight_set_legacy_worker(struct work_struct *work);
arch/powerpc/platforms/powermac/backlight.c
67
static void pmac_backlight_key_worker(struct work_struct *work)
arch/powerpc/platforms/powernv/opal-hmi.c
270
static void hmi_event_handler(struct work_struct *work)
arch/powerpc/platforms/powernv/opal-memory-errors.c
76
static void mem_error_handler(struct work_struct *work)
arch/powerpc/platforms/ps3/os-area.c
669
static void os_area_queue_work_handler(struct work_struct *work)
arch/powerpc/platforms/pseries/dlpar.c
31
struct work_struct work;
arch/powerpc/platforms/pseries/dlpar.c
616
static void pseries_hp_work_fn(struct work_struct *work)
arch/powerpc/platforms/pseries/dlpar.c
619
container_of(work, struct pseries_hp_work, work);
arch/powerpc/platforms/pseries/dlpar.c
624
kfree(work);
arch/powerpc/platforms/pseries/dlpar.c
629
struct pseries_hp_work *work;
arch/powerpc/platforms/pseries/dlpar.c
636
work = kmalloc_obj(struct pseries_hp_work, GFP_ATOMIC);
arch/powerpc/platforms/pseries/dlpar.c
637
if (work) {
arch/powerpc/platforms/pseries/dlpar.c
638
INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
arch/powerpc/platforms/pseries/dlpar.c
639
work->errlog = hp_errlog_copy;
arch/powerpc/platforms/pseries/dlpar.c
640
queue_work(pseries_hp_wq, (struct work_struct *)work);
arch/powerpc/platforms/pseries/lpar.c
145
struct delayed_work work;
arch/powerpc/platforms/pseries/lpar.c
361
static void process_dtl_buffer(struct work_struct *work)
arch/powerpc/platforms/pseries/lpar.c
368
struct dtl_worker *d = container_of(work, struct dtl_worker, work.work);
arch/powerpc/platforms/pseries/lpar.c
405
schedule_delayed_work_on(d->cpu, to_delayed_work(work),
arch/powerpc/platforms/pseries/lpar.c
414
INIT_DELAYED_WORK(&d->work, process_dtl_buffer);
arch/powerpc/platforms/pseries/lpar.c
424
schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
arch/powerpc/platforms/pseries/lpar.c
432
cancel_delayed_work_sync(&d->work);
arch/powerpc/platforms/pseries/papr-hvpipe.c
573
static void papr_hvpipe_work_fn(struct work_struct *work)
arch/powerpc/platforms/pseries/papr-indices.c
260
.work = indices_sequence_fill_work_area,
arch/powerpc/platforms/pseries/papr-phy-attest.c
240
.work = phy_attest_sequence_fill_work_area,
arch/powerpc/platforms/pseries/papr-rtas-common.c
90
if (!seq->work)
arch/powerpc/platforms/pseries/papr-rtas-common.c
94
while (err == 0 && (buf = seq->work(seq, &len)))
arch/powerpc/platforms/pseries/papr-rtas-common.h
42
const char *(*work)(struct papr_rtas_sequence *seq, size_t *len);
arch/powerpc/platforms/pseries/papr-vpd.c
227
.work = vpd_sequence_fill_work_area,
arch/powerpc/platforms/pseries/vio.c
386
static void vio_cmo_balance(struct work_struct *work)
arch/powerpc/platforms/pseries/vio.c
395
cmo = container_of(work, struct vio_cmo, balance_q.work);
arch/powerpc/platforms/pseries/vio.c
478
cancel_delayed_work(to_delayed_work(work));
arch/powerpc/sysdev/fsl_rmu.c
479
static void fsl_pw_dpc(struct work_struct *work)
arch/powerpc/sysdev/fsl_rmu.c
481
struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
arch/riscv/include/asm/cpufeature.h
96
void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused);
arch/riscv/kernel/traps_misaligned.c
455
void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused)
arch/riscv/kernel/unaligned_access_speed.c
281
static void check_vector_unaligned_access(struct work_struct *work __always_unused)
arch/s390/appldata/appldata_base.c
108
static void appldata_work_fn(struct work_struct *work)
arch/s390/appldata/appldata_base.c
81
static void appldata_work_fn(struct work_struct *work);
arch/s390/include/asm/kvm_host.h
699
struct kvm_async_pf *work);
arch/s390/include/asm/kvm_host.h
702
struct kvm_async_pf *work);
arch/s390/include/asm/kvm_host.h
705
struct kvm_async_pf *work);
arch/s390/kernel/diag/diag324.c
111
static void pibwork_handler(struct work_struct *work)
arch/s390/kernel/diag/diag324.c
96
static void pibwork_handler(struct work_struct *work);
arch/s390/kernel/hiperdispatch.c
237
static void hd_capacity_work_fn(struct work_struct *work)
arch/s390/kernel/hiperdispatch.c
85
static void hd_capacity_work_fn(struct work_struct *work);
arch/s390/kernel/time.c
403
static void stp_work_fn(struct work_struct *work);
arch/s390/kernel/time.c
563
static void stp_work_fn(struct work_struct *work)
arch/s390/kernel/topology.c
343
static void topology_work_fn(struct work_struct *work)
arch/s390/kernel/topology.c
50
static void topology_work_fn(struct work_struct *work);
arch/s390/kvm/kvm-s390.c
4390
struct kvm_async_pf *work)
arch/s390/kvm/kvm-s390.c
4392
trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
arch/s390/kvm/kvm-s390.c
4393
__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
arch/s390/kvm/kvm-s390.c
4399
struct kvm_async_pf *work)
arch/s390/kvm/kvm-s390.c
4401
trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
arch/s390/kvm/kvm-s390.c
4402
__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
arch/s390/kvm/kvm-s390.c
4406
struct kvm_async_pf *work)
arch/sh/drivers/push-switch.c
105
flush_work(&psw->work);
arch/sh/drivers/push-switch.c
30
schedule_work(&psw->work);
arch/sh/drivers/push-switch.c
33
static void switch_work_handler(struct work_struct *work)
arch/sh/drivers/push-switch.c
35
struct push_switch *psw = container_of(work, struct push_switch, work);
arch/sh/drivers/push-switch.c
77
INIT_WORK(&psw->work, switch_work_handler);
arch/sh/include/asm/push-switch.h
16
struct work_struct work;
arch/sparc/kernel/leon_smp.c
275
struct leon_ipi_work *work;
arch/sparc/kernel/leon_smp.c
298
work = &per_cpu(leon_ipi_work, cpu);
arch/sparc/kernel/leon_smp.c
299
work->single = work->msk = work->resched = 0;
arch/sparc/kernel/leon_smp.c
312
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
arch/sparc/kernel/leon_smp.c
315
work->single = 1;
arch/sparc/kernel/leon_smp.c
323
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
arch/sparc/kernel/leon_smp.c
326
work->msk = 1;
arch/sparc/kernel/leon_smp.c
334
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
arch/sparc/kernel/leon_smp.c
337
work->resched = 1;
arch/sparc/kernel/leon_smp.c
345
struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work);
arch/sparc/kernel/leon_smp.c
347
if (work->single) {
arch/sparc/kernel/leon_smp.c
348
work->single = 0;
arch/sparc/kernel/leon_smp.c
351
if (work->msk) {
arch/sparc/kernel/leon_smp.c
352
work->msk = 0;
arch/sparc/kernel/leon_smp.c
355
if (work->resched) {
arch/sparc/kernel/leon_smp.c
356
work->resched = 0;
arch/sparc/kernel/sun4d_smp.c
196
struct sun4d_ipi_work *work;
arch/sparc/kernel/sun4d_smp.c
201
work = &per_cpu(sun4d_ipi_work, cpu);
arch/sparc/kernel/sun4d_smp.c
202
work->single = work->msk = work->resched = 0;
arch/sparc/kernel/sun4d_smp.c
208
struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work);
arch/sparc/kernel/sun4d_smp.c
210
if (work->single) {
arch/sparc/kernel/sun4d_smp.c
211
work->single = 0;
arch/sparc/kernel/sun4d_smp.c
214
if (work->msk) {
arch/sparc/kernel/sun4d_smp.c
215
work->msk = 0;
arch/sparc/kernel/sun4d_smp.c
218
if (work->resched) {
arch/sparc/kernel/sun4d_smp.c
219
work->resched = 0;
arch/sparc/kernel/sun4d_smp.c
239
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
arch/sparc/kernel/sun4d_smp.c
242
work->single = 1;
arch/sparc/kernel/sun4d_smp.c
250
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
arch/sparc/kernel/sun4d_smp.c
253
work->msk = 1;
arch/sparc/kernel/sun4d_smp.c
261
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
arch/sparc/kernel/sun4d_smp.c
264
work->resched = 1;
arch/um/drivers/chan_kern.c
165
static void line_timer_cb(struct work_struct *work)
arch/um/drivers/chan_kern.c
167
struct line *line = container_of(work, struct line, task.work);
arch/um/drivers/line.c
598
struct work_struct work;
arch/um/drivers/line.c
601
static void __free_winch(struct work_struct *work)
arch/um/drivers/line.c
603
struct winch *winch = container_of(work, struct winch, work);
arch/um/drivers/line.c
619
__free_winch(&winch->work);
arch/um/drivers/line.c
646
INIT_WORK(&winch->work, __free_winch);
arch/um/drivers/line.c
647
schedule_work(&winch->work);
arch/um/drivers/vector_kern.c
1185
static void vector_reset_tx(struct work_struct *work)
arch/um/drivers/vector_kern.c
1188
container_of(work, struct vector_private, reset_tx);
arch/x86/include/asm/kvm_host.h
2429
struct kvm_async_pf *work);
arch/x86/include/asm/kvm_host.h
2431
struct kvm_async_pf *work);
arch/x86/include/asm/kvm_host.h
2433
struct kvm_async_pf *work);
arch/x86/kernel/cpu/aperfmperf.c
337
static void disable_freq_invariance_workfn(struct work_struct *work)
arch/x86/kernel/cpu/bus_lock.c
185
static void __split_lock_reenable_unlock(struct work_struct *work)
arch/x86/kernel/cpu/bus_lock.c
193
static void __split_lock_reenable(struct work_struct *work)
arch/x86/kernel/cpu/bus_lock.c
216
struct delayed_work *work = per_cpu_ptr(&sl_reenable, cpu);
arch/x86/kernel/cpu/bus_lock.c
218
INIT_DELAYED_WORK(work, __split_lock_reenable);
arch/x86/kernel/cpu/bus_lock.c
244
struct delayed_work *work;
arch/x86/kernel/cpu/bus_lock.c
269
work = saved_sld_mitigate ? &sl_reenable_unlock : per_cpu_ptr(&sl_reenable, cpu);
arch/x86/kernel/cpu/bus_lock.c
270
schedule_delayed_work_on(cpu, work, 2);
arch/x86/kernel/cpu/mce/dev-mcelog.c
81
static void mce_do_trigger(struct work_struct *work)
arch/x86/kernel/tsc.c
1307
static void tsc_refine_calibration_work(struct work_struct *work);
arch/x86/kernel/tsc.c
1323
static void tsc_refine_calibration_work(struct work_struct *work)
arch/x86/kernel/tsc_sync.c
345
static void tsc_sync_mark_tsc_unstable(struct work_struct *work)
arch/x86/kvm/i8254.c
240
static void pit_do_work(struct kthread_work *work)
arch/x86/kvm/i8254.c
242
struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
arch/x86/kvm/ioapic.c
519
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
arch/x86/kvm/ioapic.c
522
struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
arch/x86/kvm/ioapic.c
523
eoi_inject.work);
arch/x86/kvm/mmu/mmu.c
4536
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
arch/x86/kvm/mmu/mmu.c
4540
if (WARN_ON_ONCE(work->arch.error_code & PFERR_PRIVATE_ACCESS))
arch/x86/kvm/mmu/mmu.c
4543
if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
arch/x86/kvm/mmu/mmu.c
4544
work->wakeup_all)
arch/x86/kvm/mmu/mmu.c
4552
work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
arch/x86/kvm/mmu/mmu.c
4555
r = kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code,
arch/x86/kvm/x86.c
13945
struct kvm_async_pf *work)
arch/x86/kvm/x86.c
13949
trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
arch/x86/kvm/x86.c
13950
kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
arch/x86/kvm/x86.c
13958
fault.address = work->arch.token;
arch/x86/kvm/x86.c
13977
struct kvm_async_pf *work)
arch/x86/kvm/x86.c
13984
if (work->wakeup_all)
arch/x86/kvm/x86.c
13985
work->arch.token = ~0; /* broadcast wakeup */
arch/x86/kvm/x86.c
13987
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
arch/x86/kvm/x86.c
13988
trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
arch/x86/kvm/x86.c
13990
if ((work->wakeup_all || work->notpresent_injected) &&
arch/x86/kvm/x86.c
13992
!apf_put_user_ready(vcpu, work->arch.token)) {
arch/x86/kvm/x86.c
9929
static void pvclock_gtod_update_fn(struct work_struct *work)
arch/x86/platform/efi/quirks.c
782
current_work() != &efi_rts_work.work)
arch/x86/platform/olpc/olpc-xo1-sci.c
205
static void process_sci_queue_work(struct work_struct *work)
arch/x86/platform/olpc/olpc-xo15-sci.c
126
static void process_sci_queue_work(struct work_struct *work)
block/bio-integrity-auto.c
19
struct work_struct work;
block/bio-integrity-auto.c
36
static void bio_integrity_verify_fn(struct work_struct *work)
block/bio-integrity-auto.c
39
container_of(work, struct bio_integrity_data, work);
block/bio-integrity-auto.c
75
INIT_WORK(&bid->work, bio_integrity_verify_fn);
block/bio-integrity-auto.c
76
queue_work(kintegrityd_wq, &bid->work);
block/bio.c
1669
static void bio_dirty_fn(struct work_struct *work);
block/bio.c
1678
static void bio_dirty_fn(struct work_struct *work)
block/bio.c
416
static void bio_alloc_rescue(struct work_struct *work)
block/bio.c
418
struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
block/blk-cgroup.c
113
static void blkg_free_workfn(struct work_struct *work)
block/blk-cgroup.c
115
struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
block/blk-cgroup.c
204
static void blkg_async_bio_workfn(struct work_struct *work)
block/blk-cgroup.c
206
struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
block/blk-core.c
1116
int kblockd_schedule_work(struct work_struct *work)
block/blk-core.c
1118
return queue_work(kblockd_workqueue, work);
block/blk-core.c
389
static void blk_timeout_work(struct work_struct *work)
block/blk-crypto-fallback.c
421
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
block/blk-crypto-fallback.c
424
container_of(work, struct bio_fallback_crypt_ctx, work);
block/blk-crypto-fallback.c
466
INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
block/blk-crypto-fallback.c
468
queue_work(blk_crypto_wq, &f_ctx->work);
block/blk-crypto-fallback.c
51
struct work_struct work;
block/blk-ioc.c
105
static void ioc_release_fn(struct work_struct *work)
block/blk-ioc.c
107
struct io_context *ioc = container_of(work, struct io_context,
block/blk-iolatency.c
728
static void blkiolatency_enable_work_fn(struct work_struct *work)
block/blk-iolatency.c
730
struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
block/blk-mq.c
1568
static void blk_mq_requeue_work(struct work_struct *work)
block/blk-mq.c
1571
container_of(work, struct request_queue, requeue_work.work);
block/blk-mq.c
1735
static void blk_mq_timeout_work(struct work_struct *work)
block/blk-mq.c
1738
container_of(work, struct request_queue, timeout_work);
block/blk-mq.c
2557
static void blk_mq_run_work_fn(struct work_struct *work)
block/blk-mq.c
2560
container_of(work, struct blk_mq_hw_ctx, run_work.work);
block/blk-throttle.c
1198
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
block/blk-throttle.c
1200
struct throtl_data *td = container_of(work, struct throtl_data,
block/blk-zoned.c
1730
static void blk_zone_wplug_bio_work(struct work_struct *work)
block/blk-zoned.c
1733
container_of(work, struct blk_zone_wplug, bio_work);
block/blk-zoned.c
658
static void blk_zone_wplug_bio_work(struct work_struct *work);
block/disk-events.c
307
static void disk_events_workfn(struct work_struct *work)
block/disk-events.c
309
struct delayed_work *dwork = to_delayed_work(work);
crypto/acompress.c
357
static void acomp_stream_workfn(struct work_struct *work)
crypto/acompress.c
360
container_of(work, struct crypto_acomp_streams, stream_work);
crypto/cryptd.c
113
INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
crypto/cryptd.c
149
queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
crypto/cryptd.c
166
static void cryptd_queue_worker(struct work_struct *work)
crypto/cryptd.c
171
cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
crypto/cryptd.c
190
queue_work(cryptd_wq, &cpu_queue->work);
crypto/cryptd.c
39
struct work_struct work;
crypto/cryptd.c
99
static void cryptd_queue_worker(struct work_struct *work);
crypto/crypto_engine.c
174
static void crypto_pump_work(struct kthread_work *work)
crypto/crypto_engine.c
177
container_of(work, struct crypto_engine, pump_requests);
crypto/scompress.c
45
static void scomp_scratch_workfn(struct work_struct *work);
crypto/scompress.c
95
static void scomp_scratch_workfn(struct work_struct *work)
drivers/accel/amdxdna/aie2_error.c
24
struct work_struct work;
drivers/accel/amdxdna/aie2_error.c
276
queue_work(e->wq, &e->work);
drivers/accel/amdxdna/aie2_error.c
295
e = container_of(err_work, struct async_event, work);
drivers/accel/amdxdna/aie2_error.c
382
INIT_WORK(&e->work, aie2_error_worker);
drivers/accel/amdxdna/amdxdna_gem.c
185
static void amdxdna_hmm_unreg_work(struct work_struct *work)
drivers/accel/amdxdna/amdxdna_gem.c
187
struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap,
drivers/accel/habanalabs/common/command_submission.c
1166
static void job_wq_completion(struct work_struct *work)
drivers/accel/habanalabs/common/command_submission.c
1168
struct hl_cs_job *job = container_of(work, struct hl_cs_job,
drivers/accel/habanalabs/common/command_submission.c
1177
static void cs_completion(struct work_struct *work)
drivers/accel/habanalabs/common/command_submission.c
1179
struct hl_cs *cs = container_of(work, struct hl_cs, finish_work);
drivers/accel/habanalabs/common/command_submission.c
52
static void job_wq_completion(struct work_struct *work);
drivers/accel/habanalabs/common/command_submission.c
816
static void cs_timedout(struct work_struct *work)
drivers/accel/habanalabs/common/command_submission.c
818
struct hl_cs *cs = container_of(work, struct hl_cs, work_tdr.work);
drivers/accel/habanalabs/common/decoder.c
46
static void dec_abnrm_intr_work(struct work_struct *work)
drivers/accel/habanalabs/common/decoder.c
48
struct hl_dec *dec = container_of(work, struct hl_dec, abnrm_intr_work);
drivers/accel/habanalabs/common/device.c
1100
static void hl_device_heartbeat(struct work_struct *work)
drivers/accel/habanalabs/common/device.c
1102
struct hl_device *hdev = container_of(work, struct hl_device,
drivers/accel/habanalabs/common/device.c
1103
work_heartbeat.work);
drivers/accel/habanalabs/common/device.c
33
static void hl_device_heartbeat(struct work_struct *work);
drivers/accel/habanalabs/common/device.c
791
static void device_hard_reset_pending(struct work_struct *work)
drivers/accel/habanalabs/common/device.c
794
container_of(work, struct hl_device_reset_work, reset_work.work);
drivers/accel/habanalabs/common/device.c
824
static void device_release_watchdog_func(struct work_struct *work)
drivers/accel/habanalabs/common/device.c
827
container_of(work, struct hl_device_reset_work, reset_work.work);
drivers/accel/habanalabs/common/irq.c
203
static void hl_ts_free_objects(struct work_struct *work)
drivers/accel/habanalabs/common/irq.c
206
container_of(work, struct timestamp_reg_work_obj, free_obj);
drivers/accel/habanalabs/common/irq.c
58
static void irq_handle_eqe(struct work_struct *work)
drivers/accel/habanalabs/common/irq.c
60
struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
drivers/accel/habanalabs/common/mmu/mmu.c
672
static void hl_mmu_prefetch_work_function(struct work_struct *work)
drivers/accel/habanalabs/common/mmu/mmu.c
674
struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, prefetch_work);
drivers/accel/habanalabs/goya/goya.c
831
static void goya_set_freq_to_low_job(struct work_struct *work)
drivers/accel/habanalabs/goya/goya.c
833
struct goya_work_freq *goya_work = container_of(work,
drivers/accel/habanalabs/goya/goya.c
835
work_freq.work);
drivers/accel/ivpu/ivpu_ipc.c
465
void ivpu_ipc_irq_work_fn(struct work_struct *work)
drivers/accel/ivpu/ivpu_ipc.c
467
struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_ipc_work);
drivers/accel/ivpu/ivpu_ipc.h
93
void ivpu_ipc_irq_work_fn(struct work_struct *work);
drivers/accel/ivpu/ivpu_job.c
1102
void ivpu_context_abort_work_fn(struct work_struct *work)
drivers/accel/ivpu/ivpu_job.c
1104
struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
drivers/accel/ivpu/ivpu_job.h
89
void ivpu_context_abort_work_fn(struct work_struct *work);
drivers/accel/ivpu/ivpu_pm.c
158
static void ivpu_pm_recovery_work(struct work_struct *work)
drivers/accel/ivpu/ivpu_pm.c
160
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
drivers/accel/ivpu/ivpu_pm.c
196
static void ivpu_job_timeout_work(struct work_struct *work)
drivers/accel/ivpu/ivpu_pm.c
198
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
drivers/accel/ivpu/ivpu_pm.c
493
void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
drivers/accel/ivpu/ivpu_pm.c
495
struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_dct_work);
drivers/accel/ivpu/ivpu_pm.h
47
void ivpu_pm_irq_dct_work_fn(struct work_struct *work);
drivers/accel/qaic/qaic.h
351
void qaic_irq_polling_work(struct work_struct *work);
drivers/accel/qaic/qaic_control.c
1404
static void resp_worker(struct work_struct *work)
drivers/accel/qaic/qaic_control.c
1406
struct resp_work *resp = container_of(work, struct resp_work, work);
drivers/accel/qaic/qaic_control.c
1510
INIT_WORK(&resp->work, resp_worker);
drivers/accel/qaic/qaic_control.c
1513
queue_work(qdev->cntl_wq, &resp->work);
drivers/accel/qaic/qaic_control.c
236
struct work_struct work;
drivers/accel/qaic/qaic_data.c
1517
void qaic_irq_polling_work(struct work_struct *work)
drivers/accel/qaic/qaic_data.c
1519
struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work);
drivers/accel/qaic/qaic_debugfs.c
182
static void bootlog_log(struct work_struct *work)
drivers/accel/qaic/qaic_debugfs.c
184
struct bootlog_msg *msg = container_of(work, struct bootlog_msg, work);
drivers/accel/qaic/qaic_debugfs.c
232
INIT_WORK(&msg->work, bootlog_log);
drivers/accel/qaic/qaic_debugfs.c
277
queue_work(qdev->bootlog_wq, &msg->work);
drivers/accel/qaic/qaic_debugfs.c
33
struct work_struct work;
drivers/accel/qaic/qaic_ssr.c
110
struct work_struct work;
drivers/accel/qaic/qaic_ssr.c
158
struct work_struct work;
drivers/accel/qaic/qaic_ssr.c
360
static void ssr_dump_worker(struct work_struct *work)
drivers/accel/qaic/qaic_ssr.c
362
struct ssr_crashdump *ssr_crash = container_of(work, struct ssr_crashdump, work);
drivers/accel/qaic/qaic_ssr.c
557
static void ssr_worker(struct work_struct *work)
drivers/accel/qaic/qaic_ssr.c
559
struct ssr_resp *resp = container_of(work, struct ssr_resp, work);
drivers/accel/qaic/qaic_ssr.c
693
INIT_WORK(&resp->work, ssr_worker);
drivers/accel/qaic/qaic_ssr.c
765
queue_work(qdev->ssr_wq, &ssr_crash->work);
drivers/accel/qaic/qaic_ssr.c
767
queue_work(qdev->ssr_wq, &resp->work);
drivers/accel/qaic/qaic_ssr.c
801
INIT_WORK(&ssr_crash->work, ssr_dump_worker);
drivers/accel/qaic/qaic_timesync.c
257
static void qaic_boot_timesync_worker(struct work_struct *work)
drivers/accel/qaic/qaic_timesync.c
259
struct qts_resp *resp = container_of(work, struct qts_resp, work);
drivers/accel/qaic/qaic_timesync.c
312
INIT_WORK(&resp->work, qaic_boot_timesync_worker);
drivers/accel/qaic/qaic_timesync.c
369
queue_work(resp->qdev->qts_wq, &resp->work);
drivers/accel/qaic/qaic_timesync.c
93
struct work_struct work;
drivers/accel/qaic/sahara.c
538
static void sahara_processing(struct work_struct *work)
drivers/accel/qaic/sahara.c
540
struct sahara_context *context = container_of(work, struct sahara_context, fw_work);
drivers/accel/qaic/sahara.c
732
static void sahara_dump_processing(struct work_struct *work)
drivers/accel/qaic/sahara.c
734
struct sahara_context *context = container_of(work, struct sahara_context, dump_work);
drivers/accel/qaic/sahara.c
790
static void sahara_read_data_processing(struct work_struct *work)
drivers/accel/qaic/sahara.c
792
struct sahara_context *context = container_of(work, struct sahara_context, read_data_work);
drivers/accel/rocket/rocket_core.h
51
struct work_struct work;
drivers/accel/rocket/rocket_job.c
396
static void rocket_reset_work(struct work_struct *work)
drivers/accel/rocket/rocket_job.c
400
core = container_of(work, struct rocket_core, reset.work);
drivers/accel/rocket/rocket_job.c
448
INIT_WORK(&core->reset.work, rocket_reset_work);
drivers/accel/rocket/rocket_job.c
492
cancel_work_sync(&core->reset.work);
drivers/accessibility/speakup/selection.c
106
static void __speakup_paste_selection(struct work_struct *work)
drivers/accessibility/speakup/selection.c
109
container_of(work, struct speakup_selection_work, work);
drivers/accessibility/speakup/selection.c
117
.work = __WORK_INITIALIZER(speakup_paste_work.work,
drivers/accessibility/speakup/selection.c
129
schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
drivers/accessibility/speakup/selection.c
137
cancel_work_sync(&speakup_paste_work.work);
drivers/accessibility/speakup/selection.c
20
struct work_struct work;
drivers/accessibility/speakup/selection.c
25
static void __speakup_set_selection(struct work_struct *work)
drivers/accessibility/speakup/selection.c
28
container_of(work, struct speakup_selection_work, work);
drivers/accessibility/speakup/selection.c
58
.work = __WORK_INITIALIZER(speakup_sel_work.work,
drivers/accessibility/speakup/selection.c
87
schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work);
drivers/accessibility/speakup/selection.c
96
cancel_work_sync(&speakup_sel_work.work);
drivers/acpi/acpi_video.c
1413
acpi_video_switch_brightness(struct work_struct *work)
drivers/acpi/acpi_video.c
1415
struct acpi_video_device *device = container_of(to_delayed_work(work),
drivers/acpi/acpi_video.c
218
static void acpi_video_switch_brightness(struct work_struct *work);
drivers/acpi/apei/ghes.c
169
struct work_struct work;
drivers/acpi/apei/ghes.c
692
static void ghes_vendor_record_work_func(struct work_struct *work)
drivers/acpi/apei/ghes.c
698
entry = container_of(work, struct ghes_vendor_record_entry, work);
drivers/acpi/apei/ghes.c
724
INIT_WORK(&entry->work, ghes_vendor_record_work_func);
drivers/acpi/apei/ghes.c
725
schedule_work(&entry->work);
drivers/acpi/apei/ghes.c
763
int cxl_cper_register_prot_err_work(struct work_struct *work)
drivers/acpi/apei/ghes.c
769
cxl_cper_prot_err_work = work;
drivers/acpi/apei/ghes.c
774
int cxl_cper_unregister_prot_err_work(struct work_struct *work)
drivers/acpi/apei/ghes.c
776
if (cxl_cper_prot_err_work != work)
drivers/acpi/apei/ghes.c
832
int cxl_cper_register_work(struct work_struct *work)
drivers/acpi/apei/ghes.c
838
cxl_cper_work = work;
drivers/acpi/apei/ghes.c
843
int cxl_cper_unregister_work(struct work_struct *work)
drivers/acpi/apei/ghes.c
845
if (cxl_cper_work != work)
drivers/acpi/ec.c
1152
static void acpi_ec_event_processor(struct work_struct *work)
drivers/acpi/ec.c
1154
struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
drivers/acpi/ec.c
1184
INIT_WORK(&q->work, acpi_ec_event_processor);
drivers/acpi/ec.c
1235
queue_work(ec_query_wq, &q->work);
drivers/acpi/ec.c
1247
static void acpi_ec_event_handler(struct work_struct *work)
drivers/acpi/ec.c
1249
struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
drivers/acpi/ec.c
1433
INIT_WORK(&ec->work, acpi_ec_event_handler);
drivers/acpi/ec.c
169
struct work_struct work;
drivers/acpi/ec.c
176
static void acpi_ec_event_handler(struct work_struct *work);
drivers/acpi/ec.c
475
queue_work(ec_wq, &ec->work);
drivers/acpi/internal.h
208
struct work_struct work;
drivers/acpi/internal.h
81
bool acpi_queue_hotplug_work(struct work_struct *work);
drivers/acpi/nfit/core.c
2925
static void acpi_nfit_scrub(struct work_struct *work)
drivers/acpi/nfit/core.c
2931
acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
drivers/acpi/nfit/core.c
3188
if (work_busy(&acpi_desc->dwork.work))
drivers/acpi/osl.c
1126
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
drivers/acpi/osl.c
1135
ret = queue_work(kacpi_notify_wq, &dpc->work);
drivers/acpi/osl.c
1145
ret = queue_work_on(0, kacpid_wq, &dpc->work);
drivers/acpi/osl.c
1178
struct work_struct work;
drivers/acpi/osl.c
1183
static void acpi_hotplug_work_fn(struct work_struct *work)
drivers/acpi/osl.c
1185
struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
drivers/acpi/osl.c
1204
INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
drivers/acpi/osl.c
1213
if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
drivers/acpi/osl.c
1220
bool acpi_queue_hotplug_work(struct work_struct *work)
drivers/acpi/osl.c
1222
return queue_work(kacpi_hotplug_wq, work);
drivers/acpi/osl.c
382
static void acpi_os_map_remove(struct work_struct *work)
drivers/acpi/osl.c
384
struct acpi_ioremap *map = container_of(to_rcu_work(work),
drivers/acpi/osl.c
48
struct work_struct work;
drivers/acpi/osl.c
875
static void acpi_os_execute_deferred(struct work_struct *work)
drivers/acpi/osl.c
877
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
drivers/acpi/scan.c
2930
static void acpi_table_events_fn(struct work_struct *work)
drivers/acpi/scan.c
2936
kfree(work);
drivers/acpi/scan.c
2941
struct work_struct *work;
drivers/acpi/scan.c
2946
work = kmalloc_obj(*work);
drivers/acpi/scan.c
2947
if (!work)
drivers/acpi/scan.c
2950
INIT_WORK(work, acpi_table_events_fn);
drivers/acpi/scan.c
2951
schedule_work(work);
drivers/acpi/scan.c
608
static DECLARE_WORK(work, acpi_device_del_work_fn);
drivers/acpi/scan.c
624
acpi_queue_hotplug_work(&work);
drivers/acpi/thermal.c
744
static void acpi_thermal_check_fn(struct work_struct *work)
drivers/acpi/thermal.c
746
struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
drivers/android/binder.c
1221
binder_dequeue_work(ref->proc, &ref->death->work);
drivers/android/binder.c
1226
binder_dequeue_work(ref->proc, &ref->freeze->work);
drivers/android/binder.c
1707
&target_thread->reply_error.work);
drivers/android/binder.c
2815
t_queued = container_of(w, struct binder_transaction, work);
drivers/android/binder.c
2881
binder_enqueue_thread_work_ilocked(thread, &t->work);
drivers/android/binder.c
2883
binder_enqueue_work_ilocked(&t->work, &proc->todo);
drivers/android/binder.c
2892
list_del_init(&t_outdated->work.entry);
drivers/android/binder.c
2896
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
drivers/android/binder.c
3124
t->work.type = BINDER_WORK_TRANSACTION;
drivers/android/binder.c
3759
binder_enqueue_thread_work_ilocked(target_thread, &t->work);
drivers/android/binder.c
3909
binder_enqueue_thread_work(thread, &thread->return_error.work);
drivers/android/binder.c
3917
binder_enqueue_thread_work(thread, &thread->return_error.work);
drivers/android/binder.c
3953
INIT_LIST_HEAD(&freeze->work.entry);
drivers/android/binder.c
3955
freeze->work.type = BINDER_WORK_FROZEN_BINDER;
drivers/android/binder.c
3964
binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
drivers/android/binder.c
4020
freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
drivers/android/binder.c
4021
if (list_empty(&freeze->work.entry)) {
drivers/android/binder.c
4022
binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
drivers/android/binder.c
4044
container_of(w, struct binder_ref_freeze, work);
drivers/android/binder.c
4057
binder_dequeue_work_ilocked(&freeze->work);
drivers/android/binder.c
4061
binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
drivers/android/binder.c
4404
&thread->return_error.work);
drivers/android/binder.c
443
binder_enqueue_work_ilocked(struct binder_work *work,
drivers/android/binder.c
4447
INIT_LIST_HEAD(&death->work.entry);
drivers/android/binder.c
4451
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
drivers/android/binder.c
4455
&ref->death->work, &proc->todo);
drivers/android/binder.c
447
BUG_ON(work->entry.next && !list_empty(&work->entry));
drivers/android/binder.c
4479
if (list_empty(&death->work.entry)) {
drivers/android/binder.c
448
list_add_tail(&work->entry, target_list);
drivers/android/binder.c
4480
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
drivers/android/binder.c
4486
&death->work);
drivers/android/binder.c
4489
&death->work,
drivers/android/binder.c
4495
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
drivers/android/binder.c
4496
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
drivers/android/binder.c
4518
work);
drivers/android/binder.c
4535
binder_dequeue_work_ilocked(&death->work);
drivers/android/binder.c
4536
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
drivers/android/binder.c
4537
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
drivers/android/binder.c
4542
thread, &death->work);
drivers/android/binder.c
4545
&death->work,
drivers/android/binder.c
464
struct binder_work *work)
drivers/android/binder.c
467
binder_enqueue_work_ilocked(work, &thread->todo);
drivers/android/binder.c
4816
t = container_of(w, struct binder_transaction, work);
drivers/android/binder.c
482
struct binder_work *work)
drivers/android/binder.c
4820
w, struct binder_error, work);
drivers/android/binder.c
485
binder_enqueue_work_ilocked(work, &thread->todo);
drivers/android/binder.c
4855
struct binder_node *node = container_of(w, struct binder_node, work);
drivers/android/binder.c
4948
death = container_of(w, struct binder_ref_death, work);
drivers/android/binder.c
4988
freeze = container_of(w, struct binder_ref_freeze, work);
drivers/android/binder.c
5007
container_of(w, struct binder_ref_freeze, work);
drivers/android/binder.c
509
struct binder_work *work)
drivers/android/binder.c
512
binder_enqueue_thread_work_ilocked(thread, work);
drivers/android/binder.c
517
binder_dequeue_work_ilocked(struct binder_work *work)
drivers/android/binder.c
519
list_del_init(&work->entry);
drivers/android/binder.c
5195
t = container_of(w, struct binder_transaction, work);
drivers/android/binder.c
5202
w, struct binder_error, work);
drivers/android/binder.c
5220
death = container_of(w, struct binder_ref_death, work);
drivers/android/binder.c
5232
freeze = container_of(w, struct binder_ref_freeze, work);
drivers/android/binder.c
5278
thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
drivers/android/binder.c
5280
thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
drivers/android/binder.c
531
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
drivers/android/binder.c
534
binder_dequeue_work_ilocked(work);
drivers/android/binder.c
5653
ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
drivers/android/binder.c
5654
if (list_empty(&ref->freeze->work.entry)) {
drivers/android/binder.c
5656
binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
drivers/android/binder.c
6205
binder_dequeue_work_ilocked(&node->work);
drivers/android/binder.c
6243
BUG_ON(!list_empty(&ref->death->work.entry));
drivers/android/binder.c
6244
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
drivers/android/binder.c
6245
binder_enqueue_work_ilocked(&ref->death->work,
drivers/android/binder.c
6348
static void binder_deferred_func(struct work_struct *work)
drivers/android/binder.c
6442
t = container_of(w, struct binder_transaction, work);
drivers/android/binder.c
6448
w, struct binder_error, work);
drivers/android/binder.c
6457
node = container_of(w, struct binder_node, work);
drivers/android/binder.c
779
node->work.type = BINDER_WORK_NODE;
drivers/android/binder.c
784
INIT_LIST_HEAD(&node->work.entry);
drivers/android/binder.c
846
binder_dequeue_work_ilocked(&node->work);
drivers/android/binder.c
849
&node->work);
drivers/android/binder.c
854
if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
drivers/android/binder.c
855
binder_enqueue_work_ilocked(&node->work, target_list);
drivers/android/binder.c
896
if (list_empty(&node->work.entry)) {
drivers/android/binder.c
897
binder_enqueue_work_ilocked(&node->work, &proc->todo);
drivers/android/binder.c
904
binder_dequeue_work_ilocked(&node->work);
drivers/android/binder.c
910
BUG_ON(!list_empty(&node->work.entry));
drivers/android/binder_internal.h
166
struct binder_work work;
drivers/android/binder_internal.h
233
struct binder_work work;
drivers/android/binder_internal.h
274
struct binder_work work;
drivers/android/binder_internal.h
279
struct binder_work work;
drivers/android/binder_internal.h
532
struct binder_work work;
drivers/ata/libata-scsi.c
1661
void ata_scsi_deferred_qc_work(struct work_struct *work)
drivers/ata/libata-scsi.c
1664
container_of(work, struct ata_port, deferred_qc_work);
drivers/ata/libata-scsi.c
4885
void ata_scsi_hotplug(struct work_struct *work)
drivers/ata/libata-scsi.c
4888
container_of(work, struct ata_port, hotplug_task.work);
drivers/ata/libata-scsi.c
4988
void ata_scsi_dev_rescan(struct work_struct *work)
drivers/ata/libata-scsi.c
4991
container_of(work, struct ata_port, scsi_rescan_task.work);
drivers/ata/libata-sff.c
1164
void ata_sff_queue_work(struct work_struct *work)
drivers/ata/libata-sff.c
1166
queue_work(ata_sff_wq, work);
drivers/ata/libata-sff.c
1210
static void ata_sff_pio_task(struct work_struct *work)
drivers/ata/libata-sff.c
1213
container_of(work, struct ata_port, sff_pio_task.work);
drivers/ata/libata.h
161
extern void ata_scsi_hotplug(struct work_struct *work);
drivers/ata/libata.h
162
extern void ata_scsi_dev_rescan(struct work_struct *work);
drivers/ata/libata.h
170
void ata_scsi_deferred_qc_work(struct work_struct *work);
drivers/ata/pata_arasan_cf.c
214
struct work_struct work;
drivers/ata/pata_arasan_cf.c
518
static void data_xfer(struct work_struct *work)
drivers/ata/pata_arasan_cf.c
520
struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
drivers/ata/pata_arasan_cf.c
521
work);
drivers/ata/pata_arasan_cf.c
576
static void delayed_finish(struct work_struct *work)
drivers/ata/pata_arasan_cf.c
578
struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
drivers/ata/pata_arasan_cf.c
579
dwork.work);
drivers/ata/pata_arasan_cf.c
669
cancel_work_sync(&acdev->work);
drivers/ata/pata_arasan_cf.c
686
ata_sff_queue_work(&acdev->work);
drivers/ata/pata_arasan_cf.c
870
INIT_WORK(&acdev->work, data_xfer);
drivers/atm/idt77252.c
134
static void idt77252_softint(struct work_struct *work);
drivers/atm/idt77252.c
2803
idt77252_softint(struct work_struct *work)
drivers/atm/idt77252.c
2806
container_of(work, struct idt77252_dev, tqueue);
drivers/auxdisplay/arm-charlcd.c
248
static void charlcd_init_work(struct work_struct *work)
drivers/auxdisplay/arm-charlcd.c
251
container_of(work, struct charlcd, init_work.work);
drivers/auxdisplay/cfag12864b.c
247
static void cfag12864b_update(struct work_struct *work)
drivers/auxdisplay/charlcd.c
73
static void charlcd_bl_off(struct work_struct *work)
drivers/auxdisplay/charlcd.c
75
struct delayed_work *dwork = to_delayed_work(work);
drivers/auxdisplay/ht16k33.c
103
container_of(p, struct ht16k33_priv, work.work)
drivers/auxdisplay/ht16k33.c
213
schedule_delayed_work(&priv->work, HZ / fbdev->refresh_rate);
drivers/auxdisplay/ht16k33.c
219
static void ht16k33_fb_update(struct work_struct *work)
drivers/auxdisplay/ht16k33.c
221
struct ht16k33_priv *priv = ht16k33_work_to_priv(work);
drivers/auxdisplay/ht16k33.c
400
static void ht16k33_seg7_update(struct work_struct *work)
drivers/auxdisplay/ht16k33.c
402
struct ht16k33_priv *priv = ht16k33_work_to_priv(work);
drivers/auxdisplay/ht16k33.c
420
static void ht16k33_seg14_update(struct work_struct *work)
drivers/auxdisplay/ht16k33.c
422
struct ht16k33_priv *priv = ht16k33_work_to_priv(work);
drivers/auxdisplay/ht16k33.c
441
INIT_DELAYED_WORK(&priv->work, ht16k33_seg7_update);
drivers/auxdisplay/ht16k33.c
445
INIT_DELAYED_WORK(&priv->work, ht16k33_seg14_update);
drivers/auxdisplay/ht16k33.c
457
schedule_delayed_work(&priv->work, 0);
drivers/auxdisplay/ht16k33.c
618
INIT_DELAYED_WORK(&priv->work, ht16k33_fb_update);
drivers/auxdisplay/ht16k33.c
728
cancel_delayed_work_sync(&priv->work);
drivers/auxdisplay/ht16k33.c
91
struct delayed_work work;
drivers/auxdisplay/max6959.c
151
cancel_delayed_work_sync(&priv->work);
drivers/auxdisplay/max6959.c
46
struct delayed_work work;
drivers/auxdisplay/max6959.c
50
static void max6959_disp_update(struct work_struct *work)
drivers/auxdisplay/max6959.c
52
struct max6959_priv *priv = container_of(work, struct max6959_priv, work.work);
drivers/auxdisplay/max6959.c
71
INIT_DELAYED_WORK(&priv->work, max6959_disp_update);
drivers/auxdisplay/max6959.c
79
schedule_delayed_work(&priv->work, 0);
drivers/auxdisplay/seg-led-gpio.c
26
struct delayed_work work;
drivers/auxdisplay/seg-led-gpio.c
30
static void seg_led_update(struct work_struct *work)
drivers/auxdisplay/seg-led-gpio.c
32
struct seg_led_priv *priv = container_of(work, struct seg_led_priv, work.work);
drivers/auxdisplay/seg-led-gpio.c
46
INIT_DELAYED_WORK(&priv->work, seg_led_update);
drivers/auxdisplay/seg-led-gpio.c
54
schedule_delayed_work(&priv->work, 0);
drivers/auxdisplay/seg-led-gpio.c
87
cancel_delayed_work_sync(&priv->work);
drivers/base/arch_topology.c
209
static void update_topology_flags_workfn(struct work_struct *work);
drivers/base/arch_topology.c
223
static void update_topology_flags_workfn(struct work_struct *work)
drivers/base/arch_topology.c
386
static void parsing_done_workfn(struct work_struct *work);
drivers/base/arch_topology.c
455
static void parsing_done_workfn(struct work_struct *work)
drivers/base/core.c
501
static void device_link_release_fn(struct work_struct *work)
drivers/base/core.c
503
struct device_link *link = container_of(work, struct device_link, rm_work);
drivers/base/dd.c
305
static void deferred_probe_timeout_work_func(struct work_struct *work)
drivers/base/dd.c
82
static void deferred_probe_work_func(struct work_struct *work)
drivers/base/devcoredump.c
106
devcd = container_of(wk, struct devcd_entry, del_wk.work);
drivers/base/firmware_loader/main.c
1134
struct work_struct work;
drivers/base/firmware_loader/main.c
1143
static void request_firmware_work_func(struct work_struct *work)
drivers/base/firmware_loader/main.c
1148
fw_work = container_of(work, struct firmware_work, work);
drivers/base/firmware_loader/main.c
1198
INIT_WORK(&fw_work->work, request_firmware_work_func);
drivers/base/firmware_loader/main.c
1199
schedule_work(&fw_work->work);
drivers/base/firmware_loader/main.c
1500
cancel_delayed_work_sync(&fwc->work);
drivers/base/firmware_loader/main.c
1527
static void device_uncache_fw_images_work(struct work_struct *work)
drivers/base/firmware_loader/main.c
1541
queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
drivers/base/firmware_loader/main.c
1600
INIT_DELAYED_WORK(&fw_cache.work,
drivers/base/firmware_loader/main.c
67
struct delayed_work work;
drivers/base/firmware_loader/sysfs_upload.c
161
static void fw_upload_main(struct work_struct *work)
drivers/base/firmware_loader/sysfs_upload.c
170
fwlp = container_of(work, struct fw_upload_priv, work);
drivers/base/firmware_loader/sysfs_upload.c
264
queue_work(system_long_wq, &fwlp->work);
drivers/base/firmware_loader/sysfs_upload.c
337
INIT_WORK(&fw_upload_priv->work, fw_upload_main);
drivers/base/firmware_loader/sysfs_upload.c
404
flush_work(&fw_upload_priv->work);
drivers/base/firmware_loader/sysfs_upload.h
33
struct work_struct work;
drivers/base/power/runtime.c
1432
cancel_work_sync(&dev->power.work);
drivers/base/power/runtime.c
1856
INIT_WORK(&dev->power.work, pm_runtime_work);
drivers/base/power/runtime.c
1898
flush_work(&dev->power.work);
drivers/base/power/runtime.c
537
queue_work(pm_wq, &dev->power.work);
drivers/base/power/runtime.c
688
queue_work(pm_wq, &dev->power.work);
drivers/base/power/runtime.c
890
queue_work(pm_wq, &dev->power.work);
drivers/base/power/runtime.c
977
static void pm_runtime_work(struct work_struct *work)
drivers/base/power/runtime.c
979
struct device *dev = container_of(work, struct device, power.work);
drivers/block/aoe/aoe.h
170
struct work_struct work;/* disk create work struct */
drivers/block/aoe/aoeblk.c
430
queue_work(aoe_wq, &d->work);
drivers/block/aoe/aoecmd.c
790
queue_work(aoe_wq, &d->work);
drivers/block/aoe/aoecmd.c
898
aoecmd_sleepwork(struct work_struct *work)
drivers/block/aoe/aoecmd.c
900
struct aoedev *d = container_of(work, struct aoedev, work);
drivers/block/aoe/aoecmd.c
983
queue_work(aoe_wq, &d->work);
drivers/block/aoe/aoedev.c
484
INIT_WORK(&d->work, aoecmd_sleepwork);
drivers/block/drbd/drbd_main.c
3429
struct bm_io_work *work = &device->bm_io_work;
drivers/block/drbd/drbd_main.c
3432
if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
drivers/block/drbd/drbd_main.c
3436
cnt, work->why);
drivers/block/drbd/drbd_main.c
3440
drbd_bm_lock(device, work->why, work->flags);
drivers/block/drbd/drbd_main.c
3441
rv = work->io_fn(device, work->peer_device);
drivers/block/drbd/drbd_main.c
3449
if (work->done)
drivers/block/drbd/drbd_main.c
3450
work->done(device, rv);
drivers/block/drbd/drbd_main.c
3453
work->why = NULL;
drivers/block/drbd/drbd_main.c
3454
work->flags = 0;
drivers/block/floppy.c
1008
static void fd_timer_workfn(struct work_struct *work)
drivers/block/floppy.c
1858
fd_timer.work.func,
drivers/block/floppy.c
1862
fd_timeout.work.func,
drivers/block/floppy.c
991
static void floppy_work_workfn(struct work_struct *work)
drivers/block/loop.c
1971
if (worker && !work_pending(&worker->work)) {
drivers/block/loop.c
1980
static void loop_workfn(struct work_struct *work)
drivers/block/loop.c
1983
container_of(work, struct loop_worker, work);
drivers/block/loop.c
1987
static void loop_rootcg_workfn(struct work_struct *work)
drivers/block/loop.c
1990
container_of(work, struct loop_device, rootcg_work);
drivers/block/loop.c
774
struct work_struct work;
drivers/block/loop.c
782
static void loop_workfn(struct work_struct *work);
drivers/block/loop.c
800
struct work_struct *work;
drivers/block/loop.c
840
INIT_WORK(&worker->work, loop_workfn);
drivers/block/loop.c
855
work = &worker->work;
drivers/block/loop.c
858
work = &lo->rootcg_work;
drivers/block/loop.c
862
queue_work(lo->workqueue, work);
drivers/block/mtip32xx/mtip32xx.c
2731
dd->work[i].port = dd->port;
drivers/block/mtip32xx/mtip32xx.c
3769
dd->work[0].cpu_binding = dd->isr_binding;
drivers/block/mtip32xx/mtip32xx.c
3770
dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
drivers/block/mtip32xx/mtip32xx.c
3771
dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
drivers/block/mtip32xx/mtip32xx.c
3772
dd->work[3].cpu_binding = dd->work[0].cpu_binding;
drivers/block/mtip32xx/mtip32xx.c
3773
dd->work[4].cpu_binding = dd->work[1].cpu_binding;
drivers/block/mtip32xx/mtip32xx.c
3774
dd->work[5].cpu_binding = dd->work[2].cpu_binding;
drivers/block/mtip32xx/mtip32xx.c
3775
dd->work[6].cpu_binding = dd->work[2].cpu_binding;
drivers/block/mtip32xx/mtip32xx.c
3776
dd->work[7].cpu_binding = dd->work[1].cpu_binding;
drivers/block/mtip32xx/mtip32xx.c
3782
if (dd->work[i].cpu_binding == cpu) {
drivers/block/mtip32xx/mtip32xx.c
3791
INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
drivers/block/mtip32xx/mtip32xx.c
3792
INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
drivers/block/mtip32xx/mtip32xx.c
3793
INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
drivers/block/mtip32xx/mtip32xx.c
3794
INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
drivers/block/mtip32xx/mtip32xx.c
3795
INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
drivers/block/mtip32xx/mtip32xx.c
3796
INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
drivers/block/mtip32xx/mtip32xx.c
3797
INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
drivers/block/mtip32xx/mtip32xx.c
3798
INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
drivers/block/mtip32xx/mtip32xx.c
3836
drop_cpu(dd->work[0].cpu_binding);
drivers/block/mtip32xx/mtip32xx.c
3837
drop_cpu(dd->work[1].cpu_binding);
drivers/block/mtip32xx/mtip32xx.c
3838
drop_cpu(dd->work[2].cpu_binding);
drivers/block/mtip32xx/mtip32xx.c
3908
drop_cpu(dd->work[0].cpu_binding);
drivers/block/mtip32xx/mtip32xx.c
3909
drop_cpu(dd->work[1].cpu_binding);
drivers/block/mtip32xx/mtip32xx.c
3910
drop_cpu(dd->work[2].cpu_binding);
drivers/block/mtip32xx/mtip32xx.c
756
twork = &dd->work[i];
drivers/block/mtip32xx/mtip32xx.c
765
twork = &dd->work[i];
drivers/block/mtip32xx/mtip32xx.c
770
&twork->work);
drivers/block/mtip32xx/mtip32xx.c
773
if (likely(dd->work[0].completed))
drivers/block/mtip32xx/mtip32xx.c
775
dd->work[0].completed);
drivers/block/mtip32xx/mtip32xx.h
171
struct work_struct work;
drivers/block/mtip32xx/mtip32xx.h
178
void mtip_workq_sdbf##group(struct work_struct *work) \
drivers/block/mtip32xx/mtip32xx.h
180
struct mtip_work *w = (struct mtip_work *) work; \
drivers/block/mtip32xx/mtip32xx.h
460
struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
drivers/block/nbd.c
1300
INIT_WORK(&nsock->work, nbd_pending_cmd_work);
drivers/block/nbd.c
1351
INIT_WORK(&args->work, recv_work);
drivers/block/nbd.c
1364
queue_work(nbd->recv_workq, &args->work);
drivers/block/nbd.c
1534
INIT_WORK(&args->work, recv_work);
drivers/block/nbd.c
1538
queue_work(nbd->recv_workq, &args->work);
drivers/block/nbd.c
176
static void nbd_dead_link_work(struct work_struct *work);
drivers/block/nbd.c
2635
static void nbd_dead_link_work(struct work_struct *work)
drivers/block/nbd.c
2637
struct link_dead_args *args = container_of(work, struct link_dead_args,
drivers/block/nbd.c
2638
work);
drivers/block/nbd.c
282
static void nbd_dev_remove_work(struct work_struct *work)
drivers/block/nbd.c
284
nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
drivers/block/nbd.c
312
INIT_WORK(&args->work, nbd_dead_link_work);
drivers/block/nbd.c
314
queue_work(system_percpu_wq, &args->work);
drivers/block/nbd.c
638
schedule_work(&nsock->work);
drivers/block/nbd.c
65
struct work_struct work;
drivers/block/nbd.c
69
struct work_struct work;
drivers/block/nbd.c
76
struct work_struct work;
drivers/block/nbd.c
808
static void nbd_pending_cmd_work(struct work_struct *work)
drivers/block/nbd.c
810
struct nbd_sock *nsock = container_of(work, struct nbd_sock, work);
drivers/block/nbd.c
970
static void recv_work(struct work_struct *work)
drivers/block/nbd.c
972
struct recv_thread_args *args = container_of(work,
drivers/block/nbd.c
974
work);
drivers/block/rbd.c
2742
static void rbd_img_handle_request_work(struct work_struct *work)
drivers/block/rbd.c
2745
container_of(work, struct rbd_img_request, work);
drivers/block/rbd.c
2752
INIT_WORK(&img_req->work, rbd_img_handle_request_work);
drivers/block/rbd.c
2754
queue_work(rbd_wq, &img_req->work);
drivers/block/rbd.c
347
struct work_struct work;
drivers/block/rbd.c
3756
static void rbd_notify_acquired_lock(struct work_struct *work)
drivers/block/rbd.c
3758
struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
drivers/block/rbd.c
3764
static void rbd_notify_released_lock(struct work_struct *work)
drivers/block/rbd.c
3766
struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
drivers/block/rbd.c
4149
static void rbd_acquire_lock(struct work_struct *work)
drivers/block/rbd.c
4151
struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
drivers/block/rbd.c
4248
static void rbd_release_lock_work(struct work_struct *work)
drivers/block/rbd.c
4250
struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
drivers/block/rbd.c
4634
static void rbd_reregister_watch(struct work_struct *work)
drivers/block/rbd.c
4636
struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
drivers/block/rbd.c
4737
static void rbd_queue_workfn(struct work_struct *work)
drivers/block/rbd.c
4740
container_of(work, struct rbd_img_request, work);
drivers/block/rbd.c
4819
INIT_WORK(&img_req->work, rbd_queue_workfn);
drivers/block/rbd.c
4820
queue_work(rbd_wq, &img_req->work);
drivers/block/rnbd/rnbd-clt.c
1740
static void unmap_device_work(struct work_struct *work)
drivers/block/rnbd/rnbd-clt.c
1744
dev = container_of(work, typeof(*dev), unmap_on_rmmod_work);
drivers/block/rnbd/rnbd-clt.c
407
schedule_work(&iu->work);
drivers/block/rnbd/rnbd-clt.c
413
void (*conf)(struct work_struct *work),
drivers/block/rnbd/rnbd-clt.c
419
INIT_WORK(&iu->work, conf);
drivers/block/rnbd/rnbd-clt.c
436
static void msg_close_conf(struct work_struct *work)
drivers/block/rnbd/rnbd-clt.c
438
struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
drivers/block/rnbd/rnbd-clt.c
482
static void msg_open_conf(struct work_struct *work)
drivers/block/rnbd/rnbd-clt.c
484
struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
drivers/block/rnbd/rnbd-clt.c
517
static void msg_sess_info_conf(struct work_struct *work)
drivers/block/rnbd/rnbd-clt.c
519
struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
drivers/block/rnbd/rnbd-clt.h
63
struct work_struct work;
drivers/block/sunvdc.c
1135
static void vdc_ldc_reset_timer_work(struct work_struct *work)
drivers/block/sunvdc.c
1140
port = container_of(work, struct vdc_port, ldc_reset_timer_work.work);
drivers/block/sunvdc.c
1153
static void vdc_ldc_reset_work(struct work_struct *work)
drivers/block/sunvdc.c
1159
port = container_of(work, struct vdc_port, ldc_reset_work);
drivers/block/sunvdc.c
92
static void vdc_ldc_reset_work(struct work_struct *work);
drivers/block/sunvdc.c
93
static void vdc_ldc_reset_timer_work(struct work_struct *work);
drivers/block/ublk_drv.c
2354
static void ublk_partition_scan_work(struct work_struct *work)
drivers/block/ublk_drv.c
2357
container_of(work, struct ublk_device, partition_scan_work);
drivers/block/ublk_drv.c
2426
static void ublk_ch_release_work_fn(struct work_struct *work)
drivers/block/ublk_drv.c
2429
container_of(work, struct ublk_device, exit_work.work);
drivers/block/virtio_blk.c
943
static void virtblk_config_changed_work(struct work_struct *work)
drivers/block/virtio_blk.c
946
container_of(work, struct virtio_blk, config_work);
drivers/block/xen-blkback/blkback.c
290
void xen_blkbk_unmap_purged_grants(struct work_struct *work)
drivers/block/xen-blkback/blkback.c
296
struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
drivers/block/xen-blkback/blkback.c
705
struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
drivers/block/xen-blkback/blkback.c
713
work->data = req;
drivers/block/xen-blkback/blkback.c
714
work->done = xen_blkbk_unmap_and_respond_callback;
drivers/block/xen-blkback/blkback.c
715
work->unmap_ops = req->unmap;
drivers/block/xen-blkback/blkback.c
716
work->kunmap_ops = NULL;
drivers/block/xen-blkback/blkback.c
717
work->pages = req->unmap_pages;
drivers/block/xen-blkback/blkback.c
718
work->count = invcount;
drivers/block/xen-blkback/common.h
395
void xen_blkbk_unmap_purged_grants(struct work_struct *work);
drivers/block/xen-blkback/xenbus.c
47
static void xen_blkif_deferred_free(struct work_struct *work)
drivers/block/xen-blkback/xenbus.c
51
blkif = container_of(work, struct xen_blkif, free_work);
drivers/block/xen-blkfront.c
1199
static void blkif_restart_queue(struct work_struct *work)
drivers/block/xen-blkfront.c
1201
struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
drivers/block/xen-blkfront.c
1290
flush_work(&rinfo->work);
drivers/block/xen-blkfront.c
183
struct work_struct work;
drivers/block/xen-blkfront.c
1923
INIT_WORK(&rinfo->work, blkif_restart_queue);
drivers/block/xen-blkfront.c
2142
flush_work(&rinfo->work);
drivers/block/xen-blkfront.c
2554
static void blkfront_delay_work(struct work_struct *work)
drivers/block/xen-blkfront.c
493
schedule_work(&rinfo->work);
drivers/block/zloop.c
137
struct work_struct work;
drivers/block/zloop.c
607
static void zloop_cmd_workfn(struct work_struct *work)
drivers/block/zloop.c
609
struct zloop_cmd *cmd = container_of(work, struct zloop_cmd, work);
drivers/block/zloop.c
726
INIT_WORK(&cmd->work, zloop_cmd_workfn);
drivers/block/zloop.c
727
queue_work(zlo->workqueue, &cmd->work);
drivers/block/zram/zram_drv.c
1379
struct zram_rb_req *req = container_of(w, struct zram_rb_req, work);
drivers/block/zram/zram_drv.c
1428
INIT_WORK(&req->work, zram_deferred_decompress);
drivers/block/zram/zram_drv.c
1429
queue_work(system_highpri_wq, &req->work);
drivers/block/zram/zram_drv.c
1466
struct zram_rb_req *req = container_of(w, struct zram_rb_req, work);
drivers/block/zram/zram_drv.c
1490
INIT_WORK_ONSTACK(&req.work, zram_sync_read);
drivers/block/zram/zram_drv.c
1491
queue_work(system_dfl_wq, &req.work);
drivers/block/zram/zram_drv.c
1492
flush_work(&req.work);
drivers/block/zram/zram_drv.c
1493
destroy_work_on_stack(&req.work);
drivers/block/zram/zram_drv.c
520
struct work_struct work;
drivers/bluetooth/bcm203x.c
140
static void bcm203x_work(struct work_struct *work)
drivers/bluetooth/bcm203x.c
143
container_of(work, struct bcm203x_data, work);
drivers/bluetooth/bcm203x.c
223
INIT_WORK(&data->work, bcm203x_work);
drivers/bluetooth/bcm203x.c
228
schedule_work(&data->work);
drivers/bluetooth/bcm203x.c
240
cancel_work_sync(&data->work);
drivers/bluetooth/bcm203x.c
52
struct work_struct work;
drivers/bluetooth/bcm203x.c
87
schedule_work(&data->work);
drivers/bluetooth/btintel_pcie.c
122
struct work_struct work;
drivers/bluetooth/btintel_pcie.c
1323
static void btintel_pcie_rx_work(struct work_struct *work)
drivers/bluetooth/btintel_pcie.c
1325
struct btintel_pcie_data *data = container_of(work,
drivers/bluetooth/btintel_pcie.c
2245
container_of(wk, struct btintel_pcie_removal, work);
drivers/bluetooth/btintel_pcie.c
2314
INIT_WORK(&removal->work, btintel_pcie_removal_work);
drivers/bluetooth/btintel_pcie.c
2316
schedule_work(&removal->work);
drivers/bluetooth/btmtksdio.c
545
static void btmtksdio_txrx_work(struct work_struct *work)
drivers/bluetooth/btmtksdio.c
547
struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
drivers/bluetooth/btmtkuart.c
238
static void btmtkuart_tx_work(struct work_struct *work)
drivers/bluetooth/btmtkuart.c
240
struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
drivers/bluetooth/btnxpuart.c
154
struct work_struct work;
drivers/bluetooth/btnxpuart.c
1656
static void btnxpuart_tx_work(struct work_struct *work)
drivers/bluetooth/btnxpuart.c
1658
struct btnxpuart_dev *nxpdev = container_of(work, struct btnxpuart_dev,
drivers/bluetooth/btnxpuart.c
417
cancel_work_sync(&psdata->work);
drivers/bluetooth/btnxpuart.c
424
flush_work(&psdata->work);
drivers/bluetooth/btnxpuart.c
473
static void ps_work_func(struct work_struct *work)
drivers/bluetooth/btnxpuart.c
475
struct ps_data *data = container_of(work, struct ps_data, work);
drivers/bluetooth/btnxpuart.c
493
schedule_work(&data->work);
drivers/bluetooth/btnxpuart.c
558
INIT_WORK(&psdata->work, ps_work_func);
drivers/bluetooth/btnxpuart.c
576
schedule_work(&psdata->work);
drivers/bluetooth/btnxpuart.c
595
cancel_work_sync(&psdata->work);
drivers/bluetooth/btsdio.c
267
schedule_work(&data->work);
drivers/bluetooth/btsdio.c
309
INIT_WORK(&data->work, btsdio_work);
drivers/bluetooth/btsdio.c
353
cancel_work_sync(&data->work);
drivers/bluetooth/btsdio.c
44
struct work_struct work;
drivers/bluetooth/btsdio.c
88
static void btsdio_work(struct work_struct *work)
drivers/bluetooth/btsdio.c
90
struct btsdio_data *data = container_of(work, struct btsdio_data, work);
drivers/bluetooth/btusb.c
2017
cancel_work_sync(&data->work);
drivers/bluetooth/btusb.c
2251
schedule_work(&data->work);
drivers/bluetooth/btusb.c
2357
static void btusb_work(struct work_struct *work)
drivers/bluetooth/btusb.c
2359
struct btusb_data *data = container_of(work, struct btusb_data, work);
drivers/bluetooth/btusb.c
2420
static void btusb_waker(struct work_struct *work)
drivers/bluetooth/btusb.c
2422
struct btusb_data *data = container_of(work, struct btusb_data, waker);
drivers/bluetooth/btusb.c
2432
static void btusb_rx_work(struct work_struct *work)
drivers/bluetooth/btusb.c
2434
struct btusb_data *data = container_of(work, struct btusb_data,
drivers/bluetooth/btusb.c
2435
rx_work.work);
drivers/bluetooth/btusb.c
4104
INIT_WORK(&data->work, btusb_work);
drivers/bluetooth/btusb.c
4501
cancel_work_sync(&data->work);
drivers/bluetooth/btusb.c
4616
schedule_work(&data->work);
drivers/bluetooth/btusb.c
926
struct work_struct work;
drivers/bluetooth/hci_ath.c
71
static void ath_hci_uart_work(struct work_struct *work)
drivers/bluetooth/hci_ath.c
78
ath = container_of(work, struct ath_struct, ctxtsw);
drivers/bluetooth/hci_h5.c
1049
struct work_struct work;
drivers/bluetooth/hci_h5.c
1052
static void h5_btrtl_reprobe_worker(struct work_struct *work)
drivers/bluetooth/hci_h5.c
1055
container_of(work, struct h5_btrtl_reprobe, work);
drivers/bluetooth/hci_h5.c
1078
INIT_WORK(&reprobe->work, h5_btrtl_reprobe_worker);
drivers/bluetooth/hci_h5.c
1080
queue_work(system_long_wq, &reprobe->work);
drivers/bluetooth/hci_intel.c
357
static void intel_busy_work(struct work_struct *work)
drivers/bluetooth/hci_intel.c
359
struct intel_data *intel = container_of(work, struct intel_data,
drivers/bluetooth/hci_ldisc.c
147
static void hci_uart_write_work(struct work_struct *work)
drivers/bluetooth/hci_ldisc.c
149
struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
drivers/bluetooth/hci_ldisc.c
185
void hci_uart_init_work(struct work_struct *work)
drivers/bluetooth/hci_ldisc.c
187
struct hci_uart *hu = container_of(work, struct hci_uart, init_ready);
drivers/bluetooth/hci_qca.c
1033
static void qca_controller_memdump(struct work_struct *work)
drivers/bluetooth/hci_qca.c
1035
struct qca_data *qca = container_of(work, struct qca_data,
drivers/bluetooth/hci_qca.c
241
static void qca_controller_memdump(struct work_struct *work);
drivers/bluetooth/hci_qca.c
390
static void qca_wq_awake_device(struct work_struct *work)
drivers/bluetooth/hci_qca.c
392
struct qca_data *qca = container_of(work, struct qca_data,
drivers/bluetooth/hci_qca.c
421
static void qca_wq_awake_rx(struct work_struct *work)
drivers/bluetooth/hci_qca.c
423
struct qca_data *qca = container_of(work, struct qca_data,
drivers/bluetooth/hci_qca.c
449
static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
drivers/bluetooth/hci_qca.c
451
struct qca_data *qca = container_of(work, struct qca_data,
drivers/bluetooth/hci_qca.c
460
static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
drivers/bluetooth/hci_qca.c
462
struct qca_data *qca = container_of(work, struct qca_data,
drivers/bluetooth/hci_qca.c
556
static void qca_controller_memdump_timeout(struct work_struct *work)
drivers/bluetooth/hci_qca.c
558
struct qca_data *qca = container_of(work, struct qca_data,
drivers/bluetooth/hci_qca.c
559
ctrl_memdump_timeout.work);
drivers/bluetooth/hci_serdev.c
57
static void hci_uart_write_work(struct work_struct *work)
drivers/bluetooth/hci_serdev.c
59
struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
drivers/bluetooth/hci_uart.h
117
void hci_uart_init_work(struct work_struct *work);
drivers/bluetooth/hci_vhci.c
124
static void vhci_suspend_work(struct work_struct *work)
drivers/bluetooth/hci_vhci.c
126
struct vhci_data *data = container_of(work, struct vhci_data,
drivers/bluetooth/hci_vhci.c
631
static void vhci_open_timeout(struct work_struct *work)
drivers/bluetooth/hci_vhci.c
633
struct vhci_data *data = container_of(work, struct vhci_data,
drivers/bluetooth/hci_vhci.c
634
open_timeout.work);
drivers/bluetooth/virtio_bt.c
219
static void virtbt_rx_work(struct work_struct *work)
drivers/bluetooth/virtio_bt.c
221
struct virtio_bluetooth *vbt = container_of(work,
drivers/bus/mhi/ep/main.c
1083
static void mhi_ep_reset_worker(struct work_struct *work)
drivers/bus/mhi/ep/main.c
1085
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
drivers/bus/mhi/ep/main.c
767
static void mhi_ep_cmd_ring_worker(struct work_struct *work)
drivers/bus/mhi/ep/main.c
769
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
drivers/bus/mhi/ep/main.c
801
static void mhi_ep_ch_ring_worker(struct work_struct *work)
drivers/bus/mhi/ep/main.c
803
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
drivers/bus/mhi/ep/main.c
865
static void mhi_ep_state_worker(struct work_struct *work)
drivers/bus/mhi/ep/main.c
867
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
drivers/bus/mhi/ep/main.c
907
bool work = !!ch_int;
drivers/bus/mhi/ep/main.c
926
if (work) {
drivers/bus/mhi/ep/ring.c
169
static void mhi_ep_raise_irq(struct work_struct *work)
drivers/bus/mhi/ep/ring.c
171
struct mhi_ep_ring *ring = container_of(work, struct mhi_ep_ring, intmodt_work.work);
drivers/bus/mhi/host/internal.h
336
void mhi_pm_st_worker(struct work_struct *work);
drivers/bus/mhi/host/pci_generic.c
1203
static void mhi_pci_recovery_work(struct work_struct *work)
drivers/bus/mhi/host/pci_generic.c
1205
struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
drivers/bus/mhi/host/pm.c
798
void mhi_pm_st_worker(struct work_struct *work)
drivers/bus/mhi/host/pm.c
802
struct mhi_controller *mhi_cntrl = container_of(work,
drivers/bus/mips_cdmm.c
158
struct mips_cdmm_work_dev *work = data;
drivers/bus/mips_cdmm.c
159
void (*fn)(struct mips_cdmm_device *) = work->fn;
drivers/bus/mips_cdmm.c
161
fn(work->dev);
drivers/bus/mips_cdmm.c
174
struct mips_cdmm_work_dev *work = data;
drivers/bus/mips_cdmm.c
175
int (*fn)(struct mips_cdmm_device *) = work->fn;
drivers/bus/mips_cdmm.c
177
return fn(work->dev);
drivers/bus/mips_cdmm.c
197
struct mips_cdmm_work_dev work = { \
drivers/bus/mips_cdmm.c
203
mips_cdmm_##_ret##_work, &work); \
drivers/bus/ti-sysc.c
2878
static void ti_sysc_idle(struct work_struct *work)
drivers/bus/ti-sysc.c
2882
ddata = container_of(work, struct sysc, idle_work.work);
drivers/bus/ti-sysc.c
3279
ti_sysc_idle(&ddata->idle_work.work);
drivers/cdx/controller/cdx_rpmsg.c
111
static void cdx_rpmsg_post_probe_work(struct work_struct *work)
drivers/cdx/controller/cdx_rpmsg.c
116
cdx_mcdi = container_of(work, struct cdx_mcdi, work);
drivers/cdx/controller/cdx_rpmsg.c
145
schedule_work(&cdx_mcdi->work);
drivers/cdx/controller/cdx_rpmsg.c
154
flush_work(&cdx_mcdi->work);
drivers/cdx/controller/cdx_rpmsg.c
186
INIT_WORK(&cdx_mcdi->work, cdx_rpmsg_post_probe_work);
drivers/cdx/controller/mcdi.c
540
INIT_WORK(&cmd->work, cdx_mcdi_cmd_work);
drivers/cdx/controller/mcdi.c
547
queue_work(mcdi->workqueue, &cmd->work);
drivers/cdx/controller/mcdi.c
635
container_of(context, struct cdx_mcdi_cmd, work);
drivers/char/hw_random/ba431-rng.c
101
struct ba431_trng *ba431 = container_of(work, struct ba431_trng,
drivers/char/hw_random/ba431-rng.c
99
static void ba431_trng_reset_work(struct work_struct *work)
drivers/char/hw_random/core.c
69
static void cleanup_rng_work(struct work_struct *work)
drivers/char/hw_random/core.c
71
struct hwrng *rng = container_of(work, struct hwrng, cleanup_work);
drivers/char/hw_random/n2-drv.c
443
schedule_delayed_work(&np->work, 0);
drivers/char/hw_random/n2-drv.c
660
static void n2rng_work(struct work_struct *work)
drivers/char/hw_random/n2-drv.c
662
struct n2rng *np = container_of(work, struct n2rng, work.work);
drivers/char/hw_random/n2-drv.c
685
schedule_delayed_work(&np->work, HZ * 2);
drivers/char/hw_random/n2-drv.c
709
INIT_DELAYED_WORK(&np->work, n2rng_work);
drivers/char/hw_random/n2-drv.c
773
schedule_delayed_work(&np->work, 0);
drivers/char/hw_random/n2-drv.c
790
cancel_delayed_work_sync(&np->work);
drivers/char/hw_random/n2rng.h
119
struct delayed_work work;
drivers/char/ipmi/ipmi_msghandler.c
3086
static void cleanup_bmc_work(struct work_struct *work)
drivers/char/ipmi/ipmi_msghandler.c
3088
struct bmc_device *bmc = container_of(work, struct bmc_device,
drivers/char/ipmi/ipmi_msghandler.c
3597
static void redo_bmc_reg(struct work_struct *work)
drivers/char/ipmi/ipmi_msghandler.c
3599
struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
drivers/char/ipmi/ipmi_msghandler.c
5182
static void ipmi_timeout_work(struct work_struct *work)
drivers/char/random.c
1015
static void mix_interrupt_randomness(struct timer_list *work);
drivers/char/random.c
1067
static void mix_interrupt_randomness(struct timer_list *work)
drivers/char/random.c
1069
struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
drivers/char/random.c
118
static void __cold crng_set_ready(struct work_struct *work)
drivers/char/random.c
247
static void crng_reseed(struct work_struct *work)
drivers/char/sonypi.c
762
static void input_keyrelease(struct work_struct *work)
drivers/char/tpm/tpm-dev-common.c
102
static void tpm_timeout_work(struct work_struct *work)
drivers/char/tpm/tpm-dev-common.c
104
struct file_priv *priv = container_of(work, struct file_priv,
drivers/char/tpm/tpm-dev-common.c
60
static void tpm_dev_async_work(struct work_struct *work)
drivers/char/tpm/tpm-dev-common.c
63
container_of(work, struct file_priv, async_work);
drivers/char/tpm/tpm_tis_core.c
908
static void tpm_tis_free_irq_func(struct work_struct *work)
drivers/char/tpm/tpm_tis_core.c
910
struct tpm_tis_data *priv = container_of(work, typeof(*priv), free_irq_work);
drivers/char/tpm/tpm_vtpm_proxy.c
452
static void vtpm_proxy_work(struct work_struct *work)
drivers/char/tpm/tpm_vtpm_proxy.c
454
struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev,
drivers/char/tpm/tpm_vtpm_proxy.c
455
work);
drivers/char/tpm/tpm_vtpm_proxy.c
47
struct work_struct work; /* task that retrieves TPM timeouts */
drivers/char/tpm/tpm_vtpm_proxy.c
474
flush_work(&proxy_dev->work);
drivers/char/tpm/tpm_vtpm_proxy.c
482
queue_work(workqueue, &proxy_dev->work);
drivers/char/tpm/tpm_vtpm_proxy.c
500
INIT_WORK(&proxy_dev->work, vtpm_proxy_work);
drivers/char/virtio_console.c
1660
static void control_work_handler(struct work_struct *work)
drivers/char/virtio_console.c
1667
portdev = container_of(work, struct ports_device, control_work);
drivers/char/virtio_console.c
1774
static void config_work_handler(struct work_struct *work)
drivers/char/virtio_console.c
1778
portdev = container_of(work, struct ports_device, config_work);
drivers/char/xillybus/xillybus_core.c
1185
static void xillybus_autoflush(struct work_struct *work)
drivers/char/xillybus/xillybus_core.c
1187
struct delayed_work *workitem = to_delayed_work(work);
drivers/char/xillybus/xillybus_core.c
292
static void xillybus_autoflush(struct work_struct *work);
drivers/char/xillybus/xillyusb.c
1024
static void bulk_in_work(struct work_struct *work)
drivers/char/xillybus/xillyusb.c
1027
container_of(work, struct xillyusb_endpoint, workitem);
drivers/char/xillybus/xillyusb.c
490
void (*work)(struct work_struct *),
drivers/char/xillybus/xillyusb.c
510
INIT_WORK(&ep->workitem, work);
drivers/char/xillybus/xillyusb.c
575
static void wakeup_all(struct work_struct *work)
drivers/char/xillybus/xillyusb.c
578
struct xillyusb_dev *xdev = container_of(work, struct xillyusb_dev,
drivers/char/xillybus/xillyusb.c
873
static void bulk_out_work(struct work_struct *work)
drivers/char/xillybus/xillyusb.c
875
struct xillyusb_endpoint *ep = container_of(work,
drivers/clocksource/numachip.c
57
static __init void numachip_timer_each(struct work_struct *work)
drivers/comedi/drivers/ni_pcidio.c
382
int work = 0;
drivers/comedi/drivers/ni_pcidio.c
404
work++;
drivers/comedi/drivers/ni_pcidio.c
405
if (work > 20) {
drivers/comedi/drivers/ni_pcidio.c
416
work++;
drivers/comedi/drivers/ni_pcidio.c
417
if (work > 100) {
drivers/cpufreq/cppc_cpufreq.c
108
static void cppc_scale_freq_workfn(struct kthread_work *work)
drivers/cpufreq/cppc_cpufreq.c
112
cppc_fi = container_of(work, struct cppc_freq_invariance, work);
drivers/cpufreq/cppc_cpufreq.c
121
kthread_queue_work(kworker_fie, &cppc_fi->work);
drivers/cpufreq/cppc_cpufreq.c
161
kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
drivers/cpufreq/cppc_cpufreq.c
207
kthread_cancel_work_sync(&cppc_fi->work);
drivers/cpufreq/cppc_cpufreq.c
45
struct kthread_work work;
drivers/cpufreq/cpufreq.c
1205
static void handle_update(struct work_struct *work)
drivers/cpufreq/cpufreq.c
1208
container_of(work, struct cpufreq_policy, update);
drivers/cpufreq/cpufreq_governor.c
233
static void dbs_work_handler(struct work_struct *work)
drivers/cpufreq/cpufreq_governor.c
239
policy_dbs = container_of(work, struct policy_dbs_info, work);
drivers/cpufreq/cpufreq_governor.c
267
schedule_work_on(smp_processor_id(), &policy_dbs->work);
drivers/cpufreq/cpufreq_governor.c
365
INIT_WORK(&policy_dbs->work, dbs_work_handler);
drivers/cpufreq/cpufreq_governor.c
558
cancel_work_sync(&policy_dbs->work);
drivers/cpufreq/cpufreq_governor.h
90
struct work_struct work;
drivers/cpufreq/intel_pstate.c
1908
static void intel_pstate_notify_work(struct work_struct *work)
drivers/cpufreq/intel_pstate.c
1911
container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
drivers/cpufreq/intel_pstate.c
353
static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
drivers/cpufreq/powernv-cpufreq.c
913
static void powernv_cpufreq_work_fn(struct work_struct *work)
drivers/cpufreq/powernv-cpufreq.c
915
struct chip *chip = container_of(work, struct chip, throttle);
drivers/cpufreq/qcom-cpufreq-hw.c
377
static void qcom_lmh_dcvs_poll(struct work_struct *work)
drivers/cpufreq/qcom-cpufreq-hw.c
381
data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
drivers/cpufreq/tegra194-cpufreq.c
274
static void tegra_read_counters(struct work_struct *work)
drivers/cpufreq/tegra194-cpufreq.c
292
read_counters_work = container_of(work, struct read_counters_work,
drivers/cpufreq/tegra194-cpufreq.c
293
work);
drivers/cpufreq/tegra194-cpufreq.c
333
INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters);
drivers/cpufreq/tegra194-cpufreq.c
334
queue_work_on(cpu, read_counters_wq, &read_counters_work.work);
drivers/cpufreq/tegra194-cpufreq.c
335
flush_work(&read_counters_work.work);
drivers/cpufreq/tegra194-cpufreq.c
55
struct work_struct work;
drivers/crypto/atmel-i2c.c
278
static void atmel_i2c_work_handler(struct work_struct *work)
drivers/crypto/atmel-i2c.c
281
container_of(work, struct atmel_i2c_work_data, work);
drivers/crypto/atmel-i2c.c
300
INIT_WORK(&work_data->work, atmel_i2c_work_handler);
drivers/crypto/atmel-i2c.c
301
queue_work(atmel_wq, &work_data->work);
drivers/crypto/atmel-i2c.h
171
struct work_struct work;
drivers/crypto/caam/caamrng.c
140
static void caam_rng_worker(struct work_struct *work)
drivers/crypto/caam/caamrng.c
142
struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
drivers/crypto/cavium/nitrox/nitrox_common.h
27
void backlog_qflush_work(struct work_struct *work);
drivers/crypto/cavium/nitrox/nitrox_mbx.c
102
static void pf2vf_resp_handler(struct work_struct *work)
drivers/crypto/cavium/nitrox/nitrox_mbx.c
104
struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work,
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
505
void backlog_qflush_work(struct work_struct *work)
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
509
cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
drivers/crypto/ccp/ccp-dev.c
345
static void ccp_do_cmd_backlog(struct work_struct *work)
drivers/crypto/ccp/ccp-dev.c
347
struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
drivers/crypto/ccp/ccp-dev.c
412
INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
drivers/crypto/ccp/ccp-dev.c
413
schedule_work(&backlog->work);
drivers/crypto/ccree/cc_request_mgr.c
546
static void comp_work_handler(struct work_struct *work)
drivers/crypto/ccree/cc_request_mgr.c
549
container_of(work, struct cc_drvdata, compwork.work);
drivers/crypto/ccree/cc_request_mgr.c
75
static void comp_work_handler(struct work_struct *work);
drivers/crypto/chelsio/chcr_core.c
50
static void detach_work_fn(struct work_struct *work)
drivers/crypto/chelsio/chcr_core.c
54
dev = container_of(work, struct chcr_dev, detach_work.work);
drivers/crypto/hifn_795x.c
1759
static void hifn_work(struct work_struct *work)
drivers/crypto/hifn_795x.c
1761
struct delayed_work *dw = to_delayed_work(work);
drivers/crypto/hifn_795x.c
1762
struct hifn_device *dev = container_of(dw, struct hifn_device, work);
drivers/crypto/hifn_795x.c
1828
schedule_delayed_work(&dev->work, HZ);
drivers/crypto/hifn_795x.c
2427
INIT_DELAYED_WORK(&dev->work, hifn_work);
drivers/crypto/hifn_795x.c
2428
schedule_delayed_work(&dev->work, HZ);
drivers/crypto/hifn_795x.c
2471
cancel_delayed_work_sync(&dev->work);
drivers/crypto/hifn_795x.c
419
struct delayed_work work;
drivers/crypto/hisilicon/qm.c
1019
static void qm_work_process(struct work_struct *work)
drivers/crypto/hisilicon/qm.c
1022
container_of(work, struct hisi_qm_poll_data, work);
drivers/crypto/hisilicon/qm.c
1081
queue_work(qm->wq, &poll_data->work);
drivers/crypto/hisilicon/qm.c
5744
INIT_WORK(&qm->poll_data[i].work, qm_work_process);
drivers/crypto/inside-secure/safexcel.c
1074
static void safexcel_dequeue_work(struct work_struct *work)
drivers/crypto/inside-secure/safexcel.c
1077
container_of(work, struct safexcel_work_data, work);
drivers/crypto/inside-secure/safexcel.c
1133
&priv->ring[ring].work_data.work);
drivers/crypto/inside-secure/safexcel.c
1663
INIT_WORK(&priv->ring[i].work_data.work,
drivers/crypto/inside-secure/safexcel.h
689
struct work_struct work;
drivers/crypto/inside-secure/safexcel_cipher.c
1113
&priv->ring[ring].work_data.work);
drivers/crypto/inside-secure/safexcel_cipher.c
1189
&priv->ring[ring].work_data.work);
drivers/crypto/inside-secure/safexcel_cipher.c
969
&priv->ring[ring].work_data.work);
drivers/crypto/inside-secure/safexcel_hash.c
558
&priv->ring[ring].work_data.work);
drivers/crypto/inside-secure/safexcel_hash.c
647
&priv->ring[ring].work_data.work);
drivers/crypto/inside-secure/safexcel_hash.c
735
&priv->ring[ring].work_data.work);
drivers/crypto/intel/qat/qat_common/adf_aer.c
109
static void adf_device_sriov_worker(struct work_struct *work)
drivers/crypto/intel/qat/qat_common/adf_aer.c
112
container_of(work, struct adf_sriov_dev_data, sriov_work);
drivers/crypto/intel/qat/qat_common/adf_aer.c
118
static void adf_device_reset_worker(struct work_struct *work)
drivers/crypto/intel/qat/qat_common/adf_aer.c
121
container_of(work, struct adf_reset_dev_data, reset_work);
drivers/crypto/intel/qat/qat_common/adf_aer.c
14
struct work_struct work;
drivers/crypto/intel/qat/qat_common/adf_aer.c
236
static void adf_notify_fatal_error_worker(struct work_struct *work)
drivers/crypto/intel/qat/qat_common/adf_aer.c
239
container_of(work, struct adf_fatal_error_data, work);
drivers/crypto/intel/qat/qat_common/adf_aer.c
266
INIT_WORK(&wq_data->work, adf_notify_fatal_error_worker);
drivers/crypto/intel/qat/qat_common/adf_aer.c
267
adf_misc_wq_queue_work(&wq_data->work);
drivers/crypto/intel/qat/qat_common/adf_common_drv.h
188
bool adf_misc_wq_queue_work(struct work_struct *work);
drivers/crypto/intel/qat/qat_common/adf_common_drv.h
189
bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
60
static void pm_bh_handler(struct work_struct *work)
drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
63
container_of(work, struct adf_gen4_pm_data, pm_irq_work);
drivers/crypto/intel/qat/qat_common/adf_isr.c
401
bool adf_misc_wq_queue_work(struct work_struct *work)
drivers/crypto/intel/qat/qat_common/adf_isr.c
403
return queue_work(adf_misc_wq, work);
drivers/crypto/intel/qat/qat_common/adf_isr.c
406
bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
drivers/crypto/intel/qat/qat_common/adf_isr.c
409
return queue_delayed_work(adf_misc_wq, work, delay);
drivers/crypto/intel/qat/qat_common/adf_sriov.c
20
static void adf_iov_send_resp(struct work_struct *work)
drivers/crypto/intel/qat/qat_common/adf_sriov.c
23
container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
drivers/crypto/intel/qat/qat_common/adf_telemetry.c
151
static void tl_work_handler(struct work_struct *work)
drivers/crypto/intel/qat/qat_common/adf_telemetry.c
161
delayed_work = to_delayed_work(work);
drivers/crypto/intel/qat/qat_common/adf_timer.c
20
static void work_handler(struct work_struct *work)
drivers/crypto/intel/qat/qat_common/adf_timer.c
26
timer_ctx = container_of(to_delayed_work(work), struct adf_timer, work_ctx);
drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
29
struct work_struct work;
drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
68
static void adf_dev_stop_async(struct work_struct *work)
drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
71
container_of(work, struct adf_vf_stop_data, work);
drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
96
INIT_WORK(&stop_data->work, adf_dev_stop_async);
drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
97
queue_work(adf_vf_stop_wq, &stop_data->work);
drivers/crypto/marvell/octeontx2/otx2_cptlf.c
269
tasklet_hi_schedule(&lf->wqe->work);
drivers/crypto/marvell/octeontx2/otx2_cptlf.h
78
struct tasklet_struct work;
drivers/crypto/marvell/octeontx2/otx2_cptpf.h
22
struct work_struct work;
drivers/crypto/marvell/octeontx2/otx2_cptpf.h
69
void otx2_cptpf_afpf_mbox_handler(struct work_struct *work);
drivers/crypto/marvell/octeontx2/otx2_cptpf.h
70
void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work);
drivers/crypto/marvell/octeontx2/otx2_cptpf.h
72
void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work);
drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
134
static void cptpf_flr_wq_handler(struct work_struct *work)
drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
142
flr_work = container_of(work, struct cptpf_flr_work, work);
drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
196
queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
349
INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
372
void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
382
vf = container_of(work, struct otx2_cptvf_info, vfpf_mbox_work);
drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
558
void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
567
cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
635
void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work)
drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
644
cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work);
drivers/crypto/marvell/octeontx2/otx2_cptvf.h
29
void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work);
drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
141
tasklet_kill(&lfs->lf[i].wqe->work);
drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
159
tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
151
void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work)
drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
163
cptvf = container_of(work, struct otx2_cptvf_dev, pfvf_mbox_work);
drivers/crypto/qce/core.c
124
static void qce_req_done_work(struct work_struct *work)
drivers/crypto/qce/core.c
126
struct qce_device *qce = container_of(work, struct qce_device,
drivers/crypto/virtio/virtio_crypto_core.c
336
static void vcrypto_config_changed_work(struct work_struct *work)
drivers/crypto/virtio/virtio_crypto_core.c
339
container_of(work, struct virtio_crypto, config_work);
drivers/cxl/core/memdev.c
639
static void detach_memdev(struct work_struct *work)
drivers/cxl/core/memdev.c
643
cxlmd = container_of(work, typeof(*cxlmd), detach_work);
drivers/cxl/core/ras.c
110
static void cxl_cper_prot_err_work_fn(struct work_struct *work)
drivers/cxl/pci.c
1110
static void cxl_cper_work_fn(struct work_struct *work)
drivers/cxl/pci.c
152
static void cxl_mbox_sanitize_work(struct work_struct *work)
drivers/cxl/pci.c
155
container_of(work, typeof(*mds), security.poll_dwork.work);
drivers/devfreq/devfreq.c
450
static void devfreq_monitor(struct work_struct *work)
drivers/devfreq/devfreq.c
453
struct devfreq *devfreq = container_of(work,
drivers/devfreq/devfreq.c
454
struct devfreq, work.work);
drivers/devfreq/devfreq.c
464
queue_delayed_work(devfreq_wq, &devfreq->work,
drivers/devfreq/devfreq.c
488
if (delayed_work_pending(&devfreq->work))
drivers/devfreq/devfreq.c
493
INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
drivers/devfreq/devfreq.c
496
INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
drivers/devfreq/devfreq.c
503
queue_delayed_work(devfreq_wq, &devfreq->work,
drivers/devfreq/devfreq.c
533
cancel_delayed_work_sync(&devfreq->work);
drivers/devfreq/devfreq.c
564
cancel_delayed_work_sync(&devfreq->work);
drivers/devfreq/devfreq.c
588
if (!delayed_work_pending(&devfreq->work) &&
drivers/devfreq/devfreq.c
590
queue_delayed_work(devfreq_wq, &devfreq->work,
drivers/devfreq/devfreq.c
631
cancel_delayed_work_sync(&devfreq->work);
drivers/devfreq/devfreq.c
637
queue_delayed_work(devfreq_wq, &devfreq->work,
drivers/devfreq/devfreq.c
645
cancel_delayed_work_sync(&devfreq->work);
drivers/devfreq/devfreq.c
648
queue_delayed_work(devfreq_wq, &devfreq->work,
drivers/devfreq/tegra30-devfreq.c
418
static void tegra_actmon_delayed_update(struct work_struct *work)
drivers/devfreq/tegra30-devfreq.c
420
struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
drivers/devfreq/tegra30-devfreq.c
421
cpufreq_update_work.work);
drivers/dma-buf/dma-fence-array.c
210
init_irq_work(&array->work, irq_dma_fence_array_work);
drivers/dma-buf/dma-fence-array.c
47
struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
drivers/dma-buf/dma-fence-array.c
65
irq_work_queue(&array->work);
drivers/dma-buf/dma-fence-chain.c
122
static void dma_fence_chain_irq_work(struct irq_work *work)
drivers/dma-buf/dma-fence-chain.c
126
chain = container_of(work, typeof(*chain), work);
drivers/dma-buf/dma-fence-chain.c
140
init_irq_work(&chain->work, dma_fence_chain_irq_work);
drivers/dma-buf/dma-fence-chain.c
141
irq_work_queue(&chain->work);
drivers/dma/amd/ae4dma/ae4dma-dev.c
17
static void ae4_pending_work(struct work_struct *work)
drivers/dma/amd/ae4dma/ae4dma-dev.c
19
struct ae4_cmd_queue *ae4cmd_q = container_of(work, struct ae4_cmd_queue, p_work.work);
drivers/dma/amd/ptdma/ptdma.h
164
struct work_struct work;
drivers/dma/idxd/idxd.h
308
struct work_struct work;
drivers/dma/idxd/idxd.h
368
struct work_struct work;
drivers/dma/idxd/irq.c
130
static void idxd_int_handle_revoke(struct work_struct *work)
drivers/dma/idxd/irq.c
133
container_of(work, struct idxd_int_handle_revoke, work);
drivers/dma/idxd/irq.c
222
static void idxd_evl_fault_work(struct work_struct *work)
drivers/dma/idxd/irq.c
224
struct idxd_evl_fault *fault = container_of(work, struct idxd_evl_fault, work);
drivers/dma/idxd/irq.c
23
struct work_struct work;
drivers/dma/idxd/irq.c
28
struct work_struct work;
drivers/dma/idxd/irq.c
32
static void idxd_device_reinit(struct work_struct *work)
drivers/dma/idxd/irq.c
34
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
drivers/dma/idxd/irq.c
341
INIT_WORK(&fault->work, idxd_evl_fault_work);
drivers/dma/idxd/irq.c
342
queue_work(wq->wq, &fault->work);
drivers/dma/idxd/irq.c
386
static void idxd_device_flr(struct work_struct *work)
drivers/dma/idxd/irq.c
388
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
drivers/dma/idxd/irq.c
424
INIT_WORK(&idxd->work, idxd_device_reinit);
drivers/dma/idxd/irq.c
425
queue_work(idxd->wq, &idxd->work);
drivers/dma/idxd/irq.c
436
INIT_WORK(&idxd->work, idxd_device_flr);
drivers/dma/idxd/irq.c
437
queue_work(idxd->wq, &idxd->work);
drivers/dma/idxd/irq.c
514
INIT_WORK(&revoke->work, idxd_int_handle_revoke);
drivers/dma/idxd/irq.c
515
queue_work(idxd->wq, &revoke->work);
drivers/dma/idxd/irq.c
551
static void idxd_int_handle_resubmit_work(struct work_struct *work)
drivers/dma/idxd/irq.c
553
struct idxd_resubmit *irw = container_of(work, struct idxd_resubmit, work);
drivers/dma/idxd/irq.c
591
INIT_WORK(&irw->work, idxd_int_handle_resubmit_work);
drivers/dma/idxd/irq.c
592
queue_work(idxd->wq, &irw->work);
drivers/dma/imx-sdma.c
1186
static void sdma_channel_terminate_work(struct work_struct *work)
drivers/dma/imx-sdma.c
1188
struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
drivers/dma/lgm/lgm-dma.c
1050
cancel_work_sync(&c->work);
drivers/dma/lgm/lgm-dma.c
1114
queue_work(d->wq, &c->work);
drivers/dma/lgm/lgm-dma.c
1306
static void dma_work(struct work_struct *work)
drivers/dma/lgm/lgm-dma.c
1308
struct ldma_chan *c = container_of(work, struct ldma_chan, work);
drivers/dma/lgm/lgm-dma.c
1408
INIT_WORK(&c->work, dma_work);
drivers/dma/lgm/lgm-dma.c
219
struct work_struct work;
drivers/dma/ti/k3-udma.c
1086
static void udma_check_tx_completion(struct work_struct *work)
drivers/dma/ti/k3-udma.c
1088
struct udma_chan *uc = container_of(work, typeof(*uc),
drivers/dma/ti/k3-udma.c
1089
tx_drain.work.work);
drivers/dma/ti/k3-udma.c
1129
schedule_delayed_work(&uc->tx_drain.work, HZ);
drivers/dma/ti/k3-udma.c
1202
schedule_delayed_work(&uc->tx_drain.work,
drivers/dma/ti/k3-udma.c
244
struct delayed_work work;
drivers/dma/ti/k3-udma.c
2555
INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
drivers/dma/ti/k3-udma.c
2719
INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
drivers/dma/ti/k3-udma.c
3966
cancel_delayed_work(&uc->tx_drain.work);
drivers/dma/ti/k3-udma.c
4000
cancel_delayed_work_sync(&uc->tx_drain.work);
drivers/dma/ti/k3-udma.c
4094
cancel_delayed_work_sync(&uc->tx_drain.work);
drivers/dma/ti/k3-udma.c
555
static void udma_purge_desc_work(struct work_struct *work)
drivers/dma/ti/k3-udma.c
557
struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
drivers/dma/ti/k3-udma.c
5633
INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
drivers/dpll/zl3073x/core.c
724
zl3073x_dev_periodic_work(struct kthread_work *work)
drivers/dpll/zl3073x/core.c
726
struct zl3073x_dev *zldev = container_of(work, struct zl3073x_dev,
drivers/dpll/zl3073x/core.c
727
work.work);
drivers/dpll/zl3073x/core.c
751
kthread_queue_delayed_work(zldev->kworker, &zldev->work,
drivers/dpll/zl3073x/core.c
895
kthread_queue_delayed_work(zldev->kworker, &zldev->work, 0);
drivers/dpll/zl3073x/core.c
914
kthread_cancel_delayed_work_sync(&zldev->work);
drivers/dpll/zl3073x/core.c
968
kthread_init_delayed_work(&zldev->work, zl3073x_dev_periodic_work);
drivers/dpll/zl3073x/core.h
64
struct kthread_delayed_work work;
drivers/dpll/zl3073x/dpll.c
1178
zl3073x_dpll_change_work(struct work_struct *work)
drivers/dpll/zl3073x/dpll.c
1182
zldpll = container_of(work, struct zl3073x_dpll, change_work);
drivers/edac/edac_device.c
295
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
drivers/edac/edac_device.c
297
edac_queue_work(&edac_dev->work, edac_dev->delay);
drivers/edac/edac_device.c
317
INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
drivers/edac/edac_device.c
325
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
drivers/edac/edac_device.c
327
edac_queue_work(&edac_dev->work, edac_dev->delay);
drivers/edac/edac_device.c
341
edac_stop_work(&edac_dev->work);
drivers/edac/edac_device.c
359
edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
drivers/edac/edac_device.c
361
edac_mod_work(&edac_dev->work, edac_dev->delay);
drivers/edac/edac_device.h
174
struct delayed_work work;
drivers/edac/edac_device.h
220
container_of(w, struct mem_ctl_info, work)
drivers/edac/edac_device.h
223
container_of(w,struct edac_device_ctl_info,work)
drivers/edac/edac_mc.c
486
edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
drivers/edac/edac_mc.c
506
edac_mod_work(&mci->work, value);
drivers/edac/edac_mc.c
657
INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
drivers/edac/edac_mc.c
658
edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
drivers/edac/edac_mc.c
708
edac_stop_work(&mci->work);
drivers/edac/edac_module.h
51
bool edac_queue_work(struct delayed_work *work, unsigned long delay);
drivers/edac/edac_module.h
52
bool edac_stop_work(struct delayed_work *work);
drivers/edac/edac_module.h
53
bool edac_mod_work(struct delayed_work *work, unsigned long delay);
drivers/edac/edac_pci.c
189
edac_queue_work(&pci->work, delay);
drivers/edac/edac_pci.c
221
INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
drivers/edac/edac_pci.c
222
edac_queue_work(&pci->work, msecs_to_jiffies(edac_pci_get_poll_msec()));
drivers/edac/edac_pci.c
269
edac_stop_work(&pci->work);
drivers/edac/edac_pci.h
53
struct delayed_work work;
drivers/edac/edac_pci.h
91
container_of(w, struct edac_pci_ctl_info,work)
drivers/edac/i5100_edac.c
565
static void i5100_refresh_scrubbing(struct work_struct *work)
drivers/edac/i5100_edac.c
567
struct delayed_work *i5100_scrubbing = to_delayed_work(work);
drivers/edac/igen6_edac.c
956
static void ecclog_work_cb(struct work_struct *work)
drivers/edac/wq.c
12
bool edac_mod_work(struct delayed_work *work, unsigned long delay)
drivers/edac/wq.c
14
return mod_delayed_work(wq, work, delay);
drivers/edac/wq.c
18
bool edac_stop_work(struct delayed_work *work)
drivers/edac/wq.c
22
ret = cancel_delayed_work_sync(work);
drivers/edac/wq.c
6
bool edac_queue_work(struct delayed_work *work, unsigned long delay)
drivers/edac/wq.c
8
return queue_delayed_work(wq, work, delay);
drivers/extcon/extcon-adc-jack.c
159
adc_jack_handler(&data->handler.work);
drivers/extcon/extcon-adc-jack.c
170
cancel_work_sync(&data->handler.work);
drivers/extcon/extcon-adc-jack.c
57
static void adc_jack_handler(struct work_struct *work)
drivers/extcon/extcon-adc-jack.c
59
struct adc_jack_data *data = container_of(to_delayed_work(work),
drivers/extcon/extcon-axp288.c
178
static void axp288_usb_role_work(struct work_struct *work)
drivers/extcon/extcon-axp288.c
181
container_of(work, struct axp288_extcon_info, role_work);
drivers/extcon/extcon-gpio.c
116
ret = devm_delayed_work_autocancel(dev, &data->work, gpio_extcon_work);
drivers/extcon/extcon-gpio.c
132
gpio_extcon_work(&data->work.work);
drivers/extcon/extcon-gpio.c
145
&data->work, data->debounce_jiffies);
drivers/extcon/extcon-gpio.c
37
struct delayed_work work;
drivers/extcon/extcon-gpio.c
45
static void gpio_extcon_work(struct work_struct *work)
drivers/extcon/extcon-gpio.c
49
container_of(to_delayed_work(work), struct gpio_extcon_data,
drivers/extcon/extcon-gpio.c
50
work);
drivers/extcon/extcon-gpio.c
60
queue_delayed_work(system_power_efficient_wq, &data->work,
drivers/extcon/extcon-intel-int3496.c
109
mod_delayed_work(system_wq, &data->work, DEBOUNCE_TIME);
drivers/extcon/extcon-intel-int3496.c
133
ret = devm_delayed_work_autocancel(dev, &data->work, int3496_do_usb_id);
drivers/extcon/extcon-intel-int3496.c
184
queue_delayed_work(system_wq, &data->work, 0);
drivers/extcon/extcon-intel-int3496.c
185
flush_delayed_work(&data->work);
drivers/extcon/extcon-intel-int3496.c
30
struct delayed_work work;
drivers/extcon/extcon-intel-int3496.c
80
static void int3496_do_usb_id(struct work_struct *work)
drivers/extcon/extcon-intel-int3496.c
83
container_of(work, struct int3496_data, work.work);
drivers/extcon/extcon-lc824206xa.c
109
struct work_struct work;
drivers/extcon/extcon-lc824206xa.c
245
static void lc824206xa_work(struct work_struct *work)
drivers/extcon/extcon-lc824206xa.c
247
struct lc824206xa_data *data = container_of(work, struct lc824206xa_data, work);
drivers/extcon/extcon-lc824206xa.c
355
schedule_work(&data->work);
drivers/extcon/extcon-lc824206xa.c
427
INIT_WORK(&data->work, lc824206xa_work);
drivers/extcon/extcon-lc824206xa.c
473
schedule_work(&data->work);
drivers/extcon/extcon-max14577.c
481
static void max14577_muic_irq_work(struct work_struct *work)
drivers/extcon/extcon-max14577.c
483
struct max14577_muic_info *info = container_of(work,
drivers/extcon/extcon-max14577.c
648
static void max14577_muic_detect_cable_wq(struct work_struct *work)
drivers/extcon/extcon-max14577.c
650
struct max14577_muic_info *info = container_of(to_delayed_work(work),
drivers/extcon/extcon-max77693.c
1060
static void max77693_muic_detect_cable_wq(struct work_struct *work)
drivers/extcon/extcon-max77693.c
1062
struct max77693_muic_info *info = container_of(to_delayed_work(work),
drivers/extcon/extcon-max77693.c
936
static void max77693_muic_irq_work(struct work_struct *work)
drivers/extcon/extcon-max77693.c
938
struct max77693_muic_info *info = container_of(work,
drivers/extcon/extcon-max77843.c
626
static void max77843_muic_irq_work(struct work_struct *work)
drivers/extcon/extcon-max77843.c
628
struct max77843_muic_info *info = container_of(work,
drivers/extcon/extcon-max77843.c
702
static void max77843_muic_detect_cable_wq(struct work_struct *work)
drivers/extcon/extcon-max77843.c
704
struct max77843_muic_info *info = container_of(to_delayed_work(work),
drivers/extcon/extcon-max8997.c
508
static void max8997_muic_irq_work(struct work_struct *work)
drivers/extcon/extcon-max8997.c
510
struct max8997_muic_info *info = container_of(work,
drivers/extcon/extcon-max8997.c
621
static void max8997_muic_detect_cable_wq(struct work_struct *work)
drivers/extcon/extcon-max8997.c
623
struct max8997_muic_info *info = container_of(to_delayed_work(work),
drivers/extcon/extcon-palmas.c
115
static void palmas_gpio_id_detect(struct work_struct *work)
drivers/extcon/extcon-palmas.c
118
struct palmas_usb *palmas_usb = container_of(to_delayed_work(work),
drivers/extcon/extcon-palmas.c
361
palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
drivers/extcon/extcon-palmas.c
402
palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
drivers/extcon/extcon-ptn5150.c
116
static void ptn5150_irq_work(struct work_struct *work)
drivers/extcon/extcon-ptn5150.c
118
struct ptn5150_info *info = container_of(work,
drivers/extcon/extcon-qcom-spmi-misc.c
161
qcom_usb_extcon_detect_cable(&info->wq_detcable.work);
drivers/extcon/extcon-qcom-spmi-misc.c
37
static void qcom_usb_extcon_detect_cable(struct work_struct *work)
drivers/extcon/extcon-qcom-spmi-misc.c
42
struct qcom_usb_extcon_info *info = container_of(to_delayed_work(work),
drivers/extcon/extcon-rt8973a.c
406
static void rt8973a_muic_irq_work(struct work_struct *work)
drivers/extcon/extcon-rt8973a.c
408
struct rt8973a_muic_info *info = container_of(work,
drivers/extcon/extcon-rt8973a.c
488
static void rt8973a_muic_detect_cable_wq(struct work_struct *work)
drivers/extcon/extcon-rt8973a.c
490
struct rt8973a_muic_info *info = container_of(to_delayed_work(work),
drivers/extcon/extcon-rtk-type-c.c
597
static void host_device_switch(struct work_struct *work)
drivers/extcon/extcon-rtk-type-c.c
599
struct type_c_data *type_c = container_of(work, struct type_c_data,
drivers/extcon/extcon-rtk-type-c.c
600
delayed_work.work);
drivers/extcon/extcon-sm5502.c
523
static void sm5502_muic_irq_work(struct work_struct *work)
drivers/extcon/extcon-sm5502.c
525
struct sm5502_muic_info *info = container_of(work,
drivers/extcon/extcon-sm5502.c
630
static void sm5502_muic_detect_cable_wq(struct work_struct *work)
drivers/extcon/extcon-sm5502.c
632
struct sm5502_muic_info *info = container_of(to_delayed_work(work),
drivers/extcon/extcon-usb-gpio.c
191
usb_extcon_detect_cable(&info->wq_detcable.work);
drivers/extcon/extcon-usb-gpio.c
60
static void usb_extcon_detect_cable(struct work_struct *work)
drivers/extcon/extcon-usb-gpio.c
63
struct usb_extcon_info *info = container_of(to_delayed_work(work),
drivers/firewire/core-card.c
235
static void br_work(struct work_struct *work)
drivers/firewire/core-card.c
237
struct fw_card *card = from_work(card, work, br_work.work);
drivers/firewire/core-card.c
382
static void bm_work(struct work_struct *work)
drivers/firewire/core-card.c
387
struct fw_card *card __free(card_unref) = from_work(card, work, bm_work.work);
drivers/firewire/core-cdev.c
1296
static void iso_resource_work(struct work_struct *work)
drivers/firewire/core-cdev.c
1299
struct iso_resource *r = from_work(r, work, work.work);
drivers/firewire/core-cdev.c
135
struct delayed_work work;
drivers/firewire/core-cdev.c
1381
cancel_delayed_work(&r->work);
drivers/firewire/core-cdev.c
1420
INIT_DELAYED_WORK(&r->work, iso_resource_work);
drivers/firewire/core-cdev.c
182
if (!queue_delayed_work(fw_workqueue, &r->work, delay))
drivers/firewire/core-device.c
1076
static void fw_device_init(struct work_struct *work)
drivers/firewire/core-device.c
1078
struct fw_device *device = from_work(device, work, work.work);
drivers/firewire/core-device.c
1257
static void fw_device_refresh(struct work_struct *work)
drivers/firewire/core-device.c
1259
struct fw_device *device = from_work(device, work, work.work);
drivers/firewire/core-device.c
1274
fw_device_update(work);
drivers/firewire/core-device.c
1323
static void fw_device_workfn(struct work_struct *work)
drivers/firewire/core-device.c
1325
struct fw_device *device = from_work(device, to_delayed_work(work), work);
drivers/firewire/core-device.c
1326
device->workfn(work);
drivers/firewire/core-device.c
1379
INIT_DELAYED_WORK(&device->work, fw_device_workfn);
drivers/firewire/core-device.c
911
queue_delayed_work(fw_workqueue, &device->work, delay);
drivers/firewire/core-device.c
930
static void fw_device_shutdown(struct work_struct *work)
drivers/firewire/core-device.c
932
struct fw_device *device = from_work(device, work, work.work);
drivers/firewire/core-device.c
996
static void fw_device_update(struct work_struct *work)
drivers/firewire/core-device.c
998
struct fw_device *device = from_work(device, work, work.work);
drivers/firewire/core-iso.c
246
if (WARN_ON_ONCE(current_work() == &ctx->work))
drivers/firewire/core-iso.c
249
disable_work_sync(&ctx->work);
drivers/firewire/core-iso.c
253
enable_work(&ctx->work);
drivers/firewire/core-iso.c
270
if (WARN_ON_ONCE(current_work() == &ctx->work))
drivers/firewire/core-iso.c
275
cancel_work_sync(&ctx->work);
drivers/firewire/core.h
173
INIT_WORK(&ctx->work, func);
drivers/firewire/ohci.c
105
struct work_struct work;
drivers/firewire/ohci.c
1334
if (WARN_ON_ONCE(current_work() == &ctx->work))
drivers/firewire/ohci.c
1337
disable_work_sync(&ctx->work);
drivers/firewire/ohci.c
1340
ohci_at_context_work(&ctx->work);
drivers/firewire/ohci.c
1343
enable_work(&ctx->work);
drivers/firewire/ohci.c
164
struct work_struct work;
drivers/firewire/ohci.c
2083
queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work);
drivers/firewire/ohci.c
2086
queue_work(ohci->card.async_wq, &ohci->ar_response_ctx.work);
drivers/firewire/ohci.c
2089
queue_work(ohci->card.async_wq, &ohci->at_request_ctx.work);
drivers/firewire/ohci.c
2092
queue_work(ohci->card.async_wq, &ohci->at_response_ctx.work);
drivers/firewire/ohci.c
2452
flush_work(&ohci->ar_request_ctx.work);
drivers/firewire/ohci.c
2453
flush_work(&ohci->ar_response_ctx.work);
drivers/firewire/ohci.c
2454
flush_work(&ohci->at_request_ctx.work);
drivers/firewire/ohci.c
2455
flush_work(&ohci->at_response_ctx.work);
drivers/firewire/ohci.c
2459
flush_work(&ohci->ir_context_list[i].base.work);
drivers/firewire/ohci.c
2463
flush_work(&ohci->it_context_list[i].base.work);
drivers/firewire/ohci.c
2573
if (WARN_ON_ONCE(current_work() == &ctx->work))
drivers/firewire/ohci.c
2575
disable_work_sync(&ctx->work);
drivers/firewire/ohci.c
2593
enable_work(&ctx->work);
drivers/firewire/ohci.c
3485
ohci_isoc_context_work(&base->work);
drivers/firewire/ohci.c
3659
INIT_WORK(&ohci->at_request_ctx.work, ohci_at_context_work);
drivers/firewire/ohci.c
3665
INIT_WORK(&ohci->at_response_ctx.work, ohci_at_context_work);
drivers/firewire/ohci.c
805
static void ohci_ar_context_work(struct work_struct *work)
drivers/firewire/ohci.c
807
struct ar_context *ctx = from_work(ctx, work, work);
drivers/firewire/ohci.c
858
INIT_WORK(&ctx->work, ohci_ar_context_work);
drivers/firewire/ohci.c
986
static void ohci_at_context_work(struct work_struct *work)
drivers/firewire/ohci.c
988
struct at_context *ctx = from_work(ctx, work, work);
drivers/firewire/ohci.c
993
static void ohci_isoc_context_work(struct work_struct *work)
drivers/firewire/ohci.c
995
struct fw_iso_context *base = from_work(base, work, work);
drivers/firewire/sbp2.c
1225
cancel_delayed_work_sync(&lu->work);
drivers/firewire/sbp2.c
138
struct delayed_work work;
drivers/firewire/sbp2.c
145
queue_delayed_work(fw_workqueue, &lu->work, delay);
drivers/firewire/sbp2.c
795
static void sbp2_reconnect(struct work_struct *work);
drivers/firewire/sbp2.c
797
static void sbp2_login(struct work_struct *work)
drivers/firewire/sbp2.c
800
container_of(work, struct sbp2_logical_unit, work.work);
drivers/firewire/sbp2.c
907
static void sbp2_reconnect(struct work_struct *work)
drivers/firewire/sbp2.c
910
container_of(work, struct sbp2_logical_unit, work.work);
drivers/firewire/sbp2.c
958
static void sbp2_lu_workfn(struct work_struct *work)
drivers/firewire/sbp2.c
960
struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
drivers/firewire/sbp2.c
961
struct sbp2_logical_unit, work);
drivers/firewire/sbp2.c
962
lu->workfn(work);
drivers/firewire/sbp2.c
992
INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
drivers/firmware/arm_ffa/driver.c
1540
static void notif_pcpu_irq_work_fn(struct work_struct *work)
drivers/firmware/arm_ffa/driver.c
1542
struct ffa_drv_info *info = container_of(work, struct ffa_drv_info,
drivers/firmware/arm_ffa/driver.c
1819
static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
drivers/firmware/arm_scmi/notify.c
1581
static void scmi_protocols_late_init(struct work_struct *work)
drivers/firmware/arm_scmi/notify.c
1588
ni = container_of(work, struct scmi_notify_instance, init_work);
drivers/firmware/arm_scmi/notify.c
541
static void scmi_events_dispatcher(struct work_struct *work)
drivers/firmware/arm_scmi/notify.c
547
eq = container_of(work, struct events_queue, notify_work);
drivers/firmware/arm_scmi/raw_mode.c
432
static void scmi_xfer_raw_worker(struct work_struct *work)
drivers/firmware/arm_scmi/raw_mode.c
438
raw = container_of(work, struct scmi_raw_mode_info, waiters_work);
drivers/firmware/arm_scmi/scmi_power_control.c
180
static void scmi_forceful_work_func(struct work_struct *work)
drivers/firmware/arm_scmi/scmi_power_control.c
188
dwork = to_delayed_work(work);
drivers/firmware/arm_scmi/scmi_power_control.c
326
static void scmi_suspend_work_func(struct work_struct *work)
drivers/firmware/arm_scmi/transports/virtio.c
320
static void scmi_vio_deferred_tx_worker(struct work_struct *work)
drivers/firmware/arm_scmi/transports/virtio.c
326
vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);
drivers/firmware/efi/runtime-wrappers.c
218
static void __nocfi efi_call_rts(struct work_struct *work)
drivers/firmware/efi/runtime-wrappers.c
335
INIT_WORK(&efi_rts_work.work, efi_call_rts);
drivers/firmware/efi/runtime-wrappers.c
341
if (queue_work(efi_rts_wq, &efi_rts_work.work))
drivers/firmware/imx/imx-scu-irq.c
84
static void imx_scu_irq_work_handler(struct work_struct *work)
drivers/gpio/gpio-ljca.c
270
static void ljca_gpio_async(struct work_struct *work)
drivers/gpio/gpio-ljca.c
273
container_of(work, struct ljca_gpio_dev, work);
drivers/gpio/gpio-ljca.c
300
schedule_work(&ljca_gpio->work);
drivers/gpio/gpio-ljca.c
457
INIT_WORK(&ljca_gpio->work, ljca_gpio_async);
drivers/gpio/gpio-ljca.c
471
cancel_work_sync(&ljca_gpio->work);
drivers/gpio/gpio-ljca.c
69
struct work_struct work;
drivers/gpio/gpio-mpsse.c
393
cancel_work_sync(&worker->work);
drivers/gpio/gpio-mpsse.c
403
struct mpsse_worker *my_worker = container_of(my_work, struct mpsse_worker, work);
drivers/gpio/gpio-mpsse.c
45
struct work_struct work;
drivers/gpio/gpio-mpsse.c
527
INIT_WORK(&worker->work, gpio_mpsse_poll);
drivers/gpio/gpio-mpsse.c
528
schedule_work(&worker->work);
drivers/gpio/gpio-virtuser.c
105
irq_work_queue(&ctx->work);
drivers/gpio/gpio-virtuser.c
121
static void gpio_virtuser_get_value_array_atomic(struct irq_work *work)
drivers/gpio/gpio-virtuser.c
124
to_gpio_virtuser_irq_work_context(work);
drivers/gpio/gpio-virtuser.c
143
ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_get_value_array_atomic);
drivers/gpio/gpio-virtuser.c
201
static void gpio_virtuser_set_value_array_atomic(struct irq_work *work)
drivers/gpio/gpio-virtuser.c
204
to_gpio_virtuser_irq_work_context(work);
drivers/gpio/gpio-virtuser.c
221
ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_set_value_array_atomic);
drivers/gpio/gpio-virtuser.c
315
static void gpio_virtuser_do_get_direction_atomic(struct irq_work *work)
drivers/gpio/gpio-virtuser.c
318
to_gpio_virtuser_irq_work_context(work);
drivers/gpio/gpio-virtuser.c
329
ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_do_get_direction_atomic);
drivers/gpio/gpio-virtuser.c
367
static void gpio_virtuser_do_set_direction_atomic(struct irq_work *work)
drivers/gpio/gpio-virtuser.c
370
to_gpio_virtuser_irq_work_context(work);
drivers/gpio/gpio-virtuser.c
382
ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_do_set_direction_atomic);
drivers/gpio/gpio-virtuser.c
511
static void gpio_virtuser_get_value_atomic(struct irq_work *work)
drivers/gpio/gpio-virtuser.c
514
to_gpio_virtuser_irq_work_context(work);
drivers/gpio/gpio-virtuser.c
526
ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_get_value_atomic);
drivers/gpio/gpio-virtuser.c
539
static void gpio_virtuser_set_value_atomic(struct irq_work *work)
drivers/gpio/gpio-virtuser.c
542
to_gpio_virtuser_irq_work_context(work);
drivers/gpio/gpio-virtuser.c
557
ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_set_value_atomic);
drivers/gpio/gpio-virtuser.c
73
struct irq_work work;
drivers/gpio/gpio-virtuser.c
90
to_gpio_virtuser_irq_work_context(struct irq_work *work)
drivers/gpio/gpio-virtuser.c
92
return container_of(work, struct gpio_virtuser_irq_work_context, work);
drivers/gpio/gpiolib-cdev.c
1623
INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
drivers/gpio/gpiolib-cdev.c
2480
struct work_struct work;
drivers/gpio/gpiolib-cdev.c
2486
static void lineinfo_changed_func(struct work_struct *work)
drivers/gpio/gpiolib-cdev.c
2489
container_of(work, struct lineinfo_changed_ctx, work);
drivers/gpio/gpiolib-cdev.c
2563
INIT_WORK(&ctx->work, lineinfo_changed_func);
drivers/gpio/gpiolib-cdev.c
2564
queue_work(ctx->gdev->line_state_wq, &ctx->work);
drivers/gpio/gpiolib-cdev.c
451
struct delayed_work work;
drivers/gpio/gpiolib-cdev.c
679
mod_delayed_work(system_percpu_wq, &line->work,
drivers/gpio/gpiolib-cdev.c
820
mod_delayed_work(system_percpu_wq, &line->work,
drivers/gpio/gpiolib-cdev.c
826
static void debounce_work_func(struct work_struct *work)
drivers/gpio/gpiolib-cdev.c
829
struct line *line = container_of(work, struct line, work.work);
drivers/gpio/gpiolib-cdev.c
987
cancel_delayed_work_sync(&line->work);
drivers/gpu/drm/amd/amdgpu/aldebaran.c
126
static void aldebaran_async_reset(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/aldebaran.c
130
container_of(work, struct amdgpu_reset_control, reset_work);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
125
static void amdgpu_amdkfd_reset_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
127
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2803
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2805
struct delayed_work *dwork = to_delayed_work(work);
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
75
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
3627
static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
3630
container_of(work, struct amdgpu_device, delayed_init_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
3638
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
3641
container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
4519
INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
101
static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
111
if (!dma_fence_add_callback(fence, &work->cb,
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
122
container_of(__work, struct delayed_work, work);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
123
struct amdgpu_flip_work *work =
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
125
struct amdgpu_device *adev = work->adev;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
126
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
133
for (i = 0; i < work->shared_count; ++i)
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
134
if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
141
(amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
146
(int)(work->target_vblank -
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
148
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
156
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
165
amdgpu_crtc->crtc_id, amdgpu_crtc, work);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
174
struct amdgpu_flip_work *work =
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
179
r = amdgpu_bo_reserve(work->old_abo, true);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
181
amdgpu_bo_unpin(work->old_abo);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
182
amdgpu_bo_unreserve(work->old_abo);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
186
amdgpu_bo_unref(&work->old_abo);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
187
kfree(work->shared);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
188
kfree(work);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
201
struct amdgpu_flip_work *work;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
207
work = kzalloc_obj(*work);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
208
if (work == NULL)
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
211
INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
212
INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
214
work->event = event;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
215
work->adev = adev;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
216
work->crtc_id = amdgpu_crtc->crtc_id;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
217
work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
223
work->old_abo = gem_to_amdgpu_bo(obj);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
224
amdgpu_bo_ref(work->old_abo);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
253
&work->shared_count,
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
254
&work->shared);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
264
work->base = amdgpu_bo_gpu_offset(new_abo);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
265
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
278
amdgpu_crtc->pflip_works = work;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
282
amdgpu_crtc->crtc_id, amdgpu_crtc, work);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
286
amdgpu_display_flip_work_func(&work->flip_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
302
amdgpu_bo_unref(&work->old_abo);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
303
for (i = 0; i < work->shared_count; ++i)
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
304
dma_fence_put(work->shared[i]);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
305
kfree(work->shared);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
306
kfree(work);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
67
void amdgpu_display_hotplug_work_func(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
69
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
70
hotplug_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
94
struct amdgpu_flip_work *work =
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
98
schedule_work(&work->flip_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
40
void amdgpu_display_hotplug_work_func(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
103
amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
105
struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
965
static void amdgpu_debugfs_reset_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
967
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2101
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2106
cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2134
void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2137
container_of(work, struct amdgpu_isolation_work, work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2160
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2308
void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
2311
container_of(work, struct amdgpu_device, gfx.idle_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
366
struct work_struct work;
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
407
struct delayed_work work;
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
645
void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
649
void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
222
static void amdgpu_irq_handle_ih1(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
224
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
237
static void amdgpu_irq_handle_ih2(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
239
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
252
static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
254
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
109
static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
112
container_of(work, struct amdgpu_device, jpeg.idle_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
35
static void amdgpu_jpeg_idle_work_handler(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2549
static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2552
container_of(work, struct ras_ih_data, ih_work);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2890
static void amdgpu_ras_do_recovery(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
2893
container_of(work, struct amdgpu_ras, recovery_work);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
3584
static void amdgpu_ras_do_page_retirement(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
3586
struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
3587
page_retirement_dwork.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
4174
static void amdgpu_ras_counte_dw(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
4176
struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
4177
ras_counte_delay_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
135
struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
137
return queue_work(domain->wq, work);
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
86
void (*async_reset)(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1271
static void amdgpu_userq_restore_worker(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1273
struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1321
void amdgpu_userq_reset_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1323
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
151
static void amdgpu_userq_hang_detect_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
153
struct amdgpu_usermode_queue *queue = container_of(work,
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
155
hang_detect_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
153
void amdgpu_userq_reset_work(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1263
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1266
container_of(work, struct amdgpu_device, uvd.idle_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
137
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
356
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
359
container_of(work, struct amdgpu_device, vce.idle_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
90
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
459
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
462
container_of(work, struct amdgpu_vcn_inst, idle_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
96
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
646
static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
648
struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
614
void amdgpu_vm_pt_free_work(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
100
INIT_WORK(&f->work, amdgpu_tlb_fence_work);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
108
schedule_work(&f->work);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
35
struct work_struct work;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
51
static void amdgpu_tlb_fence_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
53
struct amdgpu_tlb_fence *f = container_of(work, typeof(*f), work);
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
345
static void vpe_idle_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
348
container_of(work, struct amdgpu_device, vpe.idle_work.work);
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
1624
static void amdgpu_xgmi_reset_on_init_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
1627
container_of(work, struct amdgpu_hive_info, reset_on_init_work);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1884
static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1944
INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
6731
static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
6734
struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
6735
struct sq_work *sq_work = container_of(work, struct sq_work, work);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
6751
if (work_pending(&adev->gfx.sq_work.work)) {
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
6755
schedule_work(&adev->gfx.sq_work.work);
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
273
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
275
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
295
static void xgpu_ai_mailbox_req_bad_pages_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
297
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
314
static void xgpu_ai_mailbox_handle_bad_pages_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
316
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
358
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
360
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
383
static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
385
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
402
static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
404
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
513
static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
515
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
110
static void sienna_cichlid_async_reset(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
114
container_of(work, struct amdgpu_reset_control, reset_work);
drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
109
static void smu_v13_0_10_async_reset(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
113
container_of(work, struct amdgpu_reset_control, reset_work);
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
154
adev->vcn.inst[0].idle_work.work.func = vcn_v1_0_idle_work_handler;
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
1868
static void vcn_v1_0_idle_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
1871
container_of(work, struct amdgpu_vcn_inst, idle_work.work);
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
93
static void vcn_v1_0_idle_work_handler(struct work_struct *work);
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
112
static void vcn_v2_5_idle_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
115
container_of(work, struct amdgpu_vcn_inst, idle_work.work);
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
309
adev->vcn.inst[j].idle_work.work.func = vcn_v2_5_idle_work_handler;
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
103
process = container_of(work,
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
96
void debug_event_write_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdkfd/kfd_debug.h
86
void debug_event_write_work_handler(struct work_struct *work);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3247
static void copy_context_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3257
workarea = container_of(work,
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
136
static void interrupt_wq(struct work_struct *work)
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
138
struct kfd_node *dev = container_of(work, struct kfd_node, interrupt_work);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
112
workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1213
static void kfd_process_wq_release(struct work_struct *work)
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1215
struct kfd_process *p = container_of(work, struct kfd_process,
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2108
static void evict_process_worker(struct work_struct *work)
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2114
dwork = to_delayed_work(work);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2161
static void restore_process_worker(struct work_struct *work)
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2167
dwork = to_delayed_work(work);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2319
struct work_struct work;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2325
static void send_exception_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2336
workarea = container_of(work,
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2338
work);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2374
INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2380
schedule_work(&worker.work);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2381
flush_work(&worker.work);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2382
destroy_work_on_stack(&worker.work);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
72
static void evict_process_worker(struct work_struct *work);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
73
static void restore_process_worker(struct work_struct *work);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
99
static void kfd_sdma_activity_worker(struct work_struct *work)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1884
static void svm_range_restore_work(struct work_struct *work)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
1886
struct delayed_work *dwork = to_delayed_work(work);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2403
static void svm_range_deferred_list_work(struct work_struct *work)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2409
svms = container_of(work, struct svm_range_list, deferred_list_work);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3631
static void svm_range_evict_svm_bo_worker(struct work_struct *work)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3637
svm_bo = container_of(work, struct svm_range_bo, eviction_work);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
438
static void svm_range_bo_wq_release(struct work_struct *work)
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
442
svm_bo = container_of(work, struct svm_range_bo, release_work);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
72
static void svm_range_evict_svm_bo_worker(struct work_struct *work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1552
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1564
offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2233
flush_work(&adev->dm.idle_workqueue->work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3996
static void hdmi_hpd_debounce_work(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3999
container_of(to_delayed_work(work), struct amdgpu_dm_connector,
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
4169
INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
4174
queue_work(offload_wq->wq, &offload_work->work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
530
struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
531
struct amdgpu_device *adev = work->adev;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
532
struct dc_stream_state *stream = work->stream;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
533
struct dc_crtc_timing_adjust *adjust = work->adjust;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
540
kfree(work->adjust);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
541
kfree(work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
566
INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
571
queue_work(system_wq, &offload_work->work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
933
static void dm_handle_hpd_work(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
937
dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
150
struct work_struct work;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
165
struct work_struct work;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
179
struct work_struct work;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
306
struct work_struct work;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
341
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
357
crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
424
amdgpu_dm_forward_crc_window(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
435
crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
163
schedule_work(&dm->idle_workqueue->work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
195
static void amdgpu_dm_idle_worker(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
199
idle_work = container_of(work, struct idle_workqueue, work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
241
INIT_WORK(&idle_work->work, amdgpu_dm_idle_worker);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
246
static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
249
container_of(work, struct vblank_control_work, work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
296
struct vblank_control_work *work;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
395
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
396
if (!work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
399
INIT_WORK(&work->work, amdgpu_dm_crtc_vblank_control_worker);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
400
work->dm = dm;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
401
work->acrtc = acrtc;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
402
work->enable = enable;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
406
work->stream = acrtc_state->stream;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
409
queue_work(dm->vblank_control_workqueue, &work->work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
182
static void link_lock(struct hdcp_workqueue *work, bool lock)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
186
for (i = 0; i < work->max_link; i++) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
188
mutex_lock(&work[i].mutex);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
190
mutex_unlock(&work[i].mutex);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
313
static void event_callback(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
317
hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
330
static void event_property_update(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
332
struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue,
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
400
static void event_property_validate(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
403
container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
450
static void event_watchdog_timer(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
454
hdcp_work = container_of(to_delayed_work(work),
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
469
static void event_cpirq(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
473
hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
657
struct hdcp_workqueue *work;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
660
work = container_of(bin_attr, struct hdcp_workqueue, attr);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
661
link_lock(work, true);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
663
memcpy(work->srm_temp + pos, buffer, count);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
665
if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
667
memcpy(work->srm, work->srm_temp, pos + count);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
668
work->srm_size = pos + count;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
669
work->srm_version = srm_version;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
672
link_lock(work, false);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
681
struct hdcp_workqueue *work;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
687
work = container_of(bin_attr, struct hdcp_workqueue, attr);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
689
link_lock(work, true);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
691
srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
710
link_lock(work, false);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
84
void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
85
void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
86
void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
1381
schedule_work(&adev->dm.idle_workqueue->work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
115
static void dm_irq_work_func(struct work_struct *work)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
118
container_of(work, struct amdgpu_dm_irq_handler_data, work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
338
INIT_WORK(&handler_data->work, dm_irq_work_func);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
469
flush_work(&handler->work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
509
flush_work(&handler->work);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
585
if (queue_work(system_highpri_wq, &handler_data->work)) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
611
INIT_WORK(&handler_data_add->work, dm_irq_work_func);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
613
if (queue_work(system_highpri_wq, &handler_data_add->work))
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
87
struct work_struct work;
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
77
struct work_struct work;
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
3010
INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
3031
flush_work(&adev->pm.dpm.thermal.work);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
3073
cancel_work_sync(&adev->pm.dpm.thermal.work);
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
3189
schedule_work(&adev->pm.dpm.thermal.work);
drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
972
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
975
container_of(work, struct amdgpu_device,
drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
976
pm.dpm.thermal.work);
drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
36
void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
7668
schedule_work(&adev->pm.dpm.thermal.work);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
7801
INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
7821
flush_work(&adev->pm.dpm.thermal.work);
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
7863
cancel_work_sync(&adev->pm.dpm.thermal.work);
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
96
static void pp_swctf_delayed_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
99
container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1245
static void smu_throttling_logging_work_fn(struct work_struct *work)
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1247
struct smu_context *smu = container_of(work, struct smu_context,
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1253
static void smu_interrupt_work_fn(struct work_struct *work)
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1255
struct smu_context *smu = container_of(work, struct smu_context,
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1262
static void smu_swctf_delayed_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1265
container_of(work, struct smu_context, swctf_delayed_work.work);
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1569
static void smu_wbrf_delayed_work_handler(struct work_struct *work)
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1571
struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c
34
static void ras_process_retire_page_dwork(struct work_struct *work)
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c
37
container_of(work, struct amdgpu_ras_mgr, retire_page_dwork.work);
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
404
static void adv7511_hpd_work(struct work_struct *work)
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
406
struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work);
drivers/gpu/drm/bridge/analogix/anx7625.c
1612
static void anx7625_work_func(struct work_struct *work)
drivers/gpu/drm/bridge/analogix/anx7625.c
1615
struct anx7625_data *ctx = container_of(work,
drivers/gpu/drm/bridge/analogix/anx7625.c
1616
struct anx7625_data, work);
drivers/gpu/drm/bridge/analogix/anx7625.c
1640
queue_work(ctx->workqueue, &ctx->work);
drivers/gpu/drm/bridge/analogix/anx7625.c
2106
static void hdcp_check_work_func(struct work_struct *work)
drivers/gpu/drm/bridge/analogix/anx7625.c
2114
dwork = to_delayed_work(work);
drivers/gpu/drm/bridge/analogix/anx7625.c
2669
INIT_WORK(&platform->work, anx7625_work_func);
drivers/gpu/drm/bridge/analogix/anx7625.c
2752
queue_work(platform->workqueue, &platform->work);
drivers/gpu/drm/bridge/analogix/anx7625.h
466
struct work_struct work;
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
2292
static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
2297
mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
2368
static void cdns_mhdp_hpd_work(struct work_struct *work)
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
2370
struct cdns_mhdp_device *mhdp = container_of(work,
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
467
static void cdns_mhdp_hdcp_check_work(struct work_struct *work)
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
469
struct delayed_work *d_work = to_delayed_work(work);
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
482
static void cdns_mhdp_hdcp_prop_work(struct work_struct *work)
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
484
struct cdns_mhdp_hdcp *hdcp = container_of(work,
drivers/gpu/drm/bridge/ite-it6505.c
2205
static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
drivers/gpu/drm/bridge/ite-it6505.c
2207
struct it6505 *it6505 = container_of(work, struct it6505,
drivers/gpu/drm/bridge/ite-it6505.c
2244
static void it6505_hdcp_work(struct work_struct *work)
drivers/gpu/drm/bridge/ite-it6505.c
2246
struct it6505 *it6505 = container_of(work, struct it6505,
drivers/gpu/drm/bridge/ite-it6505.c
2247
hdcp_work.work);
drivers/gpu/drm/bridge/ite-it6505.c
2377
static void it6505_link_training_work(struct work_struct *work)
drivers/gpu/drm/bridge/ite-it6505.c
2379
struct it6505 *it6505 = container_of(work, struct it6505, link_works);
drivers/gpu/drm/bridge/ite-it6505.c
2887
static void it6505_extcon_work(struct work_struct *work)
drivers/gpu/drm/bridge/ite-it6505.c
2889
struct it6505 *it6505 = container_of(work, struct it6505, extcon_wq);
drivers/gpu/drm/bridge/ite-it6505.c
2963
static void __maybe_unused it6505_delayed_audio(struct work_struct *work)
drivers/gpu/drm/bridge/ite-it6505.c
2965
struct it6505 *it6505 = container_of(work, struct it6505,
drivers/gpu/drm/bridge/ite-it6505.c
2966
delayed_audio.work);
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
156
schedule_work(<9611uxc->work);
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
164
static void lt9611uxc_hpd_work(struct work_struct *work)
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
166
struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
45
struct work_struct work;
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
820
INIT_WORK(<9611uxc->work, lt9611uxc_hpd_work);
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
866
cancel_work_sync(<9611uxc->work);
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
884
cancel_work_sync(<9611uxc->work);
drivers/gpu/drm/bridge/sil-sii8620.c
2135
static void sii8620_extcon_work(struct work_struct *work)
drivers/gpu/drm/bridge/sil-sii8620.c
2138
container_of(work, struct sii8620, extcon_wq);
drivers/gpu/drm/bridge/synopsys/dw-dp.c
1864
static void dw_dp_hpd_work(struct work_struct *work)
drivers/gpu/drm/bridge/synopsys/dw-dp.c
1866
struct dw_dp *dp = container_of(work, struct dw_dp, hpd_work);
drivers/gpu/drm/bridge/tda998x_drv.c
778
static void tda998x_detect_work(struct work_struct *work)
drivers/gpu/drm/bridge/tda998x_drv.c
781
container_of(work, struct tda998x_priv, detect_work);
drivers/gpu/drm/bridge/ti-sn65dsi83.c
465
static void sn65dsi83_monitor_work(struct work_struct *work)
drivers/gpu/drm/bridge/ti-sn65dsi83.c
467
struct sn65dsi83 *ctx = container_of(to_delayed_work(work),
drivers/gpu/drm/bridge/ti-tfp410.c
103
static void tfp410_hpd_work_func(struct work_struct *work)
drivers/gpu/drm/bridge/ti-tfp410.c
107
dvi = container_of(work, struct tfp410, hpd_work.work);
drivers/gpu/drm/display/drm_dp_cec.c
284
static void drm_dp_cec_unregister_work(struct work_struct *work)
drivers/gpu/drm/display/drm_dp_cec.c
286
struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
drivers/gpu/drm/display/drm_dp_cec.c
287
cec.unregister_work.work);
drivers/gpu/drm/display/drm_dp_helper.c
2287
static void drm_dp_aux_crc_work(struct work_struct *work)
drivers/gpu/drm/display/drm_dp_helper.c
2289
struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
drivers/gpu/drm/display/drm_dp_mst_topology.c
2654
static void drm_dp_mst_link_probe_work(struct work_struct *work)
drivers/gpu/drm/display/drm_dp_mst_topology.c
2657
container_of(work, struct drm_dp_mst_topology_mgr, work);
drivers/gpu/drm/display/drm_dp_mst_topology.c
2704
queue_work(system_long_wq, &mgr->work);
drivers/gpu/drm/display/drm_dp_mst_topology.c
3766
flush_work(&mgr->work);
drivers/gpu/drm/display/drm_dp_mst_topology.c
3847
flush_work(&mgr->work);
drivers/gpu/drm/display/drm_dp_mst_topology.c
4092
queue_work(system_long_wq, &mgr->work);
drivers/gpu/drm/display/drm_dp_mst_topology.c
4096
static void drm_dp_mst_up_req_work(struct work_struct *work)
drivers/gpu/drm/display/drm_dp_mst_topology.c
4099
container_of(work, struct drm_dp_mst_topology_mgr,
drivers/gpu/drm/display/drm_dp_mst_topology.c
5009
static void drm_dp_tx_work(struct work_struct *work)
drivers/gpu/drm/display/drm_dp_mst_topology.c
5011
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
drivers/gpu/drm/display/drm_dp_mst_topology.c
5065
static void drm_dp_delayed_destroy_work(struct work_struct *work)
drivers/gpu/drm/display/drm_dp_mst_topology.c
5068
container_of(work, struct drm_dp_mst_topology_mgr,
drivers/gpu/drm/display/drm_dp_mst_topology.c
5735
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
drivers/gpu/drm/display/drm_dp_mst_topology.c
5771
flush_work(&mgr->work);
drivers/gpu/drm/drm_atomic_helper.c
2087
static void commit_work(struct work_struct *work)
drivers/gpu/drm/drm_atomic_helper.c
2089
struct drm_atomic_state *state = container_of(work,
drivers/gpu/drm/drm_connector.c
204
void drm_connector_free_work_fn(struct work_struct *work)
drivers/gpu/drm/drm_connector.c
208
container_of(work, struct drm_device, mode_config.connector_free_work);
drivers/gpu/drm/drm_crtc_internal.h
198
void drm_connector_free_work_fn(struct work_struct *work);
drivers/gpu/drm/drm_fb_helper.c
217
static void drm_fb_helper_resume_worker(struct work_struct *work)
drivers/gpu/drm/drm_fb_helper.c
219
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
drivers/gpu/drm/drm_fb_helper.c
267
static void drm_fb_helper_damage_work(struct work_struct *work)
drivers/gpu/drm/drm_fb_helper.c
269
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, damage_work);
drivers/gpu/drm/drm_flip_work.c
104
struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
drivers/gpu/drm/drm_flip_work.c
112
spin_lock_irqsave(&work->lock, flags);
drivers/gpu/drm/drm_flip_work.c
113
list_splice_tail(&work->commited, &tasks);
drivers/gpu/drm/drm_flip_work.c
114
INIT_LIST_HEAD(&work->commited);
drivers/gpu/drm/drm_flip_work.c
115
spin_unlock_irqrestore(&work->lock, flags);
drivers/gpu/drm/drm_flip_work.c
121
work->func(work, task->data);
drivers/gpu/drm/drm_flip_work.c
135
void drm_flip_work_init(struct drm_flip_work *work,
drivers/gpu/drm/drm_flip_work.c
138
work->name = name;
drivers/gpu/drm/drm_flip_work.c
139
INIT_LIST_HEAD(&work->queued);
drivers/gpu/drm/drm_flip_work.c
140
INIT_LIST_HEAD(&work->commited);
drivers/gpu/drm/drm_flip_work.c
141
spin_lock_init(&work->lock);
drivers/gpu/drm/drm_flip_work.c
142
work->func = func;
drivers/gpu/drm/drm_flip_work.c
144
INIT_WORK(&work->worker, flip_worker);
drivers/gpu/drm/drm_flip_work.c
154
void drm_flip_work_cleanup(struct drm_flip_work *work)
drivers/gpu/drm/drm_flip_work.c
156
WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
drivers/gpu/drm/drm_flip_work.c
47
static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task)
drivers/gpu/drm/drm_flip_work.c
51
spin_lock_irqsave(&work->lock, flags);
drivers/gpu/drm/drm_flip_work.c
52
list_add_tail(&task->node, &work->queued);
drivers/gpu/drm/drm_flip_work.c
53
spin_unlock_irqrestore(&work->lock, flags);
drivers/gpu/drm/drm_flip_work.c
64
void drm_flip_work_queue(struct drm_flip_work *work, void *val)
drivers/gpu/drm/drm_flip_work.c
71
drm_flip_work_queue_task(work, task);
drivers/gpu/drm/drm_flip_work.c
73
DRM_ERROR("%s could not allocate task!\n", work->name);
drivers/gpu/drm/drm_flip_work.c
74
work->func(work, val);
drivers/gpu/drm/drm_flip_work.c
89
void drm_flip_work_commit(struct drm_flip_work *work,
drivers/gpu/drm/drm_flip_work.c
94
spin_lock_irqsave(&work->lock, flags);
drivers/gpu/drm/drm_flip_work.c
95
list_splice_tail(&work->queued, &work->commited);
drivers/gpu/drm/drm_flip_work.c
96
INIT_LIST_HEAD(&work->queued);
drivers/gpu/drm/drm_flip_work.c
97
spin_unlock_irqrestore(&work->lock, flags);
drivers/gpu/drm/drm_flip_work.c
98
queue_work(wq, &work->worker);
drivers/gpu/drm/drm_framebuffer.c
379
struct work_struct work;
drivers/gpu/drm/drm_framebuffer.c
385
struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
drivers/gpu/drm/drm_framebuffer.c
464
INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
drivers/gpu/drm/drm_framebuffer.c
469
schedule_work(&arg.work);
drivers/gpu/drm/drm_framebuffer.c
470
flush_work(&arg.work);
drivers/gpu/drm/drm_framebuffer.c
471
destroy_work_on_stack(&arg.work);
drivers/gpu/drm/drm_framebuffer.c
819
INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
drivers/gpu/drm/drm_framebuffer.c
821
schedule_work(&arg.work);
drivers/gpu/drm/drm_framebuffer.c
822
flush_work(&arg.work);
drivers/gpu/drm/drm_framebuffer.c
823
destroy_work_on_stack(&arg.work);
drivers/gpu/drm/drm_pagemap.c
783
static void drm_pagemap_dev_unhold_work(struct work_struct *work);
drivers/gpu/drm/drm_pagemap.c
827
static void drm_pagemap_dev_unhold_work(struct work_struct *work)
drivers/gpu/drm/drm_probe_helper.c
756
static void output_poll_execute(struct work_struct *work)
drivers/gpu/drm/drm_probe_helper.c
758
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/gpu/drm/drm_probe_helper.c
868
struct work_struct *work = current_work();
drivers/gpu/drm/drm_probe_helper.c
870
return work && work->func == output_poll_execute;
drivers/gpu/drm/drm_self_refresh_helper.c
68
static void drm_self_refresh_helper_entry_work(struct work_struct *work)
drivers/gpu/drm/drm_self_refresh_helper.c
71
to_delayed_work(work),
drivers/gpu/drm/drm_vblank_work.c
111
int drm_vblank_work_schedule(struct drm_vblank_work *work,
drivers/gpu/drm/drm_vblank_work.c
114
struct drm_vblank_crtc *vblank = work->vblank;
drivers/gpu/drm/drm_vblank_work.c
122
if (work->cancelling)
drivers/gpu/drm/drm_vblank_work.c
131
if (list_empty(&work->node)) {
drivers/gpu/drm/drm_vblank_work.c
135
} else if (work->count == count) {
drivers/gpu/drm/drm_vblank_work.c
142
work->count = count;
drivers/gpu/drm/drm_vblank_work.c
152
ret = kthread_queue_work(vblank->worker, &work->base);
drivers/gpu/drm/drm_vblank_work.c
155
list_del_init(&work->node);
drivers/gpu/drm/drm_vblank_work.c
160
list_add_tail(&work->node, &vblank->pending_work);
drivers/gpu/drm/drm_vblank_work.c
187
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
drivers/gpu/drm/drm_vblank_work.c
189
struct drm_vblank_crtc *vblank = work->vblank;
drivers/gpu/drm/drm_vblank_work.c
194
if (!list_empty(&work->node)) {
drivers/gpu/drm/drm_vblank_work.c
195
list_del_init(&work->node);
drivers/gpu/drm/drm_vblank_work.c
200
work->cancelling++;
drivers/gpu/drm/drm_vblank_work.c
205
if (kthread_cancel_work_sync(&work->base))
drivers/gpu/drm/drm_vblank_work.c
209
work->cancelling--;
drivers/gpu/drm/drm_vblank_work.c
223
void drm_vblank_work_flush(struct drm_vblank_work *work)
drivers/gpu/drm/drm_vblank_work.c
225
struct drm_vblank_crtc *vblank = work->vblank;
drivers/gpu/drm/drm_vblank_work.c
229
wait_event_lock_irq(vblank->work_wait_queue, list_empty(&work->node),
drivers/gpu/drm/drm_vblank_work.c
233
kthread_flush_work(&work->base);
drivers/gpu/drm/drm_vblank_work.c
267
void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
drivers/gpu/drm/drm_vblank_work.c
268
void (*func)(struct kthread_work *work))
drivers/gpu/drm/drm_vblank_work.c
270
kthread_init_work(&work->base, func);
drivers/gpu/drm/drm_vblank_work.c
271
INIT_LIST_HEAD(&work->node);
drivers/gpu/drm/drm_vblank_work.c
272
work->vblank = drm_crtc_vblank_crtc(crtc);
drivers/gpu/drm/drm_vblank_work.c
50
struct drm_vblank_work *work, *next;
drivers/gpu/drm/drm_vblank_work.c
56
list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
drivers/gpu/drm/drm_vblank_work.c
57
if (!drm_vblank_passed(count, work->count))
drivers/gpu/drm/drm_vblank_work.c
60
list_del_init(&work->node);
drivers/gpu/drm/drm_vblank_work.c
62
kthread_queue_work(vblank->worker, &work->base);
drivers/gpu/drm/drm_vblank_work.c
74
struct drm_vblank_work *work, *next;
drivers/gpu/drm/drm_vblank_work.c
81
list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
drivers/gpu/drm/drm_vblank_work.c
82
list_del_init(&work->node);
drivers/gpu/drm/drm_writeback.c
515
static void cleanup_work(struct work_struct *work)
drivers/gpu/drm/drm_writeback.c
517
struct drm_writeback_job *job = container_of(work,
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1472
static void sync_point_worker(struct work_struct *work)
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1474
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
drivers/gpu/drm/exynos/exynos_drm_g2d.c
865
static void g2d_runqueue_worker(struct work_struct *work)
drivers/gpu/drm/exynos/exynos_drm_g2d.c
867
struct g2d_data *g2d = container_of(work, struct g2d_data,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
748
static void exynos_drm_ipp_cleanup_work(struct work_struct *work)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
750
struct exynos_drm_ipp_task *task = container_of(work,
drivers/gpu/drm/exynos/exynos_hdmi.c
1681
static void hdmi_hotplug_work_func(struct work_struct *work)
drivers/gpu/drm/exynos/exynos_hdmi.c
1685
hdata = container_of(work, struct hdmi_context, hotplug_work.work);
drivers/gpu/drm/gma500/cdv_device.c
403
static void cdv_hotplug_work_func(struct work_struct *work)
drivers/gpu/drm/gma500/cdv_device.c
405
struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
drivers/gpu/drm/gma500/opregion.c
173
static void psb_intel_opregion_asle_work(struct work_struct *work)
drivers/gpu/drm/gma500/opregion.c
176
container_of(work, struct psb_intel_opregion, asle_work);
drivers/gpu/drm/gud/gud_connector.c
58
static void gud_connector_backlight_update_status_work(struct work_struct *work)
drivers/gpu/drm/gud/gud_connector.c
60
struct gud_connector *gconn = container_of(work, struct gud_connector, backlight_work);
drivers/gpu/drm/gud/gud_drv.c
481
INIT_WORK(&gdrm->work, gud_flush_work);
drivers/gpu/drm/gud/gud_internal.h
19
struct work_struct work;
drivers/gpu/drm/gud/gud_internal.h
64
void gud_flush_work(struct work_struct *work);
drivers/gpu/drm/gud/gud_pipe.c
365
void gud_flush_work(struct work_struct *work)
drivers/gpu/drm/gud/gud_pipe.c
367
struct gud_device *gdrm = container_of(work, struct gud_device, work);
drivers/gpu/drm/gud/gud_pipe.c
427
queue_work(system_long_wq, &gdrm->work);
drivers/gpu/drm/gud/gud_pipe.c
631
cancel_work_sync(&gdrm->work);
drivers/gpu/drm/i915/display/intel_connector.c
41
static void intel_connector_modeset_retry_work_fn(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_connector.c
43
struct intel_connector *connector = container_of(work, typeof(*connector),
drivers/gpu/drm/i915/display/intel_crtc.c
458
struct drm_vblank_work *work = to_drm_vblank_work(base);
drivers/gpu/drm/i915/display/intel_crtc.c
460
container_of(work, typeof(*crtc_state), vblank_work);
drivers/gpu/drm/i915/display/intel_cursor.c
790
struct drm_vblank_work *work = to_drm_vblank_work(base);
drivers/gpu/drm/i915/display/intel_cursor.c
792
container_of(work, typeof(*plane_state), unpin_work);
drivers/gpu/drm/i915/display/intel_display.c
7208
static void intel_atomic_cleanup_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_display.c
7211
container_of(work, struct intel_atomic_state, cleanup_work);
drivers/gpu/drm/i915/display/intel_display.c
7648
static void intel_atomic_commit_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_display.c
7651
container_of(work, struct intel_atomic_state, base.commit_work);
drivers/gpu/drm/i915/display/intel_display_irq.c
1771
static void intel_display_vblank_notify_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_display_irq.c
1774
container_of(work, typeof(*display), irq.vblank_notify_work);
drivers/gpu/drm/i915/display/intel_display_power.c
676
intel_display_power_put_async_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_display_power.c
678
struct intel_display *display = container_of(work, struct intel_display,
drivers/gpu/drm/i915/display/intel_display_power.c
679
power.domains.async_put_work.work);
drivers/gpu/drm/i915/display/intel_display_types.h
1528
struct delayed_work work;
drivers/gpu/drm/i915/display/intel_display_types.h
1756
struct work_struct work;
drivers/gpu/drm/i915/display/intel_dmc.c
1347
static void dmc_load_work_fn(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_dmc.c
1349
struct intel_dmc *dmc = container_of(work, typeof(*dmc), work);
drivers/gpu/drm/i915/display/intel_dmc.c
1426
INIT_WORK(&dmc->work, dmc_load_work_fn);
drivers/gpu/drm/i915/display/intel_dmc.c
1447
queue_work(display->wq.unordered, &dmc->work);
drivers/gpu/drm/i915/display/intel_dmc.c
1471
flush_work(&dmc->work);
drivers/gpu/drm/i915/display/intel_dmc.c
1486
flush_work(&dmc->work);
drivers/gpu/drm/i915/display/intel_dmc.c
66
struct work_struct work;
drivers/gpu/drm/i915/display/intel_dmc_wl.c
161
queue_delayed_work(display->wq.unordered, &wl->work,
drivers/gpu/drm/i915/display/intel_dmc_wl.c
165
static void intel_dmc_wl_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_dmc_wl.c
168
container_of(work, struct intel_dmc_wl, work.work);
drivers/gpu/drm/i915/display/intel_dmc_wl.c
335
INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
drivers/gpu/drm/i915/display/intel_dmc_wl.c
431
flush_delayed_work(&wl->work);
drivers/gpu/drm/i915/display/intel_dmc_wl.c
454
cancel_delayed_work(&wl->work);
drivers/gpu/drm/i915/display/intel_dmc_wl.h
29
struct delayed_work work;
drivers/gpu/drm/i915/display/intel_drrs.c
129
mod_delayed_work(display->wq.unordered, &crtc->drrs.work, msecs_to_jiffies(1000));
drivers/gpu/drm/i915/display/intel_drrs.c
209
cancel_delayed_work_sync(&crtc->drrs.work);
drivers/gpu/drm/i915/display/intel_drrs.c
212
static void intel_drrs_downclock_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_drrs.c
214
struct intel_crtc *crtc = container_of(work, typeof(*crtc), drrs.work.work);
drivers/gpu/drm/i915/display/intel_drrs.c
256
cancel_delayed_work(&crtc->drrs.work);
drivers/gpu/drm/i915/display/intel_drrs.c
306
INIT_DELAYED_WORK(&crtc->drrs.work, intel_drrs_downclock_work);
drivers/gpu/drm/i915/display/intel_encoder.c
13
static void intel_encoder_link_check_work_fn(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_encoder.c
16
container_of(work, typeof(*encoder), link_check_work.work);
drivers/gpu/drm/i915/display/intel_fbc.c
2136
static void intel_fbc_underrun_work_fn(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_fbc.c
2138
struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
drivers/gpu/drm/i915/display/intel_frontbuffer.c
172
static void intel_frontbuffer_flush_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_frontbuffer.c
175
container_of(work, struct intel_frontbuffer, flush_work);
drivers/gpu/drm/i915/display/intel_hdcp.c
1189
static void intel_hdcp_prop_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_hdcp.c
1191
struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
drivers/gpu/drm/i915/display/intel_hdcp.c
2231
static void intel_hdcp_check_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_hdcp.c
2233
struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
drivers/gpu/drm/i915/display/intel_hotplug.c
195
mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
drivers/gpu/drm/i915/display/intel_hotplug.c
202
return mod_delayed_work(display->wq.unordered, work, delay);
drivers/gpu/drm/i915/display/intel_hotplug.c
206
queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
drivers/gpu/drm/i915/display/intel_hotplug.c
213
return queue_delayed_work(display->wq.unordered, work, delay);
drivers/gpu/drm/i915/display/intel_hotplug.c
217
queue_detection_work(struct intel_display *display, struct work_struct *work)
drivers/gpu/drm/i915/display/intel_hotplug.c
224
return queue_work(display->wq.unordered, work);
drivers/gpu/drm/i915/display/intel_hotplug.c
269
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_hotplug.c
272
container_of(work, typeof(*display), hotplug.reenable_work.work);
drivers/gpu/drm/i915/display/intel_hotplug.c
390
static void i915_digport_work_func(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_hotplug.c
393
container_of(work, struct intel_display, hotplug.dig_port_work);
drivers/gpu/drm/i915/display/intel_hotplug.c
468
static void i915_hotplug_work_func(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_hotplug.c
471
container_of(work, struct intel_display, hotplug.hotplug_work.work);
drivers/gpu/drm/i915/display/intel_hotplug.c
782
static void i915_hpd_poll_init_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_hotplug.c
785
container_of(work, typeof(*display), hotplug.poll_init_work);
drivers/gpu/drm/i915/display/intel_opregion.c
614
static void asle_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_opregion.c
617
container_of(work, struct intel_opregion, asle_work);
drivers/gpu/drm/i915/display/intel_psr.c
1197
static void tgl_dc3co_disable_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_psr.c
1200
container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
drivers/gpu/drm/i915/display/intel_psr.c
2403
cancel_work_sync(&intel_dp->psr.work);
drivers/gpu/drm/i915/display/intel_psr.c
2434
cancel_work_sync(&psr->work);
drivers/gpu/drm/i915/display/intel_psr.c
3454
static void intel_psr_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_psr.c
3457
container_of(work, typeof(*intel_dp), psr.work);
drivers/gpu/drm/i915/display/intel_psr.c
3637
queue_work(display->wq.unordered, &intel_dp->psr.work);
drivers/gpu/drm/i915/display/intel_psr.c
3741
INIT_WORK(&intel_dp->psr.work, intel_psr_work);
drivers/gpu/drm/i915/display/intel_psr.c
3986
static void psr_dc5_dc6_wa_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_psr.c
3988
struct intel_display *display = container_of(work, typeof(*display),
drivers/gpu/drm/i915/display/intel_psr.c
479
queue_work(display->wq.unordered, &intel_dp->psr.work);
drivers/gpu/drm/i915/display/intel_tc.c
1822
static void intel_tc_port_link_reset_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_tc.c
1825
container_of(work, struct intel_tc_port, link_reset_work.work);
drivers/gpu/drm/i915/display/intel_tc.c
1892
static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
drivers/gpu/drm/i915/display/intel_tc.c
1895
container_of(work, struct intel_tc_port, disconnect_phy_work.work);
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
45
.work = clflush_work,
drivers/gpu/drm/i915/gem/i915_gem_context.c
1280
static void i915_gem_context_release_work(struct work_struct *work)
drivers/gpu/drm/i915/gem/i915_gem_context.c
1282
struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
drivers/gpu/drm/i915/gem/i915_gem_object.c
437
static void __i915_gem_free_work(struct work_struct *work)
drivers/gpu/drm/i915/gem/i915_gem_object.c
440
container_of(work, struct drm_i915_private, mm.free_work);
drivers/gpu/drm/i915/gem/i915_gem_pm.h
16
void i915_gem_idle_work_handler(struct work_struct *work);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
295
struct work_struct work;
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
348
static void __memcpy_work(struct work_struct *work)
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
351
container_of(work, typeof(*copy_work), work);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
410
INIT_WORK(©_work->work, __memcpy_work);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
411
queue_work(system_unbound_wq, ©_work->work);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
434
i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work,
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
439
spin_lock_init(&work->lock);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
440
dma_fence_init(&work->fence, &dma_fence_memcpy_ops, &work->lock, 0, 0);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
441
dma_fence_get(&work->fence);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
442
ret = dma_fence_add_callback(dep, &work->cb, __memcpy_cb);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
451
return &work->fence;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
185
struct kthread_work work;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
190
static void __live_parallel_switch1(struct kthread_work *work)
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
193
container_of(work, typeof(*arg), work);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
239
static void __live_parallel_switchN(struct kthread_work *work)
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
242
container_of(work, typeof(*arg), work);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
394
kthread_init_work(&data[n].work, *fn);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
395
kthread_queue_work(data[n].worker, &data[n].work);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
400
kthread_flush_work(&data[n].work);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
172
static void signal_irq_work(struct irq_work *work)
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
174
struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
143
container_of(wrk, typeof(*engine), heartbeat.work.work);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
250
if (cancel_delayed_work(&engine->heartbeat.work))
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
275
INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
65
mod_delayed_work(system_highpri_wq, &engine->heartbeat.work, delay + 1);
drivers/gpu/drm/i915/gt/intel_engine_types.h
446
struct delayed_work work;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2214
struct work_struct work;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2219
static void execlists_capture_work(struct work_struct *work)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2221
struct execlists_capture *cap = container_of(work, typeof(*cap), work);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2375
INIT_WORK(&cap->work, execlists_capture_work);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2376
queue_work(i915->unordered_wq, &cap->work);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3601
container_of(wrk, typeof(*ve), rcu.work);
drivers/gpu/drm/i915/gt/intel_gt.c
48
INIT_WORK(>->watchdog.work, intel_gt_watchdog_work);
drivers/gpu/drm/i915/gt/intel_gt.h
201
void intel_gt_watchdog_work(struct work_struct *work);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
122
queue_delayed_work(gt->i915->unordered_wq, &pool->work,
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
229
INIT_DELAYED_WORK(&pool->work, pool_free_work);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
239
} while (cancel_delayed_work_sync(&pool->work));
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
91
container_of(wrk, typeof(*pool), work.work);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
95
queue_delayed_work(gt->i915->unordered_wq, &pool->work,
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
19
struct delayed_work work;
drivers/gpu/drm/i915/gt/intel_gt_requests.c
205
static void retire_work_handler(struct work_struct *work)
drivers/gpu/drm/i915/gt/intel_gt_requests.c
208
container_of(work, typeof(*gt), requests.retire_work.work);
drivers/gpu/drm/i915/gt/intel_gt_requests.c
236
flush_work(>->watchdog.work);
drivers/gpu/drm/i915/gt/intel_gt_requests.c
239
void intel_gt_watchdog_work(struct work_struct *work)
drivers/gpu/drm/i915/gt/intel_gt_requests.c
242
container_of(work, typeof(*gt), watchdog.work);
drivers/gpu/drm/i915/gt/intel_gt_requests.c
51
flush_delayed_work(&engine->wakeref.work);
drivers/gpu/drm/i915/gt/intel_gt_requests.c
60
static void engine_retire(struct work_struct *work)
drivers/gpu/drm/i915/gt/intel_gt_requests.c
63
container_of(work, typeof(*engine), retire_work);
drivers/gpu/drm/i915/gt/intel_gt_types.h
146
struct work_struct work;
drivers/gpu/drm/i915/gt/intel_gtt.c
234
static void __i915_vm_release(struct work_struct *work)
drivers/gpu/drm/i915/gt/intel_gtt.c
237
container_of(work, struct i915_address_space, release_work);
drivers/gpu/drm/i915/gt/intel_reset.c
1677
static void intel_wedge_me(struct work_struct *work)
drivers/gpu/drm/i915/gt/intel_reset.c
1679
struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
drivers/gpu/drm/i915/gt/intel_reset.c
1693
INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
drivers/gpu/drm/i915/gt/intel_reset.c
1694
queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout);
drivers/gpu/drm/i915/gt/intel_reset.c
1699
cancel_delayed_work_sync(&w->work);
drivers/gpu/drm/i915/gt/intel_reset.c
1700
destroy_delayed_work_on_stack(&w->work);
drivers/gpu/drm/i915/gt/intel_reset.h
65
struct delayed_work work;
drivers/gpu/drm/i915/gt/intel_rps.c
1070
queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);
drivers/gpu/drm/i915/gt/intel_rps.c
143
queue_work(gt->i915->unordered_wq, &rps->work);
drivers/gpu/drm/i915/gt/intel_rps.c
148
queue_work(gt->i915->unordered_wq, &rps->work);
drivers/gpu/drm/i915/gt/intel_rps.c
170
cancel_work_sync(&rps->work);
drivers/gpu/drm/i915/gt/intel_rps.c
1820
static void rps_work(struct work_struct *work)
drivers/gpu/drm/i915/gt/intel_rps.c
1822
struct intel_rps *rps = container_of(work, typeof(*rps), work);
drivers/gpu/drm/i915/gt/intel_rps.c
1925
queue_work(gt->i915->unordered_wq, &rps->work);
drivers/gpu/drm/i915/gt/intel_rps.c
1942
queue_work(gt->i915->unordered_wq, &rps->work);
drivers/gpu/drm/i915/gt/intel_rps.c
1996
INIT_WORK(&rps->work, rps_work);
drivers/gpu/drm/i915/gt/intel_rps.c
259
cancel_work_sync(&rps->work);
drivers/gpu/drm/i915/gt/intel_rps.c
985
queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);
drivers/gpu/drm/i915/gt/intel_rps_types.h
63
struct work_struct work;
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
30
queue_work(gt->i915->unordered_wq, >->rps.work);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
31
flush_work(>->rps.work);
drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
203
flush_delayed_work(&engine->heartbeat.work);
drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
204
if (!delayed_work_pending(&engine->heartbeat.work)) {
drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
216
flush_delayed_work(&engine->heartbeat.work);
drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
217
if (delayed_work_pending(&engine->heartbeat.work)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
3473
struct kthread_work work;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3536
static void smoke_crescendo_work(struct kthread_work *work)
drivers/gpu/drm/i915/gt/selftest_execlists.c
3538
struct preempt_smoke *smoke = container_of(work, typeof(*smoke), work);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3586
kthread_init_work(&arg[id].work, smoke_crescendo_work);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3587
kthread_queue_work(worker[id], &arg[id].work);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3595
kthread_flush_work(&arg[id].work);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1039
kthread_init_work(&threads[tmp].work, active_engine);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1041
&threads[tmp].work);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1195
kthread_flush_work(&threads[tmp].work);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
855
struct kthread_work work;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
891
static void active_engine(struct kthread_work *work)
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
894
struct active_engine *arg = container_of(work, typeof(*arg), work);
drivers/gpu/drm/i915/gt/selftest_rps.c
1054
saved_work = rps->work.func;
drivers/gpu/drm/i915/gt/selftest_rps.c
1055
rps->work.func = dummy_rps_work;
drivers/gpu/drm/i915/gt/selftest_rps.c
1095
rps->work.func = saved_work;
drivers/gpu/drm/i915/gt/selftest_rps.c
1162
saved_work = rps->work.func;
drivers/gpu/drm/i915/gt/selftest_rps.c
1163
rps->work.func = dummy_rps_work;
drivers/gpu/drm/i915/gt/selftest_rps.c
1247
rps->work.func = saved_work;
drivers/gpu/drm/i915/gt/selftest_rps.c
237
saved_work = rps->work.func;
drivers/gpu/drm/i915/gt/selftest_rps.c
238
rps->work.func = dummy_rps_work;
drivers/gpu/drm/i915/gt/selftest_rps.c
364
rps->work.func = saved_work;
drivers/gpu/drm/i915/gt/selftest_rps.c
401
saved_work = rps->work.func;
drivers/gpu/drm/i915/gt/selftest_rps.c
402
rps->work.func = dummy_rps_work;
drivers/gpu/drm/i915/gt/selftest_rps.c
501
rps->work.func = saved_work;
drivers/gpu/drm/i915/gt/selftest_rps.c
632
saved_work = rps->work.func;
drivers/gpu/drm/i915/gt/selftest_rps.c
633
rps->work.func = dummy_rps_work;
drivers/gpu/drm/i915/gt/selftest_rps.c
737
rps->work.func = saved_work;
drivers/gpu/drm/i915/gt/selftest_rps.c
771
saved_work = rps->work.func;
drivers/gpu/drm/i915/gt/selftest_rps.c
772
rps->work.func = dummy_rps_work;
drivers/gpu/drm/i915/gt/selftest_rps.c
875
rps->work.func = saved_work;
drivers/gpu/drm/i915/gt/selftest_slpc.c
21
struct kthread_work work;
drivers/gpu/drm/i915/gt/selftest_slpc.c
488
static void slpc_spinner_thread(struct kthread_work *work)
drivers/gpu/drm/i915/gt/selftest_slpc.c
490
struct slpc_thread *thread = container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/gt/selftest_slpc.c
515
kthread_init_work(&threads[i].work, slpc_spinner_thread);
drivers/gpu/drm/i915/gt/selftest_slpc.c
516
kthread_queue_work(threads[i].worker, &threads[i].work);
drivers/gpu/drm/i915/gt/selftest_slpc.c
525
kthread_flush_work(&threads[i].work);
drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
307
queue_work(gsc->wq, &gsc->work);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
128
INIT_WORK(&gsc->work, gsc_work);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
16
static void gsc_work(struct work_struct *work)
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
18
struct intel_gsc_uc *gsc = container_of(work, typeof(*gsc), work);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
258
flush_work(&gsc->work);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
279
flush_work(&gsc->work);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
315
queue_work(gsc->wq, &gsc->work);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
45
struct work_struct work;
drivers/gpu/drm/i915/gt/uc/intel_guc.h
273
struct delayed_work work;
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
481
static void copy_debug_logs_work(struct work_struct *work)
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
484
container_of(work, struct intel_guc_log, relay.flush_work);
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
255
static void slpc_boost_work(struct work_struct *work)
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
257
struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1388
mod_delayed_work(system_highpri_wq, &guc->timestamp.work, guc->timestamp.ping_delay);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1432
cancel_delayed_work(&guc->timestamp.work);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1434
cancel_delayed_work_sync(&guc->timestamp.work);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1523
timestamp.work.work);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3344
container_of(wrk, typeof(*ce), guc_state.sched_disable_delay_work.work);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3402
__delay_sched_disable(&ce->guc_state.sched_disable_delay_work.work);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4869
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
drivers/gpu/drm/i915/i915_active.c
179
struct i915_active *ref = container_of(wrk, typeof(*ref), work);
drivers/gpu/drm/i915/i915_active.c
196
queue_work(system_unbound_wq, &ref->work);
drivers/gpu/drm/i915/i915_active.c
358
INIT_WORK(&ref->work, active_work);
drivers/gpu/drm/i915/i915_active.c
360
lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
drivers/gpu/drm/i915/i915_active.c
604
flush_work(&ref->work);
drivers/gpu/drm/i915/i915_active.c
743
GEM_BUG_ON(work_pending(&ref->work));
drivers/gpu/drm/i915/i915_active_types.h
42
struct work_struct work;
drivers/gpu/drm/i915/i915_irq.c
156
static void ivb_parity_work(struct work_struct *work)
drivers/gpu/drm/i915/i915_irq.c
159
container_of(work, typeof(*dev_priv), l3_parity.error_work);
drivers/gpu/drm/i915/i915_request.c
186
struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
drivers/gpu/drm/i915/i915_request.c
202
work.node.llist)
drivers/gpu/drm/i915/i915_request.c
203
fn(&cb->work);
drivers/gpu/drm/i915/i915_request.c
286
queue_work(gt->i915->unordered_wq, >->watchdog.work);
drivers/gpu/drm/i915/i915_request.c
518
init_irq_work(&cb->work, irq_execute_cb);
drivers/gpu/drm/i915/i915_request.c
533
if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
drivers/gpu/drm/i915/i915_request.c
55
struct irq_work work;
drivers/gpu/drm/i915/i915_sw_fence.c
414
struct irq_work work;
drivers/gpu/drm/i915/i915_sw_fence.c
467
irq_work_queue(&cb->work);
drivers/gpu/drm/i915/i915_sw_fence.c
473
container_of(wrk, typeof(*cb), work);
drivers/gpu/drm/i915/i915_sw_fence.c
523
init_irq_work(&timer->work, irq_i915_sw_fence_work);
drivers/gpu/drm/i915/i915_sw_fence_work.c
16
static void fence_work(struct work_struct *work)
drivers/gpu/drm/i915/i915_sw_fence_work.c
18
struct dma_fence_work *f = container_of(work, typeof(*f), work);
drivers/gpu/drm/i915/i915_sw_fence_work.c
20
f->ops->work(f);
drivers/gpu/drm/i915/i915_sw_fence_work.c
39
fence_work(&f->work);
drivers/gpu/drm/i915/i915_sw_fence_work.c
41
queue_work(system_unbound_wq, &f->work);
drivers/gpu/drm/i915/i915_sw_fence_work.c
90
INIT_WORK(&f->work, fence_work);
drivers/gpu/drm/i915/i915_sw_fence_work.h
20
void (*work)(struct dma_fence_work *f);
drivers/gpu/drm/i915/i915_sw_fence_work.h
31
struct work_struct work;
drivers/gpu/drm/i915/i915_vma.c
1437
struct i915_vma_work *work = NULL;
drivers/gpu/drm/i915/i915_vma.c
1475
work = i915_vma_work();
drivers/gpu/drm/i915/i915_vma.c
1476
if (!work) {
drivers/gpu/drm/i915/i915_vma.c
1481
work->vm = vma->vm;
drivers/gpu/drm/i915/i915_vma.c
1487
dma_fence_work_chain(&work->base, moving);
drivers/gpu/drm/i915/i915_vma.c
1492
&work->stash,
drivers/gpu/drm/i915/i915_vma.c
1497
err = i915_vm_map_pt_stash(vma->vm, &work->stash);
drivers/gpu/drm/i915/i915_vma.c
1571
flags, work, vma_res);
drivers/gpu/drm/i915/i915_vma.c
1600
if (work) {
drivers/gpu/drm/i915/i915_vma.c
1610
dma_fence_work_commit(&work->base);
drivers/gpu/drm/i915/i915_vma.c
1612
dma_fence_work_commit_imm(&work->base);
drivers/gpu/drm/i915/i915_vma.c
357
static void __vma_bind(struct dma_fence_work *work)
drivers/gpu/drm/i915/i915_vma.c
359
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
drivers/gpu/drm/i915/i915_vma.c
375
static void __vma_release(struct dma_fence_work *work)
drivers/gpu/drm/i915/i915_vma.c
377
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
drivers/gpu/drm/i915/i915_vma.c
389
.work = __vma_bind,
drivers/gpu/drm/i915/i915_vma.c
477
struct i915_vma_work *work,
drivers/gpu/drm/i915/i915_vma.c
515
if (work && bind_flags & vma->vm->bind_async_flags)
drivers/gpu/drm/i915/i915_vma.c
517
&work->base.chain,
drivers/gpu/drm/i915/i915_vma.c
541
if (work && bind_flags & vma->vm->bind_async_flags) {
drivers/gpu/drm/i915/i915_vma.c
544
work->vma_res = i915_vma_resource_get(vma->resource);
drivers/gpu/drm/i915/i915_vma.c
545
work->pat_index = pat_index;
drivers/gpu/drm/i915/i915_vma.c
546
work->flags = bind_flags;
drivers/gpu/drm/i915/i915_vma.c
557
prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
drivers/gpu/drm/i915/i915_vma.c
559
__i915_sw_fence_await_dma_fence(&work->base.chain,
drivers/gpu/drm/i915/i915_vma.c
561
&work->cb);
drivers/gpu/drm/i915/i915_vma.c
565
work->base.dma.error = 0; /* enable the queue_work() */
drivers/gpu/drm/i915/i915_vma.c
566
work->obj = i915_gem_object_get(vma->obj);
drivers/gpu/drm/i915/i915_vma.h
255
struct i915_vma_work *work,
drivers/gpu/drm/i915/i915_vma_resource.c
173
static void i915_vma_resource_unbind_work(struct work_struct *work)
drivers/gpu/drm/i915/i915_vma_resource.c
176
container_of(work, typeof(*vma_res), work);
drivers/gpu/drm/i915/i915_vma_resource.c
202
i915_vma_resource_unbind_work(&vma_res->work);
drivers/gpu/drm/i915/i915_vma_resource.c
204
INIT_WORK(&vma_res->work, i915_vma_resource_unbind_work);
drivers/gpu/drm/i915/i915_vma_resource.c
205
queue_work(system_unbound_wq, &vma_res->work);
drivers/gpu/drm/i915/i915_vma_resource.h
105
struct work_struct work;
drivers/gpu/drm/i915/intel_wakeref.c
114
INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
drivers/gpu/drm/i915/intel_wakeref.c
115
lockdep_init_map(&wf->work.work.lockdep_map,
drivers/gpu/drm/i915/intel_wakeref.c
116
"wakeref.work", &key->work, 0);
drivers/gpu/drm/i915/intel_wakeref.c
78
INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));
drivers/gpu/drm/i915/intel_wakeref.c
82
mod_delayed_work(wf->i915->unordered_wq, &wf->work,
drivers/gpu/drm/i915/intel_wakeref.c
92
struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);
drivers/gpu/drm/i915/intel_wakeref.h
229
flush_delayed_work(&wf->work);
drivers/gpu/drm/i915/intel_wakeref.h
49
struct delayed_work work;
drivers/gpu/drm/i915/intel_wakeref.h
58
struct lock_class_key work;
drivers/gpu/drm/i915/pxp/intel_pxp_session.c
150
static void pxp_session_work(struct work_struct *work)
drivers/gpu/drm/i915/pxp/intel_pxp_session.c
152
struct intel_pxp *pxp = container_of(work, typeof(*pxp), session_work);
drivers/gpu/drm/i915/selftests/i915_active.c
353
flush_work(&ref->work);
drivers/gpu/drm/i915/selftests/i915_request.c
1460
struct kthread_work work;
drivers/gpu/drm/i915/selftests/i915_request.c
1465
static void __live_parallel_engine1(struct kthread_work *work)
drivers/gpu/drm/i915/selftests/i915_request.c
1468
container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
1503
static void __live_parallel_engineN(struct kthread_work *work)
drivers/gpu/drm/i915/selftests/i915_request.c
1506
container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
1555
static void __live_parallel_spin(struct kthread_work *work)
drivers/gpu/drm/i915/selftests/i915_request.c
1558
container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
1660
kthread_init_work(&threads[idx].work, *fn);
drivers/gpu/drm/i915/selftests/i915_request.c
1661
kthread_queue_work(worker, &threads[idx].work);
drivers/gpu/drm/i915/selftests/i915_request.c
1672
kthread_flush_work(&threads[idx].work);
drivers/gpu/drm/i915/selftests/i915_request.c
1817
kthread_init_work(&threads[i].work,
drivers/gpu/drm/i915/selftests/i915_request.c
1819
kthread_queue_work(worker, &threads[i].work);
drivers/gpu/drm/i915/selftests/i915_request.c
1840
kthread_flush_work(&threads[i].work);
drivers/gpu/drm/i915/selftests/i915_request.c
2958
struct kthread_work work;
drivers/gpu/drm/i915/selftests/i915_request.c
2963
static void p_sync0(struct kthread_work *work)
drivers/gpu/drm/i915/selftests/i915_request.c
2965
struct p_thread *thread = container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
3037
static void p_sync1(struct kthread_work *work)
drivers/gpu/drm/i915/selftests/i915_request.c
3039
struct p_thread *thread = container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
305
struct kthread_work work;
drivers/gpu/drm/i915/selftests/i915_request.c
311
static void __igt_breadcrumbs_smoketest(struct kthread_work *work)
drivers/gpu/drm/i915/selftests/i915_request.c
3114
static void p_many(struct kthread_work *work)
drivers/gpu/drm/i915/selftests/i915_request.c
3116
struct p_thread *thread = container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
313
struct smoke_thread *thread = container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
3233
kthread_init_work(&engines[idx].work, *fn);
drivers/gpu/drm/i915/selftests/i915_request.c
3234
kthread_queue_work(worker, &engines[idx].work);
drivers/gpu/drm/i915/selftests/i915_request.c
3245
kthread_flush_work(&engines[idx].work);
drivers/gpu/drm/i915/selftests/i915_request.c
508
kthread_init_work(&threads[n].work,
drivers/gpu/drm/i915/selftests/i915_request.c
510
kthread_queue_work(worker, &threads[n].work);
drivers/gpu/drm/i915/selftests/i915_request.c
519
kthread_flush_work(&threads[n].work);
drivers/gpu/drm/i915/selftests/i915_sw_fence.c
506
struct work_struct work;
drivers/gpu/drm/i915/selftests/i915_sw_fence.c
512
static void task_ipc(struct work_struct *work)
drivers/gpu/drm/i915/selftests/i915_sw_fence.c
514
struct task_ipc *ipc = container_of(work, typeof(*ipc), work);
drivers/gpu/drm/i915/selftests/i915_sw_fence.c
549
INIT_WORK_ONSTACK(&ipc.work, task_ipc);
drivers/gpu/drm/i915/selftests/i915_sw_fence.c
550
queue_work(wq, &ipc.work);
drivers/gpu/drm/i915/selftests/i915_sw_fence.c
568
flush_work(&ipc.work);
drivers/gpu/drm/i915/selftests/i915_sw_fence.c
569
destroy_work_on_stack(&ipc.work);
drivers/gpu/drm/imagination/pvr_device.h
248
struct delayed_work work;
drivers/gpu/drm/imagination/pvr_power.c
140
queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
drivers/gpu/drm/imagination/pvr_power.c
207
pvr_watchdog_worker(struct work_struct *work)
drivers/gpu/drm/imagination/pvr_power.c
209
struct pvr_device *pvr_dev = container_of(work, struct pvr_device,
drivers/gpu/drm/imagination/pvr_power.c
210
watchdog.work.work);
drivers/gpu/drm/imagination/pvr_power.c
236
queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
drivers/gpu/drm/imagination/pvr_power.c
252
INIT_DELAYED_WORK(&pvr_dev->watchdog.work, pvr_watchdog_worker);
drivers/gpu/drm/imagination/pvr_power.c
618
cancel_delayed_work_sync(&pvr_dev->watchdog.work);
drivers/gpu/drm/imagination/pvr_power.c
98
cancel_delayed_work_sync(&pvr_dev->watchdog.work);
drivers/gpu/drm/lima/lima_sched.c
498
static void lima_sched_recover_work(struct work_struct *work)
drivers/gpu/drm/lima/lima_sched.c
501
container_of(work, struct lima_sched_pipe, recover_work);
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
1888
kthread_init_work(&dpu_crtc->frame_events[i].work,
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
660
static void dpu_crtc_frame_event_work(struct kthread_work *work)
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
662
struct dpu_crtc_frame_event *fevent = container_of(work,
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
663
struct dpu_crtc_frame_event, work);
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
751
kthread_queue_work(priv->kms->event_thread[crtc_id].worker, &fevent->work);
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
111
struct kthread_work work;
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
1613
static void dpu_encoder_off_work(struct work_struct *work)
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
1615
struct dpu_encoder_virt *dpu_enc = container_of(work,
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
1616
struct dpu_encoder_virt, delayed_off_work.work);
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
115
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
118
container_of(work, struct mdp4_crtc, unref_cursor_work);
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
165
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
168
container_of(work, struct mdp5_crtc, unref_cursor_work);
drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
53
static void _msm_disp_snapshot_work(struct kthread_work *work)
drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
55
struct msm_kms *kms = container_of(work, struct msm_kms, dump_work);
drivers/gpu/drm/msm/dsi/dsi_host.c
1549
static void dsi_err_worker(struct work_struct *work)
drivers/gpu/drm/msm/dsi/dsi_host.c
1552
container_of(work, struct msm_dsi_host, err_work);
drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
479
msm_hdmi_hotplug_work(struct work_struct *work)
drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
482
container_of(work, struct hdmi_bridge, hpd_work);
drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
1234
static void msm_hdmi_hdcp_auth_work(struct work_struct *work)
drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
1236
struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
396
static void msm_hdmi_hdcp_reauth_work(struct work_struct *work)
drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
398
struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
drivers/gpu/drm/msm/msm_atomic.c
104
static void msm_atomic_pending_work(struct kthread_work *work)
drivers/gpu/drm/msm/msm_atomic.c
106
struct msm_pending_timer *timer = container_of(work,
drivers/gpu/drm/msm/msm_atomic.c
107
struct msm_pending_timer, work.work);
drivers/gpu/drm/msm/msm_atomic.c
126
msm_hrtimer_work_init(&timer->work, timer->worker,
drivers/gpu/drm/msm/msm_atomic.c
272
msm_hrtimer_queue_work(&timer->work, wakeup_time,
drivers/gpu/drm/msm/msm_drv.h
488
struct kthread_work work;
drivers/gpu/drm/msm/msm_drv.h
492
void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
drivers/gpu/drm/msm/msm_drv.h
495
void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
drivers/gpu/drm/msm/msm_fence.c
29
static void deadline_work(struct kthread_work *work)
drivers/gpu/drm/msm/msm_fence.c
31
struct msm_fence_context *fctx = container_of(work,
drivers/gpu/drm/msm/msm_gpu.c
463
static void recover_worker(struct kthread_work *work)
drivers/gpu/drm/msm/msm_gpu.c
465
struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
drivers/gpu/drm/msm/msm_gpu.c
859
static void retire_worker(struct kthread_work *work)
drivers/gpu/drm/msm/msm_gpu.c
861
struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
drivers/gpu/drm/msm/msm_gpu_devfreq.c
130
static void msm_devfreq_boost_work(struct kthread_work *work);
drivers/gpu/drm/msm/msm_gpu_devfreq.c
131
static void msm_devfreq_idle_work(struct kthread_work *work);
drivers/gpu/drm/msm/msm_gpu_devfreq.c
208
kthread_cancel_work_sync(&df->idle_work.work);
drivers/gpu/drm/msm/msm_gpu_devfreq.c
214
kthread_cancel_work_sync(&df->boost_work.work);
drivers/gpu/drm/msm/msm_gpu_devfreq.c
262
static void msm_devfreq_boost_work(struct kthread_work *work)
drivers/gpu/drm/msm/msm_gpu_devfreq.c
264
struct msm_gpu_devfreq *df = container_of(work,
drivers/gpu/drm/msm/msm_gpu_devfreq.c
265
struct msm_gpu_devfreq, boost_work.work);
drivers/gpu/drm/msm/msm_gpu_devfreq.c
341
static void msm_devfreq_idle_work(struct kthread_work *work)
drivers/gpu/drm/msm/msm_gpu_devfreq.c
343
struct msm_gpu_devfreq *df = container_of(work,
drivers/gpu/drm/msm/msm_gpu_devfreq.c
344
struct msm_gpu_devfreq, idle_work.work);
drivers/gpu/drm/msm/msm_io_utils.c
117
struct msm_hrtimer_work *work = container_of(t,
drivers/gpu/drm/msm/msm_io_utils.c
120
kthread_queue_work(work->worker, &work->work);
drivers/gpu/drm/msm/msm_io_utils.c
125
void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
drivers/gpu/drm/msm/msm_io_utils.c
129
hrtimer_start(&work->timer, wakeup_time, mode);
drivers/gpu/drm/msm/msm_io_utils.c
132
void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
drivers/gpu/drm/msm/msm_io_utils.c
138
hrtimer_setup(&work->timer, msm_hrtimer_worktimer, clock_id, mode);
drivers/gpu/drm/msm/msm_io_utils.c
139
work->worker = worker;
drivers/gpu/drm/msm/msm_io_utils.c
140
kthread_init_work(&work->work, fn);
drivers/gpu/drm/msm/msm_kms.c
105
struct work_struct work;
drivers/gpu/drm/msm/msm_kms.c
111
static void vblank_ctrl_worker(struct work_struct *work)
drivers/gpu/drm/msm/msm_kms.c
113
struct msm_vblank_work *vbl_work = container_of(work,
drivers/gpu/drm/msm/msm_kms.c
114
struct msm_vblank_work, work);
drivers/gpu/drm/msm/msm_kms.c
135
INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
drivers/gpu/drm/msm/msm_kms.c
141
queue_work(priv->kms->wq, &vbl_work->work);
drivers/gpu/drm/msm/msm_kms.h
126
struct msm_hrtimer_work work;
drivers/gpu/drm/nouveau/dispnv50/crc.c
101
drm_vblank_work_schedule(work, start_vbl + 1, true);
drivers/gpu/drm/nouveau/dispnv50/crc.c
85
struct drm_vblank_work *work = to_drm_vblank_work(base);
drivers/gpu/drm/nouveau/dispnv50/crc.c
86
struct nv50_crc *crc = container_of(work, struct nv50_crc, flip_work);
drivers/gpu/drm/nouveau/dispnv50/disp.c
2419
nv50_disp_atomic_commit_work(struct work_struct *work)
drivers/gpu/drm/nouveau/dispnv50/disp.c
2422
container_of(work, typeof(*state), commit_work);
drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
42
struct work_struct work;
drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
20
struct work_struct work;
drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
92
struct work_struct work;
drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
193
struct work_struct work;
drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
30
struct work_struct work;
drivers/gpu/drm/nouveau/nouveau_display.c
422
nouveau_display_hpd_work(struct work_struct *work)
drivers/gpu/drm/nouveau/nouveau_display.c
424
struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
drivers/gpu/drm/nouveau/nouveau_dp.c
483
nouveau_dp_irq(struct work_struct *work)
drivers/gpu/drm/nouveau/nouveau_dp.c
486
container_of(work, typeof(*nv_connector), irq_work);
drivers/gpu/drm/nouveau/nouveau_drm.c
174
struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
drivers/gpu/drm/nouveau/nouveau_drm.c
175
struct nouveau_cli_work *work, *wtmp;
drivers/gpu/drm/nouveau/nouveau_drm.c
177
list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
drivers/gpu/drm/nouveau/nouveau_drm.c
178
if (!work->fence || nouveau_cli_work_ready(work->fence)) {
drivers/gpu/drm/nouveau/nouveau_drm.c
179
list_del(&work->head);
drivers/gpu/drm/nouveau/nouveau_drm.c
180
work->func(work);
drivers/gpu/drm/nouveau/nouveau_drm.c
189
struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb);
drivers/gpu/drm/nouveau/nouveau_drm.c
190
schedule_work(&work->cli->work);
drivers/gpu/drm/nouveau/nouveau_drm.c
195
struct nouveau_cli_work *work)
drivers/gpu/drm/nouveau/nouveau_drm.c
197
work->fence = dma_fence_get(fence);
drivers/gpu/drm/nouveau/nouveau_drm.c
198
work->cli = cli;
drivers/gpu/drm/nouveau/nouveau_drm.c
200
list_add_tail(&work->head, &cli->worker);
drivers/gpu/drm/nouveau/nouveau_drm.c
201
if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
drivers/gpu/drm/nouveau/nouveau_drm.c
202
nouveau_cli_work_fence(fence, &work->cb);
drivers/gpu/drm/nouveau/nouveau_drm.c
216
flush_work(&cli->work);
drivers/gpu/drm/nouveau/nouveau_drm.c
259
INIT_WORK(&cli->work, nouveau_cli_work);
drivers/gpu/drm/nouveau/nouveau_drv.h
114
struct work_struct work;
drivers/gpu/drm/nouveau/nouveau_fence.c
142
nouveau_fence_uevent_work(struct work_struct *work)
drivers/gpu/drm/nouveau/nouveau_fence.c
144
struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
drivers/gpu/drm/nouveau/nouveau_gem.c
138
struct nouveau_cli_work work;
drivers/gpu/drm/nouveau/nouveau_gem.c
152
struct nouveau_gem_object_unmap *work =
drivers/gpu/drm/nouveau/nouveau_gem.c
153
container_of(w, typeof(*work), work);
drivers/gpu/drm/nouveau/nouveau_gem.c
154
nouveau_gem_object_delete(work->vma);
drivers/gpu/drm/nouveau/nouveau_gem.c
155
kfree(work);
drivers/gpu/drm/nouveau/nouveau_gem.c
162
struct nouveau_gem_object_unmap *work;
drivers/gpu/drm/nouveau/nouveau_gem.c
171
if (!(work = kmalloc_obj(*work))) {
drivers/gpu/drm/nouveau/nouveau_gem.c
177
work->work.func = nouveau_gem_object_delete_work;
drivers/gpu/drm/nouveau/nouveau_gem.c
178
work->vma = vma;
drivers/gpu/drm/nouveau/nouveau_gem.c
179
nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
drivers/gpu/drm/nouveau/nouveau_svm.c
1006
INIT_WORK(&buffer->work, nouveau_svm_fault);
drivers/gpu/drm/nouveau/nouveau_svm.c
55
struct work_struct work;
drivers/gpu/drm/nouveau/nouveau_svm.c
717
nouveau_svm_fault(struct work_struct *work)
drivers/gpu/drm/nouveau/nouveau_svm.c
719
struct nouveau_svm_fault_buffer *buffer = container_of(work, typeof(*buffer), work);
drivers/gpu/drm/nouveau/nouveau_svm.c
888
schedule_work(&buffer->work);
drivers/gpu/drm/nouveau/nouveau_svm.c
947
flush_work(&buffer->work);
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
256
INIT_WORK(&disp->super.work, func->super);
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
1011
gf119_disp_super(struct work_struct *work)
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
1013
struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
1130
queue_work(disp->super.wq, &disp->super.work);
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
835
gv100_disp_super(struct work_struct *work)
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
837
struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
944
queue_work(disp->super.wq, &disp->super.work);
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
1365
nv50_disp_super(struct work_struct *work)
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
1367
struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
1501
queue_work(disp->super.wq, &disp->super.work);
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
141
schedule_work(&runl->work);
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
172
nvkm_runl_work(struct work_struct *work)
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
174
struct nvkm_runl *runl = container_of(work, typeof(*runl), work);
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
314
flush_work(&runl->work);
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
412
INIT_WORK(&runl->work, nvkm_runl_work);
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
94
struct work_struct work;
drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
300
nvkm_pstate_work(struct work_struct *work)
drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
302
struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
339
schedule_work(&clk->work);
drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
583
flush_work(&clk->work);
drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
674
INIT_WORK(&clk->work, nvkm_pstate_work);
drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
31
gv100_fault_buffer_process(struct work_struct *work)
drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
33
struct nvkm_fault *fault = container_of(work, typeof(*fault), nrpfb_work);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
303
INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
49
r535_gsp_msgq_work(struct work_struct *work)
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
51
struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
76
schedule_work(&gsp->msgq.work);
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
135
INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
55
nvkm_pmu_recv(struct work_struct *work)
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
57
struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
205
flush_work(&pmu->recv.work);
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
158
schedule_work(&pmu->recv.work);
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
181
flush_work(&pmu->recv.work);
drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
120
struct work_struct *work;
drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
122
work = kmalloc_obj(*work, GFP_ATOMIC);
drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
123
if (work) {
drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
124
INIT_WORK(work, nv_poweroff_work);
drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
125
schedule_work(work);
drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
74
nv_poweroff_work(struct work_struct *work)
drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
77
kfree(work);
drivers/gpu/drm/omapdrm/dss/dsi.c
3191
static void dsi_framedone_timeout_work_callback(struct work_struct *work)
drivers/gpu/drm/omapdrm/dss/dsi.c
3193
struct dsi_data *dsi = container_of(work, struct dsi_data,
drivers/gpu/drm/omapdrm/dss/dsi.c
3194
framedone_timeout_work.work);
drivers/gpu/drm/omapdrm/dss/dsi.c
4292
static void omap_dsi_te_timeout_work_callback(struct work_struct *work)
drivers/gpu/drm/omapdrm/dss/dsi.c
4295
container_of(work, struct dsi_data, te_timeout_work.work);
drivers/gpu/drm/omapdrm/dss/dsi.c
4874
static void omap_dsi_disable_work_callback(struct work_struct *work)
drivers/gpu/drm/omapdrm/dss/dsi.c
4876
struct dsi_data *dsi = container_of(work, struct dsi_data, dsi_disable_work.work);
drivers/gpu/drm/omapdrm/omap_crtc.c
352
container_of(data, struct omap_crtc, update_work.work);
drivers/gpu/drm/omapdrm/omap_fbdev.c
289
INIT_WORK(&fbdev->work, pan_worker);
drivers/gpu/drm/omapdrm/omap_fbdev.c
37
struct work_struct work;
drivers/gpu/drm/omapdrm/omap_fbdev.c
42
static void pan_worker(struct work_struct *work)
drivers/gpu/drm/omapdrm/omap_fbdev.c
44
struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
drivers/gpu/drm/omapdrm/omap_fbdev.c
75
pan_worker(&fbdev->work);
drivers/gpu/drm/omapdrm/omap_fbdev.c
77
queue_work(priv->wq, &fbdev->work);
drivers/gpu/drm/panfrost/panfrost_device.h
167
struct work_struct work;
drivers/gpu/drm/panfrost/panfrost_device.h
341
queue_work(pfdev->reset.wq, &pfdev->reset.work);
drivers/gpu/drm/panfrost/panfrost_job.c
806
static void panfrost_reset_work(struct work_struct *work)
drivers/gpu/drm/panfrost/panfrost_job.c
810
pfdev = container_of(work, struct panfrost_device, reset.work);
drivers/gpu/drm/panfrost/panfrost_job.c
875
INIT_WORK(&pfdev->reset.work, panfrost_reset_work);
drivers/gpu/drm/panfrost/panfrost_job.c
932
cancel_work_sync(&pfdev->reset.work);
drivers/gpu/drm/panthor/panthor_device.c
145
disable_work_sync(&ptdev->reset.work);
drivers/gpu/drm/panthor/panthor_device.c
149
static void panthor_device_reset_work(struct work_struct *work)
drivers/gpu/drm/panthor/panthor_device.c
151
struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
drivers/gpu/drm/panthor/panthor_device.c
233
INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
drivers/gpu/drm/panthor/panthor_device.c
603
cancel_work_sync(&ptdev->reset.work);
drivers/gpu/drm/panthor/panthor_device.h
178
struct work_struct work;
drivers/gpu/drm/panthor/panthor_device.h
290
queue_work(ptdev->reset.wq, &ptdev->reset.work);
drivers/gpu/drm/panthor/panthor_fw.c
1404
static void panthor_fw_ping_work(struct work_struct *work)
drivers/gpu/drm/panthor/panthor_fw.c
1406
struct panthor_fw *fw = container_of(work, struct panthor_fw, watchdog.ping_work.work);
drivers/gpu/drm/panthor/panthor_mmu.c
2580
static void panthor_vm_bind_job_cleanup_op_ctx_work(struct work_struct *work)
drivers/gpu/drm/panthor/panthor_mmu.c
2583
container_of(work, struct panthor_vm_bind_job, cleanup_op_ctx_work);
drivers/gpu/drm/panthor/panthor_sched.c
1102
&queue->timeout.work,
drivers/gpu/drm/panthor/panthor_sched.c
1136
qtimeout = queue->timeout.work.timer.expires;
drivers/gpu/drm/panthor/panthor_sched.c
1139
timer_was_active = cancel_delayed_work(&queue->timeout.work);
drivers/gpu/drm/panthor/panthor_sched.c
1166
&queue->timeout.work,
drivers/gpu/drm/panthor/panthor_sched.c
1661
static void group_tiler_oom_work(struct work_struct *work)
drivers/gpu/drm/panthor/panthor_sched.c
1664
container_of(work, struct panthor_group, tiler_oom_work);
drivers/gpu/drm/panthor/panthor_sched.c
1871
static void process_fw_events_work(struct work_struct *work)
drivers/gpu/drm/panthor/panthor_sched.c
1873
struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
drivers/gpu/drm/panthor/panthor_sched.c
2206
static void group_term_work(struct work_struct *work)
drivers/gpu/drm/panthor/panthor_sched.c
2209
container_of(work, struct panthor_group, term_work);
drivers/gpu/drm/panthor/panthor_sched.c
2446
static void tick_work(struct work_struct *work)
drivers/gpu/drm/panthor/panthor_sched.c
2448
struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
drivers/gpu/drm/panthor/panthor_sched.c
2449
tick_work.work);
drivers/gpu/drm/panthor/panthor_sched.c
2606
static void sync_upd_work(struct work_struct *work)
drivers/gpu/drm/panthor/panthor_sched.c
2608
struct panthor_scheduler *sched = container_of(work,
drivers/gpu/drm/panthor/panthor_sched.c
2728
disable_delayed_work_sync(&queue->timeout.work);
drivers/gpu/drm/panthor/panthor_sched.c
2740
enable_delayed_work(&queue->timeout.work);
drivers/gpu/drm/panthor/panthor_sched.c
2801
flush_work(&ptdev->scheduler->tick_work.work);
drivers/gpu/drm/panthor/panthor_sched.c
3094
static void group_sync_upd_work(struct work_struct *work)
drivers/gpu/drm/panthor/panthor_sched.c
3097
container_of(work, struct panthor_group, sync_upd_work);
drivers/gpu/drm/panthor/panthor_sched.c
3471
static void queue_timeout_work(struct work_struct *work)
drivers/gpu/drm/panthor/panthor_sched.c
3473
struct panthor_queue *queue = container_of(work, struct panthor_queue,
drivers/gpu/drm/panthor/panthor_sched.c
3474
timeout.work.work);
drivers/gpu/drm/panthor/panthor_sched.c
3521
INIT_DELAYED_WORK(&queue->timeout.work, queue_timeout_work);
drivers/gpu/drm/panthor/panthor_sched.c
362
struct delayed_work work;
drivers/gpu/drm/panthor/panthor_sched.c
919
disable_delayed_work_sync(&queue->timeout.work);
drivers/gpu/drm/panthor/panthor_sched.c
941
static void group_release_work(struct work_struct *work)
drivers/gpu/drm/panthor/panthor_sched.c
943
struct panthor_group *group = container_of(work,
drivers/gpu/drm/qxl/qxl_irq.c
75
static void qxl_client_monitors_config_work_func(struct work_struct *work)
drivers/gpu/drm/qxl/qxl_irq.c
77
struct qxl_device *qdev = container_of(work, struct qxl_device,
drivers/gpu/drm/qxl/qxl_kms.c
101
static void qxl_gc_work(struct work_struct *work)
drivers/gpu/drm/qxl/qxl_kms.c
103
struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
drivers/gpu/drm/radeon/cik.c
8094
schedule_work(&rdev->pm.dpm.thermal.work);
drivers/gpu/drm/radeon/evergreen.c
4928
schedule_work(&rdev->pm.dpm.thermal.work);
drivers/gpu/drm/radeon/r600.c
4329
schedule_work(&rdev->pm.dpm.thermal.work);
drivers/gpu/drm/radeon/r600_hdmi.c
115
void r600_audio_update_hdmi(struct work_struct *work)
drivers/gpu/drm/radeon/r600_hdmi.c
117
struct radeon_device *rdev = container_of(work, struct radeon_device,
drivers/gpu/drm/radeon/radeon.h
1347
struct work_struct work;
drivers/gpu/drm/radeon/radeon.h
2872
void r600_audio_update_hdmi(struct work_struct *work);
drivers/gpu/drm/radeon/radeon_display.c
267
struct radeon_flip_work *work =
drivers/gpu/drm/radeon/radeon_display.c
272
r = radeon_bo_reserve(work->old_rbo, false);
drivers/gpu/drm/radeon/radeon_display.c
274
radeon_bo_unpin(work->old_rbo);
drivers/gpu/drm/radeon/radeon_display.c
275
radeon_bo_unreserve(work->old_rbo);
drivers/gpu/drm/radeon/radeon_display.c
277
drm_err(&work->rdev->ddev, "failed to reserve buffer after flip\n");
drivers/gpu/drm/radeon/radeon_display.c
279
drm_gem_object_put(&work->old_rbo->tbo.base);
drivers/gpu/drm/radeon/radeon_display.c
280
kfree(work);
drivers/gpu/drm/radeon/radeon_display.c
367
struct radeon_flip_work *work;
drivers/gpu/drm/radeon/radeon_display.c
375
work = radeon_crtc->flip_work;
drivers/gpu/drm/radeon/radeon_display.c
390
if (work->event)
drivers/gpu/drm/radeon/radeon_display.c
391
drm_crtc_send_vblank_event(&radeon_crtc->base, work->event);
drivers/gpu/drm/radeon/radeon_display.c
396
radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
drivers/gpu/drm/radeon/radeon_display.c
397
queue_work(radeon_crtc->flip_queue, &work->unpin_work);
drivers/gpu/drm/radeon/radeon_display.c
409
struct radeon_flip_work *work =
drivers/gpu/drm/radeon/radeon_display.c
411
struct radeon_device *rdev = work->rdev;
drivers/gpu/drm/radeon/radeon_display.c
413
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
drivers/gpu/drm/radeon/radeon_display.c
421
if (work->fence) {
drivers/gpu/drm/radeon/radeon_display.c
424
fence = to_radeon_fence(work->fence);
drivers/gpu/drm/radeon/radeon_display.c
435
r = dma_fence_wait(work->fence, false);
drivers/gpu/drm/radeon/radeon_display.c
445
dma_fence_put(work->fence);
drivers/gpu/drm/radeon/radeon_display.c
446
work->fence = NULL;
drivers/gpu/drm/radeon/radeon_display.c
455
(radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0,
drivers/gpu/drm/radeon/radeon_display.c
461
((int) (work->target_vblank -
drivers/gpu/drm/radeon/radeon_display.c
472
radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async);
drivers/gpu/drm/radeon/radeon_display.c
490
struct radeon_flip_work *work;
drivers/gpu/drm/radeon/radeon_display.c
497
work = kzalloc_obj(*work);
drivers/gpu/drm/radeon/radeon_display.c
498
if (work == NULL)
drivers/gpu/drm/radeon/radeon_display.c
501
INIT_WORK(&work->flip_work, radeon_flip_work_func);
drivers/gpu/drm/radeon/radeon_display.c
502
INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
drivers/gpu/drm/radeon/radeon_display.c
504
work->rdev = rdev;
drivers/gpu/drm/radeon/radeon_display.c
505
work->crtc_id = radeon_crtc->crtc_id;
drivers/gpu/drm/radeon/radeon_display.c
506
work->event = event;
drivers/gpu/drm/radeon/radeon_display.c
507
work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
drivers/gpu/drm/radeon/radeon_display.c
514
work->old_rbo = gem_to_radeon_bo(obj);
drivers/gpu/drm/radeon/radeon_display.c
521
work->old_rbo, new_rbo);
drivers/gpu/drm/radeon/radeon_display.c
538
&work->fence);
drivers/gpu/drm/radeon/radeon_display.c
582
work->base = base;
drivers/gpu/drm/radeon/radeon_display.c
583
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
drivers/gpu/drm/radeon/radeon_display.c
596
radeon_crtc->flip_work = work;
drivers/gpu/drm/radeon/radeon_display.c
603
queue_work(radeon_crtc->flip_queue, &work->flip_work);
drivers/gpu/drm/radeon/radeon_display.c
615
drm_gem_object_put(&work->old_rbo->tbo.base);
drivers/gpu/drm/radeon/radeon_display.c
616
dma_fence_put(work->fence);
drivers/gpu/drm/radeon/radeon_display.c
617
kfree(work);
drivers/gpu/drm/radeon/radeon_fence.c
267
static void radeon_fence_check_lockup(struct work_struct *work)
drivers/gpu/drm/radeon/radeon_fence.c
273
fence_drv = container_of(work, struct radeon_fence_driver,
drivers/gpu/drm/radeon/radeon_fence.c
274
lockup_work.work);
drivers/gpu/drm/radeon/radeon_irq_kms.c
100
static void radeon_dp_work_func(struct work_struct *work)
drivers/gpu/drm/radeon/radeon_irq_kms.c
102
struct radeon_device *rdev = container_of(work, struct radeon_device,
drivers/gpu/drm/radeon/radeon_irq_kms.c
79
static void radeon_hotplug_work_func(struct work_struct *work)
drivers/gpu/drm/radeon/radeon_irq_kms.c
81
struct radeon_device *rdev = container_of(work, struct radeon_device,
drivers/gpu/drm/radeon/radeon_irq_kms.c
82
hotplug_work.work);
drivers/gpu/drm/radeon/radeon_pm.c
1440
INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
drivers/gpu/drm/radeon/radeon_pm.c
1853
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
drivers/gpu/drm/radeon/radeon_pm.c
1857
rdev = container_of(work, struct radeon_device,
drivers/gpu/drm/radeon/radeon_pm.c
1858
pm.dynpm_idle_work.work);
drivers/gpu/drm/radeon/radeon_pm.c
50
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
drivers/gpu/drm/radeon/radeon_pm.c
875
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
drivers/gpu/drm/radeon/radeon_pm.c
878
container_of(work, struct radeon_device,
drivers/gpu/drm/radeon/radeon_pm.c
879
pm.dpm.thermal.work);
drivers/gpu/drm/radeon/radeon_uvd.c
64
static void radeon_uvd_idle_work_handler(struct work_struct *work);
drivers/gpu/drm/radeon/radeon_uvd.c
859
static void radeon_uvd_idle_work_handler(struct work_struct *work)
drivers/gpu/drm/radeon/radeon_uvd.c
862
container_of(work, struct radeon_device, uvd.idle_work.work);
drivers/gpu/drm/radeon/radeon_vce.c
262
static void radeon_vce_idle_work_handler(struct work_struct *work)
drivers/gpu/drm/radeon/radeon_vce.c
265
container_of(work, struct radeon_device, vce.idle_work.work);
drivers/gpu/drm/radeon/radeon_vce.c
47
static void radeon_vce_idle_work_handler(struct work_struct *work);
drivers/gpu/drm/radeon/si.c
6424
schedule_work(&rdev->pm.dpm.thermal.work);
drivers/gpu/drm/rockchip/cdn-dp-core.c
879
static void cdn_dp_pd_event_work(struct work_struct *work)
drivers/gpu/drm/rockchip/cdn-dp-core.c
881
struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
249
static void dw_hdmi_qp_rk3588_hpd_work(struct work_struct *work)
drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
251
struct rockchip_hdmi_qp *hdmi = container_of(work,
drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
253
hpd_work.work);
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
1750
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
1752
struct vop *vop = container_of(work, struct vop, fb_unref_work);
drivers/gpu/drm/scheduler/sched_entity.c
181
struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
drivers/gpu/drm/scheduler/sched_entity.c
227
INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
drivers/gpu/drm/scheduler/sched_entity.c
228
schedule_work(&job->work);
drivers/gpu/drm/scheduler/sched_main.c
537
static void drm_sched_job_timedout(struct work_struct *work)
drivers/gpu/drm/scheduler/sched_main.c
543
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
drivers/gpu/drm/tegra/dpaux.c
272
static void tegra_dpaux_hotplug(struct work_struct *work)
drivers/gpu/drm/tegra/dpaux.c
274
struct tegra_dpaux *dpaux = work_to_dpaux(work);
drivers/gpu/drm/tegra/dpaux.c
290
schedule_work(&dpaux->work);
drivers/gpu/drm/tegra/dpaux.c
458
INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
drivers/gpu/drm/tegra/dpaux.c
57
struct work_struct work;
drivers/gpu/drm/tegra/dpaux.c
585
cancel_work_sync(&dpaux->work);
drivers/gpu/drm/tegra/dpaux.c
71
static inline struct tegra_dpaux *work_to_dpaux(struct work_struct *work)
drivers/gpu/drm/tegra/dpaux.c
73
return container_of(work, struct tegra_dpaux, work);
drivers/gpu/drm/tegra/sor.c
2180
static void tegra_sor_hdmi_scdc_work(struct work_struct *work)
drivers/gpu/drm/tegra/sor.c
2182
struct tegra_sor *sor = container_of(work, struct tegra_sor, scdc.work);
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
570
static void tilcdc_crtc_recover_work(struct work_struct *work)
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
573
container_of(work, struct tilcdc_crtc, recover_work);
drivers/gpu/drm/tiny/gm12u320.c
321
static void gm12u320_fb_update_work(struct work_struct *work)
drivers/gpu/drm/tiny/gm12u320.c
324
container_of(to_delayed_work(work), struct gm12u320_device,
drivers/gpu/drm/tiny/gm12u320.c
325
fb_update.work);
drivers/gpu/drm/tiny/gm12u320.c
391
queue_delayed_work(system_long_wq, &gm12u320->fb_update.work,
drivers/gpu/drm/tiny/gm12u320.c
430
mod_delayed_work(system_long_wq, &gm12u320->fb_update.work, 0);
drivers/gpu/drm/tiny/gm12u320.c
440
cancel_delayed_work_sync(&gm12u320->fb_update.work);
drivers/gpu/drm/tiny/gm12u320.c
655
INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
drivers/gpu/drm/tiny/gm12u320.c
95
struct delayed_work work;
drivers/gpu/drm/ttm/ttm_bo.c
236
static void ttm_bo_delayed_delete(struct work_struct *work)
drivers/gpu/drm/ttm/ttm_bo.c
240
bo = container_of(work, typeof(*bo), delayed_delete);
drivers/gpu/drm/v3d/v3d_irq.c
41
v3d_overflow_mem_work(struct work_struct *work)
drivers/gpu/drm/v3d/v3d_irq.c
44
container_of(work, struct v3d_dev, overflow_mem_work);
drivers/gpu/drm/vboxvideo/vbox_irq.c
164
static void vbox_hotplug_worker(struct work_struct *work)
drivers/gpu/drm/vboxvideo/vbox_irq.c
166
struct vbox_private *vbox = container_of(work, struct vbox_private,
drivers/gpu/drm/vc4/vc4_bo.c
609
static void vc4_bo_cache_time_work(struct work_struct *work)
drivers/gpu/drm/vc4/vc4_bo.c
612
container_of(work, struct vc4_dev, bo_cache.time_work);
drivers/gpu/drm/vc4/vc4_gem.c
317
vc4_reset_work(struct work_struct *work)
drivers/gpu/drm/vc4/vc4_gem.c
320
container_of(work, struct vc4_dev, hangcheck.reset_work);
drivers/gpu/drm/vc4/vc4_gem.c
923
vc4_job_done_work(struct work_struct *work)
drivers/gpu/drm/vc4/vc4_gem.c
926
container_of(work, struct vc4_dev, job_done_work);
drivers/gpu/drm/vc4/vc4_hdmi.c
862
static void vc4_hdmi_scrambling_wq(struct work_struct *work)
drivers/gpu/drm/vc4/vc4_hdmi.c
864
struct vc4_hdmi *vc4_hdmi = container_of(to_delayed_work(work),
drivers/gpu/drm/vc4/vc4_irq.c
62
vc4_overflow_mem_work(struct work_struct *work)
drivers/gpu/drm/vc4/vc4_irq.c
65
container_of(work, struct vc4_dev, overflow_mem_work);
drivers/gpu/drm/virtio/virtgpu_drv.h
326
void virtio_gpu_array_put_free_work(struct work_struct *work);
drivers/gpu/drm/virtio/virtgpu_drv.h
419
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
drivers/gpu/drm/virtio/virtgpu_drv.h
420
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
drivers/gpu/drm/virtio/virtgpu_gem.c
282
void virtio_gpu_array_put_free_work(struct work_struct *work)
drivers/gpu/drm/virtio/virtgpu_gem.c
285
container_of(work, struct virtio_gpu_device, obj_free_work);
drivers/gpu/drm/virtio/virtgpu_kms.c
36
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
drivers/gpu/drm/virtio/virtgpu_kms.c
39
container_of(work, struct virtio_gpu_device,
drivers/gpu/drm/virtio/virtgpu_kms.c
61
void (*work_func)(struct work_struct *work))
drivers/gpu/drm/virtio/virtgpu_vq.c
225
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
drivers/gpu/drm/virtio/virtgpu_vq.c
228
container_of(work, struct virtio_gpu_device,
drivers/gpu/drm/virtio/virtgpu_vq.c
277
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
drivers/gpu/drm/virtio/virtgpu_vq.c
280
container_of(work, struct virtio_gpu_device,
drivers/gpu/drm/vkms/vkms_composer.c
594
void vkms_composer_worker(struct work_struct *work)
drivers/gpu/drm/vkms/vkms_composer.c
596
struct vkms_crtc_state *crtc_state = container_of(work,
drivers/gpu/drm/vkms/vkms_drv.h
319
void vkms_composer_worker(struct work_struct *work);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
115
struct work_struct work;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1340
INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1403
(void) cancel_work_sync(&man->work);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
406
schedule_work(&man->work);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
516
static void vmw_cmdbuf_work_func(struct work_struct *work)
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
519
container_of(work, struct vmw_cmdbuf_man, work);
drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
107
crc_generate_worker(struct work_struct *work)
drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
110
container_of(work, struct vmw_display_unit, vkms.crc_generator_work);
drivers/gpu/drm/xe/xe_bo.c
3574
static void xe_bo_dev_work_func(struct work_struct *work)
drivers/gpu/drm/xe/xe_bo.c
3576
struct xe_bo_dev *bo_dev = container_of(work, typeof(*bo_dev), async_free);
drivers/gpu/drm/xe/xe_devcoredump.c
206
flush_work(&ss->work);
drivers/gpu/drm/xe/xe_devcoredump.c
257
cancel_work_sync(&coredump->snapshot.work);
drivers/gpu/drm/xe/xe_devcoredump.c
273
static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
drivers/gpu/drm/xe/xe_devcoredump.c
275
struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
drivers/gpu/drm/xe/xe_devcoredump.c
343
INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);
drivers/gpu/drm/xe/xe_devcoredump.c
359
queue_work(system_unbound_wq, &ss->work);
drivers/gpu/drm/xe/xe_devcoredump_types.h
39
struct work_struct work;
drivers/gpu/drm/xe/xe_eu_stall.c
714
static void eu_stall_data_buf_poll_work_fn(struct work_struct *work)
drivers/gpu/drm/xe/xe_eu_stall.c
717
container_of(work, typeof(*stream), buf_poll_work.work);
drivers/gpu/drm/xe/xe_ggtt.c
436
static void ggtt_node_remove_work_func(struct work_struct *work)
drivers/gpu/drm/xe/xe_ggtt.c
438
struct xe_ggtt_node *node = container_of(work, typeof(*node),
drivers/gpu/drm/xe/xe_gsc.c
350
static void gsc_work(struct work_struct *work)
drivers/gpu/drm/xe/xe_gsc.c
352
struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
drivers/gpu/drm/xe/xe_gsc.c
396
queue_work(gsc->wq, &gsc->work);
drivers/gpu/drm/xe/xe_gsc.c
407
INIT_WORK(&gsc->work, gsc_work);
drivers/gpu/drm/xe/xe_gsc.c
546
queue_work(gsc->wq, &gsc->work);
drivers/gpu/drm/xe/xe_gsc.c
552
flush_work(&gsc->work);
drivers/gpu/drm/xe/xe_gsc_proxy.c
374
queue_work(gsc->wq, &gsc->work);
drivers/gpu/drm/xe/xe_gsc_types.h
42
struct work_struct work;
drivers/gpu/drm/xe/xe_guc_ct.c
579
struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);
drivers/gpu/drm/xe/xe_hw_error.c
54
static void csc_hw_error_work(struct work_struct *work)
drivers/gpu/drm/xe/xe_hw_error.c
56
struct xe_tile *tile = container_of(work, typeof(*tile), csc_hw_error_work);
drivers/gpu/drm/xe/xe_hw_fence.c
106
irq_work_queue(&irq->work);
drivers/gpu/drm/xe/xe_hw_fence.c
50
static void hw_fence_irq_run_cb(struct irq_work *work)
drivers/gpu/drm/xe/xe_hw_fence.c
52
struct xe_hw_fence_irq *irq = container_of(work, typeof(*irq), work);
drivers/gpu/drm/xe/xe_hw_fence.c
77
init_irq_work(&irq->work, hw_fence_irq_run_cb);
drivers/gpu/drm/xe/xe_hw_fence_types.h
28
struct irq_work work;
drivers/gpu/drm/xe/xe_i2c.c
339
INIT_WORK(&i2c->work, xe_i2c_client_work);
drivers/gpu/drm/xe/xe_i2c.c
64
static void xe_i2c_client_work(struct work_struct *work)
drivers/gpu/drm/xe/xe_i2c.c
66
struct xe_i2c *i2c = container_of(work, struct xe_i2c, work);
drivers/gpu/drm/xe/xe_i2c.c
85
schedule_work(&i2c->work);
drivers/gpu/drm/xe/xe_i2c.h
39
struct work_struct work;
drivers/gpu/drm/xe/xe_late_bind_fw.c
207
flush_work(&lbfw->work);
drivers/gpu/drm/xe/xe_late_bind_fw.c
212
static void xe_late_bind_work(struct work_struct *work)
drivers/gpu/drm/xe/xe_late_bind_fw.c
214
struct xe_late_bind_fw *lbfw = container_of(work, struct xe_late_bind_fw, work);
drivers/gpu/drm/xe/xe_late_bind_fw.c
288
queue_work(late_bind->wq, &lbfw->work);
drivers/gpu/drm/xe/xe_late_bind_fw.c
360
INIT_WORK(&lb_fw->work, xe_late_bind_work);
drivers/gpu/drm/xe/xe_late_bind_fw_types.h
44
struct work_struct work;
drivers/gpu/drm/xe/xe_oa.c
118
struct delayed_work work;
drivers/gpu/drm/xe/xe_oa.c
956
struct xe_oa_fence *ofence = container_of(w, typeof(*ofence), work.work);
drivers/gpu/drm/xe/xe_oa.c
970
INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
drivers/gpu/drm/xe/xe_oa.c
971
queue_delayed_work(system_unbound_wq, &ofence->work,
drivers/gpu/drm/xe/xe_pxp.c
250
static void pxp_irq_work(struct work_struct *work)
drivers/gpu/drm/xe/xe_pxp.c
252
struct xe_pxp *pxp = container_of(work, typeof(*pxp), irq.work);
drivers/gpu/drm/xe/xe_pxp.c
310
queue_work(pxp->irq.wq, &pxp->irq.work);
drivers/gpu/drm/xe/xe_pxp.c
403
INIT_WORK(&pxp->irq.work, pxp_irq_work);
drivers/gpu/drm/xe/xe_pxp_types.h
91
struct work_struct work;
drivers/gpu/drm/xe/xe_shrinker.c
257
static void xe_shrinker_pm(struct work_struct *work)
drivers/gpu/drm/xe/xe_shrinker.c
260
container_of(work, typeof(*shrinker), pm_worker);
drivers/gpu/drm/xe/xe_svm.c
144
queue_work(xe->usm.pf_wq, &vm->svm.garbage_collector.work);
drivers/gpu/drm/xe/xe_svm.c
1688
static void xe_pagemap_destroy_work(struct work_struct *work)
drivers/gpu/drm/xe/xe_svm.c
1690
struct xe_pagemap *xpagemap = container_of(work, typeof(*xpagemap), destroy_work);
drivers/gpu/drm/xe/xe_svm.c
2027
flush_work(&vm->svm.garbage_collector.work);
drivers/gpu/drm/xe/xe_svm.c
425
svm.garbage_collector.work);
drivers/gpu/drm/xe/xe_svm.c
861
INIT_WORK(&vm->svm.garbage_collector.work,
drivers/gpu/drm/xe/xe_svm.c
906
disable_work_sync(&vm->svm.garbage_collector.work);
drivers/gpu/drm/xe/xe_tlb_inval.c
66
static void xe_tlb_inval_fence_timeout(struct work_struct *work)
drivers/gpu/drm/xe/xe_tlb_inval.c
68
struct xe_tlb_inval *tlb_inval = container_of(work, struct xe_tlb_inval,
drivers/gpu/drm/xe/xe_tlb_inval.c
69
fence_tdr.work);
drivers/gpu/drm/xe/xe_vm_types.h
202
struct work_struct work;
drivers/gpu/drm/xen/xen_drm_front_kms.c
168
static void pflip_to_worker(struct work_struct *work)
drivers/gpu/drm/xen/xen_drm_front_kms.c
170
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/gpu/drm/xlnx/zynqmp_dp.c
2319
static void zynqmp_dp_hpd_work_func(struct work_struct *work)
drivers/gpu/drm/xlnx/zynqmp_dp.c
2321
struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, hpd_work);
drivers/gpu/drm/xlnx/zynqmp_dp.c
2334
static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work)
drivers/gpu/drm/xlnx/zynqmp_dp.c
2336
struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp,
drivers/gpu/host1x/cdma.c
491
static void cdma_update_work(struct work_struct *work)
drivers/gpu/host1x/cdma.c
493
struct host1x_cdma *cdma = container_of(work, struct host1x_cdma, update_work);
drivers/gpu/host1x/fence.c
100
struct delayed_work *dwork = (struct delayed_work *)work;
drivers/gpu/host1x/fence.c
98
static void do_fence_timeout(struct work_struct *work)
drivers/gpu/host1x/hw/cdma_hw.c
289
static void cdma_timeout_handler(struct work_struct *work)
drivers/gpu/host1x/hw/cdma_hw.c
296
cdma = container_of(to_delayed_work(work), struct host1x_cdma,
drivers/greybus/gb-beagleplay.c
307
static void hdlc_transmit(struct work_struct *work)
drivers/greybus/gb-beagleplay.c
309
struct gb_beagleplay *bg = container_of(work, struct gb_beagleplay, tx_work);
drivers/greybus/interface.c
250
static void gb_interface_mode_switch_work(struct work_struct *work)
drivers/greybus/interface.c
257
intf = container_of(work, struct gb_interface, mode_switch_work);
drivers/greybus/operation.c
1016
queue_work(gb_operation_completion_wq, &operation->work);
drivers/greybus/operation.c
1079
queue_work(gb_operation_completion_wq, &operation->work);
drivers/greybus/operation.c
1104
flush_work(&operation->work);
drivers/greybus/operation.c
272
static void gb_operation_work(struct work_struct *work)
drivers/greybus/operation.c
277
operation = container_of(work, struct gb_operation, work);
drivers/greybus/operation.c
306
queue_work(gb_operation_completion_wq, &operation->work);
drivers/greybus/operation.c
552
INIT_WORK(&operation->work, gb_operation_work);
drivers/greybus/operation.c
901
&operation->work);
drivers/greybus/operation.c
947
queue_work(connection->wq, &operation->work);
drivers/greybus/svc.c
1089
static void gb_svc_process_deferred_request(struct work_struct *work)
drivers/greybus/svc.c
1096
dr = container_of(work, struct gb_svc_deferred_request, work);
drivers/greybus/svc.c
1137
INIT_WORK(&dr->work, gb_svc_process_deferred_request);
drivers/greybus/svc.c
1139
queue_work(svc->wq, &dr->work);
drivers/greybus/svc.c
20
struct work_struct work;
drivers/greybus/svc_watchdog.c
104
schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
drivers/greybus/svc_watchdog.c
121
INIT_DELAYED_WORK(&watchdog->work, do_work);
drivers/greybus/svc_watchdog.c
16
struct delayed_work work;
drivers/greybus/svc_watchdog.c
179
schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
drivers/greybus/svc_watchdog.c
195
cancel_delayed_work_sync(&watchdog->work);
drivers/greybus/svc_watchdog.c
44
static void greybus_reset(struct work_struct *work)
drivers/greybus/svc_watchdog.c
63
static void do_work(struct work_struct *work)
drivers/greybus/svc_watchdog.c
69
watchdog = container_of(work, struct gb_svc_watchdog, work.work);
drivers/hid/amd-sfh-hid/amd_sfh_client.c
116
schedule_delayed_work(&cli_data->work, 0);
drivers/hid/amd-sfh-hid/amd_sfh_client.c
119
void amd_sfh_work_buffer(struct work_struct *work)
drivers/hid/amd-sfh-hid/amd_sfh_client.c
121
struct amdtp_cl_data *cli_data = container_of(work, struct amdtp_cl_data, work_buffer.work);
drivers/hid/amd-sfh-hid/amd_sfh_client.c
243
INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
drivers/hid/amd-sfh-hid/amd_sfh_client.c
380
cancel_delayed_work_sync(&cl_data->work);
drivers/hid/amd-sfh-hid/amd_sfh_client.c
66
schedule_delayed_work(&cli_data->work, 0);
drivers/hid/amd-sfh-hid/amd_sfh_client.c
70
void amd_sfh_work(struct work_struct *work)
drivers/hid/amd-sfh-hid/amd_sfh_client.c
72
struct amdtp_cl_data *cli_data = container_of(work, struct amdtp_cl_data, work.work);
drivers/hid/amd-sfh-hid/amd_sfh_common.h
62
struct work_struct work;
drivers/hid/amd-sfh-hid/amd_sfh_common.h
87
void amd_sfh_work(struct work_struct *work);
drivers/hid/amd-sfh-hid/amd_sfh_common.h
88
void amd_sfh_work_buffer(struct work_struct *work);
drivers/hid/amd-sfh-hid/amd_sfh_hid.h
54
struct delayed_work work;
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
389
static void sfh1_1_init_work(struct work_struct *work)
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
391
struct amd_mp2_dev *mp2 = container_of(work, struct amd_mp2_dev, work);
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
407
static void sfh_init_work(struct work_struct *work)
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
409
struct amd_mp2_dev *mp2 = container_of(work, struct amd_mp2_dev, work);
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
429
flush_work(&mp2->work);
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
479
rc = devm_work_autocancel(&pdev->dev, &privdata->work, sfh1_1_init_work);
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
483
schedule_work(&privdata->work);
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
495
rc = devm_work_autocancel(&pdev->dev, &privdata->work, sfh_init_work);
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
501
schedule_work(&privdata->work);
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
510
flush_work(&mp2->work);
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
520
flush_work(&mp2->work);
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
531
flush_work(&mp2->work);
drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
103
cancel_delayed_work_sync(&cl_data->work);
drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
132
INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
drivers/hid/hid-asus.c
118
struct work_struct work;
drivers/hid/hid-asus.c
1350
cancel_work_sync(&drvdata->kbd_backlight->work);
drivers/hid/hid-asus.c
586
static void asus_sync_fn_lock(struct work_struct *work)
drivers/hid/hid-asus.c
589
container_of(work, struct asus_drvdata, fn_lock_sync_work);
drivers/hid/hid-asus.c
600
schedule_work(&led->work);
drivers/hid/hid-asus.c
618
static void asus_kbd_backlight_work(struct work_struct *work)
drivers/hid/hid-asus.c
620
struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work);
drivers/hid/hid-asus.c
774
INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work);
drivers/hid/hid-bigbenff.c
198
static void bigben_worker(struct work_struct *work)
drivers/hid/hid-bigbenff.c
200
struct bigben_device *bigben = container_of(work,
drivers/hid/hid-bigbenff.c
307
bool work;
drivers/hid/hid-bigbenff.c
319
work = (bigben->led_state & BIT(n));
drivers/hid/hid-bigbenff.c
322
work = !(bigben->led_state & BIT(n));
drivers/hid/hid-bigbenff.c
327
if (work) {
drivers/hid/hid-corsair-void.c
504
static void corsair_void_status_work_handler(struct work_struct *work)
drivers/hid/hid-corsair-void.c
510
delayed_work = to_delayed_work(work);
drivers/hid/hid-corsair-void.c
522
static void corsair_void_firmware_work_handler(struct work_struct *work)
drivers/hid/hid-corsair-void.c
528
delayed_work = to_delayed_work(work);
drivers/hid/hid-corsair-void.c
569
static void corsair_void_battery_work_handler(struct work_struct *work)
drivers/hid/hid-corsair-void.c
571
struct corsair_void_drvdata *drvdata = container_of(work,
drivers/hid/hid-corsair.c
198
schedule_work(&led->work);
drivers/hid/hid-corsair.c
201
static void k90_backlight_work(struct work_struct *work)
drivers/hid/hid-corsair.c
204
struct k90_led *led = container_of(work, struct k90_led, work);
drivers/hid/hid-corsair.c
226
static void k90_record_led_work(struct work_struct *work)
drivers/hid/hid-corsair.c
229
struct k90_led *led = container_of(work, struct k90_led, work);
drivers/hid/hid-corsair.c
32
struct work_struct work;
drivers/hid/hid-corsair.c
450
INIT_WORK(&drvdata->backlight->work, k90_backlight_work);
drivers/hid/hid-corsair.c
495
INIT_WORK(&k90->record_led.work, k90_record_led_work);
drivers/hid/hid-corsair.c
511
cancel_work_sync(&k90->record_led.work);
drivers/hid/hid-corsair.c
528
cancel_work_sync(&drvdata->backlight->work);
drivers/hid/hid-corsair.c
544
cancel_work_sync(&k90->record_led.work);
drivers/hid/hid-cp2112.c
1109
static void cp2112_gpio_poll_callback(struct work_struct *work)
drivers/hid/hid-cp2112.c
1111
struct cp2112_device *dev = container_of(work, struct cp2112_device,
drivers/hid/hid-cp2112.c
1112
gpio_poll_worker.work);
drivers/hid/hid-elo.c
135
static void elo_work(struct work_struct *work)
drivers/hid/hid-elo.c
137
struct elo_priv *priv = container_of(work, struct elo_priv, work.work);
drivers/hid/hid-elo.c
182
queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL);
drivers/hid/hid-elo.c
239
INIT_DELAYED_WORK(&priv->work, elo_work);
drivers/hid/hid-elo.c
258
queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL);
drivers/hid/hid-elo.c
272
cancel_delayed_work_sync(&priv->work);
drivers/hid/hid-elo.c
30
struct delayed_work work;
drivers/hid/hid-google-stadiaff.c
139
cancel_work_sync(&stadiaff->work);
drivers/hid/hid-google-stadiaff.c
24
struct work_struct work;
drivers/hid/hid-google-stadiaff.c
27
static void stadiaff_work(struct work_struct *work)
drivers/hid/hid-google-stadiaff.c
30
container_of(work, struct stadiaff_device, work);
drivers/hid/hid-google-stadiaff.c
53
schedule_work(&stadiaff->work);
drivers/hid/hid-google-stadiaff.c
96
INIT_WORK(&stadiaff->work, stadiaff_work);
drivers/hid/hid-gt683r.c
118
schedule_work(&led->work);
drivers/hid/hid-gt683r.c
179
static void gt683r_led_work(struct work_struct *work)
drivers/hid/hid-gt683r.c
184
struct gt683r_led *led = container_of(work, struct gt683r_led, work);
drivers/hid/hid-gt683r.c
237
INIT_WORK(&led->work, gt683r_led_work);
drivers/hid/hid-gt683r.c
295
flush_work(&led->work);
drivers/hid/hid-gt683r.c
48
struct work_struct work;
drivers/hid/hid-gt683r.c
74
schedule_work(&led->work);
drivers/hid/hid-haptic.c
294
static void haptic_work_handler(struct work_struct *work)
drivers/hid/hid-haptic.c
297
struct hid_haptic_effect *effect = container_of(work,
drivers/hid/hid-haptic.c
299
work);
drivers/hid/hid-haptic.c
318
queue_work(haptic->wq, &haptic->effect[effect_id].work);
drivers/hid/hid-haptic.c
320
queue_work(haptic->wq, &haptic->stop_effect.work);
drivers/hid/hid-haptic.c
493
INIT_WORK(&haptic->effect[r].work, haptic_work_handler);
drivers/hid/hid-haptic.c
505
INIT_WORK(&haptic->stop_effect.work, haptic_work_handler);
drivers/hid/hid-haptic.h
19
struct work_struct work;
drivers/hid/hid-input.c
1834
static void hidinput_led_worker(struct work_struct *work)
drivers/hid/hid-input.c
1836
struct hid_device *hid = container_of(work, struct hid_device,
drivers/hid/hid-kysona.c
154
static void kysona_battery_timer_tick(struct work_struct *work)
drivers/hid/hid-kysona.c
156
struct kysona_drvdata *drv_data = container_of(work,
drivers/hid/hid-kysona.c
157
struct kysona_drvdata, battery_work.work);
drivers/hid/hid-lenovo.c
116
static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work)
drivers/hid/hid-lenovo.c
119
container_of(work, struct lenovo_drvdata, fn_lock_sync_work);
drivers/hid/hid-lg-g15.c
1188
INIT_WORK(&g15->work, lg_g15_leds_changed_work);
drivers/hid/hid-lg-g15.c
1200
INIT_WORK(&g15->work, lg_g15_leds_changed_work);
drivers/hid/hid-lg-g15.c
1207
INIT_WORK(&g15->work, lg_g510_leds_sync_work);
drivers/hid/hid-lg-g15.c
368
static void lg_g15_leds_changed_work(struct work_struct *work)
drivers/hid/hid-lg-g15.c
370
struct lg_g15_data *g15 = container_of(work, struct lg_g15_data, work);
drivers/hid/hid-lg-g15.c
506
static void lg_g510_leds_sync_work(struct work_struct *work)
drivers/hid/hid-lg-g15.c
508
struct lg_g15_data *g15 = container_of(work, struct lg_g15_data, work);
drivers/hid/hid-lg-g15.c
785
schedule_work(&g15->work);
drivers/hid/hid-lg-g15.c
817
schedule_work(&g15->work);
drivers/hid/hid-lg-g15.c
87
struct work_struct work;
drivers/hid/hid-lg-g15.c
888
schedule_work(&g15->work);
drivers/hid/hid-logitech-dj.c
1001
schedule_work(&djrcv_dev->work);
drivers/hid/hid-logitech-dj.c
1180
schedule_work(&djrcv_dev->work);
drivers/hid/hid-logitech-dj.c
148
struct work_struct work;
drivers/hid/hid-logitech-dj.c
2017
cancel_work_sync(&djrcv_dev->work);
drivers/hid/hid-logitech-dj.c
647
static void delayedwork_callback(struct work_struct *work);
drivers/hid/hid-logitech-dj.c
740
INIT_WORK(&djrcv_dev->work, delayedwork_callback);
drivers/hid/hid-logitech-dj.c
887
static void delayedwork_callback(struct work_struct *work)
drivers/hid/hid-logitech-dj.c
890
container_of(work, struct dj_receiver_dev, work);
drivers/hid/hid-logitech-dj.c
919
schedule_work(&djrcv_dev->work);
drivers/hid/hid-logitech-dj.c
963
schedule_work(&djrcv_dev->work);
drivers/hid/hid-logitech-hidpp.c
196
struct work_struct work;
drivers/hid/hid-logitech-hidpp.c
2404
struct work_struct work;
drivers/hid/hid-logitech-hidpp.c
2462
struct hidpp_ff_work_data *wd = container_of(w, struct hidpp_ff_work_data, work);
drivers/hid/hid-logitech-hidpp.c
2536
INIT_WORK(&wd->work, hidpp_ff_work_handler);
drivers/hid/hid-logitech-hidpp.c
2545
queue_work(data->wq, &wd->work);
drivers/hid/hid-logitech-hidpp.c
3874
if (schedule_work(&hidpp->work) == 0)
drivers/hid/hid-logitech-hidpp.c
4171
static void hidpp_connect_event(struct work_struct *work)
drivers/hid/hid-logitech-hidpp.c
4173
struct hidpp_device *hidpp = container_of(work, struct hidpp_device, work);
drivers/hid/hid-logitech-hidpp.c
4292
static void hidpp_reset_hi_res_handler(struct work_struct *work)
drivers/hid/hid-logitech-hidpp.c
4294
struct hidpp_device *hidpp = container_of(work, struct hidpp_device, reset_hi_res_work);
drivers/hid/hid-logitech-hidpp.c
4428
INIT_WORK(&hidpp->work, hidpp_connect_event);
drivers/hid/hid-logitech-hidpp.c
4480
schedule_work(&hidpp->work);
drivers/hid/hid-logitech-hidpp.c
4481
flush_work(&hidpp->work);
drivers/hid/hid-logitech-hidpp.c
4511
cancel_work_sync(&hidpp->work);
drivers/hid/hid-logitech-hidpp.c
4526
cancel_work_sync(&hidpp->work);
drivers/hid/hid-magicmouse.c
150
struct delayed_work work;
drivers/hid/hid-magicmouse.c
788
static void magicmouse_enable_mt_work(struct work_struct *work)
drivers/hid/hid-magicmouse.c
791
container_of(work, struct magicmouse_sc, work.work);
drivers/hid/hid-magicmouse.c
868
INIT_DEFERRABLE_WORK(&msc->work, magicmouse_enable_mt_work);
drivers/hid/hid-magicmouse.c
953
schedule_delayed_work(&msc->work, msecs_to_jiffies(500));
drivers/hid/hid-magicmouse.c
971
cancel_delayed_work_sync(&msc->work);
drivers/hid/hid-mcp2221.c
1161
static void mcp_init_work(struct work_struct *work)
drivers/hid/hid-mcp2221.c
1164
struct mcp2221 *mcp = container_of(work, struct mcp2221, init_work.work);
drivers/hid/hid-microsoft.c
281
static void ms_ff_worker(struct work_struct *work)
drivers/hid/hid-microsoft.c
283
struct ms_data *ms = container_of(work, struct ms_data, ff_worker);
drivers/hid/hid-nintendo.c
1805
static void joycon_rumble_worker(struct work_struct *work)
drivers/hid/hid-nintendo.c
1807
struct joycon_ctlr *ctlr = container_of(work, struct joycon_ctlr,
drivers/hid/hid-nvidia-shield.c
319
static void thunderstrike_hostcmd_req_work_handler(struct work_struct *work)
drivers/hid/hid-nvidia-shield.c
322
container_of(work, struct thunderstrike, hostcmd_req_work);
drivers/hid/hid-picolcd_core.c
101
data->pending = work;
drivers/hid/hid-picolcd_core.c
104
wait_for_completion_interruptible_timeout(&work->ready, HZ*2);
drivers/hid/hid-picolcd_core.c
110
return work;
drivers/hid/hid-picolcd_core.c
72
struct picolcd_pending *work;
drivers/hid/hid-picolcd_core.c
81
work = kzalloc_obj(*work);
drivers/hid/hid-picolcd_core.c
82
if (!work)
drivers/hid/hid-picolcd_core.c
85
init_completion(&work->ready);
drivers/hid/hid-picolcd_core.c
86
work->out_report = report;
drivers/hid/hid-picolcd_core.c
87
work->in_report = NULL;
drivers/hid/hid-picolcd_core.c
88
work->raw_size = 0;
drivers/hid/hid-picolcd_core.c
98
kfree(work);
drivers/hid/hid-picolcd_core.c
99
work = NULL;
drivers/hid/hid-playstation.c
1313
static void dualsense_output_worker(struct work_struct *work)
drivers/hid/hid-playstation.c
1315
struct dualsense *ds = container_of(work, struct dualsense, output_worker);
drivers/hid/hid-playstation.c
1881
static void dualshock4_dongle_calibration_work(struct work_struct *work)
drivers/hid/hid-playstation.c
1883
struct dualshock4 *ds4 = container_of(work, struct dualshock4, dongle_hotplug_worker);
drivers/hid/hid-playstation.c
2276
static void dualshock4_output_worker(struct work_struct *work)
drivers/hid/hid-playstation.c
2278
struct dualshock4 *ds4 = container_of(work, struct dualshock4, output_worker);
drivers/hid/hid-rmi.c
311
static void rmi_reset_work(struct work_struct *work)
drivers/hid/hid-rmi.c
313
struct rmi_data *hdata = container_of(work, struct rmi_data,
drivers/hid/hid-sony.c
1660
static void sony_state_worker(struct work_struct *work)
drivers/hid/hid-sony.c
1662
struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
drivers/hid/hid-steam.c
1018
static void steam_work_connect_cb(struct work_struct *work)
drivers/hid/hid-steam.c
1020
struct steam_device *steam = container_of(work, struct steam_device,
drivers/hid/hid-steam.c
1042
static void steam_mode_switch_cb(struct work_struct *work)
drivers/hid/hid-steam.c
1044
struct steam_device *steam = container_of(to_delayed_work(work),
drivers/hid/hid-steam.c
1070
static void steam_work_unregister_cb(struct work_struct *work)
drivers/hid/hid-steam.c
1072
struct steam_device *steam = container_of(work, struct steam_device,
drivers/hid/hid-steam.c
536
static void steam_haptic_rumble_cb(struct work_struct *work)
drivers/hid/hid-steam.c
538
struct steam_device *steam = container_of(work, struct steam_device,
drivers/hid/hid-steelseries.c
398
static void steelseries_headset_battery_timer_tick(struct work_struct *work)
drivers/hid/hid-steelseries.c
400
struct steelseries_device *sd = container_of(work,
drivers/hid/hid-steelseries.c
401
struct steelseries_device, battery_work.work);
drivers/hid/hid-uclogic-core-test.c
59
static void fake_work(struct work_struct *work)
drivers/hid/hid-uclogic-core-test.c
85
INIT_WORK(&filter->work, fake_work);
drivers/hid/hid-uclogic-core.c
306
schedule_work(&curr->work);
drivers/hid/hid-uclogic-params.c
1325
static void uclogic_params_ugee_v2_reconnect_work(struct work_struct *work)
drivers/hid/hid-uclogic-params.c
1329
event_hook = container_of(work, struct uclogic_raw_event_hook, work);
drivers/hid/hid-uclogic-params.c
1371
INIT_WORK(&event_hook->work, uclogic_params_ugee_v2_reconnect_work);
drivers/hid/hid-uclogic-params.c
635
cancel_work_sync(&curr->work);
drivers/hid/hid-uclogic-params.h
197
struct work_struct work;
drivers/hid/hid-wiimote-core.c
1208
static void wiimote_init_worker(struct work_struct *work)
drivers/hid/hid-wiimote-core.c
1210
struct wiimote_data *wdata = container_of(work, struct wiimote_data,
drivers/hid/hid-wiimote-core.c
41
static void wiimote_queue_worker(struct work_struct *work)
drivers/hid/hid-wiimote-core.c
43
struct wiimote_queue *queue = container_of(work, struct wiimote_queue,
drivers/hid/hid-wiimote-modules.c
120
static void wiimod_rumble_worker(struct work_struct *work)
drivers/hid/hid-wiimote-modules.c
122
struct wiimote_data *wdata = container_of(work, struct wiimote_data,
drivers/hid/i2c-hid/i2c-hid-core.c
1122
static void ihid_core_panel_follower_work(struct work_struct *work)
drivers/hid/i2c-hid/i2c-hid-core.c
1124
struct i2c_hid *ihid = container_of(work, struct i2c_hid,
drivers/hid/intel-ish-hid/ipc/ipc.c
559
static void fw_reset_work_fn(struct work_struct *work)
drivers/hid/intel-ish-hid/ipc/ipc.c
569
if (!work_pending(work))
drivers/hid/intel-ish-hid/ipc/pci-ish.c
294
static void __maybe_unused ish_resume_handler(struct work_struct *work)
drivers/hid/intel-ish-hid/ishtp-fw-loader.c
827
static void load_fw_from_host_handler(struct work_struct *work)
drivers/hid/intel-ish-hid/ishtp-fw-loader.c
831
client_data = container_of(work, struct ishtp_cl_data,
drivers/hid/intel-ish-hid/ishtp-fw-loader.c
880
static void reset_handler(struct work_struct *work)
drivers/hid/intel-ish-hid/ishtp-fw-loader.c
887
client_data = container_of(work, struct ishtp_cl_data,
drivers/hid/intel-ish-hid/ishtp-hid-client.c
723
static void hid_ishtp_cl_reset_handler(struct work_struct *work)
drivers/hid/intel-ish-hid/ishtp-hid-client.c
730
client_data = container_of(work, struct ishtp_cl_data, work);
drivers/hid/intel-ish-hid/ishtp-hid-client.c
755
static void hid_ishtp_cl_resume_handler(struct work_struct *work)
drivers/hid/intel-ish-hid/ishtp-hid-client.c
757
struct ishtp_cl_data *client_data = container_of(work, struct ishtp_cl_data, resume_work);
drivers/hid/intel-ish-hid/ishtp-hid-client.c
813
INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler);
drivers/hid/intel-ish-hid/ishtp-hid-client.c
871
queue_work(ishtp_get_workqueue(cl_device), &client_data->work);
drivers/hid/intel-ish-hid/ishtp-hid.h
140
struct work_struct work;
drivers/hid/intel-ish-hid/ishtp/bus.c
529
static void ishtp_bus_event_work(struct work_struct *work)
drivers/hid/intel-ish-hid/ishtp/bus.c
533
device = container_of(work, struct ishtp_cl_device, event_work);
drivers/hid/intel-ish-hid/ishtp/hbm.c
744
void bh_hbm_work_fn(struct work_struct *work)
drivers/hid/intel-ish-hid/ishtp/hbm.c
750
dev = container_of(work, struct ishtp_device, bh_hbm_work);
drivers/hid/intel-ish-hid/ishtp/hbm.h
301
void bh_hbm_work_fn(struct work_struct *work);
drivers/hid/intel-ish-hid/ishtp/loader.c
405
void ishtp_loader_work(struct work_struct *work)
drivers/hid/intel-ish-hid/ishtp/loader.c
408
struct ishtp_device *dev = container_of(work, struct ishtp_device, work_fw_loader);
drivers/hid/intel-ish-hid/ishtp/loader.h
230
void ishtp_loader_work(struct work_struct *work);
drivers/hid/uhid.c
68
static void uhid_device_add_worker(struct work_struct *work)
drivers/hid/uhid.c
70
struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
drivers/hid/usbhid/hid-core.c
118
static void hid_reset(struct work_struct *work)
drivers/hid/usbhid/hid-core.c
121
container_of(work, struct usbhid_device, reset_work);
drivers/hid/wacom.h
244
void wacom_battery_work(struct work_struct *work);
drivers/hid/wacom_sys.c
1722
static void wacom_init_work(struct work_struct *work)
drivers/hid/wacom_sys.c
1724
struct wacom *wacom = container_of(work, struct wacom, init_work.work);
drivers/hid/wacom_sys.c
1845
static void wacom_aes_battery_handler(struct work_struct *work)
drivers/hid/wacom_sys.c
1847
struct wacom *wacom = container_of(work, struct wacom, aes_battery_work.work);
drivers/hid/wacom_sys.c
2233
void wacom_battery_work(struct work_struct *work)
drivers/hid/wacom_sys.c
2235
struct wacom *wacom = container_of(work, struct wacom, battery_work);
drivers/hid/wacom_sys.c
2505
static void wacom_wireless_work(struct work_struct *work)
drivers/hid/wacom_sys.c
2507
struct wacom *wacom = container_of(work, struct wacom, wireless_work);
drivers/hid/wacom_sys.c
2726
static void wacom_remote_work(struct work_struct *work)
drivers/hid/wacom_sys.c
2728
struct wacom *wacom = container_of(work, struct wacom, remote_work);
drivers/hid/wacom_sys.c
2778
static void wacom_mode_change_work(struct work_struct *work)
drivers/hid/wacom_sys.c
2780
struct wacom *wacom = container_of(work, struct wacom, mode_change_work);
drivers/hsi/clients/ssi_protocol.c
1022
schedule_work(&ssi->work);
drivers/hsi/clients/ssi_protocol.c
1093
INIT_WORK(&ssi->work, ssip_xmit_work);
drivers/hsi/clients/ssi_protocol.c
138
struct work_struct work;
drivers/hsi/clients/ssi_protocol.c
404
cancel_work_sync(&ssi->work);
drivers/hsi/clients/ssi_protocol.c
961
static void ssip_xmit_work(struct work_struct *work)
drivers/hsi/clients/ssi_protocol.c
964
container_of(work, struct ssi_protocol, work);
drivers/hsi/controllers/omap_ssi.h
96
struct work_struct work;
drivers/hsi/controllers/omap_ssi_port.c
1163
INIT_WORK(&omap_port->work, start_tx_work);
drivers/hsi/controllers/omap_ssi_port.c
168
static void ssi_process_errqueue(struct work_struct *work)
drivers/hsi/controllers/omap_ssi_port.c
174
omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work);
drivers/hsi/controllers/omap_ssi_port.c
575
static void start_tx_work(struct work_struct *work)
drivers/hsi/controllers/omap_ssi_port.c
578
container_of(work, struct omap_ssi_port, work);
drivers/hsi/controllers/omap_ssi_port.c
601
schedule_work(&omap_port->work);
drivers/hv/channel_mgmt.c
503
static void vmbus_add_channel_work(struct work_struct *work)
drivers/hv/channel_mgmt.c
506
container_of(work, struct vmbus_channel, add_channel_work);
drivers/hv/hv_util.c
110
INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
drivers/hv/hv_util.c
182
struct work_struct *work = NULL;
drivers/hv/hv_util.c
238
work = &shutdown_work;
drivers/hv/hv_util.c
244
work = &restart_work;
drivers/hv/hv_util.c
253
work = &hibernate_context.work;
drivers/hv/hv_util.c
273
if (work)
drivers/hv/hv_util.c
274
schedule_work(work);
drivers/hv/hv_util.c
337
static void hv_set_host_time(struct work_struct *work)
drivers/hv/hv_util.c
87
struct work_struct work;
drivers/hv/hv_util.c
94
static void send_hibernate_uevent(struct work_struct *work)
drivers/hv/hv_util.c
99
ctx = container_of(work, struct hibernate_work_context, work);
drivers/hv/mshv_eventfd.c
247
static void mshv_irqfd_shutdown(struct work_struct *work)
drivers/hv/mshv_eventfd.c
250
container_of(work, struct mshv_irqfd, irqfd_shutdown);
drivers/hv/vmbus_drv.c
1040
struct work_struct work;
drivers/hv/vmbus_drv.c
1047
static void vmbus_onmessage_work(struct work_struct *work)
drivers/hv/vmbus_drv.c
1055
ctx = container_of(work, struct onmessage_work_context,
drivers/hv/vmbus_drv.c
1056
work);
drivers/hv/vmbus_drv.c
1127
INIT_WORK(&ctx->work, vmbus_onmessage_work);
drivers/hv/vmbus_drv.c
1160
queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
drivers/hv/vmbus_drv.c
1192
queue_work(vmbus_connection.work_queue, &ctx->work);
drivers/hv/vmbus_drv.c
1240
INIT_WORK(&ctx->work, vmbus_onmessage_work);
drivers/hv/vmbus_drv.c
1242
queue_work(vmbus_connection.work_queue, &ctx->work);
drivers/hv/vmbus_drv.c
1422
static void vmbus_percpu_work(struct work_struct *work)
drivers/hv/vmbus_drv.c
1451
struct work_struct *work = per_cpu_ptr(works, cpu);
drivers/hv/vmbus_drv.c
1453
INIT_WORK(work, vmbus_percpu_work);
drivers/hv/vmbus_drv.c
1454
schedule_work_on(cpu, work);
drivers/hwmon/applesmc.c
964
static void applesmc_backlight_set(struct work_struct *work)
drivers/hwmon/lm90.c
1089
static void lm90_report_alarms(struct work_struct *work)
drivers/hwmon/lm90.c
1091
struct lm90_data *data = container_of(work, struct lm90_data, report_work);
drivers/hwmon/peci/dimmtemp.c
359
static void create_dimm_temp_info_delayed(struct work_struct *work)
drivers/hwmon/peci/dimmtemp.c
361
struct peci_dimmtemp *priv = container_of(to_delayed_work(work),
drivers/hwmon/pmbus/pmbus_core.c
3377
static void pmbus_regulator_notify_worker(struct work_struct *work)
drivers/hwmon/pmbus/pmbus_core.c
3380
container_of(work, struct pmbus_data, regulator_notify_work);
drivers/hwmon/raspberrypi-hwmon.c
144
get_values_poll(&data->get_values_poll_work.work);
drivers/hwmon/raspberrypi-hwmon.c
59
static void get_values_poll(struct work_struct *work)
drivers/hwmon/raspberrypi-hwmon.c
63
data = container_of(work, struct rpi_hwmon_data,
drivers/hwmon/raspberrypi-hwmon.c
64
get_values_poll_work.work);
drivers/hwmon/xgene-hwmon.c
430
static void xgene_hwmon_evt_work(struct work_struct *work)
drivers/hwmon/xgene-hwmon.c
436
ctx = container_of(work, struct xgene_hwmon_dev, workq);
drivers/hwtracing/coresight/coresight-etm-perf.c
215
static void free_event_data(struct work_struct *work)
drivers/hwtracing/coresight/coresight-etm-perf.c
221
event_data = container_of(work, struct etm_event_data, work);
drivers/hwtracing/coresight/coresight-etm-perf.c
296
schedule_work(&event_data->work);
drivers/hwtracing/coresight/coresight-etm-perf.c
331
INIT_WORK(&event_data->work, free_event_data);
drivers/hwtracing/coresight/coresight-etm-perf.h
95
struct work_struct work;
drivers/hwtracing/intel_th/core.c
584
static void __intel_th_request_hub_module(struct work_struct *work)
drivers/hwtracing/intel_th/core.c
586
struct intel_th *th = container_of(work, struct intel_th,
drivers/hwtracing/intel_th/msu.c
146
struct work_struct work;
drivers/hwtracing/intel_th/msu.c
1753
static void msc_work(struct work_struct *work)
drivers/hwtracing/intel_th/msu.c
1755
struct msc *msc = container_of(work, struct msc, work);
drivers/hwtracing/intel_th/msu.c
1791
schedule_work(&msc->work);
drivers/hwtracing/intel_th/msu.c
2157
INIT_WORK(&msc->work, msc_work);
drivers/hwtracing/ptt/hisi_ptt.c
1263
cancel_delayed_work_sync(&hisi_ptt->work);
drivers/hwtracing/ptt/hisi_ptt.c
549
static void hisi_ptt_update_filters(struct work_struct *work)
drivers/hwtracing/ptt/hisi_ptt.c
551
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/hwtracing/ptt/hisi_ptt.c
556
hisi_ptt = container_of(delayed_work, struct hisi_ptt, work);
drivers/hwtracing/ptt/hisi_ptt.c
559
schedule_delayed_work(&hisi_ptt->work, HISI_PTT_WORK_DELAY_MS);
drivers/hwtracing/ptt/hisi_ptt.c
651
schedule_delayed_work(&hisi_ptt->work, 0);
drivers/hwtracing/ptt/hisi_ptt.c
740
INIT_DELAYED_WORK(&hisi_ptt->work, hisi_ptt_update_filters);
drivers/hwtracing/ptt/hisi_ptt.h
249
struct delayed_work work;
drivers/i2c/busses/i2c-amd-asf-plat.c
58
static void amd_asf_process_target(struct work_struct *work)
drivers/i2c/busses/i2c-amd-asf-plat.c
60
struct amd_asf_dev *dev = container_of(work, struct amd_asf_dev, work_buf.work);
drivers/i2c/busses/i2c-designware-amdpsp.c
152
static void psp_release_i2c_bus_deferred(struct work_struct *work)
drivers/i2c/i2c-slave-testunit.c
168
static void i2c_slave_testunit_work(struct work_struct *work)
drivers/i2c/i2c-slave-testunit.c
170
struct testunit_data *tu = container_of(work, struct testunit_data, worker.work);
drivers/i2c/i2c-smbus.c
155
static void smbalert_work(struct work_struct *work)
drivers/i2c/i2c-smbus.c
159
alert = container_of(work, struct i2c_smbus_alert, alert);
drivers/i3c/master.c
2758
queue_work(dev->ibi->wq, &slot->work);
drivers/i3c/master.c
2762
static void i3c_master_handle_ibi(struct work_struct *work)
drivers/i3c/master.c
2764
struct i3c_ibi_slot *slot = container_of(work, struct i3c_ibi_slot,
drivers/i3c/master.c
2765
work);
drivers/i3c/master.c
2785
INIT_WORK(&slot->work, i3c_master_handle_ibi);
drivers/i3c/master/dw-i3c-master.c
1577
static void dw_i3c_hj_work(struct work_struct *work)
drivers/i3c/master/dw-i3c-master.c
1580
container_of(work, typeof(*master), hj_work);
drivers/i3c/master/i3c-master-cdns.c
1531
static void cdns_i3c_master_hj(struct work_struct *work)
drivers/i3c/master/i3c-master-cdns.c
1533
struct cdns_i3c_master *master = container_of(work,
drivers/i3c/master/svc-i3c-master.c
369
static void svc_i3c_master_hj_work(struct work_struct *work)
drivers/i3c/master/svc-i3c-master.c
373
master = container_of(work, struct svc_i3c_master, hj_work);
drivers/iio/accel/bmc150-accel-i2c.c
106
static void bmc150_acpi_resume_work(struct work_struct *work)
drivers/iio/accel/bmc150-accel-i2c.c
109
container_of(work, struct bmc150_accel_data, resume_work.work);
drivers/iio/adc/ad4062.c
600
static void ad4062_trigger_work(struct work_struct *work)
drivers/iio/adc/ad4062.c
603
container_of(work, struct ad4062_state, trig_conv);
drivers/iio/adc/envelope-detector.c
171
static void envelope_detector_timeout(struct work_struct *work)
drivers/iio/adc/envelope-detector.c
173
struct envelope *env = container_of(work, struct envelope,
drivers/iio/adc/envelope-detector.c
174
comp_timeout.work);
drivers/iio/adc/pac1934.c
1022
static void pac1934_work_periodic_rfsh(struct work_struct *work)
drivers/iio/adc/pac1934.c
1024
struct pac1934_chip_info *info = TO_PAC1934_CHIP_INFO((struct delayed_work *)work);
drivers/iio/adc/rohm-bd79124.c
781
static void bd79124_alm_enable_worker(struct work_struct *work)
drivers/iio/adc/rohm-bd79124.c
784
struct bd79124_data *data = container_of(work, struct bd79124_data,
drivers/iio/adc/rohm-bd79124.c
785
alm_enable_work.work);
drivers/iio/adc/xilinx-ams.c
1059
static void ams_unmask_worker(struct work_struct *work)
drivers/iio/adc/xilinx-ams.c
1061
struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
drivers/iio/adc/xilinx-xadc-core.c
1308
struct delayed_work *work = data;
drivers/iio/adc/xilinx-xadc-core.c
1310
cancel_delayed_work_sync(work);
drivers/iio/adc/xilinx-xadc-core.c
263
static void xadc_zynq_unmask_worker(struct work_struct *work)
drivers/iio/adc/xilinx-xadc-core.c
265
struct xadc *xadc = container_of(work, struct xadc, zynq_unmask_work.work);
drivers/iio/buffer/industrialio-buffer-dma.c
134
static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
drivers/iio/chemical/atlas-sensor.c
440
static void atlas_work_handler(struct irq_work *work)
drivers/iio/chemical/atlas-sensor.c
442
struct atlas_data *data = container_of(work, struct atlas_data, work);
drivers/iio/chemical/atlas-sensor.c
473
irq_work_queue(&data->work);
drivers/iio/chemical/atlas-sensor.c
669
init_irq_work(&data->work, atlas_work_handler);
drivers/iio/chemical/atlas-sensor.c
91
struct irq_work work;
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
180
static void hid_sensor_set_power_work(struct work_struct *work)
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
182
struct hid_sensor_common *attrb = container_of(work,
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
184
work);
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
229
cancel_work_sync(&attrb->work);
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
290
INIT_WORK(&attrb->work, hid_sensor_set_power_work);
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
317
schedule_work(&attrb->work);
drivers/iio/common/ssp_sensors/ssp_dev.c
155
static void ssp_wdt_work_func(struct work_struct *work)
drivers/iio/common/ssp_sensors/ssp_dev.c
157
struct ssp_data *data = container_of(work, struct ssp_data, work_wdt);
drivers/iio/common/ssp_sensors/ssp_dev.c
398
static void ssp_refresh_task(struct work_struct *work)
drivers/iio/common/ssp_sensors/ssp_dev.c
400
struct ssp_data *data = container_of((struct delayed_work *)work,
drivers/iio/industrialio-buffer.c
1950
static void iio_buffer_cleanup(struct work_struct *work)
drivers/iio/industrialio-buffer.c
1953
container_of(work, struct iio_dma_fence, work);
drivers/iio/industrialio-buffer.c
1982
INIT_WORK(&iio_fence->work, iio_buffer_cleanup);
drivers/iio/industrialio-buffer.c
1983
schedule_work(&iio_fence->work);
drivers/iio/industrialio-buffer.c
62
struct work_struct work;
drivers/iio/industrialio-trigger.c
156
static void iio_reenable_work_fn(struct work_struct *work)
drivers/iio/industrialio-trigger.c
158
struct iio_trigger *trig = container_of(work, struct iio_trigger,
drivers/iio/light/gp2ap020a00f.c
1545
init_irq_work(&data->work, gp2ap020a00f_iio_trigger_work);
drivers/iio/light/gp2ap020a00f.c
250
struct irq_work work;
drivers/iio/light/gp2ap020a00f.c
822
static void gp2ap020a00f_iio_trigger_work(struct irq_work *work)
drivers/iio/light/gp2ap020a00f.c
825
container_of(work, struct gp2ap020a00f_data, work);
drivers/iio/light/gp2ap020a00f.c
954
irq_work_queue(&priv->work);
drivers/iio/light/tsl2563.c
176
static void tsl2563_poweroff_work(struct work_struct *work)
drivers/iio/light/tsl2563.c
179
container_of(work, struct tsl2563_chip, poweroff_work.work);
drivers/iio/proximity/as3935.c
242
static void as3935_event_work(struct work_struct *work)
drivers/iio/proximity/as3935.c
248
st = container_of(work, struct as3935_state, work.work);
drivers/iio/proximity/as3935.c
282
schedule_delayed_work(&st->work, msecs_to_jiffies(3));
drivers/iio/proximity/as3935.c
422
ret = devm_delayed_work_autocancel(dev, &st->work, as3935_event_work);
drivers/iio/proximity/as3935.c
58
struct delayed_work work;
drivers/iio/trigger/iio-trig-sysfs.c
106
irq_work_queue(&sysfs_trig->work);
drivers/iio/trigger/iio-trig-sysfs.c
158
t->work = IRQ_WORK_INIT_HARD(iio_sysfs_trigger_work);
drivers/iio/trigger/iio-trig-sysfs.c
18
struct irq_work work;
drivers/iio/trigger/iio-trig-sysfs.c
193
irq_work_sync(&t->work);
drivers/iio/trigger/iio-trig-sysfs.c
92
static void iio_sysfs_trigger_work(struct irq_work *work)
drivers/iio/trigger/iio-trig-sysfs.c
94
struct iio_sysfs_trig *trig = container_of(work, struct iio_sysfs_trig,
drivers/iio/trigger/iio-trig-sysfs.c
95
work);
drivers/infiniband/core/addr.c
293
mod_delayed_work(addr_wq, &req->work, delay);
drivers/infiniband/core/addr.c
602
req = container_of(_work, struct addr_req, work.work);
drivers/infiniband/core/addr.c
631
cancel_delayed_work(&req->work);
drivers/infiniband/core/addr.c
64
struct delayed_work work;
drivers/infiniband/core/addr.c
672
INIT_DELAYED_WORK(&req->work, process_one_req);
drivers/infiniband/core/addr.c
772
cancel_delayed_work_sync(&found->work);
drivers/infiniband/core/cache.c
1537
struct ib_update_work *work =
drivers/infiniband/core/cache.c
1538
container_of(_work, struct ib_update_work, work);
drivers/infiniband/core/cache.c
1544
ret = ib_cache_update(work->event.device, work->event.element.port_num,
drivers/infiniband/core/cache.c
1545
work->event.event == IB_EVENT_GID_CHANGE ||
drivers/infiniband/core/cache.c
1546
work->event.event == IB_EVENT_CLIENT_REREGISTER,
drivers/infiniband/core/cache.c
1547
work->event.event == IB_EVENT_PKEY_CHANGE,
drivers/infiniband/core/cache.c
1548
work->enforce_security);
drivers/infiniband/core/cache.c
1554
if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
drivers/infiniband/core/cache.c
1555
ib_dispatch_event_clients(&work->event);
drivers/infiniband/core/cache.c
1557
kfree(work);
drivers/infiniband/core/cache.c
1562
struct ib_update_work *work =
drivers/infiniband/core/cache.c
1563
container_of(_work, struct ib_update_work, work);
drivers/infiniband/core/cache.c
1565
ib_dispatch_event_clients(&work->event);
drivers/infiniband/core/cache.c
1566
kfree(work);
drivers/infiniband/core/cache.c
1589
struct ib_update_work *work;
drivers/infiniband/core/cache.c
1591
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/infiniband/core/cache.c
1592
if (!work)
drivers/infiniband/core/cache.c
1596
INIT_WORK(&work->work, ib_cache_event_task);
drivers/infiniband/core/cache.c
1598
INIT_WORK(&work->work, ib_generic_event_task);
drivers/infiniband/core/cache.c
1600
work->event = *event;
drivers/infiniband/core/cache.c
1603
work->enforce_security = true;
drivers/infiniband/core/cache.c
1605
queue_work(ib_wq, &work->work);
drivers/infiniband/core/cache.c
280
static void free_gid_work(struct work_struct *work)
drivers/infiniband/core/cache.c
283
container_of(work, struct ib_gid_table_entry, del_work);
drivers/infiniband/core/cache.c
53
struct work_struct work;
drivers/infiniband/core/cm.c
1017
queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
drivers/infiniband/core/cm.c
1025
BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
drivers/infiniband/core/cm.c
1060
struct cm_work *work;
drivers/infiniband/core/cm.c
1177
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
drivers/infiniband/core/cm.c
1178
cm_free_work(work);
drivers/infiniband/core/cm.c
1771
static u16 cm_get_bth_pkey(struct cm_work *work)
drivers/infiniband/core/cm.c
1773
struct ib_device *ib_dev = work->port->cm_dev->ib_device;
drivers/infiniband/core/cm.c
1774
u32 port_num = work->port->port_num;
drivers/infiniband/core/cm.c
1775
u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
drivers/infiniband/core/cm.c
1798
static void cm_opa_to_ib_sgid(struct cm_work *work,
drivers/infiniband/core/cm.c
1801
struct ib_device *dev = work->port->cm_dev->ib_device;
drivers/infiniband/core/cm.c
1802
u32 port_num = work->port->port_num;
drivers/infiniband/core/cm.c
1818
static void cm_format_req_event(struct cm_work *work,
drivers/infiniband/core/cm.c
1825
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
1826
param = &work->cm_event.param.req_rcvd;
drivers/infiniband/core/cm.c
1828
param->bth_pkey = cm_get_bth_pkey(work);
drivers/infiniband/core/cm.c
1830
param->primary_path = &work->path[0];
drivers/infiniband/core/cm.c
1831
cm_opa_to_ib_sgid(work, param->primary_path);
drivers/infiniband/core/cm.c
1833
param->alternate_path = &work->path[1];
drivers/infiniband/core/cm.c
1834
cm_opa_to_ib_sgid(work, param->alternate_path);
drivers/infiniband/core/cm.c
1858
work->cm_event.private_data =
drivers/infiniband/core/cm.c
1863
struct cm_work *work)
drivers/infiniband/core/cm.c
1868
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
drivers/infiniband/core/cm.c
1869
cm_free_work(work);
drivers/infiniband/core/cm.c
187
struct delayed_work work;
drivers/infiniband/core/cm.c
1873
work = cm_dequeue_work(cm_id_priv);
drivers/infiniband/core/cm.c
1875
if (!work)
drivers/infiniband/core/cm.c
1879
&work->cm_event);
drivers/infiniband/core/cm.c
1880
cm_free_work(work);
drivers/infiniband/core/cm.c
1952
static void cm_dup_req_handler(struct cm_work *work,
drivers/infiniband/core/cm.c
1959
&work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]);
drivers/infiniband/core/cm.c
1969
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg);
drivers/infiniband/core/cm.c
198
struct cm_work work;
drivers/infiniband/core/cm.c
2001
static struct cm_id_private *cm_match_req(struct cm_work *work,
drivers/infiniband/core/cm.c
2008
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
2014
cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
drivers/infiniband/core/cm.c
2015
timewait_info->work.remote_id);
drivers/infiniband/core/cm.c
2018
cm_dup_req_handler(work, cur_cm_id_priv);
drivers/infiniband/core/cm.c
2028
cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
drivers/infiniband/core/cm.c
2029
timewait_info->work.remote_id);
drivers/infiniband/core/cm.c
2032
cm_issue_rej(work->port, work->mad_recv_wc,
drivers/infiniband/core/cm.c
2049
cm_issue_rej(work->port, work->mad_recv_wc,
drivers/infiniband/core/cm.c
2094
static int cm_req_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
2102
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
2105
cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
drivers/infiniband/core/cm.c
2130
ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
drivers/infiniband/core/cm.c
2131
work->mad_recv_wc->recv_buf.grh,
drivers/infiniband/core/cm.c
2142
cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
drivers/infiniband/core/cm.c
2153
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
drivers/infiniband/core/cm.c
2161
memset(&work->path[0], 0, sizeof(work->path[0]));
drivers/infiniband/core/cm.c
2163
memset(&work->path[1], 0, sizeof(work->path[1]));
drivers/infiniband/core/cm.c
2168
work->path[0].rec_type =
drivers/infiniband/core/cm.c
2171
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
drivers/infiniband/core/cm.c
2173
work->port->cm_dev->ib_device, work->port->port_num,
drivers/infiniband/core/cm.c
2174
&work->path[0],
drivers/infiniband/core/cm.c
2179
work->path[1].rec_type = work->path[0].rec_type;
drivers/infiniband/core/cm.c
2180
cm_format_paths_from_req(req_msg, &work->path[0],
drivers/infiniband/core/cm.c
2181
&work->path[1], work->mad_recv_wc->wc);
drivers/infiniband/core/cm.c
2183
sa_path_set_dmac(&work->path[0],
drivers/infiniband/core/cm.c
2185
work->path[0].hop_limit = grh->hop_limit;
drivers/infiniband/core/cm.c
2189
ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av);
drivers/infiniband/core/cm.c
2193
err = rdma_query_gid(work->port->cm_dev->ib_device,
drivers/infiniband/core/cm.c
2194
work->port->port_num, 0,
drivers/infiniband/core/cm.c
2195
&work->path[0].sgid);
drivers/infiniband/core/cm.c
2201
&work->path[0].sgid,
drivers/infiniband/core/cm.c
2202
sizeof(work->path[0].sgid),
drivers/infiniband/core/cm.c
2211
ret = cm_init_av_by_path(&work->path[1], NULL,
drivers/infiniband/core/cm.c
2216
&work->path[0].sgid,
drivers/infiniband/core/cm.c
2217
sizeof(work->path[0].sgid), NULL, 0);
drivers/infiniband/core/cm.c
2224
cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
drivers/infiniband/core/cm.c
2232
cm_queue_work_unlock(cm_id_priv, work);
drivers/infiniband/core/cm.c
2416
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
drivers/infiniband/core/cm.c
2421
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
2422
param = &work->cm_event.param.rep_rcvd;
drivers/infiniband/core/cm.c
2440
work->cm_event.private_data =
drivers/infiniband/core/cm.c
2444
static void cm_dup_rep_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
2451
rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
2459
&work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]);
drivers/infiniband/core/cm.c
2460
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg);
drivers/infiniband/core/cm.c
2489
static int cm_rep_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
2497
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
2501
cm_dup_rep_handler(work);
drivers/infiniband/core/cm.c
2507
cm_format_rep_event(work, cm_id_priv->qp_type);
drivers/infiniband/core/cm.c
2524
cm_id_priv->timewait_info->work.remote_id =
drivers/infiniband/core/cm.c
2544
cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
drivers/infiniband/core/cm.c
2545
timewait_info->work.remote_id);
drivers/infiniband/core/cm.c
2549
cm_issue_rej(work->port, work->mad_recv_wc,
drivers/infiniband/core/cm.c
2586
cm_queue_work_unlock(cm_id_priv, work);
drivers/infiniband/core/cm.c
2594
static int cm_establish_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
2599
cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
drivers/infiniband/core/cm.c
2610
cm_queue_work_unlock(cm_id_priv, work);
drivers/infiniband/core/cm.c
2617
static int cm_rtu_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
2622
rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
2629
work->cm_event.private_data =
drivers/infiniband/core/cm.c
2636
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
drivers/infiniband/core/cm.c
2643
cm_queue_work_unlock(cm_id_priv, work);
drivers/infiniband/core/cm.c
269
static void cm_work_handler(struct work_struct *work);
drivers/infiniband/core/cm.c
2839
static int cm_dreq_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
2845
dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
2850
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
drivers/infiniband/core/cm.c
2852
cm_issue_drep(work->port, work->mad_recv_wc);
drivers/infiniband/core/cm.c
2859
work->cm_event.private_data =
drivers/infiniband/core/cm.c
2879
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
drivers/infiniband/core/cm.c
2881
msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc,
drivers/infiniband/core/cm.c
2891
if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
drivers/infiniband/core/cm.c
2896
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
drivers/infiniband/core/cm.c
2905
cm_queue_work_unlock(cm_id_priv, work);
drivers/infiniband/core/cm.c
2913
static int cm_drep_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
2918
drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
2925
work->cm_event.private_data =
drivers/infiniband/core/cm.c
2937
cm_queue_work_unlock(cm_id_priv, work);
drivers/infiniband/core/cm.c
3017
static void cm_format_rej_event(struct cm_work *work)
drivers/infiniband/core/cm.c
3022
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
3023
param = &work->cm_event.param.rej_rcvd;
drivers/infiniband/core/cm.c
3027
work->cm_event.private_data =
drivers/infiniband/core/cm.c
3055
static int cm_rej_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
3060
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
3065
cm_format_rej_event(work);
drivers/infiniband/core/cm.c
3104
cm_queue_work_unlock(cm_id_priv, work);
drivers/infiniband/core/cm.c
3171
static int cm_mra_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
3177
mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
3182
work->cm_event.private_data =
drivers/infiniband/core/cm.c
3184
work->cm_event.param.mra_rcvd.service_timeout =
drivers/infiniband/core/cm.c
3212
&work->port->counters[CM_RECV_DUPLICATES]
drivers/infiniband/core/cm.c
3220
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
drivers/infiniband/core/cm.c
3230
cm_queue_work_unlock(cm_id_priv, work);
drivers/infiniband/core/cm.c
3284
static int cm_lap_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
3297
if (rdma_protocol_roce(work->port->cm_dev->ib_device,
drivers/infiniband/core/cm.c
3298
work->port->port_num))
drivers/infiniband/core/cm.c
3302
lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
3309
param = &work->cm_event.param.lap_rcvd;
drivers/infiniband/core/cm.c
3310
memset(&work->path[0], 0, sizeof(work->path[1]));
drivers/infiniband/core/cm.c
3311
cm_path_set_rec_type(work->port->cm_dev->ib_device,
drivers/infiniband/core/cm.c
3312
work->port->port_num, &work->path[0],
drivers/infiniband/core/cm.c
3315
param->alternate_path = &work->path[0];
drivers/infiniband/core/cm.c
3317
work->cm_event.private_data =
drivers/infiniband/core/cm.c
3320
ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device,
drivers/infiniband/core/cm.c
3321
work->port->port_num,
drivers/infiniband/core/cm.c
3322
work->mad_recv_wc->wc,
drivers/infiniband/core/cm.c
3323
work->mad_recv_wc->recv_buf.grh,
drivers/infiniband/core/cm.c
3335
cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
drivers/infiniband/core/cm.c
3347
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
drivers/infiniband/core/cm.c
3349
msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc,
drivers/infiniband/core/cm.c
3360
if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
drivers/infiniband/core/cm.c
3365
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
drivers/infiniband/core/cm.c
3374
cm_queue_work_unlock(cm_id_priv, work);
drivers/infiniband/core/cm.c
3382
static int cm_apr_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
3390
if (rdma_protocol_roce(work->port->cm_dev->ib_device,
drivers/infiniband/core/cm.c
3391
work->port->port_num))
drivers/infiniband/core/cm.c
3394
apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
3401
work->cm_event.param.apr_rcvd.ap_status =
drivers/infiniband/core/cm.c
3403
work->cm_event.param.apr_rcvd.apr_info =
drivers/infiniband/core/cm.c
3405
work->cm_event.param.apr_rcvd.info_len =
drivers/infiniband/core/cm.c
3407
work->cm_event.private_data =
drivers/infiniband/core/cm.c
3419
cm_queue_work_unlock(cm_id_priv, work);
drivers/infiniband/core/cm.c
3426
static int cm_timewait_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
3431
timewait_info = container_of(work, struct cm_timewait_info, work);
drivers/infiniband/core/cm.c
3436
cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
drivers/infiniband/core/cm.c
3437
timewait_info->work.remote_id);
drivers/infiniband/core/cm.c
3448
cm_queue_work_unlock(cm_id_priv, work);
drivers/infiniband/core/cm.c
3525
static void cm_format_sidr_req_event(struct cm_work *work,
drivers/infiniband/core/cm.c
3533
work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
3534
param = &work->cm_event.param.sidr_req_rcvd;
drivers/infiniband/core/cm.c
3539
param->bth_pkey = cm_get_bth_pkey(work);
drivers/infiniband/core/cm.c
3540
param->port = work->port->port_num;
drivers/infiniband/core/cm.c
3542
work->cm_event.private_data =
drivers/infiniband/core/cm.c
3546
static int cm_sidr_req_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
3554
cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
drivers/infiniband/core/cm.c
3560
work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
3568
wc = work->mad_recv_wc->wc;
drivers/infiniband/core/cm.c
3570
ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
drivers/infiniband/core/cm.c
3571
work->mad_recv_wc->recv_buf.grh,
drivers/infiniband/core/cm.c
3580
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
drivers/infiniband/core/cm.c
3605
cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
drivers/infiniband/core/cm.c
3606
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
drivers/infiniband/core/cm.c
3607
cm_free_work(work);
drivers/infiniband/core/cm.c
3702
static void cm_format_sidr_rep_event(struct cm_work *work,
drivers/infiniband/core/cm.c
3709
work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
3710
param = &work->cm_event.param.sidr_rep_rcvd;
drivers/infiniband/core/cm.c
3719
work->cm_event.private_data =
drivers/infiniband/core/cm.c
3723
static int cm_sidr_rep_handler(struct cm_work *work)
drivers/infiniband/core/cm.c
3729
work->mad_recv_wc->recv_buf.mad;
drivers/infiniband/core/cm.c
3744
cm_format_sidr_rep_event(work, cm_id_priv);
drivers/infiniband/core/cm.c
3745
cm_process_work(cm_id_priv, work);
drivers/infiniband/core/cm.c
3841
struct cm_work *work = container_of(_work, struct cm_work, work.work);
drivers/infiniband/core/cm.c
3844
switch (work->cm_event.event) {
drivers/infiniband/core/cm.c
3846
ret = cm_req_handler(work);
drivers/infiniband/core/cm.c
3849
ret = cm_mra_handler(work);
drivers/infiniband/core/cm.c
3852
ret = cm_rej_handler(work);
drivers/infiniband/core/cm.c
3855
ret = cm_rep_handler(work);
drivers/infiniband/core/cm.c
3858
ret = cm_rtu_handler(work);
drivers/infiniband/core/cm.c
3861
ret = cm_establish_handler(work);
drivers/infiniband/core/cm.c
3864
ret = cm_dreq_handler(work);
drivers/infiniband/core/cm.c
3867
ret = cm_drep_handler(work);
drivers/infiniband/core/cm.c
3870
ret = cm_sidr_req_handler(work);
drivers/infiniband/core/cm.c
3873
ret = cm_sidr_rep_handler(work);
drivers/infiniband/core/cm.c
3876
ret = cm_lap_handler(work);
drivers/infiniband/core/cm.c
3879
ret = cm_apr_handler(work);
drivers/infiniband/core/cm.c
3882
ret = cm_timewait_handler(work);
drivers/infiniband/core/cm.c
3885
trace_icm_handler_err(work->cm_event.event);
drivers/infiniband/core/cm.c
3890
cm_free_work(work);
drivers/infiniband/core/cm.c
3896
struct cm_work *work;
drivers/infiniband/core/cm.c
3905
work = kmalloc_obj(*work, GFP_ATOMIC);
drivers/infiniband/core/cm.c
3906
if (!work)
drivers/infiniband/core/cm.c
3927
kfree(work);
drivers/infiniband/core/cm.c
3937
INIT_DELAYED_WORK(&work->work, cm_work_handler);
drivers/infiniband/core/cm.c
3938
work->local_id = cm_id->local_id;
drivers/infiniband/core/cm.c
3939
work->remote_id = cm_id->remote_id;
drivers/infiniband/core/cm.c
3940
work->mad_recv_wc = NULL;
drivers/infiniband/core/cm.c
3941
work->cm_event.event = IB_CM_USER_ESTABLISHED;
drivers/infiniband/core/cm.c
3946
queue_delayed_work(cm.wq, &work->work, 0);
drivers/infiniband/core/cm.c
3948
kfree(work);
drivers/infiniband/core/cm.c
4000
struct cm_work *work;
drivers/infiniband/core/cm.c
4053
work = kmalloc_flex(*work, path, paths);
drivers/infiniband/core/cm.c
4054
if (!work) {
drivers/infiniband/core/cm.c
4059
INIT_DELAYED_WORK(&work->work, cm_work_handler);
drivers/infiniband/core/cm.c
4060
work->cm_event.event = event;
drivers/infiniband/core/cm.c
4061
work->mad_recv_wc = mad_recv_wc;
drivers/infiniband/core/cm.c
4062
work->port = port;
drivers/infiniband/core/cm.c
4067
queue_delayed_work(cm.wq, &work->work, 0);
drivers/infiniband/core/cm.c
4073
kfree(work);
drivers/infiniband/core/cm.c
4542
cancel_delayed_work(&timewait_info->work.work);
drivers/infiniband/core/cm.c
710
__be32 remote_id = timewait_info->work.remote_id;
drivers/infiniband/core/cm.c
716
if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
drivers/infiniband/core/cm.c
718
else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
drivers/infiniband/core/cm.c
744
if (be32_lt(remote_id, timewait_info->work.remote_id))
drivers/infiniband/core/cm.c
746
else if (be32_gt(remote_id, timewait_info->work.remote_id))
drivers/infiniband/core/cm.c
753
res = cm_acquire_id(timewait_info->work.local_id,
drivers/infiniband/core/cm.c
754
timewait_info->work.remote_id);
drivers/infiniband/core/cm.c
888
struct cm_work *work;
drivers/infiniband/core/cm.c
893
work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
drivers/infiniband/core/cm.c
894
list_del(&work->list);
drivers/infiniband/core/cm.c
895
return work;
drivers/infiniband/core/cm.c
898
static void cm_free_work(struct cm_work *work)
drivers/infiniband/core/cm.c
900
if (work->mad_recv_wc)
drivers/infiniband/core/cm.c
901
ib_free_recv_mad(work->mad_recv_wc);
drivers/infiniband/core/cm.c
902
kfree(work);
drivers/infiniband/core/cm.c
906
struct cm_work *work)
drivers/infiniband/core/cm.c
920
list_add_tail(&work->list, &cm_id_priv->work_list);
drivers/infiniband/core/cm.c
932
cm_process_work(cm_id_priv, work);
drivers/infiniband/core/cm.c
95
struct cm_work *work);
drivers/infiniband/core/cm.c
983
timewait_info->work.local_id = local_id;
drivers/infiniband/core/cm.c
984
INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
drivers/infiniband/core/cm.c
985
timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
drivers/infiniband/core/cma.c
2038
cancel_work_sync(&mc->iboe_join.work);
drivers/infiniband/core/cma.c
2880
static int route_set_path_rec_inbound(struct cma_work *work,
drivers/infiniband/core/cma.c
2883
struct rdma_route *route = &work->id->id.route;
drivers/infiniband/core/cma.c
2895
static int route_set_path_rec_outbound(struct cma_work *work,
drivers/infiniband/core/cma.c
2898
struct rdma_route *route = &work->id->id.route;
drivers/infiniband/core/cma.c
2913
struct cma_work *work = context;
drivers/infiniband/core/cma.c
2917
route = &work->id->id.route;
drivers/infiniband/core/cma.c
2926
status = route_set_path_rec_inbound(work, &path_rec[i]);
drivers/infiniband/core/cma.c
2928
status = route_set_path_rec_outbound(work,
drivers/infiniband/core/cma.c
2938
queue_work(cma_wq, &work->work);
drivers/infiniband/core/cma.c
2942
work->old_state = RDMA_CM_ROUTE_QUERY;
drivers/infiniband/core/cma.c
2943
work->new_state = RDMA_CM_ADDR_RESOLVED;
drivers/infiniband/core/cma.c
2944
work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
drivers/infiniband/core/cma.c
2945
work->event.status = status;
drivers/infiniband/core/cma.c
2948
queue_work(cma_wq, &work->work);
drivers/infiniband/core/cma.c
2952
unsigned long timeout_ms, struct cma_work *work)
drivers/infiniband/core/cma.c
2999
work, &id_priv->query);
drivers/infiniband/core/cma.c
3004
static void cma_iboe_join_work_handler(struct work_struct *work)
drivers/infiniband/core/cma.c
3007
container_of(work, struct cma_multicast, iboe_join.work);
drivers/infiniband/core/cma.c
3028
struct cma_work *work = container_of(_work, struct cma_work, work);
drivers/infiniband/core/cma.c
3029
struct rdma_id_private *id_priv = work->id;
drivers/infiniband/core/cma.c
3035
if (work->old_state != 0 || work->new_state != 0) {
drivers/infiniband/core/cma.c
3036
if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
drivers/infiniband/core/cma.c
3040
if (cma_cm_event_handler(id_priv, &work->event)) {
drivers/infiniband/core/cma.c
3050
if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
drivers/infiniband/core/cma.c
3051
rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
drivers/infiniband/core/cma.c
3052
kfree(work);
drivers/infiniband/core/cma.c
3055
static void cma_init_resolve_route_work(struct cma_work *work,
drivers/infiniband/core/cma.c
3058
work->id = id_priv;
drivers/infiniband/core/cma.c
3059
INIT_WORK(&work->work, cma_work_handler);
drivers/infiniband/core/cma.c
3060
work->old_state = RDMA_CM_ROUTE_QUERY;
drivers/infiniband/core/cma.c
3061
work->new_state = RDMA_CM_ROUTE_RESOLVED;
drivers/infiniband/core/cma.c
3062
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
drivers/infiniband/core/cma.c
3065
static void enqueue_resolve_addr_work(struct cma_work *work,
drivers/infiniband/core/cma.c
3071
work->id = id_priv;
drivers/infiniband/core/cma.c
3072
INIT_WORK(&work->work, cma_work_handler);
drivers/infiniband/core/cma.c
3073
work->old_state = RDMA_CM_ADDR_QUERY;
drivers/infiniband/core/cma.c
3074
work->new_state = RDMA_CM_ADDR_RESOLVED;
drivers/infiniband/core/cma.c
3075
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
drivers/infiniband/core/cma.c
3077
queue_work(cma_wq, &work->work);
drivers/infiniband/core/cma.c
3084
struct cma_work *work;
drivers/infiniband/core/cma.c
3087
work = kzalloc_obj(*work);
drivers/infiniband/core/cma.c
3088
if (!work)
drivers/infiniband/core/cma.c
3091
cma_init_resolve_route_work(work, id_priv);
drivers/infiniband/core/cma.c
3100
ret = cma_query_ib_route(id_priv, timeout_ms, work);
drivers/infiniband/core/cma.c
3109
kfree(work);
drivers/infiniband/core/cma.c
3206
struct cma_work *work;
drivers/infiniband/core/cma.c
3208
work = kzalloc_obj(*work);
drivers/infiniband/core/cma.c
3209
if (!work)
drivers/infiniband/core/cma.c
3212
cma_init_resolve_route_work(work, id_priv);
drivers/infiniband/core/cma.c
3213
queue_work(cma_wq, &work->work);
drivers/infiniband/core/cma.c
3303
struct cma_work *work;
drivers/infiniband/core/cma.c
3315
work = kzalloc_obj(*work);
drivers/infiniband/core/cma.c
3316
if (!work)
drivers/infiniband/core/cma.c
3377
cma_init_resolve_route_work(work, id_priv);
drivers/infiniband/core/cma.c
3378
queue_work(cma_wq, &work->work);
drivers/infiniband/core/cma.c
3387
kfree(work);
drivers/infiniband/core/cma.c
347
struct work_struct work;
drivers/infiniband/core/cma.c
3560
struct cma_work *work;
drivers/infiniband/core/cma.c
3564
work = kzalloc_obj(*work);
drivers/infiniband/core/cma.c
3565
if (!work)
drivers/infiniband/core/cma.c
3577
enqueue_resolve_addr_work(work, id_priv);
drivers/infiniband/core/cma.c
358
struct work_struct work;
drivers/infiniband/core/cma.c
3580
kfree(work);
drivers/infiniband/core/cma.c
3586
struct cma_work *work;
drivers/infiniband/core/cma.c
3589
work = kzalloc_obj(*work);
drivers/infiniband/core/cma.c
3590
if (!work)
drivers/infiniband/core/cma.c
3602
enqueue_resolve_addr_work(work, id_priv);
drivers/infiniband/core/cma.c
3605
kfree(work);
drivers/infiniband/core/cma.c
5078
INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
drivers/infiniband/core/cma.c
5080
queue_work(cma_wq, &mc->iboe_join.work);
drivers/infiniband/core/cma.c
5161
struct cma_work *work;
drivers/infiniband/core/cma.c
5170
work = kzalloc_obj(*work);
drivers/infiniband/core/cma.c
5171
if (!work)
drivers/infiniband/core/cma.c
5174
INIT_WORK(&work->work, cma_work_handler);
drivers/infiniband/core/cma.c
5175
work->id = id_priv;
drivers/infiniband/core/cma.c
5176
work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
drivers/infiniband/core/cma.c
5178
queue_work(cma_wq, &work->work);
drivers/infiniband/core/cma.c
5555
struct cma_work *work = context;
drivers/infiniband/core/cma.c
5556
struct rdma_id_private *id_priv = work->id;
drivers/infiniband/core/cma.c
5592
queue_work(cma_wq, &work->work);
drivers/infiniband/core/cma.c
5596
work->old_state = RDMA_CM_ADDRINFO_QUERY;
drivers/infiniband/core/cma.c
5597
work->new_state = RDMA_CM_ADDR_BOUND;
drivers/infiniband/core/cma.c
5598
work->event.event = RDMA_CM_EVENT_ADDRINFO_ERROR;
drivers/infiniband/core/cma.c
5599
work->event.status = status;
drivers/infiniband/core/cma.c
5603
queue_work(cma_wq, &work->work);
drivers/infiniband/core/cma.c
5611
struct cma_work *work;
drivers/infiniband/core/cma.c
5613
work = kzalloc_obj(*work);
drivers/infiniband/core/cma.c
5614
if (!work)
drivers/infiniband/core/cma.c
5619
work->id = id_priv;
drivers/infiniband/core/cma.c
5620
INIT_WORK(&work->work, cma_work_handler);
drivers/infiniband/core/cma.c
5621
work->old_state = RDMA_CM_ADDRINFO_QUERY;
drivers/infiniband/core/cma.c
5622
work->new_state = RDMA_CM_ADDRINFO_RESOLVED;
drivers/infiniband/core/cma.c
5623
work->event.event = RDMA_CM_EVENT_ADDRINFO_RESOLVED;
drivers/infiniband/core/cma.c
5640
work, &id_priv->query);
drivers/infiniband/core/cma.c
5644
kfree(work);
drivers/infiniband/core/cq.c
178
static void ib_cq_poll_work(struct work_struct *work)
drivers/infiniband/core/cq.c
180
struct ib_cq *cq = container_of(work, struct ib_cq, work);
drivers/infiniband/core/cq.c
187
queue_work(cq->comp_wq, &cq->work);
drivers/infiniband/core/cq.c
195
queue_work(cq->comp_wq, &cq->work);
drivers/infiniband/core/cq.c
259
INIT_WORK(&cq->work, ib_cq_poll_work);
drivers/infiniband/core/cq.c
340
cancel_work_sync(&cq->work);
drivers/infiniband/core/cq.c
41
struct dim *dim = container_of(w, struct dim, work);
drivers/infiniband/core/cq.c
71
INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
drivers/infiniband/core/cq.c
79
cancel_work_sync(&cq->dim->work);
drivers/infiniband/core/device.c
1634
static void ib_unregister_work(struct work_struct *work)
drivers/infiniband/core/device.c
1637
container_of(work, struct ib_device, unregistration_work);
drivers/infiniband/core/device.c
215
static void ib_unregister_work(struct work_struct *work);
drivers/infiniband/core/device.c
219
static void ib_policy_change_task(struct work_struct *work);
drivers/infiniband/core/device.c
890
static void ib_policy_change_task(struct work_struct *work)
drivers/infiniband/core/iwcm.c
1016
struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
drivers/infiniband/core/iwcm.c
1018
struct iwcm_id_private *cm_id_priv = work->cm_id;
drivers/infiniband/core/iwcm.c
1023
levent = work->event;
drivers/infiniband/core/iwcm.c
1024
put_work(work);
drivers/infiniband/core/iwcm.c
1055
struct iwcm_work *work;
drivers/infiniband/core/iwcm.c
1063
work = get_work(cm_id_priv);
drivers/infiniband/core/iwcm.c
1064
if (!work) {
drivers/infiniband/core/iwcm.c
1069
INIT_WORK(&work->work, cm_work_handler);
drivers/infiniband/core/iwcm.c
1070
work->cm_id = cm_id_priv;
drivers/infiniband/core/iwcm.c
1071
work->event = *iw_event;
drivers/infiniband/core/iwcm.c
1073
if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
drivers/infiniband/core/iwcm.c
1074
work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
drivers/infiniband/core/iwcm.c
1075
work->event.private_data_len) {
drivers/infiniband/core/iwcm.c
1076
ret = copy_private_data(&work->event);
drivers/infiniband/core/iwcm.c
1078
put_work(work);
drivers/infiniband/core/iwcm.c
1084
queue_work(iwcm_wq, &work->work);
drivers/infiniband/core/iwcm.c
143
struct iwcm_work *work;
drivers/infiniband/core/iwcm.c
147
work = list_first_entry(&cm_id_priv->work_free_list, struct iwcm_work,
drivers/infiniband/core/iwcm.c
149
list_del_init(&work->free_list);
drivers/infiniband/core/iwcm.c
150
return work;
drivers/infiniband/core/iwcm.c
153
static void put_work(struct iwcm_work *work)
drivers/infiniband/core/iwcm.c
155
list_add(&work->free_list, &work->cm_id->work_free_list);
drivers/infiniband/core/iwcm.c
170
struct iwcm_work *work;
drivers/infiniband/core/iwcm.c
174
work = kmalloc_obj(struct iwcm_work);
drivers/infiniband/core/iwcm.c
175
if (!work) {
drivers/infiniband/core/iwcm.c
179
work->cm_id = cm_id_priv;
drivers/infiniband/core/iwcm.c
180
put_work(work);
drivers/infiniband/core/iwcm.c
96
struct work_struct work;
drivers/infiniband/core/mad.c
100
static void timeout_sends(struct work_struct *work);
drivers/infiniband/core/mad.c
101
static void local_completions(struct work_struct *work);
drivers/infiniband/core/mad.c
2748
static void local_completions(struct work_struct *work)
drivers/infiniband/core/mad.c
2760
container_of(work, struct ib_mad_agent_private, local_work);
drivers/infiniband/core/mad.c
2875
static void timeout_sends(struct work_struct *work)
drivers/infiniband/core/mad.c
2884
mad_agent_priv = container_of(work, struct ib_mad_agent_private,
drivers/infiniband/core/mad.c
2885
timed_work.work);
drivers/infiniband/core/mad_rmpp.c
241
static void recv_timeout_handler(struct work_struct *work)
drivers/infiniband/core/mad_rmpp.c
244
container_of(work, struct mad_rmpp_recv, timeout_work.work);
drivers/infiniband/core/mad_rmpp.c
263
static void recv_cleanup_handler(struct work_struct *work)
drivers/infiniband/core/mad_rmpp.c
266
container_of(work, struct mad_rmpp_recv, cleanup_work.work);
drivers/infiniband/core/multicast.c
101
struct work_struct work;
drivers/infiniband/core/multicast.c
216
queue_work(mcast_wq, &group->work);
drivers/infiniband/core/multicast.c
424
static void mcast_work_handler(struct work_struct *work)
drivers/infiniband/core/multicast.c
432
group = container_of(work, typeof(*group), work);
drivers/infiniband/core/multicast.c
542
mcast_work_handler(&group->work);
drivers/infiniband/core/multicast.c
554
mcast_work_handler(&group->work);
drivers/infiniband/core/multicast.c
583
INIT_WORK(&group->work, mcast_work_handler);
drivers/infiniband/core/multicast.c
680
queue_work(mcast_wq, &group->work);
drivers/infiniband/core/multicast.c
784
queue_work(mcast_wq, &group->work);
drivers/infiniband/core/roce_gid_mgmt.c
53
struct work_struct work;
drivers/infiniband/core/roce_gid_mgmt.c
643
struct netdev_event_work *work =
drivers/infiniband/core/roce_gid_mgmt.c
644
container_of(_work, struct netdev_event_work, work);
drivers/infiniband/core/roce_gid_mgmt.c
647
for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
drivers/infiniband/core/roce_gid_mgmt.c
648
ib_enum_all_roce_netdevs(work->cmds[i].filter,
drivers/infiniband/core/roce_gid_mgmt.c
649
work->cmds[i].filter_ndev,
drivers/infiniband/core/roce_gid_mgmt.c
650
work->cmds[i].cb,
drivers/infiniband/core/roce_gid_mgmt.c
651
work->cmds[i].ndev);
drivers/infiniband/core/roce_gid_mgmt.c
652
dev_put(work->cmds[i].ndev);
drivers/infiniband/core/roce_gid_mgmt.c
653
dev_put(work->cmds[i].filter_ndev);
drivers/infiniband/core/roce_gid_mgmt.c
656
kfree(work);
drivers/infiniband/core/roce_gid_mgmt.c
677
INIT_WORK(&ndev_work->work, netdevice_event_work_handler);
drivers/infiniband/core/roce_gid_mgmt.c
679
queue_work(gid_cache_wq, &ndev_work->work);
drivers/infiniband/core/roce_gid_mgmt.c
68
struct work_struct work;
drivers/infiniband/core/roce_gid_mgmt.c
827
struct update_gid_event_work *work =
drivers/infiniband/core/roce_gid_mgmt.c
828
container_of(_work, struct update_gid_event_work, work);
drivers/infiniband/core/roce_gid_mgmt.c
831
work->gid_attr.ndev,
drivers/infiniband/core/roce_gid_mgmt.c
832
callback_for_addr_gid_device_scan, work);
drivers/infiniband/core/roce_gid_mgmt.c
834
dev_put(work->gid_attr.ndev);
drivers/infiniband/core/roce_gid_mgmt.c
835
kfree(work);
drivers/infiniband/core/roce_gid_mgmt.c
841
struct update_gid_event_work *work;
drivers/infiniband/core/roce_gid_mgmt.c
860
work = kmalloc_obj(*work, GFP_ATOMIC);
drivers/infiniband/core/roce_gid_mgmt.c
861
if (!work)
drivers/infiniband/core/roce_gid_mgmt.c
864
INIT_WORK(&work->work, update_gid_event_work_handler);
drivers/infiniband/core/roce_gid_mgmt.c
866
rdma_ip2gid(sa, &work->gid);
drivers/infiniband/core/roce_gid_mgmt.c
867
work->gid_op = gid_op;
drivers/infiniband/core/roce_gid_mgmt.c
869
memset(&work->gid_attr, 0, sizeof(work->gid_attr));
drivers/infiniband/core/roce_gid_mgmt.c
871
work->gid_attr.ndev = ndev;
drivers/infiniband/core/roce_gid_mgmt.c
873
queue_work(gid_cache_wq, &work->work);
drivers/infiniband/core/sa_query.c
1010
static void ib_nl_request_timeout(struct work_struct *work)
drivers/infiniband/core/sa_query.c
2173
static void update_ib_cpi(struct work_struct *work)
drivers/infiniband/core/sa_query.c
2176
container_of(work, struct ib_sa_port, ib_cpi_work.work);
drivers/infiniband/core/sa_query.c
2295
static void update_sm_ah(struct work_struct *work)
drivers/infiniband/core/sa_query.c
2298
container_of(work, struct ib_sa_port, update_task);
drivers/infiniband/core/ucma.c
177
static void ucma_close_id(struct work_struct *work)
drivers/infiniband/core/ucma.c
179
struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);
drivers/infiniband/hw/bnxt_re/main.c
317
struct work_struct work;
drivers/infiniband/hw/bnxt_re/main.c
371
static void bnxt_re_dcb_wq_task(struct work_struct *work)
drivers/infiniband/hw/bnxt_re/main.c
374
container_of(work, struct bnxt_re_dcb_work, work);
drivers/infiniband/hw/bnxt_re/main.c
429
INIT_WORK(&dcb_work->work, bnxt_re_dcb_wq_task);
drivers/infiniband/hw/bnxt_re/main.c
430
queue_work(rdev->dcb_wq, &dcb_work->work);
drivers/infiniband/hw/bnxt_re/main.c
764
static void bnxt_re_db_fifo_check(struct work_struct *work)
drivers/infiniband/hw/bnxt_re/main.c
766
struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
drivers/infiniband/hw/bnxt_re/main.c
805
static void bnxt_re_pacing_timer_exp(struct work_struct *work)
drivers/infiniband/hw/bnxt_re/main.c
807
struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
drivers/infiniband/hw/bnxt_re/main.c
808
dbq_pacing_work.work);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
157
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
drivers/infiniband/hw/bnxt_re/qplib_fp.c
160
container_of(work, struct bnxt_qplib_nq_work, work);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2102
INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2103
queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2189
INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2190
queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
drivers/infiniband/hw/bnxt_re/qplib_fp.h
540
struct work_struct work;
drivers/infiniband/hw/cxgb4/cm.c
4299
static void process_work(struct work_struct *work)
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
982
void c4iw_register_device(struct work_struct *work);
drivers/infiniband/hw/cxgb4/provider.c
526
void c4iw_register_device(struct work_struct *work)
drivers/infiniband/hw/cxgb4/provider.c
529
struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
drivers/infiniband/hw/erdma/erdma_cm.c
120
struct erdma_cm_work *work;
drivers/infiniband/hw/erdma/erdma_cm.c
123
work = list_entry(w, struct erdma_cm_work, list);
drivers/infiniband/hw/erdma/erdma_cm.c
124
list_del(&work->list);
drivers/infiniband/hw/erdma/erdma_cm.c
125
kfree(work);
drivers/infiniband/hw/erdma/erdma_cm.c
133
if (cancel_delayed_work(&cep->mpa_timer->work)) {
drivers/infiniband/hw/erdma/erdma_cm.c
142
static void erdma_put_work(struct erdma_cm_work *work)
drivers/infiniband/hw/erdma/erdma_cm.c
144
INIT_LIST_HEAD(&work->list);
drivers/infiniband/hw/erdma/erdma_cm.c
145
spin_lock_bh(&work->cep->lock);
drivers/infiniband/hw/erdma/erdma_cm.c
146
list_add(&work->list, &work->cep->work_freelist);
drivers/infiniband/hw/erdma/erdma_cm.c
147
spin_unlock_bh(&work->cep->lock);
drivers/infiniband/hw/erdma/erdma_cm.c
202
struct erdma_cm_work *work = NULL;
drivers/infiniband/hw/erdma/erdma_cm.c
206
work = list_entry(cep->work_freelist.next, struct erdma_cm_work,
drivers/infiniband/hw/erdma/erdma_cm.c
208
list_del_init(&work->list);
drivers/infiniband/hw/erdma/erdma_cm.c
212
return work;
drivers/infiniband/hw/erdma/erdma_cm.c
217
struct erdma_cm_work *work;
drivers/infiniband/hw/erdma/erdma_cm.c
220
work = kmalloc_obj(*work);
drivers/infiniband/hw/erdma/erdma_cm.c
221
if (!work) {
drivers/infiniband/hw/erdma/erdma_cm.c
226
work->cep = cep;
drivers/infiniband/hw/erdma/erdma_cm.c
227
INIT_LIST_HEAD(&work->list);
drivers/infiniband/hw/erdma/erdma_cm.c
228
list_add(&work->list, &cep->work_freelist);
drivers/infiniband/hw/erdma/erdma_cm.c
743
struct erdma_cm_work *work;
drivers/infiniband/hw/erdma/erdma_cm.c
747
work = container_of(w, struct erdma_cm_work, work.work);
drivers/infiniband/hw/erdma/erdma_cm.c
748
cep = work->cep;
drivers/infiniband/hw/erdma/erdma_cm.c
752
switch (work->type) {
drivers/infiniband/hw/erdma/erdma_cm.c
854
WARN(1, "Undefined CM work type: %d\n", work->type);
drivers/infiniband/hw/erdma/erdma_cm.c
891
erdma_put_work(work);
drivers/infiniband/hw/erdma/erdma_cm.c
897
struct erdma_cm_work *work = erdma_get_work(cep);
drivers/infiniband/hw/erdma/erdma_cm.c
900
if (!work)
drivers/infiniband/hw/erdma/erdma_cm.c
903
work->type = type;
drivers/infiniband/hw/erdma/erdma_cm.c
904
work->cep = cep;
drivers/infiniband/hw/erdma/erdma_cm.c
908
INIT_DELAYED_WORK(&work->work, erdma_cm_work_handler);
drivers/infiniband/hw/erdma/erdma_cm.c
911
cep->mpa_timer = work;
drivers/infiniband/hw/erdma/erdma_cm.c
918
cep->mpa_timer = work;
drivers/infiniband/hw/erdma/erdma_cm.c
923
queue_delayed_work(erdma_cm_wq, &work->work, delay);
drivers/infiniband/hw/erdma/erdma_cm.h
134
struct delayed_work work;
drivers/infiniband/hw/erdma/erdma_verbs.c
455
static void erdma_flush_worker(struct work_struct *work)
drivers/infiniband/hw/erdma/erdma_verbs.c
457
struct delayed_work *dwork = to_delayed_work(work);
drivers/infiniband/hw/hfi1/chip.c
12493
static void do_update_synth_timer(struct work_struct *work)
drivers/infiniband/hw/hfi1/chip.c
12502
struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
drivers/infiniband/hw/hfi1/chip.c
6657
void handle_sma_message(struct work_struct *work)
drivers/infiniband/hw/hfi1/chip.c
6659
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
drivers/infiniband/hw/hfi1/chip.c
6870
void handle_freeze(struct work_struct *work)
drivers/infiniband/hw/hfi1/chip.c
6872
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
drivers/infiniband/hw/hfi1/chip.c
6967
void handle_link_up(struct work_struct *work)
drivers/infiniband/hw/hfi1/chip.c
6969
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
drivers/infiniband/hw/hfi1/chip.c
7095
void handle_link_down(struct work_struct *work)
drivers/infiniband/hw/hfi1/chip.c
7099
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
drivers/infiniband/hw/hfi1/chip.c
7184
void handle_link_bounce(struct work_struct *work)
drivers/infiniband/hw/hfi1/chip.c
7186
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
drivers/infiniband/hw/hfi1/chip.c
7421
void handle_verify_cap(struct work_struct *work)
drivers/infiniband/hw/hfi1/chip.c
7423
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
drivers/infiniband/hw/hfi1/chip.c
7690
void handle_link_downgrade(struct work_struct *work)
drivers/infiniband/hw/hfi1/chip.c
7692
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
drivers/infiniband/hw/hfi1/chip.c
9660
void qsfp_event(struct work_struct *work)
drivers/infiniband/hw/hfi1/chip.c
9666
qd = container_of(work, struct qsfp_data, qsfp_work);
drivers/infiniband/hw/hfi1/chip.c
9834
void handle_start_link(struct work_struct *work)
drivers/infiniband/hw/hfi1/chip.c
9836
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
drivers/infiniband/hw/hfi1/chip.c
9837
start_link_work.work);
drivers/infiniband/hw/hfi1/chip.h
740
void handle_verify_cap(struct work_struct *work);
drivers/infiniband/hw/hfi1/chip.h
741
void handle_freeze(struct work_struct *work);
drivers/infiniband/hw/hfi1/chip.h
742
void handle_link_up(struct work_struct *work);
drivers/infiniband/hw/hfi1/chip.h
743
void handle_link_down(struct work_struct *work);
drivers/infiniband/hw/hfi1/chip.h
744
void handle_link_downgrade(struct work_struct *work);
drivers/infiniband/hw/hfi1/chip.h
745
void handle_link_bounce(struct work_struct *work);
drivers/infiniband/hw/hfi1/chip.h
746
void handle_start_link(struct work_struct *work);
drivers/infiniband/hw/hfi1/chip.h
747
void handle_sma_message(struct work_struct *work);
drivers/infiniband/hw/hfi1/chip.h
749
void qsfp_event(struct work_struct *work);
drivers/infiniband/hw/hfi1/driver.c
1172
void receive_interrupt_work(struct work_struct *work)
drivers/infiniband/hw/hfi1/driver.c
1174
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
drivers/infiniband/hw/hfi1/hfi.h
1603
void receive_interrupt_work(struct work_struct *work);
drivers/infiniband/hw/hfi1/iowait.c
42
void (*func)(struct work_struct *work),
drivers/infiniband/hw/hfi1/iowait.c
43
void (*tidfunc)(struct work_struct *work),
drivers/infiniband/hw/hfi1/iowait.h
131
void (*func)(struct work_struct *work),
drivers/infiniband/hw/hfi1/iowait.h
132
void (*tidfunc)(struct work_struct *work),
drivers/infiniband/hw/hfi1/iowait.h
20
typedef void (*restart_t)(struct work_struct *work);
drivers/infiniband/hw/hfi1/ipoib_tx.c
668
static void hfi1_ipoib_flush_txq(struct work_struct *work)
drivers/infiniband/hw/hfi1/ipoib_tx.c
671
container_of(work, struct iowait_work, iowork);
drivers/infiniband/hw/hfi1/mmu_rb.c
23
static void handle_remove(struct work_struct *work);
drivers/infiniband/hw/hfi1/mmu_rb.c
292
static void handle_remove(struct work_struct *work)
drivers/infiniband/hw/hfi1/mmu_rb.c
294
struct mmu_rb_handler *handler = container_of(work,
drivers/infiniband/hw/hfi1/opfn.c
111
void opfn_send_conn_request(struct work_struct *work)
drivers/infiniband/hw/hfi1/opfn.c
116
od = container_of(work, struct hfi1_opfn_data, opfn_work);
drivers/infiniband/hw/hfi1/opfn.h
77
void opfn_send_conn_request(struct work_struct *work);
drivers/infiniband/hw/hfi1/pio.c
532
static void sc_halted(struct work_struct *work)
drivers/infiniband/hw/hfi1/pio.c
536
sc = container_of(work, struct send_context, halt_work);
drivers/infiniband/hw/hfi1/ruc.c
476
void _hfi1_do_send(struct work_struct *work)
drivers/infiniband/hw/hfi1/ruc.c
478
struct iowait_work *w = container_of(work, struct iowait_work, iowork);
drivers/infiniband/hw/hfi1/sdma.c
406
static void sdma_field_flush(struct work_struct *work)
drivers/infiniband/hw/hfi1/sdma.c
410
container_of(work, struct sdma_engine, flush_worker);
drivers/infiniband/hw/hfi1/sdma.c
418
static void sdma_err_halt_wait(struct work_struct *work)
drivers/infiniband/hw/hfi1/sdma.c
420
struct sdma_engine *sde = container_of(work, struct sdma_engine,
drivers/infiniband/hw/hfi1/tid_rdma.c
110
static void tid_rdma_trigger_resume(struct work_struct *work);
drivers/infiniband/hw/hfi1/tid_rdma.c
5342
void _hfi1_do_tid_send(struct work_struct *work)
drivers/infiniband/hw/hfi1/tid_rdma.c
5344
struct iowait_work *w = container_of(work, struct iowait_work, iowork);
drivers/infiniband/hw/hfi1/tid_rdma.c
643
static void tid_rdma_trigger_resume(struct work_struct *work)
drivers/infiniband/hw/hfi1/tid_rdma.c
649
tr = container_of(work, struct tid_rdma_qp_params, trigger_work);
drivers/infiniband/hw/hfi1/tid_rdma.h
313
void _hfi1_do_tid_send(struct work_struct *work);
drivers/infiniband/hw/hfi1/verbs.h
406
void _hfi1_do_send(struct work_struct *work);
drivers/infiniband/hw/hfi1/vnic_sdma.c
254
struct iowait_work *work;
drivers/infiniband/hw/hfi1/vnic_sdma.c
258
work = iowait_get_ib_work(&vnic_sdma->wait);
drivers/infiniband/hw/hfi1/vnic_sdma.c
259
list_add_tail(&vnic_sdma->stx.list, &work->tx_head);
drivers/infiniband/hw/hns/hns_roce_bond.c
536
static void hns_roce_bond_work(struct work_struct *work)
drivers/infiniband/hw/hns/hns_roce_bond.c
538
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/infiniband/hw/hns/hns_roce_device.h
596
struct work_struct work;
drivers/infiniband/hw/hns/hns_roce_device.h
722
struct work_struct work;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5973
flush_work(&hr_qp->flush_work.work);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6410
static void hns_roce_irq_work_handle(struct work_struct *work)
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6413
container_of(work, struct hns_roce_work, work);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6457
INIT_WORK(&irq_work->work, hns_roce_irq_work_handle);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6462
queue_work(hr_dev->irq_workq, &irq_work->work);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6572
queue_work(system_bh_wq, &eq->work);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6960
static void hns_roce_ceq_work(struct work_struct *work)
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
6962
struct hns_roce_eq *eq = from_work(eq, work, work);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
7024
INIT_WORK(&eq_table->eq[j - other_num].work,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
7054
cancel_work_sync(&eq_table->eq[j - other_num].work);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
7079
cancel_work_sync(&hr_dev->eq_table.eq[i].work);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
949
cancel_work_sync(&ibcq->work);
drivers/infiniband/hw/hns/hns_roce_qp.c
102
queue_work(hr_dev->irq_workq, &flush_work->work);
drivers/infiniband/hw/hns/hns_roce_qp.c
1191
INIT_WORK(&flush_work->work, flush_work_handle);
drivers/infiniband/hw/hns/hns_roce_qp.c
60
static void flush_work_handle(struct work_struct *work)
drivers/infiniband/hw/hns/hns_roce_qp.c
62
struct hns_roce_work *flush_work = container_of(work,
drivers/infiniband/hw/hns/hns_roce_qp.c
63
struct hns_roce_work, work);
drivers/infiniband/hw/ionic/ionic_admin.c
1001
INIT_WORK(&eq->work, ionic_poll_eq_work);
drivers/infiniband/hw/ionic/ionic_admin.c
1041
flush_work(&eq->work);
drivers/infiniband/hw/ionic/ionic_admin.c
1058
flush_work(&eq->work);
drivers/infiniband/hw/ionic/ionic_admin.c
112
complete_all(&wr->work);
drivers/infiniband/hw/ionic/ionic_admin.c
119
complete_all(&wr->work);
drivers/infiniband/hw/ionic/ionic_admin.c
1207
cancel_work_sync(&aq->work);
drivers/infiniband/hw/ionic/ionic_admin.c
166
complete_all(&wr->work);
drivers/infiniband/hw/ionic/ionic_admin.c
182
queue_work(ionic_evt_workq, &aq->work);
drivers/infiniband/hw/ionic/ionic_admin.c
189
queue_work(ionic_evt_workq, &aq->work);
drivers/infiniband/hw/ionic/ionic_admin.c
264
container_of(ws, struct ionic_ibdev, admin_dwork.work);
drivers/infiniband/hw/ionic/ionic_admin.c
321
struct ionic_aq *aq = container_of(ws, struct ionic_aq, work);
drivers/infiniband/hw/ionic/ionic_admin.c
377
if (completion_done(&wr->work))
drivers/infiniband/hw/ionic/ionic_admin.c
409
timo = wait_for_completion_interruptible_timeout(&wr->work,
drivers/infiniband/hw/ionic/ionic_admin.c
425
wait_for_completion(&wr->work);
drivers/infiniband/hw/ionic/ionic_admin.c
461
.work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
drivers/infiniband/hw/ionic/ionic_admin.c
476
.work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
drivers/infiniband/hw/ionic/ionic_admin.c
500
queue_work(ionic_evt_workq, &aq->work);
drivers/infiniband/hw/ionic/ionic_admin.c
587
INIT_WORK(&aq->work, ionic_admin_work);
drivers/infiniband/hw/ionic/ionic_admin.c
935
static void ionic_poll_eq_work(struct work_struct *work)
drivers/infiniband/hw/ionic/ionic_admin.c
937
struct ionic_eq *eq = container_of(work, struct ionic_eq, work);
drivers/infiniband/hw/ionic/ionic_admin.c
947
queue_work(ionic_evt_workq, &eq->work);
drivers/infiniband/hw/ionic/ionic_admin.c
970
queue_work(ionic_evt_workq, &eq->work);
drivers/infiniband/hw/ionic/ionic_controlpath.c
1167
.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
drivers/infiniband/hw/ionic/ionic_controlpath.c
1196
.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
drivers/infiniband/hw/ionic/ionic_controlpath.c
1337
.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
drivers/infiniband/hw/ionic/ionic_controlpath.c
1397
.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
drivers/infiniband/hw/ionic/ionic_controlpath.c
1528
.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
drivers/infiniband/hw/ionic/ionic_controlpath.c
1675
.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
drivers/infiniband/hw/ionic/ionic_controlpath.c
625
.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
drivers/infiniband/hw/ionic/ionic_controlpath.c
707
.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
drivers/infiniband/hw/ionic/ionic_controlpath.c
801
.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
drivers/infiniband/hw/ionic/ionic_controlpath.c
834
.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
drivers/infiniband/hw/ionic/ionic_hw_stats.c
88
.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
drivers/infiniband/hw/ionic/ionic_ibdev.h
132
struct work_struct work;
drivers/infiniband/hw/ionic/ionic_ibdev.h
139
struct completion work;
drivers/infiniband/hw/ionic/ionic_ibdev.h
156
struct work_struct work;
drivers/infiniband/hw/irdma/cm.c
3447
struct disconn_work *work;
drivers/infiniband/hw/irdma/cm.c
3450
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/infiniband/hw/irdma/cm.c
3451
if (!work)
drivers/infiniband/hw/irdma/cm.c
3460
kfree(work);
drivers/infiniband/hw/irdma/cm.c
3466
work->iwqp = iwqp;
drivers/infiniband/hw/irdma/cm.c
3467
INIT_WORK(&work->work, irdma_disconnect_worker);
drivers/infiniband/hw/irdma/cm.c
3468
queue_work(iwdev->cleanup_wq, &work->work);
drivers/infiniband/hw/irdma/cm.c
3605
static void irdma_disconnect_worker(struct work_struct *work)
drivers/infiniband/hw/irdma/cm.c
3607
struct disconn_work *dwork = container_of(work, struct disconn_work, work);
drivers/infiniband/hw/irdma/cm.c
4225
static void irdma_cm_event_handler(struct work_struct *work)
drivers/infiniband/hw/irdma/cm.c
4227
struct irdma_cm_event *event = container_of(work, struct irdma_cm_event, event_work);
drivers/infiniband/hw/irdma/cm.c
7
static void irdma_disconnect_worker(struct work_struct *work);
drivers/infiniband/hw/irdma/hw.c
2257
void cqp_compl_worker(struct work_struct *work)
drivers/infiniband/hw/irdma/hw.c
2259
struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
drivers/infiniband/hw/irdma/main.h
578
void cqp_compl_worker(struct work_struct *work);
drivers/infiniband/hw/irdma/verbs.c
1922
static void irdma_free_cqbuf(struct work_struct *work)
drivers/infiniband/hw/irdma/verbs.c
1924
struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
drivers/infiniband/hw/irdma/verbs.c
1952
queue_work(iwdev->cleanup_wq, &cq_buf->work);
drivers/infiniband/hw/irdma/verbs.c
2147
INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
drivers/infiniband/hw/irdma/verbs.c
891
static void irdma_flush_worker(struct work_struct *work)
drivers/infiniband/hw/irdma/verbs.c
893
struct delayed_work *dwork = to_delayed_work(work);
drivers/infiniband/hw/irdma/verbs.h
164
struct work_struct work;
drivers/infiniband/hw/irdma/verbs.h
89
struct work_struct work;
drivers/infiniband/hw/mlx4/alias_GUID.c
743
static void alias_guid_work(struct work_struct *work)
drivers/infiniband/hw/mlx4/alias_GUID.c
745
struct delayed_work *delay = to_delayed_work(work);
drivers/infiniband/hw/mlx4/cm.c
177
static void id_map_ent_timeout(struct work_struct *work)
drivers/infiniband/hw/mlx4/cm.c
179
struct delayed_work *delay = to_delayed_work(work);
drivers/infiniband/hw/mlx4/cm.c
346
static void rej_tmout_timeout(struct work_struct *work)
drivers/infiniband/hw/mlx4/cm.c
348
struct delayed_work *delay = to_delayed_work(work);
drivers/infiniband/hw/mlx4/mad.c
1177
void handle_port_mgmt_change_event(struct work_struct *work)
drivers/infiniband/hw/mlx4/mad.c
1179
struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
drivers/infiniband/hw/mlx4/mad.c
1298
queue_work(ctx->wq, &ctx->work);
drivers/infiniband/hw/mlx4/mad.c
1310
queue_work(ctx->wi_wq, &ctx->work);
drivers/infiniband/hw/mlx4/mad.c
1732
static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
drivers/infiniband/hw/mlx4/mad.c
1738
ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
drivers/infiniband/hw/mlx4/mad.c
1894
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
drivers/infiniband/hw/mlx4/mad.c
1902
ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
drivers/infiniband/hw/mlx4/mad.c
2044
INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
drivers/infiniband/hw/mlx4/mad.c
2046
INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
drivers/infiniband/hw/mlx4/mad.c
2142
void mlx4_ib_tunnels_update_work(struct work_struct *work)
drivers/infiniband/hw/mlx4/mad.c
2146
dmxw = container_of(work, struct mlx4_ib_demux_work, work);
drivers/infiniband/hw/mlx4/main.c
3045
INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
drivers/infiniband/hw/mlx4/main.c
3055
queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
drivers/infiniband/hw/mlx4/main.c
3128
static void handle_bonded_port_state_event(struct work_struct *work)
drivers/infiniband/hw/mlx4/main.c
3131
container_of(work, struct ib_event_work, work);
drivers/infiniband/hw/mlx4/main.c
3178
static void ib_sl2vl_update_work(struct work_struct *work)
drivers/infiniband/hw/mlx4/main.c
3180
struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
drivers/infiniband/hw/mlx4/main.c
3196
INIT_WORK(&ew->work, ib_sl2vl_update_work);
drivers/infiniband/hw/mlx4/main.c
3199
queue_work(wq, &ew->work);
drivers/infiniband/hw/mlx4/main.c
3220
INIT_WORK(&ew->work, handle_bonded_port_state_event);
drivers/infiniband/hw/mlx4/main.c
3222
queue_work(wq, &ew->work);
drivers/infiniband/hw/mlx4/main.c
3270
INIT_WORK(&ew->work, handle_port_mgmt_change_event);
drivers/infiniband/hw/mlx4/main.c
3275
queue_work(wq, &ew->work);
drivers/infiniband/hw/mlx4/main.c
3277
handle_port_mgmt_change_event(&ew->work);
drivers/infiniband/hw/mlx4/mcg.c
108
struct work_struct work;
drivers/infiniband/hw/mlx4/mcg.c
1124
struct work_struct work;
drivers/infiniband/hw/mlx4/mcg.c
1129
static void mcg_clean_task(struct work_struct *work)
drivers/infiniband/hw/mlx4/mcg.c
1131
struct clean_work *cw = container_of(work, struct clean_work, work);
drivers/infiniband/hw/mlx4/mcg.c
1140
struct clean_work *work;
drivers/infiniband/hw/mlx4/mcg.c
1153
work = kmalloc_obj(*work);
drivers/infiniband/hw/mlx4/mcg.c
1154
if (!work) {
drivers/infiniband/hw/mlx4/mcg.c
1159
work->ctx = ctx;
drivers/infiniband/hw/mlx4/mcg.c
1160
work->destroy_wq = destroy_wq;
drivers/infiniband/hw/mlx4/mcg.c
1161
INIT_WORK(&work->work, mcg_clean_task);
drivers/infiniband/hw/mlx4/mcg.c
1162
queue_work(clean_wq, &work->work);
drivers/infiniband/hw/mlx4/mcg.c
539
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
drivers/infiniband/hw/mlx4/mcg.c
541
struct delayed_work *delay = to_delayed_work(work);
drivers/infiniband/hw/mlx4/mcg.c
578
if (!queue_work(group->demux->mcg_wq, &group->work))
drivers/infiniband/hw/mlx4/mcg.c
642
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
drivers/infiniband/hw/mlx4/mcg.c
652
group = container_of(work, typeof(*group), work);
drivers/infiniband/hw/mlx4/mcg.c
837
INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
drivers/infiniband/hw/mlx4/mcg.c
878
if (!queue_work(group->demux->mcg_wq, &group->work))
drivers/infiniband/hw/mlx4/mcg.c
914
if (!queue_work(ctx->mcg_wq, &group->work))
drivers/infiniband/hw/mlx4/mlx4_ib.h
446
struct work_struct work;
drivers/infiniband/hw/mlx4/mlx4_ib.h
484
struct work_struct work;
drivers/infiniband/hw/mlx4/mlx4_ib.h
652
struct work_struct work;
drivers/infiniband/hw/mlx4/mlx4_ib.h
862
void mlx4_ib_tunnels_update_work(struct work_struct *work);
drivers/infiniband/hw/mlx4/qp.c
106
struct work_struct work;
drivers/infiniband/hw/mlx4/qp.c
214
container_of(_work, struct mlx4_ib_qp_event_work, work);
drivers/infiniband/hw/mlx4/qp.c
276
INIT_WORK(&qpe_work->work, mlx4_ib_handle_qp_event);
drivers/infiniband/hw/mlx4/qp.c
277
queue_work(mlx4_ib_qp_event_wq, &qpe_work->work);
drivers/infiniband/hw/mlx4/qp.c
4437
cancel_work_sync(&cq->work);
drivers/infiniband/hw/mlx5/cq.c
944
static void notify_soft_wc_handler(struct work_struct *work)
drivers/infiniband/hw/mlx5/cq.c
946
struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
drivers/infiniband/hw/mlx5/main.c
2915
static void pkey_change_handler(struct work_struct *work)
drivers/infiniband/hw/mlx5/main.c
2918
container_of(work, struct mlx5_ib_port_resources,
drivers/infiniband/hw/mlx5/main.c
2990
static void delay_drop_handler(struct work_struct *work)
drivers/infiniband/hw/mlx5/main.c
2994
container_of(work, struct mlx5_ib_delay_drop,
drivers/infiniband/hw/mlx5/main.c
3080
struct mlx5_ib_event_work *work =
drivers/infiniband/hw/mlx5/main.c
3081
container_of(_work, struct mlx5_ib_event_work, work);
drivers/infiniband/hw/mlx5/main.c
3085
if (work->is_slave) {
drivers/infiniband/hw/mlx5/main.c
3086
ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
drivers/infiniband/hw/mlx5/main.c
3090
ibdev = work->dev;
drivers/infiniband/hw/mlx5/main.c
3093
switch (work->event) {
drivers/infiniband/hw/mlx5/main.c
3095
if (handle_port_change(ibdev, work->param, &ibev))
drivers/infiniband/hw/mlx5/main.c
3099
handle_general_event(ibdev, work->param, &ibev);
drivers/infiniband/hw/mlx5/main.c
3116
kfree(work);
drivers/infiniband/hw/mlx5/main.c
3122
struct mlx5_ib_event_work *work;
drivers/infiniband/hw/mlx5/main.c
3124
work = kmalloc_obj(*work, GFP_ATOMIC);
drivers/infiniband/hw/mlx5/main.c
3125
if (!work)
drivers/infiniband/hw/mlx5/main.c
3128
INIT_WORK(&work->work, mlx5_ib_handle_event);
drivers/infiniband/hw/mlx5/main.c
3129
work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
drivers/infiniband/hw/mlx5/main.c
3130
work->is_slave = false;
drivers/infiniband/hw/mlx5/main.c
3131
work->param = param;
drivers/infiniband/hw/mlx5/main.c
3132
work->event = event;
drivers/infiniband/hw/mlx5/main.c
3134
queue_work(mlx5_ib_event_wq, &work->work);
drivers/infiniband/hw/mlx5/main.c
3142
struct mlx5_ib_event_work *work;
drivers/infiniband/hw/mlx5/main.c
3144
work = kmalloc_obj(*work, GFP_ATOMIC);
drivers/infiniband/hw/mlx5/main.c
3145
if (!work)
drivers/infiniband/hw/mlx5/main.c
3148
INIT_WORK(&work->work, mlx5_ib_handle_event);
drivers/infiniband/hw/mlx5/main.c
3149
work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
drivers/infiniband/hw/mlx5/main.c
3150
work->is_slave = true;
drivers/infiniband/hw/mlx5/main.c
3151
work->param = param;
drivers/infiniband/hw/mlx5/main.c
3152
work->event = event;
drivers/infiniband/hw/mlx5/main.c
3153
queue_work(mlx5_ib_event_wq, &work->work);
drivers/infiniband/hw/mlx5/main.c
3160
struct mlx5_ib_event_work *work =
drivers/infiniband/hw/mlx5/main.c
3161
container_of(_work, struct mlx5_ib_event_work, work);
drivers/infiniband/hw/mlx5/main.c
3162
struct mlx5_ib_dev *ibdev = work->dev;
drivers/infiniband/hw/mlx5/main.c
3167
ibev.element.port_num = (u8)(unsigned long)work->param;
drivers/infiniband/hw/mlx5/main.c
3180
kfree(work);
drivers/infiniband/hw/mlx5/main.c
3186
struct mlx5_ib_event_work *work;
drivers/infiniband/hw/mlx5/main.c
3191
work = kmalloc_obj(*work, GFP_ATOMIC);
drivers/infiniband/hw/mlx5/main.c
3192
if (!work)
drivers/infiniband/hw/mlx5/main.c
3195
INIT_WORK(&work->work, mlx5_ib_handle_sys_error_event);
drivers/infiniband/hw/mlx5/main.c
3196
work->dev = container_of(nb, struct mlx5_ib_dev, sys_error_events);
drivers/infiniband/hw/mlx5/main.c
3197
work->is_slave = false;
drivers/infiniband/hw/mlx5/main.c
3198
work->param = param;
drivers/infiniband/hw/mlx5/main.c
3199
work->event = event;
drivers/infiniband/hw/mlx5/main.c
3201
queue_work(mlx5_ib_event_wq, &work->work);
drivers/infiniband/hw/mlx5/main.c
64
struct work_struct work;
drivers/infiniband/hw/mlx5/mlx5_ib.h
1122
struct work_struct work;
drivers/infiniband/hw/mlx5/mlx5_ib.h
727
struct work_struct work;
drivers/infiniband/hw/mlx5/mr.c
623
static void delayed_cache_work_func(struct work_struct *work)
drivers/infiniband/hw/mlx5/mr.c
627
ent = container_of(work, struct mlx5_cache_ent, dwork.work);
drivers/infiniband/hw/mlx5/odp.c
1625
static void mlx5_ib_eqe_pf_action(struct work_struct *work)
drivers/infiniband/hw/mlx5/odp.c
1627
struct mlx5_pagefault *pfault = container_of(work,
drivers/infiniband/hw/mlx5/odp.c
1629
work);
drivers/infiniband/hw/mlx5/odp.c
1647
schedule_work(&eq->work);
drivers/infiniband/hw/mlx5/odp.c
1752
INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
drivers/infiniband/hw/mlx5/odp.c
1753
queue_work(eq->wq, &pfault->work);
drivers/infiniband/hw/mlx5/odp.c
1772
schedule_work(&eq->work);
drivers/infiniband/hw/mlx5/odp.c
1788
static void mlx5_ib_eq_pf_action(struct work_struct *work)
drivers/infiniband/hw/mlx5/odp.c
1791
container_of(work, struct mlx5_ib_pf_eq, work);
drivers/infiniband/hw/mlx5/odp.c
1813
INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
drivers/infiniband/hw/mlx5/odp.c
1871
cancel_work_sync(&eq->work);
drivers/infiniband/hw/mlx5/odp.c
1947
struct work_struct work;
drivers/infiniband/hw/mlx5/odp.c
1957
static void destroy_prefetch_work(struct prefetch_mr_work *work)
drivers/infiniband/hw/mlx5/odp.c
1961
for (i = 0; i < work->num_sge; ++i)
drivers/infiniband/hw/mlx5/odp.c
1962
mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey);
drivers/infiniband/hw/mlx5/odp.c
1964
kvfree(work);
drivers/infiniband/hw/mlx5/odp.c
2008
struct prefetch_mr_work *work =
drivers/infiniband/hw/mlx5/odp.c
2009
container_of(w, struct prefetch_mr_work, work);
drivers/infiniband/hw/mlx5/odp.c
2015
WARN_ON(!work->num_sge);
drivers/infiniband/hw/mlx5/odp.c
2016
for (i = 0; i < work->num_sge; ++i) {
drivers/infiniband/hw/mlx5/odp.c
2017
ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
drivers/infiniband/hw/mlx5/odp.c
2018
work->frags[i].length, &bytes_mapped,
drivers/infiniband/hw/mlx5/odp.c
2019
work->pf_flags, false);
drivers/infiniband/hw/mlx5/odp.c
2022
mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
drivers/infiniband/hw/mlx5/odp.c
2025
destroy_prefetch_work(work);
drivers/infiniband/hw/mlx5/odp.c
2030
u32 pf_flags, struct prefetch_mr_work *work,
drivers/infiniband/hw/mlx5/odp.c
2035
INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
drivers/infiniband/hw/mlx5/odp.c
2036
work->pf_flags = pf_flags;
drivers/infiniband/hw/mlx5/odp.c
2043
work->num_sge = i;
drivers/infiniband/hw/mlx5/odp.c
2046
work->frags[i].io_virt = sg_list[i].addr;
drivers/infiniband/hw/mlx5/odp.c
2047
work->frags[i].length = sg_list[i].length;
drivers/infiniband/hw/mlx5/odp.c
2048
work->frags[i].mr = mr;
drivers/infiniband/hw/mlx5/odp.c
2050
work->num_sge = num_sge;
drivers/infiniband/hw/mlx5/odp.c
2087
struct prefetch_mr_work *work;
drivers/infiniband/hw/mlx5/odp.c
2100
work = kvzalloc_flex(*work, frags, num_sge);
drivers/infiniband/hw/mlx5/odp.c
2101
if (!work)
drivers/infiniband/hw/mlx5/odp.c
2104
rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
drivers/infiniband/hw/mlx5/odp.c
2106
destroy_prefetch_work(work);
drivers/infiniband/hw/mlx5/odp.c
2109
queue_work(system_dfl_wq, &work->work);
drivers/infiniband/hw/mlx5/odp.c
211
static void free_implicit_child_mr_work(struct work_struct *work)
drivers/infiniband/hw/mlx5/odp.c
214
container_of(work, struct mlx5_ib_mr, odp_destroy.work);
drivers/infiniband/hw/mlx5/odp.c
261
INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
drivers/infiniband/hw/mlx5/odp.c
262
queue_work(system_dfl_wq, &mr->odp_destroy.work);
drivers/infiniband/hw/mlx5/odp.c
91
struct work_struct work;
drivers/infiniband/hw/mlx5/qp.c
358
container_of(_work, struct mlx5_ib_qp_event_work, work);
drivers/infiniband/hw/mlx5/qp.c
425
INIT_WORK(&qpe_work->work, mlx5_ib_handle_qp_event);
drivers/infiniband/hw/mlx5/qp.c
426
queue_work(mlx5_ib_qp_event_wq, &qpe_work->work);
drivers/infiniband/hw/mlx5/qp.c
5761
cancel_work_sync(&cq->work);
drivers/infiniband/hw/mlx5/qp.c
79
struct work_struct work;
drivers/infiniband/hw/mthca/mthca_catas.c
59
static void catas_reset(struct work_struct *work)
drivers/infiniband/hw/mthca/mthca_eq.c
434
int work = 0;
drivers/infiniband/hw/mthca/mthca_eq.c
442
work = 1;
drivers/infiniband/hw/mthca/mthca_eq.c
449
return IRQ_RETVAL(work);
drivers/infiniband/hw/ocrdma/ocrdma.h
76
void ocrdma_eqd_set_task(struct work_struct *work);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
3155
void ocrdma_eqd_set_task(struct work_struct *work)
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
3158
container_of(work, struct ocrdma_dev, eqd_work.work);
drivers/infiniband/hw/qedr/qedr_iw_cm.c
202
struct work_struct work;
drivers/infiniband/hw/qedr/qedr_iw_cm.c
208
static void qedr_iw_disconnect_worker(struct work_struct *work)
drivers/infiniband/hw/qedr/qedr_iw_cm.c
211
container_of(work, struct qedr_discon_work, work);
drivers/infiniband/hw/qedr/qedr_iw_cm.c
257
struct qedr_discon_work *work;
drivers/infiniband/hw/qedr/qedr_iw_cm.c
261
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/infiniband/hw/qedr/qedr_iw_cm.c
262
if (!work)
drivers/infiniband/hw/qedr/qedr_iw_cm.c
271
work->ep = ep;
drivers/infiniband/hw/qedr/qedr_iw_cm.c
272
work->event = params->event;
drivers/infiniband/hw/qedr/qedr_iw_cm.c
273
work->status = params->status;
drivers/infiniband/hw/qedr/qedr_iw_cm.c
275
INIT_WORK(&work->work, qedr_iw_disconnect_worker);
drivers/infiniband/hw/qedr/qedr_iw_cm.c
276
queue_work(dev->iwarp_wq, &work->work);
drivers/infiniband/hw/qedr/verbs.c
3186
int work = info->completed - info->completed_handled - 1;
drivers/infiniband/hw/qedr/verbs.c
3188
DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
drivers/infiniband/hw/qedr/verbs.c
3189
while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
drivers/infiniband/hw/usnic/usnic_uiom.h
73
struct work_struct work;
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
253
struct work_struct work;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
724
static void pvrdma_netdevice_event_work(struct work_struct *work)
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
729
netdev_work = container_of(work, struct pvrdma_netdevice_work, work);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
756
INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
759
queue_work(event_wq, &netdev_work->work);
drivers/infiniband/sw/rdmavt/cq.c
120
static void send_complete(struct work_struct *work)
drivers/infiniband/sw/rdmavt/cq.c
122
struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);
drivers/infiniband/sw/rxe/rxe_odp.c
412
struct work_struct work;
drivers/infiniband/sw/rxe/rxe_odp.c
424
struct prefetch_mr_work *work =
drivers/infiniband/sw/rxe/rxe_odp.c
425
container_of(w, struct prefetch_mr_work, work);
drivers/infiniband/sw/rxe/rxe_odp.c
433
WARN_ON(!work->num_sge);
drivers/infiniband/sw/rxe/rxe_odp.c
434
for (i = 0; i < work->num_sge; ++i) {
drivers/infiniband/sw/rxe/rxe_odp.c
437
ret = rxe_odp_do_pagefault_and_lock(work->frags[i].mr,
drivers/infiniband/sw/rxe/rxe_odp.c
438
work->frags[i].io_virt,
drivers/infiniband/sw/rxe/rxe_odp.c
439
work->frags[i].length,
drivers/infiniband/sw/rxe/rxe_odp.c
440
work->pf_flags);
drivers/infiniband/sw/rxe/rxe_odp.c
442
rxe_dbg_mr(work->frags[i].mr,
drivers/infiniband/sw/rxe/rxe_odp.c
447
umem_odp = to_ib_umem_odp(work->frags[i].mr->umem);
drivers/infiniband/sw/rxe/rxe_odp.c
451
rxe_put(work->frags[i].mr);
drivers/infiniband/sw/rxe/rxe_odp.c
454
kvfree(work);
drivers/infiniband/sw/rxe/rxe_odp.c
510
struct prefetch_mr_work *work;
drivers/infiniband/sw/rxe/rxe_odp.c
526
work = kvzalloc_flex(*work, frags, num_sge);
drivers/infiniband/sw/rxe/rxe_odp.c
527
if (!work)
drivers/infiniband/sw/rxe/rxe_odp.c
530
INIT_WORK(&work->work, rxe_ib_prefetch_mr_work);
drivers/infiniband/sw/rxe/rxe_odp.c
531
work->pf_flags = pf_flags;
drivers/infiniband/sw/rxe/rxe_odp.c
532
work->num_sge = num_sge;
drivers/infiniband/sw/rxe/rxe_odp.c
543
work->frags[i].io_virt = sg_list[i].addr;
drivers/infiniband/sw/rxe/rxe_odp.c
544
work->frags[i].length = sg_list[i].length;
drivers/infiniband/sw/rxe/rxe_odp.c
545
work->frags[i].mr = mr;
drivers/infiniband/sw/rxe/rxe_odp.c
548
queue_work(system_unbound_wq, &work->work);
drivers/infiniband/sw/rxe/rxe_odp.c
556
rxe_put(work->frags[i].mr);
drivers/infiniband/sw/rxe/rxe_odp.c
559
kvfree(work);
drivers/infiniband/sw/rxe/rxe_qp.c
853
static void rxe_qp_do_cleanup(struct work_struct *work)
drivers/infiniband/sw/rxe/rxe_qp.c
855
struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
drivers/infiniband/sw/rxe/rxe_task.c
189
static void do_work(struct work_struct *work)
drivers/infiniband/sw/rxe/rxe_task.c
191
do_task(container_of(work, struct rxe_task, work));
drivers/infiniband/sw/rxe/rxe_task.c
203
INIT_WORK(&task->work, do_work);
drivers/infiniband/sw/rxe/rxe_task.c
253
queue_work(rxe_wq, &task->work);
drivers/infiniband/sw/rxe/rxe_task.c
62
if (work_pending(&task->work))
drivers/infiniband/sw/rxe/rxe_task.h
25
struct work_struct work;
drivers/infiniband/sw/siw/siw_cm.c
1060
struct siw_cm_work *work;
drivers/infiniband/sw/siw/siw_cm.c
1064
work = container_of(w, struct siw_cm_work, work.work);
drivers/infiniband/sw/siw/siw_cm.c
1065
cep = work->cep;
drivers/infiniband/sw/siw/siw_cm.c
1069
work->type, cep->state);
drivers/infiniband/sw/siw/siw_cm.c
1073
switch (work->type) {
drivers/infiniband/sw/siw/siw_cm.c
1202
WARN(1, "Undefined CM work type: %d\n", work->type);
drivers/infiniband/sw/siw/siw_cm.c
1241
siw_put_work(work);
drivers/infiniband/sw/siw/siw_cm.c
1249
struct siw_cm_work *work = siw_get_work(cep);
drivers/infiniband/sw/siw/siw_cm.c
1252
if (!work) {
drivers/infiniband/sw/siw/siw_cm.c
1256
work->type = type;
drivers/infiniband/sw/siw/siw_cm.c
1257
work->cep = cep;
drivers/infiniband/sw/siw/siw_cm.c
1261
INIT_DELAYED_WORK(&work->work, siw_cm_work_handler);
drivers/infiniband/sw/siw/siw_cm.c
1264
cep->mpa_timer = work;
drivers/infiniband/sw/siw/siw_cm.c
1274
queue_delayed_work(siw_cm_wq, &work->work, delay);
drivers/infiniband/sw/siw/siw_cm.c
236
struct siw_cm_work *work;
drivers/infiniband/sw/siw/siw_cm.c
239
work = list_entry(w, struct siw_cm_work, list);
drivers/infiniband/sw/siw/siw_cm.c
240
list_del(&work->list);
drivers/infiniband/sw/siw/siw_cm.c
241
kfree(work);
drivers/infiniband/sw/siw/siw_cm.c
249
if (cancel_delayed_work(&cep->mpa_timer->work)) {
drivers/infiniband/sw/siw/siw_cm.c
258
static void siw_put_work(struct siw_cm_work *work)
drivers/infiniband/sw/siw/siw_cm.c
260
INIT_LIST_HEAD(&work->list);
drivers/infiniband/sw/siw/siw_cm.c
261
spin_lock_bh(&work->cep->lock);
drivers/infiniband/sw/siw/siw_cm.c
262
list_add(&work->list, &work->cep->work_freelist);
drivers/infiniband/sw/siw/siw_cm.c
263
spin_unlock_bh(&work->cep->lock);
drivers/infiniband/sw/siw/siw_cm.c
320
struct siw_cm_work *work = NULL;
drivers/infiniband/sw/siw/siw_cm.c
324
work = list_entry(cep->work_freelist.next, struct siw_cm_work,
drivers/infiniband/sw/siw/siw_cm.c
326
list_del_init(&work->list);
drivers/infiniband/sw/siw/siw_cm.c
329
return work;
drivers/infiniband/sw/siw/siw_cm.c
334
struct siw_cm_work *work;
drivers/infiniband/sw/siw/siw_cm.c
337
work = kmalloc_obj(*work);
drivers/infiniband/sw/siw/siw_cm.c
338
if (!work) {
drivers/infiniband/sw/siw/siw_cm.c
343
work->cep = cep;
drivers/infiniband/sw/siw/siw_cm.c
344
INIT_LIST_HEAD(&work->list);
drivers/infiniband/sw/siw/siw_cm.c
345
list_add(&work->list, &cep->work_freelist);
drivers/infiniband/sw/siw/siw_cm.h
86
struct delayed_work work;
drivers/infiniband/ulp/ipoib/ipoib.h
312
struct work_struct work;
drivers/infiniband/ulp/ipoib/ipoib.h
497
void ipoib_reap_ah(struct work_struct *work);
drivers/infiniband/ulp/ipoib/ipoib.h
499
void ipoib_napi_schedule_work(struct work_struct *work);
drivers/infiniband/ulp/ipoib/ipoib.h
507
void ipoib_ib_dev_flush_light(struct work_struct *work);
drivers/infiniband/ulp/ipoib/ipoib.h
508
void ipoib_ib_dev_flush_normal(struct work_struct *work);
drivers/infiniband/ulp/ipoib/ipoib.h
509
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
drivers/infiniband/ulp/ipoib/ipoib.h
512
void ipoib_ib_tx_timeout_work(struct work_struct *work);
drivers/infiniband/ulp/ipoib/ipoib.h
523
void ipoib_mcast_join_task(struct work_struct *work);
drivers/infiniband/ulp/ipoib/ipoib.h
524
void ipoib_mcast_carrier_on_task(struct work_struct *work);
drivers/infiniband/ulp/ipoib/ipoib.h
527
void ipoib_mcast_restart_task(struct work_struct *work);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1337
static void ipoib_cm_tx_start(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1339
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1397
static void ipoib_cm_tx_reap(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1399
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1422
static void ipoib_cm_skb_reap(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1424
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1471
static void ipoib_cm_rx_reap(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1473
ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1477
static void ipoib_cm_stale_task(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1479
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1480
cm.stale_task.work);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
1274
void ipoib_ib_dev_flush_light(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_ib.c
1277
container_of(work, struct ipoib_dev_priv, flush_light);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
1282
void ipoib_ib_dev_flush_normal(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_ib.c
1285
container_of(work, struct ipoib_dev_priv, flush_normal);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
1290
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_ib.c
1293
container_of(work, struct ipoib_dev_priv, flush_heavy);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
346
static void ipoib_qp_state_validate_work(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_ib.c
349
container_of(work, struct ipoib_qp_state_validate, work);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
429
INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
431
queue_work(priv->wq, &qp_work->work);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
536
void ipoib_napi_schedule_work(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_ib.c
539
container_of(work, struct ipoib_dev_priv, reschedule_napi_work);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
717
void ipoib_reap_ah(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_ib.c
720
container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
drivers/infiniband/ulp/ipoib/ipoib_main.c
1248
void ipoib_ib_tx_timeout_work(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_main.c
1250
struct ipoib_dev_priv *priv = container_of(work,
drivers/infiniband/ulp/ipoib/ipoib_main.c
137
struct work_struct work;
drivers/infiniband/ulp/ipoib/ipoib_main.c
1429
static void ipoib_reap_neigh(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_main.c
143
static void ipoib_ifupdown_task(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_main.c
1432
container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);
drivers/infiniband/ulp/ipoib/ipoib_main.c
146
container_of(work, struct ipoib_ifupdown_work, work);
drivers/infiniband/ulp/ipoib/ipoib_main.c
166
struct ipoib_ifupdown_work *work;
drivers/infiniband/ulp/ipoib/ipoib_main.c
172
work = kmalloc_obj(*work);
drivers/infiniband/ulp/ipoib/ipoib_main.c
173
if (!work)
drivers/infiniband/ulp/ipoib/ipoib_main.c
175
work->dev = dev;
drivers/infiniband/ulp/ipoib/ipoib_main.c
176
netdev_hold(dev, &work->dev_tracker, GFP_KERNEL);
drivers/infiniband/ulp/ipoib/ipoib_main.c
177
work->up = up;
drivers/infiniband/ulp/ipoib/ipoib_main.c
178
INIT_WORK(&work->work, ipoib_ifupdown_task);
drivers/infiniband/ulp/ipoib/ipoib_main.c
179
queue_work(ipoib_workqueue, &work->work);
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
324
void ipoib_mcast_carrier_on_task(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
326
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
549
void ipoib_mcast_join_task(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
552
container_of(work, struct ipoib_dev_priv, mcast_task.work);
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
864
void ipoib_mcast_restart_task(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
867
container_of(work, struct ipoib_dev_priv, restart_task);
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
216
struct work_struct work;
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
230
static void ipoib_vlan_delete_task(struct work_struct *work)
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
233
container_of(work, struct ipoib_vlan_delete_work, work);
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
275
struct ipoib_vlan_delete_work *work;
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
277
work = kmalloc_obj(*work);
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
278
if (!work) {
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
284
work->dev = priv->dev;
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
285
INIT_WORK(&work->work, ipoib_vlan_delete_task);
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
286
queue_work(ipoib_workqueue, &work->work);
drivers/infiniband/ulp/iser/iscsi_iser.h
494
void iser_release_work(struct work_struct *work);
drivers/infiniband/ulp/iser/iser_verbs.c
343
void iser_release_work(struct work_struct *work)
drivers/infiniband/ulp/iser/iser_verbs.c
347
iser_conn = container_of(work, struct iser_conn, release_work);
drivers/infiniband/ulp/isert/ib_isert.c
1658
isert_do_control_comp(struct work_struct *work)
drivers/infiniband/ulp/isert/ib_isert.c
1660
struct isert_cmd *isert_cmd = container_of(work,
drivers/infiniband/ulp/isert/ib_isert.c
2478
static void isert_release_work(struct work_struct *work)
drivers/infiniband/ulp/isert/ib_isert.c
2480
struct isert_conn *isert_conn = container_of(work,
drivers/infiniband/ulp/isert/ib_isert.c
57
static void isert_release_work(struct work_struct *work);
drivers/infiniband/ulp/rtrs/rtrs-clt.c
1512
static void rtrs_clt_reconnect_work(struct work_struct *work);
drivers/infiniband/ulp/rtrs/rtrs-clt.c
1513
static void rtrs_clt_close_work(struct work_struct *work);
drivers/infiniband/ulp/rtrs/rtrs-clt.c
1515
static void rtrs_clt_err_recovery_work(struct work_struct *work)
drivers/infiniband/ulp/rtrs/rtrs-clt.c
1521
clt_path = container_of(work, struct rtrs_clt_path, err_recovery_work);
drivers/infiniband/ulp/rtrs/rtrs-clt.c
2341
static void rtrs_clt_close_work(struct work_struct *work)
drivers/infiniband/ulp/rtrs/rtrs-clt.c
2345
clt_path = container_of(work, struct rtrs_clt_path, close_work);
drivers/infiniband/ulp/rtrs/rtrs-clt.c
2665
static void rtrs_clt_reconnect_work(struct work_struct *work)
drivers/infiniband/ulp/rtrs/rtrs-clt.c
2671
clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
drivers/infiniband/ulp/rtrs/rtrs-srv.c
1585
static void rtrs_srv_close_work(struct work_struct *work)
drivers/infiniband/ulp/rtrs/rtrs-srv.c
1591
srv_path = container_of(work, typeof(*srv_path), close_work);
drivers/infiniband/ulp/rtrs/rtrs.c
353
static void hb_work(struct work_struct *work)
drivers/infiniband/ulp/rtrs/rtrs.c
360
path = container_of(to_delayed_work(work), typeof(*path), hb_dwork);
drivers/infiniband/ulp/srp/ib_srp.c
1068
static void srp_remove_work(struct work_struct *work)
drivers/infiniband/ulp/srp/ib_srp.c
1071
container_of(work, struct srp_target_port, remove_work);
drivers/infiniband/ulp/srp/ib_srp.c
2127
static void srp_tl_err_work(struct work_struct *work)
drivers/infiniband/ulp/srp/ib_srp.c
2131
target = container_of(work, struct srp_target_port, tl_err_work);
drivers/infiniband/ulp/srpt/ib_srpt.c
234
schedule_work(&sport->work);
drivers/infiniband/ulp/srpt/ib_srpt.c
3001
static void srpt_refresh_port_work(struct work_struct *work)
drivers/infiniband/ulp/srpt/ib_srpt.c
3003
struct srpt_port *sport = container_of(work, struct srpt_port, work);
drivers/infiniband/ulp/srpt/ib_srpt.c
3277
INIT_WORK(&sport->work, srpt_refresh_port_work);
drivers/infiniband/ulp/srpt/ib_srpt.c
3327
cancel_work_sync(&sdev->port[i].work);
drivers/infiniband/ulp/srpt/ib_srpt.h
423
struct work_struct work;
drivers/input/gameport/gameport.c
319
static void gameport_handle_events(struct work_struct *work)
drivers/input/input-poller.c
171
cancel_delayed_work_sync(&poller->work);
drivers/input/input-poller.c
24
struct delayed_work work;
drivers/input/input-poller.c
35
queue_delayed_work(system_freezable_wq, &poller->work, delay);
drivers/input/input-poller.c
38
static void input_dev_poller_work(struct work_struct *work)
drivers/input/input-poller.c
41
container_of(work, struct input_dev_poller, work.work);
drivers/input/input-poller.c
66
cancel_delayed_work_sync(&poller->work);
drivers/input/input-poller.c
86
INIT_DELAYED_WORK(&poller->work, input_dev_poller_work);
drivers/input/joystick/xpad.c
1036
schedule_work(&xpad->work);
drivers/input/joystick/xpad.c
1901
flush_work(&xpad->work);
drivers/input/joystick/xpad.c
2114
INIT_WORK(&xpad->work, xpad_presence_work);
drivers/input/joystick/xpad.c
804
struct work_struct work; /* init/remove device from callback */
drivers/input/joystick/xpad.c
986
static void xpad_presence_work(struct work_struct *work)
drivers/input/joystick/xpad.c
988
struct usb_xpad *xpad = container_of(work, struct usb_xpad, work);
drivers/input/keyboard/applespi.c
1327
static void applespi_worker(struct work_struct *work)
drivers/input/keyboard/applespi.c
1330
container_of(work, struct applespi_data, work);
drivers/input/keyboard/applespi.c
1346
schedule_work(&applespi->work);
drivers/input/keyboard/applespi.c
1631
INIT_WORK(&applespi->work, applespi_worker);
drivers/input/keyboard/applespi.c
424
struct work_struct work;
drivers/input/keyboard/atkbd.c
638
static void atkbd_event_work(struct work_struct *work)
drivers/input/keyboard/atkbd.c
640
struct atkbd *atkbd = container_of(work, struct atkbd, event_work.work);
drivers/input/keyboard/gpio_keys.c
139
cancel_delayed_work_sync(&bdata->work);
drivers/input/keyboard/gpio_keys.c
393
static void gpio_keys_gpio_work_func(struct work_struct *work)
drivers/input/keyboard/gpio_keys.c
396
container_of(work, struct gpio_button_data, work.work);
drivers/input/keyboard/gpio_keys.c
43
struct delayed_work work;
drivers/input/keyboard/gpio_keys.c
438
&bdata->work,
drivers/input/keyboard/gpio_keys.c
593
INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func);
drivers/input/keyboard/imx-sm-bbm-key.c
30
static void scmi_imx_bbm_pwrkey_check_for_events(struct work_struct *work)
drivers/input/keyboard/imx-sm-bbm-key.c
32
struct scmi_imx_bbm *bbnsm = container_of(to_delayed_work(work),
drivers/input/keyboard/imx_sc_key.c
58
static void imx_sc_check_for_events(struct work_struct *work)
drivers/input/keyboard/imx_sc_key.c
61
container_of(work,
drivers/input/keyboard/imx_sc_key.c
63
check_work.work);
drivers/input/keyboard/lkkbd.c
563
static void lkkbd_reinit(struct work_struct *work)
drivers/input/keyboard/lkkbd.c
565
struct lkkbd *lk = container_of(work, struct lkkbd, tq);
drivers/input/keyboard/lm8323.c
129
struct work_struct work;
drivers/input/keyboard/lm8323.c
154
#define work_to_pwm(w) container_of(w, struct lm8323_pwm, work)
drivers/input/keyboard/lm8323.c
358
schedule_work(&pwm->work);
drivers/input/keyboard/lm8323.c
440
static void lm8323_pwm_work(struct work_struct *work)
drivers/input/keyboard/lm8323.c
442
struct lm8323_pwm *pwm = work_to_pwm(work);
drivers/input/keyboard/lm8323.c
504
schedule_work(&pwm->work);
drivers/input/keyboard/lm8323.c
511
schedule_work(&pwm->work);
drivers/input/keyboard/lm8323.c
513
lm8323_pwm_work(&pwm->work);
drivers/input/keyboard/lm8323.c
567
INIT_WORK(&pwm->work, lm8323_pwm_work);
drivers/input/keyboard/matrix_keypad.c
120
static void matrix_keypad_scan(struct work_struct *work)
drivers/input/keyboard/matrix_keypad.c
123
container_of(work, struct matrix_keypad, work.work);
drivers/input/keyboard/matrix_keypad.c
189
schedule_delayed_work(&keypad->work,
drivers/input/keyboard/matrix_keypad.c
210
schedule_delayed_work(&keypad->work,
drivers/input/keyboard/matrix_keypad.c
228
schedule_delayed_work(&keypad->work, 0);
drivers/input/keyboard/matrix_keypad.c
241
flush_delayed_work(&keypad->work);
drivers/input/keyboard/matrix_keypad.c
414
INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan);
drivers/input/keyboard/matrix_keypad.c
44
struct delayed_work work;
drivers/input/keyboard/sunkbd.c
225
static void sunkbd_reinit(struct work_struct *work)
drivers/input/keyboard/sunkbd.c
227
struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
drivers/input/misc/arizona-haptics.c
132
schedule_work(&haptics->work);
drivers/input/misc/arizona-haptics.c
142
cancel_work_sync(&haptics->work);
drivers/input/misc/arizona-haptics.c
168
INIT_WORK(&haptics->work, arizona_haptics_work);
drivers/input/misc/arizona-haptics.c
25
struct work_struct work;
drivers/input/misc/arizona-haptics.c
31
static void arizona_haptics_work(struct work_struct *work)
drivers/input/misc/arizona-haptics.c
33
struct arizona_haptics *haptics = container_of(work,
drivers/input/misc/arizona-haptics.c
35
work);
drivers/input/misc/atc260x-onkey.c
122
schedule_delayed_work(&onkey->work, msecs_to_jiffies(200));
drivers/input/misc/atc260x-onkey.c
144
static void atc260x_onkey_work(struct work_struct *work)
drivers/input/misc/atc260x-onkey.c
146
struct atc260x_onkey *onkey = container_of(work, struct atc260x_onkey,
drivers/input/misc/atc260x-onkey.c
147
work.work);
drivers/input/misc/atc260x-onkey.c
187
cancel_delayed_work_sync(&onkey->work);
drivers/input/misc/atc260x-onkey.c
259
INIT_DELAYED_WORK(&onkey->work, atc260x_onkey_work);
drivers/input/misc/atc260x-onkey.c
44
struct delayed_work work;
drivers/input/misc/aw86927.c
405
static void aw86927_haptics_play_work(struct work_struct *work)
drivers/input/misc/aw86927.c
408
container_of(work, struct aw86927_data, play_work);
drivers/input/misc/cs40l50-vibra.c
264
static void cs40l50_add_worker(struct work_struct *work)
drivers/input/misc/cs40l50-vibra.c
266
struct cs40l50_work *work_data = container_of(work, struct cs40l50_work, work);
drivers/input/misc/cs40l50-vibra.c
338
INIT_WORK_ONSTACK(&work_data.work, cs40l50_add_worker);
drivers/input/misc/cs40l50-vibra.c
341
queue_work(vib->vib_wq, &work_data.work);
drivers/input/misc/cs40l50-vibra.c
342
flush_work(&work_data.work);
drivers/input/misc/cs40l50-vibra.c
343
destroy_work_on_stack(&work_data.work);
drivers/input/misc/cs40l50-vibra.c
350
static void cs40l50_start_worker(struct work_struct *work)
drivers/input/misc/cs40l50-vibra.c
352
struct cs40l50_work *work_data = container_of(work, struct cs40l50_work, work);
drivers/input/misc/cs40l50-vibra.c
375
static void cs40l50_stop_worker(struct work_struct *work)
drivers/input/misc/cs40l50-vibra.c
377
struct cs40l50_work *work_data = container_of(work, struct cs40l50_work, work);
drivers/input/misc/cs40l50-vibra.c
404
INIT_WORK(&work_data->work, cs40l50_start_worker);
drivers/input/misc/cs40l50-vibra.c
407
INIT_WORK(&work_data->work, cs40l50_stop_worker);
drivers/input/misc/cs40l50-vibra.c
410
queue_work(vib->vib_wq, &work_data->work);
drivers/input/misc/cs40l50-vibra.c
415
static void cs40l50_erase_worker(struct work_struct *work)
drivers/input/misc/cs40l50-vibra.c
417
struct cs40l50_work *work_data = container_of(work, struct cs40l50_work, work);
drivers/input/misc/cs40l50-vibra.c
469
INIT_WORK_ONSTACK(&work_data.work, cs40l50_erase_worker);
drivers/input/misc/cs40l50-vibra.c
472
queue_work(vib->vib_wq, &work_data.work);
drivers/input/misc/cs40l50-vibra.c
473
flush_work(&work_data.work);
drivers/input/misc/cs40l50-vibra.c
474
destroy_work_on_stack(&work_data.work);
drivers/input/misc/cs40l50-vibra.c
91
struct work_struct work;
drivers/input/misc/da7280.c
1194
INIT_WORK(&haptics->work, da7280_haptic_work);
drivers/input/misc/da7280.c
233
struct work_struct work;
drivers/input/misc/da7280.c
492
static void da7280_haptic_work(struct work_struct *work)
drivers/input/misc/da7280.c
495
container_of(work, struct da7280_haptic, work);
drivers/input/misc/da7280.c
670
schedule_work(&haptics->work);
drivers/input/misc/da7280.c
696
cancel_work_sync(&haptics->work);
drivers/input/misc/da9052_onkey.c
122
cancel_delayed_work_sync(&onkey->work);
drivers/input/misc/da9052_onkey.c
135
cancel_delayed_work_sync(&onkey->work);
drivers/input/misc/da9052_onkey.c
21
struct delayed_work work;
drivers/input/misc/da9052_onkey.c
49
schedule_delayed_work(&onkey->work,
drivers/input/misc/da9052_onkey.c
54
static void da9052_onkey_work(struct work_struct *work)
drivers/input/misc/da9052_onkey.c
56
struct da9052_onkey *onkey = container_of(work, struct da9052_onkey,
drivers/input/misc/da9052_onkey.c
57
work.work);
drivers/input/misc/da9052_onkey.c
93
INIT_DELAYED_WORK(&onkey->work, da9052_onkey_work);
drivers/input/misc/da9055_onkey.c
103
INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work);
drivers/input/misc/da9055_onkey.c
128
cancel_delayed_work_sync(&onkey->work);
drivers/input/misc/da9055_onkey.c
142
cancel_delayed_work_sync(&onkey->work);
drivers/input/misc/da9055_onkey.c
20
struct delayed_work work;
drivers/input/misc/da9055_onkey.c
47
schedule_delayed_work(&onkey->work, msecs_to_jiffies(10));
drivers/input/misc/da9055_onkey.c
51
static void da9055_onkey_work(struct work_struct *work)
drivers/input/misc/da9055_onkey.c
53
struct da9055_onkey *onkey = container_of(work, struct da9055_onkey,
drivers/input/misc/da9055_onkey.c
54
work.work);
drivers/input/misc/da9063_onkey.c
152
schedule_delayed_work(&onkey->work, msecs_to_jiffies(50));
drivers/input/misc/da9063_onkey.c
168
schedule_delayed_work(&onkey->work, 0);
drivers/input/misc/da9063_onkey.c
217
error = devm_delayed_work_autocancel(&pdev->dev, &onkey->work,
drivers/input/misc/da9063_onkey.c
39
struct delayed_work work;
drivers/input/misc/da9063_onkey.c
78
static void da9063_poll_on(struct work_struct *work)
drivers/input/misc/da9063_onkey.c
80
struct da9063_onkey *onkey = container_of(work,
drivers/input/misc/da9063_onkey.c
82
work.work);
drivers/input/misc/drv260x.c
186
struct work_struct work;
drivers/input/misc/drv260x.c
210
static void drv260x_worker(struct work_struct *work)
drivers/input/misc/drv260x.c
212
struct drv260x_data *haptics = container_of(work, struct drv260x_data, work);
drivers/input/misc/drv260x.c
248
schedule_work(&haptics->work);
drivers/input/misc/drv260x.c
258
cancel_work_sync(&haptics->work);
drivers/input/misc/drv260x.c
510
INIT_WORK(&haptics->work, drv260x_worker);
drivers/input/misc/drv2665.c
108
schedule_work(&haptics->work);
drivers/input/misc/drv2665.c
118
cancel_work_sync(&haptics->work);
drivers/input/misc/drv2665.c
196
INIT_WORK(&haptics->work, drv2665_worker);
drivers/input/misc/drv2665.c
57
struct work_struct work;
drivers/input/misc/drv2665.c
76
static void drv2665_worker(struct work_struct *work)
drivers/input/misc/drv2665.c
79
container_of(work, struct drv2665_data, work);
drivers/input/misc/drv2667.c
106
struct work_struct work;
drivers/input/misc/drv2667.c
182
static void drv2667_worker(struct work_struct *work)
drivers/input/misc/drv2667.c
184
struct drv2667_data *haptics = container_of(work, struct drv2667_data, work);
drivers/input/misc/drv2667.c
240
schedule_work(&haptics->work);
drivers/input/misc/drv2667.c
250
cancel_work_sync(&haptics->work);
drivers/input/misc/drv2667.c
373
INIT_WORK(&haptics->work, drv2667_worker);
drivers/input/misc/gpio-beeper.c
18
struct work_struct work;
drivers/input/misc/gpio-beeper.c
28
static void gpio_beeper_work(struct work_struct *work)
drivers/input/misc/gpio-beeper.c
30
struct gpio_beeper *beep = container_of(work, struct gpio_beeper, work);
drivers/input/misc/gpio-beeper.c
48
schedule_work(&beep->work);
drivers/input/misc/gpio-beeper.c
57
cancel_work_sync(&beep->work);
drivers/input/misc/gpio-beeper.c
78
INIT_WORK(&beep->work, gpio_beeper_work);
drivers/input/misc/gpio-vibra.c
66
static void gpio_vibrator_play_work(struct work_struct *work)
drivers/input/misc/gpio-vibra.c
69
container_of(work, struct gpio_vibrator, play_work);
drivers/input/misc/max77693-haptic.c
229
static void max77693_haptic_play_work(struct work_struct *work)
drivers/input/misc/max77693-haptic.c
232
container_of(work, struct max77693_haptic, work);
drivers/input/misc/max77693-haptic.c
263
schedule_work(&haptic->work);
drivers/input/misc/max77693-haptic.c
292
cancel_work_sync(&haptic->work);
drivers/input/misc/max77693-haptic.c
335
INIT_WORK(&haptic->work, max77693_haptic_play_work);
drivers/input/misc/max77693-haptic.c
66
struct work_struct work;
drivers/input/misc/max8997_haptic.c
199
static void max8997_haptic_play_effect_work(struct work_struct *work)
drivers/input/misc/max8997_haptic.c
202
container_of(work, struct max8997_haptic, work);
drivers/input/misc/max8997_haptic.c
219
schedule_work(&chip->work);
drivers/input/misc/max8997_haptic.c
228
cancel_work_sync(&chip->work);
drivers/input/misc/max8997_haptic.c
258
INIT_WORK(&chip->work, max8997_haptic_play_effect_work);
drivers/input/misc/max8997_haptic.c
38
struct work_struct work;
drivers/input/misc/palmas-pwrbutton.c
53
static void palmas_power_button_work(struct work_struct *work)
drivers/input/misc/palmas-pwrbutton.c
55
struct palmas_pwron *pwron = container_of(work,
drivers/input/misc/palmas-pwrbutton.c
57
input_work.work);
drivers/input/misc/pm8xxx-vibrator.c
139
static void pm8xxx_work_handler(struct work_struct *work)
drivers/input/misc/pm8xxx-vibrator.c
141
struct pm8xxx_vib *vib = container_of(work, struct pm8xxx_vib, work);
drivers/input/misc/pm8xxx-vibrator.c
175
cancel_work_sync(&vib->work);
drivers/input/misc/pm8xxx-vibrator.c
197
schedule_work(&vib->work);
drivers/input/misc/pm8xxx-vibrator.c
222
INIT_WORK(&vib->work, pm8xxx_work_handler);
drivers/input/misc/pm8xxx-vibrator.c
83
struct work_struct work;
drivers/input/misc/pwm-beeper.c
104
schedule_work(&beeper->work);
drivers/input/misc/pwm-beeper.c
111
cancel_work_sync(&beeper->work);
drivers/input/misc/pwm-beeper.c
153
INIT_WORK(&beeper->work, pwm_beeper_work);
drivers/input/misc/pwm-beeper.c
22
struct work_struct work;
drivers/input/misc/pwm-beeper.c
224
schedule_work(&beeper->work);
drivers/input/misc/pwm-beeper.c
69
static void pwm_beeper_work(struct work_struct *work)
drivers/input/misc/pwm-beeper.c
71
struct pwm_beeper *beeper = container_of(work, struct pwm_beeper, work);
drivers/input/misc/pwm-vibra.c
95
static void pwm_vibrator_play_work(struct work_struct *work)
drivers/input/misc/pwm-vibra.c
97
struct pwm_vibrator *vibrator = container_of(work,
drivers/input/misc/qnap-mcu-input.c
54
static void qnap_mcu_input_beeper_work(struct work_struct *work)
drivers/input/misc/qnap-mcu-input.c
57
container_of(work, struct qnap_mcu_input_dev, beep_work);
drivers/input/misc/regulator-haptic.c
102
schedule_work(&haptic->work);
drivers/input/misc/regulator-haptic.c
111
cancel_work_sync(&haptic->work);
drivers/input/misc/regulator-haptic.c
156
INIT_WORK(&haptic->work, regulator_haptic_work);
drivers/input/misc/regulator-haptic.c
26
struct work_struct work;
drivers/input/misc/regulator-haptic.c
82
static void regulator_haptic_work(struct work_struct *work)
drivers/input/misc/regulator-haptic.c
84
struct regulator_haptic *haptic = container_of(work,
drivers/input/misc/regulator-haptic.c
85
struct regulator_haptic, work);
drivers/input/misc/sc27xx-vibra.c
84
static void sc27xx_vibra_play_work(struct work_struct *work)
drivers/input/misc/sc27xx-vibra.c
86
struct vibra_info *info = container_of(work, struct vibra_info,
drivers/input/misc/twl4030-vibra.c
85
static void vibra_play_work(struct work_struct *work)
drivers/input/misc/twl4030-vibra.c
87
struct vibra_info *info = container_of(work,
drivers/input/misc/twl6040-vibra.c
166
static void vibra_play_work(struct work_struct *work)
drivers/input/misc/twl6040-vibra.c
168
struct vibra_info *info = container_of(work,
drivers/input/misc/wm831x-on.c
132
cancel_delayed_work_sync(&wm831x_on->work);
drivers/input/misc/wm831x-on.c
32
struct delayed_work work;
drivers/input/misc/wm831x-on.c
40
static void wm831x_poll_on(struct work_struct *work)
drivers/input/misc/wm831x-on.c
42
struct wm831x_on *wm831x_on = container_of(work, struct wm831x_on,
drivers/input/misc/wm831x-on.c
43
work.work);
drivers/input/misc/wm831x-on.c
59
schedule_delayed_work(&wm831x_on->work, 100);
drivers/input/misc/wm831x-on.c
66
schedule_delayed_work(&wm831x_on->work, 0);
drivers/input/misc/wm831x-on.c
86
INIT_DELAYED_WORK(&wm831x_on->work, wm831x_poll_on);
drivers/input/mouse/alps.c
1452
static void alps_register_bare_ps2_mouse(struct work_struct *work)
drivers/input/mouse/alps.c
1454
struct alps_data *priv = container_of(work, struct alps_data,
drivers/input/mouse/alps.c
1455
dev3_register_work.work);
drivers/input/mouse/appletouch.c
215
struct work_struct work;
drivers/input/mouse/appletouch.c
317
static void atp_reinit(struct work_struct *work)
drivers/input/mouse/appletouch.c
319
struct atp *dev = container_of(work, struct atp, work);
drivers/input/mouse/appletouch.c
780
schedule_work(&dev->work);
drivers/input/mouse/appletouch.c
810
cancel_work_sync(&dev->work);
drivers/input/mouse/appletouch.c
916
INIT_WORK(&dev->work, atp_reinit);
drivers/input/mouse/bcm5974.c
706
static void bcm5974_mode_reset_work(struct work_struct *work)
drivers/input/mouse/bcm5974.c
708
struct bcm5974 *dev = container_of(work, struct bcm5974, mode_reset_work);
drivers/input/mouse/hgpk.c
914
static void hgpk_recalib_work(struct work_struct *work)
drivers/input/mouse/hgpk.c
916
struct delayed_work *w = to_delayed_work(work);
drivers/input/mouse/psmouse-base.c
1331
static void psmouse_resync(struct work_struct *work)
drivers/input/mouse/psmouse-base.c
1334
container_of(work, struct psmouse, resync_work.work);
drivers/input/mouse/psmouse-base.c
244
void psmouse_queue_work(struct psmouse *psmouse, struct delayed_work *work,
drivers/input/mouse/psmouse-base.c
247
queue_delayed_work(kpsmoused_wq, work, delay);
drivers/input/mouse/psmouse-smbus.c
128
struct work_struct work;
drivers/input/mouse/psmouse-smbus.c
132
static void psmouse_smbus_remove_i2c_device(struct work_struct *work)
drivers/input/mouse/psmouse-smbus.c
135
container_of(work, struct psmouse_smbus_removal_work, work);
drivers/input/mouse/psmouse-smbus.c
159
INIT_WORK(&rwork->work, psmouse_smbus_remove_i2c_device);
drivers/input/mouse/psmouse-smbus.c
162
queue_work(psmouse_smbus_wq, &rwork->work);
drivers/input/mouse/psmouse.h
135
void psmouse_queue_work(struct psmouse *psmouse, struct delayed_work *work,
drivers/input/mouse/synaptics_i2c.c
443
static void synaptics_i2c_work_handler(struct work_struct *work)
drivers/input/mouse/synaptics_i2c.c
447
container_of(work, struct synaptics_i2c, dwork.work);
drivers/input/rmi4/rmi_f54.c
114
struct delayed_work work;
drivers/input/rmi4/rmi_f54.c
210
queue_delayed_work(f54->workqueue, &f54->work, 0);
drivers/input/rmi4/rmi_f54.c
531
static void rmi_f54_work(struct work_struct *work)
drivers/input/rmi4/rmi_f54.c
533
struct f54_data *f54 = container_of(work, struct f54_data, work.work);
drivers/input/rmi4/rmi_f54.c
602
queue_delayed_work(f54->workqueue, &f54->work,
drivers/input/rmi4/rmi_f54.c
687
INIT_DELAYED_WORK(&f54->work, rmi_f54_work);
drivers/input/rmi4/rmi_f54.c
733
cancel_delayed_work_sync(&f54->work);
drivers/input/serio/hp_sdc.c
930
static void request_module_delayed(struct work_struct *work)
drivers/input/serio/ps2-gpio.c
111
flush_delayed_work(&drvdata->tx.work);
drivers/input/serio/ps2-gpio.c
125
schedule_delayed_work(&drvdata->tx.work, usecs_to_jiffies(200));
drivers/input/serio/ps2-gpio.c
149
static void ps2_gpio_tx_work_fn(struct work_struct *work)
drivers/input/serio/ps2-gpio.c
151
struct delayed_work *dwork = to_delayed_work(work);
drivers/input/serio/ps2-gpio.c
154
tx.work);
drivers/input/serio/ps2-gpio.c
463
INIT_DELAYED_WORK(&drvdata->tx.work, ps2_gpio_tx_work_fn);
drivers/input/serio/ps2-gpio.c
92
struct delayed_work work;
drivers/input/serio/serio.c
186
static void serio_handle_event(struct work_struct *work)
drivers/input/tablet/pegasus_notetaker.c
209
static void pegasus_init(struct work_struct *work)
drivers/input/tablet/pegasus_notetaker.c
211
struct pegasus *pegasus = container_of(work, struct pegasus, init);
drivers/input/touchscreen/da9034-ts.c
223
static void da9034_tsi_work(struct work_struct *work)
drivers/input/touchscreen/da9034-ts.c
226
container_of(work, struct da9034_touch, tsi_work.work);
drivers/input/touchscreen/da9052_tsi.c
102
static void da9052_ts_pen_work(struct work_struct *work)
drivers/input/touchscreen/da9052_tsi.c
104
struct da9052_tsi *tsi = container_of(work, struct da9052_tsi,
drivers/input/touchscreen/da9052_tsi.c
105
ts_pen_work.work);
drivers/input/touchscreen/hp680_ts_input.c
111
cancel_delayed_work_sync(&work);
drivers/input/touchscreen/hp680_ts_input.c
119
cancel_delayed_work_sync(&work);
drivers/input/touchscreen/hp680_ts_input.c
21
static void do_softint(struct work_struct *work);
drivers/input/touchscreen/hp680_ts_input.c
24
static DECLARE_DELAYED_WORK(work, do_softint);
drivers/input/touchscreen/hp680_ts_input.c
26
static void do_softint(struct work_struct *work)
drivers/input/touchscreen/hp680_ts_input.c
72
schedule_delayed_work(&work, HZ / 20);
drivers/input/touchscreen/mc13783_ts.c
103
schedule_delayed_work(&priv->work, HZ / 50);
drivers/input/touchscreen/mc13783_ts.c
117
static void mc13783_ts_work(struct work_struct *work)
drivers/input/touchscreen/mc13783_ts.c
120
container_of(work, struct mc13783_ts_priv, work.work);
drivers/input/touchscreen/mc13783_ts.c
162
cancel_delayed_work_sync(&priv->work);
drivers/input/touchscreen/mc13783_ts.c
176
INIT_DELAYED_WORK(&priv->work, mc13783_ts_work);
drivers/input/touchscreen/mc13783_ts.c
36
struct delayed_work work;
drivers/input/touchscreen/mc13783_ts.c
51
schedule_delayed_work(&priv->work, 0);
drivers/input/touchscreen/pcap_ts.c
109
schedule_delayed_work(&pcap_ts->work, 0);
drivers/input/touchscreen/pcap_ts.c
119
schedule_delayed_work(&pcap_ts->work, 0);
drivers/input/touchscreen/pcap_ts.c
128
cancel_delayed_work_sync(&pcap_ts->work);
drivers/input/touchscreen/pcap_ts.c
152
INIT_DELAYED_WORK(&pcap_ts->work, pcap_ts_work);
drivers/input/touchscreen/pcap_ts.c
205
cancel_delayed_work_sync(&pcap_ts->work);
drivers/input/touchscreen/pcap_ts.c
23
struct delayed_work work;
drivers/input/touchscreen/pcap_ts.c
48
schedule_delayed_work(&pcap_ts->work, 0);
drivers/input/touchscreen/pcap_ts.c
60
schedule_delayed_work(&pcap_ts->work, 0);
drivers/input/touchscreen/pcap_ts.c
71
schedule_delayed_work(&pcap_ts->work,
drivers/input/touchscreen/pcap_ts.c
84
static void pcap_ts_work(struct work_struct *work)
drivers/input/touchscreen/pcap_ts.c
86
struct delayed_work *dw = to_delayed_work(work);
drivers/input/touchscreen/pcap_ts.c
87
struct pcap_ts *pcap_ts = container_of(dw, struct pcap_ts, work);
drivers/input/touchscreen/stmpe-ts.c
104
container_of(work, struct stmpe_touch, work.work);
drivers/input/touchscreen/stmpe-ts.c
139
cancel_delayed_work_sync(&ts->work);
drivers/input/touchscreen/stmpe-ts.c
169
schedule_delayed_work(&ts->work, msecs_to_jiffies(50));
drivers/input/touchscreen/stmpe-ts.c
253
cancel_delayed_work_sync(&ts->work);
drivers/input/touchscreen/stmpe-ts.c
314
INIT_DELAYED_WORK(&ts->work, stmpe_work);
drivers/input/touchscreen/stmpe-ts.c
75
struct delayed_work work;
drivers/input/touchscreen/stmpe-ts.c
98
static void stmpe_work(struct work_struct *work)
drivers/input/touchscreen/tsc200x-core.c
358
static void tsc200x_esd_work(struct work_struct *work)
drivers/input/touchscreen/tsc200x-core.c
360
struct tsc200x *ts = container_of(work, struct tsc200x, esd_work.work);
drivers/input/touchscreen/wm831x-ts.c
69
static void wm831x_pd_data_work(struct work_struct *work)
drivers/input/touchscreen/wm831x-ts.c
72
container_of(work, struct wm831x_ts, pd_data_work);
drivers/input/touchscreen/wm97xx-core.c
450
static void wm97xx_ts_reader(struct work_struct *work)
drivers/input/touchscreen/wm97xx-core.c
453
struct wm97xx *wm = container_of(work, struct wm97xx, ts_reader.work);
drivers/iommu/iommu-sva.c
275
static void iommu_sva_handle_iopf(struct work_struct *work)
drivers/iommu/iommu-sva.c
281
group = container_of(work, struct iopf_group, work);
drivers/iommu/iommu-sva.c
302
INIT_WORK(&group->work, iommu_sva_handle_iopf);
drivers/iommu/iommu-sva.c
303
if (!queue_work(fault_param->queue->wq, &group->work))
drivers/iommu/iova.c
591
struct delayed_work work;
drivers/iommu/iova.c
695
static void iova_depot_work_func(struct work_struct *work)
drivers/iommu/iova.c
697
struct iova_rcache *rcache = container_of(work, typeof(*rcache), work.work);
drivers/iommu/iova.c
709
schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);
drivers/iommu/iova.c
730
INIT_DELAYED_WORK(&rcache->work, iova_depot_work_func);
drivers/iommu/iova.c
791
schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);
drivers/iommu/iova.c
892
cancel_delayed_work_sync(&rcache->work);
drivers/irqchip/irq-gic-v3-its.c
5438
static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
drivers/isdn/capi/kcapi.c
237
static void do_notify_work(struct work_struct *work)
drivers/isdn/capi/kcapi.c
240
container_of(work, struct capictr_event, work);
drivers/isdn/capi/kcapi.c
261
INIT_WORK(&event->work, do_notify_work);
drivers/isdn/capi/kcapi.c
265
queue_work(kcapi_wq, &event->work);
drivers/isdn/capi/kcapi.c
271
static void recv_handler(struct work_struct *work)
drivers/isdn/capi/kcapi.c
275
container_of(work, struct capi20_appl, recv_work);
drivers/isdn/capi/kcapi.c
43
struct work_struct work;
drivers/isdn/mISDN/dsp_core.c
1005
dsp_send_bh(struct work_struct *work)
drivers/isdn/mISDN/dsp_core.c
1007
struct dsp *dsp = container_of(work, struct dsp, workq);
drivers/isdn/mISDN/l1oip_core.c
806
l1oip_send_bh(struct work_struct *work)
drivers/isdn/mISDN/l1oip_core.c
808
struct l1oip *hc = container_of(work, struct l1oip, workq);
drivers/isdn/mISDN/timerdev.c
108
while (list_empty(list) && (dev->work == 0)) {
drivers/isdn/mISDN/timerdev.c
112
wait_event_interruptible(dev->wait, (READ_ONCE(dev->work) ||
drivers/isdn/mISDN/timerdev.c
118
if (dev->work)
drivers/isdn/mISDN/timerdev.c
119
WRITE_ONCE(dev->work, 0);
drivers/isdn/mISDN/timerdev.c
144
u32 work;
drivers/isdn/mISDN/timerdev.c
148
work = READ_ONCE(dev->work);
drivers/isdn/mISDN/timerdev.c
149
if (work || !list_empty(&dev->expired))
drivers/isdn/mISDN/timerdev.c
153
work, list_empty(&dev->expired));
drivers/isdn/mISDN/timerdev.c
178
WRITE_ONCE(dev->work, 1);
drivers/isdn/mISDN/timerdev.c
32
u_int work;
drivers/isdn/mISDN/timerdev.c
57
dev->work = 0;
drivers/leds/flash/leds-tps6131x.c
297
static void tps6131x_torch_refresh_handler(struct work_struct *work)
drivers/leds/flash/leds-tps6131x.c
299
struct tps6131x *tps6131x = container_of(work, struct tps6131x, torch_refresh_work.work);
drivers/leds/leds-cr0014114.c
144
static void cr0014114_recount_work(struct work_struct *work)
drivers/leds/leds-cr0014114.c
147
struct cr0014114 *priv = container_of(work,
drivers/leds/leds-cr0014114.c
149
work.work);
drivers/leds/leds-cr0014114.c
159
schedule_delayed_work(&priv->work, CR_RECOUNT_DELAY);
drivers/leds/leds-cr0014114.c
234
INIT_DELAYED_WORK(&priv->work, cr0014114_recount_work);
drivers/leds/leds-cr0014114.c
260
schedule_delayed_work(&priv->work, CR_RECOUNT_DELAY);
drivers/leds/leds-cr0014114.c
271
cancel_delayed_work_sync(&priv->work);
drivers/leds/leds-cr0014114.c
53
struct delayed_work work;
drivers/leds/leds-pca9532.c
277
schedule_work(&data->work);
drivers/leds/leds-pca9532.c
282
static void pca9532_input_work(struct work_struct *work)
drivers/leds/leds-pca9532.c
285
container_of(work, struct pca9532_data, work);
drivers/leds/leds-pca9532.c
375
cancel_work_sync(&data->work);
drivers/leds/leds-pca9532.c
46
struct work_struct work;
drivers/leds/leds-pca9532.c
460
INIT_WORK(&data->work, pca9532_input_work);
drivers/leds/leds-pca9532.c
463
cancel_work_sync(&data->work);
drivers/leds/leds-tca6507.c
166
struct work_struct work;
drivers/leds/leds-tca6507.c
341
static void tca6507_work(struct work_struct *work)
drivers/leds/leds-tca6507.c
343
struct tca6507_chip *tca = container_of(work, struct tca6507_chip,
drivers/leds/leds-tca6507.c
344
work);
drivers/leds/leds-tca6507.c
538
schedule_work(&tca->work);
drivers/leds/leds-tca6507.c
606
schedule_work(&tca->work);
drivers/leds/leds-tca6507.c
738
INIT_WORK(&tca->work, tca6507_work);
drivers/leds/leds-tca6507.c
764
schedule_work(&tca->work);
drivers/leds/leds-tca6507.c
773
cancel_work_sync(&tca->work);
drivers/leds/trigger/ledtrig-input-events.c
138
INIT_DELAYED_WORK(&input_events_data.work, led_input_events_work);
drivers/leds/trigger/ledtrig-input-events.c
155
cancel_delayed_work_sync(&input_events_data.work);
drivers/leds/trigger/ledtrig-input-events.c
24
struct delayed_work work;
drivers/leds/trigger/ledtrig-input-events.c
33
static void led_input_events_work(struct work_struct *work)
drivers/leds/trigger/ledtrig-input-events.c
36
container_of(work, struct input_events_data, work.work);
drivers/leds/trigger/ledtrig-input-events.c
69
mod_delayed_work(system_percpu_wq, &data->work, led_off_delay);
drivers/leds/trigger/ledtrig-netdev.c
152
schedule_delayed_work(&trigger_data->work, 0);
drivers/leds/trigger/ledtrig-netdev.c
265
cancel_delayed_work_sync(&trigger_data->work);
drivers/leds/trigger/ledtrig-netdev.c
402
cancel_delayed_work_sync(&trigger_data->work);
drivers/leds/trigger/ledtrig-netdev.c
469
cancel_delayed_work_sync(&trigger_data->work);
drivers/leds/trigger/ledtrig-netdev.c
587
cancel_delayed_work_sync(&trigger_data->work);
drivers/leds/trigger/ledtrig-netdev.c
60
struct delayed_work work;
drivers/leds/trigger/ledtrig-netdev.c
627
static void netdev_trig_work(struct work_struct *work)
drivers/leds/trigger/ledtrig-netdev.c
630
container_of(work, struct led_netdev_data, work.work);
drivers/leds/trigger/ledtrig-netdev.c
683
schedule_delayed_work(&trigger_data->work,
drivers/leds/trigger/ledtrig-netdev.c
703
INIT_DELAYED_WORK(&trigger_data->work, netdev_trig_work);
drivers/leds/trigger/ledtrig-netdev.c
745
cancel_delayed_work_sync(&trigger_data->work);
drivers/leds/trigger/ledtrig-tty.c
195
static void ledtrig_tty_work(struct work_struct *work)
drivers/leds/trigger/ledtrig-tty.c
198
container_of(work, struct ledtrig_tty_data, dwork.work);
drivers/macintosh/ams/ams-core.c
87
static void ams_worker(struct work_struct *work)
drivers/macintosh/rack-meter.c
212
static void rackmeter_do_timer(struct work_struct *work)
drivers/macintosh/rack-meter.c
215
container_of(work, struct rackmeter_cpu, sniffer.work);
drivers/md/bcache/btree.c
1107
cancel_delayed_work(&b->work);
drivers/md/bcache/btree.c
313
queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
drivers/md/bcache/btree.c
323
__btree_node_write_done(&cl->work);
drivers/md/bcache/btree.c
417
cancel_delayed_work(&b->work);
drivers/md/bcache/btree.c
467
struct btree *b = container_of(to_delayed_work(w), struct btree, work);
drivers/md/bcache/btree.c
486
queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
drivers/md/bcache/btree.c
598
INIT_DELAYED_WORK(&b->work, btree_node_write_work);
drivers/md/bcache/btree.c
787
cancel_delayed_work_sync(&b->work);
drivers/md/bcache/btree.h
142
struct delayed_work work;
drivers/md/bcache/journal.c
754
journal_write_unlocked(&cl->work);
drivers/md/bcache/journal.c
829
static void journal_write_work(struct work_struct *work)
drivers/md/bcache/journal.c
831
struct cache_set *c = container_of(to_delayed_work(work),
drivers/md/bcache/journal.c
833
journal.work);
drivers/md/bcache/journal.c
874
queue_delayed_work(bch_flush_wq, &c->journal.work,
drivers/md/bcache/journal.c
910
INIT_DELAYED_WORK(&j->work, journal_write_work);
drivers/md/bcache/journal.h
113
struct delayed_work work;
drivers/md/bcache/request.c
166
bch_data_insert_keys(&cl->work);
drivers/md/bcache/request.c
317
bch_data_insert_start(&cl->work);
drivers/md/bcache/request.c
772
search_free(&cl->work);
drivers/md/bcache/request.c
787
cached_dev_bio_complete(&cl->work);
drivers/md/bcache/request.c
829
cached_dev_bio_complete(&cl->work);
drivers/md/bcache/request.c
978
cached_dev_bio_complete(&cl->work);
drivers/md/bcache/super.c
1752
cancel_delayed_work_sync(&c->journal.work);
drivers/md/bcache/super.c
1754
c->journal.work.work.func(&c->journal.work.work);
drivers/md/bcache/super.c
2490
static void register_bdev_worker(struct work_struct *work)
drivers/md/bcache/super.c
2494
container_of(work, struct async_reg_args, reg_work.work);
drivers/md/bcache/super.c
2511
static void register_cache_worker(struct work_struct *work)
drivers/md/bcache/super.c
2515
container_of(work, struct async_reg_args, reg_work.work);
drivers/md/bcache/writeback.c
242
static void update_writeback_rate(struct work_struct *work)
drivers/md/bcache/writeback.c
244
struct cached_dev *dc = container_of(to_delayed_work(work),
drivers/md/dm-bio-prison-v1.c
373
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
drivers/md/dm-bio-prison-v1.c
383
list_add(work, &ds->entries[ds->current_entry].work_items);
drivers/md/dm-bio-prison-v1.h
126
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
drivers/md/dm-cache-background-tracker.c
114
cmp = cmp_oblock(w->work.oblock, oblock);
drivers/md/dm-cache-background-tracker.c
168
struct policy_work *work,
drivers/md/dm-cache-background-tracker.c
180
memcpy(&w->work, work, sizeof(*work));
drivers/md/dm-cache-background-tracker.c
192
*pwork = &w->work;
drivers/md/dm-cache-background-tracker.c
196
update_stats(b, &w->work, 1);
drivers/md/dm-cache-background-tracker.c
205
int btracker_issue(struct background_tracker *b, struct policy_work **work)
drivers/md/dm-cache-background-tracker.c
214
*work = &w->work;
drivers/md/dm-cache-background-tracker.c
223
struct bt_work *w = container_of(op, struct bt_work, work);
drivers/md/dm-cache-background-tracker.c
225
update_stats(b, &w->work, -1);
drivers/md/dm-cache-background-tracker.c
86
cmp = cmp_oblock(w->work.oblock, nw->work.oblock);
drivers/md/dm-cache-background-tracker.h
32
struct policy_work work;
drivers/md/dm-cache-background-tracker.h
65
struct policy_work *work,
drivers/md/dm-cache-background-tracker.h
72
int btracker_issue(struct background_tracker *b, struct policy_work **work);
drivers/md/dm-cache-policy-internal.h
25
struct policy_work **work)
drivers/md/dm-cache-policy-internal.h
28
*work = NULL;
drivers/md/dm-cache-policy-internal.h
32
return p->lookup_with_work(p, oblock, cblock, data_dir, fast_copy, work);
drivers/md/dm-cache-policy-internal.h
42
struct policy_work *work,
drivers/md/dm-cache-policy-internal.h
45
return p->complete_background_work(p, work, success);
drivers/md/dm-cache-policy-smq.c
1188
struct policy_work work;
drivers/md/dm-cache-policy-smq.c
1196
work.op = POLICY_WRITEBACK;
drivers/md/dm-cache-policy-smq.c
1197
work.oblock = e->oblock;
drivers/md/dm-cache-policy-smq.c
1198
work.cblock = infer_cblock(mq, e);
drivers/md/dm-cache-policy-smq.c
1200
r = btracker_queue(mq->bg_work, &work, NULL);
drivers/md/dm-cache-policy-smq.c
1211
struct policy_work work;
drivers/md/dm-cache-policy-smq.c
1227
work.op = POLICY_DEMOTE;
drivers/md/dm-cache-policy-smq.c
1228
work.oblock = e->oblock;
drivers/md/dm-cache-policy-smq.c
1229
work.cblock = infer_cblock(mq, e);
drivers/md/dm-cache-policy-smq.c
1230
r = btracker_queue(mq->bg_work, &work, NULL);
drivers/md/dm-cache-policy-smq.c
1242
struct policy_work work;
drivers/md/dm-cache-policy-smq.c
1267
work.op = POLICY_PROMOTE;
drivers/md/dm-cache-policy-smq.c
1268
work.oblock = oblock;
drivers/md/dm-cache-policy-smq.c
1269
work.cblock = infer_cblock(mq, e);
drivers/md/dm-cache-policy-smq.c
1270
r = btracker_queue(mq->bg_work, &work, workp);
drivers/md/dm-cache-policy-smq.c
1378
struct policy_work **work, bool *background_work)
drivers/md/dm-cache-policy-smq.c
1403
queue_promotion(mq, oblock, work);
drivers/md/dm-cache-policy-smq.c
1431
struct policy_work **work)
drivers/md/dm-cache-policy-smq.c
1439
r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
drivers/md/dm-cache-policy-smq.c
1470
struct policy_work *work,
drivers/md/dm-cache-policy-smq.c
1474
from_cblock(work->cblock));
drivers/md/dm-cache-policy-smq.c
1476
switch (work->op) {
drivers/md/dm-cache-policy-smq.c
1481
e->oblock = work->oblock;
drivers/md/dm-cache-policy-smq.c
1512
btracker_complete(mq->bg_work, work);
drivers/md/dm-cache-policy-smq.c
1516
struct policy_work *work,
drivers/md/dm-cache-policy-smq.c
1523
__complete_background_work(mq, work, success);
drivers/md/dm-cache-policy.h
72
struct policy_work **work);
drivers/md/dm-cache-policy.h
86
struct policy_work *work,
drivers/md/dm-cache-target.c
3091
do_waker(&cache->waker.work);
drivers/md/dm-clone-target.c
1274
static void do_worker(struct work_struct *work)
drivers/md/dm-clone-target.c
1276
struct clone *clone = container_of(work, typeof(*clone), worker);
drivers/md/dm-clone-target.c
1301
static void do_waker(struct work_struct *work)
drivers/md/dm-clone-target.c
1303
struct clone *clone = container_of(to_delayed_work(work), struct clone, waker);
drivers/md/dm-clone-target.c
1998
do_waker(&clone->waker.work);
drivers/md/dm-core.h
95
struct work_struct work;
drivers/md/dm-crypt.c
1906
static void kcryptd_io_read_work(struct work_struct *work)
drivers/md/dm-crypt.c
1908
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
drivers/md/dm-crypt.c
1920
INIT_WORK(&io->work, kcryptd_io_read_work);
drivers/md/dm-crypt.c
1921
queue_work(cc->io_queue, &io->work);
drivers/md/dm-crypt.c
2048
static void kcryptd_crypt_write_continue(struct work_struct *work)
drivers/md/dm-crypt.c
2050
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
drivers/md/dm-crypt.c
2114
INIT_WORK(&io->work, kcryptd_crypt_write_continue);
drivers/md/dm-crypt.c
2115
queue_work(cc->crypt_queue, &io->work);
drivers/md/dm-crypt.c
2148
static void kcryptd_crypt_read_continue(struct work_struct *work)
drivers/md/dm-crypt.c
2150
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
drivers/md/dm-crypt.c
2189
INIT_WORK(&io->work, kcryptd_crypt_read_continue);
drivers/md/dm-crypt.c
2190
queue_work(cc->crypt_queue, &io->work);
drivers/md/dm-crypt.c
2258
static void kcryptd_crypt(struct work_struct *work)
drivers/md/dm-crypt.c
2260
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
drivers/md/dm-crypt.c
2280
INIT_WORK(&io->work, kcryptd_crypt);
drivers/md/dm-crypt.c
2281
queue_work(system_bh_wq, &io->work);
drivers/md/dm-crypt.c
2284
kcryptd_crypt(&io->work);
drivers/md/dm-crypt.c
2289
INIT_WORK(&io->work, kcryptd_crypt);
drivers/md/dm-crypt.c
2290
queue_work(cc->crypt_queue, &io->work);
drivers/md/dm-crypt.c
85
struct work_struct work;
drivers/md/dm-delay.c
151
static void flush_expired_bios(struct work_struct *work)
drivers/md/dm-delay.c
155
dc = container_of(work, struct delay_c, flush_expired_bios);
drivers/md/dm-integrity.c
1625
INIT_WORK(&dio->work, integrity_bio_wait);
drivers/md/dm-integrity.c
1626
queue_work(ic->offload_wq, &dio->work);
drivers/md/dm-integrity.c
1880
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
drivers/md/dm-integrity.c
2294
INIT_WORK(&dio->work, integrity_bio_wait);
drivers/md/dm-integrity.c
2295
queue_work(ic->offload_wq, &dio->work);
drivers/md/dm-integrity.c
2386
INIT_WORK(&dio->work, integrity_bio_wait);
drivers/md/dm-integrity.c
2387
queue_work(ic->wait_wq, &dio->work);
drivers/md/dm-integrity.c
2444
queue_work(ic->writer_wq, &bbs->work);
drivers/md/dm-integrity.c
2465
integrity_metadata(&dio->work);
drivers/md/dm-integrity.c
2490
integrity_metadata(&dio->work);
drivers/md/dm-integrity.c
2495
INIT_WORK(&dio->work, integrity_metadata);
drivers/md/dm-integrity.c
2496
queue_work(ic->metadata_wq, &dio->work);
drivers/md/dm-integrity.c
2572
INIT_WORK(&dio->work, integrity_bio_wait);
drivers/md/dm-integrity.c
2573
queue_work(ic->wait_wq, &dio->work);
drivers/md/dm-integrity.c
2641
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
drivers/md/dm-integrity.c
2728
INIT_WORK(&dio->work, dm_integrity_inline_recheck);
drivers/md/dm-integrity.c
2729
queue_work(ic->offload_wq, &dio->work);
drivers/md/dm-integrity.c
2742
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
drivers/md/dm-integrity.c
2763
INIT_WORK(&dio->work, dm_integrity_inline_async_check);
drivers/md/dm-integrity.c
2764
queue_work(ic->offload_wq, &dio->work);
drivers/md/dm-integrity.c
2780
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
drivers/md/dm-integrity.c
314
struct work_struct work;
drivers/md/dm-integrity.c
3416
struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
drivers/md/dm-integrity.c
3437
INIT_WORK(&dio->work, integrity_bio_wait);
drivers/md/dm-integrity.c
3438
queue_work(ic->offload_wq, &dio->work);
drivers/md/dm-integrity.c
3460
INIT_WORK(&dio->work, integrity_bio_wait);
drivers/md/dm-integrity.c
3461
queue_work(ic->offload_wq, &dio->work);
drivers/md/dm-integrity.c
3467
static void bitmap_flush_work(struct work_struct *work)
drivers/md/dm-integrity.c
3469
struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
drivers/md/dm-integrity.c
352
struct work_struct work;
drivers/md/dm-integrity.c
5285
INIT_WORK(&bbs->work, bitmap_block_work);
drivers/md/dm-integrity.c
5357
if (ic->mode == 'B' && ic->bitmap_flush_work.work.func)
drivers/md/dm-kcopyd.c
650
static void do_work(struct work_struct *work)
drivers/md/dm-kcopyd.c
652
struct dm_kcopyd_client *kc = container_of(work,
drivers/md/dm-log-userspace-base.c
157
static void do_flush(struct work_struct *work)
drivers/md/dm-log-userspace-base.c
160
struct log_c *lc = container_of(work, struct log_c, flush_log_work.work);
drivers/md/dm-mpath.c
120
static void trigger_event(struct work_struct *work);
drivers/md/dm-mpath.c
122
static void activate_path_work(struct work_struct *work);
drivers/md/dm-mpath.c
123
static void process_queued_bios(struct work_struct *work);
drivers/md/dm-mpath.c
1397
if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
drivers/md/dm-mpath.c
1639
static void activate_path_work(struct work_struct *work)
drivers/md/dm-mpath.c
1642
container_of(work, struct pgpath, activate_path.work);
drivers/md/dm-mpath.c
686
static void process_queued_bios(struct work_struct *work)
drivers/md/dm-mpath.c
693
container_of(work, struct multipath, process_queued_bios);
drivers/md/dm-mpath.c
822
static void trigger_event(struct work_struct *work)
drivers/md/dm-mpath.c
825
container_of(work, struct multipath, trigger_event);
drivers/md/dm-pcache/backing_dev.c
110
static void req_complete_fn(struct work_struct *work)
drivers/md/dm-pcache/backing_dev.c
112
struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_complete_work);
drivers/md/dm-pcache/backing_dev.c
142
static void req_submit_fn(struct work_struct *work)
drivers/md/dm-pcache/backing_dev.c
144
struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_submit_work);
drivers/md/dm-pcache/backing_dev.c
20
static void req_submit_fn(struct work_struct *work);
drivers/md/dm-pcache/backing_dev.c
21
static void req_complete_fn(struct work_struct *work);
drivers/md/dm-pcache/cache.h
326
void clean_fn(struct work_struct *work);
drivers/md/dm-pcache/cache.h
327
void kset_flush_fn(struct work_struct *work);
drivers/md/dm-pcache/cache.h
343
void miss_read_end_work_fn(struct work_struct *work);
drivers/md/dm-pcache/cache.h
347
void pcache_cache_gc_fn(struct work_struct *work);
drivers/md/dm-pcache/cache.h
352
void cache_writeback_fn(struct work_struct *work);
drivers/md/dm-pcache/cache_gc.c
101
struct pcache_cache *cache = container_of(work, struct pcache_cache, gc_work.work);
drivers/md/dm-pcache/cache_gc.c
99
void pcache_cache_gc_fn(struct work_struct *work)
drivers/md/dm-pcache/cache_key.c
645
void clean_fn(struct work_struct *work)
drivers/md/dm-pcache/cache_key.c
647
struct pcache_cache *cache = container_of(work, struct pcache_cache, clean_work);
drivers/md/dm-pcache/cache_key.c
694
void kset_flush_fn(struct work_struct *work)
drivers/md/dm-pcache/cache_key.c
696
struct pcache_cache_kset *kset = container_of(work, struct pcache_cache_kset, flush_work.work);
drivers/md/dm-pcache/cache_writeback.c
216
void cache_writeback_fn(struct work_struct *work)
drivers/md/dm-pcache/cache_writeback.c
218
struct pcache_cache *cache = container_of(work, struct pcache_cache, writeback_work.work);
drivers/md/dm-pcache/dm_pcache.c
34
static void defered_req_fn(struct work_struct *work)
drivers/md/dm-pcache/dm_pcache.c
36
struct dm_pcache *pcache = container_of(work, struct dm_pcache, defered_req_work);
drivers/md/dm-raid1.c
846
static void trigger_event(struct work_struct *work)
drivers/md/dm-raid1.c
849
container_of(work, struct mirror_set, trigger_event);
drivers/md/dm-raid1.c
859
static void do_mirror(struct work_struct *work)
drivers/md/dm-raid1.c
861
struct mirror_set *ms = container_of(work, struct mirror_set,
drivers/md/dm-rq.c
22
struct kthread_work work;
drivers/md/dm-snap-persistent.c
218
struct work_struct work;
drivers/md/dm-snap-persistent.c
222
static void do_metadata(struct work_struct *work)
drivers/md/dm-snap-persistent.c
224
struct mdata_req *req = container_of(work, struct mdata_req, work);
drivers/md/dm-snap-persistent.c
259
INIT_WORK_ONSTACK(&req.work, do_metadata);
drivers/md/dm-snap-persistent.c
260
queue_work(ps->metadata_wq, &req.work);
drivers/md/dm-snap-persistent.c
262
destroy_work_on_stack(&req.work);
drivers/md/dm-stripe.c
54
static void trigger_event(struct work_struct *work)
drivers/md/dm-stripe.c
56
struct stripe_c *sc = container_of(work, struct stripe_c,
drivers/md/dm-thin.c
3640
do_waker(&pool->waker.work);
drivers/md/dm-thin.c
4338
struct list_head work;
drivers/md/dm-thin.c
4343
INIT_LIST_HEAD(&work);
drivers/md/dm-thin.c
4344
dm_deferred_entry_dec(h->shared_read_entry, &work);
drivers/md/dm-thin.c
4347
list_for_each_entry_safe(m, tmp, &work, list) {
drivers/md/dm-thin.c
4355
INIT_LIST_HEAD(&work);
drivers/md/dm-thin.c
4356
dm_deferred_entry_dec(h->all_io_entry, &work);
drivers/md/dm-thin.c
4357
if (!list_empty(&work)) {
drivers/md/dm-thin.c
4359
list_for_each_entry_safe(m, tmp, &work, list)
drivers/md/dm-verity-target.c
646
struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
drivers/md/dm-verity-target.c
65
struct work_struct work;
drivers/md/dm-verity-target.c
655
struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
drivers/md/dm-verity-target.c
662
INIT_WORK(&io->work, verity_work);
drivers/md/dm-verity-target.c
663
queue_work(io->v->verify_wq, &io->work);
drivers/md/dm-verity-target.c
694
INIT_WORK(&io->work, verity_bh_work);
drivers/md/dm-verity-target.c
695
queue_work(system_bh_wq, &io->work);
drivers/md/dm-verity-target.c
697
verity_bh_work(&io->work);
drivers/md/dm-verity-target.c
700
INIT_WORK(&io->work, verity_work);
drivers/md/dm-verity-target.c
701
queue_work(io->v->verify_wq, &io->work);
drivers/md/dm-verity-target.c
710
static void verity_prefetch_io(struct work_struct *work)
drivers/md/dm-verity-target.c
713
container_of(work, struct dm_verity_prefetch_work, work);
drivers/md/dm-verity-target.c
773
INIT_WORK(&pw->work, verity_prefetch_io);
drivers/md/dm-verity-target.c
778
queue_work(v->verify_wq, &pw->work);
drivers/md/dm-verity.h
111
struct work_struct work;
drivers/md/dm-writecache.c
1941
static void writecache_writeback(struct work_struct *work)
drivers/md/dm-writecache.c
1943
struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
drivers/md/dm-writecache.c
857
static void writecache_flush_work(struct work_struct *work)
drivers/md/dm-writecache.c
859
struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);
drivers/md/dm-zoned-reclaim.c
17
struct delayed_work work;
drivers/md/dm-zoned-reclaim.c
502
static void dmz_reclaim_work(struct work_struct *work)
drivers/md/dm-zoned-reclaim.c
504
struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
drivers/md/dm-zoned-reclaim.c
514
mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
drivers/md/dm-zoned-reclaim.c
576
INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
drivers/md/dm-zoned-reclaim.c
585
queue_delayed_work(zrc->wq, &zrc->work, 0);
drivers/md/dm-zoned-reclaim.c
601
cancel_delayed_work_sync(&zrc->work);
drivers/md/dm-zoned-reclaim.c
612
cancel_delayed_work_sync(&zrc->work);
drivers/md/dm-zoned-reclaim.c
620
queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
drivers/md/dm-zoned-reclaim.c
639
mod_delayed_work(zrc->wq, &zrc->work, 0);
drivers/md/dm-zoned-target.c
30
struct work_struct work;
drivers/md/dm-zoned-target.c
478
static void dmz_chunk_work(struct work_struct *work)
drivers/md/dm-zoned-target.c
480
struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
drivers/md/dm-zoned-target.c
503
static void dmz_flush_work(struct work_struct *work)
drivers/md/dm-zoned-target.c
505
struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
drivers/md/dm-zoned-target.c
554
INIT_WORK(&cw->work, dmz_chunk_work);
drivers/md/dm-zoned-target.c
569
if (queue_work(dmz->chunk_wq, &cw->work))
drivers/md/dm.c
2212
static void dm_wq_work(struct work_struct *work);
drivers/md/dm.c
2339
INIT_WORK(&md->work, dm_wq_work);
drivers/md/dm.c
2828
static void dm_wq_work(struct work_struct *work)
drivers/md/dm.c
2830
struct mapped_device *md = container_of(work, struct mapped_device, work);
drivers/md/dm.c
2851
queue_work(md->wq, &md->work);
drivers/md/dm.c
676
queue_work(md->wq, &md->work);
drivers/md/dm.c
887
queue_work(md->wq, &md->work);
drivers/md/dm.c
992
static void dm_wq_requeue_work(struct work_struct *work)
drivers/md/dm.c
994
struct mapped_device *md = container_of(work, struct mapped_device,
drivers/md/md-bitmap.c
1267
struct work_struct work;
drivers/md/md-bitmap.c
1272
static void md_bitmap_unplug_fn(struct work_struct *work)
drivers/md/md-bitmap.c
1275
container_of(work, struct bitmap_unplug_work, work);
drivers/md/md-bitmap.c
1286
INIT_WORK_ONSTACK(&unplug_work.work, md_bitmap_unplug_fn);
drivers/md/md-bitmap.c
1290
queue_work(md_bitmap_wq, &unplug_work.work);
drivers/md/md-bitmap.c
1292
destroy_work_on_stack(&unplug_work.work);
drivers/md/md-llbitmap.c
1127
static void llbitmap_unplug_fn(struct work_struct *work)
drivers/md/md-llbitmap.c
1130
container_of(work, struct llbitmap_unplug_work, work);
drivers/md/md-llbitmap.c
1180
INIT_WORK_ONSTACK(&unplug_work.work, llbitmap_unplug_fn);
drivers/md/md-llbitmap.c
1181
queue_work(md_llbitmap_unplug_wq, &unplug_work.work);
drivers/md/md-llbitmap.c
1183
destroy_work_on_stack(&unplug_work.work);
drivers/md/md-llbitmap.c
289
struct work_struct work;
drivers/md/md-llbitmap.c
923
static void md_llbitmap_daemon_fn(struct work_struct *work)
drivers/md/md-llbitmap.c
926
container_of(work, struct llbitmap, daemon_work);
drivers/md/raid5-cache.c
658
static void r5l_submit_io_async(struct work_struct *work)
drivers/md/raid5-cache.c
660
struct r5l_log *log = container_of(work, struct r5l_log,
drivers/md/raid5-cache.c
679
static void r5c_disable_writeback_async(struct work_struct *work)
drivers/md/raid5-cache.c
681
struct r5l_log *log = container_of(work, struct r5l_log,
drivers/md/raid5.c
211
queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
drivers/md/raid5.c
219
&group->workers[i].work);
drivers/md/raid5.c
6713
static void raid5_do_work(struct work_struct *work)
drivers/md/raid5.c
6715
struct r5worker *worker = container_of(work, struct r5worker, work);
drivers/md/raid5.c
7290
INIT_WORK(&worker->work, raid5_do_work);
drivers/md/raid5.h
504
struct work_struct work;
drivers/media/cec/core/cec-adap.c
1262
if (!cancel_delayed_work(&data->work)) {
drivers/media/cec/core/cec-adap.c
1264
cancel_delayed_work_sync(&data->work);
drivers/media/cec/core/cec-adap.c
418
if (cancel_delayed_work(&data->work))
drivers/media/cec/core/cec-adap.c
707
schedule_delayed_work(&data->work,
drivers/media/cec/core/cec-adap.c
754
static void cec_wait_timeout(struct work_struct *work)
drivers/media/cec/core/cec-adap.c
756
struct cec_data *data = container_of(work, struct cec_data, work.work);
drivers/media/cec/core/cec-adap.c
947
INIT_DELAYED_WORK(&data->work, cec_wait_timeout);
drivers/media/cec/core/cec-adap.c
968
cancel_delayed_work_sync(&data->work);
drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
488
work_update_edid.work);
drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
582
static void extron_irq_work_handler(struct work_struct *work)
drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
585
container_of(work, struct extron_port, irq_work);
drivers/media/cec/usb/pulse8/pulse8-cec.c
284
static void pulse8_tx_work_handler(struct work_struct *work)
drivers/media/cec/usb/pulse8/pulse8-cec.c
286
struct pulse8 *pulse8 = container_of(work, struct pulse8, tx_work);
drivers/media/cec/usb/pulse8/pulse8-cec.c
330
static void pulse8_irq_work_handler(struct work_struct *work)
drivers/media/cec/usb/pulse8/pulse8-cec.c
333
container_of(work, struct pulse8, irq_work);
drivers/media/cec/usb/pulse8/pulse8-cec.c
804
static void pulse8_ping_eeprom_work_handler(struct work_struct *work)
drivers/media/cec/usb/pulse8/pulse8-cec.c
807
container_of(work, struct pulse8, ping_eeprom_work.work);
drivers/media/cec/usb/rainshadow/rainshadow-cec.c
108
static void rain_irq_work_handler(struct work_struct *work)
drivers/media/cec/usb/rainshadow/rainshadow-cec.c
111
container_of(work, struct rain, work);
drivers/media/cec/usb/rainshadow/rainshadow-cec.c
185
schedule_work(&rain->work);
drivers/media/cec/usb/rainshadow/rainshadow-cec.c
193
cancel_work_sync(&rain->work);
drivers/media/cec/usb/rainshadow/rainshadow-cec.c
330
INIT_WORK(&rain->work, rain_irq_work_handler);
drivers/media/cec/usb/rainshadow/rainshadow-cec.c
46
struct work_struct work;
drivers/media/dvb-core/dvb_net.c
1219
static void wq_set_multicast_list (struct work_struct *work)
drivers/media/dvb-core/dvb_net.c
1222
container_of(work, struct dvb_net_priv, set_multicast_list_wq);
drivers/media/dvb-core/dvb_net.c
1260
static void wq_restart_net_feed (struct work_struct *work)
drivers/media/dvb-core/dvb_net.c
1263
container_of(work, struct dvb_net_priv, restart_net_feed_wq);
drivers/media/dvb-frontends/rtl2832.c
784
static void rtl2832_i2c_gate_work(struct work_struct *work)
drivers/media/dvb-frontends/rtl2832.c
786
struct rtl2832_dev *dev = container_of(work, struct rtl2832_dev, i2c_gate_work.work);
drivers/media/dvb-frontends/ts2020.c
148
ts2020_stat_work(&priv->stat_work.work);
drivers/media/dvb-frontends/ts2020.c
427
static void ts2020_stat_work(struct work_struct *work)
drivers/media/dvb-frontends/ts2020.c
429
struct ts2020_priv *priv = container_of(work, struct ts2020_priv,
drivers/media/dvb-frontends/ts2020.c
430
stat_work.work);
drivers/media/dvb-frontends/ts2020.c
45
static void ts2020_stat_work(struct work_struct *work);
drivers/media/dvb-frontends/ts2020.c
462
ts2020_stat_work(&priv->stat_work.work);
drivers/media/firewire/firedtv-avc.c
904
void avc_remote_ctrl_work(struct work_struct *work)
drivers/media/firewire/firedtv-avc.c
907
container_of(work, struct firedtv, remote_ctrl_work);
drivers/media/firewire/firedtv.h
121
void avc_remote_ctrl_work(struct work_struct *work);
drivers/media/i2c/adv7511-v4l2.c
1473
static void adv7511_edid_handler(struct work_struct *work)
drivers/media/i2c/adv7511-v4l2.c
1475
struct delayed_work *dwork = to_delayed_work(work);
drivers/media/i2c/adv7604.c
540
static void adv76xx_delayed_work_enable_hotplug(struct work_struct *work)
drivers/media/i2c/adv7604.c
542
struct delayed_work *dwork = to_delayed_work(work);
drivers/media/i2c/adv7842.c
686
static void adv7842_delayed_work_enable_hotplug(struct work_struct *work)
drivers/media/i2c/adv7842.c
688
struct delayed_work *dwork = to_delayed_work(work);
drivers/media/i2c/cx25840/cx25840-core.c
600
static void cx25840_work_handler(struct work_struct *work)
drivers/media/i2c/cx25840/cx25840-core.c
602
struct cx25840_state *state = container_of(work, struct cx25840_state, fw_work);
drivers/media/i2c/ds90ub960.c
4342
static void ub960_handler_work(struct work_struct *work)
drivers/media/i2c/ds90ub960.c
4344
struct delayed_work *dwork = to_delayed_work(work);
drivers/media/i2c/ir-kbd-i2c.c
344
static void ir_work(struct work_struct *work)
drivers/media/i2c/ir-kbd-i2c.c
347
struct IR_i2c *ir = container_of(work, struct IR_i2c, work.work);
drivers/media/i2c/ir-kbd-i2c.c
363
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling_interval));
drivers/media/i2c/ir-kbd-i2c.c
370
schedule_delayed_work(&ir->work, 0);
drivers/media/i2c/ir-kbd-i2c.c
379
cancel_delayed_work_sync(&ir->work);
drivers/media/i2c/ir-kbd-i2c.c
934
INIT_DELAYED_WORK(&ir->work, ir_work);
drivers/media/i2c/ir-kbd-i2c.c
970
cancel_delayed_work_sync(&ir->work);
drivers/media/i2c/saa6588.c
316
static void saa6588_work(struct work_struct *work)
drivers/media/i2c/saa6588.c
318
struct saa6588 *s = container_of(work, struct saa6588, work.work);
drivers/media/i2c/saa6588.c
321
schedule_delayed_work(&s->work, msecs_to_jiffies(20));
drivers/media/i2c/saa6588.c
480
INIT_DELAYED_WORK(&s->work, saa6588_work);
drivers/media/i2c/saa6588.c
481
schedule_delayed_work(&s->work, 0);
drivers/media/i2c/saa6588.c
492
cancel_delayed_work_sync(&s->work);
drivers/media/i2c/saa6588.c
55
struct delayed_work work;
drivers/media/i2c/saa7115.c
1627
u8 work;
drivers/media/i2c/saa7115.c
1634
work = saa711x_read(sd, R_08_SYNC_CNTL);
drivers/media/i2c/saa7115.c
1635
work &= ~SAA7113_R_08_HTC_MASK;
drivers/media/i2c/saa7115.c
1636
work |= ((*data->saa7113_r08_htc) << SAA7113_R_08_HTC_OFFSET);
drivers/media/i2c/saa7115.c
1637
saa711x_write(sd, R_08_SYNC_CNTL, work);
drivers/media/i2c/saa7115.c
1641
work = saa711x_read(sd, R_10_CHROMA_CNTL_2);
drivers/media/i2c/saa7115.c
1642
work &= ~SAA7113_R_10_VRLN_MASK;
drivers/media/i2c/saa7115.c
1644
work |= (1 << SAA7113_R_10_VRLN_OFFSET);
drivers/media/i2c/saa7115.c
1645
saa711x_write(sd, R_10_CHROMA_CNTL_2, work);
drivers/media/i2c/saa7115.c
1649
work = saa711x_read(sd, R_10_CHROMA_CNTL_2);
drivers/media/i2c/saa7115.c
1650
work &= ~SAA7113_R_10_OFTS_MASK;
drivers/media/i2c/saa7115.c
1651
work |= (*data->saa7113_r10_ofts << SAA7113_R_10_OFTS_OFFSET);
drivers/media/i2c/saa7115.c
1652
saa711x_write(sd, R_10_CHROMA_CNTL_2, work);
drivers/media/i2c/saa7115.c
1656
work = saa711x_read(sd, R_12_RT_SIGNAL_CNTL);
drivers/media/i2c/saa7115.c
1657
work &= ~SAA7113_R_12_RTS0_MASK;
drivers/media/i2c/saa7115.c
1658
work |= (*data->saa7113_r12_rts0 << SAA7113_R_12_RTS0_OFFSET);
drivers/media/i2c/saa7115.c
1663
saa711x_write(sd, R_12_RT_SIGNAL_CNTL, work);
drivers/media/i2c/saa7115.c
1667
work = saa711x_read(sd, R_12_RT_SIGNAL_CNTL);
drivers/media/i2c/saa7115.c
1668
work &= ~SAA7113_R_12_RTS1_MASK;
drivers/media/i2c/saa7115.c
1669
work |= (*data->saa7113_r12_rts1 << SAA7113_R_12_RTS1_OFFSET);
drivers/media/i2c/saa7115.c
1670
saa711x_write(sd, R_12_RT_SIGNAL_CNTL, work);
drivers/media/i2c/saa7115.c
1674
work = saa711x_read(sd, R_13_RT_X_PORT_OUT_CNTL);
drivers/media/i2c/saa7115.c
1675
work &= ~SAA7113_R_13_ADLSB_MASK;
drivers/media/i2c/saa7115.c
1677
work |= (1 << SAA7113_R_13_ADLSB_OFFSET);
drivers/media/i2c/saa7115.c
1678
saa711x_write(sd, R_13_RT_X_PORT_OUT_CNTL, work);
drivers/media/i2c/tc358743.c
1613
static void tc358743_work_i2c_poll(struct work_struct *work)
drivers/media/i2c/tc358743.c
1615
struct tc358743_state *state = container_of(work,
drivers/media/i2c/tc358743.c
395
static void tc358743_delayed_work_enable_hotplug(struct work_struct *work)
drivers/media/i2c/tc358743.c
397
struct delayed_work *dwork = to_delayed_work(work);
drivers/media/i2c/tda1997x.c
558
static void tda1997x_delayed_work_enable_hpd(struct work_struct *work)
drivers/media/i2c/tda1997x.c
560
struct delayed_work *dwork = to_delayed_work(work);
drivers/media/pci/b2c2/flexcop-pci.c
105
static void flexcop_pci_irq_check_work(struct work_struct *work)
drivers/media/pci/b2c2/flexcop-pci.c
108
container_of(work, struct flexcop_pci, irq_check_work.work);
drivers/media/pci/bt8xx/bttv-driver.c
185
static void request_module_async(struct work_struct *work)
drivers/media/pci/cobalt/cobalt-irq.c
199
void cobalt_irq_work_handler(struct work_struct *work)
drivers/media/pci/cobalt/cobalt-irq.c
202
container_of(work, struct cobalt, irq_work_queue);
drivers/media/pci/cobalt/cobalt-irq.h
12
void cobalt_irq_work_handler(struct work_struct *work);
drivers/media/pci/cx18/cx18-driver.c
1235
cancel_work_sync(&cx->in_work_order[i].work);
drivers/media/pci/cx18/cx18-driver.c
240
static void request_module_async(struct work_struct *work)
drivers/media/pci/cx18/cx18-driver.c
242
struct cx18 *dev = container_of(work, struct cx18, request_module_wk);
drivers/media/pci/cx18/cx18-driver.c
704
INIT_WORK(&cx->in_work_order[i].work, cx18_in_work_handler);
drivers/media/pci/cx18/cx18-driver.h
342
struct work_struct work;
drivers/media/pci/cx18/cx18-mailbox.c
369
void cx18_in_work_handler(struct work_struct *work)
drivers/media/pci/cx18/cx18-mailbox.c
372
container_of(work, struct cx18_in_work_order, work);
drivers/media/pci/cx18/cx18-mailbox.c
572
queue_work(cx->in_work_queue, &order->work);
drivers/media/pci/cx18/cx18-mailbox.h
79
void cx18_in_work_handler(struct work_struct *work);
drivers/media/pci/cx18/cx18-streams.c
728
void cx18_out_work_handler(struct work_struct *work)
drivers/media/pci/cx18/cx18-streams.c
731
container_of(work, struct cx18_stream, out_work_order);
drivers/media/pci/cx18/cx18-streams.h
42
void cx18_out_work_handler(struct work_struct *work);
drivers/media/pci/cx23885/altera-ci.c
102
struct work_struct work;
drivers/media/pci/cx23885/altera-ci.c
389
static void netup_read_ci_status(struct work_struct *work)
drivers/media/pci/cx23885/altera-ci.c
392
container_of(work, struct fpga_internal, work);
drivers/media/pci/cx23885/altera-ci.c
435
schedule_work(&inter->work);
drivers/media/pci/cx23885/altera-ci.c
771
INIT_WORK(&inter->work, netup_read_ci_status);
drivers/media/pci/cx23885/altera-ci.c
796
schedule_work(&inter->work);
drivers/media/pci/cx23885/cimax2.c
335
static void netup_read_ci_status(struct work_struct *work)
drivers/media/pci/cx23885/cimax2.c
338
container_of(work, struct netup_ci_state, work);
drivers/media/pci/cx23885/cimax2.c
384
schedule_work(&state->work);
drivers/media/pci/cx23885/cimax2.c
391
schedule_work(&state->work);
drivers/media/pci/cx23885/cimax2.c
505
INIT_WORK(&state->work, netup_read_ci_status);
drivers/media/pci/cx23885/cimax2.c
506
schedule_work(&state->work);
drivers/media/pci/cx23885/cimax2.c
74
struct work_struct work;
drivers/media/pci/cx23885/cx23885-av.c
14
void cx23885_av_work_handler(struct work_struct *work)
drivers/media/pci/cx23885/cx23885-av.c
17
container_of(work, struct cx23885_dev, cx25840_work);
drivers/media/pci/cx23885/cx23885-av.h
12
void cx23885_av_work_handler(struct work_struct *work);
drivers/media/pci/cx23885/cx23885-ir.c
24
void cx23885_ir_rx_work_handler(struct work_struct *work)
drivers/media/pci/cx23885/cx23885-ir.c
27
container_of(work, struct cx23885_dev, ir_rx_work);
drivers/media/pci/cx23885/cx23885-ir.c
47
void cx23885_ir_tx_work_handler(struct work_struct *work)
drivers/media/pci/cx23885/cx23885-ir.c
50
container_of(work, struct cx23885_dev, ir_tx_work);
drivers/media/pci/cx23885/cx23885-ir.h
15
void cx23885_ir_rx_work_handler(struct work_struct *work);
drivers/media/pci/cx23885/cx23885-ir.h
16
void cx23885_ir_tx_work_handler(struct work_struct *work);
drivers/media/pci/cx88/cx88-mpeg.c
42
static void request_module_async(struct work_struct *work)
drivers/media/pci/cx88/cx88-mpeg.c
44
struct cx8802_dev *dev = container_of(work, struct cx8802_dev,
drivers/media/pci/ddbridge/ddbridge-core.c
2164
static void input_work(struct work_struct *work)
drivers/media/pci/ddbridge/ddbridge-core.c
2166
struct ddb_dma *dma = container_of(work, struct ddb_dma, work);
drivers/media/pci/ddbridge/ddbridge-core.c
2192
queue_work(ddb_wq, &dma->work);
drivers/media/pci/ddbridge/ddbridge-core.c
2195
static void output_work(struct work_struct *work)
drivers/media/pci/ddbridge/ddbridge-core.c
2197
struct ddb_dma *dma = container_of(work, struct ddb_dma, work);
drivers/media/pci/ddbridge/ddbridge-core.c
2219
queue_work(ddb_wq, &dma->work);
drivers/media/pci/ddbridge/ddbridge-core.c
2252
INIT_WORK(&dma->work, output_work);
drivers/media/pci/ddbridge/ddbridge-core.c
2259
INIT_WORK(&dma->work, input_work);
drivers/media/pci/ddbridge/ddbridge-core.c
2455
cancel_work_sync(&port->input[0]->dma->work);
drivers/media/pci/ddbridge/ddbridge-core.c
2457
cancel_work_sync(&port->input[1]->dma->work);
drivers/media/pci/ddbridge/ddbridge-core.c
2459
cancel_work_sync(&port->output->dma->work);
drivers/media/pci/ddbridge/ddbridge.h
144
struct work_struct work;
drivers/media/pci/dm1105/dm1105.c
1124
INIT_WORK(&dev->work, dm1105_dmx_buffer);
drivers/media/pci/dm1105/dm1105.c
1179
cancel_work_sync(&dev->ir.work);
drivers/media/pci/dm1105/dm1105.c
313
struct work_struct work;
drivers/media/pci/dm1105/dm1105.c
343
struct work_struct work;
drivers/media/pci/dm1105/dm1105.c
653
static void dm1105_emit_key(struct work_struct *work)
drivers/media/pci/dm1105/dm1105.c
655
struct infrared *ir = container_of(work, struct infrared, work);
drivers/media/pci/dm1105/dm1105.c
669
static void dm1105_dmx_buffer(struct work_struct *work)
drivers/media/pci/dm1105/dm1105.c
671
struct dm1105_dev *dev = container_of(work, struct dm1105_dev, work);
drivers/media/pci/dm1105/dm1105.c
713
queue_work(dev->wq, &dev->work);
drivers/media/pci/dm1105/dm1105.c
717
schedule_work(&dev->ir.work);
drivers/media/pci/dm1105/dm1105.c
751
INIT_WORK(&dm1105->ir.work, dm1105_emit_key);
drivers/media/pci/intel/ipu-bridge.c
565
struct work_struct work;
drivers/media/pci/intel/ipu-bridge.c
571
static void ipu_bridge_instantiate_vcm_work(struct work_struct *work)
drivers/media/pci/intel/ipu-bridge.c
574
container_of(work, struct ipu_bridge_instantiate_vcm_work_data,
drivers/media/pci/intel/ipu-bridge.c
575
work);
drivers/media/pci/intel/ipu-bridge.c
647
INIT_WORK(&data->work, ipu_bridge_instantiate_vcm_work);
drivers/media/pci/intel/ipu-bridge.c
659
queue_work(system_long_wq, &data->work);
drivers/media/pci/intel/ivsc/mei_ace.c
182
struct work_struct work;
drivers/media/pci/intel/ivsc/mei_ace.c
436
static void mei_ace_post_probe_work(struct work_struct *work)
drivers/media/pci/intel/ivsc/mei_ace.c
443
ace = container_of(work, struct mei_ace, work);
drivers/media/pci/intel/ivsc/mei_ace.c
473
INIT_WORK(&ace->work, mei_ace_post_probe_work);
drivers/media/pci/intel/ivsc/mei_ace.c
502
schedule_work(&ace->work);
drivers/media/pci/intel/ivsc/mei_ace.c
523
cancel_work_sync(&ace->work);
drivers/media/pci/ivtv/ivtv-driver.c
270
static void request_module_async(struct work_struct *work)
drivers/media/pci/ivtv/ivtv-driver.c
272
struct ivtv *dev = container_of(work, struct ivtv, request_module_wk);
drivers/media/pci/ivtv/ivtv-irq.c
90
void ivtv_irq_work_handler(struct kthread_work *work)
drivers/media/pci/ivtv/ivtv-irq.c
92
struct ivtv *itv = container_of(work, struct ivtv, irq_work);
drivers/media/pci/ivtv/ivtv-irq.h
37
void ivtv_irq_work_handler(struct kthread_work *work);
drivers/media/pci/mantis/mantis_evm.c
27
static void mantis_hifevm_work(struct work_struct *work)
drivers/media/pci/mantis/mantis_evm.c
29
struct mantis_ca *ca = container_of(work, struct mantis_ca, hif_evm_work);
drivers/media/pci/mantis/mantis_uart.c
79
static void mantis_uart_work(struct work_struct *work)
drivers/media/pci/mantis/mantis_uart.c
81
struct mantis_pci *mantis = container_of(work, struct mantis_pci, uart_work);
drivers/media/pci/mgb4/mgb4_vin.c
724
static void dma_transfer(struct work_struct *work)
drivers/media/pci/mgb4/mgb4_vin.c
726
struct mgb4_vin_dev *vindev = container_of(work, struct mgb4_vin_dev,
drivers/media/pci/mgb4/mgb4_vin.c
767
static void signal_change(struct work_struct *work)
drivers/media/pci/mgb4/mgb4_vin.c
769
struct mgb4_vin_dev *vindev = container_of(work, struct mgb4_vin_dev,
drivers/media/pci/mgb4/mgb4_vout.c
592
static void dma_transfer(struct work_struct *work)
drivers/media/pci/mgb4/mgb4_vout.c
594
struct mgb4_vout_dev *voutdev = container_of(work, struct mgb4_vout_dev,
drivers/media/pci/netup_unidvb/netup_unidvb.h
66
struct work_struct work;
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
241
queue_work(dma->ndev->wq, &dma->work);
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
579
static void netup_unidvb_dma_worker(struct work_struct *work)
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
581
struct netup_dma *dma = container_of(work, struct netup_dma, work);
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
662
INIT_WORK(&dma->work, netup_unidvb_dma_worker);
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
700
cancel_work_sync(&dma->work);
drivers/media/pci/saa7134/saa7134-core.c
146
static void request_module_async(struct work_struct *work){
drivers/media/pci/saa7134/saa7134-core.c
147
struct saa7134_dev* dev = container_of(work, struct saa7134_dev, request_module_wk);
drivers/media/pci/saa7134/saa7134-empress.c
207
static void empress_signal_update(struct work_struct *work)
drivers/media/pci/saa7134/saa7134-empress.c
210
container_of(work, struct saa7134_dev, empress_workqueue);
drivers/media/platform/amphion/vpu_msgs.c
274
void vpu_inst_run_work(struct work_struct *work)
drivers/media/platform/amphion/vpu_msgs.c
276
struct vpu_inst *inst = container_of(work, struct vpu_inst, msg_work);
drivers/media/platform/amphion/vpu_msgs.c
363
void vpu_msg_run_work(struct work_struct *work)
drivers/media/platform/amphion/vpu_msgs.c
365
struct vpu_core *core = container_of(work, struct vpu_core, msg_work);
drivers/media/platform/amphion/vpu_msgs.c
372
void vpu_msg_delayed_work(struct work_struct *work)
drivers/media/platform/amphion/vpu_msgs.c
379
if (!work)
drivers/media/platform/amphion/vpu_msgs.c
382
dwork = to_delayed_work(work);
drivers/media/platform/amphion/vpu_msgs.h
10
void vpu_inst_run_work(struct work_struct *work);
drivers/media/platform/amphion/vpu_msgs.h
11
void vpu_msg_run_work(struct work_struct *work);
drivers/media/platform/amphion/vpu_msgs.h
12
void vpu_msg_delayed_work(struct work_struct *work);
drivers/media/platform/aspeed/aspeed-video.c
1843
static void aspeed_video_resolution_work(struct work_struct *work)
drivers/media/platform/aspeed/aspeed-video.c
1845
struct delayed_work *dwork = to_delayed_work(work);
drivers/media/platform/chips-media/coda/coda-bit.c
1705
static void coda_seq_end_work(struct work_struct *work)
drivers/media/platform/chips-media/coda/coda-bit.c
1707
struct coda_ctx *ctx = container_of(work, struct coda_ctx, seq_end_work);
drivers/media/platform/chips-media/coda/coda-bit.c
2041
static void coda_dec_seq_init_work(struct work_struct *work)
drivers/media/platform/chips-media/coda/coda-bit.c
2043
struct coda_ctx *ctx = container_of(work,
drivers/media/platform/chips-media/coda/coda-common.c
1528
static void coda_pic_run_work(struct work_struct *work)
drivers/media/platform/chips-media/coda/coda-common.c
1530
struct coda_ctx *ctx = container_of(work, struct coda_ctx, pic_run_work);
drivers/media/platform/chips-media/coda/coda.h
211
void (*seq_init_work)(struct work_struct *work);
drivers/media/platform/chips-media/coda/coda.h
212
void (*seq_end_work)(struct work_struct *work);
drivers/media/platform/chips-media/wave5/wave5-vpu.c
141
static void wave5_vpu_irq_work_fn(struct kthread_work *work)
drivers/media/platform/chips-media/wave5/wave5-vpu.c
143
struct vpu_device *dev = container_of(work, struct vpu_device, work);
drivers/media/platform/chips-media/wave5/wave5-vpu.c
154
kthread_queue_work(dev->worker, &dev->work);
drivers/media/platform/chips-media/wave5/wave5-vpu.c
352
kthread_init_work(&dev->work, wave5_vpu_irq_work_fn);
drivers/media/platform/chips-media/wave5/wave5-vpu.c
444
kthread_cancel_work_sync(&dev->work);
drivers/media/platform/chips-media/wave5/wave5-vpuapi.h
765
struct kthread_work work;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1224
static void mtk_jpeg_job_timeout_work(struct work_struct *work)
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1226
struct mtk_jpeg_dev *jpeg = container_of(work, struct mtk_jpeg_dev,
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1227
job_timeout_work.work);
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1579
static void mtk_jpegenc_worker(struct work_struct *work)
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1588
struct mtk_jpeg_ctx *ctx = container_of(work,
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1674
static void mtk_jpegdec_worker(struct work_struct *work)
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1676
struct mtk_jpeg_ctx *ctx = container_of(work, struct mtk_jpeg_ctx,
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
83
void (*jpeg_worker)(struct work_struct *work);
drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
522
static void mtk_jpegdec_timeout_work(struct work_struct *work)
drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
526
container_of(work, struct mtk_jpegdec_comp_dev,
drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
527
job_timeout_work.work);
drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
251
static void mtk_jpegenc_timeout_work(struct work_struct *work)
drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
253
struct delayed_work *dly_work = to_delayed_work(work);
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
72
static void mtk_mdp_wdt_worker(struct work_struct *work)
drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
75
container_of(work, struct mtk_mdp_dev, wdt_work);
drivers/media/platform/mediatek/mdp/mtk_mdp_core.h
219
struct work_struct work;
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
1080
INIT_WORK(&ctx->work, mtk_mdp_m2m_worker);
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
494
static void mtk_mdp_m2m_worker(struct work_struct *work)
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
497
container_of(work, struct mtk_mdp_ctx, work);
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
537
queue_work(ctx->mdp_dev->job_wq, &ctx->work);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
468
static void mdp_auto_release_work(struct work_struct *work)
drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
475
cmd = container_of(work, struct mdp_cmdq_cmd, auto_release_work);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
226
vpu->work, &vpu->work_addr, vpu->work_size,
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
274
memset(vpu->work, 0, vpu->work_size);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
277
param->self_data.va = (unsigned long)vpu->work;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
35
if (!vpu->work) {
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
36
vpu->work = dma_alloc_wc(dev, vpu->work_size,
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
38
if (!vpu->work)
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
52
dma_free_wc(dev, vpu->work_size, vpu->work, vpu->work_addr);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
53
vpu->work = NULL;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
73
if (vpu->work && vpu->work_addr)
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
74
dma_free_wc(dev, vpu->work_size, vpu->work, vpu->work_addr);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.h
48
void *work;
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
114
void (*worker)(struct work_struct *work);
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c
275
static void mtk_vdec_worker(struct work_struct *work)
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c
278
container_of(work, struct mtk_vcodec_dec_ctx, decode_work);
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
312
static void mtk_vdec_worker(struct work_struct *work)
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
315
container_of(work, struct mtk_vcodec_dec_ctx, decode_work);
drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.c
238
static void vdec_msg_queue_core_work(struct work_struct *work)
drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.c
241
container_of(work, struct vdec_msg_queue, core_work);
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
1120
static void mtk_venc_worker(struct work_struct *work)
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
1122
struct mtk_vcodec_enc_ctx *ctx = container_of(work, struct mtk_vcodec_enc_ctx,
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
31
static void mtk_venc_worker(struct work_struct *work);
drivers/media/platform/nvidia/tegra-vde/dmabuf-cache.c
49
static void tegra_vde_delayed_unmap(struct work_struct *work)
drivers/media/platform/nvidia/tegra-vde/dmabuf-cache.c
54
entry = container_of(work, struct tegra_vde_cache_entry,
drivers/media/platform/nvidia/tegra-vde/dmabuf-cache.c
55
dwork.work);
drivers/media/platform/nvidia/tegra-vde/v4l2.c
484
static void tegra_decode_complete(struct work_struct *work)
drivers/media/platform/nvidia/tegra-vde/v4l2.c
486
struct tegra_ctx *ctx = container_of(work, struct tegra_ctx, work);
drivers/media/platform/nvidia/tegra-vde/v4l2.c
820
INIT_WORK(&ctx->work, tegra_decode_complete);
drivers/media/platform/nvidia/tegra-vde/v4l2.c
894
queue_work(ctx->vde->wq, &ctx->work);
drivers/media/platform/nvidia/tegra-vde/vde.h
147
struct work_struct work;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1505
static void mxc_jpeg_device_run_timeout(struct work_struct *work)
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1507
struct delayed_work *dwork = to_delayed_work(work);
drivers/media/platform/qcom/iris/iris_probe.c
205
static void iris_sys_error_handler(struct work_struct *work)
drivers/media/platform/qcom/iris/iris_probe.c
208
container_of(work, struct iris_core, sys_error_handler.work);
drivers/media/platform/qcom/venus/core.c
166
schedule_delayed_work(&core->work, msecs_to_jiffies(10));
drivers/media/platform/qcom/venus/core.c
429
INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
drivers/media/platform/qcom/venus/core.c
538
cancel_delayed_work_sync(&core->work);
drivers/media/platform/qcom/venus/core.c
78
schedule_delayed_work(&core->work, msecs_to_jiffies(10));
drivers/media/platform/qcom/venus/core.c
87
static void venus_sys_error_handler(struct work_struct *work)
drivers/media/platform/qcom/venus/core.c
90
container_of(work, struct venus_core, work.work);
drivers/media/platform/qcom/venus/core.h
235
struct delayed_work work;
drivers/media/platform/qcom/venus/helpers.c
1352
static void delayed_process_buf_func(struct work_struct *work)
drivers/media/platform/qcom/venus/helpers.c
1358
inst = container_of(work, struct venus_inst, delayed_process_work);
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-dev.c
122
queue_work(ivc->buffers.async_wq, &ivc->buffers.work);
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
145
static void rzv2h_ivc_transfer_buffer(struct work_struct *work)
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
147
struct rzv2h_ivc *ivc = container_of(work, struct rzv2h_ivc,
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
148
buffers.work);
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
204
queue_work(ivc->buffers.async_wq, &ivc->buffers.work);
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
280
queue_work(ivc->buffers.async_wq, &ivc->buffers.work);
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
446
INIT_WORK(&ivc->buffers.work, rzv2h_ivc_transfer_buffer);
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc.h
106
struct work_struct work;
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1599
static void rkvdec_watchdog_func(struct work_struct *work)
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1604
rkvdec = container_of(to_delayed_work(work), struct rkvdec_dev,
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
166
static void s5p_mfc_watchdog_worker(struct work_struct *work)
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
174
dev = container_of(work, struct s5p_mfc_dev, watchdog_work);
drivers/media/platform/st/sti/delta/delta-v4l2.c
926
static void delta_run_work(struct work_struct *work)
drivers/media/platform/st/sti/delta/delta-v4l2.c
928
struct delta_ctx *ctx = container_of(work, struct delta_ctx, run_work);
drivers/media/platform/st/sti/hva/hva-v4l2.c
805
static void hva_run_work(struct work_struct *work)
drivers/media/platform/st/sti/hva/hva-v4l2.c
807
struct hva_ctx *ctx = container_of(work, struct hva_ctx, run_work);
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
2198
static void hdmirx_delayed_work_hotplug(struct work_struct *work)
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
2203
hdmirx_dev = container_of(work, struct snps_hdmirx_dev,
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
2204
delayed_work_hotplug.work);
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
2220
static void hdmirx_delayed_work_res_change(struct work_struct *work)
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
2225
hdmirx_dev = container_of(work, struct snps_hdmirx_dev,
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
2226
delayed_work_res_change.work);
drivers/media/platform/ti/omap3isp/ispccdc.c
383
static void ccdc_lsc_free_table_work(struct work_struct *work)
drivers/media/platform/ti/omap3isp/ispccdc.c
388
lsc = container_of(work, struct ispccdc_lsc, table_work);
drivers/media/platform/ti/vpe/vip.c
977
static void vip_overflow_recovery_work(struct work_struct *work)
drivers/media/platform/ti/vpe/vip.c
979
struct vip_stream *stream = container_of(work, struct vip_stream,
drivers/media/platform/verisilicon/hantro_drv.c
118
void hantro_watchdog(struct work_struct *work)
drivers/media/platform/verisilicon/hantro_drv.c
123
vpu = container_of(to_delayed_work(work),
drivers/media/platform/verisilicon/hantro_hw.h
429
void hantro_watchdog(struct work_struct *work);
drivers/media/radio/radio-shark.c
155
static void shark_led_work(struct work_struct *work)
drivers/media/radio/radio-shark.c
158
container_of(work, struct shark_device, led_work);
drivers/media/radio/radio-shark2.c
142
static void shark_led_work(struct work_struct *work)
drivers/media/radio/radio-shark2.c
145
container_of(work, struct shark_device, led_work);
drivers/media/radio/si4713/si4713.c
1477
init_completion(&sdev->work);
drivers/media/radio/si4713/si4713.c
178
complete(&sdev->work);
drivers/media/radio/si4713/si4713.c
221
if (!wait_for_completion_timeout(&sdev->work,
drivers/media/radio/si4713/si4713.c
502
!wait_for_completion_timeout(&sdev->work, usecs_to_jiffies(usecs) + 1))
drivers/media/radio/si4713/si4713.h
238
struct completion work;
drivers/media/rc/mceusb.c
1516
static void mceusb_deferred_kevent(struct work_struct *work)
drivers/media/rc/mceusb.c
1519
container_of(work, struct mceusb_dev, kevent);
drivers/media/test-drivers/vicodec/codec-fwht.c
621
s16 *work = tmp;
drivers/media/test-drivers/vicodec/codec-fwht.c
632
*deltablock = *work - *reference;
drivers/media/test-drivers/vicodec/codec-fwht.c
634
work++;
drivers/media/test-drivers/vidtv/vidtv_mux.c
389
static void vidtv_mux_tick(struct work_struct *work)
drivers/media/test-drivers/vidtv/vidtv_mux.c
391
struct vidtv_mux *m = container_of(work,
drivers/media/test-drivers/vim2m.c
626
curr_ctx = container_of(w, struct vim2m_ctx, work_run.work);
drivers/media/test-drivers/vivid/vivid-core.c
1759
static void update_hdmi_ctrls_work_handler(struct work_struct *work)
drivers/media/test-drivers/vivid/vivid-core.c
1782
static void update_svid_ctrls_work_handler(struct work_struct *work)
drivers/media/tuners/si2157.c
850
static void si2157_stat_work(struct work_struct *work)
drivers/media/tuners/si2157.c
852
struct si2157_dev *dev = container_of(work, struct si2157_dev, stat_work.work);
drivers/media/tuners/xc5000.c
1216
timer_sleep.work);
drivers/media/usb/au0828/au0828-dvb.c
335
static void au0828_restart_dvb_streaming(struct work_struct *work)
drivers/media/usb/au0828/au0828-dvb.c
337
struct au0828_dev *dev = container_of(work, struct au0828_dev,
drivers/media/usb/au0828/au0828-dvb.c
96
static void au0828_restart_dvb_streaming(struct work_struct *work);
drivers/media/usb/au0828/au0828-input.c
216
static void au0828_rc_work(struct work_struct *work)
drivers/media/usb/au0828/au0828-input.c
218
struct au0828_rc *ir = container_of(work, struct au0828_rc, work.work);
drivers/media/usb/au0828/au0828-input.c
225
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
drivers/media/usb/au0828/au0828-input.c
232
INIT_DELAYED_WORK(&ir->work, au0828_rc_work);
drivers/media/usb/au0828/au0828-input.c
237
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
drivers/media/usb/au0828/au0828-input.c
246
cancel_delayed_work_sync(&ir->work);
drivers/media/usb/au0828/au0828-input.c
31
struct delayed_work work;
drivers/media/usb/au0828/au0828-input.c
375
cancel_delayed_work_sync(&ir->work);
drivers/media/usb/au0828/au0828-input.c
395
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
drivers/media/usb/cx231xx/cx231xx-audio.c
489
static void audio_trigger(struct work_struct *work)
drivers/media/usb/cx231xx/cx231xx-audio.c
491
struct cx231xx *dev = container_of(work, struct cx231xx, wq_trigger);
drivers/media/usb/cx231xx/cx231xx-cards.c
1519
static void request_module_async(struct work_struct *work)
drivers/media/usb/cx231xx/cx231xx-cards.c
1521
struct cx231xx *dev = container_of(work,
drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
92
static void dvb_usb_read_remote_control(struct work_struct *work)
drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
94
struct dvb_usb_device *d = container_of(work,
drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
95
struct dvb_usb_device, rc_query_work.work);
drivers/media/usb/dvb-usb/cxusb-analog.c
598
static void cxusb_medion_v_complete_work(struct work_struct *work)
drivers/media/usb/dvb-usb/cxusb-analog.c
600
struct cxusb_medion_dev *cxdev = container_of(work,
drivers/media/usb/dvb-usb/dvb-usb-remote.c
109
static void legacy_dvb_usb_read_remote_control(struct work_struct *work)
drivers/media/usb/dvb-usb/dvb-usb-remote.c
112
container_of(work, struct dvb_usb_device, rc_query_work.work);
drivers/media/usb/dvb-usb/dvb-usb-remote.c
246
static void dvb_usb_read_remote_control(struct work_struct *work)
drivers/media/usb/dvb-usb/dvb-usb-remote.c
249
container_of(work, struct dvb_usb_device, rc_query_work.work);
drivers/media/usb/dvb-usb/technisat-usb2.c
297
static void technisat_usb2_green_led_control(struct work_struct *work)
drivers/media/usb/dvb-usb/technisat-usb2.c
300
container_of(work, struct technisat_usb2_state, green_led_work.work);
drivers/media/usb/em28xx/em28xx-audio.c
330
static void audio_trigger(struct work_struct *work)
drivers/media/usb/em28xx/em28xx-audio.c
333
container_of(work, struct em28xx_audio, wq_trigger);
drivers/media/usb/em28xx/em28xx-cards.c
3434
static void request_module_async(struct work_struct *work)
drivers/media/usb/em28xx/em28xx-cards.c
3436
struct em28xx *dev = container_of(work,
drivers/media/usb/em28xx/em28xx-input.c
356
static void em28xx_ir_work(struct work_struct *work)
drivers/media/usb/em28xx/em28xx-input.c
358
struct em28xx_IR *ir = container_of(work, struct em28xx_IR, work.work);
drivers/media/usb/em28xx/em28xx-input.c
364
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
drivers/media/usb/em28xx/em28xx-input.c
371
INIT_DELAYED_WORK(&ir->work, em28xx_ir_work);
drivers/media/usb/em28xx/em28xx-input.c
372
schedule_delayed_work(&ir->work, 0);
drivers/media/usb/em28xx/em28xx-input.c
381
cancel_delayed_work_sync(&ir->work);
drivers/media/usb/em28xx/em28xx-input.c
497
static void em28xx_query_buttons(struct work_struct *work)
drivers/media/usb/em28xx/em28xx-input.c
500
container_of(work, struct em28xx, buttons_query_work.work);
drivers/media/usb/em28xx/em28xx-input.c
56
struct delayed_work work;
drivers/media/usb/em28xx/em28xx-input.c
878
cancel_delayed_work_sync(&ir->work);
drivers/media/usb/em28xx/em28xx-input.c
901
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
drivers/media/usb/gspca/finepix.c
74
static void dostream(struct work_struct *work)
drivers/media/usb/gspca/finepix.c
76
struct usb_fpix *dev = container_of(work, struct usb_fpix, work_struct);
drivers/media/usb/gspca/jl2005bcd.c
304
static void jl2005c_dostream(struct work_struct *work)
drivers/media/usb/gspca/jl2005bcd.c
306
struct sd *dev = container_of(work, struct sd, work_struct);
drivers/media/usb/gspca/sn9c20x.c
108
static void qual_upd(struct work_struct *work);
drivers/media/usb/gspca/sn9c20x.c
1660
INIT_WORK(&sd->work, qual_upd);
drivers/media/usb/gspca/sn9c20x.c
2088
flush_work(&sd->work);
drivers/media/usb/gspca/sn9c20x.c
2171
static void qual_upd(struct work_struct *work)
drivers/media/usb/gspca/sn9c20x.c
2173
struct sd *sd = container_of(work, struct sd, work);
drivers/media/usb/gspca/sn9c20x.c
2242
schedule_work(&sd->work);
drivers/media/usb/gspca/sn9c20x.c
83
struct work_struct work;
drivers/media/usb/gspca/sonixj.c
1538
INIT_WORK(&sd->work, qual_upd);
drivers/media/usb/gspca/sonixj.c
2156
static void qual_upd(struct work_struct *work)
drivers/media/usb/gspca/sonixj.c
2158
struct sd *sd = container_of(work, struct sd, work);
drivers/media/usb/gspca/sonixj.c
2565
flush_work(&sd->work);
drivers/media/usb/gspca/sonixj.c
2777
schedule_work(&sd->work);
drivers/media/usb/gspca/sonixj.c
43
struct work_struct work;
drivers/media/usb/gspca/sonixj.c
91
static void qual_upd(struct work_struct *work);
drivers/media/usb/gspca/sq905.c
198
static void sq905_dostream(struct work_struct *work)
drivers/media/usb/gspca/sq905.c
200
struct sd *dev = container_of(work, struct sd, work_struct);
drivers/media/usb/gspca/sq905c.c
121
static void sq905c_dostream(struct work_struct *work)
drivers/media/usb/gspca/sq905c.c
123
struct sd *dev = container_of(work, struct sd, work_struct);
drivers/media/usb/gspca/vicam.c
167
static void vicam_dostream(struct work_struct *work)
drivers/media/usb/gspca/vicam.c
169
struct sd *sd = container_of(work, struct sd, work_struct);
drivers/media/usb/gspca/zc3xx.c
41
struct work_struct work;
drivers/media/usb/gspca/zc3xx.c
5938
static void transfer_update(struct work_struct *work)
drivers/media/usb/gspca/zc3xx.c
5940
struct sd *sd = container_of(work, struct sd, work);
drivers/media/usb/gspca/zc3xx.c
6333
INIT_WORK(&sd->work, transfer_update);
drivers/media/usb/gspca/zc3xx.c
6841
schedule_work(&sd->work);
drivers/media/usb/gspca/zc3xx.c
6853
flush_work(&sd->work);
drivers/media/usb/hdpvr/hdpvr-video.c
249
static void hdpvr_transmit_buffers(struct work_struct *work)
drivers/media/usb/hdpvr/hdpvr-video.c
251
struct hdpvr_device *dev = container_of(work, struct hdpvr_device,
drivers/media/usb/pvrusb2/pvrusb2-hdw.c
318
static void pvr2_hdw_worker_poll(struct work_struct *work);
drivers/media/usb/pvrusb2/pvrusb2-hdw.c
3191
static void pvr2_hdw_worker_poll(struct work_struct *work)
drivers/media/usb/pvrusb2/pvrusb2-hdw.c
3194
struct pvr2_hdw *hdw = container_of(work,struct pvr2_hdw,workpoll);
drivers/media/usb/siano/smsusb.c
70
static void do_submit_urb(struct work_struct *work)
drivers/media/usb/siano/smsusb.c
72
struct smsusb_urb_t *surb = container_of(work, struct smsusb_urb_t, wq);
drivers/media/usb/usbtv/usbtv-audio.c
268
static void snd_usbtv_trigger(struct work_struct *work)
drivers/media/usb/usbtv/usbtv-audio.c
270
struct usbtv *chip = container_of(work, struct usbtv, snd_trigger);
drivers/media/usb/uvc/uvc_ctrl.c
2011
static void uvc_ctrl_status_event_work(struct work_struct *work)
drivers/media/usb/uvc/uvc_ctrl.c
2013
struct uvc_device *dev = container_of(work, struct uvc_device,
drivers/media/usb/uvc/uvc_ctrl.c
2014
async_ctrl.work);
drivers/media/usb/uvc/uvc_ctrl.c
2046
schedule_work(&w->work);
drivers/media/usb/uvc/uvc_ctrl.c
3369
INIT_WORK(&dev->async_ctrl.work, uvc_ctrl_status_event_work);
drivers/media/usb/uvc/uvc_ctrl.c
3427
if (dev->async_ctrl.work.func)
drivers/media/usb/uvc/uvc_ctrl.c
3428
cancel_work_sync(&dev->async_ctrl.work);
drivers/media/usb/uvc/uvc_status.c
343
if (cancel_work_sync(&w->work))
drivers/media/usb/uvc/uvc_status.c
355
if (cancel_work_sync(&w->work))
drivers/media/usb/uvc/uvc_video.c
1292
static void uvc_video_copy_data_work(struct work_struct *work)
drivers/media/usb/uvc/uvc_video.c
1294
struct uvc_urb *uvc_urb = container_of(work, struct uvc_urb, work);
drivers/media/usb/uvc/uvc_video.c
1748
queue_work(stream->async_wq, &uvc_urb->work);
drivers/media/usb/uvc/uvc_video.c
2289
INIT_WORK(&uvc_urb->work, uvc_video_copy_data_work);
drivers/media/usb/uvc/uvcvideo.h
451
struct work_struct work;
drivers/media/usb/uvc/uvcvideo.h
619
struct work_struct work;
drivers/media/v4l2-core/v4l2-mem2mem.c
399
static void v4l2_m2m_device_run_work(struct work_struct *work)
drivers/media/v4l2-core/v4l2-mem2mem.c
402
container_of(work, struct v4l2_m2m_dev, job_work);
drivers/memstick/core/memstick.c
434
static void memstick_check(struct work_struct *work)
drivers/memstick/core/memstick.c
436
struct memstick_host *host = container_of(work, struct memstick_host,
drivers/memstick/core/ms_block.c
1875
static void msb_io_work(struct work_struct *work)
drivers/memstick/core/ms_block.c
1877
struct msb_data *msb = container_of(work, struct msb_data, io_work);
drivers/memstick/host/rtsx_usb_ms.c
509
static void rtsx_usb_ms_handle_req(struct work_struct *work)
drivers/memstick/host/rtsx_usb_ms.c
511
struct rtsx_usb_ms *host = container_of(work,
drivers/memstick/host/rtsx_usb_ms.c
719
static void rtsx_usb_ms_poll_card(struct work_struct *work)
drivers/memstick/host/rtsx_usb_ms.c
721
struct rtsx_usb_ms *host = container_of(work, struct rtsx_usb_ms,
drivers/memstick/host/rtsx_usb_ms.c
722
poll_card.work);
drivers/message/fusion/mptbase.c
357
mpt_fault_reset_work(struct work_struct *work)
drivers/message/fusion/mptbase.c
360
container_of(work, MPT_ADAPTER, fault_reset_work.work);
drivers/message/fusion/mptfc.c
1105
mptfc_link_status_change(struct work_struct *work)
drivers/message/fusion/mptfc.c
1108
container_of(work, MPT_ADAPTER, fc_rescan_work);
drivers/message/fusion/mptfc.c
1117
mptfc_setup_reset(struct work_struct *work)
drivers/message/fusion/mptfc.c
1120
container_of(work, MPT_ADAPTER, fc_setup_reset_work);
drivers/message/fusion/mptfc.c
1151
mptfc_rescan_devices(struct work_struct *work)
drivers/message/fusion/mptfc.c
1154
container_of(work, MPT_ADAPTER, fc_rescan_work);
drivers/message/fusion/mptlan.c
1286
mpt_lan_post_receive_buckets_work(struct work_struct *work)
drivers/message/fusion/mptlan.c
1288
mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
drivers/message/fusion/mptlan.c
1289
post_buckets_task.work));
drivers/message/fusion/mptsas.c
106
static void mptsas_firmware_event_work(struct work_struct *work);
drivers/message/fusion/mptsas.c
1644
mptsas_firmware_event_work(struct work_struct *work)
drivers/message/fusion/mptsas.c
1647
container_of(work, struct fw_event_work, work.work);
drivers/message/fusion/mptsas.c
293
INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
drivers/message/fusion/mptsas.c
298
&fw_event->work, delay);
drivers/message/fusion/mptsas.c
314
&fw_event->work, msecs_to_jiffies(delay));
drivers/message/fusion/mptsas.c
375
if (cancel_delayed_work_sync(&fw_event->work))
drivers/message/fusion/mptsas.h
109
struct delayed_work work;
drivers/message/fusion/mptsas.h
118
struct work_struct work;
drivers/message/fusion/mptspi.c
1107
struct work_struct work;
drivers/message/fusion/mptspi.c
1112
static void mpt_work_wrapper(struct work_struct *work)
drivers/message/fusion/mptspi.c
1115
container_of(work, struct work_queue_wrapper, work);
drivers/message/fusion/mptspi.c
1164
INIT_WORK(&wqw->work, mpt_work_wrapper);
drivers/message/fusion/mptspi.c
1168
schedule_work(&wqw->work);
drivers/message/fusion/mptspi.c
1258
mptspi_dv_renegotiate_work(struct work_struct *work)
drivers/message/fusion/mptspi.c
1261
container_of(work, struct work_queue_wrapper, work);
drivers/message/fusion/mptspi.c
1296
INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work);
drivers/message/fusion/mptspi.c
1299
schedule_work(&wqw->work);
drivers/mfd/cs42l43.c
903
static void cs42l43_boot_work(struct work_struct *work)
drivers/mfd/cs42l43.c
905
struct cs42l43 *cs42l43 = container_of(work, struct cs42l43, boot_work);
drivers/mfd/da903x.c
394
static void da903x_irq_work(struct work_struct *work)
drivers/mfd/da903x.c
397
container_of(work, struct da903x_chip, irq_work);
drivers/mfd/db8500-prcmu.c
1022
wait_for_completion(&mb1_transfer.work);
drivers/mfd/db8500-prcmu.c
1053
wait_for_completion(&mb1_transfer.work);
drivers/mfd/db8500-prcmu.c
1115
if (!wait_for_completion_timeout(&mb2_transfer.work,
drivers/mfd/db8500-prcmu.c
1937
wait_for_completion(&mb4_transfer.work);
drivers/mfd/db8500-prcmu.c
1955
wait_for_completion(&mb4_transfer.work);
drivers/mfd/db8500-prcmu.c
1976
wait_for_completion(&mb4_transfer.work);
drivers/mfd/db8500-prcmu.c
1995
wait_for_completion(&mb4_transfer.work);
drivers/mfd/db8500-prcmu.c
2033
wait_for_completion(&mb4_transfer.work);
drivers/mfd/db8500-prcmu.c
2116
if (!wait_for_completion_timeout(&mb5_transfer.work,
drivers/mfd/db8500-prcmu.c
2166
if (!wait_for_completion_timeout(&mb5_transfer.work,
drivers/mfd/db8500-prcmu.c
2308
wait_for_completion(&mb1_transfer.work);
drivers/mfd/db8500-prcmu.c
2387
complete(&mb1_transfer.work);
drivers/mfd/db8500-prcmu.c
2395
complete(&mb2_transfer.work);
drivers/mfd/db8500-prcmu.c
2431
complete(&mb4_transfer.work);
drivers/mfd/db8500-prcmu.c
2441
complete(&mb5_transfer.work);
drivers/mfd/db8500-prcmu.c
2495
static void prcmu_mask_work(struct work_struct *work)
drivers/mfd/db8500-prcmu.c
2681
init_completion(&mb1_transfer.work);
drivers/mfd/db8500-prcmu.c
2684
init_completion(&mb2_transfer.work);
drivers/mfd/db8500-prcmu.c
2690
init_completion(&mb4_transfer.work);
drivers/mfd/db8500-prcmu.c
2692
init_completion(&mb5_transfer.work);
drivers/mfd/db8500-prcmu.c
376
struct completion work;
drivers/mfd/db8500-prcmu.c
397
struct completion work;
drivers/mfd/db8500-prcmu.c
424
struct completion work;
drivers/mfd/db8500-prcmu.c
435
struct completion work;
drivers/mfd/db8500-prcmu.c
822
wait_for_completion(&mb1_transfer.work);
drivers/mfd/db8500-prcmu.c
930
wait_for_completion(&mb1_transfer.work);
drivers/mfd/db8500-prcmu.c
992
wait_for_completion(&mb1_transfer.work);
drivers/mfd/ezx-pcap.c
164
static void pcap_msr_work(struct work_struct *work)
drivers/mfd/ezx-pcap.c
166
struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work);
drivers/mfd/ezx-pcap.c
171
static void pcap_isr_work(struct work_struct *work)
drivers/mfd/ezx-pcap.c
173
struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
drivers/mfd/ls2k-bmc-core.c
292
static void ls2k_bmc_events_fn(struct work_struct *work)
drivers/mfd/ls2k-bmc-core.c
294
struct ls2k_bmc_ddata *ddata = container_of(work, struct ls2k_bmc_ddata, bmc_reset_work);
drivers/mfd/menelaus.c
1196
INIT_WORK(&menelaus->work, menelaus_work);
drivers/mfd/menelaus.c
1219
flush_work(&menelaus->work);
drivers/mfd/menelaus.c
1228
flush_work(&menelaus->work);
drivers/mfd/menelaus.c
155
struct work_struct work;
drivers/mfd/menelaus.c
762
container_of(_menelaus, struct menelaus_chip, work);
drivers/mfd/menelaus.c
800
(void)schedule_work(&menelaus->work);
drivers/mfd/si476x-i2c.c
359
static void si476x_core_drain_rds_fifo(struct work_struct *work)
drivers/mfd/si476x-i2c.c
363
struct si476x_core *core = container_of(work, struct si476x_core,
drivers/mfd/si476x-i2c.c
515
static void si476x_core_poll_loop(struct work_struct *work)
drivers/mfd/si476x-i2c.c
517
struct si476x_core *core = SI476X_WORK_TO_CORE(work);
drivers/mfd/tps65010.c
233
queue_delayed_work(system_power_efficient_wq, &tps->work,
drivers/mfd/tps65010.c
391
queue_delayed_work(system_power_efficient_wq, &tps->work,
drivers/mfd/tps65010.c
398
static void tps65010_work(struct work_struct *work)
drivers/mfd/tps65010.c
402
tps = container_of(to_delayed_work(work), struct tps65010, work);
drivers/mfd/tps65010.c
439
queue_delayed_work(system_power_efficient_wq, &tps->work, 0);
drivers/mfd/tps65010.c
516
cancel_delayed_work_sync(&tps->work);
drivers/mfd/tps65010.c
540
INIT_DELAYED_WORK(&tps->work, tps65010_work);
drivers/mfd/tps65010.c
610
tps65010_work(&tps->work.work);
drivers/mfd/tps65010.c
64
struct delayed_work work;
drivers/mfd/tps65010.c
697
queue_delayed_work(system_power_efficient_wq, &the_tps->work,
drivers/misc/bcm-vk/bcm_vk.h
342
struct delayed_work work;
drivers/misc/bcm-vk/bcm_vk_dev.c
913
static void bcm_vk_wq_handler(struct work_struct *work)
drivers/misc/bcm-vk/bcm_vk_dev.c
915
struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work);
drivers/misc/bcm-vk/bcm_vk_msg.c
140
static void bcm_vk_hb_poll(struct work_struct *work)
drivers/misc/bcm-vk/bcm_vk_msg.c
143
struct bcm_vk_hb_ctrl *hb = container_of(to_delayed_work(work), struct bcm_vk_hb_ctrl,
drivers/misc/bcm-vk/bcm_vk_msg.c
144
work);
drivers/misc/bcm-vk/bcm_vk_msg.c
180
schedule_delayed_work(&hb->work, BCM_VK_HB_TIMER_VALUE);
drivers/misc/bcm-vk/bcm_vk_msg.c
187
INIT_DELAYED_WORK(&hb->work, bcm_vk_hb_poll);
drivers/misc/bcm-vk/bcm_vk_msg.c
188
schedule_delayed_work(&hb->work, BCM_VK_HB_TIMER_VALUE);
drivers/misc/bcm-vk/bcm_vk_msg.c
195
cancel_delayed_work_sync(&hb->work);
drivers/misc/bcm-vk/bcm_vk_tty.c
61
static void bcm_vk_tty_wq_handler(struct work_struct *work)
drivers/misc/bcm-vk/bcm_vk_tty.c
63
struct bcm_vk *vk = container_of(work, struct bcm_vk, tty_wq_work);
drivers/misc/bh1770glc.c
557
static void bh1770_prox_work(struct work_struct *work)
drivers/misc/bh1770glc.c
560
container_of(work, struct bh1770_chip, prox_work.work);
drivers/misc/cardreader/rtsx_pcr.c
877
static void rtsx_pci_card_detect(struct work_struct *work)
drivers/misc/cardreader/rtsx_pcr.c
885
dwork = to_delayed_work(work);
drivers/misc/fastrpc.c
1231
if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
drivers/misc/fastrpc.c
1234
err = wait_for_completion_interruptible(&ctx->work);
drivers/misc/fastrpc.c
241
struct completion work;
drivers/misc/fastrpc.c
2468
complete(&ctx->work);
drivers/misc/fastrpc.c
2528
complete(&ctx->work);
drivers/misc/fastrpc.c
537
static void fastrpc_context_put_wq(struct work_struct *work)
drivers/misc/fastrpc.c
540
container_of(work, struct fastrpc_invoke_ctx, put_work);
drivers/misc/fastrpc.c
637
init_completion(&ctx->work);
drivers/misc/hisi_hikey_usb.c
141
schedule_work(&hisi_hikey_usb->work);
drivers/misc/hisi_hikey_usb.c
189
INIT_WORK(&hisi_hikey_usb->work, relay_set_role_switch);
drivers/misc/hisi_hikey_usb.c
47
struct work_struct work;
drivers/misc/hisi_hikey_usb.c
92
static void relay_set_role_switch(struct work_struct *work)
drivers/misc/hisi_hikey_usb.c
94
struct hisi_hikey_usb *hisi_hikey_usb = container_of(work,
drivers/misc/hisi_hikey_usb.c
96
work);
drivers/misc/mei/bus.c
1566
void mei_cl_bus_rescan_work(struct work_struct *work)
drivers/misc/mei/bus.c
1569
container_of(work, struct mei_device, bus_rescan_work);
drivers/misc/mei/bus.c
424
static void mei_cl_bus_rx_work(struct work_struct *work)
drivers/misc/mei/bus.c
429
cldev = container_of(work, struct mei_cl_device, rx_work);
drivers/misc/mei/bus.c
447
static void mei_cl_bus_notif_work(struct work_struct *work)
drivers/misc/mei/bus.c
451
cldev = container_of(work, struct mei_cl_device, notif_work);
drivers/misc/mei/init.c
283
static void mei_reset_work(struct work_struct *work)
drivers/misc/mei/init.c
286
container_of(work, struct mei_device, reset_work);
drivers/misc/mei/interrupt.c
641
void mei_timer(struct work_struct *work)
drivers/misc/mei/interrupt.c
644
struct mei_device *dev = container_of(work,
drivers/misc/mei/interrupt.c
645
struct mei_device, timer_work.work);
drivers/misc/mei/mei_dev.h
397
void mei_cl_bus_rescan_work(struct work_struct *work);
drivers/misc/mei/mei_dev.h
729
void mei_timer(struct work_struct *work);
drivers/misc/mei/vsc-tp.c
115
static void vsc_tp_event_work(struct work_struct *work)
drivers/misc/mei/vsc-tp.c
117
struct vsc_tp *tp = container_of(work, struct vsc_tp, event_work);
drivers/misc/mrvl_cn10k_dpi.c
166
struct work_struct work;
drivers/misc/mrvl_cn10k_dpi.c
275
schedule_work(&dpi->mbox[vf]->work);
drivers/misc/mrvl_cn10k_dpi.c
315
static void dpi_pfvf_mbox_work(struct work_struct *work)
drivers/misc/mrvl_cn10k_dpi.c
317
struct dpi_mbox *mbox = container_of(work, struct dpi_mbox, work);
drivers/misc/mrvl_cn10k_dpi.c
367
INIT_WORK(&dpi->mbox[vf]->work, dpi_pfvf_mbox_work);
drivers/misc/mrvl_cn10k_dpi.c
380
if (work_pending(&dpi->mbox[vf]->work))
drivers/misc/mrvl_cn10k_dpi.c
381
cancel_work_sync(&dpi->mbox[vf]->work);
drivers/misc/tifm_7xx1.c
135
static void tifm_7xx1_switch_media(struct work_struct *work)
drivers/misc/tifm_7xx1.c
137
struct tifm_adapter *fm = container_of(work, struct tifm_adapter,
drivers/misc/tifm_core.c
307
void tifm_queue_work(struct work_struct *work)
drivers/misc/tifm_core.c
309
queue_work(workqueue, work);
drivers/misc/vmw_balloon.c
1464
static void vmballoon_work(struct work_struct *work)
drivers/misc/vmw_balloon.c
1466
struct delayed_work *dwork = to_delayed_work(work);
drivers/misc/vmw_vmci/vmci_datagram.c
136
static void dg_delayed_dispatch(struct work_struct *work)
drivers/misc/vmw_vmci/vmci_datagram.c
139
container_of(work, struct delayed_datagram_info, work);
drivers/misc/vmw_vmci/vmci_datagram.c
240
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
drivers/misc/vmw_vmci/vmci_datagram.c
241
schedule_work(&dg_info->work);
drivers/misc/vmw_vmci/vmci_datagram.c
37
struct work_struct work;
drivers/misc/vmw_vmci/vmci_datagram.c
384
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
drivers/misc/vmw_vmci/vmci_datagram.c
385
schedule_work(&dg_info->work);
drivers/misc/vmw_vmci/vmci_doorbell.c
263
static void dbell_delayed_dispatch(struct work_struct *work)
drivers/misc/vmw_vmci/vmci_doorbell.c
265
struct dbell_entry *entry = container_of(work,
drivers/misc/vmw_vmci/vmci_doorbell.c
266
struct dbell_entry, work);
drivers/misc/vmw_vmci/vmci_doorbell.c
296
if (!schedule_work(&entry->work))
drivers/misc/vmw_vmci/vmci_doorbell.c
348
if (!schedule_work(&dbell->work))
drivers/misc/vmw_vmci/vmci_doorbell.c
36
struct work_struct work;
drivers/misc/vmw_vmci/vmci_doorbell.c
451
INIT_WORK(&entry->work, dbell_delayed_dispatch);
drivers/mmc/core/block.c
2260
void mmc_blk_mq_complete_work(struct work_struct *work)
drivers/mmc/core/block.c
2262
struct mmc_queue *mq = container_of(work, struct mmc_queue,
drivers/mmc/core/block.h
18
void mmc_blk_mq_complete_work(struct work_struct *work);
drivers/mmc/core/core.c
2251
void mmc_rescan(struct work_struct *work)
drivers/mmc/core/core.c
2254
container_of(work, struct mmc_host, detect.work);
drivers/mmc/core/core.c
63
static int mmc_schedule_delayed_work(struct delayed_work *work,
drivers/mmc/core/core.c
72
return queue_delayed_work(system_freezable_wq, work, delay);
drivers/mmc/core/core.h
66
void mmc_undervoltage_workfn(struct work_struct *work);
drivers/mmc/core/core.h
76
void mmc_rescan(struct work_struct *work);
drivers/mmc/core/queue.c
136
static void mmc_mq_recovery_handler(struct work_struct *work)
drivers/mmc/core/queue.c
138
struct mmc_queue *mq = container_of(work, struct mmc_queue,
drivers/mmc/core/regulator.c
267
void mmc_undervoltage_workfn(struct work_struct *work)
drivers/mmc/core/regulator.c
272
supply = container_of(work, struct mmc_supply, uv_work);
drivers/mmc/core/sdio_irq.c
124
void sdio_irq_work(struct work_struct *work)
drivers/mmc/core/sdio_irq.c
127
container_of(work, struct mmc_host, sdio_irq_work);
drivers/mmc/core/sdio_ops.h
24
void sdio_irq_work(struct work_struct *work);
drivers/mmc/host/alcor.c
962
static void alcor_timeout_timer(struct work_struct *work)
drivers/mmc/host/alcor.c
964
struct delayed_work *d = to_delayed_work(work);
drivers/mmc/host/bcm2835.c
1049
static void bcm2835_dma_complete_work(struct work_struct *work)
drivers/mmc/host/bcm2835.c
1052
container_of(work, struct bcm2835_host, dma_work);
drivers/mmc/host/bcm2835.c
823
static void bcm2835_timeout(struct work_struct *work)
drivers/mmc/host/bcm2835.c
825
struct delayed_work *d = to_delayed_work(work);
drivers/mmc/host/mmc_hsq.c
16
static void mmc_hsq_retry_handler(struct work_struct *work)
drivers/mmc/host/mmc_hsq.c
18
struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
drivers/mmc/host/mmci.c
1563
static void ux500_busy_timeout_work(struct work_struct *work)
drivers/mmc/host/mmci.c
1565
struct mmci_host *host = container_of(work, struct mmci_host,
drivers/mmc/host/mmci.c
1566
ux500_busy_timeout_work.work);
drivers/mmc/host/mtk-sd.c
1678
static void msdc_request_timeout(struct work_struct *work)
drivers/mmc/host/mtk-sd.c
1680
struct msdc_host *host = container_of(work, struct msdc_host,
drivers/mmc/host/mtk-sd.c
1681
req_timeout.work);
drivers/mmc/host/mxcmmc.c
629
static void mxcmci_datawork(struct work_struct *work)
drivers/mmc/host/mxcmmc.c
631
struct mxcmci_host *host = container_of(work, struct mxcmci_host,
drivers/mmc/host/omap.c
249
static void mmc_omap_slot_release_work(struct work_struct *work)
drivers/mmc/host/omap.c
251
struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
drivers/mmc/host/omap.c
439
static void mmc_omap_send_stop_work(struct work_struct *work)
drivers/mmc/host/omap.c
441
struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
drivers/mmc/host/omap.c
608
static void mmc_omap_abort_command(struct work_struct *work)
drivers/mmc/host/omap.c
610
struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
drivers/mmc/host/renesas_sdhi_internal_dmac.c
446
static void renesas_sdhi_internal_dmac_issue_work_fn(struct work_struct *work)
drivers/mmc/host/renesas_sdhi_internal_dmac.c
448
struct tmio_mmc_host *host = from_work(host, work, dma_issue);
drivers/mmc/host/renesas_sdhi_internal_dmac.c
490
static void renesas_sdhi_internal_dmac_complete_work_fn(struct work_struct *work)
drivers/mmc/host/renesas_sdhi_internal_dmac.c
492
struct renesas_sdhi_dma *dma_priv = from_work(dma_priv, work, dma_complete);
drivers/mmc/host/renesas_sdhi_sys_dmac.c
317
static void renesas_sdhi_sys_dmac_issue_work_fn(struct work_struct *work)
drivers/mmc/host/renesas_sdhi_sys_dmac.c
319
struct tmio_mmc_host *host = from_work(host, work, dma_issue);
drivers/mmc/host/rtsx_pci_sdmmc.c
1553
INIT_WORK(&host->work, sd_request);
drivers/mmc/host/rtsx_pci_sdmmc.c
1590
cancel_work_sync(&host->work);
drivers/mmc/host/rtsx_pci_sdmmc.c
1610
flush_work(&host->work);
drivers/mmc/host/rtsx_pci_sdmmc.c
33
struct work_struct work;
drivers/mmc/host/rtsx_pci_sdmmc.c
795
static void sd_request(struct work_struct *work)
drivers/mmc/host/rtsx_pci_sdmmc.c
797
struct realtek_pci_sdmmc *host = container_of(work,
drivers/mmc/host/rtsx_pci_sdmmc.c
798
struct realtek_pci_sdmmc, work);
drivers/mmc/host/rtsx_pci_sdmmc.c
884
schedule_work(&host->work);
drivers/mmc/host/rtsx_usb_sdmmc.c
1314
static void rtsx_usb_update_led(struct work_struct *work)
drivers/mmc/host/rtsx_usb_sdmmc.c
1317
container_of(work, struct rtsx_usb_sdmmc, led_work);
drivers/mmc/host/sdhci-uhs2.c
976
static void sdhci_uhs2_complete_work(struct work_struct *work)
drivers/mmc/host/sdhci-uhs2.c
978
struct sdhci_host *host = container_of(work, struct sdhci_host,
drivers/mmc/host/sdhci-uhs2.c
982
sdhci_complete_work(work);
drivers/mmc/host/sdhci.c
3228
void sdhci_complete_work(struct work_struct *work)
drivers/mmc/host/sdhci.c
3230
struct sdhci_host *host = container_of(work, struct sdhci_host,
drivers/mmc/host/sdhci.h
628
void (*complete_work_fn)(struct work_struct *work);
drivers/mmc/host/sdhci.h
871
void sdhci_complete_work(struct work_struct *work);
drivers/mmc/host/sh_mmcif.c
1358
static void sh_mmcif_timeout_work(struct work_struct *work)
drivers/mmc/host/sh_mmcif.c
1360
struct delayed_work *d = to_delayed_work(work);
drivers/mmc/host/tmio_mmc_core.c
229
static void tmio_mmc_reset_work(struct work_struct *work)
drivers/mmc/host/tmio_mmc_core.c
231
struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
drivers/mmc/host/tmio_mmc_core.c
232
delayed_reset_work.work);
drivers/mmc/host/tmio_mmc_core.c
673
!work_pending(&mmc->detect.work))
drivers/mmc/host/tmio_mmc_core.c
904
static void tmio_mmc_done_work(struct work_struct *work)
drivers/mmc/host/tmio_mmc_core.c
906
struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
drivers/mmc/host/usdhi6rol0.c
1666
if (!work_pending(&mmc->detect.work) &&
drivers/mmc/host/usdhi6rol0.c
1681
static void usdhi6_timeout_work(struct work_struct *work)
drivers/mmc/host/usdhi6rol0.c
1683
struct delayed_work *d = to_delayed_work(work);
drivers/mmc/host/via-sdmmc.c
985
static void via_sdc_card_detect(struct work_struct *work)
drivers/mmc/host/via-sdmmc.c
992
host = container_of(work, struct via_crdr_mmc_host, carddet_work);
drivers/mmc/host/vub300.c
1738
static void vub300_cmndwork_thread(struct work_struct *work)
drivers/mmc/host/vub300.c
1741
container_of(work, struct vub300_mmc_host, cmndwork);
drivers/mmc/host/vub300.c
675
static void vub300_pollwork_thread(struct work_struct *work)
drivers/mmc/host/vub300.c
677
struct vub300_mmc_host *vub300 = container_of(work,
drivers/mmc/host/vub300.c
678
struct vub300_mmc_host, pollwork.work);
drivers/mmc/host/vub300.c
706
static void vub300_deadwork_thread(struct work_struct *work)
drivers/mmc/host/vub300.c
709
container_of(work, struct vub300_mmc_host, deadwork);
drivers/mtd/mtdoops.c
166
static void mtdoops_workfunc_erase(struct work_struct *work)
drivers/mtd/mtdoops.c
169
container_of(work, struct mtdoops_context, work_erase);
drivers/mtd/mtdoops.c
237
static void mtdoops_workfunc_write(struct work_struct *work)
drivers/mtd/mtdoops.c
240
container_of(work, struct mtdoops_context, work_write);
drivers/mtd/nand/raw/r852.c
683
static void r852_card_detect_work(struct work_struct *work)
drivers/mtd/nand/raw/r852.c
686
container_of(work, struct r852_device, card_detect_work.work);
drivers/mtd/sm_ftl.c
1001
struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
drivers/mtd/sm_ftl.c
999
static void sm_cache_flush_work(struct work_struct *work)
drivers/net/amt.c
108
static void amt_source_gc_work(struct work_struct *work)
drivers/net/amt.c
1340
static void amt_tunnel_expire(struct work_struct *work)
drivers/net/amt.c
1342
struct amt_tunnel_list *tunnel = container_of(to_delayed_work(work),
drivers/net/amt.c
2869
static void amt_event_work(struct work_struct *work)
drivers/net/amt.c
2871
struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
drivers/net/amt.c
289
static void amt_source_work(struct work_struct *work)
drivers/net/amt.c
291
struct amt_source_node *snode = container_of(to_delayed_work(work),
drivers/net/amt.c
410
static void amt_group_work(struct work_struct *work)
drivers/net/amt.c
412
struct amt_group_node *gnode = container_of(to_delayed_work(work),
drivers/net/amt.c
917
static void amt_secret_work(struct work_struct *work)
drivers/net/amt.c
919
struct amt_dev *amt = container_of(to_delayed_work(work),
drivers/net/amt.c
942
static void amt_discovery_work(struct work_struct *work)
drivers/net/amt.c
944
struct amt_dev *amt = container_of(to_delayed_work(work),
drivers/net/amt.c
987
static void amt_req_work(struct work_struct *work)
drivers/net/amt.c
989
struct amt_dev *amt = container_of(to_delayed_work(work),
drivers/net/arcnet/arcnet.c
398
static void reset_device_work(struct work_struct *work)
drivers/net/arcnet/arcnet.c
403
lp = container_of(work, struct arcnet_local, reset_work);
drivers/net/bonding/bond_3ad.c
2518
void bond_3ad_state_machine_handler(struct work_struct *work)
drivers/net/bonding/bond_3ad.c
2520
struct bonding *bond = container_of(work, struct bonding,
drivers/net/bonding/bond_3ad.c
2521
ad_work.work);
drivers/net/bonding/bond_alb.c
1532
void bond_alb_monitor(struct work_struct *work)
drivers/net/bonding/bond_alb.c
1534
struct bonding *bond = container_of(work, struct bonding,
drivers/net/bonding/bond_alb.c
1535
alb_work.work);
drivers/net/bonding/bond_main.c
1220
static void bond_peer_notify_handler(struct work_struct *work)
drivers/net/bonding/bond_main.c
1222
struct bonding *bond = container_of(work, struct bonding,
drivers/net/bonding/bond_main.c
1223
peer_notify_work.work);
drivers/net/bonding/bond_main.c
1806
notify_work.work);
drivers/net/bonding/bond_main.c
267
static void bond_slave_arr_handler(struct work_struct *work);
drivers/net/bonding/bond_main.c
270
static void bond_netdev_notify_work(struct work_struct *work);
drivers/net/bonding/bond_main.c
2882
static void bond_mii_monitor(struct work_struct *work)
drivers/net/bonding/bond_main.c
2884
struct bonding *bond = container_of(work, struct bonding,
drivers/net/bonding/bond_main.c
2885
mii_work.work);
drivers/net/bonding/bond_main.c
3881
static void bond_arp_monitor(struct work_struct *work)
drivers/net/bonding/bond_main.c
3883
struct bonding *bond = container_of(work, struct bonding,
drivers/net/bonding/bond_main.c
3884
arp_work.work);
drivers/net/bonding/bond_main.c
5075
static void bond_slave_arr_handler(struct work_struct *work)
drivers/net/bonding/bond_main.c
5077
struct bonding *bond = container_of(work, struct bonding,
drivers/net/bonding/bond_main.c
5078
slave_arr_work.work);
drivers/net/bonding/bond_main.c
892
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
drivers/net/bonding/bond_main.c
894
struct bonding *bond = container_of(work, struct bonding,
drivers/net/bonding/bond_main.c
895
mcast_work.work);
drivers/net/caif/caif_serial.c
283
static void ser_release(struct work_struct *work)
drivers/net/can/can327.c
951
static void can327_ldisc_tx_worker(struct work_struct *work)
drivers/net/can/can327.c
953
struct can327 *elm = container_of(work, struct can327, tx_work);
drivers/net/can/dev/dev.c
232
static void can_restart_work(struct work_struct *work)
drivers/net/can/dev/dev.c
234
struct delayed_work *dwork = to_delayed_work(work);
drivers/net/can/m_can/m_can.c
1986
struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work);
drivers/net/can/m_can/m_can.c
2001
queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work);
drivers/net/can/m_can/m_can.c
2121
INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue);
drivers/net/can/m_can/m_can.h
76
struct work_struct work;
drivers/net/can/rockchip/rockchip_canfd-timestamp.c
29
static void rkcanfd_timestamp_work(struct work_struct *work)
drivers/net/can/rockchip/rockchip_canfd-timestamp.c
31
const struct delayed_work *delayed_work = to_delayed_work(work);
drivers/net/can/sja1000/peak_pci.c
287
static void peak_pciec_led_work(struct work_struct *work)
drivers/net/can/sja1000/peak_pci.c
290
container_of(work, struct peak_pciec_card, led_work.work);
drivers/net/can/slcan/slcan-core.c
558
static void slcan_transmit(struct work_struct *work)
drivers/net/can/slcan/slcan-core.c
560
struct slcan *sl = container_of(work, struct slcan, tx_work);
drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
30
static void mcp251xfd_timestamp_work(struct work_struct *work)
drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
32
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
154
void mcp251xfd_tx_obj_write_sync(struct work_struct *work)
drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
156
struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv,
drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
198
static bool mcp251xfd_work_busy(struct work_struct *work)
drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
200
return work_busy(work);
drivers/net/can/spi/mcp251xfd/mcp251xfd.h
971
void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
drivers/net/can/usb/f81604.c
917
static void f81604_clear_reg_work(struct work_struct *work)
drivers/net/can/usb/f81604.c
922
priv = container_of(work, struct f81604_port_priv, clear_reg_work);
drivers/net/can/usb/gs_usb.c
446
static void gs_usb_timestamp_work(struct work_struct *work)
drivers/net/can/usb/gs_usb.c
448
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
1097
static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work)
drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
1100
container_of(work, struct kvaser_usb_net_leaf_priv,
drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
1101
chip_state_req_work.work);
drivers/net/can/usb/nct6694_canfd.c
433
static void nct6694_canfd_tx_work(struct work_struct *work)
drivers/net/can/usb/nct6694_canfd.c
435
struct nct6694_canfd_priv *priv = container_of(work,
drivers/net/dsa/hirschmann/hellcreek.c
1710
static void hellcreek_check_schedule(struct work_struct *work)
drivers/net/dsa/hirschmann/hellcreek.c
1712
struct delayed_work *dw = to_delayed_work(work);
drivers/net/dsa/hirschmann/hellcreek_ptp.c
233
static void hellcreek_ptp_overflow_check(struct work_struct *work)
drivers/net/dsa/hirschmann/hellcreek_ptp.c
235
struct delayed_work *dw = to_delayed_work(work);
drivers/net/dsa/lantiq/mxl-gsw1xx.c
453
static void gsw1xx_pcs_clear_raneg(struct work_struct *work)
drivers/net/dsa/lantiq/mxl-gsw1xx.c
456
container_of(work, struct gsw1xx_priv, clear_raneg.work);
drivers/net/dsa/microchip/ksz_common.c
3153
static void ksz_mib_read_work(struct work_struct *work)
drivers/net/dsa/microchip/ksz_common.c
3155
struct ksz_device *dev = container_of(work, struct ksz_device,
drivers/net/dsa/microchip/ksz_common.c
3156
mib_read.work);
drivers/net/dsa/microchip/ksz_ptp.c
22
container_of((w), struct ksz_deferred_xmit_work, work)
drivers/net/dsa/microchip/ksz_ptp.c
565
void ksz_port_deferred_xmit(struct kthread_work *work)
drivers/net/dsa/microchip/ksz_ptp.c
567
struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
drivers/net/dsa/microchip/ksz_ptp.h
48
void ksz_port_deferred_xmit(struct kthread_work *work);
drivers/net/dsa/mv88e6xxx/chip.c
375
static void mv88e6xxx_irq_poll(struct kthread_work *work)
drivers/net/dsa/mv88e6xxx/chip.c
377
struct mv88e6xxx_chip *chip = container_of(work,
drivers/net/dsa/mv88e6xxx/chip.c
379
irq_poll_work.work);
drivers/net/dsa/mv88e6xxx/ptp.c
461
static void mv88e6xxx_ptp_overflow_check(struct work_struct *work)
drivers/net/dsa/mv88e6xxx/ptp.c
463
struct delayed_work *dw = to_delayed_work(work);
drivers/net/dsa/ocelot/felix.c
1640
container_of((w), struct felix_deferred_xmit_work, work)
drivers/net/dsa/ocelot/felix.c
1642
static void felix_port_deferred_xmit(struct kthread_work *work)
drivers/net/dsa/ocelot/felix.c
1644
struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
drivers/net/dsa/qca/ar9331.c
658
static void ar9331_do_stats_poll(struct work_struct *work)
drivers/net/dsa/qca/ar9331.c
660
struct ar9331_sw_port *port = container_of(work, struct ar9331_sw_port,
drivers/net/dsa/qca/ar9331.c
661
mib_read.work);
drivers/net/dsa/realtek/rtl8365mb.c
1512
static void rtl8365mb_stats_poll(struct work_struct *work)
drivers/net/dsa/realtek/rtl8365mb.c
1514
struct rtl8365mb_port *p = container_of(to_delayed_work(work),
drivers/net/dsa/sja1105/sja1105_main.c
2662
container_of((w), struct sja1105_deferred_xmit_work, work)
drivers/net/dsa/sja1105/sja1105_main.c
2668
static void sja1105_port_deferred_xmit(struct kthread_work *work)
drivers/net/dsa/sja1105/sja1105_main.c
2670
struct sja1105_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
drivers/net/dsa/sja1105/sja1105_tas.c
724
static void sja1105_tas_state_machine(struct work_struct *work)
drivers/net/dsa/sja1105/sja1105_tas.c
726
struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work);
drivers/net/dsa/xrs700x/xrs700x.c
147
static void xrs700x_mib_work(struct work_struct *work)
drivers/net/dsa/xrs700x/xrs700x.c
149
struct xrs700x *priv = container_of(work, struct xrs700x,
drivers/net/dsa/xrs700x/xrs700x.c
150
mib_work.work);
drivers/net/dsa/yt921x.c
729
static void yt921x_poll_mib(struct work_struct *work)
drivers/net/dsa/yt921x.c
731
struct yt921x_port *pp = container_of_const(work, struct yt921x_port,
drivers/net/dsa/yt921x.c
732
mib_read.work);
drivers/net/ethernet/actions/owl-emac.c
1199
static void owl_emac_reset_task(struct work_struct *work)
drivers/net/ethernet/actions/owl-emac.c
1203
priv = container_of(work, struct owl_emac_priv, mac_reset_task);
drivers/net/ethernet/adi/adin1110.c
1451
static void adin1110_switchdev_event_work(struct work_struct *work)
drivers/net/ethernet/adi/adin1110.c
1457
switchdev_work = container_of(work, struct adin1110_switchdev_event_work, work);
drivers/net/ethernet/adi/adin1110.c
1499
INIT_WORK(&switchdev_work->work, adin1110_switchdev_event_work);
drivers/net/ethernet/adi/adin1110.c
1522
queue_work(system_long_wq, &switchdev_work->work);
drivers/net/ethernet/adi/adin1110.c
167
struct work_struct work;
drivers/net/ethernet/adi/adin1110.c
858
static void adin1110_rx_mode_work(struct work_struct *work)
drivers/net/ethernet/adi/adin1110.c
863
port_priv = container_of(work, struct adin1110_port_priv, rx_mode_work);
drivers/net/ethernet/adi/adin1110.c
964
static void adin1110_tx_work(struct work_struct *work)
drivers/net/ethernet/adi/adin1110.c
971
port_priv = container_of(work, struct adin1110_port_priv, tx_work);
drivers/net/ethernet/airoha/airoha_npu.c
293
static void airoha_npu_wdt_work(struct work_struct *work)
drivers/net/ethernet/airoha/airoha_npu.c
301
core = container_of(work, struct airoha_npu_core, wdt_work);
drivers/net/ethernet/alteon/acenic.c
1563
static void ace_bh_work(struct work_struct *work)
drivers/net/ethernet/alteon/acenic.c
1565
struct ace_private *ap = from_work(ap, work, ace_bh_work);
drivers/net/ethernet/alteon/acenic.h
779
static void ace_bh_work(struct work_struct *work);
drivers/net/ethernet/amazon/ena/ena_netdev.c
1339
struct dim *dim = container_of(w, struct dim, work);
drivers/net/ethernet/amazon/ena/ena_netdev.c
2016
INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
drivers/net/ethernet/amazon/ena/ena_netdev.c
2026
cancel_work_sync(&adapter->ena_napi[i].dim.work);
drivers/net/ethernet/amazon/ena/ena_netdev.c
3371
static void ena_fw_reset_device(struct work_struct *work)
drivers/net/ethernet/amazon/ena/ena_netdev.c
3376
container_of(work, struct ena_adapter, reset_task);
drivers/net/ethernet/amazon/ena/ena_netdev.c
762
cancel_work_sync(&adapter->ena_napi[i].dim.work);
drivers/net/ethernet/amd/pds_core/adminq.c
131
void pdsc_work_thread(struct work_struct *work)
drivers/net/ethernet/amd/pds_core/adminq.c
133
struct pdsc_qcq *qcq = container_of(work, struct pdsc_qcq, work);
drivers/net/ethernet/amd/pds_core/adminq.c
151
queue_work(pdsc->wq, &qcq->work);
drivers/net/ethernet/amd/pds_core/core.c
207
INIT_WORK(&qcq->work, pdsc_work_thread);
drivers/net/ethernet/amd/pds_core/core.c
480
if (pdsc->adminqcq.work.func)
drivers/net/ethernet/amd/pds_core/core.c
481
cancel_work_sync(&pdsc->adminqcq.work);
drivers/net/ethernet/amd/pds_core/core.c
600
void pdsc_pci_reset_thread(struct work_struct *work)
drivers/net/ethernet/amd/pds_core/core.c
602
struct pdsc *pdsc = container_of(work, struct pdsc, pci_reset_work);
drivers/net/ethernet/amd/pds_core/core.c
628
void pdsc_health_thread(struct work_struct *work)
drivers/net/ethernet/amd/pds_core/core.c
630
struct pdsc *pdsc = container_of(work, struct pdsc, health_work);
drivers/net/ethernet/amd/pds_core/core.h
126
struct work_struct work;
drivers/net/ethernet/amd/pds_core/core.h
303
void pdsc_health_thread(struct work_struct *work);
drivers/net/ethernet/amd/pds_core/core.h
315
void pdsc_work_thread(struct work_struct *work);
drivers/net/ethernet/amd/pds_core/core.h
323
void pdsc_pci_reset_thread(struct work_struct *work);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
1360
static void xgbe_stopdev(struct work_struct *work)
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
1362
struct xgbe_prv_data *pdata = container_of(work,
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
1406
static void xgbe_restart(struct work_struct *work)
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
1408
struct xgbe_prv_data *pdata = container_of(work,
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
297
static void xgbe_ecc_isr_bh_work(struct work_struct *work)
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
299
struct xgbe_prv_data *pdata = from_work(pdata, work, ecc_bh_work);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
367
static void xgbe_isr_bh_work(struct work_struct *work)
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
369
struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
596
static void xgbe_service(struct work_struct *work)
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
598
struct xgbe_prv_data *pdata = container_of(work,
drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
134
void xgbe_tx_tstamp(struct work_struct *work)
drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
136
struct xgbe_prv_data *pdata = container_of(work,
drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
168
static void xgbe_i2c_isr_bh_work(struct work_struct *work)
drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
170
struct xgbe_prv_data *pdata = from_work(pdata, work, i2c_bh_work);
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
601
static void xgbe_an_isr_bh_work(struct work_struct *work)
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
603
struct xgbe_prv_data *pdata = from_work(pdata, work, an_bh_work);
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
640
static void xgbe_an_irq_work(struct work_struct *work)
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
642
struct xgbe_prv_data *pdata = container_of(work,
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
826
static void xgbe_an_state_machine(struct work_struct *work)
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
828
struct xgbe_prv_data *pdata = container_of(work,
drivers/net/ethernet/amd/xgbe/xgbe.h
1312
void xgbe_tx_tstamp(struct work_struct *work);
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
154
void (*link_state)(struct work_struct *work);
drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
534
static void xgene_enet_link_state(struct work_struct *work)
drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
536
struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
480
static void xgene_enet_link_state(struct work_struct *work)
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
482
struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
drivers/net/ethernet/aquantia/atlantic/aq_main.c
40
void aq_ndev_schedule_work(struct work_struct *work)
drivers/net/ethernet/aquantia/atlantic/aq_main.c
42
queue_work(aq_ndev_wq, work);
drivers/net/ethernet/aquantia/atlantic/aq_main.h
17
void aq_ndev_schedule_work(struct work_struct *work);
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
228
static void aq_nic_service_task(struct work_struct *work)
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
230
struct aq_nic_s *self = container_of(work, struct aq_nic_s,
drivers/net/ethernet/asix/ax88796c_main.c
607
static void ax88796c_work(struct work_struct *work)
drivers/net/ethernet/asix/ax88796c_main.c
610
container_of(work, struct ax88796c_device, ax_work);
drivers/net/ethernet/atheros/ag71xx.c
1589
static void ag71xx_restart_work_func(struct work_struct *work)
drivers/net/ethernet/atheros/ag71xx.c
1591
struct ag71xx *ag = container_of(work, struct ag71xx,
drivers/net/ethernet/atheros/ag71xx.c
1592
restart_work.work);
drivers/net/ethernet/atheros/alx/main.c
1373
static void alx_link_check(struct work_struct *work)
drivers/net/ethernet/atheros/alx/main.c
1377
alx = container_of(work, struct alx_priv, link_check_wk);
drivers/net/ethernet/atheros/alx/main.c
1384
static void alx_reset(struct work_struct *work)
drivers/net/ethernet/atheros/alx/main.c
1386
struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
drivers/net/ethernet/atheros/alx/main.c
223
int work = 0;
drivers/net/ethernet/atheros/alx/main.c
227
while (work < budget) {
drivers/net/ethernet/atheros/alx/main.c
238
return work;
drivers/net/ethernet/atheros/alx/main.c
278
work++;
drivers/net/ethernet/atheros/alx/main.c
293
return work;
drivers/net/ethernet/atheros/alx/main.c
303
int work = 0;
drivers/net/ethernet/atheros/alx/main.c
308
work = alx_clean_rx_irq(np->rxq, budget);
drivers/net/ethernet/atheros/alx/main.c
310
if (!tx_complete || work == budget)
drivers/net/ethernet/atheros/alx/main.c
313
napi_complete_done(&np->napi, work);
drivers/net/ethernet/atheros/alx/main.c
327
return work;
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
331
static void atl1c_common_task(struct work_struct *work)
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
336
adapter = container_of(work, struct atl1c_adapter, common_task);
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
137
static void atl1e_reset_task(struct work_struct *work)
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
140
adapter = container_of(work, struct atl1e_adapter, reset_task);
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
199
static void atl1e_link_chg_task(struct work_struct *work)
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
204
adapter = container_of(work, struct atl1e_adapter, link_chg_task);
drivers/net/ethernet/atheros/atlx/atl1.c
2696
static void atl1_reset_dev_task(struct work_struct *work)
drivers/net/ethernet/atheros/atlx/atl1.c
2699
container_of(work, struct atl1_adapter, reset_dev_task);
drivers/net/ethernet/atheros/atlx/atl2.c
1097
static void atl2_reset_task(struct work_struct *work)
drivers/net/ethernet/atheros/atlx/atl2.c
1100
adapter = container_of(work, struct atl2_adapter, reset_task);
drivers/net/ethernet/atheros/atlx/atl2.c
1241
static void atl2_link_chg_task(struct work_struct *work)
drivers/net/ethernet/atheros/atlx/atl2.c
1246
adapter = container_of(work, struct atl2_adapter, link_chg_task);
drivers/net/ethernet/atheros/atlx/atlx.c
196
static void atlx_link_chg_task(struct work_struct *work)
drivers/net/ethernet/atheros/atlx/atlx.c
201
adapter = container_of(work, struct atlx_adapter, link_chg_task);
drivers/net/ethernet/broadcom/bcmsysport.c
1089
static void bcm_sysport_dim_work(struct work_struct *work)
drivers/net/ethernet/broadcom/bcmsysport.c
1091
struct dim *dim = container_of(work, struct dim, work);
drivers/net/ethernet/broadcom/bcmsysport.c
1449
void (*cb)(struct work_struct *work))
drivers/net/ethernet/broadcom/bcmsysport.c
1453
INIT_WORK(&dim->dim.work, cb);
drivers/net/ethernet/broadcom/bcmsysport.c
2066
cancel_work_sync(&priv->dim.dim.work);
drivers/net/ethernet/broadcom/bnx2.c
6429
bnx2_reset_task(struct work_struct *work)
drivers/net/ethernet/broadcom/bnx2.c
6431
struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
186
void bnx2x_dcbx_update(struct work_struct *work);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
10236
static void bnx2x_sp_rtnl_task(struct work_struct *work)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
10238
struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
10366
static void bnx2x_period_task(struct work_struct *work)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
10368
struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
15099
static void bnx2x_ptp_task(struct work_struct *work)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
15101
struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
5685
static void bnx2x_sp_task(struct work_struct *work)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
5687
struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
3174
void bnx2x_iov_task(struct work_struct *work)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
3176
struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
547
void bnx2x_iov_task(struct work_struct *work);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
619
static inline void bnx2x_iov_task(struct work_struct *work) {}
drivers/net/ethernet/broadcom/bnxt/bnxt.c
11895
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
14610
static void bnxt_sp_task(struct work_struct *work)
drivers/net/ethernet/broadcom/bnxt/bnxt.c
14612
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
15136
static void bnxt_fw_reset_task(struct work_struct *work)
drivers/net/ethernet/broadcom/bnxt/bnxt.c
15138
struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
16296
cancel_work_sync(&cpr->dim.work);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
7484
cancel_work_sync(&bnapi->cp_ring.dim.work);
drivers/net/ethernet/broadcom/bnxt/bnxt.h
3001
void bnxt_dim_work(struct work_struct *work);
drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
14
void bnxt_dim_work(struct work_struct *work)
drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
16
struct dim *dim = container_of(work, struct dim, work);
drivers/net/ethernet/broadcom/cnic.c
1878
u32 num, int *work)
drivers/net/ethernet/broadcom/cnic.c
1891
*work = num;
drivers/net/ethernet/broadcom/cnic.c
1898
*work = num;
drivers/net/ethernet/broadcom/cnic.c
1901
*work = 2 + req2->num_additional_wqes;
drivers/net/ethernet/broadcom/cnic.c
2124
u32 num, int *work)
drivers/net/ethernet/broadcom/cnic.c
2140
*work = num;
drivers/net/ethernet/broadcom/cnic.c
2145
*work = 3;
drivers/net/ethernet/broadcom/cnic.c
2147
*work = 2;
drivers/net/ethernet/broadcom/cnic.c
2149
if (num < *work) {
drivers/net/ethernet/broadcom/cnic.c
2150
*work = num;
drivers/net/ethernet/broadcom/cnic.c
2185
kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
drivers/net/ethernet/broadcom/cnic.c
2285
u32 num, int *work)
drivers/net/ethernet/broadcom/cnic.c
2298
*work = num;
drivers/net/ethernet/broadcom/cnic.c
2305
*work = 1;
drivers/net/ethernet/broadcom/cnic.c
2309
*work = 2;
drivers/net/ethernet/broadcom/cnic.c
2337
*work = 3;
drivers/net/ethernet/broadcom/cnic.c
2342
u32 num, int *work)
drivers/net/ethernet/broadcom/cnic.c
2361
*work = num;
drivers/net/ethernet/broadcom/cnic.c
2369
*work = 4;
drivers/net/ethernet/broadcom/cnic.c
2661
int i, work, ret;
drivers/net/ethernet/broadcom/cnic.c
2671
work = 1;
drivers/net/ethernet/broadcom/cnic.c
2682
num_wqes - i, &work);
drivers/net/ethernet/broadcom/cnic.c
2692
&work);
drivers/net/ethernet/broadcom/cnic.c
2726
i += work;
drivers/net/ethernet/broadcom/cnic.c
2735
int i, work, ret;
drivers/net/ethernet/broadcom/cnic.c
2748
work = 1;
drivers/net/ethernet/broadcom/cnic.c
2753
num_wqes - i, &work);
drivers/net/ethernet/broadcom/cnic.c
2757
num_wqes - i, &work);
drivers/net/ethernet/broadcom/cnic.c
2791
i += work;
drivers/net/ethernet/broadcom/cnic.c
3017
static void cnic_service_bnx2_msix(struct work_struct *work)
drivers/net/ethernet/broadcom/cnic.c
3019
struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
drivers/net/ethernet/broadcom/cnic.c
3142
static void cnic_service_bnx2x_bh_work(struct work_struct *work)
drivers/net/ethernet/broadcom/cnic.c
3144
struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
drivers/net/ethernet/broadcom/cnic.c
4269
static void cnic_delete_task(struct work_struct *work)
drivers/net/ethernet/broadcom/cnic.c
4276
cp = container_of(work, struct cnic_local, delete_task.work);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2484
static void bcmgenet_dim_work(struct work_struct *work)
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2486
struct dim *dim = container_of(work, struct dim, work);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2670
void (*cb)(struct work_struct *work))
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2674
INIT_WORK(&dim->dim.work, cb);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2949
cancel_work_sync(&ring->dim.dim.work);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3136
static void bcmgenet_irq_task(struct work_struct *work)
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3140
work, struct bcmgenet_priv, bcmgenet_irq_work);
drivers/net/ethernet/broadcom/tg3.c
11263
static void tg3_reset_task(struct work_struct *work)
drivers/net/ethernet/broadcom/tg3.c
11265
struct tg3 *tp = container_of(work, struct tg3, reset_task);
drivers/net/ethernet/brocade/bna/bnad.c
1095
bnad_tx_cleanup(struct work_struct *work)
drivers/net/ethernet/brocade/bna/bnad.c
1098
container_of(work, struct bnad_tx_info, tx_cleanup_work.work);
drivers/net/ethernet/brocade/bna/bnad.c
1174
bnad_rx_cleanup(struct work_struct *work)
drivers/net/ethernet/brocade/bna/bnad.c
1177
container_of(work, struct bnad_rx_info, rx_cleanup_work);
drivers/net/ethernet/cadence/macb_main.c
1112
static void macb_tx_error_task(struct work_struct *work)
drivers/net/ethernet/cadence/macb_main.c
1114
struct macb_queue *queue = container_of(work, struct macb_queue,
drivers/net/ethernet/cadence/macb_main.c
1846
static void macb_hresp_error_task(struct work_struct *work)
drivers/net/ethernet/cadence/macb_main.c
1848
struct macb *bp = from_work(bp, work, hresp_err_bh_work);
drivers/net/ethernet/calxeda/xgmac.c
902
static void xgmac_tx_timeout_work(struct work_struct *work)
drivers/net/ethernet/calxeda/xgmac.c
906
container_of(work, struct xgmac_priv, tx_timeout_work);
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
509
static void cn23xx_pf_mbox_thread(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
511
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
534
schedule_delayed_work(&wk->work, msecs_to_jiffies(10));
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
578
INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
588
schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
612
&oct->mbox[q_no]->mbox_poll_wk.work);
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
815
struct delayed_work *work;
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
828
work = &oct->mbox[q_no]->mbox_poll_wk.work;
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
829
schedule_delayed_work(work,
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
263
static void cn23xx_vf_mbox_thread(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
265
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
273
cancel_delayed_work_sync(&oct->mbox[0]->mbox_poll_wk.work);
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
304
INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
482
schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
drivers/net/ethernet/cavium/liquidio/lio_core.c
1456
void lio_fetch_stats(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/lio_core.c
1458
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/lio_core.c
1519
schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);
drivers/net/ethernet/cavium/liquidio/lio_core.c
443
queue_delayed_work(wq->wq, &wq->wk.work,
drivers/net/ethernet/cavium/liquidio/lio_core.c
447
static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/lio_core.c
449
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/lio_core.c
479
INIT_DELAYED_WORK(&wq->wk.work,
drivers/net/ethernet/cavium/liquidio/lio_core.c
499
cancel_delayed_work_sync(&wq->wk.work);
drivers/net/ethernet/cavium/liquidio/lio_main.c
1104
cancel_delayed_work_sync(&oct->nic_poll_work.work);
drivers/net/ethernet/cavium/liquidio/lio_main.c
1716
static void octnet_poll_check_txq_status(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/lio_main.c
1718
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/lio_main.c
1726
&lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
drivers/net/ethernet/cavium/liquidio/lio_main.c
1744
INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
drivers/net/ethernet/cavium/liquidio/lio_main.c
1748
&lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
drivers/net/ethernet/cavium/liquidio/lio_main.c
1757
cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
drivers/net/ethernet/cavium/liquidio/lio_main.c
1810
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
drivers/net/ethernet/cavium/liquidio/lio_main.c
1812
schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
drivers/net/ethernet/cavium/liquidio/lio_main.c
1880
cancel_delayed_work_sync(&lio->stats_wk.work);
drivers/net/ethernet/cavium/liquidio/lio_main.c
3942
static void nic_starter(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/lio_main.c
3945
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/lio_main.c
3958
schedule_delayed_work(&oct->nic_poll_work.work,
drivers/net/ethernet/cavium/liquidio/lio_main.c
4120
INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
drivers/net/ethernet/cavium/liquidio/lio_main.c
4122
schedule_delayed_work(&octeon_dev->nic_poll_work.work,
drivers/net/ethernet/cavium/liquidio/lio_main.c
505
static void octnet_link_status_change(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/lio_main.c
507
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/lio_main.c
535
INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
drivers/net/ethernet/cavium/liquidio/lio_main.c
547
cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
drivers/net/ethernet/cavium/liquidio/lio_main.c
595
&lio->link_status_wq.wk.work, 0);
drivers/net/ethernet/cavium/liquidio/lio_main.c
606
static void lio_sync_octeon_time(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/lio_main.c
608
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/lio_main.c
648
&lio->sync_octeon_time_wq.wk.work,
drivers/net/ethernet/cavium/liquidio/lio_main.c
669
INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
drivers/net/ethernet/cavium/liquidio/lio_main.c
673
&lio->sync_octeon_time_wq.wk.work,
drivers/net/ethernet/cavium/liquidio/lio_main.c
693
cancel_delayed_work_sync(&time_wq->wk.work);
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
283
static void octnet_link_status_change(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
285
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
313
INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
325
cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
371
&lio->link_status_wq.wk.work, 0);
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
570
cancel_delayed_work_sync(&oct->nic_poll_work.work);
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
933
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
935
schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
993
cancel_delayed_work_sync(&lio->stats_wk.work);
drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
461
lio_vf_rep_fetch_stats(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
463
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
484
schedule_delayed_work(&vf_rep->stats_wk.work,
drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
536
INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
539
schedule_delayed_work(&vf_rep->stats_wk.work,
drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
563
(&vf_rep->stats_wk.work);
drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
593
(&vf_rep->stats_wk.work);
drivers/net/ethernet/cavium/liquidio/octeon_console.c
478
static void check_console(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/octeon_console.c
483
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/octeon_console.c
530
schedule_delayed_work(&wk->work, msecs_to_jiffies(delay));
drivers/net/ethernet/cavium/liquidio/octeon_console.c
658
struct delayed_work *work;
drivers/net/ethernet/cavium/liquidio/octeon_console.c
687
work = &oct->console_poll_work[console_num].work;
drivers/net/ethernet/cavium/liquidio/octeon_console.c
691
INIT_DELAYED_WORK(work, check_console);
drivers/net/ethernet/cavium/liquidio/octeon_console.c
695
schedule_delayed_work(work, msecs_to_jiffies(delay));
drivers/net/ethernet/cavium/liquidio/octeon_console.c
726
work);
drivers/net/ethernet/cavium/liquidio/octeon_device.h
304
struct delayed_work work;
drivers/net/ethernet/cavium/liquidio/octeon_network.h
227
void lio_fetch_stats(struct work_struct *work);
drivers/net/ethernet/cavium/liquidio/request_manager.c
148
INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
drivers/net/ethernet/cavium/liquidio/request_manager.c
151
queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
drivers/net/ethernet/cavium/liquidio/request_manager.c
161
cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
drivers/net/ethernet/cavium/liquidio/request_manager.c
38
static void check_db_timeout(struct work_struct *work);
drivers/net/ethernet/cavium/liquidio/request_manager.c
433
queue_work(cwq->wq, &cwq->wk.work.work);
drivers/net/ethernet/cavium/liquidio/request_manager.c
523
static void check_db_timeout(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/request_manager.c
525
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/request_manager.c
532
queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
drivers/net/ethernet/cavium/liquidio/response_manager.c
227
static void oct_poll_req_completion(struct work_struct *work)
drivers/net/ethernet/cavium/liquidio/response_manager.c
229
struct cavium_wk *wk = (struct cavium_wk *)work;
drivers/net/ethernet/cavium/liquidio/response_manager.c
237
queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));
drivers/net/ethernet/cavium/liquidio/response_manager.c
28
static void oct_poll_req_completion(struct work_struct *work);
drivers/net/ethernet/cavium/liquidio/response_manager.c
50
INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
drivers/net/ethernet/cavium/liquidio/response_manager.c
60
cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
drivers/net/ethernet/cavium/thunder/nic.h
271
struct work_struct work;
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1441
link_change_work.work);
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1742
static void nicvf_reset_task(struct work_struct *work)
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1746
nic = container_of(work, struct nicvf, reset_task);
drivers/net/ethernet/cavium/thunder/nicvf_main.c
2019
work);
drivers/net/ethernet/cavium/thunder/nicvf_main.c
2073
queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
drivers/net/ethernet/cavium/thunder/nicvf_main.c
2239
INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
451
void nicvf_rbdr_work(struct work_struct *work)
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
453
struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
drivers/net/ethernet/cavium/thunder/nicvf_queues.h
352
void nicvf_rbdr_work(struct work_struct *work);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
1001
lmac = container_of(work, struct lmac, dwork.work);
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
996
static void bgx_poll_for_link(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
906
static void mac_stats_task(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
910
container_of(work, struct adapter, stats_update_task.work);
drivers/net/ethernet/chelsio/cxgb/my3126.c
97
static void my3126_poll(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb/my3126.c
99
struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2716
static void t3_adap_check_task(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2718
struct adapter *adapter = container_of(work, struct adapter,
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2719
adap_check_task.work);
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2794
static void db_full_task(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2796
struct adapter *adapter = container_of(work, struct adapter,
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2802
static void db_empty_task(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2804
struct adapter *adapter = container_of(work, struct adapter,
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2810
static void db_drop_task(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2812
struct adapter *adapter = container_of(work, struct adapter,
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2833
static void ext_intr_task(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2835
struct adapter *adapter = container_of(work, struct adapter,
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2974
static void fatal_error_task(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2976
struct adapter *adapter = container_of(work, struct adapter,
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
543
static void t3_process_tid_release_list(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
545
struct t3c_data *td = container_of(work, struct t3c_data,
drivers/net/ethernet/chelsio/cxgb3/sge.c
2703
int work;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2705
work = process_responses(adap, rspq_to_qset(rq), -1);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2708
return work;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1763
static void process_tid_release_list(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1768
adap = container_of(work, struct adapter, tid_release_task);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2492
static void process_db_full(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2496
adap = container_of(work, struct adapter, db_full_task);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2565
static void process_db_drop(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2569
adap = container_of(work, struct adapter, db_drop_task);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
3908
static void notify_fatal_err(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
3912
adap = container_of(work, struct adapter, fatal_err_notify_task);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1020
static void ch_flower_stats_handler(struct work_struct *work)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1022
struct adapter *adap = container_of(work, struct adapter,
drivers/net/ethernet/cisco/enic/enic_main.c
1875
static void enic_change_mtu_work(struct work_struct *work)
drivers/net/ethernet/cisco/enic/enic_main.c
1877
struct enic *enic = container_of(work, struct enic, change_mtu_work);
drivers/net/ethernet/cisco/enic/enic_main.c
2126
static void enic_reset(struct work_struct *work)
drivers/net/ethernet/cisco/enic/enic_main.c
2128
struct enic *enic = container_of(work, struct enic, reset);
drivers/net/ethernet/cisco/enic/enic_main.c
2155
static void enic_tx_hang_reset(struct work_struct *work)
drivers/net/ethernet/cisco/enic/enic_main.c
2157
struct enic *enic = container_of(work, struct enic, tx_hang_reset);
drivers/net/ethernet/davicom/dm9051.c
905
static void dm9051_tx_delay(struct work_struct *work)
drivers/net/ethernet/davicom/dm9051.c
907
struct board_info *db = container_of(work, struct board_info, tx_work);
drivers/net/ethernet/davicom/dm9051.c
919
static void dm9051_rxctl_delay(struct work_struct *work)
drivers/net/ethernet/davicom/dm9051.c
921
struct board_info *db = container_of(work, struct board_info, rxctrl_work);
drivers/net/ethernet/dec/tulip/21142.c
28
void t21142_media_task(struct work_struct *work)
drivers/net/ethernet/dec/tulip/21142.c
31
container_of(work, struct tulip_private, media_work);
drivers/net/ethernet/dec/tulip/timer.c
17
void tulip_media_task(struct work_struct *work)
drivers/net/ethernet/dec/tulip/timer.c
20
container_of(work, struct tulip_private, media_work);
drivers/net/ethernet/dec/tulip/tulip.h
472
void t21142_media_task(struct work_struct *work);
drivers/net/ethernet/dec/tulip/tulip.h
509
void tulip_media_task(struct work_struct *work);
drivers/net/ethernet/emulex/benet/be.h
596
struct delayed_work work;
drivers/net/ethernet/emulex/benet/be.h
677
struct work_struct work;
drivers/net/ethernet/emulex/benet/be_main.c
1847
static void be_work_set_rx_mode(struct work_struct *work)
drivers/net/ethernet/emulex/benet/be_main.c
1850
container_of(work, struct be_cmd_work, work);
drivers/net/ethernet/emulex/benet/be_main.c
3301
int max_work = 0, work, i, num_evts;
drivers/net/ethernet/emulex/benet/be_main.c
3316
work = be_process_rx(rxo, napi, budget);
drivers/net/ethernet/emulex/benet/be_main.c
3317
max_work = max(work, max_work);
drivers/net/ethernet/emulex/benet/be_main.c
3956
cancel_delayed_work_sync(&adapter->work);
drivers/net/ethernet/emulex/benet/be_main.c
4617
queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
drivers/net/ethernet/emulex/benet/be_main.c
5046
struct be_cmd_work *work;
drivers/net/ethernet/emulex/benet/be_main.c
5048
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/net/ethernet/emulex/benet/be_main.c
5049
if (!work) {
drivers/net/ethernet/emulex/benet/be_main.c
5055
INIT_WORK(&work->work, func);
drivers/net/ethernet/emulex/benet/be_main.c
5056
work->adapter = adapter;
drivers/net/ethernet/emulex/benet/be_main.c
5057
return work;
drivers/net/ethernet/emulex/benet/be_main.c
5146
struct be_cmd_work *work;
drivers/net/ethernet/emulex/benet/be_main.c
5148
work = be_alloc_work(adapter, be_work_set_rx_mode);
drivers/net/ethernet/emulex/benet/be_main.c
5149
if (work)
drivers/net/ethernet/emulex/benet/be_main.c
5150
queue_work(be_wq, &work->work);
drivers/net/ethernet/emulex/benet/be_main.c
5411
static void be_err_detection_task(struct work_struct *work)
drivers/net/ethernet/emulex/benet/be_main.c
5414
container_of(work, struct be_error_recovery,
drivers/net/ethernet/emulex/benet/be_main.c
5415
err_detection_work.work);
drivers/net/ethernet/emulex/benet/be_main.c
5483
static void be_worker(struct work_struct *work)
drivers/net/ethernet/emulex/benet/be_main.c
5486
container_of(work, struct be_adapter, work.work);
drivers/net/ethernet/emulex/benet/be_main.c
5529
queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
drivers/net/ethernet/emulex/benet/be_main.c
5676
INIT_DELAYED_WORK(&adapter->work, be_worker);
drivers/net/ethernet/emulex/benet/be_main.c
5932
cancel_delayed_work_sync(&adapter->work);
drivers/net/ethernet/faraday/ftgmac100.c
1428
static void ftgmac100_reset_task(struct work_struct *work)
drivers/net/ethernet/faraday/ftgmac100.c
1430
struct ftgmac100 *priv = container_of(work, struct ftgmac100,
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1520
static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
1522
struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2273
struct work_struct work;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2279
static void dpaa2_switch_event_work(struct work_struct *work)
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2282
container_of(work, struct ethsw_switchdev_event_work, work);
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2342
INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2366
queue_work(ethsw->workqueue, &switchdev_work->work);
drivers/net/ethernet/freescale/enetc/enetc.c
1100
struct dim *dim = container_of(w, struct dim, work);
drivers/net/ethernet/freescale/enetc/enetc.c
2919
static void enetc_tx_onestep_tstamp(struct work_struct *work)
drivers/net/ethernet/freescale/enetc/enetc.c
2924
priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
drivers/net/ethernet/freescale/enetc/enetc.c
3498
INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
drivers/net/ethernet/freescale/enetc/enetc.c
3535
cancel_work_sync(&v->rx_dim.work);
drivers/net/ethernet/freescale/enetc/enetc4_pf.c
497
static void enetc4_psi_do_set_rx_mode(struct work_struct *work)
drivers/net/ethernet/freescale/enetc/enetc4_pf.c
499
struct enetc_si *si = container_of(work, struct enetc_si, rx_mode_task);
drivers/net/ethernet/freescale/enetc/enetc_msg.c
31
static void enetc_msg_task(struct work_struct *work)
drivers/net/ethernet/freescale/enetc/enetc_msg.c
33
struct enetc_pf *pf = container_of(work, struct enetc_pf, msg_task);
drivers/net/ethernet/freescale/fec_main.c
1449
static void fec_enet_timeout_work(struct work_struct *work)
drivers/net/ethernet/freescale/fec_main.c
1452
container_of(work, struct fec_enet_private, tx_timeout_work);
drivers/net/ethernet/freescale/fec_ptp.c
678
static void fec_time_keep(struct work_struct *work)
drivers/net/ethernet/freescale/fec_ptp.c
680
struct delayed_work *dwork = to_delayed_work(work);
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
583
static void fs_timeout_work(struct work_struct *work)
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
585
struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
drivers/net/ethernet/freescale/gianfar.c
2035
static void gfar_reset_task(struct work_struct *work)
drivers/net/ethernet/freescale/gianfar.c
2037
struct gfar_private *priv = container_of(work, struct gfar_private,
drivers/net/ethernet/freescale/ucc_geth.c
3238
static void ucc_geth_timeout_work(struct work_struct *work)
drivers/net/ethernet/freescale/ucc_geth.c
3243
ugeth = container_of(work, struct ucc_geth_private, timeout_work);
drivers/net/ethernet/fungible/funcore/fun_dev.c
672
static void fun_serv_handler(struct work_struct *work)
drivers/net/ethernet/fungible/funcore/fun_dev.c
674
struct fun_dev *fd = container_of(work, struct fun_dev, service_task);
drivers/net/ethernet/google/gve/gve_main.c
2324
static void gve_service_task(struct work_struct *work)
drivers/net/ethernet/google/gve/gve_main.c
2326
struct gve_priv *priv = container_of(work, struct gve_priv,
drivers/net/ethernet/google/gve/gve_main.c
251
static void gve_stats_report_task(struct work_struct *work)
drivers/net/ethernet/google/gve/gve_main.c
253
struct gve_priv *priv = container_of(work, struct gve_priv,
drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
284
static void hbg_service_task(struct work_struct *work)
drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
286
struct hbg_priv *priv = container_of(work, struct hbg_priv,
drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
287
service_task.work);
drivers/net/ethernet/hisilicon/hip04_eth.c
789
static void hip04_tx_timeout_task(struct work_struct *work)
drivers/net/ethernet/hisilicon/hip04_eth.c
793
priv = container_of(work, struct hip04_priv, tx_timeout_task);
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
888
static void hix5hd2_tx_timeout_task(struct work_struct *work)
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
892
priv = container_of(work, struct hix5hd2_priv, tx_timeout_task);
drivers/net/ethernet/hisilicon/hns/hns_enet.c
2054
static void hns_nic_service_task(struct work_struct *work)
drivers/net/ethernet/hisilicon/hns/hns_enet.c
2057
= container_of(work, struct hns_nic_priv, service_task);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4674
static void hns3_rx_dim_work(struct work_struct *work)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4676
struct dim *dim = container_of(work, struct dim, work);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4694
static void hns3_tx_dim_work(struct work_struct *work)
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4696
struct dim *dim = container_of(work, struct dim, work);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4716
INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4717
INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
488
cancel_work_sync(&tqp_vector->rx_group.dim.work);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
489
cancel_work_sync(&tqp_vector->tx_group.dim.work);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
11502
if (hdev->service_task.work.func)
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
4721
static void hclge_service_task(struct work_struct *work)
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
4724
container_of(work, struct hclge_dev, service_task.work);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1984
static void hclgevf_service_task(struct work_struct *work)
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1986
struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1987
service_task.work);
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2348
if (hdev->service_task.work.func)
drivers/net/ethernet/huawei/hinic/hinic_dev.h
38
struct work_struct work;
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
361
static void eq_irq_work(struct work_struct *work)
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
363
struct hinic_eq_work *aeq_work = work_to_aeq_work(work);
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
399
queue_work(aeqs->workq, &aeq_work->work);
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
783
INIT_WORK(&aeq_work->work, eq_irq_work);
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
831
cancel_work_sync(&aeq_work->work);
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
85
#define work_to_aeq_work(work) \
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
86
container_of(work, struct hinic_eq_work, work)
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
993
q_id, ci, pi, work_busy(&eq->aeq_work.work),
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
169
struct work_struct work;
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
409
static void recv_func_mbox_work_handler(struct work_struct *work)
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
412
container_of(work, struct hinic_mbox_work, work);
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
499
INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
500
queue_work(func_to_func->workq, &mbox_work->work);
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
551
static void update_random_id_work_handler(struct work_struct *work)
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
554
container_of(work, struct hinic_mbox_work, work);
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
613
INIT_WORK(&mbox_work->work, update_random_id_work_handler);
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
614
queue_work(func_to_func->workq, &mbox_work->work);
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
118
struct work_struct work;
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
387
static void recv_mgmt_msg_work_handler(struct work_struct *work)
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
390
container_of(work, struct hinic_mgmt_msg_handle_work, work);
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
464
INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
465
queue_work(pf_to_mgmt->workq, &mgmt_work->work);
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
148
struct work_struct work;
drivers/net/ethernet/huawei/hinic/hinic_main.c
1272
INIT_WORK(&rx_mode_work->work, set_rx_mode);
drivers/net/ethernet/huawei/hinic/hinic_main.c
1337
cancel_work_sync(&rx_mode_work->work);
drivers/net/ethernet/huawei/hinic/hinic_main.c
1446
cancel_work_sync(&rx_mode_work->work);
drivers/net/ethernet/huawei/hinic/hinic_main.c
65
#define work_to_rx_mode_work(work) \
drivers/net/ethernet/huawei/hinic/hinic_main.c
66
container_of(work, struct hinic_rx_mode_work, work)
drivers/net/ethernet/huawei/hinic/hinic_main.c
780
static void set_rx_mode(struct work_struct *work)
drivers/net/ethernet/huawei/hinic/hinic_main.c
782
struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
drivers/net/ethernet/huawei/hinic/hinic_main.c
812
queue_work(nic_dev->workq, &rx_mode_work->work);
drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
322
static void aeq_irq_work(struct work_struct *work)
drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
324
struct hinic3_eq *eq = container_of(work, struct hinic3_eq, aeq_work);
drivers/net/ethernet/huawei/hinic3/hinic3_filter.c
384
void hinic3_set_rx_mode_work(struct work_struct *work)
drivers/net/ethernet/huawei/hinic3/hinic3_filter.c
390
nic_dev = container_of(work, struct hinic3_nic_dev, rx_mode_work);
drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
482
static void hinic3_auto_sync_time_work(struct work_struct *work)
drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
484
struct delayed_work *delay = to_delayed_work(work);
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
181
static void hinic3_rx_dim_work(struct work_struct *work)
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
187
dim = container_of(work, struct dim, work);
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
231
INIT_WORK(&irq_cfg->rxq->dim.work, hinic3_rx_dim_work);
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
264
disable_work_sync(&irq_cfg->rxq->dim.work);
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
289
disable_work_sync(&irq_cfg->rxq->dim.work);
drivers/net/ethernet/huawei/hinic3/hinic3_main.c
119
static void hinic3_periodic_work_handler(struct work_struct *work)
drivers/net/ethernet/huawei/hinic3/hinic3_main.c
121
struct delayed_work *delay = to_delayed_work(work);
drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
145
INIT_WORK(&mgmt_work->work, hinic3_recv_mgmt_msg_work_handler);
drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
146
queue_work(pf_to_mgmt->workq, &mgmt_work->work);
drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
48
static void hinic3_recv_mgmt_msg_work_handler(struct work_struct *work)
drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
54
mgmt_work = container_of(work, struct mgmt_msg_handle_work, work);
drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
52
struct work_struct work;
drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
148
void hinic3_set_rx_mode_work(struct work_struct *work);
drivers/net/ethernet/ibm/ehea/ehea_main.c
2682
static void ehea_reset_port(struct work_struct *work)
drivers/net/ethernet/ibm/ehea/ehea_main.c
2686
container_of(work, struct ehea_port, reset_task);
drivers/net/ethernet/ibm/ehea/ehea_main.c
347
static void ehea_update_stats(struct work_struct *work)
drivers/net/ethernet/ibm/ehea/ehea_main.c
350
container_of(work, struct ehea_port, stats_work.work);
drivers/net/ethernet/ibm/emac/core.c
1313
static void emac_link_timer(struct work_struct *work)
drivers/net/ethernet/ibm/emac/core.c
1316
container_of(to_delayed_work(work),
drivers/net/ethernet/ibm/emac/core.c
757
static void emac_reset_work(struct work_struct *work)
drivers/net/ethernet/ibm/emac/core.c
759
struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
drivers/net/ethernet/ibm/ibmveth.c
1556
schedule_work(&adapter->work);
drivers/net/ethernet/ibm/ibmveth.c
1821
INIT_WORK(&adapter->work, ibmveth_reset);
drivers/net/ethernet/ibm/ibmveth.c
1927
cancel_work_sync(&adapter->work);
drivers/net/ethernet/ibm/ibmveth.c
2190
INIT_WORK(&adapter->work, ibmveth_reset_kunit);
drivers/net/ethernet/ibm/ibmveth.c
2215
flush_work(&adapter->work);
drivers/net/ethernet/ibm/ibmveth.c
2237
INIT_WORK(&adapter->work, ibmveth_reset_kunit);
drivers/net/ethernet/ibm/ibmveth.c
2265
flush_work(&adapter->work);
drivers/net/ethernet/ibm/ibmveth.c
243
schedule_work(&adapter->work);
drivers/net/ethernet/ibm/ibmveth.c
467
schedule_work(&adapter->work);
drivers/net/ethernet/ibm/ibmveth.c
473
schedule_work(&adapter->work);
drivers/net/ethernet/ibm/ibmveth.c
516
schedule_work(&adapter->work);
drivers/net/ethernet/ibm/ibmveth.c
837
struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
drivers/net/ethernet/ibm/ibmveth.h
160
struct work_struct work;
drivers/net/ethernet/ibm/ibmvnic.c
3203
static void __ibmvnic_reset(struct work_struct *work)
drivers/net/ethernet/ibm/ibmvnic.c
3217
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
drivers/net/ethernet/ibm/ibmvnic.c
3389
static void __ibmvnic_delayed_reset(struct work_struct *work)
drivers/net/ethernet/ibm/ibmvnic.c
3393
adapter = container_of(work, struct ibmvnic_adapter,
drivers/net/ethernet/ibm/ibmvnic.c
3394
ibmvnic_delayed_reset.work);
drivers/net/ethernet/intel/e100.c
2326
static void e100_tx_timeout_task(struct work_struct *work)
drivers/net/ethernet/intel/e100.c
2328
struct nic *nic = container_of(work, struct nic, tx_timeout_task);
drivers/net/ethernet/intel/e1000/e1000_main.c
102
static void e1000_update_phy_info_task(struct work_struct *work);
drivers/net/ethernet/intel/e1000/e1000_main.c
103
static void e1000_watchdog(struct work_struct *work);
drivers/net/ethernet/intel/e1000/e1000_main.c
104
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
drivers/net/ethernet/intel/e1000/e1000_main.c
136
static void e1000_reset_task(struct work_struct *work);
drivers/net/ethernet/intel/e1000/e1000_main.c
2342
static void e1000_update_phy_info_task(struct work_struct *work)
drivers/net/ethernet/intel/e1000/e1000_main.c
2344
struct e1000_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/e1000/e1000_main.c
2346
phy_info_task.work);
drivers/net/ethernet/intel/e1000/e1000_main.c
2355
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
drivers/net/ethernet/intel/e1000/e1000_main.c
2357
struct e1000_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/e1000/e1000_main.c
2359
fifo_stall_task.work);
drivers/net/ethernet/intel/e1000/e1000_main.c
2427
static void e1000_watchdog(struct work_struct *work)
drivers/net/ethernet/intel/e1000/e1000_main.c
2429
struct e1000_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/e1000/e1000_main.c
2431
watchdog_task.work);
drivers/net/ethernet/intel/e1000/e1000_main.c
3503
static void e1000_reset_task(struct work_struct *work)
drivers/net/ethernet/intel/e1000/e1000_main.c
3506
container_of(work, struct e1000_adapter, reset_task);
drivers/net/ethernet/intel/e1000e/netdev.c
1088
static void e1000_print_hw_hang(struct work_struct *work)
drivers/net/ethernet/intel/e1000e/netdev.c
1090
struct e1000_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/e1000e/netdev.c
1173
static void e1000e_tx_hwtstamp_work(struct work_struct *work)
drivers/net/ethernet/intel/e1000e/netdev.c
1175
struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
drivers/net/ethernet/intel/e1000e/netdev.c
1734
static void e1000e_downshift_workaround(struct work_struct *work)
drivers/net/ethernet/intel/e1000e/netdev.c
1736
struct e1000_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/e1000e/netdev.c
4836
static void e1000e_update_phy_task(struct work_struct *work)
drivers/net/ethernet/intel/e1000e/netdev.c
4838
struct e1000_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/e1000e/netdev.c
5208
static void e1000_watchdog_task(struct work_struct *work)
drivers/net/ethernet/intel/e1000e/netdev.c
5210
struct e1000_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/e1000e/netdev.c
5967
static void e1000_reset_task(struct work_struct *work)
drivers/net/ethernet/intel/e1000e/netdev.c
5970
adapter = container_of(work, struct e1000_adapter, reset_task);
drivers/net/ethernet/intel/e1000e/ptp.c
226
static void e1000e_systim_overflow_work(struct work_struct *work)
drivers/net/ethernet/intel/e1000e/ptp.c
228
struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
drivers/net/ethernet/intel/e1000e/ptp.c
229
systim_overflow_work.work);
drivers/net/ethernet/intel/fm10k/fm10k_main.c
1446
int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
drivers/net/ethernet/intel/fm10k/fm10k_main.c
1448
work_done += work;
drivers/net/ethernet/intel/fm10k/fm10k_main.c
1449
if (work >= per_ring_budget)
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
733
static void fm10k_service_task(struct work_struct *work)
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
737
interface = container_of(work, struct fm10k_intfc, service_task);
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
765
static void fm10k_macvlan_task(struct work_struct *work)
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
774
dwork = to_delayed_work(work);
drivers/net/ethernet/intel/i40e/i40e_main.c
11332
static void i40e_service_task(struct work_struct *work)
drivers/net/ethernet/intel/i40e/i40e_main.c
11334
struct i40e_pf *pf = container_of(work,
drivers/net/ethernet/intel/i40e/i40e_ptp.c
150
static void i40e_ptp_extts0_work(struct work_struct *work)
drivers/net/ethernet/intel/i40e/i40e_ptp.c
152
struct i40e_pf *pf = container_of(work, struct i40e_pf,
drivers/net/ethernet/intel/iavf/iavf_main.c
1967
static void iavf_finish_config(struct work_struct *work)
drivers/net/ethernet/intel/iavf/iavf_main.c
1973
adapter = container_of(work, struct iavf_adapter, finish_config);
drivers/net/ethernet/intel/iavf/iavf_main.c
2981
static void iavf_watchdog_task(struct work_struct *work)
drivers/net/ethernet/intel/iavf/iavf_main.c
2983
struct iavf_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/iavf/iavf_main.c
2985
watchdog_task.work);
drivers/net/ethernet/intel/iavf/iavf_main.c
3323
static void iavf_reset_task(struct work_struct *work)
drivers/net/ethernet/intel/iavf/iavf_main.c
3325
struct iavf_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/iavf/iavf_main.c
3339
static void iavf_adminq_task(struct work_struct *work)
drivers/net/ethernet/intel/iavf/iavf_main.c
3342
container_of(work, struct iavf_adapter, adminq_task);
drivers/net/ethernet/intel/ice/ice_dpll.c
2826
static void ice_dpll_periodic_work(struct kthread_work *work)
drivers/net/ethernet/intel/ice/ice_dpll.c
2828
struct ice_dplls *d = container_of(work, struct ice_dplls, work.work);
drivers/net/ethernet/intel/ice/ice_dpll.c
2865
kthread_queue_delayed_work(d->kworker, &d->work,
drivers/net/ethernet/intel/ice/ice_dpll.c
3202
static void ice_dpll_pin_notify_work(struct work_struct *work)
drivers/net/ethernet/intel/ice/ice_dpll.c
3204
struct ice_dpll_pin_work *w = container_of(work,
drivers/net/ethernet/intel/ice/ice_dpll.c
3206
work);
drivers/net/ethernet/intel/ice/ice_dpll.c
3270
struct ice_dpll_pin_work *work;
drivers/net/ethernet/intel/ice/ice_dpll.c
3279
work = kzalloc_obj(*work);
drivers/net/ethernet/intel/ice/ice_dpll.c
3280
if (!work)
drivers/net/ethernet/intel/ice/ice_dpll.c
3283
INIT_WORK(&work->work, ice_dpll_pin_notify_work);
drivers/net/ethernet/intel/ice/ice_dpll.c
3284
work->action = action;
drivers/net/ethernet/intel/ice/ice_dpll.c
3285
work->pin = pin;
drivers/net/ethernet/intel/ice/ice_dpll.c
3287
queue_work(pin->pf->dplls.wq, &work->work);
drivers/net/ethernet/intel/ice/ice_dpll.c
3758
kthread_cancel_delayed_work_sync(&d->work);
drivers/net/ethernet/intel/ice/ice_dpll.c
3778
kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
drivers/net/ethernet/intel/ice/ice_dpll.c
3785
kthread_queue_delayed_work(d->kworker, &d->work, 0);
drivers/net/ethernet/intel/ice/ice_dpll.h
128
struct kthread_delayed_work work;
drivers/net/ethernet/intel/ice/ice_dpll.h
24
struct work_struct work;
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
1276
static void ice_eswitch_br_update_work(struct work_struct *work)
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
1280
br_offloads = ice_work_to_br_offloads(work);
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
475
ice_eswitch_br_fdb_event_work(struct work_struct *work)
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
477
struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work);
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
513
struct ice_esw_br_fdb_work *work;
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
516
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
517
if (!work)
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
520
INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work);
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
521
memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
525
kfree(work);
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
530
work->fdb_info.addr = mac;
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
531
work->event = event;
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
532
work->dev = dev;
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
534
return work;
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
545
struct ice_esw_br_fdb_work *work;
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
570
work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event);
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
571
if (IS_ERR(work)) {
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
573
return notifier_from_errno(PTR_ERR(work));
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
577
queue_work(br_offloads->wq, &work->work);
drivers/net/ethernet/intel/ice/ice_eswitch_br.h
100
update_work.work)
drivers/net/ethernet/intel/ice/ice_eswitch_br.h
105
work)
drivers/net/ethernet/intel/ice/ice_eswitch_br.h
81
struct work_struct work;
drivers/net/ethernet/intel/ice/ice_gnss.c
84
static void ice_gnss_read(struct kthread_work *work)
drivers/net/ethernet/intel/ice/ice_gnss.c
86
struct gnss_serial *gnss = container_of(work, struct gnss_serial,
drivers/net/ethernet/intel/ice/ice_gnss.c
87
read_work.work);
drivers/net/ethernet/intel/ice/ice_lag.c
2209
static void ice_lag_process_event(struct work_struct *work)
drivers/net/ethernet/intel/ice/ice_lag.c
2217
lag_work = container_of(work, struct ice_lag_work, lag_task);
drivers/net/ethernet/intel/ice/ice_main.c
2349
static void ice_service_task_recovery_mode(struct work_struct *work)
drivers/net/ethernet/intel/ice/ice_main.c
2351
struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
drivers/net/ethernet/intel/ice/ice_main.c
2365
static void ice_service_task(struct work_struct *work)
drivers/net/ethernet/intel/ice/ice_main.c
2367
struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
drivers/net/ethernet/intel/ice/ice_main.c
6668
static void ice_tx_dim_work(struct work_struct *work)
drivers/net/ethernet/intel/ice/ice_main.c
6674
dim = container_of(work, struct dim, work);
drivers/net/ethernet/intel/ice/ice_main.c
6688
static void ice_rx_dim_work(struct work_struct *work)
drivers/net/ethernet/intel/ice/ice_main.c
6694
dim = container_of(work, struct dim, work);
drivers/net/ethernet/intel/ice/ice_main.c
6726
INIT_WORK(&rc->dim.work, ice_tx_dim_work);
drivers/net/ethernet/intel/ice/ice_main.c
6737
INIT_WORK(&rc->dim.work, ice_rx_dim_work);
drivers/net/ethernet/intel/ice/ice_main.c
7268
cancel_work_sync(&q_vector->tx.dim.work);
drivers/net/ethernet/intel/ice/ice_main.c
7269
cancel_work_sync(&q_vector->rx.dim.work);
drivers/net/ethernet/intel/ice/ice_ptp.c
1137
static void ice_ptp_wait_for_offsets(struct kthread_work *work)
drivers/net/ethernet/intel/ice/ice_ptp.c
1145
port = container_of(work, struct ice_ptp_port, ov_work.work);
drivers/net/ethernet/intel/ice/ice_ptp.c
2856
static void ice_ptp_periodic_work(struct kthread_work *work)
drivers/net/ethernet/intel/ice/ice_ptp.c
2858
struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
drivers/net/ethernet/intel/ice/ice_ptp.c
2870
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
drivers/net/ethernet/intel/ice/ice_ptp.c
2885
kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
drivers/net/ethernet/intel/ice/ice_ptp.c
2938
kthread_cancel_delayed_work_sync(&ptp->work);
drivers/net/ethernet/intel/ice/ice_ptp.c
3205
kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
drivers/net/ethernet/intel/ice/ice_ptp.c
3218
kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
drivers/net/ethernet/intel/ice/ice_ptp.c
3393
kthread_cancel_delayed_work_sync(&pf->ptp.work);
drivers/net/ethernet/intel/ice/ice_ptp.c
998
kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
drivers/net/ethernet/intel/ice/ice_ptp.h
252
struct kthread_delayed_work work;
drivers/net/ethernet/intel/idpf/idpf.h
1043
void idpf_statistics_task(struct work_struct *work);
drivers/net/ethernet/intel/idpf/idpf.h
1044
void idpf_init_task(struct work_struct *work);
drivers/net/ethernet/intel/idpf/idpf.h
1045
void idpf_service_task(struct work_struct *work);
drivers/net/ethernet/intel/idpf/idpf.h
1046
void idpf_mbx_task(struct work_struct *work);
drivers/net/ethernet/intel/idpf/idpf.h
1047
void idpf_vc_event_task(struct work_struct *work);
drivers/net/ethernet/intel/idpf/idpf_lib.c
1351
void idpf_statistics_task(struct work_struct *work)
drivers/net/ethernet/intel/idpf/idpf_lib.c
1356
adapter = container_of(work, struct idpf_adapter, stats_task.work);
drivers/net/ethernet/intel/idpf/idpf_lib.c
1374
void idpf_mbx_task(struct work_struct *work)
drivers/net/ethernet/intel/idpf/idpf_lib.c
1378
adapter = container_of(work, struct idpf_adapter, mbx_task.work);
drivers/net/ethernet/intel/idpf/idpf_lib.c
1394
void idpf_service_task(struct work_struct *work)
drivers/net/ethernet/intel/idpf/idpf_lib.c
1398
adapter = container_of(work, struct idpf_adapter, serv_task.work);
drivers/net/ethernet/intel/idpf/idpf_lib.c
1645
void idpf_init_task(struct work_struct *work)
drivers/net/ethernet/intel/idpf/idpf_lib.c
1656
adapter = container_of(work, struct idpf_adapter, init_task.work);
drivers/net/ethernet/intel/idpf/idpf_lib.c
1972
void idpf_vc_event_task(struct work_struct *work)
drivers/net/ethernet/intel/idpf/idpf_lib.c
1976
adapter = container_of(work, struct idpf_adapter, vc_event_task.work);
drivers/net/ethernet/intel/idpf/idpf_ptp.c
754
void idpf_tstamp_task(struct work_struct *work)
drivers/net/ethernet/intel/idpf/idpf_ptp.c
758
vport = container_of(work, struct idpf_vport, tstamp_task);
drivers/net/ethernet/intel/idpf/idpf_ptp.h
288
void idpf_tstamp_task(struct work_struct *work);
drivers/net/ethernet/intel/idpf/idpf_ptp.h
377
static inline void idpf_tstamp_task(struct work_struct *work) { }
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4169
static void idpf_tx_dim_work(struct work_struct *work)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4176
dim = container_of(work, struct dim, work);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4195
static void idpf_rx_dim_work(struct work_struct *work)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4202
dim = container_of(work, struct dim, work);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4223
INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4227
INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
drivers/net/ethernet/intel/idpf/xdp.c
150
static void idpf_xdp_tx_timer(struct work_struct *work);
drivers/net/ethernet/intel/idpf/xsk.c
10
static void idpf_xsk_tx_timer(struct work_struct *work);
drivers/net/ethernet/intel/igb/igb_main.c
5563
static void igb_watchdog_task(struct work_struct *work)
drivers/net/ethernet/intel/igb/igb_main.c
5565
struct igb_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/igb/igb_main.c
6664
static void igb_reset_task(struct work_struct *work)
drivers/net/ethernet/intel/igb/igb_main.c
6667
adapter = container_of(work, struct igb_adapter, reset_task);
drivers/net/ethernet/intel/igb/igb_ptp.c
806
static void igb_ptp_tx_work(struct work_struct *work)
drivers/net/ethernet/intel/igb/igb_ptp.c
808
struct igb_adapter *adapter = container_of(work, struct igb_adapter,
drivers/net/ethernet/intel/igb/igb_ptp.c
838
static void igb_ptp_overflow_check(struct work_struct *work)
drivers/net/ethernet/intel/igb/igb_ptp.c
841
container_of(work, struct igb_adapter, ptp_overflow_work.work);
drivers/net/ethernet/intel/igbvf/netdev.c
1901
static void igbvf_watchdog_task(struct work_struct *work)
drivers/net/ethernet/intel/igbvf/netdev.c
1903
struct igbvf_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/igbvf/netdev.c
2371
static void igbvf_reset_task(struct work_struct *work)
drivers/net/ethernet/intel/igbvf/netdev.c
2375
adapter = container_of(work, struct igbvf_adapter, reset_task);
drivers/net/ethernet/intel/igc/igc_main.c
5395
static void igc_reset_task(struct work_struct *work)
drivers/net/ethernet/intel/igc/igc_main.c
5399
adapter = container_of(work, struct igc_adapter, reset_task);
drivers/net/ethernet/intel/igc/igc_main.c
5832
static void igc_watchdog_task(struct work_struct *work)
drivers/net/ethernet/intel/igc/igc_main.c
5834
struct igc_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
8634
static void ixgbe_recovery_service_task(struct work_struct *work)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
8636
struct ixgbe_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
8650
static void ixgbe_service_task(struct work_struct *work)
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
8652
struct ixgbe_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
839
static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
841
struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter,
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
3374
static void ixgbevf_service_task(struct work_struct *work)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
3376
struct ixgbevf_adapter *adapter = container_of(work,
drivers/net/ethernet/intel/libeth/xdp.c
73
void (*poll)(struct work_struct *work))
drivers/net/ethernet/jme.c
1261
static void jme_link_change_work(struct work_struct *work)
drivers/net/ethernet/jme.c
1263
struct jme_adapter *jme = container_of(work, struct jme_adapter, linkch_task);
drivers/net/ethernet/korina.c
1149
static void korina_restart_task(struct work_struct *work)
drivers/net/ethernet/korina.c
1151
struct korina_private *lp = container_of(work,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2119
static void mvpp2_gather_hw_statistics(struct work_struct *work)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2121
struct delayed_work *del_work = to_delayed_work(work);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2147
mvpp2_gather_hw_statistics(&port->stats_work.work);
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
399
schedule_work(&oct->mbox[vf_mbox_queue]->wk.work);
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
450
schedule_work(&oct->mbox[vf_mbox_queue]->wk.work);
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1038
static void octep_tx_timeout_task(struct work_struct *work)
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1040
struct octep_device *oct = container_of(work, struct octep_device,
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1215
static void octep_intr_poll_task(struct work_struct *work)
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1217
struct octep_device *oct = container_of(work, struct octep_device,
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1218
intr_poll_task.work);
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1239
static void octep_hb_timeout_task(struct work_struct *work)
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1241
struct octep_device *oct = container_of(work, struct octep_device,
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1242
hb_task.work);
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1268
static void octep_ctrl_mbox_task(struct work_struct *work)
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1270
struct octep_device *oct = container_of(work, struct octep_device,
drivers/net/ethernet/marvell/octeon_ep/octep_main.h
125
struct work_struct work;
drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
280
INIT_WORK(&oct->mbox[ring]->wk.work, octep_pfvf_mbox_work);
drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
292
cancel_work_sync(&oct->mbox[ring]->wk.work);
drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
311
if (work_pending(&oct->mbox[ring]->wk.work))
drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
312
cancel_work_sync(&oct->mbox[ring]->wk.work);
drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
410
void octep_pfvf_mbox_work(struct work_struct *work)
drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
412
struct octep_pfvf_mbox_wk *wk = container_of(work, struct octep_pfvf_mbox_wk, work);
drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
165
void octep_pfvf_mbox_work(struct work_struct *work);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
264
schedule_work(&oct->mbox->wk.work);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
307
schedule_work(&oct->mbox->wk.work);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
826
static void octep_vf_tx_timeout_task(struct work_struct *work)
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
828
struct octep_vf_device *oct = container_of(work, struct octep_vf_device,
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
337
void octep_vf_mbox_work(struct work_struct *work);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
90
struct work_struct work;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
34
INIT_WORK(&oct->mbox->wk.work, octep_vf_mbox_work);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
44
if (work_pending(&oct->mbox->wk.work))
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
45
cancel_work_sync(&oct->mbox->wk.work);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
77
void octep_vf_mbox_work(struct work_struct *work)
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
79
struct octep_vf_mbox_wk *wk = container_of(work, struct octep_vf_mbox_wk, work);
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
1612
static void cgx_lmac_linkup_work(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
1614
struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
153
static void mcs_intr_handler_task(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
155
struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2329
static inline void rvu_afpf_mbox_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2331
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2339
static inline void rvu_afvf_mbox_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2341
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2414
static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2416
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2421
static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2423
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2616
INIT_WORK(&mwork->work, mbox_handler);
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2620
INIT_WORK(&mwork->work, mbox_up_handler);
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2693
queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2701
queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2863
static void rvu_flr_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2865
struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2913
queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
2936
queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3297
INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
117
struct work_struct work;
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
281
static void cgx_evhandler_task(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
283
struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
116
static void rvu_nix_err_work(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
120
rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
151
static void rvu_nix_ras_work(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
155
rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
46
static void rvu_nix_intr_work(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
50
rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
601
static void rvu_npa_intr_work(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
605
rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
636
static void rvu_npa_gen_work(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
640
rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
671
static void rvu_npa_err_work(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
675
rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
705
static void rvu_npa_ras_work(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
709
rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
81
static void rvu_nix_gen_work(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
85
rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
69
static void rvu_rep_wq_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
71
struct rvu *rvu = container_of(work, struct rvu, rep_evt_work);
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
752
static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
754
struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
1154
static void otx2_pool_refill_task(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
1161
wrk = container_of(work, struct refill_work, pool_refill_work.work);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
297
struct work_struct work;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
1005
static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
1007
struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
115
static void otx2_flr_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
117
struct flr_work *flrwork = container_of(work, struct flr_work, work);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
1534
struct work_struct *work;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
1539
work = &cq_poll->dim.work;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
1540
if (work->func)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
1541
cancel_work_sync(work);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
165
queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
1899
dim = container_of(w, struct dim, work);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
2015
INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
2339
static void otx2_rx_mode_wrk_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
2341
struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
2367
static void otx2_reset_task(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
2369
struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
293
INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
3345
static void otx2_vf_link_event_task(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
3354
config = container_of(work, struct otx2_vf_config,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
3355
link_event_work.work);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
452
static void otx2_pfvf_mbox_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
46
static void otx2_vf_link_event_task(struct work_struct *work);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
461
vf_mbox = container_of(work, struct mbox, mbox_wrk);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
496
static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
498
struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
843
static void otx2_pfaf_mbox_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
854
af_mbox = container_of(work, struct mbox, mbox_wrk);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
294
static void otx2_ptp_extts_check(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
296
struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
297
extts_work.work);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
322
static void otx2_sync_tstamp(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
324
struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
325
synctstamp_work.work);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
613
struct refill_work *work;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
617
work = &pfvf->refill_wrk[cq->cq_idx];
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
618
dwork = &work->pool_refill_work;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
621
work->napi = napi;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
151
static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
162
vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
458
static void otx2vf_do_set_rx_mode(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
460
struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
505
static void otx2vf_reset_task(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
507
struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
86
static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
96
af_mbox = container_of(work, struct mbox, mbox_wrk);
drivers/net/ethernet/marvell/octeontx2/nic/rep.c
334
static void rvu_rep_get_stats(struct work_struct *work)
drivers/net/ethernet/marvell/octeontx2/nic/rep.c
336
struct delayed_work *del_work = to_delayed_work(work);
drivers/net/ethernet/marvell/octeontx2/nic/rep.c
584
struct delayed_work *work;
drivers/net/ethernet/marvell/octeontx2/nic/rep.c
588
work = &priv->refill_wrk[wrk].pool_refill_work;
drivers/net/ethernet/marvell/octeontx2/nic/rep.c
589
cancel_delayed_work_sync(work);
drivers/net/ethernet/marvell/prestera/prestera.h
374
void prestera_queue_work(struct work_struct *work);
drivers/net/ethernet/marvell/prestera/prestera.h
375
void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay);
drivers/net/ethernet/marvell/prestera/prestera_counter.c
336
static void prestera_counter_stats_work(struct work_struct *work)
drivers/net/ethernet/marvell/prestera/prestera_counter.c
338
struct delayed_work *dl_work = to_delayed_work(work);
drivers/net/ethernet/marvell/prestera/prestera_main.c
34
void prestera_queue_work(struct work_struct *work)
drivers/net/ethernet/marvell/prestera/prestera_main.c
36
queue_work(prestera_owq, work);
drivers/net/ethernet/marvell/prestera/prestera_main.c
39
void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay)
drivers/net/ethernet/marvell/prestera/prestera_main.c
41
queue_delayed_work(prestera_wq, work, delay);
drivers/net/ethernet/marvell/prestera/prestera_main.c
532
static void prestera_port_stats_update(struct work_struct *work)
drivers/net/ethernet/marvell/prestera/prestera_main.c
535
container_of(work, struct prestera_port,
drivers/net/ethernet/marvell/prestera/prestera_main.c
536
cached_hw_stats.caching_dw.work);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
317
static void prestera_fw_evt_work_fn(struct work_struct *work)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
323
fw = container_of(work, struct prestera_fw, evt_work);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1385
struct work_struct work;
drivers/net/ethernet/marvell/prestera/prestera_router.c
1391
static void __prestera_router_fib_event_work(struct work_struct *work)
drivers/net/ethernet/marvell/prestera/prestera_router.c
1394
container_of(work, struct prestera_fib_event_work, work);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1459
INIT_WORK(&fib_work->work, __prestera_router_fib_event_work);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1460
prestera_queue_work(&fib_work->work);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1470
struct work_struct work;
drivers/net/ethernet/marvell/prestera/prestera_router.c
1475
static void prestera_router_neigh_event_work(struct work_struct *work)
drivers/net/ethernet/marvell/prestera/prestera_router.c
1478
container_of(work, struct prestera_netevent_work, work);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1513
INIT_WORK(&net_work->work, prestera_router_neigh_event_work);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1514
prestera_queue_work(&net_work->work);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1520
static void prestera_router_update_neighs_work(struct work_struct *work)
drivers/net/ethernet/marvell/prestera/prestera_router.c
1524
router = container_of(work, struct prestera_router,
drivers/net/ethernet/marvell/prestera/prestera_router.c
1525
neighs_update.dw.work);
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
491
static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
498
sdma = container_of(work, struct prestera_sdma, tx_work);
drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
1245
static void prestera_fdb_event_work(struct work_struct *work)
drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
1253
swdev_work = container_of(work, struct prestera_fdb_event_work, work);
drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
1330
INIT_WORK(&swdev_work->work, prestera_fdb_event_work);
drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
1348
queue_work(swdev_wq, &swdev_work->work);
drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
23
struct work_struct work;
drivers/net/ethernet/marvell/pxa168_eth.c
754
static void pxa168_eth_tx_timeout_task(struct work_struct *work)
drivers/net/ethernet/marvell/pxa168_eth.c
756
struct pxa168_eth_private *pep = container_of(work,
drivers/net/ethernet/marvell/sky2.c
3507
static void sky2_restart(struct work_struct *work)
drivers/net/ethernet/marvell/sky2.c
3509
struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3713
cancel_work_sync(ð->rx_dim.work);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3714
cancel_work_sync(ð->tx_dim.work);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3819
static void mtk_dim_rx(struct work_struct *work)
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3821
struct dim *dim = container_of(work, struct dim, work);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3850
static void mtk_dim_tx(struct work_struct *work)
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3852
struct dim *dim = container_of(work, struct dim, work);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
4074
static void mtk_hw_reset_monitor_work(struct work_struct *work)
drivers/net/ethernet/mediatek/mtk_eth_soc.c
4076
struct delayed_work *del_work = to_delayed_work(work);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
4123
mtk_dim_rx(ð->rx_dim.work);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
4124
mtk_dim_tx(ð->tx_dim.work);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
4189
mtk_dim_rx(ð->rx_dim.work);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
4190
mtk_dim_tx(ð->tx_dim.work);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
4374
static void mtk_pending_work(struct work_struct *work)
drivers/net/ethernet/mediatek/mtk_eth_soc.c
4376
struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
5088
INIT_WORK(ð->rx_dim.work, mtk_dim_rx);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
5092
INIT_WORK(ð->tx_dim.work, mtk_dim_tx);
drivers/net/ethernet/mellanox/mlx4/catas.c
268
static void catas_reset(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/catas.c
271
container_of(work, struct mlx4_dev_persistent,
drivers/net/ethernet/mellanox/mlx4/cmd.c
1846
struct mlx4_vf_immed_vlan_work *work;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1876
work = kzalloc_obj(*work);
drivers/net/ethernet/mellanox/mlx4/cmd.c
1877
if (!work)
drivers/net/ethernet/mellanox/mlx4/cmd.c
1886
kfree(work);
drivers/net/ethernet/mellanox/mlx4/cmd.c
1895
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1903
work->orig_vlan_id = vp_oper->state.default_vlan;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1904
work->orig_vlan_ix = vp_oper->vlan_idx;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1908
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1910
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
drivers/net/ethernet/mellanox/mlx4/cmd.c
1920
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1923
work->port = port;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1924
work->slave = slave;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1925
work->qos = vp_oper->state.default_qos;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1926
work->qos_vport = vp_oper->state.qos_vport;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1927
work->vlan_id = vp_oper->state.default_vlan;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1928
work->vlan_ix = vp_oper->vlan_idx;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1929
work->vlan_proto = vp_oper->state.vlan_proto;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1930
work->priv = priv;
drivers/net/ethernet/mellanox/mlx4/cmd.c
1931
INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
drivers/net/ethernet/mellanox/mlx4/cmd.c
1932
queue_work(priv->mfunc.master.comm_wq, &work->work);
drivers/net/ethernet/mellanox/mlx4/cmd.c
2223
void mlx4_master_comm_channel(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/cmd.c
2226
container_of(work,
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1259
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1261
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1517
static void mlx4_en_do_get_stats(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1519
struct delayed_work *delay = to_delayed_work(work);
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1547
static void mlx4_en_service_task(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1549
struct delayed_work *delay = to_delayed_work(work);
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1588
static void mlx4_en_linkstate_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1590
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
165
struct work_struct work;
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
201
static void mlx4_en_filter_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2019
static void mlx4_en_restart(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2021
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
203
struct mlx4_en_filter *filter = container_of(work,
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
205
work);
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
304
INIT_WORK(&filter->work, mlx4_en_filter_work);
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
416
queue_work(priv->mdev->workqueue, &filter->work);
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
439
cancel_work_sync(&filter->work);
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
456
!work_pending(&filter->work) &&
drivers/net/ethernet/mellanox/mlx4/eq.c
136
void mlx4_gen_slave_eqe(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/eq.c
139
container_of(work, struct mlx4_mfunc_master_ctx,
drivers/net/ethernet/mellanox/mlx4/eq.c
449
void mlx4_master_handle_slave_flr(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/eq.c
452
container_of(work, struct mlx4_mfunc_master_ctx,
drivers/net/ethernet/mellanox/mlx4/eq.c
853
int work = 0;
drivers/net/ethernet/mellanox/mlx4/eq.c
859
work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
drivers/net/ethernet/mellanox/mlx4/eq.c
861
return IRQ_RETVAL(work);
drivers/net/ethernet/mellanox/mlx4/fw.c
2702
void mlx4_opreq_action(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/fw.c
2704
struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
drivers/net/ethernet/mellanox/mlx4/fw.h
255
void mlx4_opreq_action(struct work_struct *work);
drivers/net/ethernet/mellanox/mlx4/main.c
1512
struct work_struct work;
drivers/net/ethernet/mellanox/mlx4/main.c
1518
static void mlx4_bond_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/main.c
1520
struct mlx4_bond *bond = container_of(work, struct mlx4_bond, work);
drivers/net/ethernet/mellanox/mlx4/main.c
1555
INIT_WORK(&bond->work, mlx4_bond_work);
drivers/net/ethernet/mellanox/mlx4/main.c
1561
queue_work(mlx4_wq, &bond->work);
drivers/net/ethernet/mellanox/mlx4/mlx4.h
1070
void mlx4_master_comm_channel(struct work_struct *work);
drivers/net/ethernet/mellanox/mlx4/mlx4.h
1071
void mlx4_gen_slave_eqe(struct work_struct *work);
drivers/net/ethernet/mellanox/mlx4/mlx4.h
1072
void mlx4_master_handle_slave_flr(struct work_struct *work);
drivers/net/ethernet/mellanox/mlx4/mlx4.h
654
struct work_struct work;
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5260
struct mlx4_vf_immed_vlan_work *work)
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5263
ctx->qp_context.qos_vport = work->qos_vport;
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5268
struct mlx4_vf_immed_vlan_work *work =
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5269
container_of(_work, struct mlx4_vf_immed_vlan_work, work);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5272
struct mlx4_dev *dev = &work->priv->dev;
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5274
&work->priv->mfunc.master.res_tracker;
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5276
&tracker->slave_list[work->slave].res_list[RES_QP];
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5302
work->slave);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5309
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5316
else if (!work->vlan_id)
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5319
else if (work->vlan_proto == htons(ETH_P_8021AD))
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5335
if (qp->com.owner == work->slave) {
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5344
if (port != work->port) {
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5353
if (work->vlan_id == MLX4_VGT) {
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5365
upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5370
if (work->vlan_proto == htons(ETH_P_8021AD))
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5379
((work->qos & 0x7) << 3);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5383
update_qos_vpp(upd_context, work);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5392
work->slave, port, qp->local_qpn, err);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5403
errors, work->slave, work->port);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5408
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5409
NO_INDX != work->orig_vlan_ix)
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5410
__mlx4_unregister_vlan(&work->priv->dev, work->port,
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5411
work->orig_vlan_id);
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
5413
kfree(work);
drivers/net/ethernet/mellanox/mlx4/sense.c
91
static void mlx4_sense_port(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx4/sense.c
93
struct delayed_work *delay = to_delayed_work(work);
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1173
cancel_work_sync(&ent->work)) {
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1300
INIT_WORK(&ent->work, cmd_work_handler);
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1302
cmd_work_handler(&ent->work);
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1303
} else if (!queue_work(cmd->wq, &ent->work)) {
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2188
struct mlx5_async_work *work = _work;
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2194
ctx = work->ctx;
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2196
throttle_locked = work->throttle_locked;
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2197
unpriv_locked = work->unpriv_locked;
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2198
status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2199
work->user_callback(status, work);
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2213
struct mlx5_async_work *work)
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2219
work->ctx = ctx;
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2220
work->user_callback = callback;
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2221
work->opcode = in_to_opcode(in);
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2222
work->op_mod = MLX5_GET(mbox_in, in, op_mod);
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2223
work->out = out;
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2224
work->throttle_locked = false;
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2225
work->unpriv_locked = false;
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2237
work->unpriv_locked = true;
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2244
work->throttle_locked = true;
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2248
mlx5_cmd_exec_cb_handler, work, false);
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2255
if (work->throttle_locked)
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2257
if (work->unpriv_locked)
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
933
static void cb_timeout_handler(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
935
struct delayed_work *dwork = to_delayed_work(work);
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
979
static void cmd_work_handler(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
981
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
1007
static void mlx5_fw_tracer_update_db(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
1010
container_of(work, struct mlx5_fw_tracer, update_db_work);
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
265
static void mlx5_tracer_read_strings_db(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
267
struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer,
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
762
static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
765
container_of(work, struct mlx5_fw_tracer, handle_traces_work);
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
915
static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
918
container_of(work, struct mlx5_fw_tracer, ownership_change_work);
drivers/net/ethernet/mellanox/mlx5/core/dpll.c
17
struct delayed_work work;
drivers/net/ethernet/mellanox/mlx5/core/dpll.c
331
queue_delayed_work(mdpll->wq, &mdpll->work,
drivers/net/ethernet/mellanox/mlx5/core/dpll.c
335
static void mlx5_dpll_periodic_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/dpll.c
337
struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll,
drivers/net/ethernet/mellanox/mlx5/core/dpll.c
338
work.work);
drivers/net/ethernet/mellanox/mlx5/core/dpll.c
477
INIT_DELAYED_WORK(&mdpll->work, &mlx5_dpll_periodic_work);
drivers/net/ethernet/mellanox/mlx5/core/dpll.c
501
cancel_delayed_work_sync(&mdpll->work);
drivers/net/ethernet/mellanox/mlx5/core/en.h
1045
void mlx5e_set_rx_mode_work(struct work_struct *work);
drivers/net/ethernet/mellanox/mlx5/core/en.h
866
struct delayed_work work;
drivers/net/ethernet/mellanox/mlx5/core/en/dim.h
40
void mlx5e_rx_dim_work(struct work_struct *work);
drivers/net/ethernet/mellanox/mlx5/core/en/dim.h
41
void mlx5e_tx_dim_work(struct work_struct *work);
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
106
cancel_delayed_work_sync(&priv->stats_agent.work);
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
113
queue_delayed_work(priv->wq, &sagent->work, sagent->delay);
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
120
cancel_delayed_work_sync(&priv->stats_agent.work);
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
149
INIT_DELAYED_WORK(&priv->stats_agent.work, mlx5e_hv_vhca_stats_work);
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
57
static void mlx5e_hv_vhca_stats_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
66
dwork = to_delayed_work(work);
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
67
sagent = container_of(dwork, struct mlx5e_hv_vhca_stats_agent, work);
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
85
queue_delayed_work(priv->wq, &sagent->work, sagent->delay);
drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
162
static void mapping_work_handler(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
169
ctx = container_of(work, struct mapping_ctx, dwork.work);
drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
57
static void mlx5e_monitor_counters_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
59
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
196
static void mlx5e_pcie_cong_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
205
cong_event = container_of(work, struct mlx5e_pcie_cong_event, work);
drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
246
queue_work(cong_event->priv->wq, &cong_event->work);
drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
327
INIT_WORK(&cong_event->work, mlx5e_pcie_cong_event_work);
drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
36
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
369
cancel_work_sync(&cong_event->work);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
456
static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
459
container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
15
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
360
static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
363
container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
394
struct mlx5_bridge_switchdev_fdb_work *work;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
397
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
398
if (!work)
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
401
INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
402
memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
406
kfree(work);
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
410
work->fdb_info.addr = addr;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
413
work->dev = dev;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
414
work->br_offloads = br_offloads;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
415
work->add = add;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
416
return work;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
427
struct mlx5_bridge_switchdev_fdb_work *work;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
482
work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
486
if (IS_ERR(work)) {
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
488
work);
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
489
return notifier_from_errno(PTR_ERR(work));
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
492
queue_work(br_offloads->wq, &work->work);
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
500
static void mlx5_esw_bridge_update_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
502
struct mlx5_esw_bridge_offloads *br_offloads = container_of(work,
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
504
update_work.work);
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
114
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
126
static void mlx5e_rep_neigh_update(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
128
struct neigh_update_work *update_work = container_of(work, struct neigh_update_work,
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
129
work);
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
194
INIT_WORK(&update_work->work, mlx5e_rep_neigh_update);
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
230
queue_work(priv->wq, &update_work->work);
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
95
static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
97
struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
98
neigh_update.neigh_stats_work.work);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1001
static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1003
struct mlx5_ct_entry *entry = container_of(work, struct mlx5_ct_entry, work);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1014
INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1015
queue_work(entry->ct_priv->wq, &entry->work);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
162
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
1242
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
1248
static void mlx5e_tc_fib_event_work(struct work_struct *work);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
1258
INIT_WORK(&fib_work->work, mlx5e_tc_fib_event_work);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
1285
queue_work(priv->wq, &fib_work->work);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
1689
static void mlx5e_tc_fib_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
1692
container_of(work, struct mlx5e_tc_fib_event_data, work);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
1845
queue_work(priv->wq, &fib_work->work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
1062
struct mlx5e_ipsec_work *work = sa_entry->work;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
1070
sa_entry_shadow = work->data;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
1073
queue_work(sa_entry->ipsec->wq, &work->work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
63
container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
645
struct mlx5e_ipsec_work *work =
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
646
container_of(_work, struct mlx5e_ipsec_work, work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
647
struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
650
attrs = &((struct mlx5e_ipsec_sa_entry *)work->data)->attrs;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
673
struct mlx5e_ipsec_work *work =
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
674
container_of(_work, struct mlx5e_ipsec_work, work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
675
struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
676
struct mlx5e_ipsec_netevent_data *data = work->data;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
698
struct mlx5e_ipsec_work *work;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
714
work = kzalloc_obj(*work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
715
if (!work)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
724
INIT_WORK(&work->work, mlx5e_ipsec_modify_state);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
731
INIT_WORK(&work->work, mlx5e_ipsec_handle_netdev_event);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
737
work->data = data;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
738
work->sa_entry = sa_entry;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
739
sa_entry->work = work;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
743
kfree(work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
890
if (sa_entry->work)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
891
kfree(sa_entry->work->data);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
892
kfree(sa_entry->work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
927
if (sa_entry->work)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
928
cancel_work_sync(&sa_entry->work->work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
936
if (sa_entry->work)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
937
kfree(sa_entry->work->data);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
938
kfree(sa_entry->work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
971
data = sa_entry->work->data;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
974
queue_work(ipsec->wq, &sa_entry->work->work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
160
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
236
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
286
struct mlx5e_ipsec_work *work;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
2876
struct mlx5e_ipsec_mpv_work *work;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
2885
work = &slave_priv->ipsec->mpv_work;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
2887
INIT_WORK(&work->work, ipsec_mpv_work_handler);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
2888
work->event = event;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
2889
work->slave_priv = slave_priv;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
2890
work->master_priv = master_priv;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
2891
queue_work(slave_priv->ipsec->wq, &work->work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
645
struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
646
struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
648
switch (work->event) {
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
684
complete(&work->master_priv->ipsec->comp);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
441
struct mlx5e_ipsec_work *work =
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
442
container_of(_work, struct mlx5e_ipsec_work, work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
443
struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
472
kfree(work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
481
struct mlx5e_ipsec_work *work;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
498
work = kmalloc_obj(*work, GFP_ATOMIC);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
499
if (!work)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
502
INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
503
work->data = sa_entry;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
505
queue_work(ipsec->wq, &work->work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
103
static void accel_rule_handle_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
109
accel_rule = container_of(work, struct accel_rule, work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
12
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
125
INIT_WORK(&rule->work, accel_rule_handle_work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
329
static void resync_handle_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
336
resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
359
INIT_WORK(&resync->work, resync_handle_work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
38
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
488
if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) {
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
620
queue_work(rule->priv->tls->rx_wq, &rule->work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
725
if (!cancel_work_sync(&priv_rx->rule.work))
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
731
if (cancel_work_sync(&resync->work))
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
278
static void create_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
281
container_of(work, struct mlx5e_tls_tx_pool, create_work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
319
queue_work(pool->wq, work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
328
static void destroy_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
331
container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
349
queue_work(pool->wq, work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
1511
static void macsec_async_event(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
1521
async_work = container_of(work, struct mlx5e_macsec_async_work, work);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
1576
INIT_WORK(&async_work->work, macsec_async_event);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
1578
WARN_ON(!queue_work(macsec->wq, &async_work->work));
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
55
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
628
static void arfs_handle_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
630
struct arfs_rule *arfs_rule = container_of(work,
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
45
void mlx5e_rx_dim_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
47
struct dim *dim = container_of(work, struct dim, work);
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
55
void mlx5e_tx_dim_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
57
struct dim *dim = container_of(work, struct dim, work);
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
77
INIT_WORK(&dim->work, work_fun);
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
93
cancel_work_sync(&dim->work);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1412
cancel_work_sync(&rq->dim->work);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
148
static void mlx5e_update_carrier_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
150
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
160
static void mlx5e_update_stats_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
162
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1927
cancel_work_sync(&sq->dim->work);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5086
static void mlx5e_tx_timeout_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5088
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
6199
void mlx5e_set_rx_mode_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
6201
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1139
static void mlx5e_rep_mpesw_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1142
container_of(work, struct mlx5_rep_uplink_priv,
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5534
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5537
container_of(work, struct mlx5_rep_uplink_priv,
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
227
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
761
static void esw_vport_change_handler(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
764
container_of(work, struct mlx5_vport, vport_change_handler);
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
336
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
3622
static void esw_functions_changed_event_handler(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
3628
host_work = container_of(work, struct mlx5_host_work, work);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
3657
INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
3658
queue_work(esw->work_queue, &host_work->work);
drivers/net/ethernet/mellanox/mlx5/core/events.c
300
static void mlx5_pcie_event(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/events.c
309
events = container_of(work, struct mlx5_events, pcie_core_work);
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
208
static void mlx5_fc_stats_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
210
struct mlx5_fc_stats *fc_stats = container_of(work, struct mlx5_fc_stats,
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
211
work.work);
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
214
queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval);
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
348
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
351
queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
367
cancel_delayed_work_sync(&fc_stats->work);
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
51
struct delayed_work work;
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
277
if (current_work() != &fw_reset->reset_timeout_work.work)
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
285
static void mlx5_sync_reset_reload_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
287
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
358
static void mlx5_fw_live_patch_event(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
360
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
473
static void mlx5_sync_reset_request_event(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
475
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
697
static void mlx5_sync_reset_now_event(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
699
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
727
static void mlx5_sync_reset_unload_event(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
733
fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work);
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
751
static void mlx5_sync_reset_abort_event(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
753
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
787
static void mlx5_sync_reset_timeout_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
789
struct delayed_work *dwork = container_of(work, struct delayed_work,
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
790
work);
drivers/net/ethernet/mellanox/mlx5/core/health.c
553
static void mlx5_fw_reporter_err_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/health.c
558
health = container_of(work, struct mlx5_core_health, report_work);
drivers/net/ethernet/mellanox/mlx5/core/health.c
630
static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/health.c
638
health = container_of(work, struct mlx5_core_health, fatal_report_work);
drivers/net/ethernet/mellanox/mlx5/core/health.c
763
static void mlx5_health_log_ts_update(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/health.c
765
struct delayed_work *dwork = to_delayed_work(work);
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
1261
static void mlx5_do_bond_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
1263
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
230
static void mlx5_do_bond_work(struct work_struct *work);
drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
117
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
258
static void mlx5_lag_fib_update(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
261
container_of(work, struct mlx5_fib_event_work, work);
drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
298
INIT_WORK(&fib_work->work, mlx5_lag_fib_update);
drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
355
queue_work(mp->wq, &fib_work->work);
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
137
static void mlx5_mpesw_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
139
struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
168
struct mlx5_mpesw_work_st *work;
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
174
work = kzalloc_obj(*work);
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
175
if (!work)
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
178
INIT_WORK(&work->work, mlx5_mpesw_work);
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
179
init_completion(&work->comp);
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
180
work->op = op;
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
181
work->lag = ldev;
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
183
if (!queue_work(ldev->wq, &work->work)) {
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
188
wait_for_completion(&work->comp);
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
189
err = work->result;
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
191
kfree(work);
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
229
void mlx5_mpesw_speed_update_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
231
struct mlx5_lag *ldev = container_of(work, struct mlx5_lag,
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
21
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
41
void mlx5_mpesw_speed_update_work(struct work_struct *work);
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
45
static inline void mlx5_mpesw_speed_update_work(struct work_struct *work) {}
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
437
static void mlx5_pps_out(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
439
struct mlx5_clock_dev_state *clock_state = container_of(work, struct mlx5_clock_dev_state,
drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
589
static void mlx5_crypto_dek_sync_work_fn(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
592
container_of(work, struct mlx5_crypto_dek_pool, sync_work);
drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
670
static void mlx5_crypto_dek_destroy_work_fn(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
673
container_of(work, struct mlx5_crypto_dek_pool, destroy_work);
drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
101
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
102
if (!work)
drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
105
INIT_WORK(&work->invalidate_work, mlx5_hv_vhca_invalidate_work);
drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
106
work->hv_vhca = hv_vhca;
drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
107
work->block_mask = block_mask;
drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
109
queue_work(hv_vhca->work_queue, &work->invalidate_work);
drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
70
static void mlx5_hv_vhca_invalidate_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
76
hwork = container_of(work, struct mlx5_hv_vhca_work, invalidate_work);
drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
99
struct mlx5_hv_vhca_work *work;
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
53
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
588
static void pages_work_handler(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
590
struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
662
INIT_WORK(&req->work, pages_work_handler);
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
663
queue_work(dev->priv.pg_wq, &req->work);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
20
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
230
work_ctx = container_of(_work, struct mlx5_sf_dev_active_work_ctx, work);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
255
struct mlx5_sf_dev_table *table = container_of(_work, struct mlx5_sf_dev_table, work);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
26
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
287
INIT_WORK(&work_ctx->work, &mlx5_sf_dev_add_active_work);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
293
mlx5_vhca_events_work_enqueue(dev, wq_idx, &work_ctx->work);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
309
INIT_WORK(&table->work, &mlx5_sf_dev_queue_active_works);
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
310
queue_work(table->active_wq, &table->work);
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
100
kfree(work);
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
103
void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
105
queue_work(dev->priv.vhca_events->handler[idx].wq, work);
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
113
struct mlx5_vhca_event_work *work;
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
117
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
118
if (!work)
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
120
INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
121
work->dev = dev;
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
122
work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
123
wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS;
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
124
mlx5_vhca_events_work_enqueue(dev, wq_idx, &work->work);
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
13
struct work_struct work;
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
97
struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
99
mlx5_vhca_event_notify(work->dev, &work->event);
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
32
void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work);
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
298
static void hws_action_ste_pool_cleanup(struct work_struct *work)
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
305
ctx = container_of(work, struct mlx5hws_context,
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
306
action_ste_cleanup.work);
drivers/net/ethernet/mellanox/mlxsw/core.c
1775
struct work_struct work;
drivers/net/ethernet/mellanox/mlxsw/core.c
1778
static void mlxsw_core_health_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/core.c
1783
event = container_of(work, struct mlxsw_core_health_event, work);
drivers/net/ethernet/mellanox/mlxsw/core.c
1801
INIT_WORK(&event->work, mlxsw_core_health_event_work);
drivers/net/ethernet/mellanox/mlxsw/core.c
1802
mlxsw_core_schedule_work(&event->work);
drivers/net/ethernet/mellanox/mlxsw/core.c
3284
bool mlxsw_core_schedule_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/core.c
3286
return queue_work(mlxsw_owq, work);
drivers/net/ethernet/mellanox/mlxsw/core.c
785
static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/core.c
787
struct mlxsw_reg_trans *trans = container_of(work,
drivers/net/ethernet/mellanox/mlxsw/core.c
789
timeout_dw.work);
drivers/net/ethernet/mellanox/mlxsw/core.h
294
bool mlxsw_core_schedule_work(struct work_struct *work);
drivers/net/ethernet/mellanox/mlxsw/core_env.c
1021
struct work_struct work;
drivers/net/ethernet/mellanox/mlxsw/core_env.c
1024
static void mlxsw_env_pmpe_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/core_env.c
1033
event = container_of(work, struct mlxsw_env_module_plug_unplug_event,
drivers/net/ethernet/mellanox/mlxsw/core_env.c
1034
work);
drivers/net/ethernet/mellanox/mlxsw/core_env.c
1090
INIT_WORK(&event->work, mlxsw_env_pmpe_event_work);
drivers/net/ethernet/mellanox/mlxsw/core_env.c
1091
mlxsw_core_schedule_work(&event->work);
drivers/net/ethernet/mellanox/mlxsw/core_env.c
927
struct work_struct work;
drivers/net/ethernet/mellanox/mlxsw/core_env.c
930
static void mlxsw_env_mtwe_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/core_env.c
938
event = container_of(work, struct mlxsw_env_module_temp_warn_event,
drivers/net/ethernet/mellanox/mlxsw/core_env.c
939
work);
drivers/net/ethernet/mellanox/mlxsw/core_env.c
995
INIT_WORK(&event->work, mlxsw_env_mtwe_event_work);
drivers/net/ethernet/mellanox/mlxsw/core_env.c
996
mlxsw_core_schedule_work(&event->work);
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
1172
struct work_struct work;
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
1175
static void mlxsw_linecard_status_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
1181
event = container_of(work, struct mlxsw_linecard_status_event, work);
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
1200
INIT_WORK(&event->work, mlxsw_linecard_status_event_work);
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
1201
mlxsw_core_schedule_work(&event->work);
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
1207
struct work_struct work;
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
1210
static void mlxsw_linecard_bct_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
1215
event = container_of(work, struct mlxsw_linecard_bct_event, work);
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
1233
INIT_WORK(&event->work, mlxsw_linecard_bct_event_work);
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
1234
mlxsw_core_schedule_work(&event->work);
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
810
static void mlxsw_linecard_status_event_to_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
813
container_of(work, struct mlxsw_linecard,
drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
814
status_event_to_dw.work);
drivers/net/ethernet/mellanox/mlxsw/i2c.c
564
static void mlxsw_i2c_work_handler(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/i2c.c
568
mlxsw_i2c = container_of(work, struct mlxsw_i2c, irq_work);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1841
static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1852
events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1908
mlxsw_core_schedule_work(&events->work);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1920
cancel_work_sync(&events->work);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1976
INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
864
static void update_stats_cache(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
867
container_of(work, struct mlxsw_sp_port,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
868
periodic_hw_stats.update_dw.work);
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
147
struct work_struct work;
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
240
void (*shaper_work)(struct work_struct *work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
1000
rule_activity_update.dw.work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
997
static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
999
struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
714
static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
717
container_of(work, struct mlxsw_sp_acl_tcam_vregion,
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
718
rehash.dw.work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
1014
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
1016
struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
1017
stats_update_dw.work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
1231
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
1233
struct delayed_work *dwork = to_delayed_work(work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
260
static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
262
struct delayed_work *dwork = to_delayed_work(work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
843
static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
845
struct delayed_work *dwork = to_delayed_work(work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
135
static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
201
static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
43
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2620
static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2625
router = container_of(work, struct mlxsw_sp_router,
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2626
neighs_update.dw.work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2636
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2641
router = container_of(work, struct mlxsw_sp_router,
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2642
nexthop_probe_dw.work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2766
struct work_struct work;
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2771
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2774
container_of(work, struct mlxsw_sp_netevent_work, work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2825
static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2828
container_of(work, struct mlxsw_sp_netevent_work, work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2837
static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2840
container_of(work, struct mlxsw_sp_netevent_work, work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2866
INIT_WORK(&net_work->work, cb);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2869
mlxsw_core_schedule_work(&net_work->work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
4876
static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
4882
router = container_of(work, struct mlxsw_sp_router,
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
4883
nh_grp_activity_dw.work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
7801
struct work_struct work;
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
7863
static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
7866
container_of(work, struct mlxsw_sp_fib_event_work, work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
7899
static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
7902
container_of(work, struct mlxsw_sp_fib_event_work, work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
7946
static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
7949
container_of(work, struct mlxsw_sp_fib_event_work, work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
8148
INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
8152
INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
8159
INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
8164
mlxsw_core_schedule_work(&fib_work->work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
8513
struct work_struct work;
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
8518
static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
8521
container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
8522
work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
8546
INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
8549
mlxsw_core_schedule_work(&hws_work->work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
9550
struct work_struct work;
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
9557
static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
9560
container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
9599
INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
9604
mlxsw_core_schedule_work(&inet6addr_work->work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
1029
static void mlxsw_sp_span_respin_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
1035
span = container_of(work, struct mlxsw_sp_span, work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
1065
mlxsw_core_schedule_work(&mlxsw_sp->span->work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
111
INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
125
cancel_work_sync(&mlxsw_sp->span->work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
22
struct work_struct work;
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
71
static void mlxsw_sp_span_respin_work(struct work_struct *work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3355
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3370
bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3402
struct work_struct work;
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3512
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3515
container_of(work, struct mlxsw_sp_switchdev_event_work, work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3682
static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3685
container_of(work, struct mlxsw_sp_switchdev_event_work, work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3802
INIT_WORK(&switchdev_work->work,
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3819
INIT_WORK(&switchdev_work->work,
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3832
mlxsw_core_schedule_work(&switchdev_work->work);
drivers/net/ethernet/meta/fbnic/fbnic_pci.c
210
static void fbnic_service_task(struct work_struct *work)
drivers/net/ethernet/meta/fbnic/fbnic_pci.c
212
struct fbnic_dev *fbd = container_of(to_delayed_work(work),
drivers/net/ethernet/micrel/ks8842.c
1075
static void ks8842_tx_timeout_work(struct work_struct *work)
drivers/net/ethernet/micrel/ks8842.c
1078
container_of(work, struct ks8842_adapter, timeout_work);
drivers/net/ethernet/micrel/ks8851_common.c
566
static void ks8851_rxctrl_work(struct work_struct *work)
drivers/net/ethernet/micrel/ks8851_common.c
568
struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work);
drivers/net/ethernet/micrel/ks8851_spi.c
306
static void ks8851_tx_work(struct work_struct *work)
drivers/net/ethernet/micrel/ks8851_spi.c
316
kss = container_of(work, struct ks8851_net_spi, tx_work);
drivers/net/ethernet/micrel/ksz884x.c
6261
static void mib_read_work(struct work_struct *work)
drivers/net/ethernet/micrel/ksz884x.c
6264
container_of(work, struct dev_info, mib_read);
drivers/net/ethernet/microchip/enc28j60.c
1303
static void enc28j60_tx_work_handler(struct work_struct *work)
drivers/net/ethernet/microchip/enc28j60.c
1306
container_of(work, struct enc28j60_net, tx_work);
drivers/net/ethernet/microchip/enc28j60.c
1405
static void enc28j60_setrx_work_handler(struct work_struct *work)
drivers/net/ethernet/microchip/enc28j60.c
1408
container_of(work, struct enc28j60_net, setrx_work);
drivers/net/ethernet/microchip/enc28j60.c
1430
static void enc28j60_restart_work_handler(struct work_struct *work)
drivers/net/ethernet/microchip/enc28j60.c
1433
container_of(work, struct enc28j60_net, restart_work);
drivers/net/ethernet/microchip/lan865x/lan865x.c
205
static void lan865x_multicast_work_handler(struct work_struct *work)
drivers/net/ethernet/microchip/lan865x/lan865x.c
207
struct lan865x_priv *priv = container_of(work, struct lan865x_priv,
drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
584
static void lan966x_check_stats_work(struct work_struct *work)
drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
586
struct delayed_work *del_work = to_delayed_work(work);
drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
230
static void lan966x_fdb_event_work(struct work_struct *work)
drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
233
container_of(work, struct lan966x_fdb_event_work, work);
drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
273
INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
281
queue_work(lan966x->fdb_work, &fdb_work->work);
drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
8
struct work_struct work;
drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
1130
static void sparx5_check_stats_work(struct work_struct *work)
drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
1132
struct delayed_work *dwork = to_delayed_work(work);
drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
422
void sparx5_mact_pull_work(struct work_struct *work)
drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
424
struct delayed_work *del_work = to_delayed_work(work);
drivers/net/ethernet/microchip/sparx5/sparx5_main.h
473
void sparx5_mact_pull_work(struct work_struct *work);
drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
16
struct work_struct work;
drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
323
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
326
container_of(work, struct sparx5_switchdev_event_work, work);
drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
375
static void sparx5_schedule_work(struct work_struct *work)
drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
377
queue_work(sparx5_owq, work);
drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
412
INIT_WORK(&switchdev_work->work,
drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
424
sparx5_schedule_work(&switchdev_work->work);
drivers/net/ethernet/microsoft/mana/gdma_main.c
2089
schedule_delayed_work(&mana_dev_recovery_work.work,
drivers/net/ethernet/microsoft/mana/gdma_main.c
2199
INIT_DELAYED_WORK(&mana_dev_recovery_work.work, mana_recovery_delayed_func);
drivers/net/ethernet/microsoft/mana/gdma_main.c
2217
disable_delayed_work_sync(&mana_dev_recovery_work.work);
drivers/net/ethernet/microsoft/mana/gdma_main.c
26
struct delayed_work work;
drivers/net/ethernet/microsoft/mana/gdma_main.c
521
struct mana_dev_recovery_work *work;
drivers/net/ethernet/microsoft/mana/gdma_main.c
525
work = container_of(w, struct mana_dev_recovery_work, work.work);
drivers/net/ethernet/microsoft/mana/gdma_main.c
527
spin_lock_irqsave(&work->lock, flags);
drivers/net/ethernet/microsoft/mana/gdma_main.c
529
while (!list_empty(&work->dev_list)) {
drivers/net/ethernet/microsoft/mana/gdma_main.c
530
dev = list_first_entry(&work->dev_list,
drivers/net/ethernet/microsoft/mana/gdma_main.c
533
spin_unlock_irqrestore(&work->lock, flags);
drivers/net/ethernet/microsoft/mana/gdma_main.c
539
spin_lock_irqsave(&work->lock, flags);
drivers/net/ethernet/microsoft/mana/gdma_main.c
542
spin_unlock_irqrestore(&work->lock, flags);
drivers/net/ethernet/microsoft/mana/mana_en.c
302
static void mana_per_port_queue_reset_work_handler(struct work_struct *work)
drivers/net/ethernet/microsoft/mana/mana_en.c
304
struct mana_port_context *apc = container_of(work,
drivers/net/ethernet/microsoft/mana/mana_en.c
3480
static void mana_rdma_service_handle(struct work_struct *work)
drivers/net/ethernet/microsoft/mana/mana_en.c
3483
container_of(work, struct mana_service_work, work);
drivers/net/ethernet/microsoft/mana/mana_en.c
3538
INIT_WORK(&serv_work->work, mana_rdma_service_handle);
drivers/net/ethernet/microsoft/mana/mana_en.c
3539
queue_work(gc->service_wq, &serv_work->work);
drivers/net/ethernet/microsoft/mana/mana_en.c
3546
static void mana_gf_stats_work_handler(struct work_struct *work)
drivers/net/ethernet/microsoft/mana/mana_en.c
3549
container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
drivers/net/ethernet/mscc/ocelot_net.c
635
struct work_struct work;
drivers/net/ethernet/mscc/ocelot_net.c
655
container_of((x), struct ocelot_mact_work_ctx, work)
drivers/net/ethernet/mscc/ocelot_net.c
657
static void ocelot_mact_work(struct work_struct *work)
drivers/net/ethernet/mscc/ocelot_net.c
659
struct ocelot_mact_work_ctx *w = ocelot_work_to_ctx(work);
drivers/net/ethernet/mscc/ocelot_net.c
686
INIT_WORK(&w->work, ocelot_mact_work);
drivers/net/ethernet/mscc/ocelot_net.c
687
queue_work(ocelot->owq, &w->work);
drivers/net/ethernet/mscc/ocelot_stats.c
364
static void ocelot_check_stats_work(struct work_struct *work)
drivers/net/ethernet/mscc/ocelot_stats.c
366
struct delayed_work *del_work = to_delayed_work(work);
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
3407
static void myri10ge_watchdog(struct work_struct *work)
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
3410
container_of(work, struct myri10ge_priv, watchdog_work);
drivers/net/ethernet/natsemi/ns83820.c
583
static inline void queue_refill(struct work_struct *work)
drivers/net/ethernet/natsemi/ns83820.c
585
struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
606
static void nfp_ccm_mbox_post_runq_work(struct work_struct *work)
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
611
nn = container_of(work, struct nfp_net, mbox_cmsg.runq_work);
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
625
static void nfp_ccm_mbox_post_wait_work(struct work_struct *work)
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
631
nn = container_of(work, struct nfp_net, mbox_cmsg.wait_work);
drivers/net/ethernet/netronome/nfp/flower/cmsg.c
309
void nfp_flower_cmsg_process_rx(struct work_struct *work)
drivers/net/ethernet/netronome/nfp/flower/cmsg.c
315
priv = container_of(work, struct nfp_flower_priv, cmsg_work);
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
746
void nfp_flower_cmsg_process_rx(struct work_struct *work);
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
294
static void nfp_fl_lag_do_work(struct work_struct *work)
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
303
delayed_work = to_delayed_work(work);
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
304
lag = container_of(delayed_work, struct nfp_fl_lag, work);
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
325
schedule_delayed_work(&lag->work,
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
340
schedule_delayed_work(&lag->work,
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
397
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
495
schedule_delayed_work(&priv->nfp_lag.work, 0);
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
507
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
535
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
616
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
661
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
698
INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
713
cancel_delayed_work_sync(&lag->work);
drivers/net/ethernet/netronome/nfp/flower/main.h
234
struct delayed_work work;
drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
449
static void update_stats_cache(struct work_struct *work)
drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
454
delayed_work = to_delayed_work(work);
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
170
struct work_struct work;
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
629
static void nfp_tun_neigh_update(struct work_struct *work)
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
637
update_work = container_of(work, struct nfp_neigh_update_work, work);
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
703
INIT_WORK(&update_work->work, nfp_tun_neigh_update);
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
745
queue_work(system_highpri_wq, &update_work->work);
drivers/net/ethernet/netronome/nfp/nfp_net.h
731
struct work_struct work;
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1087
cancel_work_sync(&r_vec->rx_dim.work);
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1090
cancel_work_sync(&r_vec->tx_dim.work);
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1160
static void nfp_net_rx_dim_work(struct work_struct *work)
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1168
dim = container_of(work, struct dim, work);
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1190
static void nfp_net_tx_dim_work(struct work_struct *work)
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1198
dim = container_of(work, struct dim, work);
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1233
INIT_WORK(&r_vec->rx_dim.work, nfp_net_rx_dim_work);
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1238
INIT_WORK(&r_vec->tx_dim.work, nfp_net_tx_dim_work);
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1401
schedule_work(&nn->mbox_amsg.work);
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1406
static void nfp_net_mbox_amsg_work(struct work_struct *work)
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1408
struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
2926
INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
2950
flush_work(&nn->mbox_amsg.work);
drivers/net/ethernet/netronome/nfp/nfp_net_main.c
644
static void nfp_net_refresh_vnics(struct work_struct *work)
drivers/net/ethernet/netronome/nfp/nfp_net_main.c
646
struct nfp_pf *pf = container_of(work, struct nfp_pf,
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
667
static void pch_gbe_reset_task(struct work_struct *work)
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
670
adapter = container_of(work, struct pch_gbe_adapter, reset_task);
drivers/net/ethernet/pensando/ionic/ionic_api.h
30
struct completion work;
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
417
cancel_work_sync(&ionic->lif->deferred.work);
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
452
cancel_work_sync(&lif->deferred.work);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
18
struct ionic_deferred_work *work;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
38
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
39
if (!work) {
drivers/net/ethernet/pensando/ionic/ionic_dev.c
44
work->type = IONIC_DW_TYPE_RX_MODE;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
46
ionic_lif_deferred_enqueue(lif, work);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
57
void ionic_doorbell_napi_work(struct work_struct *work)
drivers/net/ethernet/pensando/ionic/ionic_dev.c
578
struct ionic_deferred_work *work;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
580
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
581
if (work) {
drivers/net/ethernet/pensando/ionic/ionic_dev.c
582
work->type = IONIC_DW_TYPE_LIF_RESET;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
583
work->fw_status = fw_status_ready;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
584
ionic_lif_deferred_enqueue(lif, work);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
59
struct ionic_qcq *qcq = container_of(work, struct ionic_qcq,
drivers/net/ethernet/pensando/ionic/ionic_dev.c
95
static void ionic_doorbell_check_dwork(struct work_struct *work)
drivers/net/ethernet/pensando/ionic/ionic_dev.c
97
struct ionic *ionic = container_of(work, struct ionic,
drivers/net/ethernet/pensando/ionic/ionic_dev.c
98
doorbell_check_dwork.work);
drivers/net/ethernet/pensando/ionic/ionic_dev.h
390
void ionic_doorbell_napi_work(struct work_struct *work);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1121
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1136
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1171
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
133
struct ionic_deferred_work *work)
drivers/net/ethernet/pensando/ionic/ionic_lif.c
136
list_add_tail(&work->list, &lif->deferred.list);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
138
queue_work(lif->ionic->wq, &lif->deferred.work);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1398
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1421
struct ionic_deferred_work *work;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1430
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1431
if (!work) {
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1435
work->type = IONIC_DW_TYPE_RX_MODE;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1437
ionic_lif_deferred_enqueue(lif, work);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1485
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1656
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1671
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1797
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1896
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1959
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
199
struct ionic_deferred_work *work;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
206
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
207
if (!work) {
drivers/net/ethernet/pensando/ionic/ionic_lif.c
212
work->type = IONIC_DW_TYPE_LINK_STATUS;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
213
ionic_lif_deferred_enqueue(lif, work);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
298
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3295
INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
340
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3542
cancel_work_sync(&lif->deferred.work);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
359
cancel_work_sync(&qcq->dim.work);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3616
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3661
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3808
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
55
static void ionic_dim_work(struct work_struct *work)
drivers/net/ethernet/pensando/ionic/ionic_lif.c
57
struct dim *dim = container_of(work, struct dim, work);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
724
INIT_WORK(&new->dim.work, ionic_dim_work);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
837
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
86
static void ionic_lif_deferred_work(struct work_struct *work)
drivers/net/ethernet/pensando/ionic/ionic_lif.c
88
struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
902
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_lif.h
120
struct work_struct work;
drivers/net/ethernet/pensando/ionic/ionic_lif.h
342
struct ionic_deferred_work *work);
drivers/net/ethernet/pensando/ionic/ionic_main.c
254
struct ionic_deferred_work *work;
drivers/net/ethernet/pensando/ionic/ionic_main.c
286
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/net/ethernet/pensando/ionic/ionic_main.c
287
if (!work) {
drivers/net/ethernet/pensando/ionic/ionic_main.c
291
work->type = IONIC_DW_TYPE_LIF_RESET;
drivers/net/ethernet/pensando/ionic/ionic_main.c
292
ionic_lif_deferred_enqueue(lif, work);
drivers/net/ethernet/pensando/ionic/ionic_main.c
333
complete_all(&ctx->work);
drivers/net/ethernet/pensando/ionic/ionic_main.c
436
remaining = wait_for_completion_timeout(&ctx->work,
drivers/net/ethernet/pensando/ionic/ionic_phc.c
318
ctx->work = COMPLETION_INITIALIZER_ONSTACK(ctx->work);
drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
267
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
308
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
40
ctx.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work);
drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
471
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
drivers/net/ethernet/qlogic/netxen/netxen_nic.h
1762
void netxen_watchdog_task(struct work_struct *work);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2220
static void netxen_tx_timeout_task(struct work_struct *work)
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2223
container_of(work, struct netxen_adapter, tx_timeout_task);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2489
netxen_attach_work(struct work_struct *work)
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2491
struct netxen_adapter *adapter = container_of(work,
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2492
struct netxen_adapter, fw_work.work);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2519
netxen_fwinit_work(struct work_struct *work)
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2521
struct netxen_adapter *adapter = container_of(work,
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2522
struct netxen_adapter, fw_work.work);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2584
netxen_detach_work(struct work_struct *work)
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2586
struct netxen_adapter *adapter = container_of(work,
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2587
struct netxen_adapter, fw_work.work);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2702
netxen_fw_poll_work(struct work_struct *work)
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2704
struct netxen_adapter *adapter = container_of(work,
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2705
struct netxen_adapter, fw_work.work);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
52
static void netxen_tx_timeout_task(struct work_struct *work);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
53
static void netxen_fw_poll_work(struct work_struct *work);
drivers/net/ethernet/qlogic/qed/qed_main.c
1170
static void qed_slowpath_task(struct work_struct *work)
drivers/net/ethernet/qlogic/qed/qed_main.c
1172
struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
drivers/net/ethernet/qlogic/qed/qed_main.c
1173
slowpath_task.work);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
5205
static void qed_iov_pf_task(struct work_struct *work)
drivers/net/ethernet/qlogic/qed/qed_sriov.c
5208
struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
drivers/net/ethernet/qlogic/qed/qed_sriov.c
5209
iov_task.work);
drivers/net/ethernet/qlogic/qed/qed_vf.c
1688
void qed_iov_vf_task(struct work_struct *work)
drivers/net/ethernet/qlogic/qed/qed_vf.c
1690
struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
drivers/net/ethernet/qlogic/qed/qed_vf.c
1691
iov_task.work);
drivers/net/ethernet/qlogic/qed/qed_vf.h
1058
void qed_iov_vf_task(struct work_struct *work);
drivers/net/ethernet/qlogic/qed/qed_vf.h
1246
static inline void qed_iov_vf_task(struct work_struct *work)
drivers/net/ethernet/qlogic/qede/qede_main.c
1061
static void qede_periodic_task(struct work_struct *work)
drivers/net/ethernet/qlogic/qede/qede_main.c
1063
struct qede_dev *edev = container_of(work, struct qede_dev,
drivers/net/ethernet/qlogic/qede/qede_main.c
1064
periodic_task.work);
drivers/net/ethernet/qlogic/qede/qede_main.c
1078
static void qede_sp_task(struct work_struct *work)
drivers/net/ethernet/qlogic/qede/qede_main.c
1080
struct qede_dev *edev = container_of(work, struct qede_dev,
drivers/net/ethernet/qlogic/qede/qede_main.c
1081
sp_task.work);
drivers/net/ethernet/qlogic/qede/qede_ptp.c
137
static void qede_ptp_task(struct work_struct *work)
drivers/net/ethernet/qlogic/qede/qede_ptp.c
146
ptp = container_of(work, struct qede_ptp, work);
drivers/net/ethernet/qlogic/qede/qede_ptp.c
16
struct work_struct work;
drivers/net/ethernet/qlogic/qede/qede_ptp.c
165
schedule_work(&ptp->work);
drivers/net/ethernet/qlogic/qede/qede_ptp.c
395
cancel_work_sync(&ptp->work);
drivers/net/ethernet/qlogic/qede/qede_ptp.c
430
INIT_WORK(&ptp->work, qede_ptp_task);
drivers/net/ethernet/qlogic/qede/qede_ptp.c
533
schedule_work(&ptp->work);
drivers/net/ethernet/qlogic/qede/qede_rdma.c
254
if (!work_pending(&event_node->work)) {
drivers/net/ethernet/qlogic/qede/qede_rdma.c
274
static void qede_rdma_handle_event(struct work_struct *work)
drivers/net/ethernet/qlogic/qede/qede_rdma.c
280
event_node = container_of(work, struct qede_rdma_event_work, work);
drivers/net/ethernet/qlogic/qede/qede_rdma.c
330
INIT_WORK(&event_node->work, qede_rdma_handle_event);
drivers/net/ethernet/qlogic/qede/qede_rdma.c
331
queue_work(edev->rdma_info.rdma_wq, &event_node->work);
drivers/net/ethernet/qlogic/qede/qede_rdma.c
58
cancel_work_sync(&event_node->work);
drivers/net/ethernet/qlogic/qla3xxx.c
1516
static void ql_link_state_machine_work(struct work_struct *work)
drivers/net/ethernet/qlogic/qla3xxx.c
1519
container_of(work, struct ql3_adapter, link_state_work.work);
drivers/net/ethernet/qlogic/qla3xxx.c
3607
static void ql_reset_work(struct work_struct *work)
drivers/net/ethernet/qlogic/qla3xxx.c
3610
container_of(work, struct ql3_adapter, reset_work.work);
drivers/net/ethernet/qlogic/qla3xxx.c
3707
static void ql_tx_timeout_work(struct work_struct *work)
drivers/net/ethernet/qlogic/qla3xxx.c
3710
container_of(work, struct ql3_adapter, tx_timeout_work.work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
1089
struct work_struct work;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
1033
static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
1037
adapter = container_of(work, struct qlcnic_adapter, mbx_poll_work.work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
4001
cancel_work_sync(&mbx->work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
4023
queue_work(mbx->work_q, &mbx->work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
4097
static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
4099
struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
4100
work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
4186
INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
933
void qlcnic_83xx_idc_aen_work(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
939
adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
1105
void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
1110
adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
2530
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
523
static void qlcnic_dcb_aen_work(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
527
dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
217
void qlcnic_fw_poll_work(struct work_struct *work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
3343
qlcnic_fwinit_work(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
3345
struct qlcnic_adapter *adapter = container_of(work,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
3346
struct qlcnic_adapter, fw_work.work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
3447
qlcnic_detach_work(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
3449
struct qlcnic_adapter *adapter = container_of(work,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
3450
struct qlcnic_adapter, fw_work.work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
3594
qlcnic_attach_work(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
3596
struct qlcnic_adapter *adapter = container_of(work,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
3597
struct qlcnic_adapter, fw_work.work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
3717
void qlcnic_fw_poll_work(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
3719
struct qlcnic_adapter *adapter = container_of(work,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
3720
struct qlcnic_adapter, fw_work.work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
450
if (!adapter->fw_work.work.func)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
58
static void qlcnic_attach_work(struct work_struct *work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
59
static void qlcnic_fwinit_work(struct work_struct *work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1050
static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1052
struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1602
static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1610
bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1905
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1911
adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
31
static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
1686
static void qlcnic_sriov_pf_process_flr(struct work_struct *work)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
1690
vf = container_of(work, struct qlcnic_vf_info, flr_work);
drivers/net/ethernet/qualcomm/emac/emac.c
387
static void emac_work_thread(struct work_struct *work)
drivers/net/ethernet/qualcomm/emac/emac.c
390
container_of(work, struct emac_adapter, work_thread);
drivers/net/ethernet/qualcomm/qca_uart.c
112
static void qcauart_transmit(struct work_struct *work)
drivers/net/ethernet/qualcomm/qca_uart.c
114
struct qcauart *qca = container_of(work, struct qcauart, tx_work);
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
549
static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
554
port = container_of(work, struct rmnet_port, agg_wq);
drivers/net/ethernet/realtek/8139cp.c
973
unsigned work = 1000;
drivers/net/ethernet/realtek/8139cp.c
977
while (work--) {
drivers/net/ethernet/realtek/8139too.c
1593
static void rtl8139_thread (struct work_struct *work)
drivers/net/ethernet/realtek/8139too.c
1596
container_of(work, struct rtl8139_private, thread.work);
drivers/net/ethernet/realtek/8139too.c
1607
rtl8139_tx_timeout_task(work);
drivers/net/ethernet/realtek/8139too.c
1639
static void rtl8139_tx_timeout_task (struct work_struct *work)
drivers/net/ethernet/realtek/8139too.c
1642
container_of(work, struct rtl8139_private, thread.work);
drivers/net/ethernet/realtek/8139too.c
657
static void rtl8139_thread (struct work_struct *work);
drivers/net/ethernet/realtek/8139too.c
658
static void rtl8139_tx_timeout_task(struct work_struct *work);
drivers/net/ethernet/realtek/r8169_main.c
2530
if (!schedule_work(&tp->wk.work))
drivers/net/ethernet/realtek/r8169_main.c
4876
static void rtl_task(struct work_struct *work)
drivers/net/ethernet/realtek/r8169_main.c
4879
container_of(work, struct rtl8169_private, wk.work);
drivers/net/ethernet/realtek/r8169_main.c
4999
disable_work_sync(&tp->wk.work);
drivers/net/ethernet/realtek/r8169_main.c
5032
enable_work(&tp->wk.work);
drivers/net/ethernet/realtek/r8169_main.c
5278
disable_work_sync(&tp->wk.work);
drivers/net/ethernet/realtek/r8169_main.c
5705
INIT_WORK(&tp->wk.work, rtl_task);
drivers/net/ethernet/realtek/r8169_main.c
5706
disable_work(&tp->wk.work);
drivers/net/ethernet/realtek/r8169_main.c
752
struct work_struct work;
drivers/net/ethernet/renesas/ravb.h
1118
struct work_struct work;
drivers/net/ethernet/renesas/ravb_main.c
2008
schedule_work(&priv->work);
drivers/net/ethernet/renesas/ravb_main.c
2011
static void ravb_tx_timeout_work(struct work_struct *work)
drivers/net/ethernet/renesas/ravb_main.c
2013
struct ravb_private *priv = container_of(work, struct ravb_private,
drivers/net/ethernet/renesas/ravb_main.c
2014
work);
drivers/net/ethernet/renesas/ravb_main.c
2021
schedule_work(&priv->work);
drivers/net/ethernet/renesas/ravb_main.c
2397
cancel_work_sync(&priv->work);
drivers/net/ethernet/renesas/ravb_main.c
2993
INIT_WORK(&priv->work, ravb_tx_timeout_work);
drivers/net/ethernet/rocker/rocker_main.c
2104
struct work_struct work;
drivers/net/ethernet/rocker/rocker_main.c
2113
static void rocker_router_fib_event_work(struct work_struct *work)
drivers/net/ethernet/rocker/rocker_main.c
2116
container_of(work, struct rocker_fib_event_work, work);
drivers/net/ethernet/rocker/rocker_main.c
2161
INIT_WORK(&fib_work->work, rocker_router_fib_event_work);
drivers/net/ethernet/rocker/rocker_main.c
2196
queue_work(rocker->rocker_owq, &fib_work->work);
drivers/net/ethernet/rocker/rocker_main.c
2692
struct work_struct work;
drivers/net/ethernet/rocker/rocker_main.c
2711
static void rocker_switchdev_event_work(struct work_struct *work)
drivers/net/ethernet/rocker/rocker_main.c
2714
container_of(work, struct rocker_switchdev_event_work, work);
drivers/net/ethernet/rocker/rocker_main.c
2768
INIT_WORK(&switchdev_work->work, rocker_switchdev_event_work);
drivers/net/ethernet/rocker/rocker_main.c
2794
&switchdev_work->work);
drivers/net/ethernet/rocker/rocker_ofdpa.c
1812
struct work_struct work;
drivers/net/ethernet/rocker/rocker_ofdpa.c
1819
static void ofdpa_port_fdb_learn_work(struct work_struct *work)
drivers/net/ethernet/rocker/rocker_ofdpa.c
1822
container_of(work, struct ofdpa_fdb_learn_work, work);
drivers/net/ethernet/rocker/rocker_ofdpa.c
1837
kfree(work);
drivers/net/ethernet/rocker/rocker_ofdpa.c
1873
INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);
drivers/net/ethernet/rocker/rocker_ofdpa.c
1880
schedule_work(&lw->work);
drivers/net/ethernet/sfc/efx_common.c
324
monitor_work.work);
drivers/net/ethernet/sfc/falcon/efx.c
1952
monitor_work.work);
drivers/net/ethernet/sfc/falcon/selftest.c
800
selftest_work.work);
drivers/net/ethernet/sfc/mae.c
203
void efx_mae_counters_grant_credits(struct work_struct *work)
drivers/net/ethernet/sfc/mae.c
206
struct efx_rx_queue *rx_queue = container_of(work, struct efx_rx_queue,
drivers/net/ethernet/sfc/mae.h
66
void efx_mae_counters_grant_credits(struct work_struct *work);
drivers/net/ethernet/sfc/net_driver.h
842
struct work_struct work;
drivers/net/ethernet/sfc/ptp.c
1485
static void efx_ptp_pps_worker(struct work_struct *work)
drivers/net/ethernet/sfc/ptp.c
1488
container_of(work, struct efx_ptp_data, pps_work);
drivers/net/ethernet/sfc/ptp.c
1500
static void efx_ptp_worker(struct work_struct *work)
drivers/net/ethernet/sfc/ptp.c
1503
container_of(work, struct efx_ptp_data, work);
drivers/net/ethernet/sfc/ptp.c
1524
static void efx_ptp_cleanup_worker(struct work_struct *work)
drivers/net/ethernet/sfc/ptp.c
1527
container_of(work, struct efx_ptp_data, cleanup_work.work);
drivers/net/ethernet/sfc/ptp.c
1596
INIT_WORK(&ptp->work, efx_ptp_worker);
drivers/net/ethernet/sfc/ptp.c
1682
cancel_work_sync(&efx->ptp_data->work);
drivers/net/ethernet/sfc/ptp.c
1780
queue_work(ptp->workwq, &ptp->work);
drivers/net/ethernet/sfc/ptp.c
1798
queue_work(ptp->workwq, &ptp->work);
drivers/net/ethernet/sfc/ptp.c
1922
queue_work(ptp->workwq, &ptp->work);
drivers/net/ethernet/sfc/ptp.c
299
struct work_struct work;
drivers/net/ethernet/sfc/rx_common.c
815
work);
drivers/net/ethernet/sfc/rx_common.c
989
INIT_WORK(&req->work, efx_filter_rfs_work);
drivers/net/ethernet/sfc/rx_common.c
992
schedule_work(&req->work);
drivers/net/ethernet/sfc/selftest.c
793
selftest_work.work);
drivers/net/ethernet/sfc/siena/efx_common.c
328
monitor_work.work);
drivers/net/ethernet/sfc/siena/net_driver.h
764
struct work_struct work;
drivers/net/ethernet/sfc/siena/ptp.c
1382
static void efx_ptp_pps_worker(struct work_struct *work)
drivers/net/ethernet/sfc/siena/ptp.c
1385
container_of(work, struct efx_ptp_data, pps_work);
drivers/net/ethernet/sfc/siena/ptp.c
1397
static void efx_ptp_worker(struct work_struct *work)
drivers/net/ethernet/sfc/siena/ptp.c
1400
container_of(work, struct efx_ptp_data, work);
drivers/net/ethernet/sfc/siena/ptp.c
1475
INIT_WORK(&ptp->work, efx_ptp_worker);
drivers/net/ethernet/sfc/siena/ptp.c
1564
cancel_work_sync(&efx->ptp_data->work);
drivers/net/ethernet/sfc/siena/ptp.c
1694
queue_work(ptp->workwq, &ptp->work);
drivers/net/ethernet/sfc/siena/ptp.c
1712
queue_work(ptp->workwq, &ptp->work);
drivers/net/ethernet/sfc/siena/ptp.c
1833
queue_work(ptp->workwq, &ptp->work);
drivers/net/ethernet/sfc/siena/ptp.c
1872
queue_work(ptp->workwq, &ptp->work);
drivers/net/ethernet/sfc/siena/ptp.c
295
struct work_struct work;
drivers/net/ethernet/sfc/siena/rx_common.c
805
work);
drivers/net/ethernet/sfc/siena/rx_common.c
980
INIT_WORK(&req->work, efx_filter_rfs_work);
drivers/net/ethernet/sfc/siena/rx_common.c
983
schedule_work(&req->work);
drivers/net/ethernet/sfc/siena/selftest.c
798
selftest_work.work);
drivers/net/ethernet/sfc/siena/siena_sriov.c
1009
static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
drivers/net/ethernet/sfc/siena/siena_sriov.c
1011
struct siena_vf *vf = container_of(work, struct siena_vf, req);
drivers/net/ethernet/sfc/siena/siena_sriov.c
872
static void efx_siena_sriov_vfdi(struct work_struct *work)
drivers/net/ethernet/sfc/siena/siena_sriov.c
874
struct siena_vf *vf = container_of(work, struct siena_vf, req);
drivers/net/ethernet/sfc/tc_counters.c
143
INIT_WORK(&cnt->work, efx_tc_counter_work);
drivers/net/ethernet/sfc/tc_counters.c
192
flush_work(&cnt->work);
drivers/net/ethernet/sfc/tc_counters.c
364
schedule_work(&cnt->work);
drivers/net/ethernet/sfc/tc_counters.c
42
flush_work(&cnt->work);
drivers/net/ethernet/sfc/tc_counters.c
87
static void efx_tc_counter_work(struct work_struct *work)
drivers/net/ethernet/sfc/tc_counters.c
89
struct efx_tc_counter *cnt = container_of(work, struct efx_tc_counter, work);
drivers/net/ethernet/sfc/tc_counters.h
35
struct work_struct work; /* For notifying encap actions */
drivers/net/ethernet/sfc/tc_encap_actions.c
202
INIT_WORK(&neigh->work, efx_neigh_update);
drivers/net/ethernet/sfc/tc_encap_actions.c
506
static void efx_neigh_update(struct work_struct *work)
drivers/net/ethernet/sfc/tc_encap_actions.c
508
struct efx_neigh_binder *neigh = container_of(work, struct efx_neigh_binder, work);
drivers/net/ethernet/sfc/tc_encap_actions.c
576
if (!schedule_work(&neigh->work))
drivers/net/ethernet/sfc/tc_encap_actions.c
84
static void efx_neigh_update(struct work_struct *work);
drivers/net/ethernet/sfc/tc_encap_actions.h
63
struct work_struct work;
drivers/net/ethernet/sis/sis190.c
916
static void sis190_phy_task(struct work_struct *work)
drivers/net/ethernet/sis/sis190.c
919
container_of(work, struct sis190_private, phy_task);
drivers/net/ethernet/smsc/smc91x.c
1024
static void smc_phy_configure(struct work_struct *work)
drivers/net/ethernet/smsc/smc91x.c
1027
container_of(work, struct smc_local, phy_configure);
drivers/net/ethernet/spacemit/k1_emac.c
1397
static void emac_tx_timeout_task(struct work_struct *work)
drivers/net/ethernet/spacemit/k1_emac.c
1402
priv = container_of(work, struct emac_priv, tx_timeout_task);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7320
static void stmmac_service_task(struct work_struct *work)
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7322
struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
drivers/net/ethernet/sun/cassini.c
3945
static void cas_reset_task(struct work_struct *work)
drivers/net/ethernet/sun/cassini.c
3947
struct cas *cp = container_of(work, struct cas, reset_task);
drivers/net/ethernet/sun/niu.c
6497
static void niu_reset_task(struct work_struct *work)
drivers/net/ethernet/sun/niu.c
6499
struct niu *np = container_of(work, struct niu, reset_task);
drivers/net/ethernet/sun/sungem.c
2213
static void gem_reset_task(struct work_struct *work)
drivers/net/ethernet/sun/sungem.c
2215
struct gem *gp = container_of(work, struct gem, reset_task);
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
627
static void xlgmac_restart(struct work_struct *work)
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
629
struct xlgmac_pdata *pdata = container_of(work,
drivers/net/ethernet/ti/am65-cpsw-switchdev.c
19
struct work_struct work;
drivers/net/ethernet/ti/am65-cpsw-switchdev.c
366
static void am65_cpsw_switchdev_event_work(struct work_struct *work)
drivers/net/ethernet/ti/am65-cpsw-switchdev.c
369
container_of(work, struct am65_cpsw_switchdev_event_work, work);
drivers/net/ethernet/ti/am65-cpsw-switchdev.c
442
INIT_WORK(&switchdev_work->work, am65_cpsw_switchdev_event_work);
drivers/net/ethernet/ti/am65-cpsw-switchdev.c
463
queue_work(system_long_wq, &switchdev_work->work);
drivers/net/ethernet/ti/cpsw.c
308
static void cpsw_ndo_set_rx_mode_work(struct work_struct *work)
drivers/net/ethernet/ti/cpsw.c
310
struct cpsw_priv *priv = container_of(work, struct cpsw_priv, rx_mode_work);
drivers/net/ethernet/ti/cpsw_new.c
251
static void cpsw_ndo_set_rx_mode_work(struct work_struct *work)
drivers/net/ethernet/ti/cpsw_new.c
253
struct cpsw_priv *priv = container_of(work, struct cpsw_priv, rx_mode_work);
drivers/net/ethernet/ti/cpsw_switchdev.c
21
struct work_struct work;
drivers/net/ethernet/ti/cpsw_switchdev.c
376
static void cpsw_switchdev_event_work(struct work_struct *work)
drivers/net/ethernet/ti/cpsw_switchdev.c
379
container_of(work, struct cpsw_switchdev_event_work, work);
drivers/net/ethernet/ti/cpsw_switchdev.c
452
INIT_WORK(&switchdev_work->work, cpsw_switchdev_event_work);
drivers/net/ethernet/ti/cpsw_switchdev.c
473
queue_work(system_long_wq, &switchdev_work->work);
drivers/net/ethernet/ti/icssg/icss_iep.c
527
static void icss_iep_cap_cmp_work(struct work_struct *work)
drivers/net/ethernet/ti/icssg/icss_iep.c
529
struct icss_iep *iep = container_of(work, struct icss_iep, work);
drivers/net/ethernet/ti/icssg/icss_iep.c
571
schedule_work(&iep->work);
drivers/net/ethernet/ti/icssg/icss_iep.c
610
cancel_work_sync(&iep->work);
drivers/net/ethernet/ti/icssg/icss_iep.c
868
INIT_WORK(&iep->work, icss_iep_cap_cmp_work);
drivers/net/ethernet/ti/icssg/icss_iep.h
84
struct work_struct work;
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1056
static void emac_ndo_set_rx_mode_work(struct work_struct *work)
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1058
struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
drivers/net/ethernet/ti/icssg/icssg_prueth.c
983
queue_work(system_long_wq, &emac->stats_work.work);
drivers/net/ethernet/ti/icssg/icssg_prueth.h
457
void icssg_stats_work_handler(struct work_struct *work);
drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
605
queue_work(system_long_wq, &emac->stats_work.work);
drivers/net/ethernet/ti/icssg/icssg_stats.c
66
void icssg_stats_work_handler(struct work_struct *work)
drivers/net/ethernet/ti/icssg/icssg_stats.c
68
struct prueth_emac *emac = container_of(work, struct prueth_emac,
drivers/net/ethernet/ti/icssg/icssg_stats.c
69
stats_work.work);
drivers/net/ethernet/ti/icssg/icssg_switchdev.c
125
static void prueth_switchdev_event_work(struct work_struct *work)
drivers/net/ethernet/ti/icssg/icssg_switchdev.c
128
container_of(work, struct prueth_switchdev_event_work, work);
drivers/net/ethernet/ti/icssg/icssg_switchdev.c
20
struct work_struct work;
drivers/net/ethernet/ti/icssg/icssg_switchdev.c
200
INIT_WORK(&switchdev_work->work, prueth_switchdev_event_work);
drivers/net/ethernet/ti/icssg/icssg_switchdev.c
221
queue_work(system_long_wq, &switchdev_work->work);
drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c
26
struct work_struct work;
drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c
786
static void icssm_prueth_sw_fdb_work(struct work_struct *work)
drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c
789
container_of(work, struct icssm_prueth_sw_fdb_work, work);
drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c
823
INIT_WORK(&fdb_work->work, icssm_prueth_sw_fdb_work);
drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c
830
queue_work(system_long_wq, &fdb_work->work);
drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c
842
INIT_WORK(&fdb_work->work, icssm_prueth_sw_fdb_work);
drivers/net/ethernet/ti/icssm/icssm_prueth_switch.c
848
queue_work(system_long_wq, &fdb_work->work);
drivers/net/ethernet/ti/icssm/icssm_switchdev.c
174
INIT_WORK(&switchdev_work->work, icssm_sw_event_work);
drivers/net/ethernet/ti/icssm/icssm_switchdev.c
195
queue_work(system_long_wq, &switchdev_work->work);
drivers/net/ethernet/ti/icssm/icssm_switchdev.c
21
struct work_struct work;
drivers/net/ethernet/ti/icssm/icssm_switchdev.c
96
static void icssm_sw_event_work(struct work_struct *work)
drivers/net/ethernet/ti/icssm/icssm_switchdev.c
99
container_of(work, struct icssm_sw_event_work, work);
drivers/net/ethernet/ti/tlan.c
1004
static void tlan_tx_timeout_work(struct work_struct *work)
drivers/net/ethernet/ti/tlan.c
1007
container_of(work, struct tlan_priv, tlan_tqueue);
drivers/net/ethernet/ti/tlan.c
165
static void tlan_tx_timeout_work(struct work_struct *work);
drivers/net/ethernet/toshiba/ps3_gelic_net.c
1419
static void gelic_net_tx_timeout_task(struct work_struct *work)
drivers/net/ethernet/toshiba/ps3_gelic_net.c
1422
container_of(work, struct gelic_card, tx_timeout_task);
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
142
static void gelic_eurus_sync_cmd_worker(struct work_struct *work)
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
151
cmd = container_of(work, struct gelic_eurus_cmd, work);
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
2118
static void gelic_wl_event_worker(struct work_struct *work)
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
2126
wl = container_of(work, struct gelic_wl_info, event_work.work);
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
2161
static void gelic_wl_assoc_worker(struct work_struct *work)
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
2171
wl = container_of(work, struct gelic_wl_info, assoc_work.work);
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
221
INIT_WORK(&cmd->work, gelic_eurus_sync_cmd_worker);
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
223
queue_work(wl->eurus_cmd_queue, &cmd->work);
drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
294
struct work_struct work;
drivers/net/ethernet/toshiba/tc35815.c
1171
static void tc35815_restart_work(struct work_struct *work)
drivers/net/ethernet/toshiba/tc35815.c
1174
container_of(work, struct tc35815_local, restart_work);
drivers/net/ethernet/toshiba/tc35815.c
500
static void tc35815_restart_work(struct work_struct *work);
drivers/net/ethernet/vertexcom/mse102x.c
436
static void mse102x_tx_work(struct work_struct *work)
drivers/net/ethernet/vertexcom/mse102x.c
445
mses = container_of(work, struct mse102x_net_spi, tx_work);
drivers/net/ethernet/via/via-rhine.c
1720
static void rhine_reset_task(struct work_struct *work)
drivers/net/ethernet/via/via-rhine.c
1722
struct rhine_private *rp = container_of(work, struct rhine_private,
drivers/net/ethernet/via/via-rhine.c
2186
static void rhine_slow_event_task(struct work_struct *work)
drivers/net/ethernet/via/via-rhine.c
2189
container_of(work, struct rhine_private, slow_event_task);
drivers/net/ethernet/via/via-rhine.c
501
static void rhine_reset_task(struct work_struct *work);
drivers/net/ethernet/via/via-rhine.c
502
static void rhine_slow_event_task(struct work_struct *work);
drivers/net/ethernet/wangxun/libwx/wx_lib.c
1707
static void wx_rx_dim_work(struct work_struct *work)
drivers/net/ethernet/wangxun/libwx/wx_lib.c
1709
struct dim *dim = container_of(work, struct dim, work);
drivers/net/ethernet/wangxun/libwx/wx_lib.c
1725
static void wx_tx_dim_work(struct work_struct *work)
drivers/net/ethernet/wangxun/libwx/wx_lib.c
1727
struct dim *dim = container_of(work, struct dim, work);
drivers/net/ethernet/wangxun/libwx/wx_lib.c
1751
INIT_WORK(&q_vector->rx.dim.work, wx_rx_dim_work);
drivers/net/ethernet/wangxun/libwx/wx_lib.c
1752
INIT_WORK(&q_vector->tx.dim.work, wx_tx_dim_work);
drivers/net/ethernet/wangxun/libwx/wx_lib.c
1768
disable_work_sync(&q_vector->rx.dim.work);
drivers/net/ethernet/wangxun/libwx/wx_lib.c
1769
disable_work_sync(&q_vector->tx.dim.work);
drivers/net/ethernet/wangxun/libwx/wx_vf_common.c
399
static void wxvf_service_task(struct work_struct *work)
drivers/net/ethernet/wangxun/libwx/wx_vf_common.c
401
struct wx *wx = container_of(work, struct wx, service_task);
drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
127
static void txgbe_service_task(struct work_struct *work)
drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
129
struct wx *wx = container_of(work, struct wx, service_task);
drivers/net/ethernet/wiznet/w5100.c
780
static void w5100_restart_work(struct work_struct *work)
drivers/net/ethernet/wiznet/w5100.c
782
struct w5100_priv *priv = container_of(work, struct w5100_priv,
drivers/net/ethernet/wiznet/w5100.c
813
static void w5100_tx_work(struct work_struct *work)
drivers/net/ethernet/wiznet/w5100.c
815
struct w5100_priv *priv = container_of(work, struct w5100_priv,
drivers/net/ethernet/wiznet/w5100.c
879
static void w5100_rx_work(struct work_struct *work)
drivers/net/ethernet/wiznet/w5100.c
881
struct w5100_priv *priv = container_of(work, struct w5100_priv,
drivers/net/ethernet/wiznet/w5100.c
958
static void w5100_setrx_work(struct work_struct *work)
drivers/net/ethernet/wiznet/w5100.c
960
struct w5100_priv *priv = container_of(work, struct w5100_priv,
drivers/net/ethernet/xilinx/ll_temac_main.c
1106
static void ll_temac_restart_work_func(struct work_struct *work)
drivers/net/ethernet/xilinx/ll_temac_main.c
1108
struct temac_local *lp = container_of(work, struct temac_local,
drivers/net/ethernet/xilinx/ll_temac_main.c
1109
restart_work.work);
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
1455
static void axienet_dma_err_handler(struct work_struct *work);
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
1709
cancel_work_sync(&lp->rx_dim.work);
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
1739
cancel_work_sync(&lp->rx_dim.work);
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
2128
static void axienet_rx_dim_work(struct work_struct *work)
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
2131
container_of(work, struct axienet_local, rx_dim.work);
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
2260
flush_work(&lp->rx_dim.work);
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
2679
static void axienet_dma_err_handler(struct work_struct *work)
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
2684
struct axienet_local *lp = container_of(work, struct axienet_local,
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
3044
INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
579
static void axienet_refresh_stats(struct work_struct *work)
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
581
struct axienet_local *lp = container_of(work, struct axienet_local,
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
582
stats_work.work);
drivers/net/ethernet/xircom/xirc2ps_cs.c
1193
xirc2ps_tx_timeout_task(struct work_struct *work)
drivers/net/ethernet/xircom/xirc2ps_cs.c
1196
container_of(work, struct local_info, tx_timeout_task);
drivers/net/ethernet/xircom/xirc2ps_cs.c
292
static void xirc2ps_tx_timeout_task(struct work_struct *work);
drivers/net/fjes/fjes_hw.c
1176
static void fjes_hw_epstop_task(struct work_struct *work)
drivers/net/fjes/fjes_hw.c
1178
struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
drivers/net/fjes/fjes_hw.c
986
static void fjes_hw_update_zone_task(struct work_struct *work)
drivers/net/fjes/fjes_hw.c
988
struct fjes_hw *hw = container_of(work,
drivers/net/fjes/fjes_main.c
1060
static void fjes_force_close_task(struct work_struct *work)
drivers/net/fjes/fjes_main.c
1062
struct fjes_adapter *adapter = container_of(work,
drivers/net/fjes/fjes_main.c
1071
static void fjes_tx_stall_task(struct work_struct *work)
drivers/net/fjes/fjes_main.c
1073
struct fjes_adapter *adapter = container_of(work,
drivers/net/fjes/fjes_main.c
1127
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
drivers/net/fjes/fjes_main.c
1129
struct fjes_adapter *adapter = container_of(work,
drivers/net/fjes/fjes_main.c
1177
static void fjes_watch_unshare_task(struct work_struct *work)
drivers/net/fjes/fjes_main.c
1180
container_of(work, struct fjes_adapter, unshare_watch_task);
drivers/net/fjes/fjes_main.c
1311
static void fjes_irq_watch_task(struct work_struct *work)
drivers/net/fjes/fjes_main.c
1313
struct fjes_adapter *adapter = container_of(to_delayed_work(work),
drivers/net/hamradio/baycom_epp.c
631
static void epp_bh(struct work_struct *work)
drivers/net/hamradio/baycom_epp.c
641
bc = container_of(work, struct baycom_state, run_work.work);
drivers/net/hyperv/netvsc_drv.c
2004
container_of(w, struct net_device_context, dwork.work);
drivers/net/hyperv/netvsc_drv.c
2220
= container_of(w, struct net_device_context, vf_takeover.work);
drivers/net/hyperv/netvsc_drv.c
2804
container_of(w, struct net_device_context, vfns_work.work);
drivers/net/ieee802154/adf7242.c
1263
INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
drivers/net/ieee802154/adf7242.c
1316
cancel_delayed_work_sync(&lp->work);
drivers/net/ieee802154/adf7242.c
277
struct delayed_work work;
drivers/net/ieee802154/adf7242.c
579
mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
drivers/net/ieee802154/adf7242.c
584
static void adf7242_rx_cal_work(struct work_struct *work)
drivers/net/ieee802154/adf7242.c
587
container_of(work, struct adf7242_local, work.work);
drivers/net/ieee802154/adf7242.c
714
cancel_delayed_work_sync(&lp->work);
drivers/net/ieee802154/adf7242.c
838
cancel_delayed_work_sync(&lp->work);
drivers/net/ieee802154/adf7242.c
979
mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
drivers/net/ieee802154/atusb.c
1070
cancel_delayed_work_sync(&atusb->work);
drivers/net/ieee802154/atusb.c
167
static void atusb_work_urbs(struct work_struct *work)
drivers/net/ieee802154/atusb.c
170
container_of(to_delayed_work(work), struct atusb, work);
drivers/net/ieee802154/atusb.c
188
schedule_delayed_work(&atusb->work,
drivers/net/ieee802154/atusb.c
295
schedule_delayed_work(&atusb->work, 0);
drivers/net/ieee802154/atusb.c
423
schedule_delayed_work(&atusb->work, 0);
drivers/net/ieee802154/atusb.c
53
struct delayed_work work; /* memory allocations */
drivers/net/ieee802154/atusb.c
969
INIT_DELAYED_WORK(&atusb->work, atusb_work_urbs);
drivers/net/ieee802154/ca8210.c
342
struct work_struct work;
drivers/net/ieee802154/ca8210.c
659
static void ca8210_mlme_reset_worker(struct work_struct *work)
drivers/net/ieee802154/ca8210.c
662
work,
drivers/net/ieee802154/ca8210.c
664
work
drivers/net/ieee802154/ca8210.c
727
&mlme_reset_wpc->work,
drivers/net/ieee802154/ca8210.c
731
queue_work(priv->mlme_workqueue, &mlme_reset_wpc->work);
drivers/net/ieee802154/cc2520.c
870
static void cc2520_fifop_irqwork(struct work_struct *work)
drivers/net/ieee802154/cc2520.c
873
= container_of(work, struct cc2520_private, fifop_irqwork);
drivers/net/ipa/ipa_endpoint.c
1394
static void ipa_endpoint_replenish_work(struct work_struct *work)
drivers/net/ipa/ipa_endpoint.c
1396
struct delayed_work *dwork = to_delayed_work(work);
drivers/net/ipa/ipa_modem.c
271
static void ipa_modem_wake_queue_work(struct work_struct *work)
drivers/net/ipa/ipa_modem.c
273
struct ipa_priv *priv = container_of(work, struct ipa_priv, work);
drivers/net/ipa/ipa_modem.c
295
(void)queue_pm_work(&priv->work);
drivers/net/ipa/ipa_modem.c
325
INIT_WORK(&priv->work, ipa_modem_wake_queue_work);
drivers/net/ipa/ipa_modem.c
372
cancel_work_sync(&priv->work);
drivers/net/ipa/ipa_modem.c
48
struct work_struct work;
drivers/net/ipa/ipa_qmi.c
401
static void ipa_client_init_driver_work(struct work_struct *work)
drivers/net/ipa/ipa_qmi.c
412
ipa_qmi = container_of(work, struct ipa_qmi, init_driver_work);
drivers/net/ipvlan/ipvlan.h
157
void ipvlan_process_multicast(struct work_struct *work);
drivers/net/ipvlan/ipvlan_core.c
238
void ipvlan_process_multicast(struct work_struct *work)
drivers/net/ipvlan/ipvlan_core.c
240
struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
drivers/net/mctp/mctp-serial.c
108
static void mctp_serial_tx_work(struct work_struct *work)
drivers/net/mctp/mctp-serial.c
110
struct mctp_serial *dev = container_of(work, struct mctp_serial,
drivers/net/mctp/mctp-usb.c
246
static void mctp_usb_rx_retry_work(struct work_struct *work)
drivers/net/mctp/mctp-usb.c
248
struct mctp_usb *mctp_usb = container_of(work, struct mctp_usb,
drivers/net/mctp/mctp-usb.c
249
rx_retry_work.work);
drivers/net/mhi_net.c
268
static void mhi_net_rx_refill_work(struct work_struct *work)
drivers/net/mhi_net.c
270
struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
drivers/net/mhi_net.c
271
rx_refill.work);
drivers/net/netconsole.c
306
static void process_resume_target(struct work_struct *work)
drivers/net/netconsole.c
311
nt = container_of(work, struct netconsole_target, resume_wq);
drivers/net/netdevsim/dev.c
899
static void nsim_dev_trap_report_work(struct work_struct *work)
drivers/net/netdevsim/dev.c
905
nsim_trap_data = container_of(work, struct nsim_trap_data,
drivers/net/netdevsim/dev.c
906
trap_report_dw.work);
drivers/net/netdevsim/fib.c
1478
static void nsim_fib_event_work(struct work_struct *work)
drivers/net/netdevsim/fib.c
1480
struct nsim_fib_data *data = container_of(work, struct nsim_fib_data,
drivers/net/netdevsim/fib.c
1501
static void nsim_fib_flush_work(struct work_struct *work)
drivers/net/netdevsim/fib.c
1503
struct nsim_fib_data *data = container_of(work, struct nsim_fib_data,
drivers/net/netdevsim/hwstats.c
42
static void nsim_dev_hwstats_traffic_work(struct work_struct *work)
drivers/net/netdevsim/hwstats.c
46
hwstats = container_of(work, struct nsim_dev_hwstats, traffic_dw.work);
drivers/net/netdevsim/psample.c
111
static void nsim_dev_psample_report_work(struct work_struct *work)
drivers/net/netdevsim/psample.c
118
psample = container_of(work, struct nsim_dev_psample, psample_dw.work);
drivers/net/ovpn/peer.c
1336
void ovpn_peer_keepalive_work(struct work_struct *work)
drivers/net/ovpn/peer.c
1338
struct ovpn_priv *ovpn = container_of(work, struct ovpn_priv,
drivers/net/ovpn/peer.c
1339
keepalive_work.work);
drivers/net/ovpn/peer.c
74
static void ovpn_peer_keepalive_send(struct work_struct *work)
drivers/net/ovpn/peer.c
76
struct ovpn_peer *peer = container_of(work, struct ovpn_peer,
drivers/net/ovpn/peer.h
156
void ovpn_peer_keepalive_work(struct work_struct *work);
drivers/net/ovpn/socket.h
41
struct work_struct work;
drivers/net/ovpn/tcp.c
315
void ovpn_tcp_tx_work(struct work_struct *work)
drivers/net/ovpn/tcp.c
319
sock = container_of(work, struct ovpn_socket, tcp_tx_work);
drivers/net/ovpn/tcp.c
499
static void ovpn_tcp_peer_del_work(struct work_struct *work)
drivers/net/ovpn/tcp.c
501
struct ovpn_peer *peer = container_of(work, struct ovpn_peer,
drivers/net/ovpn/tcp.h
35
void ovpn_tcp_tx_work(struct work_struct *work);
drivers/net/phy/bcm-phy-ptp.c
539
container_of(pin_work, struct bcm_ptp_private, pin_work.work);
drivers/net/phy/bcm-phy-ptp.c
632
container_of(pin_work, struct bcm_ptp_private, pin_work.work);
drivers/net/phy/dp83640.c
1276
static void rx_timestamp_work(struct work_struct *work)
drivers/net/phy/dp83640.c
1279
container_of(work, struct dp83640_private, ts_work.work);
drivers/net/phy/dp83640.c
213
static void rx_timestamp_work(struct work_struct *work);
drivers/net/phy/nxp-tja11xx.c
551
static void tja1102_p1_register(struct work_struct *work)
drivers/net/phy/nxp-tja11xx.c
553
struct tja11xx_priv *priv = container_of(work, struct tja11xx_priv,
drivers/net/phy/phy.c
1621
void phy_state_machine(struct work_struct *work)
drivers/net/phy/phy.c
1623
struct delayed_work *dwork = to_delayed_work(work);
drivers/net/phy/sfp.c
1688
static void sfp_hwmon_probe(struct work_struct *work)
drivers/net/phy/sfp.c
1690
struct sfp *sfp = container_of(work, struct sfp, hwmon_probe.work);
drivers/net/phy/sfp.c
2973
static void sfp_timeout(struct work_struct *work)
drivers/net/phy/sfp.c
2975
struct sfp *sfp = container_of(work, struct sfp, timeout.work);
drivers/net/phy/sfp.c
3026
static void sfp_poll(struct work_struct *work)
drivers/net/phy/sfp.c
3028
struct sfp *sfp = container_of(work, struct sfp, poll.work);
drivers/net/plip/plip.c
138
static void plip_kick_bh(struct work_struct *work);
drivers/net/plip/plip.c
139
static void plip_bh(struct work_struct *work);
drivers/net/plip/plip.c
140
static void plip_timer_bh(struct work_struct *work);
drivers/net/plip/plip.c
322
plip_kick_bh(struct work_struct *work)
drivers/net/plip/plip.c
325
container_of(work, struct net_local, deferred.work);
drivers/net/plip/plip.c
366
plip_bh(struct work_struct *work)
drivers/net/plip/plip.c
368
struct net_local *nl = container_of(work, struct net_local, immediate);
drivers/net/plip/plip.c
384
plip_timer_bh(struct work_struct *work)
drivers/net/plip/plip.c
387
container_of(work, struct net_local, timer.work);
drivers/net/ppp/pppoe.c
455
static void pppoe_unbind_sock_work(struct work_struct *work)
drivers/net/ppp/pppoe.c
457
struct pppox_sock *po = container_of(work, struct pppox_sock,
drivers/net/pse-pd/pse_core.c
1012
static void pse_send_ntf_worker(struct work_struct *work)
drivers/net/pse-pd/pse_core.c
1017
pcdev = container_of(work, struct pse_controller_dev, ntf_work);
drivers/net/slip/slip.c
422
static void slip_transmit(struct work_struct *work)
drivers/net/slip/slip.c
424
struct slip *sl = container_of(work, struct slip, tx_work);
drivers/net/team/team_core.c
629
static void team_notify_peers_work(struct work_struct *work)
drivers/net/team/team_core.c
634
team = container_of(work, struct team, notify_peers.dw.work);
drivers/net/team/team_core.c
675
static void team_mcast_rejoin_work(struct work_struct *work)
drivers/net/team/team_core.c
680
team = container_of(work, struct team, mcast_rejoin.dw.work);
drivers/net/team/team_mode_loadbalance.c
459
static void lb_stats_refresh(struct work_struct *work)
drivers/net/team/team_mode_loadbalance.c
472
lb_priv_ex = container_of(work, struct lb_priv_ex,
drivers/net/team/team_mode_loadbalance.c
473
stats.refresh_dw.work);
drivers/net/thunderbolt/main.c
623
static void tbnet_connected_work(struct work_struct *work)
drivers/net/thunderbolt/main.c
625
struct tbnet *net = container_of(work, typeof(*net), connected_work);
drivers/net/thunderbolt/main.c
690
static void tbnet_login_work(struct work_struct *work)
drivers/net/thunderbolt/main.c
692
struct tbnet *net = container_of(work, typeof(*net), login_work.work);
drivers/net/thunderbolt/main.c
725
static void tbnet_disconnect_work(struct work_struct *work)
drivers/net/thunderbolt/main.c
727
struct tbnet *net = container_of(work, typeof(*net), disconnect_work);
drivers/net/usb/cx82310_eth.c
130
static void cx82310_reenable_work(struct work_struct *work)
drivers/net/usb/cx82310_eth.c
132
struct cx82310_priv *priv = container_of(work, struct cx82310_priv,
drivers/net/usb/ipheth.c
397
static void ipheth_carrier_check_work(struct work_struct *work)
drivers/net/usb/ipheth.c
399
struct ipheth_device *dev = container_of(work, struct ipheth_device,
drivers/net/usb/ipheth.c
400
carrier_work.work);
drivers/net/usb/kaweth.c
440
static void kaweth_resubmit_tl(struct work_struct *work)
drivers/net/usb/kaweth.c
443
container_of(work, struct kaweth_device, lowmem_work.work);
drivers/net/usb/lan78xx.c
1626
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
drivers/net/usb/lan78xx.c
1628
set_bit(work, &dev->flags);
drivers/net/usb/lan78xx.c
1630
netdev_err(dev->net, "kevent %d may have been dropped\n", work);
drivers/net/usb/lan78xx.c
4413
static void lan78xx_delayedwork(struct work_struct *work)
drivers/net/usb/lan78xx.c
4418
dev = container_of(work, struct lan78xx_net, wq.work);
drivers/net/usb/pegasus.c
1124
static void check_carrier(struct work_struct *work)
drivers/net/usb/pegasus.c
1126
pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
drivers/net/usb/r8152.c
6815
static void rtl_work_func_t(struct work_struct *work)
drivers/net/usb/r8152.c
6817
struct r8152 *tp = container_of(work, struct r8152, schedule.work);
drivers/net/usb/r8152.c
6857
static void rtl_hw_phy_work_func_t(struct work_struct *work)
drivers/net/usb/r8152.c
6859
struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);
drivers/net/usb/r8152.c
6922
if (work_busy(&tp->hw_phy_work.work) & WORK_BUSY_PENDING) {
drivers/net/usb/r8152.c
6924
rtl_hw_phy_work_func_t(&tp->hw_phy_work.work);
drivers/net/usb/r8152.c
8468
if (work_busy(&tp->schedule.work) || sw_linking != hw_linking)
drivers/net/usb/sierra_net.c
460
static void sierra_net_kevent(struct work_struct *work)
drivers/net/usb/sierra_net.c
463
container_of(work, struct sierra_net_data, sierra_net_kevent);
drivers/net/usb/sierra_net.c
561
static void sierra_net_defer_kevent(struct usbnet *dev, int work)
drivers/net/usb/sierra_net.c
565
set_bit(work, &priv->kevent_flags);
drivers/net/usb/usbnet.c
1187
usbnet_deferred_kevent(struct work_struct *work)
drivers/net/usb/usbnet.c
1190
container_of(work, struct usbnet, kevent);
drivers/net/usb/usbnet.c
1648
static void usbnet_bh_work(struct work_struct *work)
drivers/net/usb/usbnet.c
1650
struct usbnet *dev = from_work(dev, work, bh_work);
drivers/net/usb/usbnet.c
476
void usbnet_defer_kevent(struct usbnet *dev, int work)
drivers/net/usb/usbnet.c
478
set_bit (work, &dev->flags);
drivers/net/usb/usbnet.c
483
usbnet_event_names[work]);
drivers/net/usb/usbnet.c
486
"kevent %s scheduled\n", usbnet_event_names[work]);
drivers/net/virtio_net.c
3787
static void virtnet_rx_mode_work(struct work_struct *work)
drivers/net/virtio_net.c
3790
container_of(work, struct virtnet_info, rx_mode_work);
drivers/net/virtio_net.c
5293
static void virtnet_rx_dim_work(struct work_struct *work)
drivers/net/virtio_net.c
5295
struct dim *dim = container_of(work, struct dim, work);
drivers/net/virtio_net.c
6211
static void virtnet_config_changed_work(struct work_struct *work)
drivers/net/virtio_net.c
6214
container_of(work, struct virtnet_info, config_work);
drivers/net/vmxnet3/vmxnet3_drv.c
271
schedule_work(&adapter->work);
drivers/net/vmxnet3/vmxnet3_drv.c
3932
schedule_work(&adapter->work);
drivers/net/vmxnet3/vmxnet3_drv.c
3941
adapter = container_of(data, struct vmxnet3_adapter, work);
drivers/net/vmxnet3/vmxnet3_drv.c
4252
INIT_WORK(&adapter->work, vmxnet3_reset_work);
drivers/net/vmxnet3/vmxnet3_drv.c
4349
cancel_work_sync(&adapter->work);
drivers/net/vmxnet3/vmxnet3_int.h
432
struct work_struct work;
drivers/net/wan/framer/framer-core.c
325
static void framer_polling_work(struct work_struct *work)
drivers/net/wan/framer/framer-core.c
327
struct framer *framer = container_of(work, struct framer, polling_work.work);
drivers/net/wan/framer/framer-core.c
583
static void framer_notify_status_work(struct work_struct *work)
drivers/net/wan/framer/framer-core.c
585
struct framer *framer = container_of(work, struct framer, notify_status_work);
drivers/net/wireguard/device.h
25
struct work_struct work;
drivers/net/wireguard/queueing.c
20
INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
drivers/net/wireguard/queueing.h
171
queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
drivers/net/wireguard/queueing.h
32
void wg_packet_handshake_receive_worker(struct work_struct *work);
drivers/net/wireguard/queueing.h
36
void wg_packet_decrypt_worker(struct work_struct *work);
drivers/net/wireguard/queueing.h
49
void wg_packet_handshake_send_worker(struct work_struct *work);
drivers/net/wireguard/queueing.h
50
void wg_packet_tx_worker(struct work_struct *work);
drivers/net/wireguard/queueing.h
51
void wg_packet_encrypt_worker(struct work_struct *work);
drivers/net/wireguard/ratelimiter.c
55
static void wg_ratelimiter_gc_entries(struct work_struct *work)
drivers/net/wireguard/ratelimiter.c
65
if (unlikely(!work) ||
drivers/net/wireguard/ratelimiter.c
71
if (unlikely(!work) ||
drivers/net/wireguard/ratelimiter.c
77
if (likely(work))
drivers/net/wireguard/ratelimiter.c
80
if (likely(work))
drivers/net/wireguard/receive.c
206
void wg_packet_handshake_receive_worker(struct work_struct *work)
drivers/net/wireguard/receive.c
208
struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
drivers/net/wireguard/receive.c
493
void wg_packet_decrypt_worker(struct work_struct *work)
drivers/net/wireguard/receive.c
495
struct crypt_queue *queue = container_of(work, struct multicore_worker,
drivers/net/wireguard/receive.c
496
work)->ptr;
drivers/net/wireguard/receive.c
571
&per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
drivers/net/wireguard/send.c
262
void wg_packet_tx_worker(struct work_struct *work)
drivers/net/wireguard/send.c
264
struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work);
drivers/net/wireguard/send.c
287
void wg_packet_encrypt_worker(struct work_struct *work)
drivers/net/wireguard/send.c
289
struct crypt_queue *queue = container_of(work, struct multicore_worker,
drivers/net/wireguard/send.c
290
work)->ptr;
drivers/net/wireguard/send.c
46
void wg_packet_handshake_send_worker(struct work_struct *work)
drivers/net/wireguard/send.c
48
struct wg_peer *peer = container_of(work, struct wg_peer,
drivers/net/wireguard/timers.c
125
static void wg_queued_expired_zero_key_material(struct work_struct *work)
drivers/net/wireguard/timers.c
127
struct wg_peer *peer = container_of(work, struct wg_peer,
drivers/net/wireless/ath/ar5523/ar5523.c
626
static void ar5523_rx_refill_work(struct work_struct *work)
drivers/net/wireless/ath/ar5523/ar5523.c
628
struct ar5523 *ar = container_of(work, struct ar5523, rx_refill_work);
drivers/net/wireless/ath/ar5523/ar5523.c
893
static void ar5523_tx_work(struct work_struct *work)
drivers/net/wireless/ath/ar5523/ar5523.c
895
struct ar5523 *ar = container_of(work, struct ar5523, tx_work);
drivers/net/wireless/ath/ar5523/ar5523.c
911
static void ar5523_tx_wd_work(struct work_struct *work)
drivers/net/wireless/ath/ar5523/ar5523.c
913
struct ar5523 *ar = container_of(work, struct ar5523, tx_wd_work);
drivers/net/wireless/ath/ar5523/ar5523.c
976
static void ar5523_stat_work(struct work_struct *work)
drivers/net/wireless/ath/ar5523/ar5523.c
978
struct ar5523 *ar = container_of(work, struct ar5523, stat_work.work);
drivers/net/wireless/ath/ath10k/core.c
2494
static void ath10k_core_recovery_check_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/core.c
2496
struct ath10k *ar = container_of(work, struct ath10k, recovery_check_work);
drivers/net/wireless/ath/ath10k/core.c
2566
static void ath10k_core_restart(struct work_struct *work)
drivers/net/wireless/ath/ath10k/core.c
2568
struct ath10k *ar = container_of(work, struct ath10k, restart_work);
drivers/net/wireless/ath/ath10k/core.c
2639
static void ath10k_core_set_coverage_class_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/core.c
2641
struct ath10k *ar = container_of(work, struct ath10k,
drivers/net/wireless/ath/ath10k/core.c
3501
static void ath10k_core_register_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/core.c
3503
struct ath10k *ar = container_of(work, struct ath10k, register_work);
drivers/net/wireless/ath/ath10k/debug.c
895
static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
drivers/net/wireless/ath/ath10k/debug.c
897
struct ath10k *ar = container_of(work, struct ath10k,
drivers/net/wireless/ath/ath10k/debug.c
898
debug.htt_stats_dwork.work);
drivers/net/wireless/ath/ath10k/htc.c
805
static void ath10k_htc_bundle_tx_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/htc.c
807
struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
drivers/net/wireless/ath/ath10k/htc.c
834
static void ath10k_htc_tx_complete_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/htc.c
836
struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
drivers/net/wireless/ath/ath10k/mac.c
2084
static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/mac.c
2086
struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
drivers/net/wireless/ath/ath10k/mac.c
2152
static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/mac.c
2154
struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
drivers/net/wireless/ath/ath10k/mac.c
2155
connection_loss_work.work);
drivers/net/wireless/ath/ath10k/mac.c
4126
void ath10k_offchan_tx_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/mac.c
4128
struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
drivers/net/wireless/ath/ath10k/mac.c
4240
void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/mac.c
4242
struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
drivers/net/wireless/ath/ath10k/mac.c
4638
void ath10k_scan_timeout_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/mac.c
4640
struct ath10k *ar = container_of(work, struct ath10k,
drivers/net/wireless/ath/ath10k/mac.c
4641
scan.timeout.work);
drivers/net/wireless/ath/ath10k/mac.h
38
void ath10k_scan_timeout_work(struct work_struct *work);
drivers/net/wireless/ath/ath10k/mac.h
40
void ath10k_offchan_tx_work(struct work_struct *work);
drivers/net/wireless/ath/ath10k/mac.h
42
void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
drivers/net/wireless/ath/ath10k/pci.c
1751
static void ath10k_pci_fw_dump_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/pci.c
1753
struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
drivers/net/wireless/ath/ath10k/qmi.c
1026
static void ath10k_qmi_driver_event_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/qmi.c
1028
struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi,
drivers/net/wireless/ath/ath10k/sdio.c
1349
static void ath10k_rx_indication_async_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/sdio.c
1351
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
drivers/net/wireless/ath/ath10k/sdio.c
1457
static void ath10k_sdio_write_async_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/sdio.c
1459
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
drivers/net/wireless/ath/ath10k/usb.c
360
static void ath10k_usb_io_comp_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/usb.c
362
struct ath10k_usb_pipe *pipe = container_of(work,
drivers/net/wireless/ath/ath10k/wmi.c
4004
static void ath10k_radar_confirmation_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/wmi.c
4006
struct ath10k *ar = container_of(work, struct ath10k,
drivers/net/wireless/ath/ath10k/wmi.c
5535
static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
drivers/net/wireless/ath/ath10k/wmi.c
5537
struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
drivers/net/wireless/ath/ath11k/core.c
2391
static void ath11k_update_11d(struct work_struct *work)
drivers/net/wireless/ath/ath11k/core.c
2393
struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
drivers/net/wireless/ath/ath11k/core.c
2511
static void ath11k_core_restart(struct work_struct *work)
drivers/net/wireless/ath/ath11k/core.c
2513
struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
drivers/net/wireless/ath/ath11k/core.c
2531
static void ath11k_core_reset(struct work_struct *work)
drivers/net/wireless/ath/ath11k/core.c
2533
struct ath11k_base *ab = container_of(work, struct ath11k_base, reset_work);
drivers/net/wireless/ath/ath11k/coredump.c
41
void ath11k_coredump_upload(struct work_struct *work)
drivers/net/wireless/ath/ath11k/coredump.c
43
struct ath11k_base *ab = container_of(work, struct ath11k_base, dump_work);
drivers/net/wireless/ath/ath11k/coredump.h
61
void ath11k_coredump_upload(struct work_struct *work);
drivers/net/wireless/ath/ath11k/coredump.h
70
static inline void ath11k_coredump_upload(struct work_struct *work)
drivers/net/wireless/ath/ath11k/dp_rx.c
5561
int quota = 0, work = 0, count;
drivers/net/wireless/ath/ath11k/dp_rx.c
5587
work += quota;
drivers/net/wireless/ath/ath11k/dp_rx.c
5593
return work;
drivers/net/wireless/ath/ath11k/mac.c
1792
static void ath11k_mac_vif_sta_connection_loss_work(struct work_struct *work)
drivers/net/wireless/ath/ath11k/mac.c
1794
struct ath11k_vif *arvif = container_of(work, struct ath11k_vif,
drivers/net/wireless/ath/ath11k/mac.c
1795
connection_loss_work.work);
drivers/net/wireless/ath/ath11k/mac.c
3978
static void ath11k_scan_timeout_work(struct work_struct *work)
drivers/net/wireless/ath/ath11k/mac.c
3980
struct ath11k *ar = container_of(work, struct ath11k,
drivers/net/wireless/ath/ath11k/mac.c
3981
scan.timeout.work);
drivers/net/wireless/ath/ath11k/mac.c
6422
static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
drivers/net/wireless/ath/ath11k/mac.c
6424
struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work);
drivers/net/wireless/ath/ath11k/mac.c
7068
static void ath11k_mac_bcn_tx_work(struct work_struct *work)
drivers/net/wireless/ath/ath11k/mac.c
7070
struct ath11k_vif *arvif = container_of(work, struct ath11k_vif,
drivers/net/wireless/ath/ath11k/qmi.c
3208
static void ath11k_qmi_driver_event_work(struct work_struct *work)
drivers/net/wireless/ath/ath11k/qmi.c
3210
struct ath11k_qmi *qmi = container_of(work, struct ath11k_qmi,
drivers/net/wireless/ath/ath11k/reg.c
1002
void ath11k_regd_update_work(struct work_struct *work)
drivers/net/wireless/ath/ath11k/reg.c
1004
struct ath11k *ar = container_of(work, struct ath11k,
drivers/net/wireless/ath/ath11k/reg.c
798
void ath11k_regd_update_chan_list_work(struct work_struct *work)
drivers/net/wireless/ath/ath11k/reg.c
800
struct ath11k *ar = container_of(work, struct ath11k,
drivers/net/wireless/ath/ath11k/reg.h
35
void ath11k_regd_update_work(struct work_struct *work);
drivers/net/wireless/ath/ath11k/reg.h
36
void ath11k_regd_update_chan_list_work(struct work_struct *work);
drivers/net/wireless/ath/ath12k/ahb.c
498
static void ath12k_ahb_ce_workqueue(struct work_struct *work)
drivers/net/wireless/ath/ath12k/ahb.c
500
struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);
drivers/net/wireless/ath/ath12k/core.c
1394
static void ath12k_rfkill_work(struct work_struct *work)
drivers/net/wireless/ath/ath12k/core.c
1396
struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
drivers/net/wireless/ath/ath12k/core.c
1524
static void ath12k_update_11d(struct work_struct *work)
drivers/net/wireless/ath/ath12k/core.c
1526
struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
drivers/net/wireless/ath/ath12k/core.c
1607
static void ath12k_core_restart(struct work_struct *work)
drivers/net/wireless/ath/ath12k/core.c
1609
struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
drivers/net/wireless/ath/ath12k/core.c
1648
static void ath12k_core_reset(struct work_struct *work)
drivers/net/wireless/ath/ath12k/core.c
1650
struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
drivers/net/wireless/ath/ath12k/coredump.c
41
void ath12k_coredump_upload(struct work_struct *work)
drivers/net/wireless/ath/ath12k/coredump.c
43
struct ath12k_base *ab = container_of(work, struct ath12k_base, dump_work);
drivers/net/wireless/ath/ath12k/coredump.h
63
void ath12k_coredump_upload(struct work_struct *work);
drivers/net/wireless/ath/ath12k/coredump.h
72
static inline void ath12k_coredump_upload(struct work_struct *work)
drivers/net/wireless/ath/ath12k/mac.c
1970
static void ath12k_mac_vif_sta_connection_loss_work(struct work_struct *work)
drivers/net/wireless/ath/ath12k/mac.c
1972
struct ath12k_link_vif *arvif = container_of(work, struct ath12k_link_vif,
drivers/net/wireless/ath/ath12k/mac.c
1973
connection_loss_work.work);
drivers/net/wireless/ath/ath12k/mac.c
4098
static void ath12k_mac_bcn_tx_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/ath/ath12k/mac.c
4100
struct ath12k_link_vif *arvif = container_of(work, struct ath12k_link_vif,
drivers/net/wireless/ath/ath12k/mac.c
5236
static void ath12k_scan_timeout_work(struct work_struct *work)
drivers/net/wireless/ath/ath12k/mac.c
5238
struct ath12k *ar = container_of(work, struct ath12k,
drivers/net/wireless/ath/ath12k/mac.c
5239
scan.timeout.work);
drivers/net/wireless/ath/ath12k/mac.c
5263
static void ath12k_scan_vdev_clean_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/ath/ath12k/mac.c
5265
struct ath12k *ar = container_of(work, struct ath12k,
drivers/net/wireless/ath/ath12k/mac.c
9269
static void ath12k_mgmt_over_wmi_tx_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/ath/ath12k/mac.c
9271
struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
drivers/net/wireless/ath/ath12k/pci.c
396
static void ath12k_pci_ce_workqueue(struct work_struct *work)
drivers/net/wireless/ath/ath12k/pci.c
398
struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);
drivers/net/wireless/ath/ath12k/qmi.c
3965
static void ath12k_qmi_driver_event_work(struct work_struct *work)
drivers/net/wireless/ath/ath12k/qmi.c
3967
struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
drivers/net/wireless/ath/ath12k/reg.c
805
void ath12k_regd_update_chan_list_work(struct work_struct *work)
drivers/net/wireless/ath/ath12k/reg.c
807
struct ath12k *ar = container_of(work, struct ath12k,
drivers/net/wireless/ath/ath12k/reg.c
853
void ath12k_regd_update_work(struct work_struct *work)
drivers/net/wireless/ath/ath12k/reg.c
855
struct ath12k *ar = container_of(work, struct ath12k,
drivers/net/wireless/ath/ath12k/reg.h
105
void ath12k_regd_update_work(struct work_struct *work);
drivers/net/wireless/ath/ath12k/reg.h
118
void ath12k_regd_update_chan_list_work(struct work_struct *work);
drivers/net/wireless/ath/ath5k/base.c
2449
ath5k_calibrate_work(struct work_struct *work)
drivers/net/wireless/ath/ath5k/base.c
2451
struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
drivers/net/wireless/ath/ath5k/base.c
2506
ath5k_tx_complete_poll_work(struct work_struct *work)
drivers/net/wireless/ath/ath5k/base.c
2508
struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
drivers/net/wireless/ath/ath5k/base.c
2509
tx_complete_work.work);
drivers/net/wireless/ath/ath5k/base.c
3015
static void ath5k_reset_work(struct work_struct *work)
drivers/net/wireless/ath/ath5k/base.c
3017
struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
drivers/net/wireless/ath/ath6kl/recovery.c
21
static void ath6kl_recovery_work(struct work_struct *work)
drivers/net/wireless/ath/ath6kl/recovery.c
23
struct ath6kl *ar = container_of(work, struct ath6kl,
drivers/net/wireless/ath/ath6kl/sdio.c
462
static void ath6kl_sdio_write_async_work(struct work_struct *work)
drivers/net/wireless/ath/ath6kl/sdio.c
467
ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
drivers/net/wireless/ath/ath6kl/usb.c
585
static void ath6kl_usb_io_comp_work(struct work_struct *work)
drivers/net/wireless/ath/ath6kl/usb.c
587
struct ath6kl_usb_pipe *pipe = container_of(work,
drivers/net/wireless/ath/ath9k/ath9k.h
742
void ath_hw_check_work(struct work_struct *work);
drivers/net/wireless/ath/ath9k/ath9k.h
743
void ath_reset_work(struct work_struct *work);
drivers/net/wireless/ath/ath9k/ath9k.h
745
void ath_hw_pll_work(struct work_struct *work);
drivers/net/wireless/ath/ath9k/ath9k.h
746
void ath_paprd_calibrate(struct work_struct *work);
drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
158
static void owl_nvmem_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
160
struct owl_ctx *ctx = container_of(work, struct owl_ctx, work);
drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
189
INIT_WORK(&ctx->work, owl_nvmem_work);
drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
190
schedule_work(&ctx->work);
drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
27
struct work_struct work;
drivers/net/wireless/ath/ath9k/channel.c
1317
static void ath_chanctx_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/channel.c
1319
struct ath_softc *sc = container_of(work, struct ath_softc,
drivers/net/wireless/ath/ath9k/htc.h
572
void ath9k_htc_ani_work(struct work_struct *work);
drivers/net/wireless/ath/ath9k/htc.h
604
void ath9k_ps_work(struct work_struct *work);
drivers/net/wireless/ath/ath9k/htc.h
617
void ath9k_led_work(struct work_struct *work);
drivers/net/wireless/ath/ath9k/htc.h
631
static inline void ath9k_led_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
100
struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
101
duty_cycle_work.work);
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
226
void ath9k_led_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
228
struct ath9k_htc_priv *priv = container_of(work,
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
64
static void ath_btcoex_period_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
66
struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
67
coex_period_work.work);
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
98
static void ath_btcoex_duty_cycle_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/htc_drv_main.c
1288
static void ath9k_htc_sta_rc_update_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/htc_drv_main.c
1291
container_of(work, struct ath9k_htc_sta, rc_update_work);
drivers/net/wireless/ath/ath9k/htc_drv_main.c
76
void ath9k_ps_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/htc_drv_main.c
768
void ath9k_htc_ani_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/htc_drv_main.c
771
container_of(work, struct ath9k_htc_priv, ani_work.work);
drivers/net/wireless/ath/ath9k/htc_drv_main.c
79
container_of(work, struct ath9k_htc_priv,
drivers/net/wireless/ath/ath9k/link.c
146
void ath_hw_pll_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/link.c
149
struct ath_softc *sc = container_of(work, struct ath_softc,
drivers/net/wireless/ath/ath9k/link.c
150
hw_pll_work.work);
drivers/net/wireless/ath/ath9k/link.c
241
void ath_paprd_calibrate(struct work_struct *work)
drivers/net/wireless/ath/ath9k/link.c
243
struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
drivers/net/wireless/ath/ath9k/link.c
85
void ath_hw_check_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/link.c
87
struct ath_softc *sc = container_of(work, struct ath_softc,
drivers/net/wireless/ath/ath9k/link.c
88
hw_check_work.work);
drivers/net/wireless/ath/ath9k/main.c
258
goto work;
drivers/net/wireless/ath/ath9k/main.c
268
work:
drivers/net/wireless/ath/ath9k/main.c
640
void ath_reset_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/main.c
642
struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
drivers/net/wireless/ath/ath9k/mci.c
236
static void ath9k_mci_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/mci.c
238
struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);
drivers/net/wireless/ath/ath9k/wmi.c
195
void ath9k_fatal_work(struct work_struct *work)
drivers/net/wireless/ath/ath9k/wmi.c
197
struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
drivers/net/wireless/ath/ath9k/wmi.h
189
void ath9k_fatal_work(struct work_struct *work);
drivers/net/wireless/ath/carl9170/carl9170.h
582
void carl9170_tx_janitor(struct work_struct *work);
drivers/net/wireless/ath/carl9170/led.c
66
static void carl9170_led_update(struct work_struct *work)
drivers/net/wireless/ath/carl9170/led.c
68
struct ar9170 *ar = container_of(work, struct ar9170, led_work.work);
drivers/net/wireless/ath/carl9170/main.c
1382
static void carl9170_ampdu_work(struct work_struct *work)
drivers/net/wireless/ath/carl9170/main.c
1384
struct ar9170 *ar = container_of(work, struct ar9170,
drivers/net/wireless/ath/carl9170/main.c
467
static void carl9170_restart_work(struct work_struct *work)
drivers/net/wireless/ath/carl9170/main.c
469
struct ar9170 *ar = container_of(work, struct ar9170,
drivers/net/wireless/ath/carl9170/main.c
543
static void carl9170_ping_work(struct work_struct *work)
drivers/net/wireless/ath/carl9170/main.c
545
struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
drivers/net/wireless/ath/carl9170/main.c
845
static void carl9170_ps_work(struct work_struct *work)
drivers/net/wireless/ath/carl9170/main.c
847
struct ar9170 *ar = container_of(work, struct ar9170,
drivers/net/wireless/ath/carl9170/main.c
877
static void carl9170_stat_work(struct work_struct *work)
drivers/net/wireless/ath/carl9170/main.c
879
struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
drivers/net/wireless/ath/carl9170/tx.c
639
void carl9170_tx_janitor(struct work_struct *work)
drivers/net/wireless/ath/carl9170/tx.c
641
struct ar9170 *ar = container_of(work, struct ar9170,
drivers/net/wireless/ath/carl9170/tx.c
642
tx_janitor.work);
drivers/net/wireless/ath/wcn36xx/smd.c
3333
static void wcn36xx_ind_smd_work(struct work_struct *work)
drivers/net/wireless/ath/wcn36xx/smd.c
3336
container_of(work, struct wcn36xx, hal_ind_work);
drivers/net/wireless/ath/wil6210/cfg80211.c
2349
void wil_probe_client_worker(struct work_struct *work)
drivers/net/wireless/ath/wil6210/cfg80211.c
2351
struct wil6210_vif *vif = container_of(work, struct wil6210_vif,
drivers/net/wireless/ath/wil6210/main.c
460
void wil_disconnect_worker(struct work_struct *work)
drivers/net/wireless/ath/wil6210/main.c
462
struct wil6210_vif *vif = container_of(work,
drivers/net/wireless/ath/wil6210/main.c
526
static void wil_fw_error_worker(struct work_struct *work)
drivers/net/wireless/ath/wil6210/main.c
528
struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
drivers/net/wireless/ath/wil6210/p2p.c
231
void wil_p2p_listen_expired(struct work_struct *work)
drivers/net/wireless/ath/wil6210/p2p.c
233
struct wil_p2p_info *p2p = container_of(work,
drivers/net/wireless/ath/wil6210/p2p.c
259
void wil_p2p_search_expired(struct work_struct *work)
drivers/net/wireless/ath/wil6210/p2p.c
261
struct wil_p2p_info *p2p = container_of(work,
drivers/net/wireless/ath/wil6210/p2p.c
291
void wil_p2p_delayed_listen_work(struct work_struct *work)
drivers/net/wireless/ath/wil6210/p2p.c
293
struct wil_p2p_info *p2p = container_of(work,
drivers/net/wireless/ath/wil6210/txrx.c
801
void wil_enable_tx_key_worker(struct work_struct *work)
drivers/net/wireless/ath/wil6210/txrx.c
803
struct wil6210_vif *vif = container_of(work,
drivers/net/wireless/ath/wil6210/wil6210.h
1245
void wmi_event_worker(struct work_struct *work);
drivers/net/wireless/ath/wil6210/wil6210.h
1307
void wil_p2p_listen_expired(struct work_struct *work);
drivers/net/wireless/ath/wil6210/wil6210.h
1308
void wil_p2p_search_expired(struct work_struct *work);
drivers/net/wireless/ath/wil6210/wil6210.h
1310
void wil_p2p_delayed_listen_work(struct work_struct *work);
drivers/net/wireless/ath/wil6210/wil6210.h
1356
void wil_probe_client_worker(struct work_struct *work);
drivers/net/wireless/ath/wil6210/wil6210.h
1357
void wil_disconnect_worker(struct work_struct *work);
drivers/net/wireless/ath/wil6210/wil6210.h
1358
void wil_enable_tx_key_worker(struct work_struct *work);
drivers/net/wireless/ath/wil6210/wmi.c
3406
void wmi_event_worker(struct work_struct *work)
drivers/net/wireless/ath/wil6210/wmi.c
3408
struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
drivers/net/wireless/atmel/at76c50x-usb.c
1460
static void at76_work_set_promisc(struct work_struct *work)
drivers/net/wireless/atmel/at76c50x-usb.c
1462
struct at76_priv *priv = container_of(work, struct at76_priv,
drivers/net/wireless/atmel/at76c50x-usb.c
1485
static void at76_work_submit_rx(struct work_struct *work)
drivers/net/wireless/atmel/at76c50x-usb.c
1487
struct at76_priv *priv = container_of(work, struct at76_priv,
drivers/net/wireless/atmel/at76c50x-usb.c
1706
static void at76_work_join_bssid(struct work_struct *work)
drivers/net/wireless/atmel/at76c50x-usb.c
1708
struct at76_priv *priv = container_of(work, struct at76_priv,
drivers/net/wireless/atmel/at76c50x-usb.c
1909
static void at76_dwork_hw_scan(struct work_struct *work)
drivers/net/wireless/atmel/at76c50x-usb.c
1911
struct at76_priv *priv = container_of(work, struct at76_priv,
drivers/net/wireless/atmel/at76c50x-usb.c
1912
dwork_hw_scan.work);
drivers/net/wireless/broadcom/b43/leds.c
104
ieee80211_queue_work(wl->hw, &wl->leds.work);
drivers/net/wireless/broadcom/b43/leds.c
324
cancel_work_sync(&leds->work);
drivers/net/wireless/broadcom/b43/leds.c
333
INIT_WORK(&dev->wl->leds.work, b43_leds_work);
drivers/net/wireless/broadcom/b43/leds.c
75
static void b43_leds_work(struct work_struct *work)
drivers/net/wireless/broadcom/b43/leds.c
77
struct b43_leds *leds = container_of(work, struct b43_leds, work);
drivers/net/wireless/broadcom/b43/leds.h
41
struct work_struct work;
drivers/net/wireless/broadcom/b43/main.c
1798
static void b43_beacon_update_trigger_work(struct work_struct *work)
drivers/net/wireless/broadcom/b43/main.c
1800
struct b43_wl *wl = container_of(work, struct b43_wl,
drivers/net/wireless/broadcom/b43/main.c
2548
static void b43_request_firmware(struct work_struct *work)
drivers/net/wireless/broadcom/b43/main.c
2550
struct b43_wl *wl = container_of(work,
drivers/net/wireless/broadcom/b43/main.c
3428
static void b43_periodic_work_handler(struct work_struct *work)
drivers/net/wireless/broadcom/b43/main.c
3430
struct b43_wldev *dev = container_of(work, struct b43_wldev,
drivers/net/wireless/broadcom/b43/main.c
3431
periodic_work.work);
drivers/net/wireless/broadcom/b43/main.c
3457
struct delayed_work *work = &dev->periodic_work;
drivers/net/wireless/broadcom/b43/main.c
3460
INIT_DELAYED_WORK(work, b43_periodic_work_handler);
drivers/net/wireless/broadcom/b43/main.c
3461
ieee80211_queue_delayed_work(dev->wl->hw, work, 0);
drivers/net/wireless/broadcom/b43/main.c
3583
static void b43_tx_work(struct work_struct *work)
drivers/net/wireless/broadcom/b43/main.c
3585
struct b43_wl *wl = container_of(work, struct b43_wl, tx_work);
drivers/net/wireless/broadcom/b43/main.c
5205
static void b43_chip_reset(struct work_struct *work)
drivers/net/wireless/broadcom/b43/main.c
5208
container_of(work, struct b43_wldev, restart_work);
drivers/net/wireless/broadcom/b43/phy_common.c
466
void b43_phy_txpower_adjust_work(struct work_struct *work)
drivers/net/wireless/broadcom/b43/phy_common.c
468
struct b43_wl *wl = container_of(work, struct b43_wl,
drivers/net/wireless/broadcom/b43/phy_common.h
422
void b43_phy_txpower_adjust_work(struct work_struct *work);
drivers/net/wireless/broadcom/b43legacy/main.c
1210
static void b43legacy_beacon_update_trigger_work(struct work_struct *work)
drivers/net/wireless/broadcom/b43legacy/main.c
1212
struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl,
drivers/net/wireless/broadcom/b43legacy/main.c
1561
static void b43legacy_request_firmware(struct work_struct *work)
drivers/net/wireless/broadcom/b43legacy/main.c
1563
struct b43legacy_wl *wl = container_of(work,
drivers/net/wireless/broadcom/b43legacy/main.c
2314
static void b43legacy_periodic_work_handler(struct work_struct *work)
drivers/net/wireless/broadcom/b43legacy/main.c
2316
struct b43legacy_wldev *dev = container_of(work, struct b43legacy_wldev,
drivers/net/wireless/broadcom/b43legacy/main.c
2317
periodic_work.work);
drivers/net/wireless/broadcom/b43legacy/main.c
2343
struct delayed_work *work = &dev->periodic_work;
drivers/net/wireless/broadcom/b43legacy/main.c
2346
INIT_DELAYED_WORK(work, b43legacy_periodic_work_handler);
drivers/net/wireless/broadcom/b43legacy/main.c
2347
ieee80211_queue_delayed_work(dev->wl->hw, work, 0);
drivers/net/wireless/broadcom/b43legacy/main.c
2446
static void b43legacy_tx_work(struct work_struct *work)
drivers/net/wireless/broadcom/b43legacy/main.c
2448
struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl,
drivers/net/wireless/broadcom/b43legacy/main.c
3556
static void b43legacy_chip_reset(struct work_struct *work)
drivers/net/wireless/broadcom/b43legacy/main.c
3559
container_of(work, struct b43legacy_wldev, restart_work);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
280
schedule_work(&bt_local->work);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
287
static void brcmf_btcoex_handler(struct work_struct *work)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
290
btci = container_of(work, struct brcmf_btcoex_info, work);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
379
INIT_WORK(&btci->work, brcmf_btcoex_handler);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
399
cancel_work_sync(&cfg->btcoex->work);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
419
schedule_work(&btci->work);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
436
schedule_work(&btci->work);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
83
struct work_struct work;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
3564
static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
3567
container_of(work, struct brcmf_cfg80211_info,
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1170
static void brcmf_core_bus_reset(struct work_struct *work)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1172
struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
138
static void _brcmf_set_multicast_list(struct work_struct *work)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
140
struct brcmf_if *ifp = container_of(work, struct brcmf_if,
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
210
static void _brcmf_update_ndtable(struct work_struct *work)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
212
struct brcmf_if *ifp = container_of(work, struct brcmf_if,
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
233
static void _brcmf_update_ndtable(struct work_struct *work)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
254
static void brcmf_fweh_event_worker(struct work_struct *work)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
264
fweh = container_of(work, struct brcmf_fweh_info, event_work);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
1660
struct brcmf_msgbuf_work_item *work;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
1667
work = list_first_entry(&msgbuf->work_queue,
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
1670
list_del(&work->queue);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
1671
kfree(work);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
567
struct brcmf_msgbuf_work_item *work = NULL;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
572
work = list_first_entry(&msgbuf->work_queue,
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
574
list_del(&work->queue);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
578
return work;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
584
struct brcmf_msgbuf_work_item *work)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
596
flowid = work->flowid;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
623
create->msg.ifidx = work->ifidx;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
628
memcpy(create->sa, work->sa, ETH_ALEN);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
629
memcpy(create->da, work->da, ETH_ALEN);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
637
flowid, work->da, create->tid, work->ifidx);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
651
static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
656
msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
1116
static void brcmf_p2p_afx_handler(struct work_struct *work)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
1118
struct afx_hdl *afx_hdl = container_of(work, struct afx_hdl, afx_work);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
3755
static void brcmf_sdio_dataworker(struct work_struct *work)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
3757
struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
1309
static void brcms_driver_init(struct work_struct *work)
drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
1465
static void _brcms_timer(struct work_struct *work)
drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
1467
struct brcms_timer *t = container_of(work, struct brcms_timer,
drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
1468
dly_wrk.work);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
1929
static void ipw2100_reset_adapter(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2100.c
1932
container_of(work, struct ipw2100_priv, reset_work.work);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2148
static void ipw2100_scan_event(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2150
struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv,
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2151
scan_event.work);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
321
static void ipw2100_wx_event_work(struct work_struct *work);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
5528
static void ipw2100_security_work(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2100.c
5531
container_of(work, struct ipw2100_priv, security_work.work);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
5750
ipw2100_reset_adapter(&priv->reset_work.work);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
5910
static void ipw2100_hang_check(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2100.c
5913
container_of(work, struct ipw2100_priv, hang_check.work);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
5953
static void ipw2100_rf_kill(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2100.c
5956
container_of(work, struct ipw2100_priv, rf_kill.work);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
8213
static void ipw2100_wx_event_work(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2100.c
8216
container_of(work, struct ipw2100_priv, wx_event_work.work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1010
static void ipw_bg_led_activity_off(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
1013
container_of(work, struct ipw_priv, led_act_off.work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10526
static void ipw_bg_rf_kill(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10529
container_of(work, struct ipw_priv, rf_kill.work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10558
static void ipw_bg_link_up(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10561
container_of(work, struct ipw_priv, link_up);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10589
static void ipw_bg_link_down(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10592
container_of(work, struct ipw_priv, link_down);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
11185
static void ipw_bg_up(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
11188
container_of(work, struct ipw_priv, up);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
11256
static void ipw_bg_down(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
11259
container_of(work, struct ipw_priv, down);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
211
static void ipw_bg_up(struct work_struct *work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
213
static void ipw_bg_down(struct work_struct *work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
2306
static void ipw_bg_adapter_restart(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
2309
container_of(work, struct ipw_priv, adapter_restart);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
2337
static void ipw_bg_scan_check(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
2340
container_of(work, struct ipw_priv, scan_check.work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3938
static void ipw_bg_disassociate(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3941
container_of(work, struct ipw_priv, disassociate);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3947
static void ipw_system_config(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3950
container_of(work, struct ipw_priv, system_config);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4317
static void ipw_bg_gather_stats(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4320
container_of(work, struct ipw_priv, gather_stats.work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4397
static void ipw_scan_event(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4402
container_of(work, struct ipw_priv, scan_event.work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5165
static void ipw_bg_rx_queue_replenish(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5168
container_of(work, struct ipw_priv, rx_replenish);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5566
static void ipw_merge_adhoc_network(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5569
container_of(work, struct ipw_priv, merge_networks);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5988
static void ipw_bg_adhoc_check(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5991
container_of(work, struct ipw_priv, adhoc_check.work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
6379
static void ipw_request_passive_scan(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
6382
container_of(work, struct ipw_priv, request_passive_scan.work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
6386
static void ipw_request_scan(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
6389
container_of(work, struct ipw_priv, request_scan.work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
6393
static void ipw_request_direct_scan(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
6396
container_of(work, struct ipw_priv, request_direct_scan.work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
6400
static void ipw_bg_abort_scan(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
6403
container_of(work, struct ipw_priv, abort_scan);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
7164
static void ipw_bg_qos_activate(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
7167
container_of(work, struct ipw_priv, qos_activate);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
7470
static void ipw_bg_roam(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
7473
container_of(work, struct ipw_priv, roam);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
7584
static void ipw_bg_associate(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
7587
container_of(work, struct ipw_priv, associate);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
887
static void ipw_bg_led_link_on(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
890
container_of(work, struct ipw_priv, led_link_on.work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
932
static void ipw_bg_led_link_off(struct work_struct *work)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
935
container_of(work, struct ipw_priv, led_link_off.work);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
9399
struct delayed_work *work = NULL;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
9411
work = &priv->request_direct_scan;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
9413
work = &priv->request_passive_scan;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
9417
work = &priv->request_scan;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
9424
schedule_delayed_work(work, 0);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
2395
container_of(data, struct il_priv, init_alive_start.work);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
2410
container_of(data, struct il_priv, alive_start.work);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
2431
container_of(data, struct il_priv, _3945.rfkill_poll.work);
drivers/net/wireless/intel/iwlegacy/3945.c
1862
il3945_bg_reg_txpower_periodic(struct work_struct *work)
drivers/net/wireless/intel/iwlegacy/3945.c
1864
struct il_priv *il = container_of(work, struct il_priv,
drivers/net/wireless/intel/iwlegacy/3945.c
1865
_3945.thermal_periodic.work);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
5615
container_of(data, struct il_priv, init_alive_start.work);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
5630
container_of(data, struct il_priv, alive_start.work);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
5642
il4965_bg_run_time_calib_work(struct work_struct *work)
drivers/net/wireless/intel/iwlegacy/4965-mac.c
5644
struct il_priv *il = container_of(work, struct il_priv,
drivers/net/wireless/intel/iwlegacy/4965-mac.c
6186
il4965_bg_txpower_work(struct work_struct *work)
drivers/net/wireless/intel/iwlegacy/4965-mac.c
6188
struct il_priv *il = container_of(work, struct il_priv,
drivers/net/wireless/intel/iwlegacy/common.c
1598
container_of(data, struct il_priv, scan_check.work);
drivers/net/wireless/intel/iwlegacy/common.c
1659
il_bg_abort_scan(struct work_struct *work)
drivers/net/wireless/intel/iwlegacy/common.c
1661
struct il_priv *il = container_of(work, struct il_priv, abort_scan);
drivers/net/wireless/intel/iwlegacy/common.c
1673
il_bg_scan_completed(struct work_struct *work)
drivers/net/wireless/intel/iwlegacy/common.c
1675
struct il_priv *il = container_of(work, struct il_priv, scan_completed);
drivers/net/wireless/intel/iwlwifi/dvm/lib.c
386
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/dvm/lib.c
389
container_of(work, struct iwl_priv, bt_traffic_change_work);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
264
static void iwl_bg_beacon_update(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/dvm/main.c
267
container_of(work, struct iwl_priv, beacon_update);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
303
static void iwl_bg_bt_runtime_config(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/dvm/main.c
306
container_of(work, struct iwl_priv, bt_runtime_config);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
321
static void iwl_bg_bt_full_concurrency(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/dvm/main.c
324
container_of(work, struct iwl_priv, bt_full_concurrency);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
554
static void iwl_bg_tx_flush(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/dvm/main.c
557
container_of(work, struct iwl_priv, tx_flush);
drivers/net/wireless/intel/iwlwifi/dvm/main.c
957
static void iwl_bg_run_time_calib_work(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/dvm/main.c
959
struct iwl_priv *priv = container_of(work, struct iwl_priv,
drivers/net/wireless/intel/iwlwifi/dvm/scan.c
1008
container_of(data, struct iwl_priv, scan_check.work);
drivers/net/wireless/intel/iwlwifi/dvm/scan.c
1020
static void iwl_bg_abort_scan(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/dvm/scan.c
1022
struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
drivers/net/wireless/intel/iwlwifi/dvm/scan.c
1033
static void iwl_bg_scan_completed(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/dvm/scan.c
1036
container_of(work, struct iwl_priv, scan_completed);
drivers/net/wireless/intel/iwlwifi/dvm/scan.c
980
static void iwl_bg_start_internal_scan(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/dvm/scan.c
983
container_of(work, struct iwl_priv, start_internal_scan);
drivers/net/wireless/intel/iwlwifi/dvm/tt.c
468
static void iwl_bg_ct_enter(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/dvm/tt.c
470
struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
drivers/net/wireless/intel/iwlwifi/dvm/tt.c
497
static void iwl_bg_ct_exit(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/dvm/tt.c
499
struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
drivers/net/wireless/intel/iwlwifi/dvm/tt.c
548
static void iwl_bg_tt_work(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/dvm/tt.c
550
struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
3246
void iwl_fw_error_dump_wk(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
3249
container_of(work, typeof(*wks), wk.work);
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
193
void iwl_fw_error_dump_wk(struct work_struct *work);
drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
177
static void iwl_fw_timestamp_marker_wk(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
181
container_of(work, struct iwl_fw_runtime, timestamp.wk.work);
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
122
INIT_DELAYED_WORK(&reprobe->work, iwl_trans_reprobe_wk);
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
123
schedule_delayed_work(&reprobe->work, msecs_to_jiffies(delay_ms));
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
207
restart.wk.work);
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
85
struct delayed_work work;
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
92
reprobe = container_of(wk, typeof(*reprobe), work.work);
drivers/net/wireless/intel/iwlwifi/mei/main.c
417
container_of(wk, struct iwl_mei, csa_throttle_end_wk.work);
drivers/net/wireless/intel/iwlwifi/mld/iface.c
419
mlo_scan_start_wk.work);
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
121
wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
142
wiphy_delayed_work_init(&ll->work, iwl_mld_low_latency_wk);
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
296
wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
306
wiphy_delayed_work_cancel(mld->wiphy, &mld->low_latency.work);
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
335
wiphy_delayed_work_queue(mld->wiphy, &ll->work, MLD_LL_PERIOD);
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
93
low_latency.work.work);
drivers/net/wireless/intel/iwlwifi/mld/low_latency.h
46
struct wiphy_delayed_work work;
drivers/net/wireless/intel/iwlwifi/mld/mlo.c
100
emlsr.tmp_non_bss_done_wk.work);
drivers/net/wireless/intel/iwlwifi/mld/mlo.c
529
emlsr.check_tpt_wk.work);
drivers/net/wireless/intel/iwlwifi/mld/mlo.c
84
emlsr.prevent_done_wk.work);
drivers/net/wireless/intel/iwlwifi/mld/ptp.c
182
ptp_data.dwork.work);
drivers/net/wireless/intel/iwlwifi/mld/thermal.c
41
mld = container_of(wk, struct iwl_mld, ct_kill_exit_wk.work);
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1623
mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work);
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
2048
void iwl_mvm_scan_timeout_wk(struct work_struct *work);
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
2387
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
2400
void iwl_mvm_tcm_work(struct work_struct *work);
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
672
struct delayed_work work;
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
1371
INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
1597
cancel_delayed_work_sync(&mvm->tcm.work);
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
745
struct wiphy_work *work);
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
767
static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
770
container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
drivers/net/wireless/intel/iwlwifi/mvm/ptp.c
197
ptp_data.dwork.work);
drivers/net/wireless/intel/iwlwifi/mvm/rx.c
227
schedule_delayed_work(&mvm->tcm.work, 0);
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
2292
schedule_delayed_work(&mvm->tcm.work, 0);
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
2698
void iwl_mvm_scan_timeout_wk(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
2700
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
452
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
461
mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
277
static void check_exit_ctkill(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
285
tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
1045
schedule_delayed_work(&mvm->tcm.work, 0);
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
1097
schedule_delayed_work(&mvm->tcm.work, work_delay);
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
1104
void iwl_mvm_tcm_work(struct work_struct *work)
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
1106
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
1108
tcm.work);
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
1119
cancel_delayed_work_sync(&mvm->tcm.work);
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
1150
schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
1152
schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
862
uapsd_nonagg_detected_wk.work);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2177
struct work_struct work;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2185
container_of(wk, struct iwl_trans_pcie_removal, work);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2320
INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2322
schedule_work(&removal->work);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
4117
me_recheck_wk.work);
drivers/net/wireless/intersil/p54/led.c
27
static void p54_update_leds(struct work_struct *work)
drivers/net/wireless/intersil/p54/led.c
29
struct p54_common *priv = container_of(work, struct p54_common,
drivers/net/wireless/intersil/p54/led.c
30
led_work.work);
drivers/net/wireless/intersil/p54/main.c
192
ieee80211_queue_delayed_work(dev, &priv->work, 0);
drivers/net/wireless/intersil/p54/main.c
209
cancel_delayed_work_sync(&priv->work);
drivers/net/wireless/intersil/p54/main.c
424
static void p54_work(struct work_struct *work)
drivers/net/wireless/intersil/p54/main.c
426
struct p54_common *priv = container_of(work, struct p54_common,
drivers/net/wireless/intersil/p54/main.c
427
work.work);
drivers/net/wireless/intersil/p54/main.c
796
INIT_DELAYED_WORK(&priv->work, p54_work);
drivers/net/wireless/intersil/p54/p54.h
257
struct delayed_work work;
drivers/net/wireless/intersil/p54/p54spi.c
388
ieee80211_queue_work(priv->hw, &priv->work);
drivers/net/wireless/intersil/p54/p54spi.c
476
ieee80211_queue_work(priv->hw, &priv->work);
drivers/net/wireless/intersil/p54/p54spi.c
479
static void p54spi_work(struct work_struct *work)
drivers/net/wireless/intersil/p54/p54spi.c
481
struct p54s_priv *priv = container_of(work, struct p54s_priv, work);
drivers/net/wireless/intersil/p54/p54spi.c
582
cancel_work_sync(&priv->work);
drivers/net/wireless/intersil/p54/p54spi.c
636
INIT_WORK(&priv->work, p54spi_work);
drivers/net/wireless/intersil/p54/p54spi.h
98
struct work_struct work;
drivers/net/wireless/intersil/p54/txrx.c
393
ieee80211_queue_delayed_work(priv->hw, &priv->work,
drivers/net/wireless/marvell/libertas/cfg.c
725
static void lbs_scan_worker(struct work_struct *work)
drivers/net/wireless/marvell/libertas/cfg.c
728
container_of(work, struct lbs_private, scan_work.work);
drivers/net/wireless/marvell/libertas/if_sdio.c
1026
static void if_sdio_reset_card_worker(struct work_struct *work)
drivers/net/wireless/marvell/libertas/if_sdio.c
1034
card = container_of(work, struct if_sdio_card, reset_worker);
drivers/net/wireless/marvell/libertas/if_sdio.c
397
static void if_sdio_host_to_card_worker(struct work_struct *work)
drivers/net/wireless/marvell/libertas/if_sdio.c
404
card = container_of(work, struct if_sdio_card, packet_worker);
drivers/net/wireless/marvell/libertas/if_spi.c
1075
static void if_spi_resume_worker(struct work_struct *work)
drivers/net/wireless/marvell/libertas/if_spi.c
1079
card = container_of(work, struct if_spi_card, resume_work);
drivers/net/wireless/marvell/libertas/if_spi.c
846
static void if_spi_host_to_card_worker(struct work_struct *work)
drivers/net/wireless/marvell/libertas/if_spi.c
855
card = container_of(work, struct if_spi_card, packet_work);
drivers/net/wireless/marvell/libertas/main.c
392
static void lbs_set_mcast_worker(struct work_struct *work)
drivers/net/wireless/marvell/libertas/main.c
394
struct lbs_private *priv = container_of(work, struct lbs_private, mcast_work);
drivers/net/wireless/marvell/libertas_tf/main.c
196
static void lbtf_tx_work(struct work_struct *work)
drivers/net/wireless/marvell/libertas_tf/main.c
198
struct lbtf_private *priv = container_of(work, struct lbtf_private,
drivers/net/wireless/marvell/libertas_tf/main.c
82
static void lbtf_cmd_work(struct work_struct *work)
drivers/net/wireless/marvell/libertas_tf/main.c
84
struct lbtf_private *priv = container_of(work, struct lbtf_private,
drivers/net/wireless/marvell/mwifiex/11h.c
111
void mwifiex_dfs_cac_work_queue(struct work_struct *work)
drivers/net/wireless/marvell/mwifiex/11h.c
114
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/net/wireless/marvell/mwifiex/11h.c
266
void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
drivers/net/wireless/marvell/mwifiex/11h.c
269
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/net/wireless/marvell/mwifiex/init.c
55
static void fw_dump_work(struct work_struct *work)
drivers/net/wireless/marvell/mwifiex/init.c
58
container_of(work, struct mwifiex_adapter, devdump_work.work);
drivers/net/wireless/marvell/mwifiex/main.c
1372
static void mwifiex_host_mlme_work_queue(struct work_struct *work)
drivers/net/wireless/marvell/mwifiex/main.c
1375
container_of(work, struct mwifiex_adapter, host_mlme_work);
drivers/net/wireless/marvell/mwifiex/main.c
1403
static void mwifiex_rx_work_queue(struct work_struct *work)
drivers/net/wireless/marvell/mwifiex/main.c
1406
container_of(work, struct mwifiex_adapter, rx_work);
drivers/net/wireless/marvell/mwifiex/main.c
1419
static void mwifiex_main_work_queue(struct work_struct *work)
drivers/net/wireless/marvell/mwifiex/main.c
1422
container_of(work, struct mwifiex_adapter, main_work);
drivers/net/wireless/marvell/mwifiex/main.h
1648
void mwifiex_dfs_cac_work_queue(struct work_struct *work);
drivers/net/wireless/marvell/mwifiex/main.h
1649
void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work);
drivers/net/wireless/marvell/mwifiex/main.h
842
void (*iface_work)(struct work_struct *work);
drivers/net/wireless/marvell/mwifiex/pcie.c
191
static void mwifiex_pcie_work(struct work_struct *work);
drivers/net/wireless/marvell/mwifiex/pcie.c
2828
static void mwifiex_pcie_work(struct work_struct *work)
drivers/net/wireless/marvell/mwifiex/pcie.c
2831
container_of(work, struct pcie_service_card, work);
drivers/net/wireless/marvell/mwifiex/pcie.c
2848
schedule_work(&card->work);
drivers/net/wireless/marvell/mwifiex/pcie.c
2856
schedule_work(&card->work);
drivers/net/wireless/marvell/mwifiex/pcie.c
3018
cancel_work_sync(&card->work);
drivers/net/wireless/marvell/mwifiex/pcie.c
402
INIT_WORK(&card->work, mwifiex_pcie_work);
drivers/net/wireless/marvell/mwifiex/pcie.c
479
schedule_work(&card->work);
drivers/net/wireless/marvell/mwifiex/pcie.h
231
struct work_struct work;
drivers/net/wireless/marvell/mwifiex/sdio.c
22
static void mwifiex_sdio_work(struct work_struct *work);
drivers/net/wireless/marvell/mwifiex/sdio.c
2645
cancel_work_sync(&card->work);
drivers/net/wireless/marvell/mwifiex/sdio.c
3017
static void mwifiex_sdio_work(struct work_struct *work)
drivers/net/wireless/marvell/mwifiex/sdio.c
3020
container_of(work, struct sdio_mmc_card, work);
drivers/net/wireless/marvell/mwifiex/sdio.c
3036
schedule_work(&card->work);
drivers/net/wireless/marvell/mwifiex/sdio.c
3046
schedule_work(&card->work);
drivers/net/wireless/marvell/mwifiex/sdio.c
588
INIT_WORK(&card->work, mwifiex_sdio_work);
drivers/net/wireless/marvell/mwifiex/sdio.c
952
schedule_work(&card->work);
drivers/net/wireless/marvell/mwifiex/sdio.h
264
struct work_struct work;
drivers/net/wireless/marvell/mwl8k.c
3889
static void mwl8k_watchdog_ba_events(struct work_struct *work)
drivers/net/wireless/marvell/mwl8k.c
3895
container_of(work, struct mwl8k_priv, watchdog_ba_handle);
drivers/net/wireless/marvell/mwl8k.c
4977
static void mwl8k_hw_restart_work(struct work_struct *work)
drivers/net/wireless/marvell/mwl8k.c
4980
container_of(work, struct mwl8k_priv, fw_reload);
drivers/net/wireless/marvell/mwl8k.c
5711
static void mwl8k_finalize_join_worker(struct work_struct *work)
drivers/net/wireless/marvell/mwl8k.c
5714
container_of(work, struct mwl8k_priv, finalize_join_worker);
drivers/net/wireless/mediatek/mt76/agg-rx.c
91
mt76_rx_aggr_reorder_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/agg-rx.c
93
struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
drivers/net/wireless/mediatek/mt76/agg-rx.c
94
reorder_work.work);
drivers/net/wireless/mediatek/mt76/channel.c
337
void mt76_roc_complete_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/channel.c
339
struct mt76_phy *phy = container_of(work, struct mt76_phy, roc_work.work);
drivers/net/wireless/mediatek/mt76/mt76.h
1780
void mt76_scan_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt76.h
1782
void mt76_roc_complete_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
1819
void mt7603_mac_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
1821
struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
1822
mphy.mac_work.work);
drivers/net/wireless/mediatek/mt76/mt7603/main.c
196
mt7603_mac_work(&dev->mphy.mac_work.work);
drivers/net/wireless/mediatek/mt76/mt7603/main.c
20
mt7603_mac_work(&dev->mphy.mac_work.work);
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
204
void mt7603_mac_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7615/init.c
631
INIT_DELAYED_WORK(&dev->coredump.work, mt7615_coredump_work);
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2009
void mt7615_pm_wake_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2014
dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2048
void mt7615_pm_power_save_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2053
dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2054
pm.ps_work.work);
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2080
void mt7615_mac_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2086
mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2087
mac_work.work);
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2317
void mt7615_coredump_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2322
dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2323
coredump.work.work);
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
2327
queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
drivers/net/wireless/mediatek/mt76/mt7615/main.c
1029
void mt7615_roc_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7615/main.c
1033
phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
drivers/net/wireless/mediatek/mt76/mt7615/main.c
1054
void mt7615_scan_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7615/main.c
1058
phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
drivers/net/wireless/mediatek/mt76/mt7615/main.c
1059
scan_work.work);
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
401
void mt7615_pm_wake_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
402
void mt7615_pm_power_save_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
455
void mt7615_scan_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
456
void mt7615_roc_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
484
void mt7615_mac_reset_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
513
void mt7615_mac_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
539
void mt7615_coredump_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
15
static void mt7615_pci_init_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
17
struct mt7615_dev *dev = container_of(work, struct mt7615_dev,
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
200
void mt7615_mac_reset_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
208
dev = container_of(work, struct mt7615_dev, reset_work);
drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
42
static void mt7663s_init_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
46
dev = container_of(work, struct mt7615_dev, mcu_work);
drivers/net/wireless/mediatek/mt76/mt7615/usb.c
101
static void mt7663u_init_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7615/usb.c
105
dev = container_of(work, struct mt7615_dev, mcu_work);
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
131
static void mt7663_usb_sdio_rate_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
137
dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
drivers/net/wireless/mediatek/mt76/mt76_connac.h
124
struct delayed_work work;
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
2026
queue_delayed_work(dev->wq, &coredump->work,
drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
1101
static void mt76x0_phy_calibration_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
1103
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
1104
cal_work.work);
drivers/net/wireless/mediatek/mt76/mt76x02.h
182
void mt76x02_wdt_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
1184
void mt76x02_mac_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
1186
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
1187
mphy.mac_work.work);
drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
199
void mt76x02_mac_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
556
void mt76x02_wdt_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
558
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
559
wdt_work.work);
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
181
static void mt76x02u_pre_tbtt_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
184
container_of(work, struct mt76x02_dev, pre_tbtt_work);
drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
57
void mt76x2_phy_calibrate(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
31
void mt76x2u_phy_calibrate(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
281
void mt76x2_phy_calibrate(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
285
dev = container_of(work, struct mt76x02_dev, cal_work.work);
drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
42
void mt76x2u_phy_calibrate(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
46
dev = container_of(work, struct mt76x02_dev, cal_work.work);
drivers/net/wireless/mediatek/mt76/mt7915/init.c
737
static void mt7915_init_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7915/init.c
739
struct mt7915_dev *dev = container_of(work, struct mt7915_dev,
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
1486
void mt7915_mac_reset_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
1493
dev = container_of(work, struct mt7915_dev, reset_work);
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
1609
void mt7915_mac_dump_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
1620
dev = container_of(work, struct mt7915_dev, dump_work);
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
1963
void mt7915_mac_sta_rc_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
1965
struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
1999
void mt7915_mac_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2004
mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2005
mac_work.work);
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
584
void mt7915_mac_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
585
void mt7915_mac_reset_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
586
void mt7915_mac_dump_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
587
void mt7915_mac_sta_rc_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
602
void mt7915_stats_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7921/init.c
227
static void mt7921_init_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7921/init.c
229
struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
drivers/net/wireless/mediatek/mt76/mt7921/init.c
287
INIT_DELAYED_WORK(&dev->coredump.work, mt7921_coredump_work);
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
653
void mt7921_mac_reset_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
655
struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
702
void mt7921_coredump_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
707
dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev,
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
708
coredump.work.work);
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
712
queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
845
void mt7921_set_ipv6_ns_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
847
struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
drivers/net/wireless/mediatek/mt76/mt7921/main.c
1002
phy = (struct mt792x_phy *)container_of(work, struct mt792x_phy,
drivers/net/wireless/mediatek/mt76/mt7921/main.c
1003
scan_work.work);
drivers/net/wireless/mediatek/mt76/mt7921/main.c
1437
void mt7921_csa_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7921/main.c
1444
mvif = (struct mt792x_vif *)container_of(work, struct mt792x_vif,
drivers/net/wireless/mediatek/mt76/mt7921/main.c
383
void mt7921_roc_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7921/main.c
387
phy = (struct mt792x_phy *)container_of(work, struct mt792x_phy,
drivers/net/wireless/mediatek/mt76/mt7921/main.c
998
void mt7921_scan_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
256
void mt7921_mac_reset_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
265
void mt7921_stats_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
278
void mt7921_scan_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
279
void mt7921_roc_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
280
void mt7921_csa_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
282
void mt7921_coredump_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
305
void mt7921_set_ipv6_ns_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
100
static void mt7921s_card_reset(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7925/init.c
144
static void mt7925_init_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7925/init.c
146
struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
drivers/net/wireless/mediatek/mt76/mt7925/init.c
215
INIT_DELAYED_WORK(&dev->coredump.work, mt7925_coredump_work);
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1290
void mt7925_mac_reset_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1292
struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1337
void mt7925_coredump_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1342
dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev,
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1343
coredump.work.work);
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1347
queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1475
void mt7925_set_ipv6_ns_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1477
struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
drivers/net/wireless/mediatek/mt76/mt7925/main.c
1312
void mt7925_mlo_pm_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7925/main.c
1314
struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
drivers/net/wireless/mediatek/mt76/mt7925/main.c
1315
mlo_pm_work.work);
drivers/net/wireless/mediatek/mt76/mt7925/main.c
1323
void mt7925_scan_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7925/main.c
1327
phy = (struct mt792x_phy *)container_of(work, struct mt792x_phy,
drivers/net/wireless/mediatek/mt76/mt7925/main.c
1328
scan_work.work);
drivers/net/wireless/mediatek/mt76/mt7925/main.c
469
void mt7925_roc_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7925/main.c
473
phy = (struct mt792x_phy *)container_of(work, struct mt792x_phy,
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
274
void mt7925_mac_reset_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
284
void mt7925_stats_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
298
void mt7925_mlo_pm_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
299
void mt7925_scan_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
300
void mt7925_roc_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
303
void mt7925_coredump_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
326
void mt7925_set_ipv6_ns_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt792x.h
368
void mt792x_pm_wake_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt792x.h
369
void mt792x_pm_power_save_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt792x.h
379
void mt792x_mac_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt792x_mac.c
14
mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
drivers/net/wireless/mediatek/mt76/mt792x_mac.c
15
mac_work.work);
drivers/net/wireless/mediatek/mt76/mt792x_mac.c
314
void mt792x_pm_wake_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt792x_mac.c
319
dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev,
drivers/net/wireless/mediatek/mt76/mt792x_mac.c
348
void mt792x_pm_power_save_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt792x_mac.c
354
dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev,
drivers/net/wireless/mediatek/mt76/mt792x_mac.c
355
pm.ps_work.work);
drivers/net/wireless/mediatek/mt76/mt792x_mac.c
9
void mt792x_mac_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7996/init.c
1063
static void mt7996_wed_rro_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7996/init.c
1069
dev = (struct mt7996_dev *)container_of(work, struct mt7996_dev,
drivers/net/wireless/mediatek/mt76/mt7996/init.c
1070
wed_rro.work);
drivers/net/wireless/mediatek/mt76/mt7996/init.c
1198
INIT_WORK(&dev->wed_rro.work, mt7996_wed_rro_work);
drivers/net/wireless/mediatek/mt76/mt7996/init.c
1729
cancel_work_sync(&dev->wed_rro.work);
drivers/net/wireless/mediatek/mt76/mt7996/init.c
751
static void mt7996_init_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7996/init.c
753
struct mt7996_dev *dev = container_of(work, struct mt7996_dev,
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2451
cancel_work_sync(&dev->wed_rro.work);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2500
void mt7996_mac_reset_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2507
dev = container_of(work, struct mt7996_dev, reset_work);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2554
cancel_work_sync(&dev->wed_rro.work);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2652
void mt7996_mac_dump_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2663
dev = container_of(work, struct mt7996_dev, dump_work);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2885
void mt7996_mac_sta_rc_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2887
struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2932
void mt7996_mac_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2937
mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2938
mac_work.work);
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
739
ieee80211_queue_work(mt76_hw(dev), &dev->wed_rro.work);
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
468
struct work_struct work;
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
817
void mt7996_mac_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
818
void mt7996_mac_reset_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
819
void mt7996_mac_dump_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
820
void mt7996_mac_sta_rc_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
843
void mt7996_stats_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt76/scan.c
80
void mt76_scan_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/scan.c
82
struct mt76_dev *dev = container_of(work, struct mt76_dev,
drivers/net/wireless/mediatek/mt76/scan.c
83
scan_work.work);
drivers/net/wireless/mediatek/mt76/usb.c
796
static void mt76u_tx_status_data(struct work_struct *work)
drivers/net/wireless/mediatek/mt76/usb.c
803
usb = container_of(work, struct mt76_usb, stat_work);
drivers/net/wireless/mediatek/mt7601u/mac.c
301
void mt7601u_mac_work(struct work_struct *work)
drivers/net/wireless/mediatek/mt7601u/mac.c
303
struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
drivers/net/wireless/mediatek/mt7601u/mac.c
304
mac_work.work);
drivers/net/wireless/mediatek/mt7601u/main.c
287
ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
drivers/net/wireless/mediatek/mt7601u/mt7601u.h
358
void mt7601u_mac_work(struct work_struct *work);
drivers/net/wireless/mediatek/mt7601u/mt7601u.h
374
void mt7601u_tx_stat(struct work_struct *work);
drivers/net/wireless/mediatek/mt7601u/mt7601u.h
52
struct delayed_work work;
drivers/net/wireless/mediatek/mt7601u/phy.c
1002
static void mt7601u_phy_calibrate(struct work_struct *work)
drivers/net/wireless/mediatek/mt7601u/phy.c
1004
struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
drivers/net/wireless/mediatek/mt7601u/phy.c
1005
cal_work.work);
drivers/net/wireless/mediatek/mt7601u/phy.c
1076
static void mt7601u_phy_freq_cal(struct work_struct *work)
drivers/net/wireless/mediatek/mt7601u/phy.c
1078
struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
drivers/net/wireless/mediatek/mt7601u/phy.c
1079
freq_cal.work.work);
drivers/net/wireless/mediatek/mt7601u/phy.c
1090
ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, delay);
drivers/net/wireless/mediatek/mt7601u/phy.c
1104
cancel_delayed_work_sync(&dev->freq_cal.work);
drivers/net/wireless/mediatek/mt7601u/phy.c
1118
ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
drivers/net/wireless/mediatek/mt7601u/phy.c
1255
INIT_DELAYED_WORK(&dev->freq_cal.work, mt7601u_phy_freq_cal);
drivers/net/wireless/mediatek/mt7601u/phy.c
448
cancel_delayed_work_sync(&dev->freq_cal.work);
drivers/net/wireless/mediatek/mt7601u/phy.c
462
ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
drivers/net/wireless/mediatek/mt7601u/tx.c
228
void mt7601u_tx_stat(struct work_struct *work)
drivers/net/wireless/mediatek/mt7601u/tx.c
230
struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
drivers/net/wireless/mediatek/mt7601u/tx.c
231
stat_work.work);
drivers/net/wireless/microchip/wilc1000/hif.c
1057
static void handle_scan_timer(struct work_struct *work)
drivers/net/wireless/microchip/wilc1000/hif.c
1059
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
drivers/net/wireless/microchip/wilc1000/hif.c
1065
static void handle_scan_complete(struct work_struct *work)
drivers/net/wireless/microchip/wilc1000/hif.c
1067
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
drivers/net/wireless/microchip/wilc1000/hif.c
334
static void handle_connect_timeout(struct work_struct *work)
drivers/net/wireless/microchip/wilc1000/hif.c
336
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
drivers/net/wireless/microchip/wilc1000/hif.c
47
struct work_struct work;
drivers/net/wireless/microchip/wilc1000/hif.c
538
static void handle_rcvd_ntwrk_info(struct work_struct *work)
drivers/net/wireless/microchip/wilc1000/hif.c
540
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
drivers/net/wireless/microchip/wilc1000/hif.c
688
static void handle_rcvd_gnrl_async_info(struct work_struct *work)
drivers/net/wireless/microchip/wilc1000/hif.c
690
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
drivers/net/wireless/microchip/wilc1000/hif.c
77
INIT_WORK(&msg->work, msg->fn);
drivers/net/wireless/microchip/wilc1000/hif.c
82
if (!queue_work(msg->vif->wilc->hif_workqueue, &msg->work))
drivers/net/wireless/microchip/wilc1000/hif.c
826
static void handle_get_statistics(struct work_struct *work)
drivers/net/wireless/microchip/wilc1000/hif.c
828
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
drivers/net/wireless/microchip/wilc1000/hif.c
946
static void wilc_handle_listen_state_expired(struct work_struct *work)
drivers/net/wireless/microchip/wilc1000/hif.c
948
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
drivers/net/wireless/microchip/wilc1000/hif.c
977
static void handle_set_mcast_filter(struct work_struct *work)
drivers/net/wireless/microchip/wilc1000/hif.c
979
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
drivers/net/wireless/quantenna/qtnfmac/core.c
313
static void qtnf_vif_reset_handler(struct work_struct *work)
drivers/net/wireless/quantenna/qtnfmac/core.c
315
struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work);
drivers/net/wireless/quantenna/qtnfmac/core.c
367
static void qtnf_mac_scan_timeout(struct work_struct *work)
drivers/net/wireless/quantenna/qtnfmac/core.c
370
container_of(work, struct qtnf_wmac, scan_timeout.work);
drivers/net/wireless/quantenna/qtnfmac/core.c
376
static void qtnf_vif_send_data_high_pri(struct work_struct *work)
drivers/net/wireless/quantenna/qtnfmac/core.c
379
container_of(work, struct qtnf_vif, high_pri_tx_work);
drivers/net/wireless/quantenna/qtnfmac/core.h
133
void qtnf_main_work_queue(struct work_struct *work);
drivers/net/wireless/quantenna/qtnfmac/event.c
786
void qtnf_event_work_handler(struct work_struct *work)
drivers/net/wireless/quantenna/qtnfmac/event.c
788
struct qtnf_bus *bus = container_of(work, struct qtnf_bus, event_work);
drivers/net/wireless/quantenna/qtnfmac/event.h
12
void qtnf_event_work_handler(struct work_struct *work);
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
1021
static void qtnf_pearl_fw_work_handler(struct work_struct *work)
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
1023
struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
1034
static void qtnf_topaz_fw_work_handler(struct work_struct *work)
drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
1036
struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
46
static void qtnf_shm_ipc_irq_work(struct work_struct *work)
drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
48
struct qtnf_shm_ipc *ipc = container_of(work, struct qtnf_shm_ipc,
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
792
static void rt2800mmio_work_txdone(struct work_struct *work)
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
795
container_of(work, struct rt2x00_dev, txdone_work);
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
456
static void rt2800usb_work_txdone(struct work_struct *work)
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
459
container_of(work, struct rt2x00_dev, txdone_work);
drivers/net/wireless/ralink/rt2x00/rt2x00.h
325
struct delayed_work work;
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
137
static void rt2x00lib_intf_scheduled(struct work_struct *work)
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
140
container_of(work, struct rt2x00_dev, intf_work);
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
152
static void rt2x00lib_autowakeup(struct work_struct *work)
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
155
container_of(work, struct rt2x00_dev, autowakeup_work.work);
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
580
static void rt2x00lib_sleep(struct work_struct *work)
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
583
container_of(work, struct rt2x00_dev, sleep_work);
drivers/net/wireless/ralink/rt2x00/rt2x00link.c
244
&link->work, LINK_TUNE_INTERVAL);
drivers/net/wireless/ralink/rt2x00/rt2x00link.c
249
cancel_delayed_work_sync(&rt2x00dev->link.work);
drivers/net/wireless/ralink/rt2x00/rt2x00link.c
341
static void rt2x00link_tuner(struct work_struct *work)
drivers/net/wireless/ralink/rt2x00/rt2x00link.c
344
container_of(work, struct rt2x00_dev, link.work.work);
drivers/net/wireless/ralink/rt2x00/rt2x00link.c
379
&link->work, LINK_TUNE_INTERVAL);
drivers/net/wireless/ralink/rt2x00/rt2x00link.c
398
static void rt2x00link_watchdog(struct work_struct *work)
drivers/net/wireless/ralink/rt2x00/rt2x00link.c
401
container_of(work, struct rt2x00_dev, link.watchdog_work.work);
drivers/net/wireless/ralink/rt2x00/rt2x00link.c
423
INIT_DELAYED_WORK(&link->work, rt2x00link_tuner);
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
248
static void rt2x00usb_work_txdone(struct work_struct *work)
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
251
container_of(work, struct rt2x00_dev, txdone_work);
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
344
static void rt2x00usb_work_rxdone(struct work_struct *work)
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
347
container_of(work, struct rt2x00_dev, rxdone_work);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1283
static void rtl8180_beacon_work(struct work_struct *work)
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1286
container_of(work, struct rtl8180_vif, beacon_work.work);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1562
schedule_work(&vif_priv->beacon_work.work);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1026
INIT_DELAYED_WORK(&priv->work, rtl8187_work);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1063
cancel_delayed_work_sync(&priv->work);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1075
static void rtl8187_beacon_work(struct work_struct *work)
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1078
container_of(work, struct rtl8187_vif, beacon_work.work);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
1310
schedule_work(&vif_priv->beacon_work.work);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
223
ieee80211_queue_delayed_work(hw, &priv->work, 0);
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
894
static void rtl8187_work(struct work_struct *work)
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
903
struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
904
work.work);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
22
static void led_turn_on(struct work_struct *work)
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
28
struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
29
led_on.work);
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
60
static void led_turn_off(struct work_struct *work)
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
66
struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
67
led_off.work);
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
117
struct delayed_work work;
drivers/net/wireless/realtek/rtl8xxxu/core.c
5599
static void rtl8xxxu_update_beacon_work_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtl8xxxu/core.c
5602
container_of(work, struct rtl8xxxu_priv, update_beacon_work.work);
drivers/net/wireless/realtek/rtl8xxxu/core.c
5815
static void rtl8xxxu_rx_urb_work(struct work_struct *work)
drivers/net/wireless/realtek/rtl8xxxu/core.c
5824
priv = container_of(work, struct rtl8xxxu_priv, rx_urb_wq);
drivers/net/wireless/realtek/rtl8xxxu/core.c
6065
static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtl8xxxu/core.c
6075
priv = container_of(work, struct rtl8xxxu_priv, c2hcmd_work);
drivers/net/wireless/realtek/rtl8xxxu/core.c
6165
static void rtl8188e_c2hcmd_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtl8xxxu/core.c
6167
struct rtl8xxxu_priv *priv = container_of(work, struct rtl8xxxu_priv, c2hcmd_work);
drivers/net/wireless/realtek/rtl8xxxu/core.c
7334
static void rtl8xxxu_watchdog_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtl8xxxu/core.c
7341
priv = container_of(work, struct rtl8xxxu_priv, ra_watchdog.work);
drivers/net/wireless/realtek/rtlwifi/base.c
2036
static void rtl_watchdog_wq_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtlwifi/base.c
2038
struct rtl_works *rtlworks = container_of(work, struct rtl_works,
drivers/net/wireless/realtek/rtlwifi/base.c
2039
watchdog_wq.work);
drivers/net/wireless/realtek/rtlwifi/base.c
2233
static void rtl_fwevt_wq_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtlwifi/base.c
2235
struct rtl_works *rtlworks = container_of(work, struct rtl_works,
drivers/net/wireless/realtek/rtlwifi/base.c
2236
fwevt_wq.work);
drivers/net/wireless/realtek/rtlwifi/base.c
2352
static void rtl_c2hcmd_wq_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtlwifi/base.c
2354
struct rtl_works *rtlworks = container_of(work, struct rtl_works,
drivers/net/wireless/realtek/rtlwifi/base.c
2355
c2hcmd_wq.work);
drivers/net/wireless/realtek/rtlwifi/base.c
439
static void rtl_watchdog_wq_callback(struct work_struct *work);
drivers/net/wireless/realtek/rtlwifi/base.c
440
static void rtl_fwevt_wq_callback(struct work_struct *work);
drivers/net/wireless/realtek/rtlwifi/base.c
441
static void rtl_c2hcmd_wq_callback(struct work_struct *work);
drivers/net/wireless/realtek/rtlwifi/core.c
989
void rtl_update_beacon_work_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtlwifi/core.c
992
container_of(work, struct rtl_works, update_beacon_work);
drivers/net/wireless/realtek/rtlwifi/core.h
64
void rtl_update_beacon_work_callback(struct work_struct *work);
drivers/net/wireless/realtek/rtlwifi/ps.c
182
void rtl_ips_nic_off_wq_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtlwifi/ps.c
184
struct rtl_works *rtlworks = container_of(work, struct rtl_works,
drivers/net/wireless/realtek/rtlwifi/ps.c
185
ips_nic_off_wq.work);
drivers/net/wireless/realtek/rtlwifi/ps.c
565
void rtl_swlps_rfon_wq_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtlwifi/ps.c
567
struct rtl_works *rtlworks = container_of(work, struct rtl_works,
drivers/net/wireless/realtek/rtlwifi/ps.c
568
ps_rfon_wq.work);
drivers/net/wireless/realtek/rtlwifi/ps.c
642
void rtl_lps_change_work_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtlwifi/ps.c
645
container_of(work, struct rtl_works, lps_change_work);
drivers/net/wireless/realtek/rtlwifi/ps.c
678
void rtl_swlps_wq_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtlwifi/ps.c
680
struct rtl_works *rtlworks = container_of(work, struct rtl_works,
drivers/net/wireless/realtek/rtlwifi/ps.c
681
ps_work.work);
drivers/net/wireless/realtek/rtlwifi/ps.h
13
void rtl_ips_nic_off_wq_callback(struct work_struct *work);
drivers/net/wireless/realtek/rtlwifi/ps.h
20
void rtl_swlps_wq_callback(struct work_struct *work);
drivers/net/wireless/realtek/rtlwifi/ps.h
21
void rtl_swlps_rfon_wq_callback(struct work_struct *work);
drivers/net/wireless/realtek/rtlwifi/ps.h
26
void rtl_lps_change_work_callback(struct work_struct *work);
drivers/net/wireless/realtek/rtlwifi/usb.c
934
static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work)
drivers/net/wireless/realtek/rtlwifi/usb.c
937
container_of(work, struct rtl_works, fill_h2c_cmd);
drivers/net/wireless/realtek/rtw88/coex.c
3451
void rtw_coex_bt_relink_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/coex.c
3453
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
drivers/net/wireless/realtek/rtw88/coex.c
3454
coex.bt_relink_work.work);
drivers/net/wireless/realtek/rtw88/coex.c
3463
void rtw_coex_bt_reenable_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/coex.c
3465
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
drivers/net/wireless/realtek/rtw88/coex.c
3466
coex.bt_reenable_work.work);
drivers/net/wireless/realtek/rtw88/coex.c
3474
void rtw_coex_defreeze_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/coex.c
3476
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
drivers/net/wireless/realtek/rtw88/coex.c
3477
coex.defreeze_work.work);
drivers/net/wireless/realtek/rtw88/coex.c
3488
void rtw_coex_wl_remain_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/coex.c
3490
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
drivers/net/wireless/realtek/rtw88/coex.c
3491
coex.wl_remain_work.work);
drivers/net/wireless/realtek/rtw88/coex.c
3500
void rtw_coex_bt_remain_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/coex.c
3502
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
drivers/net/wireless/realtek/rtw88/coex.c
3503
coex.bt_remain_work.work);
drivers/net/wireless/realtek/rtw88/coex.c
3512
void rtw_coex_wl_connecting_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/coex.c
3514
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
drivers/net/wireless/realtek/rtw88/coex.c
3515
coex.wl_connecting_work.work);
drivers/net/wireless/realtek/rtw88/coex.c
3525
void rtw_coex_bt_multi_link_remain_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/coex.c
3527
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
drivers/net/wireless/realtek/rtw88/coex.c
3528
coex.bt_multi_link_remain_work.work);
drivers/net/wireless/realtek/rtw88/coex.c
3536
void rtw_coex_wl_ccklock_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/coex.c
3538
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
drivers/net/wireless/realtek/rtw88/coex.c
3539
coex.wl_ccklock_work.work);
drivers/net/wireless/realtek/rtw88/coex.h
389
void rtw_coex_bt_relink_work(struct work_struct *work);
drivers/net/wireless/realtek/rtw88/coex.h
390
void rtw_coex_bt_reenable_work(struct work_struct *work);
drivers/net/wireless/realtek/rtw88/coex.h
391
void rtw_coex_defreeze_work(struct work_struct *work);
drivers/net/wireless/realtek/rtw88/coex.h
392
void rtw_coex_wl_remain_work(struct work_struct *work);
drivers/net/wireless/realtek/rtw88/coex.h
393
void rtw_coex_bt_remain_work(struct work_struct *work);
drivers/net/wireless/realtek/rtw88/coex.h
394
void rtw_coex_wl_connecting_work(struct work_struct *work);
drivers/net/wireless/realtek/rtw88/coex.h
395
void rtw_coex_bt_multi_link_remain_work(struct work_struct *work);
drivers/net/wireless/realtek/rtw88/coex.h
396
void rtw_coex_wl_ccklock_work(struct work_struct *work);
drivers/net/wireless/realtek/rtw88/fw.c
1794
void rtw_fw_update_beacon_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/fw.c
1796
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
drivers/net/wireless/realtek/rtw88/fw.h
866
void rtw_fw_update_beacon_work(struct work_struct *work);
drivers/net/wireless/realtek/rtw88/main.c
224
static void rtw_watch_dog_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/main.c
226
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
drivers/net/wireless/realtek/rtw88/main.c
227
watch_dog_work.work);
drivers/net/wireless/realtek/rtw88/main.c
312
static void rtw_c2h_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/main.c
314
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, c2h_work);
drivers/net/wireless/realtek/rtw88/main.c
324
static void rtw_ips_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/main.c
326
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ips_work);
drivers/net/wireless/realtek/rtw88/main.c
334
static void rtw_sta_rc_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/main.c
336
struct rtw_sta_info *si = container_of(work, struct rtw_sta_info,
drivers/net/wireless/realtek/rtw88/main.c
674
static void rtw_fw_recovery_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/main.c
676
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
drivers/net/wireless/realtek/rtw88/main.c
712
static void rtw_txq_ba_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/main.c
714
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ba_work);
drivers/net/wireless/realtek/rtw88/sdio.c
1248
static void rtw_sdio_tx_handler(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/sdio.c
1251
container_of(work, struct rtw_sdio_work_data, work);
drivers/net/wireless/realtek/rtw88/sdio.c
1298
INIT_WORK(&rtwsdio->tx_handler_data->work, rtw_sdio_tx_handler);
drivers/net/wireless/realtek/rtw88/sdio.c
818
queue_work(rtwsdio->txwq, &rtwsdio->tx_handler_data->work);
drivers/net/wireless/realtek/rtw88/sdio.h
146
struct work_struct work;
drivers/net/wireless/realtek/rtw88/usb.c
464
static void rtw_usb_tx_handler(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/usb.c
466
struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, tx_work);
drivers/net/wireless/realtek/rtw88/usb.c
601
static void rtw_usb_rx_handler(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/usb.c
603
struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_work);
drivers/net/wireless/realtek/rtw88/usb.c
723
static void rtw_usb_rx_resubmit_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw88/usb.c
725
struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_urb_work);
drivers/net/wireless/realtek/rtw89/chan.c
1726
void rtw89_mcc_prepare_done_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/chan.c
1728
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/chan.c
1729
mcc_prepare_done_work.work);
drivers/net/wireless/realtek/rtw89/chan.c
2596
void rtw89_mcc_gc_detect_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/chan.c
2598
struct rtw89_vif_link *rtwvif_link = container_of(work, struct rtw89_vif_link,
drivers/net/wireless/realtek/rtw89/chan.c
2599
mcc_gc_detect_beacon_work.work);
drivers/net/wireless/realtek/rtw89/chan.c
2891
void rtw89_chanctx_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/chan.c
2893
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/chan.c
2894
chanctx_work.work);
drivers/net/wireless/realtek/rtw89/chan.h
171
void rtw89_chanctx_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/chan.h
201
void rtw89_mcc_prepare_done_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/chan.h
202
void rtw89_mcc_gc_detect_beacon_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/coex.c
7323
void rtw89_coex_act1_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/coex.c
7325
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/coex.c
7326
coex_act1_work.work);
drivers/net/wireless/realtek/rtw89/coex.c
7344
void rtw89_coex_bt_devinfo_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/coex.c
7346
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/coex.c
7347
coex_bt_devinfo_work.work);
drivers/net/wireless/realtek/rtw89/coex.c
7360
void rtw89_coex_rfk_chk_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/coex.c
7362
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/coex.c
7363
coex_rfk_chk_work.work);
drivers/net/wireless/realtek/rtw89/coex.c
7938
void rtw89_btc_ntfy_eapol_packet_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/coex.c
7940
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/coex.c
7949
void rtw89_btc_ntfy_arp_packet_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/coex.c
7951
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/coex.c
7959
void rtw89_btc_ntfy_dhcp_packet_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/coex.c
7961
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/coex.c
7970
void rtw89_btc_ntfy_icmp_packet_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/coex.c
7972
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/coex.h
277
void rtw89_btc_ntfy_eapol_packet_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/coex.h
278
void rtw89_btc_ntfy_arp_packet_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/coex.h
279
void rtw89_btc_ntfy_dhcp_packet_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/coex.h
280
void rtw89_btc_ntfy_icmp_packet_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/coex.h
293
void rtw89_coex_act1_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/coex.h
294
void rtw89_coex_bt_devinfo_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/coex.h
295
void rtw89_coex_rfk_chk_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/core.c
1269
static void rtw89_tx_wait_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/core.c
1271
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/core.c
1272
tx_wait_work.work);
drivers/net/wireless/realtek/rtw89/core.c
2440
static void rtw89_cancel_6ghz_probe_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/core.c
2442
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/core.c
3852
static void rtw89_core_ba_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw89/core.c
3855
container_of(work, struct rtw89_dev, ba_work);
drivers/net/wireless/realtek/rtw89/core.c
4118
static void rtw89_ips_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/core.c
4120
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/core.c
4146
txq_reinvoke_work.work);
drivers/net/wireless/realtek/rtw89/core.c
4154
forbid_ba_work.work);
drivers/net/wireless/realtek/rtw89/core.c
4379
void rtw89_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/core.c
4381
struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif,
drivers/net/wireless/realtek/rtw89/core.c
4382
roc.roc_work.work);
drivers/net/wireless/realtek/rtw89/core.c
4640
static void rtw89_track_ps_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/core.c
4642
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/core.c
4643
track_ps_work.work);
drivers/net/wireless/realtek/rtw89/core.c
4668
static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/core.c
4670
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/core.c
4671
track_work.work);
drivers/net/wireless/realtek/rtw89/core.c
5595
void rtw89_core_update_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/core.c
5598
struct rtw89_vif_link *rtwvif_link = container_of(work, struct rtw89_vif_link,
drivers/net/wireless/realtek/rtw89/core.c
5611
void rtw89_core_csa_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/core.c
5614
container_of(work, struct rtw89_vif_link, csa_beacon_work.work);
drivers/net/wireless/realtek/rtw89/core.h
7804
void rtw89_core_update_beacon_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/core.h
7805
void rtw89_core_csa_beacon_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/core.h
7808
void rtw89_roc_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/fw.c
2023
void rtw89_load_firmware_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw89/fw.c
2026
container_of(work, struct rtw89_dev, load_firmware_work);
drivers/net/wireless/realtek/rtw89/fw.c
7877
void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/fw.c
7879
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/fw.h
5178
void rtw89_load_firmware_work(struct work_struct *work);
drivers/net/wireless/realtek/rtw89/fw.h
5250
void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/phy.c
5230
void rtw89_phy_cfo_track_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/phy.c
5232
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/phy.c
5233
cfo_track_work.work);
drivers/net/wireless/realtek/rtw89/phy.c
7555
void rtw89_phy_antdiv_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/phy.c
7557
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
drivers/net/wireless/realtek/rtw89/phy.c
7558
antdiv_work.work);
drivers/net/wireless/realtek/rtw89/phy.h
1056
void rtw89_phy_cfo_track_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/phy.h
1071
void rtw89_phy_antdiv_work(struct wiphy *wiphy, struct wiphy_work *work);
drivers/net/wireless/realtek/rtw89/ps.c
448
static void rtw89_ps_noa_once_set_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/ps.c
451
container_of(work, struct rtw89_ps_noa_once_handler, set_work.work);
drivers/net/wireless/realtek/rtw89/ps.c
458
static void rtw89_ps_noa_once_clr_work(struct wiphy *wiphy, struct wiphy_work *work)
drivers/net/wireless/realtek/rtw89/ps.c
461
container_of(work, struct rtw89_ps_noa_once_handler, clr_work.work);
drivers/net/wireless/realtek/rtw89/ser.c
194
static void rtw89_ser_hdl_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw89/ser.c
197
struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
drivers/net/wireless/realtek/rtw89/ser.c
227
static void rtw89_ser_alarm_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw89/ser.c
229
struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
drivers/net/wireless/realtek/rtw89/ser.c
230
ser_alarm_work.work);
drivers/net/wireless/realtek/rtw89/usb.c
408
static void rtw89_usb_rx_handler(struct work_struct *work)
drivers/net/wireless/realtek/rtw89/usb.c
410
struct rtw89_usb *rtwusb = container_of(work, struct rtw89_usb, rx_work);
drivers/net/wireless/realtek/rtw89/usb.c
506
static void rtw89_usb_rx_resubmit_work(struct work_struct *work)
drivers/net/wireless/realtek/rtw89/usb.c
508
struct rtw89_usb *rtwusb = container_of(work, struct rtw89_usb, rx_urb_work);
drivers/net/wireless/silabs/wfx/bh.c
233
static void bh_work(struct work_struct *work)
drivers/net/wireless/silabs/wfx/bh.c
235
struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh);
drivers/net/wireless/silabs/wfx/bh.c
255
if (!wdev->hif.tx_buffers_used && !work_pending(work)) {
drivers/net/wireless/silabs/wfx/data_tx.c
175
void wfx_tx_policy_upload_work(struct work_struct *work)
drivers/net/wireless/silabs/wfx/data_tx.c
177
struct wfx_vif *wvif = container_of(work, struct wfx_vif, tx_policy_upload_work);
drivers/net/wireless/silabs/wfx/data_tx.h
43
void wfx_tx_policy_upload_work(struct work_struct *work);
drivers/net/wireless/silabs/wfx/scan.c
149
void wfx_remain_on_channel_work(struct work_struct *work)
drivers/net/wireless/silabs/wfx/scan.c
151
struct wfx_vif *wvif = container_of(work, struct wfx_vif, remain_on_channel_work);
drivers/net/wireless/silabs/wfx/scan.c
91
void wfx_hw_scan_work(struct work_struct *work)
drivers/net/wireless/silabs/wfx/scan.c
93
struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan_work);
drivers/net/wireless/silabs/wfx/scan.h
16
void wfx_hw_scan_work(struct work_struct *work);
drivers/net/wireless/silabs/wfx/scan.h
22
void wfx_remain_on_channel_work(struct work_struct *work);
drivers/net/wireless/silabs/wfx/sta.c
24
void wfx_cooling_timeout_work(struct work_struct *work)
drivers/net/wireless/silabs/wfx/sta.c
250
static void wfx_beacon_loss_work(struct work_struct *work)
drivers/net/wireless/silabs/wfx/sta.c
252
struct wfx_vif *wvif = container_of(to_delayed_work(work), struct wfx_vif,
drivers/net/wireless/silabs/wfx/sta.c
258
schedule_delayed_work(to_delayed_work(work), msecs_to_jiffies(bss_conf->beacon_int));
drivers/net/wireless/silabs/wfx/sta.c
26
struct wfx_dev *wdev = container_of(to_delayed_work(work), struct wfx_dev,
drivers/net/wireless/silabs/wfx/sta.c
618
static void wfx_update_tim_work(struct work_struct *work)
drivers/net/wireless/silabs/wfx/sta.c
620
struct wfx_vif *wvif = container_of(work, struct wfx_vif, update_tim_work);
drivers/net/wireless/silabs/wfx/sta.h
64
void wfx_cooling_timeout_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/bh.c
45
static void cw1200_bh_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/bh.c
48
container_of(work, struct cw1200_common, bh_work);
drivers/net/wireless/st/cw1200/main.c
356
INIT_WORK(&priv->scan.work, cw1200_scan_work);
drivers/net/wireless/st/cw1200/pm.c
122
static long cw1200_suspend_work(struct delayed_work *work)
drivers/net/wireless/st/cw1200/pm.c
124
int ret = cancel_delayed_work(work);
drivers/net/wireless/st/cw1200/pm.c
128
tmo = work->timer.expires - jiffies;
drivers/net/wireless/st/cw1200/pm.c
138
struct delayed_work *work,
drivers/net/wireless/st/cw1200/pm.c
144
return queue_delayed_work(priv->workqueue, work, tmo);
drivers/net/wireless/st/cw1200/scan.c
122
queue_work(priv->workqueue, &priv->scan.work);
drivers/net/wireless/st/cw1200/scan.c
126
void cw1200_scan_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/scan.c
128
struct cw1200_common *priv = container_of(work, struct cw1200_common,
drivers/net/wireless/st/cw1200/scan.c
129
scan.work);
drivers/net/wireless/st/cw1200/scan.c
145
cw1200_join_timeout(&priv->join_timeout.work);
drivers/net/wireless/st/cw1200/scan.c
261
queue_work(priv->workqueue, &priv->scan.work);
drivers/net/wireless/st/cw1200/scan.c
294
cw1200_scan_work(&priv->scan.work);
drivers/net/wireless/st/cw1200/scan.c
324
void cw1200_clear_recent_scan_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/scan.c
327
container_of(work, struct cw1200_common,
drivers/net/wireless/st/cw1200/scan.c
328
clear_recent_scan_work.work);
drivers/net/wireless/st/cw1200/scan.c
332
void cw1200_scan_timeout(struct work_struct *work)
drivers/net/wireless/st/cw1200/scan.c
335
container_of(work, struct cw1200_common, scan.timeout.work);
drivers/net/wireless/st/cw1200/scan.c
350
void cw1200_probe_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/scan.c
353
container_of(work, struct cw1200_common, scan.probe_work.work);
drivers/net/wireless/st/cw1200/scan.h
23
struct work_struct work;
drivers/net/wireless/st/cw1200/scan.h
42
void cw1200_scan_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/scan.h
43
void cw1200_scan_timeout(struct work_struct *work);
drivers/net/wireless/st/cw1200/scan.h
44
void cw1200_clear_recent_scan_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/scan.h
51
void cw1200_probe_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.c
1036
void cw1200_bss_loss_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
1039
container_of(work, struct cw1200_common, bss_loss_work.work);
drivers/net/wireless/st/cw1200/sta.c
1047
void cw1200_bss_params_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
1050
container_of(work, struct cw1200_common, bss_params_work);
drivers/net/wireless/st/cw1200/sta.c
1184
void cw1200_join_complete_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
1187
container_of(work, struct cw1200_common, join_complete_work);
drivers/net/wireless/st/cw1200/sta.c
1368
void cw1200_join_timeout(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
1371
container_of(work, struct cw1200_common, join_timeout.work);
drivers/net/wireless/st/cw1200/sta.c
1439
void cw1200_unjoin_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
1442
container_of(work, struct cw1200_common, unjoin_work);
drivers/net/wireless/st/cw1200/sta.c
1705
void cw1200_set_tim_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
1708
container_of(work, struct cw1200_common, set_tim_work);
drivers/net/wireless/st/cw1200/sta.c
1720
void cw1200_set_cts_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
1723
container_of(work, struct cw1200_common, set_cts_work);
drivers/net/wireless/st/cw1200/sta.c
2082
void cw1200_multicast_start_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
2085
container_of(work, struct cw1200_common, multicast_start_work);
drivers/net/wireless/st/cw1200/sta.c
2100
void cw1200_multicast_stop_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
2103
container_of(work, struct cw1200_common, multicast_stop_work);
drivers/net/wireless/st/cw1200/sta.c
511
void cw1200_update_filtering_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
514
container_of(work, struct cw1200_common,
drivers/net/wireless/st/cw1200/sta.c
520
void cw1200_set_beacon_wakeup_period_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
523
container_of(work, struct cw1200_common,
drivers/net/wireless/st/cw1200/sta.c
842
void cw1200_wep_key_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
845
container_of(work, struct cw1200_common, wep_key_work);
drivers/net/wireless/st/cw1200/sta.c
965
void cw1200_event_handler(struct work_struct *work)
drivers/net/wireless/st/cw1200/sta.c
968
container_of(work, struct cw1200_common, event_handler);
drivers/net/wireless/st/cw1200/sta.h
115
void cw1200_set_tim_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
116
void cw1200_set_cts_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
117
void cw1200_multicast_start_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
118
void cw1200_multicast_stop_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
60
void cw1200_event_handler(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
61
void cw1200_bss_loss_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
62
void cw1200_bss_params_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
63
void cw1200_keep_alive_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
64
void cw1200_tx_failure_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
80
void cw1200_join_timeout(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
81
void cw1200_unjoin_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
82
void cw1200_join_complete_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
83
void cw1200_wep_key_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
86
void cw1200_update_filtering_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
87
void cw1200_set_beacon_wakeup_period_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/sta.h
92
void cw1200_ba_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/txrx.c
1264
void cw1200_link_id_reset(struct work_struct *work)
drivers/net/wireless/st/cw1200/txrx.c
1267
container_of(work, struct cw1200_common, linkid_reset_work);
drivers/net/wireless/st/cw1200/txrx.c
1360
void cw1200_link_id_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/txrx.c
1363
container_of(work, struct cw1200_common, link_id_work);
drivers/net/wireless/st/cw1200/txrx.c
1365
cw1200_link_id_gc_work(&priv->link_id_gc_work.work);
drivers/net/wireless/st/cw1200/txrx.c
1369
void cw1200_link_id_gc_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/txrx.c
1372
container_of(work, struct cw1200_common, link_id_gc_work.work);
drivers/net/wireless/st/cw1200/txrx.c
385
void tx_policy_upload_work(struct work_struct *work)
drivers/net/wireless/st/cw1200/txrx.c
388
container_of(work, struct cw1200_common, tx_policy_upload_work);
drivers/net/wireless/st/cw1200/txrx.h
100
void cw1200_link_id_gc_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/txrx.h
53
void tx_policy_upload_work(struct work_struct *work);
drivers/net/wireless/st/cw1200/txrx.h
82
void cw1200_tx_timeout(struct work_struct *work);
drivers/net/wireless/st/cw1200/txrx.h
93
void cw1200_link_id_reset(struct work_struct *work);
drivers/net/wireless/st/cw1200/txrx.h
99
void cw1200_link_id_work(struct work_struct *work);
drivers/net/wireless/ti/wl1251/main.c
197
static void wl1251_irq_work(struct work_struct *work)
drivers/net/wireless/ti/wl1251/main.c
201
container_of(work, struct wl1251, irq_work);
drivers/net/wireless/ti/wl1251/ps.c
16
void wl1251_elp_work(struct work_struct *work)
drivers/net/wireless/ti/wl1251/ps.c
21
dwork = to_delayed_work(work);
drivers/net/wireless/ti/wl1251/ps.h
18
void wl1251_elp_work(struct work_struct *work);
drivers/net/wireless/ti/wl1251/tx.c
330
void wl1251_tx_work(struct work_struct *work)
drivers/net/wireless/ti/wl1251/tx.c
332
struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
drivers/net/wireless/ti/wl1251/tx.h
213
void wl1251_tx_work(struct work_struct *work);
drivers/net/wireless/ti/wlcore/main.c
126
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
drivers/net/wireless/ti/wlcore/main.c
129
struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
drivers/net/wireless/ti/wlcore/main.c
162
static void wl1271_rx_streaming_disable_work(struct work_struct *work)
drivers/net/wireless/ti/wlcore/main.c
165
struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
drivers/net/wireless/ti/wlcore/main.c
208
static void wlcore_rc_update_work(struct work_struct *work)
drivers/net/wireless/ti/wlcore/main.c
2096
static void wlcore_channel_switch_work(struct work_struct *work)
drivers/net/wireless/ti/wlcore/main.c
2104
dwork = to_delayed_work(work);
drivers/net/wireless/ti/wlcore/main.c
211
struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
drivers/net/wireless/ti/wlcore/main.c
2133
static void wlcore_connection_loss_work(struct work_struct *work)
drivers/net/wireless/ti/wlcore/main.c
2140
dwork = to_delayed_work(work);
drivers/net/wireless/ti/wlcore/main.c
2161
static void wlcore_pending_auth_complete_work(struct work_struct *work)
drivers/net/wireless/ti/wlcore/main.c
2169
dwork = to_delayed_work(work);
drivers/net/wireless/ti/wlcore/main.c
240
static void wl12xx_tx_watchdog_work(struct work_struct *work)
drivers/net/wireless/ti/wlcore/main.c
245
dwork = to_delayed_work(work);
drivers/net/wireless/ti/wlcore/main.c
5732
static void wlcore_roc_complete_work(struct work_struct *work)
drivers/net/wireless/ti/wlcore/main.c
5738
dwork = to_delayed_work(work);
drivers/net/wireless/ti/wlcore/main.c
577
static void wl1271_netstack_work(struct work_struct *work)
drivers/net/wireless/ti/wlcore/main.c
580
container_of(work, struct wl1271, netstack_work);
drivers/net/wireless/ti/wlcore/main.c
993
static void wl1271_recovery_work(struct work_struct *work)
drivers/net/wireless/ti/wlcore/main.c
996
container_of(work, struct wl1271, recovery_work);
drivers/net/wireless/ti/wlcore/scan.c
20
void wl1271_scan_complete_work(struct work_struct *work)
drivers/net/wireless/ti/wlcore/scan.c
30
dwork = to_delayed_work(work);
drivers/net/wireless/ti/wlcore/scan.h
22
void wl1271_scan_complete_work(struct work_struct *work);
drivers/net/wireless/ti/wlcore/tx.c
854
void wl1271_tx_work(struct work_struct *work)
drivers/net/wireless/ti/wlcore/tx.c
856
struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
drivers/net/wireless/ti/wlcore/tx.h
229
void wl1271_tx_work(struct work_struct *work);
drivers/net/wireless/virtual/mac80211_hwsim.c
2963
static void hw_scan_work(struct work_struct *work)
drivers/net/wireless/virtual/mac80211_hwsim.c
2966
container_of(work, struct mac80211_hwsim_data, hw_scan.work);
drivers/net/wireless/virtual/mac80211_hwsim.c
3135
static void hw_roc_start(struct work_struct *work)
drivers/net/wireless/virtual/mac80211_hwsim.c
3138
container_of(work, struct mac80211_hwsim_data, roc_start.work);
drivers/net/wireless/virtual/mac80211_hwsim.c
3152
static void hw_roc_done(struct work_struct *work)
drivers/net/wireless/virtual/mac80211_hwsim.c
3155
container_of(work, struct mac80211_hwsim_data, roc_done.work);
drivers/net/wireless/virtual/mac80211_hwsim.c
6882
static void hwsim_virtio_rx_work(struct work_struct *work)
drivers/net/wireless/virtual/mac80211_hwsim.c
948
static void hwsim_virtio_rx_work(struct work_struct *work);
drivers/net/wireless/virtual/virt_wifi.c
183
static void virt_wifi_scan_result(struct work_struct *work)
drivers/net/wireless/virtual/virt_wifi.c
186
container_of(work, struct virt_wifi_wiphy_priv,
drivers/net/wireless/virtual/virt_wifi.c
187
scan_result.work);
drivers/net/wireless/virtual/virt_wifi.c
260
static void virt_wifi_connect_complete(struct work_struct *work)
drivers/net/wireless/virtual/virt_wifi.c
263
container_of(work, struct virt_wifi_netdev_priv, connect.work);
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
1182
static void zd_process_intr(struct work_struct *work)
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
1186
struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
1431
static void beacon_watchdog_handler(struct work_struct *work)
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
1434
container_of(work, struct zd_mac, beacon.watchdog_work.work);
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
1504
static void link_led_handler(struct work_struct *work)
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
1507
container_of(work, struct zd_mac, housekeeping.link_led_work.work);
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
1082
static void zd_tx_watchdog_handler(struct work_struct *work)
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
1085
container_of(work, struct zd_usb, tx.watchdog_work.work);
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
1128
static void zd_rx_idle_timer_handler(struct work_struct *work)
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
1131
container_of(work, struct zd_usb, rx.idle_work.work);
drivers/net/wwan/iosm/iosm_ipc_uevent.c
18
info = container_of(data, struct ipc_uevent_info, work);
drivers/net/wwan/iosm/iosm_ipc_uevent.c
36
INIT_WORK(&info->work, ipc_uevent_work);
drivers/net/wwan/iosm/iosm_ipc_uevent.c
43
schedule_work(&info->work);
drivers/net/wwan/iosm/iosm_ipc_uevent.h
30
struct work_struct work;
drivers/net/wwan/mhi_wwan_ctrl.c
78
static void mhi_wwan_ctrl_refill_work(struct work_struct *work)
drivers/net/wwan/mhi_wwan_ctrl.c
80
struct mhi_wwan_dev *mhiwwan = container_of(work, struct mhi_wwan_dev, rx_refill);
drivers/net/wwan/mhi_wwan_mbim.c
392
static void mhi_net_rx_refill_work(struct work_struct *work)
drivers/net/wwan/mhi_wwan_mbim.c
394
struct mhi_mbim_context *mbim = container_of(work, struct mhi_mbim_context,
drivers/net/wwan/mhi_wwan_mbim.c
395
rx_refill.work);
drivers/net/wwan/qcom_bam_dmux.c
376
static void bam_dmux_tx_wakeup_work(struct work_struct *work)
drivers/net/wwan/qcom_bam_dmux.c
378
struct bam_dmux *dmux = container_of(work, struct bam_dmux, tx_wakeup_work);
drivers/net/wwan/qcom_bam_dmux.c
431
static void bam_dmux_register_netdev_work(struct work_struct *work)
drivers/net/wwan/qcom_bam_dmux.c
433
struct bam_dmux *dmux = container_of(work, struct bam_dmux, register_netdev_work);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
238
static void t7xx_cldma_rx_done(struct work_struct *work)
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
240
struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
333
static void t7xx_cldma_tx_done(struct work_struct *work)
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
335
struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1065
static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1067
struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
158
static void t7xx_dpmaif_tx_done(struct work_struct *work)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
160
struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work);
drivers/net/wwan/t7xx/t7xx_modem_ops.c
565
static void t7xx_md_hk_wq(struct work_struct *work)
drivers/net/wwan/t7xx/t7xx_modem_ops.c
567
struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
drivers/net/wwan/t7xx/t7xx_modem_ops.c
579
static void t7xx_ap_hk_wq(struct work_struct *work)
drivers/net/wwan/t7xx/t7xx_modem_ops.c
581
struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
drivers/net/wwan/wwan_hwsim.c
363
static void wwan_hwsim_port_del_work(struct work_struct *work)
drivers/net/wwan/wwan_hwsim.c
366
container_of(work, typeof(*port), del_work);
drivers/net/wwan/wwan_hwsim.c
471
static void wwan_hwsim_dev_del_work(struct work_struct *work)
drivers/net/wwan/wwan_hwsim.c
473
struct wwan_hwsim_dev *dev = container_of(work, typeof(*dev), del_work);
drivers/net/wwan/wwan_hwsim.c
79
static void wwan_hwsim_port_del_work(struct work_struct *work);
drivers/net/wwan/wwan_hwsim.c
80
static void wwan_hwsim_dev_del_work(struct work_struct *work);
drivers/nfc/nfcmrvl/fw_dnld.c
391
static void fw_dnld_rx_work(struct work_struct *work)
drivers/nfc/nfcmrvl/fw_dnld.c
395
struct nfcmrvl_fw_dnld *fw_dnld = container_of(work,
drivers/nfc/nfcmrvl/usb.c
273
static void nfcmrvl_waker(struct work_struct *work)
drivers/nfc/nfcmrvl/usb.c
276
container_of(work, struct nfcmrvl_usb_drv_data, waker);
drivers/nfc/nfcsim.c
170
static void nfcsim_send_wq(struct work_struct *work)
drivers/nfc/nfcsim.c
172
struct nfcsim *dev = container_of(work, struct nfcsim, send_work.work);
drivers/nfc/nfcsim.c
182
static void nfcsim_recv_wq(struct work_struct *work)
drivers/nfc/nfcsim.c
184
struct nfcsim *dev = container_of(work, struct nfcsim, recv_work);
drivers/nfc/nxp-nci/core.c
150
INIT_WORK(&info->fw_info.work, nxp_nci_fw_work);
drivers/nfc/nxp-nci/core.c
186
cancel_work_sync(&info->fw_info.work);
drivers/nfc/nxp-nci/firmware.c
155
schedule_work(&fw_info->work);
drivers/nfc/nxp-nci/firmware.c
166
void nxp_nci_fw_work(struct work_struct *work)
drivers/nfc/nxp-nci/firmware.c
172
fw_info = container_of(work, struct nxp_nci_fw_info, work);
drivers/nfc/nxp-nci/firmware.c
235
schedule_work(&fw_info->work);
drivers/nfc/nxp-nci/firmware.c
307
schedule_work(&fw_info->work);
drivers/nfc/nxp-nci/nxp-nci.h
46
struct work_struct work;
drivers/nfc/nxp-nci/nxp-nci.h
68
void nxp_nci_fw_work(struct work_struct *work);
drivers/nfc/pn533/pn533.c
1068
static void pn533_wq_tm_mi_recv(struct work_struct *work);
drivers/nfc/pn533/pn533.c
1119
static void pn533_wq_tm_mi_recv(struct work_struct *work)
drivers/nfc/pn533/pn533.c
1121
struct pn533 *dev = container_of(work, struct pn533, mi_tm_rx_work);
drivers/nfc/pn533/pn533.c
1141
static void pn533_wq_tm_mi_send(struct work_struct *work)
drivers/nfc/pn533/pn533.c
1143
struct pn533 *dev = container_of(work, struct pn533, mi_tm_tx_work);
drivers/nfc/pn533/pn533.c
1177
static void pn533_wq_tg_get_data(struct work_struct *work)
drivers/nfc/pn533/pn533.c
1179
struct pn533 *dev = container_of(work, struct pn533, tg_work);
drivers/nfc/pn533/pn533.c
1266
static void pn533_wq_rf(struct work_struct *work)
drivers/nfc/pn533/pn533.c
1268
struct pn533 *dev = container_of(work, struct pn533, rf_work);
drivers/nfc/pn533/pn533.c
1599
static void pn533_wq_poll(struct work_struct *work)
drivers/nfc/pn533/pn533.c
1601
struct pn533 *dev = container_of(work, struct pn533, poll_work.work);
drivers/nfc/pn533/pn533.c
2388
static void pn533_wq_mi_recv(struct work_struct *work)
drivers/nfc/pn533/pn533.c
2390
struct pn533 *dev = container_of(work, struct pn533, mi_rx_work);
drivers/nfc/pn533/pn533.c
2436
static void pn533_wq_mi_send(struct work_struct *work)
drivers/nfc/pn533/pn533.c
2438
struct pn533 *dev = container_of(work, struct pn533, mi_tx_work);
drivers/nfc/pn533/pn533.c
542
static void pn533_wq_cmd_complete(struct work_struct *work)
drivers/nfc/pn533/pn533.c
544
struct pn533 *dev = container_of(work, struct pn533, cmd_complete_work);
drivers/nfc/pn533/pn533.c
552
static void pn533_wq_cmd(struct work_struct *work)
drivers/nfc/pn533/pn533.c
554
struct pn533 *dev = container_of(work, struct pn533, cmd_work);
drivers/nfc/pn544/i2c.c
733
static void pn544_hci_i2c_fw_work(struct work_struct *work)
drivers/nfc/pn544/i2c.c
735
struct pn544_i2c_phy *phy = container_of(work, struct pn544_i2c_phy,
drivers/nfc/port100.c
894
static void port100_wq_cmd_complete(struct work_struct *work)
drivers/nfc/port100.c
896
struct port100 *dev = container_of(work, struct port100,
drivers/nfc/st-nci/ndlc.c
196
static void llt_ndlc_sm_work(struct work_struct *work)
drivers/nfc/st-nci/ndlc.c
198
struct llt_ndlc *ndlc = container_of(work, struct llt_ndlc, sm_work);
drivers/nfc/st21nfca/dep.c
110
static void st21nfca_tx_work(struct work_struct *work)
drivers/nfc/st21nfca/dep.c
112
struct st21nfca_hci_info *info = container_of(work,
drivers/nfc/st21nfca/se.c
250
static void st21nfca_se_wt_work(struct work_struct *work)
drivers/nfc/st21nfca/se.c
263
struct st21nfca_hci_info *info = container_of(work,
drivers/nfc/trf7970a.c
1048
static void trf7970a_timeout_work_handler(struct work_struct *work)
drivers/nfc/trf7970a.c
1050
struct trf7970a *trf = container_of(work, struct trf7970a,
drivers/nfc/trf7970a.c
1051
timeout_work.work);
drivers/ntb/hw/amd/ntb_hw_amd.c
992
static void amd_link_hb(struct work_struct *work)
drivers/ntb/hw/amd/ntb_hw_amd.c
994
struct amd_ntb_dev *ndev = hb_ndev(work);
drivers/ntb/hw/amd/ntb_hw_amd.h
218
#define hb_ndev(__work) container_of(__work, struct amd_ntb_dev, hb_timer.work)
drivers/ntb/hw/intel/ntb_hw_intel.h
193
hb_timer.work)
drivers/ntb/hw/mscc/ntb_hw_switchtec.c
520
static void check_link_status_work(struct work_struct *work)
drivers/ntb/hw/mscc/ntb_hw_switchtec.c
524
sndev = container_of(work, struct switchtec_ntb,
drivers/ntb/ntb_transport.c
1007
static void ntb_transport_link_work(struct work_struct *work)
drivers/ntb/ntb_transport.c
1010
container_of(work, struct ntb_transport_ctx, link_work.work);
drivers/ntb/ntb_transport.c
1113
static void ntb_qp_link_work(struct work_struct *work)
drivers/ntb/ntb_transport.c
1115
struct ntb_transport_qp *qp = container_of(work,
drivers/ntb/ntb_transport.c
1117
link_work.work);
drivers/ntb/ntb_transport.c
937
static void ntb_qp_link_cleanup_work(struct work_struct *work)
drivers/ntb/ntb_transport.c
939
struct ntb_transport_qp *qp = container_of(work,
drivers/ntb/ntb_transport.c
988
static void ntb_transport_link_cleanup_work(struct work_struct *work)
drivers/ntb/ntb_transport.c
991
container_of(work, struct ntb_transport_ctx, link_cleanup);
drivers/ntb/test/ntb_msi_test.c
56
static void ntb_msit_setup_work(struct work_struct *work)
drivers/ntb/test/ntb_msi_test.c
58
struct ntb_msit_ctx *nm = container_of(work, struct ntb_msit_ctx,
drivers/ntb/test/ntb_perf.c
1019
static void perf_thread_work(struct work_struct *work)
drivers/ntb/test/ntb_perf.c
1021
struct perf_thread *pthr = to_thread_work(work);
drivers/ntb/test/ntb_perf.c
1073
cancel_work_sync(&perf->threads[tidx].work);
drivers/ntb/test/ntb_perf.c
1100
(void)queue_work(perf_wq, &pthr->work);
drivers/ntb/test/ntb_perf.c
1166
INIT_WORK(&pthr->work, perf_thread_work);
drivers/ntb/test/ntb_perf.c
182
struct work_struct work;
drivers/ntb/test/ntb_perf.c
185
container_of(__work, struct perf_thread, work)
drivers/ntb/test/ntb_perf.c
628
static void perf_service_work(struct work_struct *work)
drivers/ntb/test/ntb_perf.c
630
struct perf_peer *peer = to_peer_service(work);
drivers/nvdimm/nd-core.h
73
void nvdimm_security_overwrite_query(struct work_struct *work);
drivers/nvdimm/nd-core.h
80
static inline void nvdimm_security_overwrite_query(struct work_struct *work)
drivers/nvdimm/security.c
485
void nvdimm_security_overwrite_query(struct work_struct *work)
drivers/nvdimm/security.c
488
container_of(work, typeof(*nvdimm), dwork.work);
drivers/nvme/host/apple.c
1022
static void apple_nvme_reset_work(struct work_struct *work)
drivers/nvme/host/apple.c
1028
container_of(work, struct apple_nvme, ctrl.reset_work);
drivers/nvme/host/apple.c
1234
static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
drivers/nvme/host/apple.c
1237
container_of(work, struct apple_nvme, remove_work);
drivers/nvme/host/auth.c
1002
static void nvme_ctrl_auth_work(struct work_struct *work)
drivers/nvme/host/auth.c
1005
container_of(work, struct nvme_ctrl, dhchap_auth_work);
drivers/nvme/host/auth.c
784
static void nvme_queue_auth_work(struct work_struct *work)
drivers/nvme/host/auth.c
787
container_of(work, struct nvme_dhchap_queue_context, auth_work);
drivers/nvme/host/core.c
1372
static void nvme_keep_alive_work(struct work_struct *work)
drivers/nvme/host/core.c
1374
struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
drivers/nvme/host/core.c
183
static void nvme_failfast_work(struct work_struct *work)
drivers/nvme/host/core.c
185
struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
drivers/nvme/host/core.c
251
static void nvme_delete_ctrl_work(struct work_struct *work)
drivers/nvme/host/core.c
254
container_of(work, struct nvme_ctrl, delete_work);
drivers/nvme/host/core.c
4488
static void nvme_scan_work(struct work_struct *work)
drivers/nvme/host/core.c
4491
container_of(work, struct nvme_ctrl, scan_work);
drivers/nvme/host/core.c
4645
static void nvme_async_event_work(struct work_struct *work)
drivers/nvme/host/core.c
4648
container_of(work, struct nvme_ctrl, async_event_work);
drivers/nvme/host/core.c
4705
static void nvme_fw_act_work(struct work_struct *work)
drivers/nvme/host/core.c
4707
struct nvme_ctrl *ctrl = container_of(work,
drivers/nvme/host/fc.c
1629
nvme_fc_handle_ls_rqst_work(struct work_struct *work)
drivers/nvme/host/fc.c
1632
container_of(work, struct nvme_fc_rport, lsrcv_work);
drivers/nvme/host/fc.c
1869
nvme_fc_ctrl_ioerr_work(struct work_struct *work)
drivers/nvme/host/fc.c
1872
container_of(work, struct nvme_fc_ctrl, ioerr_work);
drivers/nvme/host/fc.c
236
static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);
drivers/nvme/host/fc.c
3327
nvme_fc_reset_ctrl_work(struct work_struct *work)
drivers/nvme/host/fc.c
3330
container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
drivers/nvme/host/fc.c
3372
nvme_fc_connect_ctrl_work(struct work_struct *work)
drivers/nvme/host/fc.c
3377
container_of(to_delayed_work(work),
drivers/nvme/host/multipath.c
651
static void nvme_partition_scan_work(struct work_struct *work)
drivers/nvme/host/multipath.c
654
container_of(work, struct nvme_ns_head, partition_scan_work);
drivers/nvme/host/multipath.c
665
static void nvme_requeue_work(struct work_struct *work)
drivers/nvme/host/multipath.c
668
container_of(work, struct nvme_ns_head, requeue_work);
drivers/nvme/host/multipath.c
699
static void nvme_remove_head_work(struct work_struct *work)
drivers/nvme/host/multipath.c
701
struct nvme_ns_head *head = container_of(to_delayed_work(work),
drivers/nvme/host/multipath.c
979
static void nvme_ana_work(struct work_struct *work)
drivers/nvme/host/multipath.c
981
struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
drivers/nvme/host/pci.c
3350
static void nvme_reset_work(struct work_struct *work)
drivers/nvme/host/pci.c
3353
container_of(work, struct nvme_dev, ctrl.reset_work);
drivers/nvme/host/rdma.c
1098
static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
drivers/nvme/host/rdma.c
1100
struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
drivers/nvme/host/rdma.c
1123
static void nvme_rdma_error_recovery_work(struct work_struct *work)
drivers/nvme/host/rdma.c
1125
struct nvme_rdma_ctrl *ctrl = container_of(work,
drivers/nvme/host/rdma.c
2166
static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
drivers/nvme/host/rdma.c
2169
container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
drivers/nvme/host/tcp.c
2448
static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
drivers/nvme/host/tcp.c
2450
struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
drivers/nvme/host/tcp.c
2474
static void nvme_tcp_error_recovery_work(struct work_struct *work)
drivers/nvme/host/tcp.c
2476
struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
drivers/nvme/host/tcp.c
2516
static void nvme_reset_ctrl_work(struct work_struct *work)
drivers/nvme/host/tcp.c
2519
container_of(work, struct nvme_ctrl, reset_work);
drivers/nvme/target/core.c
1574
static void nvmet_fatal_error_handler(struct work_struct *work)
drivers/nvme/target/core.c
1577
container_of(work, struct nvmet_ctrl, fatal_err_work);
drivers/nvme/target/core.c
186
static void nvmet_async_event_work(struct work_struct *work)
drivers/nvme/target/core.c
189
container_of(work, struct nvmet_ctrl, async_event_work);
drivers/nvme/target/core.c
396
static void nvmet_keep_alive_timer(struct work_struct *work)
drivers/nvme/target/core.c
398
struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
drivers/nvme/target/fabrics-cmd-auth.c
15
static void nvmet_auth_expired_work(struct work_struct *work)
drivers/nvme/target/fabrics-cmd-auth.c
17
struct nvmet_sq *sq = container_of(to_delayed_work(work),
drivers/nvme/target/fc.c
1061
nvmet_fc_delete_assoc_work(struct work_struct *work)
drivers/nvme/target/fc.c
1064
container_of(work, struct nvmet_fc_tgt_assoc, del_work);
drivers/nvme/target/fc.c
1600
cancel_work(&iod->work);
drivers/nvme/target/fc.c
2008
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
drivers/nvme/target/fc.c
2011
container_of(work, struct nvmet_fc_ls_iod, work);
drivers/nvme/target/fc.c
2079
queue_work(nvmet_wq, &iod->work);
drivers/nvme/target/fc.c
231
static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
drivers/nvme/target/fc.c
232
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
drivers/nvme/target/fc.c
238
static void nvmet_fc_put_lsop_work(struct work_struct *work)
drivers/nvme/target/fc.c
241
container_of(work, struct nvmet_fc_ls_req_op, put_work);
drivers/nvme/target/fc.c
45
struct work_struct work;
drivers/nvme/target/fc.c
538
INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
drivers/nvme/target/fc.c
706
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
drivers/nvme/target/fc.c
709
container_of(work, struct nvmet_fc_fcp_iod, defer_work);
drivers/nvme/target/fcloop.c
263
struct work_struct work;
drivers/nvme/target/fcloop.c
326
fcloop_rport_lsrqst_work(struct work_struct *work)
drivers/nvme/target/fcloop.c
329
container_of(work, struct fcloop_rport, ls_work);
drivers/nvme/target/fcloop.c
417
fcloop_tport_lsrqst_work(struct work_struct *work)
drivers/nvme/target/fcloop.c
420
container_of(work, struct fcloop_tport, ls_work);
drivers/nvme/target/fcloop.c
553
fcloop_tgt_rscn_work(struct work_struct *work)
drivers/nvme/target/fcloop.c
556
container_of(work, struct fcloop_rscn, work);
drivers/nvme/target/fcloop.c
574
INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
drivers/nvme/target/fcloop.c
576
queue_work(nvmet_wq, &tgt_rscn->work);
drivers/nvme/target/fcloop.c
660
fcloop_fcp_recv_work(struct work_struct *work)
drivers/nvme/target/fcloop.c
663
container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
drivers/nvme/target/fcloop.c
703
fcloop_fcp_abort_recv_work(struct work_struct *work)
drivers/nvme/target/fcloop.c
706
container_of(work, struct fcloop_fcpreq, abort_rcv_work);
drivers/nvme/target/fcloop.c
747
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
drivers/nvme/target/fcloop.c
750
container_of(work, struct fcloop_fcpreq, tio_done_work);
drivers/nvme/target/io-cmd-file.c
207
struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
drivers/nvme/target/io-cmd-file.c
214
INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
drivers/nvme/target/io-cmd-file.c
215
queue_work(buffered_io_wq, &req->f.work);
drivers/nvme/target/io-cmd-file.c
259
struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
drivers/nvme/target/io-cmd-file.c
268
INIT_WORK(&req->f.work, nvmet_file_flush_work);
drivers/nvme/target/io-cmd-file.c
269
queue_work(nvmet_wq, &req->f.work);
drivers/nvme/target/io-cmd-file.c
309
struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
drivers/nvme/target/io-cmd-file.c
328
INIT_WORK(&req->f.work, nvmet_file_dsm_work);
drivers/nvme/target/io-cmd-file.c
329
queue_work(nvmet_wq, &req->f.work);
drivers/nvme/target/io-cmd-file.c
334
struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
drivers/nvme/target/io-cmd-file.c
358
INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
drivers/nvme/target/io-cmd-file.c
359
queue_work(nvmet_wq, &req->f.work);
drivers/nvme/target/loop.c
125
static void nvme_loop_execute_work(struct work_struct *work)
drivers/nvme/target/loop.c
128
container_of(work, struct nvme_loop_iod, work);
drivers/nvme/target/loop.c
170
queue_work(nvmet_wq, &iod->work);
drivers/nvme/target/loop.c
190
queue_work(nvmet_wq, &iod->work);
drivers/nvme/target/loop.c
199
INIT_WORK(&iod->work, nvme_loop_execute_work);
drivers/nvme/target/loop.c
24
struct work_struct work;
drivers/nvme/target/loop.c
451
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
drivers/nvme/target/loop.c
454
container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
drivers/nvme/target/nvmet.h
463
struct work_struct work;
drivers/nvme/target/nvmet.h
468
struct work_struct work;
drivers/nvme/target/passthru.c
216
struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
drivers/nvme/target/passthru.c
350
INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
drivers/nvme/target/passthru.c
352
queue_work(nvmet_wq, &req->p.work);
drivers/nvme/target/pci-epf.c
103
struct delayed_work work;
drivers/nvme/target/pci-epf.c
1188
queue_delayed_work(system_highpri_wq, &cq->work, 0);
drivers/nvme/target/pci-epf.c
1356
cancel_delayed_work_sync(&cq->work);
drivers/nvme/target/pci-epf.c
151
struct work_struct work;
drivers/nvme/target/pci-epf.c
1542
static void nvmet_pci_epf_cq_work(struct work_struct *work);
drivers/nvme/target/pci-epf.c
1553
INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work);
drivers/nvme/target/pci-epf.c
1592
static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
drivers/nvme/target/pci-epf.c
1595
container_of(work, struct nvmet_pci_epf_iod, work);
drivers/nvme/target/pci-epf.c
1696
queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work);
drivers/nvme/target/pci-epf.c
1704
static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
drivers/nvme/target/pci-epf.c
1707
container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work);
drivers/nvme/target/pci-epf.c
1754
static void nvmet_pci_epf_cq_work(struct work_struct *work)
drivers/nvme/target/pci-epf.c
1757
container_of(work, struct nvmet_pci_epf_queue, work.work);
drivers/nvme/target/pci-epf.c
1827
queue_delayed_work(system_highpri_wq, &cq->work,
drivers/nvme/target/pci-epf.c
1948
static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
drivers/nvme/target/pci-epf.c
1951
container_of(work, struct nvmet_pci_epf_ctrl, poll_cc.work);
drivers/nvme/target/pci-epf.c
672
static void nvmet_pci_epf_exec_iod_work(struct work_struct *work);
drivers/nvme/target/pci-epf.c
693
INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work);
drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
170
static void ls_g4_pcie_reset(struct work_struct *work)
drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
172
struct delayed_work *dwork = to_delayed_work(work);
drivers/pci/controller/pci-hyperv.c
2698
static void pci_devices_present_work(struct work_struct *work)
drivers/pci/controller/pci-hyperv.c
2710
dr_wrk = container_of(work, struct hv_dr_work, wrk);
drivers/pci/controller/pci-hyperv.c
2949
static void hv_eject_device_work(struct work_struct *work)
drivers/pci/controller/pci-hyperv.c
2962
hpdev = container_of(work, struct hv_pci_dev, wrk);
drivers/pci/controller/pcie-rockchip-ep.c
533
static void rockchip_pcie_ep_link_training(struct work_struct *work)
drivers/pci/controller/pcie-rockchip-ep.c
536
container_of(work, struct rockchip_pcie_ep, link_training.work);
drivers/pci/doe.c
459
destroy_work_on_stack(&task->work);
drivers/pci/doe.c
480
static void doe_statemachine_work(struct work_struct *work)
drivers/pci/doe.c
482
struct pci_doe_task *task = container_of(work, struct pci_doe_task,
drivers/pci/doe.c
483
work);
drivers/pci/doe.c
764
INIT_WORK_ONSTACK(&task->work, doe_statemachine_work);
drivers/pci/doe.c
765
queue_work(doe_mb->work_queue, &task->work);
drivers/pci/doe.c
96
struct work_struct work;
drivers/pci/endpoint/functions/pci-epf-mhi.c
451
static void pci_epf_mhi_dma_worker(struct work_struct *work)
drivers/pci/endpoint/functions/pci-epf-mhi.c
453
struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, dma_work);
drivers/pci/endpoint/functions/pci-epf-ntb.c
1772
queue_work(kpcintb_workqueue, &ntb->epc[type]->cmd_handler.work);
drivers/pci/endpoint/functions/pci-epf-ntb.c
620
static void epf_ntb_cmd_handler(struct work_struct *work)
drivers/pci/endpoint/functions/pci-epf-ntb.c
632
ntb_epc = container_of(work, struct epf_ntb_epc, cmd_handler.work);
drivers/pci/endpoint/functions/pci-epf-test.c
1169
queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
drivers/pci/endpoint/functions/pci-epf-test.c
975
static void pci_epf_test_cmd_handler(struct work_struct *work)
drivers/pci/endpoint/functions/pci-epf-test.c
978
struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
drivers/pci/endpoint/functions/pci-epf-test.c
979
cmd_handler.work);
drivers/pci/endpoint/functions/pci-epf-vntb.c
253
static void epf_ntb_cmd_handler(struct work_struct *work)
drivers/pci/endpoint/functions/pci-epf-vntb.c
262
ntb = container_of(work, struct epf_ntb, cmd_handler.work);
drivers/pci/endpoint/functions/pci-epf-vntb.c
934
queue_work(kpcintb_workqueue, &ntb->cmd_handler.work);
drivers/pci/hotplug/cpqphp_core.c
309
u32 work;
drivers/pci/hotplug/cpqphp_core.c
340
PCI_CLASS_REVISION, &work);
drivers/pci/hotplug/cpqphp_core.c
342
if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
drivers/pci/hotplug/cpqphp_core.c
345
PCI_PRIMARY_BUS, &work);
drivers/pci/hotplug/cpqphp_core.c
347
if (((work >> 8) & 0x000000FF) == (long) bus_num)
drivers/pci/hotplug/cpqphp_pci.c
195
u32 work;
drivers/pci/hotplug/cpqphp_pci.c
202
if (!pci_bus_read_dev_vendor_id(ctrl->pci_bus, tdevice, &work, 0))
drivers/pci/hotplug/cpqphp_pci.c
204
ret = pci_bus_read_config_dword(ctrl->pci_bus, tdevice, PCI_CLASS_REVISION, &work);
drivers/pci/hotplug/cpqphp_pci.c
209
if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) {
drivers/pci/hotplug/cpqphp_pci.c
233
u32 work;
drivers/pci/hotplug/cpqphp_pci.c
246
pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
drivers/pci/hotplug/cpqphp_pci.c
247
if (!nobridge || PCI_POSSIBLE_ERROR(work))
drivers/pci/hotplug/cpqphp_pci.c
251
pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work);
drivers/pci/hotplug/cpqphp_pci.c
252
dbg("work >> 8 (%x) = BRIDGE (%x)\n", work >> 8, PCI_TO_PCI_BRIDGE_CLASS);
drivers/pci/hotplug/cpqphp_pci.c
254
if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
drivers/pci/hotplug/octep_hp.c
219
static void octep_hp_work_handler(struct work_struct *work)
drivers/pci/hotplug/octep_hp.c
225
hp_ctrl = container_of(work, struct octep_hp_controller, work);
drivers/pci/hotplug/octep_hp.c
285
schedule_work(&hp_ctrl->work);
drivers/pci/hotplug/octep_hp.c
295
flush_work(&hp_ctrl->work);
drivers/pci/hotplug/octep_hp.c
341
INIT_WORK(&hp_ctrl->work, octep_hp_work_handler);
drivers/pci/hotplug/octep_hp.c
62
struct work_struct work;
drivers/pci/hotplug/pciehp.h
170
void pciehp_queue_pushbutton_work(struct work_struct *work);
drivers/pci/hotplug/pciehp_ctrl.c
148
void pciehp_queue_pushbutton_work(struct work_struct *work)
drivers/pci/hotplug/pciehp_ctrl.c
150
struct controller *ctrl = container_of(work, struct controller,
drivers/pci/hotplug/pciehp_ctrl.c
151
button_work.work);
drivers/pci/hotplug/pnv_php.c
1041
INIT_WORK(&event->work, pnv_php_event_handler);
drivers/pci/hotplug/pnv_php.c
1044
queue_work(php_slot->wq, &event->work);
drivers/pci/hotplug/pnv_php.c
32
struct work_struct work;
drivers/pci/hotplug/pnv_php.c
956
static void pnv_php_event_handler(struct work_struct *work)
drivers/pci/hotplug/pnv_php.c
959
container_of(work, struct pnv_php_event, work);
drivers/pci/hotplug/shpchp.h
151
void shpchp_queue_pushbutton_work(struct work_struct *work);
drivers/pci/hotplug/shpchp.h
61
struct delayed_work work; /* work for button event */
drivers/pci/hotplug/shpchp.h
70
struct work_struct work;
drivers/pci/hotplug/shpchp_core.c
132
cancel_delayed_work(&slot->work);
drivers/pci/hotplug/shpchp_core.c
91
INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work);
drivers/pci/hotplug/shpchp_ctrl.c
24
static void interrupt_event_handler(struct work_struct *work);
drivers/pci/hotplug/shpchp_ctrl.c
377
struct work_struct work;
drivers/pci/hotplug/shpchp_ctrl.c
38
INIT_WORK(&info->work, interrupt_event_handler);
drivers/pci/hotplug/shpchp_ctrl.c
387
static void shpchp_pushbutton_thread(struct work_struct *work)
drivers/pci/hotplug/shpchp_ctrl.c
390
container_of(work, struct pushbutton_work_info, work);
drivers/pci/hotplug/shpchp_ctrl.c
40
queue_work(p_slot->wq, &info->work);
drivers/pci/hotplug/shpchp_ctrl.c
416
void shpchp_queue_pushbutton_work(struct work_struct *work)
drivers/pci/hotplug/shpchp_ctrl.c
418
struct slot *p_slot = container_of(work, struct slot, work.work);
drivers/pci/hotplug/shpchp_ctrl.c
428
INIT_WORK(&info->work, shpchp_pushbutton_thread);
drivers/pci/hotplug/shpchp_ctrl.c
442
queue_work(p_slot->wq, &info->work);
drivers/pci/hotplug/shpchp_ctrl.c
479
queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
drivers/pci/hotplug/shpchp_ctrl.c
490
cancel_delayed_work(&p_slot->work);
drivers/pci/hotplug/shpchp_ctrl.c
517
static void interrupt_event_handler(struct work_struct *work)
drivers/pci/hotplug/shpchp_ctrl.c
519
struct event_info *info = container_of(work, struct event_info, work);
drivers/pci/hotplug/shpchp_ctrl.c
642
cancel_delayed_work(&p_slot->work);
drivers/pci/hotplug/shpchp_ctrl.c
678
cancel_delayed_work(&p_slot->work);
drivers/pci/pci-driver.c
344
struct work_struct work;
drivers/pci/pci-driver.c
348
static void local_pci_probe_callback(struct work_struct *work)
drivers/pci/pci-driver.c
350
struct pci_probe_arg *arg = container_of(work, struct pci_probe_arg, work);
drivers/pci/pci-driver.c
389
INIT_WORK_ONSTACK(&arg.work, local_pci_probe_callback);
drivers/pci/pci-driver.c
406
queue_work_on(cpu, wq, &arg.work);
drivers/pci/pci-driver.c
408
flush_work(&arg.work);
drivers/pci/pci-driver.c
415
destroy_work_on_stack(&arg.work);
drivers/pci/pci.c
2340
static void pci_pme_list_scan(struct work_struct *work)
drivers/pci/pci.c
55
static void pci_pme_list_scan(struct work_struct *work);
drivers/pci/pcie/aer.c
1220
static void aer_recover_work_func(struct work_struct *work)
drivers/pci/pcie/pme.c
214
static void pcie_pme_work_fn(struct work_struct *work)
drivers/pci/pcie/pme.c
217
container_of(work, struct pcie_pme_service_data, work);
drivers/pci/pcie/pme.c
288
schedule_work(&data->work);
drivers/pci/pcie/pme.c
343
INIT_WORK(&data->work, pcie_pme_work_fn);
drivers/pci/pcie/pme.c
45
struct work_struct work;
drivers/pci/pcie/pme.c
452
cancel_work_sync(&data->work);
drivers/pci/switch/switchtec.c
1258
static void link_event_work(struct work_struct *work)
drivers/pci/switch/switchtec.c
1262
stdev = container_of(work, struct switchtec_dev, link_event_work);
drivers/pci/switch/switchtec.c
264
static void mrpc_event_work(struct work_struct *work)
drivers/pci/switch/switchtec.c
268
stdev = container_of(work, struct switchtec_dev, mrpc_work);
drivers/pci/switch/switchtec.c
294
static void mrpc_timeout_work(struct work_struct *work)
drivers/pci/switch/switchtec.c
299
stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);
drivers/perf/riscv_pmu_sbi.c
378
static void pmu_sbi_check_std_events(struct work_struct *work)
drivers/phy/allwinner/phy-sun4i-usb.c
582
static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
drivers/phy/allwinner/phy-sun4i-usb.c
585
container_of(work, struct sun4i_usb_phy_data, detect.work);
drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
228
static void extcon_work(struct work_struct *work)
drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
234
driver = container_of(to_delayed_work(work),
drivers/phy/freescale/phy-fsl-lynx-28g.c
1061
#define work_to_lynx(w) container_of((w), struct lynx_28g_priv, cdr_check.work)
drivers/phy/freescale/phy-fsl-lynx-28g.c
1063
static void lynx_28g_cdr_lock_check(struct work_struct *work)
drivers/phy/freescale/phy-fsl-lynx-28g.c
1065
struct lynx_28g_priv *priv = work_to_lynx(work);
drivers/phy/motorola/phy-cpcap-usb.c
215
static void cpcap_usb_detect(struct work_struct *work)
drivers/phy/motorola/phy-cpcap-usb.c
222
ddata = container_of(work, struct cpcap_phy_ddata, detect_work.work);
drivers/phy/motorola/phy-mapphone-mdm6600.c
187
static void phy_mdm6600_status(struct work_struct *work)
drivers/phy/motorola/phy-mapphone-mdm6600.c
194
ddata = container_of(work, struct phy_mdm6600, status_work.work);
drivers/phy/motorola/phy-mapphone-mdm6600.c
475
static void phy_mdm6600_deferred_power_on(struct work_struct *work)
drivers/phy/motorola/phy-mapphone-mdm6600.c
480
ddata = container_of(work, struct phy_mdm6600, bootup_work.work);
drivers/phy/motorola/phy-mapphone-mdm6600.c
508
static void phy_mdm6600_modem_wake(struct work_struct *work)
drivers/phy/motorola/phy-mapphone-mdm6600.c
512
ddata = container_of(work, struct phy_mdm6600, modem_wake_work.work);
drivers/phy/motorola/phy-mapphone-mdm6600.c
538
phy_mdm6600_modem_wake(&ddata->modem_wake_work.work);
drivers/phy/phy-lgm-usb.c
138
static void tca_work(struct work_struct *work)
drivers/phy/phy-lgm-usb.c
140
struct tca_apb *ta = container_of(work, struct tca_apb, wk);
drivers/phy/renesas/phy-rcar-gen3-usb2.c
1072
INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
drivers/phy/renesas/phy-rcar-gen3-usb2.c
140
struct work_struct work;
drivers/phy/renesas/phy-rcar-gen3-usb2.c
169
static void rcar_gen3_phy_usb2_work(struct work_struct *work)
drivers/phy/renesas/phy-rcar-gen3-usb2.c
171
struct rcar_gen3_chan *ch = container_of(work, struct rcar_gen3_chan,
drivers/phy/renesas/phy-rcar-gen3-usb2.c
172
work);
drivers/phy/renesas/phy-rcar-gen3-usb2.c
268
schedule_work(&ch->work);
drivers/phy/renesas/phy-rcar-gen3-usb2.c
278
schedule_work(&ch->work);
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
1044
rockchip_usb2phy_sm_work(&rport->sm_work.work);
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
1060
rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
659
static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
662
container_of(work, struct rockchip_usb2phy_port,
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
663
otg_sm_work.work);
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
818
static void rockchip_chg_detect_work(struct work_struct *work)
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
821
container_of(work, struct rockchip_usb2phy_port, chg_work.work);
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
902
rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
926
static void rockchip_usb2phy_sm_work(struct work_struct *work)
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
929
container_of(work, struct rockchip_usb2phy_port, sm_work.work);
drivers/phy/tegra/xusb.c
607
static void tegra_xusb_usb_phy_work(struct work_struct *work)
drivers/phy/tegra/xusb.c
609
struct tegra_xusb_port *port = container_of(work,
drivers/phy/ti/phy-tusb1210.c
258
static void tusb1210_chg_det_work(struct work_struct *work)
drivers/phy/ti/phy-tusb1210.c
260
struct tusb1210 *tusb = container_of(work, struct tusb1210, chg_det_work.work);
drivers/phy/ti/phy-twl4030-usb.c
624
static void twl4030_id_workaround_work(struct work_struct *work)
drivers/phy/ti/phy-twl4030-usb.c
626
struct twl4030_usb *twl = container_of(work, struct twl4030_usb,
drivers/phy/ti/phy-twl4030-usb.c
627
id_workaround_work.work);
drivers/pinctrl/pinctrl-ocelot.c
2197
static void ocelot_irq_work(struct work_struct *work)
drivers/pinctrl/pinctrl-ocelot.c
2199
struct ocelot_irq_work *w = container_of(work, struct ocelot_irq_work, irq_work);
drivers/pinctrl/pinctrl-ocelot.c
2263
struct ocelot_irq_work *work;
drivers/pinctrl/pinctrl-ocelot.c
2265
work = kmalloc_obj(*work, GFP_ATOMIC);
drivers/pinctrl/pinctrl-ocelot.c
2266
if (!work)
drivers/pinctrl/pinctrl-ocelot.c
2269
work->irq_desc = desc;
drivers/pinctrl/pinctrl-ocelot.c
2270
INIT_WORK(&work->irq_work, ocelot_irq_work);
drivers/pinctrl/pinctrl-ocelot.c
2271
queue_work(info->wq, &work->irq_work);
drivers/platform/arm64/acer-aspire1-ec.c
375
static void aspire_ec_bridge_update_hpd_work(struct work_struct *work)
drivers/platform/arm64/acer-aspire1-ec.c
377
struct aspire_ec *ec = container_of(work, struct aspire_ec, work);
drivers/platform/arm64/acer-aspire1-ec.c
391
schedule_work(&ec->work);
drivers/platform/arm64/acer-aspire1-ec.c
499
INIT_WORK(&ec->work, aspire_ec_bridge_update_hpd_work);
drivers/platform/arm64/acer-aspire1-ec.c
74
struct work_struct work;
drivers/platform/chrome/cros_ec_ishtp.c
142
static void ish_evt_handler(struct work_struct *work)
drivers/platform/chrome/cros_ec_ishtp.c
145
container_of(work, struct ishtp_cl_data, work_ec_evt);
drivers/platform/chrome/cros_ec_ishtp.c
561
static void reset_handler(struct work_struct *work)
drivers/platform/chrome/cros_ec_ishtp.c
567
container_of(work, struct ishtp_cl_data, work_ishtp_reset);
drivers/platform/chrome/cros_ec_spi.c
644
static void cros_ec_xfer_high_pri_work(struct kthread_work *work)
drivers/platform/chrome/cros_ec_spi.c
648
params = container_of(work, struct cros_ec_xfer_work_params, work);
drivers/platform/chrome/cros_ec_spi.c
658
.work = KTHREAD_WORK_INIT(params.work,
drivers/platform/chrome/cros_ec_spi.c
674
kthread_queue_work(ec_spi->high_pri_worker, ¶ms.work);
drivers/platform/chrome/cros_ec_spi.c
675
kthread_flush_work(¶ms.work);
drivers/platform/chrome/cros_ec_spi.c
96
struct kthread_work work;
drivers/platform/chrome/cros_ec_typec.c
1304
static void cros_typec_port_work(struct work_struct *work)
drivers/platform/chrome/cros_ec_typec.c
1306
struct cros_typec_data *typec = container_of(work, struct cros_typec_data, port_work);
drivers/platform/chrome/cros_typec_altmode.c
136
schedule_work(&adata->work);
drivers/platform/chrome/cros_typec_altmode.c
182
schedule_work(&adata->work);
drivers/platform/chrome/cros_typec_altmode.c
189
schedule_work(&adata->work);
drivers/platform/chrome/cros_typec_altmode.c
19
struct work_struct work;
drivers/platform/chrome/cros_typec_altmode.c
238
schedule_work(&adata->work);
drivers/platform/chrome/cros_typec_altmode.c
301
schedule_work(&adata->work);
drivers/platform/chrome/cros_typec_altmode.c
328
INIT_WORK(&adata->work, cros_typec_altmode_work);
drivers/platform/chrome/cros_typec_altmode.c
361
INIT_WORK(&adata->work, cros_typec_altmode_work);
drivers/platform/chrome/cros_typec_altmode.c
40
static void cros_typec_altmode_work(struct work_struct *work)
drivers/platform/chrome/cros_typec_altmode.c
43
container_of(work, struct cros_typec_altmode_data, work);
drivers/platform/chrome/cros_typec_altmode.c
97
schedule_work(&adata->work);
drivers/platform/chrome/cros_usbpd_logger.c
170
static void cros_usbpd_log_check(struct work_struct *work)
drivers/platform/chrome/cros_usbpd_logger.c
172
struct logger_data *logger = container_of(to_delayed_work(work),
drivers/platform/cznic/turris-omnia-mcu-gpio.c
834
static void button_release_emul_fn(struct work_struct *work)
drivers/platform/cznic/turris-omnia-mcu-gpio.c
836
struct omnia_mcu *mcu = container_of(to_delayed_work(work),
drivers/platform/mellanox/mlxbf-tmfifo.c
1013
schedule_work(&fifo->work);
drivers/platform/mellanox/mlxbf-tmfifo.c
1326
cancel_work_sync(&fifo->work);
drivers/platform/mellanox/mlxbf-tmfifo.c
1352
INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
drivers/platform/mellanox/mlxbf-tmfifo.c
201
struct work_struct work;
drivers/platform/mellanox/mlxbf-tmfifo.c
321
schedule_work(&irq_info->fifo->work);
drivers/platform/mellanox/mlxbf-tmfifo.c
448
schedule_work(&fifo->work);
drivers/platform/mellanox/mlxbf-tmfifo.c
956
static void mlxbf_tmfifo_work_handler(struct work_struct *work)
drivers/platform/mellanox/mlxbf-tmfifo.c
960
fifo = container_of(work, struct mlxbf_tmfifo, work);
drivers/platform/mellanox/mlxreg-hotplug.c
502
static void mlxreg_hotplug_work_handler(struct work_struct *work)
drivers/platform/mellanox/mlxreg-hotplug.c
511
priv = container_of(work, struct mlxreg_hotplug_priv_data,
drivers/platform/mellanox/mlxreg-hotplug.c
512
dwork_irq.work);
drivers/platform/mellanox/mlxreg-hotplug.c
646
mlxreg_hotplug_work_handler(&priv->dwork_irq.work);
drivers/platform/mips/cpu_hwmon.c
119
static void do_thermal_timer(struct work_struct *work)
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
132
struct work_struct work;
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
252
static void buffer_work_cb(struct work_struct *work)
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
255
container_of(work, struct mmal_msg_context, u.bulk.work);
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
284
static void buffer_to_host_work_cb(struct work_struct *work)
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
287
container_of(work, struct mmal_msg_context,
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
403
INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
530
schedule_work(&msg_context->u.bulk.work);
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
539
schedule_work(&msg_context->u.bulk.work);
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
549
schedule_work(&msg_context->u.bulk.work);
drivers/platform/surface/aggregator/controller.c
723
static bool ssam_cplt_submit(struct ssam_cplt *cplt, struct work_struct *work)
drivers/platform/surface/aggregator/controller.c
725
return queue_work(cplt->wq, work);
drivers/platform/surface/aggregator/controller.c
750
ssam_cplt_submit(cplt, &evq->work);
drivers/platform/surface/aggregator/controller.c
776
static void ssam_event_queue_work_fn(struct work_struct *work)
drivers/platform/surface/aggregator/controller.c
784
queue = container_of(work, struct ssam_event_queue, work);
drivers/platform/surface/aggregator/controller.c
799
ssam_cplt_submit(queue->cplt, &queue->work);
drivers/platform/surface/aggregator/controller.c
813
INIT_WORK(&evq->work, ssam_event_queue_work_fn);
drivers/platform/surface/aggregator/controller.h
110
struct work_struct work;
drivers/platform/surface/aggregator/ssh_packet_layer.c
1500
static void ssh_ptl_timeout_reap(struct work_struct *work)
drivers/platform/surface/aggregator/ssh_packet_layer.c
1502
struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
drivers/platform/surface/aggregator/ssh_request_layer.c
1044
INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);
drivers/platform/surface/aggregator/ssh_request_layer.c
1234
cancel_work_sync(&rtl->tx.work);
drivers/platform/surface/aggregator/ssh_request_layer.c
313
return schedule_work(&rtl->tx.work);
drivers/platform/surface/aggregator/ssh_request_layer.c
316
static void ssh_rtl_tx_work_fn(struct work_struct *work)
drivers/platform/surface/aggregator/ssh_request_layer.c
318
struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
drivers/platform/surface/aggregator/ssh_request_layer.c
820
static void ssh_rtl_timeout_reap(struct work_struct *work)
drivers/platform/surface/aggregator/ssh_request_layer.c
822
struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
drivers/platform/surface/aggregator/ssh_request_layer.h
81
struct work_struct work;
drivers/platform/surface/surface_acpi_notify.c
180
struct delayed_work work;
drivers/platform/surface/surface_acpi_notify.c
332
static void san_evt_bat_workfn(struct work_struct *work)
drivers/platform/surface/surface_acpi_notify.c
336
ev = container_of(work, struct san_event_work, work.work);
drivers/platform/surface/surface_acpi_notify.c
345
struct san_event_work *work;
drivers/platform/surface/surface_acpi_notify.c
351
work = kzalloc(sizeof(*work) + event->length, GFP_KERNEL);
drivers/platform/surface/surface_acpi_notify.c
352
if (!work)
drivers/platform/surface/surface_acpi_notify.c
355
INIT_DELAYED_WORK(&work->work, san_evt_bat_workfn);
drivers/platform/surface/surface_acpi_notify.c
356
work->dev = d->dev;
drivers/platform/surface/surface_acpi_notify.c
358
work->event = *event;
drivers/platform/surface/surface_acpi_notify.c
359
memcpy(work->event.data, event->data, event->length);
drivers/platform/surface/surface_acpi_notify.c
361
queue_delayed_work(san_wq, &work->work, delay);
drivers/platform/surface/surface_aggregator_hub.c
67
static void ssam_hub_update_workfn(struct work_struct *work)
drivers/platform/surface/surface_aggregator_hub.c
69
struct ssam_hub *hub = container_of(work, struct ssam_hub, update_work.work);
drivers/platform/surface/surface_aggregator_tabletsw.c
88
static void ssam_tablet_sw_update_workfn(struct work_struct *work)
drivers/platform/surface/surface_aggregator_tabletsw.c
90
struct ssam_tablet_sw *sw = container_of(work, struct ssam_tablet_sw, update_work);
drivers/platform/surface/surface_dtx.c
728
static void sdtx_device_mode_workfn(struct work_struct *work)
drivers/platform/surface/surface_dtx.c
730
struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
drivers/platform/surface/surface_dtx.c
874
static void sdtx_device_state_workfn(struct work_struct *work)
drivers/platform/surface/surface_dtx.c
876
struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
drivers/platform/x86/amd/hfi/hfi.c
117
static void amd_hfi_sched_itmt_work(struct work_struct *work)
drivers/platform/x86/amd/pmf/acpi.c
221
static void apmf_sbios_heartbeat_notify(struct work_struct *work)
drivers/platform/x86/amd/pmf/acpi.c
223
struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work);
drivers/platform/x86/amd/pmf/core.c
134
static void amd_pmf_get_metrics(struct work_struct *work)
drivers/platform/x86/amd/pmf/core.c
136
struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
drivers/platform/x86/amd/pmf/tee-if.c
313
static void amd_pmf_invoke_cmd(struct work_struct *work)
drivers/platform/x86/amd/pmf/tee-if.c
315
struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, pb_work.work);
drivers/platform/x86/asus-laptop.c
215
struct work_struct work;
drivers/platform/x86/asus-laptop.c
563
queue_work(asus->led_workqueue, &led->work);
drivers/platform/x86/asus-laptop.c
566
static void asus_led_cdev_update(struct work_struct *work)
drivers/platform/x86/asus-laptop.c
568
struct asus_led *led = container_of(work, struct asus_led, work);
drivers/platform/x86/asus-laptop.c
624
queue_work(asus->led_workqueue, &led->work);
drivers/platform/x86/asus-laptop.c
627
static void asus_kled_cdev_update(struct work_struct *work)
drivers/platform/x86/asus-laptop.c
629
struct asus_led *led = container_of(work, struct asus_led, work);
drivers/platform/x86/asus-laptop.c
673
INIT_WORK(&led->work, asus_led_cdev_update);
drivers/platform/x86/asus-laptop.c
734
INIT_WORK(&led->work, asus_kled_cdev_update);
drivers/platform/x86/asus-tf103c-dock.c
657
static void tf103c_dock_hpd_work(struct work_struct *work)
drivers/platform/x86/asus-tf103c-dock.c
660
container_of(work, struct tf103c_dock_data, hpd_work.work);
drivers/platform/x86/asus-wireless.c
91
static void led_state_update(struct work_struct *work)
drivers/platform/x86/asus-wireless.c
96
data = container_of(work, struct asus_wireless_data, led_work);
drivers/platform/x86/asus-wmi.c
1681
static void kbd_led_update_all(struct work_struct *work)
drivers/platform/x86/asus-wmi.c
1687
asus = container_of(work, struct asus_wmi, kbd_led_work);
drivers/platform/x86/asus-wmi.c
1766
static void tpd_led_update(struct work_struct *work)
drivers/platform/x86/asus-wmi.c
1771
asus = container_of(work, struct asus_wmi, tpd_led_work);
drivers/platform/x86/asus-wmi.c
1907
static void wlan_led_update(struct work_struct *work)
drivers/platform/x86/asus-wmi.c
1912
asus = container_of(work, struct asus_wmi, wlan_led_work);
drivers/platform/x86/asus-wmi.c
1940
static void lightbar_led_update(struct work_struct *work)
drivers/platform/x86/asus-wmi.c
1945
asus = container_of(work, struct asus_wmi, lightbar_led_work);
drivers/platform/x86/asus-wmi.c
2286
static void asus_hotplug_work(struct work_struct *work)
drivers/platform/x86/asus-wmi.c
2290
asus = container_of(work, struct asus_wmi, hotplug_work);
drivers/platform/x86/dell/dell-lis3lv02d.c
127
static void instantiate_i2c_client(struct work_struct *work)
drivers/platform/x86/eeepc-laptop.c
485
static void tpd_led_update(struct work_struct *work)
drivers/platform/x86/eeepc-laptop.c
489
eeepc = container_of(work, struct eeepc_laptop, tpd_led_work);
drivers/platform/x86/gpd-pocket-fan.c
115
queue_delayed_work(system_percpu_wq, &fan->work,
drivers/platform/x86/gpd-pocket-fan.c
122
mod_delayed_work(system_percpu_wq, &fan->work, 0);
drivers/platform/x86/gpd-pocket-fan.c
156
ret = devm_delayed_work_autocancel(&pdev->dev, &fan->work,
drivers/platform/x86/gpd-pocket-fan.c
189
cancel_delayed_work_sync(&fan->work);
drivers/platform/x86/gpd-pocket-fan.c
51
struct delayed_work work;
drivers/platform/x86/gpd-pocket-fan.c
74
static void gpd_pocket_fan_worker(struct work_struct *work)
drivers/platform/x86/gpd-pocket-fan.c
77
container_of(work, struct gpd_pocket_fan_data, work.work);
drivers/platform/x86/hp/hp-wmi.c
2527
static void hp_wmi_hwmon_keep_alive_handler(struct work_struct *work)
drivers/platform/x86/hp/hp-wmi.c
2532
dwork = to_delayed_work(work);
drivers/platform/x86/hp/hp_accel.c
331
INIT_WORK(&hpled_led.work, delayed_set_status_worker);
drivers/platform/x86/hp/hp_accel.c
337
flush_work(&hpled_led.work);
drivers/platform/x86/hp/hp_accel.c
352
flush_work(&hpled_led.work);
drivers/platform/x86/hp/hp_accel.c
36
struct work_struct work;
drivers/platform/x86/hp/hp_accel.c
43
static inline void delayed_set_status_worker(struct work_struct *work)
drivers/platform/x86/hp/hp_accel.c
46
container_of(work, struct delayed_led_classdev, work);
drivers/platform/x86/hp/hp_accel.c
57
schedule_work(&data->work);
drivers/platform/x86/intel/ifs/load.c
116
static void copy_hashes_authenticate_chunks(struct work_struct *work)
drivers/platform/x86/intel/ifs/load.c
118
struct ifs_work *local_work = container_of(work, struct ifs_work, w);
drivers/platform/x86/intel/ishtp_eclite.c
349
static void ecl_acpi_invoke_dsm(struct work_struct *work)
drivers/platform/x86/intel/ishtp_eclite.c
354
opr_dev = container_of(work, struct ishtp_opregion_dev, event_work);
drivers/platform/x86/intel/ishtp_eclite.c
499
static void ecl_ishtp_cl_reset_handler(struct work_struct *work)
drivers/platform/x86/intel/ishtp_eclite.c
507
opr_dev = container_of(work, struct ishtp_opregion_dev, reset_work);
drivers/platform/x86/intel/turbo_max_3.c
84
static void itmt_legacy_work_fn(struct work_struct *work)
drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c
132
schedule_work(&fc->work);
drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c
163
INIT_WORK(&fc->work, yt2_1380_fc_worker);
drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c
217
schedule_work(&fc->work);
drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c
45
struct work_struct work;
drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c
74
static void yt2_1380_fc_worker(struct work_struct *work)
drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c
76
struct yt2_1380_fc *fc = container_of(work, struct yt2_1380_fc, work);
drivers/platform/x86/lenovo/yogabook.c
132
schedule_work(&data->work);
drivers/platform/x86/lenovo/yogabook.c
144
schedule_work(&data->work);
drivers/platform/x86/lenovo/yogabook.c
195
INIT_WORK(&data->work, yogabook_work);
drivers/platform/x86/lenovo/yogabook.c
229
schedule_work(&data->work);
drivers/platform/x86/lenovo/yogabook.c
247
cancel_work_sync(&data->work);
drivers/platform/x86/lenovo/yogabook.c
256
cancel_work_sync(&data->work);
drivers/platform/x86/lenovo/yogabook.c
273
flush_work(&data->work);
drivers/platform/x86/lenovo/yogabook.c
291
schedule_work(&data->work);
drivers/platform/x86/lenovo/yogabook.c
521
cancel_work_sync(&data->work);
drivers/platform/x86/lenovo/yogabook.c
534
cancel_work_sync(&data->work);
drivers/platform/x86/lenovo/yogabook.c
57
struct work_struct work;
drivers/platform/x86/lenovo/yogabook.c
63
static void yogabook_work(struct work_struct *work)
drivers/platform/x86/lenovo/yogabook.c
65
struct yogabook_data *data = container_of(work, struct yogabook_data, work);
drivers/platform/x86/samsung-galaxybook.c
1123
static void galaxybook_kbd_backlight_hotkey_work(struct work_struct *work)
drivers/platform/x86/samsung-galaxybook.c
1126
from_work(galaxybook, work, kbd_backlight_hotkey_work);
drivers/platform/x86/samsung-galaxybook.c
1148
static void galaxybook_block_recording_hotkey_work(struct work_struct *work)
drivers/platform/x86/samsung-galaxybook.c
1151
from_work(galaxybook, work, block_recording_hotkey_work);
drivers/platform/x86/samsung-laptop.c
1202
static void kbd_led_update(struct work_struct *work)
drivers/platform/x86/samsung-laptop.c
1206
samsung = container_of(work, struct samsung_laptop, kbd_led_work);
drivers/platform/x86/toshiba_acpi.c
1817
static void toshiba_acpi_kbd_bl_work(struct work_struct *work);
drivers/platform/x86/toshiba_acpi.c
2491
static void toshiba_acpi_kbd_bl_work(struct work_struct *work)
drivers/platform/x86/toshiba_acpi.c
2774
static void toshiba_acpi_hotkey_work(struct work_struct *work)
drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
172
static void atla10_ec_external_power_changed_work(struct work_struct *work)
drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
174
struct atla10_ec_data *data = container_of(work, struct atla10_ec_data, work.work);
drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
186
mod_delayed_work(system_percpu_wq, &data->work, HZ / 2);
drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
230
ret = devm_delayed_work_autocancel(dev, &data->work,
drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
57
struct delayed_work work;
drivers/pmdomain/core.c
1149
static void genpd_power_off_work_fn(struct work_struct *work)
drivers/pmdomain/core.c
1153
genpd = container_of(work, struct generic_pm_domain, power_off_work);
drivers/power/reset/pwr-mlxbf.c
24
static void pwr_mlxbf_reboot_work(struct work_struct *work)
drivers/power/supply/ab8500_btemp.c
270
static void ab8500_btemp_periodic_work(struct work_struct *work)
drivers/power/supply/ab8500_btemp.c
274
struct ab8500_btemp *di = container_of(work,
drivers/power/supply/ab8500_btemp.c
275
struct ab8500_btemp, btemp_periodic_work.work);
drivers/power/supply/ab8500_chargalg.c
1591
static void ab8500_chargalg_periodic_work(struct work_struct *work)
drivers/power/supply/ab8500_chargalg.c
1593
struct ab8500_chargalg *di = container_of(work,
drivers/power/supply/ab8500_chargalg.c
1594
struct ab8500_chargalg, chargalg_periodic_work.work);
drivers/power/supply/ab8500_chargalg.c
1618
static void ab8500_chargalg_wd_work(struct work_struct *work)
drivers/power/supply/ab8500_chargalg.c
1621
struct ab8500_chargalg *di = container_of(work,
drivers/power/supply/ab8500_chargalg.c
1622
struct ab8500_chargalg, chargalg_wd_work.work);
drivers/power/supply/ab8500_chargalg.c
1638
static void ab8500_chargalg_work(struct work_struct *work)
drivers/power/supply/ab8500_chargalg.c
1640
struct ab8500_chargalg *di = container_of(work,
drivers/power/supply/ab8500_charger.c
1957
static void ab8500_charger_check_vbat_work(struct work_struct *work)
drivers/power/supply/ab8500_charger.c
1960
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
1961
struct ab8500_charger, check_vbat_work.work);
drivers/power/supply/ab8500_charger.c
2001
static void ab8500_charger_check_hw_failure_work(struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2006
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
2007
struct ab8500_charger, check_hw_failure_work.work);
drivers/power/supply/ab8500_charger.c
2056
static void ab8500_charger_kick_watchdog_work(struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2060
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
2061
struct ab8500_charger, kick_wd_work.work);
drivers/power/supply/ab8500_charger.c
2079
static void ab8500_charger_ac_work(struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2083
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
2106
static void ab8500_charger_usb_attached_work(struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2108
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
2110
usb_charger_attached_work.work);
drivers/power/supply/ab8500_charger.c
2143
static void ab8500_charger_ac_attached_work(struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2146
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
2148
ac_charger_attached_work.work);
drivers/power/supply/ab8500_charger.c
2190
static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2194
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
2250
static void ab8500_charger_usb_link_attach_work(struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2253
container_of(work, struct ab8500_charger, attach_work.work);
drivers/power/supply/ab8500_charger.c
2274
static void ab8500_charger_usb_link_status_work(struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2281
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
2404
static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2409
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
2410
struct ab8500_charger, usb_state_changed_work.work);
drivers/power/supply/ab8500_charger.c
2471
static void ab8500_charger_check_usbchargernotok_work(struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2477
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
2478
struct ab8500_charger, check_usbchgnotok_work.work);
drivers/power/supply/ab8500_charger.c
2510
struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2515
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
2540
struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2545
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
2667
static void ab8500_charger_vbus_drop_end_work(struct work_struct *work)
drivers/power/supply/ab8500_charger.c
2669
struct ab8500_charger *di = container_of(work,
drivers/power/supply/ab8500_charger.c
2670
struct ab8500_charger, vbus_drop_end_work.work);
drivers/power/supply/ab8500_fg.c
1801
static void ab8500_fg_periodic_work(struct work_struct *work)
drivers/power/supply/ab8500_fg.c
1803
struct ab8500_fg *di = container_of(work, struct ab8500_fg,
drivers/power/supply/ab8500_fg.c
1804
fg_periodic_work.work);
drivers/power/supply/ab8500_fg.c
1836
static void ab8500_fg_check_hw_failure_work(struct work_struct *work)
drivers/power/supply/ab8500_fg.c
1841
struct ab8500_fg *di = container_of(work, struct ab8500_fg,
drivers/power/supply/ab8500_fg.c
1842
fg_check_hw_failure_work.work);
drivers/power/supply/ab8500_fg.c
1877
static void ab8500_fg_low_bat_work(struct work_struct *work)
drivers/power/supply/ab8500_fg.c
1881
struct ab8500_fg *di = container_of(work, struct ab8500_fg,
drivers/power/supply/ab8500_fg.c
1882
fg_low_bat_work.work);
drivers/power/supply/ab8500_fg.c
1981
static void ab8500_fg_instant_work(struct work_struct *work)
drivers/power/supply/ab8500_fg.c
1983
struct ab8500_fg *di = container_of(work, struct ab8500_fg, fg_work);
drivers/power/supply/ab8500_fg.c
2415
static void ab8500_fg_reinit_work(struct work_struct *work)
drivers/power/supply/ab8500_fg.c
2417
struct ab8500_fg *di = container_of(work, struct ab8500_fg,
drivers/power/supply/ab8500_fg.c
2418
fg_reinit_work.work);
drivers/power/supply/ab8500_fg.c
751
static void ab8500_fg_acc_cur_work(struct work_struct *work)
drivers/power/supply/ab8500_fg.c
757
struct ab8500_fg *di = container_of(work,
drivers/power/supply/acer_a500_battery.c
200
static void a500_battery_poll_work(struct work_struct *work)
drivers/power/supply/acer_a500_battery.c
205
bat = container_of(work, struct a500_battery, poll_work.work);
drivers/power/supply/act8945a_charger.c
433
static void act8945a_work(struct work_struct *work)
drivers/power/supply/act8945a_charger.c
436
container_of(work, struct act8945a_charger, work);
drivers/power/supply/act8945a_charger.c
448
schedule_work(&charger->work);
drivers/power/supply/act8945a_charger.c
630
INIT_WORK(&charger->work, act8945a_work);
drivers/power/supply/act8945a_charger.c
646
cancel_work_sync(&charger->work);
drivers/power/supply/act8945a_charger.c
79
struct work_struct work;
drivers/power/supply/adc-battery-helper.c
101
static void adc_battery_helper_work(struct work_struct *work)
drivers/power/supply/adc-battery-helper.c
103
struct adc_battery_helper *help = container_of(work, struct adc_battery_helper,
drivers/power/supply/adc-battery-helper.c
104
work.work);
drivers/power/supply/adc-battery-helper.c
184
queue_delayed_work(system_percpu_wq, &help->work,
drivers/power/supply/adc-battery-helper.c
254
mod_delayed_work(system_percpu_wq, &help->work, SETTLE_TIME);
drivers/power/supply/adc-battery-helper.c
263
queue_delayed_work(system_percpu_wq, &help->work, 0);
drivers/power/supply/adc-battery-helper.c
264
flush_delayed_work(&help->work);
drivers/power/supply/adc-battery-helper.c
282
ret = devm_delayed_work_autocancel(dev, &help->work, adc_battery_helper_work);
drivers/power/supply/adc-battery-helper.c
311
cancel_delayed_work_sync(&help->work);
drivers/power/supply/adc-battery-helper.h
26
struct delayed_work work;
drivers/power/supply/axp20x_usb_power.c
125
static void axp20x_usb_power_poll_vbus(struct work_struct *work)
drivers/power/supply/axp20x_usb_power.c
128
container_of(work, struct axp20x_usb_power, vbus_detect.work);
drivers/power/supply/axp20x_usb_power.c
157
static void axp717_usb_power_poll_vbus(struct work_struct *work)
drivers/power/supply/axp20x_usb_power.c
160
container_of(work, struct axp20x_usb_power, vbus_detect.work);
drivers/power/supply/axp20x_usb_power.c
74
void (*axp20x_read_vbus)(struct work_struct *work);
drivers/power/supply/axp288_charger.c
128
struct work_struct work;
drivers/power/supply/axp288_charger.c
138
struct work_struct work;
drivers/power/supply/axp288_charger.c
617
static void axp288_charger_extcon_evt_worker(struct work_struct *work)
drivers/power/supply/axp288_charger.c
620
container_of(work, struct axp288_chrg_info, cable.work);
drivers/power/supply/axp288_charger.c
680
schedule_work(&info->cable.work);
drivers/power/supply/axp288_charger.c
684
static void axp288_charger_otg_evt_worker(struct work_struct *work)
drivers/power/supply/axp288_charger.c
687
container_of(work, struct axp288_chrg_info, otg.work);
drivers/power/supply/axp288_charger.c
712
schedule_work(&info->otg.work);
drivers/power/supply/axp288_charger.c
828
cancel_work_sync(&info->otg.work);
drivers/power/supply/axp288_charger.c
829
cancel_work_sync(&info->cable.work);
drivers/power/supply/axp288_charger.c
919
INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
drivers/power/supply/axp288_charger.c
927
schedule_work(&info->cable.work);
drivers/power/supply/axp288_charger.c
930
INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker);
drivers/power/supply/axp288_charger.c
939
schedule_work(&info->otg.work);
drivers/power/supply/bq2415x_charger.c
1051
cancel_delayed_work_sync(&bq->work);
drivers/power/supply/bq2415x_charger.c
163
struct delayed_work work;
drivers/power/supply/bq2415x_charger.c
1697
INIT_DELAYED_WORK(&bq->work, bq2415x_timer_work);
drivers/power/supply/bq2415x_charger.c
845
mod_delayed_work(system_percpu_wq, &bq->work, 0);
drivers/power/supply/bq2415x_charger.c
865
schedule_delayed_work(&bq->work, BQ2415X_TIMER_TIMEOUT * HZ);
drivers/power/supply/bq2415x_charger.c
869
cancel_delayed_work_sync(&bq->work);
drivers/power/supply/bq2415x_charger.c
888
static void bq2415x_timer_work(struct work_struct *work)
drivers/power/supply/bq2415x_charger.c
890
struct bq2415x_device *bq = container_of(work, struct bq2415x_device,
drivers/power/supply/bq2415x_charger.c
891
work.work);
drivers/power/supply/bq2415x_charger.c
995
schedule_delayed_work(&bq->work, BQ2415X_TIMER_TIMEOUT * HZ);
drivers/power/supply/bq24190_charger.c
1437
static void bq24190_input_current_limit_work(struct work_struct *work)
drivers/power/supply/bq24190_charger.c
1440
container_of(work, struct bq24190_dev_info,
drivers/power/supply/bq24190_charger.c
1441
input_current_limit_work.work);
drivers/power/supply/bq24257_charger.c
586
static void bq24257_iilimit_setup_work(struct work_struct *work)
drivers/power/supply/bq24257_charger.c
588
struct bq24257_device *bq = container_of(work, struct bq24257_device,
drivers/power/supply/bq24257_charger.c
589
iilimit_setup_work.work);
drivers/power/supply/bq24735-charger.c
245
static void bq24735_poll(struct work_struct *work)
drivers/power/supply/bq24735-charger.c
247
struct bq24735 *charger = container_of(work, struct bq24735, poll.work);
drivers/power/supply/bq25890_charger.c
1059
container_of(data, struct bq25890_device, pump_express_work.work);
drivers/power/supply/bq27xxx_battery.c
1130
mod_delayed_work(system_percpu_wq, &di->work, 0);
drivers/power/supply/bq27xxx_battery.c
1948
mod_delayed_work(system_percpu_wq, &di->work, poll_interval * HZ);
drivers/power/supply/bq27xxx_battery.c
1959
static void bq27xxx_battery_poll(struct work_struct *work)
drivers/power/supply/bq27xxx_battery.c
1962
container_of(work, struct bq27xxx_device_info,
drivers/power/supply/bq27xxx_battery.c
1963
work.work);
drivers/power/supply/bq27xxx_battery.c
2224
mod_delayed_work(system_percpu_wq, &di->work, HZ / 2);
drivers/power/supply/bq27xxx_battery.c
2237
INIT_DELAYED_WORK(&di->work, bq27xxx_battery_poll);
drivers/power/supply/bq27xxx_battery.c
2285
cancel_delayed_work_sync(&di->work);
drivers/power/supply/bq27xxx_battery.c
2294
cancel_delayed_work(&di->work);
drivers/power/supply/bq27xxx_battery.c
2302
schedule_delayed_work(&di->work, 0);
drivers/power/supply/bq27xxx_battery_i2c.c
196
schedule_delayed_work(&di->work, 60 * HZ);
drivers/power/supply/chagall-battery.c
160
static void chagall_battery_poll_work(struct work_struct *work)
drivers/power/supply/chagall-battery.c
163
container_of(work, struct chagall_battery_data, poll_work.work);
drivers/power/supply/charger-manager.c
649
static void _setup_polling(struct work_struct *work)
drivers/power/supply/charger-manager.c
706
static void cm_monitor_poller(struct work_struct *work)
drivers/power/supply/charger-manager.c
914
static void charger_extcon_work(struct work_struct *work)
drivers/power/supply/charger-manager.c
917
container_of(work, struct charger_cable, wq);
drivers/power/supply/collie_battery.c
193
static void collie_bat_work(struct work_struct *work)
drivers/power/supply/cpcap-charger.c
459
static void cpcap_charger_vbus_work(struct work_struct *work)
drivers/power/supply/cpcap-charger.c
465
ddata = container_of(work, struct cpcap_charger_ddata,
drivers/power/supply/cpcap-charger.c
466
vbus_work.work);
drivers/power/supply/cpcap-charger.c
621
static void cpcap_usb_detect(struct work_struct *work)
drivers/power/supply/cpcap-charger.c
627
ddata = container_of(work, struct cpcap_charger_ddata,
drivers/power/supply/cpcap-charger.c
628
detect_work.work);
drivers/power/supply/cw2015_battery.c
419
static void cw_bat_work(struct work_struct *work)
drivers/power/supply/cw2015_battery.c
426
delay_work = to_delayed_work(work);
drivers/power/supply/da9030_battery.c
282
static void da9030_charging_monitor(struct work_struct *work)
drivers/power/supply/da9030_battery.c
286
charger = container_of(work, struct da9030_charger, work.work);
drivers/power/supply/da9030_battery.c
291
schedule_delayed_work(&charger->work, charger->interval);
drivers/power/supply/da9030_battery.c
397
cancel_delayed_work_sync(&charger->work);
drivers/power/supply/da9030_battery.c
398
schedule_work(&charger->work.work);
drivers/power/supply/da9030_battery.c
519
INIT_DELAYED_WORK(&charger->work, da9030_charging_monitor);
drivers/power/supply/da9030_battery.c
520
schedule_delayed_work(&charger->work, charger->interval);
drivers/power/supply/da9030_battery.c
550
cancel_delayed_work(&charger->work);
drivers/power/supply/da9030_battery.c
565
cancel_delayed_work_sync(&charger->work);
drivers/power/supply/da9030_battery.c
96
struct delayed_work work;
drivers/power/supply/da9150-fg.c
354
static void da9150_fg_work(struct work_struct *work)
drivers/power/supply/da9150-fg.c
356
struct da9150_fg *fg = container_of(work, struct da9150_fg, work.work);
drivers/power/supply/da9150-fg.c
362
schedule_delayed_work(&fg->work, msecs_to_jiffies(fg->interval));
drivers/power/supply/da9150-fg.c
509
ret = devm_delayed_work_autocancel(dev, &fg->work,
drivers/power/supply/da9150-fg.c
516
schedule_delayed_work(&fg->work,
drivers/power/supply/da9150-fg.c
544
flush_delayed_work(&fg->work);
drivers/power/supply/da9150-fg.c
79
struct delayed_work work;
drivers/power/supply/ds2760_battery.c
469
static void ds2760_battery_work(struct work_struct *work)
drivers/power/supply/ds2760_battery.c
471
struct ds2760_device_info *di = container_of(work,
drivers/power/supply/ds2760_battery.c
472
struct ds2760_device_info, monitor_work.work);
drivers/power/supply/ds2782_battery.c
287
static void ds278x_bat_work(struct work_struct *work)
drivers/power/supply/ds2782_battery.c
291
info = container_of(work, struct ds278x_info, bat_work.work);
drivers/power/supply/generic-adc-battery.c
123
static void gab_work(struct work_struct *work)
drivers/power/supply/generic-adc-battery.c
129
delayed_work = to_delayed_work(work);
drivers/power/supply/ipaq_micro_battery.c
49
static void micro_battery_work(struct work_struct *work)
drivers/power/supply/ipaq_micro_battery.c
51
struct micro_battery *mb = container_of(work,
drivers/power/supply/ipaq_micro_battery.c
52
struct micro_battery, update.work);
drivers/power/supply/isp1704_charger.c
226
container_of(data, struct isp1704_charger, work);
drivers/power/supply/isp1704_charger.c
299
schedule_work(&isp->work);
drivers/power/supply/isp1704_charger.c
439
INIT_WORK(&isp->work, isp1704_charger_work);
drivers/power/supply/isp1704_charger.c
467
schedule_work(&isp->work);
drivers/power/supply/isp1704_charger.c
52
struct work_struct work;
drivers/power/supply/lp8727_charger.c
225
work.work);
drivers/power/supply/lp8727_charger.c
250
schedule_delayed_work(&pchg->work, pchg->debounce_jiffies);
drivers/power/supply/lp8727_charger.c
261
INIT_DELAYED_WORK(&pchg->work, lp8727_delayed_func);
drivers/power/supply/lp8727_charger.c
283
cancel_delayed_work_sync(&pchg->work);
drivers/power/supply/lp8727_charger.c
97
struct delayed_work work;
drivers/power/supply/lp8788-charger.c
409
static void lp8788_charger_event(struct work_struct *work)
drivers/power/supply/lp8788-charger.c
412
container_of(work, struct lp8788_charger, charger_work);
drivers/power/supply/ltc2941-battery-gauge.c
423
static void ltc294x_work(struct work_struct *work)
drivers/power/supply/ltc2941-battery-gauge.c
427
info = container_of(work, struct ltc294x_info, work.work);
drivers/power/supply/ltc2941-battery-gauge.c
429
schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ);
drivers/power/supply/ltc2941-battery-gauge.c
530
ret = devm_delayed_work_autocancel(&client->dev, &info->work,
drivers/power/supply/ltc2941-battery-gauge.c
546
schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ);
drivers/power/supply/ltc2941-battery-gauge.c
581
cancel_delayed_work(&info->work);
drivers/power/supply/ltc2941-battery-gauge.c
590
schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ);
drivers/power/supply/ltc2941-battery-gauge.c
75
struct delayed_work work; /* Work scheduler */
drivers/power/supply/max14656_charger_detector.c
134
static void max14656_irq_worker(struct work_struct *work)
drivers/power/supply/max14656_charger_detector.c
137
container_of(work, struct max14656_chip, irq_work.work);
drivers/power/supply/max17040_battery.c
143
struct delayed_work work;
drivers/power/supply/max17040_battery.c
269
queue_delayed_work(system_power_efficient_wq, &chip->work,
drivers/power/supply/max17040_battery.c
277
cancel_delayed_work_sync(&chip->work);
drivers/power/supply/max17040_battery.c
280
static void max17040_work(struct work_struct *work)
drivers/power/supply/max17040_battery.c
285
chip = container_of(work, struct max17040_chip, work.work);
drivers/power/supply/max17040_battery.c
537
INIT_DEFERRABLE_WORK(&chip->work, max17040_work);
drivers/power/supply/max17040_battery.c
567
cancel_delayed_work(&chip->work);
drivers/power/supply/max17042_battery.c
1125
ret = devm_work_autocancel(dev, &chip->work,
drivers/power/supply/max17042_battery.c
1129
schedule_work(&chip->work);
drivers/power/supply/max17042_battery.c
61
struct work_struct work;
drivers/power/supply/max17042_battery.c
892
static void max17042_init_worker(struct work_struct *work)
drivers/power/supply/max17042_battery.c
894
struct max17042_chip *chip = container_of(work,
drivers/power/supply/max17042_battery.c
895
struct max17042_chip, work);
drivers/power/supply/max77705_charger.c
481
static void max77705_chgin_isr_work(struct work_struct *work)
drivers/power/supply/max77705_charger.c
484
container_of(work, struct max77705_charger_data, chgin_work);
drivers/power/supply/max8971_charger.c
545
static void max8971_extcon_evt_worker(struct work_struct *work)
drivers/power/supply/max8971_charger.c
548
container_of(work, struct max8971_data, extcon_work.work);
drivers/power/supply/max8997_charger.c
101
container_of(work, struct charger_data, extcon_work);
drivers/power/supply/max8997_charger.c
98
static void max8997_battery_extcon_evt_worker(struct work_struct *work)
drivers/power/supply/mt6360_charger.c
686
static void mt6360_chrdet_work(struct work_struct *work)
drivers/power/supply/mt6360_charger.c
689
work, struct mt6360_chg_info, chrdet_work);
drivers/power/supply/mt6370-charger.c
254
static void mt6370_chg_bc12_work_func(struct work_struct *work)
drivers/power/supply/mt6370-charger.c
256
struct mt6370_priv *priv = container_of(work, struct mt6370_priv,
drivers/power/supply/mt6370-charger.c
360
static void mt6370_chg_mivr_dwork_func(struct work_struct *work)
drivers/power/supply/mt6370-charger.c
362
struct mt6370_priv *priv = container_of(work, struct mt6370_priv,
drivers/power/supply/mt6370-charger.c
363
mivr_dwork.work);
drivers/power/supply/pf1550-charger.c
180
static void pf1550_chg_bat_work(struct work_struct *work)
drivers/power/supply/pf1550-charger.c
182
struct pf1550_charger *chg = container_of(to_delayed_work(work),
drivers/power/supply/pf1550-charger.c
217
static void pf1550_chg_chg_work(struct work_struct *work)
drivers/power/supply/pf1550-charger.c
219
struct pf1550_charger *chg = container_of(to_delayed_work(work),
drivers/power/supply/pf1550-charger.c
269
static void pf1550_chg_vbus_work(struct work_struct *work)
drivers/power/supply/pf1550-charger.c
271
struct pf1550_charger *chg = container_of(to_delayed_work(work),
drivers/power/supply/power_supply_core.c
174
static void power_supply_deferred_register_work(struct work_struct *work)
drivers/power/supply/power_supply_core.c
176
struct power_supply *psy = container_of(work, struct power_supply,
drivers/power/supply/power_supply_core.c
177
deferred_register_work.work);
drivers/power/supply/power_supply_core.c
78
static void power_supply_changed_work(struct work_struct *work)
drivers/power/supply/power_supply_core.c
82
struct power_supply *psy = container_of(work, struct power_supply,
drivers/power/supply/qcom_battmgr.c
1586
static void qcom_battmgr_enable_worker(struct work_struct *work)
drivers/power/supply/qcom_battmgr.c
1588
struct qcom_battmgr *battmgr = container_of(work, struct qcom_battmgr, enable_work);
drivers/power/supply/qcom_smbx.c
547
static void smb_status_change_work(struct work_struct *work)
drivers/power/supply/qcom_smbx.c
554
chip = container_of(work, struct smb_chip, status_change_work.work);
drivers/power/supply/rk817_charger.c
1040
static void rk817_charging_monitor(struct work_struct *work)
drivers/power/supply/rk817_charger.c
1044
charger = container_of(work, struct rk817_charger, work.work);
drivers/power/supply/rk817_charger.c
1049
queue_delayed_work(system_percpu_wq, &charger->work, msecs_to_jiffies(8000));
drivers/power/supply/rk817_charger.c
108
struct delayed_work work;
drivers/power/supply/rk817_charger.c
1203
ret = devm_delayed_work_autocancel(&pdev->dev, &charger->work,
drivers/power/supply/rk817_charger.c
1209
mod_delayed_work(system_percpu_wq, &charger->work, 0);
drivers/power/supply/rk817_charger.c
1218
cancel_delayed_work_sync(&charger->work);
drivers/power/supply/rk817_charger.c
1229
mod_delayed_work(system_percpu_wq, &charger->work, 0);
drivers/power/supply/rt5033_charger.c
575
static void rt5033_charger_extcon_work(struct work_struct *work)
drivers/power/supply/rt5033_charger.c
578
container_of(work, struct rt5033_charger, extcon_work);
drivers/power/supply/rt9455_charger.c
1444
static void rt9455_pwr_rdy_work_callback(struct work_struct *work)
drivers/power/supply/rt9455_charger.c
1446
struct rt9455_info *info = container_of(work, struct rt9455_info,
drivers/power/supply/rt9455_charger.c
1447
pwr_rdy_work.work);
drivers/power/supply/rt9455_charger.c
1482
static void rt9455_max_charging_time_work_callback(struct work_struct *work)
drivers/power/supply/rt9455_charger.c
1484
struct rt9455_info *info = container_of(work, struct rt9455_info,
drivers/power/supply/rt9455_charger.c
1485
max_charging_time_work.work);
drivers/power/supply/rt9455_charger.c
1496
static void rt9455_batt_presence_work_callback(struct work_struct *work)
drivers/power/supply/rt9455_charger.c
1498
struct rt9455_info *info = container_of(work, struct rt9455_info,
drivers/power/supply/rt9455_charger.c
1499
batt_presence_work.work);
drivers/power/supply/sbs-battery.c
1067
cancel_delayed_work_sync(&chip->work);
drivers/power/supply/sbs-battery.c
1069
schedule_delayed_work(&chip->work, HZ);
drivers/power/supply/sbs-battery.c
1073
static void sbs_delayed_work(struct work_struct *work)
drivers/power/supply/sbs-battery.c
1078
chip = container_of(work, struct sbs_info, work.work);
drivers/power/supply/sbs-battery.c
1102
schedule_delayed_work(&chip->work, HZ);
drivers/power/supply/sbs-battery.c
1191
rc = devm_delayed_work_autocancel(&client->dev, &chip->work,
drivers/power/supply/sbs-battery.c
1235
cancel_delayed_work_sync(&chip->work);
drivers/power/supply/sbs-battery.c
215
struct delayed_work work;
drivers/power/supply/sbs-battery.c
659
cancel_delayed_work_sync(&chip->work);
drivers/power/supply/sbs-charger.c
106
static void sbs_delayed_work(struct work_struct *work)
drivers/power/supply/sbs-charger.c
108
struct sbs_info *chip = container_of(work, struct sbs_info, work.work);
drivers/power/supply/sbs-charger.c
112
schedule_delayed_work(&chip->work,
drivers/power/supply/sbs-charger.c
222
ret = devm_delayed_work_autocancel(&client->dev, &chip->work,
drivers/power/supply/sbs-charger.c
228
schedule_delayed_work(&chip->work,
drivers/power/supply/sbs-charger.c
39
struct delayed_work work;
drivers/power/supply/sc2731_charger.c
326
container_of(data, struct sc2731_charger_info, work);
drivers/power/supply/sc2731_charger.c
364
schedule_work(&info->work);
drivers/power/supply/sc2731_charger.c
452
schedule_work(&info->work);
drivers/power/supply/sc2731_charger.c
468
INIT_WORK(&info->work, sc2731_charger_work);
drivers/power/supply/sc2731_charger.c
60
struct work_struct work;
drivers/power/supply/stc3117_fuel_gauge.c
450
static void fuel_gauge_update_work(struct work_struct *work)
drivers/power/supply/stc3117_fuel_gauge.c
453
container_of(work, struct stc3117_data, update_work.work);
drivers/power/supply/surface_battery.c
392
static void spwr_battery_update_bst_workfn(struct work_struct *work)
drivers/power/supply/surface_battery.c
394
struct delayed_work *dwork = to_delayed_work(work);
drivers/power/supply/twl4030_charger.c
1005
INIT_WORK(&bci->work, twl4030_bci_usb_work);
drivers/power/supply/twl4030_charger.c
118
struct work_struct work;
drivers/power/supply/twl4030_charger.c
405
current_worker.work);
drivers/power/supply/twl4030_charger.c
651
struct twl4030_bci *bci = container_of(data, struct twl4030_bci, work);
drivers/power/supply/twl4030_charger.c
678
schedule_work(&bci->work);
drivers/power/supply/twl6030_charger.c
196
struct work_struct work;
drivers/power/supply/twl6030_charger.c
286
charger_monitor.work);
drivers/power/supply/ucs1002_power.c
437
static void ucs1002_health_poll(struct work_struct *work)
drivers/power/supply/ucs1002_power.c
439
struct ucs1002_info *info = container_of(work, struct ucs1002_info,
drivers/power/supply/ucs1002_power.c
440
health_poll.work);
drivers/power/supply/wm97xx_battery.c
126
static void wm97xx_bat_work(struct work_struct *work)
drivers/ps3/ps3-sys-manager.c
739
.work = ps3_sys_manager_work,
drivers/ps3/ps3-vuart.c
1033
INIT_WORK(&priv->rx_list.work.work, ps3_vuart_work);
drivers/ps3/ps3-vuart.c
1034
priv->rx_list.work.trigger = 0;
drivers/ps3/ps3-vuart.c
1035
priv->rx_list.work.dev = dev;
drivers/ps3/ps3-vuart.c
661
static void ps3_vuart_work(struct work_struct *work)
drivers/ps3/ps3-vuart.c
664
ps3_vuart_work_to_system_bus_dev(work);
drivers/ps3/ps3-vuart.c
669
drv->work(dev);
drivers/ps3/ps3-vuart.c
677
if (priv->rx_list.work.trigger) {
drivers/ps3/ps3-vuart.c
689
schedule_work(&priv->rx_list.work.work);
drivers/ps3/ps3-vuart.c
694
priv->rx_list.work.trigger = bytes;
drivers/ps3/ps3-vuart.c
706
to_port_priv(dev)->rx_list.work.trigger = 0;
drivers/ps3/ps3-vuart.c
74
struct ps3_vuart_work work;
drivers/ps3/ps3-vuart.c
795
if (priv->rx_list.work.trigger && priv->rx_list.bytes_held
drivers/ps3/ps3-vuart.c
796
>= priv->rx_list.work.trigger) {
drivers/ps3/ps3-vuart.c
798
__func__, __LINE__, priv->rx_list.work.trigger);
drivers/ps3/ps3-vuart.c
799
priv->rx_list.work.trigger = 0;
drivers/ps3/ps3-vuart.c
800
schedule_work(&priv->rx_list.work.work);
drivers/ps3/ps3av.c
1004
flush_work(&ps3av->work);
drivers/ps3/ps3av.c
34
struct work_struct work;
drivers/ps3/ps3av.c
476
schedule_work(&ps3av->work);
drivers/ps3/ps3av.c
569
static void ps3avd(struct work_struct *work)
drivers/ps3/ps3av.c
945
INIT_WORK(&ps3av->work, ps3avd);
drivers/ps3/vuart.h
23
struct work_struct work;
drivers/ps3/vuart.h
37
void (*work)(struct ps3_system_bus_device *);
drivers/ps3/vuart.h
60
work);
drivers/ptp/ptp_clock.c
238
static void ptp_aux_kworker(struct kthread_work *work)
drivers/ptp/ptp_clock.c
240
struct ptp_clock *ptp = container_of(work, struct ptp_clock,
drivers/ptp/ptp_clock.c
241
aux_work.work);
drivers/ptp/ptp_clockmatrix.c
2314
static void idtcm_extts_check(struct work_struct *work)
drivers/ptp/ptp_clockmatrix.c
2316
struct idtcm *idtcm = container_of(work, struct idtcm, extts_work.work);
drivers/ptp/ptp_idt82p33.c
1332
static void idt82p33_extts_check(struct work_struct *work)
drivers/ptp/ptp_idt82p33.c
1334
struct idt82p33 *idt82p33 = container_of(work, struct idt82p33,
drivers/ptp/ptp_idt82p33.c
1335
extts_work.work);
drivers/ptp/ptp_idt82p33.c
510
static void idt82p33_adjtime_workaround(struct work_struct *work)
drivers/ptp/ptp_idt82p33.c
512
struct idt82p33_channel *channel = container_of(work,
drivers/ptp/ptp_idt82p33.c
514
adjtime_work.work);
drivers/ptp/ptp_ines.c
176
static void ines_txtstamp_work(struct work_struct *work);
drivers/ptp/ptp_ines.c
659
static void ines_txtstamp_work(struct work_struct *work)
drivers/ptp/ptp_ines.c
662
container_of(work, struct ines_port, ts_work.work);
drivers/ptp/ptp_ocp.c
4716
ptp_ocp_sync_work(struct work_struct *work)
drivers/ptp/ptp_ocp.c
4721
bp = container_of(work, struct ptp_ocp, sync_work.work);
drivers/rapidio/devices/tsi721.c
294
static void tsi721_pw_dpc(struct work_struct *work)
drivers/rapidio/devices/tsi721.c
296
struct tsi721_device *priv = container_of(work, struct tsi721_device,
drivers/rapidio/devices/tsi721.c
391
static void tsi721_db_dpc(struct work_struct *work)
drivers/rapidio/devices/tsi721.c
393
struct tsi721_device *priv = container_of(work, struct tsi721_device,
drivers/rapidio/rio.c
1944
struct work_struct work;
drivers/rapidio/rio.c
1950
struct rio_disc_work *work;
drivers/rapidio/rio.c
1952
work = container_of(_work, struct rio_disc_work, work);
drivers/rapidio/rio.c
1954
work->mport->id, work->mport->name);
drivers/rapidio/rio.c
1955
if (try_module_get(work->mport->nscan->owner)) {
drivers/rapidio/rio.c
1956
work->mport->nscan->discover(work->mport, 0);
drivers/rapidio/rio.c
1957
module_put(work->mport->nscan->owner);
drivers/rapidio/rio.c
1964
struct rio_disc_work *work;
drivers/rapidio/rio.c
2003
work = kzalloc_objs(*work, n);
drivers/rapidio/rio.c
2004
if (!work) {
drivers/rapidio/rio.c
2013
work[n].mport = port;
drivers/rapidio/rio.c
2014
INIT_WORK(&work[n].work, disc_work_handler);
drivers/rapidio/rio.c
2015
queue_work(rio_wq, &work[n].work);
drivers/rapidio/rio.c
2024
kfree(work);
drivers/rapidio/rio_cm.c
576
static void rio_ibmsg_handler(struct work_struct *work)
drivers/rapidio/rio_cm.c
578
struct cm_dev *cm = container_of(work, struct cm_dev, rx_work);
drivers/ras/cec.c
172
static void cec_work_fn(struct work_struct *work)
drivers/regulator/arizona-micsupp.c
46
static void arizona_micsupp_check_cp(struct work_struct *work)
drivers/regulator/arizona-micsupp.c
49
container_of(work, struct arizona_micsupp, check_cp_work);
drivers/regulator/core.c
1698
static void regulator_event_work_fn(struct work_struct *work)
drivers/regulator/core.c
1701
container_of(work, struct regulator_event_work, work);
drivers/regulator/core.c
1750
INIT_WORK(&rew->work, regulator_event_work_fn);
drivers/regulator/core.c
1752
queue_work(system_highpri_wq, &rew->work);
drivers/regulator/core.c
3384
static void regulator_disable_work(struct work_struct *work)
drivers/regulator/core.c
3386
struct regulator_dev *rdev = container_of(work, struct regulator_dev,
drivers/regulator/core.c
3387
disable_work.work);
drivers/regulator/core.c
6285
flush_work(&rdev->disable_work.work);
drivers/regulator/core.c
6840
static void regulator_init_complete_work_function(struct work_struct *work)
drivers/regulator/core.c
96
struct work_struct work;
drivers/regulator/da9121-regulator.c
1094
INIT_DELAYED_WORK(&chip->work, da9121_status_poll_on);
drivers/regulator/da9121-regulator.c
1161
cancel_delayed_work_sync(&chip->work);
drivers/regulator/da9121-regulator.c
34
struct delayed_work work;
drivers/regulator/da9121-regulator.c
619
static void da9121_status_poll_on(struct work_struct *work)
drivers/regulator/da9121-regulator.c
621
struct da9121 *chip = container_of(work, struct da9121, work.work);
drivers/regulator/da9121-regulator.c
670
queue_delayed_work(system_freezable_wq, &chip->work, delay);
drivers/regulator/da9121-regulator.c
765
queue_delayed_work(system_freezable_wq, &chip->work, 0);
drivers/regulator/irq_helpers.c
49
static void regulator_notifier_isr_work(struct work_struct *work)
drivers/regulator/irq_helpers.c
58
h = container_of(work, struct regulator_irq,
drivers/regulator/irq_helpers.c
59
isr_work.work);
drivers/regulator/mt6363-regulator.c
610
static void mt6363_oc_irq_enable_work(struct work_struct *work)
drivers/regulator/mt6363-regulator.c
612
struct delayed_work *dwork = to_delayed_work(work);
drivers/regulator/qcom-labibb-regulator.c
179
static void qcom_labibb_ocp_recovery_worker(struct work_struct *work)
drivers/regulator/qcom-labibb-regulator.c
185
vreg = container_of(work, struct labibb_regulator,
drivers/regulator/qcom-labibb-regulator.c
186
ocp_recovery_work.work);
drivers/regulator/qcom-labibb-regulator.c
428
static void qcom_labibb_sc_recovery_worker(struct work_struct *work)
drivers/regulator/qcom-labibb-regulator.c
436
vreg = container_of(work, struct labibb_regulator,
drivers/regulator/qcom-labibb-regulator.c
437
sc_recovery_work.work);
drivers/regulator/qcom_spmi-regulator.c
1282
static void spmi_regulator_vs_ocp_work(struct work_struct *work)
drivers/regulator/qcom_spmi-regulator.c
1284
struct delayed_work *dwork = to_delayed_work(work);
drivers/remoteproc/imx_dsp_rproc.c
497
static void imx_dsp_rproc_vq_work(struct work_struct *work)
drivers/remoteproc/imx_dsp_rproc.c
499
struct imx_dsp_rproc *priv = container_of(work, struct imx_dsp_rproc,
drivers/remoteproc/imx_rproc.c
838
static void imx_rproc_vq_work(struct work_struct *work)
drivers/remoteproc/imx_rproc.c
840
struct imx_rproc *priv = container_of(work, struct imx_rproc,
drivers/remoteproc/keystone_remoteproc.c
141
static void handle_event(struct work_struct *work)
drivers/remoteproc/keystone_remoteproc.c
144
container_of(work, struct keystone_rproc, workqueue);
drivers/remoteproc/remoteproc_core.c
1859
static void rproc_crash_handler_work(struct work_struct *work)
drivers/remoteproc/remoteproc_core.c
1861
struct rproc *rproc = container_of(work, struct rproc, crash_handler);
drivers/remoteproc/stm32_rproc.c
282
static void stm32_rproc_mb_vq_work(struct work_struct *work)
drivers/remoteproc/stm32_rproc.c
284
struct stm32_mbox *mb = container_of(work, struct stm32_mbox, vq_work);
drivers/remoteproc/xlnx_r5_remoteproc.c
201
static void handle_event_notified(struct work_struct *work)
drivers/remoteproc/xlnx_r5_remoteproc.c
206
ipi = container_of(work, struct mbox_info, mbox_work);
drivers/resctrl/mpam_devices.c
2623
void mpam_enable(struct work_struct *work)
drivers/resctrl/mpam_internal.h
381
void mpam_enable(struct work_struct *work);
drivers/resctrl/mpam_internal.h
382
void mpam_disable(struct work_struct *work);
drivers/rpmsg/qcom_glink_native.c
1768
static void qcom_glink_work(struct work_struct *work)
drivers/rpmsg/qcom_glink_native.c
1770
struct qcom_glink *glink = container_of(work, struct qcom_glink,
drivers/rpmsg/qcom_glink_native.c
223
static void qcom_glink_rx_done_work(struct work_struct *work);
drivers/rpmsg/qcom_glink_native.c
554
static void qcom_glink_rx_done_work(struct work_struct *work)
drivers/rpmsg/qcom_glink_native.c
556
struct glink_channel *channel = container_of(work, struct glink_channel,
drivers/rpmsg/qcom_smd.c
1205
static void qcom_channel_scan_worker(struct work_struct *work)
drivers/rpmsg/qcom_smd.c
1207
struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
drivers/rpmsg/qcom_smd.c
1272
static void qcom_channel_state_worker(struct work_struct *work)
drivers/rpmsg/qcom_smd.c
1275
struct qcom_smd_edge *edge = container_of(work,
drivers/rtc/dev.c
45
static void rtc_uie_task(struct work_struct *work)
drivers/rtc/dev.c
48
container_of(work, struct rtc_device, uie_task);
drivers/rtc/interface.c
928
void rtc_timer_do_work(struct work_struct *work)
drivers/rtc/interface.c
937
container_of(work, struct rtc_device, irqwork);
drivers/rtc/rtc-88pm860x.c
197
static void calibrate_vrtc_work(struct work_struct *work)
drivers/rtc/rtc-88pm860x.c
199
struct pm860x_rtc_info *info = container_of(work,
drivers/rtc/rtc-88pm860x.c
200
struct pm860x_rtc_info, calib_work.work);
drivers/rtc/rtc-atcrtc100.c
144
static void atcrtc_alarm_clear(struct work_struct *work)
drivers/rtc/rtc-atcrtc100.c
147
container_of(work, struct atcrtc_dev, rtc_work);
drivers/rtc/rtc-ds1305.c
435
static void ds1305_work(struct work_struct *work)
drivers/rtc/rtc-ds1305.c
437
struct ds1305 *ds1305 = container_of(work, struct ds1305, work);
drivers/rtc/rtc-ds1305.c
479
schedule_work(&ds1305->work);
drivers/rtc/rtc-ds1305.c
710
INIT_WORK(&ds1305->work, ds1305_work);
drivers/rtc/rtc-ds1305.c
732
cancel_work_sync(&ds1305->work);
drivers/rtc/rtc-ds1305.c
88
struct work_struct work;
drivers/rtc/rtc-ds1374.c
290
schedule_work(&ds1374->work);
drivers/rtc/rtc-ds1374.c
294
static void ds1374_work(struct work_struct *work)
drivers/rtc/rtc-ds1374.c
296
struct ds1374 *ds1374 = container_of(work, struct ds1374, work);
drivers/rtc/rtc-ds1374.c
486
INIT_WORK(&ds1374->work, ds1374_work);
drivers/rtc/rtc-ds1374.c
543
cancel_work_sync(&ds1374->work);
drivers/rtc/rtc-ds1374.c
71
struct work_struct work;
drivers/rtc/rtc-imxdi.c
118
struct work_struct work;
drivers/rtc/rtc-imxdi.c
716
schedule_work(&imxdi->work);
drivers/rtc/rtc-imxdi.c
727
static void dryice_work(struct work_struct *work)
drivers/rtc/rtc-imxdi.c
729
struct imxdi_dev *imxdi = container_of(work,
drivers/rtc/rtc-imxdi.c
730
struct imxdi_dev, work);
drivers/rtc/rtc-imxdi.c
773
INIT_WORK(&imxdi->work, dryice_work);
drivers/rtc/rtc-imxdi.c
837
flush_work(&imxdi->work);
drivers/s390/block/dasd.c
3886
static void do_requeue_requests(struct work_struct *work)
drivers/s390/block/dasd.c
3888
struct dasd_device *device = container_of(work, struct dasd_device,
drivers/s390/block/dasd.c
549
static void do_kick_device(struct work_struct *work)
drivers/s390/block/dasd.c
551
struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
drivers/s390/block/dasd.c
572
static void do_reload_device(struct work_struct *work)
drivers/s390/block/dasd.c
574
struct dasd_device *device = container_of(work, struct dasd_device,
drivers/s390/block/dasd_alias.c
536
static void lcu_update_work(struct work_struct *work)
drivers/s390/block/dasd_alias.c
544
ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
drivers/s390/block/dasd_alias.c
895
static void summary_unit_check_handling_work(struct work_struct *work)
drivers/s390/block/dasd_alias.c
902
suc_data = container_of(work, struct summary_unit_check_work_data,
drivers/s390/block/dasd_alias.c
927
void dasd_alias_handle_summary_unit_check(struct work_struct *work)
drivers/s390/block/dasd_alias.c
929
struct dasd_device *device = container_of(work, struct dasd_device,
drivers/s390/block/dasd_eckd.c
1434
static void do_pe_handler_work(struct work_struct *work)
drivers/s390/block/dasd_eckd.c
1439
data = container_of(work, struct pe_handler_work_data, worker);
drivers/s390/block/dasd_eckd.c
1444
schedule_work(work);
drivers/s390/block/dasd_eckd.c
1449
schedule_work(work);
drivers/s390/block/dasd_eckd.c
1695
static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
drivers/s390/block/dasd_eckd.c
1701
data = container_of(work, struct ext_pool_exhaust_work_data, worker);
drivers/s390/block/dasd_eckd.c
1972
static void dasd_eckd_do_validate_server(struct work_struct *work)
drivers/s390/block/dasd_eckd.c
1974
struct dasd_device *device = container_of(work, struct dasd_device,
drivers/s390/block/dasd_eckd.c
6731
static void dasd_eckd_check_attention_work(struct work_struct *work)
drivers/s390/block/dasd_eckd.c
6738
data = container_of(work, struct check_attention_work_data, worker);
drivers/s390/char/ctrlchar.c
21
ctrlchar_handle_sysrq(struct work_struct *work)
drivers/s390/char/ctrlchar.c
23
struct sysrq_work *sysrq = container_of(work, struct sysrq_work, work);
drivers/s390/char/ctrlchar.c
30
INIT_WORK(&sw->work, ctrlchar_handle_sysrq);
drivers/s390/char/ctrlchar.c
31
schedule_work(&sw->work);
drivers/s390/char/ctrlchar.h
28
struct work_struct work;
drivers/s390/char/raw3270.c
537
static void raw3270_resize_work(struct work_struct *work)
drivers/s390/char/raw3270.c
539
struct raw3270 *rp = container_of(work, struct raw3270, resize_work);
drivers/s390/char/sclp_config.c
44
static void sclp_cpu_capability_notify(struct work_struct *work)
drivers/s390/char/sclp_config.c
59
static void __ref sclp_cpu_change_notify(struct work_struct *work)
drivers/s390/char/sclp_ocf.c
37
static void sclp_ocf_change_notify(struct work_struct *work)
drivers/s390/char/tape_3490.c
107
struct work_struct work;
drivers/s390/char/tape_3490.c
121
tape_3490_work_handler(struct work_struct *work)
drivers/s390/char/tape_3490.c
124
container_of(work, struct tape_3490_work, work);
drivers/s390/char/tape_3490.c
146
INIT_WORK(&p->work, tape_3490_work_handler);
drivers/s390/char/tape_3490.c
151
schedule_work(&p->work);
drivers/s390/char/tape_core.c
214
struct work_struct work;
drivers/s390/char/tape_core.c
218
tape_med_state_work_handler(struct work_struct *work)
drivers/s390/char/tape_core.c
223
container_of(work, struct tape_med_state_work_data, work);
drivers/s390/char/tape_core.c
254
INIT_WORK(&p->work, tape_med_state_work_handler);
drivers/s390/char/tape_core.c
257
schedule_work(&p->work);
drivers/s390/char/tape_core.c
840
tape_delayed_next_request(struct work_struct *work)
drivers/s390/char/tape_core.c
843
container_of(work, struct tape_device, tape_dnr.work);
drivers/s390/cio/ccwgroup.c
206
static void ccwgroup_ungroup_workfn(struct work_struct *work)
drivers/s390/cio/ccwgroup.c
209
container_of(work, struct ccwgroup_device, ungroup_work);
drivers/s390/cio/chp.c
766
static void cfg_func(struct work_struct *work)
drivers/s390/cio/css.c
148
static void css_sch_todo(struct work_struct *work);
drivers/s390/cio/css.c
587
static void css_sch_todo(struct work_struct *work)
drivers/s390/cio/css.c
593
sch = container_of(work, struct subchannel, todo_work);
drivers/s390/cio/device.c
1843
static void ccw_device_todo(struct work_struct *work)
drivers/s390/cio/device.c
1850
priv = container_of(work, struct ccw_device_private, todo_work);
drivers/s390/cio/device.c
730
static void ccw_device_todo(struct work_struct *work);
drivers/s390/cio/vfio_ccw_drv.c
118
void vfio_ccw_crw_todo(struct work_struct *work)
drivers/s390/cio/vfio_ccw_drv.c
122
private = container_of(work, struct vfio_ccw_private, crw_work);
drivers/s390/cio/vfio_ccw_drv.c
82
void vfio_ccw_sch_io_todo(struct work_struct *work)
drivers/s390/cio/vfio_ccw_drv.c
89
private = container_of(work, struct vfio_ccw_private, io_work);
drivers/s390/cio/vfio_ccw_private.h
131
void vfio_ccw_sch_io_todo(struct work_struct *work);
drivers/s390/cio/vfio_ccw_private.h
132
void vfio_ccw_crw_todo(struct work_struct *work);
drivers/s390/crypto/zcrypt_msgtype50.c
440
complete(&msg->response.work);
drivers/s390/crypto/zcrypt_msgtype50.c
470
init_completion(&ap_msg->response.work);
drivers/s390/crypto/zcrypt_msgtype50.c
474
rc = wait_for_completion_interruptible(&ap_msg->response.work);
drivers/s390/crypto/zcrypt_msgtype50.c
519
init_completion(&ap_msg->response.work);
drivers/s390/crypto/zcrypt_msgtype50.c
523
rc = wait_for_completion_interruptible(&ap_msg->response.work);
drivers/s390/crypto/zcrypt_msgtype6.c
1023
init_completion(&resp_type->work);
drivers/s390/crypto/zcrypt_msgtype6.c
1027
rc = wait_for_completion_interruptible(&resp_type->work);
drivers/s390/crypto/zcrypt_msgtype6.c
1108
init_completion(&resp_type->work);
drivers/s390/crypto/zcrypt_msgtype6.c
1112
rc = wait_for_completion_interruptible(&resp_type->work);
drivers/s390/crypto/zcrypt_msgtype6.c
1224
init_completion(&resp_type->work);
drivers/s390/crypto/zcrypt_msgtype6.c
1228
rc = wait_for_completion_interruptible(&resp_type->work);
drivers/s390/crypto/zcrypt_msgtype6.c
1299
init_completion(&resp_type->work);
drivers/s390/crypto/zcrypt_msgtype6.c
1303
rc = wait_for_completion_interruptible(&resp_type->work);
drivers/s390/crypto/zcrypt_msgtype6.c
898
complete(&resp_type->work);
drivers/s390/crypto/zcrypt_msgtype6.c
948
complete(&resp_type->work);
drivers/s390/crypto/zcrypt_msgtype6.c
978
init_completion(&resp_type->work);
drivers/s390/crypto/zcrypt_msgtype6.c
982
rc = wait_for_completion_interruptible(&resp_type->work);
drivers/s390/net/qeth_core_main.c
1569
static void qeth_start_kernel_thread(struct work_struct *work)
drivers/s390/net/qeth_core_main.c
1572
struct qeth_card *card = container_of(work, struct qeth_card,
drivers/s390/net/qeth_core_main.c
3507
static void qeth_buffer_reclaim_work(struct work_struct *work)
drivers/s390/net/qeth_core_main.c
3509
struct qeth_card *card = container_of(to_delayed_work(work),
drivers/s390/net/qeth_l2_main.c
1305
static void qeth_bridge_state_change_worker(struct work_struct *work)
drivers/s390/net/qeth_l2_main.c
1308
container_of(work, struct qeth_bridge_state_data, worker);
drivers/s390/net/qeth_l2_main.c
1371
static void qeth_l2_dev2br_worker(struct work_struct *work)
drivers/s390/net/qeth_l2_main.c
1373
struct delayed_work *dwork = to_delayed_work(work);
drivers/s390/net/qeth_l2_main.c
1448
static void qeth_addr_change_event_worker(struct work_struct *work)
drivers/s390/net/qeth_l2_main.c
1450
struct delayed_work *dwork = to_delayed_work(work);
drivers/s390/net/qeth_l2_main.c
455
static void qeth_l2_rx_mode_work(struct work_struct *work)
drivers/s390/net/qeth_l2_main.c
457
struct qeth_card *card = container_of(work, struct qeth_card,
drivers/s390/net/qeth_l2_main.c
714
struct work_struct work;
drivers/s390/net/qeth_l2_main.c
748
static void qeth_l2_br2dev_worker(struct work_struct *work)
drivers/s390/net/qeth_l2_main.c
751
container_of(work, struct qeth_l2_br2dev_event_work, work);
drivers/s390/net/qeth_l2_main.c
833
INIT_WORK(&worker_data->work, qeth_l2_br2dev_worker);
drivers/s390/net/qeth_l2_main.c
845
queue_work(card->event_wq, &worker_data->work);
drivers/s390/net/qeth_l3_main.c
1152
static void qeth_l3_rx_mode_work(struct work_struct *work)
drivers/s390/net/qeth_l3_main.c
1154
struct qeth_card *card = container_of(work, struct qeth_card,
drivers/s390/net/qeth_l3_main.c
2090
struct work_struct work;
drivers/s390/net/qeth_l3_main.c
2095
#define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work)
drivers/s390/net/qeth_l3_main.c
2097
static void qeth_l3_add_ip_worker(struct work_struct *work)
drivers/s390/net/qeth_l3_main.c
2099
struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
drivers/s390/net/qeth_l3_main.c
2102
kfree(work);
drivers/s390/net/qeth_l3_main.c
2105
static void qeth_l3_delete_ip_worker(struct work_struct *work)
drivers/s390/net/qeth_l3_main.c
2107
struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
drivers/s390/net/qeth_l3_main.c
2110
kfree(work);
drivers/s390/net/qeth_l3_main.c
2171
INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker);
drivers/s390/net/qeth_l3_main.c
2173
INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker);
drivers/s390/net/qeth_l3_main.c
2181
queue_work(card->cmd_wq, &ip_work->work);
drivers/s390/net/smsgiucv_app.c
98
static void smsg_event_work_fn(struct work_struct *work)
drivers/s390/scsi/zfcp_aux.c
288
static void _zfcp_status_read_scheduler(struct work_struct *work)
drivers/s390/scsi/zfcp_aux.c
290
zfcp_status_read_refill(container_of(work, struct zfcp_adapter,
drivers/s390/scsi/zfcp_aux.c
294
static void zfcp_version_change_lost_work(struct work_struct *work)
drivers/s390/scsi/zfcp_aux.c
296
struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
drivers/s390/scsi/zfcp_aux.c
394
INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
drivers/s390/scsi/zfcp_fc.c
102
struct zfcp_fc_events *events = container_of(work,
drivers/s390/scsi/zfcp_fc.c
103
struct zfcp_fc_events, work);
drivers/s390/scsi/zfcp_fc.c
142
queue_work(adapter->work_queue, &adapter->events.work);
drivers/s390/scsi/zfcp_fc.c
176
static void zfcp_fc_wka_port_offline(struct work_struct *work)
drivers/s390/scsi/zfcp_fc.c
178
struct delayed_work *dw = to_delayed_work(work);
drivers/s390/scsi/zfcp_fc.c
180
container_of(dw, struct zfcp_fc_wka_port, work);
drivers/s390/scsi/zfcp_fc.c
204
queue_delayed_work(wka_port->adapter->work_queue, &wka_port->work,
drivers/s390/scsi/zfcp_fc.c
220
INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
drivers/s390/scsi/zfcp_fc.c
225
cancel_delayed_work_sync(&wka->work);
drivers/s390/scsi/zfcp_fc.c
444
void zfcp_fc_port_did_lookup(struct work_struct *work)
drivers/s390/scsi/zfcp_fc.c
447
struct zfcp_port *port = container_of(work, struct zfcp_port,
drivers/s390/scsi/zfcp_fc.c
594
void zfcp_fc_link_test_work(struct work_struct *work)
drivers/s390/scsi/zfcp_fc.c
597
container_of(work, struct zfcp_port, test_link_work);
drivers/s390/scsi/zfcp_fc.c
806
void zfcp_fc_scan_ports(struct work_struct *work)
drivers/s390/scsi/zfcp_fc.c
808
struct delayed_work *dw = to_delayed_work(work);
drivers/s390/scsi/zfcp_fc.c
947
void zfcp_fc_sym_name_update(struct work_struct *work)
drivers/s390/scsi/zfcp_fc.c
949
struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
drivers/s390/scsi/zfcp_fc.c
98
void zfcp_fc_post_event(struct work_struct *work)
drivers/s390/scsi/zfcp_fc.h
206
struct delayed_work work;
drivers/s390/scsi/zfcp_fc.h
54
struct work_struct work;
drivers/s390/scsi/zfcp_scsi.c
799
void zfcp_scsi_rport_work(struct work_struct *work)
drivers/s390/scsi/zfcp_scsi.c
801
struct zfcp_port *port = container_of(work, struct zfcp_port,
drivers/s390/scsi/zfcp_unit.c
34
static void zfcp_unit_scsi_scan_work(struct work_struct *work)
drivers/s390/scsi/zfcp_unit.c
36
struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
drivers/scsi/NCR5380.c
689
static void NCR5380_main(struct work_struct *work)
drivers/scsi/NCR5380.c
692
container_of(work, struct NCR5380_hostdata, main_task);
drivers/scsi/NCR5380.h
283
static void NCR5380_main(struct work_struct *work);
drivers/scsi/aacraid/aacraid.h
2654
static inline void aac_safw_rescan_worker(struct work_struct *work)
drivers/scsi/aacraid/aacraid.h
2656
struct aac_dev *dev = container_of(to_delayed_work(work),
drivers/scsi/aacraid/aacraid.h
2690
void aac_safw_rescan_worker(struct work_struct *work);
drivers/scsi/aacraid/aacraid.h
2691
void aac_src_reinit_aif_worker(struct work_struct *work);
drivers/scsi/aacraid/commsup.c
1813
void aac_src_reinit_aif_worker(struct work_struct *work)
drivers/scsi/aacraid/commsup.c
1815
struct aac_dev *dev = container_of(to_delayed_work(work),
drivers/scsi/aha152x.c
1323
static void run(struct work_struct *work)
drivers/scsi/arcmsr/arcmsr_hba.c
133
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
drivers/scsi/arcmsr/arcmsr_hba.c
904
static void arcmsr_message_isr_bh_fn(struct work_struct *work)
drivers/scsi/arcmsr/arcmsr_hba.c
906
struct AdapterControlBlock *acb = container_of(work,
drivers/scsi/be2iscsi/be_main.c
1845
static void beiscsi_mcc_work(struct work_struct *work)
drivers/scsi/be2iscsi/be_main.c
1850
pbe_eq = container_of(work, struct be_eq_obj, mcc_work);
drivers/scsi/be2iscsi/be_main.c
5124
static void beiscsi_boot_work(struct work_struct *work)
drivers/scsi/be2iscsi/be_main.c
5127
container_of(work, struct beiscsi_hba, boot_work);
drivers/scsi/be2iscsi/be_main.c
5170
static void beiscsi_eqd_update_work(struct work_struct *work)
drivers/scsi/be2iscsi/be_main.c
5182
phba = container_of(work, struct beiscsi_hba, eqd_update.work);
drivers/scsi/be2iscsi/be_main.c
5402
static void beiscsi_sess_work(struct work_struct *work)
drivers/scsi/be2iscsi/be_main.c
5406
phba = container_of(work, struct beiscsi_hba, sess_work);
drivers/scsi/be2iscsi/be_main.c
5415
static void beiscsi_recover_port(struct work_struct *work)
drivers/scsi/be2iscsi/be_main.c
5419
phba = container_of(work, struct beiscsi_hba, recover_port.work);
drivers/scsi/bfa/bfad_im.c
1105
bfad_im_itnim_work_handler(struct work_struct *work)
drivers/scsi/bfa/bfad_im.c
1107
struct bfad_itnim_s *itnim = container_of(work, struct bfad_itnim_s,
drivers/scsi/bfa/bfad_im.c
26
static void bfad_im_itnim_work_handler(struct work_struct *work);
drivers/scsi/bfa/bfad_im.c
608
bfad_im_port_delete_handler(struct work_struct *work)
drivers/scsi/bfa/bfad_im.c
611
container_of(work, struct bfad_im_port_s, port_delete_work);
drivers/scsi/bfa/bfad_im.c
671
static void bfad_aen_im_notify_handler(struct work_struct *work)
drivers/scsi/bfa/bfad_im.c
674
container_of(work, struct bfad_im_s, aen_im_notify_work);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2626
struct bnx2fc_work *work, *tmp;
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2637
list_for_each_entry_safe(work, tmp, &p->work_list, list) {
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2638
list_del_init(&work->list);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2639
bnx2fc_process_cq_compl(work->tgt, work->wqe, work->rq_data,
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2640
work->num_rq, work->task);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2641
kfree(work);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
645
struct bnx2fc_work *work, *tmp;
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
657
list_for_each_entry_safe(work, tmp, &work_list, list) {
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
658
list_del_init(&work->list);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
659
bnx2fc_process_cq_compl(work->tgt, work->wqe,
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
660
work->rq_data,
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
661
work->num_rq,
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
662
work->task);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
663
kfree(work);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1048
work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1050
if (work) {
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1051
list_add_tail(&work->list, &fps->work_list);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
532
static void bnx2fc_unsol_els_work(struct work_struct *work)
drivers/scsi/bnx2fc/bnx2fc_hwi.c
539
unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
974
struct bnx2fc_work *work;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
975
work = kzalloc_obj(struct bnx2fc_work, GFP_ATOMIC);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
976
if (!work)
drivers/scsi/bnx2fc/bnx2fc_hwi.c
979
INIT_LIST_HEAD(&work->list);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
980
work->tgt = tgt;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
981
work->wqe = wqe;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
982
work->num_rq = num_rq;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
983
work->task = task;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
985
memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
987
return work;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
995
struct bnx2fc_work *work;
drivers/scsi/bnx2fc/bnx2fc_io.c
40
static void bnx2fc_cmd_timeout(struct work_struct *work)
drivers/scsi/bnx2fc/bnx2fc_io.c
42
struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
drivers/scsi/bnx2fc/bnx2fc_io.c
43
timeout_work.work);
drivers/scsi/bnx2i/bnx2i_hwi.c
1855
struct bnx2i_work *work, *tmp;
drivers/scsi/bnx2i/bnx2i_hwi.c
1866
list_for_each_entry_safe(work, tmp, &work_list, list) {
drivers/scsi/bnx2i/bnx2i_hwi.c
1867
list_del_init(&work->list);
drivers/scsi/bnx2i/bnx2i_hwi.c
1869
bnx2i_process_scsi_cmd_resp(work->session,
drivers/scsi/bnx2i/bnx2i_hwi.c
1870
work->bnx2i_conn,
drivers/scsi/bnx2i/bnx2i_hwi.c
1871
&work->cqe);
drivers/scsi/bnx2i/bnx2i_hwi.c
1872
atomic_dec(&work->bnx2i_conn->work_cnt);
drivers/scsi/bnx2i/bnx2i_hwi.c
1873
kfree(work);
drivers/scsi/bnx2i/bnx2i_init.c
432
struct bnx2i_work *work, *tmp;
drivers/scsi/bnx2i/bnx2i_init.c
441
list_for_each_entry_safe(work, tmp, &p->work_list, list) {
drivers/scsi/bnx2i/bnx2i_init.c
442
list_del_init(&work->list);
drivers/scsi/bnx2i/bnx2i_init.c
443
bnx2i_process_scsi_cmd_resp(work->session,
drivers/scsi/bnx2i/bnx2i_init.c
444
work->bnx2i_conn, &work->cqe);
drivers/scsi/bnx2i/bnx2i_init.c
445
kfree(work);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1487
struct bnx2i_work *work, *tmp;
drivers/scsi/bnx2i/bnx2i_iscsi.c
1500
list_for_each_entry_safe(work, tmp,
drivers/scsi/bnx2i/bnx2i_iscsi.c
1502
if (work->session == conn->session &&
drivers/scsi/bnx2i/bnx2i_iscsi.c
1503
work->bnx2i_conn == bnx2i_conn) {
drivers/scsi/bnx2i/bnx2i_iscsi.c
1504
list_del_init(&work->list);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1505
kfree(work);
drivers/scsi/csiostor/csio_hw.c
3975
csio_evtq_worker(struct work_struct *work)
drivers/scsi/csiostor/csio_hw.c
3977
struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
drivers/scsi/device_handler/scsi_dh_alua.c
103
static void alua_rtpg_work(struct work_struct *work);
drivers/scsi/device_handler/scsi_dh_alua.c
885
static void alua_rtpg_work(struct work_struct *work)
drivers/scsi/device_handler/scsi_dh_alua.c
888
container_of(work, struct alua_port_group, rtpg_work.work);
drivers/scsi/device_handler/scsi_dh_rdac.c
237
static void send_mode_select(struct work_struct *work);
drivers/scsi/device_handler/scsi_dh_rdac.c
501
static void send_mode_select(struct work_struct *work)
drivers/scsi/device_handler/scsi_dh_rdac.c
504
container_of(work, struct rdac_controller, ms_work);
drivers/scsi/elx/efct/efct_lio.c
1154
static void efct_lio_setup_session(struct work_struct *work)
drivers/scsi/elx/efct/efct_lio.c
1157
container_of(work, struct efct_lio_wq_data, work);
drivers/scsi/elx/efct/efct_lio.c
1236
INIT_WORK(&wq_data->work, efct_lio_setup_session);
drivers/scsi/elx/efct/efct_lio.c
1237
queue_work(lio_wq, &wq_data->work);
drivers/scsi/elx/efct/efct_lio.c
1241
static void efct_lio_remove_session(struct work_struct *work)
drivers/scsi/elx/efct/efct_lio.c
1244
container_of(work, struct efct_lio_wq_data, work);
drivers/scsi/elx/efct/efct_lio.c
1304
INIT_WORK(&wq_data->work, efct_lio_remove_session);
drivers/scsi/elx/efct/efct_lio.c
1305
queue_work(lio_wq, &wq_data->work);
drivers/scsi/elx/efct/efct_lio.h
32
struct work_struct work;
drivers/scsi/esas2r/esas2r.h
775
struct delayed_work work;
drivers/scsi/esas2r/esas2r_main.c
1787
esas2r_firmware_event_work(struct work_struct *work)
drivers/scsi/esas2r/esas2r_main.c
1790
container_of(work, struct esas2r_fw_event_work, work.work);
drivers/scsi/esas2r/esas2r_main.c
1857
INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work);
drivers/scsi/esas2r/esas2r_main.c
1859
smp_processor_id(), a->fw_event_q, &fw_event->work,
drivers/scsi/fcoe/fcoe.c
1299
flush_work(&p->work);
drivers/scsi/fcoe/fcoe.c
1412
schedule_work_on(cpu, &fps->work);
drivers/scsi/fcoe/fcoe.c
1706
static void fcoe_receive_work(struct work_struct *work)
drivers/scsi/fcoe/fcoe.c
1712
p = container_of(work, struct fcoe_percpu_s, work);
drivers/scsi/fcoe/fcoe.c
2060
static void fcoe_destroy_work(struct work_struct *work)
drivers/scsi/fcoe/fcoe.c
2067
port = container_of(work, struct fcoe_port, destroy_work);
drivers/scsi/fcoe/fcoe.c
2310
flush_work(&pp->work);
drivers/scsi/fcoe/fcoe.c
2457
INIT_WORK(&p->work, fcoe_receive_work);
drivers/scsi/fcoe/fcoe_ctlr.c
1787
static void fcoe_ctlr_timer_work(struct work_struct *work)
drivers/scsi/fcoe/fcoe_ctlr.c
1799
fip = container_of(work, struct fcoe_ctlr, timer_work);
drivers/scsi/fcoe/fcoe_sysfs.c
695
struct work_struct *work)
drivers/scsi/fcoe/fcoe_sysfs.c
706
return queue_work(fcoe_ctlr_work_q(ctlr), work);
drivers/scsi/fcoe/fcoe_sysfs.c
736
struct delayed_work *work,
drivers/scsi/fcoe/fcoe_sysfs.c
748
return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
drivers/scsi/fcoe/fcoe_sysfs.c
879
static void fcoe_fcf_device_final_delete(struct work_struct *work)
drivers/scsi/fcoe/fcoe_sysfs.c
882
container_of(work, struct fcoe_fcf_device, delete_work);
drivers/scsi/fcoe/fcoe_sysfs.c
903
static void fip_timeout_deleted_fcf(struct work_struct *work)
drivers/scsi/fcoe/fcoe_sysfs.c
906
container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
drivers/scsi/fdomain.c
115
struct work_struct work;
drivers/scsi/fdomain.c
265
static void fdomain_work(struct work_struct *work)
drivers/scsi/fdomain.c
267
struct fdomain *fd = container_of(work, struct fdomain, work);
drivers/scsi/fdomain.c
400
schedule_work(&fd->work);
drivers/scsi/fdomain.c
554
INIT_WORK(&fd->work, fdomain_work);
drivers/scsi/fdomain.c
585
cancel_work_sync(&fd->work);
drivers/scsi/fnic/fdls_disc.c
187
void fdls_reclaim_oxid_handler(struct work_struct *work)
drivers/scsi/fnic/fdls_disc.c
189
struct fnic_oxid_pool_s *oxid_pool = container_of(work,
drivers/scsi/fnic/fdls_disc.c
190
struct fnic_oxid_pool_s, oxid_reclaim_work.work);
drivers/scsi/fnic/fdls_disc.c
302
void fdls_schedule_oxid_free_retry_work(struct work_struct *work)
drivers/scsi/fnic/fdls_disc.c
304
struct fnic_oxid_pool_s *oxid_pool = container_of(work,
drivers/scsi/fnic/fdls_disc.c
305
struct fnic_oxid_pool_s, schedule_oxid_free_retry.work);
drivers/scsi/fnic/fip.c
722
void fnic_work_on_fip_timer(struct work_struct *work)
drivers/scsi/fnic/fip.c
724
struct fnic *fnic = container_of(work, struct fnic, fip_timer_work);
drivers/scsi/fnic/fip.c
974
void fnic_work_on_fcs_ka_timer(struct work_struct *work)
drivers/scsi/fnic/fip.c
977
*fnic = container_of(work, struct fnic, fip_timer_work);
drivers/scsi/fnic/fip.h
126
void fnic_work_on_fip_timer(struct work_struct *work);
drivers/scsi/fnic/fip.h
127
void fnic_work_on_fcs_ka_timer(struct work_struct *work);
drivers/scsi/fnic/fnic.h
493
void fnic_handle_frame(struct work_struct *work);
drivers/scsi/fnic/fnic.h
494
void fnic_tport_event_handler(struct work_struct *work);
drivers/scsi/fnic/fnic.h
495
void fnic_handle_link(struct work_struct *work);
drivers/scsi/fnic/fnic.h
496
void fnic_handle_event(struct work_struct *work);
drivers/scsi/fnic/fnic.h
497
void fdls_reclaim_oxid_handler(struct work_struct *work);
drivers/scsi/fnic/fnic.h
499
void fdls_schedule_oxid_free_retry_work(struct work_struct *work);
drivers/scsi/fnic/fnic.h
503
void fnic_flush_tx(struct work_struct *work);
drivers/scsi/fnic/fnic.h
531
void fnic_handle_fip_frame(struct work_struct *work);
drivers/scsi/fnic/fnic.h
532
void fnic_reset_work_handler(struct work_struct *work);
drivers/scsi/fnic/fnic_fcs.c
1012
void fnic_tport_event_handler(struct work_struct *work)
drivers/scsi/fnic/fnic_fcs.c
1014
struct fnic *fnic = container_of(work, struct fnic, tport_work);
drivers/scsi/fnic/fnic_fcs.c
1076
void fnic_reset_work_handler(struct work_struct *work)
drivers/scsi/fnic/fnic_fcs.c
163
void fnic_handle_link(struct work_struct *work)
drivers/scsi/fnic/fnic_fcs.c
165
struct fnic *fnic = container_of(work, struct fnic, link_work);
drivers/scsi/fnic/fnic_fcs.c
283
void fnic_handle_frame(struct work_struct *work)
drivers/scsi/fnic/fnic_fcs.c
285
struct fnic *fnic = container_of(work, struct fnic, frame_work);
drivers/scsi/fnic/fnic_fcs.c
326
void fnic_handle_fip_frame(struct work_struct *work)
drivers/scsi/fnic/fnic_fcs.c
329
struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
drivers/scsi/fnic/fnic_fcs.c
775
void fnic_flush_tx(struct work_struct *work)
drivers/scsi/fnic/fnic_fcs.c
777
struct fnic *fnic = container_of(work, struct fnic, flush_work);
drivers/scsi/hisi_sas/hisi_sas.h
141
struct work_struct work;
drivers/scsi/hisi_sas/hisi_sas.h
148
.work = __WORK_INITIALIZER(r.work, \
drivers/scsi/hisi_sas/hisi_sas.h
681
extern void hisi_sas_rst_work_handler(struct work_struct *work);
drivers/scsi/hisi_sas/hisi_sas.h
682
extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
drivers/scsi/hisi_sas/hisi_sas_main.c
1965
queue_work(hisi_hba->wq, &r.work);
drivers/scsi/hisi_sas/hisi_sas_main.c
2384
void hisi_sas_rst_work_handler(struct work_struct *work)
drivers/scsi/hisi_sas/hisi_sas_main.c
2387
container_of(work, struct hisi_hba, rst_work);
drivers/scsi/hisi_sas/hisi_sas_main.c
2396
void hisi_sas_sync_rst_work_handler(struct work_struct *work)
drivers/scsi/hisi_sas/hisi_sas_main.c
2399
container_of(work, struct hisi_sas_rst, work);
drivers/scsi/hisi_sas/hisi_sas_main.c
939
static void hisi_sas_phyup_work_common(struct work_struct *work,
drivers/scsi/hisi_sas/hisi_sas_main.c
943
container_of(work, typeof(*phy), works[event]);
drivers/scsi/hisi_sas/hisi_sas_main.c
974
static void hisi_sas_phyup_work(struct work_struct *work)
drivers/scsi/hisi_sas/hisi_sas_main.c
976
hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
drivers/scsi/hisi_sas/hisi_sas_main.c
979
static void hisi_sas_linkreset_work(struct work_struct *work)
drivers/scsi/hisi_sas/hisi_sas_main.c
982
container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
drivers/scsi/hisi_sas/hisi_sas_main.c
988
static void hisi_sas_phyup_pm_work(struct work_struct *work)
drivers/scsi/hisi_sas/hisi_sas_main.c
991
container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
drivers/scsi/hisi_sas/hisi_sas_main.c
995
hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
drivers/scsi/hosts.c
672
int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
drivers/scsi/hosts.c
683
return queue_work(shost->work_q, work);
drivers/scsi/hpsa.c
2486
INIT_WORK(&c->work, hpsa_command_resubmit_worker);
drivers/scsi/hpsa.c
2487
queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
drivers/scsi/hpsa.c
324
static void hpsa_command_resubmit_worker(struct work_struct *work);
drivers/scsi/hpsa.c
5601
static void hpsa_command_resubmit_worker(struct work_struct *work)
drivers/scsi/hpsa.c
5605
struct CommandList *c = container_of(work, struct CommandList, work);
drivers/scsi/hpsa.c
8541
static void hpsa_event_monitor_worker(struct work_struct *work)
drivers/scsi/hpsa.c
8543
struct ctlr_info *h = container_of(to_delayed_work(work),
drivers/scsi/hpsa.c
8566
static void hpsa_rescan_ctlr_worker(struct work_struct *work)
drivers/scsi/hpsa.c
8569
struct ctlr_info *h = container_of(to_delayed_work(work),
drivers/scsi/hpsa.c
8595
static void hpsa_monitor_ctlr_worker(struct work_struct *work)
drivers/scsi/hpsa.c
8598
struct ctlr_info *h = container_of(to_delayed_work(work),
drivers/scsi/hpsa_cmd.h
442
struct work_struct work;
drivers/scsi/ibmvscsi/ibmvfc.c
6263
static void ibmvfc_rport_add_thread(struct work_struct *work)
drivers/scsi/ibmvscsi/ibmvfc.c
6265
struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
2417
queue_work(vscsi->work_q, &cmd->work);
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
2431
queue_work(vscsi->work_q, &cmd->work);
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
2817
static void ibmvscsis_scheduler(struct work_struct *work)
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
2819
struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
2820
work);
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
2878
INIT_WORK(&cmd->work, ibmvscsis_scheduler);
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
409
static void ibmvscsis_disconnect(struct work_struct *work)
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
411
struct scsi_info *vscsi = container_of(work, struct scsi_info,
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
160
struct work_struct work;
drivers/scsi/imm.c
733
static void imm_interrupt(struct work_struct *work)
drivers/scsi/imm.c
735
imm_struct *dev = container_of(work, imm_struct, imm_tq.work);
drivers/scsi/ipr.c
3236
static void ipr_add_remove_thread(struct work_struct *work)
drivers/scsi/ipr.c
3242
container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
drivers/scsi/ipr.c
3306
static void ipr_worker_thread(struct work_struct *work)
drivers/scsi/ipr.c
3311
container_of(work, struct ipr_ioa_cfg, work_q);
drivers/scsi/ipr.c
8014
static void ipr_reset_reset_work(struct work_struct *work)
drivers/scsi/ipr.c
8016
struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
drivers/scsi/ipr.c
8047
INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
drivers/scsi/ipr.c
8048
queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
drivers/scsi/ipr.h
1554
struct work_struct work;
drivers/scsi/iscsi_tcp.c
154
static void iscsi_sw_tcp_recv_data_work(struct work_struct *work)
drivers/scsi/iscsi_tcp.c
156
struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
drivers/scsi/libfc/fc_disc.c
474
static void fc_disc_timeout(struct work_struct *work)
drivers/scsi/libfc/fc_disc.c
476
struct fc_disc *disc = container_of(work,
drivers/scsi/libfc/fc_disc.c
478
disc_work.work);
drivers/scsi/libfc/fc_exch.c
764
static void fc_exch_timeout(struct work_struct *work)
drivers/scsi/libfc/fc_exch.c
766
struct fc_exch *ep = container_of(work, struct fc_exch,
drivers/scsi/libfc/fc_exch.c
767
timeout_work.work);
drivers/scsi/libfc/fc_lport.c
1589
static void fc_lport_timeout(struct work_struct *work)
drivers/scsi/libfc/fc_lport.c
1592
container_of(work, struct fc_lport,
drivers/scsi/libfc/fc_lport.c
1593
retry_work.work);
drivers/scsi/libfc/fc_rport.c
258
static void fc_rport_work(struct work_struct *work)
drivers/scsi/libfc/fc_rport.c
262
container_of(work, struct fc_rport_priv, event_work);
drivers/scsi/libfc/fc_rport.c
571
static void fc_rport_timeout(struct work_struct *work)
drivers/scsi/libfc/fc_rport.c
574
container_of(work, struct fc_rport_priv, retry_work.work);
drivers/scsi/libiscsi.c
1700
static void iscsi_xmitworker(struct work_struct *work)
drivers/scsi/libiscsi.c
1703
container_of(work, struct iscsi_conn, xmitwork);
drivers/scsi/libsas/sas_discover.c
231
static void sas_suspend_devices(struct work_struct *work)
drivers/scsi/libsas/sas_discover.c
235
struct sas_discovery_event *ev = to_sas_discovery_event(work);
drivers/scsi/libsas/sas_discover.c
262
static void sas_resume_devices(struct work_struct *work)
drivers/scsi/libsas/sas_discover.c
264
struct sas_discovery_event *ev = to_sas_discovery_event(work);
drivers/scsi/libsas/sas_discover.c
457
static void sas_discover_domain(struct work_struct *work)
drivers/scsi/libsas/sas_discover.c
461
struct sas_discovery_event *ev = to_sas_discovery_event(work);
drivers/scsi/libsas/sas_discover.c
512
static void sas_revalidate_domain(struct work_struct *work)
drivers/scsi/libsas/sas_discover.c
515
struct sas_discovery_event *ev = to_sas_discovery_event(work);
drivers/scsi/libsas/sas_discover.c
555
queue_work(ha->disco_q, &sw->work);
drivers/scsi/libsas/sas_discover.c
581
sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
drivers/scsi/libsas/sas_discover.c
604
INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
drivers/scsi/libsas/sas_event.c
124
static void sas_port_event_worker(struct work_struct *work)
drivers/scsi/libsas/sas_event.c
126
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/libsas/sas_event.c
130
sas_port_event_fns[ev->event](work);
drivers/scsi/libsas/sas_event.c
135
static void sas_phy_event_worker(struct work_struct *work)
drivers/scsi/libsas/sas_event.c
137
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/libsas/sas_event.c
141
sas_phy_event_fns[ev->event](work);
drivers/scsi/libsas/sas_event.c
155
struct sas_work *sw = &ev->work;
drivers/scsi/libsas/sas_event.c
184
if (!sas_queue_event(event, &ev->work, ha)) {
drivers/scsi/libsas/sas_event.c
211
if (!sas_queue_event(event, &ev->work, ha)) {
drivers/scsi/libsas/sas_event.c
25
return queue_work(ha->event_q, &sw->work);
drivers/scsi/libsas/sas_event.c
28
static bool sas_queue_event(int event, struct sas_work *work,
drivers/scsi/libsas/sas_event.c
35
rc = sas_queue_work(ha, work);
drivers/scsi/libsas/sas_event.c
51
sas_free_event(to_asd_sas_event(&sw->work));
drivers/scsi/libsas/sas_init.c
492
static void phy_reset_work(struct work_struct *work)
drivers/scsi/libsas/sas_init.c
494
struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);
drivers/scsi/libsas/sas_init.c
499
static void phy_enable_work(struct work_struct *work)
drivers/scsi/libsas/sas_init.c
501
struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);
drivers/scsi/libsas/sas_internal.h
76
void sas_porte_bytes_dmaed(struct work_struct *work);
drivers/scsi/libsas/sas_internal.h
77
void sas_porte_broadcast_rcvd(struct work_struct *work);
drivers/scsi/libsas/sas_internal.h
78
void sas_porte_link_reset_err(struct work_struct *work);
drivers/scsi/libsas/sas_internal.h
79
void sas_porte_timer_event(struct work_struct *work);
drivers/scsi/libsas/sas_internal.h
80
void sas_porte_hard_reset(struct work_struct *work);
drivers/scsi/libsas/sas_phy.c
17
static void sas_phye_loss_of_signal(struct work_struct *work)
drivers/scsi/libsas/sas_phy.c
19
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/libsas/sas_phy.c
26
static void sas_phye_oob_done(struct work_struct *work)
drivers/scsi/libsas/sas_phy.c
28
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/libsas/sas_phy.c
34
static void sas_phye_oob_error(struct work_struct *work)
drivers/scsi/libsas/sas_phy.c
36
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/libsas/sas_phy.c
63
static void sas_phye_spinup_hold(struct work_struct *work)
drivers/scsi/libsas/sas_phy.c
65
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/libsas/sas_phy.c
75
static void sas_phye_resume_timeout(struct work_struct *work)
drivers/scsi/libsas/sas_phy.c
77
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/libsas/sas_phy.c
92
static void sas_phye_shutdown(struct work_struct *work)
drivers/scsi/libsas/sas_phy.c
94
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/libsas/sas_port.c
273
void sas_porte_bytes_dmaed(struct work_struct *work)
drivers/scsi/libsas/sas_port.c
275
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/libsas/sas_port.c
281
void sas_porte_broadcast_rcvd(struct work_struct *work)
drivers/scsi/libsas/sas_port.c
283
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/libsas/sas_port.c
299
void sas_porte_link_reset_err(struct work_struct *work)
drivers/scsi/libsas/sas_port.c
301
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/libsas/sas_port.c
307
void sas_porte_timer_event(struct work_struct *work)
drivers/scsi/libsas/sas_port.c
309
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/libsas/sas_port.c
315
void sas_porte_hard_reset(struct work_struct *work)
drivers/scsi/libsas/sas_port.c
317
struct asd_sas_event *ev = to_asd_sas_event(work);
drivers/scsi/lpfc/lpfc_init.c
1283
lpfc_idle_stat_delay_work(struct work_struct *work)
drivers/scsi/lpfc/lpfc_init.c
1285
struct lpfc_hba *phba = container_of(to_delayed_work(work),
drivers/scsi/lpfc/lpfc_init.c
1345
lpfc_hb_eq_delay_work(struct work_struct *work)
drivers/scsi/lpfc/lpfc_init.c
1347
struct lpfc_hba *phba = container_of(to_delayed_work(work),
drivers/scsi/lpfc/lpfc_init.c
7648
lpfc_unblock_requests_work(struct work_struct *work)
drivers/scsi/lpfc/lpfc_init.c
7650
struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
drivers/scsi/lpfc/lpfc_nvmet.c
2297
lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
drivers/scsi/lpfc/lpfc_nvmet.c
2301
container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
drivers/scsi/lpfc/lpfc_sli.c
15053
lpfc_sli4_sp_process_cq(struct work_struct *work)
drivers/scsi/lpfc/lpfc_sli.c
15055
struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
drivers/scsi/lpfc/lpfc_sli.c
15067
lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
drivers/scsi/lpfc/lpfc_sli.c
15069
struct lpfc_queue *cq = container_of(to_delayed_work(work),
drivers/scsi/lpfc/lpfc_sli.c
15447
lpfc_sli4_hba_process_cq(struct work_struct *work)
drivers/scsi/lpfc/lpfc_sli.c
15449
struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
drivers/scsi/lpfc/lpfc_sli.c
15563
lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
drivers/scsi/lpfc/lpfc_sli.c
15565
struct lpfc_queue *cq = container_of(to_delayed_work(work),
drivers/scsi/megaraid/megaraid_sas_base.c
2347
process_fw_state_change_wq(struct work_struct *work);
drivers/scsi/megaraid/megaraid_sas_base.c
240
void megasas_fusion_ocr_wq(struct work_struct *work);
drivers/scsi/megaraid/megaraid_sas_base.c
3217
static void megasas_aen_polling(struct work_struct *work);
drivers/scsi/megaraid/megaraid_sas_base.c
3946
process_fw_state_change_wq(struct work_struct *work)
drivers/scsi/megaraid/megaraid_sas_base.c
3949
container_of(work, struct megasas_instance, work_init);
drivers/scsi/megaraid/megaraid_sas_base.c
8876
megasas_aen_polling(struct work_struct *work)
drivers/scsi/megaraid/megaraid_sas_base.c
8879
container_of(work, struct megasas_aen_event, hotplug_work.work);
drivers/scsi/megaraid/megaraid_sas_fusion.c
1939
megasas_fault_detect_work(struct work_struct *work)
drivers/scsi/megaraid/megaraid_sas_fusion.c
1942
container_of(work, struct megasas_instance,
drivers/scsi/megaraid/megaraid_sas_fusion.c
1943
fw_fault_work.work);
drivers/scsi/megaraid/megaraid_sas_fusion.c
5285
void megasas_fusion_ocr_wq(struct work_struct *work)
drivers/scsi/megaraid/megaraid_sas_fusion.c
5288
container_of(work, struct megasas_instance, work_init);
drivers/scsi/megaraid/megaraid_sas_fusion.h
1401
void megasas_fusion_ocr_wq(struct work_struct *work);
drivers/scsi/mpi3mr/mpi3mr.h
1418
struct work_struct work;
drivers/scsi/mpi3mr/mpi3mr_fw.c
2862
static void mpi3mr_watchdog_work(struct work_struct *work)
drivers/scsi/mpi3mr/mpi3mr_fw.c
2865
container_of(work, struct mpi3mr_ioc, watchdog_work.work);
drivers/scsi/mpi3mr/mpi3mr_os.c
166
static void mpi3mr_fwevt_worker(struct work_struct *work);
drivers/scsi/mpi3mr/mpi3mr_os.c
2276
static void mpi3mr_fwevt_worker(struct work_struct *work)
drivers/scsi/mpi3mr/mpi3mr_os.c
2278
struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
drivers/scsi/mpi3mr/mpi3mr_os.c
2279
work);
drivers/scsi/mpi3mr/mpi3mr_os.c
244
INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
drivers/scsi/mpi3mr/mpi3mr_os.c
247
queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
drivers/scsi/mpi3mr/mpi3mr_os.c
356
if (cancel_work_sync(&fwevt->work)) {
drivers/scsi/mpi3mr/mpi3mr_os.c
400
if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
drivers/scsi/mpt3sas/mpt3sas_base.c
695
_base_fault_reset_work(struct work_struct *work)
drivers/scsi/mpt3sas/mpt3sas_base.c
698
container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
drivers/scsi/mpt3sas/mpt3sas_scsih.c
12052
_firmware_event_work(struct work_struct *work)
drivers/scsi/mpt3sas/mpt3sas_scsih.c
12054
struct fw_event_work *fw_event = container_of(work,
drivers/scsi/mpt3sas/mpt3sas_scsih.c
12055
struct fw_event_work, work);
drivers/scsi/mpt3sas/mpt3sas_scsih.c
12061
_firmware_event_work_delayed(struct work_struct *work)
drivers/scsi/mpt3sas/mpt3sas_scsih.c
12063
struct fw_event_work *fw_event = container_of(work,
drivers/scsi/mpt3sas/mpt3sas_scsih.c
12064
struct fw_event_work, delayed_work.work);
drivers/scsi/mpt3sas/mpt3sas_scsih.c
274
struct work_struct work;
drivers/scsi/mpt3sas/mpt3sas_scsih.c
3635
INIT_WORK(&fw_event->work, _firmware_event_work);
drivers/scsi/mpt3sas/mpt3sas_scsih.c
3637
queue_work(ioc->firmware_event_thread, &fw_event->work);
drivers/scsi/mpt3sas/mpt3sas_scsih.c
3845
if (cancel_work_sync(&fw_event->work))
drivers/scsi/mpt3sas/mpt3sas_scsih.c
69
static void _firmware_event_work(struct work_struct *work);
drivers/scsi/mpt3sas/mpt3sas_scsih.c
92
static void _firmware_event_work_delayed(struct work_struct *work);
drivers/scsi/mvsas/mv_sas.c
1694
static void mvs_work_queue(struct work_struct *work)
drivers/scsi/mvsas/mv_sas.c
1696
struct delayed_work *dw = container_of(work, struct delayed_work, work);
drivers/scsi/mvumi.c
1730
static void mvumi_scan_events(struct work_struct *work)
drivers/scsi/mvumi.c
1733
container_of(work, struct mvumi_events_wq, work_q);
drivers/scsi/myrb.c
2381
static void myrb_monitor(struct work_struct *work)
drivers/scsi/myrb.c
2383
struct myrb_hba *cb = container_of(work,
drivers/scsi/myrb.c
2384
struct myrb_hba, monitor_work.work);
drivers/scsi/myrb.c
29
static void myrb_monitor(struct work_struct *work);
drivers/scsi/myrs.c
2103
static void myrs_monitor(struct work_struct *work)
drivers/scsi/myrs.c
2105
struct myrs_hba *cs = container_of(work, struct myrs_hba,
drivers/scsi/myrs.c
2106
monitor_work.work);
drivers/scsi/pm8001/pm8001_hwi.c
1446
void pm8001_work_fn(struct work_struct *work)
drivers/scsi/pm8001/pm8001_hwi.c
1448
struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
drivers/scsi/pm8001/pm8001_hwi.c
1694
INIT_WORK(&pw->work, pm8001_work_fn);
drivers/scsi/pm8001/pm8001_hwi.c
1695
queue_work(pm8001_wq, &pw->work);
drivers/scsi/pm8001/pm8001_sas.h
557
struct work_struct work;
drivers/scsi/pm8001/pm8001_sas.h
693
void pm8001_work_fn(struct work_struct *work);
drivers/scsi/ppa.c
641
static void ppa_interrupt(struct work_struct *work)
drivers/scsi/ppa.c
643
ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work);
drivers/scsi/qedf/qedf.h
241
struct work_struct work;
drivers/scsi/qedf/qedf.h
258
struct work_struct work;
drivers/scsi/qedf/qedf.h
548
extern void qedf_fp_io_handler(struct work_struct *work);
drivers/scsi/qedf/qedf.h
550
extern void qedf_wq_grcdump(struct work_struct *work);
drivers/scsi/qedf/qedf.h
551
void qedf_stag_change_work(struct work_struct *work);
drivers/scsi/qedf/qedf_io.c
162
static void qedf_handle_rrq(struct work_struct *work)
drivers/scsi/qedf/qedf_io.c
165
container_of(work, struct qedf_ioreq, rrq_work.work);
drivers/scsi/qedf/qedf_io.c
18
static void qedf_cmd_timeout(struct work_struct *work)
drivers/scsi/qedf/qedf_io.c
22
container_of(work, struct qedf_ioreq, timeout_work.work);
drivers/scsi/qedf/qedf_io.c
2566
INIT_WORK(&io_work->work, qedf_fp_io_handler);
drivers/scsi/qedf/qedf_io.c
2574
queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
drivers/scsi/qedf/qedf_main.c
168
static void qedf_handle_link_update(struct work_struct *work)
drivers/scsi/qedf/qedf_main.c
171
container_of(work, struct qedf_ctx, link_update.work);
drivers/scsi/qedf/qedf_main.c
2307
INIT_WORK(&io_work->work, qedf_fp_io_handler);
drivers/scsi/qedf/qedf_main.c
2315
queue_work_on(cpu, qedf_io_wq, &io_work->work);
drivers/scsi/qedf/qedf_main.c
2620
static void qedf_ll2_process_skb(struct work_struct *work)
drivers/scsi/qedf/qedf_main.c
2623
container_of(work, struct qedf_skb_work, work);
drivers/scsi/qedf/qedf_main.c
2685
INIT_WORK(&skb_work->work, qedf_ll2_process_skb);
drivers/scsi/qedf/qedf_main.c
2688
queue_work(qedf->ll2_recv_wq, &skb_work->work);
drivers/scsi/qedf/qedf_main.c
2699
void qedf_fp_io_handler(struct work_struct *work)
drivers/scsi/qedf/qedf_main.c
2702
container_of(work, struct qedf_io_work, work);
drivers/scsi/qedf/qedf_main.c
33
static void qedf_recovery_handler(struct work_struct *work);
drivers/scsi/qedf/qedf_main.c
373
static void qedf_link_recovery(struct work_struct *work)
drivers/scsi/qedf/qedf_main.c
376
container_of(work, struct qedf_ctx, link_recovery.work);
drivers/scsi/qedf/qedf_main.c
3875
void qedf_wq_grcdump(struct work_struct *work)
drivers/scsi/qedf/qedf_main.c
3878
container_of(work, struct qedf_ctx, grcdump_work.work);
drivers/scsi/qedf/qedf_main.c
4020
void qedf_stag_change_work(struct work_struct *work)
drivers/scsi/qedf/qedf_main.c
4023
container_of(work, struct qedf_ctx, stag_work.work);
drivers/scsi/qedf/qedf_main.c
4076
static void qedf_recovery_handler(struct work_struct *work)
drivers/scsi/qedf/qedf_main.c
4079
container_of(work, struct qedf_ctx, recovery_work.work);
drivers/scsi/qedi/qedi_fw.c
1312
static void qedi_abort_work(struct work_struct *work)
drivers/scsi/qedi/qedi_fw.c
1315
container_of(work, struct qedi_cmd, tmf_work);
drivers/scsi/qedi/qedi_fw.c
148
static void qedi_tmf_resp_work(struct work_struct *work)
drivers/scsi/qedi/qedi_fw.c
151
container_of(work, struct qedi_cmd, tmf_work);
drivers/scsi/qedi/qedi_fw.c
733
struct qedi_work_map *work, *work_tmp;
drivers/scsi/qedi/qedi_fw.c
753
list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
drivers/scsi/qedi/qedi_fw.c
755
if (work->rtid == proto_itt) {
drivers/scsi/qedi/qedi_fw.c
757
qedi_cmd = work->qedi_cmd;
drivers/scsi/qedi/qedi_fw.c
766
task = work->ctask;
drivers/scsi/qedi/qedi_fw.c
769
list_del_init(&work->list);
drivers/scsi/qedi/qedi_fw.c
770
kfree(work);
drivers/scsi/qedi/qedi_fw.c
821
void qedi_fp_process_cqes(struct qedi_work *work)
drivers/scsi/qedi/qedi_fw.c
823
struct qedi_ctx *qedi = work->qedi;
drivers/scsi/qedi/qedi_fw.c
824
union iscsi_cqe *cqe = &work->cqe;
drivers/scsi/qedi/qedi_fw.c
833
u16 que_idx = work->que_idx;
drivers/scsi/qedi/qedi_fw.c
871
qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
drivers/scsi/qedi/qedi_gbl.h
60
void qedi_fp_process_cqes(struct qedi_work *work);
drivers/scsi/qedi/qedi_iscsi.c
863
static void qedi_offload_work(struct work_struct *work)
drivers/scsi/qedi/qedi_iscsi.c
866
container_of(work, struct qedi_endpoint, offload_work);
drivers/scsi/qedi/qedi_main.c
1919
struct qedi_work *work, *tmp;
drivers/scsi/qedi/qedi_main.c
1931
list_for_each_entry_safe(work, tmp, &work_list, list) {
drivers/scsi/qedi/qedi_main.c
1932
list_del_init(&work->list);
drivers/scsi/qedi/qedi_main.c
1933
qedi_fp_process_cqes(work);
drivers/scsi/qedi/qedi_main.c
1934
if (!work->is_solicited)
drivers/scsi/qedi/qedi_main.c
1935
kfree(work);
drivers/scsi/qedi/qedi_main.c
1967
struct qedi_work *work, *tmp;
drivers/scsi/qedi/qedi_main.c
1975
list_for_each_entry_safe(work, tmp, &p->work_list, list) {
drivers/scsi/qedi/qedi_main.c
1976
list_del_init(&work->list);
drivers/scsi/qedi/qedi_main.c
1977
qedi_fp_process_cqes(work);
drivers/scsi/qedi/qedi_main.c
1978
if (!work->is_solicited)
drivers/scsi/qedi/qedi_main.c
1979
kfree(work);
drivers/scsi/qedi/qedi_main.c
2479
static void qedi_board_disable_work(struct work_struct *work)
drivers/scsi/qedi/qedi_main.c
2482
container_of(work, struct qedi_ctx,
drivers/scsi/qedi/qedi_main.c
2483
board_disable_work.work);
drivers/scsi/qedi/qedi_main.c
2833
static void qedi_recovery_handler(struct work_struct *work)
drivers/scsi/qedi/qedi_main.c
2836
container_of(work, struct qedi_ctx, recovery_work.work);
drivers/scsi/qedi/qedi_main.c
670
struct skb_work_list *work;
drivers/scsi/qedi/qedi_main.c
69
static void qedi_recovery_handler(struct work_struct *work);
drivers/scsi/qedi/qedi_main.c
708
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/scsi/qedi/qedi_main.c
709
if (!work) {
drivers/scsi/qedi/qedi_main.c
716
INIT_LIST_HEAD(&work->list);
drivers/scsi/qedi/qedi_main.c
717
work->skb = skb;
drivers/scsi/qedi/qedi_main.c
720
work->vlan_id = skb_vlan_tag_get(skb);
drivers/scsi/qedi/qedi_main.c
722
if (work->vlan_id)
drivers/scsi/qedi/qedi_main.c
723
__vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);
drivers/scsi/qedi/qedi_main.c
726
list_add_tail(&work->list, &qedi->ll2_skb_list);
drivers/scsi/qedi/qedi_main.c
792
struct skb_work_list *work, *work_tmp;
drivers/scsi/qedi/qedi_main.c
795
list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
drivers/scsi/qedi/qedi_main.c
796
list_del(&work->list);
drivers/scsi/qedi/qedi_main.c
797
kfree_skb(work->skb);
drivers/scsi/qedi/qedi_main.c
798
kfree(work);
drivers/scsi/qedi/qedi_main.c
806
struct skb_work_list *work, *work_tmp;
drivers/scsi/qedi/qedi_main.c
812
list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
drivers/scsi/qedi/qedi_main.c
814
list_del(&work->list);
drivers/scsi/qedi/qedi_main.c
815
qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
drivers/scsi/qedi/qedi_main.c
816
kfree_skb(work->skb);
drivers/scsi/qedi/qedi_main.c
817
kfree(work);
drivers/scsi/qla2xxx/qla_bsg.c
16
static void qla2xxx_free_fcport_work(struct work_struct *work)
drivers/scsi/qla2xxx/qla_bsg.c
18
struct fc_port *fcport = container_of(work, typeof(*fcport),
drivers/scsi/qla2xxx/qla_gs.c
3744
void qla_scan_work_fn(struct work_struct *work)
drivers/scsi/qla2xxx/qla_gs.c
3746
struct fab_scan *s = container_of(to_delayed_work(work),
drivers/scsi/qla2xxx/qla_init.c
6288
void qla_register_fcport_fn(struct work_struct *work)
drivers/scsi/qla2xxx/qla_init.c
6290
fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
drivers/scsi/qla2xxx/qla_mid.c
813
static void qla_do_work(struct work_struct *work)
drivers/scsi/qla2xxx/qla_mid.c
816
struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
drivers/scsi/qla2xxx/qla_nvme.c
218
static void qla_nvme_ls_complete(struct work_struct *work)
drivers/scsi/qla2xxx/qla_nvme.c
221
container_of(work, struct nvme_private, ls_work);
drivers/scsi/qla2xxx/qla_nvme.c
267
static void qla_nvme_lsrsp_complete(struct work_struct *work)
drivers/scsi/qla2xxx/qla_nvme.c
270
container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work);
drivers/scsi/qla2xxx/qla_nvme.c
301
static void qla_nvme_abort_work(struct work_struct *work)
drivers/scsi/qla2xxx/qla_nvme.c
304
container_of(work, struct nvme_private, abort_work);
drivers/scsi/qla2xxx/qla_os.c
2847
static void qla_heartbeat_work_fn(struct work_struct *work)
drivers/scsi/qla2xxx/qla_os.c
2849
struct qla_hw_data *ha = container_of(work,
drivers/scsi/qla2xxx/qla_os.c
2857
static void qla2x00_iocb_work_fn(struct work_struct *work)
drivers/scsi/qla2xxx/qla_os.c
2859
struct scsi_qla_host *vha = container_of(work,
drivers/scsi/qla2xxx/qla_os.c
5487
LIST_HEAD(work);
drivers/scsi/qla2xxx/qla_os.c
5491
list_splice_init(&vha->work_list, &work);
drivers/scsi/qla2xxx/qla_os.c
5494
list_for_each_entry_safe(e, tmp, &work, list) {
drivers/scsi/qla2xxx/qla_os.c
5580
list_splice(&work, &vha->work_list);
drivers/scsi/qla2xxx/qla_os.c
5723
qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
drivers/scsi/qla2xxx/qla_os.c
5726
container_of(work, struct qla_hw_data, nic_core_unrecoverable);
drivers/scsi/qla2xxx/qla_os.c
5745
qla83xx_idc_state_handler_work(struct work_struct *work)
drivers/scsi/qla2xxx/qla_os.c
5748
container_of(work, struct qla_hw_data, idc_state_handler);
drivers/scsi/qla2xxx/qla_os.c
5791
qla83xx_nic_core_reset_work(struct work_struct *work)
drivers/scsi/qla2xxx/qla_os.c
5794
container_of(work, struct qla_hw_data, nic_core_reset);
drivers/scsi/qla2xxx/qla_os.c
5830
qla83xx_service_idc_aen(struct work_struct *work)
drivers/scsi/qla2xxx/qla_os.c
5833
container_of(work, struct qla_hw_data, idc_aen);
drivers/scsi/qla2xxx/qla_os.c
6799
qla2x00_disable_board_on_pci_error(struct work_struct *work)
drivers/scsi/qla2xxx/qla_os.c
6801
struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
drivers/scsi/qla2xxx/qla_target.c
2014
static void qlt_do_tmr_work(struct work_struct *work)
drivers/scsi/qla2xxx/qla_target.c
2017
container_of(work, struct qla_tgt_mgmt_cmd, work);
drivers/scsi/qla2xxx/qla_target.c
2094
INIT_WORK(&mcmd->work, qlt_do_tmr_work);
drivers/scsi/qla2xxx/qla_target.c
2095
queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);
drivers/scsi/qla2xxx/qla_target.c
301
void qlt_unknown_atio_work_fn(struct work_struct *work)
drivers/scsi/qla2xxx/qla_target.c
303
struct scsi_qla_host *vha = container_of(to_delayed_work(work),
drivers/scsi/qla2xxx/qla_target.c
4475
static void qlt_do_work(struct work_struct *work)
drivers/scsi/qla2xxx/qla_target.c
4477
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
drivers/scsi/qla2xxx/qla_target.c
4734
INIT_WORK(&cmd->work, qlt_do_work);
drivers/scsi/qla2xxx/qla_target.c
4736
queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
drivers/scsi/qla2xxx/qla_target.c
4739
queue_work(qla_tgt_wq, &cmd->work);
drivers/scsi/qla2xxx/qla_target.c
4742
&cmd->work);
drivers/scsi/qla2xxx/qla_target.c
4744
queue_work(qla_tgt_wq, &cmd->work);
drivers/scsi/qla2xxx/qla_target.c
4804
INIT_WORK(&mcmd->work, qlt_do_tmr_work);
drivers/scsi/qla2xxx/qla_target.c
4806
&mcmd->work);
drivers/scsi/qla2xxx/qla_target.c
6073
static void qlt_handle_srr_work(struct work_struct *work)
drivers/scsi/qla2xxx/qla_target.c
6075
struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
drivers/scsi/qla2xxx/qla_target.c
675
void qla24xx_delete_sess_fn(struct work_struct *work)
drivers/scsi/qla2xxx/qla_target.c
677
fc_port_t *fcport = container_of(work, struct fc_port, del_work);
drivers/scsi/qla2xxx/qla_target.c
7402
static void qlt_sess_work_fn(struct work_struct *work)
drivers/scsi/qla2xxx/qla_target.c
7404
struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
drivers/scsi/qla2xxx/qla_target.c
8252
qlt_handle_abts_recv_work(struct work_struct *work)
drivers/scsi/qla2xxx/qla_target.c
8254
struct qla_tgt_sess_op *op = container_of(work,
drivers/scsi/qla2xxx/qla_target.c
8255
struct qla_tgt_sess_op, work);
drivers/scsi/qla2xxx/qla_target.c
8295
INIT_WORK(&op->work, qlt_handle_abts_recv_work);
drivers/scsi/qla2xxx/qla_target.c
8296
queue_work(qla_tgt_wq, &op->work);
drivers/scsi/qla2xxx/qla_target.c
939
void qlt_free_session_done(struct work_struct *work)
drivers/scsi/qla2xxx/qla_target.c
941
struct fc_port *sess = container_of(work, struct fc_port,
drivers/scsi/qla2xxx/qla_target.h
1004
struct work_struct work;
drivers/scsi/qla2xxx/qla_target.h
842
struct work_struct work;
drivers/scsi/qla2xxx/qla_target.h
888
struct work_struct work;
drivers/scsi/qla2xxx/tcm_qla2xxx.c
235
static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
drivers/scsi/qla2xxx/tcm_qla2xxx.c
237
struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
drivers/scsi/qla2xxx/tcm_qla2xxx.c
256
static void tcm_qla2xxx_complete_free(struct work_struct *work)
drivers/scsi/qla2xxx/tcm_qla2xxx.c
258
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
drivers/scsi/qla2xxx/tcm_qla2xxx.c
325
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
drivers/scsi/qla2xxx/tcm_qla2xxx.c
326
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
drivers/scsi/qla2xxx/tcm_qla2xxx.c
506
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
drivers/scsi/qla2xxx/tcm_qla2xxx.c
508
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
drivers/scsi/qla2xxx/tcm_qla2xxx.c
565
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
drivers/scsi/qla2xxx/tcm_qla2xxx.c
566
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
drivers/scsi/qla4xxx/ql4_os.c
5273
LIST_HEAD(work);
drivers/scsi/qla4xxx/ql4_os.c
5276
list_splice_init(&ha->work_list, &work);
drivers/scsi/qla4xxx/ql4_os.c
5279
list_for_each_entry_safe(e, tmp, &work, list) {
drivers/scsi/qla4xxx/ql4_os.c
5317
static void qla4xxx_do_dpc(struct work_struct *work)
drivers/scsi/qla4xxx/ql4_os.c
5320
container_of(work, struct scsi_qla_host, dpc_work);
drivers/scsi/scsi_debug.c
6425
static void sdebug_q_cmd_wq_complete(struct work_struct *work)
drivers/scsi/scsi_debug.c
6427
struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
drivers/scsi/scsi_debug.c
6428
ew.work);
drivers/scsi/scsi_debug.c
6739
if (cancel_work(&sd_dp->ew.work))
drivers/scsi/scsi_debug.c
7318
schedule_work(&sd_dp->ew.work);
drivers/scsi/scsi_debug.c
7736
char work[20];
drivers/scsi/scsi_debug.c
7738
if (sscanf(buf, "%10s", work) == 1) {
drivers/scsi/scsi_debug.c
7739
if (strncasecmp(work, "0x", 2) == 0) {
drivers/scsi/scsi_debug.c
7740
if (kstrtoint(work + 2, 16, &opts) == 0)
drivers/scsi/scsi_debug.c
7743
if (kstrtoint(work, 10, &opts) == 0)
drivers/scsi/scsi_debug.c
7918
char work[20];
drivers/scsi/scsi_debug.c
7920
if (sscanf(buf, "%10s", work) == 1) {
drivers/scsi/scsi_debug.c
7921
if (strncasecmp(work, "0x", 2) == 0) {
drivers/scsi/scsi_debug.c
7922
if (kstrtoint(work + 2, 16, &nth) == 0)
drivers/scsi/scsi_debug.c
7925
if (kstrtoint(work, 10, &nth) == 0)
drivers/scsi/scsi_debug.c
9511
INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
drivers/scsi/scsi_error.c
147
scmd_eh_abort_handler(struct work_struct *work)
drivers/scsi/scsi_error.c
150
container_of(work, struct scsi_cmnd, abort_work.work);
drivers/scsi/scsi_lib.c
2683
void scsi_evt_thread(struct work_struct *work)
drivers/scsi/scsi_lib.c
2689
sdev = container_of(work, struct scsi_device, event_work);
drivers/scsi/scsi_lib.c
565
void scsi_requeue_run_queue(struct work_struct *work)
drivers/scsi/scsi_lib.c
570
sdev = container_of(work, struct scsi_device, requeue_work);
drivers/scsi/scsi_priv.h
109
extern void scsi_requeue_run_queue(struct work_struct *work);
drivers/scsi/scsi_priv.h
114
extern void scsi_evt_thread(struct work_struct *work);
drivers/scsi/scsi_priv.h
89
extern void scmd_eh_abort_handler(struct work_struct *work);
drivers/scsi/scsi_transport_fc.c
2818
fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
drivers/scsi/scsi_transport_fc.c
2829
return queue_work(fc_host_work_q(shost), work);
drivers/scsi/scsi_transport_fc.c
2862
struct delayed_work *work, unsigned long delay)
drivers/scsi/scsi_transport_fc.c
2873
return queue_delayed_work(rport->devloss_work_q, work, delay);
drivers/scsi/scsi_transport_fc.c
2979
fc_starget_delete(struct work_struct *work)
drivers/scsi/scsi_transport_fc.c
2982
container_of(work, struct fc_rport, stgt_delete_work);
drivers/scsi/scsi_transport_fc.c
2994
fc_rport_final_delete(struct work_struct *work)
drivers/scsi/scsi_transport_fc.c
2997
container_of(work, struct fc_rport, rport_delete_work);
drivers/scsi/scsi_transport_fc.c
30
static void fc_vport_sched_delete(struct work_struct *work);
drivers/scsi/scsi_transport_fc.c
312
static void fc_timeout_deleted_rport(struct work_struct *work);
drivers/scsi/scsi_transport_fc.c
313
static void fc_timeout_fail_rport_io(struct work_struct *work);
drivers/scsi/scsi_transport_fc.c
314
static void fc_scsi_scan_rport(struct work_struct *work);
drivers/scsi/scsi_transport_fc.c
3603
fc_timeout_deleted_rport(struct work_struct *work)
drivers/scsi/scsi_transport_fc.c
3606
container_of(work, struct fc_rport, dev_loss_work.work);
drivers/scsi/scsi_transport_fc.c
3741
fc_timeout_fail_rport_io(struct work_struct *work)
drivers/scsi/scsi_transport_fc.c
3744
container_of(work, struct fc_rport, fail_io_work.work);
drivers/scsi/scsi_transport_fc.c
3758
fc_scsi_scan_rport(struct work_struct *work)
drivers/scsi/scsi_transport_fc.c
3761
container_of(work, struct fc_rport, scan_work);
drivers/scsi/scsi_transport_fc.c
4074
fc_vport_sched_delete(struct work_struct *work)
drivers/scsi/scsi_transport_fc.c
4077
container_of(work, struct fc_vport, vport_delete_work);
drivers/scsi/scsi_transport_iscsi.c
1826
static void iscsi_scan_session(struct work_struct *work)
drivers/scsi/scsi_transport_iscsi.c
1829
container_of(work, struct iscsi_cls_session, scan_work);
drivers/scsi/scsi_transport_iscsi.c
1871
static void session_recovery_timedout(struct work_struct *work)
drivers/scsi/scsi_transport_iscsi.c
1874
container_of(work, struct iscsi_cls_session,
drivers/scsi/scsi_transport_iscsi.c
1875
recovery_work.work);
drivers/scsi/scsi_transport_iscsi.c
1903
static void __iscsi_unblock_session(struct work_struct *work)
drivers/scsi/scsi_transport_iscsi.c
1906
container_of(work, struct iscsi_cls_session,
drivers/scsi/scsi_transport_iscsi.c
1942
static void __iscsi_block_session(struct work_struct *work)
drivers/scsi/scsi_transport_iscsi.c
1945
container_of(work, struct iscsi_cls_session,
drivers/scsi/scsi_transport_iscsi.c
1968
static void __iscsi_unbind_session(struct work_struct *work)
drivers/scsi/scsi_transport_iscsi.c
1971
container_of(work, struct iscsi_cls_session,
drivers/scsi/scsi_transport_iscsi.c
2014
static void __iscsi_destroy_session(struct work_struct *work)
drivers/scsi/scsi_transport_iscsi.c
2017
container_of(work, struct iscsi_cls_session, destroy_work);
drivers/scsi/scsi_transport_iscsi.c
2314
static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
drivers/scsi/scsi_transport_iscsi.c
2316
struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
drivers/scsi/scsi_transport_spi.c
1068
struct work_struct work;
drivers/scsi/scsi_transport_spi.c
1073
spi_dv_device_work_wrapper(struct work_struct *work)
drivers/scsi/scsi_transport_spi.c
1076
container_of(work, struct work_queue_wrapper, work);
drivers/scsi/scsi_transport_spi.c
1116
INIT_WORK(&wqw->work, spi_dv_device_work_wrapper);
drivers/scsi/scsi_transport_spi.c
1119
schedule_work(&wqw->work);
drivers/scsi/scsi_transport_srp.c
378
static void srp_reconnect_work(struct work_struct *work)
drivers/scsi/scsi_transport_srp.c
380
struct srp_rport *rport = container_of(to_delayed_work(work),
drivers/scsi/scsi_transport_srp.c
424
static void rport_fast_io_fail_timedout(struct work_struct *work)
drivers/scsi/scsi_transport_srp.c
426
struct srp_rport *rport = container_of(to_delayed_work(work),
drivers/scsi/scsi_transport_srp.c
443
static void rport_dev_loss_timedout(struct work_struct *work)
drivers/scsi/scsi_transport_srp.c
445
struct srp_rport *rport = container_of(to_delayed_work(work),
drivers/scsi/sg.c
1298
sg_rq_end_io_usercontext(struct work_struct *work)
drivers/scsi/sg.c
1300
struct sg_request *srp = container_of(work, struct sg_request, ew.work);
drivers/scsi/sg.c
1405
INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
drivers/scsi/sg.c
1406
schedule_work(&srp->ew.work);
drivers/scsi/sg.c
2202
sg_remove_sfp_usercontext(struct work_struct *work)
drivers/scsi/sg.c
2204
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
drivers/scsi/sg.c
2256
INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
drivers/scsi/sg.c
2257
schedule_work(&sfp->ew.work);
drivers/scsi/smartpqi/smartpqi_init.c
103
static void pqi_tmf_worker(struct work_struct *work);
drivers/scsi/smartpqi/smartpqi_init.c
1125
static void pqi_update_time_worker(struct work_struct *work)
drivers/scsi/smartpqi/smartpqi_init.c
1130
ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
drivers/scsi/smartpqi/smartpqi_init.c
1892
static void pqi_rescan_worker(struct work_struct *work)
drivers/scsi/smartpqi/smartpqi_init.c
1896
ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
drivers/scsi/smartpqi/smartpqi_init.c
3692
static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
drivers/scsi/smartpqi/smartpqi_init.c
3696
ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
drivers/scsi/smartpqi/smartpqi_init.c
3703
static void pqi_ofa_quiesce_worker(struct work_struct *work)
drivers/scsi/smartpqi/smartpqi_init.c
3708
ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
drivers/scsi/smartpqi/smartpqi_init.c
3782
static void pqi_event_worker(struct work_struct *work)
drivers/scsi/smartpqi/smartpqi_init.c
3790
ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
drivers/scsi/smartpqi/smartpqi_init.c
6487
static void pqi_tmf_worker(struct work_struct *work)
drivers/scsi/smartpqi/smartpqi_init.c
6492
tmf_work = container_of(work, struct pqi_tmf_work, work_struct);
drivers/scsi/smartpqi/smartpqi_init.c
73
static void pqi_ctrl_offline_worker(struct work_struct *work);
drivers/scsi/smartpqi/smartpqi_init.c
9191
static void pqi_ctrl_offline_worker(struct work_struct *work)
drivers/scsi/smartpqi/smartpqi_init.c
9195
ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
drivers/scsi/snic/snic.h
244
struct work_struct work;
drivers/scsi/snic/snic_ctl.c
24
snic_handle_link(struct work_struct *work)
drivers/scsi/snic/snic_ctl.c
26
struct snic *snic = container_of(work, struct snic, link_work);
drivers/scsi/snic/snic_disc.c
150
snic_scsi_scan_tgt(struct work_struct *work)
drivers/scsi/snic/snic_disc.c
152
struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work);
drivers/scsi/snic/snic_disc.c
208
snic_tgt_del(struct work_struct *work)
drivers/scsi/snic/snic_disc.c
210
struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work);
drivers/scsi/snic/snic_disc.c
325
snic_handle_tgt_disc(struct work_struct *work)
drivers/scsi/snic/snic_disc.c
327
struct snic *snic = container_of(work, struct snic, tgt_work);
drivers/scsi/snic/snic_disc.c
509
snic_handle_disc(struct work_struct *work)
drivers/scsi/snic/snic_disc.c
511
struct snic *snic = container_of(work, struct snic, disc_work);
drivers/scsi/stex.c
1452
static void stex_reset_work(struct work_struct *work)
drivers/scsi/stex.c
1454
struct st_hba *hba = container_of(work, struct st_hba, reset_work);
drivers/scsi/storvsc_drv.c
1075
INIT_WORK(&wrk->work, process_err_fn);
drivers/scsi/storvsc_drv.c
1076
queue_work(host_dev->handle_error_wq, &wrk->work);
drivers/scsi/storvsc_drv.c
471
struct work_struct work;
drivers/scsi/storvsc_drv.c
477
static void storvsc_device_scan(struct work_struct *work)
drivers/scsi/storvsc_drv.c
482
wrk = container_of(work, struct storvsc_scan_work, work);
drivers/scsi/storvsc_drv.c
494
static void storvsc_host_scan(struct work_struct *work)
drivers/scsi/storvsc_drv.c
499
container_of(work, struct hv_host_device, host_scan_work);
drivers/scsi/storvsc_drv.c
523
static void storvsc_remove_lun(struct work_struct *work)
drivers/scsi/storvsc_drv.c
528
wrk = container_of(work, struct storvsc_scan_work, work);
drivers/scsi/storvsc_drv.c
987
void (*process_err_fn)(struct work_struct *work);
drivers/scsi/virtio_scsi.c
236
static void virtscsi_handle_event(struct work_struct *work);
drivers/scsi/virtio_scsi.c
245
INIT_WORK(&event_node->work, virtscsi_handle_event);
drivers/scsi/virtio_scsi.c
283
cancel_work_sync(&vscsi->event_list[i].work);
drivers/scsi/virtio_scsi.c
385
static void virtscsi_handle_event(struct work_struct *work)
drivers/scsi/virtio_scsi.c
388
container_of(work, struct virtio_scsi_event_node, work);
drivers/scsi/virtio_scsi.c
424
queue_work(system_freezable_wq, &event_node->work);
drivers/scsi/virtio_scsi.c
66
struct work_struct work;
drivers/scsi/vmw_pvscsi.c
1119
adapter = container_of(data, struct pvscsi_adapter, work);
drivers/scsi/vmw_pvscsi.c
1146
INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
drivers/scsi/vmw_pvscsi.c
1187
queue_work(adapter->workqueue, &adapter->work);
drivers/scsi/vmw_pvscsi.c
76
struct work_struct work;
drivers/sh/maple/maple.c
34
static void maple_dma_handler(struct work_struct *work);
drivers/sh/maple/maple.c
35
static void maple_vblank_handler(struct work_struct *work);
drivers/sh/maple/maple.c
446
static void maple_vblank_handler(struct work_struct *work)
drivers/sh/maple/maple.c
631
static void maple_dma_handler(struct work_struct *work)
drivers/slimbus/qcom-ngd-ctrl.c
1248
static void qcom_slim_ngd_master_worker(struct work_struct *work)
drivers/slimbus/qcom-ngd-ctrl.c
1257
ctrl = container_of(work, struct qcom_slim_ngd_ctrl, m_work);
drivers/slimbus/qcom-ngd-ctrl.c
1450
static void qcom_slim_ngd_up_worker(struct work_struct *work)
drivers/slimbus/qcom-ngd-ctrl.c
1454
ctrl = container_of(work, struct qcom_slim_ngd_ctrl, ngd_up_work);
drivers/soc/apple/rtkit.c
528
static void apple_rtkit_rx_work(struct work_struct *work)
drivers/soc/apple/rtkit.c
531
container_of(work, struct apple_rtkit_rx_work, work);
drivers/soc/apple/rtkit.c
573
struct apple_rtkit_rx_work *work;
drivers/soc/apple/rtkit.c
593
work = kzalloc_obj(*work, GFP_ATOMIC);
drivers/soc/apple/rtkit.c
594
if (!work)
drivers/soc/apple/rtkit.c
597
work->rtk = rtk;
drivers/soc/apple/rtkit.c
598
work->ep = ep;
drivers/soc/apple/rtkit.c
599
work->msg = msg.msg0;
drivers/soc/apple/rtkit.c
600
INIT_WORK(&work->work, apple_rtkit_rx_work);
drivers/soc/apple/rtkit.c
601
queue_work(rtk->wq, &work->work);
drivers/soc/apple/rtkit.c
81
struct work_struct work;
drivers/soc/fsl/dpio/dpio-service.c
114
struct dim *dim = container_of(w, struct dim, work);
drivers/soc/fsl/dpio/dpio-service.c
189
INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work);
drivers/soc/fsl/qbman/qman.c
1163
static void qm_congestion_task(struct work_struct *work);
drivers/soc/fsl/qbman/qman.c
1164
static void qm_mr_process_task(struct work_struct *work);
drivers/soc/fsl/qbman/qman.c
1451
static void qm_congestion_task(struct work_struct *work)
drivers/soc/fsl/qbman/qman.c
1453
struct qman_portal *p = container_of(work, struct qman_portal,
drivers/soc/fsl/qbman/qman.c
1486
static void qm_mr_process_task(struct work_struct *work)
drivers/soc/fsl/qbman/qman.c
1488
struct qman_portal *p = container_of(work, struct qman_portal,
drivers/soc/qcom/apr.c
315
static void apr_rxwq(struct work_struct *work)
drivers/soc/qcom/apr.c
317
struct packet_router *apr = container_of(work, struct packet_router, rx_work);
drivers/soc/qcom/pdr_interface.c
159
static void pdr_notifier_work(struct work_struct *work)
drivers/soc/qcom/pdr_interface.c
161
struct pdr_handle *pdr = container_of(work, struct pdr_handle,
drivers/soc/qcom/pdr_interface.c
268
static void pdr_indack_work(struct work_struct *work)
drivers/soc/qcom/pdr_interface.c
270
struct pdr_handle *pdr = container_of(work, struct pdr_handle,
drivers/soc/qcom/pdr_interface.c
459
static void pdr_locator_work(struct work_struct *work)
drivers/soc/qcom/pdr_interface.c
461
struct pdr_handle *pdr = container_of(work, struct pdr_handle,
drivers/soc/qcom/pmic_glink_altmode.c
111
struct work_struct work;
drivers/soc/qcom/pmic_glink_altmode.c
348
static void pmic_glink_altmode_worker(struct work_struct *work)
drivers/soc/qcom/pmic_glink_altmode.c
350
struct pmic_glink_altmode_port *alt_port = work_to_altmode_port(work);
drivers/soc/qcom/pmic_glink_altmode.c
453
schedule_work(&alt_port->work);
drivers/soc/qcom/pmic_glink_altmode.c
504
schedule_work(&alt_port->work);
drivers/soc/qcom/pmic_glink_altmode.c
545
static void pmic_glink_altmode_enable_worker(struct work_struct *work)
drivers/soc/qcom/pmic_glink_altmode.c
547
struct pmic_glink_altmode *altmode = work_to_altmode(work);
drivers/soc/qcom/pmic_glink_altmode.c
617
INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
drivers/soc/qcom/pmic_glink_altmode.c
97
#define work_to_altmode_port(w) container_of((w), struct pmic_glink_altmode_port, work)
drivers/soc/qcom/pmic_pdcharger_ulog.c
67
static void pmic_pdcharger_ulog_work(struct work_struct *work)
drivers/soc/qcom/pmic_pdcharger_ulog.c
69
struct pmic_pdcharger_ulog *pg = container_of(work, struct pmic_pdcharger_ulog,
drivers/soc/qcom/pmic_pdcharger_ulog.c
70
ulog_work.work);
drivers/soc/qcom/qmi_interface.c
523
static void qmi_data_ready_work(struct work_struct *work)
drivers/soc/qcom/qmi_interface.c
525
struct qmi_handle *qmi = container_of(work, struct qmi_handle, work);
drivers/soc/qcom/qmi_interface.c
582
queue_work(qmi->wq, &qmi->work);
drivers/soc/qcom/qmi_interface.c
636
INIT_WORK(&qmi->work, qmi_data_ready_work);
drivers/soc/qcom/qmi_interface.c
693
cancel_work_sync(&qmi->work);
drivers/soc/qcom/wcnss_ctrl.c
291
static void wcnss_async_probe(struct work_struct *work)
drivers/soc/qcom/wcnss_ctrl.c
293
struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, probe_work);
drivers/soc/tegra/pmc.c
2079
static void tegra186_pmc_wake_handler(struct irq_work *work)
drivers/soc/tegra/pmc.c
2081
struct tegra_pmc *pmc = container_of(work, struct tegra_pmc, wake_work);
drivers/soc/xilinx/zynqmp_power.c
159
static void zynqmp_pm_subsystem_restart_work_fn(struct work_struct *work)
drivers/soc/xilinx/zynqmp_power.c
162
struct zynqmp_pm_work_struct *pm_work = container_of(work, struct zynqmp_pm_work_struct,
drivers/soc/xilinx/zynqmp_power.c
190
static void zynqmp_pm_init_suspend_work_fn(struct work_struct *work)
drivers/soc/xilinx/zynqmp_power.c
193
container_of(work, struct zynqmp_pm_work_struct, callback_work);
drivers/soundwire/amd_manager.c
859
static void amd_sdw_update_slave_status_work(struct work_struct *work)
drivers/soundwire/amd_manager.c
862
container_of(work, struct amd_sdw_manager, amd_sdw_work);
drivers/soundwire/amd_manager.c
927
static void amd_sdw_irq_thread(struct work_struct *work)
drivers/soundwire/amd_manager.c
930
container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
drivers/soundwire/cadence_master.c
1002
container_of(work, struct sdw_cdns, attach_dwork.work);
drivers/soundwire/cadence_master.c
1030
static void cdns_update_slave_status_work(struct work_struct *work)
drivers/soundwire/cadence_master.c
1033
container_of(work, struct sdw_cdns, work);
drivers/soundwire/cadence_master.c
1259
cancel_work_sync(&cdns->work);
drivers/soundwire/cadence_master.c
1824
INIT_WORK(&cdns->work, cdns_update_slave_status_work);
drivers/soundwire/cadence_master.c
991
schedule_work(&cdns->work);
drivers/soundwire/cadence_master.c
999
static void cdns_check_attached_status_dwork(struct work_struct *work)
drivers/soundwire/cadence_master.h
153
struct work_struct work;
drivers/spi/spi-lantiq-ssc.c
175
struct work_struct work;
drivers/spi/spi-lantiq-ssc.c
659
queue_work(spi->wq, &spi->work);
drivers/spi/spi-lantiq-ssc.c
698
queue_work(spi->wq, &spi->work);
drivers/spi/spi-lantiq-ssc.c
759
static void lantiq_ssc_bussy_work(struct work_struct *work)
drivers/spi/spi-lantiq-ssc.c
765
spi = container_of(work, typeof(*spi), work);
drivers/spi/spi-lantiq-ssc.c
983
INIT_WORK(&spi->work, lantiq_ssc_bussy_work);
drivers/spi/spi-mpc52xx.c
336
schedule_work(&ms->work);
drivers/spi/spi-mpc52xx.c
354
static void mpc52xx_spi_wq(struct work_struct *work)
drivers/spi/spi-mpc52xx.c
356
struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work);
drivers/spi/spi-mpc52xx.c
379
schedule_work(&ms->work);
drivers/spi/spi-mpc52xx.c
469
INIT_WORK(&ms->work, mpc52xx_spi_wq);
drivers/spi/spi-mpc52xx.c
520
cancel_work_sync(&ms->work);
drivers/spi/spi-mpc52xx.c
82
struct work_struct work;
drivers/spi/spi-topcliff-pch.c
1109
data = container_of(pwork, struct pch_spi_data, work);
drivers/spi/spi-topcliff-pch.c
1242
flush_work(&data->work);
drivers/spi/spi-topcliff-pch.c
1350
INIT_WORK(&data->work, pch_spi_process_messages);
drivers/spi/spi-topcliff-pch.c
165
struct work_struct work;
drivers/spi/spi-topcliff-pch.c
488
schedule_work(&data->work);
drivers/spi/spi-topcliff-pch.c
642
schedule_work(&data->work);
drivers/spi/spi.c
1924
static void spi_pump_messages(struct kthread_work *work)
drivers/spi/spi.c
1927
container_of(work, struct spi_controller, pump_messages);
drivers/staging/greybus/bootrom.c
52
static void gb_bootrom_timedout(struct work_struct *work)
drivers/staging/greybus/bootrom.c
54
struct delayed_work *dwork = to_delayed_work(work);
drivers/staging/greybus/fw-download.c
128
static void fw_request_timedout(struct work_struct *work)
drivers/staging/greybus/fw-download.c
130
struct delayed_work *dwork = to_delayed_work(work);
drivers/staging/greybus/power_supply.c
32
struct delayed_work work;
drivers/staging/greybus/power_supply.c
734
static void gb_power_supply_work(struct work_struct *work)
drivers/staging/greybus/power_supply.c
736
struct gb_power_supply *gbpsy = container_of(work,
drivers/staging/greybus/power_supply.c
738
work.work);
drivers/staging/greybus/power_supply.c
749
schedule_delayed_work(&gbpsy->work, gbpsy->update_interval);
drivers/staging/greybus/power_supply.c
849
cancel_delayed_work_sync(&gbpsy->work);
drivers/staging/greybus/power_supply.c
923
INIT_DELAYED_WORK(&gbpsy->work, gb_power_supply_work);
drivers/staging/greybus/power_supply.c
924
schedule_delayed_work(&gbpsy->work, 0);
drivers/staging/greybus/sdio.c
496
static void gb_sdio_mrq_work(struct work_struct *work)
drivers/staging/greybus/sdio.c
502
host = container_of(work, struct gb_sdio_host, mrqwork);
drivers/staging/greybus/uart.c
225
static void gb_uart_tx_write_work(struct work_struct *work)
drivers/staging/greybus/uart.c
233
gb_tty = container_of(work, struct gb_tty, tx_work);
drivers/staging/greybus/uart.c
268
schedule_work(work);
drivers/staging/greybus/vibrator.c
67
static void gb_vibrator_worker(struct work_struct *work)
drivers/staging/greybus/vibrator.c
69
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/staging/iio/impedance-analyzer/ad5933.c
590
schedule_delayed_work(&st->work,
drivers/staging/iio/impedance-analyzer/ad5933.c
599
cancel_delayed_work_sync(&st->work);
drivers/staging/iio/impedance-analyzer/ad5933.c
609
static void ad5933_work(struct work_struct *work)
drivers/staging/iio/impedance-analyzer/ad5933.c
611
struct ad5933_state *st = container_of(work,
drivers/staging/iio/impedance-analyzer/ad5933.c
612
struct ad5933_state, work.work);
drivers/staging/iio/impedance-analyzer/ad5933.c
623
schedule_delayed_work(&st->work, st->poll_time_jiffies);
drivers/staging/iio/impedance-analyzer/ad5933.c
650
schedule_delayed_work(&st->work, st->poll_time_jiffies);
drivers/staging/iio/impedance-analyzer/ad5933.c
663
schedule_delayed_work(&st->work, st->poll_time_jiffies);
drivers/staging/iio/impedance-analyzer/ad5933.c
707
INIT_DELAYED_WORK(&st->work, ad5933_work);
drivers/staging/iio/impedance-analyzer/ad5933.c
88
struct delayed_work work;
drivers/staging/media/atomisp/pci/atomisp_cmd.c
864
void atomisp_assert_recovery_work(struct work_struct *work)
drivers/staging/media/atomisp/pci/atomisp_cmd.c
866
struct atomisp_device *isp = container_of(work, struct atomisp_device,
drivers/staging/media/atomisp/pci/atomisp_cmd.h
49
void atomisp_assert_recovery_work(struct work_struct *work);
drivers/staging/media/meson/vdec/esparser.c
372
void esparser_queue_all_src(struct work_struct *work)
drivers/staging/media/meson/vdec/esparser.c
376
container_of(work, struct amvdec_session, esparser_queue_work);
drivers/staging/media/meson/vdec/esparser.h
32
void esparser_queue_all_src(struct work_struct *work);
drivers/staging/media/sunxi/cedrus/cedrus_hw.c
170
void cedrus_watchdog(struct work_struct *work)
drivers/staging/media/sunxi/cedrus/cedrus_hw.c
175
dev = container_of(to_delayed_work(work),
drivers/staging/media/sunxi/cedrus/cedrus_hw.h
31
void cedrus_watchdog(struct work_struct *work);
drivers/staging/nvec/nvec.c
379
static void nvec_request_master(struct work_struct *work)
drivers/staging/nvec/nvec.c
381
struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
drivers/staging/nvec/nvec.c
443
static void nvec_dispatch(struct work_struct *work)
drivers/staging/nvec/nvec.c
445
struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
drivers/staging/nvec/nvec_power.c
348
static void nvec_power_poll(struct work_struct *work)
drivers/staging/nvec/nvec_power.c
351
struct nvec_power *power = container_of(work, struct nvec_power,
drivers/staging/nvec/nvec_power.c
352
poller.work);
drivers/staging/nvec/nvec_power.c
369
schedule_delayed_work(to_delayed_work(work), msecs_to_jiffies(5000));
drivers/staging/octeon/ethernet-rx.c
102
while (i < work->word1.len - 1) {
drivers/staging/octeon/ethernet-rx.c
111
work->packet_ptr.s.addr += i + 1;
drivers/staging/octeon/ethernet-rx.c
112
work->word1.len -= i + 5;
drivers/staging/octeon/ethernet-rx.c
118
work->packet_ptr.s.addr += i;
drivers/staging/octeon/ethernet-rx.c
119
work->word1.len -= i + 4;
drivers/staging/octeon/ethernet-rx.c
120
for (i = 0; i < work->word1.len; i++) {
drivers/staging/octeon/ethernet-rx.c
131
cvm_oct_free_work(work);
drivers/staging/octeon/ethernet-rx.c
137
port, work->word2.snoip.err_code);
drivers/staging/octeon/ethernet-rx.c
138
cvm_oct_free_work(work);
drivers/staging/octeon/ethernet-rx.c
142
static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
drivers/staging/octeon/ethernet-rx.c
144
int segments = work->word2.s.bufs;
drivers/staging/octeon/ethernet-rx.c
145
union cvmx_buf_ptr segment_ptr = work->packet_ptr;
drivers/staging/octeon/ethernet-rx.c
146
int len = work->word1.len;
drivers/staging/octeon/ethernet-rx.c
222
struct cvmx_wqe *work;
drivers/staging/octeon/ethernet-rx.c
226
work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
drivers/staging/octeon/ethernet-rx.c
228
work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
drivers/staging/octeon/ethernet-rx.c
230
prefetch(work);
drivers/staging/octeon/ethernet-rx.c
232
if (!work) {
drivers/staging/octeon/ethernet-rx.c
249
(cvm_oct_get_buffer_ptr(work->packet_ptr) -
drivers/staging/octeon/ethernet-rx.c
260
skb_in_hw = work->word2.s.bufs == 1;
drivers/staging/octeon/ethernet-rx.c
268
port = work->word0.pip.cn68xx.pknd;
drivers/staging/octeon/ethernet-rx.c
270
port = work->word1.cn38xx.ipprt;
drivers/staging/octeon/ethernet-rx.c
275
if (unlikely(work->word2.snoip.rcv_error)) {
drivers/staging/octeon/ethernet-rx.c
276
if (cvm_oct_check_rcv_error(work))
drivers/staging/octeon/ethernet-rx.c
286
skb->data = skb->head + work->packet_ptr.s.addr -
drivers/staging/octeon/ethernet-rx.c
289
skb->len = work->word1.len;
drivers/staging/octeon/ethernet-rx.c
297
skb = dev_alloc_skb(work->word1.len);
drivers/staging/octeon/ethernet-rx.c
299
cvm_oct_free_work(work);
drivers/staging/octeon/ethernet-rx.c
307
if (unlikely(work->word2.s.bufs == 0)) {
drivers/staging/octeon/ethernet-rx.c
308
u8 *ptr = work->packet_data;
drivers/staging/octeon/ethernet-rx.c
310
if (likely(!work->word2.s.not_IP)) {
drivers/staging/octeon/ethernet-rx.c
315
if (work->word2.s.is_v6)
drivers/staging/octeon/ethernet-rx.c
320
skb_put_data(skb, ptr, work->word1.len);
drivers/staging/octeon/ethernet-rx.c
323
copy_segments_to_skb(work, skb);
drivers/staging/octeon/ethernet-rx.c
339
if (unlikely(work->word2.s.not_IP ||
drivers/staging/octeon/ethernet-rx.c
340
work->word2.s.IP_exc ||
drivers/staging/octeon/ethernet-rx.c
341
work->word2.s.L4_error ||
drivers/staging/octeon/ethernet-rx.c
342
!work->word2.s.tcp_or_udp))
drivers/staging/octeon/ethernet-rx.c
383
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
drivers/staging/octeon/ethernet-rx.c
385
cvm_oct_free_work(work);
drivers/staging/octeon/ethernet-rx.c
63
static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
drivers/staging/octeon/ethernet-rx.c
68
port = work->word0.pip.cn68xx.pknd;
drivers/staging/octeon/ethernet-rx.c
70
port = work->word1.cn38xx.ipprt;
drivers/staging/octeon/ethernet-rx.c
72
if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))
drivers/staging/octeon/ethernet-rx.c
81
if (work->word2.snoip.err_code == 5 ||
drivers/staging/octeon/ethernet-rx.c
82
work->word2.snoip.err_code == 7) {
drivers/staging/octeon/ethernet-rx.c
99
cvmx_phys_to_ptr(work->packet_ptr.s.addr);
drivers/staging/octeon/ethernet-tx.c
513
struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
drivers/staging/octeon/ethernet-tx.c
515
if (unlikely(!work)) {
drivers/staging/octeon/ethernet-tx.c
528
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
drivers/staging/octeon/ethernet-tx.c
558
work->word0.pip.cn38xx.hw_chksum = skb->csum;
drivers/staging/octeon/ethernet-tx.c
559
work->word1.len = skb->len;
drivers/staging/octeon/ethernet-tx.c
560
cvmx_wqe_set_port(work, priv->port);
drivers/staging/octeon/ethernet-tx.c
561
cvmx_wqe_set_qos(work, priv->port & 0x7);
drivers/staging/octeon/ethernet-tx.c
562
cvmx_wqe_set_grp(work, pow_send_group);
drivers/staging/octeon/ethernet-tx.c
563
work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
drivers/staging/octeon/ethernet-tx.c
564
work->word1.tag = pow_send_group; /* FIXME */
drivers/staging/octeon/ethernet-tx.c
566
work->word2.u64 = 0;
drivers/staging/octeon/ethernet-tx.c
567
work->word2.s.bufs = 1;
drivers/staging/octeon/ethernet-tx.c
568
work->packet_ptr.u64 = 0;
drivers/staging/octeon/ethernet-tx.c
569
work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
drivers/staging/octeon/ethernet-tx.c
570
work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
drivers/staging/octeon/ethernet-tx.c
571
work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
drivers/staging/octeon/ethernet-tx.c
572
work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
drivers/staging/octeon/ethernet-tx.c
575
work->word2.s.ip_offset = 14;
drivers/staging/octeon/ethernet-tx.c
576
work->word2.s.tcp_or_udp =
drivers/staging/octeon/ethernet-tx.c
579
work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
drivers/staging/octeon/ethernet-tx.c
582
work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
drivers/staging/octeon/ethernet-tx.c
583
work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
drivers/staging/octeon/ethernet-tx.c
590
memcpy(work->packet_data, skb->data + 10,
drivers/staging/octeon/ethernet-tx.c
591
sizeof(work->packet_data));
drivers/staging/octeon/ethernet-tx.c
593
work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
drivers/staging/octeon/ethernet-tx.c
594
work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
drivers/staging/octeon/ethernet-tx.c
595
work->word2.snoip.is_bcast =
drivers/staging/octeon/ethernet-tx.c
597
work->word2.snoip.is_mcast =
drivers/staging/octeon/ethernet-tx.c
599
work->word2.snoip.not_IP = 1; /* IP was done up above */
drivers/staging/octeon/ethernet-tx.c
600
memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
drivers/staging/octeon/ethernet-tx.c
604
cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
drivers/staging/octeon/ethernet-tx.c
605
cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
drivers/staging/octeon/ethernet.c
107
static void cvm_oct_rx_refill_worker(struct work_struct *work);
drivers/staging/octeon/ethernet.c
110
static void cvm_oct_rx_refill_worker(struct work_struct *work)
drivers/staging/octeon/ethernet.c
125
static void cvm_oct_periodic_worker(struct work_struct *work)
drivers/staging/octeon/ethernet.c
127
struct octeon_ethernet *priv = container_of(work,
drivers/staging/octeon/ethernet.c
129
port_periodic_work.work);
drivers/staging/octeon/ethernet.c
176
struct cvmx_wqe *work = work_queue_entry;
drivers/staging/octeon/ethernet.c
178
int segments = work->word2.s.bufs;
drivers/staging/octeon/ethernet.c
179
union cvmx_buf_ptr segment_ptr = work->packet_ptr;
drivers/staging/octeon/ethernet.c
190
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
drivers/staging/octeon/octeon-stubs.h
1207
static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work)
drivers/staging/octeon/octeon-stubs.h
1396
static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port)
drivers/staging/octeon/octeon-stubs.h
1399
static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos)
drivers/staging/octeon/octeon-stubs.h
1402
static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work)
drivers/staging/octeon/octeon-stubs.h
1407
static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp)
drivers/staging/rtl8723bs/core/rtw_cmd.c
1681
static void c2h_wk_callback(struct work_struct *work)
drivers/staging/rtl8723bs/core/rtw_cmd.c
1683
struct evt_priv *evtpriv = container_of(work, struct evt_priv, c2h_wk);
drivers/staging/rtl8723bs/core/rtw_cmd.c
197
static void c2h_wk_callback(struct work_struct *work);
drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
612
static void cpwm_event_callback(struct work_struct *work)
drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
614
struct pwrctrl_priv *pwrpriv = container_of(work, struct pwrctrl_priv, cpwm_event);
drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
623
static void rpwmtimeout_workitem_callback(struct work_struct *work)
drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
630
pwrpriv = container_of(work, struct pwrctrl_priv, rpwmtimeoutwi);
drivers/target/iscsi/cxgbit/cxgbit_main.c
534
struct work_struct work;
drivers/target/iscsi/cxgbit/cxgbit_main.c
587
static void cxgbit_dcb_workfn(struct work_struct *work)
drivers/target/iscsi/cxgbit/cxgbit_main.c
595
dcb_work = container_of(work, struct cxgbit_dcb_work, work);
drivers/target/iscsi/cxgbit/cxgbit_main.c
654
INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
drivers/target/iscsi/cxgbit/cxgbit_main.c
655
schedule_work(&dcb_work->work);
drivers/target/iscsi/iscsi_target_nego.c
538
static void iscsi_target_do_login_rx(struct work_struct *work)
drivers/target/iscsi/iscsi_target_nego.c
540
struct iscsit_conn *conn = container_of(work,
drivers/target/iscsi/iscsi_target_nego.c
541
struct iscsit_conn, login_work.work);
drivers/target/loopback/tcm_loop.c
279
flush_work(&se_cmd->work);
drivers/target/sbp/sbp_target.c
1029
INIT_WORK(&agent->work, tgt_agent_fetch_work);
drivers/target/sbp/sbp_target.c
1046
cancel_work_sync(&agent->work);
drivers/target/sbp/sbp_target.c
1449
static void sbp_mgt_agent_process(struct work_struct *work)
drivers/target/sbp/sbp_target.c
1452
container_of(work, struct sbp_management_agent, work);
drivers/target/sbp/sbp_target.c
1620
queue_work(system_dfl_wq, &agent->work);
drivers/target/sbp/sbp_target.c
1649
INIT_WORK(&agent->work, sbp_mgt_agent_process);
drivers/target/sbp/sbp_target.c
1666
cancel_work_sync(&agent->work);
drivers/target/sbp/sbp_target.c
638
static void session_maintenance_work(struct work_struct *work)
drivers/target/sbp/sbp_target.c
640
struct sbp_session *sess = container_of(work, struct sbp_session,
drivers/target/sbp/sbp_target.c
641
maint_work.work);
drivers/target/sbp/sbp_target.c
732
queue_work(system_dfl_wq, &agent->work);
drivers/target/sbp/sbp_target.c
766
queue_work(system_dfl_wq, &agent->work);
drivers/target/sbp/sbp_target.c
853
static void tgt_agent_process_work(struct work_struct *work)
drivers/target/sbp/sbp_target.c
856
container_of(work, struct sbp_target_request, work);
drivers/target/sbp/sbp_target.c
929
static void tgt_agent_fetch_work(struct work_struct *work)
drivers/target/sbp/sbp_target.c
932
container_of(work, struct sbp_target_agent, work);
drivers/target/sbp/sbp_target.c
991
INIT_WORK(&req->work, tgt_agent_process_work);
drivers/target/sbp/sbp_target.c
992
queue_work(system_dfl_wq, &req->work);
drivers/target/sbp/sbp_target.h
205
struct work_struct work;
drivers/target/sbp/sbp_target.h
215
struct work_struct work;
drivers/target/sbp/sbp_target.h
229
struct work_struct work;
drivers/target/target_core_device.c
742
INIT_WORK(&q->sq.work, target_queued_submit_work);
drivers/target/target_core_internal.h
153
void target_qf_do_work(struct work_struct *work);
drivers/target/target_core_internal.h
154
void target_do_delayed_work(struct work_struct *work);
drivers/target/target_core_internal.h
158
void target_queued_submit_work(struct work_struct *work);
drivers/target/target_core_tmr.c
118
flush_work(&dev->queues[i].sq.work);
drivers/target/target_core_tmr.c
301
flush_work(&dev->queues[i].sq.work);
drivers/target/target_core_transport.c
1002
void target_qf_do_work(struct work_struct *work)
drivers/target/target_core_transport.c
1004
struct se_device *dev = container_of(work, struct se_device,
drivers/target/target_core_transport.c
1462
INIT_WORK(&cmd->work, NULL);
drivers/target/target_core_transport.c
1886
void target_queued_submit_work(struct work_struct *work)
drivers/target/target_core_transport.c
1888
struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
drivers/target/target_core_transport.c
1925
queue_work_on(cpu, target_submission_wq, &sq->work);
drivers/target/target_core_transport.c
1957
static void target_complete_tmr_failure(struct work_struct *work)
drivers/target/target_core_transport.c
1959
struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
drivers/target/target_core_transport.c
2029
INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
drivers/target/target_core_transport.c
2030
schedule_work(&se_cmd->work);
drivers/target/target_core_transport.c
2056
INIT_WORK(&cmd->work, target_abort_work);
drivers/target/target_core_transport.c
2057
queue_work(target_completion_wq, &cmd->work);
drivers/target/target_core_transport.c
2311
void target_do_delayed_work(struct work_struct *work)
drivers/target/target_core_transport.c
2313
struct se_device *dev = container_of(work, struct se_device,
drivers/target/target_core_transport.c
2521
static void target_complete_ok_work(struct work_struct *work)
drivers/target/target_core_transport.c
2523
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
drivers/target/target_core_transport.c
3584
static void target_tmr_work(struct work_struct *work)
drivers/target/target_core_transport.c
3586
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
drivers/target/target_core_transport.c
3665
INIT_WORK(&cmd->work, target_tmr_work);
drivers/target/target_core_transport.c
3666
schedule_work(&cmd->work);
drivers/target/target_core_transport.c
58
static void target_complete_ok_work(struct work_struct *work);
drivers/target/target_core_transport.c
784
static void target_complete_failure_work(struct work_struct *work)
drivers/target/target_core_transport.c
786
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
drivers/target/target_core_transport.c
878
static void target_abort_work(struct work_struct *work)
drivers/target/target_core_transport.c
880
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
drivers/target/target_core_transport.c
892
INIT_WORK(&cmd->work, target_abort_work);
drivers/target/target_core_transport.c
893
queue_work(target_completion_wq, &cmd->work);
drivers/target/target_core_transport.c
936
INIT_WORK(&cmd->work, success ? target_complete_ok_work :
drivers/target/target_core_transport.c
944
queue_work_on(cpu, target_completion_wq, &cmd->work);
drivers/target/target_core_user.c
3305
static void tcmu_unmap_work_fn(struct work_struct *work)
drivers/target/target_core_xcopy.c
657
static void target_xcopy_do_work(struct work_struct *work)
drivers/target/target_core_xcopy.c
659
struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
drivers/target/tcm_fc/tcm_fc.h
112
struct work_struct work;
drivers/target/tcm_fc/tfc_cmd.c
415
static void ft_send_work(struct work_struct *work);
drivers/target/tcm_fc/tfc_cmd.c
444
INIT_WORK(&cmd->work, ft_send_work);
drivers/target/tcm_fc/tfc_cmd.c
445
queue_work(sess->tport->tpg->workqueue, &cmd->work);
drivers/target/tcm_fc/tfc_cmd.c
485
static void ft_send_work(struct work_struct *work)
drivers/target/tcm_fc/tfc_cmd.c
487
struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
drivers/target/tcm_fc/tfc_io.c
185
static void ft_execute_work(struct work_struct *work)
drivers/target/tcm_fc/tfc_io.c
187
struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
drivers/target/tcm_fc/tfc_io.c
316
INIT_WORK(&cmd->work, ft_execute_work);
drivers/target/tcm_fc/tfc_io.c
317
queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
drivers/tee/optee/core.c
35
void optee_bus_scan_rpmb(struct work_struct *work)
drivers/tee/optee/core.c
37
struct optee *optee = container_of(work, struct optee,
drivers/tee/optee/core.c
89
static void optee_bus_scan(struct work_struct *work)
drivers/tee/optee/ffa_abi.c
868
static void notif_work_fn(struct work_struct *work)
drivers/tee/optee/ffa_abi.c
870
struct optee_ffa *optee_ffa = container_of(work, struct optee_ffa,
drivers/tee/optee/optee_private.h
342
void optee_bus_scan_rpmb(struct work_struct *work);
drivers/tee/optee/smc_abi.c
1119
static void notif_pcpu_irq_work_fn(struct work_struct *work)
drivers/tee/optee/smc_abi.c
1121
struct optee_smc *optee_smc = container_of(work, struct optee_smc,
drivers/tee/qcomtee/core.c
101
queue_work(qcomtee->wq, &object->work);
drivers/tee/qcomtee/core.c
115
INIT_WORK(&object->work, qcomtee_do_release_qtee_object);
drivers/tee/qcomtee/core.c
116
queue_work(qcomtee->wq, &object->work);
drivers/tee/qcomtee/core.c
81
static void qcomtee_do_release_qtee_object(struct work_struct *work)
drivers/tee/qcomtee/core.c
90
object = container_of(work, struct qcomtee_object, work);
drivers/tee/qcomtee/qcomtee_object.h
243
struct work_struct work;
drivers/thermal/da9062-thermal.c
104
queue_delayed_work(system_freezable_wq, &thermal->work, delay);
drivers/thermal/da9062-thermal.c
123
queue_delayed_work(system_freezable_wq, &thermal->work, 0);
drivers/thermal/da9062-thermal.c
196
INIT_DELAYED_WORK(&thermal->work, da9062_thermal_poll_on);
drivers/thermal/da9062-thermal.c
247
cancel_delayed_work_sync(&thermal->work);
drivers/thermal/da9062-thermal.c
52
struct delayed_work work;
drivers/thermal/da9062-thermal.c
61
static void da9062_thermal_poll_on(struct work_struct *work)
drivers/thermal/da9062-thermal.c
63
struct da9062_thermal *thermal = container_of(work,
drivers/thermal/da9062-thermal.c
65
work.work);
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
116
static void proc_thermal_threshold_work_fn(struct work_struct *work)
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
118
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
120
struct proc_thermal_pci, work);
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
130
static void pkg_thermal_schedule_work(struct delayed_work *work)
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
134
schedule_delayed_work(work, ms);
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
208
pkg_thermal_schedule_work(&pci_info->work);
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
235
cancel_delayed_work_sync(&pci_info->work);
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
27
struct delayed_work work;
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
356
INIT_DELAYED_WORK(&pci_info->work, proc_thermal_threshold_work_fn);
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
427
cancel_delayed_work_sync(&pci_info->work);
drivers/thermal/intel/intel_hfi.c
243
static void hfi_update_work_fn(struct work_struct *work)
drivers/thermal/intel/intel_hfi.c
247
hfi_instance = container_of(to_delayed_work(work), struct hfi_instance,
drivers/thermal/intel/therm_throt.c
300
static void __maybe_unused throttle_active_work(struct work_struct *work)
drivers/thermal/intel/therm_throt.c
302
struct _thermal_state *state = container_of(to_delayed_work(work),
drivers/thermal/intel/x86_pkg_temp_thermal.c
213
static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
drivers/thermal/intel/x86_pkg_temp_thermal.c
247
static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
drivers/thermal/intel/x86_pkg_temp_thermal.c
251
schedule_delayed_work_on(cpu, work, ms);
drivers/thermal/intel/x86_pkg_temp_thermal.c
269
pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
drivers/thermal/intel/x86_pkg_temp_thermal.c
346
INIT_DELAYED_WORK(&zonedev->work, pkg_temp_thermal_threshold_work_fn);
drivers/thermal/intel/x86_pkg_temp_thermal.c
443
cancel_delayed_work_sync(&zonedev->work);
drivers/thermal/intel/x86_pkg_temp_thermal.c
452
pkg_thermal_schedule_work(target, &zonedev->work);
drivers/thermal/intel/x86_pkg_temp_thermal.c
55
struct delayed_work work;
drivers/thermal/renesas/rcar_thermal.c
309
static void rcar_thermal_work(struct work_struct *work)
drivers/thermal/renesas/rcar_thermal.c
314
priv = container_of(work, struct rcar_thermal_priv, work.work);
drivers/thermal/renesas/rcar_thermal.c
363
queue_delayed_work(system_freezable_wq, &priv->work,
drivers/thermal/renesas/rcar_thermal.c
382
cancel_delayed_work_sync(&priv->work);
drivers/thermal/renesas/rcar_thermal.c
481
INIT_DELAYED_WORK(&priv->work, rcar_thermal_work);
drivers/thermal/renesas/rcar_thermal.c
94
struct delayed_work work;
drivers/thermal/tegra/tegra-bpmp-thermal.c
100
static void tz_device_update_work_fn(struct work_struct *work)
drivers/thermal/tegra/tegra-bpmp-thermal.c
104
zone = container_of(work, struct tegra_bpmp_thermal_zone,
drivers/thermal/testing/zone.c
159
static void tt_add_tz_work_fn(struct work_struct *work)
drivers/thermal/testing/zone.c
161
struct tt_work *tt_work = tt_work_of_work(work);
drivers/thermal/testing/zone.c
208
INIT_WORK(&tt_work->work, tt_add_tz_work_fn);
drivers/thermal/testing/zone.c
210
schedule_work(&(no_free_ptr(tt_work)->work));
drivers/thermal/testing/zone.c
215
static void tt_del_tz_work_fn(struct work_struct *work)
drivers/thermal/testing/zone.c
217
struct tt_work *tt_work = tt_work_of_work(work);
drivers/thermal/testing/zone.c
270
INIT_WORK(&tt_work->work, tt_del_tz_work_fn);
drivers/thermal/testing/zone.c
272
schedule_work(&(no_free_ptr(tt_work)->work));
drivers/thermal/testing/zone.c
308
static void tt_zone_add_trip_work_fn(struct work_struct *work)
drivers/thermal/testing/zone.c
310
struct tt_work *tt_work = tt_work_of_work(work);
drivers/thermal/testing/zone.c
358
INIT_WORK(&tt_work->work, tt_zone_add_trip_work_fn);
drivers/thermal/testing/zone.c
361
schedule_work(&(no_free_ptr(tt_work)->work));
drivers/thermal/testing/zone.c
80
struct work_struct work;
drivers/thermal/testing/zone.c
85
static inline struct tt_work *tt_work_of_work(struct work_struct *work)
drivers/thermal/testing/zone.c
87
return container_of(work, struct tt_work, work);
drivers/thermal/thermal_core.c
1393
static void thermal_zone_device_check(struct work_struct *work)
drivers/thermal/thermal_core.c
1395
struct thermal_zone_device *tz = container_of(work, struct
drivers/thermal/thermal_core.c
1397
poll_queue.work);
drivers/thermal/thermal_core.c
1783
static void thermal_zone_device_resume(struct work_struct *work)
drivers/thermal/thermal_core.c
1787
tz = container_of(work, struct thermal_zone_device, poll_queue.work);
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
41
static void ti_thermal_work(struct work_struct *work)
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
43
struct ti_thermal_data *data = container_of(work,
drivers/thunderbolt/ctl.c
516
schedule_work(&req->work);
drivers/thunderbolt/ctl.c
524
static void tb_cfg_request_work(struct work_struct *work)
drivers/thunderbolt/ctl.c
526
struct tb_cfg_request *req = container_of(work, typeof(*req), work);
drivers/thunderbolt/ctl.c
555
INIT_WORK(&req->work, tb_cfg_request_work);
drivers/thunderbolt/ctl.c
569
schedule_work(&req->work);
drivers/thunderbolt/ctl.c
592
schedule_work(&req->work);
drivers/thunderbolt/ctl.c
634
flush_work(&req->work);
drivers/thunderbolt/ctl.h
93
struct work_struct work;
drivers/thunderbolt/icm.c
127
struct work_struct work;
drivers/thunderbolt/icm.c
1723
static void icm_handle_notification(struct work_struct *work)
drivers/thunderbolt/icm.c
1725
struct icm_notification *n = container_of(work, typeof(*n), work);
drivers/thunderbolt/icm.c
1782
INIT_WORK(&n->work, icm_handle_notification);
drivers/thunderbolt/icm.c
1785
queue_work(tb->wq, &n->work);
drivers/thunderbolt/icm.c
2106
static void icm_rescan_work(struct work_struct *work)
drivers/thunderbolt/icm.c
2108
struct icm *icm = container_of(work, struct icm, rescan_work.work);
drivers/thunderbolt/nhi.c
274
static void ring_work(struct work_struct *work)
drivers/thunderbolt/nhi.c
276
struct tb_ring *ring = container_of(work, typeof(*ring), work);
drivers/thunderbolt/nhi.c
409
schedule_work(&ring->work);
drivers/thunderbolt/nhi.c
597
INIT_WORK(&ring->work, ring_work);
drivers/thunderbolt/nhi.c
801
schedule_work(&ring->work);
drivers/thunderbolt/nhi.c
802
flush_work(&ring->work);
drivers/thunderbolt/nhi.c
852
flush_work(&ring->work);
drivers/thunderbolt/nhi.c
915
static void nhi_interrupt_work(struct work_struct *work)
drivers/thunderbolt/nhi.c
917
struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
drivers/thunderbolt/tb.c
105
INIT_DELAYED_WORK(&ev->work, tb_handle_hotplug);
drivers/thunderbolt/tb.c
106
queue_delayed_work(tb->wq, &ev->work, 0);
drivers/thunderbolt/tb.c
1567
static void tb_bandwidth_group_release_work(struct work_struct *work)
drivers/thunderbolt/tb.c
1570
container_of(work, typeof(*group), release_work.work);
drivers/thunderbolt/tb.c
2421
static void tb_handle_hotplug(struct work_struct *work)
drivers/thunderbolt/tb.c
2423
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
drivers/thunderbolt/tb.c
2731
static void tb_handle_dp_bandwidth_request(struct work_struct *work)
drivers/thunderbolt/tb.c
2733
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
drivers/thunderbolt/tb.c
2873
INIT_DELAYED_WORK(&ev->work, tb_handle_dp_bandwidth_request);
drivers/thunderbolt/tb.c
2874
queue_delayed_work(tb->wq, &ev->work, delay);
drivers/thunderbolt/tb.c
3243
static void tb_remove_work(struct work_struct *work)
drivers/thunderbolt/tb.c
3245
struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
drivers/thunderbolt/tb.c
78
struct delayed_work work;
drivers/thunderbolt/tb.c
87
static void tb_handle_hotplug(struct work_struct *work);
drivers/thunderbolt/tunnel.c
1059
static void tb_dp_dprx_work(struct work_struct *work)
drivers/thunderbolt/tunnel.c
1061
struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work);
drivers/thunderbolt/xdomain.c
1570
static void tb_xdomain_state_work(struct work_struct *work)
drivers/thunderbolt/xdomain.c
1572
struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
drivers/thunderbolt/xdomain.c
1688
static void tb_xdomain_properties_changed(struct work_struct *work)
drivers/thunderbolt/xdomain.c
1690
struct tb_xdomain *xd = container_of(work, typeof(*xd),
drivers/thunderbolt/xdomain.c
1691
properties_changed_work.work);
drivers/thunderbolt/xdomain.c
56
struct work_struct work;
drivers/thunderbolt/xdomain.c
729
static void tb_xdp_handle_request(struct work_struct *work)
drivers/thunderbolt/xdomain.c
731
struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
drivers/thunderbolt/xdomain.c
865
INIT_WORK(&xw->work, tb_xdp_handle_request);
drivers/thunderbolt/xdomain.c
873
schedule_work(&xw->work);
drivers/tty/hvc/hvc_console.c
563
static void hvc_set_winsz(struct work_struct *work)
drivers/tty/hvc/hvc_console.c
570
hp = container_of(work, struct hvc_struct, tty_resize);
drivers/tty/hvc/hvc_dcc.c
131
static void dcc_put_work(struct work_struct *work)
drivers/tty/hvc/hvc_dcc.c
158
static void dcc_get_work(struct work_struct *work)
drivers/tty/hvc/hvc_iucv.c
435
static void hvc_iucv_sndbuf_work(struct work_struct *work)
drivers/tty/hvc/hvc_iucv.c
439
priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
drivers/tty/hvc/hvsi.c
646
static void hvsi_handshaker(struct work_struct *work)
drivers/tty/hvc/hvsi.c
649
container_of(work, struct hvsi_struct, handshaker);
drivers/tty/hvc/hvsi.c
849
static void hvsi_write_worker(struct work_struct *work)
drivers/tty/hvc/hvsi.c
852
container_of(work, struct hvsi_struct, writer.work);
drivers/tty/ipwireless/tty.c
156
int work = 0;
drivers/tty/ipwireless/tty.c
166
work = tty_insert_flip_string(&tty->port, data, length);
drivers/tty/ipwireless/tty.c
168
if (work != length)
drivers/tty/ipwireless/tty.c
171
length - work);
drivers/tty/ipwireless/tty.c
173
if (work)
drivers/tty/n_gsm.c
3545
static void gsmld_write_task(struct work_struct *work)
drivers/tty/n_gsm.c
3547
struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
drivers/tty/n_gsm.c
463
static void gsmld_write_task(struct work_struct *work);
drivers/tty/n_hdlc.c
156
static void n_hdlc_tty_write_work(struct work_struct *work);
drivers/tty/n_hdlc.c
339
static void n_hdlc_tty_write_work(struct work_struct *work)
drivers/tty/n_hdlc.c
341
struct n_hdlc *n_hdlc = container_of(work, struct n_hdlc, write_work);
drivers/tty/serial/8250/8250_core.c
670
static void serial_8250_overrun_backoff_work(struct work_struct *work)
drivers/tty/serial/8250/8250_core.c
672
struct uart_8250_port *up = container_of(to_delayed_work(work), struct uart_8250_port,
drivers/tty/serial/8250/8250_dw.c
110
static inline struct dw8250_data *work_to_dw8250_data(struct work_struct *work)
drivers/tty/serial/8250/8250_dw.c
112
return container_of(work, struct dw8250_data, clk_work);
drivers/tty/serial/8250/8250_dw.c
487
static void dw8250_clk_work_cb(struct work_struct *work)
drivers/tty/serial/8250/8250_dw.c
489
struct dw8250_data *d = work_to_dw8250_data(work);
drivers/tty/serial/8250/8250_omap.c
616
static void omap8250_uart_qos_work(struct work_struct *work)
drivers/tty/serial/8250/8250_omap.c
620
priv = container_of(work, struct omap8250_priv, qos_work);
drivers/tty/serial/digicolor-usart.c
119
static void digicolor_rx_poll(struct work_struct *work)
drivers/tty/serial/digicolor-usart.c
122
container_of(to_delayed_work(work),
drivers/tty/serial/max3100.c
106
struct work_struct work;
drivers/tty/serial/max3100.c
236
struct max3100_port *s = container_of(w, struct max3100_port, work);
drivers/tty/serial/max3100.c
307
queue_work(s->workqueue, &s->work);
drivers/tty/serial/max3100.c
574
INIT_WORK(&s->work, max3100_work);
drivers/tty/serial/omap-serial.c
767
static void serial_omap_uart_qos_work(struct work_struct *work)
drivers/tty/serial/omap-serial.c
769
struct uart_omap_port *up = container_of(work, struct uart_omap_port,
drivers/tty/serial/sc16is7xx.c
811
struct sc16is7xx_port *s = container_of(ws, struct sc16is7xx_port, poll_work.work);
drivers/tty/serial/sc16is7xx.c
894
struct sc16is7xx_one *one = container_of(ws, struct sc16is7xx_one, ms_work.work);
drivers/tty/serial/sh-sci.c
1641
static void sci_dma_tx_work_fn(struct work_struct *work)
drivers/tty/serial/sh-sci.c
1643
struct sci_port *s = container_of(work, struct sci_port, work_tx);
drivers/tty/synclink_gt.c
1856
static void bh_handler(struct work_struct *work)
drivers/tty/synclink_gt.c
1858
struct slgt_info *info = container_of(work, struct slgt_info, task);
drivers/tty/sysrq.c
802
static void sysrq_reinject_alt_sysrq(struct work_struct *work)
drivers/tty/sysrq.c
805
container_of(work, struct sysrq_state, reinject_work);
drivers/tty/tty_buffer.c
462
static void flush_to_ldisc(struct work_struct *work)
drivers/tty/tty_buffer.c
464
struct tty_port *port = container_of(work, struct tty_port, buf.work);
drivers/tty/tty_buffer.c
533
queue_work(system_dfl_wq, &buf->work);
drivers/tty/tty_buffer.c
563
queue_work(system_dfl_wq, &buf->work);
drivers/tty/tty_buffer.c
586
INIT_WORK(&buf->work, flush_to_ldisc);
drivers/tty/tty_buffer.c
616
return queue_work(system_dfl_wq, &port->buf.work);
drivers/tty/tty_buffer.c
621
return cancel_work_sync(&port->buf.work);
drivers/tty/tty_buffer.c
626
flush_work(&port->buf.work);
drivers/tty/tty_buffer.c
79
queue_work(system_dfl_wq, &buf->work);
drivers/tty/tty_io.c
1514
static void release_one_tty(struct work_struct *work)
drivers/tty/tty_io.c
1517
container_of(work, struct tty_struct, hangup_work);
drivers/tty/tty_io.c
3059
static void do_SAK_work(struct work_struct *work)
drivers/tty/tty_io.c
3062
container_of(work, struct tty_struct, SAK_work);
drivers/tty/tty_io.c
658
static void do_tty_hangup(struct work_struct *work)
drivers/tty/tty_io.c
661
container_of(work, struct tty_struct, hangup_work);
drivers/tty/vt/vt_ioctl.c
978
void vc_SAK(struct work_struct *work)
drivers/tty/vt/vt_ioctl.c
981
container_of(work, struct vc, SAK_work);
drivers/ufs/core/ufs-debugfs.c
131
static void ufs_debugfs_restart_ee(struct work_struct *work)
drivers/ufs/core/ufs-debugfs.c
133
struct ufs_hba *hba = container_of(work, struct ufs_hba, debugfs_ee_work.work);
drivers/ufs/core/ufshcd.c
1524
static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
drivers/ufs/core/ufshcd.c
1526
struct ufs_hba *hba = container_of(work, struct ufs_hba,
drivers/ufs/core/ufshcd.c
1542
static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
drivers/ufs/core/ufshcd.c
1544
struct ufs_hba *hba = container_of(work, struct ufs_hba,
drivers/ufs/core/ufshcd.c
1900
static void ufshcd_ungate_work(struct work_struct *work)
drivers/ufs/core/ufshcd.c
1903
struct ufs_hba *hba = container_of(work, struct ufs_hba,
drivers/ufs/core/ufshcd.c
2010
static void ufshcd_gate_work(struct work_struct *work)
drivers/ufs/core/ufshcd.c
2012
struct ufs_hba *hba = container_of(work, struct ufs_hba,
drivers/ufs/core/ufshcd.c
2013
clk_gating.gate_work.work);
drivers/ufs/core/ufshcd.c
6329
static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
drivers/ufs/core/ufshcd.c
6331
struct ufs_hba *hba = container_of(to_delayed_work(work),
drivers/ufs/core/ufshcd.c
6351
static void ufshcd_exception_event_handler(struct work_struct *work)
drivers/ufs/core/ufshcd.c
6356
hba = container_of(work, struct ufs_hba, eeh_work);
drivers/ufs/core/ufshcd.c
6700
static void ufshcd_err_handler(struct work_struct *work)
drivers/ufs/core/ufshcd.c
6709
hba = container_of(work, struct ufs_hba, eh_work);
drivers/ufs/core/ufshcd.c
8492
static void ufshcd_rtc_work(struct work_struct *work)
drivers/ufs/core/ufshcd.c
8496
hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
drivers/usb/atm/cxacru.c
195
static void cxacru_poll_status(struct work_struct *work);
drivers/usb/atm/cxacru.c
428
cxacru_poll_status(&instance->poll_work.work);
drivers/usb/atm/cxacru.c
814
cxacru_poll_status(&instance->poll_work.work);
drivers/usb/atm/cxacru.c
818
static void cxacru_poll_status(struct work_struct *work)
drivers/usb/atm/cxacru.c
821
container_of(work, struct cxacru_data, poll_work.work);
drivers/usb/atm/speedtch.c
484
static void speedtch_check_status(struct work_struct *work)
drivers/usb/atm/speedtch.c
487
container_of(work, struct speedtch_instance_data,
drivers/usb/atm/ueagle-atm.c
1024
static void uea_load_page_e4(struct work_struct *work)
drivers/usb/atm/ueagle-atm.c
1026
struct uea_softc *sc = container_of(work, struct uea_softc, task);
drivers/usb/atm/ueagle-atm.c
899
static void uea_load_page_e1(struct work_struct *work)
drivers/usb/atm/ueagle-atm.c
901
struct uea_softc *sc = container_of(work, struct uea_softc, task);
drivers/usb/c67x00/c67x00-hcd.h
79
struct work_struct work;
drivers/usb/c67x00/c67x00-sched.c
1126
static void c67x00_sched_work(struct work_struct *work)
drivers/usb/c67x00/c67x00-sched.c
1130
c67x00 = container_of(work, struct c67x00_hcd, work);
drivers/usb/c67x00/c67x00-sched.c
1136
queue_work(system_highpri_wq, &c67x00->work);
drivers/usb/c67x00/c67x00-sched.c
1141
INIT_WORK(&c67x00->work, c67x00_sched_work);
drivers/usb/c67x00/c67x00-sched.c
1147
cancel_work_sync(&c67x00->work);
drivers/usb/cdns3/cdns3-ep0.c
516
void cdns3_pending_setup_status_handler(struct work_struct *work)
drivers/usb/cdns3/cdns3-ep0.c
518
struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
drivers/usb/cdns3/cdns3-gadget.c
864
static void cdns3_free_aligned_request_buf(struct work_struct *work)
drivers/usb/cdns3/cdns3-gadget.c
866
struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
drivers/usb/cdns3/cdns3-gadget.h
1348
void cdns3_pending_setup_status_handler(struct work_struct *work);
drivers/usb/chipidea/ci.h
230
struct work_struct work;
drivers/usb/chipidea/core.c
860
static void ci_power_lost_work(struct work_struct *work)
drivers/usb/chipidea/core.c
862
struct ci_hdrc *ci = container_of(work, struct ci_hdrc, power_lost_work);
drivers/usb/chipidea/otg.c
212
static void ci_otg_work(struct work_struct *work)
drivers/usb/chipidea/otg.c
214
struct ci_hdrc *ci = container_of(work, struct ci_hdrc, work);
drivers/usb/chipidea/otg.c
245
INIT_WORK(&ci->work, ci_otg_work);
drivers/usb/chipidea/otg.h
21
if (queue_work(ci->wq, &ci->work) == false)
drivers/usb/class/cdc-acm.c
610
static void acm_softint(struct work_struct *work)
drivers/usb/class/cdc-acm.c
613
struct acm *acm = container_of(work, struct acm, dwork.work);
drivers/usb/class/cdc-wdm.c
1008
static void service_interrupt_work(struct work_struct *work)
drivers/usb/class/cdc-wdm.c
1012
desc = container_of(work, struct wdm_device, service_outs_intr);
drivers/usb/class/cdc-wdm.c
983
static void wdm_rxwork(struct work_struct *work)
drivers/usb/class/cdc-wdm.c
985
struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
drivers/usb/common/usb-conn-gpio.c
70
static void usb_conn_detect_cable(struct work_struct *work)
drivers/usb/common/usb-conn-gpio.c
76
info = container_of(to_delayed_work(work),
drivers/usb/common/usb-otg-fsm.c
120
static void otg_hnp_polling_work(struct work_struct *work)
drivers/usb/common/usb-otg-fsm.c
122
struct otg_fsm *fsm = container_of(to_delayed_work(work),
drivers/usb/core/hcd.c
1674
static void usb_giveback_urb_bh(struct work_struct *work)
drivers/usb/core/hcd.c
1677
container_of(work, struct giveback_urb_bh, bh);
drivers/usb/core/hcd.c
2381
static void hcd_resume_work(struct work_struct *work)
drivers/usb/core/hcd.c
2383
struct usb_hcd *hcd = container_of(work, struct usb_hcd, wakeup_work);
drivers/usb/core/hcd.c
2486
static void hcd_died_work(struct work_struct *work)
drivers/usb/core/hcd.c
2488
struct usb_hcd *hcd = container_of(work, struct usb_hcd, died_work);
drivers/usb/core/hub.c
1366
struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
drivers/usb/core/hub.c
1373
struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
drivers/usb/core/hub.c
1380
struct usb_hub *hub = container_of(ws, struct usb_hub, post_resume_work.work);
drivers/usb/core/hub.c
504
static void led_work(struct work_struct *work)
drivers/usb/core/hub.c
507
container_of(work, struct usb_hub, leds.work);
drivers/usb/core/hub.c
5874
static void hub_event(struct work_struct *work)
drivers/usb/core/hub.c
5884
hub = container_of(work, struct usb_hub, events);
drivers/usb/core/hub.c
837
static void hub_tt_work(struct work_struct *work)
drivers/usb/core/hub.c
840
container_of(work, struct usb_hub, tt.clear_work);
drivers/usb/core/hub.c
86
static void hub_event(struct work_struct *work);
drivers/usb/core/message.c
2286
struct work_struct work;
drivers/usb/core/message.c
2291
static void driver_set_config_work(struct work_struct *work)
drivers/usb/core/message.c
2294
container_of(work, struct set_config_request, work);
drivers/usb/core/message.c
2353
INIT_WORK(&req->work, driver_set_config_work);
drivers/usb/core/message.c
2360
schedule_work(&req->work);
drivers/usb/dwc2/hcd.c
3137
static void dwc2_conn_id_status_change(struct work_struct *work)
drivers/usb/dwc2/hcd.c
3139
struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
drivers/usb/dwc2/hcd.c
4194
static void dwc2_hcd_start_func(struct work_struct *work)
drivers/usb/dwc2/hcd.c
4196
struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
drivers/usb/dwc2/hcd.c
4197
start_work.work);
drivers/usb/dwc2/hcd.c
4206
static void dwc2_hcd_reset_func(struct work_struct *work)
drivers/usb/dwc2/hcd.c
4208
struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
drivers/usb/dwc2/hcd.c
4209
reset_work.work);
drivers/usb/dwc2/hcd.c
4225
static void dwc2_hcd_phy_reset_func(struct work_struct *work)
drivers/usb/dwc2/hcd.c
4227
struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
drivers/usb/dwc3/core.c
165
static void __dwc3_set_mode(struct work_struct *work)
drivers/usb/dwc3/core.c
167
struct dwc3 *dwc = work_to_dwc(work);
drivers/usb/dwc3/core.c
2158
static void dwc3_vbus_draw_work(struct work_struct *work)
drivers/usb/dwc3/core.c
2160
struct dwc3 *dwc = container_of(work, struct dwc3, vbus_draw_work);
drivers/usb/dwc3/dwc3-pci.c
315
static void dwc3_pci_resume_work(struct work_struct *work)
drivers/usb/dwc3/dwc3-pci.c
317
struct dwc3_pci *dwc = container_of(work, struct dwc3_pci, wakeup_work);
drivers/usb/dwc3/gadget.c
3326
static void dwc3_nostream_work(struct work_struct *work)
drivers/usb/dwc3/gadget.c
3328
struct dwc3_ep *dep = nostream_work_to_dep(work);
drivers/usb/gadget/function/f_fs.c
1381
static void ffs_dmabuf_cleanup(struct work_struct *work)
drivers/usb/gadget/function/f_fs.c
1384
container_of(work, struct ffs_dma_fence, work);
drivers/usb/gadget/function/f_fs.c
1409
INIT_WORK(&dma_fence->work, ffs_dmabuf_cleanup);
drivers/usb/gadget/function/f_fs.c
1410
queue_work(priv->ffs->io_completion_wq, &dma_fence->work);
drivers/usb/gadget/function/f_fs.c
152
struct work_struct work;
drivers/usb/gadget/function/f_fs.c
251
struct work_struct work;
drivers/usb/gadget/function/f_fs.c
3720
static void ffs_reset_work(struct work_struct *work)
drivers/usb/gadget/function/f_fs.c
3722
struct ffs_data *ffs = container_of(work,
drivers/usb/gadget/function/f_fs.c
859
static void ffs_user_copy_worker(struct work_struct *work)
drivers/usb/gadget/function/f_fs.c
861
struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
drivers/usb/gadget/function/f_fs.c
862
work);
drivers/usb/gadget/function/f_fs.c
893
INIT_WORK(&io_data->work, ffs_user_copy_worker);
drivers/usb/gadget/function/f_fs.c
894
queue_work(ffs->io_completion_wq, &io_data->work);
drivers/usb/gadget/function/f_hid.c
104
struct work_struct work;
drivers/usb/gadget/function/f_hid.c
1268
INIT_WORK(&hidg->work, get_report_workqueue_handler);
drivers/usb/gadget/function/f_hid.c
558
static void get_report_workqueue_handler(struct work_struct *work)
drivers/usb/gadget/function/f_hid.c
560
struct f_hidg *hidg = container_of(work, struct f_hidg, work);
drivers/usb/gadget/function/f_hid.c
870
queue_work(hidg->workqueue, &hidg->work);
drivers/usb/gadget/function/f_midi.c
286
queue_work(system_highpri_wq, &midi->work);
drivers/usb/gadget/function/f_midi.c
702
static void f_midi_in_work(struct work_struct *work)
drivers/usb/gadget/function/f_midi.c
706
midi = container_of(work, struct f_midi, work);
drivers/usb/gadget/function/f_midi.c
743
queue_work(system_highpri_wq, &midi->work);
drivers/usb/gadget/function/f_midi.c
882
INIT_WORK(&midi->work, f_midi_in_work);
drivers/usb/gadget/function/f_midi.c
91
struct work_struct work;
drivers/usb/gadget/function/f_tcm.c
1106
queue_work(cmd->fu->tpg->workqueue, &cmd->work);
drivers/usb/gadget/function/f_tcm.c
1252
static void usbg_cmd_work(struct work_struct *work)
drivers/usb/gadget/function/f_tcm.c
1254
struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
drivers/usb/gadget/function/f_tcm.c
1464
INIT_WORK(&cmd->work, usbg_cmd_work);
drivers/usb/gadget/function/f_tcm.c
1465
queue_work(tpg->workqueue, &cmd->work);
drivers/usb/gadget/function/f_tcm.c
1470
static void bot_cmd_work(struct work_struct *work)
drivers/usb/gadget/function/f_tcm.c
1472
struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
drivers/usb/gadget/function/f_tcm.c
1562
INIT_WORK(&cmd->work, bot_cmd_work);
drivers/usb/gadget/function/f_tcm.c
1563
queue_work(tpg->workqueue, &cmd->work);
drivers/usb/gadget/function/f_tcm.c
2366
struct work_struct work;
drivers/usb/gadget/function/f_tcm.c
2373
struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq,
drivers/usb/gadget/function/f_tcm.c
2374
work);
drivers/usb/gadget/function/f_tcm.c
2375
struct f_uas *fu = work->fu;
drivers/usb/gadget/function/f_tcm.c
2376
int alt = work->alt;
drivers/usb/gadget/function/f_tcm.c
2378
kfree(work);
drivers/usb/gadget/function/f_tcm.c
2415
struct guas_setup_wq *work;
drivers/usb/gadget/function/f_tcm.c
2417
work = kmalloc_obj(*work, GFP_ATOMIC);
drivers/usb/gadget/function/f_tcm.c
2418
if (!work)
drivers/usb/gadget/function/f_tcm.c
2420
INIT_WORK(&work->work, tcm_delayed_set_alt);
drivers/usb/gadget/function/f_tcm.c
2421
work->fu = fu;
drivers/usb/gadget/function/f_tcm.c
2422
work->alt = alt;
drivers/usb/gadget/function/f_tcm.c
2423
schedule_work(&work->work);
drivers/usb/gadget/function/tcm.h
73
struct work_struct work;
drivers/usb/gadget/function/u_ether.c
1118
flush_work(&dev->work);
drivers/usb/gadget/function/u_ether.c
137
if (!schedule_work(&dev->work))
drivers/usb/gadget/function/u_ether.c
396
static void eth_work(struct work_struct *work)
drivers/usb/gadget/function/u_ether.c
398
struct eth_dev *dev = container_of(work, struct eth_dev, work);
drivers/usb/gadget/function/u_ether.c
758
INIT_WORK(&dev->work, eth_work);
drivers/usb/gadget/function/u_ether.c
79
struct work_struct work;
drivers/usb/gadget/function/u_ether.c
829
INIT_WORK(&dev->work, eth_work);
drivers/usb/gadget/function/u_serial.c
1013
schedule_work(&cons->work);
drivers/usb/gadget/function/u_serial.c
1048
schedule_work(&cons->work);
drivers/usb/gadget/function/u_serial.c
1095
INIT_WORK(&cons->work, gs_console_work);
drivers/usb/gadget/function/u_serial.c
1130
cancel_work_sync(&cons->work);
drivers/usb/gadget/function/u_serial.c
364
static void gs_rx_push(struct work_struct *work)
drivers/usb/gadget/function/u_serial.c
366
struct delayed_work *w = to_delayed_work(work);
drivers/usb/gadget/function/u_serial.c
93
struct work_struct work;
drivers/usb/gadget/function/u_serial.c
943
schedule_work(&cons->work);
drivers/usb/gadget/function/u_serial.c
988
static void gs_console_work(struct work_struct *work)
drivers/usb/gadget/function/u_serial.c
990
struct gs_console *cons = container_of(work, struct gs_console, work);
drivers/usb/gadget/function/uvc_video.c
421
static void uvcg_video_hw_submit(struct kthread_work *work)
drivers/usb/gadget/function/uvc_video.c
423
struct uvc_video *video = container_of(work, struct uvc_video, hw_submit);
drivers/usb/gadget/function/uvc_video.c
609
static void uvcg_video_pump(struct work_struct *work)
drivers/usb/gadget/function/uvc_video.c
611
struct uvc_video *video = container_of(work, struct uvc_video, pump);
drivers/usb/gadget/legacy/inode.c
441
struct work_struct work;
drivers/usb/gadget/legacy/inode.c
467
static void ep_user_copy_worker(struct work_struct *work)
drivers/usb/gadget/legacy/inode.c
469
struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
drivers/usb/gadget/legacy/inode.c
518
INIT_WORK(&priv->work, ep_user_copy_worker);
drivers/usb/gadget/legacy/inode.c
519
schedule_work(&priv->work);
drivers/usb/gadget/udc/aspeed-vhub/hub.c
561
static void ast_vhub_wake_work(struct work_struct *work)
drivers/usb/gadget/udc/aspeed-vhub/hub.c
563
struct ast_vhub *vhub = container_of(work,
drivers/usb/gadget/udc/aspeed_udc.c
1221
static void ast_udc_wake_work(struct work_struct *work)
drivers/usb/gadget/udc/aspeed_udc.c
1223
struct ast_udc_dev *udc = container_of(work, struct ast_udc_dev,
drivers/usb/gadget/udc/at91_udc.c
1531
static void at91_vbus_timer_work(struct work_struct *work)
drivers/usb/gadget/udc/at91_udc.c
1533
struct at91_udc *udc = container_of(work, struct at91_udc,
drivers/usb/gadget/udc/bdc/bdc_udc.c
158
static void bdc_func_wake_timer(struct work_struct *work)
drivers/usb/gadget/udc/bdc/bdc_udc.c
160
struct bdc *bdc = container_of(work, struct bdc, func_wake_notify.work);
drivers/usb/gadget/udc/cdns2/cdns2-ep0.c
367
void cdns2_pending_setup_status_handler(struct work_struct *work)
drivers/usb/gadget/udc/cdns2/cdns2-ep0.c
369
struct cdns2_device *pdev = container_of(work, struct cdns2_device,
drivers/usb/gadget/udc/cdns2/cdns2-gadget.h
693
void cdns2_pending_setup_status_handler(struct work_struct *work);
drivers/usb/gadget/udc/core.c
1117
static void usb_gadget_state_work(struct work_struct *work)
drivers/usb/gadget/udc/core.c
1119
struct usb_gadget *gadget = work_to_gadget(work);
drivers/usb/gadget/udc/core.c
1134
schedule_work(&gadget->work);
drivers/usb/gadget/udc/core.c
1151
static void vbus_event_work(struct work_struct *work)
drivers/usb/gadget/udc/core.c
1153
struct usb_udc *udc = container_of(work, struct usb_udc, vbus_work);
drivers/usb/gadget/udc/core.c
1371
INIT_WORK(&gadget->work, usb_gadget_state_work);
drivers/usb/gadget/udc/core.c
1452
flush_work(&gadget->work);
drivers/usb/gadget/udc/core.c
1566
flush_work(&gadget->work);
drivers/usb/gadget/udc/lpc32xx_udc.c
636
static void pullup_work(struct work_struct *work)
drivers/usb/gadget/udc/lpc32xx_udc.c
639
container_of(work, struct lpc32xx_udc, pullup_job);
drivers/usb/gadget/udc/lpc32xx_udc.c
678
static void power_work(struct work_struct *work)
drivers/usb/gadget/udc/lpc32xx_udc.c
681
container_of(work, struct lpc32xx_udc, power_job);
drivers/usb/gadget/udc/renesas_usb3.c
487
static void renesas_usb3_extcon_work(struct work_struct *work)
drivers/usb/gadget/udc/renesas_usb3.c
489
struct renesas_usb3 *usb3 = container_of(work, struct renesas_usb3,
drivers/usb/gadget/udc/renesas_usb3.c
715
static void renesas_usb3_role_work(struct work_struct *work)
drivers/usb/gadget/udc/renesas_usb3.c
718
container_of(work, struct renesas_usb3, role_work);
drivers/usb/gadget/udc/snps_udc_plat.c
70
static void udc_drd_work(struct work_struct *work)
drivers/usb/gadget/udc/snps_udc_plat.c
74
udc = container_of(to_delayed_work(work),
drivers/usb/gadget/udc/tegra-xudc.c
778
static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
drivers/usb/gadget/udc/tegra-xudc.c
780
struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
drivers/usb/gadget/udc/tegra-xudc.c
843
static void tegra_xudc_plc_reset_work(struct work_struct *work)
drivers/usb/gadget/udc/tegra-xudc.c
845
struct delayed_work *dwork = to_delayed_work(work);
drivers/usb/gadget/udc/tegra-xudc.c
870
static void tegra_xudc_port_reset_war_work(struct work_struct *work)
drivers/usb/gadget/udc/tegra-xudc.c
872
struct delayed_work *dwork = to_delayed_work(work);
drivers/usb/host/ehci-platform.c
180
static void quirk_poll_work(struct work_struct *work)
drivers/usb/host/ehci-platform.c
183
container_of(to_delayed_work(work), struct ehci_platform_priv,
drivers/usb/host/ohci-pci.c
129
static void ohci_quirk_nec_worker(struct work_struct *work)
drivers/usb/host/ohci-pci.c
131
struct ohci_hcd *ohci = container_of(work, struct ohci_hcd, nec_work);
drivers/usb/host/xhci-dbgcap.c
1003
dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
drivers/usb/host/xhci-dbgcap.c
995
static void xhci_dbc_handle_events(struct work_struct *work)
drivers/usb/host/xhci-ring.c
1717
void xhci_handle_command_timeout(struct work_struct *work)
drivers/usb/host/xhci-ring.c
1726
xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
drivers/usb/host/xhci-tegra.c
1347
static void tegra_xhci_id_work(struct work_struct *work)
drivers/usb/host/xhci-tegra.c
1349
struct tegra_xusb *tegra = container_of(work, struct tegra_xusb,
drivers/usb/host/xhci.h
1944
void xhci_handle_command_timeout(struct work_struct *work);
drivers/usb/misc/appledisplay.c
115
schedule_delayed_work(&pdata->work, 0);
drivers/usb/misc/appledisplay.c
191
static void appledisplay_work(struct work_struct *work)
drivers/usb/misc/appledisplay.c
194
container_of(work, struct appledisplay, work.work);
drivers/usb/misc/appledisplay.c
203
schedule_delayed_work(&pdata->work, HZ / 8);
drivers/usb/misc/appledisplay.c
237
INIT_DELAYED_WORK(&pdata->work, appledisplay_work);
drivers/usb/misc/appledisplay.c
314
cancel_delayed_work_sync(&pdata->work);
drivers/usb/misc/appledisplay.c
334
cancel_delayed_work_sync(&pdata->work);
drivers/usb/misc/appledisplay.c
70
struct delayed_work work;
drivers/usb/misc/lvstest.c
326
static void lvs_rh_work(struct work_struct *work)
drivers/usb/misc/lvstest.c
328
struct lvs_rh *lvs = container_of(work, struct lvs_rh, rh_work);
drivers/usb/misc/onboard_usb_dev.c
304
static void onboard_dev_attach_usb_driver(struct work_struct *work)
drivers/usb/misc/onboard_usb_dev.c
45
static void onboard_dev_attach_usb_driver(struct work_struct *work);
drivers/usb/mtu3/mtu3_dr.c
128
static void ssusb_mode_sw_work(struct work_struct *work)
drivers/usb/mtu3/mtu3_dr.c
131
container_of(work, struct otg_switch_mtk, dr_work);
drivers/usb/musb/musb_core.c
2074
struct musb *musb = container_of(data, struct musb, irq_work.work);
drivers/usb/musb/musb_core.c
2283
static void musb_deassert_reset(struct work_struct *work)
drivers/usb/musb/musb_core.c
2288
musb = container_of(work, struct musb, deassert_reset_work.work);
drivers/usb/musb/musb_gadget.c
1634
static void musb_gadget_work(struct work_struct *work)
drivers/usb/musb/musb_gadget.c
1639
musb = container_of(work, struct musb, gadget_work.work);
drivers/usb/musb/musb_host.h
102
static inline void musb_host_finish_resume(struct work_struct *work) {}
drivers/usb/musb/musb_host.h
68
extern void musb_host_finish_resume(struct work_struct *work);
drivers/usb/musb/musb_virthub.c
21
void musb_host_finish_resume(struct work_struct *work)
drivers/usb/musb/musb_virthub.c
27
musb = container_of(work, struct musb, finish_resume_work.work);
drivers/usb/musb/sunxi.c
157
schedule_work(&glue->work);
drivers/usb/musb/sunxi.c
217
schedule_work(&glue->work);
drivers/usb/musb/sunxi.c
283
cancel_work_sync(&glue->work);
drivers/usb/musb/sunxi.c
309
schedule_work(&glue->work);
drivers/usb/musb/sunxi.c
368
schedule_work(&glue->work);
drivers/usb/musb/sunxi.c
382
schedule_work(&glue->work);
drivers/usb/musb/sunxi.c
728
INIT_WORK(&glue->work, sunxi_musb_work);
drivers/usb/musb/sunxi.c
91
struct work_struct work;
drivers/usb/musb/sunxi.c
97
static void sunxi_musb_work(struct work_struct *work)
drivers/usb/musb/sunxi.c
99
struct sunxi_glue *glue = container_of(work, struct sunxi_glue, work);
drivers/usb/phy/phy-ab8500-usb.c
615
static void ab8500_usb_phy_disable_work(struct work_struct *work)
drivers/usb/phy/phy-ab8500-usb.c
617
struct ab8500_usb *ab = container_of(work, struct ab8500_usb,
drivers/usb/phy/phy-fsl-usb.c
650
static void fsl_otg_event(struct work_struct *work)
drivers/usb/phy/phy-fsl-usb.c
652
struct fsl_otg *og = container_of(work, struct fsl_otg, otg_event.work);
drivers/usb/phy/phy-gpio-vbus-usb.c
158
schedule_delayed_work(&gpio_vbus->work, msecs_to_jiffies(100));
drivers/usb/phy/phy-gpio-vbus-usb.c
306
INIT_DELAYED_WORK(&gpio_vbus->work, gpio_vbus_work);
drivers/usb/phy/phy-gpio-vbus-usb.c
333
cancel_delayed_work_sync(&gpio_vbus->work);
drivers/usb/phy/phy-gpio-vbus-usb.c
38
struct delayed_work work;
drivers/usb/phy/phy-gpio-vbus-usb.c
92
static void gpio_vbus_work(struct work_struct *work)
drivers/usb/phy/phy-gpio-vbus-usb.c
95
container_of(work, struct gpio_vbus_data, work.work);
drivers/usb/phy/phy-mv-usb.h
148
struct delayed_work work;
drivers/usb/phy/phy-twl6030-usb.c
278
static void twl6030_status_work(struct work_struct *work)
drivers/usb/phy/phy-twl6030-usb.c
280
struct twl6030_usb *twl = container_of(work, struct twl6030_usb,
drivers/usb/phy/phy-twl6030-usb.c
281
get_status_work.work);
drivers/usb/phy/phy.c
123
static void usb_phy_notify_charger_work(struct work_struct *work)
drivers/usb/phy/phy.c
125
struct usb_phy *usb_phy = container_of(work, struct usb_phy, chg_work);
drivers/usb/renesas_usbhs/common.c
511
static void usbhsc_notify_hotplug(struct work_struct *work)
drivers/usb/renesas_usbhs/common.c
513
struct usbhs_priv *priv = container_of(work,
drivers/usb/renesas_usbhs/common.c
515
notify_hotplug_work.work);
drivers/usb/renesas_usbhs/fifo.c
1113
INIT_WORK(&pkt->work, xfer_work);
drivers/usb/renesas_usbhs/fifo.c
1114
schedule_work(&pkt->work);
drivers/usb/renesas_usbhs/fifo.c
861
static void xfer_work(struct work_struct *work)
drivers/usb/renesas_usbhs/fifo.c
863
struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
drivers/usb/renesas_usbhs/fifo.c
926
INIT_WORK(&pkt->work, xfer_work);
drivers/usb/renesas_usbhs/fifo.c
927
schedule_work(&pkt->work);
drivers/usb/renesas_usbhs/fifo.h
51
struct work_struct work;
drivers/usb/serial/f81232.c
800
static void f81232_interrupt_work(struct work_struct *work)
drivers/usb/serial/f81232.c
803
container_of(work, struct f81232_private, interrupt_work);
drivers/usb/serial/f81232.c
808
static void f81232_lsr_worker(struct work_struct *work)
drivers/usb/serial/f81232.c
815
priv = container_of(work, struct f81232_private, lsr_work);
drivers/usb/serial/f81534.c
1296
static void f81534_lsr_worker(struct work_struct *work)
drivers/usb/serial/f81534.c
1303
port_priv = container_of(work, struct f81534_port_private, lsr_work);
drivers/usb/serial/io_ti.c
2452
static void edge_heartbeat_work(struct work_struct *work)
drivers/usb/serial/io_ti.c
2457
serial = container_of(work, struct edgeport_serial,
drivers/usb/serial/io_ti.c
2458
heartbeat_work.work);
drivers/usb/serial/keyspan_pda.c
103
static void keyspan_pda_request_unthrottle(struct work_struct *work)
drivers/usb/serial/keyspan_pda.c
106
container_of(work, struct keyspan_pda_private, unthrottle_work);
drivers/usb/serial/mos7720.c
1689
cancel_work_sync(&mos_parport->work);
drivers/usb/serial/mos7720.c
287
if (work_pending(&mos_parport->work))
drivers/usb/serial/mos7720.c
288
flush_work(&mos_parport->work);
drivers/usb/serial/mos7720.c
314
static void deferred_restore_writes(struct work_struct *work)
drivers/usb/serial/mos7720.c
318
mos_parport = container_of(work, struct mos7715_parport, work);
drivers/usb/serial/mos7720.c
494
schedule_work(&mos_parport->work);
drivers/usb/serial/mos7720.c
566
INIT_WORK(&mos_parport->work, deferred_restore_writes);
drivers/usb/serial/mos7720.c
93
struct work_struct work; /* restore deferred writes */
drivers/usb/serial/oti6858.c
192
static void setup_line(struct work_struct *work)
drivers/usb/serial/oti6858.c
194
struct oti6858_private *priv = container_of(work,
drivers/usb/serial/oti6858.c
195
struct oti6858_private, delayed_setup_work.work);
drivers/usb/serial/oti6858.c
260
static void send_data(struct work_struct *work)
drivers/usb/serial/oti6858.c
262
struct oti6858_private *priv = container_of(work,
drivers/usb/serial/oti6858.c
263
struct oti6858_private, delayed_write_work.work);
drivers/usb/serial/usb-serial.c
1073
INIT_WORK(&port->work, usb_serial_port_work);
drivers/usb/serial/usb-serial.c
1196
cancel_work_sync(&port->work);
drivers/usb/serial/usb-serial.c
623
schedule_work(&port->work);
drivers/usb/serial/usb-serial.c
627
static void usb_serial_port_work(struct work_struct *work)
drivers/usb/serial/usb-serial.c
630
container_of(work, struct usb_serial_port, work);
drivers/usb/storage/uas.c
101
container_of(work, struct uas_dev_info, work);
drivers/usb/storage/uas.c
1042
INIT_WORK(&devinfo->work, uas_do_work);
drivers/usb/storage/uas.c
1106
flush_work(&devinfo->work);
drivers/usb/storage/uas.c
1224
cancel_work_sync(&devinfo->work);
drivers/usb/storage/uas.c
126
queue_work(workqueue, &devinfo->work);
drivers/usb/storage/uas.c
132
static void uas_scan_work(struct work_struct *work)
drivers/usb/storage/uas.c
135
container_of(work, struct uas_dev_info, scan_work);
drivers/usb/storage/uas.c
150
queue_work(workqueue, &devinfo->work);
drivers/usb/storage/uas.c
48
struct work_struct work;
drivers/usb/storage/uas.c
79
static void uas_do_work(struct work_struct *work);
drivers/usb/storage/uas.c
98
static void uas_do_work(struct work_struct *work)
drivers/usb/storage/usb.c
975
static void usb_stor_scan_dwork(struct work_struct *work)
drivers/usb/storage/usb.c
977
struct us_data *us = container_of(work, struct us_data,
drivers/usb/storage/usb.c
978
scan_dwork.work);
drivers/usb/typec/altmodes/displayport.c
265
static void dp_altmode_work(struct work_struct *work)
drivers/usb/typec/altmodes/displayport.c
267
struct dp_altmode *dp = container_of(work, struct dp_altmode, work);
drivers/usb/typec/altmodes/displayport.c
342
schedule_work(&dp->work);
drivers/usb/typec/altmodes/displayport.c
367
schedule_work(&dp->work);
drivers/usb/typec/altmodes/displayport.c
436
schedule_work(&dp->work);
drivers/usb/typec/altmodes/displayport.c
495
schedule_work(&dp->work);
drivers/usb/typec/altmodes/displayport.c
77
struct work_struct work;
drivers/usb/typec/altmodes/displayport.c
785
INIT_WORK(&dp->work, dp_altmode_work);
drivers/usb/typec/altmodes/displayport.c
814
schedule_work(&dp->work);
drivers/usb/typec/altmodes/displayport.c
825
cancel_work_sync(&dp->work);
drivers/usb/typec/altmodes/thunderbolt.c
102
schedule_work(&tbt->work);
drivers/usb/typec/altmodes/thunderbolt.c
183
schedule_work(&tbt->work);
drivers/usb/typec/altmodes/thunderbolt.c
240
schedule_work(&tbt->work);
drivers/usb/typec/altmodes/thunderbolt.c
281
INIT_WORK(&tbt->work, tbt_altmode_work);
drivers/usb/typec/altmodes/thunderbolt.c
296
schedule_work(&tbt->work);
drivers/usb/typec/altmodes/thunderbolt.c
34
struct work_struct work;
drivers/usb/typec/altmodes/thunderbolt.c
45
static void tbt_altmode_work(struct work_struct *work)
drivers/usb/typec/altmodes/thunderbolt.c
47
struct tbt_altmode *tbt = container_of(work, struct tbt_altmode, work);
drivers/usb/typec/anx7411.c
1517
INIT_WORK(&plat->work, anx7411_work_func);
drivers/usb/typec/anx7411.c
283
struct work_struct work;
drivers/usb/typec/anx7411.c
906
static void anx7411_work_func(struct work_struct *work)
drivers/usb/typec/anx7411.c
913
struct anx7411_data *ctx = container_of(work, struct anx7411_data, work);
drivers/usb/typec/anx7411.c
990
queue_work(ctx->workqueue, &ctx->work);
drivers/usb/typec/hd3ss3220.c
247
static void output_poll_execute(struct work_struct *work)
drivers/usb/typec/hd3ss3220.c
249
struct delayed_work *delayed_work = to_delayed_work(work);
drivers/usb/typec/mode_selection.c
130
static void mode_selection_work_fn(struct work_struct *work)
drivers/usb/typec/mode_selection.c
132
struct mode_selection *sel = container_of(work,
drivers/usb/typec/mode_selection.c
133
struct mode_selection, work.work);
drivers/usb/typec/mode_selection.c
173
schedule_delayed_work(&sel->work, msecs_to_jiffies(delay));
drivers/usb/typec/mode_selection.c
187
if (cancel_delayed_work(&sel->work))
drivers/usb/typec/mode_selection.c
188
schedule_delayed_work(&sel->work, 0);
drivers/usb/typec/mode_selection.c
263
INIT_DELAYED_WORK(&sel->work, mode_selection_work_fn);
drivers/usb/typec/mode_selection.c
264
schedule_delayed_work(&sel->work, msecs_to_jiffies(delay));
drivers/usb/typec/mode_selection.c
277
cancel_delayed_work_sync(&sel->work);
drivers/usb/typec/mode_selection.c
43
struct delayed_work work;
drivers/usb/typec/tcpm/fusb302.c
1078
static void fusb302_bc_lvl_handler_work(struct work_struct *work)
drivers/usb/typec/tcpm/fusb302.c
1080
struct fusb302_chip *chip = container_of(work, struct fusb302_chip,
drivers/usb/typec/tcpm/fusb302.c
1081
bc_lvl_handler.work);
drivers/usb/typec/tcpm/fusb302.c
1501
static void fusb302_irq_work(struct work_struct *work)
drivers/usb/typec/tcpm/fusb302.c
1503
struct fusb302_chip *chip = container_of(work, struct fusb302_chip,
drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
140
static void qcom_pmic_typec_pdphy_sig_reset_work(struct work_struct *work)
drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
142
struct pmic_typec_pdphy *pmic_typec_pdphy = container_of(work, struct pmic_typec_pdphy,
drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
222
static void qcom_pmic_typec_port_cc_debounce(struct work_struct *work)
drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
225
container_of(work, struct pmic_typec_port, cc_debounce_dwork.work);
drivers/usb/typec/tcpm/tcpm.c
1639
static void tcpm_queue_vdm_work(struct kthread_work *work)
drivers/usb/typec/tcpm/tcpm.c
1641
struct altmode_vdm_event *event = container_of(work,
drivers/usb/typec/tcpm/tcpm.c
1643
work);
drivers/usb/typec/tcpm/tcpm.c
1676
kthread_init_work(&event->work, tcpm_queue_vdm_work);
drivers/usb/typec/tcpm/tcpm.c
1684
ret = kthread_queue_work(port->wq, &event->work);
drivers/usb/typec/tcpm/tcpm.c
2758
static void vdm_state_machine_work(struct kthread_work *work)
drivers/usb/typec/tcpm/tcpm.c
2760
struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
drivers/usb/typec/tcpm/tcpm.c
3728
static void tcpm_pd_rx_handler(struct kthread_work *work)
drivers/usb/typec/tcpm/tcpm.c
3730
struct pd_rx_event *event = container_of(work,
drivers/usb/typec/tcpm/tcpm.c
3731
struct pd_rx_event, work);
drivers/usb/typec/tcpm/tcpm.c
3810
kthread_init_work(&event->work, tcpm_pd_rx_handler);
drivers/usb/typec/tcpm/tcpm.c
3814
kthread_queue_work(port->wq, &event->work);
drivers/usb/typec/tcpm/tcpm.c
5907
static void tcpm_state_machine_work(struct kthread_work *work)
drivers/usb/typec/tcpm/tcpm.c
5909
struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
drivers/usb/typec/tcpm/tcpm.c
598
struct kthread_work work;
drivers/usb/typec/tcpm/tcpm.c
605
struct kthread_work work;
drivers/usb/typec/tcpm/tcpm.c
6432
static void tcpm_pd_event_handler(struct kthread_work *work)
drivers/usb/typec/tcpm/tcpm.c
6434
struct tcpm_port *port = container_of(work, struct tcpm_port,
drivers/usb/typec/tcpm/tcpm.c
6584
static void tcpm_enable_frs_work(struct kthread_work *work)
drivers/usb/typec/tcpm/tcpm.c
6586
struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
drivers/usb/typec/tcpm/tcpm.c
6617
static void tcpm_send_discover_work(struct kthread_work *work)
drivers/usb/typec/tcpm/tcpm.c
6619
struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
drivers/usb/typec/tipd/core.c
1049
static void tps6598x_poll_work(struct work_struct *work)
drivers/usb/typec/tipd/core.c
1051
struct tps6598x *tps = container_of(to_delayed_work(work),
drivers/usb/typec/tipd/core.c
737
static void cd321x_update_work(struct work_struct *work)
drivers/usb/typec/tipd/core.c
739
struct cd321x *cd321x = container_of(to_delayed_work(work),
drivers/usb/typec/ucsi/cros_ec_ucsi.c
168
static void cros_ucsi_work(struct work_struct *work)
drivers/usb/typec/ucsi/cros_ec_ucsi.c
170
struct cros_ucsi_data *udata = container_of(work, struct cros_ucsi_data, work);
drivers/usb/typec/ucsi/cros_ec_ucsi.c
179
static void cros_ucsi_write_timeout(struct work_struct *work)
drivers/usb/typec/ucsi/cros_ec_ucsi.c
182
container_of(work, struct cros_ucsi_data, write_tmo.work);
drivers/usb/typec/ucsi/cros_ec_ucsi.c
242
flush_work(&udata->work);
drivers/usb/typec/ucsi/cros_ec_ucsi.c
243
schedule_work(&udata->work);
drivers/usb/typec/ucsi/cros_ec_ucsi.c
253
cancel_work_sync(&udata->work);
drivers/usb/typec/ucsi/cros_ec_ucsi.c
276
INIT_WORK(&udata->work, cros_ucsi_work);
drivers/usb/typec/ucsi/cros_ec_ucsi.c
317
cancel_work_sync(&udata->work);
drivers/usb/typec/ucsi/cros_ec_ucsi.c
48
struct work_struct work;
drivers/usb/typec/ucsi/displayport.c
101
schedule_work(&dp->work);
drivers/usb/typec/ucsi/displayport.c
146
schedule_work(&dp->work);
drivers/usb/typec/ucsi/displayport.c
22
struct work_struct work;
drivers/usb/typec/ucsi/displayport.c
259
schedule_work(&dp->work);
drivers/usb/typec/ucsi/displayport.c
276
static void ucsi_displayport_work(struct work_struct *work)
drivers/usb/typec/ucsi/displayport.c
278
struct ucsi_dp *dp = container_of(work, struct ucsi_dp, work);
drivers/usb/typec/ucsi/displayport.c
302
cancel_work_sync(&dp->work);
drivers/usb/typec/ucsi/displayport.c
335
INIT_WORK(&dp->work, ucsi_displayport_work);
drivers/usb/typec/ucsi/thunderbolt.c
159
schedule_work(&tbt->work);
drivers/usb/typec/ucsi/thunderbolt.c
196
INIT_WORK(&tbt->work, ucsi_thunderbolt_work);
drivers/usb/typec/ucsi/thunderbolt.c
210
cancel_work_sync(&tbt->work);
drivers/usb/typec/ucsi/thunderbolt.c
31
struct work_struct work;
drivers/usb/typec/ucsi/thunderbolt.c
36
static void ucsi_thunderbolt_work(struct work_struct *work)
drivers/usb/typec/ucsi/thunderbolt.c
38
struct ucsi_tbt *tbt = container_of(work, struct ucsi_tbt, work);
drivers/usb/typec/ucsi/thunderbolt.c
76
schedule_work(&tbt->work);
drivers/usb/typec/ucsi/ucsi.c
1263
static void ucsi_handle_connector_change(struct work_struct *work)
drivers/usb/typec/ucsi/ucsi.c
1265
struct ucsi_connector *con = container_of(work, struct ucsi_connector,
drivers/usb/typec/ucsi/ucsi.c
1266
work);
drivers/usb/typec/ucsi/ucsi.c
1380
schedule_work(&con->work);
drivers/usb/typec/ucsi/ucsi.c
1631
INIT_WORK(&con->work, ucsi_handle_connector_change);
drivers/usb/typec/ucsi/ucsi.c
1932
static void ucsi_resume_work(struct work_struct *work)
drivers/usb/typec/ucsi/ucsi.c
1934
struct ucsi *ucsi = container_of(work, struct ucsi, resume_work);
drivers/usb/typec/ucsi/ucsi.c
1962
static void ucsi_init_work(struct work_struct *work)
drivers/usb/typec/ucsi/ucsi.c
1964
struct ucsi *ucsi = container_of(work, struct ucsi, work.work);
drivers/usb/typec/ucsi/ucsi.c
1977
queue_delayed_work(system_long_wq, &ucsi->work,
drivers/usb/typec/ucsi/ucsi.c
2056
INIT_DELAYED_WORK(&ucsi->work, ucsi_init_work);
drivers/usb/typec/ucsi/ucsi.c
2101
queue_delayed_work(system_long_wq, &ucsi->work, 0);
drivers/usb/typec/ucsi/ucsi.c
2120
cancel_delayed_work_sync(&ucsi->work);
drivers/usb/typec/ucsi/ucsi.c
2130
cancel_work_sync(&ucsi->connector[i].work);
drivers/usb/typec/ucsi/ucsi.c
2141
mod_delayed_work(ucsi->connector[i].wq, &uwork->work, 0);
drivers/usb/typec/ucsi/ucsi.c
257
struct delayed_work work;
drivers/usb/typec/ucsi/ucsi.c
265
static void ucsi_poll_worker(struct work_struct *work)
drivers/usb/typec/ucsi/ucsi.c
267
struct ucsi_work *uwork = container_of(work, struct ucsi_work, work.work);
drivers/usb/typec/ucsi/ucsi.c
283
queue_delayed_work(con->wq, &uwork->work, uwork->delay);
drivers/usb/typec/ucsi/ucsi.c
305
INIT_DELAYED_WORK(&uwork->work, ucsi_poll_worker);
drivers/usb/typec/ucsi/ucsi.c
312
queue_delayed_work(con->wq, &uwork->work, delay);
drivers/usb/typec/ucsi/ucsi.h
478
struct delayed_work work;
drivers/usb/typec/ucsi/ucsi.h
517
struct work_struct work;
drivers/usb/typec/ucsi/ucsi_ccg.c
1363
static void ccg_update_firmware(struct work_struct *work)
drivers/usb/typec/ucsi/ucsi_ccg.c
1365
struct ucsi_ccg *uc = container_of(work, struct ucsi_ccg, work);
drivers/usb/typec/ucsi/ucsi_ccg.c
1396
schedule_work(&uc->work);
drivers/usb/typec/ucsi/ucsi_ccg.c
1441
INIT_WORK(&uc->work, ccg_update_firmware);
drivers/usb/typec/ucsi/ucsi_ccg.c
1514
cancel_work_sync(&uc->work);
drivers/usb/typec/ucsi/ucsi_ccg.c
219
struct work_struct work;
drivers/usb/typec/ucsi/ucsi_glink.c
298
static void pmic_glink_ucsi_notify(struct work_struct *work)
drivers/usb/typec/ucsi/ucsi_glink.c
300
struct pmic_glink_ucsi *ucsi = container_of(work, struct pmic_glink_ucsi, notify_work);
drivers/usb/typec/ucsi/ucsi_glink.c
313
static void pmic_glink_ucsi_register(struct work_struct *work)
drivers/usb/typec/ucsi/ucsi_glink.c
315
struct pmic_glink_ucsi *ucsi = container_of(work, struct pmic_glink_ucsi, register_work);
drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c
446
static void gaokun_ucsi_register_worker(struct work_struct *work)
drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c
452
uec = container_of(work, struct gaokun_ucsi, work.work);
drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c
483
INIT_DELAYED_WORK(&uec->work, gaokun_ucsi_register_worker);
drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c
497
schedule_delayed_work(&uec->work, 3 * HZ);
drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c
506
disable_delayed_work_sync(&uec->work);
drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c
99
struct delayed_work work;
drivers/usb/usbip/usbip_event.c
62
static void event_handler(struct work_struct *work)
drivers/vdpa/mlx5/core/mlx5_vdpa.h
71
struct work_struct work;
drivers/vdpa/mlx5/core/mr.c
665
static void mlx5_vdpa_mr_gc_handler(struct work_struct *work)
drivers/vdpa/mlx5/core/mr.c
671
mres = container_of(work, struct mlx5_vdpa_mr_resources, gc_dwork_ent.work);
drivers/vdpa/mlx5/net/mlx5_vnet.c
2365
static void mlx5_cvq_kick_handler(struct work_struct *work)
drivers/vdpa/mlx5/net/mlx5_vnet.c
2376
wqent = container_of(work, struct mlx5_vdpa_wq_ent, work);
drivers/vdpa/mlx5/net/mlx5_vnet.c
2429
queue_work(mvdev->wq, &wqent->work);
drivers/vdpa/mlx5/net/mlx5_vnet.c
2450
queue_work(mvdev->wq, &ndev->cvq_ent.work);
drivers/vdpa/mlx5/net/mlx5_vnet.c
2825
static void update_carrier(struct work_struct *work)
drivers/vdpa/mlx5/net/mlx5_vnet.c
2831
wqent = container_of(work, struct mlx5_vdpa_wq_ent, work);
drivers/vdpa/mlx5/net/mlx5_vnet.c
2854
INIT_WORK(&wqent->work, update_carrier);
drivers/vdpa/mlx5/net/mlx5_vnet.c
2855
queue_work(ndev->mvdev.wq, &wqent->work);
drivers/vdpa/mlx5/net/mlx5_vnet.c
3998
INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
drivers/vdpa/octeon_ep/octep_vdpa_main.c
590
static void octep_vdpa_setup_task(struct work_struct *work)
drivers/vdpa/octeon_ep/octep_vdpa_main.c
592
struct octep_vdpa_mgmt_dev *mgmt_dev = container_of(work, struct octep_vdpa_mgmt_dev,
drivers/vdpa/vdpa_sim/vdpa_sim.c
174
static void vdpasim_work_fn(struct kthread_work *work)
drivers/vdpa/vdpa_sim/vdpa_sim.c
176
struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
drivers/vdpa/vdpa_sim/vdpa_sim.c
231
kthread_init_work(&vdpasim->work, vdpasim_work_fn);
drivers/vdpa/vdpa_sim/vdpa_sim.c
286
kthread_queue_work(vdpasim->worker, &vdpasim->work);
drivers/vdpa/vdpa_sim/vdpa_sim.c
48
struct kthread_work work;
drivers/vdpa/vdpa_sim/vdpa_sim.c
54
static void vdpasim_mm_work_fn(struct kthread_work *work)
drivers/vdpa/vdpa_sim/vdpa_sim.c
57
container_of(work, struct vdpasim_mm_work, work);
drivers/vdpa/vdpa_sim/vdpa_sim.c
69
struct kthread_work *work = &mm_work->work;
drivers/vdpa/vdpa_sim/vdpa_sim.c
71
kthread_init_work(work, vdpasim_mm_work_fn);
drivers/vdpa/vdpa_sim/vdpa_sim.c
72
kthread_queue_work(vdpasim->worker, work);
drivers/vdpa/vdpa_sim/vdpa_sim.c
74
kthread_flush_work(work);
drivers/vdpa/vdpa_sim/vdpa_sim.c
745
kthread_cancel_work_sync(&vdpasim->work);
drivers/vdpa/vdpa_sim/vdpa_sim.h
61
struct kthread_work work;
drivers/vdpa/vdpa_user/vduse_dev.c
1120
static void vduse_dev_irq_inject(struct work_struct *work)
drivers/vdpa/vdpa_user/vduse_dev.c
1122
struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);
drivers/vdpa/vdpa_user/vduse_dev.c
1130
static void vduse_vq_irq_inject(struct work_struct *work)
drivers/vdpa/vdpa_user/vduse_dev.c
1132
struct vduse_virtqueue *vq = container_of(work,
drivers/vdpa/vdpa_user/vduse_dev.c
543
static void vduse_vq_kick_work(struct work_struct *work)
drivers/vdpa/vdpa_user/vduse_dev.c
545
struct vduse_virtqueue *vq = container_of(work,
drivers/vfio/pci/mlx5/cmd.c
617
struct mlx5vf_async_data, work);
drivers/vfio/pci/mlx5/cmd.c
735
queue_work(migf->mvdev->cb_wq, &async_data->work);
drivers/vfio/pci/mlx5/cmd.h
71
struct work_struct work;
drivers/vfio/pci/mlx5/cmd.h
82
struct work_struct work;
drivers/vfio/pci/mlx5/main.c
1055
cancel_work_sync(&mvdev->saving_migf->async_data.work);
drivers/vfio/pci/mlx5/main.c
273
&migf->save_data[chunk_num - 1].work);
drivers/vfio/pci/mlx5/main.c
303
struct mlx5vf_save_work_data, work);
drivers/vfio/pci/mlx5/main.c
432
INIT_WORK(&migf->save_data[i].work,
drivers/vfio/pci/mlx5/main.c
635
INIT_WORK(&migf->async_data.work, mlx5vf_mig_file_cleanup_cb);
drivers/vfio/virqfd.c
104
static void virqfd_flush_inject(struct work_struct *work)
drivers/vfio/virqfd.c
106
struct virqfd *virqfd = container_of(work, struct virqfd, flush_inject);
drivers/vfio/virqfd.c
85
static void virqfd_shutdown(struct work_struct *work)
drivers/vfio/virqfd.c
87
struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
drivers/vfio/virqfd.c
97
static void virqfd_inject(struct work_struct *work)
drivers/vfio/virqfd.c
99
struct virqfd *virqfd = container_of(work, struct virqfd, inject);
drivers/vhost/net.c
1288
static void handle_tx_kick(struct vhost_work *work)
drivers/vhost/net.c
1290
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
drivers/vhost/net.c
1291
poll.work);
drivers/vhost/net.c
1297
static void handle_rx_kick(struct vhost_work *work)
drivers/vhost/net.c
1299
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
drivers/vhost/net.c
1300
poll.work);
drivers/vhost/net.c
1306
static void handle_tx_net(struct vhost_work *work)
drivers/vhost/net.c
1308
struct vhost_net *net = container_of(work, struct vhost_net,
drivers/vhost/net.c
1309
poll[VHOST_NET_VQ_TX].work);
drivers/vhost/net.c
1313
static void handle_rx_net(struct vhost_work *work)
drivers/vhost/net.c
1315
struct vhost_net *net = container_of(work, struct vhost_net,
drivers/vhost/net.c
1316
poll[VHOST_NET_VQ_RX].work);
drivers/vhost/scsi.c
1548
static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
drivers/vhost/scsi.c
1550
struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
drivers/vhost/scsi.c
1569
static void vhost_scsi_tmf_flush_work(struct work_struct *work)
drivers/vhost/scsi.c
1571
struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
drivers/vhost/scsi.c
1788
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
drivers/vhost/scsi.c
1790
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
drivers/vhost/scsi.c
1791
poll.work);
drivers/vhost/scsi.c
1827
static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
drivers/vhost/scsi.c
1829
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
drivers/vhost/scsi.c
1830
poll.work);
drivers/vhost/scsi.c
1844
static void vhost_scsi_handle_kick(struct vhost_work *work)
drivers/vhost/scsi.c
1846
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
drivers/vhost/scsi.c
1847
poll.work);
drivers/vhost/scsi.c
649
static void vhost_scsi_evt_work(struct vhost_work *work)
drivers/vhost/scsi.c
651
struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
drivers/vhost/scsi.c
686
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
drivers/vhost/scsi.c
688
struct vhost_scsi_virtqueue *svq = container_of(work,
drivers/vhost/test.c
102
static void handle_vq_kick(struct vhost_work *work)
drivers/vhost/test.c
104
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
drivers/vhost/test.c
105
poll.work);
drivers/vhost/vdpa.c
166
static void handle_vq_kick(struct vhost_work *work)
drivers/vhost/vdpa.c
168
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
drivers/vhost/vdpa.c
169
poll.work);
drivers/vhost/vhost.c
150
struct vhost_work work;
drivers/vhost/vhost.c
154
static void vhost_flush_work(struct vhost_work *work)
drivers/vhost/vhost.c
158
s = container_of(work, struct vhost_flush_struct, work);
drivers/vhost/vhost.c
176
struct vhost_work *work = &poll->work;
drivers/vhost/vhost.c
182
work->fn(work);
drivers/vhost/vhost.c
189
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
drivers/vhost/vhost.c
191
clear_bit(VHOST_WORK_QUEUED, &work->flags);
drivers/vhost/vhost.c
192
work->fn = fn;
drivers/vhost/vhost.c
208
vhost_work_init(&poll->work, fn);
drivers/vhost/vhost.c
245
struct vhost_work *work)
drivers/vhost/vhost.c
247
if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
drivers/vhost/vhost.c
252
llist_add(&work->node, &worker->work_list);
drivers/vhost/vhost.c
257
bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
drivers/vhost/vhost.c
266
vhost_worker_queue(worker, work);
drivers/vhost/vhost.c
288
vhost_work_init(&flush.work, vhost_flush_work);
drivers/vhost/vhost.c
290
vhost_worker_queue(worker, &flush.work);
drivers/vhost/vhost.c
335
vhost_vq_work_queue(poll->vq, &poll->work);
drivers/vhost/vhost.c
403
struct vhost_work *work, *work_next;
drivers/vhost/vhost.c
424
llist_for_each_entry_safe(work, work_next, node, node) {
drivers/vhost/vhost.c
425
clear_bit(VHOST_WORK_QUEUED, &work->flags);
drivers/vhost/vhost.c
428
work->fn(work);
drivers/vhost/vhost.c
441
struct vhost_work *work, *work_next;
drivers/vhost/vhost.c
451
llist_for_each_entry_safe(work, work_next, node, node) {
drivers/vhost/vhost.c
452
clear_bit(VHOST_WORK_QUEUED, &work->flags);
drivers/vhost/vhost.c
454
work->fn(work);
drivers/vhost/vhost.c
634
struct vhost_work work;
drivers/vhost/vhost.c
639
static void vhost_attach_cgroups_work(struct vhost_work *work)
drivers/vhost/vhost.c
643
s = container_of(work, struct vhost_attach_cgroups_struct, work);
drivers/vhost/vhost.c
654
vhost_work_init(&attach.work, vhost_attach_cgroups_work);
drivers/vhost/vhost.c
655
vhost_worker_queue(worker, &attach.work);
drivers/vhost/vhost.h
21
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
drivers/vhost/vhost.h
244
bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
drivers/vhost/vhost.h
60
struct vhost_work work;
drivers/vhost/vhost.h
73
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
drivers/vhost/vsock.c
265
static void vhost_transport_send_pkt_work(struct vhost_work *work)
drivers/vhost/vsock.c
270
vsock = container_of(work, struct vhost_vsock, send_pkt_work);
drivers/vhost/vsock.c
494
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
drivers/vhost/vsock.c
496
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
drivers/vhost/vsock.c
497
poll.work);
drivers/vhost/vsock.c
572
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
drivers/vhost/vsock.c
574
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
drivers/vhost/vsock.c
575
poll.work);
drivers/video/backlight/adp8860_bl.c
179
static void adp8860_led_work(struct work_struct *work)
drivers/video/backlight/adp8860_bl.c
181
struct adp8860_led *led = container_of(work, struct adp8860_led, work);
drivers/video/backlight/adp8860_bl.c
194
schedule_work(&led->work);
drivers/video/backlight/adp8860_bl.c
267
INIT_WORK(&led_dat->work, adp8860_led_work);
drivers/video/backlight/adp8860_bl.c
291
cancel_work_sync(&led[i].work);
drivers/video/backlight/adp8860_bl.c
306
cancel_work_sync(&data->led[i].work);
drivers/video/backlight/adp8860_bl.c
97
struct work_struct work;
drivers/video/backlight/adp8870_bl.c
120
struct work_struct work;
drivers/video/backlight/adp8870_bl.c
194
static void adp8870_led_work(struct work_struct *work)
drivers/video/backlight/adp8870_bl.c
196
struct adp8870_led *led = container_of(work, struct adp8870_led, work);
drivers/video/backlight/adp8870_bl.c
212
schedule_work(&led->work);
drivers/video/backlight/adp8870_bl.c
293
INIT_WORK(&led_dat->work, adp8870_led_work);
drivers/video/backlight/adp8870_bl.c
317
cancel_work_sync(&led[i].work);
drivers/video/backlight/adp8870_bl.c
332
cancel_work_sync(&data->led[i].work);
drivers/video/backlight/lm3630a_bl.c
114
static void lm3630a_delayed_func(struct work_struct *work)
drivers/video/backlight/lm3630a_bl.c
119
pchip = container_of(work, struct lm3630a_chip, work.work);
drivers/video/backlight/lm3630a_bl.c
137
queue_delayed_work(pchip->irqthread, &pchip->work, delay);
drivers/video/backlight/lm3630a_bl.c
155
INIT_DELAYED_WORK(&pchip->work, lm3630a_delayed_func);
drivers/video/backlight/lm3630a_bl.c
45
struct delayed_work work;
drivers/video/backlight/qcom-wled.c
293
static void wled_ovp_work(struct work_struct *work)
drivers/video/backlight/qcom-wled.c
295
struct wled *wled = container_of(work,
drivers/video/backlight/qcom-wled.c
296
struct wled, ovp_work.work);
drivers/video/fbdev/atmel_lcdfb.c
835
static void atmel_lcdfb_task(struct work_struct *work)
drivers/video/fbdev/atmel_lcdfb.c
838
container_of(work, struct atmel_lcdfb_info, task);
drivers/video/fbdev/core/fb_defio.c
261
static void fb_deferred_io_work(struct work_struct *work)
drivers/video/fbdev/core/fb_defio.c
263
struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
drivers/video/fbdev/core/fbcon.c
3329
static void fbcon_register_existing_fbs(struct work_struct *work)
drivers/video/fbdev/core/fbcon.c
393
static void fb_flashcursor(struct work_struct *work)
drivers/video/fbdev/core/fbcon.c
395
struct fbcon_par *par = container_of(work, struct fbcon_par, cursor_work.work);
drivers/video/fbdev/omap/lcd_mipid.c
391
static void mipid_esd_work(struct work_struct *work)
drivers/video/fbdev/omap/lcd_mipid.c
393
struct mipid_device *md = container_of(work, struct mipid_device,
drivers/video/fbdev/omap/lcd_mipid.c
394
esd_work.work);
drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
1070
static void dsicm_ulps_work(struct work_struct *work)
drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
1072
struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
1073
ulps_work.work);
drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
83
static void dsicm_te_timeout_work_callback(struct work_struct *work);
drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
842
static void dsicm_te_timeout_work_callback(struct work_struct *work)
drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
844
struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
845
te_timeout_work.work);
drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
88
static void dsicm_ulps_work(struct work_struct *work);
drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
324
static void dispc_error_worker(struct work_struct *work)
drivers/video/fbdev/omap2/omapfb/dss/dsi.c
4010
static void dsi_framedone_timeout_work_callback(struct work_struct *work)
drivers/video/fbdev/omap2/omapfb/dss/dsi.c
4012
struct dsi_data *dsi = container_of(work, struct dsi_data,
drivers/video/fbdev/omap2/omapfb/dss/dsi.c
4013
framedone_timeout_work.work);
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
1655
static void omapfb_auto_update_work(struct work_struct *work)
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
1664
d = container_of(work, struct omapfb_display_data,
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
1665
auto_update_work.work);
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
1712
omapfb_auto_update_work(&d->auto_update_work.work);
drivers/video/fbdev/pxafb.c
1624
static void pxafb_task(struct work_struct *work)
drivers/video/fbdev/pxafb.c
1627
container_of(work, struct pxafb_info, task);
drivers/video/fbdev/smscufx.c
1085
static void ufx_release_urb_work(struct work_struct *work)
drivers/video/fbdev/smscufx.c
1087
struct urb_node *unode = container_of(work, struct urb_node,
drivers/video/fbdev/smscufx.c
1088
release_urb_work.work);
drivers/virt/acrn/ioreq.c
25
static void ioreq_dispatcher(struct work_struct *work);
drivers/virt/acrn/ioreq.c
544
static void ioreq_dispatcher(struct work_struct *work)
drivers/virt/acrn/irqfd.c
60
static void hsm_irqfd_shutdown_work(struct work_struct *work)
drivers/virt/acrn/irqfd.c
65
irqfd = container_of(work, struct hsm_irqfd, shutdown);
drivers/virt/nitro_enclaves/ne_pci_dev.c
214
static void ne_event_work_handler(struct work_struct *work)
drivers/virt/nitro_enclaves/ne_pci_dev.c
219
container_of(work, struct ne_pci_dev, notify_work);
drivers/virt/vboxguest/vboxguest_core.c
1837
schedule_work(&gdev->mem_balloon.work);
drivers/virt/vboxguest/vboxguest_core.c
355
static void vbg_balloon_work(struct work_struct *work)
drivers/virt/vboxguest/vboxguest_core.c
358
container_of(work, struct vbg_dev, mem_balloon.work);
drivers/virt/vboxguest/vboxguest_core.c
957
INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
drivers/virt/vboxguest/vboxguest_core.h
38
struct work_struct work;
drivers/virtio/virtio_balloon.c
539
static void update_balloon_stats_func(struct work_struct *work)
drivers/virtio/virtio_balloon.c
543
vb = container_of(work, struct virtio_balloon,
drivers/virtio/virtio_balloon.c
551
static void update_balloon_size_func(struct work_struct *work)
drivers/virtio/virtio_balloon.c
556
vb = container_of(work, struct virtio_balloon,
drivers/virtio/virtio_balloon.c
572
queue_work(system_freezable_wq, work);
drivers/virtio/virtio_balloon.c
788
static void report_free_page_func(struct work_struct *work)
drivers/virtio/virtio_balloon.c
790
struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
drivers/virtio/virtio_mem.c
2415
static void virtio_mem_run_wq(struct work_struct *work)
drivers/virtio/virtio_mem.c
2417
struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
drivers/watchdog/mei_wdt.c
398
static void mei_wdt_unregister_work(struct work_struct *work)
drivers/watchdog/mei_wdt.c
400
struct mei_wdt *wdt = container_of(work, struct mei_wdt, unregister);
drivers/watchdog/retu_wdt.c
48
static void retu_wdt_ping_work(struct work_struct *work)
drivers/watchdog/retu_wdt.c
50
struct retu_wdt_dev *wdev = container_of(to_delayed_work(work),
drivers/watchdog/watchdog_core.h
55
struct kthread_work work;
drivers/watchdog/watchdog_dev.c
1048
kthread_init_work(&wd_data->work, watchdog_ping_work);
drivers/watchdog/watchdog_dev.c
1138
kthread_cancel_work_sync(&wd_data->work);
drivers/watchdog/watchdog_dev.c
1289
kthread_cancel_work_sync(&wd_data->work);
drivers/watchdog/watchdog_dev.c
217
static void watchdog_ping_work(struct kthread_work *work)
drivers/watchdog/watchdog_dev.c
221
wd_data = container_of(work, struct watchdog_core_data, work);
drivers/watchdog/watchdog_dev.c
235
kthread_queue_work(watchdog_kworker, &wd_data->work);
drivers/xen/events/events_base.c
313
static void delayed_free_irq(struct work_struct *work)
drivers/xen/events/events_base.c
315
struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
drivers/xen/events/events_base.c
648
static void xen_irq_lateeoi_worker(struct work_struct *work)
drivers/xen/events/events_base.c
655
eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
drivers/xen/grant-table.c
1312
static void gnttab_unmap_work(struct work_struct *work)
drivers/xen/grant-table.c
1315
*unmap_data = container_of(work,
drivers/xen/grant-table.c
1317
gnttab_work.work);
drivers/xen/mcelog.c
329
static void xen_mce_work_fn(struct work_struct *work)
drivers/xen/pcpu.c
341
static void xen_pcpu_work_fn(struct work_struct *work)
drivers/xen/privcmd.c
912
static void irqfd_shutdown(struct work_struct *work)
drivers/xen/privcmd.c
915
container_of(work, struct privcmd_kernel_irqfd, shutdown);
drivers/xen/pvcalls-back.c
230
static void pvcalls_back_ioworker(struct work_struct *work)
drivers/xen/pvcalls-back.c
232
struct pvcalls_ioworker *ioworker = container_of(work,
drivers/xen/pvcalls-back.c
516
static void __pvcalls_back_accept(struct work_struct *work)
drivers/xen/pvcalls-back.c
519
work, struct sockpass_mapping, register_work);
drivers/xen/xenbus/xenbus_probe_frontend.c
110
schedule_work(&xdev->work);
drivers/xen/xenbus/xenbus_probe_frontend.c
122
INIT_WORK(&xdev->work, xenbus_frontend_delayed_restore);
drivers/xen/xenbus/xenbus_probe_frontend.c
96
struct xenbus_device *xdev = container_of(w, struct xenbus_device, work);
fs/affs/super.c
65
static void flush_superblock(struct work_struct *work)
fs/affs/super.c
70
sbi = container_of(work, struct affs_sb_info, sb_work.work);
fs/afs/callback.c
29
void afs_invalidate_mmap_work(struct work_struct *work)
fs/afs/callback.c
31
struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);
fs/afs/cell.c
24
static void afs_destroy_cell_work(struct work_struct *work);
fs/afs/cell.c
25
static void afs_manage_cell_work(struct work_struct *work);
fs/afs/cell.c
588
static void afs_destroy_cell_work(struct work_struct *work)
fs/afs/cell.c
590
struct afs_cell *cell = container_of(work, struct afs_cell, destroyer);
fs/afs/cell.c
878
static void afs_manage_cell_work(struct work_struct *work)
fs/afs/cell.c
880
struct afs_cell *cell = container_of(work, struct afs_cell, manager);
fs/afs/cm_security.c
100
struct afs_net *net = container_of(work, struct afs_net, rx_oob_work);
fs/afs/cm_security.c
98
void afs_process_oob_queue(struct work_struct *work)
fs/afs/cmservice.c
101
.work = SRXAFSCB_CallBack,
fs/afs/cmservice.c
164
static void SRXAFSCB_CallBack(struct work_struct *work)
fs/afs/cmservice.c
166
struct afs_call *call = container_of(work, struct afs_call, work);
fs/afs/cmservice.c
287
static void SRXAFSCB_InitCallBackState(struct work_struct *work)
fs/afs/cmservice.c
289
struct afs_call *call = container_of(work, struct afs_call, work);
fs/afs/cmservice.c
379
static void SRXAFSCB_Probe(struct work_struct *work)
fs/afs/cmservice.c
381
struct afs_call *call = container_of(work, struct afs_call, work);
fs/afs/cmservice.c
41
.work = SRXAFSCB_CallBack,
fs/afs/cmservice.c
412
static void SRXAFSCB_ProbeUuid(struct work_struct *work)
fs/afs/cmservice.c
414
struct afs_call *call = container_of(work, struct afs_call, work);
fs/afs/cmservice.c
489
static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
fs/afs/cmservice.c
491
struct afs_call *call = container_of(work, struct afs_call, work);
fs/afs/cmservice.c
51
.work = SRXAFSCB_InitCallBackState,
fs/afs/cmservice.c
61
.work = SRXAFSCB_InitCallBackState,
fs/afs/cmservice.c
71
.work = SRXAFSCB_Probe,
fs/afs/cmservice.c
81
.work = SRXAFSCB_ProbeUuid,
fs/afs/cmservice.c
91
.work = SRXAFSCB_TellMeAboutYourself,
fs/afs/file.c
311
void afs_fetch_data_async_rx(struct work_struct *work)
fs/afs/file.c
313
struct afs_call *call = container_of(work, struct afs_call, async_work);
fs/afs/flock.c
281
void afs_lock_work(struct work_struct *work)
fs/afs/flock.c
284
container_of(work, struct afs_vnode, lock_work.work);
fs/afs/fs_probe.c
419
void afs_fs_probe_dispatcher(struct work_struct *work)
fs/afs/fs_probe.c
421
struct afs_net *net = container_of(work, struct afs_net, fs_prober);
fs/afs/internal.h
1084
void afs_process_oob_queue(struct work_struct *work);
fs/afs/internal.h
1159
void afs_fetch_data_async_rx(struct work_struct *work);
fs/afs/internal.h
133
struct work_struct work; /* actual work processor */
fs/afs/internal.h
209
void (*async_rx)(struct work_struct *work);
fs/afs/internal.h
212
void (*work)(struct work_struct *work);
fs/afs/mntpt.c
204
static void afs_mntpt_expiry_timed_out(struct work_struct *work)
fs/afs/mntpt.c
24
static void afs_mntpt_expiry_timed_out(struct work_struct *work);
fs/afs/rxrpc.c
172
INIT_WORK(&call->work, call->type->work);
fs/afs/rxrpc.c
21
static void afs_deferred_free_worker(struct work_struct *work);
fs/afs/rxrpc.c
232
static void afs_deferred_free_worker(struct work_struct *work)
fs/afs/rxrpc.c
234
struct afs_call *call = container_of(work, struct afs_call, free_work);
fs/afs/rxrpc.c
263
if (call->type->work) {
fs/afs/rxrpc.c
265
if (!queue_work(afs_wq, &call->work))
fs/afs/rxrpc.c
714
static void afs_process_async_call(struct work_struct *work)
fs/afs/rxrpc.c
716
struct afs_call *call = container_of(work, struct afs_call, async_work);
fs/afs/rxrpc.c
739
void afs_charge_preallocation(struct work_struct *work)
fs/afs/rxrpc.c
742
container_of(work, struct afs_net, charge_preallocation_work);
fs/afs/rxrpc.c
826
call->work.func = call->type->work;
fs/afs/server.c
19
static void afs_server_destroyer(struct work_struct *work);
fs/afs/server.c
466
static void afs_server_destroyer(struct work_struct *work)
fs/afs/server.c
469
struct afs_server *server = container_of(work, struct afs_server, destroyer);
fs/afs/volume.c
15
static void afs_destroy_volume(struct work_struct *work);
fs/afs/volume.c
237
static void afs_destroy_volume(struct work_struct *work)
fs/afs/volume.c
239
struct afs_volume *volume = container_of(work, struct afs_volume, destructor);
fs/afs/write.c
103
static void afs_issue_write_worker(struct work_struct *work)
fs/afs/write.c
105
struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
fs/afs/write.c
174
subreq->work.func = afs_issue_write_worker;
fs/afs/write.c
175
if (!queue_work(system_dfl_wq, &subreq->work))
fs/aio.c
1639
static void aio_fsync_work(struct work_struct *work)
fs/aio.c
1641
struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
fs/aio.c
1665
INIT_WORK(&req->work, aio_fsync_work);
fs/aio.c
1666
schedule_work(&req->work);
fs/aio.c
1670
static void aio_poll_put_work(struct work_struct *work)
fs/aio.c
1672
struct poll_iocb *req = container_of(work, struct poll_iocb, work);
fs/aio.c
1723
static void aio_poll_complete_work(struct work_struct *work)
fs/aio.c
1725
struct poll_iocb *req = container_of(work, struct poll_iocb, work);
fs/aio.c
1749
schedule_work(&req->work);
fs/aio.c
175
struct work_struct work;
fs/aio.c
1777
schedule_work(&aiocb->poll.work);
fs/aio.c
1820
INIT_WORK(&req->work, aio_poll_put_work);
fs/aio.c
1821
schedule_work(&req->work);
fs/aio.c
1839
schedule_work(&req->work);
fs/aio.c
188
struct work_struct work;
fs/aio.c
1908
INIT_WORK(&req->work, aio_poll_complete_work);
fs/aio.c
615
static void free_ioctx(struct work_struct *work)
fs/aio.c
617
struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
fs/backing-file.c
130
static void backing_aio_complete_work(struct work_struct *work)
fs/backing-file.c
132
struct backing_aio *aio = container_of(work, struct backing_aio, work);
fs/backing-file.c
145
INIT_WORK(&aio->work, backing_aio_complete_work);
fs/backing-file.c
147
&aio->work);
fs/backing-file.c
84
struct work_struct work;
fs/btrfs/async-thread.c
213
struct btrfs_work *work;
fs/btrfs/async-thread.c
222
work = list_first_entry(list, struct btrfs_work, ordered_list);
fs/btrfs/async-thread.c
223
if (!test_bit(WORK_DONE_BIT, &work->flags))
fs/btrfs/async-thread.c
239
if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
fs/btrfs/async-thread.c
241
trace_btrfs_ordered_sched(work);
fs/btrfs/async-thread.c
243
work->ordered_func(work, false);
fs/btrfs/async-thread.c
247
list_del(&work->ordered_list);
fs/btrfs/async-thread.c
250
if (work == self) {
fs/btrfs/async-thread.c
278
work->ordered_func(work, true);
fs/btrfs/async-thread.c
280
trace_btrfs_all_work_done(wq->fs_info, work);
fs/btrfs/async-thread.c
294
struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
fs/btrfs/async-thread.c
296
struct btrfs_workqueue *wq = work->wq;
fs/btrfs/async-thread.c
307
if (work->ordered_func)
fs/btrfs/async-thread.c
310
trace_btrfs_work_sched(work);
fs/btrfs/async-thread.c
312
work->func(work);
fs/btrfs/async-thread.c
321
set_bit(WORK_DONE_BIT, &work->flags);
fs/btrfs/async-thread.c
322
run_ordered_work(wq, work);
fs/btrfs/async-thread.c
325
trace_btrfs_all_work_done(wq->fs_info, work);
fs/btrfs/async-thread.c
329
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
fs/btrfs/async-thread.c
332
work->func = func;
fs/btrfs/async-thread.c
333
work->ordered_func = ordered_func;
fs/btrfs/async-thread.c
334
INIT_WORK(&work->normal_work, btrfs_work_helper);
fs/btrfs/async-thread.c
335
INIT_LIST_HEAD(&work->ordered_list);
fs/btrfs/async-thread.c
336
work->flags = 0;
fs/btrfs/async-thread.c
339
void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
fs/btrfs/async-thread.c
343
work->wq = wq;
fs/btrfs/async-thread.c
345
if (work->ordered_func) {
fs/btrfs/async-thread.c
347
list_add_tail(&work->ordered_list, &wq->ordered_list);
fs/btrfs/async-thread.c
350
trace_btrfs_work_queued(work);
fs/btrfs/async-thread.c
351
queue_work(wq->normal_wq, &work->normal_work);
fs/btrfs/async-thread.c
55
struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
fs/btrfs/async-thread.c
57
return work->wq->fs_info;
fs/btrfs/async-thread.h
40
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
fs/btrfs/async-thread.h
43
struct btrfs_work *work);
fs/btrfs/async-thread.h
46
struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work);
fs/btrfs/bio.c
374
static void simple_end_io_work(struct work_struct *work)
fs/btrfs/bio.c
376
struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);
fs/btrfs/bio.c
423
static void orig_write_end_io_work(struct work_struct *work)
fs/btrfs/bio.c
425
struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);
fs/btrfs/bio.c
461
static void clone_write_end_io_work(struct work_struct *work)
fs/btrfs/bio.c
463
struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);
fs/btrfs/bio.c
610
struct btrfs_work work;
fs/btrfs/bio.c
621
static void run_one_async_start(struct btrfs_work *work)
fs/btrfs/bio.c
624
container_of(work, struct async_submit_bio, work);
fs/btrfs/bio.c
642
static void run_one_async_done(struct btrfs_work *work, bool do_free)
fs/btrfs/bio.c
645
container_of(work, struct async_submit_bio, work);
fs/btrfs/bio.c
649
kfree(container_of(work, struct async_submit_bio, work));
fs/btrfs/bio.c
721
btrfs_init_work(&async->work, run_one_async_start, run_one_async_done);
fs/btrfs/bio.c
722
btrfs_queue_work(fs_info->workers, &async->work);
fs/btrfs/block-group.c
1895
void btrfs_reclaim_bgs_work(struct work_struct *work)
fs/btrfs/block-group.c
1898
container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
fs/btrfs/block-group.c
691
ASSERT(current_work() == &caching_ctl->work.normal_work);
fs/btrfs/block-group.c
865
static noinline void caching_thread(struct btrfs_work *work)
fs/btrfs/block-group.c
872
caching_ctl = container_of(work, struct btrfs_caching_control, work);
fs/btrfs/block-group.c
966
btrfs_init_work(&caching_ctl->work, caching_thread, NULL);
fs/btrfs/block-group.c
990
btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
fs/btrfs/block-group.h
111
struct btrfs_work work;
fs/btrfs/block-group.h
353
void btrfs_reclaim_bgs_work(struct work_struct *work);
fs/btrfs/delayed-inode.c
1315
struct btrfs_work work;
fs/btrfs/delayed-inode.c
1318
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
fs/btrfs/delayed-inode.c
1330
async_work = container_of(work, struct btrfs_async_delayed_work, work);
fs/btrfs/delayed-inode.c
1392
btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL);
fs/btrfs/delayed-inode.c
1395
btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
fs/btrfs/discard.c
383
cancel_delayed_work_sync(&discard_ctl->work);
fs/btrfs/discard.c
407
if (!delayed_work_pending(&discard_ctl->work))
fs/btrfs/discard.c
418
if (!override && delayed_work_pending(&discard_ctl->work))
fs/btrfs/discard.c
459
&discard_ctl->work, nsecs_to_jiffies(delay));
fs/btrfs/discard.c
518
static void btrfs_discard_workfn(struct work_struct *work)
fs/btrfs/discard.c
528
discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);
fs/btrfs/discard.c
805
INIT_DELAYED_WORK(&discard_ctl->work, btrfs_discard_workfn);
fs/btrfs/discard.c
826
cancel_delayed_work_sync(&fs_info->discard_ctl.work);
fs/btrfs/extent_map.c
1301
static void btrfs_extent_map_shrinker_worker(struct work_struct *work)
fs/btrfs/extent_map.c
1310
fs_info = container_of(work, struct btrfs_fs_info, em_shrinker_work);
fs/btrfs/file-item.c
800
static void csum_one_bio_work(struct work_struct *work)
fs/btrfs/file-item.c
802
struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, csum_work);
fs/btrfs/fs.h
427
struct delayed_work work;
fs/btrfs/inode.c
1657
static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
fs/btrfs/inode.c
1659
struct async_chunk *async_chunk = container_of(work, struct async_chunk,
fs/btrfs/inode.c
1660
work);
fs/btrfs/inode.c
1661
struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
fs/btrfs/inode.c
1771
btrfs_init_work(&async_chunk[i].work, compress_file_range,
fs/btrfs/inode.c
1777
btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
fs/btrfs/inode.c
2848
struct btrfs_work work;
fs/btrfs/inode.c
2851
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
fs/btrfs/inode.c
2854
container_of(work, struct btrfs_writepage_fixup, work);
fs/btrfs/inode.c
3036
btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
fs/btrfs/inode.c
3039
btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
fs/btrfs/inode.c
770
struct btrfs_work work;
fs/btrfs/inode.c
8866
struct btrfs_work work;
fs/btrfs/inode.c
8869
static void btrfs_run_delalloc_work(struct btrfs_work *work)
fs/btrfs/inode.c
8874
delalloc_work = container_of(work, struct btrfs_delalloc_work,
fs/btrfs/inode.c
8875
work);
fs/btrfs/inode.c
8888
struct btrfs_delalloc_work *work;
fs/btrfs/inode.c
8890
work = kmalloc_obj(*work, GFP_NOFS);
fs/btrfs/inode.c
8891
if (!work)
fs/btrfs/inode.c
8894
init_completion(&work->completion);
fs/btrfs/inode.c
8895
INIT_LIST_HEAD(&work->list);
fs/btrfs/inode.c
8896
work->inode = inode;
fs/btrfs/inode.c
8897
btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
fs/btrfs/inode.c
8899
return work;
fs/btrfs/inode.c
8909
struct btrfs_delalloc_work *work, *next;
fs/btrfs/inode.c
8939
work = btrfs_alloc_delalloc_work(tmp_inode);
fs/btrfs/inode.c
8940
if (!work) {
fs/btrfs/inode.c
8945
list_add_tail(&work->list, &works);
fs/btrfs/inode.c
8947
&work->work);
fs/btrfs/inode.c
8962
list_for_each_entry_safe(work, next, &works, list) {
fs/btrfs/inode.c
8963
list_del_init(&work->list);
fs/btrfs/inode.c
8964
wait_for_completion(&work->completion);
fs/btrfs/inode.c
8965
kfree(work);
fs/btrfs/inode.c
932
static void compress_file_range(struct btrfs_work *work)
fs/btrfs/inode.c
935
container_of(work, struct async_chunk, work);
fs/btrfs/ordered-data.c
342
static void finish_ordered_fn(struct btrfs_work *work)
fs/btrfs/ordered-data.c
346
ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
fs/btrfs/ordered-data.c
411
btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
fs/btrfs/ordered-data.c
412
btrfs_queue_work(wq, &ordered->work);
fs/btrfs/ordered-data.c
733
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
fs/btrfs/ordered-data.c
737
ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
fs/btrfs/ordered-data.h
151
struct btrfs_work work;
fs/btrfs/qgroup.c
3836
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
fs/btrfs/qgroup.c
3838
struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
fs/btrfs/raid56.c
142
static void rmw_rbio_work(struct work_struct *work);
fs/btrfs/raid56.c
143
static void rmw_rbio_work_locked(struct work_struct *work);
fs/btrfs/raid56.c
148
static void scrub_rbio_work_locked(struct work_struct *work);
fs/btrfs/raid56.c
185
INIT_WORK(&rbio->work, work_func);
fs/btrfs/raid56.c
186
queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
fs/btrfs/raid56.c
2203
static void recover_rbio_work(struct work_struct *work)
fs/btrfs/raid56.c
2207
rbio = container_of(work, struct btrfs_raid_bio, work);
fs/btrfs/raid56.c
2212
static void recover_rbio_work_locked(struct work_struct *work)
fs/btrfs/raid56.c
2214
recover_rbio(container_of(work, struct btrfs_raid_bio, work));
fs/btrfs/raid56.c
2545
static void rmw_rbio_work(struct work_struct *work)
fs/btrfs/raid56.c
2549
rbio = container_of(work, struct btrfs_raid_bio, work);
fs/btrfs/raid56.c
2554
static void rmw_rbio_work_locked(struct work_struct *work)
fs/btrfs/raid56.c
2556
rmw_rbio(container_of(work, struct btrfs_raid_bio, work));
fs/btrfs/raid56.c
3004
static void scrub_rbio_work_locked(struct work_struct *work)
fs/btrfs/raid56.c
3006
scrub_rbio(container_of(work, struct btrfs_raid_bio, work));
fs/btrfs/raid56.c
868
static void recover_rbio_work_locked(struct work_struct *work);
fs/btrfs/raid56.h
118
struct work_struct work;
fs/btrfs/scrub.c
1158
static void scrub_stripe_read_repair_worker(struct work_struct *work)
fs/btrfs/scrub.c
1160
struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
fs/btrfs/scrub.c
1272
INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
fs/btrfs/scrub.c
1273
queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
fs/btrfs/scrub.c
1875
INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
fs/btrfs/scrub.c
1876
queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
fs/btrfs/scrub.c
190
struct work_struct work;
fs/btrfs/space-info.c
1247
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
fs/btrfs/space-info.c
1252
fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
fs/btrfs/space-info.c
1269
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
fs/btrfs/space-info.c
1279
fs_info = container_of(work, struct btrfs_fs_info,
fs/btrfs/space-info.c
1480
static void btrfs_async_reclaim_data_space(struct work_struct *work)
fs/btrfs/space-info.c
1485
fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
fs/btrfs/zoned.c
2711
static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
fs/btrfs/zoned.c
2715
container_of(work, struct btrfs_block_group, zone_finish_work);
fs/buffer.c
304
struct work_struct work;
fs/buffer.c
309
static void verify_bh(struct work_struct *work)
fs/buffer.c
312
container_of(work, struct postprocess_bh_ctx, work);
fs/buffer.c
322
static void decrypt_bh(struct work_struct *work)
fs/buffer.c
325
container_of(work, struct postprocess_bh_ctx, work);
fs/buffer.c
337
INIT_WORK(&ctx->work, verify_bh);
fs/buffer.c
338
fsverity_enqueue_verify_work(&ctx->work);
fs/buffer.c
367
INIT_WORK(&ctx->work, decrypt_bh);
fs/buffer.c
368
fscrypt_enqueue_decrypt_work(&ctx->work);
fs/buffer.c
370
INIT_WORK(&ctx->work, verify_bh);
fs/buffer.c
371
fsverity_enqueue_verify_work(&ctx->work);
fs/cachefiles/ondemand.c
362
static void ondemand_object_worker(struct work_struct *work)
fs/cachefiles/ondemand.c
365
container_of(work, struct cachefiles_ondemand_info, ondemand_work);
fs/ceph/file.c
1273
struct work_struct work;
fs/ceph/file.c
1277
static void ceph_aio_retry_work(struct work_struct *work);
fs/ceph/file.c
1349
INIT_WORK(&aio_work->work, ceph_aio_retry_work);
fs/ceph/file.c
1352
&aio_work->work);
fs/ceph/file.c
1407
static void ceph_aio_retry_work(struct work_struct *work)
fs/ceph/file.c
1410
container_of(work, struct ceph_aio_work, work);
fs/ceph/inode.c
2319
static void ceph_inode_work(struct work_struct *work)
fs/ceph/inode.c
2321
struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
fs/ceph/inode.c
40
static void ceph_inode_work(struct work_struct *work);
fs/ceph/mds_client.c
2404
static void ceph_cap_release_work(struct work_struct *work)
fs/ceph/mds_client.c
2407
container_of(work, struct ceph_mds_session, s_cap_release_work);
fs/ceph/mds_client.c
2447
static void ceph_cap_reclaim_work(struct work_struct *work)
fs/ceph/mds_client.c
2450
container_of(work, struct ceph_mds_client, cap_reclaim_work);
fs/ceph/mds_client.c
2494
static void ceph_cap_unlink_work(struct work_struct *work)
fs/ceph/mds_client.c
2497
container_of(work, struct ceph_mds_client, cap_unlink_work);
fs/ceph/mds_client.c
5473
static void delayed_work(struct work_struct *work)
fs/ceph/mds_client.c
5476
container_of(work, struct ceph_mds_client, delayed_work.work);
fs/ceph/mds_client.c
66
static void ceph_cap_release_work(struct work_struct *work);
fs/ceph/mds_client.c
67
static void ceph_cap_reclaim_work(struct work_struct *work);
fs/ceph/metric.c
213
static void metric_delayed_work(struct work_struct *work)
fs/ceph/metric.c
216
container_of(work, struct ceph_client_metric, delayed_work.work);
fs/crypto/crypto.c
46
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
fs/crypto/crypto.c
48
queue_work(fscrypt_read_workqueue, work);
fs/direct-io.c
321
static void dio_aio_complete_work(struct work_struct *work)
fs/direct-io.c
323
struct dio *dio = container_of(work, struct dio, complete_work);
fs/dlm/ast.c
160
INIT_WORK(&(*cb)->work, dlm_callback_work);
fs/dlm/ast.c
196
queue_work(ls->ls_callback_wq, &cb->work);
fs/dlm/ast.c
254
queue_work(ls->ls_callback_wq, &cb->work);
fs/dlm/ast.c
50
static void dlm_callback_work(struct work_struct *work)
fs/dlm/ast.c
52
struct dlm_callback *cb = container_of(work, struct dlm_callback, work);
fs/dlm/dlm_internal.h
242
struct work_struct work;
fs/dlm/lockspace.c
339
static void free_lockspace(struct work_struct *work)
fs/dlm/lockspace.c
341
struct dlm_ls *ls = container_of(work, struct dlm_ls, ls_free_work);
fs/dlm/lowcomms.c
1484
static void process_recv_sockets(struct work_struct *work)
fs/dlm/lowcomms.c
1486
struct connection *con = container_of(work, struct connection, rwork);
fs/dlm/lowcomms.c
1553
static void process_listen_recv_socket(struct work_struct *work)
fs/dlm/lowcomms.c
1620
static void process_send_sockets(struct work_struct *work)
fs/dlm/lowcomms.c
1622
struct connection *con = container_of(work, struct connection, swork);
fs/dlm/lowcomms.c
199
static void process_recv_sockets(struct work_struct *work);
fs/dlm/lowcomms.c
200
static void process_send_sockets(struct work_struct *work);
fs/dlm/lowcomms.c
201
static void process_dlm_messages(struct work_struct *work);
fs/dlm/lowcomms.c
861
static void process_dlm_messages(struct work_struct *work)
fs/erofs/zdata.c
100
struct work_struct work;
fs/erofs/zdata.c
1415
static void z_erofs_decompressqueue_work(struct work_struct *work)
fs/erofs/zdata.c
1418
container_of(work, struct z_erofs_decompressqueue, u.work);
fs/erofs/zdata.c
1428
static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
fs/erofs/zdata.c
1430
z_erofs_decompressqueue_work((struct work_struct *)work);
fs/erofs/zdata.c
1467
INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
fs/erofs/zdata.c
1468
queue_work(z_erofs_workqueue, &io->u.work);
fs/erofs/zdata.c
1474
queue_work(z_erofs_workqueue, &io->u.work);
fs/erofs/zdata.c
1482
z_erofs_decompressqueue_work(&io->u.work);
fs/erofs/zdata.c
1605
INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
fs/ext4/ext4.h
3860
extern void ext4_end_io_rsv_work(struct work_struct *work);
fs/ext4/mballoc.c
3642
static void ext4_discard_work(struct work_struct *work)
fs/ext4/mballoc.c
3644
struct ext4_sb_info *sbi = container_of(work,
fs/ext4/page-io.c
302
void ext4_end_io_rsv_work(struct work_struct *work)
fs/ext4/page-io.c
304
struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
fs/ext4/readpage.c
100
container_of(work, struct bio_post_read_ctx, work);
fs/ext4/readpage.c
130
INIT_WORK(&ctx->work, decrypt_work);
fs/ext4/readpage.c
131
fscrypt_enqueue_decrypt_work(&ctx->work);
fs/ext4/readpage.c
139
INIT_WORK(&ctx->work, verity_work);
fs/ext4/readpage.c
140
fsverity_enqueue_verify_work(&ctx->work);
fs/ext4/readpage.c
67
struct work_struct work;
fs/ext4/readpage.c
85
static void decrypt_work(struct work_struct *work)
fs/ext4/readpage.c
88
container_of(work, struct bio_post_read_ctx, work);
fs/ext4/readpage.c
97
static void verity_work(struct work_struct *work)
fs/ext4/super.c
745
static void update_super_work(struct work_struct *work)
fs/ext4/super.c
747
struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
fs/f2fs/compress.c
1787
static void f2fs_late_free_dic(struct work_struct *work)
fs/f2fs/compress.c
1790
container_of(work, struct decompress_io_ctx, free_work);
fs/f2fs/compress.c
1807
static void f2fs_verify_cluster(struct work_struct *work)
fs/f2fs/compress.c
1810
container_of(work, struct decompress_io_ctx, verity_work);
fs/f2fs/data.c
119
struct work_struct work;
fs/f2fs/data.c
190
static void f2fs_verify_bio(struct work_struct *work)
fs/f2fs/data.c
193
container_of(work, struct bio_post_read_ctx, work);
fs/f2fs/data.c
245
INIT_WORK(&ctx->work, f2fs_verify_bio);
fs/f2fs/data.c
246
fsverity_enqueue_verify_work(&ctx->work);
fs/f2fs/data.c
291
static void f2fs_post_read_work(struct work_struct *work)
fs/f2fs/data.c
294
container_of(work, struct bio_post_read_ctx, work);
fs/f2fs/data.c
337
INIT_WORK(&ctx->work, f2fs_post_read_work);
fs/f2fs/data.c
338
queue_work(ctx->sbi->post_read_wq, &ctx->work);
fs/f2fs/super.c
4710
static void f2fs_record_error_work(struct work_struct *work)
fs/f2fs/super.c
4712
struct f2fs_sb_info *sbi = container_of(work,
fs/fat/dir.c
373
unsigned char c, work[MSDOS_NAME];
fs/fat/dir.c
384
memcpy(work, de->name, sizeof(work));
fs/fat/dir.c
388
if (work[0] == 0x05)
fs/fat/dir.c
389
work[0] = 0xE5;
fs/fat/dir.c
393
c = work[i];
fs/fat/dir.c
396
chl = fat_shortname2uni(nls_disk, &work[i], 8 - i,
fs/fat/dir.c
413
ptname[i] = work[i];
fs/fat/dir.c
429
c = work[k];
fs/fat/dir.c
432
chl = fat_shortname2uni(nls_disk, &work[k], MSDOS_NAME - k,
fs/fat/dir.c
453
ptname[i] = work[k];
fs/file_table.c
495
static void ____fput(struct callback_head *work)
fs/file_table.c
497
__fput(container_of(work, struct file, f_task_work));
fs/fs-writeback.c
1060
struct wb_writeback_work *work;
fs/fs-writeback.c
1078
work = kmalloc_obj(*work, GFP_ATOMIC);
fs/fs-writeback.c
1079
if (work) {
fs/fs-writeback.c
1080
*work = *base_work;
fs/fs-writeback.c
1081
work->nr_pages = nr_pages;
fs/fs-writeback.c
1082
work->auto_free = 1;
fs/fs-writeback.c
1083
wb_queue_work(wb, work);
fs/fs-writeback.c
1098
work = &fallback_work;
fs/fs-writeback.c
1099
*work = *base_work;
fs/fs-writeback.c
1100
work->nr_pages = nr_pages;
fs/fs-writeback.c
1101
work->auto_free = 0;
fs/fs-writeback.c
1102
work->done = &fallback_work_done;
fs/fs-writeback.c
1104
wb_queue_work(wb, work);
fs/fs-writeback.c
1133
struct wb_writeback_work *work;
fs/fs-writeback.c
1176
work = kzalloc_obj(*work, GFP_NOWAIT);
fs/fs-writeback.c
1177
if (work) {
fs/fs-writeback.c
1178
work->nr_pages = dirty;
fs/fs-writeback.c
1179
work->sync_mode = WB_SYNC_NONE;
fs/fs-writeback.c
1180
work->range_cyclic = 1;
fs/fs-writeback.c
1181
work->reason = reason;
fs/fs-writeback.c
1182
work->done = done;
fs/fs-writeback.c
1183
work->auto_free = 1;
fs/fs-writeback.c
1184
wb_queue_work(wb, work);
fs/fs-writeback.c
1558
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
fs/fs-writeback.c
1567
if (!work->for_sync)
fs/fs-writeback.c
1573
trace_writeback_queue_io(wb, work, dirtied_before, moved);
fs/fs-writeback.c
165
static void finish_writeback_work(struct wb_writeback_work *work)
fs/fs-writeback.c
167
struct wb_completion *done = work->done;
fs/fs-writeback.c
169
if (work->auto_free)
fs/fs-writeback.c
170
kfree(work);
fs/fs-writeback.c
181
struct wb_writeback_work *work)
fs/fs-writeback.c
183
trace_writeback_queue(wb, work);
fs/fs-writeback.c
185
if (work->done)
fs/fs-writeback.c
186
atomic_inc(&work->done->cnt);
fs/fs-writeback.c
191
list_add_tail(&work->list, &wb->work_list);
fs/fs-writeback.c
1919
struct bdi_writeback *wb, struct wb_writeback_work *work)
fs/fs-writeback.c
1936
if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
fs/fs-writeback.c
194
finish_writeback_work(work);
fs/fs-writeback.c
1941
pages = min(pages, work->nr_pages);
fs/fs-writeback.c
1957
struct wb_writeback_work *work)
fs/fs-writeback.c
1960
.sync_mode = work->sync_mode,
fs/fs-writeback.c
1961
.tagged_writepages = work->tagged_writepages,
fs/fs-writeback.c
1962
.for_kupdate = work->for_kupdate,
fs/fs-writeback.c
1963
.for_background = work->for_background,
fs/fs-writeback.c
1964
.for_sync = work->for_sync,
fs/fs-writeback.c
1965
.range_cyclic = work->range_cyclic,
fs/fs-writeback.c
1975
if (work->for_kupdate)
fs/fs-writeback.c
1985
if (work->sb) {
fs/fs-writeback.c
2046
write_chunk = writeback_chunk_size(inode->i_sb, wb, work);
fs/fs-writeback.c
2057
if (work->done && work->done->progress_stamp && timeout &&
fs/fs-writeback.c
2058
(jiffies - work->done->progress_stamp) > HZ * timeout / 2)
fs/fs-writeback.c
2059
wake_up_all(work->done->waitq);
fs/fs-writeback.c
2062
work->nr_pages -= write_chunk - wbc.nr_to_write;
fs/fs-writeback.c
2104
if (work->nr_pages <= 0)
fs/fs-writeback.c
2112
struct wb_writeback_work *work)
fs/fs-writeback.c
2130
wrote += writeback_sb_inodes(sb, wb, work);
fs/fs-writeback.c
2137
if (work->nr_pages <= 0)
fs/fs-writeback.c
2148
struct wb_writeback_work work = {
fs/fs-writeback.c
2159
queue_io(wb, &work, jiffies);
fs/fs-writeback.c
2160
__writeback_inodes_wb(wb, &work);
fs/fs-writeback.c
2164
return nr_pages - work.nr_pages;
fs/fs-writeback.c
2183
struct wb_writeback_work *work)
fs/fs-writeback.c
2185
long nr_pages = work->nr_pages;
fs/fs-writeback.c
2197
if (work->nr_pages <= 0)
fs/fs-writeback.c
2206
if ((work->for_background || work->for_kupdate) &&
fs/fs-writeback.c
2214
if (work->for_background && !wb_over_bg_thresh(wb))
fs/fs-writeback.c
2220
trace_writeback_start(wb, work);
fs/fs-writeback.c
2228
if (work->for_kupdate) {
fs/fs-writeback.c
2232
} else if (work->for_background)
fs/fs-writeback.c
2235
queue_io(wb, work, dirtied_before);
fs/fs-writeback.c
2238
if (work->sb)
fs/fs-writeback.c
2239
progress = writeback_sb_inodes(work->sb, wb, work);
fs/fs-writeback.c
2241
progress = __writeback_inodes_wb(wb, work);
fs/fs-writeback.c
2242
trace_writeback_written(wb, work);
fs/fs-writeback.c
2270
trace_writeback_wait(wb, work);
fs/fs-writeback.c
2279
return nr_pages - work->nr_pages;
fs/fs-writeback.c
2287
struct wb_writeback_work *work = NULL;
fs/fs-writeback.c
2291
work = list_entry(wb->work_list.next,
fs/fs-writeback.c
2293
list_del_init(&work->list);
fs/fs-writeback.c
2296
return work;
fs/fs-writeback.c
2303
struct wb_writeback_work work = {
fs/fs-writeback.c
2311
return wb_writeback(wb, &work);
fs/fs-writeback.c
2337
struct wb_writeback_work work = {
fs/fs-writeback.c
2345
return wb_writeback(wb, &work);
fs/fs-writeback.c
2360
struct wb_writeback_work work = {
fs/fs-writeback.c
2367
nr_pages = wb_writeback(wb, &work);
fs/fs-writeback.c
2380
struct wb_writeback_work *work;
fs/fs-writeback.c
2384
while ((work = get_next_work_item(wb)) != NULL) {
fs/fs-writeback.c
2385
trace_writeback_exec(wb, work);
fs/fs-writeback.c
2386
wrote += wb_writeback(wb, work);
fs/fs-writeback.c
2387
finish_writeback_work(work);
fs/fs-writeback.c
2409
void wb_workfn(struct work_struct *work)
fs/fs-writeback.c
2411
struct bdi_writeback *wb = container_of(to_delayed_work(work),
fs/fs-writeback.c
2831
struct wb_writeback_work work = {
fs/fs-writeback.c
2844
bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
fs/fs-writeback.c
2909
struct wb_writeback_work work = {
fs/fs-writeback.c
2941
bdi_split_work_to_wbs(bdi, &work, false);
fs/fs-writeback.c
566
void inode_switch_wbs_work_fn(struct work_struct *work)
fs/fs-writeback.c
568
struct bdi_writeback *new_wb = container_of(work, struct bdi_writeback,
fs/fserror.c
100
INIT_WORK(&event->work, fserror_worker);
fs/fserror.c
171
schedule_work(&event->work);
fs/fserror.c
48
static void fserror_worker(struct work_struct *work)
fs/fserror.c
51
container_of(work, struct fserror_event, work);
fs/fuse/dax.c
1158
static void fuse_dax_free_mem_worker(struct work_struct *work)
fs/fuse/dax.c
1161
struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
fs/fuse/dax.c
1162
free_work.work);
fs/fuse/dev.c
121
queue_delayed_work(system_percpu_wq, &fc->timeout.work,
fs/fuse/dev.c
2463
cancel_delayed_work(&fc->timeout.work);
fs/fuse/dev.c
73
void fuse_check_timeout(struct work_struct *work)
fs/fuse/dev.c
75
struct delayed_work *dwork = to_delayed_work(work);
fs/fuse/dev.c
77
timeout.work);
fs/fuse/dev_uring.c
432
static void fuse_uring_async_stop_queues(struct work_struct *work)
fs/fuse/dev_uring.c
436
container_of(work, struct fuse_ring, async_teardown_work.work);
fs/fuse/dir.c
160
static void fuse_dentry_tree_work(struct work_struct *work)
fs/fuse/dir.c
198
void fuse_epoch_work(struct work_struct *work)
fs/fuse/dir.c
200
struct fuse_conn *fc = container_of(work, struct fuse_conn,
fs/fuse/fuse_i.h
1291
void fuse_check_timeout(struct work_struct *work);
fs/fuse/fuse_i.h
1296
void fuse_epoch_work(struct work_struct *work);
fs/fuse/fuse_i.h
993
struct delayed_work work;
fs/fuse/inode.c
1034
cancel_delayed_work_sync(&fc->timeout.work);
fs/fuse/inode.c
1288
INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout);
fs/fuse/inode.c
1289
queue_delayed_work(system_percpu_wq, &fc->timeout.work,
fs/fuse/virtio_fs.c
530
static void virtio_fs_hiprio_done_work(struct work_struct *work)
fs/fuse/virtio_fs.c
532
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
fs/fuse/virtio_fs.c
556
static void virtio_fs_request_dispatch_work(struct work_struct *work)
fs/fuse/virtio_fs.c
559
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
fs/fuse/virtio_fs.c
672
static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
fs/fuse/virtio_fs.c
675
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
fs/fuse/virtio_fs.c
801
static void virtio_fs_complete_req_work(struct work_struct *work)
fs/fuse/virtio_fs.c
804
container_of(work, typeof(*w), done_work);
fs/fuse/virtio_fs.c
810
static void virtio_fs_requests_done_work(struct work_struct *work)
fs/fuse/virtio_fs.c
812
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
fs/gfs2/glock.c
947
static void delete_work_func(struct work_struct *work)
fs/gfs2/glock.c
949
struct delayed_work *dwork = to_delayed_work(work);
fs/gfs2/glock.c
983
static void glock_work_func(struct work_struct *work)
fs/gfs2/glock.c
986
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
fs/gfs2/lock_dlm.c
655
static void gfs2_control_func(struct work_struct *work)
fs/gfs2/lock_dlm.c
657
struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
fs/gfs2/recovery.c
398
void gfs2_recover_func(struct work_struct *work)
fs/gfs2/recovery.c
400
struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work);
fs/gfs2/recovery.h
28
void gfs2_recover_func(struct work_struct *work);
fs/gfs2/super.c
685
void gfs2_freeze_func(struct work_struct *work)
fs/gfs2/super.c
687
struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
fs/gfs2/super.h
49
void gfs2_freeze_func(struct work_struct *work);
fs/gfs2/util.c
205
void gfs2_withdraw_func(struct work_struct *work)
fs/gfs2/util.c
207
struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_withdraw_work);
fs/gfs2/util.h
196
void gfs2_withdraw_func(struct work_struct *work);
fs/hfs/super.c
57
static void flush_mdb(struct work_struct *work)
fs/hfs/super.c
62
sbi = container_of(work, struct hfs_sb_info, mdb_work.work);
fs/hfsplus/super.c
287
static void delayed_sync_fs(struct work_struct *work)
fs/hfsplus/super.c
292
sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);
fs/iomap/bio.c
26
struct work_struct *work)
fs/iomap/direct-io.c
161
static void iomap_dio_complete_work(struct work_struct *work)
fs/iomap/direct-io.c
163
struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
fs/iomap/direct-io.c
230
INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
fs/iomap/direct-io.c
231
queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
fs/iomap/direct-io.c
236
iomap_dio_complete_work(&dio->aio.work);
fs/iomap/direct-io.c
48
struct work_struct work;
fs/iomap/ioend.c
77
struct work_struct *work)
fs/jffs2/wbuf.c
1152
static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
fs/jffs2/wbuf.c
1156
dwork = to_delayed_work(work);
fs/jffs2/wbuf.c
1160
static void delayed_wbuf_sync(struct work_struct *work)
fs/jffs2/wbuf.c
1162
struct jffs2_sb_info *c = work_to_sb(work);
fs/kernfs/file.c
912
static void kernfs_notify_workfn(struct work_struct *work)
fs/mbcache.c
342
static void mb_cache_shrink_worker(struct work_struct *work)
fs/mbcache.c
344
struct mb_cache *cache = container_of(work, struct mb_cache,
fs/netfs/direct_write.c
207
static void netfs_unbuffered_write_async(struct work_struct *work)
fs/netfs/direct_write.c
209
struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
fs/netfs/direct_write.c
289
INIT_WORK(&wreq->work, netfs_unbuffered_write_async);
fs/netfs/direct_write.c
291
queue_work(system_dfl_wq, &wreq->work);
fs/netfs/fscache_cookie.c
19
static void fscache_cookie_lru_worker(struct work_struct *work);
fs/netfs/fscache_cookie.c
20
static void fscache_cookie_worker(struct work_struct *work);
fs/netfs/fscache_cookie.c
363
INIT_WORK(&cookie->work, fscache_cookie_worker);
fs/netfs/fscache_cookie.c
830
static void fscache_cookie_worker(struct work_struct *work)
fs/netfs/fscache_cookie.c
832
struct fscache_cookie *cookie = container_of(work, struct fscache_cookie, work);
fs/netfs/fscache_cookie.c
84
if (!queue_work(fscache_wq, &cookie->work))
fs/netfs/fscache_cookie.c
881
static void fscache_cookie_lru_worker(struct work_struct *work)
fs/netfs/fscache_volume.c
18
static void fscache_create_volume_work(struct work_struct *work);
fs/netfs/fscache_volume.c
242
INIT_WORK(&volume->work, fscache_create_volume_work);
fs/netfs/fscache_volume.c
281
static void fscache_create_volume_work(struct work_struct *work)
fs/netfs/fscache_volume.c
285
container_of(work, struct fscache_volume, work);
fs/netfs/fscache_volume.c
313
if (!schedule_work(&volume->work))
fs/netfs/internal.h
110
void netfs_read_collection_worker(struct work_struct *work);
fs/netfs/internal.h
192
void netfs_write_collection_worker(struct work_struct *work);
fs/netfs/misc.c
324
queue_work(system_dfl_wq, &rreq->work);
fs/netfs/objects.c
129
cancel_work_sync(&rreq->work);
fs/netfs/objects.c
13
static void netfs_free_request(struct work_struct *work);
fs/netfs/objects.c
152
static void netfs_free_request(struct work_struct *work)
fs/netfs/objects.c
155
container_of(work, struct netfs_io_request, cleanup_work);
fs/netfs/objects.c
212
INIT_WORK(&subreq->work, NULL);
fs/netfs/objects.c
64
INIT_WORK(&rreq->work, netfs_read_collection_worker);
fs/netfs/objects.c
67
INIT_WORK(&rreq->work, netfs_write_collection_worker);
fs/netfs/read_collect.c
451
void netfs_read_collection_worker(struct work_struct *work)
fs/netfs/read_collect.c
453
struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);
fs/netfs/write_collect.c
421
void netfs_write_collection_worker(struct work_struct *work)
fs/netfs/write_collect.c
423
struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);
fs/nfs/blocklayout/blocklayout.c
209
static void bl_read_cleanup(struct work_struct *work)
fs/nfs/blocklayout/blocklayout.c
214
task = container_of(work, struct rpc_task, u.tk_work);
fs/nfs/blocklayout/blocklayout.c
341
static void bl_write_cleanup(struct work_struct *work)
fs/nfs/blocklayout/blocklayout.c
343
struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
fs/nfs/direct.c
198
INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
fs/nfs/direct.c
73
static void nfs_direct_write_schedule_work(struct work_struct *work);
fs/nfs/direct.c
730
static void nfs_direct_write_schedule_work(struct work_struct *work)
fs/nfs/direct.c
732
struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
fs/nfs/direct.c
753
queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
fs/nfs/internal.h
996
struct work_struct work;
fs/nfs/localio.c
1048
nfs_local_fsync_work(struct work_struct *work)
fs/nfs/localio.c
1054
ctx = container_of(work, struct nfs_local_fsync_ctx, work);
fs/nfs/localio.c
1077
INIT_WORK(&ctx->work, nfs_local_fsync_work);
fs/nfs/localio.c
1101
queue_work(nfslocaliod_workqueue, &ctx->work);
fs/nfs/localio.c
1104
queue_work(nfslocaliod_workqueue, &ctx->work);
fs/nfs/localio.c
202
void nfs_local_probe_async_work(struct work_struct *work)
fs/nfs/localio.c
205
container_of(work, struct nfs_client, cl_local_probe_work);
fs/nfs/localio.c
39
struct work_struct work;
fs/nfs/localio.c
54
struct work_struct work;
fs/nfs/localio.c
612
INIT_WORK(&iocb->work, iocb->aio_complete_work);
fs/nfs/localio.c
613
queue_work(nfsiod_workqueue, &iocb->work);
fs/nfs/localio.c
648
static void nfs_local_read_aio_complete_work(struct work_struct *work)
fs/nfs/localio.c
651
container_of(work, struct nfs_local_kiocb, work);
fs/nfs/localio.c
668
static void nfs_local_call_read(struct work_struct *work)
fs/nfs/localio.c
671
container_of(work, struct nfs_local_kiocb, work);
fs/nfs/localio.c
713
INIT_WORK(&iocb->work, nfs_local_call_read);
fs/nfs/localio.c
714
queue_work(nfslocaliod_workqueue, &iocb->work);
fs/nfs/localio.c
834
static void nfs_local_write_aio_complete_work(struct work_struct *work)
fs/nfs/localio.c
837
container_of(work, struct nfs_local_kiocb, work);
fs/nfs/localio.c
854
static void nfs_local_call_write(struct work_struct *work)
fs/nfs/localio.c
857
container_of(work, struct nfs_local_kiocb, work);
fs/nfs/localio.c
918
INIT_WORK(&iocb->work, nfs_local_call_write);
fs/nfs/localio.c
919
queue_work(nfslocaliod_workqueue, &iocb->work);
fs/nfs/namespace.c
256
static void nfs_expire_automounts(struct work_struct *work)
fs/nfs/namespace.c
26
static void nfs_expire_automounts(struct work_struct *work);
fs/nfs/nfs4renewd.c
55
nfs4_renew_state(struct work_struct *work)
fs/nfs/nfs4renewd.c
59
container_of(work, struct nfs_client, cl_renewd.work);
fs/nfsd/export.c
367
static void svc_export_release(struct work_struct *work)
fs/nfsd/export.c
369
struct svc_export *exp = container_of(to_rcu_work(work),
fs/nfsd/export.c
45
static void expkey_release(struct work_struct *work)
fs/nfsd/export.c
47
struct svc_expkey *key = container_of(to_rcu_work(work),
fs/nfsd/filecache.c
590
nfsd_file_gc_worker(struct work_struct *work)
fs/nfsd/nfs4callback.c
1708
nfsd4_run_cb_work(struct work_struct *work)
fs/nfsd/nfs4callback.c
1711
container_of(work, struct nfsd4_callback, cb_work);
fs/nfsd/nfs4proc.c
1627
struct nfsd4_ssc_umount_item *work = NULL;
fs/nfsd/nfs4proc.c
1633
work = kzalloc_obj(*work);
fs/nfsd/nfs4proc.c
1649
kfree(work);
fs/nfsd/nfs4proc.c
1658
kfree(work);
fs/nfsd/nfs4proc.c
1663
if (work) {
fs/nfsd/nfs4proc.c
1664
strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr));
fs/nfsd/nfs4proc.c
1665
refcount_set(&work->nsui_refcnt, 2);
fs/nfsd/nfs4proc.c
1666
work->nsui_busy = true;
fs/nfsd/nfs4proc.c
1667
list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
fs/nfsd/nfs4proc.c
1668
*nsui = work;
fs/nfsd/nfs4state.c
7087
nfsd4_state_shrinker_worker(struct work_struct *work)
fs/nfsd/nfs4state.c
7089
struct nfsd_net *nn = container_of(work, struct nfsd_net,
fs/nilfs2/segment.c
828
static void nilfs_iput_work_func(struct work_struct *work)
fs/nilfs2/segment.c
830
struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
fs/notify/fanotify/fanotify_user.c
109
static void perm_group_watchdog(struct work_struct *work);
fs/notify/fanotify/fanotify_user.c
117
static void perm_group_watchdog(struct work_struct *work)
fs/notify/mark.c
1096
static void fsnotify_mark_destroy_workfn(struct work_struct *work)
fs/notify/mark.c
314
static void fsnotify_connector_destroy_workfn(struct work_struct *work)
fs/notify/mark.c
89
static void fsnotify_mark_destroy_workfn(struct work_struct *work);
fs/notify/mark.c
92
static void fsnotify_connector_destroy_workfn(struct work_struct *work);
fs/ocfs2/alloc.c
6090
static void ocfs2_truncate_log_worker(struct work_struct *work)
fs/ocfs2/alloc.c
6094
container_of(work, struct ocfs2_super,
fs/ocfs2/alloc.c
6095
osb_truncate_log_wq.work);
fs/ocfs2/cluster/heartbeat.c
287
static void o2hb_write_timeout(struct work_struct *work)
fs/ocfs2/cluster/heartbeat.c
291
container_of(work, struct o2hb_region,
fs/ocfs2/cluster/heartbeat.c
292
hr_write_timeout_work.work);
fs/ocfs2/cluster/heartbeat.c
371
static void o2hb_nego_timeout(struct work_struct *work)
fs/ocfs2/cluster/heartbeat.c
377
reg = container_of(work, struct o2hb_region, hr_nego_timeout_work.work);
fs/ocfs2/cluster/quorum.c
90
static void o2quo_make_decision(struct work_struct *work)
fs/ocfs2/cluster/tcp.c
123
static void o2net_sc_connect_completed(struct work_struct *work);
fs/ocfs2/cluster/tcp.c
124
static void o2net_rx_until_empty(struct work_struct *work);
fs/ocfs2/cluster/tcp.c
125
static void o2net_shutdown_sc(struct work_struct *work);
fs/ocfs2/cluster/tcp.c
127
static void o2net_sc_send_keep_req(struct work_struct *work);
fs/ocfs2/cluster/tcp.c
1425
static void o2net_rx_until_empty(struct work_struct *work)
fs/ocfs2/cluster/tcp.c
1428
container_of(work, struct o2net_sock_container, sc_rx_work);
fs/ocfs2/cluster/tcp.c
1460
static void o2net_sc_connect_completed(struct work_struct *work)
fs/ocfs2/cluster/tcp.c
1463
container_of(work, struct o2net_sock_container,
fs/ocfs2/cluster/tcp.c
1476
static void o2net_sc_send_keep_req(struct work_struct *work)
fs/ocfs2/cluster/tcp.c
1479
container_of(work, struct o2net_sock_container,
fs/ocfs2/cluster/tcp.c
1480
sc_keepalive_work.work);
fs/ocfs2/cluster/tcp.c
1549
static void o2net_start_connect(struct work_struct *work)
fs/ocfs2/cluster/tcp.c
1552
container_of(work, struct o2net_node, nn_connect_work.work);
fs/ocfs2/cluster/tcp.c
1666
static void o2net_connect_expired(struct work_struct *work)
fs/ocfs2/cluster/tcp.c
1669
container_of(work, struct o2net_node, nn_connect_expired.work);
fs/ocfs2/cluster/tcp.c
1685
static void o2net_still_up(struct work_struct *work)
fs/ocfs2/cluster/tcp.c
1688
container_of(work, struct o2net_node, nn_still_up.work);
fs/ocfs2/cluster/tcp.c
1915
static void o2net_accept_many(struct work_struct *work)
fs/ocfs2/cluster/tcp.c
459
struct work_struct *work)
fs/ocfs2/cluster/tcp.c
462
if (!queue_work(o2net_wq, work))
fs/ocfs2/cluster/tcp.c
466
struct delayed_work *work,
fs/ocfs2/cluster/tcp.c
470
if (!queue_delayed_work(o2net_wq, work, delay))
fs/ocfs2/cluster/tcp.c
474
struct delayed_work *work)
fs/ocfs2/cluster/tcp.c
476
if (cancel_delayed_work(work))
fs/ocfs2/cluster/tcp.c
714
static void o2net_shutdown_sc(struct work_struct *work)
fs/ocfs2/cluster/tcp.c
717
container_of(work, struct o2net_sock_container,
fs/ocfs2/dlm/dlmcommon.h
186
void dlm_dispatch_work(struct work_struct *work);
fs/ocfs2/dlm/dlmrecovery.c
128
void dlm_dispatch_work(struct work_struct *work)
fs/ocfs2/dlm/dlmrecovery.c
131
container_of(work, struct dlm_ctxt, dispatched_work);
fs/ocfs2/dlmfs/userdlm.c
161
static void user_dlm_unblock_lock(struct work_struct *work);
fs/ocfs2/dlmfs/userdlm.c
283
static void user_dlm_unblock_lock(struct work_struct *work)
fs/ocfs2/dlmfs/userdlm.c
287
container_of(work, struct user_lock_res, l_work);
fs/ocfs2/journal.c
1311
void ocfs2_complete_recovery(struct work_struct *work)
fs/ocfs2/journal.c
1315
container_of(work, struct ocfs2_journal, j_recovery_work);
fs/ocfs2/journal.c
2073
static void ocfs2_orphan_scan_work(struct work_struct *work)
fs/ocfs2/journal.c
2078
os = container_of(work, struct ocfs2_orphan_scan,
fs/ocfs2/journal.c
2079
os_orphan_scan_work.work);
fs/ocfs2/journal.h
146
void ocfs2_complete_recovery(struct work_struct *work);
fs/ocfs2/localalloc.c
226
void ocfs2_la_enable_worker(struct work_struct *work)
fs/ocfs2/localalloc.c
229
container_of(work, struct ocfs2_super,
fs/ocfs2/localalloc.c
230
la_enable_wq.work);
fs/ocfs2/localalloc.h
50
void ocfs2_la_enable_worker(struct work_struct *work);
fs/ocfs2/quota.h
118
void ocfs2_drop_dquot_refs(struct work_struct *work);
fs/ocfs2/quota_global.c
645
static void qsync_work_fn(struct work_struct *work)
fs/ocfs2/quota_global.c
647
struct ocfs2_mem_dqinfo *oinfo = container_of(work,
fs/ocfs2/quota_global.c
649
dqi_sync_work.work);
fs/ocfs2/quota_global.c
69
static void qsync_work_fn(struct work_struct *work);
fs/ocfs2/quota_global.c
709
void ocfs2_drop_dquot_refs(struct work_struct *work)
fs/ocfs2/quota_global.c
711
struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
fs/overlayfs/overlayfs.h
585
int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *work,
fs/overlayfs/params.c
375
swap(ctx->work, *path);
fs/overlayfs/params.c
737
path_put(&ctx->work);
fs/overlayfs/params.h
29
struct path work;
fs/overlayfs/super.c
1453
err = ovl_get_workdir(sb, ofs, &ctx->upper, &ctx->work);
fs/overlayfs/super.c
316
struct dentry *work;
fs/overlayfs/super.c
321
work = ovl_start_creating_upper(ofs, ofs->workbasedir, &QSTR(name));
fs/overlayfs/super.c
323
if (!IS_ERR(work)) {
fs/overlayfs/super.c
329
if (work->d_inode) {
fs/overlayfs/super.c
330
end_creating_keep(work);
fs/overlayfs/super.c
332
return work;
fs/overlayfs/super.c
337
err = ovl_workdir_cleanup(ofs, ofs->workbasedir, mnt, work, 0);
fs/overlayfs/super.c
338
dput(work);
fs/overlayfs/super.c
345
work = ovl_do_mkdir(ofs, dir, work, attr.ia_mode);
fs/overlayfs/super.c
346
end_creating_keep(work);
fs/overlayfs/super.c
347
err = PTR_ERR(work);
fs/overlayfs/super.c
348
if (IS_ERR(work))
fs/overlayfs/super.c
353
if (d_really_is_negative(work))
fs/overlayfs/super.c
369
err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_DEFAULT);
fs/overlayfs/super.c
373
err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_ACCESS);
fs/overlayfs/super.c
378
inode_lock(work->d_inode);
fs/overlayfs/super.c
379
err = ovl_do_notify_change(ofs, work, &attr);
fs/overlayfs/super.c
380
inode_unlock(work->d_inode);
fs/overlayfs/super.c
384
err = PTR_ERR(work);
fs/overlayfs/super.c
387
return work;
fs/overlayfs/super.c
390
dput(work);
fs/overlayfs/util.c
1219
int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *work,
fs/overlayfs/util.c
1230
if (work && (work->d_parent != workdir || d_unhashed(work)))
fs/pstore/platform.c
731
static void pstore_dowork(struct work_struct *work)
fs/pstore/zone.c
315
static void psz_flush_all_dirty_zones(struct work_struct *work)
fs/quota/dquot.c
273
static void quota_release_workfn(struct work_struct *work);
fs/quota/dquot.c
814
static void quota_release_workfn(struct work_struct *work)
fs/resctrl/internal.h
396
void mbm_handle_overflow(struct work_struct *work);
fs/resctrl/internal.h
403
void cqm_handle_limbo(struct work_struct *work);
fs/resctrl/monitor.c
790
void cqm_handle_limbo(struct work_struct *work)
fs/resctrl/monitor.c
798
d = container_of(work, struct rdt_l3_mon_domain, cqm_limbo.work);
fs/resctrl/monitor.c
834
void mbm_handle_overflow(struct work_struct *work)
fs/resctrl/monitor.c
853
d = container_of(work, struct rdt_l3_mon_domain, mbm_over.work);
fs/smb/client/cached_dir.c
18
static void cfids_laundromat_worker(struct work_struct *work);
fs/smb/client/cached_dir.c
632
cached_dir_offload_close(struct work_struct *work)
fs/smb/client/cached_dir.c
634
struct cached_fid *cfid = container_of(work,
fs/smb/client/cached_dir.c
651
static void cached_dir_put_work(struct work_struct *work)
fs/smb/client/cached_dir.c
653
struct cached_fid *cfid = container_of(work, struct cached_fid,
fs/smb/client/cached_dir.c
758
static void cfids_laundromat_worker(struct work_struct *work)
fs/smb/client/cached_dir.c
764
cfids = container_of(work, struct cached_fids, laundromat_work.work);
fs/smb/client/cifsglob.h
1746
struct work_struct work;
fs/smb/client/cifsglob.h
1823
struct work_struct work;
fs/smb/client/cifsglob.h
2131
void cifs_oplock_break(struct work_struct *work);
fs/smb/client/cifsglob.h
2133
void smb2_deferred_work_close(struct work_struct *work);
fs/smb/client/cifsproto.h
128
void smb2_query_server_interfaces(struct work_struct *work);
fs/smb/client/connect.c
103
void smb2_query_server_interfaces(struct work_struct *work)
fs/smb/client/connect.c
107
struct cifs_tcon *tcon = container_of(work,
fs/smb/client/connect.c
109
query_interfaces.work);
fs/smb/client/connect.c
3817
INIT_WORK(&mchan_mount->work, mchan_mount_work_fn);
fs/smb/client/connect.c
3835
mchan_mount_work_fn(struct work_struct *work)
fs/smb/client/connect.c
3837
struct mchan_mount *mchan_mount = container_of(work, struct mchan_mount, work);
fs/smb/client/connect.c
3889
queue_work(cifsiod_wq, &mchan_mount->work);
fs/smb/client/connect.c
3946
queue_work(cifsiod_wq, &mchan_mount->work);
fs/smb/client/connect.c
4447
cifs_prune_tlinks(struct work_struct *work)
fs/smb/client/connect.c
4449
struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
fs/smb/client/connect.c
4450
prune_tlinks.work);
fs/smb/client/connect.c
614
cifs_echo_request(struct work_struct *work)
fs/smb/client/connect.c
617
struct TCP_Server_Info *server = container_of(work,
fs/smb/client/connect.c
618
struct TCP_Server_Info, echo.work);
fs/smb/client/connect.c
64
static void cifs_prune_tlinks(struct work_struct *work);
fs/smb/client/connect.c
68
static void mchan_mount_work_fn(struct work_struct *work);
fs/smb/client/dfs_cache.c
1343
void dfs_cache_refresh(struct work_struct *work)
fs/smb/client/dfs_cache.c
1348
tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
fs/smb/client/dfs_cache.h
57
void dfs_cache_refresh(struct work_struct *work);
fs/smb/client/file.c
1404
void smb2_deferred_work_close(struct work_struct *work)
fs/smb/client/file.c
1406
struct cifsFileInfo *cfile = container_of(work,
fs/smb/client/file.c
1407
struct cifsFileInfo, deferred.work);
fs/smb/client/file.c
3152
void cifs_oplock_break(struct work_struct *work)
fs/smb/client/file.c
3154
struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
fs/smb/client/file.c
654
static void cifsFileInfo_put_work(struct work_struct *work);
fs/smb/client/file.c
655
void serverclose_work(struct work_struct *work);
fs/smb/client/file.c
782
static void cifsFileInfo_put_work(struct work_struct *work)
fs/smb/client/file.c
784
struct cifsFileInfo *cifs_file = container_of(work,
fs/smb/client/file.c
790
void serverclose_work(struct work_struct *work)
fs/smb/client/file.c
792
struct cifsFileInfo *cifs_file = container_of(work,
fs/smb/client/namespace.c
27
static void cifs_expire_automounts(struct work_struct *work);
fs/smb/client/namespace.c
32
static void cifs_expire_automounts(struct work_struct *work)
fs/smb/client/smb2misc.c
499
cifs_ses_oplock_break(struct work_struct *work)
fs/smb/client/smb2misc.c
501
struct smb2_lease_break_work *lw = container_of(work,
fs/smb/client/smb2misc.c
755
smb2_cancelled_close_fid(struct work_struct *work)
fs/smb/client/smb2misc.c
757
struct close_cancelled_open *cancelled = container_of(work,
fs/smb/client/smb2misc.c
758
struct close_cancelled_open, work);
fs/smb/client/smb2misc.c
800
INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
fs/smb/client/smb2misc.c
801
WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false);
fs/smb/client/smb2ops.c
4856
static void smb2_decrypt_offload(struct work_struct *work)
fs/smb/client/smb2ops.c
4858
struct smb2_decrypt_work *dw = container_of(work,
fs/smb/client/smb2pdu.c
4190
void smb2_reconnect_server(struct work_struct *work)
fs/smb/client/smb2pdu.c
4192
struct TCP_Server_Info *server = container_of(work,
fs/smb/client/smb2pdu.c
4193
struct TCP_Server_Info, reconnect.work);
fs/smb/client/smb2proto.h
110
void smb2_reconnect_server(struct work_struct *work);
fs/smb/client/smb2proto.h
222
void smb2_cancelled_close_fid(struct work_struct *work);
fs/smb/client/smbdirect.c
1838
static void send_immediate_empty_message(struct work_struct *work)
fs/smb/client/smbdirect.c
1841
container_of(work, struct smbdirect_socket, idle.immediate_work);
fs/smb/client/smbdirect.c
1851
static void idle_connection_timer(struct work_struct *work)
fs/smb/client/smbdirect.c
1854
container_of(work, struct smbdirect_socket, idle.timer_work.work);
fs/smb/client/smbdirect.c
204
static void smbd_disconnect_rdma_work(struct work_struct *work)
fs/smb/client/smbdirect.c
207
container_of(work, struct smbdirect_socket, disconnect_work);
fs/smb/client/smbdirect.c
2638
static void smbd_mr_recovery_work(struct work_struct *work)
fs/smb/client/smbdirect.c
2641
container_of(work, struct smbdirect_socket, mr_io.recovery_work);
fs/smb/client/smbdirect.c
711
static void smbd_post_send_credits(struct work_struct *work)
fs/smb/client/smbdirect.c
716
container_of(work, struct smbdirect_socket, recv_io.posted.refill_work);
fs/smb/common/smbdirect/smbdirect_socket.h
140
struct work_struct work;
fs/smb/common/smbdirect/smbdirect_socket.h
355
static void __smbdirect_socket_disabled_work(struct work_struct *work)
fs/smb/common/smbdirect/smbdirect_socket.h
377
INIT_WORK(&sc->connect.work, __smbdirect_socket_disabled_work);
fs/smb/common/smbdirect/smbdirect_socket.h
378
disable_work_sync(&sc->connect.work);
fs/smb/server/auth.c
717
static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id,
fs/smb/server/auth.c
724
sess = work->sess;
fs/smb/server/auth.c
726
sess = ksmbd_session_lookup_all(work->conn, ses_id);
fs/smb/server/auth.c
823
int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov,
fs/smb/server/auth.c
826
struct ksmbd_conn *conn = work->conn;
fs/smb/server/auth.c
840
rc = ksmbd_get_encryption_key(work,
fs/smb/server/auth.h
39
int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov,
fs/smb/server/connection.c
170
void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
fs/smb/server/connection.c
172
struct ksmbd_conn *conn = work->conn;
fs/smb/server/connection.c
175
if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
fs/smb/server/connection.c
181
list_add_tail(&work->request_entry, requests_queue);
fs/smb/server/connection.c
186
void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
fs/smb/server/connection.c
188
struct ksmbd_conn *conn = work->conn;
fs/smb/server/connection.c
194
if (list_empty(&work->request_entry) &&
fs/smb/server/connection.c
195
list_empty(&work->async_request_entry))
fs/smb/server/connection.c
199
list_del_init(&work->request_entry);
fs/smb/server/connection.c
201
if (work->asynchronous)
fs/smb/server/connection.c
202
release_async_work(work);
fs/smb/server/connection.c
267
int ksmbd_conn_write(struct ksmbd_work *work)
fs/smb/server/connection.c
269
struct ksmbd_conn *conn = work->conn;
fs/smb/server/connection.c
272
if (!work->response_buf) {
fs/smb/server/connection.c
277
if (work->send_no_response)
fs/smb/server/connection.c
280
if (!work->iov_idx)
fs/smb/server/connection.c
284
sent = conn->transport->ops->writev(conn->transport, work->iov,
fs/smb/server/connection.c
285
work->iov_cnt,
fs/smb/server/connection.c
286
get_rfc1002_len(work->iov[0].iov_base) + 4,
fs/smb/server/connection.c
287
work->need_invalidate_rkey,
fs/smb/server/connection.c
288
work->remote_key);
fs/smb/server/connection.h
168
int ksmbd_conn_write(struct ksmbd_work *work);
fs/smb/server/connection.h
177
void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
fs/smb/server/connection.h
178
void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
fs/smb/server/ksmbd_work.c
101
work->iov[++work->iov_idx].iov_base = ib;
fs/smb/server/ksmbd_work.c
102
work->iov[work->iov_idx].iov_len = ib_len;
fs/smb/server/ksmbd_work.c
103
work->iov_cnt++;
fs/smb/server/ksmbd_work.c
106
static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
fs/smb/server/ksmbd_work.c
119
if (work->iov_alloc_cnt < work->iov_cnt + need_iov_cnt) {
fs/smb/server/ksmbd_work.c
122
work->iov_alloc_cnt += 4;
fs/smb/server/ksmbd_work.c
123
new = krealloc(work->iov,
fs/smb/server/ksmbd_work.c
124
sizeof(struct kvec) * work->iov_alloc_cnt,
fs/smb/server/ksmbd_work.c
128
work->iov_alloc_cnt -= 4;
fs/smb/server/ksmbd_work.c
131
work->iov = new;
fs/smb/server/ksmbd_work.c
135
if (!work->iov_idx) {
fs/smb/server/ksmbd_work.c
136
work->iov[work->iov_idx].iov_base = work->response_buf;
fs/smb/server/ksmbd_work.c
137
*(__be32 *)work->iov[0].iov_base = 0;
fs/smb/server/ksmbd_work.c
138
work->iov[work->iov_idx].iov_len = 4;
fs/smb/server/ksmbd_work.c
139
work->iov_cnt++;
fs/smb/server/ksmbd_work.c
142
__ksmbd_iov_pin(work, ib, len);
fs/smb/server/ksmbd_work.c
143
inc_rfc1001_len(work->iov[0].iov_base, len);
fs/smb/server/ksmbd_work.c
146
__ksmbd_iov_pin(work, aux_buf, aux_size);
fs/smb/server/ksmbd_work.c
147
inc_rfc1001_len(work->iov[0].iov_base, aux_size);
fs/smb/server/ksmbd_work.c
150
list_add(&ar->entry, &work->aux_read_list);
fs/smb/server/ksmbd_work.c
156
int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len)
fs/smb/server/ksmbd_work.c
158
return __ksmbd_iov_pin_rsp(work, ib, len, NULL, 0);
fs/smb/server/ksmbd_work.c
161
int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
fs/smb/server/ksmbd_work.c
164
return __ksmbd_iov_pin_rsp(work, ib, len, aux_buf, aux_size);
fs/smb/server/ksmbd_work.c
167
int allocate_interim_rsp_buf(struct ksmbd_work *work)
fs/smb/server/ksmbd_work.c
169
work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, KSMBD_DEFAULT_GFP);
fs/smb/server/ksmbd_work.c
170
if (!work->response_buf)
fs/smb/server/ksmbd_work.c
172
work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
fs/smb/server/ksmbd_work.c
21
struct ksmbd_work *work = kmem_cache_zalloc(work_cache, KSMBD_DEFAULT_GFP);
fs/smb/server/ksmbd_work.c
23
if (work) {
fs/smb/server/ksmbd_work.c
24
work->compound_fid = KSMBD_NO_FID;
fs/smb/server/ksmbd_work.c
25
work->compound_pfid = KSMBD_NO_FID;
fs/smb/server/ksmbd_work.c
26
INIT_LIST_HEAD(&work->request_entry);
fs/smb/server/ksmbd_work.c
27
INIT_LIST_HEAD(&work->async_request_entry);
fs/smb/server/ksmbd_work.c
28
INIT_LIST_HEAD(&work->fp_entry);
fs/smb/server/ksmbd_work.c
29
INIT_LIST_HEAD(&work->aux_read_list);
fs/smb/server/ksmbd_work.c
30
work->iov_alloc_cnt = 4;
fs/smb/server/ksmbd_work.c
31
work->iov = kzalloc_objs(struct kvec, work->iov_alloc_cnt,
fs/smb/server/ksmbd_work.c
33
if (!work->iov) {
fs/smb/server/ksmbd_work.c
34
kmem_cache_free(work_cache, work);
fs/smb/server/ksmbd_work.c
35
work = NULL;
fs/smb/server/ksmbd_work.c
38
return work;
fs/smb/server/ksmbd_work.c
41
void ksmbd_free_work_struct(struct ksmbd_work *work)
fs/smb/server/ksmbd_work.c
45
WARN_ON(work->saved_cred != NULL);
fs/smb/server/ksmbd_work.c
47
kvfree(work->response_buf);
fs/smb/server/ksmbd_work.c
49
list_for_each_entry_safe(ar, tmp, &work->aux_read_list, entry) {
fs/smb/server/ksmbd_work.c
55
kfree(work->tr_buf);
fs/smb/server/ksmbd_work.c
56
kvfree(work->request_buf);
fs/smb/server/ksmbd_work.c
57
kfree(work->iov);
fs/smb/server/ksmbd_work.c
59
if (work->async_id)
fs/smb/server/ksmbd_work.c
60
ksmbd_release_id(&work->conn->async_ida, work->async_id);
fs/smb/server/ksmbd_work.c
61
kmem_cache_free(work_cache, work);
fs/smb/server/ksmbd_work.c
93
bool ksmbd_queue_work(struct ksmbd_work *work)
fs/smb/server/ksmbd_work.c
95
return queue_work(ksmbd_wq, &work->work);
fs/smb/server/ksmbd_work.c
98
static inline void __ksmbd_iov_pin(struct ksmbd_work *work, void *ib,
fs/smb/server/ksmbd_work.h
100
return work->response_buf + work->next_smb2_rsp_hdr_off + 4;
fs/smb/server/ksmbd_work.h
107
static inline void *ksmbd_resp_buf_curr(struct ksmbd_work *work)
fs/smb/server/ksmbd_work.h
109
return work->response_buf + work->curr_smb2_rsp_hdr_off + 4;
fs/smb/server/ksmbd_work.h
116
static inline void *ksmbd_req_buf_next(struct ksmbd_work *work)
fs/smb/server/ksmbd_work.h
118
return work->request_buf + work->next_smb2_rcv_hdr_off + 4;
fs/smb/server/ksmbd_work.h
122
void ksmbd_free_work_struct(struct ksmbd_work *work);
fs/smb/server/ksmbd_work.h
129
bool ksmbd_queue_work(struct ksmbd_work *work);
fs/smb/server/ksmbd_work.h
130
int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
fs/smb/server/ksmbd_work.h
132
int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len);
fs/smb/server/ksmbd_work.h
133
int allocate_interim_rsp_buf(struct ksmbd_work *work);
fs/smb/server/ksmbd_work.h
86
struct work_struct work;
fs/smb/server/ksmbd_work.h
98
static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
fs/smb/server/mgmt/share_config.c
124
static struct ksmbd_share_config *share_config_request(struct ksmbd_work *work,
fs/smb/server/mgmt/share_config.c
130
struct unicode_map *um = work->conn->um;
fs/smb/server/mgmt/share_config.c
186
if (__ksmbd_override_fsids(work, share)) {
fs/smb/server/mgmt/share_config.c
193
ksmbd_revert_fsids(work);
fs/smb/server/mgmt/share_config.c
226
struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work,
fs/smb/server/mgmt/share_config.c
239
return share_config_request(work, name);
fs/smb/server/mgmt/share_config.h
73
struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work,
fs/smb/server/mgmt/tree_connect.c
20
ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
fs/smb/server/mgmt/tree_connect.c
27
struct ksmbd_conn *conn = work->conn;
fs/smb/server/mgmt/tree_connect.c
28
struct ksmbd_session *sess = work->sess;
fs/smb/server/mgmt/tree_connect.c
31
sc = ksmbd_share_config_get(work, share_name);
fs/smb/server/mgmt/tree_connect.c
66
new_sc = ksmbd_share_config_get(work, share_name);
fs/smb/server/mgmt/tree_connect.h
53
ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name);
fs/smb/server/oplock.c
1185
int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
fs/smb/server/oplock.c
1189
struct ksmbd_session *sess = work->sess;
fs/smb/server/oplock.c
1205
opinfo = alloc_opinfo(work, pid, tid);
fs/smb/server/oplock.c
1265
err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II, work);
fs/smb/server/oplock.c
1336
static void smb_break_all_write_oplock(struct ksmbd_work *work,
fs/smb/server/oplock.c
1351
oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II, work);
fs/smb/server/oplock.c
1362
void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
fs/smb/server/oplock.c
1367
struct ksmbd_conn *conn = work->conn;
fs/smb/server/oplock.c
1369
if (!test_share_config_flag(work->tcon->share_conf,
fs/smb/server/oplock.c
1430
void smb_break_all_oplock(struct ksmbd_work *work, struct ksmbd_file *fp)
fs/smb/server/oplock.c
1432
if (!test_share_config_flag(work->tcon->share_conf,
fs/smb/server/oplock.c
1436
smb_break_all_write_oplock(work, fp, 1);
fs/smb/server/oplock.c
1437
smb_break_all_levII_oplock(work, fp, 1);
fs/smb/server/oplock.c
30
static struct oplock_info *alloc_opinfo(struct ksmbd_work *work,
fs/smb/server/oplock.c
33
struct ksmbd_conn *conn = work->conn;
fs/smb/server/oplock.c
34
struct ksmbd_session *sess = work->sess;
fs/smb/server/oplock.c
644
struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
fs/smb/server/oplock.c
645
struct ksmbd_conn *conn = work->conn;
fs/smb/server/oplock.c
646
struct oplock_break_info *br_info = work->request_buf;
fs/smb/server/oplock.c
654
if (allocate_interim_rsp_buf(work)) {
fs/smb/server/oplock.c
656
ksmbd_fd_put(work, fp);
fs/smb/server/oplock.c
660
rsp_hdr = smb_get_msg(work->response_buf);
fs/smb/server/oplock.c
674
rsp = smb_get_msg(work->response_buf);
fs/smb/server/oplock.c
688
ksmbd_fd_put(work, fp);
fs/smb/server/oplock.c
689
if (ksmbd_iov_pin_rsp(work, (void *)rsp,
fs/smb/server/oplock.c
697
ksmbd_conn_write(work);
fs/smb/server/oplock.c
700
ksmbd_free_work_struct(work);
fs/smb/server/oplock.c
716
struct ksmbd_work *work = ksmbd_alloc_work_struct();
fs/smb/server/oplock.c
718
if (!work)
fs/smb/server/oplock.c
723
ksmbd_free_work_struct(work);
fs/smb/server/oplock.c
731
work->request_buf = (char *)br_info;
fs/smb/server/oplock.c
732
work->conn = conn;
fs/smb/server/oplock.c
733
work->sess = opinfo->sess;
fs/smb/server/oplock.c
737
INIT_WORK(&work->work, __smb2_oplock_break_noti);
fs/smb/server/oplock.c
738
ksmbd_queue_work(work);
fs/smb/server/oplock.c
742
__smb2_oplock_break_noti(&work->work);
fs/smb/server/oplock.c
757
struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
fs/smb/server/oplock.c
758
struct ksmbd_conn *conn = work->conn;
fs/smb/server/oplock.c
759
struct lease_break_info *br_info = work->request_buf;
fs/smb/server/oplock.c
762
if (allocate_interim_rsp_buf(work)) {
fs/smb/server/oplock.c
767
rsp_hdr = smb_get_msg(work->response_buf);
fs/smb/server/oplock.c
781
rsp = smb_get_msg(work->response_buf);
fs/smb/server/oplock.c
797
if (ksmbd_iov_pin_rsp(work, (void *)rsp,
fs/smb/server/oplock.c
801
ksmbd_conn_write(work);
fs/smb/server/oplock.c
804
ksmbd_free_work_struct(work);
fs/smb/server/oplock.c
818
struct ksmbd_work *work;
fs/smb/server/oplock.c
822
work = ksmbd_alloc_work_struct();
fs/smb/server/oplock.c
823
if (!work)
fs/smb/server/oplock.c
828
ksmbd_free_work_struct(work);
fs/smb/server/oplock.c
840
work->request_buf = (char *)br_info;
fs/smb/server/oplock.c
841
work->conn = conn;
fs/smb/server/oplock.c
842
work->sess = opinfo->sess;
fs/smb/server/oplock.c
846
INIT_WORK(&work->work, __smb2_lease_break_noti);
fs/smb/server/oplock.c
847
ksmbd_queue_work(work);
fs/smb/server/oplock.c
850
__smb2_lease_break_noti(&work->work);
fs/smb/server/oplock.h
100
void smb_break_all_oplock(struct ksmbd_work *work, struct ksmbd_file *fp);
fs/smb/server/oplock.h
58
struct ksmbd_work *work;
fs/smb/server/oplock.h
90
int smb_grant_oplock(struct ksmbd_work *work, int req_op_level,
fs/smb/server/oplock.h
93
void smb_break_all_levII_oplock(struct ksmbd_work *work,
fs/smb/server/server.c
109
static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn,
fs/smb/server/server.c
116
if (check_conn_state(work))
fs/smb/server/server.c
119
if (ksmbd_verify_smb_message(work)) {
fs/smb/server/server.c
120
conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
fs/smb/server/server.c
124
command = conn->ops->get_cmd_val(work);
fs/smb/server/server.c
129
conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
fs/smb/server/server.c
136
conn->ops->set_rsp_status(work, STATUS_NOT_IMPLEMENTED);
fs/smb/server/server.c
140
if (work->sess && conn->ops->is_sign_req(work, command)) {
fs/smb/server/server.c
141
ret = conn->ops->check_sign_req(work);
fs/smb/server/server.c
143
conn->ops->set_rsp_status(work, STATUS_ACCESS_DENIED);
fs/smb/server/server.c
148
ret = cmds->proc(work);
fs/smb/server/server.c
161
if (work->send_no_response)
fs/smb/server/server.c
166
static void __handle_ksmbd_work(struct ksmbd_work *work,
fs/smb/server/server.c
174
conn->ops->is_transform_hdr(work->request_buf)) {
fs/smb/server/server.c
175
rc = conn->ops->decrypt_req(work);
fs/smb/server/server.c
178
work->encrypted = true;
fs/smb/server/server.c
181
if (conn->ops->allocate_rsp_buf(work))
fs/smb/server/server.c
184
rc = conn->ops->init_rsp_hdr(work);
fs/smb/server/server.c
187
conn->ops->set_rsp_status(work, STATUS_INVALID_HANDLE);
fs/smb/server/server.c
193
rc = conn->ops->check_user_session(work);
fs/smb/server/server.c
196
conn->ops->set_rsp_status(work,
fs/smb/server/server.c
199
conn->ops->set_rsp_status(work,
fs/smb/server/server.c
203
rc = conn->ops->get_ksmbd_tcon(work);
fs/smb/server/server.c
206
conn->ops->set_rsp_status(work,
fs/smb/server/server.c
209
conn->ops->set_rsp_status(work,
fs/smb/server/server.c
216
rc = __process_request(work, conn, &command);
fs/smb/server/server.c
226
rc = conn->ops->set_rsp_credits(work);
fs/smb/server/server.c
229
conn->ops->set_rsp_status(work,
fs/smb/server/server.c
235
is_chained = is_chained_smb2_message(work);
fs/smb/server/server.c
237
if (work->sess &&
fs/smb/server/server.c
238
(work->sess->sign || smb3_11_final_sess_setup_resp(work) ||
fs/smb/server/server.c
239
conn->ops->is_sign_req(work, command)))
fs/smb/server/server.c
240
conn->ops->set_sign_rsp(work);
fs/smb/server/server.c
244
if (work->tcon)
fs/smb/server/server.c
245
ksmbd_tree_connect_put(work->tcon);
fs/smb/server/server.c
246
smb3_preauth_hash_rsp(work);
fs/smb/server/server.c
247
if (work->sess && work->sess->enc && work->encrypted &&
fs/smb/server/server.c
249
rc = conn->ops->encrypt_resp(work);
fs/smb/server/server.c
251
conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
fs/smb/server/server.c
253
if (work->sess)
fs/smb/server/server.c
254
ksmbd_user_session_put(work->sess);
fs/smb/server/server.c
256
ksmbd_conn_write(work);
fs/smb/server/server.c
267
struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
fs/smb/server/server.c
268
struct ksmbd_conn *conn = work->conn;
fs/smb/server/server.c
272
__handle_ksmbd_work(work, conn);
fs/smb/server/server.c
274
ksmbd_conn_try_dequeue_request(work);
fs/smb/server/server.c
275
ksmbd_free_work_struct(work);
fs/smb/server/server.c
288
struct ksmbd_work *work;
fs/smb/server/server.c
295
work = ksmbd_alloc_work_struct();
fs/smb/server/server.c
296
if (!work) {
fs/smb/server/server.c
301
work->conn = conn;
fs/smb/server/server.c
302
work->request_buf = conn->request_buf;
fs/smb/server/server.c
305
ksmbd_conn_enqueue_request(work);
fs/smb/server/server.c
309
INIT_WORK(&work->work, handle_ksmbd_work);
fs/smb/server/server.c
310
ksmbd_queue_work(work);
fs/smb/server/server.c
386
static void server_ctrl_handle_work(struct work_struct *work)
fs/smb/server/server.c
390
ctrl = container_of(work, struct server_ctrl_struct, ctrl_work);
fs/smb/server/server.c
93
static inline int check_conn_state(struct ksmbd_work *work)
fs/smb/server/server.c
97
if (ksmbd_conn_exiting(work->conn) ||
fs/smb/server/server.c
98
ksmbd_conn_need_reconnect(work->conn)) {
fs/smb/server/server.c
99
rsp_hdr = smb_get_msg(work->response_buf);
fs/smb/server/smb2misc.c
367
int ksmbd_smb2_check_message(struct ksmbd_work *work)
fs/smb/server/smb2misc.c
369
struct smb2_pdu *pdu = ksmbd_req_buf_next(work);
fs/smb/server/smb2misc.c
373
__u32 len = get_rfc1002_len(work->request_buf);
fs/smb/server/smb2misc.c
376
if ((u64)work->next_smb2_rcv_hdr_off + next_cmd > len) {
fs/smb/server/smb2misc.c
384
else if (work->next_smb2_rcv_hdr_off)
fs/smb/server/smb2misc.c
385
len -= work->next_smb2_rcv_hdr_off;
fs/smb/server/smb2misc.c
462
if ((work->conn->vals->req_capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
fs/smb/server/smb2misc.c
463
smb2_validate_credit_charge(work->conn, hdr))
fs/smb/server/smb2misc.c
469
int smb2_negotiate_request(struct ksmbd_work *work)
fs/smb/server/smb2misc.c
471
return ksmbd_smb_negotiate_common(work, SMB2_NEGOTIATE_HE);
fs/smb/server/smb2pdu.c
100
int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
102
struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
1103
int smb2_handle_negotiate(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
1105
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
1106
struct smb2_negotiate_req *req = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
1107
struct smb2_negotiate_rsp *rsp = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
1116
work->send_no_response = 1;
fs/smb/server/smb2pdu.c
1121
smb2_buf_len = get_rfc1002_len(work->request_buf);
fs/smb/server/smb2pdu.c
113
if (xa_empty(&work->sess->tree_conns)) {
fs/smb/server/smb2pdu.c
1179
get_rfc1002_len(work->request_buf));
fs/smb/server/smb2pdu.c
1199
work->request_buf,
fs/smb/server/smb2pdu.c
124
if (work->next_smb2_rcv_hdr_off) {
fs/smb/server/smb2pdu.c
125
if (!work->tcon) {
fs/smb/server/smb2pdu.c
1276
rc = ksmbd_iov_pin_rsp(work, rsp,
fs/smb/server/smb2pdu.c
1280
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
129
if (work->tcon->t_state != TREE_CONNECTED)
fs/smb/server/smb2pdu.c
1301
static int generate_preauth_hash(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
1303
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
1304
struct ksmbd_session *sess = work->sess;
fs/smb/server/smb2pdu.c
131
if (tree_id != UINT_MAX && work->tcon->id != tree_id) {
fs/smb/server/smb2pdu.c
1328
ksmbd_gen_preauth_integrity_hash(conn, work->request_buf, preauth_hash);
fs/smb/server/smb2pdu.c
133
tree_id, work->tcon->id);
fs/smb/server/smb2pdu.c
1349
static int ntlm_negotiate(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
1360
rc = ksmbd_decode_ntlmssp_neg_blob(negblob, negblob_len, work->conn);
fs/smb/server/smb2pdu.c
1368
if (!work->conn->use_spnego) {
fs/smb/server/smb2pdu.c
1369
sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->conn);
fs/smb/server/smb2pdu.c
1385
sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->conn);
fs/smb/server/smb2pdu.c
139
work->tcon = ksmbd_tree_conn_lookup(work->sess, tree_id);
fs/smb/server/smb2pdu.c
140
if (!work->tcon) {
fs/smb/server/smb2pdu.c
1458
static int ntlm_authenticate(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
1462
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
1463
struct ksmbd_session *sess = work->sess;
fs/smb/server/smb2pdu.c
152
void smb2_set_err_rsp(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
156
if (work->next_smb2_rcv_hdr_off)
fs/smb/server/smb2pdu.c
157
err_rsp = ksmbd_resp_buf_next(work);
fs/smb/server/smb2pdu.c
159
err_rsp = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
1598
static int krb5_authenticate(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
1602
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
1603
struct ksmbd_session *sess = work->sess;
fs/smb/server/smb2pdu.c
1615
out_len = work->response_sz -
fs/smb/server/smb2pdu.c
169
err = ksmbd_iov_pin_rsp(work, (void *)err_rsp,
fs/smb/server/smb2pdu.c
1692
static int krb5_authenticate(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
1700
int smb2_sess_setup(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
1702
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
1713
work->send_no_response = 1;
fs/smb/server/smb2pdu.c
1717
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
173
work->send_no_response = 1;
fs/smb/server/smb2pdu.c
1820
work->sess = sess;
fs/smb/server/smb2pdu.c
183
bool is_smb2_neg_cmd(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
1845
rc = generate_preauth_hash(work);
fs/smb/server/smb2pdu.c
185
struct smb2_hdr *hdr = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
1851
rc = krb5_authenticate(work, req, rsp);
fs/smb/server/smb2pdu.c
1863
rc = ntlm_negotiate(work, negblob, negblob_len, rsp);
fs/smb/server/smb2pdu.c
1869
rc = ntlm_authenticate(work, req, rsp);
fs/smb/server/smb2pdu.c
1951
work->sess = NULL;
fs/smb/server/smb2pdu.c
1958
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
1968
rc = ksmbd_iov_pin_rsp(work, rsp, iov_len);
fs/smb/server/smb2pdu.c
1983
int smb2_tree_connect(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
1985
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
1988
struct ksmbd_session *sess = work->sess;
fs/smb/server/smb2pdu.c
1996
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
2016
status = ksmbd_tree_conn_connect(work, name);
fs/smb/server/smb2pdu.c
2065
rc = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_tree_connect_rsp));
fs/smb/server/smb2pdu.c
207
bool is_smb2_rsp(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
209
struct smb2_hdr *hdr = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
2101
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
2184
int smb2_tree_disconnect(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
2188
struct ksmbd_session *sess = work->sess;
fs/smb/server/smb2pdu.c
2189
struct ksmbd_tree_connect *tcon = work->tcon;
fs/smb/server/smb2pdu.c
2194
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
2204
ksmbd_close_tree_conn_fds(work);
fs/smb/server/smb2pdu.c
2224
err = ksmbd_iov_pin_rsp(work, rsp,
fs/smb/server/smb2pdu.c
2234
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
2245
int smb2_session_logoff(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
2247
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
2248
struct ksmbd_session *sess = work->sess;
fs/smb/server/smb2pdu.c
2254
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
2262
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
2269
ksmbd_close_session_fds(work);
fs/smb/server/smb2pdu.c
2275
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
228
u16 get_smb2_cmd_val(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
2286
err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp));
fs/smb/server/smb2pdu.c
2289
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
2301
static noinline int create_smb2_pipe(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
2309
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
2312
1, work->conn->local_nls);
fs/smb/server/smb2pdu.c
2319
id = ksmbd_session_rpc_open(work->sess, name);
fs/smb/server/smb2pdu.c
232
if (work->next_smb2_rcv_hdr_off)
fs/smb/server/smb2pdu.c
233
rcv_hdr = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
2344
err = ksmbd_iov_pin_rsp(work, rsp, offsetof(struct smb2_create_rsp, Buffer));
fs/smb/server/smb2pdu.c
235
rcv_hdr = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
2363
ksmbd_session_rpc_close(work->sess, id);
fs/smb/server/smb2pdu.c
2368
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
244
void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err)
fs/smb/server/smb2pdu.c
248
rsp_hdr = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
251
work->iov_idx = 0;
fs/smb/server/smb2pdu.c
252
work->iov_cnt = 0;
fs/smb/server/smb2pdu.c
253
work->next_smb2_rcv_hdr_off = 0;
fs/smb/server/smb2pdu.c
254
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
2612
static int smb2_creat(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
2616
struct ksmbd_tree_connect *tcon = work->tcon;
fs/smb/server/smb2pdu.c
2629
rc = ksmbd_vfs_mkdir(work, name, mode);
fs/smb/server/smb2pdu.c
2636
rc = ksmbd_vfs_create(work, name, mode);
fs/smb/server/smb2pdu.c
264
int init_smb2_neg_rsp(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
2641
rc = ksmbd_vfs_kern_path(work, name, 0, path, 0);
fs/smb/server/smb2pdu.c
2650
static int smb2_create_sd_buffer(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
2674
return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
fs/smb/server/smb2pdu.c
268
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
271
rsp_hdr = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
2714
static int parse_durable_handle_context(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
2719
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
285
rsp = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
2889
int smb2_open(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
2891
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
2892
struct ksmbd_session *sess = work->sess;
fs/smb/server/smb2pdu.c
2893
struct ksmbd_tree_connect *tcon = work->tcon;
fs/smb/server/smb2pdu.c
2925
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
2927
if (req->hdr.NextCommand && !work->next_smb2_rcv_hdr_off &&
fs/smb/server/smb2pdu.c
2931
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
2937
return create_smb2_pipe(work);
fs/smb/server/smb2pdu.c
2964
work->conn->local_nls);
fs/smb/server/smb2pdu.c
2975
if (!test_share_config_flag(work->tcon->share_conf,
fs/smb/server/smb2pdu.c
3009
rc = parse_durable_handle_context(work, req, lc, &dh_info);
fs/smb/server/smb2pdu.c
3022
rc = ksmbd_reopen_durable_fd(work, dh_info.fp);
fs/smb/server/smb2pdu.c
3030
if (ksmbd_override_fsids(work)) {
fs/smb/server/smb2pdu.c
311
err = ksmbd_iov_pin_rsp(work, rsp,
fs/smb/server/smb2pdu.c
3149
if (ksmbd_override_fsids(work)) {
fs/smb/server/smb2pdu.c
3154
rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS,
fs/smb/server/smb2pdu.c
325
int smb2_set_rsp_credits(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
327
struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
3276
rc = smb2_creat(work, &path, name, open_flags,
fs/smb/server/smb2pdu.c
328
struct smb2_hdr *hdr = ksmbd_resp_buf_next(work);
fs/smb/server/smb2pdu.c
329
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
333
if (work->send_no_response)
fs/smb/server/smb2pdu.c
3357
fp = ksmbd_open_fd(work, filp);
fs/smb/server/smb2pdu.c
3388
if (test_share_config_flag(work->tcon->share_conf,
fs/smb/server/smb2pdu.c
3395
rc = smb2_create_sd_buffer(work, req, &path);
fs/smb/server/smb2pdu.c
3401
if (test_share_config_flag(work->tcon->share_conf,
fs/smb/server/smb2pdu.c
3491
smb_break_all_oplock(work, fp);
fs/smb/server/smb2pdu.c
3496
if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS) ||
fs/smb/server/smb2pdu.c
3529
rc = smb_grant_oplock(work, req_op_level,
fs/smb/server/smb2pdu.c
3568
smb_break_all_levII_oplock(work, fp, 1);
fs/smb/server/smb2pdu.c
3617
test_share_config_flag(work->tcon->share_conf,
fs/smb/server/smb2pdu.c
370
work->credits_granted += credits_granted;
fs/smb/server/smb2pdu.c
374
hdr->CreditRequest = cpu_to_le16(work->credits_granted);
fs/smb/server/smb2pdu.c
3770
ksmbd_revert_fsids(work);
fs/smb/server/smb2pdu.c
3774
ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
fs/smb/server/smb2pdu.c
3775
rc = ksmbd_iov_pin_rsp(work, (void *)rsp, iov_len);
fs/smb/server/smb2pdu.c
3804
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
3805
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
387
static void init_chained_smb2_rsp(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
389
struct smb2_hdr *req = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
390
struct smb2_hdr *rsp = ksmbd_resp_buf_next(work);
fs/smb/server/smb2pdu.c
404
work->compound_fid = ((struct smb2_create_rsp *)rsp)->VolatileFileId;
fs/smb/server/smb2pdu.c
405
work->compound_pfid = ((struct smb2_create_rsp *)rsp)->PersistentFileId;
fs/smb/server/smb2pdu.c
406
work->compound_sid = le64_to_cpu(rsp->SessionId);
fs/smb/server/smb2pdu.c
409
len = get_rfc1002_len(work->response_buf) - work->next_smb2_rsp_hdr_off;
fs/smb/server/smb2pdu.c
413
work->iov[work->iov_idx].iov_len += (new_len - len);
fs/smb/server/smb2pdu.c
414
inc_rfc1001_len(work->response_buf, new_len - len);
fs/smb/server/smb2pdu.c
4143
struct ksmbd_work *work;
fs/smb/server/smb2pdu.c
417
work->next_smb2_rcv_hdr_off += next_hdr_offset;
fs/smb/server/smb2pdu.c
418
work->curr_smb2_rsp_hdr_off = work->next_smb2_rsp_hdr_off;
fs/smb/server/smb2pdu.c
4185
rc = ksmbd_vfs_fill_dentry_attrs(priv->work,
fs/smb/server/smb2pdu.c
419
work->next_smb2_rsp_hdr_off += new_len;
fs/smb/server/smb2pdu.c
4195
rc = smb2_populate_readdir_entry(priv->work->conn,
fs/smb/server/smb2pdu.c
422
new_len, work->next_smb2_rcv_hdr_off,
fs/smb/server/smb2pdu.c
423
work->next_smb2_rsp_hdr_off);
fs/smb/server/smb2pdu.c
425
rsp_hdr = ksmbd_resp_buf_next(work);
fs/smb/server/smb2pdu.c
426
rcv_hdr = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
430
work->compound_fid = KSMBD_NO_FID;
fs/smb/server/smb2pdu.c
431
work->compound_pfid = KSMBD_NO_FID;
fs/smb/server/smb2pdu.c
4329
if (ksmbd_share_veto_filename(priv->work->tcon->share_conf, name))
fs/smb/server/smb2pdu.c
4362
static int smb2_resp_buf_len(struct ksmbd_work *work, unsigned short hdr2_len)
fs/smb/server/smb2pdu.c
4366
free_len = (int)(work->response_sz -
fs/smb/server/smb2pdu.c
4367
(get_rfc1002_len(work->response_buf) + 4)) - hdr2_len;
fs/smb/server/smb2pdu.c
4371
static int smb2_calc_max_out_buf_len(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
4377
if (out_buf_len > work->conn->vals->max_trans_size)
fs/smb/server/smb2pdu.c
4380
free_len = smb2_resp_buf_len(work, hdr2_len);
fs/smb/server/smb2pdu.c
4387
int smb2_query_dir(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
4389
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
4392
struct ksmbd_share_config *share = work->tcon->share_conf;
fs/smb/server/smb2pdu.c
4403
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
4405
if (ksmbd_override_fsids(work)) {
fs/smb/server/smb2pdu.c
4407
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
4417
dir_fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
fs/smb/server/smb2pdu.c
4459
smb2_calc_max_out_buf_len(work,
fs/smb/server/smb2pdu.c
4472
rc = ksmbd_populate_dot_dotdot_entries(work, req->FileInformationClass,
fs/smb/server/smb2pdu.c
4485
query_dir_private.work = work;
fs/smb/server/smb2pdu.c
4531
rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
fs/smb/server/smb2pdu.c
4547
rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
fs/smb/server/smb2pdu.c
4555
ksmbd_fd_put(work, dir_fp);
fs/smb/server/smb2pdu.c
4556
ksmbd_revert_fsids(work);
fs/smb/server/smb2pdu.c
457
bool is_chained_smb2_message(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
4581
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
4582
ksmbd_fd_put(work, dir_fp);
fs/smb/server/smb2pdu.c
4583
ksmbd_revert_fsids(work);
fs/smb/server/smb2pdu.c
459
struct smb2_hdr *hdr = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
46
static void __wbuf(struct ksmbd_work *work, void **req, void **rsp)
fs/smb/server/smb2pdu.c
465
hdr = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
468
if ((u64)work->next_smb2_rcv_hdr_off + next_cmd +
fs/smb/server/smb2pdu.c
4692
static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
fs/smb/server/smb2pdu.c
470
get_rfc1002_len(work->request_buf)) {
fs/smb/server/smb2pdu.c
4728
smb2_calc_max_out_buf_len(work,
fs/smb/server/smb2pdu.c
476
if ((u64)get_rfc1002_len(work->response_buf) + MAX_CIFS_SMALL_BUFFER_SIZE >
fs/smb/server/smb2pdu.c
477
work->response_sz) {
fs/smb/server/smb2pdu.c
48
if (work->next_smb2_rcv_hdr_off) {
fs/smb/server/smb2pdu.c
483
init_chained_smb2_rsp(work);
fs/smb/server/smb2pdu.c
485
} else if (work->next_smb2_rcv_hdr_off) {
fs/smb/server/smb2pdu.c
49
*req = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
490
len = ALIGN(get_rfc1002_len(work->response_buf), 8);
fs/smb/server/smb2pdu.c
491
len = len - get_rfc1002_len(work->response_buf);
fs/smb/server/smb2pdu.c
4935
static int get_file_all_info(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
494
work->iov[work->iov_idx].iov_len += len;
fs/smb/server/smb2pdu.c
4940
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
4948
struct smb2_query_info_req *req = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
495
inc_rfc1001_len(work->response_buf, len);
fs/smb/server/smb2pdu.c
4956
filename = convert_to_nt_pathname(work->tcon->share_conf, &fp->filp->f_path);
fs/smb/server/smb2pdu.c
4961
buf_free_len = smb2_calc_max_out_buf_len(work,
fs/smb/server/smb2pdu.c
497
work->curr_smb2_rsp_hdr_off = work->next_smb2_rsp_hdr_off;
fs/smb/server/smb2pdu.c
50
*rsp = ksmbd_resp_buf_next(work);
fs/smb/server/smb2pdu.c
5023
static void get_file_alternate_info(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
5028
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
5044
static int get_file_stream_info(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
5049
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
5057
struct smb2_query_info_req *req = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
5068
smb2_calc_max_out_buf_len(work,
fs/smb/server/smb2pdu.c
508
int init_smb2_rsp_hdr(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
510
struct smb2_hdr *rsp_hdr = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
511
struct smb2_hdr *rcv_hdr = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
52
*req = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
53
*rsp = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
5367
static int smb2_get_info_file(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
5376
if (test_share_config_flag(work->tcon->share_conf,
fs/smb/server/smb2pdu.c
5379
rc = smb2_get_info_file_pipe(work->sess, req, rsp,
fs/smb/server/smb2pdu.c
538
int smb2_allocate_rsp_buf(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
5380
work->response_buf);
fs/smb/server/smb2pdu.c
5384
if (work->next_smb2_rcv_hdr_off) {
fs/smb/server/smb2pdu.c
5387
work->compound_fid);
fs/smb/server/smb2pdu.c
5388
id = work->compound_fid;
fs/smb/server/smb2pdu.c
5389
pid = work->compound_pfid;
fs/smb/server/smb2pdu.c
5398
fp = ksmbd_lookup_fd_slow(work, id, pid);
fs/smb/server/smb2pdu.c
540
struct smb2_hdr *hdr = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
5406
get_file_access_info(rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5410
rc = get_file_basic_info(rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5414
rc = get_file_standard_info(rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5418
get_file_alignment_info(rsp, work->response_buf);
fs/smb/server/smb2pdu.c
542
size_t large_sz = small_sz + work->conn->vals->max_trans_size;
fs/smb/server/smb2pdu.c
5422
rc = get_file_all_info(work, rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5426
get_file_alternate_info(work, rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5430
rc = get_file_stream_info(work, rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5434
rc = get_file_internal_info(rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5438
rc = get_file_network_open_info(rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5442
get_file_ea_info(rsp, work->response_buf);
fs/smb/server/smb2pdu.c
5446
rc = smb2_get_ea(work, fp, req, rsp, work->response_buf);
fs/smb/server/smb2pdu.c
5450
get_file_position_info(rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5454
get_file_mode_info(rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5458
rc = get_file_compression_info(rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5462
rc = get_file_attribute_tag_info(rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5465
if (!work->tcon->posix_extensions) {
fs/smb/server/smb2pdu.c
5469
rc = find_file_posix_info(rsp, fp, work->response_buf);
fs/smb/server/smb2pdu.c
5479
rsp, work->response_buf);
fs/smb/server/smb2pdu.c
5480
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
5484
rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
fs/smb/server/smb2pdu.c
5490
static int smb2_get_info_filesystem(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
5494
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
5495
struct ksmbd_share_config *share = work->tcon->share_conf;
fs/smb/server/smb2pdu.c
552
if (get_rfc1002_len(work->request_buf) <
fs/smb/server/smb2pdu.c
5529
if (!test_tree_conn_flag(work->tcon,
fs/smb/server/smb2pdu.c
5551
if (test_share_config_flag(work->tcon->share_conf,
fs/smb/server/smb2pdu.c
556
req = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
568
work->response_buf = kvzalloc(sz, KSMBD_DEFAULT_GFP);
fs/smb/server/smb2pdu.c
5689
if (!work->tcon->posix_extensions) {
fs/smb/server/smb2pdu.c
569
if (!work->response_buf)
fs/smb/server/smb2pdu.c
5711
rsp, work->response_buf);
fs/smb/server/smb2pdu.c
5715
rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
fs/smb/server/smb2pdu.c
572
work->response_sz = sz;
fs/smb/server/smb2pdu.c
5721
static int smb2_get_info_sec(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
5758
if (work->next_smb2_rcv_hdr_off) {
fs/smb/server/smb2pdu.c
5761
work->compound_fid);
fs/smb/server/smb2pdu.c
5762
id = work->compound_fid;
fs/smb/server/smb2pdu.c
5763
pid = work->compound_pfid;
fs/smb/server/smb2pdu.c
5772
fp = ksmbd_lookup_fd_slow(work, id, pid);
fs/smb/server/smb2pdu.c
5780
if (test_share_config_flag(work->tcon->share_conf,
fs/smb/server/smb2pdu.c
5782
ppntsd_size = ksmbd_vfs_get_sd_xattr(work->conn, idmap,
fs/smb/server/smb2pdu.c
5787
max_len = smb2_calc_max_out_buf_len(work,
fs/smb/server/smb2pdu.c
5815
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
582
int smb2_check_user_session(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
5825
rsp, work->response_buf);
fs/smb/server/smb2pdu.c
5829
rc = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
fs/smb/server/smb2pdu.c
584
struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
5847
int smb2_query_info(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
585
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
5855
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
5857
if (ksmbd_override_fsids(work)) {
fs/smb/server/smb2pdu.c
5868
rc = smb2_get_info_file(work, req, rsp);
fs/smb/server/smb2pdu.c
5872
rc = smb2_get_info_filesystem(work, req, rsp);
fs/smb/server/smb2pdu.c
5876
rc = smb2_get_info_sec(work, req, rsp);
fs/smb/server/smb2pdu.c
5883
ksmbd_revert_fsids(work);
fs/smb/server/smb2pdu.c
5899
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
5914
static noinline int smb2_close_pipe(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
5920
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
5923
ksmbd_session_rpc_close(work->sess, id);
fs/smb/server/smb2pdu.c
5936
return ksmbd_iov_pin_rsp(work, (void *)rsp,
fs/smb/server/smb2pdu.c
5946
int smb2_close(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
5952
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
5959
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
5961
if (test_share_config_flag(work->tcon->share_conf,
fs/smb/server/smb2pdu.c
5964
return smb2_close_pipe(work);
fs/smb/server/smb2pdu.c
5969
sess_id = work->compound_sid;
fs/smb/server/smb2pdu.c
5971
work->compound_sid = 0;
fs/smb/server/smb2pdu.c
5973
work->compound_sid = sess_id;
fs/smb/server/smb2pdu.c
5982
if (work->next_smb2_rcv_hdr_off &&
fs/smb/server/smb2pdu.c
5984
if (!has_file_id(work->compound_fid)) {
fs/smb/server/smb2pdu.c
5993
work->compound_fid,
fs/smb/server/smb2pdu.c
5994
work->compound_pfid);
fs/smb/server/smb2pdu.c
5995
volatile_id = work->compound_fid;
fs/smb/server/smb2pdu.c
5998
work->compound_fid = KSMBD_NO_FID;
fs/smb/server/smb2pdu.c
5999
work->compound_pfid = KSMBD_NO_FID;
fs/smb/server/smb2pdu.c
6013
fp = ksmbd_lookup_fd_fast(work, volatile_id);
fs/smb/server/smb2pdu.c
6022
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
6038
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
6050
err = ksmbd_close_fd(work, volatile_id);
fs/smb/server/smb2pdu.c
6053
err = ksmbd_iov_pin_rsp(work, (void *)rsp,
fs/smb/server/smb2pdu.c
6059
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
607
if (work->next_smb2_rcv_hdr_off) {
fs/smb/server/smb2pdu.c
6071
int smb2_echo(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
6073
struct smb2_echo_rsp *rsp = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
6077
if (work->next_smb2_rcv_hdr_off)
fs/smb/server/smb2pdu.c
6078
rsp = ksmbd_resp_buf_next(work);
fs/smb/server/smb2pdu.c
608
if (!work->sess) {
fs/smb/server/smb2pdu.c
6082
return ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_echo_rsp));
fs/smb/server/smb2pdu.c
6085
static int smb2_rename(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
612
if (sess_id != ULLONG_MAX && work->sess->id != sess_id) {
fs/smb/server/smb2pdu.c
614
sess_id, work->sess->id);
fs/smb/server/smb2pdu.c
6149
rc = ksmbd_vfs_rename(work, &fp->filp->f_path, new_name, flags);
fs/smb/server/smb2pdu.c
6151
smb_break_all_levII_oplock(work, fp, 0);
fs/smb/server/smb2pdu.c
6157
static int smb2_create_link(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
6192
rc = ksmbd_vfs_kern_path_start_removing(work, link_name, LOOKUP_NO_SYMLINKS,
fs/smb/server/smb2pdu.c
6199
rc = ksmbd_vfs_remove_file(work, &path);
fs/smb/server/smb2pdu.c
621
work->sess = ksmbd_session_lookup_all(conn, sess_id);
fs/smb/server/smb2pdu.c
6213
rc = ksmbd_vfs_link(work, target_name, link_name);
fs/smb/server/smb2pdu.c
622
if (work->sess)
fs/smb/server/smb2pdu.c
6304
static int set_file_allocation_info(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
6334
smb_break_all_levII_oplock(work, fp, 1);
fs/smb/server/smb2pdu.c
6352
rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512);
fs/smb/server/smb2pdu.c
6363
static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
fs/smb/server/smb2pdu.c
6386
rc = ksmbd_vfs_truncate(work, fp, newsize);
fs/smb/server/smb2pdu.c
6397
static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
fs/smb/server/smb2pdu.c
6413
return smb2_rename(work, fp, rename_info, work->conn->local_nls);
fs/smb/server/smb2pdu.c
6497
static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
fs/smb/server/smb2pdu.c
6517
return set_file_allocation_info(work, fp,
fs/smb/server/smb2pdu.c
6525
return set_end_of_file_info(work, fp,
fs/smb/server/smb2pdu.c
6533
return set_rename_info(work, fp,
fs/smb/server/smb2pdu.c
6542
return smb2_create_link(work, work->tcon->share_conf,
fs/smb/server/smb2pdu.c
6545
work->conn->local_nls);
fs/smb/server/smb2pdu.c
6606
int smb2_set_info(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
6616
if (work->next_smb2_rcv_hdr_off) {
fs/smb/server/smb2pdu.c
6617
req = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
6618
rsp = ksmbd_resp_buf_next(work);
fs/smb/server/smb2pdu.c
6621
work->compound_fid);
fs/smb/server/smb2pdu.c
6622
id = work->compound_fid;
fs/smb/server/smb2pdu.c
6623
pid = work->compound_pfid;
fs/smb/server/smb2pdu.c
6626
req = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
6627
rsp = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
663
int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
fs/smb/server/smb2pdu.c
6630
if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
fs/smb/server/smb2pdu.c
6642
fp = ksmbd_lookup_fd_slow(work, id, pid);
fs/smb/server/smb2pdu.c
665
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
6652
rc = smb2_set_info_file(work, fp, req, work->tcon->share_conf);
fs/smb/server/smb2pdu.c
6656
if (ksmbd_override_fsids(work)) {
fs/smb/server/smb2pdu.c
6664
ksmbd_revert_fsids(work);
fs/smb/server/smb2pdu.c
6674
rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
fs/smb/server/smb2pdu.c
6678
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
6702
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
6703
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
6714
static noinline int smb2_read_pipe(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
6722
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
6726
rpc_resp = ksmbd_rpc_read(work->sess, id);
fs/smb/server/smb2pdu.c
673
work->asynchronous = true;
fs/smb/server/smb2pdu.c
674
work->async_id = id;
fs/smb/server/smb2pdu.c
6745
err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
fs/smb/server/smb2pdu.c
6754
err = ksmbd_iov_pin_rsp(work, (void *)rsp,
fs/smb/server/smb2pdu.c
6770
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
6775
static int smb2_set_remote_key_for_rdma(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
678
work->async_id);
fs/smb/server/smb2pdu.c
6782
if (work->conn->dialect == SMB30_PROT_ID &&
fs/smb/server/smb2pdu.c
6798
work->need_invalidate_rkey =
fs/smb/server/smb2pdu.c
680
work->cancel_fn = fn;
fs/smb/server/smb2pdu.c
6801
work->remote_key = le32_to_cpu(desc->token);
fs/smb/server/smb2pdu.c
6805
static ssize_t smb2_read_rdma_channel(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
681
work->cancel_argv = arg;
fs/smb/server/smb2pdu.c
6811
err = ksmbd_conn_rdma_write(work->conn, data_buf, length,
fs/smb/server/smb2pdu.c
6827
int smb2_read(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
6829
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
683
if (list_empty(&work->async_request_entry)) {
fs/smb/server/smb2pdu.c
6844
if (test_share_config_flag(work->tcon->share_conf,
fs/smb/server/smb2pdu.c
6847
return smb2_read_pipe(work);
fs/smb/server/smb2pdu.c
685
list_add_tail(&work->async_request_entry, &conn->async_requests);
fs/smb/server/smb2pdu.c
6850
if (work->next_smb2_rcv_hdr_off) {
fs/smb/server/smb2pdu.c
6851
req = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
6852
rsp = ksmbd_resp_buf_next(work);
fs/smb/server/smb2pdu.c
6855
work->compound_fid);
fs/smb/server/smb2pdu.c
6856
id = work->compound_fid;
fs/smb/server/smb2pdu.c
6857
pid = work->compound_pfid;
fs/smb/server/smb2pdu.c
6860
req = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
6861
rsp = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
6872
max_read_size = get_smbd_max_read_write_size(work->conn->transport);
fs/smb/server/smb2pdu.c
6886
err = smb2_set_remote_key_for_rdma(work,
fs/smb/server/smb2pdu.c
6895
fp = ksmbd_lookup_fd_slow(work, id, pid);
fs/smb/server/smb2pdu.c
692
void release_async_work(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
6931
nbytes = ksmbd_vfs_read(work, fp, length, &offset, aux_payload_buf);
fs/smb/server/smb2pdu.c
694
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
6941
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
6942
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
6951
remain_bytes = smb2_read_rdma_channel(work, req,
fs/smb/server/smb2pdu.c
6969
err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
fs/smb/server/smb2pdu.c
697
list_del_init(&work->async_request_entry);
fs/smb/server/smb2pdu.c
6976
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
6996
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
6998
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
700
work->asynchronous = 0;
fs/smb/server/smb2pdu.c
7008
static noinline int smb2_write_pipe(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
701
work->cancel_fn = NULL;
fs/smb/server/smb2pdu.c
7018
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
702
kfree(work->cancel_argv);
fs/smb/server/smb2pdu.c
7024
get_rfc1002_len(work->request_buf)) {
fs/smb/server/smb2pdu.c
7027
get_rfc1002_len(work->request_buf));
fs/smb/server/smb2pdu.c
703
work->cancel_argv = NULL;
fs/smb/server/smb2pdu.c
7035
rpc_resp = ksmbd_rpc_write(work->sess, id, data_buf, length);
fs/smb/server/smb2pdu.c
704
if (work->async_id) {
fs/smb/server/smb2pdu.c
7040
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
7045
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
705
ksmbd_release_id(&conn->async_ida, work->async_id);
fs/smb/server/smb2pdu.c
7058
err = ksmbd_iov_pin_rsp(work, (void *)rsp,
fs/smb/server/smb2pdu.c
706
work->async_id = 0;
fs/smb/server/smb2pdu.c
7063
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
7069
static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
7082
ret = ksmbd_conn_rdma_read(work->conn, data_buf, length,
fs/smb/server/smb2pdu.c
7091
ret = ksmbd_vfs_write(work, fp, data_buf, length, &offset, sync, &nbytes);
fs/smb/server/smb2pdu.c
710
void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
fs/smb/server/smb2pdu.c
7105
int smb2_write(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
7116
unsigned int max_write_size = work->conn->vals->max_write_size;
fs/smb/server/smb2pdu.c
7120
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
7122
if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_PIPE)) {
fs/smb/server/smb2pdu.c
7124
return smb2_write_pipe(work);
fs/smb/server/smb2pdu.c
7135
max_write_size = get_smbd_max_read_write_size(work->conn->transport);
fs/smb/server/smb2pdu.c
7151
err = smb2_set_remote_key_for_rdma(work,
fs/smb/server/smb2pdu.c
7160
if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
fs/smb/server/smb2pdu.c
7166
fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
fs/smb/server/smb2pdu.c
7201
err = ksmbd_vfs_write(work, fp, data_buf, length, &offset,
fs/smb/server/smb2pdu.c
7209
nbytes = smb2_write_rdma_channel(work, req, fp, offset, length,
fs/smb/server/smb2pdu.c
7223
err = ksmbd_iov_pin_rsp(work, rsp, offsetof(struct smb2_write_rsp, Buffer));
fs/smb/server/smb2pdu.c
7226
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
724
in_work->conn = work->conn;
fs/smb/server/smb2pdu.c
7245
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
7246
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
725
memcpy(smb_get_msg(in_work->response_buf), ksmbd_resp_buf_next(work),
fs/smb/server/smb2pdu.c
7256
int smb2_flush(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
7262
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
7266
err = ksmbd_vfs_fsync(work, req->VolatileFileId, req->PersistentFileId);
fs/smb/server/smb2pdu.c
7272
return ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_flush_rsp));
fs/smb/server/smb2pdu.c
7276
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
7286
int smb2_cancel(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
7288
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
7289
struct smb2_hdr *hdr = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
7294
if (work->next_smb2_rcv_hdr_off)
fs/smb/server/smb2pdu.c
7295
hdr = ksmbd_resp_buf_next(work);
fs/smb/server/smb2pdu.c
730
rsp_hdr->Id.AsyncId = cpu_to_le64(work->async_id);
fs/smb/server/smb2pdu.c
7330
iter == work)
fs/smb/server/smb2pdu.c
7344
work->send_no_response = 1;
fs/smb/server/smb2pdu.c
7454
int smb2_lock(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
7474
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
7477
fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
fs/smb/server/smb2pdu.c
7691
rc = setup_async_work(work,
fs/smb/server/smb2pdu.c
7700
list_add(&work->fp_entry, &fp->blocked_works);
fs/smb/server/smb2pdu.c
7703
smb2_send_interim_resp(work, STATUS_PENDING);
fs/smb/server/smb2pdu.c
7708
list_del(&work->fp_entry);
fs/smb/server/smb2pdu.c
7711
if (work->state != KSMBD_WORK_ACTIVE) {
fs/smb/server/smb2pdu.c
7715
if (work->state == KSMBD_WORK_CANCELLED) {
fs/smb/server/smb2pdu.c
7719
smb2_send_interim_resp(work,
fs/smb/server/smb2pdu.c
7721
work->send_no_response = 1;
fs/smb/server/smb2pdu.c
7732
release_async_work(work);
fs/smb/server/smb2pdu.c
7736
spin_lock(&work->conn->llist_lock);
fs/smb/server/smb2pdu.c
7738
&work->conn->lock_list);
fs/smb/server/smb2pdu.c
7741
spin_unlock(&work->conn->llist_lock);
fs/smb/server/smb2pdu.c
7753
smb_break_all_oplock(work, fp);
fs/smb/server/smb2pdu.c
7759
err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lock_rsp));
fs/smb/server/smb2pdu.c
7763
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
7790
spin_lock(&work->conn->llist_lock);
fs/smb/server/smb2pdu.c
7794
spin_unlock(&work->conn->llist_lock);
fs/smb/server/smb2pdu.c
7815
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
7816
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
7820
static int fsctl_copychunk(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
7873
src_fp = ksmbd_lookup_foreign_fd(work,
fs/smb/server/smb2pdu.c
7875
dst_fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id);
fs/smb/server/smb2pdu.c
7898
ret = ksmbd_vfs_copy_file_ranges(work, src_fp, dst_fp,
fs/smb/server/smb2pdu.c
7926
ksmbd_fd_put(work, src_fp);
fs/smb/server/smb2pdu.c
7927
ksmbd_fd_put(work, dst_fp);
fs/smb/server/smb2pdu.c
8102
static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
fs/smb/server/smb2pdu.c
8121
fp = ksmbd_lookup_fd_fast(work, id);
fs/smb/server/smb2pdu.c
8130
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
8134
static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id,
fs/smb/server/smb2pdu.c
8143
rpc_resp = ksmbd_rpc_ioctl(work->sess, id, data_buf,
fs/smb/server/smb2pdu.c
8179
static inline int fsctl_set_sparse(struct ksmbd_work *work, u64 id,
fs/smb/server/smb2pdu.c
8187
fp = ksmbd_lookup_fd_fast(work, id);
fs/smb/server/smb2pdu.c
8199
test_share_config_flag(work->tcon->share_conf,
fs/smb/server/smb2pdu.c
8217
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
8221
static int fsctl_request_resume_key(struct ksmbd_work *work,
fs/smb/server/smb2pdu.c
8227
fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
fs/smb/server/smb2pdu.c
8234
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
8245
int smb2_ioctl(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
8251
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
8257
if (work->next_smb2_rcv_hdr_off) {
fs/smb/server/smb2pdu.c
8258
req = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
8259
rsp = ksmbd_resp_buf_next(work);
fs/smb/server/smb2pdu.c
8262
work->compound_fid);
fs/smb/server/smb2pdu.c
8263
id = work->compound_fid;
fs/smb/server/smb2pdu.c
8266
req = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
8267
rsp = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
8281
ret = smb2_calc_max_out_buf_len(work,
fs/smb/server/smb2pdu.c
8319
nbytes = fsctl_pipe_transceive(work, id, out_buf_len, req, rsp);
fs/smb/server/smb2pdu.c
8361
ret = fsctl_request_resume_key(work, req,
fs/smb/server/smb2pdu.c
8371
if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
fs/smb/server/smb2pdu.c
8391
fsctl_copychunk(work,
fs/smb/server/smb2pdu.c
8405
ret = fsctl_set_sparse(work, id, (struct file_sparse *)buffer);
fs/smb/server/smb2pdu.c
8415
if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
fs/smb/server/smb2pdu.c
8439
fp = ksmbd_lookup_fd_fast(work, id);
fs/smb/server/smb2pdu.c
8445
ret = ksmbd_vfs_zero_data(work, fp, off, len);
fs/smb/server/smb2pdu.c
8446
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
8458
ret = fsctl_query_allocated_ranges(work, id,
fs/smb/server/smb2pdu.c
8478
fp = ksmbd_lookup_fd_fast(work, id);
fs/smb/server/smb2pdu.c
8488
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
8505
fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle,
fs/smb/server/smb2pdu.c
8513
fp_out = ksmbd_lookup_fd_fast(work, id);
fs/smb/server/smb2pdu.c
8550
ksmbd_fd_put(work, fp_in);
fs/smb/server/smb2pdu.c
8551
ksmbd_fd_put(work, fp_out);
fs/smb/server/smb2pdu.c
8572
ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_ioctl_rsp) + nbytes);
fs/smb/server/smb2pdu.c
8589
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
8599
static void smb20_oplock_break_ack(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
8611
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
8619
fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id);
fs/smb/server/smb2pdu.c
8622
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
8630
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
8631
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
8706
ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_oplock_break));
fs/smb/server/smb2pdu.c
8709
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
8715
ksmbd_fd_put(work, fp);
fs/smb/server/smb2pdu.c
8739
static void smb21_lease_break_ack(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
8741
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
8751
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
8758
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
8852
ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lease_ack));
fs/smb/server/smb2pdu.c
8855
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
8871
int smb2_oplock_break(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
8878
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
8882
smb20_oplock_break_ack(work);
fs/smb/server/smb2pdu.c
8885
smb21_lease_break_ack(work);
fs/smb/server/smb2pdu.c
8891
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
8904
int smb2_notify(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
8911
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
8913
if (work->next_smb2_rcv_hdr_off && req->hdr.NextCommand) {
fs/smb/server/smb2pdu.c
8915
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
8919
smb2_set_err_rsp(work);
fs/smb/server/smb2pdu.c
8931
bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command)
fs/smb/server/smb2pdu.c
8933
struct smb2_hdr *rcv_hdr2 = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
8950
int smb2_check_sign_req(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
8958
hdr = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
8959
if (work->next_smb2_rcv_hdr_off)
fs/smb/server/smb2pdu.c
8960
hdr = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
8962
if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off)
fs/smb/server/smb2pdu.c
8963
len = get_rfc1002_len(work->request_buf);
fs/smb/server/smb2pdu.c
8967
len = get_rfc1002_len(work->request_buf) -
fs/smb/server/smb2pdu.c
8968
work->next_smb2_rcv_hdr_off;
fs/smb/server/smb2pdu.c
8976
ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, 1,
fs/smb/server/smb2pdu.c
8992
void smb2_set_sign_rsp(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
8999
hdr = ksmbd_resp_buf_curr(work);
fs/smb/server/smb2pdu.c
9004
iov = &work->iov[work->iov_idx - 1];
fs/smb/server/smb2pdu.c
9007
iov = &work->iov[work->iov_idx];
fs/smb/server/smb2pdu.c
9010
ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, n_vec,
fs/smb/server/smb2pdu.c
9021
int smb3_check_sign_req(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
9023
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
9032
hdr = smb_get_msg(work->request_buf);
fs/smb/server/smb2pdu.c
9033
if (work->next_smb2_rcv_hdr_off)
fs/smb/server/smb2pdu.c
9034
hdr = ksmbd_req_buf_next(work);
fs/smb/server/smb2pdu.c
9036
if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off)
fs/smb/server/smb2pdu.c
9037
len = get_rfc1002_len(work->request_buf);
fs/smb/server/smb2pdu.c
9041
len = get_rfc1002_len(work->request_buf) -
fs/smb/server/smb2pdu.c
9042
work->next_smb2_rcv_hdr_off;
fs/smb/server/smb2pdu.c
9045
signing_key = work->sess->smb3signingkey;
fs/smb/server/smb2pdu.c
9047
chann = lookup_chann_list(work->sess, conn);
fs/smb/server/smb2pdu.c
9080
void smb3_set_sign_rsp(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
9082
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
9090
hdr = ksmbd_resp_buf_curr(work);
fs/smb/server/smb2pdu.c
9094
signing_key = work->sess->smb3signingkey;
fs/smb/server/smb2pdu.c
9096
chann = lookup_chann_list(work->sess, work->conn);
fs/smb/server/smb2pdu.c
9110
iov = &work->iov[work->iov_idx - 1];
fs/smb/server/smb2pdu.c
9113
iov = &work->iov[work->iov_idx];
fs/smb/server/smb2pdu.c
9126
void smb3_preauth_hash_rsp(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
9128
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
9129
struct ksmbd_session *sess = work->sess;
fs/smb/server/smb2pdu.c
9135
WORK_BUFFERS(work, req, rsp);
fs/smb/server/smb2pdu.c
9139
ksmbd_gen_preauth_integrity_hash(conn, work->response_buf,
fs/smb/server/smb2pdu.c
9157
ksmbd_gen_preauth_integrity_hash(conn, work->response_buf,
fs/smb/server/smb2pdu.c
9182
int smb3_encrypt_resp(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
9184
struct kvec *iov = work->iov;
fs/smb/server/smb2pdu.c
9193
fill_transform_hdr(tr_buf, work->response_buf, work->conn->cipher_type);
fs/smb/server/smb2pdu.c
9197
work->tr_buf = tr_buf;
fs/smb/server/smb2pdu.c
9199
return ksmbd_crypt_message(work, iov, work->iov_idx + 1, 1);
fs/smb/server/smb2pdu.c
9209
int smb3_decrypt_req(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
9212
char *buf = work->request_buf;
fs/smb/server/smb2pdu.c
9231
sess = ksmbd_session_lookup_all(work->conn, le64_to_cpu(tr_hdr->SessionId));
fs/smb/server/smb2pdu.c
9243
rc = ksmbd_crypt_message(work, iov, 2, 0);
fs/smb/server/smb2pdu.c
9253
bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
fs/smb/server/smb2pdu.c
9255
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb2pdu.c
9256
struct ksmbd_session *sess = work->sess;
fs/smb/server/smb2pdu.c
9257
struct smb2_hdr *rsp = smb_get_msg(work->response_buf);
fs/smb/server/smb2pdu.c
9262
if (work->next_smb2_rcv_hdr_off)
fs/smb/server/smb2pdu.c
9263
rsp = ksmbd_resp_buf_next(work);
fs/smb/server/smb2pdu.h
330
bool is_smb2_neg_cmd(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
331
bool is_smb2_rsp(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
333
u16 get_smb2_cmd_val(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
334
void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err);
fs/smb/server/smb2pdu.h
335
int init_smb2_rsp_hdr(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
336
int smb2_allocate_rsp_buf(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
337
bool is_chained_smb2_message(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
338
int init_smb2_neg_rsp(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
339
void smb2_set_err_rsp(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
340
int smb2_check_user_session(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
341
int smb2_get_ksmbd_tcon(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
342
bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command);
fs/smb/server/smb2pdu.h
343
int smb2_check_sign_req(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
344
void smb2_set_sign_rsp(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
345
int smb3_check_sign_req(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
346
void smb3_set_sign_rsp(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
350
int setup_async_work(struct ksmbd_work *work, void (*fn)(void **),
fs/smb/server/smb2pdu.h
352
void release_async_work(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
353
void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status);
fs/smb/server/smb2pdu.h
356
void smb3_preauth_hash_rsp(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
358
int smb3_decrypt_req(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
359
int smb3_encrypt_resp(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
360
bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
361
int smb2_set_rsp_credits(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
365
int ksmbd_smb2_check_message(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
368
int smb2_handle_negotiate(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
369
int smb2_negotiate_request(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
370
int smb2_sess_setup(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
371
int smb2_tree_connect(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
372
int smb2_tree_disconnect(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
373
int smb2_session_logoff(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
374
int smb2_open(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
375
int smb2_query_info(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
376
int smb2_query_dir(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
377
int smb2_close(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
378
int smb2_echo(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
379
int smb2_set_info(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
380
int smb2_read(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
381
int smb2_write(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
382
int smb2_flush(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
383
int smb2_cancel(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
384
int smb2_lock(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
385
int smb2_ioctl(struct ksmbd_work *work);
fs/smb/server/smb2pdu.h
386
int smb2_oplock_break(struct ksmbd_work *work);
fs/smb/server/smb_common.c
159
int ksmbd_verify_smb_message(struct ksmbd_work *work)
fs/smb/server/smb_common.c
161
struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work);
fs/smb/server/smb_common.c
165
return ksmbd_smb2_check_message(work);
fs/smb/server/smb_common.c
167
hdr = smb_get_msg(work->request_buf);
fs/smb/server/smb_common.c
170
work->conn->outstanding_credits++;
fs/smb/server/smb_common.c
333
static u16 get_smb1_cmd_val(struct ksmbd_work *work)
fs/smb/server/smb_common.c
344
static int init_smb1_rsp_hdr(struct ksmbd_work *work)
fs/smb/server/smb_common.c
346
struct smb_hdr *rsp_hdr = (struct smb_hdr *)smb_get_msg(work->response_buf);
fs/smb/server/smb_common.c
347
struct smb_hdr *rcv_hdr = (struct smb_hdr *)smb_get_msg(work->request_buf);
fs/smb/server/smb_common.c
365
static int smb1_check_user_session(struct ksmbd_work *work)
fs/smb/server/smb_common.c
367
unsigned int cmd = work->conn->ops->get_cmd_val(work);
fs/smb/server/smb_common.c
381
static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
fs/smb/server/smb_common.c
383
work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
fs/smb/server/smb_common.c
385
work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
fs/smb/server/smb_common.c
387
if (!work->response_buf) {
fs/smb/server/smb_common.c
401
static void set_smb1_rsp_status(struct ksmbd_work *work, __le32 err)
fs/smb/server/smb_common.c
403
work->send_no_response = 1;
fs/smb/server/smb_common.c
418
static int smb1_negotiate(struct ksmbd_work *work)
fs/smb/server/smb_common.c
420
return ksmbd_smb_negotiate_common(work, SMB_COM_NEGOTIATE);
fs/smb/server/smb_common.c
453
int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
fs/smb/server/smb_common.c
462
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb_common.c
488
rc = ksmbd_vfs_fill_dentry_attrs(work,
fs/smb/server/smb_common.c
597
static int smb_handle_negotiate(struct ksmbd_work *work)
fs/smb/server/smb_common.c
599
struct smb_negotiate_rsp *neg_rsp = smb_get_msg(work->response_buf);
fs/smb/server/smb_common.c
603
if (ksmbd_iov_pin_rsp(work, (void *)neg_rsp,
fs/smb/server/smb_common.c
609
neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
fs/smb/server/smb_common.c
614
int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
fs/smb/server/smb_common.c
616
struct ksmbd_conn *conn = work->conn;
fs/smb/server/smb_common.c
620
ksmbd_negotiate_smb_dialect(work->request_buf);
fs/smb/server/smb_common.c
624
ret = smb2_handle_negotiate(work);
fs/smb/server/smb_common.c
631
init_smb2_neg_rsp(work);
fs/smb/server/smb_common.c
635
return smb_handle_negotiate(work);
fs/smb/server/smb_common.c
763
int __ksmbd_override_fsids(struct ksmbd_work *work,
fs/smb/server/smb_common.c
766
struct ksmbd_session *sess = work->sess;
fs/smb/server/smb_common.c
806
WARN_ON(work->saved_cred);
fs/smb/server/smb_common.c
807
work->saved_cred = override_creds(cred);
fs/smb/server/smb_common.c
811
int ksmbd_override_fsids(struct ksmbd_work *work)
fs/smb/server/smb_common.c
813
return __ksmbd_override_fsids(work, work->tcon->share_conf);
fs/smb/server/smb_common.c
816
void ksmbd_revert_fsids(struct ksmbd_work *work)
fs/smb/server/smb_common.c
819
WARN_ON(!work->saved_cred);
fs/smb/server/smb_common.c
821
cred = revert_creds(work->saved_cred);
fs/smb/server/smb_common.c
822
work->saved_cred = NULL;
fs/smb/server/smb_common.h
149
int (*allocate_rsp_buf)(struct ksmbd_work *work);
fs/smb/server/smb_common.h
150
int (*set_rsp_credits)(struct ksmbd_work *work);
fs/smb/server/smb_common.h
151
int (*check_user_session)(struct ksmbd_work *work);
fs/smb/server/smb_common.h
152
int (*get_ksmbd_tcon)(struct ksmbd_work *work);
fs/smb/server/smb_common.h
153
bool (*is_sign_req)(struct ksmbd_work *work, unsigned int command);
fs/smb/server/smb_common.h
154
int (*check_sign_req)(struct ksmbd_work *work);
fs/smb/server/smb_common.h
155
void (*set_sign_rsp)(struct ksmbd_work *work);
fs/smb/server/smb_common.h
159
int (*decrypt_req)(struct ksmbd_work *work);
fs/smb/server/smb_common.h
160
int (*encrypt_resp)(struct ksmbd_work *work);
fs/smb/server/smb_common.h
173
int ksmbd_verify_smb_message(struct ksmbd_work *work);
fs/smb/server/smb_common.h
181
int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
fs/smb/server/smb_common.h
195
int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command);
fs/smb/server/smb_common.h
198
int __ksmbd_override_fsids(struct ksmbd_work *work,
fs/smb/server/smb_common.h
200
int ksmbd_override_fsids(struct ksmbd_work *work);
fs/smb/server/smb_common.h
201
void ksmbd_revert_fsids(struct ksmbd_work *work);
fs/smb/server/transport_rdma.c
1019
static void smb_direct_post_recv_credits(struct work_struct *work)
fs/smb/server/transport_rdma.c
1022
container_of(work, struct smbdirect_socket, recv_io.posted.refill_work);
fs/smb/server/transport_rdma.c
152
static void smb_direct_post_recv_credits(struct work_struct *work);
fs/smb/server/transport_rdma.c
2000
queue_work(sc->workqueue, &sc->connect.work);
fs/smb/server/transport_rdma.c
245
static void smb_direct_disconnect_rdma_work(struct work_struct *work)
fs/smb/server/transport_rdma.c
248
container_of(work, struct smbdirect_socket, disconnect_work);
fs/smb/server/transport_rdma.c
259
disable_work(&sc->connect.work);
fs/smb/server/transport_rdma.c
315
disable_work(&sc->connect.work);
fs/smb/server/transport_rdma.c
372
static void smb_direct_send_immediate_work(struct work_struct *work)
fs/smb/server/transport_rdma.c
375
container_of(work, struct smbdirect_socket, idle.immediate_work);
fs/smb/server/transport_rdma.c
383
static void smb_direct_idle_connection_timer(struct work_struct *work)
fs/smb/server/transport_rdma.c
386
container_of(work, struct smbdirect_socket, idle.timer_work.work);
fs/smb/server/transport_rdma.c
486
disable_work_sync(&sc->connect.work);
fs/smb/server/transport_rdma.c
744
static void smb_direct_negotiate_recv_work(struct work_struct *work);
fs/smb/server/transport_rdma.c
819
INIT_WORK(&sc->connect.work, smb_direct_negotiate_recv_work);
fs/smb/server/transport_rdma.c
821
queue_work(sc->workqueue, &sc->connect.work);
fs/smb/server/transport_rdma.c
826
static void smb_direct_negotiate_recv_work(struct work_struct *work)
fs/smb/server/transport_rdma.c
829
container_of(work, struct smbdirect_socket, connect.work);
fs/smb/server/vfs.c
1141
int __ksmbd_vfs_kern_path(struct ksmbd_work *work, char *filepath,
fs/smb/server/vfs.c
1145
struct ksmbd_share_config *share_conf = work->tcon->share_conf;
fs/smb/server/vfs.c
1172
work->conn->um);
fs/smb/server/vfs.c
1213
int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *filepath,
fs/smb/server/vfs.c
1217
return __ksmbd_vfs_kern_path(work, filepath, flags, path,
fs/smb/server/vfs.c
1234
int ksmbd_vfs_kern_path_start_removing(struct ksmbd_work *work, char *filepath,
fs/smb/server/vfs.c
1238
return __ksmbd_vfs_kern_path(work, filepath, flags, path,
fs/smb/server/vfs.c
1249
struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
fs/smb/server/vfs.c
1257
abs_name = convert_to_unix_name(work->tcon->share_conf, name);
fs/smb/server/vfs.c
151
int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
fs/smb/server/vfs.c
157
dentry = ksmbd_vfs_kern_path_create(work, name,
fs/smb/server/vfs.c
1614
int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
fs/smb/server/vfs.c
1619
struct ksmbd_share_config *share_conf = work->tcon->share_conf;
fs/smb/server/vfs.c
1645
if (test_share_config_flag(work->tcon->share_conf,
fs/smb/server/vfs.c
170
ksmbd_vfs_inherit_owner(work, d_inode(path.dentry),
fs/smb/server/vfs.c
1708
int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
fs/smb/server/vfs.c
1738
smb_break_all_levII_oplock(work, dst_fp, 1);
fs/smb/server/vfs.c
1740
if (!work->tcon->posix_extensions) {
fs/smb/server/vfs.c
188
int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
fs/smb/server/vfs.c
195
dentry = ksmbd_vfs_kern_path_create(work, name,
fs/smb/server/vfs.c
215
ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(dentry));
fs/smb/server/vfs.c
344
int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
fs/smb/server/vfs.c
357
if (work->conn->connection_type) {
fs/smb/server/vfs.c
367
if (!work->tcon->posix_extensions) {
fs/smb/server/vfs.c
42
static void ksmbd_vfs_inherit_owner(struct ksmbd_work *work,
fs/smb/server/vfs.c
46
if (!test_share_config_flag(work->tcon->share_conf,
fs/smb/server/vfs.c
466
int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
fs/smb/server/vfs.c
474
if (work->conn->connection_type) {
fs/smb/server/vfs.c
492
if (!work->tcon->posix_extensions) {
fs/smb/server/vfs.c
505
smb_break_all_levII_oplock(work, fp, 1);
fs/smb/server/vfs.c
553
int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id)
fs/smb/server/vfs.c
558
fp = ksmbd_lookup_fd_slow(work, fid, p_id);
fs/smb/server/vfs.c
566
ksmbd_fd_put(work, fp);
fs/smb/server/vfs.c
577
int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path)
fs/smb/server/vfs.c
583
if (ksmbd_override_fsids(work))
fs/smb/server/vfs.c
603
ksmbd_revert_fsids(work);
fs/smb/server/vfs.c
615
int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
fs/smb/server/vfs.c
622
if (ksmbd_override_fsids(work))
fs/smb/server/vfs.c
632
dentry = ksmbd_vfs_kern_path_create(work, newname,
fs/smb/server/vfs.c
658
ksmbd_revert_fsids(work);
fs/smb/server/vfs.c
662
int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
fs/smb/server/vfs.c
669
struct ksmbd_share_config *share_conf = work->tcon->share_conf;
fs/smb/server/vfs.c
674
if (ksmbd_override_fsids(work))
fs/smb/server/vfs.c
709
ksmbd_fd_put(work, parent_fp);
fs/smb/server/vfs.c
712
ksmbd_fd_put(work, parent_fp);
fs/smb/server/vfs.c
736
ksmbd_revert_fsids(work);
fs/smb/server/vfs.c
748
int ksmbd_vfs_truncate(struct ksmbd_work *work,
fs/smb/server/vfs.c
757
smb_break_all_levII_oplock(work, fp, 1);
fs/smb/server/vfs.c
759
if (!work->tcon->posix_extensions) {
fs/smb/server/vfs.c
919
int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
fs/smb/server/vfs.c
922
smb_break_all_levII_oplock(work, fp, 1);
fs/smb/server/vfs.h
120
int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,
fs/smb/server/vfs.h
123
int ksmbd_vfs_kern_path_start_removing(struct ksmbd_work *work, char *name,
fs/smb/server/vfs.h
127
struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
fs/smb/server/vfs.h
133
int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
fs/smb/server/vfs.h
141
int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
fs/smb/server/vfs.h
78
int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode);
fs/smb/server/vfs.h
79
int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode);
fs/smb/server/vfs.h
80
int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
fs/smb/server/vfs.h
82
int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
fs/smb/server/vfs.h
85
int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id);
fs/smb/server/vfs.h
86
int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path);
fs/smb/server/vfs.h
87
int ksmbd_vfs_link(struct ksmbd_work *work,
fs/smb/server/vfs.h
90
int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
fs/smb/server/vfs.h
92
int ksmbd_vfs_truncate(struct ksmbd_work *work,
fs/smb/server/vfs.h
95
int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
fs/smb/server/vfs_cache.c
1025
void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
fs/smb/server/vfs_cache.c
1027
int num = __close_file_table_ids(&work->sess->file_table,
fs/smb/server/vfs_cache.c
1028
work->tcon,
fs/smb/server/vfs_cache.c
1031
atomic_sub(num, &work->conn->stats.open_files_count);
fs/smb/server/vfs_cache.c
1034
void ksmbd_close_session_fds(struct ksmbd_work *work)
fs/smb/server/vfs_cache.c
1036
int num = __close_file_table_ids(&work->sess->file_table,
fs/smb/server/vfs_cache.c
1037
work->tcon,
fs/smb/server/vfs_cache.c
1040
atomic_sub(num, &work->conn->stats.open_files_count);
fs/smb/server/vfs_cache.c
1089
int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
fs/smb/server/vfs_cache.c
1104
fp->conn = work->conn;
fs/smb/server/vfs_cache.c
1105
fp->tcon = work->tcon;
fs/smb/server/vfs_cache.c
1118
__open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
fs/smb/server/vfs_cache.c
506
static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
fs/smb/server/vfs_cache.c
508
__ksmbd_close_fd(&work->sess->file_table, fp);
fs/smb/server/vfs_cache.c
509
atomic_dec(&work->conn->stats.open_files_count);
fs/smb/server/vfs_cache.c
525
int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
fs/smb/server/vfs_cache.c
533
ft = &work->sess->file_table;
fs/smb/server/vfs_cache.c
552
__put_fd_final(work, fp);
fs/smb/server/vfs_cache.c
556
void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
fs/smb/server/vfs_cache.c
563
__put_fd_final(work, fp);
fs/smb/server/vfs_cache.c
575
struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
fs/smb/server/vfs_cache.c
577
return __ksmbd_lookup_fd(&work->sess->file_table, id);
fs/smb/server/vfs_cache.c
580
struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
fs/smb/server/vfs_cache.c
582
struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
fs/smb/server/vfs_cache.c
584
if (__sanity_check(work->tcon, fp))
fs/smb/server/vfs_cache.c
587
ksmbd_fd_put(work, fp);
fs/smb/server/vfs_cache.c
591
struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
fs/smb/server/vfs_cache.c
597
id = work->compound_fid;
fs/smb/server/vfs_cache.c
598
pid = work->compound_pfid;
fs/smb/server/vfs_cache.c
601
fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
fs/smb/server/vfs_cache.c
602
if (!__sanity_check(work->tcon, fp)) {
fs/smb/server/vfs_cache.c
603
ksmbd_fd_put(work, fp);
fs/smb/server/vfs_cache.c
607
ksmbd_fd_put(work, fp);
fs/smb/server/vfs_cache.c
732
struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
fs/smb/server/vfs_cache.c
750
fp->conn = work->conn;
fs/smb/server/vfs_cache.c
751
fp->tcon = work->tcon;
fs/smb/server/vfs_cache.c
762
ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
fs/smb/server/vfs_cache.c
768
atomic_inc(&work->conn->stats.open_files_count);
fs/smb/server/vfs_cache.h
144
int ksmbd_close_fd(struct ksmbd_work *work, u64 id);
fs/smb/server/vfs_cache.h
145
struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id);
fs/smb/server/vfs_cache.h
146
struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id);
fs/smb/server/vfs_cache.h
147
struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
fs/smb/server/vfs_cache.h
149
void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp);
fs/smb/server/vfs_cache.h
158
struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp);
fs/smb/server/vfs_cache.h
161
void ksmbd_close_tree_conn_fds(struct ksmbd_work *work);
fs/smb/server/vfs_cache.h
162
void ksmbd_close_session_fds(struct ksmbd_work *work);
fs/smb/server/vfs_cache.h
163
int ksmbd_close_inode_fds(struct ksmbd_work *work, struct inode *inode);
fs/smb/server/vfs_cache.h
188
int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp);
fs/super.c
1126
static void do_emergency_remount(struct work_struct *work)
fs/super.c
1130
kfree(work);
fs/super.c
1136
struct work_struct *work;
fs/super.c
1138
work = kmalloc_obj(*work, GFP_ATOMIC);
fs/super.c
1139
if (work) {
fs/super.c
1140
INIT_WORK(work, do_emergency_remount);
fs/super.c
1141
schedule_work(work);
fs/super.c
1154
static void do_thaw_all(struct work_struct *work)
fs/super.c
1157
kfree(work);
fs/super.c
1168
struct work_struct *work;
fs/super.c
1170
work = kmalloc_obj(*work, GFP_ATOMIC);
fs/super.c
1171
if (work) {
fs/super.c
1172
INIT_WORK(work, do_thaw_all);
fs/super.c
1173
schedule_work(work);
fs/super.c
275
static void destroy_super_work(struct work_struct *work)
fs/super.c
277
struct super_block *s = container_of(work, struct super_block,
fs/sync.c
115
static void do_sync_work(struct work_struct *work)
fs/sync.c
131
kfree(work);
fs/sync.c
136
struct work_struct *work;
fs/sync.c
138
work = kmalloc_obj(*work, GFP_ATOMIC);
fs/sync.c
139
if (work) {
fs/sync.c
140
INIT_WORK(work, do_sync_work);
fs/sync.c
141
schedule_work(work);
fs/timerfd.c
116
static void timerfd_resume_work(struct work_struct *work)
fs/ufs/super.c
654
static void delayed_sync_fs(struct work_struct *work)
fs/ufs/super.c
658
sbi = container_of(work, struct ufs_sb_info, sync_work.work);
fs/verity/verify.c
478
void fsverity_enqueue_verify_work(struct work_struct *work)
fs/verity/verify.c
480
queue_work(fsverity_read_workqueue, work);
fs/xfs/libxfs/xfs_ag.c
596
aghdr_init_work_f work,
fs/xfs/libxfs/xfs_ag.c
606
(*work)(mp, bp, id);
fs/xfs/libxfs/xfs_ag.c
617
aghdr_init_work_f work;
fs/xfs/libxfs/xfs_ag.c
643
.work = &xfs_sbblock_init,
fs/xfs/libxfs/xfs_ag.c
650
.work = &xfs_agfblock_init,
fs/xfs/libxfs/xfs_ag.c
657
.work = &xfs_agflblock_init,
fs/xfs/libxfs/xfs_ag.c
664
.work = &xfs_agiblock_init,
fs/xfs/libxfs/xfs_ag.c
671
.work = &xfs_bnoroot_init,
fs/xfs/libxfs/xfs_ag.c
679
.work = &xfs_bnoroot_init,
fs/xfs/libxfs/xfs_ag.c
687
.work = &xfs_btroot_init,
fs/xfs/libxfs/xfs_ag.c
695
.work = &xfs_btroot_init,
fs/xfs/libxfs/xfs_ag.c
703
.work = &xfs_rmaproot_init,
fs/xfs/libxfs/xfs_ag.c
711
.work = &xfs_btroot_init,
fs/xfs/libxfs/xfs_ag.c
731
error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
fs/xfs/libxfs/xfs_btree.c
2999
struct work_struct work;
fs/xfs/libxfs/xfs_btree.c
3007
struct work_struct *work)
fs/xfs/libxfs/xfs_btree.c
3009
struct xfs_btree_split_args *args = container_of(work,
fs/xfs/libxfs/xfs_btree.c
3010
struct xfs_btree_split_args, work);
fs/xfs/libxfs/xfs_btree.c
3082
INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
fs/xfs/libxfs/xfs_btree.c
3083
queue_work(xfs_alloc_wq, &args.work);
fs/xfs/libxfs/xfs_btree.c
3085
destroy_work_on_stack(&args.work);
fs/xfs/libxfs/xfs_defer.h
154
struct list_head *work)
fs/xfs/libxfs/xfs_defer.h
156
list_add_tail(work, &dfp->dfp_work);
fs/xfs/xfs_aops.c
188
struct work_struct *work)
fs/xfs/xfs_aops.c
191
container_of(work, struct xfs_inode, i_ioend_work);
fs/xfs/xfs_bmap_item.c
504
struct xfs_bmap_intent *work;
fs/xfs/xfs_bmap_item.c
515
work = xfs_bui_recover_work(mp, dfp, &ip, map);
fs/xfs/xfs_bmap_item.c
516
if (IS_ERR(work))
fs/xfs/xfs_bmap_item.c
517
return PTR_ERR(work);
fs/xfs/xfs_bmap_item.c
530
xfs_ifork_is_realtime(ip, work->bi_whichfork)) {
fs/xfs/xfs_bmap_item.c
535
if (work->bi_type == XFS_BMAP_MAP)
fs/xfs/xfs_bmap_item.c
540
error = xfs_iext_count_extend(tp, ip, work->bi_whichfork, iext_delta);
fs/xfs/xfs_buf.c
1224
struct work_struct *work)
fs/xfs/xfs_buf.c
1227
container_of(work, struct xfs_buf, b_ioend_work);
fs/xfs/xfs_discard.c
82
struct work_struct *work)
fs/xfs/xfs_discard.c
85
container_of(work, struct xfs_busy_extents, endio_work);
fs/xfs/xfs_icache.c
1218
struct work_struct *work)
fs/xfs/xfs_icache.c
1220
struct xfs_mount *mp = container_of(to_delayed_work(work),
fs/xfs/xfs_icache.c
1560
struct work_struct *work)
fs/xfs/xfs_icache.c
1562
struct xfs_perag *pag = container_of(to_delayed_work(work),
fs/xfs/xfs_icache.c
1954
struct work_struct *work)
fs/xfs/xfs_icache.c
1956
struct xfs_inodegc *gc = container_of(to_delayed_work(work),
fs/xfs/xfs_icache.c
1957
struct xfs_inodegc, work);
fs/xfs/xfs_icache.c
2219
mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
fs/xfs/xfs_icache.c
2225
flush_delayed_work(&gc->work);
fs/xfs/xfs_icache.c
2319
mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
fs/xfs/xfs_icache.c
467
mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
fs/xfs/xfs_icache.h
52
void xfs_reclaim_worker(struct work_struct *work);
fs/xfs/xfs_icache.h
73
void xfs_blockgc_worker(struct work_struct *work);
fs/xfs/xfs_icache.h
77
void xfs_inodegc_worker(struct work_struct *work);
fs/xfs/xfs_inode.h
653
void xfs_end_io(struct work_struct *work);
fs/xfs/xfs_log.c
1186
struct work_struct *work)
fs/xfs/xfs_log.c
1189
container_of(work, struct xlog_in_core, ic_end_io_work);
fs/xfs/xfs_log.c
1287
struct work_struct *work)
fs/xfs/xfs_log.c
1289
struct xlog *log = container_of(to_delayed_work(work),
fs/xfs/xfs_log_cil.c
1376
struct work_struct *work)
fs/xfs/xfs_log_cil.c
1380
container_of(work, struct xfs_cil_ctx, push_work);
fs/xfs/xfs_log_cil.c
96
static void xlog_cil_push_work(struct work_struct *work);
fs/xfs/xfs_mount.h
66
struct delayed_work work;
fs/xfs/xfs_mru_cache.c
101
struct delayed_work work; /* Workqueue data for reaping. */
fs/xfs/xfs_mru_cache.c
206
queue_delayed_work(xfs_mru_reap_wq, &mru->work,
fs/xfs/xfs_mru_cache.c
265
struct work_struct *work)
fs/xfs/xfs_mru_cache.c
268
container_of(work, struct xfs_mru_cache, work.work);
fs/xfs/xfs_mru_cache.c
286
queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
fs/xfs/xfs_mru_cache.c
359
INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap);
fs/xfs/xfs_mru_cache.c
384
cancel_delayed_work_sync(&mru->work);
fs/xfs/xfs_pwork.c
39
struct work_struct *work)
fs/xfs/xfs_pwork.c
45
pwork = container_of(work, struct xfs_pwork, work);
fs/xfs/xfs_pwork.c
94
INIT_WORK(&pwork->work, xfs_pwork_work);
fs/xfs/xfs_pwork.c
97
queue_work(pctl->wq, &pwork->work);
fs/xfs/xfs_pwork.h
31
struct work_struct work;
fs/xfs/xfs_super.c
1203
INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
fs/xfs/xfs_super.c
660
struct work_struct *work)
fs/xfs/xfs_super.c
662
struct xfs_mount *mp = container_of(work, struct xfs_mount,
include/cxl/event.h
289
int cxl_cper_register_work(struct work_struct *work);
include/cxl/event.h
290
int cxl_cper_unregister_work(struct work_struct *work);
include/cxl/event.h
292
int cxl_cper_register_prot_err_work(struct work_struct *work);
include/cxl/event.h
293
int cxl_cper_unregister_prot_err_work(struct work_struct *work);
include/cxl/event.h
296
static inline int cxl_cper_register_work(struct work_struct *work)
include/cxl/event.h
301
static inline int cxl_cper_unregister_work(struct work_struct *work)
include/cxl/event.h
309
static inline int cxl_cper_register_prot_err_work(struct work_struct *work)
include/cxl/event.h
313
static inline int cxl_cper_unregister_prot_err_work(struct work_struct *work)
include/drm/display/drm_dp_mst_helper.h
766
struct work_struct work;
include/drm/drm_flip_work.h
51
typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
include/drm/drm_flip_work.h
71
void drm_flip_work_queue(struct drm_flip_work *work, void *val);
include/drm/drm_flip_work.h
72
void drm_flip_work_commit(struct drm_flip_work *work,
include/drm/drm_flip_work.h
74
void drm_flip_work_init(struct drm_flip_work *work,
include/drm/drm_flip_work.h
76
void drm_flip_work_cleanup(struct drm_flip_work *work);
include/drm/drm_vblank_work.h
65
int drm_vblank_work_schedule(struct drm_vblank_work *work,
include/drm/drm_vblank_work.h
67
void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
include/drm/drm_vblank_work.h
68
void (*func)(struct kthread_work *work));
include/drm/drm_vblank_work.h
69
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work);
include/drm/drm_vblank_work.h
70
void drm_vblank_work_flush(struct drm_vblank_work *work);
include/drm/gpu_scheduler.h
375
struct work_struct work;
include/kunit/run-in-irq-context.h
56
static void kunit_irq_test_bh_work_func(struct work_struct *work)
include/kunit/run-in-irq-context.h
59
container_of(work, typeof(*state), bh_work);
include/linux/backing-dev.h
40
void wb_workfn(struct work_struct *work);
include/linux/blkdev.h
1618
int kblockd_schedule_work(struct work_struct *work);
include/linux/bpf.h
1328
struct work_struct work;
include/linux/bpf.h
1765
struct work_struct work;
include/linux/bpf.h
1819
struct work_struct work;
include/linux/bpf.h
1837
struct work_struct work;
include/linux/bpf.h
1922
struct work_struct work;
include/linux/bpf.h
325
struct work_struct work;
include/linux/bpf_mem_alloc.h
16
struct work_struct work;
include/linux/ccp.h
642
struct work_struct work;
include/linux/cdx/mcdi.h
112
struct work_struct work;
include/linux/cdx/mcdi.h
68
struct work_struct work;
include/linux/ceph/messenger.h
508
struct delayed_work work; /* send|recv work */
include/linux/closure.h
151
struct work_struct work;
include/linux/closure.h
267
INIT_WORK(&cl->work, cl->work.func);
include/linux/closure.h
268
BUG_ON(!queue_work(wq, &cl->work));
include/linux/closure.h
270
cl->fn(&cl->work);
include/linux/closure.h
354
struct closure *cl = container_of(ws, struct closure, work); \
include/linux/completion.h
35
#define COMPLETION_INITIALIZER(work) \
include/linux/completion.h
36
{ 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
include/linux/completion.h
38
#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
include/linux/completion.h
39
(*({ init_completion_map(&(work), &(map)); &(work); }))
include/linux/completion.h
41
#define COMPLETION_INITIALIZER_ONSTACK(work) \
include/linux/completion.h
42
(*({ init_completion(&work); &work; }))
include/linux/completion.h
52
#define DECLARE_COMPLETION(work) \
include/linux/completion.h
53
struct completion work = COMPLETION_INITIALIZER(work)
include/linux/completion.h
68
# define DECLARE_COMPLETION_ONSTACK(work) \
include/linux/completion.h
69
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
include/linux/completion.h
70
# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \
include/linux/completion.h
71
struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map)
include/linux/completion.h
73
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
include/linux/completion.h
74
# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work)
include/linux/console_struct.h
178
extern void vc_SAK(struct work_struct *work);
include/linux/crush/crush.h
348
struct crush_work_bucket **work; /* Per-bucket working store */
include/linux/devfreq.h
193
struct delayed_work work;
include/linux/dim.h
149
struct work_struct work;
include/linux/dim.h
250
void (*rx_dim_work)(struct work_struct *work),
include/linux/dim.h
251
void (*tx_dim_work)(struct work_struct *work));
include/linux/dim.h
87
void (*rx_dim_work)(struct work_struct *work);
include/linux/dim.h
88
void (*tx_dim_work)(struct work_struct *work);
include/linux/dma-fence-array.h
46
struct irq_work work;
include/linux/dma-fence-chain.h
47
struct irq_work work;
include/linux/dsa/ksz_common.h
29
struct kthread_work work;
include/linux/dsa/ksz_common.h
33
void (*xmit_work_fn)(struct kthread_work *work);
include/linux/dsa/ocelot.h
172
struct kthread_work work;
include/linux/dsa/ocelot.h
176
void (*xmit_work_fn)(struct kthread_work *work);
include/linux/dsa/sja1105.h
46
struct kthread_work work;
include/linux/dsa/sja1105.h
51
void (*xmit_work_fn)(struct kthread_work *work);
include/linux/edac.h
597
struct delayed_work work;
include/linux/efi.h
1269
struct work_struct work;
include/linux/entry-common.h
100
if (work & SYSCALL_WORK_SYSCALL_RSEQ_SLICE)
include/linux/entry-common.h
104
if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
include/linux/entry-common.h
106
if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
include/linux/entry-common.h
111
if (work & SYSCALL_WORK_SECCOMP) {
include/linux/entry-common.h
120
if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
include/linux/entry-common.h
153
unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
include/linux/entry-common.h
155
if (work & SYSCALL_WORK_ENTER)
include/linux/entry-common.h
156
syscall = syscall_trace_enter(regs, work);
include/linux/entry-common.h
198
static __always_inline bool report_single_step(unsigned long work)
include/linux/entry-common.h
200
if (work & SYSCALL_WORK_SYSCALL_EMU)
include/linux/entry-common.h
203
return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
include/linux/entry-common.h
231
static __always_inline void syscall_exit_work(struct pt_regs *regs, unsigned long work)
include/linux/entry-common.h
241
if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
include/linux/entry-common.h
250
if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
include/linux/entry-common.h
253
step = report_single_step(work);
include/linux/entry-common.h
254
if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
include/linux/entry-common.h
268
unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
include/linux/entry-common.h
285
if (unlikely(work & SYSCALL_WORK_EXIT))
include/linux/entry-common.h
286
syscall_exit_work(regs, work);
include/linux/entry-common.h
81
static __always_inline long syscall_trace_enter(struct pt_regs *regs, unsigned long work)
include/linux/entry-common.h
90
if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
include/linux/firewire.h
236
struct delayed_work work;
include/linux/firewire.h
555
struct work_struct work;
include/linux/firewire.h
612
queue_work(ctx->card->isoc_wq, &ctx->work);
include/linux/fscache.h
114
struct work_struct work; /* Commit/relinq/withdraw work */
include/linux/fscache.h
81
struct work_struct work;
include/linux/fscrypt.h
512
static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
include/linux/fserror.h
29
struct work_struct work;
include/linux/fsverity.h
203
void fsverity_enqueue_verify_work(struct work_struct *work);
include/linux/fsverity.h
279
static inline void fsverity_enqueue_verify_work(struct work_struct *work)
include/linux/greybus/operation.h
102
struct work_struct work;
include/linux/hid-sensor-hub.h
237
struct work_struct work;
include/linux/hisi_acc_qm.h
331
struct work_struct work;
include/linux/i3c/master.h
118
struct work_struct work;
include/linux/interrupt.h
274
struct work_struct work;
include/linux/io_uring_types.h
764
struct io_wq_work work;
include/linux/iommu.h
136
struct work_struct work;
include/linux/irq_work.h
32
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
include/linux/irq_work.h
34
*work = IRQ_WORK_INIT(func);
include/linux/irq_work.h
37
static inline bool irq_work_is_pending(struct irq_work *work)
include/linux/irq_work.h
39
return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;
include/linux/irq_work.h
42
static inline bool irq_work_is_busy(struct irq_work *work)
include/linux/irq_work.h
44
return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
include/linux/irq_work.h
47
static inline bool irq_work_is_hard(struct irq_work *work)
include/linux/irq_work.h
49
return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
include/linux/irq_work.h
52
bool irq_work_queue(struct irq_work *work);
include/linux/irq_work.h
53
bool irq_work_queue_on(struct irq_work *work, int cpu);
include/linux/irq_work.h
56
void irq_work_sync(struct irq_work *work);
include/linux/irqdesc.h
40
struct irq_work work;
include/linux/jump_label_ratelimit.h
12
struct delayed_work work;
include/linux/jump_label_ratelimit.h
18
struct delayed_work work;
include/linux/jump_label_ratelimit.h
24
struct delayed_work work;
include/linux/jump_label_ratelimit.h
28
__static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
include/linux/jump_label_ratelimit.h
30
__static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
include/linux/jump_label_ratelimit.h
33
__static_key_deferred_flush((x), &(x)->work)
include/linux/jump_label_ratelimit.h
37
struct delayed_work *work,
include/linux/jump_label_ratelimit.h
39
extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
include/linux/jump_label_ratelimit.h
43
extern void jump_label_update_timeout(struct work_struct *work);
include/linux/jump_label_ratelimit.h
49
.work = __DELAYED_WORK_INITIALIZER((name).work, \
include/linux/jump_label_ratelimit.h
58
.work = __DELAYED_WORK_INITIALIZER((name).work, \
include/linux/kthread.h
137
typedef void (*kthread_work_func_t)(struct kthread_work *work);
include/linux/kthread.h
162
struct kthread_work work;
include/linux/kthread.h
166
#define KTHREAD_WORK_INIT(work, fn) { \
include/linux/kthread.h
167
.node = LIST_HEAD_INIT((work).node), \
include/linux/kthread.h
172
.work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
include/linux/kthread.h
177
#define DEFINE_KTHREAD_WORK(work, fn) \
include/linux/kthread.h
178
struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
include/linux/kthread.h
193
#define kthread_init_work(work, fn) \
include/linux/kthread.h
195
memset((work), 0, sizeof(struct kthread_work)); \
include/linux/kthread.h
196
INIT_LIST_HEAD(&(work)->node); \
include/linux/kthread.h
197
(work)->func = (fn); \
include/linux/kthread.h
202
kthread_init_work(&(dwork)->work, (fn)); \
include/linux/kthread.h
264
struct kthread_work *work);
include/linux/kthread.h
274
void kthread_flush_work(struct kthread_work *work);
include/linux/kthread.h
277
bool kthread_cancel_work_sync(struct kthread_work *work);
include/linux/kthread.h
278
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
include/linux/kvm_host.h
238
struct work_struct work;
include/linux/leds-pca9532.h
31
struct work_struct work;
include/linux/libata.h
2042
extern void ata_sff_queue_work(struct work_struct *work);
include/linux/mfd/wm8350/pmic.h
713
struct work_struct work;
include/linux/mlx4/device.h
1034
void handle_port_mgmt_change_event(struct work_struct *work);
include/linux/mlx5/driver.h
845
struct work_struct work;
include/linux/mlx5/driver.h
995
struct mlx5_async_work *work);
include/linux/netfs.h
175
struct work_struct work;
include/linux/netfs.h
226
struct work_struct work; /* Result collector work */
include/linux/padata.h
66
struct work_struct work;
include/linux/page_reporting.h
17
struct delayed_work work;
include/linux/pci-pwrctrl.h
49
struct work_struct work;
include/linux/phy.h
2357
void phy_state_machine(struct work_struct *work);
include/linux/pid_namespace.h
49
struct work_struct work;
include/linux/pm.h
699
struct work_struct work;
include/linux/pm_runtime.h
255
static inline bool queue_pm_work(struct work_struct *work) { return false; }
include/linux/pm_runtime.h
63
static inline bool queue_pm_work(struct work_struct *work)
include/linux/pm_runtime.h
65
return queue_work(pm_wq, work);
include/linux/posix-timers_types.h
69
struct callback_head work;
include/linux/power/bq27xxx_battery.h
69
struct delayed_work work;
include/linux/qed/qede_rdma.h
29
struct work_struct work;
include/linux/rseq_types.h
179
struct work_struct work;
include/linux/rtc.h
222
void rtc_timer_do_work(struct work_struct *work);
include/linux/skmsg.h
121
struct delayed_work work;
include/linux/soc/qcom/qmi.h
222
struct work_struct work;
include/linux/srcutree.h
164
.work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
include/linux/srcutree.h
43
struct work_struct work; /* Context for CB invoking. */
include/linux/srcutree.h
97
struct delayed_work work;
include/linux/stop_machine.h
47
struct work_struct work;
include/linux/stop_machine.h
62
static void stop_one_cpu_nowait_workfn(struct work_struct *work)
include/linux/stop_machine.h
65
container_of(work, struct cpu_stop_work, work);
include/linux/stop_machine.h
76
INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
include/linux/stop_machine.h
79
schedule_work(&work_buf->work);
include/linux/thunderbolt.h
550
struct work_struct work;
include/linux/tifm.h
148
void tifm_queue_work(struct work_struct *work);
include/linux/tty_buffer.h
37
struct work_struct work;
include/linux/umh.h
21
struct work_struct work;
include/linux/unwind_deferred.h
28
int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func);
include/linux/unwind_deferred.h
29
int unwind_deferred_request(struct unwind_work *work, u64 *cookie);
include/linux/unwind_deferred.h
30
void unwind_deferred_cancel(struct unwind_work *work);
include/linux/unwind_deferred.h
65
unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
include/linux/unwind_deferred.h
69
unwind_deferred_request(struct unwind_work *work, u64 *timestamp)
include/linux/unwind_deferred.h
72
static inline void unwind_deferred_cancel(struct unwind_work *work) {}
include/linux/unwind_deferred_types.h
38
struct callback_head work;
include/linux/unwind_deferred_types.h
45
typedef void (*unwind_callback_t)(struct unwind_work *work,
include/linux/usb/gadget.h
443
struct work_struct work;
include/linux/usb/gadget.h
489
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
include/linux/usb/serial.h
105
struct work_struct work;
include/linux/user_namespace.h
105
struct work_struct work;
include/linux/vmpressure.h
27
struct work_struct work;
include/linux/workqueue.h
115
struct work_struct work;
include/linux/workqueue.h
124
struct work_struct work;
include/linux/workqueue.h
212
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
include/linux/workqueue.h
214
return container_of(work, struct delayed_work, work);
include/linux/workqueue.h
217
static inline struct rcu_work *to_rcu_work(struct work_struct *work)
include/linux/workqueue.h
219
return container_of(work, struct rcu_work, work);
include/linux/workqueue.h
223
struct work_struct work;
include/linux/workqueue.h
24
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
include/linux/workqueue.h
246
.work = __WORK_INITIALIZER((n).work, (f)), \
include/linux/workqueue.h
261
extern void __init_work(struct work_struct *work, int onstack);
include/linux/workqueue.h
262
extern void destroy_work_on_stack(struct work_struct *work);
include/linux/workqueue.h
263
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
include/linux/workqueue.h
264
static inline unsigned int work_static(struct work_struct *work)
include/linux/workqueue.h
266
return *work_data_bits(work) & WORK_STRUCT_STATIC;
include/linux/workqueue.h
269
static inline void __init_work(struct work_struct *work, int onstack) { }
include/linux/workqueue.h
270
static inline void destroy_work_on_stack(struct work_struct *work) { }
include/linux/workqueue.h
271
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
include/linux/workqueue.h
272
static inline unsigned int work_static(struct work_struct *work) { return 0; }
include/linux/workqueue.h
319
INIT_WORK(&(_work)->work, (_func)); \
include/linux/workqueue.h
327
INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
include/linux/workqueue.h
346
INIT_WORK(&(_work)->work, (_func))
include/linux/workqueue.h
349
INIT_WORK_ONSTACK(&(_work)->work, (_func))
include/linux/workqueue.h
355
#define work_pending(work) \
include/linux/workqueue.h
356
test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
include/linux/workqueue.h
364
work_pending(&(w)->work)
include/linux/workqueue.h
594
struct work_struct *work);
include/linux/workqueue.h
596
struct work_struct *work);
include/linux/workqueue.h
598
struct delayed_work *work, unsigned long delay);
include/linux/workqueue.h
610
extern bool flush_work(struct work_struct *work);
include/linux/workqueue.h
611
extern bool cancel_work(struct work_struct *work);
include/linux/workqueue.h
612
extern bool cancel_work_sync(struct work_struct *work);
include/linux/workqueue.h
618
extern bool disable_work(struct work_struct *work);
include/linux/workqueue.h
619
extern bool disable_work_sync(struct work_struct *work);
include/linux/workqueue.h
620
extern bool enable_work(struct work_struct *work);
include/linux/workqueue.h
635
extern unsigned int work_busy(struct work_struct *work);
include/linux/workqueue.h
667
struct work_struct *work)
include/linux/workqueue.h
669
return queue_work_on(WORK_CPU_UNBOUND, wq, work);
include/linux/workqueue.h
709
static inline bool schedule_work_on(int cpu, struct work_struct *work)
include/linux/workqueue.h
711
return queue_work_on(cpu, system_percpu_wq, work);
include/linux/workqueue.h
728
static inline bool schedule_work(struct work_struct *work)
include/linux/workqueue.h
730
return queue_work(system_percpu_wq, work);
include/linux/workqueue.h
750
struct work_struct *work)
include/linux/workqueue.h
752
if (enable_work(work)) {
include/linux/workqueue.h
753
queue_work(wq, work);
include/linux/workqueue_types.h
13
typedef void (*work_func_t)(struct work_struct *work);
include/linux/writeback.h
268
void inode_switch_wbs_work_fn(struct work_struct *work);
include/media/cec.h
71
struct delayed_work work;
include/media/i2c/ir-kbd-i2c.h
21
struct delayed_work work;
include/net/bluetooth/coredump.h
64
void hci_devcd_rx(struct work_struct *work);
include/net/bluetooth/coredump.h
65
void hci_devcd_timeout(struct work_struct *work);
include/net/bluetooth/coredump.h
78
static inline void hci_devcd_rx(struct work_struct *work) {}
include/net/bluetooth/coredump.h
79
static inline void hci_devcd_timeout(struct work_struct *work) {}
include/net/bluetooth/l2cap.h
838
struct delayed_work *work, long timeout)
include/net/bluetooth/l2cap.h
845
if (!cancel_delayed_work(work))
include/net/bluetooth/l2cap.h
848
schedule_delayed_work(work, timeout);
include/net/bluetooth/l2cap.h
852
struct delayed_work *work)
include/net/bluetooth/l2cap.h
858
ret = cancel_delayed_work(work);
include/net/cfg80211.h
6453
static inline void wiphy_work_init(struct wiphy_work *work,
include/net/cfg80211.h
6456
INIT_LIST_HEAD(&work->entry);
include/net/cfg80211.h
6457
work->func = func;
include/net/cfg80211.h
6472
void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
include/net/cfg80211.h
6482
void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
include/net/cfg80211.h
6492
void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work);
include/net/cfg80211.h
6495
struct wiphy_work work;
include/net/cfg80211.h
6506
wiphy_work_init(&dwork->work, func);
include/net/cfg80211.h
6598
struct wiphy_work work;
include/net/cfg80211.h
6610
wiphy_work_init(&hrwork->work, func);
include/net/espintcp.h
29
struct work_struct work;
include/net/libeth/xdp.h
1602
void name(struct work_struct *work) \
include/net/libeth/xdp.h
1604
libeth_xdpsq_run_timer(work, poll); \
include/net/libeth/xdp.h
221
void (*poll)(struct work_struct *work));
include/net/libeth/xdp.h
260
libeth_xdpsq_run_timer(struct work_struct *work,
include/net/libeth/xdp.h
263
struct libeth_xdpsq_timer *timer = container_of(work, typeof(*timer),
include/net/libeth/xdp.h
264
dwork.work);
include/net/mac80211.h
6439
void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work);
include/net/mana/gdma.h
85
struct work_struct work;
include/net/netns/xfrm.h
24
struct work_struct work;
include/net/psp/types.h
147
struct work_struct work;
include/net/strparser.h
104
struct work_struct work;
include/net/tls.h
124
struct work_struct work;
include/net/tls.h
93
struct delayed_work work;
include/net/xdp_sock.h
38
struct work_struct work;
include/net/xsk_buff_pool.h
55
struct work_struct work;
include/rdma/ib_verbs.h
1643
struct work_struct work;
include/scsi/libfcoe.h
333
struct work_struct work;
include/scsi/libsas.h
197
struct work_struct work;
include/scsi/libsas.h
216
INIT_WORK(&sw->work, fn);
include/scsi/libsas.h
221
struct sas_work work;
include/scsi/libsas.h
225
static inline struct sas_discovery_event *to_sas_discovery_event(struct work_struct *work)
include/scsi/libsas.h
227
struct sas_discovery_event *ev = container_of(work, typeof(*ev), work.work);
include/scsi/libsas.h
253
struct sas_work work;
include/scsi/libsas.h
279
struct sas_work work;
include/scsi/libsas.h
284
static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
include/scsi/libsas.h
286
struct asd_sas_event *ev = container_of(work, typeof(*ev), work.work);
include/scsi/libsas.h
295
INIT_SAS_WORK(&ev->work, fn);
include/sound/ak4113.h
292
struct delayed_work work;
include/sound/ak4114.h
175
struct delayed_work work;
include/sound/emu10k1.h
1695
struct work_struct work;
include/sound/sdca_ump.h
46
void sdca_ump_cancel_timeout(struct delayed_work *work);
include/sound/sdca_ump.h
47
void sdca_ump_schedule_timeout(struct delayed_work *work,
include/sound/soc-jack.h
71
struct delayed_work work;
include/target/target_core_base.h
538
struct work_struct work;
include/target/target_core_base.h
805
struct work_struct work;
include/trace/events/btrfs.h
1525
TP_PROTO(const struct btrfs_work *work),
include/trace/events/btrfs.h
1527
TP_ARGS(work),
include/trace/events/btrfs.h
1530
__field( const void *, work )
include/trace/events/btrfs.h
1537
TP_fast_assign_btrfs(btrfs_work_owner(work),
include/trace/events/btrfs.h
1538
__entry->work = work;
include/trace/events/btrfs.h
1539
__entry->wq = work->wq;
include/trace/events/btrfs.h
1540
__entry->func = work->func;
include/trace/events/btrfs.h
1541
__entry->ordered_func = work->ordered_func;
include/trace/events/btrfs.h
1542
__entry->normal_work = &work->normal_work;
include/trace/events/btrfs.h
1546
__entry->work, __entry->normal_work, __entry->wq,
include/trace/events/btrfs.h
1574
TP_PROTO(const struct btrfs_work *work),
include/trace/events/btrfs.h
1576
TP_ARGS(work)
include/trace/events/btrfs.h
1581
TP_PROTO(const struct btrfs_work *work),
include/trace/events/btrfs.h
1583
TP_ARGS(work)
include/trace/events/btrfs.h
1595
TP_PROTO(const struct btrfs_work *work),
include/trace/events/btrfs.h
1597
TP_ARGS(work)
include/trace/events/io_uring.h
152
__field( struct io_wq_work *, work )
include/trace/events/io_uring.h
164
__entry->work = &req->work;
include/trace/events/io_uring.h
173
__entry->hashed ? "hashed" : "normal", __entry->work)
include/trace/events/napi.h
16
TP_PROTO(struct napi_struct *napi, int work, int budget),
include/trace/events/napi.h
18
TP_ARGS(napi, work, budget),
include/trace/events/napi.h
23
__field( int, work)
include/trace/events/napi.h
30
__entry->work = work;
include/trace/events/napi.h
36
__entry->work, __entry->budget)
include/trace/events/sched.h
100
__field( void *, work )
include/trace/events/sched.h
105
__entry->work = work;
include/trace/events/sched.h
106
__entry->function = work->func;
include/trace/events/sched.h
109
TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
include/trace/events/sched.h
121
TP_PROTO(struct kthread_work *work, kthread_work_func_t function),
include/trace/events/sched.h
123
TP_ARGS(work, function),
include/trace/events/sched.h
126
__field( void *, work )
include/trace/events/sched.h
131
__entry->work = work;
include/trace/events/sched.h
135
TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
include/trace/events/sched.h
67
struct kthread_work *work),
include/trace/events/sched.h
69
TP_ARGS(worker, work),
include/trace/events/sched.h
72
__field( void *, work )
include/trace/events/sched.h
78
__entry->work = work;
include/trace/events/sched.h
79
__entry->function = work->func;
include/trace/events/sched.h
84
__entry->work, __entry->function, __entry->worker)
include/trace/events/sched.h
95
TP_PROTO(struct kthread_work *work),
include/trace/events/sched.h
97
TP_ARGS(work),
include/trace/events/workqueue.h
100
TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
include/trace/events/workqueue.h
112
TP_PROTO(struct work_struct *work, work_func_t function),
include/trace/events/workqueue.h
114
TP_ARGS(work, function),
include/trace/events/workqueue.h
117
__field( void *, work )
include/trace/events/workqueue.h
122
__entry->work = work;
include/trace/events/workqueue.h
126
TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
include/trace/events/workqueue.h
26
struct work_struct *work),
include/trace/events/workqueue.h
28
TP_ARGS(req_cpu, pwq, work),
include/trace/events/workqueue.h
31
__field( void *, work )
include/trace/events/workqueue.h
39
__entry->work = work;
include/trace/events/workqueue.h
40
__entry->function = work->func;
include/trace/events/workqueue.h
47
__entry->work, __entry->function, __get_str(workqueue),
include/trace/events/workqueue.h
61
TP_PROTO(struct work_struct *work),
include/trace/events/workqueue.h
63
TP_ARGS(work),
include/trace/events/workqueue.h
66
__field( void *, work )
include/trace/events/workqueue.h
71
__entry->work = work;
include/trace/events/workqueue.h
72
__entry->function = work->func;
include/trace/events/workqueue.h
75
TP_printk("work struct %p function=%ps ", __entry->work, __entry->function)
include/trace/events/workqueue.h
86
TP_PROTO(struct work_struct *work),
include/trace/events/workqueue.h
88
TP_ARGS(work),
include/trace/events/workqueue.h
91
__field( void *, work )
include/trace/events/workqueue.h
96
__entry->work = work;
include/trace/events/workqueue.h
97
__entry->function = work->func;
include/trace/events/writeback.h
384
TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
include/trace/events/writeback.h
385
TP_ARGS(wb, work),
include/trace/events/writeback.h
399
__entry->nr_pages = work->nr_pages;
include/trace/events/writeback.h
400
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
include/trace/events/writeback.h
401
__entry->sync_mode = work->sync_mode;
include/trace/events/writeback.h
402
__entry->for_kupdate = work->for_kupdate;
include/trace/events/writeback.h
403
__entry->range_cyclic = work->range_cyclic;
include/trace/events/writeback.h
404
__entry->for_background = work->for_background;
include/trace/events/writeback.h
405
__entry->reason = work->reason;
include/trace/events/writeback.h
423
TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
include/trace/events/writeback.h
424
TP_ARGS(wb, work))
include/trace/events/writeback.h
532
struct wb_writeback_work *work,
include/trace/events/writeback.h
535
TP_ARGS(wb, work, dirtied_before, moved),
include/trace/events/writeback.h
549
__entry->reason = work->reason;
include/uapi/linux/pkt_sched.h
464
__u64 work; /* total work done */
include/xen/xenbus.h
89
struct work_struct work;
io_uring/cancel.c
441
static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
io_uring/cancel.c
443
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
io_uring/cancel.c
475
__cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
io_uring/cancel.c
477
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
io_uring/cancel.c
70
static bool io_cancel_cb(struct io_wq_work *work, void *data)
io_uring/cancel.c
72
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
io_uring/cancel.h
37
__cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data);
io_uring/cancel.h
41
if (req->cancel_seq_set && sequence == req->work.cancel_seq)
io_uring/cancel.h
45
req->work.cancel_seq = sequence;
io_uring/io-wq.c
1000
struct io_wq_work *work, unsigned int work_flags)
io_uring/io-wq.c
1007
wq_list_add_tail(&work->list, &acct->work_list);
io_uring/io-wq.c
1013
wq->hash_tail[hash] = work;
io_uring/io-wq.c
1017
wq_list_add_after(&work->list, &tail->list, &acct->work_list);
io_uring/io-wq.c
1020
static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
io_uring/io-wq.c
1022
return work == data;
io_uring/io-wq.c
1025
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
io_uring/io-wq.c
1027
unsigned int work_flags = atomic_read(&work->flags);
io_uring/io-wq.c
1031
.data = work,
io_uring/io-wq.c
1042
io_run_cancel(work, wq);
io_uring/io-wq.c
1047
io_wq_insert_work(wq, acct, work, work_flags);
io_uring/io-wq.c
1079
void io_wq_hash_work(struct io_wq_work *work, void *val)
io_uring/io-wq.c
1084
atomic_or(IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT), &work->flags);
io_uring/io-wq.c
1089
struct io_wq_work *work)
io_uring/io-wq.c
1091
if (work && match->fn(work, match->data)) {
io_uring/io-wq.c
1092
atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
io_uring/io-wq.c
1118
struct io_wq_work *work,
io_uring/io-wq.c
1121
unsigned int hash = io_get_work_hash(work);
io_uring/io-wq.c
1124
if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) {
io_uring/io-wq.c
1132
wq_list_del(&acct->work_list, &work->list, prev);
io_uring/io-wq.c
1140
struct io_wq_work *work;
io_uring/io-wq.c
1144
work = container_of(node, struct io_wq_work, list);
io_uring/io-wq.c
1145
if (!match->fn(work, match->data))
io_uring/io-wq.c
1147
io_wq_remove_pending(wq, acct, work, prev);
io_uring/io-wq.c
1149
io_run_cancel(work, wq);
io_uring/io-wq.c
160
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
io_uring/io-wq.c
162
return __io_get_work_hash(atomic_read(&work->flags));
io_uring/io-wq.c
433
static bool io_wq_hash_defer(struct io_wq_work *work, struct io_wq_acct *acct)
io_uring/io-wq.c
440
work_flags = atomic_read(&work->flags);
io_uring/io-wq.c
529
struct io_wq_work *work, *tail;
io_uring/io-wq.c
536
work = container_of(node, struct io_wq_work, list);
io_uring/io-wq.c
539
work_flags = atomic_read(&work->flags);
io_uring/io-wq.c
542
return work;
io_uring/io-wq.c
553
return work;
io_uring/io-wq.c
583
struct io_wq_work *work)
io_uring/io-wq.c
585
if (work) {
io_uring/io-wq.c
591
worker->cur_work = work;
io_uring/io-wq.c
606
struct io_wq_work *work;
io_uring/io-wq.c
615
work = io_get_next_work(acct, wq);
io_uring/io-wq.c
616
if (work) {
io_uring/io-wq.c
625
worker->cur_work = work;
io_uring/io-wq.c
631
if (!work)
io_uring/io-wq.c
636
io_assign_current_work(worker, work);
io_uring/io-wq.c
642
unsigned int work_flags = atomic_read(&work->flags);
io_uring/io-wq.c
647
next_hashed = wq_next_work(work);
io_uring/io-wq.c
651
atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
io_uring/io-wq.c
652
io_wq_submit_work(work);
io_uring/io-wq.c
655
linked = io_wq_free_work(work);
io_uring/io-wq.c
656
work = next_hashed;
io_uring/io-wq.c
657
if (!work && linked && !io_wq_is_hashed(linked)) {
io_uring/io-wq.c
658
work = linked;
io_uring/io-wq.c
661
io_assign_current_work(worker, work);
io_uring/io-wq.c
674
} while (work);
io_uring/io-wq.c
68
struct delayed_work work;
io_uring/io-wq.c
801
static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
io_uring/io-wq.c
837
schedule_delayed_work(&worker->work,
io_uring/io-wq.c
883
static void io_workqueue_create(struct work_struct *work)
io_uring/io-wq.c
885
struct io_worker *worker = container_of(work, struct io_worker,
io_uring/io-wq.c
886
work.work);
io_uring/io-wq.c
924
INIT_DELAYED_WORK(&worker->work, io_workqueue_create);
io_uring/io-wq.c
990
static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
io_uring/io-wq.c
993
atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
io_uring/io-wq.c
994
io_wq_submit_work(work);
io_uring/io-wq.c
995
work = io_wq_free_work(work);
io_uring/io-wq.c
996
} while (work);
io_uring/io-wq.h
47
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
io_uring/io-wq.h
48
void io_wq_hash_work(struct io_wq_work *work, void *val);
io_uring/io-wq.h
59
static inline bool io_wq_is_hashed(struct io_wq_work *work)
io_uring/io-wq.h
61
return __io_wq_is_hashed(atomic_read(&work->flags));
io_uring/io_uring.c
1447
struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
io_uring/io_uring.c
1449
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
io_uring/io_uring.c
1457
return nxt ? &nxt->work : NULL;
io_uring/io_uring.c
1460
void io_wq_submit_work(struct io_wq_work *work)
io_uring/io_uring.c
1462
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
io_uring/io_uring.c
1475
if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
io_uring/io_uring.c
1482
atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
io_uring/io_uring.c
2288
struct io_tctx_exit *work;
io_uring/io_uring.c
2290
work = container_of(cb, struct io_tctx_exit, task_work);
io_uring/io_uring.c
2298
io_uring_del_tctx_node((unsigned long)work->ctx);
io_uring/io_uring.c
2299
complete(&work->completion);
io_uring/io_uring.c
2302
static __cold void io_ring_exit_work(struct work_struct *work)
io_uring/io_uring.c
2304
struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
io_uring/io_uring.c
3121
.freeptr_offset = offsetof(struct io_kiocb, work),
io_uring/io_uring.c
366
req->work.list.next = NULL;
io_uring/io_uring.c
367
atomic_set(&req->work.flags, 0);
io_uring/io_uring.c
369
atomic_or(IO_WQ_WORK_CONCURRENT, &req->work.flags);
io_uring/io_uring.c
382
io_wq_hash_work(&req->work, file_inode(req->file));
io_uring/io_uring.c
385
atomic_or(IO_WQ_WORK_UNBOUND, &req->work.flags);
io_uring/io_uring.c
428
atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags);
io_uring/io_uring.c
430
trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
io_uring/io_uring.c
431
io_wq_enqueue(tctx->io_wq, &req->work);
io_uring/io_uring.h
205
struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
io_uring/io_uring.h
206
void io_wq_submit_work(struct io_wq_work *work);
io_uring/slist.h
82
static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
io_uring/slist.h
84
if (!work->list.next)
io_uring/slist.h
87
return container_of(work->list.next, struct io_wq_work, list);
io_uring/tw.c
18
void io_fallback_req_func(struct work_struct *work)
io_uring/tw.c
20
struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
io_uring/tw.c
21
fallback_work.work);
io_uring/tw.h
31
__cold void io_fallback_req_func(struct work_struct *work);
kernel/acct.c
106
struct work_struct work;
kernel/acct.c
194
schedule_work(&acct->work);
kernel/acct.c
202
static void close_work(struct work_struct *work)
kernel/acct.c
204
struct bsd_acct_struct *acct = container_of(work, struct bsd_acct_struct, work);
kernel/acct.c
268
INIT_WORK(&acct->work, close_work);
kernel/async.c
117
static void async_run_entry_fn(struct work_struct *work)
kernel/async.c
120
container_of(work, struct async_entry, work);
kernel/async.c
160
INIT_WORK(&entry->work, async_run_entry_fn);
kernel/async.c
178
queue_work_node(node, async_wq, &entry->work);
kernel/async.c
72
struct work_struct work;
kernel/backtracetest.c
24
static void backtrace_test_bh_workfn(struct work_struct *work)
kernel/bpf/arena.c
67
static void arena_free_worker(struct work_struct *work);
kernel/bpf/arena.c
784
static void arena_free_worker(struct work_struct *work)
kernel/bpf/arena.c
786
struct bpf_arena *arena = container_of(work, struct bpf_arena, free_work);
kernel/bpf/arena.c
798
schedule_work(work);
kernel/bpf/arraymap.c
1156
static void prog_array_map_clear_deferred(struct work_struct *work)
kernel/bpf/arraymap.c
1158
struct bpf_map *map = container_of(work, struct bpf_array_aux,
kernel/bpf/arraymap.c
1159
work)->map;
kernel/bpf/arraymap.c
1169
schedule_work(&aux->work);
kernel/bpf/arraymap.c
1181
INIT_WORK(&aux->work, prog_array_map_clear_deferred);
kernel/bpf/cgroup.c
304
static void cgroup_bpf_release(struct work_struct *work)
kernel/bpf/cgroup.c
306
struct cgroup *p, *cgrp = container_of(work, struct cgroup,
kernel/bpf/core.c
2931
static void bpf_prog_free_deferred(struct work_struct *work)
kernel/bpf/core.c
2936
aux = container_of(work, struct bpf_prog_aux, work);
kernel/bpf/core.c
2979
INIT_WORK(&aux->work, bpf_prog_free_deferred);
kernel/bpf/core.c
2980
schedule_work(&aux->work);
kernel/bpf/cpumap.c
517
static void __cpu_map_entry_free(struct work_struct *work)
kernel/bpf/cpumap.c
526
rcpu = container_of(to_rcu_work(work), struct bpf_cpu_map_entry, free_work);
kernel/bpf/cpumap.c
642
__cpu_map_entry_free(&rcpu->free_work.work);
kernel/bpf/helpers.c
1152
struct work_struct work;
kernel/bpf/helpers.c
1160
struct bpf_work *work;
kernel/bpf/helpers.c
1200
static void bpf_wq_work(struct work_struct *work)
kernel/bpf/helpers.c
1202
struct bpf_work *w = container_of(work, struct bpf_work, work);
kernel/bpf/helpers.c
1260
if (!cancel_work(&w->work) && work_busy(&w->work))
kernel/bpf/helpers.c
1279
static void worker_for_call_rcu(struct irq_work *work)
kernel/bpf/helpers.c
1281
struct bpf_async_cb *cb = container_of(work, struct bpf_async_cb, worker);
kernel/bpf/helpers.c
1300
static void bpf_async_irq_worker(struct irq_work *work);
kernel/bpf/helpers.c
1342
INIT_WORK(&w->work, bpf_wq_work);
kernel/bpf/helpers.c
1635
schedule_work(&w->work);
kernel/bpf/helpers.c
1638
cancel_work(&w->work);
kernel/bpf/helpers.c
1647
static void bpf_async_irq_worker(struct irq_work *work)
kernel/bpf/helpers.c
1649
struct bpf_async_cb *cb = container_of(work, struct bpf_async_cb, worker);
kernel/bpf/helpers.c
3185
w = READ_ONCE(async->work);
kernel/bpf/helpers.c
3193
schedule_work(&w->work);
kernel/bpf/helpers.c
4132
struct callback_head work;
kernel/bpf/helpers.c
4189
if (task_work_cancel(ctx->task, &ctx->work))
kernel/bpf/helpers.c
4195
struct bpf_task_work_ctx *ctx = container_of(cb, struct bpf_task_work_ctx, work);
kernel/bpf/helpers.c
4239
err = task_work_add(ctx->task, &ctx->work, ctx->mode);
kernel/bpf/helpers.c
4360
init_task_work(&ctx->work, bpf_task_work_callback);
kernel/bpf/memalloc.c
431
static void bpf_mem_refill(struct irq_work *work)
kernel/bpf/memalloc.c
433
struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
kernel/bpf/memalloc.c
716
static void free_mem_alloc_deferred(struct work_struct *work)
kernel/bpf/memalloc.c
718
struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);
kernel/bpf/memalloc.c
745
INIT_WORK(&copy->work, free_mem_alloc_deferred);
kernel/bpf/memalloc.c
746
queue_work(system_dfl_wq, &copy->work);
kernel/bpf/mmap_unlock_work.h
26
struct mmap_unlock_irq_work *work = NULL;
kernel/bpf/mmap_unlock_work.h
31
work = this_cpu_ptr(&mmap_unlock_work);
kernel/bpf/mmap_unlock_work.h
32
if (irq_work_is_busy(&work->irq_work)) {
kernel/bpf/mmap_unlock_work.h
45
*work_ptr = work;
kernel/bpf/mmap_unlock_work.h
49
static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
kernel/bpf/mmap_unlock_work.h
51
if (!work) {
kernel/bpf/mmap_unlock_work.h
54
work->mm = mm;
kernel/bpf/mmap_unlock_work.h
61
irq_work_queue(&work->irq_work);
kernel/bpf/ringbuf.c
154
static void bpf_ringbuf_notify(struct irq_work *work)
kernel/bpf/ringbuf.c
156
struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);
kernel/bpf/ringbuf.c
183
init_irq_work(&rb->work, bpf_ringbuf_notify);
kernel/bpf/ringbuf.c
230
irq_work_sync(&rb->work);
kernel/bpf/ringbuf.c
30
struct irq_work work;
kernel/bpf/ringbuf.c
582
irq_work_queue(&rb->work);
kernel/bpf/ringbuf.c
584
irq_work_queue(&rb->work);
kernel/bpf/ringbuf.c
867
irq_work_queue(&rb->work);
kernel/bpf/ringbuf.c
869
irq_work_queue(&rb->work);
kernel/bpf/stackmap.c
169
struct mmap_unlock_irq_work *work = NULL;
kernel/bpf/stackmap.c
170
bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
kernel/bpf/stackmap.c
209
bpf_mmap_unlock_mm(work, current->mm);
kernel/bpf/syscall.c
2398
static void bpf_prog_put_deferred(struct work_struct *work)
kernel/bpf/syscall.c
2403
aux = container_of(work, struct bpf_prog_aux, work);
kernel/bpf/syscall.c
2417
INIT_WORK(&aux->work, bpf_prog_put_deferred);
kernel/bpf/syscall.c
2418
schedule_work(&aux->work);
kernel/bpf/syscall.c
2420
bpf_prog_put_deferred(&aux->work);
kernel/bpf/syscall.c
3320
static void bpf_link_put_deferred(struct work_struct *work)
kernel/bpf/syscall.c
3322
struct bpf_link *link = container_of(work, struct bpf_link, work);
kernel/bpf/syscall.c
3335
INIT_WORK(&link->work, bpf_link_put_deferred);
kernel/bpf/syscall.c
3336
schedule_work(&link->work);
kernel/bpf/syscall.c
912
static void bpf_map_free_deferred(struct work_struct *work)
kernel/bpf/syscall.c
914
struct bpf_map *map = container_of(work, struct bpf_map, work);
kernel/bpf/syscall.c
932
INIT_WORK(&map->work, bpf_map_free_deferred);
kernel/bpf/syscall.c
936
queue_work(system_dfl_wq, &map->work);
kernel/bpf/task_iter.c
1036
struct mmap_unlock_irq_work *work;
kernel/bpf/task_iter.c
1041
work = container_of(entry, struct mmap_unlock_irq_work, irq_work);
kernel/bpf/task_iter.c
1042
mmap_read_unlock_non_owner(work->mm);
kernel/bpf/task_iter.c
1047
struct mmap_unlock_irq_work *work;
kernel/bpf/task_iter.c
1051
work = per_cpu_ptr(&mmap_unlock_work, cpu);
kernel/bpf/task_iter.c
1052
init_irq_work(&work->irq_work, do_mmap_read_unlock);
kernel/bpf/task_iter.c
754
struct mmap_unlock_irq_work *work = NULL;
kernel/bpf/task_iter.c
770
irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
kernel/bpf/task_iter.c
782
bpf_mmap_unlock_mm(work, mm);
kernel/bpf/task_iter.c
800
struct mmap_unlock_irq_work *work;
kernel/bpf/task_iter.c
844
irq_work_busy = bpf_mmap_unlock_get_irq_work(&kit->data->work);
kernel/bpf/task_iter.c
876
bpf_mmap_unlock_mm(kit->data->work, kit->data->mm);
kernel/bpf/token.c
42
static void bpf_token_put_deferred(struct work_struct *work)
kernel/bpf/token.c
44
struct bpf_token *token = container_of(work, struct bpf_token, work);
kernel/bpf/token.c
57
INIT_WORK(&token->work, bpf_token_put_deferred);
kernel/bpf/token.c
58
schedule_work(&token->work);
kernel/bpf/trampoline.c
475
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
kernel/bpf/trampoline.c
479
im = container_of(work, struct bpf_tramp_image, work);
kernel/bpf/trampoline.c
489
INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
kernel/bpf/trampoline.c
490
schedule_work(&im->work);
kernel/cgroup/cgroup-internal.h
296
void cgroup1_release_agent(struct work_struct *work);
kernel/cgroup/cgroup-v1.c
219
static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
kernel/cgroup/cgroup-v1.c
221
struct delayed_work *dwork = to_delayed_work(work);
kernel/cgroup/cgroup-v1.c
801
void cgroup1_release_agent(struct work_struct *work)
kernel/cgroup/cgroup-v1.c
804
container_of(work, struct cgroup, release_agent_work);
kernel/cgroup/cgroup.c
5579
static void css_free_rwork_fn(struct work_struct *work)
kernel/cgroup/cgroup.c
5581
struct cgroup_subsys_state *css = container_of(to_rcu_work(work),
kernel/cgroup/cgroup.c
5630
static void css_release_work_fn(struct work_struct *work)
kernel/cgroup/cgroup.c
5633
container_of(work, struct cgroup_subsys_state, destroy_work);
kernel/cgroup/cgroup.c
6052
static void css_killed_work_fn(struct work_struct *work)
kernel/cgroup/cgroup.c
6055
container_of(work, struct cgroup_subsys_state, destroy_work);
kernel/cgroup/cpuset-v1.c
10
struct work_struct work;
kernel/cgroup/cpuset-v1.c
275
static void cpuset_migrate_tasks_workfn(struct work_struct *work)
kernel/cgroup/cpuset-v1.c
279
s = container_of(work, struct cpuset_remove_tasks_struct, work);
kernel/cgroup/cpuset-v1.c
326
INIT_WORK(&s->work, cpuset_migrate_tasks_workfn);
kernel/cgroup/cpuset-v1.c
327
schedule_work(&s->work);
kernel/cgroup/cpuset.c
1367
static void hk_sd_workfn(struct work_struct *work)
kernel/cgroup/cpuset.c
2518
struct work_struct work;
kernel/cgroup/cpuset.c
2524
static void cpuset_migrate_mm_workfn(struct work_struct *work)
kernel/cgroup/cpuset.c
2527
container_of(work, struct cpuset_migrate_mm_work, work);
kernel/cgroup/cpuset.c
2550
INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
kernel/cgroup/cpuset.c
2551
queue_work(cpuset_migrate_mm_wq, &mwork->work);
kernel/dma/pool.c
151
static void atomic_pool_work_fn(struct work_struct *work)
kernel/dma/swiotlb.c
733
static void swiotlb_dyn_alloc(struct work_struct *work)
kernel/dma/swiotlb.c
736
container_of(work, struct io_tlb_mem, dyn_alloc);
kernel/dma/swiotlb.c
86
static void swiotlb_dyn_alloc(struct work_struct *work);
kernel/events/core.c
10354
static void perf_unwind_deferred_callback(struct unwind_work *work,
kernel/events/core.c
443
static void perf_sched_delayed(struct work_struct *work);
kernel/events/core.c
5647
static void perf_sched_delayed(struct work_struct *work)
kernel/events/internal.h
17
struct work_struct work;
kernel/events/ring_buffer.c
903
static void rb_free_work(struct work_struct *work)
kernel/events/ring_buffer.c
907
rb = container_of(work, struct perf_buffer, work);
kernel/events/ring_buffer.c
915
schedule_work(&rb->work);
kernel/events/ring_buffer.c
933
INIT_WORK(&rb->work, rb_free_work);
kernel/events/uprobes.c
2174
static void dup_xol_work(struct callback_head *work)
kernel/events/uprobes.c
673
static void uprobe_free_deferred(struct work_struct *work)
kernel/events/uprobes.c
675
struct uprobe *uprobe = container_of(work, struct uprobe, work);
kernel/events/uprobes.c
705
INIT_WORK(&uprobe->work, uprobe_free_deferred);
kernel/events/uprobes.c
706
schedule_work(&uprobe->work);
kernel/events/uprobes.c
72
struct work_struct work;
kernel/fork.c
1203
static void mmput_async_fn(struct work_struct *work)
kernel/fork.c
1205
struct mm_struct *mm = container_of(work, struct mm_struct,
kernel/fork.c
741
static void mmdrop_async_fn(struct work_struct *work)
kernel/fork.c
745
mm = container_of(work, struct mm_struct, async_put_work);
kernel/irq/irq_sim.c
128
static void irq_sim_handle_irq(struct irq_work *work)
kernel/irq/irq_sim.c
134
work_ctx = container_of(work, struct irq_sim_work_ctx, work);
kernel/irq/irq_sim.c
15
struct irq_work work;
kernel/irq/irq_sim.c
221
work_ctx->work = IRQ_WORK_INIT_HARD(irq_sim_handle_irq);
kernel/irq/irq_sim.c
242
irq_work_sync(&work_ctx->work);
kernel/irq/irq_sim.c
82
irq_work_queue(&irq_ctx->work_ctx->work);
kernel/irq/irqdesc.c
81
static void irq_redirect_work(struct irq_work *work)
kernel/irq/irqdesc.c
819
irq_work_queue_on(&desc->redirect.work, target_cpu);
kernel/irq/irqdesc.c
83
handle_irq_desc(container_of(work, struct irq_desc, redirect.work));
kernel/irq/irqdesc.c
98
desc->redirect.work = IRQ_WORK_INIT_HARD(irq_redirect_work);
kernel/irq/manage.c
370
if (!schedule_work(&desc->affinity_notify->work)) {
kernel/irq/manage.c
42
irq_work_sync(&desc->redirect.work);
kernel/irq/manage.c
519
static void irq_affinity_notify(struct work_struct *work)
kernel/irq/manage.c
521
struct irq_affinity_notify *notify = container_of(work, struct irq_affinity_notify, work);
kernel/irq/manage.c
568
INIT_WORK(&notify->work, irq_affinity_notify);
kernel/irq/manage.c
577
if (cancel_work_sync(&old_notify->work)) {
kernel/irq_work.c
107
if (!llist_add(&work->node.llist, list))
kernel/irq_work.c
112
irq_work_raise(work);
kernel/irq_work.c
116
bool irq_work_queue(struct irq_work *work)
kernel/irq_work.c
119
if (!irq_work_claim(work))
kernel/irq_work.c
124
__irq_work_queue_local(work);
kernel/irq_work.c
137
bool irq_work_queue_on(struct irq_work *work, int cpu)
kernel/irq_work.c
140
return irq_work_queue(work);
kernel/irq_work.c
147
if (!irq_work_claim(work))
kernel/irq_work.c
150
kasan_record_aux_stack(work);
kernel/irq_work.c
163
!(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {
kernel/irq_work.c
165
if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
kernel/irq_work.c
168
work = &per_cpu(irq_work_wakeup, cpu);
kernel/irq_work.c
169
if (!irq_work_claim(work))
kernel/irq_work.c
173
__smp_call_single_queue(cpu, &work->node.llist);
kernel/irq_work.c
175
__irq_work_queue_local(work);
kernel/irq_work.c
203
struct irq_work *work = arg;
kernel/irq_work.c
211
flags = atomic_read(&work->node.a_flags);
kernel/irq_work.c
213
atomic_set(&work->node.a_flags, flags);
kernel/irq_work.c
221
work->func(work);
kernel/irq_work.c
228
(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
kernel/irq_work.c
230
if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
kernel/irq_work.c
232
rcuwait_wake_up(&work->irqwait);
kernel/irq_work.c
237
struct irq_work *work, *tmp;
kernel/irq_work.c
251
llist_for_each_entry_safe(work, tmp, llnode, node.llist)
kernel/irq_work.c
252
irq_work_single(work);
kernel/irq_work.c
286
void irq_work_sync(struct irq_work *work)
kernel/irq_work.c
291
if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
kernel/irq_work.c
293
rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
kernel/irq_work.c
298
while (irq_work_is_busy(work))
kernel/irq_work.c
57
static bool irq_work_claim(struct irq_work *work)
kernel/irq_work.c
61
oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
kernel/irq_work.c
79
static __always_inline void irq_work_raise(struct irq_work *work)
kernel/irq_work.c
82
trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);
kernel/irq_work.c
88
static void __irq_work_queue_local(struct irq_work *work)
kernel/irq_work.c
95
work_flags = atomic_read(&work->node.a_flags);
kernel/jump_label.c
325
void jump_label_update_timeout(struct work_struct *work)
kernel/jump_label.c
328
container_of(work, struct static_key_deferred, work.work);
kernel/jump_label.c
347
struct delayed_work *work,
kernel/jump_label.c
355
schedule_delayed_work(work, timeout);
kernel/jump_label.c
359
void __static_key_deferred_flush(void *key, struct delayed_work *work)
kernel/jump_label.c
362
flush_delayed_work(work);
kernel/jump_label.c
371
INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
kernel/kthread.c
1011
work = NULL;
kernel/kthread.c
1014
work = list_first_entry(&worker->work_list,
kernel/kthread.c
1016
list_del_init(&work->node);
kernel/kthread.c
1018
worker->current_work = work;
kernel/kthread.c
1021
if (work) {
kernel/kthread.c
1022
kthread_work_func_t func = work->func;
kernel/kthread.c
1024
trace_sched_kthread_work_execute_start(work);
kernel/kthread.c
1025
work->func(work);
kernel/kthread.c
1030
trace_sched_kthread_work_execute_end(work, func);
kernel/kthread.c
1156
struct kthread_work *work)
kernel/kthread.c
1160
return !list_empty(&work->node) || work->canceling;
kernel/kthread.c
1164
struct kthread_work *work)
kernel/kthread.c
1167
WARN_ON_ONCE(!list_empty(&work->node));
kernel/kthread.c
1169
WARN_ON_ONCE(work->worker && work->worker != worker);
kernel/kthread.c
1174
struct kthread_work *work,
kernel/kthread.c
1177
kthread_insert_work_sanity_check(worker, work);
kernel/kthread.c
1179
trace_sched_kthread_work_queue_work(worker, work);
kernel/kthread.c
1181
list_add_tail(&work->node, pos);
kernel/kthread.c
1182
work->worker = worker;
kernel/kthread.c
1200
struct kthread_work *work)
kernel/kthread.c
1206
if (!queuing_blocked(worker, work)) {
kernel/kthread.c
1207
kthread_insert_work(worker, work, &worker->work_list);
kernel/kthread.c
1227
struct kthread_work *work = &dwork->work;
kernel/kthread.c
1228
struct kthread_worker *worker = work->worker;
kernel/kthread.c
1240
WARN_ON_ONCE(work->worker != worker);
kernel/kthread.c
1243
WARN_ON_ONCE(list_empty(&work->node));
kernel/kthread.c
1244
list_del_init(&work->node);
kernel/kthread.c
1245
if (!work->canceling)
kernel/kthread.c
1246
kthread_insert_work(worker, work, &worker->work_list);
kernel/kthread.c
1257
struct kthread_work *work = &dwork->work;
kernel/kthread.c
1268
kthread_insert_work(worker, work, &worker->work_list);
kernel/kthread.c
1273
kthread_insert_work_sanity_check(worker, work);
kernel/kthread.c
1275
list_add(&work->node, &worker->delayed_work_list);
kernel/kthread.c
1276
work->worker = worker;
kernel/kthread.c
1300
struct kthread_work *work = &dwork->work;
kernel/kthread.c
1306
if (!queuing_blocked(worker, work)) {
kernel/kthread.c
1317
struct kthread_work work;
kernel/kthread.c
1321
static void kthread_flush_work_fn(struct kthread_work *work)
kernel/kthread.c
1324
container_of(work, struct kthread_flush_work, work);
kernel/kthread.c
1334
void kthread_flush_work(struct kthread_work *work)
kernel/kthread.c
1337
KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
kernel/kthread.c
1343
worker = work->worker;
kernel/kthread.c
1349
WARN_ON_ONCE(work->worker != worker);
kernel/kthread.c
1351
if (!list_empty(&work->node))
kernel/kthread.c
1352
kthread_insert_work(worker, &fwork.work, work->node.next);
kernel/kthread.c
1353
else if (worker->current_work == work)
kernel/kthread.c
1354
kthread_insert_work(worker, &fwork.work,
kernel/kthread.c
1373
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
kernel/kthread.c
1377
container_of(work, struct kthread_delayed_work, work);
kernel/kthread.c
1378
struct kthread_worker *worker = work->worker;
kernel/kthread.c
1386
work->canceling++;
kernel/kthread.c
1390
work->canceling--;
kernel/kthread.c
1406
static bool __kthread_cancel_work(struct kthread_work *work)
kernel/kthread.c
1412
if (!list_empty(&work->node)) {
kernel/kthread.c
1413
list_del_init(&work->node);
kernel/kthread.c
1447
struct kthread_work *work = &dwork->work;
kernel/kthread.c
1454
if (!work->worker) {
kernel/kthread.c
1460
WARN_ON_ONCE(work->worker != worker);
kernel/kthread.c
1474
kthread_cancel_delayed_work_timer(work, &flags);
kernel/kthread.c
1475
if (work->canceling) {
kernel/kthread.c
1480
ret = __kthread_cancel_work(work);
kernel/kthread.c
1490
static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
kernel/kthread.c
1492
struct kthread_worker *worker = work->worker;
kernel/kthread.c
1501
WARN_ON_ONCE(work->worker != worker);
kernel/kthread.c
1504
kthread_cancel_delayed_work_timer(work, &flags);
kernel/kthread.c
1506
ret = __kthread_cancel_work(work);
kernel/kthread.c
1508
if (worker->current_work != work)
kernel/kthread.c
1515
work->canceling++;
kernel/kthread.c
1517
kthread_flush_work(work);
kernel/kthread.c
1519
work->canceling--;
kernel/kthread.c
1543
bool kthread_cancel_work_sync(struct kthread_work *work)
kernel/kthread.c
1545
return __kthread_cancel_work_sync(work, false);
kernel/kthread.c
1560
return __kthread_cancel_work_sync(&dwork->work, true);
kernel/kthread.c
1574
KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
kernel/kthread.c
1578
kthread_queue_work(worker, &fwork.work);
kernel/kthread.c
988
struct kthread_work *work;
kernel/livepatch/core.c
775
static void klp_free_patch_work_fn(struct work_struct *work)
kernel/livepatch/core.c
778
container_of(work, struct klp_patch, free_work);
kernel/livepatch/transition.c
44
static void klp_transition_work_fn(struct work_struct *work)
kernel/livepatch/transition.c
60
static void klp_sync(struct work_struct *work)
kernel/locking/test-ww_mutex.c
104
flush_work(&mtx.work);
kernel/locking/test-ww_mutex.c
105
destroy_work_on_stack(&mtx.work);
kernel/locking/test-ww_mutex.c
180
struct work_struct work;
kernel/locking/test-ww_mutex.c
190
static void test_abba_work(struct work_struct *work)
kernel/locking/test-ww_mutex.c
192
struct test_abba *abba = container_of(work, typeof(*abba), work);
kernel/locking/test-ww_mutex.c
230
INIT_WORK_ONSTACK(&abba.work, test_abba_work);
kernel/locking/test-ww_mutex.c
237
queue_work(wq, &abba.work);
kernel/locking/test-ww_mutex.c
262
flush_work(&abba.work);
kernel/locking/test-ww_mutex.c
263
destroy_work_on_stack(&abba.work);
kernel/locking/test-ww_mutex.c
283
struct work_struct work;
kernel/locking/test-ww_mutex.c
292
static void test_cycle_work(struct work_struct *work)
kernel/locking/test-ww_mutex.c
294
struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
kernel/locking/test-ww_mutex.c
30
struct work_struct work;
kernel/locking/test-ww_mutex.c
347
INIT_WORK(&cycle->work, test_cycle_work);
kernel/locking/test-ww_mutex.c
352
queue_work(wq, &cycles[n].work);
kernel/locking/test-ww_mutex.c
390
struct work_struct work;
kernel/locking/test-ww_mutex.c
41
static void test_mutex_work(struct work_struct *work)
kernel/locking/test-ww_mutex.c
43
struct test_mutex *mtx = container_of(work, typeof(*mtx), work);
kernel/locking/test-ww_mutex.c
436
static void stress_inorder_work(struct work_struct *work)
kernel/locking/test-ww_mutex.c
438
struct stress *stress = container_of(work, typeof(*stress), work);
kernel/locking/test-ww_mutex.c
495
static void stress_reorder_work(struct work_struct *work)
kernel/locking/test-ww_mutex.c
497
struct stress *stress = container_of(work, typeof(*stress), work);
kernel/locking/test-ww_mutex.c
554
static void stress_one_work(struct work_struct *work)
kernel/locking/test-ww_mutex.c
556
struct stress *stress = container_of(work, typeof(*stress), work);
kernel/locking/test-ww_mutex.c
601
void (*fn)(struct work_struct *work);
kernel/locking/test-ww_mutex.c
624
INIT_WORK(&stress->work, fn);
kernel/locking/test-ww_mutex.c
630
queue_work(wq, &stress->work);
kernel/locking/test-ww_mutex.c
69
INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
kernel/locking/test-ww_mutex.c
75
queue_work(wq, &mtx.work);
kernel/module/dups.c
100
kmod_req = container_of(work, struct kmod_dup_req, complete_work);
kernel/module/dups.c
71
static void kmod_dup_request_delete(struct work_struct *work)
kernel/module/dups.c
74
kmod_req = container_of(to_delayed_work(work), struct kmod_dup_req, delete_work);
kernel/module/dups.c
96
static void kmod_dup_request_complete(struct work_struct *work)
kernel/padata.c
306
queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
kernel/padata.c
326
squeue = container_of(serial_work, struct padata_serial_queue, work);
kernel/padata.c
48
static void __init padata_mt_helper(struct work_struct *work);
kernel/padata.c
520
INIT_WORK(&squeue->work, padata_serial_worker);
kernel/pid_namespace.c
120
INIT_WORK(&ns->work, destroy_pid_namespace_work);
kernel/pid_namespace.c
161
static void destroy_pid_namespace_work(struct work_struct *work)
kernel/pid_namespace.c
164
container_of(work, struct pid_namespace, work);
kernel/pid_namespace.c
188
schedule_work(&ns->work);
kernel/pid_namespace.c
74
static void destroy_pid_namespace_work(struct work_struct *work);
kernel/power/autosleep.c
26
static void try_to_suspend(struct work_struct *work)
kernel/power/energy_model.c
41
static void em_update_workfn(struct work_struct *work);
kernel/power/energy_model.c
894
static void em_update_workfn(struct work_struct *work)
kernel/power/energy_model.c
997
static void rebuild_sd_workfn(struct work_struct *work)
kernel/power/main.c
110
static void pm_fs_sync_work_fn(struct work_struct *work)
kernel/power/wakelock.c
103
static void __wakelocks_gc(struct work_struct *work)
kernel/power/wakelock.c
88
static void __wakelocks_gc(struct work_struct *work);
kernel/rcu/srcutree.c
108
INIT_WORK(&sdp->work, srcu_invoke_callbacks);
kernel/rcu/srcutree.c
1136
else if (list_empty(&sup->work.work.entry))
kernel/rcu/srcutree.c
1137
list_add(&sup->work.work.entry, &srcu_boot_list);
kernel/rcu/srcutree.c
1879
static void srcu_invoke_callbacks(struct work_struct *work)
kernel/rcu/srcutree.c
1888
sdp = container_of(work, struct srcu_data, work);
kernel/rcu/srcutree.c
1957
queue_delayed_work(rcu_gp_wq, &ssp->srcu_sup->work, delay);
kernel/rcu/srcutree.c
1963
static void process_srcu(struct work_struct *work)
kernel/rcu/srcutree.c
1970
sup = container_of(work, struct srcu_usage, work.work);
kernel/rcu/srcutree.c
1994
static void srcu_irq_work(struct irq_work *work)
kernel/rcu/srcutree.c
2001
sup = container_of(work, struct srcu_usage, irq_work);
kernel/rcu/srcutree.c
2008
queue_delayed_work(rcu_gp_wq, &sup->work, !!delay);
kernel/rcu/srcutree.c
2122
work.work.entry);
kernel/rcu/srcutree.c
2123
list_del_init(&sup->work.work.entry);
kernel/rcu/srcutree.c
2127
queue_work(rcu_gp_wq, &sup->work.work);
kernel/rcu/srcutree.c
220
INIT_DELAYED_WORK(&ssp->srcu_sup->work, process_srcu);
kernel/rcu/srcutree.c
724
flush_delayed_work(&sup->work);
kernel/rcu/srcutree.c
729
flush_work(&sdp->work);
kernel/rcu/srcutree.c
76
static void srcu_invoke_callbacks(struct work_struct *work);
kernel/rcu/srcutree.c
78
static void process_srcu(struct work_struct *work);
kernel/rcu/srcutree.c
79
static void srcu_irq_work(struct irq_work *work);
kernel/rcu/srcutree.c
866
queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
kernel/rcu/srcutree.c
873
queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
kernel/rcu/tasks.h
1341
static void rcu_tasks_be_rude(struct work_struct *work)
kernel/rcu/tasks.h
1540
static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
kernel/rcu/tree.c
1655
static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
kernel/rcu/tree.c
2828
static void strict_work_handler(struct work_struct *work)
kernel/rcu/tree.c
2943
char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
kernel/rcu/tree.c
2953
work = *workp;
kernel/rcu/tree.c
2956
if (work)
kernel/rcu/tree.c
582
static void late_wakeup_func(struct irq_work *work)
kernel/reboot.c
900
static void poweroff_work_func(struct work_struct *work)
kernel/reboot.c
922
static void reboot_work_func(struct work_struct *work)
kernel/reboot.c
962
static void hw_failure_emergency_action_func(struct work_struct *work)
kernel/relay.c
271
static void wakeup_readers(struct irq_work *work)
kernel/relay.c
275
buf = container_of(work, struct rchan_buf, wakeup_work);
kernel/sched/clock.c
155
notrace static void __sched_clock_work(struct work_struct *work)
kernel/sched/core.c
10795
cancel_work_sync(&mm->mm_cid.work);
kernel/sched/core.c
10811
static void mm_cid_work_fn(struct work_struct *work)
kernel/sched/core.c
10813
struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.work);
kernel/sched/core.c
10834
static void mm_cid_irq_work(struct irq_work *work)
kernel/sched/core.c
10836
struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.irq_work);
kernel/sched/core.c
10843
schedule_work(&mm->mm_cid.work);
kernel/sched/core.c
10857
INIT_WORK(&mm->mm_cid.work, mm_cid_work_fn);
kernel/sched/core.c
501
static void __sched_core_put(struct work_struct *work)
kernel/sched/core.c
5601
struct delayed_work work;
kernel/sched/core.c
5633
static void sched_tick_remote(struct work_struct *work)
kernel/sched/core.c
5635
struct delayed_work *dwork = to_delayed_work(work);
kernel/sched/core.c
5636
struct tick_work *twork = container_of(dwork, struct tick_work, work);
kernel/sched/core.c
5702
INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
kernel/sched/core.c
5703
queue_delayed_work(system_unbound_wq, &twork->work, HZ);
kernel/sched/cpufreq_schedutil.c
32
struct kthread_work work;
kernel/sched/cpufreq_schedutil.c
542
static void sugov_work(struct kthread_work *work)
kernel/sched/cpufreq_schedutil.c
544
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
kernel/sched/cpufreq_schedutil.c
574
kthread_queue_work(&sg_policy->worker, &sg_policy->work);
kernel/sched/cpufreq_schedutil.c
679
kthread_init_work(&sg_policy->work, sugov_work);
kernel/sched/cpufreq_schedutil.c
888
kthread_cancel_work_sync(&sg_policy->work);
kernel/sched/ext.c
2828
static void scx_watchdog_workfn(struct work_struct *work)
kernel/sched/ext.c
2840
queue_delayed_work(system_unbound_wq, to_delayed_work(work),
kernel/sched/ext.c
3743
static void scx_sched_free_rcu_work(struct work_struct *work)
kernel/sched/ext.c
3745
struct rcu_work *rcu_work = to_rcu_work(work);
kernel/sched/ext.c
4358
static void scx_disable_workfn(struct kthread_work *work)
kernel/sched/ext.c
4360
struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
kernel/sched/ext.c
5061
struct kthread_work work;
kernel/sched/ext.c
5066
static void scx_enable_workfn(struct kthread_work *work)
kernel/sched/ext.c
5069
container_of(work, struct scx_enable_cmd, work);
kernel/sched/ext.c
5350
kthread_init_work(&cmd.work, scx_enable_workfn);
kernel/sched/ext.c
5353
kthread_queue_work(READ_ONCE(helper), &cmd.work);
kernel/sched/ext.c
5354
kthread_flush_work(&cmd.work);
kernel/sched/fair.c
3363
static void task_numa_work(struct callback_head *work)
kernel/sched/fair.c
3377
WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
kernel/sched/fair.c
3379
work->next = work;
kernel/sched/fair.c
3672
struct callback_head *work = &curr->numa_work;
kernel/sched/fair.c
3678
if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
kernel/sched/fair.c
3696
task_work_add(curr, work, TWA_RESUME);
kernel/sched/fair.c
5773
static void throttle_cfs_rq_work(struct callback_head *work)
kernel/sched/fair.c
5775
struct task_struct *p = container_of(work, struct task_struct, sched_throttle_work);
kernel/sched/psi.c
201
static void psi_avgs_work(struct work_struct *work);
kernel/sched/psi.c
327
if (current_work() == &group->avgs_work.work) {
kernel/sched/psi.c
578
static void psi_avgs_work(struct work_struct *work)
kernel/sched/psi.c
585
dwork = to_delayed_work(work);
kernel/sched/rt.c
2203
void rto_push_irq_work_func(struct irq_work *work)
kernel/sched/rt.c
2206
container_of(work, struct root_domain, rto_push_work);
kernel/sched/sched.h
1070
extern void rto_push_irq_work_func(struct irq_work *work);
kernel/smp.c
1126
struct work_struct work;
kernel/smp.c
1134
static void smp_call_on_cpu_callback(struct work_struct *work)
kernel/smp.c
1138
sscs = container_of(work, struct smp_call_on_cpu_struct, work);
kernel/smp.c
1157
INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
kernel/smp.c
1162
queue_work_on(cpu, system_wq, &sscs.work);
kernel/smp.c
1164
destroy_work_on_stack(&sscs.work);
kernel/stop_machine.c
101
__cpu_stop_queue_work(stopper, work);
kernel/stop_machine.c
102
else if (work->done)
kernel/stop_machine.c
103
cpu_stop_signal_done(work->done);
kernel/stop_machine.c
140
struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };
kernel/stop_machine.c
143
if (!cpu_stop_queue_work(cpu, &work))
kernel/stop_machine.c
396
struct cpu_stop_work *work;
kernel/stop_machine.c
409
work = &per_cpu(cpu_stopper.stop_work, cpu);
kernel/stop_machine.c
410
work->fn = fn;
kernel/stop_machine.c
411
work->arg = arg;
kernel/stop_machine.c
412
work->done = done;
kernel/stop_machine.c
413
work->caller = _RET_IP_;
kernel/stop_machine.c
414
if (cpu_stop_queue_work(cpu, work))
kernel/stop_machine.c
490
struct cpu_stop_work *work;
kernel/stop_machine.c
493
work = NULL;
kernel/stop_machine.c
496
work = list_first_entry(&stopper->works,
kernel/stop_machine.c
498
list_del_init(&work->list);
kernel/stop_machine.c
502
if (work) {
kernel/stop_machine.c
503
cpu_stop_fn_t fn = work->fn;
kernel/stop_machine.c
504
void *arg = work->arg;
kernel/stop_machine.c
505
struct cpu_stop_done *done = work->done;
kernel/stop_machine.c
509
stopper->caller = work->caller;
kernel/stop_machine.c
85
struct cpu_stop_work *work)
kernel/stop_machine.c
87
list_add_tail(&work->list, &stopper->works);
kernel/stop_machine.c
91
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
kernel/task_work.c
121
struct callback_head *work;
kernel/task_work.c
133
work = READ_ONCE(*pprev);
kernel/task_work.c
134
while (work) {
kernel/task_work.c
135
if (!match(work, data)) {
kernel/task_work.c
136
pprev = &work->next;
kernel/task_work.c
137
work = READ_ONCE(*pprev);
kernel/task_work.c
138
} else if (try_cmpxchg(pprev, &work, work->next))
kernel/task_work.c
143
return work;
kernel/task_work.c
203
struct callback_head *work, *head, *next;
kernel/task_work.c
210
work = READ_ONCE(task->task_works);
kernel/task_work.c
213
if (!work) {
kernel/task_work.c
219
} while (!try_cmpxchg(&task->task_works, &work, head));
kernel/task_work.c
221
if (!work)
kernel/task_work.c
232
next = work->next;
kernel/task_work.c
233
work->func(work);
kernel/task_work.c
234
work = next;
kernel/task_work.c
236
} while (work);
kernel/task_work.c
59
int task_work_add(struct task_struct *task, struct callback_head *work,
kernel/task_work.c
70
kasan_record_aux_stack(work);
kernel/task_work.c
77
work->next = head;
kernel/task_work.c
78
} while (!try_cmpxchg(&task->task_works, &head, work));
kernel/time/clocksource.c
153
static void clocksource_watchdog_work(struct work_struct *work);
kernel/time/clocksource.c
177
static void clocksource_watchdog_work(struct work_struct *work)
kernel/time/hrtimer.c
986
static void clock_was_set_work(struct work_struct *work)
kernel/time/ntp.c
494
static void sync_hw_clock(struct work_struct *work);
kernel/time/ntp.c
627
static void sync_hw_clock(struct work_struct *work)
kernel/time/posix-cpu-timers.c
1132
static void posix_cpu_timers_work(struct callback_head *work)
kernel/time/posix-cpu-timers.c
1134
struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
kernel/time/posix-cpu-timers.c
1188
memset(&p->posix_cputimers_work.work, 0,
kernel/time/posix-cpu-timers.c
1189
sizeof(p->posix_cputimers_work.work));
kernel/time/posix-cpu-timers.c
1190
init_task_work(&p->posix_cputimers_work.work,
kernel/time/posix-cpu-timers.c
1223
task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
kernel/time/tick-sched.c
405
static void nohz_full_kick_func(struct irq_work *work)
kernel/time/timer.c
274
static void timer_update_keys(struct work_struct *work);
kernel/time/timer.c
327
static void timer_update_keys(struct work_struct *work)
kernel/time/timer.c
666
struct delayed_work, timer, work.func),
kernel/time/timer.c
668
struct kthread_delayed_work, timer, work.func),
kernel/time/timer_migration.c
1576
struct work_struct *work = per_cpu_ptr(works, cpu);
kernel/time/timer_migration.c
1578
INIT_WORK(work, tmigr_cpu_unisolate);
kernel/time/timer_migration.c
1579
schedule_work_on(cpu, work);
kernel/time/timer_migration.c
1603
struct work_struct *work = per_cpu_ptr(works, cpu);
kernel/time/timer_migration.c
1605
INIT_WORK(work, tmigr_cpu_isolate);
kernel/time/timer_migration.c
1606
schedule_work_on(cpu, work);
kernel/trace/blktrace.c
1859
static void __init blktrace_works_func(struct work_struct *work)
kernel/trace/bpf_trace.c
2219
struct send_signal_irq_work *work;
kernel/trace/bpf_trace.c
2222
work = per_cpu_ptr(&send_signal_work, cpu);
kernel/trace/bpf_trace.c
2223
init_irq_work(&work->irq_work, do_bpf_send_signal);
kernel/trace/bpf_trace.c
394
static void __set_printk_clr_event(struct work_struct *work)
kernel/trace/bpf_trace.c
807
struct send_signal_irq_work *work;
kernel/trace/bpf_trace.c
810
work = container_of(entry, struct send_signal_irq_work, irq_work);
kernel/trace/bpf_trace.c
811
siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV;
kernel/trace/bpf_trace.c
813
group_send_sig_info(work->sig, siginfo, work->task, work->type);
kernel/trace/bpf_trace.c
814
put_task_struct(work->task);
kernel/trace/bpf_trace.c
819
struct send_signal_irq_work *work = NULL;
kernel/trace/bpf_trace.c
857
work = this_cpu_ptr(&send_signal_work);
kernel/trace/bpf_trace.c
858
if (irq_work_is_busy(&work->irq_work))
kernel/trace/bpf_trace.c
865
work->task = get_task_struct(task);
kernel/trace/bpf_trace.c
866
work->has_siginfo = siginfo == &info;
kernel/trace/bpf_trace.c
867
if (work->has_siginfo)
kernel/trace/bpf_trace.c
868
copy_siginfo(&work->info, &info);
kernel/trace/bpf_trace.c
869
work->sig = sig;
kernel/trace/bpf_trace.c
870
work->type = type;
kernel/trace/bpf_trace.c
871
irq_work_queue(&work->irq_work);
kernel/trace/ftrace.c
4360
static __init void ftrace_check_work_func(struct work_struct *work)
kernel/trace/ring_buffer.c
2371
init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
kernel/trace/ring_buffer.c
2454
irq_work_sync(&cpu_buffer->irq_work.work);
kernel/trace/ring_buffer.c
2507
init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
kernel/trace/ring_buffer.c
2682
irq_work_sync(&buffer->irq_work.work);
kernel/trace/ring_buffer.c
2926
static void update_pages_handler(struct work_struct *work)
kernel/trace/ring_buffer.c
2928
struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
kernel/trace/ring_buffer.c
4029
return irq_work_queue(&irq_work->work);
kernel/trace/ring_buffer.c
4036
return irq_work_queue_on(&irq_work->work, cpu);
kernel/trace/ring_buffer.c
449
struct irq_work work;
kernel/trace/ring_buffer.c
47
static void update_pages_handler(struct work_struct *work);
kernel/trace/ring_buffer.c
821
static void rb_wake_up_waiters(struct irq_work *work)
kernel/trace/ring_buffer.c
823
struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
kernel/trace/ring_buffer.c
884
irq_work_queue(&rbwork->work);
kernel/trace/trace.c
10031
static void __init eval_map_work_func(struct work_struct *work)
kernel/trace/trace.c
10173
static __init void tracer_init_tracefs_work_func(struct work_struct *work)
kernel/trace/trace.c
1494
static void latency_fsnotify_workfn(struct work_struct *work)
kernel/trace/trace.c
1496
struct trace_array *tr = container_of(work, struct trace_array,
kernel/trace/trace_events.c
3629
static void hist_poll_event_irq_work(struct irq_work *work)
kernel/trace/trace_events_filter.c
1365
static void free_filter_list_work(struct work_struct *work)
kernel/trace/trace_events_filter.c
1369
filter_list = container_of(to_rcu_work(work), struct filter_head, rwork);
kernel/trace/trace_events_user.c
139
struct work_struct work;
kernel/trace/trace_events_user.c
238
static void delayed_destroy_user_event(struct work_struct *work)
kernel/trace/trace_events_user.c
241
work, struct user_event, put_work);
kernel/trace/trace_events_user.c
443
static void user_event_enabler_fault_fixup(struct work_struct *work)
kernel/trace/trace_events_user.c
446
work, struct user_event_enabler_fault, work);
kernel/trace/trace_events_user.c
504
INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
kernel/trace/trace_events_user.c
512
if (!schedule_work(&fault->work)) {
kernel/trace/trace_events_user.c
779
static void delayed_user_event_mm_put(struct work_struct *work)
kernel/trace/trace_events_user.c
783
mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
kernel/umh.c
157
static void call_usermodehelper_exec_work(struct work_struct *work)
kernel/umh.c
160
container_of(work, struct subprocess_info, work);
kernel/umh.c
366
INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
kernel/umh.c
433
queue_work(system_unbound_wq, &sub_info->work);
kernel/unwind/deferred.c
152
struct unwind_work *work;
kernel/unwind/deferred.c
177
list_for_each_entry_srcu(work, &callbacks, list,
kernel/unwind/deferred.c
179
if (test_bit(work->bit, &bits)) {
kernel/unwind/deferred.c
180
work->func(work, &trace, cookie);
kernel/unwind/deferred.c
182
info->cache->unwind_completed |= BIT(work->bit);
kernel/unwind/deferred.c
201
task_work_cancel(task, &info->work);
kernel/unwind/deferred.c
229
int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
kernel/unwind/deferred.c
255
bit = READ_ONCE(work->bit);
kernel/unwind/deferred.c
290
ret = task_work_add(current, &info->work, twa_mode);
kernel/unwind/deferred.c
298
void unwind_deferred_cancel(struct unwind_work *work)
kernel/unwind/deferred.c
303
if (!work)
kernel/unwind/deferred.c
306
bit = work->bit;
kernel/unwind/deferred.c
313
list_del_rcu(&work->list);
kernel/unwind/deferred.c
316
work->bit = -1;
kernel/unwind/deferred.c
332
int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
kernel/unwind/deferred.c
334
memset(work, 0, sizeof(*work));
kernel/unwind/deferred.c
342
work->bit = ffz(unwind_mask);
kernel/unwind/deferred.c
343
__set_bit(work->bit, &unwind_mask);
kernel/unwind/deferred.c
345
list_add_rcu(&work->list, &callbacks);
kernel/unwind/deferred.c
346
work->func = func;
kernel/unwind/deferred.c
355
init_task_work(&info->work, unwind_deferred_task_work);
kernel/unwind/deferred.c
364
task_work_cancel(task, &info->work);
kernel/user_namespace.c
138
INIT_WORK(&ns->work, free_user_ns);
kernel/user_namespace.c
197
static void free_user_ns(struct work_struct *work)
kernel/user_namespace.c
200
container_of(work, struct user_namespace, work);
kernel/user_namespace.c
233
schedule_work(&ns->work);
kernel/user_namespace.c
32
static void free_user_ns(struct work_struct *work);
kernel/watchdog.c
1303
static void __init lockup_detector_delay_init(struct work_struct *work);
kernel/watchdog.c
1309
static void __init lockup_detector_delay_init(struct work_struct *work)
kernel/workqueue.c
1113
struct work_struct *work)
kernel/workqueue.c
1118
(unsigned long)work)
kernel/workqueue.c
1119
if (worker->current_work == work &&
kernel/workqueue.c
1120
worker->current_func == work->func)
kernel/workqueue.c
1126
static void mayday_cursor_func(struct work_struct *work)
kernel/workqueue.c
1146
static void move_linked_works(struct work_struct *work, struct list_head *head,
kernel/workqueue.c
1155
list_for_each_entry_safe_from(work, n, NULL, entry) {
kernel/workqueue.c
1156
list_move_tail(&work->entry, head);
kernel/workqueue.c
1157
if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
kernel/workqueue.c
1186
static bool assign_work(struct work_struct *work, struct worker *worker,
kernel/workqueue.c
1195
if (unlikely(work->func == mayday_cursor_func)) {
kernel/workqueue.c
1199
*nextp = list_next_entry(work, entry);
kernel/workqueue.c
1200
list_del_init(&work->entry);
kernel/workqueue.c
1212
collision = find_worker_executing_work(pool, work);
kernel/workqueue.c
1214
move_linked_works(work, &collision->scheduled, nextp);
kernel/workqueue.c
1218
move_linked_works(work, &worker->scheduled, nextp);
kernel/workqueue.c
1288
struct work_struct *work = list_first_entry(&pool->worklist,
kernel/workqueue.c
1294
get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
kernel/workqueue.c
1693
struct work_struct *work)
kernel/workqueue.c
1695
unsigned long *wdb = work_data_bits(work);
kernel/workqueue.c
1698
trace_workqueue_activate_work(work);
kernel/workqueue.c
1701
move_linked_works(work, &pwq->pool->worklist, NULL);
kernel/workqueue.c
1806
struct work_struct *work =
kernel/workqueue.c
1810
if (work && pwq_tryinc_nr_active(pwq, fill)) {
kernel/workqueue.c
1811
__pwq_activate_work(pwq, work);
kernel/workqueue.c
1871
struct work_struct *work;
kernel/workqueue.c
1903
work = list_first_entry_or_null(&pwq->inactive_works,
kernel/workqueue.c
1905
if (!work) {
kernel/workqueue.c
1919
__pwq_activate_work(pwq, work);
kernel/workqueue.c
2063
static int try_to_grab_pending(struct work_struct *work, u32 cflags,
kernel/workqueue.c
2073
struct delayed_work *dwork = to_delayed_work(work);
kernel/workqueue.c
2085
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
kernel/workqueue.c
2093
pool = get_work_pool(work);
kernel/workqueue.c
2106
pwq = get_work_pwq(work);
kernel/workqueue.c
2108
unsigned long work_data = *work_data_bits(work);
kernel/workqueue.c
2110
debug_work_deactivate(work);
kernel/workqueue.c
2127
move_linked_works(work, &pwq->pool->worklist, NULL);
kernel/workqueue.c
2129
list_del_init(&work->entry);
kernel/workqueue.c
2135
set_work_pool_and_keep_pending(work, pool->id,
kernel/workqueue.c
2167
static bool work_grab_pending(struct work_struct *work, u32 cflags,
kernel/workqueue.c
2173
ret = try_to_grab_pending(work, cflags, irq_flags);
kernel/workqueue.c
2193
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
kernel/workqueue.c
2196
debug_work_activate(work);
kernel/workqueue.c
2199
kasan_record_aux_stack(work);
kernel/workqueue.c
2202
set_work_pwq(work, pwq, extra_flags);
kernel/workqueue.c
2203
list_add_tail(&work->entry, head);
kernel/workqueue.c
2249
struct work_struct *work)
kernel/workqueue.c
2271
work->func, wq->name))) {
kernel/workqueue.c
2296
last_pool = get_work_pool(work);
kernel/workqueue.c
2302
worker = find_worker_executing_work(last_pool, work);
kernel/workqueue.c
2336
trace_workqueue_queue_work(req_cpu, pwq, work);
kernel/workqueue.c
2338
if (WARN_ON(!list_empty(&work->entry)))
kernel/workqueue.c
2353
trace_workqueue_activate_work(work);
kernel/workqueue.c
2354
insert_work(pwq, work, &pool->worklist, work_flags);
kernel/workqueue.c
2358
insert_work(pwq, work, &pwq->inactive_works, work_flags);
kernel/workqueue.c
2366
static bool clear_pending_if_disabled(struct work_struct *work)
kernel/workqueue.c
2368
unsigned long data = *work_data_bits(work);
kernel/workqueue.c
2376
set_work_pool_and_clear_pending(work, offqd.pool_id,
kernel/workqueue.c
2396
struct work_struct *work)
kernel/workqueue.c
2403
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
kernel/workqueue.c
2404
!clear_pending_if_disabled(work)) {
kernel/workqueue.c
2405
__queue_work(cpu, wq, work);
kernel/workqueue.c
2464
struct work_struct *work)
kernel/workqueue.c
2482
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
kernel/workqueue.c
2483
!clear_pending_if_disabled(work)) {
kernel/workqueue.c
2486
__queue_work(cpu, wq, work);
kernel/workqueue.c
2500
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
kernel/workqueue.c
2508
struct work_struct *work = &dwork->work;
kernel/workqueue.c
2513
WARN_ON_ONCE(!list_empty(&work->entry));
kernel/workqueue.c
2522
__queue_work(cpu, wq, &dwork->work);
kernel/workqueue.c
2565
struct work_struct *work = &dwork->work;
kernel/workqueue.c
2572
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
kernel/workqueue.c
2573
!clear_pending_if_disabled(work)) {
kernel/workqueue.c
2607
ret = work_grab_pending(&dwork->work, WORK_CANCEL_DELAYED, &irq_flags);
kernel/workqueue.c
2609
if (!clear_pending_if_disabled(&dwork->work))
kernel/workqueue.c
2623
__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
kernel/workqueue.c
2639
struct work_struct *work = &rwork->work;
kernel/workqueue.c
2645
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
kernel/workqueue.c
2646
!WARN_ON_ONCE(clear_pending_if_disabled(work))) {
kernel/workqueue.c
2962
static void idle_cull_fn(struct work_struct *work)
kernel/workqueue.c
2964
struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
kernel/workqueue.c
3024
struct work_struct *work;
kernel/workqueue.c
3036
list_for_each_entry(work, &pool->worklist, entry)
kernel/workqueue.c
3037
send_mayday(get_work_pwq(work));
kernel/workqueue.c
3174
static void process_one_work(struct worker *worker, struct work_struct *work)
kernel/workqueue.c
3178
struct pool_workqueue *pwq = get_work_pwq(work);
kernel/workqueue.c
3193
lockdep_copy_map(&lockdep_map, &work->lockdep_map);
kernel/workqueue.c
3200
debug_work_deactivate(work);
kernel/workqueue.c
3201
hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
kernel/workqueue.c
3202
worker->current_work = work;
kernel/workqueue.c
3203
worker->current_func = work->func;
kernel/workqueue.c
3208
work_data = *work_data_bits(work);
kernel/workqueue.c
3217
list_del_init(&work->entry);
kernel/workqueue.c
3242
set_work_pool_and_clear_pending(work, pool->id, pool_offq_flags(pool));
kernel/workqueue.c
3275
trace_workqueue_execute_start(work);
kernel/workqueue.c
3276
worker->current_func(work);
kernel/workqueue.c
3281
trace_workqueue_execute_end(work, worker->current_func);
kernel/workqueue.c
3350
struct work_struct *work;
kernel/workqueue.c
3353
while ((work = list_first_entry_or_null(&worker->scheduled,
kernel/workqueue.c
3359
process_one_work(worker, work);
kernel/workqueue.c
3435
struct work_struct *work =
kernel/workqueue.c
3439
if (assign_work(work, worker, NULL))
kernel/workqueue.c
3463
struct work_struct *work, *n;
kernel/workqueue.c
3498
work = list_first_entry(&pool->worklist, struct work_struct, entry);
kernel/workqueue.c
3500
work = list_next_entry(cursor, entry);
kernel/workqueue.c
3503
list_for_each_entry_safe_from(work, n, &pool->worklist, entry) {
kernel/workqueue.c
3504
if (get_work_pwq(work) == pwq && assign_work(work, rescuer, &n)) {
kernel/workqueue.c
3657
struct work_struct *work =
kernel/workqueue.c
3661
if (assign_work(work, worker, NULL))
kernel/workqueue.c
3694
struct work_struct work;
kernel/workqueue.c
3699
static void drain_dead_softirq_workfn(struct work_struct *work)
kernel/workqueue.c
3702
container_of(work, struct wq_drain_dead_softirq_work, work);
kernel/workqueue.c
3731
queue_work(system_bh_highpri_wq, work);
kernel/workqueue.c
3733
queue_work(system_bh_wq, work);
kernel/workqueue.c
3758
INIT_WORK_ONSTACK(&dead_work.work, drain_dead_softirq_workfn);
kernel/workqueue.c
3763
queue_work(system_bh_highpri_wq, &dead_work.work);
kernel/workqueue.c
3765
queue_work(system_bh_wq, &dead_work.work);
kernel/workqueue.c
3768
destroy_work_on_stack(&dead_work.work);
kernel/workqueue.c
3809
struct work_struct work;
kernel/workqueue.c
3814
static void wq_barrier_func(struct work_struct *work)
kernel/workqueue.c
3816
struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
kernel/workqueue.c
3863
INIT_WORK_ONSTACK_KEY(&barr->work, wq_barrier_func,
kernel/workqueue.c
3865
__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
kernel/workqueue.c
3894
insert_work(pwq, &barr->work, head, work_flags);
kernel/workqueue.c
3999
static void touch_work_lockdep_map(struct work_struct *work,
kernel/workqueue.c
4006
lock_map_acquire(&work->lockdep_map);
kernel/workqueue.c
4007
lock_map_release(&work->lockdep_map);
kernel/workqueue.c
4226
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
kernel/workqueue.c
4235
pool = get_work_pool(work);
kernel/workqueue.c
4243
pwq = get_work_pwq(work);
kernel/workqueue.c
4248
worker = find_worker_executing_work(pool, work);
kernel/workqueue.c
4255
check_flush_dependency(wq, work, from_cancel);
kernel/workqueue.c
4257
insert_wq_barrier(pwq, barr, work, worker);
kernel/workqueue.c
4260
touch_work_lockdep_map(work, wq);
kernel/workqueue.c
4282
static bool __flush_work(struct work_struct *work, bool from_cancel)
kernel/workqueue.c
4289
if (WARN_ON(!work->func))
kernel/workqueue.c
4292
if (!start_flush_work(work, &barr, from_cancel))
kernel/workqueue.c
4303
unsigned long data = *work_data_bits(work);
kernel/workqueue.c
4317
pool = get_work_pool(work);
kernel/workqueue.c
4331
destroy_work_on_stack(&barr.work);
kernel/workqueue.c
4346
bool flush_work(struct work_struct *work)
kernel/workqueue.c
4349
return __flush_work(work, false);
kernel/workqueue.c
4369
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
kernel/workqueue.c
4371
return flush_work(&dwork->work);
kernel/workqueue.c
4385
if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
kernel/workqueue.c
4387
flush_work(&rwork->work);
kernel/workqueue.c
4390
return flush_work(&rwork->work);
kernel/workqueue.c
4413
static bool __cancel_work(struct work_struct *work, u32 cflags)
kernel/workqueue.c
4419
ret = work_grab_pending(work, cflags, &irq_flags);
kernel/workqueue.c
4421
work_offqd_unpack(&offqd, *work_data_bits(work));
kernel/workqueue.c
4426
set_work_pool_and_clear_pending(work, offqd.pool_id,
kernel/workqueue.c
4432
static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
kernel/workqueue.c
4436
ret = __cancel_work(work, cflags | WORK_CANCEL_DISABLE);
kernel/workqueue.c
4438
if (*work_data_bits(work) & WORK_OFFQ_BH)
kernel/workqueue.c
4448
__flush_work(work, true);
kernel/workqueue.c
4451
enable_work(work);
kernel/workqueue.c
4459
bool cancel_work(struct work_struct *work)
kernel/workqueue.c
4461
return __cancel_work(work, 0);
kernel/workqueue.c
4483
bool cancel_work_sync(struct work_struct *work)
kernel/workqueue.c
4485
return __cancel_work_sync(work, 0);
kernel/workqueue.c
4507
return __cancel_work(&dwork->work, WORK_CANCEL_DELAYED);
kernel/workqueue.c
4522
return __cancel_work_sync(&dwork->work, WORK_CANCEL_DELAYED);
kernel/workqueue.c
4538
bool disable_work(struct work_struct *work)
kernel/workqueue.c
4540
return __cancel_work(work, WORK_CANCEL_DISABLE);
kernel/workqueue.c
4557
bool disable_work_sync(struct work_struct *work)
kernel/workqueue.c
4559
return __cancel_work_sync(work, WORK_CANCEL_DISABLE);
kernel/workqueue.c
4573
bool enable_work(struct work_struct *work)
kernel/workqueue.c
4578
work_grab_pending(work, 0, &irq_flags);
kernel/workqueue.c
4580
work_offqd_unpack(&offqd, *work_data_bits(work));
kernel/workqueue.c
4582
set_work_pool_and_clear_pending(work, offqd.pool_id,
kernel/workqueue.c
4598
return __cancel_work(&dwork->work,
kernel/workqueue.c
4611
return __cancel_work_sync(&dwork->work,
kernel/workqueue.c
4624
return enable_work(&dwork->work);
kernel/workqueue.c
4651
struct work_struct *work = per_cpu_ptr(works, cpu);
kernel/workqueue.c
4653
INIT_WORK(work, func);
kernel/workqueue.c
4654
schedule_work_on(cpu, work);
kernel/workqueue.c
4680
fn(&ew->work);
kernel/workqueue.c
4684
INIT_WORK(&ew->work, fn);
kernel/workqueue.c
4685
schedule_work(&ew->work);
kernel/workqueue.c
5151
static void pwq_release_workfn(struct kthread_work *work)
kernel/workqueue.c
5153
struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
kernel/workqueue.c
616
struct work_struct *work = addr;
kernel/workqueue.c
6166
unsigned int work_busy(struct work_struct *work)
kernel/workqueue.c
6172
if (work_pending(work))
kernel/workqueue.c
6176
pool = get_work_pool(work);
kernel/workqueue.c
6179
if (find_worker_executing_work(pool, work))
kernel/workqueue.c
618
return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
kernel/workqueue.c
627
struct work_struct *work = addr;
kernel/workqueue.c
631
cancel_work_sync(work);
kernel/workqueue.c
6313
static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
kernel/workqueue.c
6315
if (work->func == wq_barrier_func) {
kernel/workqueue.c
6318
barr = container_of(work, struct wq_barrier, work);
kernel/workqueue.c
632
debug_object_init(work, &work_debug_descr);
kernel/workqueue.c
6326
pr_cont_work_flush(comma, work->func, pcwsp);
kernel/workqueue.c
6334
struct work_struct *work;
kernel/workqueue.c
6365
list_for_each_entry(work, &worker->scheduled, entry)
kernel/workqueue.c
6366
pr_cont_work(false, work, &pcws);
kernel/workqueue.c
6373
list_for_each_entry(work, &pool->worklist, entry) {
kernel/workqueue.c
6374
if (get_work_pwq(work) == pwq) {
kernel/workqueue.c
6383
list_for_each_entry(work, &pool->worklist, entry) {
kernel/workqueue.c
6384
if (get_work_pwq(work) != pwq)
kernel/workqueue.c
6387
pr_cont_work(comma, work, &pcws);
kernel/workqueue.c
6388
comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
kernel/workqueue.c
6398
list_for_each_entry(work, &pwq->inactive_works, entry) {
kernel/workqueue.c
6399
pr_cont_work(comma, work, &pcws);
kernel/workqueue.c
6400
comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
kernel/workqueue.c
645
struct work_struct *work = addr;
kernel/workqueue.c
649
cancel_work_sync(work);
kernel/workqueue.c
650
debug_object_free(work, &work_debug_descr);
kernel/workqueue.c
665
static inline void debug_work_activate(struct work_struct *work)
kernel/workqueue.c
667
debug_object_activate(work, &work_debug_descr);
kernel/workqueue.c
670
static inline void debug_work_deactivate(struct work_struct *work)
kernel/workqueue.c
672
debug_object_deactivate(work, &work_debug_descr);
kernel/workqueue.c
675
void __init_work(struct work_struct *work, int onstack)
kernel/workqueue.c
678
debug_object_init_on_stack(work, &work_debug_descr);
kernel/workqueue.c
680
debug_object_init(work, &work_debug_descr);
kernel/workqueue.c
6826
struct work_struct work;
kernel/workqueue.c
6832
static void work_for_cpu_fn(struct work_struct *work)
kernel/workqueue.c
6834
struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
kernel/workqueue.c
684
void destroy_work_on_stack(struct work_struct *work)
kernel/workqueue.c
6856
INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
kernel/workqueue.c
6857
schedule_work_on(cpu, &wfc.work);
kernel/workqueue.c
6858
flush_work(&wfc.work);
kernel/workqueue.c
6859
destroy_work_on_stack(&wfc.work);
kernel/workqueue.c
686
debug_object_free(work, &work_debug_descr);
kernel/workqueue.c
690
void destroy_delayed_work_on_stack(struct delayed_work *work)
kernel/workqueue.c
692
timer_destroy_on_stack(&work->timer);
kernel/workqueue.c
693
debug_object_free(&work->work, &work_debug_descr);
kernel/workqueue.c
698
static inline void debug_work_activate(struct work_struct *work) { }
kernel/workqueue.c
699
static inline void debug_work_deactivate(struct work_struct *work) { }
kernel/workqueue.c
789
static inline void set_work_data(struct work_struct *work, unsigned long data)
kernel/workqueue.c
791
WARN_ON_ONCE(!work_pending(work));
kernel/workqueue.c
792
atomic_long_set(&work->data, data | work_static(work));
kernel/workqueue.c
795
static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
kernel/workqueue.c
798
set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING |
kernel/workqueue.c
802
static void set_work_pool_and_keep_pending(struct work_struct *work,
kernel/workqueue.c
805
set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
kernel/workqueue.c
809
static void set_work_pool_and_clear_pending(struct work_struct *work,
kernel/workqueue.c
819
set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
kernel/workqueue.c
857
static struct pool_workqueue *get_work_pwq(struct work_struct *work)
kernel/workqueue.c
859
unsigned long data = atomic_long_read(&work->data);
kernel/workqueue.c
882
static struct worker_pool *get_work_pool(struct work_struct *work)
kernel/workqueue.c
884
unsigned long data = atomic_long_read(&work->data);
lib/closure.c
124
struct closure *cl = container_of(ws, struct closure, work);
lib/closure.c
274
work_data_bits(&cl->work)) ? "Q" : "",
lib/closure.c
50
destructor(&cl->work);
lib/debugobjects.c
109
static void free_obj_work(struct work_struct *work);
lib/debugobjects.c
490
static void free_obj_work(struct work_struct *work)
lib/dim/net_dim.c
101
void (*rx_dim_work)(struct work_struct *work),
lib/dim/net_dim.c
102
void (*tx_dim_work)(struct work_struct *work))
lib/dim/net_dim.c
176
INIT_WORK(&dim->work, irq_moder->tx_dim_work);
lib/dim/net_dim.c
181
INIT_WORK(&dim->work, irq_moder->rx_dim_work);
lib/dim/net_dim.c
188
cancel_work_sync(&dim->work);
lib/dim/net_dim.c
366
schedule_work(&dim->work);
lib/dim/rdma_dim.c
95
schedule_work(&dim->work);
lib/irq_poll.c
108
work = 0;
lib/irq_poll.c
110
work = iop->poll(iop, weight);
lib/irq_poll.c
112
budget -= work;
lib/irq_poll.c
124
if (work >= weight) {
lib/irq_poll.c
88
int work, weight;
lib/kobject.c
702
static void kobject_delayed_cleanup(struct work_struct *work)
lib/kobject.c
704
kobject_cleanup(container_of(to_delayed_work(work),
lib/once.c
16
struct once_work *work;
lib/once.c
18
work = container_of(w, struct once_work, work);
lib/once.c
19
BUG_ON(!static_key_enabled(work->key));
lib/once.c
20
static_branch_disable(work->key);
lib/once.c
21
module_put(work->module);
lib/once.c
22
kfree(work);
lib/once.c
33
INIT_WORK(&w->work, once_deferred);
lib/once.c
37
schedule_work(&w->work);
lib/once.c
9
struct work_struct work;
lib/ref_tracker.c
497
static void debugfs_reap_work(struct work_struct *work)
lib/rhashtable.c
416
static void rht_deferred_worker(struct work_struct *work)
lib/rhashtable.c
422
ht = container_of(work, struct rhashtable, run_work);
lib/test_lockup.c
405
static void test_work_fn(struct work_struct *work)
lib/test_lockup.c
408
work == per_cpu_ptr(&test_works, master_cpu));
lib/test_objpool.c
365
struct task_struct *work;
lib/test_objpool.c
373
work = kthread_run_on_cpu(ot_thread_worker, item,
lib/test_objpool.c
375
if (IS_ERR(work))
lib/test_objpool.c
552
struct task_struct *work;
lib/test_objpool.c
560
work = kthread_run_on_cpu(ot_thread_worker, item, cpu, "ot_worker_%d");
lib/test_objpool.c
561
if (IS_ERR(work))
lib/tests/slub_kunit.c
186
struct work_struct work;
lib/tests/slub_kunit.c
194
cdw = container_of(w, struct cache_destroy_work, work);
lib/tests/slub_kunit.c
212
INIT_WORK_ONSTACK(&cdw.work, cache_destroy_workfn);
lib/tests/slub_kunit.c
234
queue_work(wq, &cdw.work);
lib/tests/slub_kunit.c
235
flush_work(&cdw.work);
lib/zlib_inflate/inflate.c
496
&(state->lenbits), state->work);
lib/zlib_inflate/inflate.c
562
&(state->lenbits), state->work);
lib/zlib_inflate/inflate.c
571
&(state->next), &(state->distbits), state->work);
lib/zlib_inflate/inflate.h
110
unsigned short work[288]; /* work area for code table building */
lib/zlib_inflate/inftrees.c
133
if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym;
lib/zlib_inflate/inftrees.c
170
base = extra = work; /* dummy value--not used */
lib/zlib_inflate/inftrees.c
205
if ((int)(work[sym]) < end) {
lib/zlib_inflate/inftrees.c
207
this.val = work[sym];
lib/zlib_inflate/inftrees.c
209
else if ((int)(work[sym]) > end) {
lib/zlib_inflate/inftrees.c
210
this.op = (unsigned char)(extra[work[sym]]);
lib/zlib_inflate/inftrees.c
211
this.val = base[work[sym]];
lib/zlib_inflate/inftrees.c
24
code **table, unsigned *bits, unsigned short *work)
lib/zlib_inflate/inftrees.c
242
len = lens[work[sym]];
lib/zlib_inflate/inftrees.h
58
unsigned *bits, unsigned short *work);
mm/backing-dev.c
502
static void wb_update_bandwidth_workfn(struct work_struct *work)
mm/backing-dev.c
504
struct bdi_writeback *wb = container_of(to_delayed_work(work),
mm/backing-dev.c
599
static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
mm/backing-dev.c
611
static void cgwb_release_workfn(struct work_struct *work)
mm/backing-dev.c
613
struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
mm/backing-dev.c
884
static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
mm/execmem.c
158
static void execmem_cache_clean(struct work_struct *work)
mm/execmem.c
374
static void execmem_cache_free_slow(struct work_struct *work);
mm/execmem.c
377
static void execmem_cache_free_slow(struct work_struct *work)
mm/hugetlb.c
1580
static void free_hpage_workfn(struct work_struct *work)
mm/kasan/kasan_test_c.c
968
static void workqueue_uaf_work(struct work_struct *work)
mm/kasan/kasan_test_c.c
970
kfree(work);
mm/kasan/kasan_test_c.c
976
struct work_struct *work;
mm/kasan/kasan_test_c.c
981
work = kmalloc_obj(struct work_struct);
mm/kasan/kasan_test_c.c
982
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
mm/kasan/kasan_test_c.c
984
INIT_WORK(work, workqueue_uaf_work);
mm/kasan/kasan_test_c.c
985
queue_work(workqueue, work);
mm/kasan/kasan_test_c.c
989
((volatile struct work_struct *)work)->data);
mm/kfence/core.c
874
static void wake_up_kfence_timer(struct irq_work *work)
mm/kfence/core.c
892
static void toggle_allocation_gate(struct work_struct *work)
mm/kmemleak.c
2219
static void kmemleak_do_cleanup(struct work_struct *work)
mm/memcontrol-v1.c
1001
static void memcg_event_remove(struct work_struct *work)
mm/memcontrol-v1.c
1004
container_of(work, struct mem_cgroup_event, remove);
mm/memcontrol.c
1800
struct work_struct work;
mm/memcontrol.c
1816
struct work_struct work;
mm/memcontrol.c
2019
static void schedule_drain_work(int cpu, struct work_struct *work)
mm/memcontrol.c
2029
queue_work_on(cpu, memcg_wq, work);
mm/memcontrol.c
2060
drain_local_memcg_stock(&memcg_st->work);
mm/memcontrol.c
2062
schedule_drain_work(cpu, &memcg_st->work);
mm/memcontrol.c
2070
drain_local_obj_stock(&obj_st->work);
mm/memcontrol.c
2072
schedule_drain_work(cpu, &obj_st->work);
mm/memcontrol.c
2115
static void high_work_func(struct work_struct *work)
mm/memcontrol.c
2119
memcg = container_of(work, struct mem_cgroup, high_work);
mm/memcontrol.c
5182
INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
mm/memcontrol.c
5184
INIT_WORK(&per_cpu_ptr(&obj_stock, cpu)->work,
mm/memory-failure.c
2574
struct work_struct work;
mm/memory-failure.c
2609
schedule_work_on(smp_processor_id(), &mf_cpu->work);
mm/memory-failure.c
2618
static void memory_failure_work_func(struct work_struct *work)
mm/memory-failure.c
2625
mf_cpu = container_of(work, struct memory_failure_cpu, work);
mm/memory-failure.c
2648
INIT_WORK(&mf_cpu->work, memory_failure_work_func);
mm/page_reporting.c
307
static void page_reporting_process(struct work_struct *work)
mm/page_reporting.c
309
struct delayed_work *d_work = to_delayed_work(work);
mm/page_reporting.c
311
container_of(d_work, struct page_reporting_dev_info, work);
mm/page_reporting.c
346
schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
mm/page_reporting.c
381
INIT_DELAYED_WORK(&prdev->work, &page_reporting_process);
mm/page_reporting.c
412
cancel_delayed_work_sync(&prdev->work);
mm/page_reporting.c
83
schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
mm/percpu.c
196
static void pcpu_balance_workfn(struct work_struct *work);
mm/percpu.c
2196
static void pcpu_balance_workfn(struct work_struct *work)
mm/pgtable-generic.c
412
static void kernel_pgtable_work_func(struct work_struct *work);
mm/pgtable-generic.c
418
struct work_struct work;
mm/pgtable-generic.c
422
.work = __WORK_INITIALIZER(kernel_pgtable_work.work, kernel_pgtable_work_func),
mm/pgtable-generic.c
425
static void kernel_pgtable_work_func(struct work_struct *work)
mm/pgtable-generic.c
445
schedule_work(&kernel_pgtable_work.work);
mm/slab_common.c
1550
static void kfree_rcu_work(struct work_struct *work)
mm/slab_common.c
1561
krwp = container_of(to_rcu_work(work),
mm/slab_common.c
1771
static void kfree_rcu_monitor(struct work_struct *work)
mm/slab_common.c
1773
struct kfree_rcu_cpu *krcp = container_of(work,
mm/slab_common.c
1774
struct kfree_rcu_cpu, monitor_work.work);
mm/slab_common.c
1791
static void fill_page_cache_func(struct work_struct *work)
mm/slab_common.c
1795
container_of(work, struct kfree_rcu_cpu,
mm/slab_common.c
1796
page_cache_work.work);
mm/slab_common.c
2154
kfree_rcu_monitor(&krcp->monitor_work.work);
mm/slub.c
3967
sfw = container_of(w, struct slub_flush_work, work);
mm/slub.c
3989
INIT_WORK(&sfw->work, flush_cpu_sheaves);
mm/slub.c
3992
queue_work_on(cpu, flushwq, &sfw->work);
mm/slub.c
3999
flush_work(&sfw->work);
mm/slub.c
4019
sfw = container_of(w, struct slub_flush_work, work);
mm/slub.c
4053
INIT_WORK(&sfw->work, flush_rcu_sheaf);
mm/slub.c
4055
queue_work_on(cpu, flushwq, &sfw->work);
mm/slub.c
4060
flush_work(&sfw->work);
mm/slub.c
483
struct work_struct work;
mm/slub.c
6089
struct irq_work work;
mm/slub.c
6092
static void free_deferred_objects(struct irq_work *work);
mm/slub.c
6096
.work = IRQ_WORK_INIT(free_deferred_objects),
mm/slub.c
6104
static void free_deferred_objects(struct irq_work *work)
mm/slub.c
6106
struct defer_free *df = container_of(work, struct defer_free, work);
mm/slub.c
6147
irq_work_queue(&df->work);
mm/slub.c
6155
irq_work_sync(&per_cpu_ptr(&defer_free_objects, cpu)->work);
mm/swap.c
875
struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
mm/swap.c
878
INIT_WORK(work, lru_add_drain_per_cpu);
mm/swap.c
879
queue_work_on(cpu, mm_percpu_wq, work);
mm/swapfile.c
1028
static void swap_reclaim_work(struct work_struct *work)
mm/swapfile.c
1032
si = container_of(work, struct swap_info_struct, reclaim_work);
mm/swapfile.c
649
static void swap_discard_work(struct work_struct *work)
mm/swapfile.c
653
si = container_of(work, struct swap_info_struct, discard_work);
mm/vmalloc.c
1068
static void drain_vmap_area_work(struct work_struct *work);
mm/vmalloc.c
2303
static void purge_vmap_node(struct work_struct *work)
mm/vmalloc.c
2305
struct vmap_node *vn = container_of(work,
mm/vmalloc.c
2433
static void drain_vmap_area_work(struct work_struct *work)
mm/vmalloc.c
3763
static void cleanup_vm_area_work(struct work_struct *work)
mm/vmpressure.c
180
static void vmpressure_work_fn(struct work_struct *work)
mm/vmpressure.c
182
struct vmpressure *vmpr = work_to_vmpressure(work);
mm/vmpressure.c
290
schedule_work(&vmpr->work);
mm/vmpressure.c
464
INIT_WORK(&vmpr->work, vmpressure_work_fn);
mm/vmpressure.c
480
flush_work(&vmpr->work);
mm/vmpressure.c
70
static struct vmpressure *work_to_vmpressure(struct work_struct *work)
mm/vmpressure.c
72
return container_of(work, struct vmpressure, work);
mm/vmstat.c
1970
static void refresh_vm_stats(struct work_struct *work)
mm/zsmalloc.c
1793
static void async_free_zspage(struct work_struct *work)
mm/zsmalloc.c
1799
struct zs_pool *pool = container_of(work, struct zs_pool,
mm/zswap.c
334
static void __zswap_pool_release(struct work_struct *work)
mm/zswap.c
336
struct zswap_pool *pool = container_of(work, typeof(*pool),
net/9p/trans_fd.c
1050
static void p9_poll_workfn(struct work_struct *work)
net/9p/trans_fd.c
113
static void p9_poll_workfn(struct work_struct *work);
net/9p/trans_fd.c
244
static void p9_read_work(struct work_struct *work)
net/9p/trans_fd.c
250
m = container_of(work, struct p9_conn, rq);
net/9p/trans_fd.c
410
static void p9_write_work(struct work_struct *work)
net/9p/trans_fd.c
417
m = container_of(work, struct p9_conn, wq);
net/9p/trans_xen.c
172
static void p9_xen_response(struct work_struct *work)
net/9p/trans_xen.c
181
ring = container_of(work, struct xen_9pfs_dataring, work);
net/9p/trans_xen.c
253
schedule_work(&ring->work);
net/9p/trans_xen.c
284
cancel_work_sync(&ring->work);
net/9p/trans_xen.c
339
INIT_WORK(&ring->work, p9_xen_response);
net/9p/trans_xen.c
48
struct work_struct work;
net/atm/lec.c
1252
static void lec_arp_check_expire(struct work_struct *work);
net/atm/lec.c
1688
static void lec_arp_check_expire(struct work_struct *work)
net/atm/lec.c
1692
container_of(work, struct lec_priv, lec_arp_work.work);
net/batman-adv/bat_iv_ogm.c
1683
static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
net/batman-adv/bat_iv_ogm.c
1690
delayed_work = to_delayed_work(work);
net/batman-adv/bat_iv_ogm.c
61
static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work);
net/batman-adv/bat_v_elp.c
290
static void batadv_v_elp_periodic_work(struct work_struct *work)
net/batman-adv/bat_v_elp.c
303
bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
net/batman-adv/bat_v_ogm.c
363
static void batadv_v_ogm_send(struct work_struct *work)
net/batman-adv/bat_v_ogm.c
368
bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
net/batman-adv/bat_v_ogm.c
382
void batadv_v_ogm_aggr_work(struct work_struct *work)
net/batman-adv/bat_v_ogm.c
387
batv = container_of(work, struct batadv_hard_iface_bat_v, aggr_wq.work);
net/batman-adv/bat_v_ogm.h
18
void batadv_v_ogm_aggr_work(struct work_struct *work);
net/batman-adv/bridge_loop_avoidance.c
1425
static void batadv_bla_periodic_work(struct work_struct *work)
net/batman-adv/bridge_loop_avoidance.c
1437
delayed_work = to_delayed_work(work);
net/batman-adv/bridge_loop_avoidance.c
1438
priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
net/batman-adv/bridge_loop_avoidance.c
1509
queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
net/batman-adv/bridge_loop_avoidance.c
1580
INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
net/batman-adv/bridge_loop_avoidance.c
1582
queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
net/batman-adv/bridge_loop_avoidance.c
1806
cancel_delayed_work_sync(&bat_priv->bla.work);
net/batman-adv/bridge_loop_avoidance.c
459
static void batadv_bla_loopdetect_report(struct work_struct *work)
net/batman-adv/bridge_loop_avoidance.c
465
backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
net/batman-adv/bridge_loop_avoidance.c
54
static void batadv_bla_periodic_work(struct work_struct *work);
net/batman-adv/distributed-arp-table.c
103
queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
net/batman-adv/distributed-arp-table.c
194
static void batadv_dat_purge(struct work_struct *work)
net/batman-adv/distributed-arp-table.c
200
delayed_work = to_delayed_work(work);
net/batman-adv/distributed-arp-table.c
201
priv_dat = container_of(delayed_work, struct batadv_priv_dat, work);
net/batman-adv/distributed-arp-table.c
818
INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
net/batman-adv/distributed-arp-table.c
837
cancel_delayed_work_sync(&bat_priv->dat.work);
net/batman-adv/distributed-arp-table.c
95
static void batadv_dat_purge(struct work_struct *work);
net/batman-adv/multicast.c
1928
INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
net/batman-adv/multicast.c
2164
cancel_delayed_work_sync(&bat_priv->mcast.work);
net/batman-adv/multicast.c
62
static void batadv_mcast_mla_update(struct work_struct *work);
net/batman-adv/multicast.c
70
queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
net/batman-adv/multicast.c
936
static void batadv_mcast_mla_update(struct work_struct *work)
net/batman-adv/multicast.c
942
delayed_work = to_delayed_work(work);
net/batman-adv/multicast.c
943
priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
net/batman-adv/originator.c
1300
static void batadv_purge_orig(struct work_struct *work)
net/batman-adv/originator.c
1305
delayed_work = to_delayed_work(work);
net/batman-adv/originator.c
83
static void batadv_purge_orig(struct work_struct *work);
net/batman-adv/send.c
1036
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
net/batman-adv/send.c
1045
delayed_work = to_delayed_work(work);
net/batman-adv/send.c
41
static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
net/batman-adv/tp_meter.c
448
static void batadv_tp_sender_finish(struct work_struct *work)
net/batman-adv/tp_meter.c
453
delayed_work = to_delayed_work(work);
net/batman-adv/translation-table.c
3499
static void batadv_tt_purge(struct work_struct *work)
net/batman-adv/translation-table.c
3505
delayed_work = to_delayed_work(work);
net/batman-adv/translation-table.c
3506
priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
net/batman-adv/translation-table.c
3514
queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
net/batman-adv/translation-table.c
3529
cancel_delayed_work_sync(&bat_priv->tt.work);
net/batman-adv/translation-table.c
4125
INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
net/batman-adv/translation-table.c
4126
queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
net/batman-adv/translation-table.c
69
static void batadv_tt_purge(struct work_struct *work);
net/batman-adv/types.h
1013
struct delayed_work work;
net/batman-adv/types.h
1071
struct delayed_work work;
net/batman-adv/types.h
1147
struct delayed_work work;
net/batman-adv/types.h
1267
struct delayed_work work;
net/bluetooth/6lowpan.c
1075
struct work_struct work;
net/bluetooth/6lowpan.c
1079
static void do_enable_set(struct work_struct *work)
net/bluetooth/6lowpan.c
1081
struct set_enable *set_enable = container_of(work,
net/bluetooth/6lowpan.c
1082
struct set_enable, work);
net/bluetooth/6lowpan.c
1115
INIT_WORK(&set_enable->work, do_enable_set);
net/bluetooth/6lowpan.c
1117
schedule_work(&set_enable->work);
net/bluetooth/6lowpan.c
606
static void do_notify_peers(struct work_struct *work)
net/bluetooth/6lowpan.c
608
struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
net/bluetooth/6lowpan.c
609
notify_peers.work);
net/bluetooth/6lowpan.c
761
static void delete_netdev(struct work_struct *work)
net/bluetooth/6lowpan.c
763
struct lowpan_btle_dev *entry = container_of(work,
net/bluetooth/coredump.c
331
void hci_devcd_rx(struct work_struct *work)
net/bluetooth/coredump.c
333
struct hci_dev *hdev = container_of(work, struct hci_dev, dump.dump_rx);
net/bluetooth/coredump.c
395
void hci_devcd_timeout(struct work_struct *work)
net/bluetooth/coredump.c
397
struct hci_dev *hdev = container_of(work, struct hci_dev,
net/bluetooth/coredump.c
398
dump.dump_timeout.work);
net/bluetooth/hci_conn.c
559
static void hci_conn_timeout(struct work_struct *work)
net/bluetooth/hci_conn.c
561
struct hci_conn *conn = container_of(work, struct hci_conn,
net/bluetooth/hci_conn.c
562
disc_work.work);
net/bluetooth/hci_conn.c
583
static void hci_conn_idle(struct work_struct *work)
net/bluetooth/hci_conn.c
585
struct hci_conn *conn = container_of(work, struct hci_conn,
net/bluetooth/hci_conn.c
586
idle_work.work);
net/bluetooth/hci_conn.c
617
static void hci_conn_auto_accept(struct work_struct *work)
net/bluetooth/hci_conn.c
619
struct hci_conn *conn = container_of(work, struct hci_conn,
net/bluetooth/hci_conn.c
620
auto_accept_work.work);
net/bluetooth/hci_conn.c
643
static void le_conn_timeout(struct work_struct *work)
net/bluetooth/hci_conn.c
645
struct hci_conn *conn = container_of(work, struct hci_conn,
net/bluetooth/hci_conn.c
646
le_conn_timeout.work);
net/bluetooth/hci_core.c
1013
static void hci_power_off(struct work_struct *work)
net/bluetooth/hci_core.c
1015
struct hci_dev *hdev = container_of(work, struct hci_dev,
net/bluetooth/hci_core.c
1016
power_off.work);
net/bluetooth/hci_core.c
1023
static void hci_error_reset(struct work_struct *work)
net/bluetooth/hci_core.c
1025
struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
net/bluetooth/hci_core.c
1462
static void hci_cmd_timeout(struct work_struct *work)
net/bluetooth/hci_core.c
1464
struct hci_dev *hdev = container_of(work, struct hci_dev,
net/bluetooth/hci_core.c
1465
cmd_timer.work);
net/bluetooth/hci_core.c
1485
static void hci_ncmd_timeout(struct work_struct *work)
net/bluetooth/hci_core.c
1487
struct hci_dev *hdev = container_of(work, struct hci_dev,
net/bluetooth/hci_core.c
1488
ncmd_timer.work);
net/bluetooth/hci_core.c
1691
static void adv_instance_rpa_expired(struct work_struct *work)
net/bluetooth/hci_core.c
1693
struct adv_info *adv_instance = container_of(work, struct adv_info,
net/bluetooth/hci_core.c
1694
rpa_expired_cb.work);
net/bluetooth/hci_core.c
3806
static void hci_tx_work(struct work_struct *work)
net/bluetooth/hci_core.c
3808
struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
net/bluetooth/hci_core.c
4027
static void hci_rx_work(struct work_struct *work)
net/bluetooth/hci_core.c
4029
struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
net/bluetooth/hci_core.c
4138
static void hci_cmd_work(struct work_struct *work)
net/bluetooth/hci_core.c
4140
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
net/bluetooth/hci_core.c
50
static void hci_rx_work(struct work_struct *work);
net/bluetooth/hci_core.c
51
static void hci_cmd_work(struct work_struct *work);
net/bluetooth/hci_core.c
52
static void hci_tx_work(struct work_struct *work);
net/bluetooth/hci_core.c
944
static void hci_power_on(struct work_struct *work)
net/bluetooth/hci_core.c
946
struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
net/bluetooth/hci_sync.c
305
static void hci_cmd_sync_work(struct work_struct *work)
net/bluetooth/hci_sync.c
307
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
net/bluetooth/hci_sync.c
342
static void hci_cmd_sync_cancel_work(struct work_struct *work)
net/bluetooth/hci_sync.c
344
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);
net/bluetooth/hci_sync.c
364
static void le_scan_disable(struct work_struct *work)
net/bluetooth/hci_sync.c
366
struct hci_dev *hdev = container_of(work, struct hci_dev,
net/bluetooth/hci_sync.c
367
le_scan_disable.work);
net/bluetooth/hci_sync.c
447
static void reenable_adv(struct work_struct *work)
net/bluetooth/hci_sync.c
449
struct hci_dev *hdev = container_of(work, struct hci_dev,
net/bluetooth/hci_sync.c
553
static void adv_timeout_expire(struct work_struct *work)
net/bluetooth/hci_sync.c
556
struct hci_dev *hdev = container_of(work, struct hci_dev,
net/bluetooth/hci_sync.c
557
adv_instance_expire.work);
net/bluetooth/hci_sync.c
586
static void interleave_scan_work(struct work_struct *work)
net/bluetooth/hci_sync.c
588
struct hci_dev *hdev = container_of(work, struct hci_dev,
net/bluetooth/hci_sync.c
589
interleave_scan.work);
net/bluetooth/hidp/core.c
888
static void hidp_session_dev_work(struct work_struct *work)
net/bluetooth/hidp/core.c
890
struct hidp_session *session = container_of(work,
net/bluetooth/iso.c
153
static void iso_sock_timeout(struct work_struct *work)
net/bluetooth/iso.c
155
struct iso_conn *conn = container_of(work, struct iso_conn,
net/bluetooth/iso.c
156
timeout_work.work);
net/bluetooth/l2cap_core.c
1676
static void l2cap_info_timeout(struct work_struct *work)
net/bluetooth/l2cap_core.c
1678
struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
net/bluetooth/l2cap_core.c
1679
info_timer.work);
net/bluetooth/l2cap_core.c
1895
static void l2cap_monitor_timeout(struct work_struct *work)
net/bluetooth/l2cap_core.c
1897
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
net/bluetooth/l2cap_core.c
1898
monitor_timer.work);
net/bluetooth/l2cap_core.c
1916
static void l2cap_retrans_timeout(struct work_struct *work)
net/bluetooth/l2cap_core.c
1918
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
net/bluetooth/l2cap_core.c
1919
retrans_timer.work);
net/bluetooth/l2cap_core.c
3151
static void l2cap_ack_timeout(struct work_struct *work)
net/bluetooth/l2cap_core.c
3153
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
net/bluetooth/l2cap_core.c
3154
ack_timer.work);
net/bluetooth/l2cap_core.c
405
static void l2cap_chan_timeout(struct work_struct *work)
net/bluetooth/l2cap_core.c
407
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
net/bluetooth/l2cap_core.c
408
chan_timer.work);
net/bluetooth/l2cap_core.c
62
static void l2cap_retrans_timeout(struct work_struct *work);
net/bluetooth/l2cap_core.c
63
static void l2cap_monitor_timeout(struct work_struct *work);
net/bluetooth/l2cap_core.c
64
static void l2cap_ack_timeout(struct work_struct *work);
net/bluetooth/l2cap_core.c
6968
static void process_pending_rx(struct work_struct *work)
net/bluetooth/l2cap_core.c
6970
struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
net/bluetooth/l2cap_core.c
739
static void l2cap_conn_update_id_addr(struct work_struct *work)
net/bluetooth/l2cap_core.c
741
struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
net/bluetooth/l2cap_core.c
742
id_addr_timer.work);
net/bluetooth/mgmt.c
1015
static void service_cache_off(struct work_struct *work)
net/bluetooth/mgmt.c
1017
struct hci_dev *hdev = container_of(work, struct hci_dev,
net/bluetooth/mgmt.c
1018
service_cache.work);
net/bluetooth/mgmt.c
1038
static void rpa_expired(struct work_struct *work)
net/bluetooth/mgmt.c
1040
struct hci_dev *hdev = container_of(work, struct hci_dev,
net/bluetooth/mgmt.c
1041
rpa_expired.work);
net/bluetooth/mgmt.c
1055
static void discov_off(struct work_struct *work)
net/bluetooth/mgmt.c
1057
struct hci_dev *hdev = container_of(work, struct hci_dev,
net/bluetooth/mgmt.c
1058
discov_off.work);
net/bluetooth/mgmt.c
1127
static void mesh_send_done(struct work_struct *work)
net/bluetooth/mgmt.c
1129
struct hci_dev *hdev = container_of(work, struct hci_dev,
net/bluetooth/mgmt.c
1130
mesh_send_done.work);
net/bluetooth/sco.c
141
static void sco_sock_timeout(struct work_struct *work)
net/bluetooth/sco.c
143
struct sco_conn *conn = container_of(work, struct sco_conn,
net/bluetooth/sco.c
144
timeout_work.work);
net/bluetooth/smp.c
1371
static void smp_timeout(struct work_struct *work)
net/bluetooth/smp.c
1373
struct smp_chan *smp = container_of(work, struct smp_chan,
net/bluetooth/smp.c
1374
security_timer.work);
net/bridge/br_cfm.c
267
static void ccm_tx_work_expired(struct work_struct *work)
net/bridge/br_cfm.c
274
del_work = to_delayed_work(work);
net/bridge/br_cfm.c
295
static void ccm_rx_work_expired(struct work_struct *work)
net/bridge/br_cfm.c
301
del_work = to_delayed_work(work);
net/bridge/br_fdb.c
539
void br_fdb_cleanup(struct work_struct *work)
net/bridge/br_fdb.c
541
struct net_bridge *br = container_of(work, struct net_bridge,
net/bridge/br_fdb.c
542
gc_work.work);
net/bridge/br_mrp.c
279
static void br_mrp_test_work_expired(struct work_struct *work)
net/bridge/br_mrp.c
281
struct delayed_work *del_work = to_delayed_work(work);
net/bridge/br_mrp.c
352
static void br_mrp_in_test_work_expired(struct work_struct *work)
net/bridge/br_mrp.c
354
struct delayed_work *del_work = to_delayed_work(work);
net/bridge/br_multicast.c
4075
static void br_multicast_gc_work(struct work_struct *work)
net/bridge/br_multicast.c
4077
struct net_bridge *br = container_of(work, struct net_bridge,
net/bridge/br_private.h
847
void br_fdb_cleanup(struct work_struct *work);
net/caif/chnl_net.c
119
static void close_work(struct work_struct *work)
net/ceph/crush/mapper.c
101
work->perm[i] = i;
net/ceph/crush/mapper.c
102
work->perm[work->perm[0]] = 0;
net/ceph/crush/mapper.c
103
work->perm_n = 1;
net/ceph/crush/mapper.c
107
for (i = 0; i < work->perm_n; i++)
net/ceph/crush/mapper.c
108
dprintk(" perm_choose have %d: %d\n", i, work->perm[i]);
net/ceph/crush/mapper.c
109
while (work->perm_n <= pr) {
net/ceph/crush/mapper.c
110
unsigned int p = work->perm_n;
net/ceph/crush/mapper.c
116
unsigned int t = work->perm[p + i];
net/ceph/crush/mapper.c
117
work->perm[p + i] = work->perm[p];
net/ceph/crush/mapper.c
118
work->perm[p] = t;
net/ceph/crush/mapper.c
122
work->perm_n++;
net/ceph/crush/mapper.c
125
dprintk(" perm_choose %d: %d\n", i, work->perm[i]);
net/ceph/crush/mapper.c
127
s = work->perm[pr];
net/ceph/crush/mapper.c
136
struct crush_work_bucket *work, int x, int r)
net/ceph/crush/mapper.c
138
return bucket_perm_choose(&bucket->h, work, x, r);
net/ceph/crush/mapper.c
377
struct crush_work_bucket *work,
net/ceph/crush/mapper.c
388
work, x, r);
net/ceph/crush/mapper.c
454
struct crush_work *work,
net/ceph/crush/mapper.c
514
in, work->work[-1-in->id],
net/ceph/crush/mapper.c
518
in, work->work[-1-in->id],
net/ceph/crush/mapper.c
567
work,
net/ceph/crush/mapper.c
647
struct crush_work *work,
net/ceph/crush/mapper.c
728
in, work->work[-1-in->id],
net/ceph/crush/mapper.c
75
struct crush_work_bucket *work,
net/ceph/crush/mapper.c
780
work,
net/ceph/crush/mapper.c
82
if (work->perm_x != (__u32)x || work->perm_n == 0) {
net/ceph/crush/mapper.c
84
work->perm_x = x;
net/ceph/crush/mapper.c
865
w->work = v;
net/ceph/crush/mapper.c
871
w->work[b] = v;
net/ceph/crush/mapper.c
877
w->work[b]->perm_x = 0;
net/ceph/crush/mapper.c
878
w->work[b]->perm_n = 0;
net/ceph/crush/mapper.c
879
w->work[b]->perm = v;
net/ceph/crush/mapper.c
90
work->perm[0] = s;
net/ceph/crush/mapper.c
91
work->perm_n = 0xffff; /* magic value, see below */
net/ceph/crush/mapper.c
96
work->perm[i] = i;
net/ceph/crush/mapper.c
97
work->perm_n = 0;
net/ceph/crush/mapper.c
98
} else if (work->perm_n == 0xffff) {
net/ceph/messenger.c
1446
if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
net/ceph/messenger.c
1462
if (cancel_delayed_work(&con->work)) {
net/ceph/messenger.c
1542
static void ceph_con_workfn(struct work_struct *work)
net/ceph/messenger.c
1544
struct ceph_connection *con = container_of(work, struct ceph_connection,
net/ceph/messenger.c
1545
work.work);
net/ceph/messenger.c
647
INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
net/ceph/mon_client.c
1083
static void delayed_work(struct work_struct *work)
net/ceph/mon_client.c
1086
container_of(work, struct ceph_mon_client, delayed_work.work);
net/ceph/osd_client.c
2529
static void complete_request_workfn(struct work_struct *work)
net/ceph/osd_client.c
2532
container_of(work, struct ceph_osd_request, r_complete_work);
net/ceph/osd_client.c
2925
struct work_struct work;
net/ceph/osd_client.c
2954
INIT_WORK(&lwork->work, workfn);
net/ceph/osd_client.c
2983
queue_work(osdc->notify_wq, &lwork->work);
net/ceph/osd_client.c
2988
struct linger_work *lwork = container_of(w, struct linger_work, work);
net/ceph/osd_client.c
3011
struct linger_work *lwork = container_of(w, struct linger_work, work);
net/ceph/osd_client.c
3429
static void handle_timeout(struct work_struct *work)
net/ceph/osd_client.c
3432
container_of(work, struct ceph_osd_client, timeout_work.work);
net/ceph/osd_client.c
3518
static void handle_osds_timeout(struct work_struct *work)
net/ceph/osd_client.c
3521
container_of(work, struct ceph_osd_client,
net/ceph/osd_client.c
3522
osds_timeout_work.work);
net/ceph/osdmap.c
1001
WARN_ON(!list_empty(&work->item));
net/ceph/osdmap.c
1002
kvfree(work);
net/ceph/osdmap.c
1015
struct crush_work *work)
net/ceph/osdmap.c
1019
list_add(&work->item, &wsm->idle_ws);
net/ceph/osdmap.c
1026
struct crush_work *work;
net/ceph/osdmap.c
1029
work = list_first_entry(&wsm->idle_ws, struct crush_work,
net/ceph/osdmap.c
1031
list_del_init(&work->item);
net/ceph/osdmap.c
1032
free_workspace(work);
net/ceph/osdmap.c
1045
struct crush_work *work;
net/ceph/osdmap.c
1051
work = list_first_entry(&wsm->idle_ws, struct crush_work,
net/ceph/osdmap.c
1053
list_del_init(&work->item);
net/ceph/osdmap.c
1056
return work;
net/ceph/osdmap.c
1072
work = alloc_workspace(c);
net/ceph/osdmap.c
1073
if (!work) {
net/ceph/osdmap.c
1085
return work;
net/ceph/osdmap.c
1093
struct crush_work *work)
net/ceph/osdmap.c
1097
list_add(&work->item, &wsm->idle_ws);
net/ceph/osdmap.c
1104
free_workspace(work);
net/ceph/osdmap.c
1254
struct crush_work *work;
net/ceph/osdmap.c
1259
work = alloc_workspace(crush);
net/ceph/osdmap.c
1260
if (!work) {
net/ceph/osdmap.c
1269
add_initial_workspace(&map->crush_wsm, work);
net/ceph/osdmap.c
2493
struct crush_work *work;
net/ceph/osdmap.c
2504
work = get_workspace(&map->crush_wsm, map->crush);
net/ceph/osdmap.c
2506
weight, weight_max, work,
net/ceph/osdmap.c
2508
put_workspace(&map->crush_wsm, work);
net/ceph/osdmap.c
983
struct crush_work *work;
net/ceph/osdmap.c
990
work = kvmalloc(work_size, GFP_NOIO);
net/ceph/osdmap.c
991
if (!work)
net/ceph/osdmap.c
994
INIT_LIST_HEAD(&work->item);
net/ceph/osdmap.c
995
crush_init_workspace(c, work);
net/ceph/osdmap.c
996
return work;
net/ceph/osdmap.c
999
static void free_workspace(struct crush_work *work)
net/core/dev.c
2369
static void netstamp_clear(struct work_struct *work)
net/core/dev.c
6472
static void flush_backlog(struct work_struct *work)
net/core/dev.c
6627
int work = 0;
net/core/dev.c
6647
if (++work >= quota) {
net/core/dev.c
6648
rps_input_queue_head_add(sd, work);
net/core/dev.c
6649
return work;
net/core/dev.c
6677
if (work)
net/core/dev.c
6678
rps_input_queue_head_add(sd, work);
net/core/dev.c
6679
return work;
net/core/dev.c
6933
int work = 0;
net/core/dev.c
6959
work = napi_poll(napi, budget);
net/core/dev.c
6960
trace_napi_poll(napi, work, budget);
net/core/dev.c
6963
if (work > 0)
net/core/dev.c
6965
LINUX_MIB_BUSYPOLLRXPACKETS, work);
net/core/dev.c
7697
int work, weight;
net/core/dev.c
7707
work = 0;
net/core/dev.c
7709
work = n->poll(n, weight);
net/core/dev.c
7710
trace_napi_poll(n, work, weight);
net/core/dev.c
7715
if (unlikely(work > weight))
net/core/dev.c
7717
n->poll, work, weight);
net/core/dev.c
7719
if (likely(work < weight))
net/core/dev.c
7720
return work;
net/core/dev.c
7729
return work;
net/core/dev.c
7736
if (napi_complete_done(n, work)) {
net/core/dev.c
7742
return work;
net/core/dev.c
7754
return work;
net/core/dev.c
7759
return work;
net/core/dev.c
7766
int work;
net/core/dev.c
7772
work = __napi_poll(n, &do_repoll);
net/core/dev.c
7784
return work;
net/core/drop_monitor.c
115
int work, int budget);
net/core/drop_monitor.c
116
void (*work_item_func)(struct work_struct *work);
net/core/drop_monitor.c
117
void (*hw_work_item_func)(struct work_struct *work);
net/core/drop_monitor.c
190
static void send_dm_alert(struct work_struct *work)
net/core/drop_monitor.c
195
data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
net/core/drop_monitor.c
275
int work, int budget)
net/core/drop_monitor.c
408
static void net_dm_hw_summary_work(struct work_struct *work)
net/core/drop_monitor.c
415
hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
net/core/drop_monitor.c
542
int work, int budget)
net/core/drop_monitor.c
725
static void net_dm_packet_work(struct work_struct *work)
net/core/drop_monitor.c
732
data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
net/core/drop_monitor.c
943
static void net_dm_hw_packet_work(struct work_struct *work)
net/core/drop_monitor.c
950
hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
net/core/neighbour.c
1664
static void neigh_managed_work(struct work_struct *work)
net/core/neighbour.c
1666
struct neigh_table *tbl = container_of(work, struct neigh_table,
net/core/neighbour.c
1667
managed_work.work);
net/core/neighbour.c
975
static void neigh_periodic_work(struct work_struct *work)
net/core/neighbour.c
977
struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
net/core/net_namespace.c
664
static void cleanup_net(struct work_struct *work)
net/core/netpoll.c
144
int work;
net/core/netpoll.c
156
work = napi->poll(napi, 0);
net/core/netpoll.c
157
WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
net/core/netpoll.c
158
trace_napi_poll(napi, work, 0);
net/core/netpoll.c
542
static void refill_skbs_work_handler(struct work_struct *work)
net/core/netpoll.c
545
container_of(work, struct netpoll, refill_wq);
net/core/netpoll.c
88
static void queue_process(struct work_struct *work)
net/core/netpoll.c
91
container_of(work, struct netpoll_info, tx_work.work);
net/core/skmsg.c
1068
schedule_delayed_work(&psock->work, 0);
net/core/skmsg.c
1100
schedule_delayed_work(&psock->work, 0);
net/core/skmsg.c
670
static void sk_psock_backlog(struct work_struct *work)
net/core/skmsg.c
672
struct delayed_work *dwork = to_delayed_work(work);
net/core/skmsg.c
673
struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
net/core/skmsg.c
727
schedule_delayed_work(&psock->work, 1);
net/core/skmsg.c
784
INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
net/core/skmsg.c
863
static void sk_psock_destroy(struct work_struct *work)
net/core/skmsg.c
865
struct sk_psock *psock = container_of(to_rcu_work(work),
net/core/skmsg.c
871
cancel_delayed_work_sync(&psock->work);
net/core/skmsg.c
990
schedule_delayed_work(&psock_other->work, 0);
net/core/sock_diag.c
116
struct work_struct work;
net/core/sock_diag.c
144
static void sock_diag_broadcast_destroy_work(struct work_struct *work)
net/core/sock_diag.c
147
container_of(work, struct broadcast_sk, work);
net/core/sock_diag.c
184
INIT_WORK(&bsk->work, sock_diag_broadcast_destroy_work);
net/core/sock_diag.c
185
queue_work(broadcast_wq, &bsk->work);
net/core/sock_map.c
1691
cancel_delayed_work_sync(&psock->work);
net/devlink/core.c
306
static void devlink_release(struct work_struct *work)
net/devlink/core.c
310
devlink = container_of(to_rcu_work(work), struct devlink, rwork);
net/devlink/core.c
70
static void devlink_rel_nested_in_notify_work(struct work_struct *work)
net/devlink/core.c
72
struct devlink_rel *rel = container_of(work, struct devlink_rel,
net/devlink/core.c
73
nested_in.notify_work.work);
net/devlink/port.c
973
static void devlink_port_type_warn(struct work_struct *work)
net/devlink/port.c
975
struct devlink_port *port = container_of(to_delayed_work(work),
net/dsa/dsa.c
42
bool dsa_schedule_work(struct work_struct *work)
net/dsa/dsa.c
44
return queue_work(dsa_owq, work);
net/dsa/dsa.h
19
bool dsa_schedule_work(struct work_struct *work);
net/dsa/tag_ksz.c
247
void (*xmit_work_fn)(struct kthread_work *work);
net/dsa/tag_ksz.c
265
kthread_init_work(&xmit_work->work, xmit_work_fn);
net/dsa/tag_ksz.c
272
kthread_queue_work(xmit_worker, &xmit_work->work);
net/dsa/tag_ocelot_8021q.c
29
void (*xmit_work_fn)(struct kthread_work *work);
net/dsa/tag_ocelot_8021q.c
51
kthread_init_work(&xmit_work->work, xmit_work_fn);
net/dsa/tag_ocelot_8021q.c
58
kthread_queue_work(xmit_worker, &xmit_work->work);
net/dsa/tag_sja1105.c
145
void (*xmit_work_fn)(struct kthread_work *work);
net/dsa/tag_sja1105.c
159
kthread_init_work(&xmit_work->work, xmit_work_fn);
net/dsa/tag_sja1105.c
166
kthread_queue_work(xmit_worker, &xmit_work->work);
net/dsa/user.c
154
INIT_WORK(&standalone_work->work, dsa_user_standalone_event_work);
net/dsa/user.c
161
dsa_schedule_work(&standalone_work->work);
net/dsa/user.c
3629
static void dsa_user_switchdev_event_work(struct work_struct *work)
net/dsa/user.c
3632
container_of(work, struct dsa_switchdev_event_work, work);
net/dsa/user.c
37
struct work_struct work;
net/dsa/user.c
3750
INIT_WORK(&switchdev_work->work, dsa_user_switchdev_event_work);
net/dsa/user.c
3759
dsa_schedule_work(&switchdev_work->work);
net/dsa/user.c
55
struct work_struct work;
net/dsa/user.c
82
static void dsa_user_standalone_event_work(struct work_struct *work)
net/dsa/user.c
85
container_of(work, struct dsa_standalone_event_work, work);
net/ethtool/module.c
196
struct ethtool_module_fw_flash *work;
net/ethtool/module.c
200
list_for_each_entry(work, &module_fw_flash_work_list, list) {
net/ethtool/module.c
201
if (work->fw_update.ntf_params.portid == info->snd_portid &&
net/ethtool/module.c
202
work->fw_update.dev == module_fw->fw_update.dev) {
net/ethtool/module.c
221
static void module_flash_fw_work(struct work_struct *work)
net/ethtool/module.c
225
module_fw = container_of(work, struct ethtool_module_fw_flash, work);
net/ethtool/module.c
269
INIT_WORK(&module_fw->work, module_flash_fw_work);
net/ethtool/module.c
282
struct ethtool_module_fw_flash *work;
net/ethtool/module.c
285
list_for_each_entry(work, &module_fw_flash_work_list, list) {
net/ethtool/module.c
286
if (work->fw_update.dev == sk_priv->dev &&
net/ethtool/module.c
287
work->fw_update.ntf_params.portid == sk_priv->portid) {
net/ethtool/module.c
288
work->fw_update.ntf_params.closed_sock = true;
net/ethtool/module.c
338
schedule_work(&module_fw->work);
net/ethtool/module_fw.h
54
struct work_struct work;
net/ipv4/devinet.c
709
static void check_lifetime(struct work_struct *work)
net/ipv4/devinet.c
717
net = container_of(to_delayed_work(work), struct net, ipv4.addr_chk_work);
net/ipv4/inet_fragment.c
153
static void fqdir_free_fn(struct work_struct *work)
net/ipv4/inet_fragment.c
179
static void fqdir_work_fn(struct work_struct *work)
net/ipv4/inet_fragment.c
181
struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);
net/ipv4/nexthop.c
1887
static void nh_res_table_upkeep_dw(struct work_struct *work)
net/ipv4/nexthop.c
1889
struct delayed_work *dw = to_delayed_work(work);
net/ipv4/nexthop.c
558
static void nh_res_table_upkeep_dw(struct work_struct *work);
net/ipv4/tcp_output.c
1253
struct work_struct work;
net/ipv4/tcp_output.c
1291
static void tcp_tsq_workfn(struct work_struct *work)
net/ipv4/tcp_output.c
1293
struct tsq_work *tsq = container_of(work, struct tsq_work, work);
net/ipv4/tcp_output.c
1371
INIT_WORK(&tsq->work, tcp_tsq_workfn);
net/ipv4/tcp_output.c
1417
queue_work(system_bh_wq, &tsq->work);
net/ipv4/tcp_sigpool.c
207
static void cpool_cleanup_work_cb(struct work_struct *work)
net/ipv4/udp_tunnel_nic.c
307
queue_work(udp_tunnel_nic_workqueue, &utn->work);
net/ipv4/udp_tunnel_nic.c
41
struct work_struct work;
net/ipv4/udp_tunnel_nic.c
731
static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
net/ipv4/udp_tunnel_nic.c
734
container_of(work, struct udp_tunnel_nic, work);
net/ipv4/udp_tunnel_nic.c
760
INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);
net/ipv6/mcast.c
1538
static void mld_query_work(struct work_struct *work)
net/ipv6/mcast.c
1540
struct inet6_dev *idev = container_of(to_delayed_work(work),
net/ipv6/mcast.c
1642
static void mld_report_work(struct work_struct *work)
net/ipv6/mcast.c
1644
struct inet6_dev *idev = container_of(to_delayed_work(work),
net/ipv6/mcast.c
2288
static void mld_dad_work(struct work_struct *work)
net/ipv6/mcast.c
2290
struct inet6_dev *idev = container_of(to_delayed_work(work),
net/ipv6/mcast.c
2672
static void mld_gq_work(struct work_struct *work)
net/ipv6/mcast.c
2674
struct inet6_dev *idev = container_of(to_delayed_work(work),
net/ipv6/mcast.c
2686
static void mld_ifc_work(struct work_struct *work)
net/ipv6/mcast.c
2688
struct inet6_dev *idev = container_of(to_delayed_work(work),
net/ipv6/mcast.c
2716
static void mld_mca_work(struct work_struct *work)
net/ipv6/mcast.c
2718
struct ifmcaddr6 *ma = container_of(to_delayed_work(work),
net/ipv6/mcast.c
78
static void mld_mca_work(struct work_struct *work);
net/ipv6/route.c
632
struct work_struct work;
net/ipv6/route.c
641
struct __rt6_probe_work *work =
net/ipv6/route.c
642
container_of(w, struct __rt6_probe_work, work);
net/ipv6/route.c
644
addrconf_addr_solict_mult(&work->target, &mcaddr);
net/ipv6/route.c
645
ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
net/ipv6/route.c
646
netdev_put(work->dev, &work->dev_tracker);
net/ipv6/route.c
647
kfree(work);
net/ipv6/route.c
652
struct __rt6_probe_work *work = NULL;
net/ipv6/route.c
687
work = kmalloc_obj(*work, GFP_ATOMIC);
net/ipv6/route.c
688
if (work)
net/ipv6/route.c
694
work = kmalloc_obj(*work, GFP_ATOMIC);
net/ipv6/route.c
697
if (!work || cmpxchg(&fib6_nh->last_probe,
net/ipv6/route.c
699
kfree(work);
net/ipv6/route.c
701
INIT_WORK(&work->work, rt6_probe_deferred);
net/ipv6/route.c
702
work->target = *nh_gw;
net/ipv6/route.c
703
netdev_hold(dev, &work->dev_tracker, GFP_ATOMIC);
net/ipv6/route.c
704
work->dev = dev;
net/ipv6/route.c
705
schedule_work(&work->work);
net/iucv/iucv.c
156
static void iucv_work_fn(struct work_struct *work);
net/iucv/iucv.c
1768
static void iucv_work_fn(struct work_struct *work)
net/iucv/iucv.c
1801
struct iucv_irq_list *work;
net/iucv/iucv.c
1811
work = kmalloc_obj(struct iucv_irq_list, GFP_ATOMIC);
net/iucv/iucv.c
1812
if (!work) {
net/iucv/iucv.c
1816
memcpy(&work->data, p, sizeof(work->data));
net/iucv/iucv.c
1820
list_add_tail(&work->list, &iucv_work_queue);
net/iucv/iucv.c
1824
list_add_tail(&work->list, &iucv_task_queue);
net/l2tp/l2tp_core.c
1415
static void l2tp_tunnel_del_work(struct work_struct *work)
net/l2tp/l2tp_core.c
1417
struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
net/l2tp/l2tp_core.c
1739
static void l2tp_session_del_work(struct work_struct *work)
net/l2tp/l2tp_core.c
1741
struct l2tp_session *session = container_of(work, struct l2tp_session,
net/mac80211/agg-rx.c
135
wiphy_work_queue(sta->local->hw.wiphy, &sta->ampdu_mlme.work);
net/mac80211/agg-rx.c
162
wiphy_work_queue(sta->local->hw.wiphy, &sta->ampdu_mlme.work);
net/mac80211/agg-rx.c
515
wiphy_work_queue(sta->local->hw.wiphy, &sta->ampdu_mlme.work);
net/mac80211/agg-rx.c
533
wiphy_work_queue(sta->local->hw.wiphy, &sta->ampdu_mlme.work);
net/mac80211/agg-tx.c
743
wiphy_work_queue(local->hw.wiphy, &sta->ampdu_mlme.work);
net/mac80211/agg-tx.c
862
wiphy_work_queue(local->hw.wiphy, &sta->ampdu_mlme.work);
net/mac80211/agg-tx.c
902
wiphy_work_queue(local->hw.wiphy, &sta->ampdu_mlme.work);
net/mac80211/agg-tx.c
962
wiphy_work_queue(local->hw.wiphy, &sta->ampdu_mlme.work);
net/mac80211/cfg.c
4193
void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work)
net/mac80211/cfg.c
4196
container_of(work, struct ieee80211_link_data, csa.finalize_work);
net/mac80211/cfg.c
5222
struct wiphy_work *work)
net/mac80211/cfg.c
5225
container_of(work, struct ieee80211_link_data,
net/mac80211/cfg.c
5244
struct wiphy_work *work)
net/mac80211/cfg.c
5247
container_of(work, struct ieee80211_link_data,
net/mac80211/cfg.c
5248
color_collision_detect_work.work);
net/mac80211/ht.c
337
wiphy_work_cancel(sta->local->hw.wiphy, &sta->ampdu_mlme.work);
net/mac80211/ht.c
352
void ieee80211_ba_session_work(struct wiphy *wiphy, struct wiphy_work *work)
net/mac80211/ht.c
355
container_of(work, struct sta_info, ampdu_mlme.work);
net/mac80211/ht.c
417
wiphy_work_queue(sdata->local->hw.wiphy, work);
net/mac80211/ibss.c
1217
wiphy_work_queue(local->hw.wiphy, &sdata->work);
net/mac80211/ibss.c
1682
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/ibss.c
1808
wiphy_work_queue(local->hw.wiphy, &sdata->work);
net/mac80211/ibss.c
713
struct wiphy_work *work)
net/mac80211/ibss.c
716
container_of(work, struct ieee80211_sub_if_data,
net/mac80211/ibss.c
724
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/ieee80211_i.h
1162
struct wiphy_work work;
net/mac80211/ieee80211_i.h
2024
void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work);
net/mac80211/ieee80211_i.h
2057
struct wiphy_work *work);
net/mac80211/ieee80211_i.h
2078
void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work);
net/mac80211/ieee80211_i.h
2084
struct wiphy_work *work);
net/mac80211/ieee80211_i.h
2086
struct wiphy_work *work);
net/mac80211/ieee80211_i.h
2251
void ieee80211_ba_session_work(struct wiphy *wiphy, struct wiphy_work *work);
net/mac80211/ieee80211_i.h
2491
struct wiphy_work *work);
net/mac80211/ieee80211_i.h
2493
struct wiphy_work *work);
net/mac80211/ieee80211_i.h
2783
void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work);
net/mac80211/ieee80211_i.h
2787
struct wiphy_work *work);
net/mac80211/iface.c
1251
wiphy_work_init(&sdata->work, ieee80211_iface_work);
net/mac80211/iface.c
1779
static void ieee80211_iface_work(struct wiphy *wiphy, struct wiphy_work *work)
net/mac80211/iface.c
1782
container_of(work, struct ieee80211_sub_if_data, work);
net/mac80211/iface.c
1840
struct wiphy_work *work)
net/mac80211/iface.c
1843
container_of(work, struct ieee80211_sub_if_data,
net/mac80211/iface.c
1889
wiphy_work_init(&sdata->work, ieee80211_iface_work);
net/mac80211/iface.c
45
static void ieee80211_iface_work(struct wiphy *wiphy, struct wiphy_work *work);
net/mac80211/iface.c
636
wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/iface.c
866
if (WARN_ON(!list_empty(&sdata->work.entry)))
net/mac80211/iface.c
867
wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/key.c
1220
dec_tailroom_needed_wk.work);
net/mac80211/main.c
474
static void ieee80211_restart_work(struct work_struct *work)
net/mac80211/main.c
477
container_of(work, struct ieee80211_local, restart_work);
net/mac80211/main.c
88
struct wiphy_work *work)
net/mac80211/main.c
91
container_of(work, struct ieee80211_local, reconfig_filter);
net/mac80211/mesh.c
1174
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/mesh.c
1199
wiphy_work_queue(local->hw.wiphy, &sdata->work);
net/mac80211/mesh.c
49
wiphy_work_queue(local->hw.wiphy, &sdata->work);
net/mac80211/mesh.c
695
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/mesh.c
706
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/mesh_hwmp.c
1042
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/mesh_hwmp.c
1049
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/mlme.c
2511
struct wiphy_work *work)
net/mac80211/mlme.c
2514
container_of(work, struct ieee80211_link_data,
net/mac80211/mlme.c
2515
u.mgd.csa.switch_work.work);
net/mac80211/mlme.c
3494
struct wiphy_work *work)
net/mac80211/mlme.c
3497
container_of(work, struct ieee80211_local,
net/mac80211/mlme.c
3512
struct wiphy_work *work)
net/mac80211/mlme.c
3515
container_of(work, struct ieee80211_local,
net/mac80211/mlme.c
3589
void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work)
net/mac80211/mlme.c
3592
container_of(work, struct ieee80211_link_data,
net/mac80211/mlme.c
3593
dfs_cac_timer_work.work);
net/mac80211/mlme.c
3696
struct wiphy_work *work)
net/mac80211/mlme.c
3700
sdata = container_of(work, struct ieee80211_sub_if_data,
net/mac80211/mlme.c
3701
u.mgd.tx_tspec_wk.work);
net/mac80211/mlme.c
4459
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/mlme.c
4692
struct wiphy_work *work)
net/mac80211/mlme.c
4695
container_of(work, struct ieee80211_sub_if_data,
net/mac80211/mlme.c
4718
struct wiphy_work *work)
net/mac80211/mlme.c
4721
container_of(work, struct ieee80211_sub_if_data,
net/mac80211/mlme.c
6998
struct wiphy_work *work)
net/mac80211/mlme.c
7001
container_of(work, struct ieee80211_sub_if_data,
net/mac80211/mlme.c
7002
u.mgd.ml_reconf_work.work);
net/mac80211/mlme.c
7206
struct wiphy_work *work)
net/mac80211/mlme.c
7210
container_of(work, struct ieee80211_sub_if_data,
net/mac80211/mlme.c
7211
u.mgd.ttlm_work.work);
net/mac80211/mlme.c
7837
struct wiphy_work *work)
net/mac80211/mlme.c
7840
container_of(work, struct ieee80211_sub_if_data,
net/mac80211/mlme.c
7841
u.mgd.neg_ttlm_timeout_work.work);
net/mac80211/mlme.c
8200
struct wiphy_work *work)
net/mac80211/mlme.c
8203
container_of(work, struct ieee80211_sub_if_data,
net/mac80211/mlme.c
8391
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/mlme.c
8546
wiphy_work_queue(local->hw.wiphy, &sdata->work);
net/mac80211/mlme.c
8807
struct wiphy_work *work)
net/mac80211/mlme.c
8810
container_of(work, struct ieee80211_sub_if_data,
net/mac80211/mlme.c
8918
struct wiphy_work *work)
net/mac80211/mlme.c
8921
container_of(work, struct ieee80211_link_data,
net/mac80211/mlme.c
8929
struct wiphy_work *work)
net/mac80211/mlme.c
8932
container_of(work, struct ieee80211_sub_if_data,
net/mac80211/mlme.c
8933
u.mgd.reconf.wk.work);
net/mac80211/mlme.c
8987
struct wiphy_work *work)
net/mac80211/mlme.c
8990
container_of(work, struct ieee80211_link_data,
net/mac80211/ocb.c
158
wiphy_work_queue(local->hw.wiphy, &sdata->work);
net/mac80211/ocb.c
199
wiphy_work_queue(local->hw.wiphy, &sdata->work);
net/mac80211/ocb.c
84
wiphy_work_queue(local->hw.wiphy, &sdata->work);
net/mac80211/offchannel.c
261
static void ieee80211_hw_roc_start(struct wiphy *wiphy, struct wiphy_work *work)
net/mac80211/offchannel.c
264
container_of(work, struct ieee80211_local, hw_roc_start);
net/mac80211/offchannel.c
488
static void ieee80211_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
net/mac80211/offchannel.c
491
container_of(work, struct ieee80211_local, roc_work.work);
net/mac80211/offchannel.c
498
static void ieee80211_hw_roc_done(struct wiphy *wiphy, struct wiphy_work *work)
net/mac80211/offchannel.c
501
container_of(work, struct ieee80211_local, hw_roc_done);
net/mac80211/rx.c
239
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/scan.c
1095
void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work)
net/mac80211/scan.c
1098
container_of(work, struct ieee80211_local, scan_work.work);
net/mac80211/scan.c
1457
struct wiphy_work *work)
net/mac80211/scan.c
1460
container_of(work, struct ieee80211_local,
net/mac80211/scan.c
533
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
net/mac80211/sta_info.c
644
wiphy_work_init(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
net/mac80211/sta_info.h
291
struct wiphy_work work;
net/mac80211/status.c
783
&sdata->work);
net/mac80211/tdls.c
30
u.mgd.tdls_peer_del_work.work);
net/mac80211/util.c
2249
wiphy_work_queue(local->hw.wiphy, &sdata->work);
net/mac80211/util.c
3621
struct wiphy_work *work)
net/mac80211/util.c
3624
container_of(work, struct ieee80211_local, radar_detected_work);
net/mac80211/util.c
945
void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work)
net/mac80211/util.c
952
queue_work(local->workqueue, work);
net/mac802154/ieee802154_i.h
187
void ieee802154_xmit_sync_worker(struct work_struct *work);
net/mac802154/ieee802154_i.h
276
void mac802154_scan_worker(struct work_struct *work);
net/mac802154/ieee802154_i.h
284
void mac802154_rx_beacon_worker(struct work_struct *work);
net/mac802154/ieee802154_i.h
291
void mac802154_beacon_worker(struct work_struct *work);
net/mac802154/ieee802154_i.h
302
void mac802154_rx_mac_cmd_worker(struct work_struct *work);
net/mac802154/rx.c
32
void mac802154_rx_beacon_worker(struct work_struct *work)
net/mac802154/rx.c
35
container_of(work, struct ieee802154_local, rx_beacon_work);
net/mac802154/rx.c
71
void mac802154_rx_mac_cmd_worker(struct work_struct *work)
net/mac802154/rx.c
74
container_of(work, struct ieee802154_local, rx_mac_cmd_work);
net/mac802154/scan.c
174
void mac802154_scan_worker(struct work_struct *work)
net/mac802154/scan.c
177
container_of(work, struct ieee802154_local, scan_work.work);
net/mac802154/scan.c
401
void mac802154_beacon_worker(struct work_struct *work)
net/mac802154/scan.c
404
container_of(work, struct ieee802154_local, beacon_work.work);
net/mac802154/tx.c
25
void ieee802154_xmit_sync_worker(struct work_struct *work)
net/mac802154/tx.c
28
container_of(work, struct ieee802154_local, sync_tx_work);
net/mptcp/protocol.c
2940
static void mptcp_worker(struct work_struct *work)
net/mptcp/protocol.c
2942
struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
net/mptcp/protocol.c
2997
INIT_WORK(&msk->work, mptcp_worker);
net/mptcp/protocol.c
3085
if (cancel_work_sync(&msk->work))
net/mptcp/protocol.c
978
if (schedule_work(&mptcp_sk(sk)->work))
net/mptcp/protocol.h
330
struct work_struct work;
net/ncsi/internal.h
339
struct work_struct work; /* For channel management */
net/ncsi/ncsi-manage.c
1055
schedule_work(&ndp->work);
net/ncsi/ncsi-manage.c
1089
schedule_work(&ndp->work);
net/ncsi/ncsi-manage.c
1099
schedule_work(&ndp->work);
net/ncsi/ncsi-manage.c
1411
schedule_work(&ndp->work);
net/ncsi/ncsi-manage.c
1419
schedule_work(&ndp->work);
net/ncsi/ncsi-manage.c
1534
static void ncsi_dev_work(struct work_struct *work)
net/ncsi/ncsi-manage.c
1536
struct ncsi_dev_priv *ndp = container_of(work,
net/ncsi/ncsi-manage.c
1537
struct ncsi_dev_priv, work);
net/ncsi/ncsi-manage.c
1781
INIT_WORK(&ndp->work, ncsi_dev_work);
net/ncsi/ncsi-manage.c
1829
schedule_work(&ndp->work);
net/ncsi/ncsi-manage.c
1951
schedule_work(&ndp->work);
net/ncsi/ncsi-manage.c
1970
disable_work_sync(&ndp->work);
net/ncsi/ncsi-manage.c
412
schedule_work(&ndp->work);
net/netfilter/ipset/ip_set_hash_gen.h
557
mtype_gc(struct work_struct *work)
net/netfilter/ipset/ip_set_hash_gen.h
566
gc = container_of(work, struct htable_gc, dwork.work);
net/netfilter/ipvs/ip_vs_ctl.c
215
static void expire_nodest_conn_handler(struct work_struct *work)
net/netfilter/ipvs/ip_vs_ctl.c
219
ipvs = container_of(work, struct netns_ipvs,
net/netfilter/ipvs/ip_vs_ctl.c
220
expire_nodest_conn_work.work);
net/netfilter/ipvs/ip_vs_ctl.c
229
static void defense_work_handler(struct work_struct *work)
net/netfilter/ipvs/ip_vs_ctl.c
232
container_of(work, struct netns_ipvs, defense_work.work);
net/netfilter/ipvs/ip_vs_ctl.c
242
static void est_reload_work_handler(struct work_struct *work)
net/netfilter/ipvs/ip_vs_ctl.c
245
container_of(work, struct netns_ipvs, est_reload_work.work);
net/netfilter/ipvs/ip_vs_ctl.c
4398
cancel_work_sync(&ipvs->defense_work.work);
net/netfilter/ipvs/ip_vs_sync.c
1618
static void master_wakeup_work_handler(struct work_struct *work)
net/netfilter/ipvs/ip_vs_sync.c
1621
container_of(work, struct ipvs_master_sync_state,
net/netfilter/ipvs/ip_vs_sync.c
1622
master_wakeup_work.work);
net/netfilter/nf_conncount.c
550
static void tree_gc_worker(struct work_struct *work)
net/netfilter/nf_conncount.c
552
struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);
net/netfilter/nf_conncount.c
602
schedule_work(work);
net/netfilter/nf_conntrack_core.c
1513
static void gc_worker(struct work_struct *work)
net/netfilter/nf_conntrack_core.c
1523
gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
net/netfilter/nf_conntrack_ecache.c
106
static void ecache_work(struct work_struct *work)
net/netfilter/nf_conntrack_ecache.c
108
struct nf_conntrack_net *cnet = container_of(work, struct nf_conntrack_net, ecache.dwork.work);
net/netfilter/nf_flow_table_core.c
595
static void nf_flow_offload_work_gc(struct work_struct *work)
net/netfilter/nf_flow_table_core.c
599
flow_table = container_of(work, struct nf_flowtable, gc_work.work);
net/netfilter/nf_flow_table_offload.c
1036
static void flow_offload_work_handler(struct work_struct *work)
net/netfilter/nf_flow_table_offload.c
1041
offload = container_of(work, struct flow_offload_work, work);
net/netfilter/nf_flow_table_offload.c
1070
queue_work(nf_flow_offload_add_wq, &offload->work);
net/netfilter/nf_flow_table_offload.c
1073
queue_work(nf_flow_offload_del_wq, &offload->work);
net/netfilter/nf_flow_table_offload.c
1076
queue_work(nf_flow_offload_stats_wq, &offload->work);
net/netfilter/nf_flow_table_offload.c
1098
INIT_WORK(&offload->work, flow_offload_work_handler);
net/netfilter/nf_flow_table_offload.c
28
struct work_struct work;
net/netfilter/nf_nat_masquerade.c
123
INIT_WORK(&w->work, iterate_cleanup_work);
net/netfilter/nf_nat_masquerade.c
13
struct work_struct work;
net/netfilter/nf_nat_masquerade.c
130
schedule_work(&w->work);
net/netfilter/nf_nat_masquerade.c
78
static void iterate_cleanup_work(struct work_struct *work)
net/netfilter/nf_nat_masquerade.c
83
w = container_of(work, struct masq_dev_work, work);
net/netfilter/nf_tables_api.c
10426
static void nft_trans_gc_work(struct work_struct *work)
net/netfilter/nf_tables_api.c
152
static void nft_trans_gc_work(struct work_struct *work);
net/netfilter/nft_set_hash.c
410
static void nft_rhash_gc(struct work_struct *work)
net/netfilter/nft_set_hash.c
421
priv = container_of(work, struct nft_rhash, gc_work.work);
net/netfilter/xt_IDLETIMER.c
107
schedule_work(&timer->work);
net/netfilter/xt_IDLETIMER.c
115
schedule_work(&timer->work);
net/netfilter/xt_IDLETIMER.c
168
INIT_WORK(&info->timer->work, idletimer_tg_work);
net/netfilter/xt_IDLETIMER.c
220
INIT_WORK(&info->timer->work, idletimer_tg_work);
net/netfilter/xt_IDLETIMER.c
36
struct work_struct work;
net/netfilter/xt_IDLETIMER.c
429
cancel_work_sync(&info->timer->work);
net/netfilter/xt_IDLETIMER.c
460
cancel_work_sync(&info->timer->work);
net/netfilter/xt_IDLETIMER.c
93
static void idletimer_tg_work(struct work_struct *work)
net/netfilter/xt_IDLETIMER.c
95
struct idletimer_tg *timer = container_of(work, struct idletimer_tg,
net/netfilter/xt_IDLETIMER.c
96
work);
net/netfilter/xt_hashlimit.c
272
static void htable_gc(struct work_struct *work);
net/netfilter/xt_hashlimit.c
381
static void htable_gc(struct work_struct *work)
net/netfilter/xt_hashlimit.c
385
ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
net/nfc/core.c
983
static void nfc_check_pres_work(struct work_struct *work)
net/nfc/core.c
985
struct nfc_dev *dev = container_of(work, struct nfc_dev,
net/nfc/digital_core.c
110
static void digital_wq_cmd_complete(struct work_struct *work)
net/nfc/digital_core.c
113
struct nfc_digital_dev *ddev = container_of(work,
net/nfc/digital_core.c
152
static void digital_wq_cmd(struct work_struct *work)
net/nfc/digital_core.c
157
struct nfc_digital_dev *ddev = container_of(work,
net/nfc/digital_core.c
427
static void digital_wq_poll(struct work_struct *work)
net/nfc/digital_core.c
431
struct nfc_digital_dev *ddev = container_of(work,
net/nfc/digital_core.c
433
poll_work.work);
net/nfc/hci/core.c
126
static void nfc_hci_msg_rx_work(struct work_struct *work)
net/nfc/hci/core.c
128
struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
net/nfc/hci/core.c
63
static void nfc_hci_msg_tx_work(struct work_struct *work)
net/nfc/hci/core.c
65
struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
net/nfc/hci/llc_shdlc.c
590
static void llc_shdlc_sm_work(struct work_struct *work)
net/nfc/hci/llc_shdlc.c
592
struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work);
net/nfc/llcp_core.c
1517
static void nfc_llcp_rx_work(struct work_struct *work)
net/nfc/llcp_core.c
1519
struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
net/nfc/llcp_core.c
236
static void nfc_llcp_timeout_work(struct work_struct *work)
net/nfc/llcp_core.c
238
struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
net/nfc/llcp_core.c
254
static void nfc_llcp_sdreq_timeout_work(struct work_struct *work)
net/nfc/llcp_core.c
260
struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
net/nfc/llcp_core.c
761
static void nfc_llcp_tx_work(struct work_struct *work)
net/nfc/llcp_core.c
763
struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
net/nfc/nci/core.c
1520
static void nci_tx_work(struct work_struct *work)
net/nfc/nci/core.c
1522
struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
net/nfc/nci/core.c
1559
static void nci_rx_work(struct work_struct *work)
net/nfc/nci/core.c
1561
struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
net/nfc/nci/core.c
1611
static void nci_cmd_work(struct work_struct *work)
net/nfc/nci/core.c
1613
struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
net/nfc/nci/core.c
39
static void nci_cmd_work(struct work_struct *work);
net/nfc/nci/core.c
40
static void nci_rx_work(struct work_struct *work);
net/nfc/nci/core.c
41
static void nci_tx_work(struct work_struct *work);
net/nfc/nci/hci.c
405
static void nci_hci_msg_rx_work(struct work_struct *work)
net/nfc/nci/hci.c
408
container_of(work, struct nci_hci_dev, msg_rx_work);
net/nfc/nci/uart.c
71
static void nci_uart_write_work(struct work_struct *work)
net/nfc/nci/uart.c
73
struct nci_uart *nu = container_of(work, struct nci_uart, write_work);
net/nfc/netlink.c
1834
static void nfc_urelease_event_work(struct work_struct *work)
net/nfc/netlink.c
1836
struct urelease_work *w = container_of(work, struct urelease_work, w);
net/nfc/rawsock.c
188
static void rawsock_tx_work(struct work_struct *work)
net/nfc/rawsock.c
190
struct sock *sk = to_rawsock_sk(work);
net/openvswitch/datapath.c
136
static void ovs_dp_masks_rebalance(struct work_struct *work);
net/openvswitch/datapath.c
2553
static void ovs_dp_masks_rebalance(struct work_struct *work)
net/openvswitch/datapath.c
2555
struct ovs_net *ovs_net = container_of(work, struct ovs_net,
net/openvswitch/datapath.c
2556
masks_rebalance.work);
net/openvswitch/datapath.h
335
void ovs_dp_notify_wq(struct work_struct *work);
net/openvswitch/dp_notify.c
34
void ovs_dp_notify_wq(struct work_struct *work)
net/openvswitch/dp_notify.c
36
struct ovs_net *ovs_net = container_of(work, struct ovs_net, dp_notify_work);
net/psp/psp_sock.c
110
INIT_WORK(&pas->work, psp_assoc_free);
net/psp/psp_sock.c
111
schedule_work(&pas->work);
net/psp/psp_sock.c
93
static void psp_assoc_free(struct work_struct *work)
net/psp/psp_sock.c
95
struct psp_assoc *pas = container_of(work, struct psp_assoc, work);
net/qrtr/ns.c
26
struct work_struct work;
net/qrtr/ns.c
588
static void qrtr_ns_worker(struct work_struct *work)
net/qrtr/ns.c
684
queue_work(qrtr_ns.workqueue, &qrtr_ns.work);
net/qrtr/ns.c
693
INIT_WORK(&qrtr_ns.work, qrtr_ns_worker);
net/qrtr/ns.c
761
cancel_work_sync(&qrtr_ns.work);
net/rds/ib.c
101
struct rds_ib_device *rds_ibdev = container_of(work,
net/rds/ib.c
98
static void rds_ib_dev_free(struct work_struct *work)
net/rds/ib_mr.h
66
struct delayed_work work;
net/rds/ib_rdma.c
44
static void rds_ib_odp_mr_worker(struct work_struct *work);
net/rds/ib_rdma.c
473
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
net/rds/ib_rdma.c
475
struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
net/rds/ib_rdma.c
493
INIT_DELAYED_WORK(&ibmr->work, rds_ib_odp_mr_worker);
net/rds/ib_rdma.c
494
queue_delayed_work(rds_ib_mr_wq, &ibmr->work, 0);
net/rds/ib_rdma.c
696
static void rds_ib_odp_mr_worker(struct work_struct *work)
net/rds/ib_rdma.c
700
ibmr = container_of(work, struct rds_ib_mr, work.work);
net/rds/tcp.c
524
static void rds_tcp_accept_worker(struct work_struct *work)
net/rds/tcp.c
526
struct rds_tcp_net *rtn = container_of(work,
net/rds/threads.c
162
void rds_connect_worker(struct work_struct *work)
net/rds/threads.c
164
struct rds_conn_path *cp = container_of(work,
net/rds/threads.c
166
cp_conn_w.work);
net/rds/threads.c
191
void rds_send_worker(struct work_struct *work)
net/rds/threads.c
193
struct rds_conn_path *cp = container_of(work,
net/rds/threads.c
195
cp_send_w.work);
net/rds/threads.c
218
void rds_recv_worker(struct work_struct *work)
net/rds/threads.c
220
struct rds_conn_path *cp = container_of(work,
net/rds/threads.c
222
cp_recv_w.work);
net/rds/threads.c
243
void rds_shutdown_worker(struct work_struct *work)
net/rds/threads.c
245
struct rds_conn_path *cp = container_of(work,
net/rfkill/core.c
1022
static void rfkill_poll(struct work_struct *work)
net/rfkill/core.c
1026
rfkill = container_of(work, struct rfkill, poll_work.work);
net/rfkill/core.c
1040
static void rfkill_uevent_work(struct work_struct *work)
net/rfkill/core.c
1044
rfkill = container_of(work, struct rfkill, uevent_work);
net/rfkill/core.c
1051
static void rfkill_sync_work(struct work_struct *work)
net/rfkill/core.c
1053
struct rfkill *rfkill = container_of(work, struct rfkill, sync_work);
net/rfkill/core.c
177
static void rfkill_global_led_trigger_worker(struct work_struct *work)
net/rfkill/input.c
94
static void rfkill_op_handler(struct work_struct *work)
net/rxrpc/call_object.c
683
static void rxrpc_destroy_call(struct work_struct *work)
net/rxrpc/call_object.c
685
struct rxrpc_call *call = container_of(work, struct rxrpc_call, destroyer);
net/rxrpc/conn_event.c
378
void rxrpc_process_connection(struct work_struct *work)
net/rxrpc/conn_event.c
381
container_of(work, struct rxrpc_connection, processor);
net/rxrpc/conn_object.c
22
static void rxrpc_clean_up_connection(struct work_struct *work);
net/rxrpc/conn_object.c
306
static void rxrpc_clean_up_connection(struct work_struct *work)
net/rxrpc/conn_object.c
309
container_of(work, struct rxrpc_connection, destructor);
net/rxrpc/conn_object.c
387
void rxrpc_service_connection_reaper(struct work_struct *work)
net/rxrpc/conn_object.c
391
container_of(work, struct rxrpc_net, service_conn_reaper);
net/rxrpc/peer_event.c
316
void rxrpc_peer_keepalive_worker(struct work_struct *work)
net/rxrpc/peer_event.c
319
container_of(work, struct rxrpc_net, peer_keepalive_work);
net/rxrpc/rxperf.c
113
queue_work(rxperf_workqueue, &call->work);
net/rxrpc/rxperf.c
149
static void rxperf_charge_preallocation(struct work_struct *work)
net/rxrpc/rxperf.c
167
INIT_WORK(&call->work, rxperf_deliver_to_call);
net/rxrpc/rxperf.c
280
static void rxperf_deliver_to_call(struct work_struct *work)
net/rxrpc/rxperf.c
282
struct rxperf_call *call = container_of(work, struct rxperf_call, work);
net/rxrpc/rxperf.c
352
cancel_work(&call->work);
net/rxrpc/rxperf.c
52
struct work_struct work;
net/rxrpc/rxperf.c
67
void (*processor)(struct work_struct *work);
net/rxrpc/rxperf.c
74
static void rxperf_deliver_to_call(struct work_struct *work);
net/rxrpc/rxperf.c
78
static void rxperf_charge_preallocation(struct work_struct *work);
net/sched/act_ct.c
375
static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
net/sched/act_ct.c
380
ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
net/sched/cls_basic.c
100
struct basic_filter *f = container_of(to_rcu_work(work),
net/sched/cls_basic.c
98
static void basic_delete_filter_work(struct work_struct *work)
net/sched/cls_bpf.c
276
static void cls_bpf_delete_prog_work(struct work_struct *work)
net/sched/cls_bpf.c
278
struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
net/sched/cls_cgroup.c
68
static void cls_cgroup_destroy_work(struct work_struct *work)
net/sched/cls_cgroup.c
70
struct cls_cgroup_head *head = container_of(to_rcu_work(work),
net/sched/cls_flow.c
380
static void flow_destroy_filter_work(struct work_struct *work)
net/sched/cls_flow.c
382
struct flow_filter *f = container_of(to_rcu_work(work),
net/sched/cls_flower.c
389
static void fl_mask_free_work(struct work_struct *work)
net/sched/cls_flower.c
391
struct fl_flow_mask *mask = container_of(to_rcu_work(work),
net/sched/cls_flower.c
397
static void fl_uninit_mask_free_work(struct work_struct *work)
net/sched/cls_flower.c
399
struct fl_flow_mask *mask = container_of(to_rcu_work(work),
net/sched/cls_flower.c
440
static void fl_destroy_filter_work(struct work_struct *work)
net/sched/cls_flower.c
442
struct cls_fl_filter *f = container_of(to_rcu_work(work),
net/sched/cls_flower.c
588
static void fl_destroy_sleepable(struct work_struct *work)
net/sched/cls_flower.c
590
struct cls_fl_head *head = container_of(to_rcu_work(work),
net/sched/cls_fw.c
122
static void fw_delete_filter_work(struct work_struct *work)
net/sched/cls_fw.c
124
struct fw_filter *f = container_of(to_rcu_work(work),
net/sched/cls_matchall.c
58
static void mall_destroy_work(struct work_struct *work)
net/sched/cls_matchall.c
60
struct cls_mall_head *head = container_of(to_rcu_work(work),
net/sched/cls_route.c
262
static void route4_delete_filter_work(struct work_struct *work)
net/sched/cls_route.c
264
struct route4_filter *f = container_of(to_rcu_work(work),
net/sched/cls_u32.c
434
static void u32_delete_key_work(struct work_struct *work)
net/sched/cls_u32.c
436
struct tc_u_knode *key = container_of(to_rcu_work(work),
net/sched/cls_u32.c
451
static void u32_delete_key_freepf_work(struct work_struct *work)
net/sched/cls_u32.c
453
struct tc_u_knode *key = container_of(to_rcu_work(work),
net/sched/sch_hfsc.c
1343
xstats.work = cl->cl_total;
net/sched/sch_htb.c
1035
static void htb_work_func(struct work_struct *work)
net/sched/sch_htb.c
1037
struct htb_sched *q = container_of(work, struct htb_sched, work);
net/sched/sch_htb.c
1063
INIT_WORK(&q->work, htb_work_func);
net/sched/sch_htb.c
161
struct work_struct work;
net/sched/sch_htb.c
1624
cancel_work_sync(&q->work);
net/sched/sch_htb.c
993
schedule_work(&q->work);
net/sctp/associola.c
44
static void sctp_assoc_bh_rcv(struct work_struct *work);
net/sctp/associola.c
961
static void sctp_assoc_bh_rcv(struct work_struct *work)
net/sctp/associola.c
964
container_of(work, struct sctp_association,
net/sctp/endpointola.c
326
static void sctp_endpoint_bh_rcv(struct work_struct *work)
net/sctp/endpointola.c
329
container_of(work, struct sctp_endpoint,
net/sctp/endpointola.c
36
static void sctp_endpoint_bh_rcv(struct work_struct *work);
net/smc/af_smc.c
1607
static void smc_connect_work(struct work_struct *work)
net/smc/af_smc.c
1609
struct smc_sock *smc = container_of(work, struct smc_sock,
net/smc/af_smc.c
2450
static void smc_listen_work(struct work_struct *work)
net/smc/af_smc.c
2452
struct smc_sock *new_smc = container_of(work, struct smc_sock,
net/smc/af_smc.c
2595
static void smc_tcp_listen_work(struct work_struct *work)
net/smc/af_smc.c
2597
struct smc_sock *lsmc = container_of(work, struct smc_sock,
net/smc/smc_close.c
356
static void smc_close_passive_work(struct work_struct *work)
net/smc/smc_close.c
358
struct smc_connection *conn = container_of(work,
net/smc/smc_core.c
1744
static void smc_conn_abort_work(struct work_struct *work)
net/smc/smc_core.c
1746
struct smc_connection *conn = container_of(work,
net/smc/smc_core.c
1867
static void smc_link_down_work(struct work_struct *work)
net/smc/smc_core.c
1869
struct smc_link *link = container_of(work, struct smc_link,
net/smc/smc_core.c
56
static void smc_link_down_work(struct work_struct *work);
net/smc/smc_core.c
714
static void smc_lgr_free_work(struct work_struct *work)
net/smc/smc_core.c
716
struct smc_link_group *lgr = container_of(to_delayed_work(work),
net/smc/smc_core.c
750
static void smc_lgr_terminate_work(struct work_struct *work)
net/smc/smc_core.c
752
struct smc_link_group *lgr = container_of(work, struct smc_link_group,
net/smc/smc_ib.c
381
static void smc_ib_port_event_work(struct work_struct *work)
net/smc/smc_ib.c
384
work, struct smc_ib_device, port_event_work);
net/smc/smc_ism.c
397
struct work_struct work;
net/smc/smc_ism.c
445
static void smc_ism_event_work(struct work_struct *work)
net/smc/smc_ism.c
448
container_of(work, struct smc_ism_event_work, work);
net/smc/smc_ism.c
587
INIT_WORK(&wrk->work, smc_ism_event_work);
net/smc/smc_ism.c
590
queue_work(smcd->event_wq, &wrk->work);
net/smc/smc_llc.c
1559
static void smc_llc_add_link_work(struct work_struct *work)
net/smc/smc_llc.c
1561
struct smc_link_group *lgr = container_of(work, struct smc_link_group,
net/smc/smc_llc.c
1740
static void smc_llc_delete_link_work(struct work_struct *work)
net/smc/smc_llc.c
1742
struct smc_link_group *lgr = container_of(work, struct smc_link_group,
net/smc/smc_llc.c
1994
static void smc_llc_event_work(struct work_struct *work)
net/smc/smc_llc.c
1996
struct smc_link_group *lgr = container_of(work, struct smc_link_group,
net/smc/smc_llc.c
2114
static void smc_llc_testlink_work(struct work_struct *work)
net/smc/smc_llc.c
2116
struct smc_link *link = container_of(to_delayed_work(work),
net/smc/smc_tx.c
683
void smc_tx_work(struct work_struct *work)
net/smc/smc_tx.c
685
struct smc_connection *conn = container_of(to_delayed_work(work),
net/smc/smc_tx.h
31
void smc_tx_work(struct work_struct *work);
net/strparser/strparser.c
385
queue_work(strp_wq, &strp->work);
net/strparser/strparser.c
395
queue_work(strp_wq, &strp->work);
net/strparser/strparser.c
413
queue_work(strp_wq, &strp->work);
net/strparser/strparser.c
421
do_strp_work(container_of(w, struct strparser, work));
net/strparser/strparser.c
427
msg_timer_work.work);
net/strparser/strparser.c
482
INIT_WORK(&strp->work, strp_work);
net/strparser/strparser.c
495
queue_work(strp_wq, &strp->work);
net/strparser/strparser.c
507
cancel_work_sync(&strp->work);
net/strparser/strparser.c
524
queue_work(strp_wq, &strp->work);
net/sunrpc/cache.c
396
static void do_cache_clean(struct work_struct *work);
net/sunrpc/cache.c
514
static void do_cache_clean(struct work_struct *work)
net/sunrpc/clnt.c
971
static void rpc_free_client_work(struct work_struct *work)
net/sunrpc/clnt.c
973
struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);
net/sunrpc/rpc_pipe.c
83
rpc_timeout_upcall_queue(struct work_struct *work)
net/sunrpc/rpc_pipe.c
87
container_of(work, struct rpc_pipe, queue_timeout.work);
net/sunrpc/sched.c
1028
static void rpc_async_schedule(struct work_struct *work)
net/sunrpc/sched.c
1032
__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
net/sunrpc/sched.c
1190
static void rpc_async_release(struct work_struct *work)
net/sunrpc/sched.c
1194
rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
net/sunrpc/sched.c
767
static void __rpc_queue_timer_fn(struct work_struct *work)
net/sunrpc/sched.c
769
struct rpc_wait_queue *queue = container_of(work,
net/sunrpc/sched.c
771
timer_list.dwork.work);
net/sunrpc/xprt.c
2100
static void xprt_destroy_cb(struct work_struct *work)
net/sunrpc/xprt.c
2103
container_of(work, struct rpc_xprt, task_cleanup);
net/sunrpc/xprt.c
735
static void xprt_autoclose(struct work_struct *work)
net/sunrpc/xprt.c
738
container_of(work, struct rpc_xprt, task_cleanup);
net/sunrpc/xprtrdma/svc_rdma_rw.c
239
static void svc_rdma_write_info_free_async(struct work_struct *work)
net/sunrpc/xprtrdma/svc_rdma_rw.c
243
info = container_of(work, struct svc_rdma_write_info, wi_work);
net/sunrpc/xprtrdma/svc_rdma_sendto.c
261
static void svc_rdma_send_ctxt_put_async(struct work_struct *work)
net/sunrpc/xprtrdma/svc_rdma_sendto.c
265
ctxt = container_of(work, struct svc_rdma_send_ctxt, sc_work);
net/sunrpc/xprtrdma/transport.c
223
xprt_rdma_connect_worker(struct work_struct *work)
net/sunrpc/xprtrdma/transport.c
225
struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
net/sunrpc/xprtrdma/transport.c
226
rx_connect_worker.work);
net/sunrpc/xprtrdma/verbs.c
783
rpcrdma_mr_refresh_worker(struct work_struct *work)
net/sunrpc/xprtrdma/verbs.c
785
struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
net/sunrpc/xprtsock.c
1456
static void xs_udp_data_receive_workfn(struct work_struct *work)
net/sunrpc/xprtsock.c
1459
container_of(work, struct sock_xprt, recv_worker);
net/sunrpc/xprtsock.c
1935
static void xs_dummy_setup_socket(struct work_struct *work)
net/sunrpc/xprtsock.c
2213
static void xs_udp_setup_socket(struct work_struct *work)
net/sunrpc/xprtsock.c
2216
container_of(work, struct sock_xprt, connect_worker.work);
net/sunrpc/xprtsock.c
2418
static void xs_tcp_setup_socket(struct work_struct *work)
net/sunrpc/xprtsock.c
2421
container_of(work, struct sock_xprt, connect_worker.work);
net/sunrpc/xprtsock.c
2686
static void xs_tcp_tls_setup_socket(struct work_struct *work)
net/sunrpc/xprtsock.c
2689
container_of(work, struct sock_xprt, connect_worker.work);
net/sunrpc/xprtsock.c
2845
static void xs_error_handle(struct work_struct *work)
net/sunrpc/xprtsock.c
2847
struct sock_xprt *transport = container_of(work,
net/sunrpc/xprtsock.c
812
static void xs_stream_data_receive_workfn(struct work_struct *work)
net/sunrpc/xprtsock.c
815
container_of(work, struct sock_xprt, recv_worker);
net/switchdev/switchdev.c
102
static void switchdev_deferred_process_work(struct work_struct *work)
net/tipc/core.c
114
cancel_work_sync(&tn->work);
net/tipc/core.c
63
INIT_WORK(&tn->work, tipc_net_finalize_work);
net/tipc/core.h
145
struct work_struct work;
net/tipc/crypto.c
1221
if (cancel_delayed_work(&rx->work)) {
net/tipc/crypto.c
1420
if (queue_delayed_work(tx->wq, &rx->work, delay))
net/tipc/crypto.c
1514
INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx);
net/tipc/crypto.c
1516
INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx);
net/tipc/crypto.c
1533
cancel_delayed_work_sync(&c->work);
net/tipc/crypto.c
213
struct delayed_work work;
net/tipc/crypto.c
2342
if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
net/tipc/crypto.c
2355
static void tipc_crypto_work_rx(struct work_struct *work)
net/tipc/crypto.c
2357
struct delayed_work *dwork = to_delayed_work(work);
net/tipc/crypto.c
2358
struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work);
net/tipc/crypto.c
2403
if (resched && queue_delayed_work(tx->wq, &rx->work, delay))
net/tipc/crypto.c
2426
cancel_delayed_work_sync(&tx->work);
net/tipc/crypto.c
2431
queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay));
net/tipc/crypto.c
2444
static void tipc_crypto_work_tx(struct work_struct *work)
net/tipc/crypto.c
2446
struct delayed_work *dwork = to_delayed_work(work);
net/tipc/crypto.c
2447
struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work);
net/tipc/crypto.c
306
static void tipc_crypto_work_tx(struct work_struct *work);
net/tipc/crypto.c
307
static void tipc_crypto_work_rx(struct work_struct *work);
net/tipc/discover.c
171
schedule_work(&tn->work);
net/tipc/discover.c
314
schedule_work(&tn->work);
net/tipc/net.c
144
void tipc_net_finalize_work(struct work_struct *work)
net/tipc/net.c
146
struct tipc_net *tn = container_of(work, struct tipc_net, work);
net/tipc/net.h
45
void tipc_net_finalize_work(struct work_struct *work);
net/tipc/topsrv.c
114
static void tipc_conn_recv_work(struct work_struct *work);
net/tipc/topsrv.c
115
static void tipc_conn_send_work(struct work_struct *work);
net/tipc/topsrv.c
301
static void tipc_conn_send_work(struct work_struct *work)
net/tipc/topsrv.c
303
struct tipc_conn *con = container_of(work, struct tipc_conn, swork);
net/tipc/topsrv.c
418
static void tipc_conn_recv_work(struct work_struct *work)
net/tipc/topsrv.c
420
struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
net/tipc/topsrv.c
455
static void tipc_topsrv_accept(struct work_struct *work)
net/tipc/topsrv.c
457
struct tipc_topsrv *srv = container_of(work, struct tipc_topsrv, awork);
net/tipc/udp_media.c
102
struct work_struct work;
net/tipc/udp_media.c
806
static void cleanup_bearer(struct work_struct *work)
net/tipc/udp_media.c
808
struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
net/tipc/udp_media.c
844
INIT_WORK(&ub->work, cleanup_bearer);
net/tipc/udp_media.c
845
schedule_work(&ub->work);
net/tls/tls_device.c
69
static void tls_device_tx_del_task(struct work_struct *work)
net/tls/tls_device.c
72
container_of(work, struct tls_offload_context_tx, destruct_work);
net/tls/tls_strp.c
553
queue_work(tls_strp_wq, &strp->work);
net/tls/tls_strp.c
567
queue_work(tls_strp_wq, &strp->work);
net/tls/tls_strp.c
577
container_of(w, struct tls_strparser, work);
net/tls/tls_strp.c
614
INIT_WORK(&strp->work, tls_strp_work);
net/tls/tls_strp.c
626
cancel_work_sync(&strp->work);
net/tls/tls_sw.c
1159
cancel_delayed_work(&ctx->tx_work.work);
net/tls/tls_sw.c
1221
cancel_delayed_work(&ctx->tx_work.work);
net/tls/tls_sw.c
1259
cancel_delayed_work(&ctx->tx_work.work);
net/tls/tls_sw.c
1340
cancel_delayed_work(&ctx->tx_work.work);
net/tls/tls_sw.c
2536
disable_delayed_work_sync(&ctx->tx_work.work);
net/tls/tls_sw.c
2624
static void tx_work_handler(struct work_struct *work)
net/tls/tls_sw.c
2626
struct delayed_work *delayed_work = to_delayed_work(work);
net/tls/tls_sw.c
2628
struct tx_work, work);
net/tls/tls_sw.c
2653
schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
net/tls/tls_sw.c
2675
schedule_delayed_work(&tx_ctx->tx_work.work, 0);
net/tls/tls_sw.c
2711
INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
net/tls/tls_sw.c
517
schedule_delayed_work(&ctx->tx_work.work, 1);
net/unix/garbage.c
605
static void unix_gc(struct work_struct *work)
net/vmw_vsock/af_vsock.c
1614
static void vsock_connect_timeout(struct work_struct *work)
net/vmw_vsock/af_vsock.c
1619
vsk = container_of(work, struct vsock_sock, connect_work.work);
net/vmw_vsock/af_vsock.c
732
static void vsock_pending_work(struct work_struct *work)
net/vmw_vsock/af_vsock.c
739
vsk = container_of(work, struct vsock_sock, pending_work.work);
net/vmw_vsock/af_vsock.c
880
static void vsock_connect_timeout(struct work_struct *work);
net/vmw_vsock/hyperv_transport.c
495
static void hvs_close_timeout(struct work_struct *work)
net/vmw_vsock/hyperv_transport.c
498
container_of(work, struct vsock_sock, close_work.work);
net/vmw_vsock/virtio_transport.c
158
virtio_transport_send_pkt_work(struct work_struct *work)
net/vmw_vsock/virtio_transport.c
161
container_of(work, struct virtio_vsock, send_pkt_work);
net/vmw_vsock/virtio_transport.c
342
static void virtio_transport_tx_work(struct work_struct *work)
net/vmw_vsock/virtio_transport.c
345
container_of(work, struct virtio_vsock, tx_work);
net/vmw_vsock/virtio_transport.c
448
static void virtio_transport_event_work(struct work_struct *work)
net/vmw_vsock/virtio_transport.c
451
container_of(work, struct virtio_vsock, event_work);
net/vmw_vsock/virtio_transport.c
624
static void virtio_transport_rx_work(struct work_struct *work)
net/vmw_vsock/virtio_transport.c
627
container_of(work, struct virtio_vsock, rx_work);
net/vmw_vsock/virtio_transport_common.c
1258
static void virtio_transport_close_timeout(struct work_struct *work)
net/vmw_vsock/virtio_transport_common.c
1261
container_of(work, struct vsock_sock, close_work.work);
net/vmw_vsock/vmci_transport.c
1627
static void vmci_transport_cleanup(struct work_struct *work)
net/vmw_vsock/vmci_transport.c
37
static void vmci_transport_recv_pkt_work(struct work_struct *work);
net/vmw_vsock/vmci_transport.c
38
static void vmci_transport_cleanup(struct work_struct *work);
net/vmw_vsock/vmci_transport.c
63
struct work_struct work;
net/vmw_vsock/vmci_transport.c
793
INIT_WORK(&recv_pkt_info->work, vmci_transport_recv_pkt_work);
net/vmw_vsock/vmci_transport.c
795
schedule_work(&recv_pkt_info->work);
net/vmw_vsock/vmci_transport.c
899
static void vmci_transport_recv_pkt_work(struct work_struct *work)
net/vmw_vsock/vmci_transport.c
906
container_of(work, struct vmci_transport_recv_pkt_info, work);
net/vmw_vsock/vsock_loopback.c
123
static void vsock_loopback_work(struct work_struct *work)
net/vmw_vsock/vsock_loopback.c
126
container_of(work, struct vsock_loopback, pkt_work);
net/wireless/core.c
1713
void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work)
net/wireless/core.c
1718
trace_wiphy_work_queue(wiphy, work);
net/wireless/core.c
1721
if (list_empty(&work->entry))
net/wireless/core.c
1722
list_add_tail(&work->entry, &rdev->wiphy_work_list);
net/wireless/core.c
1729
void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work)
net/wireless/core.c
1736
trace_wiphy_work_cancel(wiphy, work);
net/wireless/core.c
1739
if (!list_empty(&work->entry))
net/wireless/core.c
1740
list_del_init(&work->entry);
net/wireless/core.c
1745
void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work)
net/wireless/core.c
1751
trace_wiphy_work_flush(wiphy, work);
net/wireless/core.c
1754
run = !work || !list_empty(&work->entry);
net/wireless/core.c
1758
cfg80211_process_wiphy_works(rdev, work);
net/wireless/core.c
1766
wiphy_work_queue(dwork->wiphy, &dwork->work);
net/wireless/core.c
1774
trace_wiphy_delayed_work_queue(wiphy, &dwork->work, delay);
net/wireless/core.c
1778
wiphy_work_queue(wiphy, &dwork->work);
net/wireless/core.c
1793
wiphy_work_cancel(wiphy, &dwork->work);
net/wireless/core.c
1803
wiphy_work_flush(wiphy, &dwork->work);
net/wireless/core.c
1819
wiphy_work_queue(hrwork->wiphy, &hrwork->work);
net/wireless/core.c
1829
trace_wiphy_hrtimer_work_queue(wiphy, &hrwork->work, delay);
net/wireless/core.c
1833
wiphy_work_queue(wiphy, &hrwork->work);
net/wireless/core.c
1849
wiphy_work_cancel(wiphy, &hrwork->work);
net/wireless/core.c
1859
wiphy_work_flush(wiphy, &hrwork->work);
net/wireless/core.c
318
static void cfg80211_rfkill_block_work(struct work_struct *work)
net/wireless/core.c
322
rdev = container_of(work, struct cfg80211_registered_device,
net/wireless/core.c
327
static void cfg80211_event_work(struct work_struct *work)
net/wireless/core.c
331
rdev = container_of(work, struct cfg80211_registered_device,
net/wireless/core.c
358
static void cfg80211_destroy_iface_wk(struct work_struct *work)
net/wireless/core.c
362
rdev = container_of(work, struct cfg80211_registered_device,
net/wireless/core.c
371
struct wiphy_work *work)
net/wireless/core.c
376
rdev = container_of(work, struct cfg80211_registered_device,
net/wireless/core.c
385
static void cfg80211_propagate_radar_detect_wk(struct work_struct *work)
net/wireless/core.c
389
rdev = container_of(work, struct cfg80211_registered_device,
net/wireless/core.c
401
static void cfg80211_propagate_cac_done_wk(struct work_struct *work)
net/wireless/core.c
405
rdev = container_of(work, struct cfg80211_registered_device,
net/wireless/core.c
417
static void cfg80211_wiphy_work(struct work_struct *work)
net/wireless/core.c
422
rdev = container_of(work, struct cfg80211_registered_device, wiphy_work);
net/wireless/core.c
436
queue_work(system_dfl_wq, work);
net/wireless/core.h
317
struct wiphy_work *work);
net/wireless/core.h
428
void cfg80211_autodisconnect_wk(struct work_struct *work);
net/wireless/core.h
431
void cfg80211_conn_work(struct work_struct *work);
net/wireless/core.h
455
void cfg80211_sched_scan_results_wk(struct work_struct *work);
net/wireless/core.h
484
void cfg80211_dfs_channels_update_work(struct work_struct *work);
net/wireless/core.h
496
void cfg80211_background_cac_done_wk(struct work_struct *work);
net/wireless/core.h
498
void cfg80211_background_cac_abort_wk(struct work_struct *work);
net/wireless/core.h
572
void cfg80211_pmsr_free_wk(struct work_struct *work);
net/wireless/debugfs.c
148
struct wiphy_work work;
net/wireless/debugfs.c
164
struct wiphy_work *work)
net/wireless/debugfs.c
166
struct debugfs_read_work *w = container_of(work, typeof(*w), work);
net/wireless/debugfs.c
177
wiphy_work_cancel(w->wiphy, &w->work);
net/wireless/debugfs.c
192
struct debugfs_read_work work = {
net/wireless/debugfs.c
200
.completion = COMPLETION_INITIALIZER_ONSTACK(work.completion),
net/wireless/debugfs.c
204
.cancel_data = &work,
net/wireless/debugfs.c
210
wiphy_work_init(&work.work, wiphy_locked_debugfs_read_work);
net/wireless/debugfs.c
211
wiphy_work_queue(wiphy, &work.work);
net/wireless/debugfs.c
214
wait_for_completion(&work.completion);
net/wireless/debugfs.c
217
if (work.ret < 0)
net/wireless/debugfs.c
218
return work.ret;
net/wireless/debugfs.c
220
if (WARN_ON(work.ret > bufsize))
net/wireless/debugfs.c
223
return simple_read_from_buffer(userbuf, count, ppos, buf, work.ret);
net/wireless/debugfs.c
228
struct wiphy_work work;
net/wireless/debugfs.c
244
struct wiphy_work *work)
net/wireless/debugfs.c
246
struct debugfs_write_work *w = container_of(work, typeof(*w), work);
net/wireless/debugfs.c
257
wiphy_work_cancel(w->wiphy, &w->work);
net/wireless/debugfs.c
271
struct debugfs_write_work work = {
net/wireless/debugfs.c
279
.completion = COMPLETION_INITIALIZER_ONSTACK(work.completion),
net/wireless/debugfs.c
283
.cancel_data = &work,
net/wireless/debugfs.c
295
wiphy_work_init(&work.work, wiphy_locked_debugfs_write_work);
net/wireless/debugfs.c
296
wiphy_work_queue(wiphy, &work.work);
net/wireless/debugfs.c
299
wait_for_completion(&work.completion);
net/wireless/debugfs.c
302
return work.ret;
net/wireless/mlme.c
1022
void cfg80211_dfs_channels_update_work(struct work_struct *work)
net/wireless/mlme.c
1024
struct delayed_work *delayed_work = to_delayed_work(work);
net/wireless/mlme.c
1227
void cfg80211_background_cac_done_wk(struct work_struct *work)
net/wireless/mlme.c
1229
struct delayed_work *delayed_work = to_delayed_work(work);
net/wireless/mlme.c
1238
void cfg80211_background_cac_abort_wk(struct work_struct *work)
net/wireless/mlme.c
1242
rdev = container_of(work, struct cfg80211_registered_device,
net/wireless/nl80211.c
20809
void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
net/wireless/nl80211.c
20811
struct wireless_dev *wdev = container_of(work, struct wireless_dev,
net/wireless/pmsr.c
645
void cfg80211_pmsr_free_wk(struct work_struct *work)
net/wireless/pmsr.c
647
struct wireless_dev *wdev = container_of(work, struct wireless_dev,
net/wireless/reg.c
237
static void reg_check_chans_work(struct work_struct *work);
net/wireless/reg.c
240
static void reg_todo(struct work_struct *work);
net/wireless/reg.c
2457
static void reg_check_chans_work(struct work_struct *work)
net/wireless/reg.c
3196
static void reg_todo(struct work_struct *work)
net/wireless/reg.c
485
static void reg_regdb_apply(struct work_struct *work)
net/wireless/reg.c
534
static void crda_timeout_work(struct work_struct *work);
net/wireless/reg.c
537
static void crda_timeout_work(struct work_struct *work)
net/wireless/scan.c
1276
void cfg80211_sched_scan_results_wk(struct work_struct *work)
net/wireless/scan.c
1281
rdev = container_of(work, struct cfg80211_registered_device,
net/wireless/sme.c
1580
void cfg80211_autodisconnect_wk(struct work_struct *work)
net/wireless/sme.c
1583
container_of(work, struct wireless_dev, disconnect_wk);
net/wireless/sme.c
246
void cfg80211_conn_work(struct work_struct *work)
net/wireless/sme.c
249
container_of(work, struct cfg80211_registered_device, conn_work);
net/wireless/sme.c
705
static void disconnect_work(struct work_struct *work)
net/wireless/trace.h
250
TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
net/wireless/trace.h
251
TP_ARGS(wiphy, work),
net/wireless/trace.h
259
__entry->instance = work;
net/wireless/trace.h
260
__entry->func = work ? work->func : NULL;
net/wireless/trace.h
267
TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
net/wireless/trace.h
268
TP_ARGS(wiphy, work)
net/wireless/trace.h
272
TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
net/wireless/trace.h
273
TP_ARGS(wiphy, work)
net/wireless/trace.h
277
TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
net/wireless/trace.h
278
TP_ARGS(wiphy, work)
net/wireless/trace.h
282
TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work),
net/wireless/trace.h
283
TP_ARGS(wiphy, work)
net/wireless/trace.h
287
TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work,
net/wireless/trace.h
289
TP_ARGS(wiphy, work, delay),
net/wireless/trace.h
298
__entry->instance = work;
net/wireless/trace.h
299
__entry->func = work->func;
net/wireless/trace.h
308
TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work,
net/wireless/trace.h
310
TP_ARGS(wiphy, work, delay),
net/wireless/trace.h
319
__entry->instance = work;
net/wireless/trace.h
320
__entry->func = work->func;
net/wireless/wext-core.c
412
static void wireless_nlevent_process(struct work_struct *work)
net/xdp/xdp_umem.c
67
static void xdp_umem_release_deferred(struct work_struct *work)
net/xdp/xdp_umem.c
69
struct xdp_umem *umem = container_of(work, struct xdp_umem, work);
net/xdp/xdp_umem.c
86
INIT_WORK(&umem->work, xdp_umem_release_deferred);
net/xdp/xdp_umem.c
87
schedule_work(&umem->work);
net/xdp/xsk_buff_pool.c
271
static void xp_release_deferred(struct work_struct *work)
net/xdp/xsk_buff_pool.c
273
struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
net/xdp/xsk_buff_pool.c
274
work);
net/xdp/xsk_buff_pool.c
305
INIT_WORK(&pool->work, xp_release_deferred);
net/xdp/xsk_buff_pool.c
306
schedule_work(&pool->work);
net/xfrm/espintcp.c
416
static void espintcp_tx_work(struct work_struct *work)
net/xfrm/espintcp.c
418
struct espintcp_ctx *ctx = container_of(work,
net/xfrm/espintcp.c
419
struct espintcp_ctx, work);
net/xfrm/espintcp.c
432
schedule_work(&ctx->work);
net/xfrm/espintcp.c
501
INIT_WORK(&ctx->work, espintcp_tx_work);
net/xfrm/espintcp.c
539
disable_work_sync(&ctx->work);
net/xfrm/xfrm_input.c
29
struct work_struct work;
net/xfrm/xfrm_input.c
778
static void xfrm_trans_reinject(struct work_struct *work)
net/xfrm/xfrm_input.c
780
struct xfrm_trans_tasklet *trans = container_of(work, struct xfrm_trans_tasklet, work);
net/xfrm/xfrm_input.c
814
schedule_work(&trans->work);
net/xfrm/xfrm_input.c
846
INIT_WORK(&trans->work, xfrm_trans_reinject);
net/xfrm/xfrm_nat_keepalive.c
197
static void nat_keepalive_work(struct work_struct *work)
net/xfrm/xfrm_nat_keepalive.c
206
net = container_of(work, struct net, xfrm.nat_keepalive_work.work);
net/xfrm/xfrm_policy.c
1282
static void xfrm_hash_rebuild(struct work_struct *work)
net/xfrm/xfrm_policy.c
1284
struct net *net = container_of(work, struct net,
net/xfrm/xfrm_policy.c
1285
xfrm.policy_hthresh.work);
net/xfrm/xfrm_policy.c
1407
schedule_work(&net->xfrm.policy_hthresh.work);
net/xfrm/xfrm_policy.c
4264
INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
net/xfrm/xfrm_policy.c
4285
disable_work_sync(&net->xfrm.policy_hthresh.work);
net/xfrm/xfrm_policy.c
726
static void xfrm_hash_resize(struct work_struct *work)
net/xfrm/xfrm_policy.c
728
struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
net/xfrm/xfrm_state.c
161
static void xfrm_hash_resize(struct work_struct *work)
net/xfrm/xfrm_state.c
163
struct net *net = container_of(work, struct net, xfrm.state_hash_work);
net/xfrm/xfrm_state.c
40
static void xfrm_state_gc_task(struct work_struct *work);
net/xfrm/xfrm_state.c
623
static void xfrm_state_gc_task(struct work_struct *work)
rust/helpers/workqueue.c
11
__init_work(work, onstack);
rust/helpers/workqueue.c
12
work->data = (atomic_long_t)WORK_DATA_INIT();
rust/helpers/workqueue.c
13
lockdep_init_map(&work->lockdep_map, name, key, 0);
rust/helpers/workqueue.c
14
INIT_LIST_HEAD(&work->entry);
rust/helpers/workqueue.c
15
work->func = func;
rust/helpers/workqueue.c
5
__rust_helper void rust_helper_init_work_with_key(struct work_struct *work,
samples/ftrace/sample-trace-array.c
24
static void trace_work_fn(struct work_struct *work)
samples/livepatch/livepatch-callbacks-busymod.c
34
static void busymod_work_func(struct work_struct *work);
samples/livepatch/livepatch-callbacks-busymod.c
35
static DECLARE_DELAYED_WORK(work, busymod_work_func);
samples/livepatch/livepatch-callbacks-busymod.c
37
static void busymod_work_func(struct work_struct *work)
samples/livepatch/livepatch-callbacks-busymod.c
47
schedule_delayed_work(&work, 0);
samples/livepatch/livepatch-callbacks-busymod.c
53
cancel_delayed_work_sync(&work);
samples/livepatch/livepatch-callbacks-demo.c
132
static void patched_work_func(struct work_struct *work)
samples/livepatch/livepatch-shadow-mod.c
139
static void alloc_work_func(struct work_struct *work);
samples/livepatch/livepatch-shadow-mod.c
142
static void alloc_work_func(struct work_struct *work)
samples/livepatch/livepatch-shadow-mod.c
163
static void cleanup_work_func(struct work_struct *work);
samples/livepatch/livepatch-shadow-mod.c
166
static void cleanup_work_func(struct work_struct *work)
samples/trace_printk/trace-printk.c
18
static void trace_printk_irq_work(struct irq_work *work)
samples/workqueue/stall_detector/wq_stall.c
36
static void stall_work2_fn(struct work_struct *work)
samples/workqueue/stall_detector/wq_stall.c
41
static void stall_work1_fn(struct work_struct *work)
security/apparmor/include/policy_unpack.h
114
struct work_struct work;
security/apparmor/policy_unpack.c
132
static void do_ploaddata_rmfs(struct work_struct *work)
security/apparmor/policy_unpack.c
134
struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
security/apparmor/policy_unpack.c
153
INIT_WORK(&d->work, do_ploaddata_rmfs);
security/apparmor/policy_unpack.c
154
schedule_work(&d->work);
security/integrity/ima/ima_queue_keys.c
34
static void ima_keys_handler(struct work_struct *work);
security/integrity/ima/ima_queue_keys.c
42
static void ima_keys_handler(struct work_struct *work)
security/keys/gc.c
178
static void key_garbage_collector(struct work_struct *work)
security/keys/gc.c
21
static void key_garbage_collector(struct work_struct *work);
security/keys/trusted-keys/trusted_tpm2.c
29
u8 *work = scratch, *work1;
security/keys/trusted-keys/trusted_tpm2.c
46
work = asn1_encode_oid(work, end_work, tpm2key_oid,
security/keys/trusted-keys/trusted_tpm2.c
57
work = asn1_encode_tag(work, end_work, 0, bool, w - bool);
security/keys/trusted-keys/trusted_tpm2.c
66
if (WARN(work - scratch + pub_len + priv_len + 14 > SCRATCH_SIZE,
security/keys/trusted-keys/trusted_tpm2.c
72
work = asn1_encode_integer(work, end_work, options->keyhandle);
security/keys/trusted-keys/trusted_tpm2.c
73
work = asn1_encode_octet_string(work, end_work, pub, pub_len);
security/keys/trusted-keys/trusted_tpm2.c
74
work = asn1_encode_octet_string(work, end_work, priv, priv_len);
security/keys/trusted-keys/trusted_tpm2.c
78
scratch, work - scratch);
security/landlock/ruleset.c
507
static void free_ruleset_work(struct work_struct *const work)
security/landlock/ruleset.c
511
ruleset = container_of(work, struct landlock_ruleset, work_free);
security/landlock/tsync.c
158
static void restrict_one_thread_callback(struct callback_head *work)
security/landlock/tsync.c
160
struct tsync_work *ctx = container_of(work, struct tsync_work, work);
security/landlock/tsync.c
257
struct tsync_work *work;
security/landlock/tsync.c
274
work = kzalloc_obj(*work, flags);
security/landlock/tsync.c
275
if (!work) {
security/landlock/tsync.c
283
s->works[i] = work;
security/landlock/tsync.c
417
init_task_work(&ctx->work, restrict_one_thread_callback);
security/landlock/tsync.c
418
err = task_work_add(thread, &ctx->work, TWA_SIGNAL);
security/landlock/tsync.c
454
&works->works[i]->work))
security/landlock/tsync.c
60
struct callback_head work;
security/yama/yama_lsm.c
101
if (task_work_add(current, &info->work, TWA_RESUME) == 0)
security/yama/yama_lsm.c
115
static void yama_relation_cleanup(struct work_struct *work)
security/yama/yama_lsm.c
42
static void yama_relation_cleanup(struct work_struct *work);
security/yama/yama_lsm.c
46
struct callback_head work;
security/yama/yama_lsm.c
52
static void __report_access(struct callback_head *work)
security/yama/yama_lsm.c
55
container_of(work, struct access_report_info, work);
security/yama/yama_lsm.c
95
init_task_work(&info->work, __report_access);
sound/aoa/aoa-gpio.h
62
struct delayed_work work;
sound/aoa/core/gpio-feature.c
210
static void ftr_handle_notify(struct work_struct *work)
sound/aoa/core/gpio-feature.c
213
container_of(work, struct gpio_notification, work.work);
sound/aoa/core/gpio-feature.c
274
INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify);
sound/aoa/core/gpio-feature.c
275
INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify);
sound/aoa/core/gpio-feature.c
276
INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify);
sound/aoa/core/gpio-feature.c
292
cancel_delayed_work_sync(&rt->headphone_notify.work);
sound/aoa/core/gpio-feature.c
293
cancel_delayed_work_sync(&rt->line_in_notify.work);
sound/aoa/core/gpio-feature.c
294
cancel_delayed_work_sync(&rt->line_out_notify.work);
sound/aoa/core/gpio-feature.c
304
schedule_delayed_work(¬if->work, 0);
sound/aoa/core/gpio-pmf.c
108
cancel_delayed_work_sync(&rt->headphone_notify.work);
sound/aoa/core/gpio-pmf.c
109
cancel_delayed_work_sync(&rt->line_in_notify.work);
sound/aoa/core/gpio-pmf.c
110
cancel_delayed_work_sync(&rt->line_out_notify.work);
sound/aoa/core/gpio-pmf.c
125
schedule_delayed_work(¬if->work, 0);
sound/aoa/core/gpio-pmf.c
72
static void pmf_handle_notify(struct work_struct *work)
sound/aoa/core/gpio-pmf.c
75
container_of(work, struct gpio_notification, work.work);
sound/aoa/core/gpio-pmf.c
86
INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify);
sound/aoa/core/gpio-pmf.c
87
INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify);
sound/aoa/core/gpio-pmf.c
88
INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify);
sound/core/compress_offload.c
56
static void error_delayed_work(struct work_struct *work);
sound/core/compress_offload.c
867
static void error_delayed_work(struct work_struct *work)
sound/core/compress_offload.c
871
stream = container_of(work, struct snd_compr_stream, error_work.work);
sound/core/misc.c
100
static void snd_fasync_work_fn(struct work_struct *work)
sound/core/rawmidi.c
121
static void snd_rawmidi_input_event_work(struct work_struct *work)
sound/core/rawmidi.c
124
container_of(work, struct snd_rawmidi_runtime, event_work);
sound/core/seq/oss/seq_oss_init.c
52
static void async_call_lookup_ports(struct work_struct *work)
sound/core/seq/seq_ump_client.c
382
static void handle_group_notify(struct work_struct *work)
sound/core/seq/seq_ump_client.c
385
container_of(work, struct seq_ump_client, group_notify_work);
sound/core/seq/seq_virmidi.c
133
static void snd_vmidi_output_work(struct work_struct *work)
sound/core/seq/seq_virmidi.c
140
vmidi = container_of(work, struct snd_virmidi, output_work);
sound/core/seq_device.c
120
static void autoload_drivers(struct work_struct *work)
sound/core/timer.c
805
static void snd_timer_work(struct work_struct *work)
sound/core/timer.c
807
struct snd_timer *timer = container_of(work, struct snd_timer, task_work);
sound/drivers/aloop.c
852
static void loopback_snd_timer_work(struct work_struct *work)
sound/drivers/aloop.c
856
cable = container_of(work, struct loopback_cable, snd_timer.event_work);
sound/drivers/pcsp/pcsp_lib.c
30
static void pcsp_call_pcm_elapsed(struct work_struct *work)
sound/drivers/serial-generic.c
67
static void snd_serial_generic_tx_work(struct work_struct *work)
sound/drivers/serial-generic.c
70
struct snd_serial_generic *drvdata = container_of(work, struct snd_serial_generic,
sound/firewire/amdtp-stream.c
1062
struct work_struct *work = current_work();
sound/firewire/amdtp-stream.c
1070
if (work && work != &s->period_work)
sound/firewire/amdtp-stream.c
627
static void pcm_period_work(struct work_struct *work)
sound/firewire/amdtp-stream.c
629
struct amdtp_stream *s = container_of(work, struct amdtp_stream,
sound/firewire/amdtp-stream.c
80
static void pcm_period_work(struct work_struct *work);
sound/firewire/fireface/ff-transaction.c
114
static void transmit_midi0_msg(struct work_struct *work)
sound/firewire/fireface/ff-transaction.c
116
struct snd_ff *ff = container_of(work, struct snd_ff, rx_midi_work[0]);
sound/firewire/fireface/ff-transaction.c
121
static void transmit_midi1_msg(struct work_struct *work)
sound/firewire/fireface/ff-transaction.c
123
struct snd_ff *ff = container_of(work, struct snd_ff, rx_midi_work[1]);
sound/firewire/oxfw/oxfw-scs1x.c
138
schedule_work(&scs->work);
sound/firewire/oxfw/oxfw-scs1x.c
174
static void scs_output_work(struct work_struct *work)
sound/firewire/oxfw/oxfw-scs1x.c
176
struct fw_scs1x *scs = container_of(work, struct fw_scs1x, work);
sound/firewire/oxfw/oxfw-scs1x.c
28
struct work_struct work;
sound/firewire/oxfw/oxfw-scs1x.c
322
schedule_work(&scs->work);
sound/firewire/oxfw/oxfw-scs1x.c
412
INIT_WORK(&scs->work, scs_output_work);
sound/firewire/tascam/tascam-transaction.c
168
schedule_work(&port->work);
sound/firewire/tascam/tascam-transaction.c
171
static void midi_port_work(struct work_struct *work)
sound/firewire/tascam/tascam-transaction.c
174
container_of(work, struct snd_fw_async_midi_port, work);
sound/firewire/tascam/tascam-transaction.c
188
schedule_work(&port->work);
sound/firewire/tascam/tascam-transaction.c
202
schedule_work(&port->work);
sound/firewire/tascam/tascam-transaction.c
322
INIT_WORK(&tscm->out_ports[i].work, midi_port_work);
sound/firewire/tascam/tascam.h
188
schedule_work(&port->work);
sound/firewire/tascam/tascam.h
196
cancel_work_sync(&port->work);
sound/firewire/tascam/tascam.h
49
struct work_struct work;
sound/hda/codecs/ca0132.c
4884
static void ca0132_unsol_hp_delayed(struct work_struct *work)
sound/hda/codecs/ca0132.c
4887
to_delayed_work(work), struct ca0132_spec, unsol_hp_work);
sound/hda/codecs/cirrus/cs8409.c
111
static void cs8409_disable_i2c_clock_worker(struct work_struct *work)
sound/hda/codecs/cirrus/cs8409.c
113
struct cs8409_spec *spec = container_of(work, struct cs8409_spec, i2c_clk_work.work);
sound/hda/codecs/cirrus/cs8409.c
58
static void cs8409_disable_i2c_clock_worker(struct work_struct *work);
sound/hda/codecs/cm9825.c
214
static void cm9825_unsol_inputs_delayed(struct work_struct *work)
sound/hda/codecs/cm9825.c
217
container_of(to_delayed_work(work), struct cmi_spec,
sound/hda/codecs/cm9825.c
230
static void cm9825_unsol_lineout_delayed(struct work_struct *work)
sound/hda/codecs/cm9825.c
233
container_of(to_delayed_work(work), struct cmi_spec,
sound/hda/codecs/cm9825.c
240
static void cm9825_unsol_hp_delayed(struct work_struct *work)
sound/hda/codecs/cm9825.c
243
container_of(to_delayed_work(work), struct cmi_spec, unsol_hp_work);
sound/hda/codecs/hdmi/hdmi.c
1155
schedule_delayed_work(&per_pin->work,
sound/hda/codecs/hdmi/hdmi.c
1412
static void hdmi_repoll_eld(struct work_struct *work)
sound/hda/codecs/hdmi/hdmi.c
1415
container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
sound/hda/codecs/hdmi/hdmi.c
1985
INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
sound/hda/codecs/hdmi/hdmi.c
2053
cancel_delayed_work_sync(&per_pin->work);
sound/hda/codecs/hdmi/hdmi.c
2074
cancel_delayed_work_sync(&per_pin->work);
sound/hda/codecs/hdmi/hdmi_local.h
44
struct delayed_work work;
sound/hda/codecs/side-codecs/cs35l41_hda.c
1254
static void cs35l41_fw_load_work(struct work_struct *work)
sound/hda/codecs/side-codecs/cs35l41_hda.c
1256
struct cs35l41_hda *cs35l41 = container_of(work, struct cs35l41_hda, fw_load_work);
sound/hda/codecs/side-codecs/cs35l56_hda.c
692
static void cs35l56_hda_dsp_work(struct work_struct *work)
sound/hda/codecs/side-codecs/cs35l56_hda.c
694
struct cs35l56_hda *cs35l56 = container_of(work, struct cs35l56_hda, dsp_work);
sound/hda/common/beep.c
45
static void snd_hda_generate_beep(struct work_struct *work)
sound/hda/common/beep.c
48
container_of(work, struct hda_beep, beep_work);
sound/hda/common/codec.c
4017
if (current_work() != &codec->jackpoll_work.work)
sound/hda/common/codec.c
635
static void hda_jackpoll_work(struct work_struct *work)
sound/hda/common/codec.c
638
container_of(work, struct hda_codec, jackpoll_work.work);
sound/hda/controllers/acpi.c
151
static void hda_acpi_probe_work(struct work_struct *work)
sound/hda/controllers/acpi.c
153
struct hda_acpi *hda = container_of(work, struct hda_acpi, probe_work);
sound/hda/controllers/intel.c
1723
static void azx_probe_work(struct work_struct *work)
sound/hda/controllers/intel.c
1725
struct hda_intel *hda = container_of(work, struct hda_intel, probe_work.work);
sound/hda/controllers/intel.c
719
static void azx_irq_pending_work(struct work_struct *work)
sound/hda/controllers/intel.c
721
struct hda_intel *hda = container_of(work, struct hda_intel, irq_pending_work);
sound/hda/controllers/tegra.c
408
static void hda_tegra_probe_work(struct work_struct *work);
sound/hda/controllers/tegra.c
588
static void hda_tegra_probe_work(struct work_struct *work)
sound/hda/controllers/tegra.c
590
struct hda_tegra *hda = container_of(work, struct hda_tegra, probe_work);
sound/hda/core/bus.c
15
static void snd_hdac_bus_process_unsol_events(struct work_struct *work);
sound/hda/core/bus.c
166
static void snd_hdac_bus_process_unsol_events(struct work_struct *work)
sound/hda/core/bus.c
168
struct hdac_bus *bus = container_of(work, struct hdac_bus, unsol_work);
sound/i2c/other/ak4113.c
129
cancel_delayed_work_sync(&chip->work);
sound/i2c/other/ak4113.c
135
schedule_delayed_work(&chip->work, HZ / 10);
sound/i2c/other/ak4113.c
25
static void ak4113_stats(struct work_struct *work);
sound/i2c/other/ak4113.c
45
cancel_delayed_work_sync(&chip->work);
sound/i2c/other/ak4113.c
505
schedule_delayed_work(&ak4113->work, HZ / 10);
sound/i2c/other/ak4113.c
611
static void ak4113_stats(struct work_struct *work)
sound/i2c/other/ak4113.c
613
struct ak4113 *chip = container_of(work, struct ak4113, work.work);
sound/i2c/other/ak4113.c
619
schedule_delayed_work(&chip->work, HZ / 10);
sound/i2c/other/ak4113.c
626
cancel_delayed_work_sync(&chip->work);
sound/i2c/other/ak4113.c
75
INIT_DELAYED_WORK(&chip->work, ak4113_stats);
sound/i2c/other/ak4114.c
134
cancel_delayed_work_sync(&chip->work);
sound/i2c/other/ak4114.c
140
schedule_delayed_work(&chip->work, HZ / 10);
sound/i2c/other/ak4114.c
24
static void ak4114_stats(struct work_struct *work);
sound/i2c/other/ak4114.c
44
cancel_delayed_work_sync(&chip->work);
sound/i2c/other/ak4114.c
481
schedule_delayed_work(&ak4114->work, HZ / 10);
sound/i2c/other/ak4114.c
589
static void ak4114_stats(struct work_struct *work)
sound/i2c/other/ak4114.c
591
struct ak4114 *chip = container_of(work, struct ak4114, work.work);
sound/i2c/other/ak4114.c
596
schedule_delayed_work(&chip->work, HZ / 10);
sound/i2c/other/ak4114.c
603
cancel_delayed_work_sync(&chip->work);
sound/i2c/other/ak4114.c
75
INIT_DELAYED_WORK(&chip->work, ak4114_stats);
sound/pci/ac97/ac97_codec.c
2022
static void do_update_power(struct work_struct *work)
sound/pci/ac97/ac97_codec.c
2025
container_of(work, struct snd_ac97, power_work.work));
sound/pci/emu10k1/emu10k1.c
193
cancel_work_sync(&emu->emu1010.work);
sound/pci/emu10k1/emu10k1_main.c
1495
INIT_WORK(&emu->emu1010.work, emu1010_work);
sound/pci/emu10k1/emu10k1_main.c
758
static void emu1010_work(struct work_struct *work)
sound/pci/emu10k1/emu10k1_main.c
763
emu = container_of(work, struct snd_emu10k1, emu1010.work);
sound/pci/emu10k1/emu10k1_main.c
793
schedule_work(&emu->emu1010.work);
sound/pci/emu10k1/emu10k1_main.c
915
cancel_work_sync(&emu->emu1010.work);
sound/pci/es1968.c
1826
static void es1968_update_hw_volume(struct work_struct *work)
sound/pci/es1968.c
1828
struct es1968 *chip = container_of(work, struct es1968, hwvol_work);
sound/pci/ice1712/psc724.c
200
static void psc724_update_hp_jack_state(struct work_struct *work)
sound/pci/ice1712/psc724.c
202
struct psc724_spec *spec = container_of(work, struct psc724_spec,
sound/pci/ice1712/psc724.c
203
hp_work.work);
sound/pci/maestro3.c
1513
static void snd_m3_update_hw_volume(struct work_struct *work)
sound/pci/maestro3.c
1515
struct snd_m3 *chip = container_of(work, struct snd_m3, hwvol_work);
sound/pci/oxygen/oxygen_lib.c
115
static void oxygen_spdif_input_bits_changed(struct work_struct *work)
sound/pci/oxygen/oxygen_lib.c
117
struct oxygen *chip = container_of(work, struct oxygen,
sound/pci/oxygen/oxygen_lib.c
176
static void oxygen_gpio_changed(struct work_struct *work)
sound/pci/oxygen/oxygen_lib.c
178
struct oxygen *chip = container_of(work, struct oxygen, gpio_work);
sound/pci/rme9652/hdsp.c
3823
static void hdsp_midi_work(struct work_struct *work)
sound/pci/rme9652/hdsp.c
3825
struct hdsp *hdsp = container_of(work, struct hdsp, midi_work);
sound/pci/rme9652/hdspm.c
2153
static void hdspm_midi_work(struct work_struct *work)
sound/pci/rme9652/hdspm.c
2155
struct hdspm *hdspm = container_of(work, struct hdspm, midi_work);
sound/ppc/tumbler.c
968
static void device_change_handler(struct work_struct *work)
sound/sh/aica.c
253
static void run_spu_dma(struct work_struct *work)
sound/sh/aica.c
259
container_of(work, struct snd_card_aica, spu_dma_work);
sound/soc/codecs/ak4613.c
680
static void ak4613_dummy_write(struct work_struct *work)
sound/soc/codecs/ak4613.c
682
struct ak4613_priv *priv = container_of(work,
sound/soc/codecs/arizona-jack.c
717
static void arizona_micd_timeout_work(struct work_struct *work)
sound/soc/codecs/arizona-jack.c
719
struct arizona_priv *info = container_of(work,
sound/soc/codecs/arizona-jack.c
721
micd_timeout_work.work);
sound/soc/codecs/arizona-jack.c
928
static void arizona_micd_detect(struct work_struct *work)
sound/soc/codecs/arizona-jack.c
930
struct arizona_priv *info = container_of(work,
sound/soc/codecs/arizona-jack.c
932
micd_detect_work.work);
sound/soc/codecs/arizona-jack.c
974
arizona_micd_detect(&info->micd_detect_work.work);
sound/soc/codecs/arizona-jack.c
979
static void arizona_hpdet_work(struct work_struct *work)
sound/soc/codecs/arizona-jack.c
981
struct arizona_priv *info = container_of(work,
sound/soc/codecs/arizona-jack.c
983
hpdet_work.work);
sound/soc/codecs/aw88081.c
761
static void aw88081_startup_work(struct work_struct *work)
sound/soc/codecs/aw88081.c
764
container_of(work, struct aw88081, start_work.work);
sound/soc/codecs/aw88166.c
1171
static void aw88166_startup_work(struct work_struct *work)
sound/soc/codecs/aw88166.c
1174
container_of(work, struct aw88166, start_work.work);
sound/soc/codecs/aw88261.c
689
static void aw88261_startup_work(struct work_struct *work)
sound/soc/codecs/aw88261.c
692
container_of(work, struct aw88261, start_work.work);
sound/soc/codecs/aw88395/aw88395.c
49
static void aw88395_startup_work(struct work_struct *work)
sound/soc/codecs/aw88395/aw88395.c
52
container_of(work, struct aw88395, start_work.work);
sound/soc/codecs/aw88399.c
1138
static void aw88399_startup_work(struct work_struct *work)
sound/soc/codecs/aw88399.c
1141
container_of(work, struct aw88399, start_work.work);
sound/soc/codecs/cros_ec_codec.c
608
container_of(w, struct cros_ec_codec_priv, wov_copy_work.work);
sound/soc/codecs/cs35l56-sdw.c
322
static void cs35l56_sdw_irq_work(struct work_struct *work)
sound/soc/codecs/cs35l56-sdw.c
324
struct cs35l56_private *cs35l56 = container_of(work,
sound/soc/codecs/cs35l56.c
858
static void cs35l56_dsp_work(struct work_struct *work)
sound/soc/codecs/cs35l56.c
860
struct cs35l56_private *cs35l56 = container_of(work,
sound/soc/codecs/cs4234.c
117
static void cs4234_vq_ramp_done(struct work_struct *work)
sound/soc/codecs/cs4234.c
119
struct delayed_work *dw = to_delayed_work(work);
sound/soc/codecs/cs42l43-jack.c
467
void cs42l43_bias_sense_timeout(struct work_struct *work)
sound/soc/codecs/cs42l43-jack.c
469
struct cs42l43_codec *priv = container_of(work, struct cs42l43_codec,
sound/soc/codecs/cs42l43-jack.c
470
bias_sense_timeout.work);
sound/soc/codecs/cs42l43-jack.c
734
void cs42l43_tip_sense_work(struct work_struct *work)
sound/soc/codecs/cs42l43-jack.c
736
struct cs42l43_codec *priv = container_of(work, struct cs42l43_codec,
sound/soc/codecs/cs42l43-jack.c
737
tip_sense_work.work);
sound/soc/codecs/cs42l43.c
153
static void cs42l43_hp_ilimit_clear_work(struct work_struct *work)
sound/soc/codecs/cs42l43.c
155
struct cs42l43_codec *priv = container_of(work, struct cs42l43_codec,
sound/soc/codecs/cs42l43.c
156
hp_ilimit_clear_work.work);
sound/soc/codecs/cs42l43.h
134
void cs42l43_bias_sense_timeout(struct work_struct *work);
sound/soc/codecs/cs42l43.h
136
void cs42l43_tip_sense_work(struct work_struct *work);
sound/soc/codecs/cs42l52.c
918
static void cs42l52_beep_work(struct work_struct *work)
sound/soc/codecs/cs42l52.c
921
container_of(work, struct cs42l52_private, beep_work);
sound/soc/codecs/cs42l56.c
995
static void cs42l56_beep_work(struct work_struct *work)
sound/soc/codecs/cs42l56.c
998
container_of(work, struct cs42l56_private, beep_work);
sound/soc/codecs/cs43130.c
2127
cs43130 = container_of(wk, struct cs43130_private, work);
sound/soc/codecs/cs43130.c
2331
!work_busy(&cs43130->work)) {
sound/soc/codecs/cs43130.c
2333
queue_work(cs43130->wq, &cs43130->work);
sound/soc/codecs/cs43130.c
2379
INIT_WORK(&cs43130->work, cs43130_imp_meas);
sound/soc/codecs/cs43130.c
2660
cancel_work_sync(&cs43130->work);
sound/soc/codecs/cs43130.h
537
struct work_struct work;
sound/soc/codecs/da7219-aad.c
107
static void da7219_aad_hptest_work(struct work_struct *work)
sound/soc/codecs/da7219-aad.c
110
container_of(work, struct da7219_aad_priv, hptest_work);
sound/soc/codecs/da7219-aad.c
336
static void da7219_aad_jack_det_work(struct work_struct *work)
sound/soc/codecs/da7219-aad.c
339
container_of(work, struct da7219_aad_priv, jack_det_work.work);
sound/soc/codecs/da7219-aad.c
51
static void da7219_aad_btn_det_work(struct work_struct *work)
sound/soc/codecs/da7219-aad.c
54
container_of(work, struct da7219_aad_priv, btn_det_work);
sound/soc/codecs/es8326.c
781
static void es8326_jack_button_handler(struct work_struct *work)
sound/soc/codecs/es8326.c
784
container_of(work, struct es8326_priv, button_press_work.work);
sound/soc/codecs/es8326.c
851
static void es8326_jack_detect_handler(struct work_struct *work)
sound/soc/codecs/es8326.c
854
container_of(work, struct es8326_priv, jack_detect_work.work);
sound/soc/codecs/framer-codec.c
271
static void framer_carrier_work(struct work_struct *work)
sound/soc/codecs/framer-codec.c
273
struct framer_codec *framer = container_of(work, struct framer_codec, carrier_work);
sound/soc/codecs/fs210x.c
848
static void fs210x_start_work(struct work_struct *work)
sound/soc/codecs/fs210x.c
853
fs210x = container_of(work, struct fs210x_priv, start_work.work);
sound/soc/codecs/fs210x.c
864
static void fs210x_fault_check_work(struct work_struct *work)
sound/soc/codecs/fs210x.c
870
fs210x = container_of(work, struct fs210x_priv, fault_check_work.work);
sound/soc/codecs/hdac_hdmi.c
198
static void hdac_hdmi_jack_dapm_work(struct work_struct *work)
sound/soc/codecs/hdac_hdmi.c
202
port = container_of(work, struct hdac_hdmi_port, dapm_work);
sound/soc/codecs/lpass-tx-macro.c
672
static void tx_macro_tx_hpf_corner_freq_callback(struct work_struct *work)
sound/soc/codecs/lpass-tx-macro.c
681
hpf_delayed_work = to_delayed_work(work);
sound/soc/codecs/lpass-tx-macro.c
716
static void tx_macro_mute_update_callback(struct work_struct *work)
sound/soc/codecs/lpass-tx-macro.c
724
delayed_work = to_delayed_work(work);
sound/soc/codecs/max98090.c
2103
static void max98090_pll_det_enable_work(struct work_struct *work)
sound/soc/codecs/max98090.c
2106
container_of(work, struct max98090_priv,
sound/soc/codecs/max98090.c
2107
pll_det_enable_work.work);
sound/soc/codecs/max98090.c
2136
static void max98090_pll_det_disable_work(struct work_struct *work)
sound/soc/codecs/max98090.c
2139
container_of(work, struct max98090_priv, pll_det_disable_work);
sound/soc/codecs/max98090.c
2185
static void max98090_jack_work(struct work_struct *work)
sound/soc/codecs/max98090.c
2187
struct max98090_priv *max98090 = container_of(work,
sound/soc/codecs/max98090.c
2189
jack_work.work);
sound/soc/codecs/mt6359-accdet.c
396
static void mt6359_accdet_work(struct work_struct *work)
sound/soc/codecs/mt6359-accdet.c
399
container_of(work, struct mt6359_accdet, accdet_work);
sound/soc/codecs/mt6359-accdet.c
411
static void mt6359_accdet_jd_work(struct work_struct *work)
sound/soc/codecs/mt6359-accdet.c
417
container_of(work, struct mt6359_accdet, jd_work);
sound/soc/codecs/nau8821.c
1109
static void nau8821_jdet_work(struct work_struct *work)
sound/soc/codecs/nau8821.c
1112
container_of(work, struct nau8821, jdet_work.work);
sound/soc/codecs/nau8824.c
883
static void nau8824_jdet_work(struct work_struct *work)
sound/soc/codecs/nau8824.c
886
work, struct nau8824, jdet_work);
sound/soc/codecs/nau8825.c
746
static void nau8825_xtalk_work(struct work_struct *work)
sound/soc/codecs/nau8825.c
749
work, struct nau8825, xtalk_work);
sound/soc/codecs/pcm1789.c
131
static void pcm1789_work_queue(struct work_struct *work)
sound/soc/codecs/pcm1789.c
133
struct pcm1789_private *priv = container_of(work,
sound/soc/codecs/pcm1789.c
135
work);
sound/soc/codecs/pcm1789.c
154
schedule_work(&priv->work);
sound/soc/codecs/pcm1789.c
254
INIT_WORK(&pcm1789->work, pcm1789_work_queue);
sound/soc/codecs/pcm1789.c
265
flush_work(&priv->work);
sound/soc/codecs/pcm1789.c
31
struct work_struct work;
sound/soc/codecs/rt1011.c
2368
static void rt1011_calibration_work(struct work_struct *work)
sound/soc/codecs/rt1011.c
2371
container_of(work, struct rt1011_priv, cali_work);
sound/soc/codecs/rt1318.c
1277
static void rt1318_calibration_work(struct work_struct *work)
sound/soc/codecs/rt1318.c
1280
container_of(work, struct rt1318_priv, cali_work);
sound/soc/codecs/rt1320-sdw.c
1773
static void rt1320_load_dspfw_work(struct work_struct *work)
sound/soc/codecs/rt1320-sdw.c
1776
container_of(work, struct rt1320_sdw_priv, load_dspfw_work);
sound/soc/codecs/rt274.c
377
static void rt274_jack_detect_work(struct work_struct *work)
sound/soc/codecs/rt274.c
380
container_of(work, struct rt274_priv, jack_detect_work.work);
sound/soc/codecs/rt286.c
294
static void rt286_jack_detect_work(struct work_struct *work)
sound/soc/codecs/rt286.c
297
container_of(work, struct rt286_priv, jack_detect_work.work);
sound/soc/codecs/rt298.c
308
static void rt298_jack_detect_work(struct work_struct *work)
sound/soc/codecs/rt298.c
311
container_of(work, struct rt298_priv, jack_detect_work.work);
sound/soc/codecs/rt5514-spi.c
73
static void rt5514_spi_copy_work(struct work_struct *work)
sound/soc/codecs/rt5514-spi.c
76
container_of(work, struct rt5514_dsp, copy_work.work);
sound/soc/codecs/rt5640.c
2205
static void rt5640_button_press_work(struct work_struct *work)
sound/soc/codecs/rt5640.c
2208
container_of(work, struct rt5640_priv, bp_work.work);
sound/soc/codecs/rt5640.c
2309
static void rt5640_jack_work(struct work_struct *work)
sound/soc/codecs/rt5640.c
2312
container_of(work, struct rt5640_priv, jack_work.work);
sound/soc/codecs/rt5645.c
3319
static void rt5645_jack_detect_work(struct work_struct *work)
sound/soc/codecs/rt5645.c
3322
container_of(work, struct rt5645_priv, jack_detect_work.work);
sound/soc/codecs/rt5645.c
3423
static void rt5645_rcclock_work(struct work_struct *work)
sound/soc/codecs/rt5645.c
3426
container_of(work, struct rt5645_priv, rcclock_work.work);
sound/soc/codecs/rt5645.c
4342
rt5645_jack_detect_work(&rt5645->jack_detect_work.work);
sound/soc/codecs/rt5651.c
1675
static void rt5651_button_press_work(struct work_struct *work)
sound/soc/codecs/rt5651.c
1678
container_of(work, struct rt5651_priv, bp_work.work);
sound/soc/codecs/rt5651.c
1783
static void rt5651_jack_detect_work(struct work_struct *work)
sound/soc/codecs/rt5651.c
1786
container_of(work, struct rt5651_priv, jack_detect_work);
sound/soc/codecs/rt5659.c
1352
static void rt5659_jack_detect_work(struct work_struct *work)
sound/soc/codecs/rt5659.c
1355
container_of(work, struct rt5659_priv, jack_detect_work.work);
sound/soc/codecs/rt5659.c
1423
static void rt5659_jack_detect_intel_hd_header(struct work_struct *work)
sound/soc/codecs/rt5659.c
1426
container_of(work, struct rt5659_priv, jack_detect_work.work);
sound/soc/codecs/rt5663.c
1900
static void rt5663_jack_detect_work(struct work_struct *work)
sound/soc/codecs/rt5663.c
1903
container_of(work, struct rt5663_priv, jack_detect_work.work);
sound/soc/codecs/rt5663.c
2001
static void rt5663_jd_unplug_work(struct work_struct *work)
sound/soc/codecs/rt5663.c
2004
container_of(work, struct rt5663_priv, jd_unplug_work.work);
sound/soc/codecs/rt5665.c
1143
static void rt5665_jd_check_handler(struct work_struct *work)
sound/soc/codecs/rt5665.c
1145
struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
sound/soc/codecs/rt5665.c
1146
jd_check_work.work);
sound/soc/codecs/rt5665.c
1190
static void rt5665_jack_detect_handler(struct work_struct *work)
sound/soc/codecs/rt5665.c
1193
container_of(work, struct rt5665_priv, jack_detect_work.work);
sound/soc/codecs/rt5665.c
4643
static void rt5665_calibrate_handler(struct work_struct *work)
sound/soc/codecs/rt5665.c
4645
struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
sound/soc/codecs/rt5665.c
4646
calibrate_work.work);
sound/soc/codecs/rt5668.c
909
static void rt5668_jd_check_handler(struct work_struct *work)
sound/soc/codecs/rt5668.c
911
struct rt5668_priv *rt5668 = container_of(work, struct rt5668_priv,
sound/soc/codecs/rt5668.c
912
jd_check_work.work);
sound/soc/codecs/rt5668.c
975
static void rt5668_jack_detect_handler(struct work_struct *work)
sound/soc/codecs/rt5668.c
978
container_of(work, struct rt5668_priv, jack_detect_work.work);
sound/soc/codecs/rt5677-spi.c
303
static void rt5677_spi_copy_work(struct work_struct *work)
sound/soc/codecs/rt5677-spi.c
306
container_of(work, struct rt5677_dsp, copy_work.work);
sound/soc/codecs/rt5677.c
5373
static void rt5677_resume_irq_check(struct work_struct *work)
sound/soc/codecs/rt5677.c
5377
container_of(work, struct rt5677_priv, resume_irq_check.work);
sound/soc/codecs/rt5677.c
886
static void rt5677_dsp_work(struct work_struct *work)
sound/soc/codecs/rt5677.c
889
container_of(work, struct rt5677_priv, dsp_work.work);
sound/soc/codecs/rt5682-i2c.c
55
static void rt5682_jd_check_handler(struct work_struct *work)
sound/soc/codecs/rt5682-i2c.c
57
struct rt5682_priv *rt5682 = container_of(work, struct rt5682_priv,
sound/soc/codecs/rt5682-i2c.c
58
jd_check_work.work);
sound/soc/codecs/rt5682.c
1094
void rt5682_jack_detect_handler(struct work_struct *work)
sound/soc/codecs/rt5682.c
1097
container_of(work, struct rt5682_priv, jack_detect_work.work);
sound/soc/codecs/rt5682.h
1479
void rt5682_jack_detect_handler(struct work_struct *work);
sound/soc/codecs/rt5682s.c
829
static void rt5682s_jack_detect_handler(struct work_struct *work)
sound/soc/codecs/rt5682s.c
832
container_of(work, struct rt5682s_priv, jack_detect_work.work);
sound/soc/codecs/rt5682s.c
919
static void rt5682s_jd_check_handler(struct work_struct *work)
sound/soc/codecs/rt5682s.c
922
container_of(work, struct rt5682s_priv, jd_check_work.work);
sound/soc/codecs/rt700.c
156
static void rt700_jack_detect_handler(struct work_struct *work)
sound/soc/codecs/rt700.c
159
container_of(work, struct rt700_priv, jack_detect_work.work);
sound/soc/codecs/rt700.c
219
static void rt700_btn_check_handler(struct work_struct *work)
sound/soc/codecs/rt700.c
221
struct rt700_priv *rt700 = container_of(work, struct rt700_priv,
sound/soc/codecs/rt700.c
222
jack_btn_check_work.work);
sound/soc/codecs/rt711-sdca.c
309
static void rt711_sdca_jack_detect_handler(struct work_struct *work)
sound/soc/codecs/rt711-sdca.c
312
container_of(work, struct rt711_sdca_priv, jack_detect_work.work);
sound/soc/codecs/rt711-sdca.c
360
static void rt711_sdca_btn_check_handler(struct work_struct *work)
sound/soc/codecs/rt711-sdca.c
363
container_of(work, struct rt711_sdca_priv, jack_btn_check_work.work);
sound/soc/codecs/rt711.c
1163
static void rt711_calibration_work(struct work_struct *work)
sound/soc/codecs/rt711.c
1166
container_of(work, struct rt711_priv, calibration_work);
sound/soc/codecs/rt711.c
236
static void rt711_jack_detect_handler(struct work_struct *work)
sound/soc/codecs/rt711.c
239
container_of(work, struct rt711_priv, jack_detect_work.work);
sound/soc/codecs/rt711.c
306
static void rt711_btn_check_handler(struct work_struct *work)
sound/soc/codecs/rt711.c
308
struct rt711_priv *rt711 = container_of(work, struct rt711_priv,
sound/soc/codecs/rt711.c
309
jack_btn_check_work.work);
sound/soc/codecs/rt712-sdca.c
261
static void rt712_sdca_jack_detect_handler(struct work_struct *work)
sound/soc/codecs/rt712-sdca.c
264
container_of(work, struct rt712_sdca_priv, jack_detect_work.work);
sound/soc/codecs/rt712-sdca.c
312
static void rt712_sdca_btn_check_handler(struct work_struct *work)
sound/soc/codecs/rt712-sdca.c
315
container_of(work, struct rt712_sdca_priv, jack_btn_check_work.work);
sound/soc/codecs/rt721-sdca.c
29
static void rt721_sdca_jack_detect_handler(struct work_struct *work)
sound/soc/codecs/rt721-sdca.c
32
container_of(work, struct rt721_sdca_priv, jack_detect_work.work);
sound/soc/codecs/rt721-sdca.c
83
static void rt721_sdca_btn_check_handler(struct work_struct *work)
sound/soc/codecs/rt721-sdca.c
86
container_of(work, struct rt721_sdca_priv, jack_btn_check_work.work);
sound/soc/codecs/rt722-sdca.c
183
static void rt722_sdca_jack_detect_handler(struct work_struct *work)
sound/soc/codecs/rt722-sdca.c
186
container_of(work, struct rt722_sdca_priv, jack_detect_work.work);
sound/soc/codecs/rt722-sdca.c
234
static void rt722_sdca_btn_check_handler(struct work_struct *work)
sound/soc/codecs/rt722-sdca.c
237
container_of(work, struct rt722_sdca_priv, jack_btn_check_work.work);
sound/soc/codecs/sma1303.c
1450
static void sma1303_check_fault_worker(struct work_struct *work)
sound/soc/codecs/sma1303.c
1453
container_of(work, struct sma1303_priv, check_fault_work.work);
sound/soc/codecs/sma1307.c
1590
static void sma1307_check_fault_worker(struct work_struct *work)
sound/soc/codecs/sma1307.c
1593
container_of(work, struct sma1307_priv, check_fault_work.work);
sound/soc/codecs/sta32x.c
388
static void sta32x_watchdog(struct work_struct *work)
sound/soc/codecs/sta32x.c
390
struct sta32x_priv *sta32x = container_of(work, struct sta32x_priv,
sound/soc/codecs/sta32x.c
391
watchdog_work.work);
sound/soc/codecs/tas5720.c
241
static void tas5720_fault_check_work(struct work_struct *work)
sound/soc/codecs/tas5720.c
243
struct tas5720_data *tas5720 = container_of(work, struct tas5720_data,
sound/soc/codecs/tas5720.c
244
fault_check_work.work);
sound/soc/codecs/tas5805m.c
170
struct work_struct work;
sound/soc/codecs/tas5805m.c
312
schedule_work(&tas5805m->work);
sound/soc/codecs/tas5805m.c
327
static void do_work(struct work_struct *work)
sound/soc/codecs/tas5805m.c
330
container_of(work, struct tas5805m_priv, work);
sound/soc/codecs/tas5805m.c
363
cancel_work_sync(&tas5805m->work);
sound/soc/codecs/tas5805m.c
552
INIT_WORK(&tas5805m->work, do_work);
sound/soc/codecs/tas5805m.c
575
cancel_work_sync(&tas5805m->work);
sound/soc/codecs/tas6424.c
403
static void tas6424_fault_check_work(struct work_struct *work)
sound/soc/codecs/tas6424.c
405
struct tas6424_data *tas6424 = container_of(work, struct tas6424_data,
sound/soc/codecs/tas6424.c
406
fault_check_work.work);
sound/soc/codecs/tlv320dac33.c
1130
schedule_work(&dac33->work);
sound/soc/codecs/tlv320dac33.c
1138
schedule_work(&dac33->work);
sound/soc/codecs/tlv320dac33.c
1409
INIT_WORK(&dac33->work, dac33_work);
sound/soc/codecs/tlv320dac33.c
1428
flush_work(&dac33->work);
sound/soc/codecs/tlv320dac33.c
728
static void dac33_work(struct work_struct *work)
sound/soc/codecs/tlv320dac33.c
734
dac33 = container_of(work, struct tlv320dac33_priv, work);
sound/soc/codecs/tlv320dac33.c
774
schedule_work(&dac33->work);
sound/soc/codecs/tlv320dac33.c
78
struct work_struct work;
sound/soc/codecs/twl6040.c
1116
INIT_DELAYED_WORK(&priv->hs_jack.work, twl6040_accessory_work);
sound/soc/codecs/twl6040.c
301
static void twl6040_accessory_work(struct work_struct *work)
sound/soc/codecs/twl6040.c
303
struct twl6040_data *priv = container_of(work,
sound/soc/codecs/twl6040.c
304
struct twl6040_data, hs_jack.work.work);
sound/soc/codecs/twl6040.c
318
&priv->hs_jack.work, msecs_to_jiffies(200));
sound/soc/codecs/twl6040.c
47
struct delayed_work work;
sound/soc/codecs/uda1380.c
176
static void uda1380_flush_work(struct work_struct *work)
sound/soc/codecs/uda1380.c
178
struct uda1380_priv *uda1380 = container_of(work, struct uda1380_priv, work);
sound/soc/codecs/uda1380.c
36
struct work_struct work;
sound/soc/codecs/uda1380.c
518
schedule_work(&uda1380->work);
sound/soc/codecs/uda1380.c
524
schedule_work(&uda1380->work);
sound/soc/codecs/uda1380.c
709
INIT_WORK(&uda1380->work, uda1380_flush_work);
sound/soc/codecs/wcd-mbhc-v2.c
1147
static void wcd_correct_swch_plug(struct work_struct *work)
sound/soc/codecs/wcd-mbhc-v2.c
1159
mbhc = container_of(work, struct wcd_mbhc, correct_plug_swch);
sound/soc/codecs/wcd-mbhc-v2.c
419
struct work_struct *work)
sound/soc/codecs/wcd-mbhc-v2.c
423
cancel_work_sync(work);
sound/soc/codecs/wcd-mbhc-v2.c
488
struct work_struct *work)
sound/soc/codecs/wcd-mbhc-v2.c
492
schedule_work(work);
sound/soc/codecs/wcd-mbhc-v2.c
513
static void mbhc_plug_detect_fn(struct work_struct *work)
sound/soc/codecs/wcd-mbhc-v2.c
515
struct wcd_mbhc *mbhc = container_of(work, struct wcd_mbhc, mbhc_plug_detect_work);
sound/soc/codecs/wcd-mbhc-v2.c
660
static void wcd_btn_long_press_fn(struct work_struct *work)
sound/soc/codecs/wcd-mbhc-v2.c
662
struct delayed_work *dwork = to_delayed_work(work);
sound/soc/codecs/wm8350.c
1257
static void wm8350_hpl_work(struct work_struct *work)
sound/soc/codecs/wm8350.c
1260
container_of(work, struct wm8350_data, hpl.work.work);
sound/soc/codecs/wm8350.c
1265
static void wm8350_hpr_work(struct work_struct *work)
sound/soc/codecs/wm8350.c
1268
container_of(work, struct wm8350_data, hpr.work.work);
sound/soc/codecs/wm8350.c
1286
&priv->hpl.work, msecs_to_jiffies(200));
sound/soc/codecs/wm8350.c
1304
&priv->hpr.work, msecs_to_jiffies(200));
sound/soc/codecs/wm8350.c
1493
INIT_DELAYED_WORK(&priv->hpl.work, wm8350_hpl_work);
sound/soc/codecs/wm8350.c
1494
INIT_DELAYED_WORK(&priv->hpr.work, wm8350_hpr_work);
sound/soc/codecs/wm8350.c
1593
cancel_delayed_work_sync(&priv->hpl.work);
sound/soc/codecs/wm8350.c
1594
cancel_delayed_work_sync(&priv->hpr.work);
sound/soc/codecs/wm8350.c
207
static void wm8350_pga_work(struct work_struct *work)
sound/soc/codecs/wm8350.c
210
container_of(work, struct wm8350_data, pga_work.work);
sound/soc/codecs/wm8350.c
54
struct delayed_work work;
sound/soc/codecs/wm8753.c
1321
static void wm8753_charge_work(struct work_struct *work)
sound/soc/codecs/wm8753.c
1324
container_of(work, struct wm8753_priv, charge_work.work);
sound/soc/codecs/wm8962.c
3093
static void wm8962_mic_work(struct work_struct *work)
sound/soc/codecs/wm8962.c
3095
struct wm8962_priv *wm8962 = container_of(work,
sound/soc/codecs/wm8962.c
3097
mic_work.work);
sound/soc/codecs/wm8962.c
3278
static void wm8962_beep_work(struct work_struct *work)
sound/soc/codecs/wm8962.c
3281
container_of(work, struct wm8962_priv, beep_work);
sound/soc/codecs/wm8971.c
551
static void wm8971_charge_work(struct work_struct *work)
sound/soc/codecs/wm8971.c
554
container_of(work, struct wm8971_priv, charge_work.work);
sound/soc/codecs/wm8994.c
3574
static void wm8994_mic_work(struct work_struct *work)
sound/soc/codecs/wm8994.c
3576
struct wm8994_priv *priv = container_of(work,
sound/soc/codecs/wm8994.c
3578
mic_work.work);
sound/soc/codecs/wm8994.c
3700
static void wm8958_open_circuit_work(struct work_struct *work)
sound/soc/codecs/wm8994.c
3702
struct wm8994_priv *wm8994 = container_of(work,
sound/soc/codecs/wm8994.c
3704
open_circuit_work.work);
sound/soc/codecs/wm8994.c
3772
static void wm1811_mic_work(struct work_struct *work)
sound/soc/codecs/wm8994.c
3774
struct wm8994_priv *wm8994 = container_of(work, struct wm8994_priv,
sound/soc/codecs/wm8994.c
3775
mic_work.work);
sound/soc/codecs/wm8994.c
3901
static void wm1811_jackdet_bootstrap(struct work_struct *work)
sound/soc/codecs/wm8994.c
3903
struct wm8994_priv *wm8994 = container_of(work,
sound/soc/codecs/wm8994.c
3905
jackdet_bootstrap.work);
sound/soc/codecs/wm8994.c
4022
static void wm8958_mic_work(struct work_struct *work)
sound/soc/codecs/wm8994.c
4024
struct wm8994_priv *wm8994 = container_of(work,
sound/soc/codecs/wm8994.c
4026
mic_complete_work.work);
sound/soc/codecs/wm_adsp.c
1028
static void wm_adsp_boot_work(struct work_struct *work)
sound/soc/codecs/wm_adsp.c
1030
struct wm_adsp *dsp = container_of(work,
sound/soc/codecs/wm_adsp.c
316
struct work_struct work;
sound/soc/codecs/wm_adsp.c
534
static void wm_adsp_ctl_work(struct work_struct *work)
sound/soc/codecs/wm_adsp.c
536
struct wm_coeff_ctl *ctl = container_of(work,
sound/soc/codecs/wm_adsp.c
538
work);
sound/soc/codecs/wm_adsp.c
643
INIT_WORK(&ctl->work, wm_adsp_ctl_work);
sound/soc/codecs/wm_adsp.c
644
schedule_work(&ctl->work);
sound/soc/codecs/wm_adsp.c
669
cancel_work_sync(&ctl->work);
sound/soc/fsl/fsl_esai.c
1092
INIT_WORK(&esai_priv->work, fsl_esai_hw_reset);
sound/soc/fsl/fsl_esai.c
1112
cancel_work_sync(&esai_priv->work);
sound/soc/fsl/fsl_esai.c
115
schedule_work(&esai_priv->work);
sound/soc/fsl/fsl_esai.c
68
struct work_struct work;
sound/soc/fsl/fsl_esai.c
708
static void fsl_esai_hw_reset(struct work_struct *work)
sound/soc/fsl/fsl_esai.c
710
struct fsl_esai *esai_priv = container_of(work, struct fsl_esai, work);
sound/soc/fsl/fsl_xcvr.c
1360
static void reset_rx_work(struct work_struct *work)
sound/soc/fsl/fsl_xcvr.c
1362
struct fsl_xcvr *xcvr = container_of(work, struct fsl_xcvr, work_rst);
sound/soc/fsl/imx-pcm-rpmsg.c
126
queue_work(info->rpmsg_wq, &info->work_list[index].work);
sound/soc/fsl/imx-pcm-rpmsg.c
637
static void imx_rpmsg_pcm_work(struct work_struct *work)
sound/soc/fsl/imx-pcm-rpmsg.c
645
work_of_rpmsg = container_of(work, struct work_of_rpmsg, work);
sound/soc/fsl/imx-pcm-rpmsg.c
719
INIT_WORK(&info->work_list[i].work, imx_rpmsg_pcm_work);
sound/soc/fsl/imx-pcm-rpmsg.h
445
struct work_struct work;
sound/soc/generic/test-component.c
420
static void test_component_dwork(struct work_struct *work)
sound/soc/generic/test-component.c
422
struct test_priv *priv = container_of(work, struct test_priv, dwork.work);
sound/soc/intel/atom/sst/sst.c
193
void sst_process_pending_msg(struct work_struct *work)
sound/soc/intel/atom/sst/sst.c
195
struct intel_sst_drv *ctx = container_of(work,
sound/soc/intel/atom/sst/sst.h
490
void sst_process_pending_msg(struct work_struct *work);
sound/soc/intel/avs/cldma.c
52
static void cldma_memcpy_work(struct work_struct *work);
sound/soc/intel/avs/cldma.c
88
static void cldma_memcpy_work(struct work_struct *work)
sound/soc/intel/avs/cldma.c
90
struct hda_cldma *cl = container_of(work, struct hda_cldma, memcpy_work.work);
sound/soc/intel/avs/core.c
202
static void avs_hda_probe_work(struct work_struct *work)
sound/soc/intel/avs/core.c
204
struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
sound/soc/intel/avs/ipc.c
151
static void avs_dsp_recovery_work(struct work_struct *work)
sound/soc/intel/avs/ipc.c
153
struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work);
sound/soc/intel/avs/ipc.c
173
if (current_work() != &ipc->d0ix_work.work)
sound/soc/intel/avs/ipc.c
54
static void avs_dsp_d0ix_work(struct work_struct *work)
sound/soc/intel/avs/ipc.c
56
struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work);
sound/soc/intel/avs/pcm.c
67
static void avs_period_elapsed_work(struct work_struct *work)
sound/soc/intel/avs/pcm.c
69
struct avs_dma_data *data = container_of(work, struct avs_dma_data, period_elapsed_work);
sound/soc/intel/boards/sof_es8336.c
115
static void pcm_pop_work_events(struct work_struct *work)
sound/soc/intel/boards/sof_es8336.c
118
container_of(work, struct sof_es8336_private, pcm_pop_work.work);
sound/soc/renesas/siu.h
99
struct work_struct work;
sound/soc/renesas/siu_pcm.c
201
static void siu_io_work(struct work_struct *work)
sound/soc/renesas/siu_pcm.c
203
struct siu_stream *siu_stream = container_of(work, struct siu_stream,
sound/soc/renesas/siu_pcm.c
204
work);
sound/soc/renesas/siu_pcm.c
253
queue_work(system_highpri_wq, &siu_stream->work);
sound/soc/renesas/siu_pcm.c
520
INIT_WORK(&(*port_info)->playback.work, siu_io_work);
sound/soc/renesas/siu_pcm.c
521
INIT_WORK(&(*port_info)->capture.work, siu_io_work);
sound/soc/renesas/siu_pcm.c
534
cancel_work_sync(&port_info->capture.work);
sound/soc/renesas/siu_pcm.c
535
cancel_work_sync(&port_info->playback.work);
sound/soc/renesas/siu_pcm.c
73
queue_work(system_highpri_wq, &siu_stream->work);
sound/soc/renesas/siu_pcm.c
96
queue_work(system_highpri_wq, &siu_stream->work);
sound/soc/sdca/sdca_class.c
140
static void class_boot_work(struct work_struct *work)
sound/soc/sdca/sdca_class.c
142
struct sdca_class_drv *drv = container_of(work,
sound/soc/sdca/sdca_fdl.c
330
static void sdca_fdl_timeout_work(struct work_struct *work)
sound/soc/sdca/sdca_fdl.c
332
struct fdl_state *fdl_state = container_of(work, struct fdl_state,
sound/soc/sdca/sdca_fdl.c
333
timeout.work);
sound/soc/sdca/sdca_ump.c
249
void sdca_ump_cancel_timeout(struct delayed_work *work)
sound/soc/sdca/sdca_ump.c
251
cancel_delayed_work_sync(work);
sound/soc/sdca/sdca_ump.c
255
void sdca_ump_schedule_timeout(struct delayed_work *work, unsigned int timeout_us)
sound/soc/sdca/sdca_ump.c
260
queue_delayed_work(system_dfl_wq, work, usecs_to_jiffies(timeout_us));
sound/soc/soc-core.c
489
static void close_delayed_work(struct work_struct *work) {
sound/soc/soc-core.c
491
container_of(work, struct snd_soc_pcm_runtime,
sound/soc/soc-core.c
492
delayed_work.work);
sound/soc/soc-core.c
755
static void soc_resume_deferred(struct work_struct *work)
sound/soc/soc-core.c
758
container_of(work, struct snd_soc_card,
sound/soc/soc-jack.c
242
queue_delayed_work(system_power_efficient_wq, &gpio->work,
sound/soc/soc-jack.c
249
static void gpio_work(struct work_struct *work)
sound/soc/soc-jack.c
253
gpio = container_of(work, struct snd_soc_jack_gpio, work.work);
sound/soc/soc-jack.c
271
queue_delayed_work(system_power_efficient_wq, &gpio->work, 0);
sound/soc/soc-jack.c
287
cancel_delayed_work_sync(&gpios[i].work);
sound/soc/soc-jack.c
352
INIT_DELAYED_WORK(&gpios[i].work, gpio_work);
sound/soc/soc-jack.c
384
schedule_delayed_work(&gpios[i].work,
sound/soc/sof/compress.c
31
static void snd_sof_compr_fragment_elapsed_work(struct work_struct *work)
sound/soc/sof/compress.c
34
container_of(work, struct snd_sof_pcm_stream,
sound/soc/sof/compress.c
40
void snd_sof_compr_init_elapsed_work(struct work_struct *work)
sound/soc/sof/compress.c
42
INIT_WORK(work, snd_sof_compr_fragment_elapsed_work);
sound/soc/sof/core.c
596
static void sof_probe_work(struct work_struct *work)
sound/soc/sof/core.c
599
container_of(work, struct snd_sof_dev, probe_work);
sound/soc/sof/intel/hda-dsp.c
1028
cancel_work_sync(&hda->mic_privacy.work);
sound/soc/sof/intel/hda-dsp.c
1168
void hda_dsp_d0i3_work(struct work_struct *work)
sound/soc/sof/intel/hda-dsp.c
1170
struct sof_intel_hda_dev *hdev = container_of(work,
sound/soc/sof/intel/hda-dsp.c
1172
d0i3_work.work);
sound/soc/sof/intel/hda-dsp.c
998
cancel_work_sync(&hda->mic_privacy.work);
sound/soc/sof/intel/hda.c
983
cancel_work_sync(&hda->mic_privacy.work);
sound/soc/sof/intel/hda.h
492
struct work_struct work;
sound/soc/sof/intel/hda.h
637
void hda_dsp_d0i3_work(struct work_struct *work);
sound/soc/sof/intel/ptl.c
30
static void sof_ptl_mic_privacy_work(struct work_struct *work)
sound/soc/sof/intel/ptl.c
32
struct sof_intel_hda_dev *hdev = container_of(work,
sound/soc/sof/intel/ptl.c
34
mic_privacy.work);
sound/soc/sof/intel/ptl.c
61
schedule_work(&hdev->mic_privacy.work);
sound/soc/sof/intel/ptl.c
84
INIT_WORK(&hdev->mic_privacy.work, sof_ptl_mic_privacy_work);
sound/soc/sof/pcm.c
26
static void snd_sof_pcm_period_elapsed_work(struct work_struct *work)
sound/soc/sof/pcm.c
29
container_of(work, struct snd_sof_pcm_stream,
sound/soc/sof/pcm.c
35
void snd_sof_pcm_init_elapsed_work(struct work_struct *work)
sound/soc/sof/pcm.c
37
INIT_WORK(work, snd_sof_pcm_period_elapsed_work);
sound/soc/sof/sof-audio.h
636
void snd_sof_pcm_init_elapsed_work(struct work_struct *work);
sound/soc/sof/sof-audio.h
659
void snd_sof_compr_init_elapsed_work(struct work_struct *work);
sound/soc/sof/sof-audio.h
662
static inline void snd_sof_compr_init_elapsed_work(struct work_struct *work) { }
sound/soc/sunxi/sun8i-codec.c
1348
static void sun8i_codec_jack_work(struct work_struct *work)
sound/soc/sunxi/sun8i-codec.c
1350
struct sun8i_codec *scodec = container_of(work, struct sun8i_codec,
sound/soc/sunxi/sun8i-codec.c
1351
jack_work.work);
sound/usb/line6/driver.c
722
static void line6_startup_work(struct work_struct *work)
sound/usb/line6/driver.c
725
container_of(work, struct usb_line6, startup_work.work);
sound/usb/midi.c
1205
flush_work(&port->ep->work);
sound/usb/midi.c
1223
queue_work(system_highpri_wq, &port->ep->work);
sound/usb/midi.c
129
struct work_struct work;
sound/usb/midi.c
1487
INIT_WORK(&ep->work, snd_usbmidi_out_work);
sound/usb/midi.c
1549
cancel_work_sync(&ep->out->work);
sound/usb/midi.c
326
static void snd_usbmidi_out_work(struct work_struct *work)
sound/usb/midi.c
329
container_of(work, struct snd_usb_midi_out_endpoint, work);
sound/usb/misc/ua101.c
247
static void playback_work(struct work_struct *work)
sound/usb/misc/ua101.c
249
struct ua101 *ua = container_of(work, struct ua101, playback_work);
sound/usb/mixer_scarlett2.c
1229
struct delayed_work work;
sound/usb/mixer_scarlett2.c
2692
cancel_delayed_work_sync(&private->work);
sound/usb/mixer_scarlett2.c
2740
schedule_delayed_work(&private->work, msecs_to_jiffies(2000));
sound/usb/mixer_scarlett2.c
2797
static void scarlett2_config_save_work(struct work_struct *work)
sound/usb/mixer_scarlett2.c
2800
container_of(work, struct scarlett2_data, work.work);
sound/usb/mixer_scarlett2.c
8172
cancel_delayed_work_sync(&private->work);
sound/usb/mixer_scarlett2.c
8181
if (cancel_delayed_work_sync(&private->work))
sound/usb/mixer_scarlett2.c
8280
INIT_DELAYED_WORK(&private->work, scarlett2_config_save_work);
sound/usb/usx2y/us144mkii.c
261
void tascam_stop_work_handler(struct work_struct *work)
sound/usb/usx2y/us144mkii.c
264
container_of(work, struct tascam_card, stop_work);
sound/usb/usx2y/us144mkii.h
318
void tascam_stop_work_handler(struct work_struct *work);
sound/usb/usx2y/us144mkii_capture.c
157
void tascam_capture_work_handler(struct work_struct *work)
sound/usb/usx2y/us144mkii_capture.c
160
container_of(work, struct tascam_card, capture_work);
sound/usb/usx2y/us144mkii_midi.c
14
static void tascam_midi_in_work_handler(struct work_struct *work)
sound/usb/usx2y/us144mkii_midi.c
17
container_of(work, struct tascam_card, midi_in_work);
sound/usb/usx2y/us144mkii_midi.c
218
static void tascam_midi_out_work_handler(struct work_struct *work)
sound/usb/usx2y/us144mkii_midi.c
221
container_of(work, struct tascam_card, midi_out_work);
sound/usb/usx2y/us144mkii_pcm.h
163
void tascam_capture_work_handler(struct work_struct *work);
sound/usb/usx2y/us144mkii_pcm.h
71
void tascam_stop_pcm_work_handler(struct work_struct *work);
sound/usb/usx2y/us144mkii_playback.c
447
void tascam_stop_pcm_work_handler(struct work_struct *work)
sound/usb/usx2y/us144mkii_playback.c
450
container_of(work, struct tascam_card, stop_pcm_work);
sound/virtio/virtio_pcm.c
308
static void virtsnd_pcm_period_elapsed(struct work_struct *work)
sound/virtio/virtio_pcm.c
311
container_of(work, struct virtio_pcm_substream, elapsed_period);
sound/x86/intel_hdmi_audio.c
1514
static void had_audio_wq(struct work_struct *work)
sound/x86/intel_hdmi_audio.c
1517
container_of(work, struct snd_intelhad, hdmi_audio_wq);
tools/include/uapi/linux/pkt_sched.h
444
__u64 work; /* total work done */
tools/perf/builtin-kwork.c
1007
struct kwork_work *work,
tools/perf/builtin-kwork.c
1013
work->class = class;
tools/perf/builtin-kwork.c
1014
work->cpu = sample->cpu;
tools/perf/builtin-kwork.c
1017
work->id = evsel__intval_common(evsel, sample, "common_pid");
tools/perf/builtin-kwork.c
1018
work->name = NULL;
tools/perf/builtin-kwork.c
1020
work->id = evsel__intval(evsel, sample, "irq");
tools/perf/builtin-kwork.c
1021
work->name = evsel__strval(evsel, sample, "name");
tools/perf/builtin-kwork.c
1025
static void irq_work_name(struct kwork_work *work, char *buf, int len)
tools/perf/builtin-kwork.c
1027
snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
tools/perf/builtin-kwork.c
1135
struct kwork_work *work,
tools/perf/builtin-kwork.c
1143
work->class = class;
tools/perf/builtin-kwork.c
1144
work->cpu = sample->cpu;
tools/perf/builtin-kwork.c
1147
work->id = evsel__intval_common(evsel, sample, "common_pid");
tools/perf/builtin-kwork.c
1148
work->name = NULL;
tools/perf/builtin-kwork.c
1151
work->id = num;
tools/perf/builtin-kwork.c
1152
work->name = evsel__softirq_name(evsel, num);
tools/perf/builtin-kwork.c
1156
static void softirq_work_name(struct kwork_work *work, char *buf, int len)
tools/perf/builtin-kwork.c
1158
snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
tools/perf/builtin-kwork.c
1235
struct kwork_work *work,
tools/perf/builtin-kwork.c
1245
work->class = class;
tools/perf/builtin-kwork.c
1246
work->cpu = sample->cpu;
tools/perf/builtin-kwork.c
1247
work->id = evsel__intval(evsel, sample, "work");
tools/perf/builtin-kwork.c
1248
work->name = function_addr == 0 ? NULL :
tools/perf/builtin-kwork.c
1252
static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
tools/perf/builtin-kwork.c
1254
if (work->name != NULL)
tools/perf/builtin-kwork.c
1255
snprintf(buf, len, "(w)%s", work->name);
tools/perf/builtin-kwork.c
1257
snprintf(buf, len, "(w)0x%" PRIx64, work->id);
tools/perf/builtin-kwork.c
1303
struct kwork_work *work,
tools/perf/builtin-kwork.c
1309
work->class = class;
tools/perf/builtin-kwork.c
1310
work->cpu = sample->cpu;
tools/perf/builtin-kwork.c
1313
work->id = evsel__intval(evsel, sample, "prev_pid");
tools/perf/builtin-kwork.c
1314
work->name = strdup(evsel__strval(evsel, sample, "prev_comm"));
tools/perf/builtin-kwork.c
1316
work->id = evsel__intval(evsel, sample, "next_pid");
tools/perf/builtin-kwork.c
1317
work->name = strdup(evsel__strval(evsel, sample, "next_comm"));
tools/perf/builtin-kwork.c
1321
static void sched_work_name(struct kwork_work *work, char *buf, int len)
tools/perf/builtin-kwork.c
1323
snprintf(buf, len, "%s", work->name);
tools/perf/builtin-kwork.c
1348
static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
tools/perf/builtin-kwork.c
1360
if (work->class && work->class->work_name) {
tools/perf/builtin-kwork.c
1361
work->class->work_name(work, kwork_name,
tools/perf/builtin-kwork.c
1371
ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
tools/perf/builtin-kwork.c
1379
(double)work->total_runtime / NSEC_PER_MSEC);
tools/perf/builtin-kwork.c
1383
(double)work->total_latency /
tools/perf/builtin-kwork.c
1384
work->nr_atoms / NSEC_PER_MSEC);
tools/perf/builtin-kwork.c
1390
ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
tools/perf/builtin-kwork.c
1396
timestamp__scnprintf_usec(work->max_runtime_start,
tools/perf/builtin-kwork.c
1399
timestamp__scnprintf_usec(work->max_runtime_end,
tools/perf/builtin-kwork.c
1404
(double)work->max_runtime / NSEC_PER_MSEC,
tools/perf/builtin-kwork.c
1412
timestamp__scnprintf_usec(work->max_latency_start,
tools/perf/builtin-kwork.c
1415
timestamp__scnprintf_usec(work->max_latency_end,
tools/perf/builtin-kwork.c
1420
(double)work->max_latency / NSEC_PER_MSEC,
tools/perf/builtin-kwork.c
1639
static int top_print_work(struct perf_kwork *kwork __maybe_unused, struct kwork_work *work)
tools/perf/builtin-kwork.c
1648
ret += printf(" %*" PRIu64 " ", PRINT_PID_WIDTH, work->id);
tools/perf/builtin-kwork.c
1654
ret += printf(" %*d ", PRINT_PID_WIDTH, work->tgid);
tools/perf/builtin-kwork.c
1661
(double)work->cpu_usage / 100);
tools/perf/builtin-kwork.c
1668
(double)work->total_runtime / NSEC_PER_MSEC);
tools/perf/builtin-kwork.c
1675
work->is_kthread ? "[" : "",
tools/perf/builtin-kwork.c
1676
work->name,
tools/perf/builtin-kwork.c
1677
work->is_kthread ? "]" : "");
tools/perf/builtin-kwork.c
1679
ret += printf(" %-*s", PRINT_TASK_NAME_WIDTH, work->name);
tools/perf/builtin-kwork.c
1839
struct kwork_work *work)
tools/perf/builtin-kwork.c
1845
count = nr_list_entry(&work->atom_list[i]);
tools/perf/builtin-kwork.c
1855
struct kwork_work *work = NULL;
tools/perf/builtin-kwork.c
1857
work = work_new(key);
tools/perf/builtin-kwork.c
1858
if (work == NULL)
tools/perf/builtin-kwork.c
1861
work_insert(&class->work_root, work, &kwork->cmp_id);
tools/perf/builtin-kwork.c
1862
return work;
tools/perf/builtin-kwork.c
1907
struct kwork_work *work;
tools/perf/builtin-kwork.c
1924
work = rb_entry(next, struct kwork_work, node);
tools/perf/builtin-kwork.c
1925
process_skipped_events(kwork, work);
tools/perf/builtin-kwork.c
1927
if (work->nr_atoms != 0) {
tools/perf/builtin-kwork.c
1928
report_print_work(kwork, work);
tools/perf/builtin-kwork.c
1930
kwork->all_runtime += work->total_runtime;
tools/perf/builtin-kwork.c
1931
kwork->all_count += work->nr_atoms;
tools/perf/builtin-kwork.c
2000
struct kwork_work *work;
tools/perf/builtin-kwork.c
2010
work = rb_entry(next, struct kwork_work, node);
tools/perf/builtin-kwork.c
2011
BUG_ON(work->cpu >= MAX_NR_CPUS);
tools/perf/builtin-kwork.c
2012
stat->cpus_runtime[work->cpu].total += work->total_runtime;
tools/perf/builtin-kwork.c
2013
stat->cpus_runtime[MAX_NR_CPUS].total += work->total_runtime;
tools/perf/builtin-kwork.c
2019
struct kwork_work *work)
tools/perf/builtin-kwork.c
2023
if (work->id == 0) {
tools/perf/builtin-kwork.c
2024
stat->cpus_runtime[work->cpu].idle += work->total_runtime;
tools/perf/builtin-kwork.c
2025
stat->cpus_runtime[MAX_NR_CPUS].idle += work->total_runtime;
tools/perf/builtin-kwork.c
2031
struct kwork_work *work)
tools/perf/builtin-kwork.c
2036
stat->cpus_runtime[work->cpu].irq += work->total_runtime;
tools/perf/builtin-kwork.c
2037
stat->cpus_runtime[MAX_NR_CPUS].irq += work->total_runtime;
tools/perf/builtin-kwork.c
2039
stat->cpus_runtime[work->cpu].softirq += work->total_runtime;
tools/perf/builtin-kwork.c
2040
stat->cpus_runtime[MAX_NR_CPUS].softirq += work->total_runtime;
tools/perf/builtin-kwork.c
2045
struct kwork_work *work)
tools/perf/builtin-kwork.c
2058
work->id, work->cpu);
tools/perf/builtin-kwork.c
2062
if (work->total_runtime > data->total_runtime) {
tools/perf/builtin-kwork.c
2063
work->total_runtime -= data->total_runtime;
tools/perf/builtin-kwork.c
2072
struct kwork_work *work;
tools/perf/builtin-kwork.c
2082
work = rb_entry(next, struct kwork_work, node);
tools/perf/builtin-kwork.c
2084
if (work->total_runtime == 0)
tools/perf/builtin-kwork.c
2087
__set_bit(work->cpu, stat->all_cpus_bitmap);
tools/perf/builtin-kwork.c
2089
top_subtract_irq_runtime(kwork, work);
tools/perf/builtin-kwork.c
2091
work->cpu_usage = work->total_runtime * 10000 /
tools/perf/builtin-kwork.c
2092
stat->cpus_runtime[work->cpu].total;
tools/perf/builtin-kwork.c
2094
top_calc_idle_time(kwork, work);
tools/perf/builtin-kwork.c
2101
struct kwork_work *work)
tools/perf/builtin-kwork.c
2105
if (work->id != 0) {
tools/perf/builtin-kwork.c
2106
stat->cpus_runtime[work->cpu].load += work->total_runtime;
tools/perf/builtin-kwork.c
2107
stat->cpus_runtime[MAX_NR_CPUS].load += work->total_runtime;
tools/perf/builtin-kwork.c
2152
struct kwork_work *work;
tools/perf/builtin-kwork.c
2161
work = rb_entry(next, struct kwork_work, node);
tools/perf/builtin-kwork.c
2162
process_skipped_events(kwork, work);
tools/perf/builtin-kwork.c
2164
if (work->total_runtime == 0)
tools/perf/builtin-kwork.c
2167
top_print_work(kwork, work);
tools/perf/builtin-kwork.c
315
struct kwork_work *work;
tools/perf/builtin-kwork.c
319
work = container_of(node, struct kwork_work, node);
tools/perf/builtin-kwork.c
320
cmp = work_cmp(sort_list, key, work);
tools/perf/builtin-kwork.c
326
if (work->name == NULL)
tools/perf/builtin-kwork.c
327
work->name = key->name;
tools/perf/builtin-kwork.c
328
return work;
tools/perf/builtin-kwork.c
362
struct kwork_work *work = zalloc(sizeof(*work));
tools/perf/builtin-kwork.c
364
if (work == NULL) {
tools/perf/builtin-kwork.c
370
INIT_LIST_HEAD(&work->atom_list[i]);
tools/perf/builtin-kwork.c
372
work->id = key->id;
tools/perf/builtin-kwork.c
373
work->cpu = key->cpu;
tools/perf/builtin-kwork.c
374
work->name = key->name;
tools/perf/builtin-kwork.c
375
work->class = key->class;
tools/perf/builtin-kwork.c
376
return work;
tools/perf/builtin-kwork.c
383
struct kwork_work *work = work_search(root, key, sort_list);
tools/perf/builtin-kwork.c
385
if (work != NULL)
tools/perf/builtin-kwork.c
386
return work;
tools/perf/builtin-kwork.c
388
work = work_new(key);
tools/perf/builtin-kwork.c
389
if (work)
tools/perf/builtin-kwork.c
390
work_insert(root, work, sort_list);
tools/perf/builtin-kwork.c
392
return work;
tools/perf/builtin-kwork.c
409
struct kwork_work *work)
tools/perf/builtin-kwork.c
411
if (kwork->profile_name && work->name &&
tools/perf/builtin-kwork.c
412
(strcmp(work->name, kwork->profile_name) != 0)) {
tools/perf/builtin-kwork.c
420
struct kwork_work *work,
tools/perf/builtin-kwork.c
423
int cpu = work->cpu;
tools/perf/builtin-kwork.c
439
!profile_name_match(kwork, work)) {
tools/perf/builtin-kwork.c
458
struct kwork_work *work, key;
tools/perf/builtin-kwork.c
467
work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
tools/perf/builtin-kwork.c
468
if (work == NULL) {
tools/perf/builtin-kwork.c
473
if (!profile_event_match(kwork, work, sample)) {
tools/perf/builtin-kwork.c
479
dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
tools/perf/builtin-kwork.c
488
*ret_work = work;
tools/perf/builtin-kwork.c
491
last_atom = list_last_entry_or_null(&work->atom_list[src_type],
tools/perf/builtin-kwork.c
501
list_add_tail(&atom->list, &work->atom_list[src_type]);
tools/perf/builtin-kwork.c
516
struct kwork_work *work, key;
tools/perf/builtin-kwork.c
521
work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
tools/perf/builtin-kwork.c
523
*ret_work = work;
tools/perf/builtin-kwork.c
525
if (work == NULL)
tools/perf/builtin-kwork.c
528
if (!profile_event_match(kwork, work, sample))
tools/perf/builtin-kwork.c
531
atom = list_last_entry_or_null(&work->atom_list[dst_type],
tools/perf/builtin-kwork.c
538
list_add_tail(&src_atom->list, &work->atom_list[src_type]);
tools/perf/builtin-kwork.c
551
struct kwork_work *work;
tools/perf/builtin-kwork.c
555
work = rb_entry(next, struct kwork_work, node);
tools/perf/builtin-kwork.c
556
if ((cpu != -1 && work->id == id && work->cpu == cpu) ||
tools/perf/builtin-kwork.c
557
(cpu == -1 && work->id == id))
tools/perf/builtin-kwork.c
558
return work;
tools/perf/builtin-kwork.c
579
static void report_update_exit_event(struct kwork_work *work,
tools/perf/builtin-kwork.c
589
if ((delta > work->max_runtime) ||
tools/perf/builtin-kwork.c
590
(work->max_runtime == 0)) {
tools/perf/builtin-kwork.c
591
work->max_runtime = delta;
tools/perf/builtin-kwork.c
592
work->max_runtime_start = entry_time;
tools/perf/builtin-kwork.c
593
work->max_runtime_end = exit_time;
tools/perf/builtin-kwork.c
595
work->total_runtime += delta;
tools/perf/builtin-kwork.c
596
work->nr_atoms++;
tools/perf/builtin-kwork.c
618
struct kwork_work *work = NULL;
tools/perf/builtin-kwork.c
622
machine, &work);
tools/perf/builtin-kwork.c
623
if (work == NULL)
tools/perf/builtin-kwork.c
627
report_update_exit_event(work, atom, sample);
tools/perf/builtin-kwork.c
634
static void latency_update_entry_event(struct kwork_work *work,
tools/perf/builtin-kwork.c
644
if ((delta > work->max_latency) ||
tools/perf/builtin-kwork.c
645
(work->max_latency == 0)) {
tools/perf/builtin-kwork.c
646
work->max_latency = delta;
tools/perf/builtin-kwork.c
647
work->max_latency_start = raise_time;
tools/perf/builtin-kwork.c
648
work->max_latency_end = entry_time;
tools/perf/builtin-kwork.c
650
work->total_latency += delta;
tools/perf/builtin-kwork.c
651
work->nr_atoms++;
tools/perf/builtin-kwork.c
673
struct kwork_work *work = NULL;
tools/perf/builtin-kwork.c
677
machine, &work);
tools/perf/builtin-kwork.c
678
if (work == NULL)
tools/perf/builtin-kwork.c
682
latency_update_entry_event(work, atom, sample);
tools/perf/builtin-kwork.c
739
struct kwork_work *work,
tools/perf/builtin-kwork.c
764
printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
tools/perf/builtin-kwork.c
769
if (work->class && work->class->work_name) {
tools/perf/builtin-kwork.c
770
work->class->work_name(work, kwork_name,
tools/perf/builtin-kwork.c
832
struct kwork_work *work = NULL;
tools/perf/builtin-kwork.c
836
machine, &work, true);
tools/perf/builtin-kwork.c
840
if (work != NULL)
tools/perf/builtin-kwork.c
853
struct kwork_work *work = NULL;
tools/perf/builtin-kwork.c
866
machine, &work);
tools/perf/builtin-kwork.c
867
if (work == NULL) {
tools/perf/builtin-kwork.c
873
work->nr_atoms++;
tools/perf/builtin-kwork.c
874
timehist_print_event(kwork, work, atom, sample, &al);
tools/perf/builtin-kwork.c
883
static void top_update_runtime(struct kwork_work *work,
tools/perf/builtin-kwork.c
893
work->total_runtime += delta;
tools/perf/builtin-kwork.c
914
struct kwork_work *work, *sched_work;
tools/perf/builtin-kwork.c
920
machine, &work);
tools/perf/builtin-kwork.c
921
if (!work)
tools/perf/builtin-kwork.c
928
work->id, work->cpu);
tools/perf/builtin-kwork.c
930
top_update_runtime(work, atom, sample);
tools/perf/builtin-kwork.c
945
struct kwork_work *work;
tools/perf/builtin-kwork.c
949
machine, &work);
tools/perf/builtin-kwork.c
950
if (!work)
tools/perf/builtin-kwork.c
954
top_update_runtime(work, atom, sample);
tools/perf/tests/builtin-test.c
713
static int run_workload(const char *work, int argc, const char **argv)
tools/perf/tests/builtin-test.c
718
if (!strcmp(twl->name, work))
tools/perf/tests/builtin-test.c
722
pr_info("No workload found: %s\n", work);
tools/perf/tests/tests.h
225
#define DECLARE_WORKLOAD(work) \
tools/perf/tests/tests.h
226
extern struct test_workload workload__##work
tools/perf/tests/tests.h
228
#define DEFINE_WORKLOAD(work) \
tools/perf/tests/tests.h
229
struct test_workload workload__##work = { \
tools/perf/tests/tests.h
230
.name = #work, \
tools/perf/tests/tests.h
231
.func = work, \
tools/perf/util/bpf_kwork.c
267
struct kwork_work *work;
tools/perf/util/bpf_kwork.c
288
work = kwork->add_work(kwork, tmp.class, &tmp);
tools/perf/util/bpf_kwork.c
289
if (work == NULL)
tools/perf/util/bpf_kwork.c
293
work->nr_atoms = data->nr;
tools/perf/util/bpf_kwork.c
294
work->total_runtime = data->total_time;
tools/perf/util/bpf_kwork.c
295
work->max_runtime = data->max_time;
tools/perf/util/bpf_kwork.c
296
work->max_runtime_start = data->max_time_start;
tools/perf/util/bpf_kwork.c
297
work->max_runtime_end = data->max_time_end;
tools/perf/util/bpf_kwork.c
299
work->nr_atoms = data->nr;
tools/perf/util/bpf_kwork.c
300
work->total_latency = data->total_time;
tools/perf/util/bpf_kwork.c
301
work->max_latency = data->max_time;
tools/perf/util/bpf_kwork.c
302
work->max_latency_start = data->max_time_start;
tools/perf/util/bpf_kwork.c
303
work->max_latency_end = data->max_time_end;
tools/perf/util/bpf_kwork_top.c
217
static void read_task_info(struct kwork_work *work)
tools/perf/util/bpf_kwork_top.c
222
.pid = work->id,
tools/perf/util/bpf_kwork_top.c
223
.cpu = work->cpu,
tools/perf/util/bpf_kwork_top.c
233
work->tgid = data.tgid;
tools/perf/util/bpf_kwork_top.c
234
work->is_kthread = data.is_kthread;
tools/perf/util/bpf_kwork_top.c
235
work->name = strdup(data.comm);
tools/perf/util/bpf_kwork_top.c
242
struct kwork_work *work;
tools/perf/util/bpf_kwork_top.c
258
work = kwork->add_work(kwork, tmp.class, &tmp);
tools/perf/util/bpf_kwork_top.c
259
if (!work)
tools/perf/util/bpf_kwork_top.c
262
work->total_runtime = data->runtime;
tools/perf/util/bpf_kwork_top.c
263
read_task_info(work);
tools/perf/util/bpf_skel/kwork_trace.bpf.c
330
.id = (__u64)ctx->work,
tools/perf/util/bpf_skel/kwork_trace.bpf.c
343
.id = (__u64)ctx->work,
tools/perf/util/bpf_skel/kwork_trace.bpf.c
360
.id = (__u64)ctx->work,
tools/perf/util/bpf_skel/kwork_trace.bpf.c
373
.id = (__u64)ctx->work,
tools/perf/util/bpf_skel/vmlinux/vmlinux.h
139
void *work;
tools/perf/util/bpf_skel/vmlinux/vmlinux.h
146
void *work;
tools/perf/util/bpf_skel/vmlinux/vmlinux.h
153
void *work;
tools/perf/util/kwork.h
158
struct kwork_work *work,
tools/perf/util/kwork.h
164
void (*work_name)(struct kwork_work *work,
tools/testing/cxl/test/mem.c
656
void cxl_mockmem_sanitize_work(struct work_struct *work)
tools/testing/cxl/test/mem.c
659
container_of(work, typeof(*mds), security.poll_dwork.work);
tools/testing/nvdimm/test/nfit.c
214
struct work_struct work;
tools/testing/nvdimm/test/nfit.c
3347
INIT_WORK(&nfit_test->work, uc_error_notify);
tools/testing/nvdimm/test/nfit.c
821
static void uc_error_notify(struct work_struct *work)
tools/testing/nvdimm/test/nfit.c
823
struct nfit_test *t = container_of(work, typeof(*t), work);
tools/testing/nvdimm/test/nfit.c
849
queue_work(nfit_wq, &t->work);
tools/testing/selftests/arm64/fp/fp-stress.c
149
char work[1024];
tools/testing/selftests/arm64/fp/fp-stress.c
168
strncpy(work, child->output, sizeof(work) - 1);
tools/testing/selftests/arm64/fp/fp-stress.c
169
cur_work = strnlen(work, sizeof(work));
tools/testing/selftests/arm64/fp/fp-stress.c
178
work[cur_work] = read_data[cur_read++];
tools/testing/selftests/arm64/fp/fp-stress.c
180
if (work[cur_work] == '\n') {
tools/testing/selftests/arm64/fp/fp-stress.c
181
work[cur_work] = '\0';
tools/testing/selftests/arm64/fp/fp-stress.c
182
ksft_print_msg("%s: %s\n", child->name, work);
tools/testing/selftests/arm64/fp/fp-stress.c
190
work[cur_work] = '\0';
tools/testing/selftests/arm64/fp/fp-stress.c
191
ret = asprintf(&child->output, "%s", work);
tools/testing/selftests/arm64/gcs/gcs-stress.c
153
char work[1024];
tools/testing/selftests/arm64/gcs/gcs-stress.c
172
strncpy(work, child->output, sizeof(work) - 1);
tools/testing/selftests/arm64/gcs/gcs-stress.c
173
cur_work = strnlen(work, sizeof(work));
tools/testing/selftests/arm64/gcs/gcs-stress.c
182
work[cur_work] = read_data[cur_read++];
tools/testing/selftests/arm64/gcs/gcs-stress.c
184
if (work[cur_work] == '\n') {
tools/testing/selftests/arm64/gcs/gcs-stress.c
185
work[cur_work] = '\0';
tools/testing/selftests/arm64/gcs/gcs-stress.c
186
ksft_print_msg("%s: %s\n", child->name, work);
tools/testing/selftests/arm64/gcs/gcs-stress.c
194
work[cur_work] = '\0';
tools/testing/selftests/arm64/gcs/gcs-stress.c
195
ret = asprintf(&child->output, "%s", work);
tools/testing/selftests/bpf/progs/exhandler_kern.c
22
struct callback_head *work;
tools/testing/selftests/bpf/progs/exhandler_kern.c
38
work = task->task_works;
tools/testing/selftests/bpf/progs/exhandler_kern.c
39
func = work->func;
tools/testing/selftests/bpf/progs/exhandler_kern.c
44
barrier_var(work);
tools/testing/selftests/bpf/progs/exhandler_kern.c
45
if (work)
tools/testing/selftests/bpf/progs/file_reader.c
69
struct elem *work;
tools/testing/selftests/bpf/progs/file_reader.c
75
work = bpf_map_lookup_elem(&arrmap, &key);
tools/testing/selftests/bpf/progs/file_reader.c
76
if (!work) {
tools/testing/selftests/bpf/progs/file_reader.c
80
bpf_task_work_schedule_signal(task, &work->tw, &arrmap, task_work_callback);
tools/testing/selftests/bpf/progs/lpm_trie_bench.c
40
int BPF_PROG(trie_free_entry, struct work_struct *work)
tools/testing/selftests/bpf/progs/lpm_trie_bench.c
42
struct bpf_map *map = container_of(work, struct bpf_map, work);
tools/testing/selftests/bpf/progs/lpm_trie_bench.c
64
int BPF_PROG(trie_free_exit, struct work_struct *work)
tools/testing/selftests/bpf/progs/task_work.c
101
work = bpf_map_lookup_elem(&lrumap, &key);
tools/testing/selftests/bpf/progs/task_work.c
102
if (!work || work->data[0])
tools/testing/selftests/bpf/progs/task_work.c
104
bpf_task_work_schedule_resume(task, &work->tw, &lrumap, process_work);
tools/testing/selftests/bpf/progs/task_work.c
45
struct elem *work = value;
tools/testing/selftests/bpf/progs/task_work.c
47
bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);
tools/testing/selftests/bpf/progs/task_work.c
57
struct elem *work;
tools/testing/selftests/bpf/progs/task_work.c
65
work = bpf_map_lookup_elem(&hmap, &key);
tools/testing/selftests/bpf/progs/task_work.c
66
if (!work)
tools/testing/selftests/bpf/progs/task_work.c
68
bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work);
tools/testing/selftests/bpf/progs/task_work.c
75
struct elem *work;
tools/testing/selftests/bpf/progs/task_work.c
79
work = bpf_map_lookup_elem(&arrmap, &key);
tools/testing/selftests/bpf/progs/task_work.c
80
if (!work)
tools/testing/selftests/bpf/progs/task_work.c
82
bpf_task_work_schedule_signal(task, &work->tw, &arrmap, process_work);
tools/testing/selftests/bpf/progs/task_work.c
90
struct elem *work;
tools/testing/selftests/bpf/progs/task_work.c
95
work = bpf_map_lookup_elem(&lrumap, &key);
tools/testing/selftests/bpf/progs/task_work.c
96
if (work)
tools/testing/selftests/bpf/progs/task_work_fail.c
37
struct elem *work = value;
tools/testing/selftests/bpf/progs/task_work_fail.c
39
bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);
tools/testing/selftests/bpf/progs/task_work_fail.c
49
struct elem *work;
tools/testing/selftests/bpf/progs/task_work_fail.c
53
work = bpf_map_lookup_elem(&arrmap, &key);
tools/testing/selftests/bpf/progs/task_work_fail.c
54
if (!work)
tools/testing/selftests/bpf/progs/task_work_fail.c
56
bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work);
tools/testing/selftests/bpf/progs/task_work_fail.c
87
struct elem *work;
tools/testing/selftests/bpf/progs/task_work_fail.c
91
work = bpf_map_lookup_elem(&arrmap, &key);
tools/testing/selftests/bpf/progs/task_work_fail.c
92
if (!work)
tools/testing/selftests/bpf/progs/task_work_fail.c
94
bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work);
tools/testing/selftests/bpf/progs/task_work_stress.c
43
struct elem *work;
tools/testing/selftests/bpf/progs/task_work_stress.c
47
work = bpf_map_lookup_elem(&hmap, &key);
tools/testing/selftests/bpf/progs/task_work_stress.c
48
if (!work) {
tools/testing/selftests/bpf/progs/task_work_stress.c
50
work = bpf_map_lookup_elem(&hmap, &key);
tools/testing/selftests/bpf/progs/task_work_stress.c
51
if (!work)
tools/testing/selftests/bpf/progs/task_work_stress.c
54
err = bpf_task_work_schedule_signal(bpf_get_current_task_btf(), &work->tw, &hmap,
tools/testing/selftests/bpf/progs/wq.c
106
wq = &val->work;
tools/testing/selftests/bpf/progs/wq.c
16
struct bpf_wq work;
tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
1211
static void ctx_check_irq_fn(struct irq_work *work)
tools/testing/selftests/hid/progs/hid.c
448
struct bpf_wq work;
tools/testing/selftests/hid/progs/hid.c
458
static int wq_cb_sleepable(void *map, int *key, void *work)
tools/testing/selftests/hid/progs/hid.c
486
wq = &val->work;
tools/testing/selftests/livepatch/test_modules/test_klp_callbacks_busy.c
17
static void busymod_work_func(struct work_struct *work);
tools/testing/selftests/livepatch/test_modules/test_klp_callbacks_busy.c
18
static DECLARE_WORK(work, busymod_work_func);
tools/testing/selftests/livepatch/test_modules/test_klp_callbacks_busy.c
21
static void busymod_work_func(struct work_struct *work)
tools/testing/selftests/livepatch/test_modules/test_klp_callbacks_busy.c
40
schedule_work(&work);
tools/testing/selftests/livepatch/test_modules/test_klp_callbacks_busy.c
53
flush_work(&work);
tools/testing/selftests/livepatch/test_modules/test_klp_callbacks_busy.c
62
flush_work(&work);
tools/testing/selftests/livepatch/test_modules/test_klp_callbacks_demo.c
55
static void patched_work_func(struct work_struct *work)
virt/kvm/async_pf.c
113
if (work->wakeup_all)
virt/kvm/async_pf.c
114
WARN_ON_ONCE(work->work.func);
virt/kvm/async_pf.c
116
flush_work(&work->work);
virt/kvm/async_pf.c
117
kmem_cache_free(async_pf_cache, work);
virt/kvm/async_pf.c
124
struct kvm_async_pf *work =
virt/kvm/async_pf.c
126
typeof(*work), queue);
virt/kvm/async_pf.c
127
list_del(&work->queue);
virt/kvm/async_pf.c
130
flush_work(&work->work);
virt/kvm/async_pf.c
132
if (cancel_work_sync(&work->work))
virt/kvm/async_pf.c
133
kmem_cache_free(async_pf_cache, work);
virt/kvm/async_pf.c
139
struct kvm_async_pf *work =
virt/kvm/async_pf.c
141
typeof(*work), link);
virt/kvm/async_pf.c
142
list_del(&work->link);
virt/kvm/async_pf.c
145
kvm_flush_and_free_async_pf_work(work);
virt/kvm/async_pf.c
155
struct kvm_async_pf *work;
virt/kvm/async_pf.c
160
work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
virt/kvm/async_pf.c
162
list_del(&work->link);
virt/kvm/async_pf.c
165
kvm_arch_async_page_ready(vcpu, work);
virt/kvm/async_pf.c
167
kvm_arch_async_page_present(vcpu, work);
virt/kvm/async_pf.c
169
list_del(&work->queue);
virt/kvm/async_pf.c
171
kvm_flush_and_free_async_pf_work(work);
virt/kvm/async_pf.c
182
struct kvm_async_pf *work;
virt/kvm/async_pf.c
195
work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
virt/kvm/async_pf.c
196
if (!work)
virt/kvm/async_pf.c
199
work->wakeup_all = false;
virt/kvm/async_pf.c
200
work->vcpu = vcpu;
virt/kvm/async_pf.c
201
work->cr2_or_gpa = cr2_or_gpa;
virt/kvm/async_pf.c
202
work->addr = hva;
virt/kvm/async_pf.c
203
work->arch = *arch;
virt/kvm/async_pf.c
205
INIT_WORK(&work->work, async_pf_execute);
virt/kvm/async_pf.c
207
list_add_tail(&work->queue, &vcpu->async_pf.queue);
virt/kvm/async_pf.c
209
work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
virt/kvm/async_pf.c
211
schedule_work(&work->work);
virt/kvm/async_pf.c
218
struct kvm_async_pf *work;
virt/kvm/async_pf.c
224
work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
virt/kvm/async_pf.c
225
if (!work)
virt/kvm/async_pf.c
228
work->wakeup_all = true;
virt/kvm/async_pf.c
229
INIT_LIST_HEAD(&work->queue); /* for list_del to work */
virt/kvm/async_pf.c
233
list_add_tail(&work->link, &vcpu->async_pf.done);
virt/kvm/async_pf.c
45
static void async_pf_execute(struct work_struct *work)
virt/kvm/async_pf.c
48
container_of(work, struct kvm_async_pf, work);
virt/kvm/async_pf.c
99
static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
virt/kvm/eventfd.c
122
irqfd_shutdown(struct work_struct *work)
virt/kvm/eventfd.c
125
container_of(work, struct kvm_kernel_irqfd, shutdown);
virt/kvm/eventfd.c
42
irqfd_inject(struct work_struct *work)
virt/kvm/eventfd.c
45
container_of(work, struct kvm_kernel_irqfd, inject);