Symbol: work
crypto/krb5/src/appl/gss-sample/gss-server.c
643
struct _work_plan *work = (struct _work_plan *) param;
crypto/krb5/src/appl/gss-sample/gss-server.c
648
sign_server(work->s, work->server_creds, work->export);
crypto/krb5/src/appl/gss-sample/gss-server.c
649
closesocket(work->s);
crypto/krb5/src/appl/gss-sample/gss-server.c
650
free(work);
crypto/krb5/src/appl/gss-sample/gss-server.c
771
struct _work_plan *work = malloc(sizeof(struct _work_plan));
crypto/krb5/src/appl/gss-sample/gss-server.c
773
if (work == NULL) {
crypto/krb5/src/appl/gss-sample/gss-server.c
779
if ((work->s = accept(stmp, NULL, 0)) < 0) {
crypto/krb5/src/appl/gss-sample/gss-server.c
781
free(work);
crypto/krb5/src/appl/gss-sample/gss-server.c
785
work->server_creds = server_creds;
crypto/krb5/src/appl/gss-sample/gss-server.c
786
work->export = export;
crypto/krb5/src/appl/gss-sample/gss-server.c
789
worker_bee((void *) work);
crypto/krb5/src/appl/gss-sample/gss-server.c
795
_beginthread(worker_bee, 0, (void *) work);
crypto/krb5/src/appl/gss-sample/gss-server.c
797
closesocket(work->s);
crypto/krb5/src/appl/gss-sample/gss-server.c
798
free(work);
crypto/krb5/src/appl/gss-sample/gss-server.c
803
closesocket(work->s);
crypto/krb5/src/appl/gss-sample/gss-server.c
804
free(work);
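
The krb5 gss-sample entries above trace a simple per-connection worker pattern: allocate a _work_plan, accept() the connection into it, hand the plan to worker_bee (directly, or via _beginthread on threaded builds), and let the worker release both the socket and the plan. A minimal sketch of that pattern follows, assuming only the three fields the references show (s, server_creds, export); the sign_server stub and the serve_loop() wrapper are illustrative, not the sample's actual code.

/*
 * Hedged sketch of the gss-server.c per-connection worker pattern.
 * Field names follow the references above; everything else is a
 * reconstruction for illustration, not the original source.
 */
#include <stdlib.h>
#include <sys/socket.h>
#include <unistd.h>

struct _work_plan {
    int   s;              /* accepted connection socket */
    void *server_creds;   /* gss_cred_id_t in the real sample */
    int   export;         /* whether to exercise context export */
};

static void
sign_server(int s, void *creds, int export)
{
    (void)s; (void)creds; (void)export;   /* stand-in for the real handler */
}

static void
worker_bee(void *param)
{
    struct _work_plan *work = (struct _work_plan *)param;

    sign_server(work->s, work->server_creds, work->export);
    close(work->s);     /* closesocket() on the Windows path */
    free(work);         /* the worker owns the plan once it starts */
}

static void
serve_loop(int stmp, void *server_creds, int export)
{
    for (;;) {
        struct _work_plan *work = malloc(sizeof(*work));

        if (work == NULL)
            break;
        if ((work->s = accept(stmp, NULL, 0)) < 0) {
            free(work);                  /* nothing handed off yet */
            break;
        }
        work->server_creds = server_creds;
        work->export = export;
        worker_bee((void *)work);        /* threaded builds: _beginthread(worker_bee, 0, work) */
    }
}
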
crypto/openssh/smult_curve25519_ref.c
124
static void mainloop(unsigned int work[64],const unsigned char e[32])
crypto/openssh/smult_curve25519_ref.c
145
for (j = 0;j < 32;++j) xzm1[j] = work[j];
crypto/openssh/smult_curve25519_ref.c
173
mult(xzn1b + 32,r,work);
crypto/openssh/smult_curve25519_ref.c
177
for (j = 0;j < 64;++j) work[j] = xzm[j];
crypto/openssh/smult_curve25519_ref.c
251
unsigned int work[96];
crypto/openssh/smult_curve25519_ref.c
258
for (i = 0;i < 32;++i) work[i] = p[i];
crypto/openssh/smult_curve25519_ref.c
259
mainloop(work,e);
crypto/openssh/smult_curve25519_ref.c
260
recip(work + 32,work + 32);
crypto/openssh/smult_curve25519_ref.c
261
mult(work + 64,work,work + 32);
crypto/openssh/smult_curve25519_ref.c
262
freeze(work + 64);
crypto/openssh/smult_curve25519_ref.c
263
for (i = 0;i < 32;++i) q[i] = work[64 + i];
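
The smult_curve25519_ref.c entries all revolve around one flat scratch buffer: work[96] is three 32-byte field elements laid end to end, with the input x-coordinate at offset 0, the projective z at offset 32, and the final result at offset 64. The sketch below mirrors that layout; mainloop's signature is copied from the reference above, while the recip/mult/freeze prototypes and the crypto_scalarmult_sketch wrapper are assumptions standing in for the file's own definitions.

/*
 * Layout sketch for the work[] buffer in smult_curve25519_ref.c.
 * The helpers are declared only; their definitions live in that file
 * (prototypes for recip/mult/freeze are assumed here).
 */
void mainloop(unsigned int work[64], const unsigned char e[32]);
void recip(unsigned int out[32], unsigned int z[32]);
void mult(unsigned int out[32], unsigned int a[32], unsigned int b[32]);
void freeze(unsigned int a[32]);

int
crypto_scalarmult_sketch(unsigned char *q, const unsigned char *n,
    const unsigned char *p)
{
    unsigned int work[96];   /* [0..31] x, [32..63] z, [64..95] result */
    unsigned char e[32];
    int i;

    for (i = 0; i < 32; ++i) e[i] = n[i];
    e[0] &= 248; e[31] &= 127; e[31] |= 64;     /* clamp the scalar */
    for (i = 0; i < 32; ++i) work[i] = p[i];    /* load the input point */
    mainloop(work, e);                          /* Montgomery ladder over x,z */
    recip(work + 32, work + 32);                /* z := z^-1 */
    mult(work + 64, work, work + 32);           /* result := x * z^-1 */
    freeze(work + 64);                          /* canonical form mod 2^255 - 19 */
    for (i = 0; i < 32; ++i) q[i] = work[64 + i];
    return 0;
}
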
crypto/openssl/apps/lib/apps.c
1914
char *work;
crypto/openssl/apps/lib/apps.c
1931
work = OPENSSL_strdup(cp);
crypto/openssl/apps/lib/apps.c
1932
if (work == NULL) {
crypto/openssl/apps/lib/apps.c
1939
char *bp = work;
crypto/openssl/apps/lib/apps.c
2009
OPENSSL_free(work);
crypto/openssl/apps/lib/apps.c
2014
OPENSSL_free(work);
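
In apps.c, work is the usual working-copy idiom: duplicate the caller's string with OPENSSL_strdup(), walk and mutate the copy through a secondary pointer (bp in the references), and OPENSSL_free() the copy on every exit path. A minimal sketch follows, assuming a hypothetical parse_fields_sketch() helper; only the strdup/free calls correspond to the real code.

#include <openssl/crypto.h>

/*
 * Hedged sketch of the working-copy idiom around "work" in apps.c;
 * the tokenizing body is illustrative, not the real parser.
 */
int
parse_fields_sketch(const char *cp)
{
    char *work, *bp;
    int ok = 0;

    work = OPENSSL_strdup(cp);
    if (work == NULL)
        return 0;                /* allocation failure */

    for (bp = work; *bp != '\0'; bp++) {
        if (*bp == ',')
            *bp = '\0';          /* split the copy in place */
    }
    ok = 1;

    OPENSSL_free(work);          /* freed on success and error paths alike */
    return ok;
}
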
lib/libomp/omp-tools.h
1386
ompt_record_work_t work;
sbin/hastd/subr.h
41
#define KEEP_ERRNO(work) do { \
sbin/hastd/subr.h
45
work; \
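
The two subr.h fragments above are part of hastd's KEEP_ERRNO() wrapper: run an arbitrary statement while shielding the caller's errno. A minimal sketch of that shape follows; the temporary's name and the exact body are assumptions, only the overall save/run/restore pattern is taken from the references.

#include <errno.h>

/* Hedged reconstruction of the errno-preserving wrapper shape. */
#define KEEP_ERRNO(work) do {                \
        int _keep_errno = errno;             \
        work;                                \
        errno = _keep_errno;                 \
} while (0)

/* Illustrative use (hypothetical logger): log without clobbering errno. */
/* KEEP_ERRNO(log_warning("metadata write failed")); */
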
sys/compat/linuxkpi/common/include/linux/kthread.h
122
kthread_init_work(struct kthread_work *work, kthread_work_func_t func)
sys/compat/linuxkpi/common/include/linux/kthread.h
124
work->tq = NULL;
sys/compat/linuxkpi/common/include/linux/kthread.h
125
work->func = func;
sys/compat/linuxkpi/common/include/linux/kthread.h
126
TASK_INIT(&work->task, 0, lkpi_kthread_work_fn, work);
sys/compat/linuxkpi/common/include/linux/kthread.h
130
kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
sys/compat/linuxkpi/common/include/linux/kthread.h
134
error = taskqueue_enqueue_flags(worker->tq, &work->task,
sys/compat/linuxkpi/common/include/linux/kthread.h
137
work->tq = worker->tq;
sys/compat/linuxkpi/common/include/linux/kthread.h
142
kthread_cancel_work_sync(struct kthread_work *work)
sys/compat/linuxkpi/common/include/linux/kthread.h
146
if (work->tq != NULL &&
sys/compat/linuxkpi/common/include/linux/kthread.h
147
taskqueue_cancel(work->tq, &work->task, &pending) != 0)
sys/compat/linuxkpi/common/include/linux/kthread.h
148
taskqueue_drain(work->tq, &work->task);
sys/compat/linuxkpi/common/include/linux/kthread.h
154
kthread_flush_work(struct kthread_work *work)
sys/compat/linuxkpi/common/include/linux/kthread.h
156
if (work->tq != NULL)
sys/compat/linuxkpi/common/include/linux/kthread.h
157
taskqueue_drain(work->tq, &work->task);
sys/compat/linuxkpi/common/include/linux/kthread.h
45
typedef void (*kthread_work_func_t)(struct kthread_work *work);
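
The LinuxKPI kthread.h entries map Linux kthread_work onto a FreeBSD taskqueue: init records the callback and wraps the work in a task, queueing remembers the worker's taskqueue and enqueues, and cancel/flush go through taskqueue_cancel()/taskqueue_drain(). The sketch below follows the field names visible above (tq, func, task); the struct layouts, the return convention, and the TASKQUEUE_FAIL_IF_PENDING flag (the real call's flag argument is cut off in the listing) are assumptions.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>

struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);

struct kthread_worker {
    struct taskqueue *tq;          /* backing taskqueue for this worker */
};

struct kthread_work {
    struct taskqueue *tq;          /* queue the work was last put on */
    kthread_work_func_t func;      /* Linux-side callback */
    struct task task;              /* FreeBSD task bouncing into func */
};

static void
lkpi_kthread_work_fn(void *context, int pending __unused)
{
    struct kthread_work *work = context;

    work->func(work);              /* taskqueue thread -> Linux callback */
}

static inline void
kthread_init_work(struct kthread_work *work, kthread_work_func_t func)
{
    work->tq = NULL;
    work->func = func;
    TASK_INIT(&work->task, 0, lkpi_kthread_work_fn, work);
}

static inline bool
kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
{
    int error;

    /* Flag assumed; the real enqueue's flag argument is elided above. */
    error = taskqueue_enqueue_flags(worker->tq, &work->task,
        TASKQUEUE_FAIL_IF_PENDING);
    if (error == 0)
        work->tq = worker->tq;
    return (error == 0);
}
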
sys/compat/linuxkpi/common/include/linux/net_dim.h
393
schedule_work(&dim->work);
sys/compat/linuxkpi/common/include/linux/net_dim.h
69
struct work_struct work;
sys/compat/linuxkpi/common/include/linux/sched.h
89
struct work_struct *work; /* current work struct, if set */
sys/compat/linuxkpi/common/include/linux/workqueue.h
107
to_delayed_work(struct work_struct *work)
sys/compat/linuxkpi/common/include/linux/workqueue.h
109
return (container_of(work, struct delayed_work, work));
sys/compat/linuxkpi/common/include/linux/workqueue.h
112
#define INIT_WORK(work, fn) \
sys/compat/linuxkpi/common/include/linux/workqueue.h
114
(work)->func = (fn); \
sys/compat/linuxkpi/common/include/linux/workqueue.h
115
(work)->work_queue = NULL; \
sys/compat/linuxkpi/common/include/linux/workqueue.h
116
atomic_set(&(work)->state, 0); \
sys/compat/linuxkpi/common/include/linux/workqueue.h
117
TASK_INIT(&(work)->work_task, 0, linux_work_fn, (work)); \
sys/compat/linuxkpi/common/include/linux/workqueue.h
121
INIT_WORK(&(_work)->work, (_fn))
sys/compat/linuxkpi/common/include/linux/workqueue.h
123
#define INIT_WORK_ONSTACK(work, fn) \
sys/compat/linuxkpi/common/include/linux/workqueue.h
124
INIT_WORK(work, fn)
sys/compat/linuxkpi/common/include/linux/workqueue.h
138
#define queue_work(wq, work) \
sys/compat/linuxkpi/common/include/linux/workqueue.h
139
linux_queue_work_on(WORK_CPU_UNBOUND, wq, work)
sys/compat/linuxkpi/common/include/linux/workqueue.h
141
#define schedule_work(work) \
sys/compat/linuxkpi/common/include/linux/workqueue.h
142
linux_queue_work_on(WORK_CPU_UNBOUND, system_wq, work)
sys/compat/linuxkpi/common/include/linux/workqueue.h
150
#define queue_work_on(cpu, wq, work) \
sys/compat/linuxkpi/common/include/linux/workqueue.h
151
linux_queue_work_on(cpu, wq, work)
sys/compat/linuxkpi/common/include/linux/workqueue.h
189
linux_work_pending(&(dwork)->work)
sys/compat/linuxkpi/common/include/linux/workqueue.h
191
#define cancel_work(work) \
sys/compat/linuxkpi/common/include/linux/workqueue.h
192
linux_cancel_work(work)
sys/compat/linuxkpi/common/include/linux/workqueue.h
197
#define cancel_work_sync(work) \
sys/compat/linuxkpi/common/include/linux/workqueue.h
198
linux_cancel_work_sync(work)
sys/compat/linuxkpi/common/include/linux/workqueue.h
203
#define flush_work(work) \
sys/compat/linuxkpi/common/include/linux/workqueue.h
204
linux_flush_work(work)
sys/compat/linuxkpi/common/include/linux/workqueue.h
215
#define work_pending(work) \
sys/compat/linuxkpi/common/include/linux/workqueue.h
216
linux_work_pending(work)
sys/compat/linuxkpi/common/include/linux/workqueue.h
218
#define work_busy(work) \
sys/compat/linuxkpi/common/include/linux/workqueue.h
219
linux_work_busy(work)
sys/compat/linuxkpi/common/include/linux/workqueue.h
221
#define destroy_work_on_stack(work) \
sys/compat/linuxkpi/common/include/linux/workqueue.h
262
queue_work_node(int node __unused, struct workqueue_struct *wq, struct work_struct *work)
sys/compat/linuxkpi/common/include/linux/workqueue.h
264
return (queue_work(wq, work));
sys/compat/linuxkpi/common/include/linux/workqueue.h
74
struct work_struct work;
sys/compat/linuxkpi/common/include/linux/workqueue.h
89
struct work_struct work;
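
The workqueue.h entries define the Linux work_struct API as thin macros over linux_* helpers backed by taskqueues. Driver-side use is unchanged from Linux: embed a work_struct, INIT_WORK() it once, schedule_work()/queue_work() to defer, and cancel_work_sync() on teardown. A small usage sketch in that style follows; the softc layout and handler names are illustrative only.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct mydrv_softc {
    struct work_struct reset_work;   /* embedded so container_of() recovers the softc */
    int resets;
};

static void
mydrv_reset_task(struct work_struct *work)
{
    struct mydrv_softc *sc = container_of(work, struct mydrv_softc, reset_work);

    sc->resets++;                    /* a real handler would reset the hardware here */
}

static void
mydrv_attach(struct mydrv_softc *sc)
{
    INIT_WORK(&sc->reset_work, mydrv_reset_task);
}

static void
mydrv_error_intr(struct mydrv_softc *sc)
{
    schedule_work(&sc->reset_work);      /* expands to linux_queue_work_on(..., system_wq, ...) */
}

static void
mydrv_detach(struct mydrv_softc *sc)
{
    cancel_work_sync(&sc->reset_work);   /* waits out a running handler before teardown */
}
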
sys/compat/linuxkpi/common/include/net/cfg80211.h
1114
struct wiphy_work work;
sys/compat/linuxkpi/common/include/net/cfg80211.h
2238
wiphy_work_init(&wdwk->work, fn);
sys/compat/linuxkpi/common/src/linux_80211.c
7727
lkpi_wiphy_work(struct work_struct *work)
sys/compat/linuxkpi/common/src/linux_80211.c
7733
lwiphy = container_of(work, struct lkpi_wiphy, wwk);
sys/compat/linuxkpi/common/src/linux_80211.c
7750
schedule_work(work);
sys/compat/linuxkpi/common/src/linux_80211.c
7828
wiphy_work_queue(wdwk->wiphy, &wdwk->work);
sys/compat/linuxkpi/common/src/linux_80211.c
7838
wiphy_work_queue(wiphy, &wdwk->work);
sys/compat/linuxkpi/common/src/linux_80211.c
7850
wiphy_work_cancel(wiphy, &wdwk->work);
sys/compat/linuxkpi/common/src/linux_80211.c
7860
wiphy_work_flush(wiphy, &wdwk->work);
sys/compat/linuxkpi/common/src/linux_kthread.c
170
struct kthread_work *work = context;
sys/compat/linuxkpi/common/src/linux_kthread.c
172
work->func(work);
sys/compat/linuxkpi/common/src/linux_work.c
105
if (exec->target == work) {
sys/compat/linuxkpi/common/src/linux_work.c
121
tq = dwork->work.work_queue->taskqueue;
sys/compat/linuxkpi/common/src/linux_work.c
122
taskqueue_enqueue(tq, &dwork->work.work_task);
sys/compat/linuxkpi/common/src/linux_work.c
132
struct work_struct *work)
sys/compat/linuxkpi/common/src/linux_work.c
143
return (!work_pending(work));
sys/compat/linuxkpi/common/src/linux_work.c
145
switch (linux_update_state(&work->state, states)) {
sys/compat/linuxkpi/common/src/linux_work.c
148
if (linux_work_exec_unblock(work) != 0)
sys/compat/linuxkpi/common/src/linux_work.c
152
work->work_queue = wq;
sys/compat/linuxkpi/common/src/linux_work.c
153
taskqueue_enqueue(wq->taskqueue, &work->work_task);
sys/compat/linuxkpi/common/src/linux_work.c
169
linux_queue_work_on(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
sys/compat/linuxkpi/common/src/linux_work.c
181
if (!linux_work_pending(&rwork->work)) {
sys/compat/linuxkpi/common/src/linux_work.c
199
if (linux_work_pending(&rwork->work)) {
sys/compat/linuxkpi/common/src/linux_work.c
201
linux_flush_work(&rwork->work);
sys/compat/linuxkpi/common/src/linux_work.c
204
return (linux_flush_work(&rwork->work));
sys/compat/linuxkpi/common/src/linux_work.c
227
return (!work_pending(&dwork->work));
sys/compat/linuxkpi/common/src/linux_work.c
237
switch (linux_update_state(&dwork->work.state, states)) {
sys/compat/linuxkpi/common/src/linux_work.c
240
if (delay == 0 && linux_work_exec_unblock(&dwork->work)) {
sys/compat/linuxkpi/common/src/linux_work.c
247
dwork->work.work_queue = wq;
sys/compat/linuxkpi/common/src/linux_work.c
280
struct work_struct *work;
sys/compat/linuxkpi/common/src/linux_work.c
288
work = context;
sys/compat/linuxkpi/common/src/linux_work.c
289
wq = work->work_queue;
sys/compat/linuxkpi/common/src/linux_work.c
292
exec.target = work;
sys/compat/linuxkpi/common/src/linux_work.c
298
switch (linux_update_state(&work->state, states)) {
sys/compat/linuxkpi/common/src/linux_work.c
305
task->work = work;
sys/compat/linuxkpi/common/src/linux_work.c
308
work->func(work);
sys/compat/linuxkpi/common/src/linux_work.c
311
task->work = NULL;
sys/compat/linuxkpi/common/src/linux_work.c
315
if (exec.target != work) {
sys/compat/linuxkpi/common/src/linux_work.c
317
exec.target = work;
sys/compat/linuxkpi/common/src/linux_work.c
345
linux_work_fn(&dwork->work, pending);
sys/compat/linuxkpi/common/src/linux_work.c
360
switch (linux_update_state(&dwork->work.state, states)) {
sys/compat/linuxkpi/common/src/linux_work.c
377
linux_cancel_work(struct work_struct *work)
sys/compat/linuxkpi/common/src/linux_work.c
388
MPASS(atomic_read(&work->state) != WORK_ST_TIMER);
sys/compat/linuxkpi/common/src/linux_work.c
389
MPASS(atomic_read(&work->state) != WORK_ST_CANCEL);
sys/compat/linuxkpi/common/src/linux_work.c
391
switch (linux_update_state(&work->state, states)) {
sys/compat/linuxkpi/common/src/linux_work.c
393
tq = work->work_queue->taskqueue;
sys/compat/linuxkpi/common/src/linux_work.c
394
if (taskqueue_cancel(tq, &work->work_task, NULL) == 0)
sys/compat/linuxkpi/common/src/linux_work.c
408
linux_cancel_work_sync(struct work_struct *work)
sys/compat/linuxkpi/common/src/linux_work.c
423
switch (linux_update_state(&work->state, states)) {
sys/compat/linuxkpi/common/src/linux_work.c
428
tq = work->work_queue->taskqueue;
sys/compat/linuxkpi/common/src/linux_work.c
429
if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
sys/compat/linuxkpi/common/src/linux_work.c
430
taskqueue_drain(tq, &work->work_task);
sys/compat/linuxkpi/common/src/linux_work.c
433
tq = work->work_queue->taskqueue;
sys/compat/linuxkpi/common/src/linux_work.c
434
if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
sys/compat/linuxkpi/common/src/linux_work.c
435
taskqueue_drain(tq, &work->work_task);
sys/compat/linuxkpi/common/src/linux_work.c
482
switch (linux_update_state(&dwork->work.state, states)) {
sys/compat/linuxkpi/common/src/linux_work.c
487
atomic_cmpxchg(&dwork->work.state,
sys/compat/linuxkpi/common/src/linux_work.c
494
tq = dwork->work.work_queue->taskqueue;
sys/compat/linuxkpi/common/src/linux_work.c
495
if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) {
sys/compat/linuxkpi/common/src/linux_work.c
496
atomic_cmpxchg(&dwork->work.state,
sys/compat/linuxkpi/common/src/linux_work.c
531
state = linux_update_state(&dwork->work.state, states);
sys/compat/linuxkpi/common/src/linux_work.c
540
tq = dwork->work.work_queue->taskqueue;
sys/compat/linuxkpi/common/src/linux_work.c
541
ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
sys/compat/linuxkpi/common/src/linux_work.c
545
taskqueue_drain(tq, &dwork->work.work_task);
sys/compat/linuxkpi/common/src/linux_work.c
548
tq = dwork->work.work_queue->taskqueue;
sys/compat/linuxkpi/common/src/linux_work.c
549
ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
sys/compat/linuxkpi/common/src/linux_work.c
552
taskqueue_drain(tq, &dwork->work.work_task);
sys/compat/linuxkpi/common/src/linux_work.c
574
linux_flush_work(struct work_struct *work)
sys/compat/linuxkpi/common/src/linux_work.c
582
switch (atomic_read(&work->state)) {
sys/compat/linuxkpi/common/src/linux_work.c
586
tq = work->work_queue->taskqueue;
sys/compat/linuxkpi/common/src/linux_work.c
587
retval = taskqueue_poll_is_busy(tq, &work->work_task);
sys/compat/linuxkpi/common/src/linux_work.c
588
taskqueue_drain(tq, &work->work_task);
sys/compat/linuxkpi/common/src/linux_work.c
607
switch (atomic_read(&dwork->work.state)) {
sys/compat/linuxkpi/common/src/linux_work.c
615
tq = dwork->work.work_queue->taskqueue;
sys/compat/linuxkpi/common/src/linux_work.c
616
retval = taskqueue_poll_is_busy(tq, &dwork->work.work_task);
sys/compat/linuxkpi/common/src/linux_work.c
617
taskqueue_drain(tq, &dwork->work.work_task);
sys/compat/linuxkpi/common/src/linux_work.c
627
linux_work_pending(struct work_struct *work)
sys/compat/linuxkpi/common/src/linux_work.c
629
switch (atomic_read(&work->state)) {
sys/compat/linuxkpi/common/src/linux_work.c
643
linux_work_busy(struct work_struct *work)
sys/compat/linuxkpi/common/src/linux_work.c
647
switch (atomic_read(&work->state)) {
sys/compat/linuxkpi/common/src/linux_work.c
651
tq = work->work_queue->taskqueue;
sys/compat/linuxkpi/common/src/linux_work.c
652
return (taskqueue_poll_is_busy(tq, &work->work_task));
sys/compat/linuxkpi/common/src/linux_work.c
694
dwork->work.func = func;
sys/compat/linuxkpi/common/src/linux_work.c
695
TASK_INIT(&dwork->work.work_task, 0, linux_delayed_work_fn, dwork);
sys/compat/linuxkpi/common/src/linux_work.c
704
return (current->work);
sys/compat/linuxkpi/common/src/linux_work.c
93
linux_work_exec_unblock(struct work_struct *work)
sys/compat/linuxkpi/common/src/linux_work.c
99
wq = work->work_queue;
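
Most of the linux_work.c entries branch on a small atomic state machine: linux_update_state() advances work->state through a per-operation transition table and reports the prior state, and queue, cancel, and flush decide which taskqueue call to issue from that prior state. The sketch below shows only the cmpxchg-retry shape of such an update; the state names (other than WORK_ST_TIMER and WORK_ST_CANCEL, which appear above) and the transition table are simplified stand-ins, not the file's real definitions.

#include <linux/atomic.h>

enum {
    WST_IDLE,       /* not queued anywhere */
    WST_QUEUED,     /* sitting on a taskqueue */
    WST_RUNNING,    /* handler currently executing */
    WST_MAX
};

/* Atomically step to states[current] and report the state we left. */
static int
update_state_sketch(atomic_t *v, const int states[WST_MAX])
{
    int old = atomic_read(v);

    for (;;) {
        int prev = atomic_cmpxchg(v, old, states[old]);

        if (prev == old)
            return (old);   /* transition taken */
        old = prev;         /* lost a race; retry from the observed state */
    }
}

/* Callers then switch on the returned (previous) state, e.g. only
 * enqueueing onto the taskqueue when the work was idle beforehand. */
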
sys/dev/ahci/ahci.c
1092
error = bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work,
sys/dev/ahci/ahci.c
1096
error = bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work,
sys/dev/ahci/ahci.c
1099
bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
sys/dev/ahci/ahci.c
1167
bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
sys/dev/ahci/ahci.c
1169
ch->dma.work = NULL;
sys/dev/ahci/ahci.c
1360
uint32_t work;
sys/dev/ahci/ahci.c
1364
work = ATA_INL(ch->r_mem, AHCI_P_CMD);
sys/dev/ahci/ahci.c
1366
work |= AHCI_P_CMD_PARTIAL;
sys/dev/ahci/ahci.c
1368
work |= AHCI_P_CMD_SLUMBER;
sys/dev/ahci/ahci.c
1369
ATA_OUTL(ch->r_mem, AHCI_P_CMD, work);
sys/dev/ahci/ahci.c
1647
ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset);
sys/dev/ahci/ahci.c
1676
ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset);
sys/dev/ahci/ahci.c
1685
(ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
sys/dev/ahci/ahci.c
1947
(ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
sys/dev/ahci/ahci.c
982
uint64_t work;
sys/dev/ahci/ahci.c
987
work = ch->dma.work_bus + AHCI_CL_OFFSET;
sys/dev/ahci/ahci.c
988
ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff);
sys/dev/ahci/ahci.c
989
ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32);
sys/dev/ahci/ahci.c
990
work = ch->dma.rfis_bus;
sys/dev/ahci/ahci.c
991
ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff);
sys/dev/ahci/ahci.c
992
ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32);
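
In ahci.c the local work serves two small roles: a 64-bit scratch for splitting a bus address across a pair of 32-bit registers (CLB/CLBU and FB/FBU), and a 32-bit scratch for read-modify-write of AHCI_P_CMD. A sketch of the address split follows; write32() is a stand-in for ATA_OUTL(ch->r_mem, reg, val), and the register-name parameters are illustrative.

#include <stdint.h>

static void
write32(uint32_t reg, uint32_t val)
{
    (void)reg; (void)val;                 /* stand-in for ATA_OUTL(ch->r_mem, reg, val) */
}

static void
program_base_sketch(uint32_t reg_lo, uint32_t reg_hi, uint64_t bus_addr)
{
    uint64_t work = bus_addr;

    write32(reg_lo, work & 0xffffffff);   /* low dword */
    write32(reg_hi, work >> 32);          /* high dword (zero under 32-bit DMA) */
}
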
sys/dev/ahci/ahci.h
370
uint8_t *work; /* workspace */
sys/dev/ata/ata-all.h
345
u_int8_t *work; /* workspace */
sys/dev/ata/ata-dma.c
106
if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work,
sys/dev/ata/ata-dma.c
111
if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work,
sys/dev/ata/ata-dma.c
114
bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
sys/dev/ata/ata-dma.c
132
bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
sys/dev/ata/ata-dma.c
134
ch->dma.work = NULL;
sys/dev/ata/chipsets/ata-promise.c
661
u_int32_t *wordp = (u_int32_t *)ch->dma.work;
sys/dev/bnxt/bnxt_en/if_bnxt.c
2098
static void bnxt_fw_reset_task(struct work_struct *work)
sys/dev/bnxt/bnxt_en/if_bnxt.c
2100
struct bnxt_softc *bp = container_of(work, struct bnxt_softc, fw_reset_task.work);
sys/dev/bnxt/bnxt_en/if_bnxt.c
2270
static void bnxt_sp_task(struct work_struct *work)
sys/dev/bnxt/bnxt_en/if_bnxt.c
2272
struct bnxt_softc *bp = container_of(work, struct bnxt_softc, sp_task);
sys/dev/bnxt/bnxt_re/bnxt_re.h
1038
struct work_struct work;
sys/dev/bnxt/bnxt_re/bnxt_re.h
203
struct work_struct work;
sys/dev/bnxt/bnxt_re/bnxt_re.h
438
struct work_struct work;
sys/dev/bnxt/bnxt_re/bnxt_re.h
444
struct work_struct work;
sys/dev/bnxt/bnxt_re/bnxt_re.h
613
struct work_struct work;
sys/dev/bnxt/bnxt_re/ib_verbs.c
1022
INIT_WORK(&resolve_dmac_work->work, bnxt_re_resolve_dmac_task);
sys/dev/bnxt/bnxt_re/ib_verbs.c
1023
queue_work(rdev->resolve_wq, &resolve_dmac_work->work);
sys/dev/bnxt/bnxt_re/ib_verbs.c
3769
cancel_work_sync(&ib_cq->work);
sys/dev/bnxt/bnxt_re/ib_verbs.c
52
void bnxt_re_resolve_dmac_task(struct work_struct *work)
sys/dev/bnxt/bnxt_re/ib_verbs.c
58
container_of(work, struct bnxt_re_resolve_dmac_work, work);
sys/dev/bnxt/bnxt_re/ib_verbs.h
401
void bnxt_re_resolve_dmac_task(struct work_struct *work);
sys/dev/bnxt/bnxt_re/main.c
3244
static void bnxt_re_worker(struct work_struct *work)
sys/dev/bnxt/bnxt_re/main.c
3246
struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
sys/dev/bnxt/bnxt_re/main.c
3247
worker.work);
sys/dev/bnxt/bnxt_re/main.c
329
struct work_struct work;
sys/dev/bnxt/bnxt_re/main.c
4021
INIT_WORK(&re_work->work, bnxt_re_task);
sys/dev/bnxt/bnxt_re/main.c
4025
queue_work(bnxt_re_wq, &re_work->work);
sys/dev/bnxt/bnxt_re/main.c
4052
static void bnxt_re_task(struct work_struct *work)
sys/dev/bnxt/bnxt_re/main.c
4059
re_work = container_of(work, struct bnxt_re_work, work);
sys/dev/bnxt/bnxt_re/main.c
420
static void bnxt_re_dcb_wq_task(struct work_struct *work)
sys/dev/bnxt/bnxt_re/main.c
426
container_of(work, struct bnxt_re_dcb_work, work);
sys/dev/bnxt/bnxt_re/main.c
677
static void bnxt_re_dbq_wq_task(struct work_struct *work)
sys/dev/bnxt/bnxt_re/main.c
680
container_of(work, struct bnxt_re_dbq_work, work);
sys/dev/bnxt/bnxt_re/main.c
768
INIT_WORK(&dcb_work->work, bnxt_re_dcb_wq_task);
sys/dev/bnxt/bnxt_re/main.c
769
queue_work(rdev->dcb_wq, &dcb_work->work);
sys/dev/bnxt/bnxt_re/main.c
788
INIT_WORK(&dbq_work->work, bnxt_re_dbq_wq_task);
sys/dev/bnxt/bnxt_re/main.c
789
queue_work(rdev->dbq_wq, &dbq_work->work);
sys/dev/bnxt/bnxt_re/main.c
801
INIT_WORK(&dbq_work->work, bnxt_re_dbq_wq_task);
sys/dev/bnxt/bnxt_re/main.c
802
queue_work(rdev->dbq_wq, &dbq_work->work);
sys/dev/bnxt/bnxt_re/main.c
812
static void bnxt_re_db_fifo_check(struct work_struct *work)
sys/dev/bnxt/bnxt_re/main.c
814
struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
sys/dev/bnxt/bnxt_re/main.c
856
static void bnxt_re_pacing_timer_exp(struct work_struct *work)
sys/dev/bnxt/bnxt_re/main.c
858
struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
sys/dev/bnxt/bnxt_re/main.c
859
dbq_pacing_work.work);
sys/dev/bnxt/bnxt_re/main.c
955
INIT_WORK(&dbq_work->work, bnxt_re_dbq_wq_task);
sys/dev/bnxt/bnxt_re/main.c
956
queue_work(rdev->dbq_wq, &dbq_work->work);
sys/dev/bnxt/bnxt_re/qplib_fp.c
146
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
sys/dev/bnxt/bnxt_re/qplib_fp.c
149
container_of(work, struct bnxt_qplib_nq_work, work);
sys/dev/bnxt/bnxt_re/qplib_fp.c
2282
INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
sys/dev/bnxt/bnxt_re/qplib_fp.c
2283
queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
sys/dev/bnxt/bnxt_re/qplib_fp.c
2392
INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
sys/dev/bnxt/bnxt_re/qplib_fp.c
2393
queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
sys/dev/bnxt/bnxt_re/qplib_fp.h
539
struct work_struct work;
sys/dev/bxe/bxe.c
4461
long work = atomic_load_acq_long(&sc->chip_tq_flags);
sys/dev/bxe/bxe.c
4463
switch (work)
sys/dev/cxgb/cxgb_sge.c
2995
int work;
sys/dev/cxgb/cxgb_sge.c
2998
work = process_responses(adap, rspq_to_qset(rq), -1);
sys/dev/cxgb/cxgb_sge.c
3007
return (work);
sys/dev/cxgbe/iw_cxgbe/qp.c
572
static void free_qp_work(struct work_struct *work)
sys/dev/cxgbe/iw_cxgbe/qp.c
578
qhp = container_of(work, struct c4iw_qp, free_work);
sys/dev/cxgbe/t4_netmap.c
1343
u_int work = 0;
sys/dev/cxgbe/t4_netmap.c
1420
netmap_rx_irq(ifp, nm_rxq->nid, &work);
sys/dev/cxgbe/t4_netmap.c
1448
netmap_rx_irq(ifp, nm_rxq->nid, &work);
sys/dev/drm2/ttm/ttm_memory.c
258
TASK_INIT(&glob->work, 0, ttm_shrink_work, glob);
sys/dev/drm2/ttm/ttm_memory.c
292
taskqueue_drain(glob->swap_queue, &glob->work);
sys/dev/drm2/ttm/ttm_memory.c
322
taskqueue_enqueue(glob->swap_queue, &glob->work);
sys/dev/drm2/ttm/ttm_memory.h
73
struct task work;
sys/dev/irdma/irdma_cm.c
2205
irdma_cm_free_ah_worker(struct work_struct *work)
sys/dev/irdma/irdma_cm.c
2207
struct irdma_sc_ah *ah = container_of(work, struct irdma_sc_ah, ah_free_work);
sys/dev/irdma/irdma_cm.c
3438
struct disconn_work *work;
sys/dev/irdma/irdma_cm.c
3441
work = kzalloc(sizeof(*work), GFP_ATOMIC);
sys/dev/irdma/irdma_cm.c
3442
if (!work)
sys/dev/irdma/irdma_cm.c
3450
kfree(work);
sys/dev/irdma/irdma_cm.c
3456
work->iwqp = iwqp;
sys/dev/irdma/irdma_cm.c
3457
INIT_WORK(&work->work, irdma_disconnect_worker);
sys/dev/irdma/irdma_cm.c
3458
queue_work(iwdev->cleanup_wq, &work->work);
sys/dev/irdma/irdma_cm.c
3660
irdma_disconnect_worker(struct work_struct *work)
sys/dev/irdma/irdma_cm.c
3662
struct disconn_work *dwork = container_of(work, struct disconn_work, work);
sys/dev/irdma/irdma_cm.c
38
static void irdma_disconnect_worker(struct work_struct *work);
sys/dev/irdma/irdma_cm.c
4352
irdma_cm_event_handler(struct work_struct *work)
sys/dev/irdma/irdma_cm.c
4354
struct irdma_cm_event *event = container_of(work, struct irdma_cm_event, event_work);
sys/dev/irdma/irdma_hw.c
2278
cqp_compl_worker(struct work_struct *work)
sys/dev/irdma/irdma_hw.c
2280
struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
sys/dev/irdma/irdma_kcompat.c
355
struct irdma_udqs_work *work;
sys/dev/irdma/irdma_kcompat.c
374
work = kzalloc(sizeof(*work), GFP_ATOMIC);
sys/dev/irdma/irdma_kcompat.c
375
if (!work) {
sys/dev/irdma/irdma_kcompat.c
380
work->iwqp = iwqp;
sys/dev/irdma/irdma_kcompat.c
381
work->user_prio = user_pri;
sys/dev/irdma/irdma_kcompat.c
382
work->qs_change = qs_change;
sys/dev/irdma/irdma_kcompat.c
383
INIT_WORK(&work->work, irdma_udqp_qs_worker);
sys/dev/irdma/irdma_kcompat.c
386
queue_work(rf->iwdev->cleanup_wq, &work->work);
sys/dev/irdma/irdma_main.h
572
void cqp_poll_worker(struct work_struct *work);
sys/dev/irdma/irdma_main.h
615
void irdma_udqp_qs_worker(struct work_struct *work);
sys/dev/irdma/irdma_main.h
624
void cqp_compl_worker(struct work_struct *work);
sys/dev/irdma/irdma_utils.c
2618
irdma_udqp_qs_worker(struct work_struct *work)
sys/dev/irdma/irdma_utils.c
2620
struct irdma_udqs_work *udqs_work = container_of(work, struct irdma_udqs_work, work);
sys/dev/irdma/irdma_utils.c
2652
cqp_poll_worker(struct work_struct *work)
sys/dev/irdma/irdma_utils.c
2654
struct delayed_work *dwork = to_delayed_work(work);
sys/dev/irdma/irdma_verbs.c
1433
irdma_free_cqbuf(struct work_struct *work)
sys/dev/irdma/irdma_verbs.c
1435
struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
sys/dev/irdma/irdma_verbs.c
1462
queue_work(iwdev->cleanup_wq, &cq_buf->work);
sys/dev/irdma/irdma_verbs.c
1596
INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
sys/dev/irdma/irdma_verbs.c
691
irdma_user_flush_worker(struct work_struct *work)
sys/dev/irdma/irdma_verbs.c
693
struct delayed_work *dwork = to_delayed_work(work);
sys/dev/irdma/irdma_verbs.c
710
irdma_kern_flush_worker(struct work_struct *work)
sys/dev/irdma/irdma_verbs.c
712
struct delayed_work *dwork = to_delayed_work(work);
sys/dev/irdma/irdma_verbs.h
119
struct work_struct work;
sys/dev/irdma/irdma_verbs.h
179
struct work_struct work;
sys/dev/irdma/irdma_verbs.h
184
struct work_struct work;
sys/dev/irdma/irdma_verbs.h
269
struct work_struct work;
sys/dev/irdma/irdma_verbs.h
398
void irdma_kern_flush_worker(struct work_struct *work);
sys/dev/irdma/irdma_verbs.h
399
void irdma_user_flush_worker(struct work_struct *work);
sys/dev/liquidio/base/lio_device.h
289
struct timeout_task work;
sys/dev/liquidio/base/lio_request_manager.c
163
TIMEOUT_TASK_INIT(db_tq->tq, &db_tq->work, 0, lio_check_db_timeout,
sys/dev/liquidio/base/lio_request_manager.c
171
taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work, 1);
sys/dev/liquidio/base/lio_request_manager.c
194
&oct->check_db_tq[iq_no].work,
sys/dev/liquidio/base/lio_request_manager.c
197
&oct->check_db_tq[iq_no].work);
sys/dev/liquidio/base/lio_request_manager.c
577
taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work,
sys/dev/liquidio/base/lio_response_manager.c
202
taskqueue_enqueue_timeout(ctq->tq, &ctq->work, lio_ms_to_ticks(50));
sys/dev/liquidio/base/lio_response_manager.c
63
TIMEOUT_TASK_INIT(ctq->tq, &ctq->work, 0, lio_poll_req_completion,
sys/dev/liquidio/base/lio_response_manager.c
70
taskqueue_enqueue_timeout(ctq->tq, &ctq->work, lio_ms_to_ticks(50));
sys/dev/liquidio/base/lio_response_manager.c
81
&oct->dma_comp_tq.work, NULL))
sys/dev/liquidio/base/lio_response_manager.c
83
&oct->dma_comp_tq.work);
sys/dev/liquidio/lio_main.c
1834
taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
sys/dev/liquidio/lio_main.c
1851
TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0,
sys/dev/liquidio/lio_main.c
1860
taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
sys/dev/liquidio/lio_main.c
1873
&lio->rx_status_tq.work, NULL))
sys/dev/liquidio/lio_main.c
1875
&lio->rx_status_tq.work);
sys/dev/mlx4/device.h
1021
void handle_port_mgmt_change_event(struct work_struct *work);
sys/dev/mlx4/mlx4_core/fw.h
257
void mlx4_opreq_action(struct work_struct *work);
sys/dev/mlx4/mlx4_core/mlx4.h
1041
void mlx4_master_comm_channel(struct work_struct *work);
sys/dev/mlx4/mlx4_core/mlx4.h
1042
void mlx4_gen_slave_eqe(struct work_struct *work);
sys/dev/mlx4/mlx4_core/mlx4.h
1043
void mlx4_master_handle_slave_flr(struct work_struct *work);
sys/dev/mlx4/mlx4_core/mlx4.h
642
struct work_struct work;
sys/dev/mlx4/mlx4_core/mlx4_catas.c
260
static void catas_reset(struct work_struct *work)
sys/dev/mlx4/mlx4_core/mlx4_catas.c
263
container_of(work, struct mlx4_dev_persistent,
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1852
struct mlx4_vf_immed_vlan_work *work;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1879
work = kzalloc(sizeof(*work), GFP_KERNEL);
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1880
if (!work)
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1889
kfree(work);
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1898
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1906
work->orig_vlan_id = vp_oper->state.default_vlan;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1907
work->orig_vlan_ix = vp_oper->vlan_idx;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1911
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1913
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1922
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1925
work->port = port;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1926
work->slave = slave;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1927
work->qos = vp_oper->state.default_qos;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1928
work->qos_vport = vp_oper->state.qos_vport;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1929
work->vlan_id = vp_oper->state.default_vlan;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1930
work->vlan_ix = vp_oper->vlan_idx;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1931
work->vlan_proto = vp_oper->state.vlan_proto;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1932
work->priv = priv;
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1933
INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
1934
queue_work(priv->mfunc.master.comm_wq, &work->work);
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
2235
void mlx4_master_comm_channel(struct work_struct *work)
sys/dev/mlx4/mlx4_core/mlx4_cmd.c
2238
container_of(work,
sys/dev/mlx4/mlx4_core/mlx4_eq.c
136
void mlx4_gen_slave_eqe(struct work_struct *work)
sys/dev/mlx4/mlx4_core/mlx4_eq.c
139
container_of(work, struct mlx4_mfunc_master_ctx,
sys/dev/mlx4/mlx4_core/mlx4_eq.c
444
void mlx4_master_handle_slave_flr(struct work_struct *work)
sys/dev/mlx4/mlx4_core/mlx4_eq.c
447
container_of(work, struct mlx4_mfunc_master_ctx,
sys/dev/mlx4/mlx4_core/mlx4_eq.c
838
int work = 0;
sys/dev/mlx4/mlx4_core/mlx4_eq.c
844
work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
sys/dev/mlx4/mlx4_core/mlx4_eq.c
846
return IRQ_RETVAL(work);
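
In mlx4_eq.c (and mthca_eq.c below), work is an int accumulator for the interrupt handler: each event-queue poll ORs in whether it consumed events, and the handler returns IRQ_RETVAL(work) so any nonzero total maps to IRQ_HANDLED. A sketch of that shape follows; mlx4_eq_int() is declared only, standing in for the real poll routine, and the wrapper's signature is illustrative.

#include <linux/interrupt.h>

struct mlx4_dev;
struct mlx4_eq;

int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq);   /* real poll routine */

static irqreturn_t
eq_interrupt_sketch(struct mlx4_dev *dev, struct mlx4_eq *eqs, int neq)
{
    int work = 0;
    int i;

    for (i = 0; i < neq; ++i)
        work |= mlx4_eq_int(dev, &eqs[i]);   /* nonzero if events were consumed */

    return IRQ_RETVAL(work);                 /* IRQ_HANDLED iff work != 0 */
}
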
sys/dev/mlx4/mlx4_core/mlx4_fw.c
2667
void mlx4_opreq_action(struct work_struct *work)
sys/dev/mlx4/mlx4_core/mlx4_fw.c
2669
struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5237
struct mlx4_vf_immed_vlan_work *work =
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5238
container_of(_work, struct mlx4_vf_immed_vlan_work, work);
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5241
struct mlx4_dev *dev = &work->priv->dev;
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5243
&work->priv->mfunc.master.res_tracker;
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5245
&tracker->slave_list[work->slave].res_list[RES_QP];
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5271
work->slave);
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5278
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5285
else if (!work->vlan_id)
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5288
else if (work->vlan_proto == htons(ETH_P_8021AD))
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5304
if (qp->com.owner == work->slave) {
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5313
if (port != work->port) {
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5322
if (work->vlan_id == MLX4_VGT) {
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5334
upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5339
if (work->vlan_proto == htons(ETH_P_8021AD))
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5348
((work->qos & 0x7) << 3);
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5353
work->qos_vport;
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5362
work->slave, port, qp->local_qpn, err);
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5373
errors, work->slave, work->port);
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5378
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5379
NO_INDX != work->orig_vlan_ix)
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5380
__mlx4_unregister_vlan(&work->priv->dev, work->port,
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5381
work->orig_vlan_id);
sys/dev/mlx4/mlx4_core/mlx4_resource_tracker.c
5383
kfree(work);
sys/dev/mlx4/mlx4_core/mlx4_sense.c
91
static void mlx4_sense_port(struct work_struct *work)
sys/dev/mlx4/mlx4_core/mlx4_sense.c
93
struct delayed_work *delay = to_delayed_work(work);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
1009
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
1011
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
1180
static void mlx4_en_do_get_stats(struct work_struct *work)
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
1182
struct delayed_work *delay = to_delayed_work(work);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
1209
static void mlx4_en_service_task(struct work_struct *work)
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
1211
struct delayed_work *delay = to_delayed_work(work);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
1224
static void mlx4_en_linkstate(struct work_struct *work)
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
1226
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
129
static void mlx4_en_filter_work(struct work_struct *work)
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
131
struct mlx4_en_filter *filter = container_of(work,
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
133
work);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
1565
static void mlx4_en_restart(struct work_struct *work)
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
1567
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
232
INIT_WORK(&filter->work, mlx4_en_filter_work);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
341
queue_work(priv->mdev->workqueue, &filter->work);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
364
cancel_work_sync(&filter->work);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
381
!work_pending(&filter->work) &&
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
96
struct work_struct work;
sys/dev/mlx4/mlx4_ib/mlx4_ib.h
402
struct work_struct work;
sys/dev/mlx4/mlx4_ib/mlx4_ib.h
440
struct work_struct work;
sys/dev/mlx4/mlx4_ib/mlx4_ib.h
601
struct work_struct work;
sys/dev/mlx4/mlx4_ib/mlx4_ib.h
826
void mlx4_ib_tunnels_update_work(struct work_struct *work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c
746
static void alias_guid_work(struct work_struct *work)
sys/dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c
748
struct delayed_work *delay = to_delayed_work(work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_cm.c
167
static void id_map_ent_timeout(struct work_struct *work)
sys/dev/mlx4/mlx4_ib/mlx4_ib_cm.c
169
struct delayed_work *delay = to_delayed_work(work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
1149
void handle_port_mgmt_change_event(struct work_struct *work)
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
1151
struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
1269
queue_work(ctx->wq, &ctx->work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
1688
static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
1694
ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
1854
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
1862
ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
2006
INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
2008
INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
2103
void mlx4_ib_tunnels_update_work(struct work_struct *work)
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
2107
dmxw = container_of(work, struct mlx4_ib_demux_work, work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
2922
INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
2932
queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
3005
static void handle_bonded_port_state_event(struct work_struct *work)
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
3008
container_of(work, struct ib_event_work, work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
3055
static void ib_sl2vl_update_work(struct work_struct *work)
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
3057
struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
3073
INIT_WORK(&ew->work, ib_sl2vl_update_work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
3076
queue_work(wq, &ew->work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
3097
INIT_WORK(&ew->work, handle_bonded_port_state_event);
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
3099
queue_work(wq, &ew->work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
3143
INIT_WORK(&ew->work, handle_port_mgmt_change_event);
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
3148
queue_work(wq, &ew->work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
3150
handle_port_mgmt_change_event(&ew->work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
108
struct work_struct work;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
1117
struct work_struct work;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
1122
static void mcg_clean_task(struct work_struct *work)
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
1124
struct clean_work *cw = container_of(work, struct clean_work, work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
1133
struct clean_work *work;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
1146
work = kmalloc(sizeof *work, GFP_KERNEL);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
1147
if (!work) {
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
1153
work->ctx = ctx;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
1154
work->destroy_wq = destroy_wq;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
1155
INIT_WORK(&work->work, mcg_clean_task);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
1156
queue_work(clean_wq, &work->work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
539
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
541
struct delayed_work *delay = to_delayed_work(work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
578
if (!queue_work(group->demux->mcg_wq, &group->work))
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
642
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
652
group = container_of(work, typeof(*group), work);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
839
INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
882
if (!queue_work(group->demux->mcg_wq, &group->work))
sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
918
if (!queue_work(ctx->mcg_wq, &group->work))
sys/dev/mlx5/driver.h
1006
struct mlx5_async_work *work);
sys/dev/mlx5/driver.h
292
struct delayed_work work;
sys/dev/mlx5/driver.h
521
struct work_struct work;
sys/dev/mlx5/driver.h
882
struct work_struct work;
sys/dev/mlx5/mlx5_accel/ipsec.h
162
struct mlx5e_ipsec_work *work;
sys/dev/mlx5/mlx5_accel/ipsec.h
64
struct work_struct work;
sys/dev/mlx5/mlx5_accel/mlx5_ipsec.c
84
container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
385
struct mlx5e_ipsec_work *work =
sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
386
container_of(_work, struct mlx5e_ipsec_work, work);
sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
387
struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
410
kfree(work);
sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
417
struct mlx5e_ipsec_work *work;
sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
430
work = kmalloc(sizeof(*work), GFP_ATOMIC);
sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
431
if (!work)
sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
434
INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
435
work->data = sa_entry;
sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
437
queue_work(sa_entry->ipsec->wq, &work->work);
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
1019
INIT_WORK(&ent->work, cmd_work_handler);
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
1021
cmd_work_handler(&ent->work);
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
1022
} else if (!queue_work(dev->priv.health.wq_cmd, &ent->work)) {
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
1401
struct mlx5_async_work *work = _work;
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
1402
struct mlx5_async_ctx *ctx = work->ctx;
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
1404
work->user_callback(status, work);
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
1411
struct mlx5_async_work *work)
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
1415
work->ctx = ctx;
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
1416
work->user_callback = callback;
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
1420
mlx5_cmd_exec_cb_handler, work, false);
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
788
static void cb_timeout_handler(struct work_struct *work)
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
790
struct delayed_work *dwork = container_of(work, struct delayed_work,
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
791
work);
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
806
cmd_free_work(struct work_struct *work)
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
808
struct mlx5_cmd_work_ent *ent = container_of(work,
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
877
static void cmd_work_handler(struct work_struct *work)
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
879
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
sys/dev/mlx5/mlx5_core/mlx5_eswitch.c
566
static void esw_vport_change_handler(struct work_struct *work)
sys/dev/mlx5/mlx5_core/mlx5_eswitch.c
569
container_of(work, struct mlx5_vport, vport_change_handler);
sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c
264
static void mlx5_fc_stats_work(struct work_struct *work)
sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c
266
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c
267
priv.fc_stats.work.work);
sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c
278
queue_delayed_work(fc_stats->wq, &fc_stats->work,
sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c
388
mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c
408
mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c
440
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c
460
cancel_delayed_work_sync(&dev->priv.fc_stats.work);
sys/dev/mlx5/mlx5_core/mlx5_health.c
141
static void mlx5_trigger_cmd_completions(struct work_struct *work)
sys/dev/mlx5/mlx5_core/mlx5_health.c
144
container_of(work, struct mlx5_core_dev, priv.health.work_cmd_completion);
sys/dev/mlx5/mlx5_core/mlx5_health.c
364
static void health_recover(struct work_struct *work)
sys/dev/mlx5/mlx5_core/mlx5_health.c
374
dwork = container_of(work, struct delayed_work, work);
sys/dev/mlx5/mlx5_core/mlx5_health.c
420
static void health_care(struct work_struct *work)
sys/dev/mlx5/mlx5_core/mlx5_health.c
428
health = container_of(work, struct mlx5_core_health, work);
sys/dev/mlx5/mlx5_core/mlx5_health.c
467
queue_work(health->wq, &health->work);
sys/dev/mlx5/mlx5_core/mlx5_health.c
544
static void health_watchdog(struct work_struct *work)
sys/dev/mlx5/mlx5_core/mlx5_health.c
551
dev = container_of(work, struct mlx5_core_dev, priv.health.work_watchdog);
sys/dev/mlx5/mlx5_core/mlx5_health.c
694
cancel_work_sync(&health->work);
sys/dev/mlx5/mlx5_core/mlx5_health.c
741
INIT_WORK(&health->work, health_care);
sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
41
struct work_struct work;
sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
460
static void pages_work_handler(struct work_struct *work)
sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
462
struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
492
INIT_WORK(&req->work, pages_work_handler);
sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
493
if (!queue_work(dev->priv.pg_wq, &req->work))
sys/dev/mlx5/mlx5_en/en.h
1221
void mlx5e_set_rx_mode_work(struct work_struct *work);
sys/dev/mlx5/mlx5_en/en_hw_tls.h
58
struct work_struct work;
sys/dev/mlx5/mlx5_en/en_hw_tls_rx.h
96
struct work_struct work;
sys/dev/mlx5/mlx5_en/mlx5_en_dim.c
61
mlx5e_dim_work(struct work_struct *work)
sys/dev/mlx5/mlx5_en/mlx5_en_dim.c
63
struct net_dim *dim = container_of(work, struct net_dim, work);
sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
1204
mlx5e_set_rx_mode_work(struct work_struct *work)
sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
1207
container_of(work, struct mlx5e_priv, set_rx_mode_work);
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
142
INIT_WORK(&ptag->work, mlx5e_tls_work);
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
169
flush_work(&ptag->work);
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
367
mlx5e_tls_work(struct work_struct *work)
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
372
ptag = container_of(work, struct mlx5e_tls_tag, work);
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
599
queue_work(priv->tls.wq, &ptag->work);
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
90
mlx5e_tls_prealloc_work(struct work_struct *work)
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
97
ptls = container_of(work, struct mlx5e_tls, prealloc_work);
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
1023
queue_work(priv->tls_rx.wq, &ptag->work);
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
417
INIT_WORK(&ptag->work, mlx5e_tls_rx_work);
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
435
flush_work(&ptag->work);
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
552
mlx5e_tls_rx_work(struct work_struct *work)
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
558
ptag = container_of(work, struct mlx5e_tls_rx_tag, work);
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
788
queue_work(priv->tls_rx.wq, &ptag->work);
sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
789
flush_work(&ptag->work);
sys/dev/mlx5/mlx5_en/mlx5_en_main.c
1018
mlx5e_update_stats_work(struct work_struct *work)
sys/dev/mlx5/mlx5_en/mlx5_en_main.c
1022
priv = container_of(work, struct mlx5e_priv, update_stats_work);
sys/dev/mlx5/mlx5_en/mlx5_en_main.c
1262
INIT_WORK(&rq->dim.work, mlx5e_dim_work);
sys/dev/mlx5/mlx5_en/mlx5_en_main.c
1476
cancel_work_sync(&rq->dim.work);
sys/dev/mlx5/mlx5_en/mlx5_en_main.c
2609
cancel_work_sync(&rq->dim.work);
sys/dev/mlx5/mlx5_en/mlx5_en_main.c
609
mlx5e_update_carrier_work(struct work_struct *work)
sys/dev/mlx5/mlx5_en/mlx5_en_main.c
611
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
sys/dev/mlx5/mlx5_ib/mlx5_ib.h
300
struct work_struct work;
sys/dev/mlx5/mlx5_ib/mlx5_ib.h
603
struct work_struct work;
sys/dev/mlx5/mlx5_ib/mlx5_ib_cong.c
423
mlx5_ib_read_cong_stats(struct work_struct *work)
sys/dev/mlx5/mlx5_ib/mlx5_ib_cong.c
426
container_of(work, struct mlx5_ib_dev, congestion.dwork.work);
sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c
908
static void notify_soft_wc_handler(struct work_struct *work)
sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c
910
struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c
2587
static void pkey_change_handler(struct work_struct *work)
sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c
2590
container_of(work, struct mlx5_ib_port_resources,
sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
278
queue_work(cache->wq, &ent->work);
sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
298
queue_work(cache->wq, &ent->work);
sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
305
static void delayed_cache_work_func(struct work_struct *work)
sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
309
ent = container_of(work, struct mlx5_cache_ent, dwork.work);
sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
313
static void cache_work_func(struct work_struct *work)
sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
317
ent = container_of(work, struct mlx5_cache_ent, work);
sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
348
queue_work(cache->wq, &ent->work);
sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
353
queue_work(cache->wq, &ent->work);
sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
383
queue_work(cache->wq, &ent->work);
sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
451
INIT_WORK(&ent->work, cache_work_func);
sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
454
queue_work(cache->wq, &ent->work);
sys/dev/mthca/mthca_catas.c
61
static void catas_reset(struct work_struct *work)
sys/dev/mthca/mthca_eq.c
434
int work = 0;
sys/dev/mthca/mthca_eq.c
442
work = 1;
sys/dev/mthca/mthca_eq.c
449
return IRQ_RETVAL(work);
sys/dev/mvs/mvs.c
468
uint64_t work;
sys/dev/mvs/mvs.c
471
work = ch->dma.workrq_bus;
sys/dev/mvs/mvs.c
472
ATA_OUTL(ch->r_mem, EDMA_REQQBAH, work >> 32);
sys/dev/mvs/mvs.c
473
ATA_OUTL(ch->r_mem, EDMA_REQQIP, work & 0xffffffff);
sys/dev/mvs/mvs.c
474
ATA_OUTL(ch->r_mem, EDMA_REQQOP, work & 0xffffffff);
sys/dev/mvs/mvs.c
479
work = ch->dma.workrp_bus;
sys/dev/mvs/mvs.c
480
ATA_OUTL(ch->r_mem, EDMA_RESQBAH, work >> 32);
sys/dev/mvs/mvs.c
481
ATA_OUTL(ch->r_mem, EDMA_RESQIP, work & 0xffffffff);
sys/dev/mvs/mvs.c
482
ATA_OUTL(ch->r_mem, EDMA_RESQOP, work & 0xffffffff);
sys/dev/mvs/mvs.c
662
uint32_t work;
sys/dev/mvs/mvs.c
667
work = ATA_INL(ch->r_mem, SATA_SC);
sys/dev/mvs/mvs.c
668
work &= ~SATA_SC_SPM_MASK;
sys/dev/mvs/mvs.c
670
work |= SATA_SC_SPM_PARTIAL;
sys/dev/mvs/mvs.c
672
work |= SATA_SC_SPM_SLUMBER;
sys/dev/mvs/mvs.c
673
ATA_OUTL(ch->r_mem, SATA_SC, work);
sys/dev/mvs/mvs.c
680
uint32_t work;
sys/dev/mvs/mvs.c
683
work = ATA_INL(ch->r_mem, SATA_SS);
sys/dev/mvs/mvs.c
684
if (work & SATA_SS_IPM_ACTIVE)
sys/dev/mvs/mvs.c
687
work = ATA_INL(ch->r_mem, SATA_SC);
sys/dev/mvs/mvs.c
688
work &= ~SATA_SC_SPM_MASK;
sys/dev/mvs/mvs.c
689
work |= SATA_SC_SPM_ACTIVE;
sys/dev/mvs/mvs.c
690
ATA_OUTL(ch->r_mem, SATA_SC, work);
sys/dev/qat/qat_common/adf_aer.c
190
adf_device_reset_worker(struct work_struct *work)
sys/dev/qat/qat_common/adf_aer.c
193
container_of(work, struct adf_reset_dev_data, reset_work);
sys/dev/qat/qat_common/adf_aer.c
23
struct work_struct work;
sys/dev/qat/qat_common/adf_aer.c
274
adf_notify_fatal_error_work(struct work_struct *work)
sys/dev/qat/qat_common/adf_aer.c
277
container_of(work, struct adf_fatal_error_data, work);
sys/dev/qat/qat_common/adf_aer.c
310
INIT_WORK(&wq_data->work, adf_notify_fatal_error_work);
sys/dev/qat/qat_common/adf_aer.c
311
queue_work(fatal_error_wq, &wq_data->work);
sys/dev/qat/qat_common/adf_gen4_timer.c
26
adf_hb_irq_bh_handler(struct work_struct *work)
sys/dev/qat/qat_common/adf_gen4_timer.c
31
container_of(work, struct adf_hb_timer_data, hb_int_timer_work);
sys/dev/qat/qat_common/adf_vf_isr.c
30
struct work_struct work;
sys/dev/qat/qat_common/adf_vf_isr.c
56
adf_dev_stop_async(struct work_struct *work)
sys/dev/qat/qat_common/adf_vf_isr.c
59
container_of(work, struct adf_vf_stop_data, work);
sys/dev/qat/qat_common/adf_vf_isr.c
87
INIT_WORK(&stop_data->work, adf_dev_stop_async);
sys/dev/qat/qat_common/adf_vf_isr.c
88
queue_work(adf_vf_stop_wq, &stop_data->work);
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
4109
int work = info->completed - info->completed_handled - 1;
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
4111
QL_DPRINT12(ha, "enter [%d]\n", work);
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
4113
while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
5812
struct work_struct work;
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
5819
qlnxr_iw_disconnect_worker(struct work_struct *work)
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
5822
container_of(work, struct qlnxr_discon_work, work);
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
5866
struct qlnxr_discon_work *work;
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
5871
work = kzalloc(sizeof(*work), GFP_ATOMIC);
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
5872
if (!work)
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
5876
work->ep = ep;
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
5877
work->event = params->event;
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
5878
work->status = params->status;
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
5880
INIT_WORK(&work->work, qlnxr_iw_disconnect_worker);
sys/dev/qlnx/qlnxr/qlnxr_verbs.c
5881
queue_work(dev->iwarp_wq, &work->work);
sys/dev/siis/siis.c
1035
ctp = (struct siis_cmd *)(ch->dma.work + slot->prb_offset);
sys/dev/siis/siis.c
1066
ctp = (struct siis_cmd *)(ch->dma.work + slot->prb_offset);
sys/dev/siis/siis.c
677
if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, 0,
sys/dev/siis/siis.c
680
if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work,
sys/dev/siis/siis.c
682
bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
sys/dev/siis/siis.c
721
bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
sys/dev/siis/siis.c
724
ch->dma.work = NULL;
sys/dev/siis/siis.h
328
uint8_t *work; /* workspace */
sys/dev/smartpqi/smartpqi_queue.c
50
req->work = 0;
sys/dev/smartpqi/smartpqi_structures.h
1449
uint16_t work; /* bytes 6-7 */
sys/dev/smartpqi/smartpqi_structures.h
276
uint16_t work;
sys/dev/smartpqi/smartpqi_structures.h
339
uint16_t work;
sys/dev/wtap/if_wtap_module.c
121
plugin->base.work = visibility_work;
sys/dev/wtap/plugins/wtap_plugin.h
43
void (*work)(struct wtap_plugin *, struct packet *p);
sys/dev/wtap/wtap_hal/hal.c
51
hal->plugin->work(hal->plugin, p);
sys/kern/kern_sx.c
88
#define GIANT_SAVE(work) do { \
sys/kern/kern_sx.c
90
work++; \
sys/net/iflib.c
4040
u_int work = 0;
sys/net/iflib.c
4051
nmirq = netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work);
sys/net/netisr.c
1524
SYSCTL_PROC(_net_isr, OID_AUTO, work,
sys/netinet/libalias/alias_smedia.c
422
char *work;
sys/netinet/libalias/alias_smedia.c
426
work = data;
sys/netinet/libalias/alias_smedia.c
427
work += 5;
sys/netinet/libalias/alias_smedia.c
428
while (work + 4 < data + dlen) {
sys/netinet/libalias/alias_smedia.c
429
memcpy(&msg_id, work, 2);
sys/netinet/libalias/alias_smedia.c
430
work += 2;
sys/netinet/libalias/alias_smedia.c
431
memcpy(&msg_len, work, 2);
sys/netinet/libalias/alias_smedia.c
432
work += 2;
sys/netinet/libalias/alias_smedia.c
437
memcpy(&port, work, 2);
sys/netinet/libalias/alias_smedia.c
447
memcpy(work, &alias_port, 2);
sys/netinet/libalias/alias_smedia.c
458
work += ntohs(msg_len);
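
The alias_smedia.c entries use work as a roving byte cursor over a PNA payload: skip a fixed 5-byte preamble, then for each message read a 2-byte id and 2-byte length, rewrite the embedded port with the alias port, and hop forward by the message length. A sketch of that walk follows, with a slightly tightened bounds check; the message-id constant is a placeholder, and the checksum and link-state fixups of the real function are omitted.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define PNA_PORT_MSG_ID 0x0002                   /* placeholder id for illustration only */

static void
pna_rewrite_sketch(char *data, int dlen, uint16_t alias_port /* network order */)
{
    char *work = data;
    uint16_t msg_id, msg_len, port;

    work += 5;                                   /* skip the preamble */
    while (work + 6 <= data + dlen) {            /* id + len + port must fit */
        memcpy(&msg_id, work, 2);
        work += 2;
        memcpy(&msg_len, work, 2);
        work += 2;
        if (ntohs(msg_id) == PNA_PORT_MSG_ID) {
            memcpy(&port, work, 2);
            (void)port;                          /* old port inspected here */
            memcpy(work, &alias_port, 2);        /* splice in the aliased port */
        }
        work += ntohs(msg_len);                  /* hop to the next message */
    }
}
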
sys/ofed/drivers/infiniband/core/ib_addr.c
223
mod_delayed_work(addr_wq, &work, delay);
sys/ofed/drivers/infiniband/core/ib_addr.c
708
static void process_req(struct work_struct *work)
sys/ofed/drivers/infiniband/core/ib_addr.c
72
static void process_req(struct work_struct *work);
sys/ofed/drivers/infiniband/core/ib_addr.c
76
static DECLARE_DELAYED_WORK(work, process_req);
sys/ofed/drivers/infiniband/core/ib_cache.c
1144
struct ib_update_work *work =
sys/ofed/drivers/infiniband/core/ib_cache.c
1145
container_of(_work, struct ib_update_work, work);
sys/ofed/drivers/infiniband/core/ib_cache.c
1147
ib_cache_update(work->device, work->port_num);
sys/ofed/drivers/infiniband/core/ib_cache.c
1148
kfree(work);
sys/ofed/drivers/infiniband/core/ib_cache.c
1154
struct ib_update_work *work;
sys/ofed/drivers/infiniband/core/ib_cache.c
1163
work = kmalloc(sizeof *work, GFP_ATOMIC);
sys/ofed/drivers/infiniband/core/ib_cache.c
1164
if (work) {
sys/ofed/drivers/infiniband/core/ib_cache.c
1165
INIT_WORK(&work->work, ib_cache_task);
sys/ofed/drivers/infiniband/core/ib_cache.c
1166
work->device = event->device;
sys/ofed/drivers/infiniband/core/ib_cache.c
1167
work->port_num = event->element.port_num;
sys/ofed/drivers/infiniband/core/ib_cache.c
1168
queue_work(ib_wq, &work->work);
sys/ofed/drivers/infiniband/core/ib_cache.c
57
struct work_struct work;
sys/ofed/drivers/infiniband/core/ib_cm.c
1064
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
sys/ofed/drivers/infiniband/core/ib_cm.c
1065
cm_free_work(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
1499
static u16 cm_get_bth_pkey(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
1501
struct ib_device *ib_dev = work->port->cm_dev->ib_device;
sys/ofed/drivers/infiniband/core/ib_cm.c
1502
u8 port_num = work->port->port_num;
sys/ofed/drivers/infiniband/core/ib_cm.c
1503
u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
sys/ofed/drivers/infiniband/core/ib_cm.c
1517
static void cm_format_req_event(struct cm_work *work,
sys/ofed/drivers/infiniband/core/ib_cm.c
1524
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
1525
param = &work->cm_event.param.req_rcvd;
sys/ofed/drivers/infiniband/core/ib_cm.c
1527
param->bth_pkey = cm_get_bth_pkey(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
1529
param->primary_path = &work->path[0];
sys/ofed/drivers/infiniband/core/ib_cm.c
1531
param->alternate_path = &work->path[1];
sys/ofed/drivers/infiniband/core/ib_cm.c
1550
work->cm_event.private_data = &req_msg->private_data;
sys/ofed/drivers/infiniband/core/ib_cm.c
1554
struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
1559
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
sys/ofed/drivers/infiniband/core/ib_cm.c
1560
cm_free_work(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
1564
work = cm_dequeue_work(cm_id_priv);
sys/ofed/drivers/infiniband/core/ib_cm.c
1566
BUG_ON(!work);
sys/ofed/drivers/infiniband/core/ib_cm.c
1568
&work->cm_event);
sys/ofed/drivers/infiniband/core/ib_cm.c
1569
cm_free_work(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
1632
static void cm_dup_req_handler(struct cm_work *work,
sys/ofed/drivers/infiniband/core/ib_cm.c
1638
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
sys/ofed/drivers/infiniband/core/ib_cm.c
1645
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
sys/ofed/drivers/infiniband/core/ib_cm.c
1675
static struct cm_id_private * cm_match_req(struct cm_work *work,
sys/ofed/drivers/infiniband/core/ib_cm.c
1683
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
1689
cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
sys/ofed/drivers/infiniband/core/ib_cm.c
1690
timewait_info->work.remote_id);
sys/ofed/drivers/infiniband/core/ib_cm.c
1693
cm_dup_req_handler(work, cur_cm_id_priv);
sys/ofed/drivers/infiniband/core/ib_cm.c
1703
cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
sys/ofed/drivers/infiniband/core/ib_cm.c
1704
timewait_info->work.remote_id);
sys/ofed/drivers/infiniband/core/ib_cm.c
1707
cm_issue_rej(work->port, work->mad_recv_wc,
sys/ofed/drivers/infiniband/core/ib_cm.c
1724
cm_issue_rej(work->port, work->mad_recv_wc,
sys/ofed/drivers/infiniband/core/ib_cm.c
1766
static int cm_req_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
1775
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
1777
cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
sys/ofed/drivers/infiniband/core/ib_cm.c
1783
ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
sys/ofed/drivers/infiniband/core/ib_cm.c
1784
work->mad_recv_wc->recv_buf.grh,
sys/ofed/drivers/infiniband/core/ib_cm.c
1794
cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
sys/ofed/drivers/infiniband/core/ib_cm.c
1798
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
sys/ofed/drivers/infiniband/core/ib_cm.c
1809
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
sys/ofed/drivers/infiniband/core/ib_cm.c
1810
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
sys/ofed/drivers/infiniband/core/ib_cm.c
1812
memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
sys/ofed/drivers/infiniband/core/ib_cm.c
1813
work->path[0].hop_limit = cm_id_priv->av.ah_attr.grh.hop_limit;
sys/ofed/drivers/infiniband/core/ib_cm.c
1814
ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
sys/ofed/drivers/infiniband/core/ib_cm.c
1815
work->port->port_num,
sys/ofed/drivers/infiniband/core/ib_cm.c
1820
work->path[0].ifindex = if_getindex(gid_attr.ndev);
sys/ofed/drivers/infiniband/core/ib_cm.c
1821
work->path[0].net = dev_net(gid_attr.ndev);
sys/ofed/drivers/infiniband/core/ib_cm.c
1824
work->path[0].gid_type = gid_attr.gid_type;
sys/ofed/drivers/infiniband/core/ib_cm.c
1825
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
sys/ofed/drivers/infiniband/core/ib_cm.c
1829
int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
sys/ofed/drivers/infiniband/core/ib_cm.c
1830
work->port->port_num, 0,
sys/ofed/drivers/infiniband/core/ib_cm.c
1831
&work->path[0].sgid,
sys/ofed/drivers/infiniband/core/ib_cm.c
1834
work->path[0].ifindex = if_getindex(gid_attr.ndev);
sys/ofed/drivers/infiniband/core/ib_cm.c
1835
work->path[0].net = dev_net(gid_attr.ndev);
sys/ofed/drivers/infiniband/core/ib_cm.c
1838
work->path[0].gid_type = gid_attr.gid_type;
sys/ofed/drivers/infiniband/core/ib_cm.c
1840
&work->path[0].sgid, sizeof work->path[0].sgid,
sys/ofed/drivers/infiniband/core/ib_cm.c
1845
ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
sys/ofed/drivers/infiniband/core/ib_cm.c
1849
&work->path[0].sgid,
sys/ofed/drivers/infiniband/core/ib_cm.c
1850
sizeof work->path[0].sgid, NULL, 0);
sys/ofed/drivers/infiniband/core/ib_cm.c
1868
cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
sys/ofed/drivers/infiniband/core/ib_cm.c
1869
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
2026
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
sys/ofed/drivers/infiniband/core/ib_cm.c
2031
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
2032
param = &work->cm_event.param.rep_rcvd;
sys/ofed/drivers/infiniband/core/ib_cm.c
2044
work->cm_event.private_data = &rep_msg->private_data;
sys/ofed/drivers/infiniband/core/ib_cm.c
2047
static void cm_dup_rep_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
2054
rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
2060
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
sys/ofed/drivers/infiniband/core/ib_cm.c
2062
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
sys/ofed/drivers/infiniband/core/ib_cm.c
2090
static int cm_rep_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
2099
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
2102
cm_dup_rep_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
2106
cm_format_rep_event(work, cm_id_priv->qp_type);
sys/ofed/drivers/infiniband/core/ib_cm.c
2119
cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
sys/ofed/drivers/infiniband/core/ib_cm.c
2137
cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
sys/ofed/drivers/infiniband/core/ib_cm.c
2138
timewait_info->work.remote_id);
sys/ofed/drivers/infiniband/core/ib_cm.c
2142
cm_issue_rej(work->port, work->mad_recv_wc,
sys/ofed/drivers/infiniband/core/ib_cm.c
2176
list_add_tail(&work->list, &cm_id_priv->work_list);
sys/ofed/drivers/infiniband/core/ib_cm.c
2180
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
2190
static int cm_establish_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
2196
cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
sys/ofed/drivers/infiniband/core/ib_cm.c
2209
list_add_tail(&work->list, &cm_id_priv->work_list);
sys/ofed/drivers/infiniband/core/ib_cm.c
2213
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
2222
static int cm_rtu_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
2228
rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
2234
work->cm_event.private_data = &rtu_msg->private_data;
sys/ofed/drivers/infiniband/core/ib_cm.c
2240
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
sys/ofed/drivers/infiniband/core/ib_cm.c
2249
list_add_tail(&work->list, &cm_id_priv->work_list);
sys/ofed/drivers/infiniband/core/ib_cm.c
2253
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
2412
static int cm_dreq_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
2419
dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
2423
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
sys/ofed/drivers/infiniband/core/ib_cm.c
2425
cm_issue_drep(work->port, work->mad_recv_wc);
sys/ofed/drivers/infiniband/core/ib_cm.c
2429
work->cm_event.private_data = &dreq_msg->private_data;
sys/ofed/drivers/infiniband/core/ib_cm.c
243
struct delayed_work work;
sys/ofed/drivers/infiniband/core/ib_cm.c
2448
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
sys/ofed/drivers/infiniband/core/ib_cm.c
2450
msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
sys/ofed/drivers/infiniband/core/ib_cm.c
2459
if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
sys/ofed/drivers/infiniband/core/ib_cm.c
2464
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
sys/ofed/drivers/infiniband/core/ib_cm.c
2474
list_add_tail(&work->list, &cm_id_priv->work_list);
sys/ofed/drivers/infiniband/core/ib_cm.c
2478
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
2488
static int cm_drep_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
2494
drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
2500
work->cm_event.private_data = &drep_msg->private_data;
sys/ofed/drivers/infiniband/core/ib_cm.c
2513
list_add_tail(&work->list, &cm_id_priv->work_list);
sys/ofed/drivers/infiniband/core/ib_cm.c
2517
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
254
struct cm_work work; /* Must be first. */
sys/ofed/drivers/infiniband/core/ib_cm.c
2587
static void cm_format_rej_event(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
2592
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
2593
param = &work->cm_event.param.rej_rcvd;
sys/ofed/drivers/infiniband/core/ib_cm.c
2597
work->cm_event.private_data = &rej_msg->private_data;
sys/ofed/drivers/infiniband/core/ib_cm.c
2617
(timewait_info->work.local_id ^
sys/ofed/drivers/infiniband/core/ib_cm.c
2634
static int cm_rej_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
2640
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
2645
cm_format_rej_event(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
2687
list_add_tail(&work->list, &cm_id_priv->work_list);
sys/ofed/drivers/infiniband/core/ib_cm.c
2691
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
2792
static int cm_mra_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
2798
mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
2803
work->cm_event.private_data = &mra_msg->private_data;
sys/ofed/drivers/infiniband/core/ib_cm.c
2804
work->cm_event.param.mra_rcvd.service_timeout =
sys/ofed/drivers/infiniband/core/ib_cm.c
2831
atomic_long_inc(&work->port->
sys/ofed/drivers/infiniband/core/ib_cm.c
2840
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
sys/ofed/drivers/infiniband/core/ib_cm.c
2851
list_add_tail(&work->list, &cm_id_priv->work_list);
sys/ofed/drivers/infiniband/core/ib_cm.c
2855
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
2974
static int cm_lap_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
2985
if (rdma_protocol_roce(work->port->cm_dev->ib_device,
sys/ofed/drivers/infiniband/core/ib_cm.c
2986
work->port->port_num))
sys/ofed/drivers/infiniband/core/ib_cm.c
2990
lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
2996
param = &work->cm_event.param.lap_rcvd;
sys/ofed/drivers/infiniband/core/ib_cm.c
2997
param->alternate_path = &work->path[0];
sys/ofed/drivers/infiniband/core/ib_cm.c
2999
work->cm_event.private_data = &lap_msg->private_data;
sys/ofed/drivers/infiniband/core/ib_cm.c
3010
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
sys/ofed/drivers/infiniband/core/ib_cm.c
3012
msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
sys/ofed/drivers/infiniband/core/ib_cm.c
3023
if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
sys/ofed/drivers/infiniband/core/ib_cm.c
3028
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
sys/ofed/drivers/infiniband/core/ib_cm.c
3037
ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
sys/ofed/drivers/infiniband/core/ib_cm.c
3038
work->mad_recv_wc->recv_buf.grh,
sys/ofed/drivers/infiniband/core/ib_cm.c
3048
list_add_tail(&work->list, &cm_id_priv->work_list);
sys/ofed/drivers/infiniband/core/ib_cm.c
3052
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
312
static void cm_work_handler(struct work_struct *work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3128
static int cm_apr_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
3137
if (rdma_protocol_roce(work->port->cm_dev->ib_device,
sys/ofed/drivers/infiniband/core/ib_cm.c
3138
work->port->port_num))
sys/ofed/drivers/infiniband/core/ib_cm.c
3141
apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
3147
work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
sys/ofed/drivers/infiniband/core/ib_cm.c
3148
work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
sys/ofed/drivers/infiniband/core/ib_cm.c
3149
work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
sys/ofed/drivers/infiniband/core/ib_cm.c
3150
work->cm_event.private_data = &apr_msg->private_data;
sys/ofed/drivers/infiniband/core/ib_cm.c
3165
list_add_tail(&work->list, &cm_id_priv->work_list);
sys/ofed/drivers/infiniband/core/ib_cm.c
3169
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3178
static int cm_timewait_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
3184
timewait_info = (struct cm_timewait_info *)work;
sys/ofed/drivers/infiniband/core/ib_cm.c
3189
cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
sys/ofed/drivers/infiniband/core/ib_cm.c
3190
timewait_info->work.remote_id);
sys/ofed/drivers/infiniband/core/ib_cm.c
3203
list_add_tail(&work->list, &cm_id_priv->work_list);
sys/ofed/drivers/infiniband/core/ib_cm.c
3207
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3280
static void cm_format_sidr_req_event(struct cm_work *work,
sys/ofed/drivers/infiniband/core/ib_cm.c
3288
work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
3289
param = &work->cm_event.param.sidr_req_rcvd;
sys/ofed/drivers/infiniband/core/ib_cm.c
3293
param->bth_pkey = cm_get_bth_pkey(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3294
param->port = work->port->port_num;
sys/ofed/drivers/infiniband/core/ib_cm.c
3296
work->cm_event.private_data = &sidr_req_msg->private_data;
sys/ofed/drivers/infiniband/core/ib_cm.c
3299
static int cm_sidr_req_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
3307
cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
sys/ofed/drivers/infiniband/core/ib_cm.c
3314
work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
3315
wc = work->mad_recv_wc->wc;
sys/ofed/drivers/infiniband/core/ib_cm.c
3318
ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
sys/ofed/drivers/infiniband/core/ib_cm.c
3319
work->mad_recv_wc->recv_buf.grh,
sys/ofed/drivers/infiniband/core/ib_cm.c
3331
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
sys/ofed/drivers/infiniband/core/ib_cm.c
3352
cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id);
sys/ofed/drivers/infiniband/core/ib_cm.c
3353
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3429
static void cm_format_sidr_rep_event(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
3435
work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
3436
param = &work->cm_event.param.sidr_rep_rcvd;
sys/ofed/drivers/infiniband/core/ib_cm.c
3442
work->cm_event.private_data = &sidr_rep_msg->private_data;
sys/ofed/drivers/infiniband/core/ib_cm.c
3445
static int cm_sidr_rep_handler(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
3451
work->mad_recv_wc->recv_buf.mad;
sys/ofed/drivers/infiniband/core/ib_cm.c
3465
cm_format_sidr_rep_event(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3466
cm_process_work(cm_id_priv, work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3568
struct cm_work *work = container_of(_work, struct cm_work, work.work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3571
switch (work->cm_event.event) {
sys/ofed/drivers/infiniband/core/ib_cm.c
3573
ret = cm_req_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3576
ret = cm_mra_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3579
ret = cm_rej_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3582
ret = cm_rep_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3585
ret = cm_rtu_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3588
ret = cm_establish_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3591
ret = cm_dreq_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3594
ret = cm_drep_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3597
ret = cm_sidr_req_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3600
ret = cm_sidr_rep_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3603
ret = cm_lap_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3606
ret = cm_apr_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3609
ret = cm_timewait_handler(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3616
cm_free_work(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3622
struct cm_work *work;
sys/ofed/drivers/infiniband/core/ib_cm.c
3631
work = kmalloc(sizeof *work, GFP_ATOMIC);
sys/ofed/drivers/infiniband/core/ib_cm.c
3632
if (!work)
sys/ofed/drivers/infiniband/core/ib_cm.c
3653
kfree(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3663
INIT_DELAYED_WORK(&work->work, cm_work_handler);
sys/ofed/drivers/infiniband/core/ib_cm.c
3664
work->local_id = cm_id->local_id;
sys/ofed/drivers/infiniband/core/ib_cm.c
3665
work->remote_id = cm_id->remote_id;
sys/ofed/drivers/infiniband/core/ib_cm.c
3666
work->mad_recv_wc = NULL;
sys/ofed/drivers/infiniband/core/ib_cm.c
3667
work->cm_event.event = IB_CM_USER_ESTABLISHED;
sys/ofed/drivers/infiniband/core/ib_cm.c
3672
queue_delayed_work(cm.wq, &work->work, 0);
sys/ofed/drivers/infiniband/core/ib_cm.c
3674
kfree(work);
sys/ofed/drivers/infiniband/core/ib_cm.c
3735
struct cm_work *work;
sys/ofed/drivers/infiniband/core/ib_cm.c
3787
work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
sys/ofed/drivers/infiniband/core/ib_cm.c
3789
if (!work) {
sys/ofed/drivers/infiniband/core/ib_cm.c
3794
INIT_DELAYED_WORK(&work->work, cm_work_handler);
sys/ofed/drivers/infiniband/core/ib_cm.c
3795
work->cm_event.event = event;
sys/ofed/drivers/infiniband/core/ib_cm.c
3796
work->mad_recv_wc = mad_recv_wc;
sys/ofed/drivers/infiniband/core/ib_cm.c
3797
work->port = port;
sys/ofed/drivers/infiniband/core/ib_cm.c
3802
queue_delayed_work(cm.wq, &work->work, 0);
sys/ofed/drivers/infiniband/core/ib_cm.c
3808
kfree(work);
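
Taken together, the ib_cm.c entries above trace a full produce/consume cycle: incoming MADs and user events are wrapped in a cm_work whose embedded delayed_work is queued with a zero delay, and cm_work_handler() recovers the wrapper with container_of() and dispatches on the event code before freeing it. The outline below mirrors that flow under hypothetical names; the trailing flexible array stands in for the path records appended in cm_recv_handler().

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

enum example_event { EXAMPLE_REQ, EXAMPLE_REP };         /* hypothetical event codes */

struct example_msg_work {                /* hypothetical, cf. struct cm_work */
        struct delayed_work work;        /* embedded timer/work pair */
        enum example_event event;
        int payload[];                   /* e.g. appended path records */
};

static void example_msg_handler(struct work_struct *_work)
{
        struct example_msg_work *w =
                container_of(_work, struct example_msg_work, work.work);

        switch (w->event) {
        case EXAMPLE_REQ:
                /* ... handle a request ... */
                break;
        case EXAMPLE_REP:
                /* ... handle a reply ... */
                break;
        }
        kfree(w);
}

static int example_post_msg(struct workqueue_struct *wq,
                            enum example_event event, int npayload)
{
        struct example_msg_work *w;

        w = kmalloc(sizeof(*w) + npayload * sizeof(w->payload[0]), GFP_ATOMIC);
        if (!w)
                return -ENOMEM;

        INIT_DELAYED_WORK(&w->work, example_msg_handler);
        w->event = event;
        /* A zero delay reuses the delayed-work plumbing but runs at once;
         * the same item type can also be armed with a real timeout. */
        queue_delayed_work(wq, &w->work, 0);
        return 0;
}

Routing everything through one delayed_work type is also what lets the timewait entries just below cancel a pending instance with cancel_delayed_work_sync() during teardown.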
sys/ofed/drivers/infiniband/core/ib_cm.c
4247
cancel_delayed_work(&timewait_info->work.work);
sys/ofed/drivers/infiniband/core/ib_cm.c
4254
cancel_delayed_work_sync(&timewait_info->work.work);
sys/ofed/drivers/infiniband/core/ib_cm.c
692
__be32 remote_id = timewait_info->work.remote_id;
sys/ofed/drivers/infiniband/core/ib_cm.c
698
if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
sys/ofed/drivers/infiniband/core/ib_cm.c
700
else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
sys/ofed/drivers/infiniband/core/ib_cm.c
724
if (be32_lt(remote_id, timewait_info->work.remote_id))
sys/ofed/drivers/infiniband/core/ib_cm.c
726
else if (be32_gt(remote_id, timewait_info->work.remote_id))
sys/ofed/drivers/infiniband/core/ib_cm.c
849
struct cm_work *work;
sys/ofed/drivers/infiniband/core/ib_cm.c
854
work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
sys/ofed/drivers/infiniband/core/ib_cm.c
855
list_del(&work->list);
sys/ofed/drivers/infiniband/core/ib_cm.c
856
return work;
sys/ofed/drivers/infiniband/core/ib_cm.c
859
static void cm_free_work(struct cm_work *work)
sys/ofed/drivers/infiniband/core/ib_cm.c
861
if (work->mad_recv_wc)
sys/ofed/drivers/infiniband/core/ib_cm.c
862
ib_free_recv_mad(work->mad_recv_wc);
sys/ofed/drivers/infiniband/core/ib_cm.c
863
kfree(work);
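
Alongside the global queue, ib_cm keeps a per-connection list of pending work so that events for one ID are handled in order and can be drained on teardown (the while loop at ib_cm.c:1064 above). A small sketch of that dequeue-and-free discipline follows, with hypothetical names and the locking omitted.

#include <linux/list.h>
#include <linux/slab.h>

struct example_conn {                    /* hypothetical per-connection state */
        struct list_head work_list;      /* pending items, oldest first */
};

struct example_item {
        struct list_head list;
        void *rx_buf;                    /* e.g. a received MAD; may be NULL */
};

static struct example_item *example_dequeue(struct example_conn *conn)
{
        struct example_item *item;

        if (list_empty(&conn->work_list))
                return NULL;
        item = list_entry(conn->work_list.next, struct example_item, list);
        list_del(&item->list);
        return item;
}

static void example_free_item(struct example_item *item)
{
        kfree(item->rx_buf);             /* stands in for ib_free_recv_mad() */
        kfree(item);
}

/* Teardown: drop everything still queued for this connection. */
static void example_drain(struct example_conn *conn)
{
        struct example_item *item;

        while ((item = example_dequeue(conn)) != NULL)
                example_free_item(item);
}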
sys/ofed/drivers/infiniband/core/ib_cm.c
912
timewait_info->work.local_id = local_id;
sys/ofed/drivers/infiniband/core/ib_cm.c
913
INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
sys/ofed/drivers/infiniband/core/ib_cm.c
914
timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
sys/ofed/drivers/infiniband/core/ib_cm.c
944
queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
sys/ofed/drivers/infiniband/core/ib_cm.c
968
struct cm_work *work;
sys/ofed/drivers/infiniband/core/ib_cma.c
2568
struct cma_work *work = context;
sys/ofed/drivers/infiniband/core/ib_cma.c
2571
route = &work->id->id.route;
sys/ofed/drivers/infiniband/core/ib_cma.c
2577
work->old_state = RDMA_CM_ROUTE_QUERY;
sys/ofed/drivers/infiniband/core/ib_cma.c
2578
work->new_state = RDMA_CM_ADDR_RESOLVED;
sys/ofed/drivers/infiniband/core/ib_cma.c
2579
work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
sys/ofed/drivers/infiniband/core/ib_cma.c
2580
work->event.status = status;
sys/ofed/drivers/infiniband/core/ib_cma.c
2583
queue_work(cma_wq, &work->work);
sys/ofed/drivers/infiniband/core/ib_cma.c
2587
struct cma_work *work)
sys/ofed/drivers/infiniband/core/ib_cma.c
2628
work, &id_priv->query);
sys/ofed/drivers/infiniband/core/ib_cma.c
2635
struct cma_work *work = container_of(_work, struct cma_work, work);
sys/ofed/drivers/infiniband/core/ib_cma.c
2636
struct rdma_id_private *id_priv = work->id;
sys/ofed/drivers/infiniband/core/ib_cma.c
2640
if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
sys/ofed/drivers/infiniband/core/ib_cma.c
2643
if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
sys/ofed/drivers/infiniband/core/ib_cma.c
2652
kfree(work);
sys/ofed/drivers/infiniband/core/ib_cma.c
2658
struct cma_work *work;
sys/ofed/drivers/infiniband/core/ib_cma.c
2661
work = kzalloc(sizeof *work, GFP_KERNEL);
sys/ofed/drivers/infiniband/core/ib_cma.c
2662
if (!work)
sys/ofed/drivers/infiniband/core/ib_cma.c
2665
work->id = id_priv;
sys/ofed/drivers/infiniband/core/ib_cma.c
2666
INIT_WORK(&work->work, cma_work_handler);
sys/ofed/drivers/infiniband/core/ib_cma.c
2667
work->old_state = RDMA_CM_ROUTE_QUERY;
sys/ofed/drivers/infiniband/core/ib_cma.c
2668
work->new_state = RDMA_CM_ROUTE_RESOLVED;
sys/ofed/drivers/infiniband/core/ib_cma.c
2669
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
sys/ofed/drivers/infiniband/core/ib_cma.c
2677
ret = cma_query_ib_route(id_priv, timeout_ms, work);
sys/ofed/drivers/infiniband/core/ib_cma.c
2686
kfree(work);
sys/ofed/drivers/infiniband/core/ib_cma.c
2718
struct cma_work *work;
sys/ofed/drivers/infiniband/core/ib_cma.c
2720
work = kzalloc(sizeof *work, GFP_KERNEL);
sys/ofed/drivers/infiniband/core/ib_cma.c
2721
if (!work)
sys/ofed/drivers/infiniband/core/ib_cma.c
2724
work->id = id_priv;
sys/ofed/drivers/infiniband/core/ib_cma.c
2725
INIT_WORK(&work->work, cma_work_handler);
sys/ofed/drivers/infiniband/core/ib_cma.c
2726
work->old_state = RDMA_CM_ROUTE_QUERY;
sys/ofed/drivers/infiniband/core/ib_cma.c
2727
work->new_state = RDMA_CM_ROUTE_RESOLVED;
sys/ofed/drivers/infiniband/core/ib_cma.c
2728
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
sys/ofed/drivers/infiniband/core/ib_cma.c
2729
queue_work(cma_wq, &work->work);
sys/ofed/drivers/infiniband/core/ib_cma.c
2758
struct cma_work *work;
sys/ofed/drivers/infiniband/core/ib_cma.c
2763
work = kzalloc(sizeof *work, GFP_KERNEL);
sys/ofed/drivers/infiniband/core/ib_cma.c
2764
if (!work)
sys/ofed/drivers/infiniband/core/ib_cma.c
2767
work->id = id_priv;
sys/ofed/drivers/infiniband/core/ib_cma.c
2768
INIT_WORK(&work->work, cma_work_handler);
sys/ofed/drivers/infiniband/core/ib_cma.c
2833
work->old_state = RDMA_CM_ROUTE_QUERY;
sys/ofed/drivers/infiniband/core/ib_cma.c
2834
work->new_state = RDMA_CM_ROUTE_RESOLVED;
sys/ofed/drivers/infiniband/core/ib_cma.c
2835
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
sys/ofed/drivers/infiniband/core/ib_cma.c
2836
work->event.status = 0;
sys/ofed/drivers/infiniband/core/ib_cma.c
2838
queue_work(cma_wq, &work->work);
sys/ofed/drivers/infiniband/core/ib_cma.c
2846
kfree(work);
sys/ofed/drivers/infiniband/core/ib_cma.c
2994
struct cma_work *work;
sys/ofed/drivers/infiniband/core/ib_cma.c
2998
work = kzalloc(sizeof *work, GFP_KERNEL);
sys/ofed/drivers/infiniband/core/ib_cma.c
2999
if (!work)
sys/ofed/drivers/infiniband/core/ib_cma.c
3011
work->id = id_priv;
sys/ofed/drivers/infiniband/core/ib_cma.c
3012
INIT_WORK(&work->work, cma_work_handler);
sys/ofed/drivers/infiniband/core/ib_cma.c
3013
work->old_state = RDMA_CM_ADDR_QUERY;
sys/ofed/drivers/infiniband/core/ib_cma.c
3014
work->new_state = RDMA_CM_ADDR_RESOLVED;
sys/ofed/drivers/infiniband/core/ib_cma.c
3015
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
sys/ofed/drivers/infiniband/core/ib_cma.c
3016
queue_work(cma_wq, &work->work);
sys/ofed/drivers/infiniband/core/ib_cma.c
3019
kfree(work);
sys/ofed/drivers/infiniband/core/ib_cma.c
3025
struct cma_work *work;
sys/ofed/drivers/infiniband/core/ib_cma.c
3028
work = kzalloc(sizeof *work, GFP_KERNEL);
sys/ofed/drivers/infiniband/core/ib_cma.c
3029
if (!work)
sys/ofed/drivers/infiniband/core/ib_cma.c
3041
work->id = id_priv;
sys/ofed/drivers/infiniband/core/ib_cma.c
3042
INIT_WORK(&work->work, cma_work_handler);
sys/ofed/drivers/infiniband/core/ib_cma.c
3043
work->old_state = RDMA_CM_ADDR_QUERY;
sys/ofed/drivers/infiniband/core/ib_cma.c
3044
work->new_state = RDMA_CM_ADDR_RESOLVED;
sys/ofed/drivers/infiniband/core/ib_cma.c
3045
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
sys/ofed/drivers/infiniband/core/ib_cma.c
3046
queue_work(cma_wq, &work->work);
sys/ofed/drivers/infiniband/core/ib_cma.c
3049
kfree(work);
sys/ofed/drivers/infiniband/core/ib_cma.c
361
struct work_struct work;
sys/ofed/drivers/infiniband/core/ib_cma.c
369
struct work_struct work;
sys/ofed/drivers/infiniband/core/ib_cma.c
375
struct work_struct work;
sys/ofed/drivers/infiniband/core/ib_cma.c
4264
static void iboe_mcast_work_handler(struct work_struct *work)
sys/ofed/drivers/infiniband/core/ib_cma.c
4266
struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
sys/ofed/drivers/infiniband/core/ib_cma.c
4308
struct iboe_mcast_work *work;
sys/ofed/drivers/infiniband/core/ib_cma.c
4321
work = kzalloc(sizeof *work, GFP_KERNEL);
sys/ofed/drivers/infiniband/core/ib_cma.c
4322
if (!work)
sys/ofed/drivers/infiniband/core/ib_cma.c
4371
work->id = id_priv;
sys/ofed/drivers/infiniband/core/ib_cma.c
4372
work->mc = mc;
sys/ofed/drivers/infiniband/core/ib_cma.c
4373
INIT_WORK(&work->work, iboe_mcast_work_handler);
sys/ofed/drivers/infiniband/core/ib_cma.c
4375
queue_work(cma_wq, &work->work);
sys/ofed/drivers/infiniband/core/ib_cma.c
4382
kfree(work);
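
The ib_cma.c entries add one refinement: each cma_work records the state transition it represents (old_state/new_state), so cma_work_handler() can discard work that has become stale by the time it runs (the cma_comp_exch() test at line 2640). The sketch below reproduces the idea with hypothetical names; the real code takes the id's lock and reference around the exchange, which is elided here.

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>

enum example_state { EX_IDLE, EX_QUERY, EX_RESOLVED };   /* hypothetical states */

struct example_id {                      /* hypothetical, cf. rdma_id_private */
        enum example_state state;        /* assume an external lock protects this */
};

struct example_state_work {              /* hypothetical, cf. struct cma_work */
        struct work_struct work;
        struct example_id *id;
        enum example_state old_state, new_state;
};

/* Advance the id only if it is still in the expected state. */
static bool example_comp_exch(struct example_id *id,
                              enum example_state old, enum example_state new)
{
        if (id->state != old)
                return false;
        id->state = new;
        return true;
}

static void example_state_handler(struct work_struct *_work)
{
        struct example_state_work *w =
                container_of(_work, struct example_state_work, work);

        /* Stale work (the id already left old_state) is simply dropped. */
        if (example_comp_exch(w->id, w->old_state, w->new_state)) {
                /* ... deliver the event to the consumer ... */
        }
        kfree(w);
}

static int example_post_transition(struct workqueue_struct *wq,
                                   struct example_id *id)
{
        struct example_state_work *w = kzalloc(sizeof(*w), GFP_KERNEL);

        if (!w)
                return -ENOMEM;
        w->id = id;
        w->old_state = EX_QUERY;
        w->new_state = EX_RESOLVED;
        INIT_WORK(&w->work, example_state_handler);
        queue_work(wq, &w->work);
        return 0;
}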
sys/ofed/drivers/infiniband/core/ib_cq.c
132
INIT_WORK(&cq->work, ib_cq_poll_work);
sys/ofed/drivers/infiniband/core/ib_cq.c
158
flush_work(&cq->work);
sys/ofed/drivers/infiniband/core/ib_cq.c
48
ib_cq_poll_work(struct work_struct *work)
sys/ofed/drivers/infiniband/core/ib_cq.c
51
struct ib_cq *cq = container_of(work, struct ib_cq, work);
sys/ofed/drivers/infiniband/core/ib_cq.c
77
queue_work(ib_comp_wq, &cq->work);
sys/ofed/drivers/infiniband/core/ib_cq.c
83
queue_work(ib_comp_wq, &cq->work);
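
ib_cq.c embeds the work_struct in the CQ itself rather than allocating per event: the completion path queues it on ib_comp_wq, and teardown calls flush_work() (line 158) so no poll pass is left running. The sketch below additionally assumes a per-pass budget after which the handler re-queues itself; the budget, the poll helper, and the example_* names are hypothetical.

#include <linux/workqueue.h>

#define EXAMPLE_POLL_BUDGET 16           /* hypothetical per-pass budget */

struct example_cq {
        struct work_struct work;         /* embedded: lives as long as the CQ */
        struct workqueue_struct *wq;
};

/* Placeholder poll routine: reap up to 'budget' completions, return the count. */
static int example_poll(struct example_cq *cq, int budget)
{
        (void)cq;
        (void)budget;
        return 0;
}

static void example_cq_poll_work(struct work_struct *work)
{
        struct example_cq *cq = container_of(work, struct example_cq, work);

        /* If the budget was used up there may be more completions pending:
         * re-queue instead of monopolizing the worker in one long pass. */
        if (example_poll(cq, EXAMPLE_POLL_BUDGET) >= EXAMPLE_POLL_BUDGET)
                queue_work(cq->wq, &cq->work);
}

static void example_cq_init(struct example_cq *cq, struct workqueue_struct *wq)
{
        cq->wq = wq;
        INIT_WORK(&cq->work, example_cq_poll_work);
}

static void example_cq_destroy(struct example_cq *cq)
{
        /* Wait for an in-flight poll pass to finish before the CQ goes away. */
        flush_work(&cq->work);
}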
sys/ofed/drivers/infiniband/core/ib_iwcm.c
122
struct iwcm_work *work;
sys/ofed/drivers/infiniband/core/ib_iwcm.c
126
work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
sys/ofed/drivers/infiniband/core/ib_iwcm.c
128
list_del_init(&work->free_list);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
129
return work;
sys/ofed/drivers/infiniband/core/ib_iwcm.c
132
static void put_work(struct iwcm_work *work)
sys/ofed/drivers/infiniband/core/ib_iwcm.c
134
list_add(&work->free_list, &work->cm_id->work_free_list);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
147
struct iwcm_work *work;
sys/ofed/drivers/infiniband/core/ib_iwcm.c
151
work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
152
if (!work) {
sys/ofed/drivers/infiniband/core/ib_iwcm.c
156
work->cm_id = cm_id_priv;
sys/ofed/drivers/infiniband/core/ib_iwcm.c
157
INIT_LIST_HEAD(&work->list);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
158
put_work(work);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
87
struct work_struct work;
sys/ofed/drivers/infiniband/core/ib_iwcm.c
891
struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
893
struct iwcm_id_private *cm_id_priv = work->cm_id;
sys/ofed/drivers/infiniband/core/ib_iwcm.c
901
work = list_entry(cm_id_priv->work_list.next,
sys/ofed/drivers/infiniband/core/ib_iwcm.c
903
list_del_init(&work->list);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
905
levent = work->event;
sys/ofed/drivers/infiniband/core/ib_iwcm.c
906
put_work(work);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
942
struct iwcm_work *work;
sys/ofed/drivers/infiniband/core/ib_iwcm.c
950
work = get_work(cm_id_priv);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
951
if (!work) {
sys/ofed/drivers/infiniband/core/ib_iwcm.c
956
INIT_WORK(&work->work, cm_work_handler);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
957
work->cm_id = cm_id_priv;
sys/ofed/drivers/infiniband/core/ib_iwcm.c
958
work->event = *iw_event;
sys/ofed/drivers/infiniband/core/ib_iwcm.c
960
if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
sys/ofed/drivers/infiniband/core/ib_iwcm.c
961
work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
sys/ofed/drivers/infiniband/core/ib_iwcm.c
962
work->event.private_data_len) {
sys/ofed/drivers/infiniband/core/ib_iwcm.c
963
ret = copy_private_data(&work->event);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
965
put_work(work);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
972
list_add_tail(&work->list, &cm_id_priv->work_list);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
973
queue_work(iwcm_wq, &work->work);
sys/ofed/drivers/infiniband/core/ib_iwcm.c
975
list_add_tail(&work->list, &cm_id_priv->work_list);
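
ib_iwcm.c avoids allocating in the event path entirely: work items are preallocated onto a per-id free list, get_work()/put_work() recycle them, and cm_event_handler() checks for exhaustion (line 951) instead of calling kmalloc() under load. A minimal pool in that style is sketched below with hypothetical names; the real code holds the id's spinlock around both lists, which is omitted here.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct example_pool {
        struct list_head free_list;      /* recycled, ready-to-use items */
};

struct example_pool_item {
        struct list_head free_list;
};

static struct example_pool_item *example_get(struct example_pool *p)
{
        struct example_pool_item *w;

        if (list_empty(&p->free_list))
                return NULL;             /* pool exhausted: caller drops the event */
        w = list_entry(p->free_list.next, struct example_pool_item, free_list);
        list_del_init(&w->free_list);
        return w;
}

static void example_put(struct example_pool *p, struct example_pool_item *w)
{
        list_add(&w->free_list, &p->free_list);
}

/* Preallocate 'count' items so the event path never allocates. On failure
 * the caller is expected to tear the pool down again. */
static int example_pool_init(struct example_pool *p, int count)
{
        int i;

        INIT_LIST_HEAD(&p->free_list);
        for (i = 0; i < count; i++) {
                struct example_pool_item *w = kmalloc(sizeof(*w), GFP_KERNEL);

                if (!w)
                        return -ENOMEM;
                example_put(p, w);
        }
        return 0;
}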
sys/ofed/drivers/infiniband/core/ib_mad.c
2653
static void local_completions(struct work_struct *work)
sys/ofed/drivers/infiniband/core/ib_mad.c
2665
container_of(work, struct ib_mad_agent_private, local_work);
sys/ofed/drivers/infiniband/core/ib_mad.c
2785
static void timeout_sends(struct work_struct *work)
sys/ofed/drivers/infiniband/core/ib_mad.c
2792
mad_agent_priv = container_of(work, struct ib_mad_agent_private,
sys/ofed/drivers/infiniband/core/ib_mad.c
2793
timed_work.work);
sys/ofed/drivers/infiniband/core/ib_mad.c
81
static void timeout_sends(struct work_struct *work);
sys/ofed/drivers/infiniband/core/ib_mad.c
82
static void local_completions(struct work_struct *work);
sys/ofed/drivers/infiniband/core/ib_mad_rmpp.c
248
static void recv_timeout_handler(struct work_struct *work)
sys/ofed/drivers/infiniband/core/ib_mad_rmpp.c
251
container_of(work, struct mad_rmpp_recv, timeout_work.work);
sys/ofed/drivers/infiniband/core/ib_mad_rmpp.c
270
static void recv_cleanup_handler(struct work_struct *work)
sys/ofed/drivers/infiniband/core/ib_mad_rmpp.c
273
container_of(work, struct mad_rmpp_recv, cleanup_work.work);
sys/ofed/drivers/infiniband/core/ib_multicast.c
106
struct work_struct work;
sys/ofed/drivers/infiniband/core/ib_multicast.c
221
queue_work(mcast_wq, &group->work);
sys/ofed/drivers/infiniband/core/ib_multicast.c
429
static void mcast_work_handler(struct work_struct *work)
sys/ofed/drivers/infiniband/core/ib_multicast.c
437
group = container_of(work, typeof(*group), work);
sys/ofed/drivers/infiniband/core/ib_multicast.c
545
mcast_work_handler(&group->work);
sys/ofed/drivers/infiniband/core/ib_multicast.c
557
mcast_work_handler(&group->work);
sys/ofed/drivers/infiniband/core/ib_multicast.c
586
INIT_WORK(&group->work, mcast_work_handler);
sys/ofed/drivers/infiniband/core/ib_multicast.c
683
queue_work(mcast_wq, &group->work);
sys/ofed/drivers/infiniband/core/ib_multicast.c
777
queue_work(mcast_wq, &group->work);
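
ib_multicast.c queues group->work in some paths (lines 221, 683, 777) but calls mcast_work_handler(&group->work) directly in others (lines 545, 557), presumably where the caller is already in a context that can run it synchronously. Writing the handler as an ordinary function of its work_struct keeps both call styles available, as in this hypothetical fragment:

#include <linux/workqueue.h>
#include <linux/types.h>

struct example_group {
        struct work_struct work;
};

static void example_group_handler(struct work_struct *work)
{
        struct example_group *g = container_of(work, struct example_group, work);

        /* ... drive the group's join/leave state machine ... */
        (void)g;
}

static void example_kick(struct example_group *g,
                         struct workqueue_struct *wq, bool run_inline)
{
        if (run_inline)
                example_group_handler(&g->work);  /* synchronous invocation */
        else
                queue_work(wq, &g->work);         /* deferred to the workqueue */
}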
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
319
struct roce_netdev_event_work *work =
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
320
container_of(_work, struct roce_netdev_event_work, work);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
322
ib_enum_all_roce_netdevs(roce_gid_match_netdev, work->ndev,
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
325
dev_put(work->ndev);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
326
kfree(work);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
332
struct roce_netdev_event_work *work;
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
347
work = kmalloc(sizeof(*work), GFP_ATOMIC);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
348
if (!work) {
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
353
INIT_WORK(&work->work, roce_gid_queue_scan_event_handler);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
356
work->ndev = ndev;
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
358
queue_work(roce_gid_mgmt_wq, &work->work);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
364
struct roce_netdev_event_work *work =
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
365
container_of(_work, struct roce_netdev_event_work, work);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
367
ib_cache_gid_del_all_by_netdev(work->ndev);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
368
dev_put(work->ndev);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
369
kfree(work);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
375
struct roce_netdev_event_work *work;
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
377
work = kmalloc(sizeof(*work), GFP_ATOMIC);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
378
if (!work) {
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
383
INIT_WORK(&work->work, roce_gid_delete_all_event_handler);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
385
work->ndev = ndev;
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
386
queue_work(roce_gid_mgmt_wq, &work->work);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
432
struct roce_rescan_work *work =
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
433
container_of(_work, struct roce_rescan_work, work);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
435
ib_enum_roce_netdev(work->ib_dev, roce_gid_match_all, NULL,
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
437
kfree(work);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
443
struct roce_rescan_work *work = kmalloc(sizeof(*work), GFP_KERNEL);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
445
if (!work)
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
448
work->ib_dev = ib_dev;
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
449
INIT_WORK(&work->work, roce_rescan_device_handler);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
450
queue_work(roce_gid_mgmt_wq, &work->work);
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
56
struct work_struct work;
sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c
61
struct work_struct work;
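
The ib_roce_gid_mgmt.c items show the bookkeeping that comes with stashing a pointer in a work item: the handlers end with dev_put(work->ndev) followed by kfree(work), so the producer must hold a reference on the net_device for the lifetime of the queued work. The matching dev_hold() is not visible in the excerpts above, so in the sketch below it is an assumption; all example_* names are hypothetical.

#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct example_netdev_work {             /* hypothetical, cf. roce_netdev_event_work */
        struct work_struct work;
        struct net_device *ndev;
};

static void example_netdev_handler(struct work_struct *_work)
{
        struct example_netdev_work *w =
                container_of(_work, struct example_netdev_work, work);

        /* ... rescan GIDs associated with w->ndev ... */

        dev_put(w->ndev);                /* drop the reference taken at queue time */
        kfree(w);
}

static int example_queue_netdev_event(struct workqueue_struct *wq,
                                      struct net_device *ndev)
{
        struct example_netdev_work *w = kmalloc(sizeof(*w), GFP_ATOMIC);

        if (!w)
                return -ENOMEM;

        INIT_WORK(&w->work, example_netdev_handler);
        dev_hold(ndev);                  /* assumed: keep ndev alive until the handler runs */
        w->ndev = ndev;
        queue_work(wq, &w->work);
        return 0;
}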
sys/ofed/drivers/infiniband/core/ib_sa_query.c
500
static void update_sm_ah(struct work_struct *work)
sys/ofed/drivers/infiniband/core/ib_sa_query.c
503
container_of(work, struct ib_sa_port, update_task);
sys/ofed/drivers/infiniband/core/ib_ucma.c
170
static void ucma_close_event_id(struct work_struct *work)
sys/ofed/drivers/infiniband/core/ib_ucma.c
172
struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);
sys/ofed/drivers/infiniband/core/ib_ucma.c
178
static void ucma_close_id(struct work_struct *work)
sys/ofed/drivers/infiniband/core/ib_ucma.c
180
struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);
sys/ofed/drivers/infiniband/core/ib_umem.c
232
static void ib_umem_account(struct work_struct *work)
sys/ofed/drivers/infiniband/core/ib_umem.c
234
struct ib_umem *umem = container_of(work, struct ib_umem, work);
sys/ofed/drivers/infiniband/core/ib_umem.c
284
INIT_WORK(&umem->work, ib_umem_account);
sys/ofed/drivers/infiniband/core/ib_umem.c
288
queue_work(ib_wq, &umem->work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib.h
456
void ipoib_reap_ah(struct work_struct *work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib.h
464
void ipoib_ib_dev_flush_light(struct work_struct *work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib.h
465
void ipoib_ib_dev_flush_normal(struct work_struct *work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib.h
466
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib.h
467
void ipoib_pkey_event(struct work_struct *work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib.h
478
void ipoib_mcast_join_task(struct work_struct *work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib.h
479
void ipoib_mcast_carrier_on_task(struct work_struct *work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib.h
482
void ipoib_mcast_restart_task(struct work_struct *work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib.h
519
void ipoib_pkey_poll(struct work_struct *work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1196
static void ipoib_cm_tx_start(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1198
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1240
static void ipoib_cm_tx_reap(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1242
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1260
static void ipoib_cm_mb_reap(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1262
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1316
static void ipoib_cm_rx_reap(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1318
ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1322
static void ipoib_cm_stale_task(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1324
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1325
cm.stale_task.work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
580
void ipoib_reap_ah(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
583
container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
950
void ipoib_ib_dev_flush_light(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
953
container_of(work, struct ipoib_dev_priv, flush_light);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
958
void ipoib_ib_dev_flush_normal(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
961
container_of(work, struct ipoib_dev_priv, flush_normal);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
966
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
969
container_of(work, struct ipoib_dev_priv, flush_heavy);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
996
void ipoib_pkey_poll(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
999
container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
323
void ipoib_mcast_carrier_on_task(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
325
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
471
void ipoib_mcast_join_task(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
474
container_of(work, struct ipoib_dev_priv, mcast_task.work);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
732
void ipoib_mcast_restart_task(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
735
container_of(work, struct ipoib_dev_priv, restart_task);
sys/ofed/drivers/infiniband/ulp/sdp/sdp.h
678
void srcavail_cancel_timeout(struct work_struct *work);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c
562
sdp_rx_comp_work(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c
564
struct sdp_sock *ssk = container_of(work, struct sdp_sock,
sys/ofed/drivers/infiniband/ulp/sdp/sdp_zcopy.c
134
void srcavail_cancel_timeout(struct work_struct *work)
sys/ofed/drivers/infiniband/ulp/sdp/sdp_zcopy.c
137
container_of(work, struct sdp_sock, srcavail_cancel_work.work);
sys/ofed/include/rdma/ib_umem.h
51
struct work_struct work;
sys/ofed/include/rdma/ib_verbs.h
1475
struct work_struct work;
sys/powerpc/mpc85xx/fsl_sata.c
161
((struct fsl_sata_cmd_tab *)(ch->dma.work + FSL_SATA_CT_OFFSET + \
sys/powerpc/mpc85xx/fsl_sata.c
164
((struct fsl_sata_cmd_list *) (ch->dma.work + FSL_SATA_CL_OFFSET + \
sys/powerpc/mpc85xx/fsl_sata.c
218
uint8_t *work; /* workspace */
sys/powerpc/mpc85xx/fsl_sata.c
471
uint64_t work;
sys/powerpc/mpc85xx/fsl_sata.c
482
work = ch->dma.work_bus + FSL_SATA_CL_OFFSET;
sys/powerpc/mpc85xx/fsl_sata.c
483
ATA_OUTL(ch->r_mem, FSL_SATA_P_CHBA, work);
sys/powerpc/mpc85xx/fsl_sata.c
577
if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work,
sys/powerpc/mpc85xx/fsl_sata.c
580
if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work,
sys/powerpc/mpc85xx/fsl_sata.c
582
bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
sys/powerpc/mpc85xx/fsl_sata.c
595
device_printf(dev, "work area: %p\n", ch->dma.work);
sys/powerpc/mpc85xx/fsl_sata.c
623
bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
sys/powerpc/mpc85xx/fsl_sata.c
625
ch->dma.work = NULL;
sys/powerpc/mpc85xx/fsl_sata.c
774
uint32_t work;
sys/powerpc/mpc85xx/fsl_sata.c
778
work = ATA_INL(ch->r_mem, FSL_SATA_P_PCC) & ~FSL_SATA_PCC_LPB_EN;
sys/powerpc/mpc85xx/fsl_sata.c
780
work |= FSL_SATA_P_PCC_PARTIAL;
sys/powerpc/mpc85xx/fsl_sata.c
782
work |= FSL_SATA_P_PCC_SLUMBER;
sys/powerpc/mpc85xx/fsl_sata.c
783
ATA_OUTL(ch->r_mem, FSL_SATA_P_PCC, work);
tools/regression/gaithrstress/gaithrstress.c
241
if (pthread_create(&workers[i].w_thread, NULL, work,
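
In gaithrstress.c the symbol is not a work item at all: it is the start routine handed to pthread_create(), which therefore has to match the void *(*)(void *) signature. A self-contained user-space illustration follows; the worker body and the single-thread setup are hypothetical.

#include <pthread.h>
#include <stdio.h>

/* Start routine: pthread_create() requires the void *(*)(void *) shape. */
static void *work(void *arg)
{
        int id = *(int *)arg;

        printf("worker %d running\n", id);
        return NULL;
}

int main(void)
{
        pthread_t thread;
        int id = 0;

        if (pthread_create(&thread, NULL, work, &id) != 0)
                return 1;
        pthread_join(thread, NULL);
        return 0;
}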
usr.sbin/ppp/vjcomp.c
119
u_char work[MAX_HDR + MAX_VJHEADER]; /* enough to hold TCP/IP header */
usr.sbin/ppp/vjcomp.c
147
bufp = work + MAX_HDR;