Symbol: task
Each match below is a three-line entry: the file path, the line number within that file, and the matching source line.
arch/alpha/include/asm/elf.h
119
extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
arch/alpha/include/asm/ptrace.h
14
#define task_pt_regs(task) \
arch/alpha/include/asm/ptrace.h
15
((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
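The two ptrace.h entries above define alpha's task_pt_regs(): the saved user registers live at the very top of the task's two-page kernel stack. A minimal user-space sketch of the same pointer arithmetic, with stand-in types and an illustrative 8 KiB page size (not the kernel's definitions):

#include <stdio.h>

/* Stand-ins for kernel types/constants; values are illustrative only. */
#define PAGE_SIZE 8192UL
struct pt_regs { unsigned long regs[32]; };

/* Mirrors ((struct pt_regs *)(task_stack_page(task) + 2*PAGE_SIZE) - 1) */
static struct pt_regs *stack_top_regs(void *task_stack_page)
{
        return (struct pt_regs *)((char *)task_stack_page + 2 * PAGE_SIZE) - 1;
}

int main(void)
{
        static char stack[2 * PAGE_SIZE];       /* pretend kernel stack */
        struct pt_regs *regs = stack_top_regs(stack);

        /* regs ends exactly at the top of the two-page stack area */
        printf("regs at %p, stack top %p\n",
               (void *)regs, (void *)(stack + sizeof(stack)));
        return 0;
}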
arch/alpha/include/asm/syscall.h
12
static inline long syscall_get_return_value(struct task_struct *task,
arch/alpha/include/asm/syscall.h
7
static inline int syscall_get_arch(struct task_struct *task)
arch/alpha/include/asm/thread_info.h
101
#define GET_UNALIGN_CTL(task,value) ({ \
arch/alpha/include/asm/thread_info.h
102
__u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \
arch/alpha/include/asm/thread_info.h
18
struct task_struct *task; /* main task structure */
arch/alpha/include/asm/thread_info.h
37
.task = &tsk, \
arch/alpha/include/asm/thread_info.h
90
#define SET_UNALIGN_CTL(task,value) ({ \
arch/alpha/include/asm/thread_info.h
91
__u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \
arch/alpha/include/asm/thread_info.h
98
task_thread_info(task)->status = status; \
arch/alpha/kernel/process.c
332
dump_elf_task(elf_greg_t *dest, struct task_struct *task)
arch/alpha/kernel/process.c
334
dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task));
arch/alpha/kernel/ptrace.c
109
get_reg_addr(struct task_struct * task, unsigned long regno)
arch/alpha/kernel/ptrace.c
114
addr = &task_thread_info(task)->pcb.usp;
arch/alpha/kernel/ptrace.c
116
addr = &task_thread_info(task)->pcb.unique;
arch/alpha/kernel/ptrace.c
121
addr = task_stack_page(task) + regoff[regno];
arch/alpha/kernel/ptrace.c
130
get_reg(struct task_struct * task, unsigned long regno)
arch/alpha/kernel/ptrace.c
134
unsigned long fpcr = *get_reg_addr(task, regno);
arch/alpha/kernel/ptrace.c
136
= task_thread_info(task)->ieee_state & IEEE_SW_MASK;
arch/alpha/kernel/ptrace.c
140
return *get_reg_addr(task, regno);
arch/alpha/kernel/ptrace.c
147
put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
arch/alpha/kernel/ptrace.c
150
task_thread_info(task)->ieee_state
arch/alpha/kernel/ptrace.c
151
= ((task_thread_info(task)->ieee_state & ~IEEE_SW_MASK)
arch/alpha/kernel/ptrace.c
155
*get_reg_addr(task, regno) = data;
arch/alpha/kernel/ptrace.c
160
read_int(struct task_struct *task, unsigned long addr, int * data)
arch/alpha/kernel/ptrace.c
162
int copied = access_process_vm(task, addr, data, sizeof(int),
arch/alpha/kernel/ptrace.c
168
write_int(struct task_struct *task, unsigned long addr, int data)
arch/alpha/kernel/ptrace.c
170
int copied = access_process_vm(task, addr, &data, sizeof(int),
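read_int() and write_int() in arch/alpha/kernel/ptrace.c wrap access_process_vm(), the generic helper ptrace uses to peek and poke another task's memory. A hedged reconstruction of the read side from the fragments above (the FOLL_FORCE flag and the -EIO convention are assumed from the generic ptrace pattern, not shown in the listing):

#include <linux/mm.h>           /* access_process_vm() */
#include <linux/sched.h>

/* Returns 0 on success, -EIO if fewer bytes were copied than requested. */
static int read_int(struct task_struct *task, unsigned long addr, int *data)
{
        int copied = access_process_vm(task, addr, data, sizeof(int),
                                       FOLL_FORCE);
        return (copied == sizeof(int)) ? 0 : -EIO;
}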
arch/alpha/kernel/traps.c
113
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
arch/arc/include/asm/syscall.h
18
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
arch/arc/include/asm/syscall.h
27
syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
arch/arc/include/asm/syscall.h
38
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
arch/arc/include/asm/syscall.h
44
syscall_get_error(struct task_struct *task, struct pt_regs *regs)
arch/arc/include/asm/syscall.h
51
syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
arch/arc/include/asm/syscall.h
57
syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
arch/arc/include/asm/syscall.h
68
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
arch/arc/include/asm/syscall.h
82
syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
arch/arc/include/asm/syscall.h
96
syscall_get_arch(struct task_struct *task)
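The asm/syscall.h entries (shown here for arc, but every architecture in this listing provides the same family) are the accessors behind generic code such as seccomp, audit and syscall tracing. A sketch of a consumer, assuming a kernel build context; the hook itself is hypothetical:

#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/syscall.h>

/* Hypothetical hook: log the syscall a stopped tracee is sitting in. */
static void log_syscall(struct task_struct *task, struct pt_regs *regs)
{
        unsigned long args[6];
        long nr = syscall_get_nr(task, regs);

        if (nr == -1)           /* task is not inside a syscall */
                return;

        syscall_get_arguments(task, regs, args);
        pr_debug("pid %d: syscall %ld(%#lx, %#lx, ...)\n",
                 task_pid_nr(task), nr, args[0], args[1]);
}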
arch/arc/include/asm/thread_info.h
44
struct task_struct *task; /* main task structure */
arch/arc/include/asm/thread_info.h
53
.task = &tsk, \
arch/arc/include/asm/unwind.h
51
struct task_struct *task;
arch/arc/kernel/kgdb.c
70
struct task_struct *task)
arch/arc/kernel/kgdb.c
72
if (task)
arch/arc/kernel/kgdb.c
73
to_gdb_regs(gdb_regs, task_pt_regs(task),
arch/arc/kernel/kgdb.c
74
(struct callee_regs *) task->thread.callee_reg);
arch/arc/kernel/ptrace.c
313
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/arc/kernel/stacktrace.c
51
frame_info->task = tsk;
arch/arc/kernel/stacktrace.c
64
frame_info->task = current;
arch/arc/kernel/stacktrace.c
90
frame_info->task = tsk;
arch/arc/kernel/unwind.c
1084
top = STACK_TOP_UNW(frame->task);
arch/arc/kernel/unwind.c
1085
bottom = STACK_BOTTOM_UNW(frame->task);
arch/arm/common/bL_switcher.c
260
struct task_struct *task;
arch/arm/common/bL_switcher.c
308
struct task_struct *task;
arch/arm/common/bL_switcher.c
310
task = kthread_run_on_cpu(bL_switcher_thread, arg,
arch/arm/common/bL_switcher.c
312
if (IS_ERR(task))
arch/arm/common/bL_switcher.c
315
return task;
arch/arm/common/bL_switcher.c
353
if (IS_ERR(t->task))
arch/arm/common/bL_switcher.c
354
return PTR_ERR(t->task);
arch/arm/common/bL_switcher.c
355
if (!t->task)
arch/arm/common/bL_switcher.c
580
t->task = bL_switcher_thread_create(cpu, t);
arch/arm/common/bL_switcher.c
604
struct task_struct *task;
arch/arm/common/bL_switcher.c
629
task = t->task;
arch/arm/common/bL_switcher.c
630
t->task = NULL;
arch/arm/common/bL_switcher.c
631
if (!task || IS_ERR(task))
arch/arm/common/bL_switcher.c
633
kthread_stop(task);
arch/arm/common/bL_switcher.c
640
task = bL_switcher_thread_create(cpu, t);
arch/arm/common/bL_switcher.c
641
if (!IS_ERR(task)) {
arch/arm/common/bL_switcher.c
643
kthread_stop(task);
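The bL_switcher.c entries trace a complete kthread lifecycle: create with kthread_run*(), use IS_ERR() to tell a failed create from a live thread, clear the slot before kthread_stop(). A minimal sketch of that pattern (names and the worker body are illustrative):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int worker_fn(void *arg)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();     /* sleep until woken or stopped */
        }
        return 0;
}

static struct task_struct *worker;

static int worker_start(void)
{
        worker = kthread_run(worker_fn, NULL, "kworker-demo");
        return IS_ERR(worker) ? PTR_ERR(worker) : 0;
}

static void worker_teardown(void)
{
        struct task_struct *task = worker;

        worker = NULL;
        if (!task || IS_ERR(task))      /* never started, or create failed */
                return;
        kthread_stop(task);             /* blocks until worker_fn returns */
}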
arch/arm/include/asm/smp.h
46
asmlinkage void secondary_start_kernel(struct task_struct *task);
arch/arm/include/asm/smp.h
59
struct task_struct *task;
arch/arm/include/asm/syscall.h
105
static inline void syscall_set_arguments(struct task_struct *task,
arch/arm/include/asm/syscall.h
118
static inline int syscall_get_arch(struct task_struct *task)
arch/arm/include/asm/syscall.h
22
static inline int syscall_get_nr(struct task_struct *task,
arch/arm/include/asm/syscall.h
26
return task_thread_info(task)->abi_syscall;
arch/arm/include/asm/syscall.h
28
if (task_thread_info(task)->abi_syscall == -1)
arch/arm/include/asm/syscall.h
31
return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK;
arch/arm/include/asm/syscall.h
34
static inline bool __in_oabi_syscall(struct task_struct *task)
arch/arm/include/asm/syscall.h
37
(task_thread_info(task)->abi_syscall & __NR_OABI_SYSCALL_BASE);
arch/arm/include/asm/syscall.h
45
static inline void syscall_rollback(struct task_struct *task,
arch/arm/include/asm/syscall.h
51
static inline long syscall_get_error(struct task_struct *task,
arch/arm/include/asm/syscall.h
58
static inline long syscall_get_return_value(struct task_struct *task,
arch/arm/include/asm/syscall.h
64
static inline void syscall_set_return_value(struct task_struct *task,
arch/arm/include/asm/syscall.h
71
static inline void syscall_set_nr(struct task_struct *task,
arch/arm/include/asm/syscall.h
76
task_thread_info(task)->abi_syscall = -1;
arch/arm/include/asm/syscall.h
83
syscall_set_return_value(task, regs, -ENOSYS, 0);
arch/arm/include/asm/syscall.h
87
task_thread_info(task)->abi_syscall = nr;
arch/arm/include/asm/syscall.h
90
task_thread_info(task)->abi_syscall =
arch/arm/include/asm/syscall.h
91
(task_thread_info(task)->abi_syscall & ~__NR_SYSCALL_MASK) |
arch/arm/include/asm/syscall.h
95
static inline void syscall_get_arguments(struct task_struct *task,
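The arm syscall.h fragments above encode three facts in thread_info->abi_syscall: -1 means no syscall in progress, OABI tasks carry __NR_OABI_SYSCALL_BASE in the value, and consumers mask with __NR_SYSCALL_MASK to recover the number. Reassembling the listed lines gives roughly the following; the IS_ENABLED() guard is recalled from the upstream source, not shown in the listing:

static inline int syscall_get_nr(struct task_struct *task,
                                 struct pt_regs *regs)
{
        /* Pure-EABI kernels store the plain number. */
        if (IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))
                return task_thread_info(task)->abi_syscall;

        if (task_thread_info(task)->abi_syscall == -1)
                return -1;

        /* Strip the OABI base flag to get the syscall number proper. */
        return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK;
}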
arch/arm/kernel/kgdb.c
76
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
arch/arm/kernel/kgdb.c
82
if (task == NULL)
arch/arm/kernel/kgdb.c
90
ti = task_thread_info(task);
arch/arm/kernel/perf_regs.c
29
u64 perf_reg_abi(struct task_struct *task)
arch/arm/kernel/ptrace.c
161
static inline long get_user_reg(struct task_struct *task, int offset)
arch/arm/kernel/ptrace.c
163
return task_pt_regs(task)->uregs[offset];
arch/arm/kernel/ptrace.c
173
put_user_reg(struct task_struct *task, int offset, long data)
arch/arm/kernel/ptrace.c
175
struct pt_regs newregs, *regs = task_pt_regs(task);
arch/arm/kernel/ptrace.c
720
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/arm/kernel/smp.c
155
secondary_data.task = idle;
arch/arm/kernel/smp.c
410
asmlinkage void secondary_start_kernel(struct task_struct *task)
arch/arm/kernel/smp.c
415
set_current(task);
arch/arm/kernel/stacktrace.c
145
static void start_stack_trace(struct stackframe *frame, struct task_struct *task,
arch/arm/kernel/stacktrace.c
155
frame->tsk = task;
arch/arm/kernel/stacktrace.c
163
struct task_struct *task, struct pt_regs *regs)
arch/arm/kernel/stacktrace.c
170
} else if (task != current) {
arch/arm/kernel/stacktrace.c
179
start_stack_trace(&frame, task, thread_saved_fp(task),
arch/arm/kernel/stacktrace.c
180
thread_saved_sp(task), 0,
arch/arm/kernel/stacktrace.c
181
thread_saved_pc(task));
arch/arm/kernel/stacktrace.c
185
start_stack_trace(&frame, task,
arch/arm/mach-rpc/ecard.c
1000
PTR_ERR(task));
arch/arm/mach-rpc/ecard.c
1002
return PTR_ERR(task);
arch/arm/mach-rpc/ecard.c
990
struct task_struct *task;
arch/arm/mach-rpc/ecard.c
997
task = kthread_run(ecard_task, NULL, "kecardd");
arch/arm/mach-rpc/ecard.c
998
if (IS_ERR(task)) {
arch/arm64/include/asm/debug-monitors.h
69
void user_rewind_single_step(struct task_struct *task);
arch/arm64/include/asm/debug-monitors.h
70
void user_fastforward_single_step(struct task_struct *task);
arch/arm64/include/asm/debug-monitors.h
72
struct task_struct *task);
arch/arm64/include/asm/fpsimd.h
114
extern void task_smstop_sm(struct task_struct *task);
arch/arm64/include/asm/fpsimd.h
201
extern void sve_alloc(struct task_struct *task, bool flush);
arch/arm64/include/asm/fpsimd.h
202
extern void fpsimd_release_task(struct task_struct *task);
arch/arm64/include/asm/fpsimd.h
203
extern void fpsimd_sync_from_effective_state(struct task_struct *task);
arch/arm64/include/asm/fpsimd.h
204
extern void fpsimd_sync_to_effective_state_zeropad(struct task_struct *task);
arch/arm64/include/asm/fpsimd.h
206
extern int vec_set_vector_length(struct task_struct *task, enum vec_type type,
arch/arm64/include/asm/fpsimd.h
308
static inline size_t sve_state_size(struct task_struct const *task)
arch/arm64/include/asm/fpsimd.h
310
unsigned int sve_vl = task_get_sve_vl(task);
arch/arm64/include/asm/fpsimd.h
311
unsigned int sme_vl = task_get_sme_vl(task);
arch/arm64/include/asm/fpsimd.h
317
static inline void sve_alloc(struct task_struct *task, bool flush) { }
arch/arm64/include/asm/fpsimd.h
318
static inline void fpsimd_release_task(struct task_struct *task) { }
arch/arm64/include/asm/fpsimd.h
319
static inline void fpsimd_sync_from_effective_state(struct task_struct *task) { }
arch/arm64/include/asm/fpsimd.h
320
static inline void fpsimd_sync_to_effective_state_zeropad(struct task_struct *task) { }
arch/arm64/include/asm/fpsimd.h
359
static inline size_t sve_state_size(struct task_struct const *task)
arch/arm64/include/asm/fpsimd.h
405
extern void sme_alloc(struct task_struct *task, bool flush);
arch/arm64/include/asm/fpsimd.h
426
static inline size_t sme_state_size(struct task_struct const *task)
arch/arm64/include/asm/fpsimd.h
428
return __sme_state_size(task_get_sme_vl(task));
arch/arm64/include/asm/fpsimd.h
440
static inline void sme_alloc(struct task_struct *task, bool flush) { }
arch/arm64/include/asm/fpsimd.h
454
static inline size_t sme_state_size(struct task_struct const *task)
arch/arm64/include/asm/gcs.h
162
static inline bool task_gcs_el0_enabled(struct task_struct *task)
arch/arm64/include/asm/gcs.h
167
static inline void gcs_set_el0_mode(struct task_struct *task) { }
arch/arm64/include/asm/gcs.h
168
static inline void gcs_free(struct task_struct *task) { }
arch/arm64/include/asm/gcs.h
179
static inline int gcs_check_locked(struct task_struct *task,
arch/arm64/include/asm/gcs.h
59
static inline bool task_gcs_el0_enabled(struct task_struct *task)
arch/arm64/include/asm/gcs.h
61
return task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
arch/arm64/include/asm/gcs.h
64
void gcs_set_el0_mode(struct task_struct *task);
arch/arm64/include/asm/gcs.h
65
void gcs_free(struct task_struct *task);
arch/arm64/include/asm/gcs.h
70
static inline int gcs_check_locked(struct task_struct *task,
arch/arm64/include/asm/gcs.h
73
unsigned long cur_val = task->thread.gcs_el0_mode;
arch/arm64/include/asm/gcs.h
75
cur_val &= task->thread.gcs_el0_locked;
arch/arm64/include/asm/gcs.h
76
new_val &= task->thread.gcs_el0_locked;
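The gcs.h fragments above belong to gcs_check_locked(), which refuses mode changes to bits a task has locked via the shadow-stack prctl. A reconstruction from the listed lines (the -EBUSY return is recalled, not shown):

static inline int gcs_check_locked(struct task_struct *task,
                                   unsigned long new_val)
{
        unsigned long cur_val = task->thread.gcs_el0_mode;

        /* Compare only the locked bits; unlocked bits may change freely. */
        cur_val &= task->thread.gcs_el0_locked;
        new_val &= task->thread.gcs_el0_locked;

        if (cur_val != new_val)
                return -EBUSY;

        return 0;
}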
arch/arm64/include/asm/hw_breakpoint.h
128
extern void ptrace_hw_copy_thread(struct task_struct *task);
arch/arm64/include/asm/hw_breakpoint.h
133
static inline void ptrace_hw_copy_thread(struct task_struct *task)
arch/arm64/include/asm/mte.h
106
long set_mte_ctrl(struct task_struct *task, unsigned long arg);
arch/arm64/include/asm/mte.h
107
long get_mte_ctrl(struct task_struct *task);
arch/arm64/include/asm/mte.h
149
static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
arch/arm64/include/asm/mte.h
153
static inline long get_mte_ctrl(struct task_struct *task)
arch/arm64/include/asm/mte.h
227
static inline void mte_disable_tco_entry(struct task_struct *task)
arch/arm64/include/asm/mte.h
243
(task->thread.sctlr_user & (1UL << SCTLR_EL1_TCF0_SHIFT)))
arch/arm64/include/asm/processor.h
228
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type);
arch/arm64/include/asm/processor.h
229
void task_set_vl(struct task_struct *task, enum vec_type type,
arch/arm64/include/asm/processor.h
231
void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
arch/arm64/include/asm/processor.h
233
unsigned int task_get_vl_onexec(const struct task_struct *task,
arch/arm64/include/asm/processor.h
236
static inline unsigned int task_get_sve_vl(const struct task_struct *task)
arch/arm64/include/asm/processor.h
238
return task_get_vl(task, ARM64_VEC_SVE);
arch/arm64/include/asm/processor.h
241
static inline unsigned int task_get_sme_vl(const struct task_struct *task)
arch/arm64/include/asm/processor.h
243
return task_get_vl(task, ARM64_VEC_SME);
arch/arm64/include/asm/processor.h
246
static inline void task_set_sve_vl(struct task_struct *task, unsigned long vl)
arch/arm64/include/asm/processor.h
248
task_set_vl(task, ARM64_VEC_SVE, vl);
arch/arm64/include/asm/processor.h
251
static inline unsigned int task_get_sve_vl_onexec(const struct task_struct *task)
arch/arm64/include/asm/processor.h
253
return task_get_vl_onexec(task, ARM64_VEC_SVE);
arch/arm64/include/asm/processor.h
256
static inline void task_set_sve_vl_onexec(struct task_struct *task,
arch/arm64/include/asm/processor.h
259
task_set_vl_onexec(task, ARM64_VEC_SVE, vl);
arch/arm64/include/asm/processor.h
434
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
arch/arm64/include/asm/processor.h
435
long get_tagged_addr_ctrl(struct task_struct *task);
arch/arm64/include/asm/ptrace.h
337
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
arch/arm64/include/asm/smp.h
91
struct task_struct *task;
arch/arm64/include/asm/syscall.h
115
static inline int syscall_get_arch(struct task_struct *task)
arch/arm64/include/asm/syscall.h
117
if (is_compat_thread(task_thread_info(task)))
arch/arm64/include/asm/syscall.h
20
static inline int syscall_get_nr(struct task_struct *task,
arch/arm64/include/asm/syscall.h
26
static inline void syscall_rollback(struct task_struct *task,
arch/arm64/include/asm/syscall.h
32
static inline long syscall_get_return_value(struct task_struct *task,
arch/arm64/include/asm/syscall.h
37
if (is_compat_thread(task_thread_info(task)))
arch/arm64/include/asm/syscall.h
43
static inline long syscall_get_error(struct task_struct *task,
arch/arm64/include/asm/syscall.h
46
unsigned long error = syscall_get_return_value(task, regs);
arch/arm64/include/asm/syscall.h
51
static inline void syscall_set_return_value(struct task_struct *task,
arch/arm64/include/asm/syscall.h
58
if (is_compat_thread(task_thread_info(task)))
arch/arm64/include/asm/syscall.h
64
static inline void syscall_set_nr(struct task_struct *task,
arch/arm64/include/asm/syscall.h
76
syscall_set_return_value(task, regs, -ENOSYS, 0);
arch/arm64/include/asm/syscall.h
80
static inline void syscall_get_arguments(struct task_struct *task,
arch/arm64/include/asm/syscall.h
92
static inline void syscall_set_arguments(struct task_struct *task,
arch/arm64/kernel/asm-offsets.c
99
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
arch/arm64/kernel/debug-monitors.c
320
void user_rewind_single_step(struct task_struct *task)
arch/arm64/kernel/debug-monitors.c
326
if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
arch/arm64/kernel/debug-monitors.c
327
set_regs_spsr_ss(task_pt_regs(task));
arch/arm64/kernel/debug-monitors.c
331
void user_fastforward_single_step(struct task_struct *task)
arch/arm64/kernel/debug-monitors.c
333
if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
arch/arm64/kernel/debug-monitors.c
334
clear_regs_spsr_ss(task_pt_regs(task));
arch/arm64/kernel/debug-monitors.c
338
struct task_struct *task)
arch/arm64/kernel/debug-monitors.c
340
if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
arch/arm64/kernel/debug-monitors.c
382
void user_enable_single_step(struct task_struct *task)
arch/arm64/kernel/debug-monitors.c
384
struct thread_info *ti = task_thread_info(task);
arch/arm64/kernel/debug-monitors.c
387
set_regs_spsr_ss(task_pt_regs(task));
arch/arm64/kernel/debug-monitors.c
391
void user_disable_single_step(struct task_struct *task)
arch/arm64/kernel/debug-monitors.c
393
clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
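The debug-monitors.c entries show arm64 user single-step as two pieces of state: the TIF_SINGLESTEP thread flag and the PSTATE.SS bit in the saved user registers. Reassembled from the listed lines (the test_and_set guard is recalled from the surrounding source):

void user_enable_single_step(struct task_struct *task)
{
        struct thread_info *ti = task_thread_info(task);

        /* Arm SS only on the transition from not-stepping to stepping. */
        if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
                set_regs_spsr_ss(task_pt_regs(task));
}

void user_disable_single_step(struct task_struct *task)
{
        clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
}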
arch/arm64/kernel/fpsimd.c
1176
void sme_alloc(struct task_struct *task, bool flush)
arch/arm64/kernel/fpsimd.c
1178
if (task->thread.sme_state) {
arch/arm64/kernel/fpsimd.c
1180
memset(task->thread.sme_state, 0,
arch/arm64/kernel/fpsimd.c
1181
sme_state_size(task));
arch/arm64/kernel/fpsimd.c
1186
task->thread.sme_state =
arch/arm64/kernel/fpsimd.c
1187
kzalloc(sme_state_size(task), GFP_KERNEL);
arch/arm64/kernel/fpsimd.c
1190
static void sme_free(struct task_struct *task)
arch/arm64/kernel/fpsimd.c
1192
kfree(task->thread.sme_state);
arch/arm64/kernel/fpsimd.c
1193
task->thread.sme_state = NULL;
arch/arm64/kernel/fpsimd.c
1458
static void fpsimd_load_kernel_state(struct task_struct *task)
arch/arm64/kernel/fpsimd.c
1466
if (last->st == task->thread.kernel_fpsimd_state &&
arch/arm64/kernel/fpsimd.c
1467
task->thread.kernel_fpsimd_cpu == smp_processor_id())
arch/arm64/kernel/fpsimd.c
1470
fpsimd_load_state(task->thread.kernel_fpsimd_state);
arch/arm64/kernel/fpsimd.c
1473
static void fpsimd_save_kernel_state(struct task_struct *task)
arch/arm64/kernel/fpsimd.c
1476
.st = task->thread.kernel_fpsimd_state,
arch/arm64/kernel/fpsimd.c
1482
fpsimd_save_state(task->thread.kernel_fpsimd_state);
arch/arm64/kernel/fpsimd.c
1485
task->thread.kernel_fpsimd_cpu = smp_processor_id();
arch/arm64/kernel/fpsimd.c
255
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type)
arch/arm64/kernel/fpsimd.c
257
return task->thread.vl[type];
arch/arm64/kernel/fpsimd.c
260
void task_set_vl(struct task_struct *task, enum vec_type type,
arch/arm64/kernel/fpsimd.c
263
task->thread.vl[type] = vl;
arch/arm64/kernel/fpsimd.c
266
unsigned int task_get_vl_onexec(const struct task_struct *task,
arch/arm64/kernel/fpsimd.c
269
return task->thread.vl_onexec[type];
arch/arm64/kernel/fpsimd.c
272
void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
arch/arm64/kernel/fpsimd.c
275
task->thread.vl_onexec[type] = vl;
arch/arm64/kernel/fpsimd.c
661
static inline void fpsimd_to_sve(struct task_struct *task)
arch/arm64/kernel/fpsimd.c
664
void *sst = task->thread.sve_state;
arch/arm64/kernel/fpsimd.c
665
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
arch/arm64/kernel/fpsimd.c
670
vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
arch/arm64/kernel/fpsimd.c
685
static inline void sve_to_fpsimd(struct task_struct *task)
arch/arm64/kernel/fpsimd.c
688
void const *sst = task->thread.sve_state;
arch/arm64/kernel/fpsimd.c
689
struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
arch/arm64/kernel/fpsimd.c
696
vl = thread_get_cur_vl(&task->thread);
arch/arm64/kernel/fpsimd.c
712
void task_smstop_sm(struct task_struct *task)
arch/arm64/kernel/fpsimd.c
714
if (!thread_sm_enabled(&task->thread))
arch/arm64/kernel/fpsimd.c
717
__fpsimd_zero_vregs(&task->thread.uw.fpsimd_state);
arch/arm64/kernel/fpsimd.c
718
task->thread.uw.fpsimd_state.fpsr = 0x0800009f;
arch/arm64/kernel/fpsimd.c
720
task->thread.uw.fpmr = 0;
arch/arm64/kernel/fpsimd.c
722
task->thread.svcr &= ~SVCR_SM_MASK;
arch/arm64/kernel/fpsimd.c
723
task->thread.fp_type = FP_STATE_FPSIMD;
arch/arm64/kernel/fpsimd.c
733
static void sve_free(struct task_struct *task)
arch/arm64/kernel/fpsimd.c
735
kfree(task->thread.sve_state);
arch/arm64/kernel/fpsimd.c
736
task->thread.sve_state = NULL;
arch/arm64/kernel/fpsimd.c
749
void sve_alloc(struct task_struct *task, bool flush)
arch/arm64/kernel/fpsimd.c
751
if (task->thread.sve_state) {
arch/arm64/kernel/fpsimd.c
753
memset(task->thread.sve_state, 0,
arch/arm64/kernel/fpsimd.c
754
sve_state_size(task));
arch/arm64/kernel/fpsimd.c
759
task->thread.sve_state =
arch/arm64/kernel/fpsimd.c
760
kzalloc(sve_state_size(task), GFP_KERNEL);
arch/arm64/kernel/fpsimd.c
770
void fpsimd_sync_from_effective_state(struct task_struct *task)
arch/arm64/kernel/fpsimd.c
772
if (task->thread.fp_type == FP_STATE_SVE)
arch/arm64/kernel/fpsimd.c
773
sve_to_fpsimd(task);
arch/arm64/kernel/fpsimd.c
784
void fpsimd_sync_to_effective_state_zeropad(struct task_struct *task)
arch/arm64/kernel/fpsimd.c
787
void *sst = task->thread.sve_state;
arch/arm64/kernel/fpsimd.c
788
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
arch/arm64/kernel/fpsimd.c
790
if (task->thread.fp_type != FP_STATE_SVE)
arch/arm64/kernel/fpsimd.c
793
vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
arch/arm64/kernel/fpsimd.c
799
static int change_live_vector_length(struct task_struct *task,
arch/arm64/kernel/fpsimd.c
803
unsigned int sve_vl = task_get_sve_vl(task);
arch/arm64/kernel/fpsimd.c
804
unsigned int sme_vl = task_get_sme_vl(task);
arch/arm64/kernel/fpsimd.c
832
if (task == current)
arch/arm64/kernel/fpsimd.c
835
fpsimd_flush_task_state(task);
arch/arm64/kernel/fpsimd.c
841
fpsimd_sync_from_effective_state(task);
arch/arm64/kernel/fpsimd.c
842
task_set_vl(task, type, vl);
arch/arm64/kernel/fpsimd.c
843
kfree(task->thread.sve_state);
arch/arm64/kernel/fpsimd.c
844
task->thread.sve_state = sve_state;
arch/arm64/kernel/fpsimd.c
845
fpsimd_sync_to_effective_state_zeropad(task);
arch/arm64/kernel/fpsimd.c
848
task->thread.svcr &= ~SVCR_ZA_MASK;
arch/arm64/kernel/fpsimd.c
849
kfree(task->thread.sme_state);
arch/arm64/kernel/fpsimd.c
850
task->thread.sme_state = sme_state;
arch/arm64/kernel/fpsimd.c
861
int vec_set_vector_length(struct task_struct *task, enum vec_type type,
arch/arm64/kernel/fpsimd.c
885
if (!onexec && vl != task_get_vl(task, type)) {
arch/arm64/kernel/fpsimd.c
886
if (change_live_vector_length(task, type, vl))
arch/arm64/kernel/fpsimd.c
891
task_set_vl_onexec(task, type, vl);
arch/arm64/kernel/fpsimd.c
894
task_set_vl_onexec(task, type, 0);
arch/arm64/kernel/fpsimd.c
896
update_tsk_thread_flag(task, vec_vl_inherit_flag(type),
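sve_alloc() and sme_alloc() above share one reuse-or-allocate shape: keep an existing state buffer (zeroing it when the caller asks for a flush), otherwise kzalloc() one sized for the task's current vector length. A reconstruction of sve_alloc() from the listed fragments; the early-return structure is inferred:

void sve_alloc(struct task_struct *task, bool flush)
{
        if (task->thread.sve_state) {
                if (flush)
                        memset(task->thread.sve_state, 0,
                               sve_state_size(task));
                return;
        }

        /* Allocation can fail; callers must tolerate a NULL sve_state. */
        task->thread.sve_state =
                kzalloc(sve_state_size(task), GFP_KERNEL);
}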
arch/arm64/kernel/kgdb.c
129
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
arch/arm64/kernel/kgdb.c
131
struct cpu_context *cpu_context = &task->thread.cpu_context;
arch/arm64/kernel/mte.c
203
static void mte_update_sctlr_user(struct task_struct *task)
arch/arm64/kernel/mte.c
211
unsigned long sctlr = task->thread.sctlr_user;
arch/arm64/kernel/mte.c
212
unsigned long mte_ctrl = task->thread.mte_ctrl;
arch/arm64/kernel/mte.c
237
task->thread.sctlr_user = sctlr;
arch/arm64/kernel/mte.c
240
static void mte_update_gcr_excl(struct task_struct *task)
arch/arm64/kernel/mte.c
250
((task->thread.mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
arch/arm64/kernel/mte.c
372
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
arch/arm64/kernel/mte.c
399
task->thread.mte_ctrl = mte_ctrl;
arch/arm64/kernel/mte.c
400
if (task == current) {
arch/arm64/kernel/mte.c
402
mte_update_sctlr_user(task);
arch/arm64/kernel/mte.c
403
mte_update_gcr_excl(task);
arch/arm64/kernel/mte.c
404
update_sctlr_el1(task->thread.sctlr_user);
arch/arm64/kernel/mte.c
411
long get_mte_ctrl(struct task_struct *task)
arch/arm64/kernel/mte.c
414
u64 mte_ctrl = task->thread.mte_ctrl;
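The mte.c entries sketch the shape of set_mte_ctrl(): store the new control word in the thread struct, and only when the target is the running task resync the live SCTLR/GCR state, since any other task picks it up on its next context switch. A skeleton under that reading; validation of arg is elided and the preemption guard is recalled, not shown:

long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
        u64 mte_ctrl = 0;       /* placeholder: real code decodes 'arg' */

        task->thread.mte_ctrl = mte_ctrl;
        if (task == current) {
                preempt_disable();
                mte_update_sctlr_user(task);    /* file-local helpers */
                mte_update_gcr_excl(task);
                update_sctlr_el1(task->thread.sctlr_user);
                preempt_enable();
        }

        return 0;
}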
arch/arm64/kernel/perf_regs.c
93
u64 perf_reg_abi(struct task_struct *task)
arch/arm64/kernel/perf_regs.c
95
if (is_compat_thread(task_thread_info(task)))
arch/arm64/kernel/process.c
851
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
arch/arm64/kernel/process.c
854
struct thread_info *ti = task_thread_info(task);
arch/arm64/kernel/process.c
877
if (set_mte_ctrl(task, arg) != 0)
arch/arm64/kernel/process.c
885
long get_tagged_addr_ctrl(struct task_struct *task)
arch/arm64/kernel/process.c
888
struct thread_info *ti = task_thread_info(task);
arch/arm64/kernel/process.c
896
ret |= get_mte_ctrl(task);
arch/arm64/kernel/proton-pack.c
676
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
arch/arm64/kernel/proton-pack.c
678
task_clear_spec_ssb_noexec(task);
arch/arm64/kernel/proton-pack.c
679
task_set_spec_ssb_disable(task);
arch/arm64/kernel/proton-pack.c
680
set_tsk_thread_flag(task, TIF_SSBD);
arch/arm64/kernel/proton-pack.c
683
static void ssbd_prctl_disable_mitigation(struct task_struct *task)
arch/arm64/kernel/proton-pack.c
685
task_clear_spec_ssb_noexec(task);
arch/arm64/kernel/proton-pack.c
686
task_clear_spec_ssb_disable(task);
arch/arm64/kernel/proton-pack.c
687
clear_tsk_thread_flag(task, TIF_SSBD);
arch/arm64/kernel/proton-pack.c
690
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
arch/arm64/kernel/proton-pack.c
699
if (task_spec_ssb_force_disable(task))
arch/arm64/kernel/proton-pack.c
709
ssbd_prctl_disable_mitigation(task);
arch/arm64/kernel/proton-pack.c
720
task_set_spec_ssb_force_disable(task);
arch/arm64/kernel/proton-pack.c
728
ssbd_prctl_enable_mitigation(task);
arch/arm64/kernel/proton-pack.c
736
if (task_spec_ssb_force_disable(task) ||
arch/arm64/kernel/proton-pack.c
742
ssbd_prctl_enable_mitigation(task);
arch/arm64/kernel/proton-pack.c
743
task_set_spec_ssb_noexec(task);
arch/arm64/kernel/proton-pack.c
749
spectre_v4_enable_task_mitigation(task);
arch/arm64/kernel/proton-pack.c
753
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
arch/arm64/kernel/proton-pack.c
758
return ssbd_prctl_set(task, ctrl);
arch/arm64/kernel/proton-pack.c
764
static int ssbd_prctl_get(struct task_struct *task)
arch/arm64/kernel/proton-pack.c
785
if (task_spec_ssb_force_disable(task))
arch/arm64/kernel/proton-pack.c
788
if (task_spec_ssb_noexec(task))
arch/arm64/kernel/proton-pack.c
791
if (task_spec_ssb_disable(task))
arch/arm64/kernel/proton-pack.c
797
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
arch/arm64/kernel/proton-pack.c
801
return ssbd_prctl_get(task);
arch/arm64/kernel/ptrace.c
1803
static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
arch/arm64/kernel/ptrace.c
1805
struct pt_regs *regs = task_pt_regs(task);
arch/arm64/kernel/ptrace.c
2314
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/arm64/kernel/ptrace.c
2324
else if (is_compat_thread(task_thread_info(task)))
arch/arm64/kernel/ptrace.c
2520
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
arch/arm64/kernel/ptrace.c
2523
user_regs_reset_single_step(regs, task);
arch/arm64/kernel/ptrace.c
2525
if (is_compat_thread(task_thread_info(task)))
arch/arm64/kernel/smp.c
120
secondary_data.task = idle;
arch/arm64/kernel/smp.c
141
secondary_data.task = NULL;
arch/arm64/kernel/stacktrace.c
121
struct task_struct *task)
arch/arm64/kernel/stacktrace.c
123
kunwind_init(state, task);
arch/arm64/kernel/stacktrace.c
125
state->common.fp = thread_saved_fp(task);
arch/arm64/kernel/stacktrace.c
126
state->common.pc = thread_saved_pc(task);
arch/arm64/kernel/stacktrace.c
134
if (state->task->ret_stack &&
arch/arm64/kernel/stacktrace.c
137
orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
arch/arm64/kernel/stacktrace.c
141
WARN_ON_ONCE(state->task == current);
arch/arm64/kernel/stacktrace.c
152
orig_pc = kretprobe_find_ret_addr(state->task,
arch/arm64/kernel/stacktrace.c
192
struct task_struct *tsk = state->task;
arch/arm64/kernel/stacktrace.c
309
((task == current) && !preemptible()) \
arch/arm64/kernel/stacktrace.c
320
((task == current) && in_nmi()) \
arch/arm64/kernel/stacktrace.c
327
((task == current) && current_in_efi()) \
arch/arm64/kernel/stacktrace.c
334
void *cookie, struct task_struct *task,
arch/arm64/kernel/stacktrace.c
338
stackinfo_get_task(task),
arch/arm64/kernel/stacktrace.c
357
if (task != current)
arch/arm64/kernel/stacktrace.c
360
} else if (task == current) {
arch/arm64/kernel/stacktrace.c
363
kunwind_init_from_task(&state, task);
arch/arm64/kernel/stacktrace.c
382
void *cookie, struct task_struct *task,
arch/arm64/kernel/stacktrace.c
390
kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
arch/arm64/kernel/stacktrace.c
412
struct task_struct *task)
arch/arm64/kernel/stacktrace.c
420
task, NULL);
arch/arm64/kernel/stacktrace.c
51
struct task_struct *task;
arch/arm64/kernel/stacktrace.c
63
struct task_struct *task)
arch/arm64/kernel/stacktrace.c
66
state->task = task;
arch/arm64/mm/gcs.c
139
void gcs_set_el0_mode(struct task_struct *task)
arch/arm64/mm/gcs.c
143
if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE)
arch/arm64/mm/gcs.c
146
if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_WRITE)
arch/arm64/mm/gcs.c
149
if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_PUSH)
arch/arm64/mm/gcs.c
155
void gcs_free(struct task_struct *task)
arch/arm64/mm/gcs.c
160
if (!task->mm || task->mm != current->mm)
arch/arm64/mm/gcs.c
163
if (task->thread.gcs_base)
arch/arm64/mm/gcs.c
164
vm_munmap(task->thread.gcs_base, task->thread.gcs_size);
arch/arm64/mm/gcs.c
166
task->thread.gcspr_el0 = 0;
arch/arm64/mm/gcs.c
167
task->thread.gcs_base = 0;
arch/arm64/mm/gcs.c
168
task->thread.gcs_size = 0;
arch/arm64/mm/gcs.c
171
int arch_set_shadow_stack_status(struct task_struct *task, unsigned long arg)
arch/arm64/mm/gcs.c
179
if (is_compat_thread(task_thread_info(task)))
arch/arm64/mm/gcs.c
186
ret = gcs_check_locked(task, arg);
arch/arm64/mm/gcs.c
192
!task_gcs_el0_enabled(task)) {
arch/arm64/mm/gcs.c
194
if (task->thread.gcs_base || task->thread.gcspr_el0)
arch/arm64/mm/gcs.c
197
if (task != current)
arch/arm64/mm/gcs.c
205
task->thread.gcspr_el0 = gcs + size - sizeof(u64);
arch/arm64/mm/gcs.c
206
task->thread.gcs_base = gcs;
arch/arm64/mm/gcs.c
207
task->thread.gcs_size = size;
arch/arm64/mm/gcs.c
208
if (task == current)
arch/arm64/mm/gcs.c
209
write_sysreg_s(task->thread.gcspr_el0,
arch/arm64/mm/gcs.c
213
task->thread.gcs_el0_mode = arg;
arch/arm64/mm/gcs.c
214
if (task == current)
arch/arm64/mm/gcs.c
215
gcs_set_el0_mode(task);
arch/arm64/mm/gcs.c
220
int arch_get_shadow_stack_status(struct task_struct *task,
arch/arm64/mm/gcs.c
226
if (is_compat_thread(task_thread_info(task)))
arch/arm64/mm/gcs.c
229
return put_user(task->thread.gcs_el0_mode, arg);
arch/arm64/mm/gcs.c
232
int arch_lock_shadow_stack_status(struct task_struct *task,
arch/arm64/mm/gcs.c
238
if (is_compat_thread(task_thread_info(task)))
arch/arm64/mm/gcs.c
245
task->thread.gcs_el0_locked |= arg;
arch/csky/include/asm/syscall.h
14
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
arch/csky/include/asm/syscall.h
20
syscall_set_nr(struct task_struct *task, struct pt_regs *regs,
arch/csky/include/asm/syscall.h
27
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
arch/csky/include/asm/syscall.h
33
syscall_get_error(struct task_struct *task, struct pt_regs *regs)
arch/csky/include/asm/syscall.h
41
syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
arch/csky/include/asm/syscall.h
47
syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
arch/csky/include/asm/syscall.h
54
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
arch/csky/include/asm/syscall.h
63
syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
arch/csky/include/asm/syscall.h
76
syscall_get_arch(struct task_struct *task)
arch/csky/include/asm/thread_info.h
14
struct task_struct *task;
arch/csky/include/asm/thread_info.h
26
.task = &tsk, \
arch/csky/kernel/asm-offsets.c
30
DEFINE(TINFO_TASK, offsetof(struct thread_info, task));
arch/csky/kernel/perf_regs.c
29
u64 perf_reg_abi(struct task_struct *task)
arch/csky/kernel/ptrace.c
193
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/csky/kernel/stacktrace.c
112
unsigned long __get_wchan(struct task_struct *task)
arch/csky/kernel/stacktrace.c
116
walk_stackframe(task, NULL, save_wchan, &pc);
arch/csky/kernel/stacktrace.c
16
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
arch/csky/kernel/stacktrace.c
25
} else if (task == NULL || task == current) {
arch/csky/kernel/stacktrace.c
32
fp = thread_saved_fp(task);
arch/csky/kernel/stacktrace.c
33
sp = thread_saved_sp(task);
arch/csky/kernel/stacktrace.c
34
pc = thread_saved_lr(task);
arch/csky/kernel/stacktrace.c
60
static void notrace walk_stackframe(struct task_struct *task,
arch/csky/kernel/stacktrace.c
69
} else if (task == NULL || task == current) {
arch/csky/kernel/stacktrace.c
74
sp = thread_saved_sp(task);
arch/csky/kernel/stacktrace.c
75
pc = thread_saved_lr(task);
arch/csky/kernel/stacktrace.c
96
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
arch/csky/kernel/stacktrace.c
99
walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
arch/hexagon/include/asm/mmu_context.h
57
switch_mm(prev, next, current_thread_info()->task);
arch/hexagon/include/asm/processor.h
57
#define task_pt_regs(task) \
arch/hexagon/include/asm/processor.h
58
((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)
arch/hexagon/include/asm/syscall.h
23
static inline long syscall_get_nr(struct task_struct *task,
arch/hexagon/include/asm/syscall.h
29
static inline void syscall_set_nr(struct task_struct *task,
arch/hexagon/include/asm/syscall.h
36
static inline void syscall_get_arguments(struct task_struct *task,
arch/hexagon/include/asm/syscall.h
43
static inline void syscall_set_arguments(struct task_struct *task,
arch/hexagon/include/asm/syscall.h
50
static inline long syscall_get_error(struct task_struct *task,
arch/hexagon/include/asm/syscall.h
56
static inline long syscall_get_return_value(struct task_struct *task,
arch/hexagon/include/asm/syscall.h
62
static inline void syscall_set_return_value(struct task_struct *task,
arch/hexagon/include/asm/syscall.h
69
static inline int syscall_get_arch(struct task_struct *task)
arch/hexagon/include/asm/thread_info.h
32
struct task_struct *task; /* main task structure */
arch/hexagon/include/asm/thread_info.h
60
.task = &tsk, \
arch/hexagon/kernel/kgdb.c
108
struct task_struct *task)
arch/hexagon/kernel/kgdb.c
112
if (task == NULL)
arch/hexagon/kernel/kgdb.c
119
thread_regs = task_pt_regs(task);
arch/hexagon/kernel/ptrace.c
158
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/hexagon/kernel/traps.c
101
task->thread.switch_sp)->fp;
arch/hexagon/kernel/traps.c
115
low = (unsigned long)task_stack_page(task);
arch/hexagon/kernel/traps.c
174
void show_stack(struct task_struct *task, unsigned long *fp, const char *loglvl)
arch/hexagon/kernel/traps.c
177
do_show_stack(task, fp, 0, loglvl);
arch/hexagon/kernel/traps.c
77
static void do_show_stack(struct task_struct *task, unsigned long *fp,
arch/hexagon/kernel/traps.c
89
if (task == NULL)
arch/hexagon/kernel/traps.c
90
task = current;
arch/hexagon/kernel/traps.c
93
task->comm, task_pid_nr(task));
arch/hexagon/kernel/traps.c
96
if (task == current) {
arch/loongarch/include/asm/hw_breakpoint.h
123
extern void ptrace_hw_copy_thread(struct task_struct *task);
arch/loongarch/include/asm/hw_breakpoint.h
126
static inline void ptrace_hw_copy_thread(struct task_struct *task)
arch/loongarch/include/asm/mmu_context.h
134
#define deactivate_mm(task, mm) do { } while (0)
arch/loongarch/include/asm/stacktrace.h
31
bool in_task_stack(unsigned long stack, struct task_struct *task, struct stack_info *info);
arch/loongarch/include/asm/stacktrace.h
32
int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_info *info);
arch/loongarch/include/asm/syscall.h
23
static inline long syscall_get_nr(struct task_struct *task,
arch/loongarch/include/asm/syscall.h
29
static inline void syscall_set_nr(struct task_struct *task,
arch/loongarch/include/asm/syscall.h
36
static inline void syscall_rollback(struct task_struct *task,
arch/loongarch/include/asm/syscall.h
42
static inline long syscall_get_error(struct task_struct *task,
arch/loongarch/include/asm/syscall.h
50
static inline long syscall_get_return_value(struct task_struct *task,
arch/loongarch/include/asm/syscall.h
56
static inline void syscall_set_return_value(struct task_struct *task,
arch/loongarch/include/asm/syscall.h
63
static inline void syscall_get_arguments(struct task_struct *task,
arch/loongarch/include/asm/syscall.h
71
static inline void syscall_set_arguments(struct task_struct *task,
arch/loongarch/include/asm/syscall.h
79
static inline int syscall_get_arch(struct task_struct *task)
arch/loongarch/include/asm/thread_info.h
25
struct task_struct *task; /* main task structure */
arch/loongarch/include/asm/thread_info.h
40
.task = &tsk, \
arch/loongarch/include/asm/unwind.h
25
struct task_struct *task;
arch/loongarch/include/asm/unwind.h
34
struct task_struct *task, struct pt_regs *regs);
arch/loongarch/include/asm/unwind.h
53
return ftrace_graph_ret_addr(state->task, &state->graph_idx,
arch/loongarch/include/asm/unwind.h
58
struct task_struct *task, struct pt_regs *regs)
arch/loongarch/include/asm/unwind.h
66
} else if (task && task != current) {
arch/loongarch/include/asm/unwind.h
67
state->sp = thread_saved_fp(task);
arch/loongarch/include/asm/unwind.h
68
state->pc = thread_saved_ra(task);
arch/loongarch/include/asm/unwind.h
77
state->task = task;
arch/loongarch/include/asm/unwind.h
78
get_stack_info(state->sp, state->task, &state->stack_info);
arch/loongarch/kernel/asm-offsets.c
87
OFFSET(TI_TASK, thread_info, task);
arch/loongarch/kernel/process.c
235
unsigned long __get_wchan(struct task_struct *task)
arch/loongarch/kernel/process.c
240
if (!try_get_task_stack(task))
arch/loongarch/kernel/process.c
243
for (unwind_start(&state, task, NULL);
arch/loongarch/kernel/process.c
253
put_task_stack(task);
arch/loongarch/kernel/process.c
279
bool in_task_stack(unsigned long stack, struct task_struct *task,
arch/loongarch/kernel/process.c
282
unsigned long begin = (unsigned long)task_stack_page(task);
arch/loongarch/kernel/process.c
296
int get_stack_info(unsigned long stack, struct task_struct *task,
arch/loongarch/kernel/process.c
299
task = task ? : current;
arch/loongarch/kernel/process.c
304
if (in_task_stack(stack, task, info))
arch/loongarch/kernel/process.c
307
if (task != current)
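The process.c entries above give the skeleton of loongarch's __get_wchan(): pin the target's stack, walk frames from its saved context, and report the first PC outside the scheduler as the wait channel. The loop body below is reconstructed around the listed lines:

unsigned long __get_wchan(struct task_struct *task)
{
        unsigned long pc = 0;
        struct unwind_state state;

        if (!try_get_task_stack(task))  /* task may be exiting */
                return 0;

        for (unwind_start(&state, task, NULL);
             !unwind_done(&state); unwind_next_frame(&state)) {
                pc = unwind_get_return_address(&state);
                if (!pc || !in_sched_functions(pc))
                        break;
        }

        put_task_stack(task);
        return pc;
}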
arch/loongarch/kernel/ptrace.c
1082
void user_enable_single_step(struct task_struct *task)
arch/loongarch/kernel/ptrace.c
1084
struct thread_info *ti = task_thread_info(task);
arch/loongarch/kernel/ptrace.c
1086
set_single_step(task, task_pt_regs(task)->csr_era);
arch/loongarch/kernel/ptrace.c
1087
task->thread.single_step = task_pt_regs(task)->csr_era;
arch/loongarch/kernel/ptrace.c
1091
void user_disable_single_step(struct task_struct *task)
arch/loongarch/kernel/ptrace.c
1093
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
arch/loongarch/kernel/ptrace.c
953
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/loongarch/kernel/stacktrace.c
15
struct task_struct *task, struct pt_regs *regs)
arch/loongarch/kernel/stacktrace.c
24
if (task == current) {
arch/loongarch/kernel/stacktrace.c
28
regs->regs[3] = thread_saved_fp(task);
arch/loongarch/kernel/stacktrace.c
29
regs->csr_era = thread_saved_ra(task);
arch/loongarch/kernel/stacktrace.c
35
for (unwind_start(&state, task, regs);
arch/loongarch/kernel/stacktrace.c
44
void *cookie, struct task_struct *task)
arch/loongarch/kernel/stacktrace.c
51
if (task == current) {
arch/loongarch/kernel/stacktrace.c
56
regs->regs[3] = thread_saved_fp(task);
arch/loongarch/kernel/stacktrace.c
57
regs->csr_era = thread_saved_ra(task);
arch/loongarch/kernel/stacktrace.c
58
regs->regs[22] = task->thread.reg22;
arch/loongarch/kernel/stacktrace.c
62
for (unwind_start(&state, task, regs);
arch/loongarch/kernel/traps.c
102
static void show_stacktrace(struct task_struct *task,
arch/loongarch/kernel/traps.c
131
show_backtrace(task, regs, loglvl, user);
arch/loongarch/kernel/traps.c
134
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
arch/loongarch/kernel/traps.c
144
if (!task || task == current)
arch/loongarch/kernel/traps.c
147
regs.csr_era = task->thread.reg01;
arch/loongarch/kernel/traps.c
149
regs.regs[3] = task->thread.reg03;
arch/loongarch/kernel/traps.c
150
regs.regs[22] = task->thread.reg22;
arch/loongarch/kernel/traps.c
154
show_stacktrace(task, &regs, loglvl, false);
arch/loongarch/kernel/traps.c
83
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
arch/loongarch/kernel/traps.c
90
if (!task)
arch/loongarch/kernel/traps.c
91
task = current;
arch/loongarch/kernel/traps.c
94
for (unwind_start(&state, task, pregs);
arch/loongarch/kernel/unwind.c
29
} while (!get_stack_info(state->sp, state->task, info));
arch/loongarch/kernel/unwind_guess.c
14
void unwind_start(struct unwind_state *state, struct task_struct *task,
arch/loongarch/kernel/unwind_guess.c
17
__unwind_start(state, task, regs);
arch/loongarch/kernel/unwind_orc.c
332
return !get_stack_info(addr, state->task, info) && on_stack(info, addr, len);
arch/loongarch/kernel/unwind_orc.c
341
void unwind_start(struct unwind_state *state, struct task_struct *task,
arch/loongarch/kernel/unwind_orc.c
344
__unwind_start(state, task, regs);
arch/loongarch/kernel/unwind_orc.c
501
get_stack_info(state->sp, state->task, info);
arch/loongarch/kernel/unwind_prologue.c
217
get_stack_info(state->sp, state->task, info);
arch/loongarch/kernel/unwind_prologue.c
224
} while (!get_stack_info(state->sp, state->task, info));
arch/loongarch/kernel/unwind_prologue.c
237
void unwind_start(struct unwind_state *state, struct task_struct *task,
arch/loongarch/kernel/unwind_prologue.c
240
__unwind_start(state, task, regs);
arch/m68k/include/asm/current.h
22
return(current_thread_info()->task);
arch/m68k/include/asm/mmu_context.h
106
mmuar = task->thread.ksp;
arch/m68k/include/asm/mmu_context.h
117
pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm);
arch/m68k/include/asm/mmu_context.h
118
mm = task->mm;
arch/m68k/include/asm/mmu_context.h
93
static inline void load_ksp_mmu(struct task_struct *task)
arch/m68k/include/asm/syscall.h
11
static inline int syscall_get_nr(struct task_struct *task,
arch/m68k/include/asm/syscall.h
17
static inline void syscall_set_nr(struct task_struct *task,
arch/m68k/include/asm/syscall.h
24
static inline void syscall_rollback(struct task_struct *task,
arch/m68k/include/asm/syscall.h
30
static inline long syscall_get_error(struct task_struct *task,
arch/m68k/include/asm/syscall.h
38
static inline long syscall_get_return_value(struct task_struct *task,
arch/m68k/include/asm/syscall.h
44
static inline void syscall_set_return_value(struct task_struct *task,
arch/m68k/include/asm/syscall.h
51
static inline void syscall_get_arguments(struct task_struct *task,
arch/m68k/include/asm/syscall.h
61
static inline void syscall_set_arguments(struct task_struct *task,
arch/m68k/include/asm/syscall.h
71
static inline int syscall_get_arch(struct task_struct *task)
arch/m68k/include/asm/thread_info.h
28
struct task_struct *task; /* main task structure */
arch/m68k/include/asm/thread_info.h
38
.task = &tsk, \
arch/m68k/kernel/ptrace.c
101
static inline int put_reg(struct task_struct *task, int regno,
arch/m68k/kernel/ptrace.c
107
addr = &task->thread.usp;
arch/m68k/kernel/ptrace.c
109
addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
arch/m68k/kernel/ptrace.c
114
long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
arch/m68k/kernel/ptrace.c
346
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/m68k/kernel/ptrace.c
77
static inline long get_reg(struct task_struct *task, int regno)
arch/m68k/kernel/ptrace.c
82
addr = &task->thread.usp;
arch/m68k/kernel/ptrace.c
84
addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
arch/m68k/kernel/ptrace.c
89
long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
arch/m68k/kernel/traps.c
931
void show_stack(struct task_struct *task, unsigned long *stack,
arch/m68k/kernel/traps.c
939
if (task)
arch/m68k/kernel/traps.c
940
stack = (unsigned long *)task->thread.esp0;
arch/microblaze/include/asm/processor.h
72
# define task_tos(task) ((unsigned long)(task) + KERNEL_STACK_SIZE)
arch/microblaze/include/asm/processor.h
73
# define task_regs(task) ((struct pt_regs *)task_tos(task) - 1)
arch/microblaze/include/asm/processor.h
78
# define task_sp(task) (task_regs(task)->r1)
arch/microblaze/include/asm/processor.h
79
# define task_pc(task) (task_regs(task)->pc)
arch/microblaze/include/asm/processor.h
81
# define KSTK_EIP(task) (task_pc(task))
arch/microblaze/include/asm/processor.h
82
# define KSTK_ESP(task) (task_sp(task))
arch/microblaze/include/asm/syscall.h
11
static inline long syscall_get_nr(struct task_struct *task,
arch/microblaze/include/asm/syscall.h
17
static inline void syscall_set_nr(struct task_struct *task,
arch/microblaze/include/asm/syscall.h
24
static inline void syscall_rollback(struct task_struct *task,
arch/microblaze/include/asm/syscall.h
30
static inline long syscall_get_error(struct task_struct *task,
arch/microblaze/include/asm/syscall.h
36
static inline long syscall_get_return_value(struct task_struct *task,
arch/microblaze/include/asm/syscall.h
42
static inline void syscall_set_return_value(struct task_struct *task,
arch/microblaze/include/asm/syscall.h
68
static inline void syscall_get_arguments(struct task_struct *task,
arch/microblaze/include/asm/syscall.h
82
static inline int syscall_get_arch(struct task_struct *task)
arch/microblaze/include/asm/thread_info.h
60
struct task_struct *task; /* main task structure */
arch/microblaze/include/asm/thread_info.h
74
.task = &tsk, \
arch/microblaze/include/asm/unwind.h
23
void microblaze_unwind(struct task_struct *task, struct stack_trace *trace,
arch/microblaze/kernel/asm-offsets.c
88
DEFINE(TI_TASK, offsetof(struct thread_info, task));
arch/microblaze/kernel/traps.c
35
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
arch/microblaze/kernel/traps.c
41
if (task) {
arch/microblaze/kernel/traps.c
43
(task->stack))->cpu_context.r1;
arch/microblaze/kernel/traps.c
72
microblaze_unwind(task, NULL, loglvl);
arch/microblaze/kernel/traps.c
75
if (!task)
arch/microblaze/kernel/traps.c
76
task = current;
arch/microblaze/kernel/traps.c
78
debug_show_held_locks(task);
arch/microblaze/kernel/unwind.c
154
static void microblaze_unwind_inner(struct task_struct *task,
arch/microblaze/kernel/unwind.c
164
static inline void unwind_trap(struct task_struct *task, unsigned long pc,
arch/microblaze/kernel/unwind.c
182
static void microblaze_unwind_inner(struct task_struct *task,
arch/microblaze/kernel/unwind.c
218
unwind_trap(task, pc, fp, trace, loglvl);
arch/microblaze/kernel/unwind.c
236
if (unlikely(pc == task_pt_regs(task)->pc)) {
arch/microblaze/kernel/unwind.c
239
(unsigned long) task->pid,
arch/microblaze/kernel/unwind.c
240
task->comm);
arch/microblaze/kernel/unwind.c
273
void microblaze_unwind(struct task_struct *task, struct stack_trace *trace,
arch/microblaze/kernel/unwind.c
276
if (task) {
arch/microblaze/kernel/unwind.c
277
if (task == current) {
arch/microblaze/kernel/unwind.c
278
const struct pt_regs *regs = task_pt_regs(task);
arch/microblaze/kernel/unwind.c
279
microblaze_unwind_inner(task, regs->pc, regs->r1,
arch/microblaze/kernel/unwind.c
283
(struct thread_info *)(task->stack);
arch/microblaze/kernel/unwind.c
287
microblaze_unwind_inner(task,
arch/mips/include/asm/processor.h
398
extern int mips_get_process_fp_mode(struct task_struct *task);
arch/mips/include/asm/processor.h
399
extern int mips_set_process_fp_mode(struct task_struct *task,
arch/mips/include/asm/processor.h
402
#define GET_FP_MODE(task) mips_get_process_fp_mode(task)
arch/mips/include/asm/processor.h
403
#define SET_FP_MODE(task,value) mips_set_process_fp_mode(task, value)
arch/mips/include/asm/stacktrace.h
11
extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
arch/mips/include/asm/stacktrace.h
19
static inline unsigned long unwind_stack(struct task_struct *task,
arch/mips/include/asm/syscall.h
109
static inline long syscall_get_error(struct task_struct *task,
arch/mips/include/asm/syscall.h
115
static inline long syscall_get_return_value(struct task_struct *task,
arch/mips/include/asm/syscall.h
121
static inline void syscall_rollback(struct task_struct *task,
arch/mips/include/asm/syscall.h
127
static inline void syscall_set_return_value(struct task_struct *task,
arch/mips/include/asm/syscall.h
140
static inline void syscall_get_arguments(struct task_struct *task,
arch/mips/include/asm/syscall.h
148
if (mips_syscall_is_indirect(task, regs))
arch/mips/include/asm/syscall.h
152
mips_get_syscall_arg(args++, task, regs, i++);
arch/mips/include/asm/syscall.h
155
static inline void syscall_set_arguments(struct task_struct *task,
arch/mips/include/asm/syscall.h
163
mips_set_syscall_arg(args++, task, regs, i++);
arch/mips/include/asm/syscall.h
170
static inline int syscall_get_arch(struct task_struct *task)
arch/mips/include/asm/syscall.h
174
if (!test_tsk_thread_flag(task, TIF_32BIT_REGS)) {
arch/mips/include/asm/syscall.h
177
if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
arch/mips/include/asm/syscall.h
29
static inline bool mips_syscall_is_indirect(struct task_struct *task,
arch/mips/include/asm/syscall.h
34
test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
arch/mips/include/asm/syscall.h
38
static inline long syscall_get_nr(struct task_struct *task,
arch/mips/include/asm/syscall.h
41
return task_thread_info(task)->syscall;
arch/mips/include/asm/syscall.h
44
static inline void syscall_set_nr(struct task_struct *task,
arch/mips/include/asm/syscall.h
56
task_thread_info(task)->syscall = regs->regs[2] = nr;
arch/mips/include/asm/syscall.h
59
static inline void mips_syscall_update_nr(struct task_struct *task,
arch/mips/include/asm/syscall.h
66
if (mips_syscall_is_indirect(task, regs))
arch/mips/include/asm/syscall.h
67
task_thread_info(task)->syscall = regs->regs[4];
arch/mips/include/asm/syscall.h
69
task_thread_info(task)->syscall = regs->regs[2];
arch/mips/include/asm/syscall.h
73
struct task_struct *task, struct pt_regs *regs, unsigned int n)
arch/mips/include/asm/syscall.h
87
test_tsk_thread_flag(task, TIF_32BIT_REGS)))
arch/mips/include/asm/syscall.h
93
struct task_struct *task, struct pt_regs *regs, unsigned int n)
arch/mips/include/asm/thread_info.h
26
struct task_struct *task; /* main task structure */
arch/mips/include/asm/thread_info.h
40
.task = &tsk, \
arch/mips/include/asm/watch.h
21
#define __restore_watch(task) do { \
arch/mips/include/asm/watch.h
23
&task_thread_info(task)->flags))) { \
arch/mips/include/asm/watch.h
24
mips_install_watch_registers(task); \
arch/mips/include/asm/watch.h
29
#define __restore_watch(task) do {} while (0)
arch/mips/kernel/asm-offsets.c
106
OFFSET(TI_TASK, thread_info, task);
arch/mips/kernel/process.c
636
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
arch/mips/kernel/process.c
650
stack_page = (unsigned long)task_stack_page(task);
arch/mips/kernel/process.c
659
unsigned long __get_wchan(struct task_struct *task)
arch/mips/kernel/process.c
667
if (!task_stack_page(task))
arch/mips/kernel/process.c
670
pc = thread_saved_pc(task);
arch/mips/kernel/process.c
673
sp = task->thread.reg29 + schedule_mfi.frame_size;
arch/mips/kernel/process.c
676
pc = unwind_stack(task, &sp, pc, &ra);
arch/mips/kernel/process.c
761
int mips_get_process_fp_mode(struct task_struct *task)
arch/mips/kernel/process.c
765
if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
arch/mips/kernel/process.c
767
if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
arch/mips/kernel/process.c
785
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
arch/mips/kernel/process.c
793
if (value == mips_get_process_fp_mode(task))
arch/mips/kernel/process.c
824
for_each_thread(task, t) {
arch/mips/kernel/process.c
851
for_each_thread(task, t)
arch/mips/kernel/ptrace.c
1067
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/mips/kernel/ptrace.c
1073
if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
arch/mips/kernel/ptrace.c
1077
if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
arch/mips/kernel/traps.c
152
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
arch/mips/kernel/traps.c
159
if (!task)
arch/mips/kernel/traps.c
160
task = current;
arch/mips/kernel/traps.c
169
pc = unwind_stack(task, &sp, pc, &ra);
arch/mips/kernel/traps.c
178
static void show_stacktrace(struct task_struct *task,
arch/mips/kernel/traps.c
207
show_backtrace(task, regs, loglvl, user);
arch/mips/kernel/traps.c
210
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
arch/mips/kernel/traps.c
220
if (task && task != current) {
arch/mips/kernel/traps.c
221
regs.regs[29] = task->thread.reg29;
arch/mips/kernel/traps.c
223
regs.cp0_epc = task->thread.reg31;
arch/mips/kernel/traps.c
228
show_stacktrace(task, &regs, loglvl, false);
arch/nios2/include/asm/syscall.h
13
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
arch/nios2/include/asm/syscall.h
18
static inline void syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
arch/nios2/include/asm/syscall.h
23
static inline void syscall_rollback(struct task_struct *task,
arch/nios2/include/asm/syscall.h
30
static inline long syscall_get_error(struct task_struct *task,
arch/nios2/include/asm/syscall.h
36
static inline long syscall_get_return_value(struct task_struct *task,
arch/nios2/include/asm/syscall.h
42
static inline void syscall_set_return_value(struct task_struct *task,
arch/nios2/include/asm/syscall.h
55
static inline void syscall_get_arguments(struct task_struct *task,
arch/nios2/include/asm/syscall.h
66
static inline void syscall_set_arguments(struct task_struct *task,
arch/nios2/include/asm/syscall.h
77
static inline int syscall_get_arch(struct task_struct *task)
arch/nios2/include/asm/thread_info.h
37
struct task_struct *task; /* main task structure */
arch/nios2/include/asm/thread_info.h
51
.task = &tsk, \
arch/nios2/kernel/ptrace.c
115
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/nios2/kernel/traps.c
60
void show_stack(struct task_struct *task, unsigned long *stack,
arch/nios2/kernel/traps.c
67
if (task)
arch/nios2/kernel/traps.c
68
stack = (unsigned long *)task->thread.ksp;
arch/openrisc/include/asm/fpu.h
10
task->thread.fpcsr = mfspr(SPR_FPCSR);
arch/openrisc/include/asm/fpu.h
13
static inline void restore_fpu(struct task_struct *task)
arch/openrisc/include/asm/fpu.h
15
mtspr(SPR_FPCSR, task->thread.fpcsr);
arch/openrisc/include/asm/fpu.h
8
static inline void save_fpu(struct task_struct *task)
arch/openrisc/include/asm/processor.h
64
#define task_pt_regs(task) user_regs(task_thread_info(task))
arch/openrisc/include/asm/syscall.h
23
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
arch/openrisc/include/asm/syscall.h
29
syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
arch/openrisc/include/asm/syscall.h
35
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
arch/openrisc/include/asm/syscall.h
41
syscall_get_error(struct task_struct *task, struct pt_regs *regs)
arch/openrisc/include/asm/syscall.h
47
syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
arch/openrisc/include/asm/syscall.h
53
syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
arch/openrisc/include/asm/syscall.h
60
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
arch/openrisc/include/asm/syscall.h
67
syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
arch/openrisc/include/asm/syscall.h
73
static inline int syscall_get_arch(struct task_struct *task)
arch/openrisc/include/asm/thread_info.h
44
struct task_struct *task; /* main task structure */
arch/openrisc/include/asm/thread_info.h
64
.task = &tsk, \
arch/openrisc/include/asm/thread_info.h
75
#define get_thread_info(ti) get_task_struct((ti)->task)
arch/openrisc/include/asm/thread_info.h
76
#define put_thread_info(ti) put_task_struct((ti)->task)
arch/openrisc/kernel/asm-offsets.c
48
DEFINE(TI_TASK, offsetof(struct thread_info, task));
arch/openrisc/kernel/process.c
260
last = (_switch(old_ti, new_ti))->task;
arch/openrisc/kernel/ptrace.c
153
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/openrisc/kernel/traps.c
68
void show_stack(struct task_struct *task, unsigned long *esp, const char *loglvl)
arch/parisc/include/asm/processor.h
122
#define SET_UNALIGN_CTL(task,value) \
arch/parisc/include/asm/processor.h
124
(task)->thread.flags = (((task)->thread.flags & ~PARISC_UAC_MASK) \
arch/parisc/include/asm/processor.h
130
#define GET_UNALIGN_CTL(task,addr) \
arch/parisc/include/asm/processor.h
132
put_user(((task)->thread.flags & PARISC_UAC_MASK) \
arch/parisc/include/asm/processor.h
150
void show_trace(struct task_struct *task, unsigned long *stack);
arch/parisc/include/asm/psw.h
97
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4))
arch/parisc/include/asm/psw.h
99
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))
arch/parisc/include/asm/ptrace.h
11
#define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS))
arch/parisc/include/asm/syscall.h
51
static inline long syscall_get_error(struct task_struct *task,
arch/parisc/include/asm/syscall.h
58
static inline long syscall_get_return_value(struct task_struct *task,
arch/parisc/include/asm/syscall.h
64
static inline void syscall_set_return_value(struct task_struct *task,
arch/parisc/include/asm/syscall.h
71
static inline void syscall_rollback(struct task_struct *task,
arch/parisc/include/asm/syscall.h
77
static inline int syscall_get_arch(struct task_struct *task)
arch/parisc/include/asm/syscall.h
81
if (!__is_compat_task(task))
arch/parisc/include/asm/unwind.h
79
struct task_struct *task, struct pt_regs *regs);
arch/parisc/kernel/kgdb.c
127
struct task_struct *task)
arch/parisc/kernel/kgdb.c
129
struct pt_regs *regs = task_pt_regs(task);
arch/parisc/kernel/perf_regs.c
45
u64 perf_reg_abi(struct task_struct *task)
arch/parisc/kernel/perf_regs.c
50
if (test_tsk_thread_flag(task, TIF_32BIT))
arch/parisc/kernel/ptrace.c
104
pa_psw(task)->r = 1;
arch/parisc/kernel/ptrace.c
105
pa_psw(task)->t = 0;
arch/parisc/kernel/ptrace.c
106
pa_psw(task)->h = 0;
arch/parisc/kernel/ptrace.c
107
pa_psw(task)->l = 0;
arch/parisc/kernel/ptrace.c
110
void user_enable_block_step(struct task_struct *task)
arch/parisc/kernel/ptrace.c
112
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
arch/parisc/kernel/ptrace.c
113
set_tsk_thread_flag(task, TIF_BLOCKSTEP);
arch/parisc/kernel/ptrace.c
116
pa_psw(task)->r = 0;
arch/parisc/kernel/ptrace.c
117
pa_psw(task)->t = 1;
arch/parisc/kernel/ptrace.c
118
pa_psw(task)->h = 0;
arch/parisc/kernel/ptrace.c
119
pa_psw(task)->l = 0;
arch/parisc/kernel/ptrace.c
50
void ptrace_disable(struct task_struct *task)
arch/parisc/kernel/ptrace.c
52
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
arch/parisc/kernel/ptrace.c
53
clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
arch/parisc/kernel/ptrace.c
56
pa_psw(task)->r = 0;
arch/parisc/kernel/ptrace.c
57
pa_psw(task)->t = 0;
arch/parisc/kernel/ptrace.c
58
pa_psw(task)->h = 0;
arch/parisc/kernel/ptrace.c
59
pa_psw(task)->l = 0;
arch/parisc/kernel/ptrace.c
649
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/parisc/kernel/ptrace.c
66
void user_disable_single_step(struct task_struct *task)
arch/parisc/kernel/ptrace.c
68
ptrace_disable(task);
arch/parisc/kernel/ptrace.c
71
void user_enable_single_step(struct task_struct *task)
arch/parisc/kernel/ptrace.c
73
clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
arch/parisc/kernel/ptrace.c
74
set_tsk_thread_flag(task, TIF_SINGLESTEP);
arch/parisc/kernel/ptrace.c
76
if (pa_psw(task)->n) {
arch/parisc/kernel/ptrace.c
78
task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
arch/parisc/kernel/ptrace.c
79
task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
arch/parisc/kernel/ptrace.c
80
task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
arch/parisc/kernel/ptrace.c
81
pa_psw(task)->n = 0;
arch/parisc/kernel/ptrace.c
82
pa_psw(task)->x = 0;
arch/parisc/kernel/ptrace.c
83
pa_psw(task)->y = 0;
arch/parisc/kernel/ptrace.c
84
pa_psw(task)->z = 0;
arch/parisc/kernel/ptrace.c
85
pa_psw(task)->b = 0;
arch/parisc/kernel/ptrace.c
86
ptrace_disable(task);
arch/parisc/kernel/ptrace.c
90
(void __user *) (task_regs(task)->iaoq[0] & ~3),
arch/parisc/kernel/ptrace.c
91
task);
arch/parisc/kernel/stacktrace.c
16
static void notrace walk_stackframe(struct task_struct *task,
arch/parisc/kernel/stacktrace.c
21
unwind_frame_init_task(&info, task, NULL);
arch/parisc/kernel/stacktrace.c
33
struct task_struct *task, struct pt_regs *regs)
arch/parisc/kernel/stacktrace.c
35
walk_stackframe(task, regs, consume_entry, cookie);
arch/parisc/kernel/stacktrace.c
39
struct task_struct *task)
arch/parisc/kernel/stacktrace.c
41
walk_stackframe(task, NULL, consume_entry, cookie);
arch/parisc/kernel/traps.c
199
static void parisc_show_stack(struct task_struct *task,
arch/parisc/kernel/traps.c
204
unwind_frame_init_task(&info, task, regs);
arch/parisc/kernel/traps.c
59
static void parisc_show_stack(struct task_struct *task,
arch/parisc/kernel/unwind.c
428
struct task_struct *task, struct pt_regs *regs)
arch/parisc/kernel/unwind.c
430
task = task ? task : current;
arch/parisc/kernel/unwind.c
432
if (task == current) {
arch/parisc/kernel/unwind.c
442
unwind_frame_init(info, task, regs);
arch/parisc/kernel/unwind.c
444
unwind_frame_init_from_blocked_task(info, task);
arch/powerpc/include/asm/current.h
17
struct task_struct *task;
arch/powerpc/include/asm/current.h
21
: "=r" (task)
arch/powerpc/include/asm/current.h
24
return task;
arch/powerpc/include/asm/reg_booke.h
341
#define dbcr_dac(task) ((task)->thread.debug.dbcr0)
arch/powerpc/include/asm/reg_booke.h
375
#define dbcr_iac_range(task) ((task)->thread.debug.dbcr1)
arch/powerpc/include/asm/syscall.h
103
static inline void syscall_get_arguments(struct task_struct *task,
arch/powerpc/include/asm/syscall.h
110
if (is_tsk_32bit_task(task))
arch/powerpc/include/asm/syscall.h
123
static inline void syscall_set_arguments(struct task_struct *task,
arch/powerpc/include/asm/syscall.h
133
static inline int syscall_get_arch(struct task_struct *task)
arch/powerpc/include/asm/syscall.h
135
if (is_tsk_32bit_task(task))
arch/powerpc/include/asm/syscall.h
28
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
arch/powerpc/include/asm/syscall.h
42
static inline void syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
arch/powerpc/include/asm/syscall.h
52
static inline void syscall_rollback(struct task_struct *task,
arch/powerpc/include/asm/syscall.h
58
static inline long syscall_get_error(struct task_struct *task,
arch/powerpc/include/asm/syscall.h
74
static inline long syscall_get_return_value(struct task_struct *task,
arch/powerpc/include/asm/syscall.h
80
static inline void syscall_set_return_value(struct task_struct *task,
arch/powerpc/kernel/dexcr.c
117
task->thread.dexcr_onexec |= aspect;
arch/powerpc/kernel/dexcr.c
119
task->thread.dexcr_onexec &= ~aspect;
arch/powerpc/kernel/dexcr.c
53
int get_dexcr_prctl(struct task_struct *task, unsigned long which)
arch/powerpc/kernel/dexcr.c
70
if (aspect & task->thread.dexcr_onexec)
arch/powerpc/kernel/dexcr.c
78
int set_dexcr_prctl(struct task_struct *task, unsigned long which, unsigned long ctrl)
arch/powerpc/kernel/ptrace/ptrace-adv.c
10
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-adv.c
110
task->thread.debug.dac1 = data & ~0x3UL;
arch/powerpc/kernel/ptrace/ptrace-adv.c
112
if (task->thread.debug.dac1 == 0) {
arch/powerpc/kernel/ptrace/ptrace-adv.c
113
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
arch/powerpc/kernel/ptrace/ptrace-adv.c
114
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
arch/powerpc/kernel/ptrace/ptrace-adv.c
115
task->thread.debug.dbcr1)) {
arch/powerpc/kernel/ptrace/ptrace-adv.c
117
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
arch/powerpc/kernel/ptrace/ptrace-adv.c
128
task->thread.debug.dbcr0 |= DBCR0_IDM;
arch/powerpc/kernel/ptrace/ptrace-adv.c
13
task->thread.debug.dbcr0 &= ~DBCR0_BT;
arch/powerpc/kernel/ptrace/ptrace-adv.c
131
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
arch/powerpc/kernel/ptrace/ptrace-adv.c
133
dbcr_dac(task) |= DBCR_DAC1R;
arch/powerpc/kernel/ptrace/ptrace-adv.c
135
dbcr_dac(task) |= DBCR_DAC1W;
arch/powerpc/kernel/ptrace/ptrace-adv.c
14
task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
arch/powerpc/kernel/ptrace/ptrace-adv.c
17
set_tsk_thread_flag(task, TIF_SINGLESTEP);
arch/powerpc/kernel/ptrace/ptrace-adv.c
20
void user_enable_block_step(struct task_struct *task)
arch/powerpc/kernel/ptrace/ptrace-adv.c
22
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-adv.c
25
task->thread.debug.dbcr0 &= ~DBCR0_IC;
arch/powerpc/kernel/ptrace/ptrace-adv.c
26
task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
arch/powerpc/kernel/ptrace/ptrace-adv.c
29
set_tsk_thread_flag(task, TIF_SINGLESTEP);
arch/powerpc/kernel/ptrace/ptrace-adv.c
32
void user_disable_single_step(struct task_struct *task)
arch/powerpc/kernel/ptrace/ptrace-adv.c
34
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-adv.c
43
task->thread.debug.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
arch/powerpc/kernel/ptrace/ptrace-adv.c
47
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
arch/powerpc/kernel/ptrace/ptrace-adv.c
48
task->thread.debug.dbcr1)) {
arch/powerpc/kernel/ptrace/ptrace-adv.c
52
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
arch/powerpc/kernel/ptrace/ptrace-adv.c
56
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
arch/powerpc/kernel/ptrace/ptrace-adv.c
8
void user_enable_single_step(struct task_struct *task)
arch/powerpc/kernel/ptrace/ptrace-adv.c
83
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
arch/powerpc/kernel/ptrace/ptrace-adv.c
85
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-adv.c
88
struct thread_struct *thread = &task->thread;
arch/powerpc/kernel/ptrace/ptrace-decl.h
168
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data);
arch/powerpc/kernel/ptrace/ptrace-decl.h
169
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data);
arch/powerpc/kernel/ptrace/ptrace-decl.h
181
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data);
arch/powerpc/kernel/ptrace/ptrace-noadv.c
10
void user_enable_single_step(struct task_struct *task)
arch/powerpc/kernel/ptrace/ptrace-noadv.c
12
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
155
ptrace_triggered, NULL, task);
arch/powerpc/kernel/ptrace/ptrace-noadv.c
16
set_tsk_thread_flag(task, TIF_SINGLESTEP);
arch/powerpc/kernel/ptrace/ptrace-noadv.c
165
task->thread.hw_brk[0] = hw_brk;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
19
void user_enable_block_step(struct task_struct *task)
arch/powerpc/kernel/ptrace/ptrace-noadv.c
21
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
25
set_tsk_thread_flag(task, TIF_SINGLESTEP);
arch/powerpc/kernel/ptrace/ptrace-noadv.c
28
void user_disable_single_step(struct task_struct *task)
arch/powerpc/kernel/ptrace/ptrace-noadv.c
30
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
35
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
arch/powerpc/kernel/ptrace/ptrace-noadv.c
78
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
arch/powerpc/kernel/ptrace/ptrace-noadv.c
82
struct thread_struct *thread = &task->thread;
arch/powerpc/kernel/ptrace/ptrace-tm.c
32
static unsigned long get_user_ckpt_msr(struct task_struct *task)
arch/powerpc/kernel/ptrace/ptrace-tm.c
34
return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
arch/powerpc/kernel/ptrace/ptrace-tm.c
37
static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
arch/powerpc/kernel/ptrace/ptrace-tm.c
39
task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
arch/powerpc/kernel/ptrace/ptrace-tm.c
40
task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
arch/powerpc/kernel/ptrace/ptrace-tm.c
44
static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
arch/powerpc/kernel/ptrace/ptrace-tm.c
46
set_trap(&task->thread.ckpt_regs, trap);
arch/powerpc/kernel/ptrace/ptrace-view.c
109
static unsigned long get_user_msr(struct task_struct *task)
arch/powerpc/kernel/ptrace/ptrace-view.c
111
return task->thread.regs->msr | task->thread.fpexc_mode;
arch/powerpc/kernel/ptrace/ptrace-view.c
114
static __always_inline int set_user_msr(struct task_struct *task, unsigned long msr)
arch/powerpc/kernel/ptrace/ptrace-view.c
116
unsigned long newmsr = (task->thread.regs->msr & ~MSR_DEBUGCHANGE) |
arch/powerpc/kernel/ptrace/ptrace-view.c
118
regs_set_return_msr(task->thread.regs, newmsr);
arch/powerpc/kernel/ptrace/ptrace-view.c
123
static int get_user_dscr(struct task_struct *task, unsigned long *data)
arch/powerpc/kernel/ptrace/ptrace-view.c
125
*data = task->thread.dscr;
arch/powerpc/kernel/ptrace/ptrace-view.c
129
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
arch/powerpc/kernel/ptrace/ptrace-view.c
131
task->thread.dscr = dscr;
arch/powerpc/kernel/ptrace/ptrace-view.c
132
task->thread.dscr_inherit = 1;
arch/powerpc/kernel/ptrace/ptrace-view.c
136
static int get_user_dscr(struct task_struct *task, unsigned long *data)
arch/powerpc/kernel/ptrace/ptrace-view.c
141
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
arch/powerpc/kernel/ptrace/ptrace-view.c
151
static __always_inline int set_user_trap(struct task_struct *task, unsigned long trap)
arch/powerpc/kernel/ptrace/ptrace-view.c
153
set_trap(task->thread.regs, trap);
arch/powerpc/kernel/ptrace/ptrace-view.c
160
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
arch/powerpc/kernel/ptrace/ptrace-view.c
164
if (task->thread.regs == NULL || !data)
arch/powerpc/kernel/ptrace/ptrace-view.c
168
*data = get_user_msr(task);
arch/powerpc/kernel/ptrace/ptrace-view.c
173
return get_user_dscr(task, data);
arch/powerpc/kernel/ptrace/ptrace-view.c
188
*data = ((unsigned long *)task->thread.regs)[regno];
arch/powerpc/kernel/ptrace/ptrace-view.c
198
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
arch/powerpc/kernel/ptrace/ptrace-view.c
200
if (task->thread.regs == NULL)
arch/powerpc/kernel/ptrace/ptrace-view.c
204
return set_user_msr(task, data);
arch/powerpc/kernel/ptrace/ptrace-view.c
206
return set_user_trap(task, data);
arch/powerpc/kernel/ptrace/ptrace-view.c
208
return set_user_dscr(task, data);
arch/powerpc/kernel/ptrace/ptrace-view.c
212
((unsigned long *)task->thread.regs)[regno] = data;
arch/powerpc/kernel/ptrace/ptrace-view.c
943
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/powerpc/kernel/ptrace/ptrace-view.c
945
if (IS_ENABLED(CONFIG_COMPAT) && is_tsk_32bit_task(task))
arch/powerpc/kernel/security.c
365
static int ssb_prctl_get(struct task_struct *task)
arch/powerpc/kernel/security.c
390
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
arch/powerpc/kernel/security.c
394
return ssb_prctl_get(task);
arch/powerpc/kernel/signal.c
102
task->thread.TS_CKFPR(i) = buf[i];
arch/powerpc/kernel/signal.c
103
task->thread.ckfp_state.fpscr = buf[i];
arch/powerpc/kernel/signal.c
109
struct task_struct *task)
arch/powerpc/kernel/signal.c
116
buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
arch/powerpc/kernel/signal.c
120
unsigned long copy_ckvsx_from_user(struct task_struct *task,
arch/powerpc/kernel/signal.c
129
task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
arch/powerpc/kernel/signal.c
27
struct task_struct *task)
arch/powerpc/kernel/signal.c
34
buf[i] = task->thread.TS_FPR(i);
arch/powerpc/kernel/signal.c
35
buf[i] = task->thread.fp_state.fpscr;
arch/powerpc/kernel/signal.c
39
unsigned long copy_fpr_from_user(struct task_struct *task,
arch/powerpc/kernel/signal.c
48
task->thread.TS_FPR(i) = buf[i];
arch/powerpc/kernel/signal.c
49
task->thread.fp_state.fpscr = buf[i];
arch/powerpc/kernel/signal.c
55
struct task_struct *task)
arch/powerpc/kernel/signal.c
62
buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
arch/powerpc/kernel/signal.c
66
unsigned long copy_vsx_from_user(struct task_struct *task,
arch/powerpc/kernel/signal.c
75
task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
arch/powerpc/kernel/signal.c
81
struct task_struct *task)
arch/powerpc/kernel/signal.c
88
buf[i] = task->thread.TS_CKFPR(i);
arch/powerpc/kernel/signal.c
89
buf[i] = task->thread.ckfp_state.fpscr;
arch/powerpc/kernel/signal.c
93
unsigned long copy_ckfpr_from_user(struct task_struct *task,
arch/powerpc/kernel/signal.h
100
struct task_struct *__t = task; \
arch/powerpc/kernel/signal.h
109
#define unsafe_copy_ckfpr_from_user(task, from, label) do { \
arch/powerpc/kernel/signal.h
110
struct task_struct *__t = task; \
arch/powerpc/kernel/signal.h
119
#define unsafe_copy_ckvsx_from_user(task, from, label) do { \
arch/powerpc/kernel/signal.h
120
struct task_struct *__t = task; \
arch/powerpc/kernel/signal.h
131
#define unsafe_copy_fpr_to_user(to, task, label) \
arch/powerpc/kernel/signal.h
132
unsafe_copy_to_user(to, (task)->thread.fp_state.fpr, \
arch/powerpc/kernel/signal.h
135
#define unsafe_copy_fpr_from_user(task, from, label) \
arch/powerpc/kernel/signal.h
136
unsafe_copy_from_user((task)->thread.fp_state.fpr, from, \
arch/powerpc/kernel/signal.h
140
copy_fpr_to_user(void __user *to, struct task_struct *task)
arch/powerpc/kernel/signal.h
142
return __copy_to_user(to, task->thread.fp_state.fpr,
arch/powerpc/kernel/signal.h
147
copy_fpr_from_user(struct task_struct *task, void __user *from)
arch/powerpc/kernel/signal.h
149
return __copy_from_user(task->thread.fp_state.fpr, from,
arch/powerpc/kernel/signal.h
154
#define unsafe_copy_ckfpr_to_user(to, task, label) \
arch/powerpc/kernel/signal.h
155
unsafe_copy_to_user(to, (task)->thread.ckfp_state.fpr, \
arch/powerpc/kernel/signal.h
158
inline unsigned long copy_ckfpr_to_user(void __user *to, struct task_struct *task)
arch/powerpc/kernel/signal.h
160
return __copy_to_user(to, task->thread.ckfp_state.fpr,
arch/powerpc/kernel/signal.h
165
copy_ckfpr_from_user(struct task_struct *task, void __user *from)
arch/powerpc/kernel/signal.h
167
return __copy_from_user(task->thread.ckfp_state.fpr, from,
arch/powerpc/kernel/signal.h
172
#define unsafe_copy_fpr_to_user(to, task, label) do { if (0) goto label;} while (0)
arch/powerpc/kernel/signal.h
174
#define unsafe_copy_fpr_from_user(task, from, label) do { if (0) goto label;} while (0)
arch/powerpc/kernel/signal.h
177
copy_fpr_to_user(void __user *to, struct task_struct *task)
arch/powerpc/kernel/signal.h
183
copy_fpr_from_user(struct task_struct *task, void __user *from)
arch/powerpc/kernel/signal.h
36
struct task_struct *task);
arch/powerpc/kernel/signal.h
38
struct task_struct *task);
arch/powerpc/kernel/signal.h
39
extern unsigned long copy_vsx_from_user(struct task_struct *task,
arch/powerpc/kernel/signal.h
41
extern unsigned long copy_ckvsx_from_user(struct task_struct *task,
arch/powerpc/kernel/signal.h
43
unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task);
arch/powerpc/kernel/signal.h
44
unsigned long copy_ckfpr_to_user(void __user *to, struct task_struct *task);
arch/powerpc/kernel/signal.h
45
unsigned long copy_fpr_from_user(struct task_struct *task, void __user *from);
arch/powerpc/kernel/signal.h
46
unsigned long copy_ckfpr_from_user(struct task_struct *task, void __user *from);
arch/powerpc/kernel/signal.h
48
#define unsafe_copy_fpr_to_user(to, task, label) do { \
arch/powerpc/kernel/signal.h
49
struct task_struct *__t = task; \
arch/powerpc/kernel/signal.h
58
#define unsafe_copy_vsx_to_user(to, task, label) do { \
arch/powerpc/kernel/signal.h
59
struct task_struct *__t = task; \
arch/powerpc/kernel/signal.h
68
#define unsafe_copy_fpr_from_user(task, from, label) do { \
arch/powerpc/kernel/signal.h
69
struct task_struct *__t = task; \
arch/powerpc/kernel/signal.h
78
#define unsafe_copy_vsx_from_user(task, from, label) do { \
arch/powerpc/kernel/signal.h
79
struct task_struct *__t = task; \
arch/powerpc/kernel/signal.h
89
#define unsafe_copy_ckfpr_to_user(to, task, label) do { \
arch/powerpc/kernel/signal.h
90
struct task_struct *__t = task; \
arch/powerpc/kernel/signal.h
99
#define unsafe_copy_ckvsx_to_user(to, task, label) do { \
arch/powerpc/kernel/stacktrace.c
136
ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
arch/powerpc/kernel/stacktrace.c
29
struct task_struct *task, struct pt_regs *regs)
arch/powerpc/kernel/stacktrace.c
38
else if (task == current)
arch/powerpc/kernel/stacktrace.c
41
sp = task->thread.ksp;
arch/powerpc/kernel/stacktrace.c
47
if (!validate_sp(sp, task))
arch/powerpc/kernel/stacktrace.c
67
void *cookie, struct task_struct *task)
arch/powerpc/kernel/stacktrace.c
71
unsigned long stack_page = (unsigned long)task_stack_page(task);
arch/powerpc/kernel/stacktrace.c
79
if (task->flags & PF_KTHREAD)
arch/powerpc/kernel/stacktrace.c
84
if (task == current)
arch/powerpc/kernel/stacktrace.c
87
sp = task->thread.ksp;
arch/powerpc/perf/core-book3s.c
136
struct task_struct *task, bool sched_in)
arch/powerpc/perf/core-book3s.c
418
if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
arch/powerpc/perf/core-book3s.c
451
struct task_struct *task, bool sched_in)
arch/powerpc/perf/perf_regs.c
135
u64 perf_reg_abi(struct task_struct *task)
arch/powerpc/perf/perf_regs.c
137
if (is_tsk_32bit_task(task))
arch/powerpc/platforms/ps3/device-init.c
904
struct task_struct *task;
arch/powerpc/platforms/ps3/device-init.c
928
task = kthread_run(ps3_probe_thread, (void *)repo.bus_id,
arch/powerpc/platforms/ps3/device-init.c
931
if (IS_ERR(task)) {
arch/powerpc/platforms/ps3/device-init.c
932
result = PTR_ERR(task);
arch/powerpc/platforms/ps3/device-init.c
938
probe_task = task;
arch/riscv/include/asm/mmu_context.h
17
struct task_struct *task);
arch/riscv/include/asm/processor.h
213
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
arch/riscv/include/asm/processor.h
214
long get_tagged_addr_ctrl(struct task_struct *task);
arch/riscv/include/asm/stacktrace.h
14
extern void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
arch/riscv/include/asm/stacktrace.h
16
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
arch/riscv/include/asm/switch_to.h
27
static inline void fstate_off(struct task_struct *task,
arch/riscv/include/asm/switch_to.h
33
static inline void fstate_save(struct task_struct *task,
arch/riscv/include/asm/switch_to.h
37
__fstate_save(task);
arch/riscv/include/asm/switch_to.h
42
static inline void fstate_restore(struct task_struct *task,
arch/riscv/include/asm/switch_to.h
46
__fstate_restore(task);
arch/riscv/include/asm/switch_to.h
68
#define fstate_save(task, regs) do { } while (0)
arch/riscv/include/asm/switch_to.h
69
#define fstate_restore(task, regs) do { } while (0)
arch/riscv/include/asm/switch_to.h
73
static inline void envcfg_update_bits(struct task_struct *task,
arch/riscv/include/asm/switch_to.h
78
envcfg = (task->thread.envcfg & ~mask) | val;
arch/riscv/include/asm/switch_to.h
79
task->thread.envcfg = envcfg;
arch/riscv/include/asm/switch_to.h
80
if (task == current)
arch/riscv/include/asm/switch_to.h
94
static inline bool switch_to_should_flush_icache(struct task_struct *task)
arch/riscv/include/asm/switch_to.h
97
bool stale_mm = task->mm && task->mm->context.force_icache_flush;
arch/riscv/include/asm/switch_to.h
98
bool stale_thread = task->thread.force_icache_flush;
arch/riscv/include/asm/switch_to.h
99
bool thread_migrated = smp_processor_id() != task->thread.prev_cpu;
arch/riscv/include/asm/syscall.h
27
static inline int syscall_get_nr(struct task_struct *task,
arch/riscv/include/asm/syscall.h
33
static inline void syscall_set_nr(struct task_struct *task,
arch/riscv/include/asm/syscall.h
40
static inline void syscall_rollback(struct task_struct *task,
arch/riscv/include/asm/syscall.h
46
static inline long syscall_get_error(struct task_struct *task,
arch/riscv/include/asm/syscall.h
54
static inline long syscall_get_return_value(struct task_struct *task,
arch/riscv/include/asm/syscall.h
60
static inline void syscall_set_return_value(struct task_struct *task,
arch/riscv/include/asm/syscall.h
67
static inline void syscall_get_arguments(struct task_struct *task,
arch/riscv/include/asm/syscall.h
79
static inline void syscall_set_arguments(struct task_struct *task,
arch/riscv/include/asm/syscall.h
91
static inline int syscall_get_arch(struct task_struct *task)
arch/riscv/include/asm/usercfi.h
36
void set_shstk_base(struct task_struct *task, unsigned long shstk_addr, unsigned long size);
arch/riscv/include/asm/usercfi.h
37
unsigned long get_shstk_base(struct task_struct *task, unsigned long *size);
arch/riscv/include/asm/usercfi.h
38
void set_active_shstk(struct task_struct *task, unsigned long shstk_addr);
arch/riscv/include/asm/usercfi.h
39
bool is_shstk_enabled(struct task_struct *task);
arch/riscv/include/asm/usercfi.h
40
bool is_shstk_locked(struct task_struct *task);
arch/riscv/include/asm/usercfi.h
41
bool is_shstk_allocated(struct task_struct *task);
arch/riscv/include/asm/usercfi.h
42
void set_shstk_lock(struct task_struct *task);
arch/riscv/include/asm/usercfi.h
43
void set_shstk_status(struct task_struct *task, bool enable);
arch/riscv/include/asm/usercfi.h
44
unsigned long get_active_shstk(struct task_struct *task);
arch/riscv/include/asm/usercfi.h
47
bool is_indir_lp_enabled(struct task_struct *task);
arch/riscv/include/asm/usercfi.h
48
bool is_indir_lp_locked(struct task_struct *task);
arch/riscv/include/asm/usercfi.h
49
void set_indir_lp_status(struct task_struct *task, bool enable);
arch/riscv/include/asm/usercfi.h
50
void set_indir_lp_lock(struct task_struct *task);
arch/riscv/include/asm/usercfi.h
60
#define get_shstk_base(task, size) 0UL
arch/riscv/include/asm/usercfi.h
62
#define set_shstk_base(task, shstk_addr, size) do {} while (0)
arch/riscv/include/asm/usercfi.h
64
#define set_active_shstk(task, shstk_addr) do {} while (0)
arch/riscv/include/asm/usercfi.h
66
#define is_shstk_enabled(task) false
arch/riscv/include/asm/usercfi.h
68
#define is_shstk_locked(task) false
arch/riscv/include/asm/usercfi.h
70
#define is_shstk_allocated(task) false
arch/riscv/include/asm/usercfi.h
72
#define set_shstk_lock(task) do {} while (0)
arch/riscv/include/asm/usercfi.h
74
#define set_shstk_status(task, enable) do {} while (0)
arch/riscv/include/asm/usercfi.h
76
#define is_indir_lp_enabled(task) false
arch/riscv/include/asm/usercfi.h
78
#define is_indir_lp_locked(task) false
arch/riscv/include/asm/usercfi.h
80
#define set_indir_lp_status(task, enable) do {} while (0)
arch/riscv/include/asm/usercfi.h
82
#define set_indir_lp_lock(task) do {} while (0)
arch/riscv/include/asm/usercfi.h
88
#define get_active_shstk(task) 0UL
arch/riscv/include/asm/vector.h
324
static inline void riscv_v_vstate_set_restore(struct task_struct *task,
arch/riscv/include/asm/vector.h
328
set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE);
arch/riscv/include/asm/vector.h
334
static inline bool riscv_preempt_v_dirty(struct task_struct *task)
arch/riscv/include/asm/vector.h
336
return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_DIRTY);
arch/riscv/include/asm/vector.h
339
static inline bool riscv_preempt_v_restore(struct task_struct *task)
arch/riscv/include/asm/vector.h
341
return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_NEED_RESTORE);
arch/riscv/include/asm/vector.h
344
static inline void riscv_preempt_v_clear_dirty(struct task_struct *task)
arch/riscv/include/asm/vector.h
347
task->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_DIRTY;
arch/riscv/include/asm/vector.h
350
static inline void riscv_preempt_v_set_restore(struct task_struct *task)
arch/riscv/include/asm/vector.h
353
task->thread.riscv_v_flags |= RISCV_PREEMPT_V_NEED_RESTORE;
arch/riscv/include/asm/vector.h
356
static inline bool riscv_preempt_v_started(struct task_struct *task)
arch/riscv/include/asm/vector.h
358
return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V);
arch/riscv/include/asm/vector.h
362
static inline bool riscv_preempt_v_dirty(struct task_struct *task) { return false; }
arch/riscv/include/asm/vector.h
363
static inline bool riscv_preempt_v_restore(struct task_struct *task) { return false; }
arch/riscv/include/asm/vector.h
364
static inline bool riscv_preempt_v_started(struct task_struct *task) { return false; }
arch/riscv/include/asm/vector.h
429
#define riscv_v_vstate_set_restore(task, regs) do {} while (0)
arch/riscv/kernel/compat_signal.c
123
struct task_struct *task;
arch/riscv/kernel/compat_signal.c
148
task = current;
arch/riscv/kernel/compat_signal.c
152
task->comm, task_pid_nr(task), __func__,
arch/riscv/kernel/compat_signal.c
98
#define compat_save_fp_state(task, regs) (0)
arch/riscv/kernel/compat_signal.c
99
#define compat_restore_fp_state(task, regs) (0)
arch/riscv/kernel/kgdb.c
232
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
arch/riscv/kernel/kgdb.c
237
gdb_regs[DBG_REG_SP_OFF] = task->thread.sp;
arch/riscv/kernel/kgdb.c
238
gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0];
arch/riscv/kernel/kgdb.c
239
gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1];
arch/riscv/kernel/kgdb.c
240
gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2];
arch/riscv/kernel/kgdb.c
241
gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3];
arch/riscv/kernel/kgdb.c
242
gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4];
arch/riscv/kernel/kgdb.c
243
gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5];
arch/riscv/kernel/kgdb.c
244
gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
arch/riscv/kernel/kgdb.c
245
gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
arch/riscv/kernel/kgdb.c
246
gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
arch/riscv/kernel/kgdb.c
247
gdb_regs[DBG_REG_S9_OFF] = task->thread.s[9];
arch/riscv/kernel/kgdb.c
248
gdb_regs[DBG_REG_S10_OFF] = task->thread.s[10];
arch/riscv/kernel/kgdb.c
249
gdb_regs[DBG_REG_S11_OFF] = task->thread.s[11];
arch/riscv/kernel/kgdb.c
250
gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
arch/riscv/kernel/perf_regs.c
29
u64 perf_reg_abi(struct task_struct *task)
arch/riscv/kernel/process.c
308
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
arch/riscv/kernel/process.c
311
struct thread_info *ti = task_thread_info(task);
arch/riscv/kernel/process.c
312
struct mm_struct *mm = task->mm;
arch/riscv/kernel/process.c
363
envcfg_update_bits(task, ENVCFG_PMM, pmm);
arch/riscv/kernel/process.c
371
long get_tagged_addr_ctrl(struct task_struct *task)
arch/riscv/kernel/process.c
373
struct thread_info *ti = task_thread_info(task);
arch/riscv/kernel/process.c
386
switch (task->thread.envcfg & ENVCFG_PMM) {
arch/riscv/kernel/process.c
395
if (task->mm->context.pmlen)
arch/riscv/kernel/ptrace.c
631
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/riscv/kernel/ptrace.c
633
if (is_compat_thread(&task->thread_info))
arch/riscv/kernel/signal.c
314
struct task_struct *task;
arch/riscv/kernel/signal.c
342
task = current;
arch/riscv/kernel/signal.c
346
task->comm, task_pid_nr(task), __func__,
arch/riscv/kernel/signal.c
69
#define save_fp_state(task, regs) (0)
arch/riscv/kernel/signal.c
70
#define restore_fp_state(task, regs) (0)
arch/riscv/kernel/stacktrace.c
107
void notrace walk_stackframe(struct task_struct *task,
arch/riscv/kernel/stacktrace.c
116
} else if (task == NULL || task == current) {
arch/riscv/kernel/stacktrace.c
121
sp = task->thread.sp;
arch/riscv/kernel/stacktrace.c
122
pc = task->thread.ra;
arch/riscv/kernel/stacktrace.c
146
noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
arch/riscv/kernel/stacktrace.c
149
walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
arch/riscv/kernel/stacktrace.c
152
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
arch/riscv/kernel/stacktrace.c
155
dump_backtrace(NULL, task, loglvl);
arch/riscv/kernel/stacktrace.c
168
unsigned long __get_wchan(struct task_struct *task)
arch/riscv/kernel/stacktrace.c
172
if (!try_get_task_stack(task))
arch/riscv/kernel/stacktrace.c
174
walk_stackframe(task, NULL, save_wchan, &pc);
arch/riscv/kernel/stacktrace.c
175
put_task_stack(task);
arch/riscv/kernel/stacktrace.c
180
struct task_struct *task, struct pt_regs *regs)
arch/riscv/kernel/stacktrace.c
182
walk_stackframe(task, regs, consume_entry, cookie);
arch/riscv/kernel/stacktrace.c
24
#define READ_ONCE_TASK_STACK(task, x) \
arch/riscv/kernel/stacktrace.c
28
if ((task) == current) \
arch/riscv/kernel/stacktrace.c
48
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
arch/riscv/kernel/stacktrace.c
59
} else if (task == NULL || task == current) {
arch/riscv/kernel/stacktrace.c
66
fp = task->thread.s[0];
arch/riscv/kernel/stacktrace.c
67
sp = task->thread.sp;
arch/riscv/kernel/stacktrace.c
68
pc = task->thread.ra;
arch/riscv/kernel/stacktrace.c
88
fp = READ_ONCE_TASK_STACK(task, frame->fp);
arch/riscv/kernel/stacktrace.c
89
pc = READ_ONCE_TASK_STACK(task, frame->ra);
arch/riscv/kernel/usercfi.c
100
task->thread.envcfg |= ENVCFG_LPE;
arch/riscv/kernel/usercfi.c
102
task->thread.envcfg &= ~ENVCFG_LPE;
arch/riscv/kernel/usercfi.c
104
csr_write(CSR_ENVCFG, task->thread.envcfg);
arch/riscv/kernel/usercfi.c
107
void set_indir_lp_lock(struct task_struct *task)
arch/riscv/kernel/usercfi.c
109
task->thread_info.user_cfi_state.ufcfi_locked = 1;
arch/riscv/kernel/usercfi.c
24
bool is_shstk_enabled(struct task_struct *task)
arch/riscv/kernel/usercfi.c
26
return task->thread_info.user_cfi_state.ubcfi_en;
arch/riscv/kernel/usercfi.c
29
bool is_shstk_allocated(struct task_struct *task)
arch/riscv/kernel/usercfi.c
31
return task->thread_info.user_cfi_state.shdw_stk_base;
arch/riscv/kernel/usercfi.c
34
bool is_shstk_locked(struct task_struct *task)
arch/riscv/kernel/usercfi.c
36
return task->thread_info.user_cfi_state.ubcfi_locked;
arch/riscv/kernel/usercfi.c
39
void set_shstk_base(struct task_struct *task, unsigned long shstk_addr, unsigned long size)
arch/riscv/kernel/usercfi.c
41
task->thread_info.user_cfi_state.shdw_stk_base = shstk_addr;
arch/riscv/kernel/usercfi.c
42
task->thread_info.user_cfi_state.shdw_stk_size = size;
arch/riscv/kernel/usercfi.c
447
int arch_lock_shadow_stack_status(struct task_struct *task,
arch/riscv/kernel/usercfi.c
45
unsigned long get_shstk_base(struct task_struct *task, unsigned long *size)
arch/riscv/kernel/usercfi.c
452
!is_shstk_enabled(task) || arg != 0)
arch/riscv/kernel/usercfi.c
455
set_shstk_lock(task);
arch/riscv/kernel/usercfi.c
48
*size = task->thread_info.user_cfi_state.shdw_stk_size;
arch/riscv/kernel/usercfi.c
49
return task->thread_info.user_cfi_state.shdw_stk_base;
arch/riscv/kernel/usercfi.c
494
int arch_lock_indir_br_lp_status(struct task_struct *task,
arch/riscv/kernel/usercfi.c
502
!is_indir_lp_enabled(task) || arg != 0)
arch/riscv/kernel/usercfi.c
505
set_indir_lp_lock(task);
arch/riscv/kernel/usercfi.c
52
void set_active_shstk(struct task_struct *task, unsigned long shstk_addr)
arch/riscv/kernel/usercfi.c
54
task->thread_info.user_cfi_state.user_shdw_stk = shstk_addr;
arch/riscv/kernel/usercfi.c
57
unsigned long get_active_shstk(struct task_struct *task)
arch/riscv/kernel/usercfi.c
59
return task->thread_info.user_cfi_state.user_shdw_stk;
arch/riscv/kernel/usercfi.c
62
void set_shstk_status(struct task_struct *task, bool enable)
arch/riscv/kernel/usercfi.c
67
task->thread_info.user_cfi_state.ubcfi_en = enable ? 1 : 0;
arch/riscv/kernel/usercfi.c
70
task->thread.envcfg |= ENVCFG_SSE;
arch/riscv/kernel/usercfi.c
72
task->thread.envcfg &= ~ENVCFG_SSE;
arch/riscv/kernel/usercfi.c
74
csr_write(CSR_ENVCFG, task->thread.envcfg);
arch/riscv/kernel/usercfi.c
77
void set_shstk_lock(struct task_struct *task)
arch/riscv/kernel/usercfi.c
79
task->thread_info.user_cfi_state.ubcfi_locked = 1;
arch/riscv/kernel/usercfi.c
82
bool is_indir_lp_enabled(struct task_struct *task)
arch/riscv/kernel/usercfi.c
84
return task->thread_info.user_cfi_state.ufcfi_en;
arch/riscv/kernel/usercfi.c
87
bool is_indir_lp_locked(struct task_struct *task)
arch/riscv/kernel/usercfi.c
89
return task->thread_info.user_cfi_state.ufcfi_locked;
arch/riscv/kernel/usercfi.c
92
void set_indir_lp_status(struct task_struct *task, bool enable)
arch/riscv/kernel/usercfi.c
97
task->thread_info.user_cfi_state.ufcfi_en = enable ? 1 : 0;
arch/riscv/mm/context.c
299
struct task_struct *task)
arch/riscv/mm/context.c
312
if (!(task && switch_to_should_flush_icache(task)))
arch/riscv/mm/context.c
319
struct task_struct *task)
arch/riscv/mm/context.c
326
membarrier_arch_switch_mm(prev, next, task);
arch/riscv/mm/context.c
337
flush_icache_deferred(next, cpu, task);
arch/s390/include/asm/ptrace.h
212
void update_cr_regs(struct task_struct *task);
arch/s390/include/asm/stacktrace.h
42
int get_stack_info(unsigned long sp, struct task_struct *task,
arch/s390/include/asm/stacktrace.h
87
static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
arch/s390/include/asm/stacktrace.h
92
if (task == current)
arch/s390/include/asm/stacktrace.h
94
return (unsigned long)task->thread.ksp;
arch/s390/include/asm/syscall.h
19
static inline long syscall_get_nr(struct task_struct *task,
arch/s390/include/asm/syscall.h
26
static inline void syscall_set_nr(struct task_struct *task,
arch/s390/include/asm/syscall.h
38
static inline void syscall_rollback(struct task_struct *task,
arch/s390/include/asm/syscall.h
44
static inline long syscall_get_error(struct task_struct *task,
arch/s390/include/asm/syscall.h
52
static inline long syscall_get_return_value(struct task_struct *task,
arch/s390/include/asm/syscall.h
58
static inline void syscall_set_return_value(struct task_struct *task,
arch/s390/include/asm/syscall.h
66
static inline void syscall_get_arguments(struct task_struct *task,
arch/s390/include/asm/syscall.h
78
static inline void syscall_set_arguments(struct task_struct *task,
arch/s390/include/asm/syscall.h
87
static inline int syscall_get_arch(struct task_struct *task)
arch/s390/include/asm/unwind.h
37
struct task_struct *task;
arch/s390/include/asm/unwind.h
50
ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *)state->sp);
arch/s390/include/asm/unwind.h
53
ip = rethook_find_ret_addr(state->task, state->sp, &state->kr_cur);
arch/s390/include/asm/unwind.h
58
void __unwind_start(struct unwind_state *state, struct task_struct *task,
arch/s390/include/asm/unwind.h
74
struct task_struct *task,
arch/s390/include/asm/unwind.h
78
task = task ?: current;
arch/s390/include/asm/unwind.h
79
first_frame = first_frame ?: get_stack_pointer(task, regs);
arch/s390/include/asm/unwind.h
80
__unwind_start(state, task, regs, first_frame);
arch/s390/include/asm/unwind.h
88
#define unwind_for_each_frame(state, task, regs, first_frame) \
arch/s390/include/asm/unwind.h
89
for (unwind_start(state, task, regs, first_frame); \
arch/s390/kernel/dumpstack.c
102
if (in_task_stack(sp, task, info))
arch/s390/kernel/dumpstack.c
105
if (task != current)
arch/s390/kernel/dumpstack.c
130
void show_stack(struct task_struct *task, unsigned long *stack,
arch/s390/kernel/dumpstack.c
136
unwind_for_each_frame(&state, task, NULL, (unsigned long) stack)
arch/s390/kernel/dumpstack.c
140
debug_show_held_locks(task ? : current);
arch/s390/kernel/dumpstack.c
55
static bool in_task_stack(unsigned long sp, struct task_struct *task,
arch/s390/kernel/dumpstack.c
58
unsigned long stack = (unsigned long)task_stack_page(task);
arch/s390/kernel/dumpstack.c
91
int get_stack_info(unsigned long sp, struct task_struct *task,
arch/s390/kernel/perf_pai.c
674
struct task_struct *task, bool sched_in)
arch/s390/kernel/perf_pai.c
738
struct task_struct *task, bool sched_in)
arch/s390/kernel/perf_regs.c
45
u64 perf_reg_abi(struct task_struct *task)
arch/s390/kernel/ptrace.c
102
if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
arch/s390/kernel/ptrace.c
119
void user_enable_single_step(struct task_struct *task)
arch/s390/kernel/ptrace.c
121
clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
arch/s390/kernel/ptrace.c
122
set_tsk_thread_flag(task, TIF_SINGLE_STEP);
arch/s390/kernel/ptrace.c
125
void user_disable_single_step(struct task_struct *task)
arch/s390/kernel/ptrace.c
127
clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
arch/s390/kernel/ptrace.c
128
clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
arch/s390/kernel/ptrace.c
131
void user_enable_block_step(struct task_struct *task)
arch/s390/kernel/ptrace.c
133
set_tsk_thread_flag(task, TIF_SINGLE_STEP);
arch/s390/kernel/ptrace.c
134
set_tsk_thread_flag(task, TIF_BLOCK_STEP);
arch/s390/kernel/ptrace.c
142
void ptrace_disable(struct task_struct *task)
arch/s390/kernel/ptrace.c
144
memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
arch/s390/kernel/ptrace.c
145
memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
arch/s390/kernel/ptrace.c
146
clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
arch/s390/kernel/ptrace.c
147
clear_tsk_thread_flag(task, TIF_PER_TRAP);
arch/s390/kernel/ptrace.c
148
task->thread.per_flags = 0;
arch/s390/kernel/ptrace.c
40
void update_cr_regs(struct task_struct *task)
arch/s390/kernel/ptrace.c
42
struct pt_regs *regs = task_pt_regs(task);
arch/s390/kernel/ptrace.c
43
struct thread_struct *thread = &task->thread;
arch/s390/kernel/ptrace.c
64
if (task->thread.per_flags & PER_FLAG_NO_TE)
arch/s390/kernel/ptrace.c
68
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
arch/s390/kernel/ptrace.c
69
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
arch/s390/kernel/ptrace.c
78
if (task->thread.gs_cb)
arch/s390/kernel/ptrace.c
94
if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
arch/s390/kernel/ptrace.c
95
test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
arch/s390/kernel/ptrace.c
96
if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
arch/s390/kernel/ptrace.c
993
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/s390/kernel/runtime_instr.c
33
struct task_struct *task = current;
arch/s390/kernel/runtime_instr.c
36
if (!task->thread.ri_cb)
arch/s390/kernel/runtime_instr.c
38
regs = task_pt_regs(task);
arch/s390/kernel/runtime_instr.c
41
kfree(task->thread.ri_cb);
arch/s390/kernel/runtime_instr.c
42
task->thread.ri_cb = NULL;
arch/s390/kernel/stacktrace.c
18
struct task_struct *task, struct pt_regs *regs)
arch/s390/kernel/stacktrace.c
23
unwind_for_each_frame(&state, task, regs, 0) {
arch/s390/kernel/stacktrace.c
31
void *cookie, struct task_struct *task)
arch/s390/kernel/stacktrace.c
36
unwind_for_each_frame(&state, task, NULL, 0) {
arch/s390/kernel/unwind_bc.c
127
void __unwind_start(struct unwind_state *state, struct task_struct *task,
arch/s390/kernel/unwind_bc.c
135
state->task = task;
arch/s390/kernel/unwind_bc.c
148
} else if (task == current) {
arch/s390/kernel/unwind_bc.c
151
sp = task->thread.ksp;
arch/s390/kernel/unwind_bc.c
34
if (get_stack_info(sp, state->task, info, mask) != 0 ||
arch/s390/kernel/unwind_bc.c
45
if (task_pt_regs(state->task) == regs)
arch/s390/lib/test_unwind.c
123
struct task_struct *task;
arch/s390/lib/test_unwind.c
381
u->task = NULL;
arch/s390/lib/test_unwind.c
402
struct task_struct *task;
arch/s390/lib/test_unwind.c
413
task = kthread_run(unwindme_func1, u, "%s", __func__);
arch/s390/lib/test_unwind.c
414
if (IS_ERR(task)) {
arch/s390/lib/test_unwind.c
416
return PTR_ERR(task);
arch/s390/lib/test_unwind.c
423
kthread_park(task);
arch/s390/lib/test_unwind.c
425
ret = test_unwind(task, NULL, (u->flags & UWM_SP) ? u->sp : 0);
arch/s390/lib/test_unwind.c
426
kthread_stop(task);
arch/s390/lib/test_unwind.c
47
static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
arch/s390/lib/test_unwind.c
67
unwind_for_each_frame(&state, task, regs, sp) {
arch/sh/include/asm/ptrace.h
126
#define task_pt_regs(task) \
arch/sh/include/asm/ptrace.h
127
((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE) - 1)
arch/sh/include/asm/syscall_32.h
12
static inline long syscall_get_nr(struct task_struct *task,
arch/sh/include/asm/syscall_32.h
18
static inline void syscall_set_nr(struct task_struct *task,
arch/sh/include/asm/syscall_32.h
30
static inline void syscall_rollback(struct task_struct *task,
arch/sh/include/asm/syscall_32.h
39
static inline long syscall_get_error(struct task_struct *task,
arch/sh/include/asm/syscall_32.h
45
static inline long syscall_get_return_value(struct task_struct *task,
arch/sh/include/asm/syscall_32.h
51
static inline void syscall_set_return_value(struct task_struct *task,
arch/sh/include/asm/syscall_32.h
58
static inline void syscall_get_arguments(struct task_struct *task,
arch/sh/include/asm/syscall_32.h
72
static inline void syscall_set_arguments(struct task_struct *task,
arch/sh/include/asm/syscall_32.h
84
static inline int syscall_get_arch(struct task_struct *task)
arch/sh/include/asm/thread_info.h
28
struct task_struct *task; /* main task structure */
arch/sh/include/asm/thread_info.h
55
.task = &tsk, \
arch/sh/kernel/asm-offsets.c
25
DEFINE(TI_TASK, offsetof(struct thread_info, task));
arch/sh/kernel/dumpstack.c
59
struct task_struct *task = tinfo->task;
arch/sh/kernel/dumpstack.c
66
if (!task->ret_stack)
arch/sh/kernel/dumpstack.c
69
ret_stack = ftrace_graph_get_ret_stack(task, *graph);
arch/sh/kernel/dumpstack.c
88
stack_reader_dump(struct task_struct *task, struct pt_regs *regs,
arch/sh/kernel/dwarf.c
957
static void dwarf_unwinder_dump(struct task_struct *task,
arch/sh/kernel/irq.c
128
irqctx->tinfo.task = NULL;
arch/sh/kernel/irq.c
136
irqctx->tinfo.task = NULL;
arch/sh/kernel/irq.c
161
irqctx->tinfo.task = curctx->task;
arch/sh/kernel/irq.c
89
irqctx->tinfo.task = curctx->tinfo.task;
arch/sh/kernel/ptrace_32.c
333
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/sh/kernel/ptrace_32.c
38
static inline int get_stack_long(struct task_struct *task, int offset)
arch/sh/kernel/ptrace_32.c
42
stack = (unsigned char *)task_pt_regs(task);
arch/sh/kernel/ptrace_32.c
50
static inline int put_stack_long(struct task_struct *task, int offset,
arch/sh/kernel/ptrace_32.c
55
stack = (unsigned char *)task_pt_regs(task);
arch/sh/kernel/unwinder.c
132
void unwind_stack(struct task_struct *task, struct pt_regs *regs,
arch/sh/kernel/unwinder.c
163
curr_unwinder->dump(task, regs, sp, ops, data);
arch/sparc/include/asm/current.h
30
return current_thread_info()->task;
arch/sparc/include/asm/processor_64.h
179
unsigned long __get_wchan(struct task_struct *task);
arch/sparc/include/asm/syscall.h
109
static inline void syscall_get_arguments(struct task_struct *task,
arch/sparc/include/asm/syscall.h
118
if (test_tsk_thread_flag(task, TIF_32BIT))
arch/sparc/include/asm/syscall.h
132
static inline void syscall_set_arguments(struct task_struct *task,
arch/sparc/include/asm/syscall.h
142
static inline int syscall_get_arch(struct task_struct *task)
arch/sparc/include/asm/syscall.h
145
return test_tsk_thread_flag(task, TIF_32BIT)
arch/sparc/include/asm/syscall.h
20
static inline long syscall_get_nr(struct task_struct *task,
arch/sparc/include/asm/syscall.h
28
static inline void syscall_set_nr(struct task_struct *task,
arch/sparc/include/asm/syscall.h
40
static inline void syscall_rollback(struct task_struct *task,
arch/sparc/include/asm/syscall.h
80
static inline long syscall_get_error(struct task_struct *task,
arch/sparc/include/asm/syscall.h
88
static inline long syscall_get_return_value(struct task_struct *task,
arch/sparc/include/asm/syscall.h
96
static inline void syscall_set_return_value(struct task_struct *task,
arch/sparc/include/asm/thread_info_32.h
30
struct task_struct *task; /* main task structure */
arch/sparc/include/asm/thread_info_32.h
60
.task = &tsk, \
arch/sparc/include/asm/thread_info_64.h
117
.task = &tsk, \
arch/sparc/include/asm/thread_info_64.h
38
struct task_struct *task;
arch/sparc/kernel/process_32.c
378
unsigned long __get_wchan(struct task_struct *task)
arch/sparc/kernel/process_32.c
381
unsigned long task_base = (unsigned long) task;
arch/sparc/kernel/process_32.c
386
fp = task_thread_info(task)->ksp + bias;
arch/sparc/kernel/process_64.c
274
((tp && tp->task) ? tp->task->comm : "NULL"),
arch/sparc/kernel/process_64.c
275
((tp && tp->task) ? tp->task->pid : -1));
arch/sparc/kernel/process_64.c
415
mm = t->task->mm;
arch/sparc/kernel/process_64.c
672
unsigned long __get_wchan(struct task_struct *task)
arch/sparc/kernel/process_64.c
680
tp = task_thread_info(task);
arch/sparc/kernel/process_64.c
682
fp = task_thread_info(task)->ksp + bias;
arch/sparc/kernel/ptrace_32.c
343
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/sparc/kernel/ptrace_64.c
905
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/sparc/kernel/ptrace_64.c
908
if (test_tsk_thread_flag(task, TIF_32BIT))
arch/sparc/kernel/stacktrace.c
32
t = tp->task;
arch/sparc/kernel/traps_32.c
373
TI_TASK != offsetof(struct thread_info, task) ||
arch/sparc/kernel/traps_64.c
2843
BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
arch/um/drivers/chan_kern.c
167
struct line *line = container_of(work, struct line, task.work);
arch/um/drivers/chan_kern.c
179
INIT_DELAYED_WORK(&line->task, line_timer_cb);
arch/um/drivers/chan_kern.c
590
schedule_delayed_work(&line->task, 1);
arch/um/drivers/line.h
55
struct delayed_work task;
arch/um/drivers/mconsole_kern.c
651
struct task_struct *task = arg;
arch/um/drivers/mconsole_kern.c
653
show_stack(task, NULL, KERN_INFO);
arch/um/include/asm/mmu_context.h
22
extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
arch/um/include/asm/ptrace-generic.h
43
extern void clear_flushed_tls(struct task_struct *task);
arch/um/include/asm/stacktrace.h
19
get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
arch/um/include/asm/stacktrace.h
21
if (!task || task == current)
arch/um/include/asm/stacktrace.h
23
return KSTK_EBP(task);
arch/um/include/asm/stacktrace.h
27
get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
arch/um/include/asm/stacktrace.h
34
*get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
arch/um/include/asm/stacktrace.h
36
if (!task || task == current)
arch/um/include/asm/stacktrace.h
38
return (unsigned long *)KSTK_ESP(task);
arch/um/include/asm/syscall-generic.h
18
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
arch/um/include/asm/syscall-generic.h
24
static inline void syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
arch/um/include/asm/syscall-generic.h
29
static inline void syscall_rollback(struct task_struct *task,
arch/um/include/asm/syscall-generic.h
35
static inline long syscall_get_error(struct task_struct *task,
arch/um/include/asm/syscall-generic.h
43
static inline long syscall_get_return_value(struct task_struct *task,
arch/um/include/asm/syscall-generic.h
49
static inline void syscall_set_return_value(struct task_struct *task,
arch/um/include/asm/syscall-generic.h
56
static inline void syscall_get_arguments(struct task_struct *task,
arch/um/include/asm/syscall-generic.h
70
static inline void syscall_set_arguments(struct task_struct *task,
arch/um/kernel/process.c
68
static inline void set_current(struct task_struct *task)
arch/um/kernel/process.c
70
cpu_tasks[task_thread_info(task)->cpu] = task;
arch/um/kernel/skas/mmu.c
43
int init_new_context(struct task_struct *task, struct mm_struct *mm)
arch/um/kernel/sysrq.c
29
void show_stack(struct task_struct *task, unsigned long *stack,
arch/um/kernel/sysrq.c
36
stack = get_stack_pointer(task, segv_regs);
arch/um/kernel/sysrq.c
49
dump_trace(task ?: current, &stackops, (void *)loglvl);
arch/x86/events/amd/brs.c
385
struct task_struct *task, bool sched_in)
arch/x86/events/amd/lbr.c
376
struct task_struct *task, bool sched_in)
arch/x86/events/core.c
2727
struct task_struct *task, bool sched_in)
arch/x86/events/core.c
2729
static_call_cond(x86_pmu_sched_task)(pmu_ctx, task, sched_in);
arch/x86/events/intel/core.c
3283
event->ctx->task))
arch/x86/events/intel/core.c
6201
struct task_struct *task, bool sched_in)
arch/x86/events/intel/core.c
6204
intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in);
arch/x86/events/intel/lbr.c
527
struct task_struct *task, bool sched_in)
arch/x86/events/intel/lbr.c
542
ctx_data = rcu_dereference(task->perf_ctx_data);
arch/x86/events/intel/lbr.c
583
struct task_struct *task = event->hw.target;
arch/x86/events/intel/lbr.c
587
ctx_data = rcu_dereference(task->perf_ctx_data);
arch/x86/events/intel/lbr.c
669
struct task_struct *task = event->hw.target;
arch/x86/events/intel/lbr.c
673
ctx_data = rcu_dereference(task->perf_ctx_data);
arch/x86/events/perf_event.h
1467
struct task_struct *task, bool sched_in);
arch/x86/events/perf_event.h
1522
struct task_struct *task, bool sched_in);
arch/x86/events/perf_event.h
1548
struct task_struct *task, bool sched_in)
arch/x86/events/perf_event.h
1758
struct task_struct *task, bool sched_in);
arch/x86/events/perf_event.h
922
struct task_struct *task, bool sched_in);
arch/x86/include/asm/fsgsbase.h
17
extern unsigned long x86_fsbase_read_task(struct task_struct *task);
arch/x86/include/asm/fsgsbase.h
18
extern unsigned long x86_gsbase_read_task(struct task_struct *task);
arch/x86/include/asm/fsgsbase.h
19
extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
arch/x86/include/asm/fsgsbase.h
20
extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
arch/x86/include/asm/fsgsbase.h
78
extern unsigned long x86_fsgsbase_read_task(struct task_struct *task,
arch/x86/include/asm/processor.h
520
extern struct fpu *x86_task_fpu(struct task_struct *task);
arch/x86/include/asm/processor.h
522
# define x86_task_fpu(task) ((struct fpu *)((void *)(task) + sizeof(*(task))))
arch/x86/include/asm/processor.h
600
extern void set_task_blockstep(struct task_struct *task, bool on);
arch/x86/include/asm/processor.h
646
#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))
arch/x86/include/asm/processor.h
648
#define task_pt_regs(task) \
arch/x86/include/asm/processor.h
650
unsigned long __ptr = (unsigned long)task_stack_page(task); \
arch/x86/include/asm/processor.h
680
#define KSTK_EIP(task) (task_pt_regs(task)->ip)
arch/x86/include/asm/processor.h
681
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
arch/x86/include/asm/proto.h
43
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2);
arch/x86/include/asm/shstk.h
17
long shstk_prctl(struct task_struct *task, int option, unsigned long arg2);
arch/x86/include/asm/shstk.h
29
static inline long shstk_prctl(struct task_struct *task, int option,
arch/x86/include/asm/stacktrace.h
31
bool in_task_stack(unsigned long *stack, struct task_struct *task,
arch/x86/include/asm/stacktrace.h
36
int get_stack_info(unsigned long *stack, struct task_struct *task,
arch/x86/include/asm/stacktrace.h
38
bool get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
arch/x86/include/asm/stacktrace.h
71
get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
arch/x86/include/asm/stacktrace.h
76
if (task == current)
arch/x86/include/asm/stacktrace.h
79
return &((struct inactive_task_frame *)task->thread.sp)->bp;
arch/x86/include/asm/stacktrace.h
83
get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
arch/x86/include/asm/stacktrace.h
90
get_stack_pointer(struct task_struct *task, struct pt_regs *regs)
arch/x86/include/asm/stacktrace.h
95
if (task == current)
arch/x86/include/asm/stacktrace.h
98
return (unsigned long *)task->thread.sp;
arch/x86/include/asm/switch_to.h
69
static inline void update_task_stack(struct task_struct *task)
arch/x86/include/asm/switch_to.h
73
this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
arch/x86/include/asm/switch_to.h
77
load_sp0(task_top_of_stack(task));
arch/x86/include/asm/syscall.h
100
static inline void syscall_set_arguments(struct task_struct *task,
arch/x86/include/asm/syscall.h
112
static inline int syscall_get_arch(struct task_struct *task)
arch/x86/include/asm/syscall.h
119
static inline void syscall_get_arguments(struct task_struct *task,
arch/x86/include/asm/syscall.h
124
if (task->thread_info.status & TS_COMPAT) {
arch/x86/include/asm/syscall.h
143
static inline void syscall_set_arguments(struct task_struct *task,
arch/x86/include/asm/syscall.h
148
if (task->thread_info.status & TS_COMPAT) {
arch/x86/include/asm/syscall.h
167
static inline int syscall_get_arch(struct task_struct *task)
arch/x86/include/asm/syscall.h
171
task->thread_info.status & TS_COMPAT)
arch/x86/include/asm/syscall.h
36
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
arch/x86/include/asm/syscall.h
41
static inline void syscall_set_nr(struct task_struct *task,
arch/x86/include/asm/syscall.h
48
static inline void syscall_rollback(struct task_struct *task,
arch/x86/include/asm/syscall.h
54
static inline long syscall_get_error(struct task_struct *task,
arch/x86/include/asm/syscall.h
63
if (task->thread_info.status & (TS_COMPAT|TS_I386_REGS_POKED))
arch/x86/include/asm/syscall.h
73
static inline long syscall_get_return_value(struct task_struct *task,
arch/x86/include/asm/syscall.h
79
static inline void syscall_set_return_value(struct task_struct *task,
arch/x86/include/asm/syscall.h
88
static inline void syscall_get_arguments(struct task_struct *task,
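A sketch of how the syscall accessors above compose, assuming a stopped tracee; example_dump_syscall() is hypothetical. syscall_get_nr() returns a negative number when the task is not inside a system call.

    #include <linux/printk.h>
    #include <linux/sched.h>
    #include <linux/sched/task_stack.h>
    #include <asm/syscall.h>

    static void example_dump_syscall(struct task_struct *task)
    {
        struct pt_regs *regs = task_pt_regs(task);
        unsigned long args[6];
        int nr = syscall_get_nr(task, regs);

        if (nr < 0)
            return;         /* not inside a system call */

        syscall_get_arguments(task, regs, args);
        pr_info("%s[%d]: syscall %d(%lx, %lx, %lx, ...)\n",
                task->comm, task->pid, nr, args[0], args[1], args[2]);
    }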
arch/x86/include/asm/unwind.h
112
return rethook_find_ret_addr(state->task, (unsigned long)addr_p,
arch/x86/include/asm/unwind.h
125
ret = ftrace_graph_ret_addr(state->task, &state->graph_idx,
arch/x86/include/asm/unwind.h
135
#define READ_ONCE_TASK_STACK(task, x) \
arch/x86/include/asm/unwind.h
138
if (task == current) \
arch/x86/include/asm/unwind.h
145
static inline bool task_on_another_cpu(struct task_struct *task)
arch/x86/include/asm/unwind.h
148
return task != current && task->on_cpu;
arch/x86/include/asm/unwind.h
17
struct task_struct *task;
arch/x86/include/asm/unwind.h
42
void __unwind_start(struct unwind_state *state, struct task_struct *task,
arch/x86/include/asm/unwind.h
59
void unwind_start(struct unwind_state *state, struct task_struct *task,
arch/x86/include/asm/unwind.h
62
first_frame = first_frame ? : get_stack_pointer(task, regs);
arch/x86/include/asm/unwind.h
64
__unwind_start(state, task, regs, first_frame);
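A minimal walker over the unwind API above, mirroring the loop shape in arch/x86/kernel/stacktrace.c further down; example_walk_stack() is a hypothetical name. The target task must not be running on another CPU (the ORC __unwind_start() bails via task_on_another_cpu()).

    #include <linux/printk.h>
    #include <linux/sched.h>
    #include <asm/unwind.h>

    static void example_walk_stack(struct task_struct *task)
    {
        struct unwind_state state;

        for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
             unwind_next_frame(&state)) {
            unsigned long addr = unwind_get_return_address(&state);

            if (!addr)
                break;
            pr_info("  %pS\n", (void *)addr);
        }
    }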
arch/x86/include/asm/vm86.h
87
#define free_vm86(task) do { (void)(task); } while(0)
arch/x86/kernel/cpu/bugs.c
2490
static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
arch/x86/kernel/cpu/bugs.c
2498
set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
arch/x86/kernel/cpu/bugs.c
2501
clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
arch/x86/kernel/cpu/bugs.c
2508
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
arch/x86/kernel/cpu/bugs.c
2517
if (task_spec_ssb_force_disable(task))
arch/x86/kernel/cpu/bugs.c
2519
task_clear_spec_ssb_disable(task);
arch/x86/kernel/cpu/bugs.c
2520
task_clear_spec_ssb_noexec(task);
arch/x86/kernel/cpu/bugs.c
2521
task_update_spec_tif(task);
arch/x86/kernel/cpu/bugs.c
2524
task_set_spec_ssb_disable(task);
arch/x86/kernel/cpu/bugs.c
2525
task_clear_spec_ssb_noexec(task);
arch/x86/kernel/cpu/bugs.c
2526
task_update_spec_tif(task);
arch/x86/kernel/cpu/bugs.c
2529
task_set_spec_ssb_disable(task);
arch/x86/kernel/cpu/bugs.c
2530
task_set_spec_ssb_force_disable(task);
arch/x86/kernel/cpu/bugs.c
2531
task_clear_spec_ssb_noexec(task);
arch/x86/kernel/cpu/bugs.c
2532
task_update_spec_tif(task);
arch/x86/kernel/cpu/bugs.c
2535
if (task_spec_ssb_force_disable(task))
arch/x86/kernel/cpu/bugs.c
2537
task_set_spec_ssb_disable(task);
arch/x86/kernel/cpu/bugs.c
2538
task_set_spec_ssb_noexec(task);
arch/x86/kernel/cpu/bugs.c
2539
task_update_spec_tif(task);
arch/x86/kernel/cpu/bugs.c
2555
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
arch/x86/kernel/cpu/bugs.c
2579
task_spec_ib_force_disable(task))
arch/x86/kernel/cpu/bugs.c
2582
task_clear_spec_ib_disable(task);
arch/x86/kernel/cpu/bugs.c
2583
task_update_spec_tif(task);
arch/x86/kernel/cpu/bugs.c
2598
task_set_spec_ib_disable(task);
arch/x86/kernel/cpu/bugs.c
2600
task_set_spec_ib_force_disable(task);
arch/x86/kernel/cpu/bugs.c
2601
task_update_spec_tif(task);
arch/x86/kernel/cpu/bugs.c
2602
if (task == current)
arch/x86/kernel/cpu/bugs.c
2611
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
arch/x86/kernel/cpu/bugs.c
2616
return ssb_prctl_set(task, ctrl);
arch/x86/kernel/cpu/bugs.c
2618
return ib_prctl_set(task, ctrl);
arch/x86/kernel/cpu/bugs.c
2620
return l1d_flush_prctl_set(task, ctrl);
arch/x86/kernel/cpu/bugs.c
2627
void arch_seccomp_spec_mitigate(struct task_struct *task)
arch/x86/kernel/cpu/bugs.c
2630
ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
arch/x86/kernel/cpu/bugs.c
2633
ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
arch/x86/kernel/cpu/bugs.c
2637
static int l1d_flush_prctl_get(struct task_struct *task)
arch/x86/kernel/cpu/bugs.c
2642
if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
arch/x86/kernel/cpu/bugs.c
2648
static int ssb_prctl_get(struct task_struct *task)
arch/x86/kernel/cpu/bugs.c
2660
if (task_spec_ssb_force_disable(task))
arch/x86/kernel/cpu/bugs.c
2662
if (task_spec_ssb_noexec(task))
arch/x86/kernel/cpu/bugs.c
2664
if (task_spec_ssb_disable(task))
arch/x86/kernel/cpu/bugs.c
2671
static int ib_prctl_get(struct task_struct *task)
arch/x86/kernel/cpu/bugs.c
2680
if (task_spec_ib_force_disable(task))
arch/x86/kernel/cpu/bugs.c
2682
if (task_spec_ib_disable(task))
arch/x86/kernel/cpu/bugs.c
2693
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
arch/x86/kernel/cpu/bugs.c
2697
return ssb_prctl_get(task);
arch/x86/kernel/cpu/bugs.c
2699
return ib_prctl_get(task);
arch/x86/kernel/cpu/bugs.c
2701
return l1d_flush_prctl_get(task);
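A hedged userspace sketch of the prctl() interface that the ssb/ib/l1d handlers above service; the constants are from <linux/prctl.h>.

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
        /* Disable speculative store bypass for this task. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
            perror("PR_SET_SPECULATION_CTRL");

        /* Query the state; the return value encodes PR_SPEC_* bits,
         * as assembled by ssb_prctl_get() above. */
        printf("ssb state: %d\n",
               (int)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                          0, 0, 0));
        return 0;
    }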
arch/x86/kernel/cpu/proc.c
193
void arch_proc_pid_thread_features(struct seq_file *m, struct task_struct *task)
arch/x86/kernel/cpu/proc.c
196
dump_x86_features(m, task->thread.features);
arch/x86/kernel/cpu/proc.c
200
dump_x86_features(m, task->thread.features_locked);
arch/x86/kernel/dumpstack.c
184
static void __show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
arch/x86/kernel/dumpstack.c
195
unwind_start(&state, task, regs, stack);
arch/x86/kernel/dumpstack.c
196
stack = stack ?: get_stack_pointer(task, regs);
arch/x86/kernel/dumpstack.c
220
if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
arch/x86/kernel/dumpstack.c
228
if (get_stack_info(stack, task, &stack_info, &visit_mask))
arch/x86/kernel/dumpstack.c
278
real_addr = ftrace_graph_ret_addr(task, &graph_idx,
arch/x86/kernel/dumpstack.c
30
bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task,
arch/x86/kernel/dumpstack.c
306
static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
arch/x86/kernel/dumpstack.c
314
bool disable_kasan = task && task != current;
arch/x86/kernel/dumpstack.c
319
__show_trace_log_lvl(task, regs, stack, log_lvl);
arch/x86/kernel/dumpstack.c
325
void show_stack(struct task_struct *task, unsigned long *sp,
arch/x86/kernel/dumpstack.c
328
task = task ? : current;
arch/x86/kernel/dumpstack.c
33
unsigned long *begin = task_stack_page(task);
arch/x86/kernel/dumpstack.c
334
if (!sp && task == current)
arch/x86/kernel/dumpstack.c
337
show_trace_log_lvl(task, NULL, sp, loglvl);
arch/x86/kernel/dumpstack.c
34
unsigned long *end = task_stack_page(task) + THREAD_SIZE;
arch/x86/kernel/dumpstack_32.c
108
int get_stack_info(unsigned long *stack, struct task_struct *task,
arch/x86/kernel/dumpstack_32.c
114
task = task ? : current;
arch/x86/kernel/dumpstack_32.c
116
if (in_task_stack(stack, task, info))
arch/x86/kernel/dumpstack_32.c
119
if (task != current)
arch/x86/kernel/dumpstack_64.c
170
bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
arch/x86/kernel/dumpstack_64.c
173
if (in_task_stack(stack, task, info))
arch/x86/kernel/dumpstack_64.c
176
if (task != current)
arch/x86/kernel/dumpstack_64.c
191
int get_stack_info(unsigned long *stack, struct task_struct *task,
arch/x86/kernel/dumpstack_64.c
194
task = task ? : current;
arch/x86/kernel/dumpstack_64.c
199
if (!get_stack_info_noinstr(stack, task, info))
arch/x86/kernel/dumpstack_64.c
209
if (task == current)
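A small usage sketch for show_stack() as declared above; the wrapper name is hypothetical. Passing task == NULL (or current) walks the live stack, while any other task must be stopped, which is why show_trace_log_lvl() above disables KASAN checking for task != current.

    #include <linux/kern_levels.h>
    #include <linux/sched.h>
    #include <linux/sched/debug.h>

    static void example_dump_stack_of(struct task_struct *task)
    {
        show_stack(task, NULL, KERN_INFO);
    }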
arch/x86/kernel/fpu/core.c
61
struct fpu *x86_task_fpu(struct task_struct *task)
arch/x86/kernel/fpu/core.c
63
if (WARN_ON_ONCE(task->flags & PF_KTHREAD))
arch/x86/kernel/fpu/core.c
66
return (void *)task + sizeof(*task);
arch/x86/kernel/fpu/xstate.c
1887
static void avx512_status(struct seq_file *m, struct task_struct *task)
arch/x86/kernel/fpu/xstate.c
1893
if (task->flags & (PF_KTHREAD | PF_USER_WORKER))
arch/x86/kernel/fpu/xstate.c
1896
timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);
arch/x86/kernel/fpu/xstate.c
1916
struct pid *pid, struct task_struct *task)
arch/x86/kernel/fpu/xstate.c
1922
avx512_status(m, task);
arch/x86/kernel/perf_regs.c
123
u64 perf_reg_abi(struct task_struct *task)
arch/x86/kernel/perf_regs.c
125
if (!user_64bit_mode(task_pt_regs(task)))
arch/x86/kernel/perf_regs.c
98
u64 perf_reg_abi(struct task_struct *task)
arch/x86/kernel/process_64.c
276
static __always_inline void save_fsgs(struct task_struct *task)
arch/x86/kernel/process_64.c
278
savesegment(fs, task->thread.fsindex);
arch/x86/kernel/process_64.c
279
savesegment(gs, task->thread.gsindex);
arch/x86/kernel/process_64.c
286
task->thread.fsbase = rdfsbase();
arch/x86/kernel/process_64.c
287
task->thread.gsbase = __rdgsbase_inactive();
arch/x86/kernel/process_64.c
289
save_base_legacy(task, task->thread.fsindex, FS);
arch/x86/kernel/process_64.c
290
save_base_legacy(task, task->thread.gsindex, GS);
arch/x86/kernel/process_64.c
412
unsigned long x86_fsgsbase_read_task(struct task_struct *task,
arch/x86/kernel/process_64.c
430
base = get_desc_base(&task->thread.tls_array[idx]);
arch/x86/kernel/process_64.c
440
mutex_lock(&task->mm->context.lock);
arch/x86/kernel/process_64.c
441
ldt = task->mm->context.ldt;
arch/x86/kernel/process_64.c
446
mutex_unlock(&task->mm->context.lock);
arch/x86/kernel/process_64.c
485
unsigned long x86_fsbase_read_task(struct task_struct *task)
arch/x86/kernel/process_64.c
489
if (task == current)
arch/x86/kernel/process_64.c
492
(task->thread.fsindex == 0))
arch/x86/kernel/process_64.c
493
fsbase = task->thread.fsbase;
arch/x86/kernel/process_64.c
495
fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);
arch/x86/kernel/process_64.c
500
unsigned long x86_gsbase_read_task(struct task_struct *task)
arch/x86/kernel/process_64.c
504
if (task == current)
arch/x86/kernel/process_64.c
507
(task->thread.gsindex == 0))
arch/x86/kernel/process_64.c
508
gsbase = task->thread.gsbase;
arch/x86/kernel/process_64.c
510
gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);
arch/x86/kernel/process_64.c
515
void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
arch/x86/kernel/process_64.c
517
WARN_ON_ONCE(task == current);
arch/x86/kernel/process_64.c
519
task->thread.fsbase = fsbase;
arch/x86/kernel/process_64.c
522
void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
arch/x86/kernel/process_64.c
524
WARN_ON_ONCE(task == current);
arch/x86/kernel/process_64.c
526
task->thread.gsbase = gsbase;
arch/x86/kernel/process_64.c
866
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
arch/x86/kernel/process_64.c
882
if (task == current) {
arch/x86/kernel/process_64.c
890
task->thread.gsbase = arg2;
arch/x86/kernel/process_64.c
893
task->thread.gsindex = 0;
arch/x86/kernel/process_64.c
894
x86_gsbase_write_task(task, arg2);
arch/x86/kernel/process_64.c
912
if (task == current) {
arch/x86/kernel/process_64.c
920
task->thread.fsbase = arg2;
arch/x86/kernel/process_64.c
922
task->thread.fsindex = 0;
arch/x86/kernel/process_64.c
923
x86_fsbase_write_task(task, arg2);
arch/x86/kernel/process_64.c
929
unsigned long base = x86_fsbase_read_task(task);
arch/x86/kernel/process_64.c
935
unsigned long base = x86_gsbase_read_task(task);
arch/x86/kernel/process_64.c
955
return put_user(task->mm->context.untag_mask,
arch/x86/kernel/process_64.c
958
return prctl_enable_tagged_addr(task->mm, arg2);
arch/x86/kernel/process_64.c
960
if (current != task)
arch/x86/kernel/process_64.c
962
set_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &task->mm->context.flags);
arch/x86/kernel/process_64.c
975
return shstk_prctl(task, option, arg2);
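A hedged userspace counterpart to do_arch_prctl_64() above: reading this task's own FS base via arch_prctl(2). The same option is serviced for a stopped child via ptrace.

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/prctl.h>

    int main(void)
    {
        unsigned long fsbase;

        if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase))
            perror("arch_prctl");
        else
            printf("fsbase = %#lx\n", fsbase);
        return 0;
    }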
arch/x86/kernel/ptrace.c
1395
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/x86/kernel/ptrace.c
1398
if (!user_64bit_mode(task_pt_regs(task)))
arch/x86/kernel/ptrace.c
183
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
arch/x86/kernel/ptrace.c
190
retval = *pt_regs_access(task_pt_regs(task), offset);
arch/x86/kernel/ptrace.c
192
if (task == current)
arch/x86/kernel/ptrace.c
195
retval = task->thread.gs;
arch/x86/kernel/ptrace.c
200
static int set_segment_reg(struct task_struct *task,
arch/x86/kernel/ptrace.c
203
if (WARN_ON_ONCE(task == current))
arch/x86/kernel/ptrace.c
229
*pt_regs_access(task_pt_regs(task), offset) = value;
arch/x86/kernel/ptrace.c
233
task->thread.gs = value;
arch/x86/kernel/ptrace.c
249
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
arch/x86/kernel/ptrace.c
258
if (task == current) {
arch/x86/kernel/ptrace.c
263
return task->thread.fsindex;
arch/x86/kernel/ptrace.c
265
if (task == current) {
arch/x86/kernel/ptrace.c
269
return task->thread.gsindex;
arch/x86/kernel/ptrace.c
271
if (task == current) {
arch/x86/kernel/ptrace.c
275
return task->thread.ds;
arch/x86/kernel/ptrace.c
277
if (task == current) {
arch/x86/kernel/ptrace.c
281
return task->thread.es;
arch/x86/kernel/ptrace.c
287
return *pt_regs_access(task_pt_regs(task), offset);
arch/x86/kernel/ptrace.c
290
static int set_segment_reg(struct task_struct *task,
arch/x86/kernel/ptrace.c
293
if (WARN_ON_ONCE(task == current))
arch/x86/kernel/ptrace.c
310
task->thread.fsindex = value;
arch/x86/kernel/ptrace.c
313
task->thread.gsindex = value;
arch/x86/kernel/ptrace.c
316
task->thread.ds = value;
arch/x86/kernel/ptrace.c
319
task->thread.es = value;
arch/x86/kernel/ptrace.c
328
task_pt_regs(task)->cs = value;
arch/x86/kernel/ptrace.c
333
task_pt_regs(task)->ss = value;
arch/x86/kernel/ptrace.c
342
static unsigned long get_flags(struct task_struct *task)
arch/x86/kernel/ptrace.c
344
unsigned long retval = task_pt_regs(task)->flags;
arch/x86/kernel/ptrace.c
349
if (test_tsk_thread_flag(task, TIF_FORCED_TF))
arch/x86/kernel/ptrace.c
355
static int set_flags(struct task_struct *task, unsigned long value)
arch/x86/kernel/ptrace.c
357
struct pt_regs *regs = task_pt_regs(task);
arch/x86/kernel/ptrace.c
365
clear_tsk_thread_flag(task, TIF_FORCED_TF);
arch/x86/kernel/ptrace.c
366
else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
arch/x86/kernel/ptrace.c
407
static unsigned long getreg(struct task_struct *task, unsigned long offset)
arch/x86/kernel/ptrace.c
416
return get_segment_reg(task, offset);
arch/x86/kernel/ptrace.c
419
return get_flags(task);
arch/x86/kernel/ptrace.c
423
return x86_fsbase_read_task(task);
arch/x86/kernel/ptrace.c
425
return x86_gsbase_read_task(task);
arch/x86/kernel/ptrace.c
429
return *pt_regs_access(task_pt_regs(task), offset);
arch/x86/kernel/shstk.c
576
long shstk_prctl(struct task_struct *task, int option, unsigned long arg2)
arch/x86/kernel/shstk.c
581
return put_user(task->thread.features, (unsigned long __user *)arg2);
arch/x86/kernel/shstk.c
585
task->thread.features_locked |= features;
arch/x86/kernel/shstk.c
590
if (task != current) {
arch/x86/kernel/shstk.c
592
task->thread.features_locked &= ~features;
arch/x86/kernel/shstk.c
599
if (features & task->thread.features_locked)
arch/x86/kernel/stacktrace.c
16
struct task_struct *task, struct pt_regs *regs)
arch/x86/kernel/stacktrace.c
24
for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
arch/x86/kernel/stacktrace.c
33
void *cookie, struct task_struct *task)
arch/x86/kernel/stacktrace.c
39
for (unwind_start(&state, task, NULL, NULL);
arch/x86/kernel/step.c
174
void set_task_blockstep(struct task_struct *task, bool on)
arch/x86/kernel/step.c
191
set_tsk_thread_flag(task, TIF_BLOCKSTEP);
arch/x86/kernel/step.c
194
clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
arch/x86/kernel/step.c
196
if (task == current)
arch/x86/kernel/traps.c
1425
struct task_struct *task = current;
arch/x86/kernel/traps.c
1426
struct fpu *fpu = x86_task_fpu(task);
arch/x86/kernel/traps.c
1437
task->thread.error_code = 0;
arch/x86/kernel/traps.c
1438
task->thread.trap_nr = trapnr;
arch/x86/kernel/traps.c
1452
task->thread.trap_nr = trapnr;
arch/x86/kernel/traps.c
1453
task->thread.error_code = 0;
arch/x86/kernel/unwind_frame.c
229
if (get_stack_info(info->next_sp, state->task, info,
arch/x86/kernel/unwind_frame.c
252
addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
arch/x86/kernel/unwind_frame.c
277
regs = task_pt_regs(state->task);
arch/x86/kernel/unwind_frame.c
311
next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
arch/x86/kernel/unwind_frame.c
330
if (state->task != current)
arch/x86/kernel/unwind_frame.c
341
state->regs->sp < (unsigned long)task_pt_regs(state->task))
arch/x86/kernel/unwind_frame.c
351
if (state->task != current)
arch/x86/kernel/unwind_frame.c
357
state->regs, state->task->comm,
arch/x86/kernel/unwind_frame.c
358
state->task->pid, next_bp);
arch/x86/kernel/unwind_frame.c
363
state->bp, state->task->comm,
arch/x86/kernel/unwind_frame.c
364
state->task->pid, next_bp);
arch/x86/kernel/unwind_frame.c
373
void __unwind_start(struct unwind_state *state, struct task_struct *task,
arch/x86/kernel/unwind_frame.c
379
state->task = task;
arch/x86/kernel/unwind_frame.c
388
bp = get_frame_pointer(task, regs);
arch/x86/kernel/unwind_frame.c
405
get_stack_info(bp, state->task, &state->stack_info,
arch/x86/kernel/unwind_frame.c
50
if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
arch/x86/kernel/unwind_frame.c
82
return (unsigned long *)task_pt_regs(state->task) - 2;
arch/x86/kernel/unwind_guess.c
44
} while (!get_stack_info(state->sp, state->task, info,
arch/x86/kernel/unwind_guess.c
51
void __unwind_start(struct unwind_state *state, struct task_struct *task,
arch/x86/kernel/unwind_guess.c
56
state->task = task;
arch/x86/kernel/unwind_guess.c
59
get_stack_info(first_frame, state->task, &state->stack_info,
arch/x86/kernel/unwind_orc.c
21
if (state->task == current && !state->error) { \
arch/x86/kernel/unwind_orc.c
412
return !get_stack_info(addr, state->task, info, &state->stack_mask) &&
arch/x86/kernel/unwind_orc.c
65
if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
arch/x86/kernel/unwind_orc.c
698
void __unwind_start(struct unwind_state *state, struct task_struct *task,
arch/x86/kernel/unwind_orc.c
702
state->task = task;
arch/x86/kernel/unwind_orc.c
712
if (task_on_another_cpu(task))
arch/x86/kernel/unwind_orc.c
726
} else if (task == current) {
arch/x86/kernel/unwind_orc.c
734
struct inactive_task_frame *frame = (void *)task->thread.sp;
arch/x86/kernel/unwind_orc.c
736
state->sp = task->thread.sp + sizeof(*frame);
arch/x86/kernel/unwind_orc.c
742
if (get_stack_info((unsigned long *)state->sp, state->task,
arch/x86/kernel/unwind_orc.c
752
if (get_stack_info(next_page, state->task, &state->stack_info,
arch/x86/kernel/vm86_32.c
769
void release_vm86_irqs(struct task_struct *task)
arch/x86/kernel/vm86_32.c
773
if (vm86_irqs[i].tsk == task)
arch/x86/um/asm/ptrace.h
94
extern long arch_prctl(struct task_struct *task, int option,
arch/x86/um/asm/syscall.h
14
static inline int syscall_get_arch(struct task_struct *task)
arch/x86/um/syscalls_64.c
16
long arch_prctl(struct task_struct *task, int option,
arch/x86/um/tls_32.c
126
static inline int needs_TLS_update(struct task_struct *task)
arch/x86/um/tls_32.c
133
&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
arch/x86/um/tls_32.c
151
void clear_flushed_tls(struct task_struct *task)
arch/x86/um/tls_32.c
157
&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
arch/x86/um/tls_32.c
197
static int set_tls_entry(struct task_struct* task, struct user_desc *info,
arch/x86/um/tls_32.c
200
struct thread_struct *t = &task->thread;
arch/x86/um/tls_32.c
231
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
arch/x86/um/tls_32.c
234
struct thread_struct *t = &task->thread;
arch/x86/um/tls_32.c
249
if (unlikely(task == current &&
arch/x86/um/tls_32.c
25
static int do_set_thread_area(struct task_struct* task, struct user_desc *info)
arch/x86/um/tls_32.c
35
struct stub_data *data = (void *)task->mm->context.id.stack;
arch/x86/um/tls_32.c
43
ret = os_set_thread_area(info, task->mm->context.id.pid);
arch/x86/um/tls_32.c
60
static int get_free_idx(struct task_struct* task)
arch/x86/um/tls_32.c
62
struct thread_struct *t = &task->thread;
arch/x86/um/tls_64.c
5
void clear_flushed_tls(struct task_struct *task)
arch/xtensa/include/asm/current.h
24
return current_thread_info()->task;
arch/xtensa/include/asm/stacktrace.h
20
static __always_inline unsigned long *stack_pointer(struct task_struct *task)
arch/xtensa/include/asm/stacktrace.h
24
if (!task || task == current)
arch/xtensa/include/asm/stacktrace.h
27
sp = task->thread.sp;
arch/xtensa/include/asm/syscall.h
17
static inline int syscall_get_arch(struct task_struct *task)
arch/xtensa/include/asm/syscall.h
25
static inline long syscall_get_nr(struct task_struct *task,
arch/xtensa/include/asm/syscall.h
31
static inline void syscall_set_nr(struct task_struct *task,
arch/xtensa/include/asm/syscall.h
38
static inline void syscall_rollback(struct task_struct *task,
arch/xtensa/include/asm/syscall.h
44
static inline long syscall_get_error(struct task_struct *task,
arch/xtensa/include/asm/syscall.h
51
static inline long syscall_get_return_value(struct task_struct *task,
arch/xtensa/include/asm/syscall.h
57
static inline void syscall_set_return_value(struct task_struct *task,
arch/xtensa/include/asm/syscall.h
66
static inline void syscall_get_arguments(struct task_struct *task,
arch/xtensa/include/asm/syscall.h
77
static inline void syscall_set_arguments(struct task_struct *task,
arch/xtensa/include/asm/thread_info.h
49
struct task_struct *task; /* main task structure */
arch/xtensa/include/asm/thread_info.h
87
.task = &tsk, \
arch/xtensa/kernel/asm-offsets.c
87
OFFSET(TI_TASK, thread_info, task);
arch/xtensa/kernel/ptrace.c
220
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
arch/xtensa/kernel/stacktrace.c
224
void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
arch/xtensa/kernel/stacktrace.c
230
walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data);
arch/xtensa/kernel/traps.c
561
static void show_trace(struct task_struct *task, unsigned long *sp,
arch/xtensa/kernel/traps.c
565
sp = stack_pointer(task);
arch/xtensa/kernel/traps.c
607
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
arch/xtensa/kernel/traps.c
612
sp = stack_pointer(task);
arch/xtensa/kernel/traps.c
624
show_trace(task, sp, loglvl);
arch/xtensa/mm/tlb.c
176
struct task_struct *task = get_current();
arch/xtensa/mm/tlb.c
177
struct mm_struct *mm = task->mm;
arch/xtensa/mm/tlb.c
186
mm = task->active_mm;
block/blk-ioc.c
207
void exit_io_context(struct task_struct *task)
block/blk-ioc.c
211
task_lock(task);
block/blk-ioc.c
212
ioc = task->io_context;
block/blk-ioc.c
213
task->io_context = NULL;
block/blk-ioc.c
214
task_unlock(task);
block/blk-ioc.c
244
int set_task_ioprio(struct task_struct *task, int ioprio)
block/blk-ioc.c
250
tcred = __task_cred(task);
block/blk-ioc.c
258
err = security_task_setioprio(task, ioprio);
block/blk-ioc.c
262
task_lock(task);
block/blk-ioc.c
263
if (unlikely(!task->io_context)) {
block/blk-ioc.c
266
task_unlock(task);
block/blk-ioc.c
272
task_lock(task);
block/blk-ioc.c
273
if (task->flags & PF_EXITING) {
block/blk-ioc.c
277
if (task->io_context)
block/blk-ioc.c
280
task->io_context = ioc;
block/blk-ioc.c
282
task->io_context->ioprio = ioprio;
block/blk-ioc.c
284
task_unlock(task);
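A hedged userspace sketch of the path into set_task_ioprio() above, via the ioprio_set(2) syscall; this assumes the uapi <linux/ioprio.h> header (present on newer kernels) for the IOPRIO_* macros.

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/ioprio.h>

    int main(void)
    {
        /* Best-effort class, level 4, for this process (pid 0). */
        int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

        if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0)
            perror("ioprio_set");
        return 0;
    }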
crypto/crypto_engine.c
468
sched_set_fifo(engine->kworker->task);
drivers/accel/habanalabs/common/device.c
1405
struct task_struct *task = NULL;
drivers/accel/habanalabs/common/device.c
1439
task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
drivers/accel/habanalabs/common/device.c
1440
if (task) {
drivers/accel/habanalabs/common/device.c
1442
task_pid_nr(task));
drivers/accel/habanalabs/common/device.c
1443
send_sig(SIGKILL, task, 1);
drivers/accel/habanalabs/common/device.c
1446
put_task_struct(task);
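A sketch of the pid-to-task pattern used above: get_pid_task() takes a reference on the task_struct, which must be dropped with put_task_struct(); example_kill_owner() is a hypothetical name.

    #include <linux/pid.h>
    #include <linux/sched.h>
    #include <linux/sched/signal.h>
    #include <linux/sched/task.h>

    static void example_kill_owner(struct pid *pid)
    {
        struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);

        if (task) {
            send_sig(SIGKILL, task, 1); /* 1 == kernel-internal signal */
            put_task_struct(task);
        }
    }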
drivers/accel/rocket/rocket_job.c
110
struct rocket_task *task;
drivers/accel/rocket/rocket_job.c
119
task = &job->tasks[job->next_task_idx];
drivers/accel/rocket/rocket_job.c
136
rocket_pc_writel(core, BASE_ADDRESS, task->regcmd);
drivers/accel/rocket/rocket_job.c
138
PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT((task->regcmd_count + 1) / 2 - 1));
drivers/accel/rocket/rocket_job.c
152
dev_dbg(core->dev, "Submitted regcmd at 0x%llx to core %d", task->regcmd, core->index);
drivers/accel/rocket/rocket_job.c
81
struct drm_rocket_task task = {0};
drivers/accel/rocket/rocket_job.c
83
if (copy_from_user(&task,
drivers/accel/rocket/rocket_job.c
85
sizeof(task))) {
drivers/accel/rocket/rocket_job.c
91
if (task.regcmd_count == 0) {
drivers/accel/rocket/rocket_job.c
97
rjob->tasks[i].regcmd = task.regcmd;
drivers/accel/rocket/rocket_job.c
98
rjob->tasks[i].regcmd_count = task.regcmd_count;
drivers/android/binder/rust_binder.h
34
size_t task;
drivers/android/binder/rust_binder.h
94
return *(struct task_struct **) (t + RUST_BINDER_LAYOUT.p.task);
drivers/atm/eni.c
1406
tasklet_disable(&eni_dev->task);
drivers/atm/eni.c
1408
tasklet_enable(&eni_dev->task);
drivers/atm/eni.c
1521
tasklet_schedule(&eni_dev->task);
drivers/atm/eni.c
1843
tasklet_init(&eni_dev->task,eni_tasklet,(unsigned long) dev);
drivers/atm/eni.c
1979
tasklet_disable(&eni_dev->task);
drivers/atm/eni.c
1989
tasklet_enable(&eni_dev->task);
drivers/atm/eni.c
2058
tasklet_disable_in_atomic(&ENI_DEV(vcc->dev)->task);
drivers/atm/eni.c
2060
tasklet_enable(&ENI_DEV(vcc->dev)->task);
drivers/atm/eni.c
2064
tasklet_schedule(&ENI_DEV(vcc->dev)->task);
drivers/atm/eni.c
879
tasklet_disable(&eni_dev->task);
drivers/atm/eni.c
882
tasklet_enable(&eni_dev->task);
drivers/atm/eni.h
72
struct tasklet_struct task; /* tasklet for interrupt work */
drivers/block/aoe/aoe.h
200
struct task_struct *task;
drivers/block/aoe/aoecmd.c
1260
kthread_stop(k->task);
drivers/block/aoe/aoecmd.c
1267
struct task_struct *task;
drivers/block/aoe/aoecmd.c
1270
task = kthread_run(kthread, k, "%s", k->name);
drivers/block/aoe/aoecmd.c
1271
if (task == NULL || IS_ERR(task))
drivers/block/aoe/aoecmd.c
1273
k->task = task;
drivers/block/drbd/drbd_int.h
164
struct task_struct *task;
drivers/block/drbd/drbd_int.h
1831
struct task_struct *task = connection->ack_receiver.task;
drivers/block/drbd/drbd_int.h
1832
if (task && get_t_state(&connection->ack_receiver) == RUNNING)
drivers/block/drbd/drbd_int.h
1833
send_sig(SIGXCPU, task, 1);
drivers/block/drbd/drbd_main.c
1457
|| !connection->ack_receiver.task
drivers/block/drbd/drbd_main.c
335
thi->task = NULL;
drivers/block/drbd/drbd_main.c
3482
D_ASSERT(device, current == peer_device->connection->worker.task);
drivers/block/drbd/drbd_main.c
3529
D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
drivers/block/drbd/drbd_main.c
356
thi->task = NULL;
drivers/block/drbd/drbd_main.c
409
thi->task = nt;
drivers/block/drbd/drbd_main.c
447
if (thi->task == NULL) {
drivers/block/drbd/drbd_main.c
455
if (thi->task != current)
drivers/block/drbd/drbd_main.c
456
send_sig(DRBD_SIGKILL, thi->task, 1);
drivers/block/drbd/drbd_nl.c
368
if (current == connection->worker.task)
drivers/block/drbd/drbd_nl.c
397
if (current == connection->worker.task)
drivers/block/drbd/drbd_state.c
1529
D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
drivers/block/drbd/drbd_state.c
625
D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
drivers/block/drbd/drbd_worker.c
1768
if (current == connection->worker.task) {
drivers/block/ublk_drv.c
1710
if (unlikely(current != io->task || current->flags & PF_EXITING)) {
drivers/block/ublk_drv.c
210
struct task_struct *task;
drivers/block/ublk_drv.c
2156
(io->task == io2->task);
drivers/block/ublk_drv.c
2299
if (io->task) {
drivers/block/ublk_drv.c
2300
put_task_struct(io->task);
drivers/block/ublk_drv.c
2301
io->task = NULL;
drivers/block/ublk_drv.c
2777
struct task_struct *task;
drivers/block/ublk_drv.c
2786
task = io_uring_cmd_get_task(cmd);
drivers/block/ublk_drv.c
2788
if (WARN_ON_ONCE(task && task != io->task))
drivers/block/ublk_drv.c
3072
if (current == io->task && io->task_registered_buffers)
drivers/block/ublk_drv.c
3177
WRITE_ONCE(io->task, NULL);
drivers/block/ublk_drv.c
3179
WRITE_ONCE(io->task, get_task_struct(current));
drivers/block/ublk_drv.c
3312
if (READ_ONCE(io->task) != current) {
drivers/block/ublk_drv.c
3972
on_daemon = current == READ_ONCE(io->task);
drivers/block/ublk_drv.c
4053
if (io->task)
drivers/block/ublk_drv.c
4054
put_task_struct(io->task);
drivers/bluetooth/btmrvl_drv.h
48
struct task_struct *task;
drivers/bluetooth/btmrvl_main.c
700
kthread_stop(priv->main_thread.task);
drivers/bluetooth/btmrvl_main.c
732
priv->main_thread.task = kthread_run(btmrvl_service_main_thread,
drivers/bluetooth/btmrvl_main.c
734
if (IS_ERR(priv->main_thread.task))
drivers/bluetooth/btmrvl_main.c
762
kthread_stop(priv->main_thread.task);
drivers/bus/mhi/host/init.c
974
tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
drivers/bus/mhi/host/init.c
977
tasklet_init(&mhi_event->task, mhi_ev_task,
drivers/bus/mhi/host/internal.h
252
struct tasklet_struct task;
drivers/bus/mhi/host/main.c
475
tasklet_schedule(&mhi_event->task);
drivers/bus/mhi/host/pm.c
527
tasklet_kill(&mhi_event->task);
drivers/bus/mhi/host/pm.c
692
tasklet_kill(&mhi_event->task);
drivers/connector/cn_proc.c
117
void proc_fork_connector(struct task_struct *task)
drivers/connector/cn_proc.c
133
parent = rcu_dereference(task->real_parent);
drivers/connector/cn_proc.c
137
ev->event_data.fork.child_pid = task->pid;
drivers/connector/cn_proc.c
138
ev->event_data.fork.child_tgid = task->tgid;
drivers/connector/cn_proc.c
147
void proc_exec_connector(struct task_struct *task)
drivers/connector/cn_proc.c
161
ev->event_data.exec.process_pid = task->pid;
drivers/connector/cn_proc.c
162
ev->event_data.exec.process_tgid = task->tgid;
drivers/connector/cn_proc.c
171
void proc_id_connector(struct task_struct *task, int which_id)
drivers/connector/cn_proc.c
185
ev->event_data.id.process_pid = task->pid;
drivers/connector/cn_proc.c
186
ev->event_data.id.process_tgid = task->tgid;
drivers/connector/cn_proc.c
188
cred = __task_cred(task);
drivers/connector/cn_proc.c
209
void proc_sid_connector(struct task_struct *task)
drivers/connector/cn_proc.c
223
ev->event_data.sid.process_pid = task->pid;
drivers/connector/cn_proc.c
224
ev->event_data.sid.process_tgid = task->tgid;
drivers/connector/cn_proc.c
233
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
drivers/connector/cn_proc.c
247
ev->event_data.ptrace.process_pid = task->pid;
drivers/connector/cn_proc.c
248
ev->event_data.ptrace.process_tgid = task->tgid;
drivers/connector/cn_proc.c
265
void proc_comm_connector(struct task_struct *task)
drivers/connector/cn_proc.c
279
ev->event_data.comm.process_pid = task->pid;
drivers/connector/cn_proc.c
280
ev->event_data.comm.process_tgid = task->tgid;
drivers/connector/cn_proc.c
281
get_task_comm(ev->event_data.comm.comm, task);
drivers/connector/cn_proc.c
290
void proc_coredump_connector(struct task_struct *task)
drivers/connector/cn_proc.c
305
ev->event_data.coredump.process_pid = task->pid;
drivers/connector/cn_proc.c
306
ev->event_data.coredump.process_tgid = task->tgid;
drivers/connector/cn_proc.c
309
if (pid_alive(task)) {
drivers/connector/cn_proc.c
310
parent = rcu_dereference(task->real_parent);
drivers/connector/cn_proc.c
323
void proc_exit_connector(struct task_struct *task)
drivers/connector/cn_proc.c
338
ev->event_data.exit.process_pid = task->pid;
drivers/connector/cn_proc.c
339
ev->event_data.exit.process_tgid = task->tgid;
drivers/connector/cn_proc.c
340
ev->event_data.exit.exit_code = task->exit_code;
drivers/connector/cn_proc.c
341
ev->event_data.exit.exit_signal = task->exit_signal;
drivers/connector/cn_proc.c
344
if (pid_alive(task)) {
drivers/connector/cn_proc.c
345
parent = rcu_dereference(task->real_parent);
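A sketch of the parent-lookup pattern the connector code above relies on: task->real_parent is RCU-protected, and pid_alive() guards against a task already detached in release_task(). example_parent_pid() is hypothetical.

    #include <linux/rcupdate.h>
    #include <linux/sched.h>
    #include <linux/sched/signal.h>

    static pid_t example_parent_pid(struct task_struct *task)
    {
        pid_t ppid = 0;

        rcu_read_lock();
        if (pid_alive(task))
            ppid = rcu_dereference(task->real_parent)->pid;
        rcu_read_unlock();
        return ppid;
    }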
drivers/cpufreq/cppc_cpufreq.c
237
ret = sched_setattr_nocheck(kworker_fie->task, &attr);
drivers/crypto/axis/artpec6_crypto.c
2079
tasklet_schedule(&ac->task);
drivers/crypto/axis/artpec6_crypto.c
2615
tasklet_schedule(&ac->task);
drivers/crypto/axis/artpec6_crypto.c
264
struct tasklet_struct task;
drivers/crypto/axis/artpec6_crypto.c
2894
tasklet_init(&ac->task, artpec6_crypto_task,
drivers/crypto/axis/artpec6_crypto.c
2960
tasklet_disable(&ac->task);
drivers/crypto/axis/artpec6_crypto.c
2962
tasklet_kill(&ac->task);
drivers/crypto/tegra/tegra-se-aes.c
1481
if (!(rctx->task & SHA_UPDATE)) {
drivers/crypto/tegra/tegra-se-aes.c
1489
if (rctx->task & SHA_FIRST) {
drivers/crypto/tegra/tegra-se-aes.c
1490
rctx->task &= ~SHA_FIRST;
drivers/crypto/tegra/tegra-se-aes.c
1553
rctx->task |= SHA_FIRST;
drivers/crypto/tegra/tegra-se-aes.c
1633
if (!(rctx->task & SHA_FIRST))
drivers/crypto/tegra/tegra-se-aes.c
1680
if (!(rctx->task & SHA_FIRST))
drivers/crypto/tegra/tegra-se-aes.c
1715
if (rctx->task & SHA_INIT) {
drivers/crypto/tegra/tegra-se-aes.c
1720
rctx->task &= ~SHA_INIT;
drivers/crypto/tegra/tegra-se-aes.c
1730
if (rctx->task & SHA_UPDATE) {
drivers/crypto/tegra/tegra-se-aes.c
1735
rctx->task &= ~SHA_UPDATE;
drivers/crypto/tegra/tegra-se-aes.c
1738
if (rctx->task & SHA_FINAL) {
drivers/crypto/tegra/tegra-se-aes.c
1743
rctx->task &= ~SHA_FINAL;
drivers/crypto/tegra/tegra-se-aes.c
1843
rctx->task = SHA_INIT;
drivers/crypto/tegra/tegra-se-aes.c
1854
rctx->task |= SHA_UPDATE;
drivers/crypto/tegra/tegra-se-aes.c
1865
rctx->task |= SHA_FINAL;
drivers/crypto/tegra/tegra-se-aes.c
1876
rctx->task |= SHA_UPDATE | SHA_FINAL;
drivers/crypto/tegra/tegra-se-aes.c
1887
rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
drivers/crypto/tegra/tegra-se-aes.c
85
unsigned int task;
drivers/crypto/tegra/tegra-se-hash.c
285
if (rctx->task & SHA_UPDATE) {
drivers/crypto/tegra/tegra-se-hash.c
304
if (rctx->task & SHA_FIRST) {
drivers/crypto/tegra/tegra-se-hash.c
306
rctx->task &= ~SHA_FIRST;
drivers/crypto/tegra/tegra-se-hash.c
321
if (rctx->task & SHA_UPDATE) {
drivers/crypto/tegra/tegra-se-hash.c
365
rctx->task |= SHA_FIRST;
drivers/crypto/tegra/tegra-se-hash.c
42
unsigned int task;
drivers/crypto/tegra/tegra-se-hash.c
524
if (rctx->task & SHA_INIT) {
drivers/crypto/tegra/tegra-se-hash.c
529
rctx->task &= ~SHA_INIT;
drivers/crypto/tegra/tegra-se-hash.c
532
if (rctx->task & SHA_UPDATE) {
drivers/crypto/tegra/tegra-se-hash.c
537
rctx->task &= ~SHA_UPDATE;
drivers/crypto/tegra/tegra-se-hash.c
540
if (rctx->task & SHA_FINAL) {
drivers/crypto/tegra/tegra-se-hash.c
545
rctx->task &= ~SHA_FINAL;
drivers/crypto/tegra/tegra-se-hash.c
658
rctx->task = SHA_INIT;
drivers/crypto/tegra/tegra-se-hash.c
672
rctx->task |= SHA_UPDATE;
drivers/crypto/tegra/tegra-se-hash.c
686
rctx->task |= SHA_FINAL;
drivers/crypto/tegra/tegra-se-hash.c
700
rctx->task |= SHA_UPDATE | SHA_FINAL;
drivers/crypto/tegra/tegra-se-hash.c
714
rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
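A sketch of the request-state bitmask pattern above: entry points OR the phases to run into rctx->task, and the engine callback consumes and clears them one phase at a time. The flag values here are hypothetical; the driver's real SHA_* definitions may differ.

    #include <linux/bits.h>

    #define SHA_INIT    BIT(0)  /* hypothetical values */
    #define SHA_UPDATE  BIT(1)
    #define SHA_FINAL   BIT(2)

    struct example_req { unsigned int task; };

    static void example_do_one_req(struct example_req *rctx)
    {
        if (rctx->task & SHA_INIT)
            rctx->task &= ~SHA_INIT;    /* program initial state */
        if (rctx->task & SHA_UPDATE)
            rctx->task &= ~SHA_UPDATE;  /* hash the data */
        if (rctx->task & SHA_FINAL)
            rctx->task &= ~SHA_FINAL;   /* read out the digest */
    }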
drivers/dma-buf/dma-fence.c
775
struct task_struct *task;
drivers/dma-buf/dma-fence.c
784
wake_up_state(wait->task, TASK_NORMAL);
drivers/dma-buf/dma-fence.c
822
cb.task = current;
drivers/dma-buf/dma-fence.c
917
cb[i].task = current;
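A hedged re-creation of the wait pattern above: stash current in a callback structure, let the fence callback wake it, and sleep until the fence signals (no timeout or signal handling here, unlike the real default wait).

    #include <linux/dma-fence.h>
    #include <linux/kernel.h>
    #include <linux/sched.h>

    struct example_wait_cb {
        struct dma_fence_cb base;
        struct task_struct *task;
    };

    static void example_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
    {
        struct example_wait_cb *wait =
            container_of(cb, struct example_wait_cb, base);

        wake_up_state(wait->task, TASK_NORMAL);
    }

    static void example_wait_fence(struct dma_fence *fence)
    {
        struct example_wait_cb cb = { .task = current };

        if (dma_fence_add_callback(fence, &cb.base, example_wake))
            return;     /* -ENOENT: already signaled */

        for (;;) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (dma_fence_is_signaled(fence))
                break;
            schedule();
        }
        __set_current_state(TASK_RUNNING);
        dma_fence_remove_callback(fence, &cb.base);
    }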
drivers/dma-buf/st-dma-fence.c
406
struct task_struct *task;
drivers/dma-buf/st-dma-fence.c
500
t[i].task = kthread_run(thread_signal_callback, &t[i],
drivers/dma-buf/st-dma-fence.c
502
if (IS_ERR(t[i].task)) {
drivers/dma-buf/st-dma-fence.c
503
ret = PTR_ERR(t[i].task);
drivers/dma-buf/st-dma-fence.c
505
kthread_stop_put(t[i].task);
drivers/dma-buf/st-dma-fence.c
508
get_task_struct(t[i].task);
drivers/dma-buf/st-dma-fence.c
516
err = kthread_stop_put(t[i].task);
drivers/dma/bcm2835-dma.c
832
tasklet_kill(&c->vc.task);
drivers/dma/bestcomm/bestcomm.c
137
bcom_load_image(int task, u32 *task_image)
drivers/dma/bestcomm/bestcomm.c
151
if ((task < 0) || (task >= BCOM_MAX_TASKS)) {
drivers/dma/bestcomm/bestcomm.c
153
": Trying to load invalid task %d\n", task);
drivers/dma/bestcomm/bestcomm.c
158
tdt = &bcom_eng->tdt[task];
drivers/dma/bestcomm/bestcomm.c
161
desc = bcom_task_desc(task);
drivers/dma/bestcomm/bestcomm.c
162
if (hdr->desc_size != bcom_task_num_descs(task)) {
drivers/dma/bestcomm/bestcomm.c
166
task,
drivers/dma/bestcomm/bestcomm.c
168
bcom_task_num_descs(task));
drivers/dma/bestcomm/bestcomm.c
182
var = bcom_task_var(task);
drivers/dma/bestcomm/bestcomm.c
183
inc = bcom_task_inc(task);
drivers/dma/bestcomm/bestcomm.c
202
bcom_set_initiator(int task, int initiator)
drivers/dma/bestcomm/bestcomm.c
209
bcom_set_tcr_initiator(task, initiator);
drivers/dma/bestcomm/bestcomm.c
215
desc = bcom_task_desc(task);
drivers/dma/bestcomm/bestcomm.c
217
num_descs = bcom_task_num_descs(task);
drivers/dma/bestcomm/bestcomm.c
276
int task;
drivers/dma/bestcomm/bestcomm.c
311
for (task=0; task<BCOM_MAX_TASKS; task++)
drivers/dma/bestcomm/bestcomm.c
313
out_be16(&bcom_eng->regs->tcr[task], 0);
drivers/dma/bestcomm/bestcomm.c
314
out_8(&bcom_eng->regs->ipr[task], 0);
drivers/dma/bestcomm/bestcomm.c
316
bcom_eng->tdt[task].context = ctx_pa;
drivers/dma/bestcomm/bestcomm.c
317
bcom_eng->tdt[task].var = var_pa;
drivers/dma/bestcomm/bestcomm.c
318
bcom_eng->tdt[task].fdt = fdt_pa;
drivers/dma/bestcomm/bestcomm.c
342
int task;
drivers/dma/bestcomm/bestcomm.c
345
for (task=0; task<BCOM_MAX_TASKS; task++)
drivers/dma/bestcomm/bestcomm.c
347
out_be16(&bcom_eng->regs->tcr[task], 0);
drivers/dma/bestcomm/bestcomm.c
348
out_8(&bcom_eng->regs->ipr[task], 0);
drivers/dma/dma-axi-dmac.c
1059
static void axi_dmac_tasklet_kill(void *task)
drivers/dma/dma-axi-dmac.c
1061
tasklet_kill(task);
drivers/dma/dma-axi-dmac.c
1165
&dmac->chan.vchan.task);
drivers/dma/dma-jz4780.c
1022
tasklet_kill(&jzdma->chan[i].vchan.task);
drivers/dma/dmatest.c
1002
thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
drivers/dma/dmatest.c
1004
if (IS_ERR(thread->task)) {
drivers/dma/dmatest.c
1012
get_task_struct(thread->task);
drivers/dma/dmatest.c
1142
wake_up_process(thread->task);
drivers/dma/dmatest.c
232
struct task_struct *task;
drivers/dma/dmatest.c
955
ret = kthread_stop(thread->task);
drivers/dma/dmatest.c
957
thread->task->comm, ret);
drivers/dma/dmatest.c
959
put_task_struct(thread->task);
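A sketch of the kthread lifecycle above: create the thread stopped, pin its task_struct so kthread_stop() is safe even if the thread has already exited, then start it. The helper names are hypothetical.

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched/task.h>

    static struct task_struct *example_start_worker(int (*fn)(void *),
                                                    void *arg)
    {
        struct task_struct *task = kthread_create(fn, arg, "example-worker");

        if (IS_ERR(task))
            return task;
        get_task_struct(task);  /* paired with put_task_struct() at stop */
        wake_up_process(task);
        return task;
    }

    static void example_stop_worker(struct task_struct *task)
    {
        kthread_stop(task);
        put_task_struct(task);
    }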
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
1650
tasklet_kill(&chan->vc.task);
drivers/dma/dw-edma/dw-edma-core.c
1022
tasklet_kill(&chan->vc.task);
drivers/dma/fsl-edma-common.c
919
tasklet_kill(&chan->vchan.task);
drivers/dma/fsl-qdma.c
1267
tasklet_kill(&chan->vchan.task);
drivers/dma/hisi_dma.c
723
tasklet_kill(&hdma_dev->chan[i].vc.task);
drivers/dma/hsu/hsu.c
503
tasklet_kill(&hsuc->vchan.task);
drivers/dma/idma64.c
627
tasklet_kill(&idma64c->vchan.task);
drivers/dma/idxd/cdev.c
43
struct task_struct *task;
drivers/dma/img-mdc-dma.c
1034
tasklet_kill(&mchan->vc.task);
drivers/dma/imx-sdma.c
2430
tasklet_kill(&sdmac->vc.task);
drivers/dma/k3dma.c
101
struct tasklet_struct task;
drivers/dma/k3dma.c
255
tasklet_schedule(&d->task);
drivers/dma/k3dma.c
300
struct k3_dma_dev *d = from_tasklet(d, t, task);
drivers/dma/k3dma.c
436
tasklet_schedule(&d->task);
drivers/dma/k3dma.c
954
tasklet_setup(&d->task, k3_dma_tasklet);
drivers/dma/k3dma.c
979
tasklet_kill(&c->vc.task);
drivers/dma/k3dma.c
981
tasklet_kill(&d->task);
drivers/dma/loongson1-apb-dma.c
555
tasklet_kill(&chan->vc.task);
drivers/dma/mediatek/mtk-cqdma.c
898
tasklet_kill(&vc->vc.task);
drivers/dma/mediatek/mtk-hsdma.c
1023
tasklet_kill(&vc->vc.task);
drivers/dma/mediatek/mtk-uart-apdma.c
315
tasklet_kill(&c->vc.task);
drivers/dma/mediatek/mtk-uart-apdma.c
466
tasklet_kill(&c->vc.task);
drivers/dma/owl-dma.c
1058
tasklet_kill(&vchan->vc.task);
drivers/dma/pl330.c
1577
tasklet_schedule(&pch->task);
drivers/dma/pl330.c
2063
tasklet_schedule(&pch->task);
drivers/dma/pl330.c
2070
struct dma_pl330_chan *pch = from_tasklet(pch, t, task);
drivers/dma/pl330.c
2176
tasklet_setup(&pch->task, pl330_tasklet);
drivers/dma/pl330.c
2357
tasklet_kill(&pch->task);
drivers/dma/pl330.c
2493
pl330_tasklet(&pch->task);
drivers/dma/pl330.c
421
struct tasklet_struct task;
drivers/dma/pxa_dma.c
1218
tasklet_kill(&c->vc.task);
drivers/dma/qcom/bam_dma.c
1101
struct bam_device *bdev = from_tasklet(bdev, t, task);
drivers/dma/qcom/bam_dma.c
1289
tasklet_setup(&bdev->task, dma_tasklet);
drivers/dma/qcom/bam_dma.c
1360
tasklet_kill(&bdev->channels[i].vc.task);
drivers/dma/qcom/bam_dma.c
1362
tasklet_kill(&bdev->task);
drivers/dma/qcom/bam_dma.c
1386
tasklet_kill(&bdev->channels[i].vc.task);
drivers/dma/qcom/bam_dma.c
1396
tasklet_kill(&bdev->task);
drivers/dma/qcom/bam_dma.c
401
struct tasklet_struct task;
drivers/dma/qcom/bam_dma.c
898
tasklet_schedule(&bdev->task);
drivers/dma/qcom/hidma.c
221
struct hidma_dev *dmadev = from_tasklet(dmadev, t, task);
drivers/dma/qcom/hidma.c
252
tasklet_schedule(&dmadev->task);
drivers/dma/qcom/hidma.c
881
tasklet_setup(&dmadev->task, hidma_issue_task);
drivers/dma/qcom/hidma.c
928
tasklet_kill(&dmadev->task);
drivers/dma/qcom/hidma.h
132
struct tasklet_struct task;
drivers/dma/qcom/hidma.h
72
struct tasklet_struct task; /* task delivering notifications */
drivers/dma/qcom/hidma_ll.c
178
struct hidma_lldev *lldev = from_tasklet(lldev, t, task);
drivers/dma/qcom/hidma_ll.c
226
tasklet_schedule(&lldev->task);
drivers/dma/qcom/hidma_ll.c
795
tasklet_setup(&lldev->task, hidma_ll_tre_complete);
drivers/dma/qcom/hidma_ll.c
816
tasklet_kill(&lldev->task);
drivers/dma/qcom/qcom_adm.c
921
tasklet_kill(&adev->channels[i].vc.task);
drivers/dma/sa11x0-dma.c
121
struct tasklet_struct task;
drivers/dma/sa11x0-dma.c
235
tasklet_schedule(&p->dev->task);
drivers/dma/sa11x0-dma.c
328
struct sa11x0_dma_dev *d = from_tasklet(d, t, task);
drivers/dma/sa11x0-dma.c
512
tasklet_schedule(&d->task);
drivers/dma/sa11x0-dma.c
785
tasklet_schedule(&d->task);
drivers/dma/sa11x0-dma.c
894
tasklet_kill(&c->vc.task);
drivers/dma/sa11x0-dma.c
929
tasklet_setup(&d->task, sa11x0_dma_tasklet);
drivers/dma/sa11x0-dma.c
977
tasklet_kill(&d->task);
drivers/dma/sa11x0-dma.c
995
tasklet_kill(&d->task);
drivers/dma/sf-pdma/sf-pdma.c
606
tasklet_kill(&ch->vchan.task);
drivers/dma/sprd-dma.c
1256
tasklet_kill(&c->vc.task);
drivers/dma/st_fdma.c
736
tasklet_kill(&fchan->vchan.task);
drivers/dma/sun6i-dma.c
1028
tasklet_schedule(&sdev->task);
drivers/dma/sun6i-dma.c
1089
tasklet_kill(&sdev->task);
drivers/dma/sun6i-dma.c
1100
tasklet_kill(&vchan->vc.task);
drivers/dma/sun6i-dma.c
1421
tasklet_setup(&sdc->task, sun6i_dma_tasklet);
drivers/dma/sun6i-dma.c
204
struct tasklet_struct task;
drivers/dma/sun6i-dma.c
480
struct sun6i_dma_dev *sdev = from_tasklet(sdev, t, task);
drivers/dma/sun6i-dma.c
579
tasklet_schedule(&sdev->task);
drivers/dma/tegra186-gpc-dma.c
1281
tasklet_kill(&tdc->vc.task);
drivers/dma/tegra210-adma.c
803
tasklet_kill(&tdc->vc.task);
drivers/dma/ti/edma.c
2563
tasklet_kill(&echan->vchan.task);
drivers/dma/ti/k3-udma.c
4050
struct virt_dma_chan *vc = from_tasklet(vc, t, task);
drivers/dma/ti/k3-udma.c
4115
tasklet_kill(&uc->vc.task);
drivers/dma/ti/k3-udma.c
5631
tasklet_setup(&uc->vc.task, udma_vchan_complete);
drivers/dma/ti/omap-dma.c
1524
tasklet_kill(&c->vc.task);
drivers/dma/virt-dma.c
134
tasklet_setup(&vc->task, vchan_complete);
drivers/dma/virt-dma.c
85
struct virt_dma_chan *vc = from_tasklet(vc, t, task);
drivers/dma/virt-dma.h
109
tasklet_schedule(&vc->task);
drivers/dma/virt-dma.h
140
tasklet_schedule(&vc->task);
drivers/dma/virt-dma.h
226
tasklet_kill(&vc->task);
drivers/dma/virt-dma.h
24
struct tasklet_struct task;
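A sketch of the tasklet lifecycle that virt-dma and the DMA drivers above follow: tasklet_setup() at channel init, tasklet_schedule() from the IRQ handler, tasklet_kill() before the channel is freed, with from_tasklet() recovering the container in the callback. The example_* names are hypothetical.

    #include <linux/interrupt.h>

    struct example_chan {
        struct tasklet_struct task;     /* completion bottom half */
    };

    static void example_complete(struct tasklet_struct *t)
    {
        struct example_chan *c = from_tasklet(c, t, task);

        (void)c;        /* run descriptor callbacks here */
    }

    static void example_chan_init(struct example_chan *c)
    {
        tasklet_setup(&c->task, example_complete);
    }

    static void example_chan_irq(struct example_chan *c)
    {
        tasklet_schedule(&c->task);
    }

    static void example_chan_free(struct example_chan *c)
    {
        tasklet_kill(&c->task);
    }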
drivers/firmware/stratix10-svc.c
1719
if (!chan->task) {
drivers/firmware/stratix10-svc.c
1720
struct task_struct *task;
drivers/firmware/stratix10-svc.c
1722
task = kthread_run_on_cpu(svc_normal_to_secure_thread,
drivers/firmware/stratix10-svc.c
1725
if (IS_ERR(task)) {
drivers/firmware/stratix10-svc.c
1733
if (chan->task) {
drivers/firmware/stratix10-svc.c
1736
kthread_stop(task);
drivers/firmware/stratix10-svc.c
1738
chan->task = task;
drivers/firmware/stratix10-svc.c
1811
if (chan->task) {
drivers/firmware/stratix10-svc.c
1814
kthread_stop(chan->task);
drivers/firmware/stratix10-svc.c
1815
chan->task = NULL;
drivers/firmware/stratix10-svc.c
2039
if (ctrl->chans[i].task) {
drivers/firmware/stratix10-svc.c
2040
kthread_stop(ctrl->chans[i].task);
drivers/firmware/stratix10-svc.c
2041
ctrl->chans[i].task = NULL;
drivers/firmware/stratix10-svc.c
292
struct task_struct *task;
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1765
seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name);
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
222
if (coredump->reset_task_info.task.pid)
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
225
coredump->reset_task_info.task.pid);
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
6614
ti ? &ti->task : NULL);
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1308
struct task_struct *task;
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1321
task = pid_task(pid, PIDTYPE_TGID);
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1323
task ? task->comm : "<unknown>");
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
300
dev_warn(adev->dev, "pid %d\n", ti->task.pid);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
130
info = &ti->task;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
260
((blk_info.task.pid != task_pid_nr(current)) ||
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
261
strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
264
(blk_info.task.pid == con->init_task_pid) &&
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
265
!strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2557
if (vm->task_info->task.pid == current->pid)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2560
vm->task_info->task.pid = current->pid;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2561
get_task_comm(vm->task_info->task.comm, current);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2839
ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
3217
task_info->task.comm, task_info->task.pid);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
241
struct drm_wedge_task_info task;
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
418
memcpy(&info->task, &vres->task, sizeof(vres->task));
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
564
vres->task.pid = task_pid_nr(current);
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
565
get_task_comm(vres->task.comm, current);
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
647
memset(&vres->task, 0, sizeof(vres->task));
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
49
struct amdgpu_vres_task task;
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
57
struct amdgpu_vres_task task;
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
2190
task_info->task.comm, task_info->task.pid);
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
1885
task_info->task.comm, task_info->task.pid);
drivers/gpu/drm/amd/amdkfd/kfd_events.c
1299
ti->process_name, ti->tgid, ti->task.comm, ti->task.pid);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1022
struct task_struct *task = NULL;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1026
task = current;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1027
get_task_struct(task);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1029
task = get_pid_task(pid, PIDTYPE_PID);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1032
if (task) {
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1033
p = find_process(task, true);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1034
put_task_struct(task);
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
256
if (task_info->task.pid)
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
258
task_info->task.pid, task_info->task.comm));
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
362
KFD_EVENT_FMT_PROCESS(task_info->task.pid,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
363
task_info->task.comm));
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
334
struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
336
task->type = type;
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
337
task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
338
task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
349
task->addr.low = lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
350
task->addr.high = upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
351
task->size_bytes = smu8_smu->scratch_buffer[i].data_size;
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
371
struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
373
task->type = TASK_TYPE_UCODE_LOAD;
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
374
task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
375
task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
386
task->addr.low = lower_32_bits(smu8_smu->driver_buffer[i].mc_addr);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
387
task->addr.high = upper_32_bits(smu8_smu->driver_buffer[i].mc_addr);
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
388
task->size_bytes = smu8_smu->driver_buffer[i].data_size;
drivers/gpu/drm/drm_debugfs.c
105
task = pid_task(pid, PIDTYPE_TGID);
drivers/gpu/drm/drm_debugfs.c
106
uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
drivers/gpu/drm/drm_debugfs.c
108
task ? task->comm : "<unknown>",
drivers/gpu/drm/drm_debugfs.c
317
struct task_struct *task;
drivers/gpu/drm/drm_debugfs.c
325
task = pid_task(pid, PIDTYPE_TGID);
drivers/gpu/drm/drm_debugfs.c
327
seq_printf(m, "pid: %d\n", task ? task->pid : 0);
drivers/gpu/drm/drm_debugfs.c
328
seq_printf(m, "comm: %s\n", task ? task->comm : "Unset");
drivers/gpu/drm/drm_debugfs.c
99
struct task_struct *task;
drivers/gpu/drm/drm_file.c
1020
struct task_struct *task;
drivers/gpu/drm/drm_file.c
1030
task = pid_task(pid, PIDTYPE_TGID);
drivers/gpu/drm/drm_file.c
1033
task ? task->comm : "Unset",
drivers/gpu/drm/drm_file.c
1034
task ? task->pid : 0, file_priv->client_id,
drivers/gpu/drm/drm_flip_work.c
109
struct drm_flip_task *task, *tmp;
drivers/gpu/drm/drm_flip_work.c
120
list_for_each_entry_safe(task, tmp, &tasks, node) {
drivers/gpu/drm/drm_flip_work.c
121
work->func(work, task->data);
drivers/gpu/drm/drm_flip_work.c
122
kfree(task);
drivers/gpu/drm/drm_flip_work.c
38
struct drm_flip_task *task;
drivers/gpu/drm/drm_flip_work.c
40
task = kzalloc_obj(*task, flags);
drivers/gpu/drm/drm_flip_work.c
41
if (task)
drivers/gpu/drm/drm_flip_work.c
42
task->data = data;
drivers/gpu/drm/drm_flip_work.c
44
return task;
drivers/gpu/drm/drm_flip_work.c
47
static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task)
drivers/gpu/drm/drm_flip_work.c
52
list_add_tail(&task->node, &work->queued);
drivers/gpu/drm/drm_flip_work.c
66
struct drm_flip_task *task;
drivers/gpu/drm/drm_flip_work.c
68
task = drm_flip_work_allocate_task(val,
drivers/gpu/drm/drm_flip_work.c
70
if (task) {
drivers/gpu/drm/drm_flip_work.c
71
drm_flip_work_queue_task(work, task);
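A usage sketch for the flip-work helpers above, under the assumption of a typical page-flip consumer: values are queued from atomic context and the batch is committed to a workqueue, which invokes the function once per queued value. The example_* names are hypothetical.

    #include <drm/drm_flip_work.h>
    #include <linux/workqueue.h>

    struct example_crtc {
        struct drm_flip_work unref_work;
    };

    /* Runs once per queued value, on the committed workqueue. */
    static void example_unref_fb(struct drm_flip_work *work, void *val)
    {
        /* e.g. drm_framebuffer_put(val); */
    }

    static void example_init(struct example_crtc *c)
    {
        drm_flip_work_init(&c->unref_work, "fb unref", example_unref_fb);
    }

    /* From the flip-done path: queue the old fb, then commit the batch. */
    static void example_flip_done(struct example_crtc *c,
                                  struct workqueue_struct *wq, void *old_fb)
    {
        drm_flip_work_queue(&c->unref_work, old_fb);
        drm_flip_work_commit(&c->unref_work, wq);
    }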
drivers/gpu/drm/drm_syncobj.c
1008
wake_up_process(wait->task);
drivers/gpu/drm/drm_syncobj.c
1029
wake_up_process(wait->task);
drivers/gpu/drm/drm_syncobj.c
1079
entries[i].task = current;
drivers/gpu/drm/drm_syncobj.c
217
struct task_struct *task;
drivers/gpu/drm/drm_syncobj.c
484
wait.task = current;
drivers/gpu/drm/drm_vblank_work.c
290
sched_set_fifo(worker->task);
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1491
struct task_struct *task;
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1496
task = get_pid_task(submit->pid, PIDTYPE_PID);
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1497
if (task) {
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1498
comm = kstrdup(task->comm, GFP_KERNEL);
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1499
cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1500
put_task_struct(task);
drivers/gpu/drm/exynos/exynos_drm_fimc.c
101
struct exynos_drm_ipp_task *task;
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1082
struct exynos_drm_ipp_task *task)
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1094
ctx->task = task;
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1096
fimc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier);
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1097
fimc_src_set_size(ctx, &task->src);
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1099
fimc_src_set_addr(ctx, &task->src);
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1100
fimc_dst_set_fmt(ctx, task->dst.buf.fourcc, task->dst.buf.modifier);
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1101
fimc_dst_set_transf(ctx, task->transform.rotation);
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1102
fimc_dst_set_size(ctx, &task->dst);
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1103
fimc_dst_set_addr(ctx, &task->dst);
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1104
fimc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect);
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1111
struct exynos_drm_ipp_task *task)
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1118
if (ctx->task) {
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1119
struct exynos_drm_ipp_task *task = ctx->task;
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1121
ctx->task = NULL;
drivers/gpu/drm/exynos/exynos_drm_fimc.c
1124
exynos_drm_ipp_task_done(task, -EIO);
drivers/gpu/drm/exynos/exynos_drm_fimc.c
966
if (ctx->task) {
drivers/gpu/drm/exynos/exynos_drm_fimc.c
967
struct exynos_drm_ipp_task *task = ctx->task;
drivers/gpu/drm/exynos/exynos_drm_fimc.c
969
ctx->task = NULL;
drivers/gpu/drm/exynos/exynos_drm_fimc.c
972
exynos_drm_ipp_task_done(task, 0);
drivers/gpu/drm/exynos/exynos_drm_gsc.c
102
struct exynos_drm_ipp_task *task;
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1052
if (ctx->task) {
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1053
struct exynos_drm_ipp_task *task = ctx->task;
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1055
ctx->task = NULL;
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1058
exynos_drm_ipp_task_done(task, err);
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1115
struct exynos_drm_ipp_task *task)
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1126
ctx->task = task;
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1131
ctx->task = NULL;
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1135
gsc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier);
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1136
gsc_src_set_transf(ctx, task->transform.rotation);
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1137
gsc_src_set_size(ctx, &task->src);
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1138
gsc_src_set_addr(ctx, 0, &task->src);
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1139
gsc_dst_set_fmt(ctx, task->dst.buf.fourcc, task->dst.buf.modifier);
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1140
gsc_dst_set_size(ctx, &task->dst);
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1141
gsc_dst_set_addr(ctx, 0, &task->dst);
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1142
gsc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect);
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1149
struct exynos_drm_ipp_task *task)
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1155
if (ctx->task) {
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1156
struct exynos_drm_ipp_task *task = ctx->task;
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1158
ctx->task = NULL;
drivers/gpu/drm/exynos/exynos_drm_gsc.c
1161
exynos_drm_ipp_task_done(task, -EIO);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
261
struct exynos_drm_ipp_task *task;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
263
task = kzalloc_obj(*task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
264
if (!task)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
267
task->dev = ipp->dev;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
268
task->ipp = ipp;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
271
task->src.rect.w = task->dst.rect.w = UINT_MAX;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
272
task->src.rect.h = task->dst.rect.h = UINT_MAX;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
273
task->transform.rotation = DRM_MODE_ROTATE_0;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
275
DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %p\n", task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
277
return task;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
314
static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
334
if (copy_from_user((void *)task + map[i].offset, params,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
342
DRM_DEV_DEBUG_DRIVER(task->dev,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
344
task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
396
struct exynos_drm_ipp_task *task)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
398
DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %p\n", task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
400
exynos_drm_ipp_task_release_buf(&task->src);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
401
exynos_drm_ipp_task_release_buf(&task->dst);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
402
if (task->event)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
403
drm_event_cancel_free(ipp->drm_dev, &task->event->base);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
404
kfree(task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
549
static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
558
fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
562
DRM_DEV_DEBUG_DRIVER(task->dev,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
564
task, buf == src ? "src" : "dst");
drivers/gpu/drm/exynos/exynos_drm_ipp.c
603
static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
605
struct exynos_drm_ipp *ipp = task->ipp;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
606
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
607
unsigned int rotation = task->transform.rotation;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
613
DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %p\n", task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
628
DRM_DEV_DEBUG_DRIVER(task->dev,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
630
task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
646
DRM_DEV_DEBUG_DRIVER(task->dev, "Task %p: hw capabilities exceeded\n",
drivers/gpu/drm/exynos/exynos_drm_ipp.c
647
task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
651
ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
655
ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
660
task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
665
static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
668
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
671
DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %p\n",
drivers/gpu/drm/exynos/exynos_drm_ipp.c
672
task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
676
DRM_DEV_DEBUG_DRIVER(task->dev,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
678
task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
683
DRM_DEV_DEBUG_DRIVER(task->dev,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
685
task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
689
DRM_DEV_DEBUG_DRIVER(task->dev, "Task %p: buffers prepared.\n",
drivers/gpu/drm/exynos/exynos_drm_ipp.c
690
task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
696
static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
710
ret = drm_event_reserve_init(task->ipp->drm_dev, file_priv, &e->base,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
715
task->event = e;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
722
static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
727
task->event->event.tv_sec = now.tv_sec;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
728
task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
729
task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
731
drm_send_event(task->ipp->drm_dev, &task->event->base);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
734
static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
736
int ret = task->ret;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
738
if (ret == 0 && task->event) {
drivers/gpu/drm/exynos/exynos_drm_ipp.c
739
exynos_drm_ipp_event_send(task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
741
task->event = NULL;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
744
exynos_drm_ipp_task_free(task->ipp, task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
750
struct exynos_drm_ipp_task *task = container_of(work,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
753
exynos_drm_ipp_task_cleanup(task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
763
void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
765
struct exynos_drm_ipp *ipp = task->ipp;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
768
DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %p done: %d\n",
drivers/gpu/drm/exynos/exynos_drm_ipp.c
769
ipp->id, task, ret);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
772
if (ipp->task == task)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
773
ipp->task = NULL;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
774
task->flags |= DRM_EXYNOS_IPP_TASK_DONE;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
775
task->ret = ret;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
781
if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) {
drivers/gpu/drm/exynos/exynos_drm_ipp.c
782
INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
783
schedule_work(&task->cleanup_work);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
789
struct exynos_drm_ipp_task *task;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
798
if (ipp->task || list_empty(&ipp->todo_list)) {
drivers/gpu/drm/exynos/exynos_drm_ipp.c
803
task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
805
list_del_init(&task->head);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
806
ipp->task = task;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
812
task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
814
ret = ipp->funcs->commit(ipp, task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
816
exynos_drm_ipp_task_done(task, ret);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
820
struct exynos_drm_ipp_task *task)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
825
list_add(&task->head, &ipp->todo_list);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
832
struct exynos_drm_ipp_task *task)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
837
if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) {
drivers/gpu/drm/exynos/exynos_drm_ipp.c
839
exynos_drm_ipp_task_cleanup(task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
840
} else if (ipp->task != task) {
drivers/gpu/drm/exynos/exynos_drm_ipp.c
842
list_del_init(&task->head);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
843
exynos_drm_ipp_task_cleanup(task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
849
task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
852
ipp->funcs->abort(ipp, task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
87
WARN_ON(ipp->task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
877
struct exynos_drm_ipp_task *task;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
892
task = exynos_drm_ipp_task_alloc(ipp);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
893
if (!task)
drivers/gpu/drm/exynos/exynos_drm_ipp.c
896
ret = exynos_drm_ipp_task_set(task, arg);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
900
ret = exynos_drm_ipp_task_check(task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
904
ret = exynos_drm_ipp_task_setup_buffers(task, file_priv);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
909
ret = exynos_drm_ipp_event_create(task, file_priv,
drivers/gpu/drm/exynos/exynos_drm_ipp.c
922
ipp->id, task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
924
task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
drivers/gpu/drm/exynos/exynos_drm_ipp.c
925
exynos_drm_ipp_schedule_task(task->ipp, task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
929
ipp->id, task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
930
exynos_drm_ipp_schedule_task(ipp, task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
932
task->flags & DRM_EXYNOS_IPP_TASK_DONE);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
934
exynos_drm_ipp_task_abort(ipp, task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
936
ret = exynos_drm_ipp_task_cleanup(task);
drivers/gpu/drm/exynos/exynos_drm_ipp.c
940
exynos_drm_ipp_task_free(ipp, task);
drivers/gpu/drm/exynos/exynos_drm_ipp.h
134
void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret);
drivers/gpu/drm/exynos/exynos_drm_ipp.h
30
struct exynos_drm_ipp_task *task);
drivers/gpu/drm/exynos/exynos_drm_ipp.h
44
struct exynos_drm_ipp_task *task);
drivers/gpu/drm/exynos/exynos_drm_ipp.h
64
struct exynos_drm_ipp_task *task;
drivers/gpu/drm/exynos/exynos_drm_rotator.c
106
if (rot->task) {
drivers/gpu/drm/exynos/exynos_drm_rotator.c
107
struct exynos_drm_ipp_task *task = rot->task;
drivers/gpu/drm/exynos/exynos_drm_rotator.c
109
rot->task = NULL;
drivers/gpu/drm/exynos/exynos_drm_rotator.c
112
exynos_drm_ipp_task_done(task,
drivers/gpu/drm/exynos/exynos_drm_rotator.c
218
struct exynos_drm_ipp_task *task)
drivers/gpu/drm/exynos/exynos_drm_rotator.c
229
rot->task = task;
drivers/gpu/drm/exynos/exynos_drm_rotator.c
231
rotator_src_set_fmt(rot, task->src.buf.fourcc);
drivers/gpu/drm/exynos/exynos_drm_rotator.c
232
rotator_src_set_buf(rot, &task->src);
drivers/gpu/drm/exynos/exynos_drm_rotator.c
233
rotator_dst_set_transf(rot, task->transform.rotation);
drivers/gpu/drm/exynos/exynos_drm_rotator.c
234
rotator_dst_set_buf(rot, &task->dst);
drivers/gpu/drm/exynos/exynos_drm_rotator.c
65
struct exynos_drm_ipp_task *task;
drivers/gpu/drm/exynos/exynos_drm_scaler.c
358
struct exynos_drm_ipp_task *task)
drivers/gpu/drm/exynos/exynos_drm_scaler.c
363
struct drm_exynos_ipp_task_rect *src_pos = &task->src.rect;
drivers/gpu/drm/exynos/exynos_drm_scaler.c
364
struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
drivers/gpu/drm/exynos/exynos_drm_scaler.c
368
src_fmt = scaler_get_format(task->src.buf.fourcc);
drivers/gpu/drm/exynos/exynos_drm_scaler.c
369
dst_fmt = scaler_get_format(task->dst.buf.fourcc);
drivers/gpu/drm/exynos/exynos_drm_scaler.c
378
scaler->task = task;
drivers/gpu/drm/exynos/exynos_drm_scaler.c
381
scaler, src_fmt->internal_fmt, task->src.buf.modifier != 0);
drivers/gpu/drm/exynos/exynos_drm_scaler.c
382
scaler_set_src_base(scaler, &task->src);
drivers/gpu/drm/exynos/exynos_drm_scaler.c
383
scaler_set_src_span(scaler, &task->src);
drivers/gpu/drm/exynos/exynos_drm_scaler.c
388
scaler_set_dst_base(scaler, &task->dst);
drivers/gpu/drm/exynos/exynos_drm_scaler.c
389
scaler_set_dst_span(scaler, &task->dst);
drivers/gpu/drm/exynos/exynos_drm_scaler.c
393
scaler_set_hv_ratio(scaler, task->transform.rotation, src_pos, dst_pos);
drivers/gpu/drm/exynos/exynos_drm_scaler.c
394
scaler_set_rotation(scaler, task->transform.rotation);
drivers/gpu/drm/exynos/exynos_drm_scaler.c
396
scaler_set_csc(scaler, task->src.format);
drivers/gpu/drm/exynos/exynos_drm_scaler.c
437
if (scaler->task) {
drivers/gpu/drm/exynos/exynos_drm_scaler.c
438
struct exynos_drm_ipp_task *task = scaler->task;
drivers/gpu/drm/exynos/exynos_drm_scaler.c
440
scaler->task = NULL;
drivers/gpu/drm/exynos/exynos_drm_scaler.c
443
exynos_drm_ipp_task_done(task, scaler_task_done(val));
drivers/gpu/drm/exynos/exynos_drm_scaler.c
47
struct exynos_drm_ipp_task *task;
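The fimc, gsc, rotator and scaler entries above repeat a single completion idiom: the interrupt handler snapshots the in-flight task pointer, clears the context field, and only then reports completion, so a late interrupt cannot complete the same task twice. A minimal sketch of that idiom, with struct example_ctx standing in for each driver's own context:

    /* Completion idiom shared by the exynos IPP hardware drivers above;
     * struct example_ctx and err are placeholders, not driver names. */
    static void example_irq_complete(struct example_ctx *ctx, int err)
    {
            if (ctx->task) {
                    struct exynos_drm_ipp_task *task = ctx->task;

                    ctx->task = NULL;       /* detach before reporting */
                    exynos_drm_ipp_task_done(task, err);
            }
    }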
drivers/gpu/drm/i915/i915_gpu_error.c
1454
struct task_struct *task;
drivers/gpu/drm/i915/i915_gpu_error.c
1466
task = pid_task(ctx->pid, PIDTYPE_PID);
drivers/gpu/drm/i915/i915_gpu_error.c
1467
if (task) {
drivers/gpu/drm/i915/i915_gpu_error.c
1468
strscpy(e->comm, task->comm);
drivers/gpu/drm/i915/i915_gpu_error.c
1469
e->pid = task->pid;
drivers/gpu/drm/lima/lima_drv.c
109
struct lima_sched_task *task;
drivers/gpu/drm/lima/lima_drv.c
135
task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
drivers/gpu/drm/lima/lima_drv.c
136
if (!task) {
drivers/gpu/drm/lima/lima_drv.c
141
task->frame = task + 1;
drivers/gpu/drm/lima/lima_drv.c
142
if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) {
drivers/gpu/drm/lima/lima_drv.c
147
err = pipe->task_validate(pipe, task);
drivers/gpu/drm/lima/lima_drv.c
161
submit.task = task;
drivers/gpu/drm/lima/lima_drv.c
173
kmem_cache_free(pipe->task_slab, task);
drivers/gpu/drm/lima/lima_drv.h
40
struct lima_sched_task *task;
drivers/gpu/drm/lima/lima_gem.c
262
static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
drivers/gpu/drm/lima/lima_gem.c
275
return drm_sched_job_add_implicit_dependencies(&task->base,
drivers/gpu/drm/lima/lima_gem.c
288
err = drm_sched_job_add_syncobj_dependency(&submit->task->base, file,
drivers/gpu/drm/lima/lima_gem.c
343
submit->task, submit->ctx->context + submit->pipe,
drivers/gpu/drm/lima/lima_gem.c
354
submit->task, bos[i],
drivers/gpu/drm/lima/lima_gem.c
361
fence = lima_sched_context_queue_task(submit->task);
drivers/gpu/drm/lima/lima_gem.c
385
lima_sched_task_fini(submit->task);
drivers/gpu/drm/lima/lima_gp.c
105
struct lima_sched_task *task)
drivers/gpu/drm/lima/lima_gp.c
107
struct drm_lima_gp_frame *frame = task->frame;
drivers/gpu/drm/lima/lima_gp.c
129
struct lima_sched_task *task)
drivers/gpu/drm/lima/lima_gp.c
132
struct drm_lima_gp_frame *frame = task->frame;
drivers/gpu/drm/lima/lima_gp.c
138
for (i = 0; i < task->num_bos; i++) {
drivers/gpu/drm/lima/lima_gp.c
139
struct lima_bo *bo = task->bos[i];
drivers/gpu/drm/lima/lima_gp.c
142
lima_vm_get_va(task->vm, bo) ==
drivers/gpu/drm/lima/lima_gp.c
147
task->recoverable = true;
drivers/gpu/drm/lima/lima_gp.c
148
task->heap = bo;
drivers/gpu/drm/lima/lima_gp.c
246
struct lima_sched_task *task = pipe->current_task;
drivers/gpu/drm/lima/lima_gp.c
247
struct drm_lima_gp_frame *frame = task->frame;
drivers/gpu/drm/lima/lima_gp.c
25
struct lima_sched_task *task = pipe->current_task;
drivers/gpu/drm/lima/lima_gp.c
253
if (fail_size == task->heap->heap_size) {
drivers/gpu/drm/lima/lima_gp.c
256
ret = lima_heap_alloc(task->heap, task->vm);
drivers/gpu/drm/lima/lima_gp.c
266
f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] + task->heap->heap_size;
drivers/gpu/drm/lima/lima_gp.c
42
if (task)
drivers/gpu/drm/lima/lima_gp.c
43
task->recoverable = false;
drivers/gpu/drm/lima/lima_pp.c
330
struct lima_sched_task *task)
drivers/gpu/drm/lima/lima_pp.c
335
struct drm_lima_m450_pp_frame *f = task->frame;
drivers/gpu/drm/lima/lima_pp.c
342
struct drm_lima_m400_pp_frame *f = task->frame;
drivers/gpu/drm/lima/lima_pp.c
354
struct lima_sched_task *task)
drivers/gpu/drm/lima/lima_pp.c
357
struct drm_lima_m450_pp_frame *frame = task->frame;
drivers/gpu/drm/lima/lima_pp.c
363
atomic_set(&pipe->task, frame->num_pp);
drivers/gpu/drm/lima/lima_pp.c
389
struct drm_lima_m400_pp_frame *frame = task->frame;
drivers/gpu/drm/lima/lima_pp.c
392
atomic_set(&pipe->task, frame->num_pp);
drivers/gpu/drm/lima/lima_pp.c
443
if (atomic_dec_and_test(&pipe->task))
drivers/gpu/drm/lima/lima_pp.c
54
if (atomic_dec_and_test(&pipe->task))
drivers/gpu/drm/lima/lima_pp.c
97
if (atomic_dec_and_test(&pipe->task))
drivers/gpu/drm/lima/lima_sched.c
115
int lima_sched_task_init(struct lima_sched_task *task,
drivers/gpu/drm/lima/lima_sched.c
123
task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
drivers/gpu/drm/lima/lima_sched.c
124
if (!task->bos)
drivers/gpu/drm/lima/lima_sched.c
130
err = drm_sched_job_init(&task->base, &context->base, 1, vm,
drivers/gpu/drm/lima/lima_sched.c
133
kfree(task->bos);
drivers/gpu/drm/lima/lima_sched.c
137
drm_sched_job_arm(&task->base);
drivers/gpu/drm/lima/lima_sched.c
139
task->num_bos = num_bos;
drivers/gpu/drm/lima/lima_sched.c
140
task->vm = lima_vm_get(vm);
drivers/gpu/drm/lima/lima_sched.c
145
void lima_sched_task_fini(struct lima_sched_task *task)
drivers/gpu/drm/lima/lima_sched.c
149
drm_sched_job_cleanup(&task->base);
drivers/gpu/drm/lima/lima_sched.c
151
if (task->bos) {
drivers/gpu/drm/lima/lima_sched.c
152
for (i = 0; i < task->num_bos; i++)
drivers/gpu/drm/lima/lima_sched.c
153
drm_gem_object_put(&task->bos[i]->base.base);
drivers/gpu/drm/lima/lima_sched.c
154
kfree(task->bos);
drivers/gpu/drm/lima/lima_sched.c
157
lima_vm_put(task->vm);
drivers/gpu/drm/lima/lima_sched.c
175
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
drivers/gpu/drm/lima/lima_sched.c
177
struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
drivers/gpu/drm/lima/lima_sched.c
179
trace_lima_task_submit(task);
drivers/gpu/drm/lima/lima_sched.c
180
drm_sched_entity_push_job(&task->base);
drivers/gpu/drm/lima/lima_sched.c
208
struct lima_sched_task *task = to_lima_task(job);
drivers/gpu/drm/lima/lima_sched.c
228
task->fence = &fence->base;
drivers/gpu/drm/lima/lima_sched.c
233
dma_fence_get(task->fence);
drivers/gpu/drm/lima/lima_sched.c
235
pipe->current_task = task;
drivers/gpu/drm/lima/lima_sched.c
256
pipe->current_vm = lima_vm_get(task->vm);
drivers/gpu/drm/lima/lima_sched.c
265
trace_lima_task_run(task);
drivers/gpu/drm/lima/lima_sched.c
268
pipe->task_run(pipe, task);
drivers/gpu/drm/lima/lima_sched.c
270
return task->fence;
drivers/gpu/drm/lima/lima_sched.c
273
static void lima_sched_build_error_task_list(struct lima_sched_task *task)
drivers/gpu/drm/lima/lima_sched.c
276
struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
drivers/gpu/drm/lima/lima_sched.c
281
container_of(task->base.entity,
drivers/gpu/drm/lima/lima_sched.c
309
for (i = 0; i < task->num_bos; i++) {
drivers/gpu/drm/lima/lima_sched.c
310
struct lima_bo *bo = task->bos[i];
drivers/gpu/drm/lima/lima_sched.c
337
memcpy(chunk + 1, task->frame, pipe->frame_size);
drivers/gpu/drm/lima/lima_sched.c
354
for (i = 0; i < task->num_bos; i++) {
drivers/gpu/drm/lima/lima_sched.c
355
struct lima_bo *bo = task->bos[i];
drivers/gpu/drm/lima/lima_sched.c
360
buffer_chunk->va = lima_vm_get_va(task->vm, bo);
drivers/gpu/drm/lima/lima_sched.c
406
struct lima_sched_task *task = to_lima_task(job);
drivers/gpu/drm/lima/lima_sched.c
415
if (dma_fence_is_signaled(task->fence)) {
drivers/gpu/drm/lima/lima_sched.c
432
if (dma_fence_is_signaled(task->fence)) {
drivers/gpu/drm/lima/lima_sched.c
447
drm_sched_stop(&pipe->base, &task->base);
drivers/gpu/drm/lima/lima_sched.c
449
drm_sched_increase_karma(&task->base);
drivers/gpu/drm/lima/lima_sched.c
452
lima_sched_build_error_task_list(task);
drivers/gpu/drm/lima/lima_sched.c
477
struct lima_sched_task *task = to_lima_task(job);
drivers/gpu/drm/lima/lima_sched.c
479
struct lima_vm *vm = task->vm;
drivers/gpu/drm/lima/lima_sched.c
480
struct lima_bo **bos = task->bos;
drivers/gpu/drm/lima/lima_sched.c
483
dma_fence_put(task->fence);
drivers/gpu/drm/lima/lima_sched.c
485
for (i = 0; i < task->num_bos; i++)
drivers/gpu/drm/lima/lima_sched.c
488
lima_sched_task_fini(task);
drivers/gpu/drm/lima/lima_sched.c
489
kmem_cache_free(pipe->task_slab, task);
drivers/gpu/drm/lima/lima_sched.c
547
struct lima_sched_task *task = pipe->current_task;
drivers/gpu/drm/lima/lima_sched.c
551
if (task && task->recoverable)
drivers/gpu/drm/lima/lima_sched.c
557
dma_fence_signal(task->fence);
drivers/gpu/drm/lima/lima_sched.h
72
atomic_t task;
drivers/gpu/drm/lima/lima_sched.h
77
int (*task_validate)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
drivers/gpu/drm/lima/lima_sched.h
78
void (*task_run)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
drivers/gpu/drm/lima/lima_sched.h
88
int lima_sched_task_init(struct lima_sched_task *task,
drivers/gpu/drm/lima/lima_sched.h
93
void lima_sched_task_fini(struct lima_sched_task *task);
drivers/gpu/drm/lima/lima_sched.h
99
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task);
drivers/gpu/drm/lima/lima_trace.h
14
TP_PROTO(struct lima_sched_task *task),
drivers/gpu/drm/lima/lima_trace.h
15
TP_ARGS(task),
drivers/gpu/drm/lima/lima_trace.h
19
__string(pipe, task->base.sched->name)
drivers/gpu/drm/lima/lima_trace.h
23
__entry->context = task->base.s_fence->finished.context;
drivers/gpu/drm/lima/lima_trace.h
24
__entry->seqno = task->base.s_fence->finished.seqno;
drivers/gpu/drm/lima/lima_trace.h
34
TP_PROTO(struct lima_sched_task *task),
drivers/gpu/drm/lima/lima_trace.h
35
TP_ARGS(task)
drivers/gpu/drm/lima/lima_trace.h
39
TP_PROTO(struct lima_sched_task *task),
drivers/gpu/drm/lima/lima_trace.h
40
TP_ARGS(task)
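Read in order, the lima entries trace one task lifecycle: allocation from pipe->task_slab, lima_sched_task_init() (which wraps drm_sched_job_init()/drm_sched_job_arm() and takes a VM reference), lima_sched_context_queue_task() to push the job and obtain its finished fence, and lima_sched_task_fini() to drop the BOs and VM again. A condensed sketch of the submit path; the lima_sched_task_init() argument order is an assumption pieced together from the fragments above:

    /* Condensed lima submit path (sketch); error handling trimmed and
     * the init argument order assumed from the index fragments. */
    static int example_submit(struct lima_sched_pipe *pipe,
                              struct lima_sched_context *context,
                              struct lima_bo **bos, int num_bos,
                              struct lima_vm *vm)
    {
            struct lima_sched_task *task;
            struct dma_fence *fence;
            int err;

            task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
            if (!task)
                    return -ENOMEM;

            err = lima_sched_task_init(task, context, bos, num_bos, vm);
            if (err) {
                    kmem_cache_free(pipe->task_slab, task);
                    return err;
            }

            fence = lima_sched_context_queue_task(task);
            dma_fence_put(fence);   /* or hand it back to the caller */
            return 0;
    }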
drivers/gpu/drm/msm/msm_atomic.c
124
sched_set_fifo(timer->worker->task);
drivers/gpu/drm/msm/msm_gem.c
989
struct task_struct *task =
drivers/gpu/drm/msm/msm_gem.c
991
if (task) {
drivers/gpu/drm/msm/msm_gem.c
992
comm = kstrdup(task->comm, GFP_KERNEL);
drivers/gpu/drm/msm/msm_gem.c
993
put_task_struct(task);
drivers/gpu/drm/msm/msm_gpu.c
442
struct task_struct *task;
drivers/gpu/drm/msm/msm_gpu.c
450
task = get_pid_task(submit->pid, PIDTYPE_PID);
drivers/gpu/drm/msm/msm_gpu.c
451
if (!task)
drivers/gpu/drm/msm/msm_gpu.c
455
*comm = kstrdup(task->comm, GFP_KERNEL);
drivers/gpu/drm/msm/msm_gpu.c
458
*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
drivers/gpu/drm/msm/msm_gpu.c
460
put_task_struct(task);
drivers/gpu/drm/msm/msm_gpu.c
471
struct task_struct *task;
drivers/gpu/drm/msm/msm_gpu.c
490
task = get_pid_task(submit->pid, PIDTYPE_PID);
drivers/gpu/drm/msm/msm_gpu.c
491
if (!task)
drivers/gpu/drm/msm/msm_gpu.c
952
msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
drivers/gpu/drm/msm/msm_gpu.c
967
to_msm_vm(vm)->pid = get_pid(task_pid(task));
drivers/gpu/drm/msm/msm_gpu.c
999
sched_set_fifo_low(gpu->worker->task);
drivers/gpu/drm/msm/msm_gpu.h
705
msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
drivers/gpu/drm/msm/msm_kms.c
317
sched_set_fifo(ev_thread->worker->task);
drivers/gpu/drm/msm/msm_rd.c
342
struct task_struct *task;
drivers/gpu/drm/msm/msm_rd.c
362
task = pid_task(submit->pid, PIDTYPE_PID);
drivers/gpu/drm/msm/msm_rd.c
363
if (task) {
drivers/gpu/drm/msm/msm_rd.c
365
TASK_COMM_LEN, task->comm,
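i915 and msm above (and panfrost, vmwgfx and xe further down) resolve a stored struct pid back to a task only long enough to snapshot its comm string. get_pid_task() returns a referenced task that must be released with put_task_struct(); the lock-free pid_task() variant used by msm_rd and panfrost is only valid under rcu_read_lock(). A sketch of the refcounted form:

    #include <linux/pid.h>
    #include <linux/sched.h>
    #include <linux/sched/task.h>
    #include <linux/slab.h>

    /* Sketch of the comm-snapshot idiom used by the drivers above. */
    static char *comm_from_pid(struct pid *pid)
    {
            struct task_struct *task;
            char *comm;

            task = get_pid_task(pid, PIDTYPE_PID); /* takes a reference */
            if (!task)
                    return NULL;
            comm = kstrdup(task->comm, GFP_KERNEL);
            put_task_struct(task);                 /* drop it again */
            return comm;
    }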
drivers/gpu/drm/panfrost/panfrost_drv.c
873
struct task_struct *task;
drivers/gpu/drm/panfrost/panfrost_drv.c
885
task = pid_task(pid, PIDTYPE_TGID);
drivers/gpu/drm/panfrost/panfrost_drv.c
888
task ? task->comm : "<unknown>");
drivers/gpu/drm/panthor/panthor_sched.c
3603
struct task_struct *task = current->group_leader;
drivers/gpu/drm/panthor/panthor_sched.c
3605
group->task_info.pid = task->pid;
drivers/gpu/drm/panthor/panthor_sched.c
3606
get_task_comm(group->task_info.comm, task);
drivers/gpu/drm/radeon/radeon_fence.c
1000
wake_up_process(wait->task);
drivers/gpu/drm/radeon/radeon_fence.c
1010
cb.task = current;
drivers/gpu/drm/radeon/radeon_fence.c
991
struct task_struct *task;
drivers/gpu/drm/ttm/tests/ttm_bo_test.c
179
struct task_struct *task = s_timer->ctx->task;
drivers/gpu/drm/ttm/tests/ttm_bo_test.c
181
do_send_sig_info(SIGTERM, SEND_SIG_PRIV, task, PIDTYPE_PID);
drivers/gpu/drm/ttm/tests/ttm_bo_test.c
214
struct task_struct *task;
drivers/gpu/drm/ttm/tests/ttm_bo_test.c
219
task = kthread_create(threaded_ttm_bo_reserve, bo, "ttm-bo-reserve");
drivers/gpu/drm/ttm/tests/ttm_bo_test.c
221
if (IS_ERR(task))
drivers/gpu/drm/ttm/tests/ttm_bo_test.c
227
wake_up_process(task);
drivers/gpu/drm/ttm/tests/ttm_bo_test.c
229
err = kthread_stop(task);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
598
struct task_struct *task;
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
617
task = kthread_create(threaded_dma_resv_signal, bo, "dma-resv-signal");
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
618
if (IS_ERR(task))
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
621
wake_up_process(task);
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
711
struct task_struct *task;
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
738
task = kthread_create(threaded_fence_signal, man->eviction_fences[0], "move-fence-signal");
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
739
if (IS_ERR(task))
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
742
wake_up_process(task);
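The TTM kunit tests use kthread_create() instead of kthread_run() so the new thread stays parked until the test has finished staging the buffer object; it is then started with wake_up_process() and joined with kthread_stop(). The shape of that pattern:

    #include <linux/kthread.h>

    /* create/park/wake/join pattern from the TTM tests above; threaded_fn
     * and data are placeholders, and threaded_fn is expected to return
     * once kthread_should_stop() reports true. */
    static int example_run_helper(int (*threaded_fn)(void *), void *data)
    {
            struct task_struct *task;

            task = kthread_create(threaded_fn, data, "example-worker");
            if (IS_ERR(task))
                    return PTR_ERR(task);

            /* ... finish any setup the thread depends on ... */
            wake_up_process(task);          /* let it run */

            return kthread_stop(task);      /* join; threaded_fn's result */
    }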
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
306
struct task_struct *task;
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
319
task = pid_task(pid, PIDTYPE_TGID);
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
321
task ? task->comm : "<unknown>");
drivers/gpu/drm/xe/xe_device.c
112
task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID);
drivers/gpu/drm/xe/xe_device.c
113
if (task) {
drivers/gpu/drm/xe/xe_device.c
114
xef->process_name = kstrdup(task->comm, GFP_KERNEL);
drivers/gpu/drm/xe/xe_device.c
115
xef->pid = task->pid;
drivers/gpu/drm/xe/xe_device.c
116
put_task_struct(task);
drivers/gpu/drm/xe/xe_device.c
87
struct task_struct *task = NULL;
drivers/gpu/drm/xe/xe_pm.c
511
struct task_struct *task)
drivers/gpu/drm/xe/xe_pm.c
513
WRITE_ONCE(xe->pm_callback_task, task);
drivers/gpu/ipu-v3/ipu-ic.c
139
enum ipu_ic_task task;
drivers/gpu/ipu-v3/ipu-ic.c
161
struct ipu_ic task[IC_NUM_TASKS];
drivers/gpu/ipu-v3/ipu-ic.c
586
struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task)
drivers/gpu/ipu-v3/ipu-ic.c
592
if (task >= IC_NUM_TASKS)
drivers/gpu/ipu-v3/ipu-ic.c
595
ic = &priv->task[task];
drivers/gpu/ipu-v3/ipu-ic.c
649
priv->task[i].task = i;
drivers/gpu/ipu-v3/ipu-ic.c
650
priv->task[i].priv = priv;
drivers/gpu/ipu-v3/ipu-ic.c
651
priv->task[i].reg = &ic_task_reg[i];
drivers/gpu/ipu-v3/ipu-ic.c
652
priv->task[i].bit = &ic_task_bit[i];
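In ipu-ic the symbol is an enum index rather than a thread: probe fills a fixed array of IC_NUM_TASKS slots, and ipu_ic_get() bounds-checks the requested task before handing its slot back. A sketch of just the lookup; the real function's locking and busy accounting are elided, and the priv dereference is an assumption:

    #include <linux/err.h>

    /* Bounds-checked enum lookup as in ipu-ic.c above (sketch only). */
    static struct ipu_ic *example_ic_get(struct ipu_ic_priv *priv,
                                         enum ipu_ic_task task)
    {
            if (task >= IC_NUM_TASKS)
                    return ERR_PTR(-EINVAL);
            return &priv->task[task];
    }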
drivers/iio/adc/ina2xx-adc.c
140
struct task_struct *task;
drivers/iio/adc/ina2xx-adc.c
837
struct task_struct *task;
drivers/iio/adc/ina2xx-adc.c
847
task = kthread_run(ina2xx_capture_thread, (void *)indio_dev,
drivers/iio/adc/ina2xx-adc.c
851
if (IS_ERR(task))
drivers/iio/adc/ina2xx-adc.c
852
return PTR_ERR(task);
drivers/iio/adc/ina2xx-adc.c
854
chip->task = task;
drivers/iio/adc/ina2xx-adc.c
863
if (chip->task) {
drivers/iio/adc/ina2xx-adc.c
864
kthread_stop(chip->task);
drivers/iio/adc/ina2xx-adc.c
865
chip->task = NULL;
drivers/iio/light/rohm-bu27034.c
1206
struct task_struct *task;
drivers/iio/light/rohm-bu27034.c
1214
task = kthread_run(bu27034_buffer_thread, idev,
drivers/iio/light/rohm-bu27034.c
1217
if (IS_ERR(task))
drivers/iio/light/rohm-bu27034.c
1218
return PTR_ERR(task);
drivers/iio/light/rohm-bu27034.c
1220
data->task = task;
drivers/iio/light/rohm-bu27034.c
1230
if (data->task) {
drivers/iio/light/rohm-bu27034.c
1231
kthread_stop(data->task);
drivers/iio/light/rohm-bu27034.c
1232
data->task = NULL;
drivers/iio/light/rohm-bu27034.c
204
struct task_struct *task;
drivers/iio/trigger/iio-trig-loop.c
35
struct task_struct *task;
drivers/iio/trigger/iio-trig-loop.c
60
loop_trig->task = kthread_run(iio_loop_thread,
drivers/iio/trigger/iio-trig-loop.c
62
if (IS_ERR(loop_trig->task)) {
drivers/iio/trigger/iio-trig-loop.c
65
return PTR_ERR(loop_trig->task);
drivers/iio/trigger/iio-trig-loop.c
68
kthread_stop(loop_trig->task);
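ina2xx, rohm-bu27034 and iio-trig-loop share the buffer-thread lifecycle: kthread_run() at buffer enable, kthread_stop() at disable, with the stored pointer cleared so disable is safe to call twice. A sketch with hypothetical names:

    #include <linux/kthread.h>

    /* IIO capture-thread lifecycle as above; example_chip and
     * capture_thread_fn are placeholders. */
    static int example_buffer_enable(struct example_chip *chip)
    {
            struct task_struct *task;

            task = kthread_run(capture_thread_fn, chip, "example-capture");
            if (IS_ERR(task))
                    return PTR_ERR(task);
            chip->task = task;
            return 0;
    }

    static int example_buffer_disable(struct example_chip *chip)
    {
            if (chip->task) {
                    /* capture_thread_fn must poll kthread_should_stop() */
                    kthread_stop(chip->task);
                    chip->task = NULL;
            }
            return 0;
    }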
drivers/infiniband/core/counters.c
244
match &= (task_pid_nr(counter->res.task) ==
drivers/infiniband/core/counters.c
245
task_pid_nr(qp->res.task));
drivers/infiniband/core/nldev.c
478
pid = task_pid_vnr(res->task);
drivers/infiniband/core/restrack.c
118
struct task_struct *task)
drivers/infiniband/core/restrack.c
120
if (WARN_ON_ONCE(!task))
drivers/infiniband/core/restrack.c
123
if (res->task)
drivers/infiniband/core/restrack.c
124
put_task_struct(res->task);
drivers/infiniband/core/restrack.c
125
get_task_struct(task);
drivers/infiniband/core/restrack.c
126
res->task = task;
drivers/infiniband/core/restrack.c
158
rdma_restrack_attach_task(dst, parent->task);
drivers/infiniband/core/restrack.c
266
if (res->task) {
drivers/infiniband/core/restrack.c
267
put_task_struct(res->task);
drivers/infiniband/core/restrack.c
268
res->task = NULL;
drivers/infiniband/core/restrack.c
290
if (res->task) {
drivers/infiniband/core/restrack.c
291
put_task_struct(res->task);
drivers/infiniband/core/restrack.c
292
res->task = NULL;
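The restrack entries show reference-counted task ownership: attaching a task drops any previous reference and pins the new one, and teardown releases it and clears the field. Condensed from the fragments above:

    #include <linux/sched/task.h>

    /* Attach/release pairing from restrack.c above (condensed sketch). */
    static void example_attach_task(struct rdma_restrack_entry *res,
                                    struct task_struct *task)
    {
            if (WARN_ON_ONCE(!task))
                    return;

            if (res->task)
                    put_task_struct(res->task);     /* drop previous owner */
            get_task_struct(task);                  /* pin the new one */
            res->task = task;
    }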
drivers/infiniband/sw/rxe/rxe_task.c
103
static void do_task(struct rxe_task *task)
drivers/infiniband/sw/rxe/rxe_task.c
111
WARN_ON(rxe_read(task->qp) <= 0);
drivers/infiniband/sw/rxe/rxe_task.c
113
spin_lock_irqsave(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
114
if (task->state >= TASK_STATE_DRAINED) {
drivers/infiniband/sw/rxe/rxe_task.c
115
rxe_put(task->qp);
drivers/infiniband/sw/rxe/rxe_task.c
116
task->num_done++;
drivers/infiniband/sw/rxe/rxe_task.c
117
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
120
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
127
ret = task->func(task->qp);
drivers/infiniband/sw/rxe/rxe_task.c
130
spin_lock_irqsave(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
135
if (task->state != TASK_STATE_DRAINING) {
drivers/infiniband/sw/rxe/rxe_task.c
136
task->state = TASK_STATE_IDLE;
drivers/infiniband/sw/rxe/rxe_task.c
144
switch (task->state) {
drivers/infiniband/sw/rxe/rxe_task.c
146
task->state = TASK_STATE_IDLE;
drivers/infiniband/sw/rxe/rxe_task.c
153
task->state = TASK_STATE_BUSY;
drivers/infiniband/sw/rxe/rxe_task.c
158
task->state = TASK_STATE_DRAINED;
drivers/infiniband/sw/rxe/rxe_task.c
163
rxe_dbg_qp(task->qp, "unexpected task state = %d\n",
drivers/infiniband/sw/rxe/rxe_task.c
164
task->state);
drivers/infiniband/sw/rxe/rxe_task.c
165
task->state = TASK_STATE_IDLE;
drivers/infiniband/sw/rxe/rxe_task.c
170
task->num_done++;
drivers/infiniband/sw/rxe/rxe_task.c
171
if (WARN_ON(task->num_done != task->num_sched))
drivers/infiniband/sw/rxe/rxe_task.c
173
task->qp,
drivers/infiniband/sw/rxe/rxe_task.c
175
task->num_sched, task->num_done);
drivers/infiniband/sw/rxe/rxe_task.c
177
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
180
task->ret = ret;
drivers/infiniband/sw/rxe/rxe_task.c
183
rxe_sched_task(task);
drivers/infiniband/sw/rxe/rxe_task.c
185
rxe_put(task->qp);
drivers/infiniband/sw/rxe/rxe_task.c
194
int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
drivers/infiniband/sw/rxe/rxe_task.c
199
task->qp = qp;
drivers/infiniband/sw/rxe/rxe_task.c
200
task->func = func;
drivers/infiniband/sw/rxe/rxe_task.c
201
task->state = TASK_STATE_IDLE;
drivers/infiniband/sw/rxe/rxe_task.c
202
spin_lock_init(&task->lock);
drivers/infiniband/sw/rxe/rxe_task.c
203
INIT_WORK(&task->work, do_work);
drivers/infiniband/sw/rxe/rxe_task.c
216
void rxe_cleanup_task(struct rxe_task *task)
drivers/infiniband/sw/rxe/rxe_task.c
220
spin_lock_irqsave(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
221
if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
drivers/infiniband/sw/rxe/rxe_task.c
222
task->state = TASK_STATE_DRAINING;
drivers/infiniband/sw/rxe/rxe_task.c
224
task->state = TASK_STATE_INVALID;
drivers/infiniband/sw/rxe/rxe_task.c
225
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
228
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
233
while (!is_done(task))
drivers/infiniband/sw/rxe/rxe_task.c
236
spin_lock_irqsave(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
237
task->state = TASK_STATE_INVALID;
drivers/infiniband/sw/rxe/rxe_task.c
238
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
245
void rxe_sched_task(struct rxe_task *task)
drivers/infiniband/sw/rxe/rxe_task.c
249
WARN_ON(rxe_read(task->qp) <= 0);
drivers/infiniband/sw/rxe/rxe_task.c
251
spin_lock_irqsave(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
252
if (__reserve_if_idle(task))
drivers/infiniband/sw/rxe/rxe_task.c
253
queue_work(rxe_wq, &task->work);
drivers/infiniband/sw/rxe/rxe_task.c
254
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
261
void rxe_disable_task(struct rxe_task *task)
drivers/infiniband/sw/rxe/rxe_task.c
265
WARN_ON(rxe_read(task->qp) <= 0);
drivers/infiniband/sw/rxe/rxe_task.c
267
spin_lock_irqsave(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
268
if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
drivers/infiniband/sw/rxe/rxe_task.c
269
task->state = TASK_STATE_DRAINING;
drivers/infiniband/sw/rxe/rxe_task.c
271
task->state = TASK_STATE_DRAINED;
drivers/infiniband/sw/rxe/rxe_task.c
272
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
275
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
277
while (!is_done(task))
drivers/infiniband/sw/rxe/rxe_task.c
280
spin_lock_irqsave(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
281
task->state = TASK_STATE_DRAINED;
drivers/infiniband/sw/rxe/rxe_task.c
282
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
285
void rxe_enable_task(struct rxe_task *task)
drivers/infiniband/sw/rxe/rxe_task.c
289
WARN_ON(rxe_read(task->qp) <= 0);
drivers/infiniband/sw/rxe/rxe_task.c
291
spin_lock_irqsave(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
292
if (task->state == TASK_STATE_INVALID) {
drivers/infiniband/sw/rxe/rxe_task.c
293
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
297
task->state = TASK_STATE_IDLE;
drivers/infiniband/sw/rxe/rxe_task.c
298
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
36
static bool __reserve_if_idle(struct rxe_task *task)
drivers/infiniband/sw/rxe/rxe_task.c
38
WARN_ON(rxe_read(task->qp) <= 0);
drivers/infiniband/sw/rxe/rxe_task.c
40
if (task->state == TASK_STATE_IDLE) {
drivers/infiniband/sw/rxe/rxe_task.c
41
rxe_get(task->qp);
drivers/infiniband/sw/rxe/rxe_task.c
42
task->state = TASK_STATE_BUSY;
drivers/infiniband/sw/rxe/rxe_task.c
43
task->num_sched++;
drivers/infiniband/sw/rxe/rxe_task.c
47
if (task->state == TASK_STATE_BUSY)
drivers/infiniband/sw/rxe/rxe_task.c
48
task->state = TASK_STATE_ARMED;
drivers/infiniband/sw/rxe/rxe_task.c
60
static bool __is_done(struct rxe_task *task)
drivers/infiniband/sw/rxe/rxe_task.c
62
if (work_pending(&task->work))
drivers/infiniband/sw/rxe/rxe_task.c
65
if (task->state == TASK_STATE_IDLE ||
drivers/infiniband/sw/rxe/rxe_task.c
66
task->state == TASK_STATE_DRAINED) {
drivers/infiniband/sw/rxe/rxe_task.c
74
static bool is_done(struct rxe_task *task)
drivers/infiniband/sw/rxe/rxe_task.c
79
spin_lock_irqsave(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.c
80
done = __is_done(task);
drivers/infiniband/sw/rxe/rxe_task.c
81
spin_unlock_irqrestore(&task->lock, flags);
drivers/infiniband/sw/rxe/rxe_task.h
44
int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
drivers/infiniband/sw/rxe/rxe_task.h
48
void rxe_cleanup_task(struct rxe_task *task);
drivers/infiniband/sw/rxe/rxe_task.h
50
void rxe_sched_task(struct rxe_task *task);
drivers/infiniband/sw/rxe/rxe_task.h
53
void rxe_disable_task(struct rxe_task *task);
drivers/infiniband/sw/rxe/rxe_task.h
56
void rxe_enable_task(struct rxe_task *task);
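The rxe entries implement a small state machine (IDLE, BUSY, ARMED, plus the DRAINING/DRAINED/INVALID teardown states) driven from a workqueue, with every transition made under task->lock. Condensed from the fragments above, the scheduling half: work is queued only if __reserve_if_idle() wins the IDLE-to-BUSY transition, which also takes a QP reference and bumps num_sched:

    /* Scheduling half of the rxe task state machine (condensed). */
    void rxe_sched_task(struct rxe_task *task)
    {
            unsigned long flags;

            WARN_ON(rxe_read(task->qp) <= 0);

            spin_lock_irqsave(&task->lock, flags);
            if (__reserve_if_idle(task))    /* IDLE -> BUSY */
                    queue_work(rxe_wq, &task->work);
            spin_unlock_irqrestore(&task->lock, flags);
    }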
drivers/infiniband/ulp/iser/iscsi_iser.c
174
static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
drivers/infiniband/ulp/iser/iscsi_iser.c
176
struct iscsi_iser_task *iser_task = task->dd_data;
drivers/infiniband/ulp/iser/iscsi_iser.c
178
task->hdr = (struct iscsi_hdr *)&iser_task->desc.iscsi_header;
drivers/infiniband/ulp/iser/iscsi_iser.c
179
task->hdr_max = sizeof(iser_task->desc.iscsi_header);
drivers/infiniband/ulp/iser/iscsi_iser.c
195
int iser_initialize_task_headers(struct iscsi_task *task,
drivers/infiniband/ulp/iser/iscsi_iser.c
198
struct iser_conn *iser_conn = task->conn->dd_data;
drivers/infiniband/ulp/iser/iscsi_iser.c
200
struct iscsi_iser_task *iser_task = task->dd_data;
drivers/infiniband/ulp/iser/iscsi_iser.c
233
static int iscsi_iser_task_init(struct iscsi_task *task)
drivers/infiniband/ulp/iser/iscsi_iser.c
235
struct iscsi_iser_task *iser_task = task->dd_data;
drivers/infiniband/ulp/iser/iscsi_iser.c
238
ret = iser_initialize_task_headers(task, &iser_task->desc);
drivers/infiniband/ulp/iser/iscsi_iser.c
246
if (!task->sc)
drivers/infiniband/ulp/iser/iscsi_iser.c
251
iser_task->sc = task->sc;
drivers/infiniband/ulp/iser/iscsi_iser.c
268
struct iscsi_task *task)
drivers/infiniband/ulp/iser/iscsi_iser.c
270
iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt);
drivers/infiniband/ulp/iser/iscsi_iser.c
278
return iser_send_control(conn, task);
drivers/infiniband/ulp/iser/iscsi_iser.c
282
struct iscsi_task *task)
drivers/infiniband/ulp/iser/iscsi_iser.c
284
struct iscsi_r2t_info *r2t = &task->unsol_r2t;
drivers/infiniband/ulp/iser/iscsi_iser.c
289
while (iscsi_task_has_unsol_data(task)) {
drivers/infiniband/ulp/iser/iscsi_iser.c
290
iscsi_prep_data_out_pdu(task, r2t, &hdr);
drivers/infiniband/ulp/iser/iscsi_iser.c
296
error = iser_send_data_out(conn, task, &hdr);
drivers/infiniband/ulp/iser/iscsi_iser.c
316
static int iscsi_iser_task_xmit(struct iscsi_task *task)
drivers/infiniband/ulp/iser/iscsi_iser.c
318
struct iscsi_conn *conn = task->conn;
drivers/infiniband/ulp/iser/iscsi_iser.c
319
struct iscsi_iser_task *iser_task = task->dd_data;
drivers/infiniband/ulp/iser/iscsi_iser.c
322
if (!task->sc)
drivers/infiniband/ulp/iser/iscsi_iser.c
323
return iscsi_iser_mtask_xmit(conn, task);
drivers/infiniband/ulp/iser/iscsi_iser.c
325
if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
drivers/infiniband/ulp/iser/iscsi_iser.c
326
BUG_ON(scsi_bufflen(task->sc) == 0);
drivers/infiniband/ulp/iser/iscsi_iser.c
329
task->itt, scsi_bufflen(task->sc),
drivers/infiniband/ulp/iser/iscsi_iser.c
330
task->imm_count, task->unsol_r2t.data_length);
drivers/infiniband/ulp/iser/iscsi_iser.c
334
conn->id, task->itt);
drivers/infiniband/ulp/iser/iscsi_iser.c
338
error = iser_send_command(conn, task);
drivers/infiniband/ulp/iser/iscsi_iser.c
345
if (iscsi_task_has_unsol_data(task))
drivers/infiniband/ulp/iser/iscsi_iser.c
346
error = iscsi_iser_task_xmit_unsol_data(conn, task);
drivers/infiniband/ulp/iser/iscsi_iser.c
360
static void iscsi_iser_cleanup_task(struct iscsi_task *task)
drivers/infiniband/ulp/iser/iscsi_iser.c
362
struct iscsi_iser_task *iser_task = task->dd_data;
drivers/infiniband/ulp/iser/iscsi_iser.c
364
struct iser_conn *iser_conn = task->conn->dd_data;
drivers/infiniband/ulp/iser/iscsi_iser.c
378
if (!task->sc)
drivers/infiniband/ulp/iser/iscsi_iser.c
399
static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
drivers/infiniband/ulp/iser/iscsi_iser.c
401
struct iscsi_iser_task *iser_task = task->dd_data;
drivers/infiniband/ulp/iser/iscsi_iser.h
474
struct iscsi_task *task);
drivers/infiniband/ulp/iser/iscsi_iser.h
477
struct iscsi_task *task);
drivers/infiniband/ulp/iser/iscsi_iser.h
480
struct iscsi_task *task,
drivers/infiniband/ulp/iser/iscsi_iser.h
504
void iser_task_rdma_init(struct iscsi_iser_task *task);
drivers/infiniband/ulp/iser/iscsi_iser.h
506
void iser_task_rdma_finalize(struct iscsi_iser_task *task);
drivers/infiniband/ulp/iser/iscsi_iser.h
510
int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
drivers/infiniband/ulp/iser/iscsi_iser.h
513
void iser_unreg_mem_fastreg(struct iscsi_iser_task *task,
drivers/infiniband/ulp/iser/iscsi_iser.h
534
int iser_initialize_task_headers(struct iscsi_task *task,
drivers/infiniband/ulp/iser/iser_initiator.c
122
task->itt, mem_reg->rkey,
drivers/infiniband/ulp/iser/iser_initiator.c
128
task->itt, imm_sz);
drivers/infiniband/ulp/iser/iser_initiator.c
341
int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
drivers/infiniband/ulp/iser/iser_initiator.c
344
struct iscsi_iser_task *iser_task = task->dd_data;
drivers/infiniband/ulp/iser/iser_initiator.c
348
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
drivers/infiniband/ulp/iser/iser_initiator.c
349
struct scsi_cmnd *sc = task->sc;
drivers/infiniband/ulp/iser/iser_initiator.c
380
err = iser_prepare_read_cmd(task);
drivers/infiniband/ulp/iser/iser_initiator.c
385
err = iser_prepare_write_cmd(task,
drivers/infiniband/ulp/iser/iser_initiator.c
386
task->imm_count,
drivers/infiniband/ulp/iser/iser_initiator.c
387
task->imm_count +
drivers/infiniband/ulp/iser/iser_initiator.c
388
task->unsol_r2t.data_length,
drivers/infiniband/ulp/iser/iser_initiator.c
401
iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
drivers/infiniband/ulp/iser/iser_initiator.c
411
int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
drivers/infiniband/ulp/iser/iser_initiator.c
415
struct iscsi_iser_task *iser_task = task->dd_data;
drivers/infiniband/ulp/iser/iser_initiator.c
441
err = iser_initialize_task_headers(task, tx_desc);
drivers/infiniband/ulp/iser/iser_initiator.c
472
int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
drivers/infiniband/ulp/iser/iser_initiator.c
475
struct iscsi_iser_task *iser_task = task->dd_data;
drivers/infiniband/ulp/iser/iser_initiator.c
48
static int iser_prepare_read_cmd(struct iscsi_task *task)
drivers/infiniband/ulp/iser/iser_initiator.c
487
data_seg_len = ntoh24(task->hdr->dlength);
drivers/infiniband/ulp/iser/iser_initiator.c
493
if (task != conn->login_task) {
drivers/infiniband/ulp/iser/iser_initiator.c
499
task->data_count, DMA_TO_DEVICE);
drivers/infiniband/ulp/iser/iser_initiator.c
501
memcpy(desc->req, task->data, task->data_count);
drivers/infiniband/ulp/iser/iser_initiator.c
504
task->data_count, DMA_TO_DEVICE);
drivers/infiniband/ulp/iser/iser_initiator.c
507
tx_dsg->length = task->data_count;
drivers/infiniband/ulp/iser/iser_initiator.c
51
struct iscsi_iser_task *iser_task = task->dd_data;
drivers/infiniband/ulp/iser/iser_initiator.c
512
if (task == conn->login_task) {
drivers/infiniband/ulp/iser/iser_initiator.c
514
task->hdr->opcode, data_seg_len);
drivers/infiniband/ulp/iser/iser_initiator.c
518
err = iser_post_rx_bufs(conn, task->hdr);
drivers/infiniband/ulp/iser/iser_initiator.c
595
struct iscsi_task *task;
drivers/infiniband/ulp/iser/iser_initiator.c
607
task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
drivers/infiniband/ulp/iser/iser_initiator.c
608
if (likely(task)) {
drivers/infiniband/ulp/iser/iser_initiator.c
609
struct iscsi_iser_task *iser_task = task->dd_data;
drivers/infiniband/ulp/iser/iser_initiator.c
682
struct iscsi_task *task;
drivers/infiniband/ulp/iser/iser_initiator.c
690
task = (void *)desc - sizeof(struct iscsi_task);
drivers/infiniband/ulp/iser/iser_initiator.c
691
if (task->hdr->itt == RESERVED_ITT)
drivers/infiniband/ulp/iser/iser_initiator.c
692
iscsi_put_task(task);
drivers/infiniband/ulp/iser/iser_initiator.c
74
task->itt, mem_reg->rkey,
drivers/infiniband/ulp/iser/iser_initiator.c
89
static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
drivers/infiniband/ulp/iser/iser_initiator.c
92
struct iscsi_iser_task *iser_task = task->dd_data;
drivers/infiniband/ulp/iser/iser_memory.c
352
int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
drivers/infiniband/ulp/iser/iser_memory.c
356
struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
drivers/infiniband/ulp/iser/iser_memory.c
358
struct iser_data_buf *mem = &task->data[dir];
drivers/infiniband/ulp/iser/iser_memory.c
359
struct iser_mem_reg *reg = &task->rdma_reg[dir];
drivers/infiniband/ulp/iser/iser_memory.c
365
scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
drivers/infiniband/ulp/iser/iser_memory.c
370
if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
drivers/infiniband/ulp/iser/iser_memory.c
371
err = iser_fast_reg_mr(task, mem, &desc->rsc, reg);
drivers/infiniband/ulp/iser/iser_memory.c
375
err = iser_reg_sig_mr(task, mem, &task->prot[dir],
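Throughout the iser entries the per-command driver state hangs off task->dd_data, which libiscsi allocates alongside each struct iscsi_task; every callback starts by recovering the private structs for the task and its connection. A sketch of that opening move:

    /* dd_data recovery that opens each iser callback above (sketch);
     * example_task_cb is a placeholder name. */
    static int example_task_cb(struct iscsi_task *task)
    {
            struct iscsi_iser_task *iser_task = task->dd_data;
            struct iser_conn *iser_conn = task->conn->dd_data;

            /* ... operate on iser_task / iser_conn ... */
            return 0;
    }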
drivers/input/serio/hp_sdc.c
216
tasklet_schedule(&hp_sdc.task);
drivers/input/serio/hp_sdc.c
592
tasklet_schedule(&hp_sdc.task);
drivers/input/serio/hp_sdc.c
620
tasklet_schedule(&hp_sdc.task);
drivers/input/serio/hp_sdc.c
682
tasklet_schedule(&hp_sdc.task);
drivers/input/serio/hp_sdc.c
703
tasklet_schedule(&hp_sdc.task);
drivers/input/serio/hp_sdc.c
725
tasklet_schedule(&hp_sdc.task);
drivers/input/serio/hp_sdc.c
746
tasklet_schedule(&hp_sdc.task);
drivers/input/serio/hp_sdc.c
767
tasklet_schedule(&hp_sdc.task);
drivers/input/serio/hp_sdc.c
788
tasklet_schedule(&hp_sdc.task);
drivers/input/serio/hp_sdc.c
797
tasklet_schedule(&hp_sdc.task);
drivers/input/serio/hp_sdc.c
894
tasklet_init(&hp_sdc.task, hp_sdc_tasklet, 0);
drivers/input/serio/hp_sdc.c
986
tasklet_kill(&hp_sdc.task);
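hp_sdc uses the tasklet flavour of the symbol: one tasklet_init() at startup, tasklet_schedule() from the many interrupt-time call sites above, and tasklet_kill() on module exit to wait out a pending run. The lifecycle in miniature (example_bh is a placeholder bottom half):

    #include <linux/interrupt.h>

    static void example_bh(unsigned long data)
    {
            /* deferred work; runs in softirq context */
    }

    static void example_lifecycle(void)
    {
            tasklet_init(&hp_sdc.task, example_bh, 0); /* startup: bind */
            tasklet_schedule(&hp_sdc.task);            /* IRQ paths: defer */
            tasklet_kill(&hp_sdc.task);                /* exit: flush pending */
    }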
drivers/mailbox/mtk-cmdq-mailbox.c
255
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
drivers/mailbox/mtk-cmdq-mailbox.c
257
struct device *dev = task->cmdq->mbox.dev;
drivers/mailbox/mtk-cmdq-mailbox.c
258
struct cmdq_thread *thread = task->thread;
drivers/mailbox/mtk-cmdq-mailbox.c
260
&thread->task_busy_list, typeof(*task), list_entry);
drivers/mailbox/mtk-cmdq-mailbox.c
262
u32 gce_addr = cmdq_convert_gce_addr(task->pa_base, task->cmdq->pdata);
drivers/mailbox/mtk-cmdq-mailbox.c
279
static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
drivers/mailbox/mtk-cmdq-mailbox.c
284
data.pkt = task->pkt;
drivers/mailbox/mtk-cmdq-mailbox.c
285
mbox_chan_received_data(task->thread->chan, &data);
drivers/mailbox/mtk-cmdq-mailbox.c
287
list_del(&task->list_entry);
drivers/mailbox/mtk-cmdq-mailbox.c
290
static void cmdq_task_handle_error(struct cmdq_task *task)
drivers/mailbox/mtk-cmdq-mailbox.c
292
struct cmdq_thread *thread = task->thread;
drivers/mailbox/mtk-cmdq-mailbox.c
294
struct cmdq *cmdq = task->cmdq;
drivers/mailbox/mtk-cmdq-mailbox.c
296
dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
drivers/mailbox/mtk-cmdq-mailbox.c
309
struct cmdq_task *task, *tmp, *curr_task = NULL;
drivers/mailbox/mtk-cmdq-mailbox.c
336
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
drivers/mailbox/mtk-cmdq-mailbox.c
338
task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
drivers/mailbox/mtk-cmdq-mailbox.c
339
if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
drivers/mailbox/mtk-cmdq-mailbox.c
340
curr_task = task;
drivers/mailbox/mtk-cmdq-mailbox.c
343
cmdq_task_exec_done(task, 0);
drivers/mailbox/mtk-cmdq-mailbox.c
344
kfree(task);
drivers/mailbox/mtk-cmdq-mailbox.c
346
cmdq_task_exec_done(task, -ENOEXEC);
drivers/mailbox/mtk-cmdq-mailbox.c
348
kfree(task);
drivers/mailbox/mtk-cmdq-mailbox.c
452
struct cmdq_task *task;
drivers/mailbox/mtk-cmdq-mailbox.c
459
task = kzalloc_obj(*task, GFP_ATOMIC);
drivers/mailbox/mtk-cmdq-mailbox.c
460
if (!task)
drivers/mailbox/mtk-cmdq-mailbox.c
463
task->cmdq = cmdq;
drivers/mailbox/mtk-cmdq-mailbox.c
464
INIT_LIST_HEAD(&task->list_entry);
drivers/mailbox/mtk-cmdq-mailbox.c
465
task->pa_base = pkt->pa_base;
drivers/mailbox/mtk-cmdq-mailbox.c
466
task->thread = thread;
drivers/mailbox/mtk-cmdq-mailbox.c
467
task->pkt = pkt;
drivers/mailbox/mtk-cmdq-mailbox.c
478
gce_addr = cmdq_convert_gce_addr(task->pa_base, cmdq->pdata);
drivers/mailbox/mtk-cmdq-mailbox.c
480
gce_addr = cmdq_convert_gce_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata);
drivers/mailbox/mtk-cmdq-mailbox.c
496
writel(task->pa_base >> cmdq->pdata->shift,
drivers/mailbox/mtk-cmdq-mailbox.c
499
cmdq_task_insert_into_thread(task);
drivers/mailbox/mtk-cmdq-mailbox.c
502
writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
drivers/mailbox/mtk-cmdq-mailbox.c
506
list_move_tail(&task->list_entry, &thread->task_busy_list);
drivers/mailbox/mtk-cmdq-mailbox.c
520
struct cmdq_task *task, *tmp;
drivers/mailbox/mtk-cmdq-mailbox.c
536
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
drivers/mailbox/mtk-cmdq-mailbox.c
538
cmdq_task_exec_done(task, -ECONNABORTED);
drivers/mailbox/mtk-cmdq-mailbox.c
539
kfree(task);
drivers/mailbox/mtk-cmdq-mailbox.c
562
struct cmdq_task *task, *tmp;
drivers/mailbox/mtk-cmdq-mailbox.c
579
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
drivers/mailbox/mtk-cmdq-mailbox.c
582
data.pkt = task->pkt;
drivers/mailbox/mtk-cmdq-mailbox.c
583
mbox_chan_received_data(task->thread->chan, &data);
drivers/mailbox/mtk-cmdq-mailbox.c
584
list_del(&task->list_entry);
drivers/mailbox/mtk-cmdq-mailbox.c
585
kfree(task);
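The cmdq entries end in the classic busy-list sweep: on error, suspend or channel shutdown every queued task is walked with list_for_each_entry_safe() (safe because the body unlinks each entry), its completion reported through mbox_chan_received_data(), and the node freed. Condensed:

    /* Busy-list sweep from mtk-cmdq-mailbox.c above (condensed sketch);
     * cmdq_task_exec_done() does the callback and the list_del(). */
    static void example_flush_thread(struct cmdq_thread *thread)
    {
            struct cmdq_task *task, *tmp;

            list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                     list_entry) {
                    cmdq_task_exec_done(task, -ECONNABORTED);
                    kfree(task);
            }
    }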
drivers/md/bcache/request.c
368
struct task_struct *task = current;
drivers/md/bcache/request.c
437
add_sequential(task);
drivers/md/bcache/request.c
445
task->sequential_io = i->sequential;
drivers/md/bcache/request.c
453
sectors = max(task->sequential_io,
drivers/md/bcache/request.c
454
task->sequential_io_avg) >> 9;
drivers/md/dm-integrity.c
1257
last_range_task = last_range->task;
drivers/md/dm-integrity.c
1260
last_range->task = last_range_task;
drivers/md/dm-integrity.c
1282
new_range->task = current;
drivers/md/dm-integrity.c
307
struct task_struct *task;
drivers/md/dm-vdo/indexer/radix-sort.c
102
struct task *task = (*stack_pointer)++;
drivers/md/dm-vdo/indexer/radix-sort.c
104
task->first_key = first_key;
drivers/md/dm-vdo/indexer/radix-sort.c
105
task->last_key = &first_key[count - 1];
drivers/md/dm-vdo/indexer/radix-sort.c
106
task->offset = offset;
drivers/md/dm-vdo/indexer/radix-sort.c
107
task->length = length;
drivers/md/dm-vdo/indexer/radix-sort.c
122
static inline void measure_bins(const struct task task, struct histogram *bins)
drivers/md/dm-vdo/indexer/radix-sort.c
134
for (key_ptr = task.first_key; key_ptr <= task.last_key; key_ptr++) {
drivers/md/dm-vdo/indexer/radix-sort.c
136
u8 bin = (*key_ptr)[task.offset];
drivers/md/dm-vdo/indexer/radix-sort.c
172
static inline int push_bins(struct task **stack, struct task *end_of_stack,
drivers/md/dm-vdo/indexer/radix-sort.c
173
struct task **list, sort_key_t *pile[],
drivers/md/dm-vdo/indexer/radix-sort.c
214
result = vdo_allocate_extended(struct radix_sorter, stack_size, struct task,
drivers/md/dm-vdo/indexer/radix-sort.c
237
struct task start;
drivers/md/dm-vdo/indexer/radix-sort.c
240
struct task *task_stack = sorter->stack;
drivers/md/dm-vdo/indexer/radix-sort.c
247
start = (struct task) {
drivers/md/dm-vdo/indexer/radix-sort.c
269
const struct task task = *task_stack;
drivers/md/dm-vdo/indexer/radix-sort.c
270
struct task *insertion_task_list;
drivers/md/dm-vdo/indexer/radix-sort.c
275
measure_bins(task, bins);
drivers/md/dm-vdo/indexer/radix-sort.c
283
&insertion_task_list, pile, bins, task.first_key,
drivers/md/dm-vdo/indexer/radix-sort.c
284
task.offset + 1, task.length - 1);
drivers/md/dm-vdo/indexer/radix-sort.c
296
end = task.last_key - bins->size[bins->last];
drivers/md/dm-vdo/indexer/radix-sort.c
299
for (fence = task.first_key; fence <= end; ) {
drivers/md/dm-vdo/indexer/radix-sort.c
307
while (--pile[bin = key[task.offset]] > fence)
drivers/md/dm-vdo/indexer/radix-sort.c
60
struct task *end_of_stack;
drivers/md/dm-vdo/indexer/radix-sort.c
61
struct task insertion_list[256];
drivers/md/dm-vdo/indexer/radix-sort.c
62
struct task stack[];
drivers/md/dm-vdo/indexer/radix-sort.c
72
static inline void insert_key(const struct task task, sort_key_t *next)
drivers/md/dm-vdo/indexer/radix-sort.c
78
while ((--next >= task.first_key) &&
drivers/md/dm-vdo/indexer/radix-sort.c
79
(compare(unsorted, next[0], task.offset, task.length) < 0))
drivers/md/dm-vdo/indexer/radix-sort.c
90
static inline void insertion_sort(const struct task task)
drivers/md/dm-vdo/indexer/radix-sort.c
94
for (next = task.first_key + 1; next <= task.last_key; next++)
drivers/md/dm-vdo/indexer/radix-sort.c
95
insert_key(task, next);
drivers/md/dm-vdo/indexer/radix-sort.c
99
static inline void push_task(struct task **stack_pointer, sort_key_t *first_key,
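dm-vdo's radix sort reuses the name for something different again: struct task is a saved stack frame (key range, byte offset, remaining length) so the sort can recurse on an explicit stack instead of the call stack; push_task() claims the next frame through the stack pointer and the main loop pops frames with *task_stack. Reconstructed from the push_task fragments above, with the integer field types assumed:

    /* Explicit-stack frame push as in radix-sort.c above; the count,
     * offset and length types are assumptions. */
    static inline void push_task(struct task **stack_pointer,
                                 sort_key_t *first_key, u32 count,
                                 u16 offset, u16 length)
    {
            struct task *task = (*stack_pointer)++; /* claim next frame */

            task->first_key = first_key;
            task->last_key = &first_key[count - 1];
            task->offset = offset;
            task->length = length;
    }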
drivers/md/dm-vdo/thread-registry.c
33
new_thread->task = current;
drivers/md/dm-vdo/thread-registry.c
37
if (thread->task == current) {
drivers/md/dm-vdo/thread-registry.c
62
if (thread->task == current) {
drivers/md/dm-vdo/thread-registry.c
85
if (thread->task == current) {
drivers/md/dm-vdo/thread-registry.h
20
struct task_struct *task;
drivers/md/dm-vdo/thread-utils.c
55
struct task_struct *task;
drivers/md/dm-vdo/thread-utils.c
83
task = kthread_run(thread_starter, thread, "%.*s:%s",
drivers/md/dm-vdo/thread-utils.c
87
task = kthread_run(thread_starter, thread, "%s", name);
drivers/md/dm-vdo/thread-utils.c
90
if (IS_ERR(task)) {
drivers/md/dm-vdo/thread-utils.c
92
return PTR_ERR(task);
drivers/md/persistent-data/dm-block-manager.c
127
if (!w->task)
drivers/md/persistent-data/dm-block-manager.c
138
struct task_struct *task;
drivers/md/persistent-data/dm-block-manager.c
141
task = w->task;
drivers/md/persistent-data/dm-block-manager.c
143
w->task = NULL;
drivers/md/persistent-data/dm-block-manager.c
144
wake_up_process(task);
drivers/md/persistent-data/dm-block-manager.c
164
__add_holder(lock, w->task);
drivers/md/persistent-data/dm-block-manager.c
170
__add_holder(lock, w->task);
drivers/md/persistent-data/dm-block-manager.c
214
w.task = current;
drivers/md/persistent-data/dm-block-manager.c
276
w.task = current;
drivers/md/persistent-data/dm-block-manager.c
57
struct task_struct *task;
drivers/md/persistent-data/dm-block-manager.c
62
struct task_struct *task)
drivers/md/persistent-data/dm-block-manager.c
67
if (lock->holders[i] == task)
drivers/md/persistent-data/dm-block-manager.c
75
static void __add_holder(struct block_lock *lock, struct task_struct *task)
drivers/md/persistent-data/dm-block-manager.c
82
get_task_struct(task);
drivers/md/persistent-data/dm-block-manager.c
83
lock->holders[h] = task;
drivers/md/persistent-data/dm-block-manager.c
92
static void __del_holder(struct block_lock *lock, struct task_struct *task)
drivers/md/persistent-data/dm-block-manager.c
94
unsigned int h = __find_holder(lock, task);
drivers/md/persistent-data/dm-block-manager.c
97
put_task_struct(task);
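dm-block-manager parks lock waiters by recording current in a waiter struct and waking them with wake_up_process(); the waker clears w->task before the wake so the sleeper can use that field as its wakeup condition. A sketch of both halves; the sleeper's loop is inferred from the fragments, and whatever barrier the driver places between the clear and the wake is elided here:

    #include <linux/sched.h>

    /* Waker: publish completion, then wake (sketch). */
    static void example_wake(struct waiter *w)
    {
            struct task_struct *task = w->task;

            w->task = NULL;
            wake_up_process(task);
    }

    /* Sleeper: w->task was set to current before enqueueing (sketch). */
    static void example_wait(struct waiter *w)
    {
            for (;;) {
                    set_current_state(TASK_UNINTERRUPTIBLE);
                    if (!w->task)   /* cleared by the waker */
                            break;
                    schedule();
            }
            set_current_state(TASK_RUNNING);
    }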
drivers/media/i2c/saa717x.c
807
int task, int prescale)
drivers/media/i2c/saa717x.c
831
task_shift = task * 0x40;
drivers/media/i2c/saa717x.c
851
static void set_v_scale(struct v4l2_subdev *sd, int task, int yscale)
drivers/media/i2c/saa717x.c
855
task_shift = task * 0x40;
drivers/media/pci/saa7134/saa7134-core.c
375
u32 split, task=0, ctrl=0, irq=0;
drivers/media/pci/saa7134/saa7134-core.c
386
task |= 0x01;
drivers/media/pci/saa7134/saa7134-core.c
401
task |= 0x22;
drivers/media/pci/saa7134/saa7134-core.c
449
saa_writeb(SAA7134_REGION_ENABLE, task);
drivers/media/pci/saa7134/saa7134-core.c
461
task, ctrl, irq, split ? "no" : "yes");
drivers/media/pci/saa7134/saa7134-vbi.c
40
int task)
drivers/media/pci/saa7134/saa7134-vbi.c
45
saa_writeb(SAA7134_VBI_H_START1(task), norm->h_start & 0xff);
drivers/media/pci/saa7134/saa7134-vbi.c
46
saa_writeb(SAA7134_VBI_H_START2(task), norm->h_start >> 8);
drivers/media/pci/saa7134/saa7134-vbi.c
47
saa_writeb(SAA7134_VBI_H_STOP1(task), norm->h_stop & 0xff);
drivers/media/pci/saa7134/saa7134-vbi.c
48
saa_writeb(SAA7134_VBI_H_STOP2(task), norm->h_stop >> 8);
drivers/media/pci/saa7134/saa7134-vbi.c
49
saa_writeb(SAA7134_VBI_V_START1(task), norm->vbi_v_start_0 & 0xff);
drivers/media/pci/saa7134/saa7134-vbi.c
50
saa_writeb(SAA7134_VBI_V_START2(task), norm->vbi_v_start_0 >> 8);
drivers/media/pci/saa7134/saa7134-vbi.c
51
saa_writeb(SAA7134_VBI_V_STOP1(task), norm->vbi_v_stop_0 & 0xff);
drivers/media/pci/saa7134/saa7134-vbi.c
52
saa_writeb(SAA7134_VBI_V_STOP2(task), norm->vbi_v_stop_0 >> 8);
drivers/media/pci/saa7134/saa7134-vbi.c
54
saa_writeb(SAA7134_VBI_H_SCALE_INC1(task), VBI_SCALE & 0xff);
drivers/media/pci/saa7134/saa7134-vbi.c
55
saa_writeb(SAA7134_VBI_H_SCALE_INC2(task), VBI_SCALE >> 8);
drivers/media/pci/saa7134/saa7134-vbi.c
56
saa_writeb(SAA7134_VBI_PHASE_OFFSET_LUMA(task), 0x00);
drivers/media/pci/saa7134/saa7134-vbi.c
57
saa_writeb(SAA7134_VBI_PHASE_OFFSET_CHROMA(task), 0x00);
drivers/media/pci/saa7134/saa7134-vbi.c
59
saa_writeb(SAA7134_VBI_H_LEN1(task), dev->vbi_hlen & 0xff);
drivers/media/pci/saa7134/saa7134-vbi.c
60
saa_writeb(SAA7134_VBI_H_LEN2(task), dev->vbi_hlen >> 8);
drivers/media/pci/saa7134/saa7134-vbi.c
61
saa_writeb(SAA7134_VBI_V_LEN1(task), dev->vbi_vlen & 0xff);
drivers/media/pci/saa7134/saa7134-vbi.c
62
saa_writeb(SAA7134_VBI_V_LEN2(task), dev->vbi_vlen >> 8);
drivers/media/pci/saa7134/saa7134-vbi.c
64
saa_andorb(SAA7134_DATA_PATH(task), 0xc0, 0x00);
drivers/media/pci/saa7134/saa7134-video.c
461
static void set_h_prescale(struct saa7134_dev *dev, int task, int prescale)
drivers/media/pci/saa7134/saa7134-video.c
491
saa_writeb(SAA7134_H_PRESCALE(task), vals[i].xpsc);
drivers/media/pci/saa7134/saa7134-video.c
492
saa_writeb(SAA7134_ACC_LENGTH(task), vals[i].xacl);
drivers/media/pci/saa7134/saa7134-video.c
493
saa_writeb(SAA7134_LEVEL_CTRL(task),
drivers/media/pci/saa7134/saa7134-video.c
495
saa_andorb(SAA7134_FIR_PREFILTER_CTRL(task), 0x0f,
drivers/media/pci/saa7134/saa7134-video.c
499
static void set_v_scale(struct saa7134_dev *dev, int task, int yscale)
drivers/media/pci/saa7134/saa7134-video.c
503
saa_writeb(SAA7134_V_SCALE_RATIO1(task), yscale & 0xff);
drivers/media/pci/saa7134/saa7134-video.c
504
saa_writeb(SAA7134_V_SCALE_RATIO2(task), yscale >> 8);
drivers/media/pci/saa7134/saa7134-video.c
510
saa_writeb(SAA7134_V_FILTER(task), 0x00 | mirror);
drivers/media/pci/saa7134/saa7134-video.c
511
saa_writeb(SAA7134_LUMA_CONTRAST(task), 0x40);
drivers/media/pci/saa7134/saa7134-video.c
512
saa_writeb(SAA7134_CHROMA_SATURATION(task), 0x40);
drivers/media/pci/saa7134/saa7134-video.c
517
saa_writeb(SAA7134_V_FILTER(task), 0x01 | mirror);
drivers/media/pci/saa7134/saa7134-video.c
518
saa_writeb(SAA7134_LUMA_CONTRAST(task), val);
drivers/media/pci/saa7134/saa7134-video.c
519
saa_writeb(SAA7134_CHROMA_SATURATION(task), val);
drivers/media/pci/saa7134/saa7134-video.c
521
saa_writeb(SAA7134_LUMA_BRIGHT(task), 0x80);
drivers/media/pci/saa7134/saa7134-video.c
524
static void set_size(struct saa7134_dev *dev, int task,
drivers/media/pci/saa7134/saa7134-video.c
537
saa_writeb(SAA7134_VIDEO_H_START1(task), h_start & 0xff);
drivers/media/pci/saa7134/saa7134-video.c
538
saa_writeb(SAA7134_VIDEO_H_START2(task), h_start >> 8);
drivers/media/pci/saa7134/saa7134-video.c
539
saa_writeb(SAA7134_VIDEO_H_STOP1(task), h_stop & 0xff);
drivers/media/pci/saa7134/saa7134-video.c
540
saa_writeb(SAA7134_VIDEO_H_STOP2(task), h_stop >> 8);
drivers/media/pci/saa7134/saa7134-video.c
541
saa_writeb(SAA7134_VIDEO_V_START1(task), v_start & 0xff);
drivers/media/pci/saa7134/saa7134-video.c
542
saa_writeb(SAA7134_VIDEO_V_START2(task), v_start >> 8);
drivers/media/pci/saa7134/saa7134-video.c
543
saa_writeb(SAA7134_VIDEO_V_STOP1(task), v_stop & 0xff);
drivers/media/pci/saa7134/saa7134-video.c
544
saa_writeb(SAA7134_VIDEO_V_STOP2(task), v_stop >> 8);
drivers/media/pci/saa7134/saa7134-video.c
553
set_h_prescale(dev,task,prescale);
drivers/media/pci/saa7134/saa7134-video.c
554
saa_writeb(SAA7134_H_SCALE_INC1(task), xscale & 0xff);
drivers/media/pci/saa7134/saa7134-video.c
555
saa_writeb(SAA7134_H_SCALE_INC2(task), xscale >> 8);
drivers/media/pci/saa7134/saa7134-video.c
556
set_v_scale(dev,task,yscale);
drivers/media/pci/saa7134/saa7134-video.c
558
saa_writeb(SAA7134_VIDEO_PIXELS1(task), width & 0xff);
drivers/media/pci/saa7134/saa7134-video.c
559
saa_writeb(SAA7134_VIDEO_PIXELS2(task), width >> 8);
drivers/media/pci/saa7134/saa7134-video.c
560
saa_writeb(SAA7134_VIDEO_LINES1(task), height/div & 0xff);
drivers/media/pci/saa7134/saa7134-video.c
561
saa_writeb(SAA7134_VIDEO_LINES2(task), height/div >> 8);
drivers/media/pci/saa7134/saa7134-video.c
566
saa_writeb(SAA7134_V_PHASE_OFFSET0(task), y_odd);
drivers/media/pci/saa7134/saa7134-video.c
567
saa_writeb(SAA7134_V_PHASE_OFFSET1(task), y_even);
drivers/media/pci/saa7134/saa7134-video.c
568
saa_writeb(SAA7134_V_PHASE_OFFSET2(task), y_odd);
drivers/media/pci/saa7134/saa7134-video.c
569
saa_writeb(SAA7134_V_PHASE_OFFSET3(task), y_even);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
100
task.config = ctx->mdp_dev->vpu.config;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
101
task.param = &param;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
102
task.composes[0] = &frame->compose;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
103
task.cmdq_cb = NULL;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
104
task.cb_data = NULL;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
105
task.mdp_ctx = ctx;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
119
ret = mdp_cmdq_send(ctx->mdp_dev, &task);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
67
struct mdp_cmdq_param task = {};
drivers/media/platform/st/sti/hva/hva-h264.c
1000
hva_mem_free(pctx, ctx->task);
drivers/media/platform/st/sti/hva/hva-h264.c
1011
struct hva_h264_task *task = ctx->task->vaddr;
drivers/media/platform/st/sti/hva/hva-h264.c
1015
ret = hva_h264_prepare_task(pctx, task, frame, stream);
drivers/media/platform/st/sti/hva/hva-h264.c
1019
ret = hva_hw_execute_task(pctx, H264_ENC, ctx->task);
drivers/media/platform/st/sti/hva/hva-h264.c
1024
stream->bytesused += hva_h264_get_stream_size(task);
drivers/media/platform/st/sti/hva/hva-h264.c
1026
stuffing_bytes = hva_h264_get_stuffing_bytes(task);
drivers/media/platform/st/sti/hva/hva-h264.c
412
struct hva_buffer *task;
drivers/media/platform/st/sti/hva/hva-h264.c
588
struct hva_h264_task *task,
drivers/media/platform/st/sti/hva/hva-h264.c
598
struct hva_h264_td *td = &task->td;
drivers/media/platform/st/sti/hva/hva-h264.c
781
td->addr_param_out = (u32)ctx->task->paddr +
drivers/media/platform/st/sti/hva/hva-h264.c
875
static unsigned int hva_h264_get_stream_size(struct hva_h264_task *task)
drivers/media/platform/st/sti/hva/hva-h264.c
877
struct hva_h264_po *po = &task->po;
drivers/media/platform/st/sti/hva/hva-h264.c
882
static u32 hva_h264_get_stuffing_bytes(struct hva_h264_task *task)
drivers/media/platform/st/sti/hva/hva-h264.c
884
struct hva_h264_po *po = &task->po;
drivers/media/platform/st/sti/hva/hva-h264.c
960
&ctx->task);
drivers/media/platform/st/sti/hva/hva-h264.c
999
if (ctx->task)
drivers/media/platform/st/sti/hva/hva-hw.c
455
struct hva_buffer *task)
drivers/media/platform/st/sti/hva/hva-hw.c
508
ctx->name, __func__, cmd + (client_id << 8), &task->paddr);
drivers/media/platform/st/sti/hva/hva-hw.c
510
writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD);
drivers/media/platform/st/sti/hva/hva-hw.h
40
struct hva_buffer *task);
drivers/misc/ntsync.c
105
struct task_struct *task;
drivers/misc/ntsync.c
299
wake_up_process(q->task);
drivers/misc/ntsync.c
335
wake_up_process(q->task);
drivers/misc/ntsync.c
362
wake_up_process(q->task);
drivers/misc/ntsync.c
384
wake_up_process(q->task);
drivers/misc/ntsync.c
890
q->task = current;
drivers/mmc/core/core.c
758
struct task_struct *task)
drivers/mmc/core/core.c
761
(!ctx && task && host->claimer->task == task);
drivers/mmc/core/core.c
766
struct task_struct *task)
drivers/mmc/core/core.c
774
if (task)
drivers/mmc/core/core.c
775
host->claimer->task = task;
drivers/mmc/core/core.c
793
struct task_struct *task = ctx ? NULL : current;
drivers/mmc/core/core.c
806
if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
drivers/mmc/core/core.c
815
mmc_ctx_set_claimer(host, ctx, task);
drivers/mmc/core/core.c
850
host->claimer->task = NULL;
drivers/most/core.c
813
struct task_struct *task =
drivers/most/core.c
817
if (IS_ERR(task))
drivers/most/core.c
818
return PTR_ERR(task);
drivers/most/core.c
820
c->hdm_enqueue_task = task;
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
139
#define DECLARE_TASK_FUNC(task, task_param) \
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
140
static void task(struct work_struct *task_param)
drivers/net/ethernet/hisilicon/hisi_femac.c
304
int work_done = 0, task = budget;
drivers/net/ethernet/hisilicon/hisi_femac.c
309
num = hisi_femac_rx(dev, task);
drivers/net/ethernet/hisilicon/hisi_femac.c
311
task -= num;
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
646
int work_done = 0, task = budget;
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
651
num = hix5hd2_rx(dev, task);
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
653
task -= num;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
494
static void sq_prepare_task(struct hinic_sq_task *task)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
496
task->pkt_info0 = 0;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
497
task->pkt_info1 = 0;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
498
task->pkt_info2 = 0;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
500
task->ufo_v6_identify = 0;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
502
task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
504
task->zero_pad = 0;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
507
void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
509
task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
512
void hinic_task_set_outter_l3(struct hinic_sq_task *task,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
516
task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
520
void hinic_task_set_inner_l3(struct hinic_sq_task *task,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
524
task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
525
task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
528
void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
532
task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
536
void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
549
task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
550
task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
560
void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
571
task->ufo_v6_identify = ip_ident;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
573
task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
574
task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso || ufo, TSO_FLAG);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
575
task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
601
sq_prepare_task(&sq_wqe->task);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
733
*wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
153
void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
155
void hinic_task_set_outter_l3(struct hinic_sq_task *task,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
159
void hinic_task_set_inner_l3(struct hinic_sq_task *task,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
163
void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
167
void hinic_set_cs_inner_l4(struct hinic_sq_task *task,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
172
void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
384
struct hinic_sq_task task;
drivers/net/ethernet/huawei/hinic/hinic_tx.c
273
static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
drivers/net/ethernet/huawei/hinic/hinic_tx.c
307
hinic_task_set_outter_l3(task, l3_offload,
drivers/net/ethernet/huawei/hinic/hinic_tx.c
319
hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
drivers/net/ethernet/huawei/hinic/hinic_tx.c
338
hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);
drivers/net/ethernet/huawei/hinic/hinic_tx.c
347
hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
drivers/net/ethernet/huawei/hinic/hinic_tx.c
353
static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
drivers/net/ethernet/huawei/hinic/hinic_tx.c
393
hinic_task_set_outter_l3(task, l3_type,
drivers/net/ethernet/huawei/hinic/hinic_tx.c
419
hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
drivers/net/ethernet/huawei/hinic/hinic_tx.c
429
hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
drivers/net/ethernet/huawei/hinic/hinic_tx.c
434
hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);
drivers/net/ethernet/huawei/hinic/hinic_tx.c
439
static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
drivers/net/ethernet/huawei/hinic/hinic_tx.c
442
task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
drivers/net/ethernet/huawei/hinic/hinic_tx.c
448
static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
drivers/net/ethernet/huawei/hinic/hinic_tx.c
455
enabled = offload_tso(task, queue_info, skb);
drivers/net/ethernet/huawei/hinic/hinic_tx.c
459
enabled = offload_csum(task, queue_info, skb);
drivers/net/ethernet/huawei/hinic/hinic_tx.c
468
offload_vlan(task, queue_info, vlan_tag,
drivers/net/ethernet/huawei/hinic/hinic_tx.c
474
hinic_task_set_l2hdr(task, skb_network_offset(skb));
drivers/net/ethernet/huawei/hinic/hinic_tx.c
619
err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
229
static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task,
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
239
task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
269
task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L4_EN));
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
298
static void hinic3_set_tso_info(struct hinic3_sq_task *task, __le32 *queue_info,
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
304
task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
308
task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
313
task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L3_EN));
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
329
static int hinic3_tso(struct hinic3_sq_task *task, __le32 *queue_info,
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
350
task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L3_EN));
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
351
task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
359
task->pkt_info0 |=
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
377
hinic3_set_tso_info(task, queue_info, l4_offload, offset,
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
383
static void hinic3_set_vlan_tx_offload(struct hinic3_sq_task *task,
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
390
task->vlan_offload =
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
396
static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task,
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
402
task->pkt_info0 = 0;
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
403
task->ip_identify = 0;
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
404
task->rsvd = 0;
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
405
task->vlan_offload = 0;
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
407
tso_cs_en = hinic3_tso(task, queue_info, skb);
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
414
tso_cs_en = hinic3_tx_csum(txq, task, skb);
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
422
hinic3_set_vlan_tx_offload(task, skb_vlan_tag_get(skb),
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
463
wqe_combo->task = hinic3_wq_get_one_wqebb(&txq->sq->wq,
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
532
struct hinic3_sq_task task;
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
563
offload = hinic3_tx_offload(skb, &task, &queue_info, txq);
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
578
*wqe_combo.task = task;
drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
95
struct hinic3_sq_task *task;
drivers/net/ethernet/intel/ice/ice.h
1047
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
drivers/net/ethernet/intel/ice/ice.h
1049
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
drivers/net/ethernet/intel/ice/ice_fw_update.c
300
struct ice_aq_task task = {};
drivers/net/ethernet/intel/ice/ice_fw_update.c
310
ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write);
drivers/net/ethernet/intel/ice/ice_fw_update.c
328
err = ice_aq_wait_for_event(pf, &task, 15 * HZ);
drivers/net/ethernet/intel/ice/ice_fw_update.c
336
desc = &task.event.desc;
drivers/net/ethernet/intel/ice/ice_fw_update.c
490
struct ice_aq_task task = {};
drivers/net/ethernet/intel/ice/ice_fw_update.c
503
ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_erase);
drivers/net/ethernet/intel/ice/ice_fw_update.c
515
err = ice_aq_wait_for_event(pf, &task, ICE_FW_ERASE_TIMEOUT * HZ);
drivers/net/ethernet/intel/ice/ice_fw_update.c
523
desc = &task.event.desc;
drivers/net/ethernet/intel/ice/ice_fw_update.c
575
struct ice_aq_task task = {};
drivers/net/ethernet/intel/ice/ice_fw_update.c
581
ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write_activate);
drivers/net/ethernet/intel/ice/ice_fw_update.c
607
err = ice_aq_wait_for_event(pf, &task, 30 * HZ);
drivers/net/ethernet/intel/ice/ice_fw_update.c
615
completion_retval = le16_to_cpu(task.event.desc.retval);
drivers/net/ethernet/intel/ice/ice_main.c
1272
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
drivers/net/ethernet/intel/ice/ice_main.c
1275
INIT_HLIST_NODE(&task->entry);
drivers/net/ethernet/intel/ice/ice_main.c
1276
task->opcode = opcode;
drivers/net/ethernet/intel/ice/ice_main.c
1277
task->state = ICE_AQ_TASK_WAITING;
drivers/net/ethernet/intel/ice/ice_main.c
1280
hlist_add_head(&task->entry, &pf->aq_wait_list);
drivers/net/ethernet/intel/ice/ice_main.c
1296
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
drivers/net/ethernet/intel/ice/ice_main.c
1299
enum ice_aq_task_state *state = &task->state;
drivers/net/ethernet/intel/ice/ice_main.c
1331
task->opcode);
drivers/net/ethernet/intel/ice/ice_main.c
1334
hlist_del(&task->entry);
drivers/net/ethernet/intel/ice/ice_main.c
1362
struct ice_aq_task *task;
drivers/net/ethernet/intel/ice/ice_main.c
1366
hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
drivers/net/ethernet/intel/ice/ice_main.c
1367
if (task->state != ICE_AQ_TASK_WAITING)
drivers/net/ethernet/intel/ice/ice_main.c
1369
if (task->opcode != opcode)
drivers/net/ethernet/intel/ice/ice_main.c
1372
task_ev = &task->event;
drivers/net/ethernet/intel/ice/ice_main.c
1383
task->state = ICE_AQ_TASK_COMPLETE;
drivers/net/ethernet/intel/ice/ice_main.c
1401
struct ice_aq_task *task;
drivers/net/ethernet/intel/ice/ice_main.c
1404
hlist_for_each_entry(task, &pf->aq_wait_list, entry)
drivers/net/ethernet/intel/ice/ice_main.c
1405
task->state = ICE_AQ_TASK_CANCELED;
drivers/net/ethernet/mellanox/mlx4/cq.c
62
struct mlx4_eq_tasklet *ctx = from_tasklet(ctx, t, task);
drivers/net/ethernet/mellanox/mlx4/cq.c
79
tasklet_schedule(&ctx->task);
drivers/net/ethernet/mellanox/mlx4/cq.c
99
tasklet_schedule(&tasklet_ctx->task);
drivers/net/ethernet/mellanox/mlx4/eq.c
1060
tasklet_setup(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb);
drivers/net/ethernet/mellanox/mlx4/eq.c
1103
tasklet_disable(&eq->tasklet_ctx.task);
drivers/net/ethernet/mellanox/mlx4/mlx4.h
388
struct tasklet_struct task;
drivers/net/ethernet/mellanox/mlx5/core/cq.c
48
struct mlx5_eq_tasklet *ctx = from_tasklet(ctx, t, task);
drivers/net/ethernet/mellanox/mlx5/core/cq.c
66
tasklet_schedule(&ctx->task);
drivers/net/ethernet/mellanox/mlx5/core/cq.c
96
tasklet_schedule(&tasklet_ctx->task);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
1016
tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
drivers/net/ethernet/mellanox/mlx5/core/eq.c
963
tasklet_disable(&eq->tasklet_ctx.task);
drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
15
struct tasklet_struct task;
drivers/net/wireless/mediatek/mt76/mac80211.c
791
sched_set_fifo_low(dev->tx_worker.task);
drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
157
sched_set_fifo_low(mdev->sdio.txrx_worker.task);
drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
185
sched_set_fifo_low(mdev->sdio.txrx_worker.task);
drivers/net/wireless/mediatek/mt76/sdio.c
657
sched_set_fifo_low(sdio->status_worker.task);
drivers/net/wireless/mediatek/mt76/sdio.c
658
sched_set_fifo_low(sdio->net_worker.task);
drivers/net/wireless/mediatek/mt76/sdio.c
659
sched_set_fifo_low(sdio->stat_worker.task);
drivers/net/wireless/mediatek/mt76/usb.c
1113
sched_set_fifo_low(usb->rx_worker.task);
drivers/net/wireless/mediatek/mt76/usb.c
1114
sched_set_fifo_low(usb->status_worker.task);
drivers/net/wireless/mediatek/mt76/util.h
102
kthread_unpark(w->task);
drivers/net/wireless/mediatek/mt76/util.h
108
if (!w->task)
drivers/net/wireless/mediatek/mt76/util.h
111
kthread_stop(w->task);
drivers/net/wireless/mediatek/mt76/util.h
112
w->task = NULL;
drivers/net/wireless/mediatek/mt76/util.h
16
struct task_struct *task;
drivers/net/wireless/mediatek/mt76/util.h
66
w->task = kthread_run(__mt76_worker_fn, w,
drivers/net/wireless/mediatek/mt76/util.h
69
if (IS_ERR(w->task)) {
drivers/net/wireless/mediatek/mt76/util.h
70
ret = PTR_ERR(w->task);
drivers/net/wireless/mediatek/mt76/util.h
71
w->task = NULL;
drivers/net/wireless/mediatek/mt76/util.h
80
if (!w->task)
drivers/net/wireless/mediatek/mt76/util.h
85
wake_up_process(w->task);
drivers/net/wireless/mediatek/mt76/util.h
90
if (!w->task)
drivers/net/wireless/mediatek/mt76/util.h
93
kthread_park(w->task);
drivers/net/wireless/mediatek/mt76/util.h
99
if (!w->task)
drivers/net/wireless/rsi/rsi_common.h
66
thread->task = kthread_run(func_ptr, common, "%s", name);
drivers/net/wireless/rsi/rsi_common.h
67
if (IS_ERR(thread->task))
drivers/net/wireless/rsi/rsi_common.h
68
return (int)PTR_ERR(thread->task);
drivers/net/wireless/rsi/rsi_common.h
78
return kthread_stop(handle->task);
drivers/net/wireless/rsi/rsi_main.h
205
struct task_struct *task;
drivers/net/xen-netback/common.h
191
struct task_struct *task;
drivers/net/xen-netback/interface.c
233
if (queue->task == NULL ||
drivers/net/xen-netback/interface.c
673
if (queue->task) {
drivers/net/xen-netback/interface.c
674
kthread_stop_put(queue->task);
drivers/net/xen-netback/interface.c
675
queue->task = NULL;
drivers/net/xen-netback/interface.c
710
struct task_struct *task;
drivers/net/xen-netback/interface.c
714
BUG_ON(queue->task);
drivers/net/xen-netback/interface.c
730
task = kthread_run(xenvif_kthread_guest_rx, queue,
drivers/net/xen-netback/interface.c
732
if (IS_ERR(task))
drivers/net/xen-netback/interface.c
734
queue->task = task;
drivers/net/xen-netback/interface.c
739
get_task_struct(task);
drivers/net/xen-netback/interface.c
741
task = kthread_run(xenvif_dealloc_kthread, queue,
drivers/net/xen-netback/interface.c
743
if (IS_ERR(task))
drivers/net/xen-netback/interface.c
745
queue->dealloc_task = task;
drivers/net/xen-netback/interface.c
783
err = PTR_ERR(task);
drivers/pci/doe.c
296
struct pci_doe_task *task)
drivers/pci/doe.c
330
length = 2 + DIV_ROUND_UP(task->request_pl_sz, sizeof(__le32));
drivers/pci/doe.c
337
val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->feat.vid) |
drivers/pci/doe.c
338
FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->feat.type);
drivers/pci/doe.c
345
for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++)
drivers/pci/doe.c
347
le32_to_cpu(task->request_pl[i]));
drivers/pci/doe.c
350
remainder = task->request_pl_sz % sizeof(__le32);
drivers/pci/doe.c
353
memcpy(&val, &task->request_pl[i], remainder);
drivers/pci/doe.c
375
static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
drivers/pci/doe.c
385
if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->feat.vid) ||
drivers/pci/doe.c
386
(FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->feat.type)) {
drivers/pci/doe.c
388
doe_mb->cap_offset, task->feat.vid, task->feat.type,
drivers/pci/doe.c
408
received = task->response_pl_sz;
drivers/pci/doe.c
409
payload_length = DIV_ROUND_UP(task->response_pl_sz, sizeof(__le32));
drivers/pci/doe.c
410
remainder = task->response_pl_sz % sizeof(__le32);
drivers/pci/doe.c
427
task->response_pl[i] = cpu_to_le32(val);
drivers/pci/doe.c
434
memcpy(&task->response_pl[i], &val, remainder);
drivers/pci/doe.c
456
static void signal_task_complete(struct pci_doe_task *task, int rv)
drivers/pci/doe.c
458
task->rv = rv;
drivers/pci/doe.c
459
destroy_work_on_stack(&task->work);
drivers/pci/doe.c
460
task->complete(task);
drivers/pci/doe.c
463
static void signal_task_abort(struct pci_doe_task *task, int rv)
drivers/pci/doe.c
465
struct pci_doe_mb *doe_mb = task->doe_mb;
drivers/pci/doe.c
477
signal_task_complete(task, rv);
drivers/pci/doe.c
482
struct pci_doe_task *task = container_of(work, struct pci_doe_task,
drivers/pci/doe.c
484
struct pci_doe_mb *doe_mb = task->doe_mb;
drivers/pci/doe.c
492
signal_task_complete(task, -EIO);
drivers/pci/doe.c
497
rc = pci_doe_send_req(doe_mb, task);
drivers/pci/doe.c
509
signal_task_abort(task, rc);
drivers/pci/doe.c
518
signal_task_abort(task, -EIO);
drivers/pci/doe.c
524
signal_task_abort(task, -EIO);
drivers/pci/doe.c
529
signal_task_abort(task, rc);
drivers/pci/doe.c
535
rc = pci_doe_recv_resp(doe_mb, task);
drivers/pci/doe.c
537
signal_task_abort(task, rc);
drivers/pci/doe.c
541
signal_task_complete(task, rc);
drivers/pci/doe.c
544
static void pci_doe_task_complete(struct pci_doe_task *task)
drivers/pci/doe.c
546
complete(task->private);
drivers/pci/doe.c
755
struct pci_doe_task *task)
drivers/pci/doe.c
757
if (!pci_doe_supports_feat(doe_mb, task->feat.vid, task->feat.type))
drivers/pci/doe.c
763
task->doe_mb = doe_mb;
drivers/pci/doe.c
764
INIT_WORK_ONSTACK(&task->work, doe_statemachine_work);
drivers/pci/doe.c
765
queue_work(doe_mb->work_queue, &task->work);
drivers/pci/doe.c
802
struct pci_doe_task task = {
drivers/pci/doe.c
814
rc = pci_doe_submit_task(doe_mb, &task);
drivers/pci/doe.c
820
return task.rv;
drivers/pci/doe.c
92
void (*complete)(struct pci_doe_task *task);
drivers/perf/arm_pmuv3.c
1075
struct task_struct *task, bool sched_in)
drivers/platform/chrome/cros_ec_spi.c
731
sched_set_fifo(ec_spi->high_pri_worker->task);
drivers/pnp/pnpbios/core.c
562
struct task_struct *task;
drivers/pnp/pnpbios/core.c
568
task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd");
drivers/pnp/pnpbios/core.c
569
return PTR_ERR_OR_ZERO(task);
drivers/s390/cio/crw.c
153
struct task_struct *task;
drivers/s390/cio/crw.c
155
task = kthread_run(crw_collect_info, NULL, "kmcheck");
drivers/s390/cio/crw.c
156
if (IS_ERR(task))
drivers/s390/cio/crw.c
157
return PTR_ERR(task);
drivers/scsi/aic94xx/aic94xx.h
56
int asd_execute_task(struct sas_task *task, gfp_t gfp_flags);
drivers/scsi/aic94xx/aic94xx_scb.c
445
struct sas_task *task = a->uldd_task;
drivers/scsi/aic94xx/aic94xx_scb.c
450
if (task) {
drivers/scsi/aic94xx/aic94xx_scb.c
451
failed_dev = task->dev;
drivers/scsi/aic94xx/aic94xx_scb.c
452
sas_task_abort(task);
drivers/scsi/aic94xx/aic94xx_scb.c
471
struct sas_task *task = a->uldd_task;
drivers/scsi/aic94xx/aic94xx_scb.c
473
if (task &&
drivers/scsi/aic94xx/aic94xx_scb.c
474
task->dev == failed_dev &&
drivers/scsi/aic94xx/aic94xx_scb.c
476
sas_task_abort(task);
drivers/scsi/aic94xx/aic94xx_scb.c
497
struct sas_task *task = a->uldd_task;
drivers/scsi/aic94xx/aic94xx_scb.c
499
if (!task)
drivers/scsi/aic94xx/aic94xx_scb.c
501
dev = task->dev;
drivers/scsi/aic94xx/aic94xx_scb.c
505
last_dev_task = task;
drivers/scsi/aic94xx/aic94xx_scb.c
523
struct sas_task *task = a->uldd_task;
drivers/scsi/aic94xx/aic94xx_scb.c
525
if (!task)
drivers/scsi/aic94xx/aic94xx_scb.c
527
dev = task->dev;
drivers/scsi/aic94xx/aic94xx_scb.c
531
sas_task_abort(task);
drivers/scsi/aic94xx/aic94xx_task.c
103
for_each_sg(task->scatter, sc, num_sg, i) {
drivers/scsi/aic94xx/aic94xx_task.c
113
if (sas_protocol_ata(task->task_proto))
drivers/scsi/aic94xx/aic94xx_task.c
114
dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
drivers/scsi/aic94xx/aic94xx_task.c
115
task->num_scatter, task->data_dir);
drivers/scsi/aic94xx/aic94xx_task.c
122
struct sas_task *task = ascb->uldd_task;
drivers/scsi/aic94xx/aic94xx_task.c
124
if (task->data_dir == DMA_NONE)
drivers/scsi/aic94xx/aic94xx_task.c
127
if (task->num_scatter == 0) {
drivers/scsi/aic94xx/aic94xx_task.c
131
task->total_xfer_len, task->data_dir);
drivers/scsi/aic94xx/aic94xx_task.c
136
if (task->task_proto != SAS_PROTOCOL_STP)
drivers/scsi/aic94xx/aic94xx_task.c
137
dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
drivers/scsi/aic94xx/aic94xx_task.c
138
task->num_scatter, task->data_dir);
drivers/scsi/aic94xx/aic94xx_task.c
147
struct sas_task *task = ascb->uldd_task;
drivers/scsi/aic94xx/aic94xx_task.c
148
struct task_status_struct *ts = &task->task_status;
drivers/scsi/aic94xx/aic94xx_task.c
175
if (task->task_proto == SAS_PROTOCOL_SSP) {
drivers/scsi/aic94xx/aic94xx_task.c
181
sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu);
drivers/scsi/aic94xx/aic94xx_task.c
200
struct sas_task *task = ascb->uldd_task;
drivers/scsi/aic94xx/aic94xx_task.c
201
struct task_status_struct *ts = &task->task_status;
drivers/scsi/aic94xx/aic94xx_task.c
311
switch (task->task_proto) {
drivers/scsi/aic94xx/aic94xx_task.c
326
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_task.c
327
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
drivers/scsi/aic94xx/aic94xx_task.c
328
task->task_state_flags |= SAS_TASK_STATE_DONE;
drivers/scsi/aic94xx/aic94xx_task.c
329
if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
drivers/scsi/aic94xx/aic94xx_task.c
331
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_task.c
334
task, opcode, ts->resp, ts->stat);
drivers/scsi/aic94xx/aic94xx_task.c
338
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_task.c
339
task->lldd_task = NULL;
drivers/scsi/aic94xx/aic94xx_task.c
342
task->task_done(task);
drivers/scsi/aic94xx/aic94xx_task.c
348
static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
drivers/scsi/aic94xx/aic94xx_task.c
351
struct domain_device *dev = task->dev;
drivers/scsi/aic94xx/aic94xx_task.c
358
if (unlikely(task->ata_task.device_control_reg_update))
drivers/scsi/aic94xx/aic94xx_task.c
36
static int asd_map_scatterlist(struct sas_task *task,
drivers/scsi/aic94xx/aic94xx_task.c
369
scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/aic94xx/aic94xx_task.c
370
scb->ata_task.fis = task->ata_task.fis;
drivers/scsi/aic94xx/aic94xx_task.c
371
if (likely(!task->ata_task.device_control_reg_update))
drivers/scsi/aic94xx/aic94xx_task.c
375
memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
drivers/scsi/aic94xx/aic94xx_task.c
381
if (likely(!task->ata_task.device_control_reg_update)) {
drivers/scsi/aic94xx/aic94xx_task.c
383
if (task->ata_task.dma_xfer)
drivers/scsi/aic94xx/aic94xx_task.c
385
if (task->ata_task.use_ncq &&
drivers/scsi/aic94xx/aic94xx_task.c
388
flags |= data_dir_flags[task->data_dir];
drivers/scsi/aic94xx/aic94xx_task.c
397
if (likely(!task->ata_task.device_control_reg_update))
drivers/scsi/aic94xx/aic94xx_task.c
398
res = asd_map_scatterlist(task, scb->ata_task.sg_element,
drivers/scsi/aic94xx/aic94xx_task.c
40
struct asd_ascb *ascb = task->lldd_task;
drivers/scsi/aic94xx/aic94xx_task.c
411
static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
drivers/scsi/aic94xx/aic94xx_task.c
415
struct domain_device *dev = task->dev;
drivers/scsi/aic94xx/aic94xx_task.c
418
dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1,
drivers/scsi/aic94xx/aic94xx_task.c
420
dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1,
drivers/scsi/aic94xx/aic94xx_task.c
430
cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
drivers/scsi/aic94xx/aic94xx_task.c
432
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
drivers/scsi/aic94xx/aic94xx_task.c
435
cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
drivers/scsi/aic94xx/aic94xx_task.c
437
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
drivers/scsi/aic94xx/aic94xx_task.c
45
if (task->data_dir == DMA_NONE)
drivers/scsi/aic94xx/aic94xx_task.c
450
struct sas_task *task = a->uldd_task;
drivers/scsi/aic94xx/aic94xx_task.c
452
BUG_ON(!task);
drivers/scsi/aic94xx/aic94xx_task.c
453
dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1,
drivers/scsi/aic94xx/aic94xx_task.c
455
dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1,
drivers/scsi/aic94xx/aic94xx_task.c
461
static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
drivers/scsi/aic94xx/aic94xx_task.c
464
struct domain_device *dev = task->dev;
drivers/scsi/aic94xx/aic94xx_task.c
474
scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/aic94xx/aic94xx_task.c
48
if (task->num_scatter == 0) {
drivers/scsi/aic94xx/aic94xx_task.c
482
memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
drivers/scsi/aic94xx/aic94xx_task.c
483
scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
drivers/scsi/aic94xx/aic94xx_task.c
484
memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd,
drivers/scsi/aic94xx/aic94xx_task.c
485
task->ssp_task.cmd->cmd_len);
drivers/scsi/aic94xx/aic94xx_task.c
49
void *p = task->scatter;
drivers/scsi/aic94xx/aic94xx_task.c
490
scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
drivers/scsi/aic94xx/aic94xx_task.c
494
res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags);
drivers/scsi/aic94xx/aic94xx_task.c
51
task->total_xfer_len,
drivers/scsi/aic94xx/aic94xx_task.c
52
task->data_dir);
drivers/scsi/aic94xx/aic94xx_task.c
521
int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
drivers/scsi/aic94xx/aic94xx_task.c
525
struct sas_task *t = task;
drivers/scsi/aic94xx/aic94xx_task.c
527
struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
drivers/scsi/aic94xx/aic94xx_task.c
57
sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
drivers/scsi/aic94xx/aic94xx_task.c
64
if (sas_protocol_ata(task->task_proto))
drivers/scsi/aic94xx/aic94xx_task.c
65
num_sg = task->num_scatter;
drivers/scsi/aic94xx/aic94xx_task.c
67
num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter,
drivers/scsi/aic94xx/aic94xx_task.c
68
task->num_scatter, task->data_dir);
drivers/scsi/aic94xx/aic94xx_task.c
82
for_each_sg(task->scatter, sc, num_sg, i) {
drivers/scsi/aic94xx/aic94xx_task.c
91
for_each_sg(task->scatter, sc, 2, i) {
drivers/scsi/aic94xx/aic94xx_tmf.c
214
static int asd_clear_nexus_tag(struct sas_task *task)
drivers/scsi/aic94xx/aic94xx_tmf.c
216
struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
drivers/scsi/aic94xx/aic94xx_tmf.c
217
struct asd_ascb *tascb = task->lldd_task;
drivers/scsi/aic94xx/aic94xx_tmf.c
221
memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
drivers/scsi/aic94xx/aic94xx_tmf.c
223
if (task->dev->tproto)
drivers/scsi/aic94xx/aic94xx_tmf.c
225
task->dev->lldd_dev);
drivers/scsi/aic94xx/aic94xx_tmf.c
229
static int asd_clear_nexus_index(struct sas_task *task)
drivers/scsi/aic94xx/aic94xx_tmf.c
231
struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
drivers/scsi/aic94xx/aic94xx_tmf.c
232
struct asd_ascb *tascb = task->lldd_task;
drivers/scsi/aic94xx/aic94xx_tmf.c
236
if (task->dev->tproto)
drivers/scsi/aic94xx/aic94xx_tmf.c
238
task->dev->lldd_dev);
drivers/scsi/aic94xx/aic94xx_tmf.c
324
static int asd_clear_nexus(struct sas_task *task)
drivers/scsi/aic94xx/aic94xx_tmf.c
328
struct asd_ascb *tascb = task->lldd_task;
drivers/scsi/aic94xx/aic94xx_tmf.c
336
res = asd_clear_nexus_tag(task);
drivers/scsi/aic94xx/aic94xx_tmf.c
338
res = asd_clear_nexus_index(task);
drivers/scsi/aic94xx/aic94xx_tmf.c
343
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_tmf.c
346
if (task->task_state_flags & SAS_TASK_STATE_DONE)
drivers/scsi/aic94xx/aic94xx_tmf.c
348
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_tmf.c
386
int asd_abort_task(struct sas_task *task)
drivers/scsi/aic94xx/aic94xx_tmf.c
388
struct asd_ascb *tascb = task->lldd_task;
drivers/scsi/aic94xx/aic94xx_tmf.c
401
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_tmf.c
402
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
drivers/scsi/aic94xx/aic94xx_tmf.c
403
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_tmf.c
405
ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
drivers/scsi/aic94xx/aic94xx_tmf.c
408
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_tmf.c
419
switch (task->task_proto) {
drivers/scsi/aic94xx/aic94xx_tmf.c
426
scb->abort_task.proto_conn_rate |= task->dev->linkrate;
drivers/scsi/aic94xx/aic94xx_tmf.c
434
if (task->task_proto == SAS_PROTOCOL_SSP) {
drivers/scsi/aic94xx/aic94xx_tmf.c
437
task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
drivers/scsi/aic94xx/aic94xx_tmf.c
439
task->dev->port->ha->hashed_sas_addr,
drivers/scsi/aic94xx/aic94xx_tmf.c
443
memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
drivers/scsi/aic94xx/aic94xx_tmf.c
450
(u16)(unsigned long)task->dev->lldd_dev);
drivers/scsi/aic94xx/aic94xx_tmf.c
465
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_tmf.c
466
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
drivers/scsi/aic94xx/aic94xx_tmf.c
467
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_tmf.c
469
ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
drivers/scsi/aic94xx/aic94xx_tmf.c
472
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_tmf.c
478
res = asd_clear_nexus(task);
drivers/scsi/aic94xx/aic94xx_tmf.c
492
res = asd_clear_nexus(task);
drivers/scsi/aic94xx/aic94xx_tmf.c
508
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_tmf.c
511
if (task->task_state_flags & SAS_TASK_STATE_DONE)
drivers/scsi/aic94xx/aic94xx_tmf.c
513
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/aic94xx/aic94xx_tmf.c
528
task->lldd_task = NULL;
drivers/scsi/aic94xx/aic94xx_tmf.c
532
ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
drivers/scsi/aic94xx/aic94xx_tmf.c
537
ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
drivers/scsi/aic94xx/aic94xx_tmf.c
675
int asd_query_task(struct sas_task *task)
drivers/scsi/aic94xx/aic94xx_tmf.c
677
struct asd_ascb *ascb = task->lldd_task;
drivers/scsi/aic94xx/aic94xx_tmf.c
682
return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
drivers/scsi/be2iscsi/be_iscsi.c
1036
beiscsi_conn->task);
drivers/scsi/be2iscsi/be_main.c
1118
struct iscsi_task *task,
drivers/scsi/be2iscsi/be_main.c
1121
struct beiscsi_io_task *io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
1137
if (!task->sc) {
drivers/scsi/be2iscsi/be_main.c
1145
task->sc->result = (DID_OK << 16) | status;
drivers/scsi/be2iscsi/be_main.c
1147
task->sc->result = DID_ERROR << 16;
drivers/scsi/be2iscsi/be_main.c
1154
task->sc->result = DID_ERROR << 16;
drivers/scsi/be2iscsi/be_main.c
1157
scsi_set_resid(task->sc, resid);
drivers/scsi/be2iscsi/be_main.c
1158
if (!status && (scsi_bufflen(task->sc) - resid <
drivers/scsi/be2iscsi/be_main.c
1159
task->sc->underflow))
drivers/scsi/be2iscsi/be_main.c
1160
task->sc->result = DID_ERROR << 16;
drivers/scsi/be2iscsi/be_main.c
1170
memcpy(task->sc->sense_buffer, sense,
drivers/scsi/be2iscsi/be_main.c
1181
iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
drivers/scsi/be2iscsi/be_main.c
1186
struct iscsi_task *task,
drivers/scsi/be2iscsi/be_main.c
1190
struct beiscsi_io_task *io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
1193
hdr = (struct iscsi_logout_rsp *)task->hdr;
drivers/scsi/be2iscsi/be_main.c
1213
struct iscsi_task *task,
drivers/scsi/be2iscsi/be_main.c
1218
struct beiscsi_io_task *io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
1220
hdr = (struct iscsi_tm_rsp *)task->hdr;
drivers/scsi/be2iscsi/be_main.c
1241
struct iscsi_task *task;
drivers/scsi/be2iscsi/be_main.c
1261
task = pwrb_handle->pio_handle;
drivers/scsi/be2iscsi/be_main.c
1262
if (task)
drivers/scsi/be2iscsi/be_main.c
1263
__iscsi_put_task(task);
drivers/scsi/be2iscsi/be_main.c
1269
struct iscsi_task *task,
drivers/scsi/be2iscsi/be_main.c
1274
struct beiscsi_io_task *io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
1276
hdr = (struct iscsi_nopin *)task->hdr;
drivers/scsi/be2iscsi/be_main.c
1350
struct iscsi_task *task;
drivers/scsi/be2iscsi/be_main.c
1366
task = pwrb_handle->pio_handle;
drivers/scsi/be2iscsi/be_main.c
1367
if (!task) {
drivers/scsi/be2iscsi/be_main.c
1371
type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
drivers/scsi/be2iscsi/be_main.c
1376
if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
drivers/scsi/be2iscsi/be_main.c
1378
be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
drivers/scsi/be2iscsi/be_main.c
1380
be_complete_io(beiscsi_conn, task, &csol_cqe);
drivers/scsi/be2iscsi/be_main.c
1384
if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
drivers/scsi/be2iscsi/be_main.c
1385
be_complete_logout(beiscsi_conn, task, &csol_cqe);
drivers/scsi/be2iscsi/be_main.c
1387
be_complete_tmf(beiscsi_conn, task, &csol_cqe);
drivers/scsi/be2iscsi/be_main.c
1398
be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
drivers/scsi/be2iscsi/be_main.c
1431
struct iscsi_task *task;
drivers/scsi/be2iscsi/be_main.c
1451
task = conn->login_task;
drivers/scsi/be2iscsi/be_main.c
1452
io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
221
struct iscsi_task *abrt_task = iscsi_cmd(sc)->task;
drivers/scsi/be2iscsi/be_main.c
2277
static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
drivers/scsi/be2iscsi/be_main.c
2280
struct beiscsi_io_task *io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
2291
if (task->data) {
drivers/scsi/be2iscsi/be_main.c
2294
dsp_value = (task->data_count) ? 1 : 0;
drivers/scsi/be2iscsi/be_main.c
2306
task->data,
drivers/scsi/be2iscsi/be_main.c
2307
task->data_count,
drivers/scsi/be2iscsi/be_main.c
2312
io_task->mtask_data_count = task->data_count;
drivers/scsi/be2iscsi/be_main.c
2321
task->data_count);
drivers/scsi/be2iscsi/be_main.c
2337
if (task->data) {
drivers/scsi/be2iscsi/be_main.c
2347
if (task->data) {
drivers/scsi/be2iscsi/be_main.c
281
struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
drivers/scsi/be2iscsi/be_main.c
289
struct iscsi_task *task;
drivers/scsi/be2iscsi/be_main.c
317
task = conn->session->cmds[i];
drivers/scsi/be2iscsi/be_main.c
318
if (!task->sc)
drivers/scsi/be2iscsi/be_main.c
321
if (sc->device->lun != task->sc->device->lun)
drivers/scsi/be2iscsi/be_main.c
333
if (!iscsi_get_task(task)) {
drivers/scsi/be2iscsi/be_main.c
342
io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
354
inv_tbl->task[nents] = task;
drivers/scsi/be2iscsi/be_main.c
380
iscsi_put_task(inv_tbl->task[i]);
drivers/scsi/be2iscsi/be_main.c
4195
struct iscsi_task *task)
drivers/scsi/be2iscsi/be_main.c
4207
io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
4233
static void beiscsi_cleanup_task(struct iscsi_task *task)
drivers/scsi/be2iscsi/be_main.c
4235
struct beiscsi_io_task *io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
4236
struct iscsi_conn *conn = task->conn;
drivers/scsi/be2iscsi/be_main.c
4252
task->hdr = NULL;
drivers/scsi/be2iscsi/be_main.c
4255
if (task->sc) {
drivers/scsi/be2iscsi/be_main.c
4274
beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
drivers/scsi/be2iscsi/be_main.c
4285
struct iscsi_task *task = beiscsi_conn->task;
drivers/scsi/be2iscsi/be_main.c
4286
struct iscsi_session *session = task->conn->session;
drivers/scsi/be2iscsi/be_main.c
4295
beiscsi_cleanup_task(task);
drivers/scsi/be2iscsi/be_main.c
4353
static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
drivers/scsi/be2iscsi/be_main.c
4355
struct beiscsi_io_task *io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
4356
struct iscsi_conn *conn = task->conn;
drivers/scsi/be2iscsi/be_main.c
4371
io_task->libiscsi_itt = (itt_t)task->itt;
drivers/scsi/be2iscsi/be_main.c
4374
task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
drivers/scsi/be2iscsi/be_main.c
4375
task->hdr_max = sizeof(struct be_cmd_bhs);
drivers/scsi/be2iscsi/be_main.c
4379
if (task->sc) {
drivers/scsi/be2iscsi/be_main.c
4403
beiscsi_conn->task = task;
drivers/scsi/be2iscsi/be_main.c
4471
io_task->pwrb_handle->pio_handle = task;
drivers/scsi/be2iscsi/be_main.c
4495
static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
drivers/scsi/be2iscsi/be_main.c
4500
struct beiscsi_io_task *io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
4501
struct iscsi_conn *conn = task->conn;
drivers/scsi/be2iscsi/be_main.c
4531
be32_to_cpu(task->cmdsn));
drivers/scsi/be2iscsi/be_main.c
4556
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
drivers/scsi/be2iscsi/be_main.c
4561
struct beiscsi_io_task *io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
4562
struct iscsi_conn *conn = task->conn;
drivers/scsi/be2iscsi/be_main.c
4591
be32_to_cpu(task->cmdsn));
drivers/scsi/be2iscsi/be_main.c
4617
static int beiscsi_mtask(struct iscsi_task *task)
drivers/scsi/be2iscsi/be_main.c
4619
struct beiscsi_io_task *io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
4620
struct iscsi_conn *conn = task->conn;
drivers/scsi/be2iscsi/be_main.c
4634
be32_to_cpu(task->cmdsn));
drivers/scsi/be2iscsi/be_main.c
4640
task->data_count);
drivers/scsi/be2iscsi/be_main.c
4652
be32_to_cpu(task->cmdsn));
drivers/scsi/be2iscsi/be_main.c
4658
task->data_count);
drivers/scsi/be2iscsi/be_main.c
4671
switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
drivers/scsi/be2iscsi/be_main.c
4675
ret = hwi_write_buffer(pwrb, task);
drivers/scsi/be2iscsi/be_main.c
4678
if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
drivers/scsi/be2iscsi/be_main.c
4695
ret = hwi_write_buffer(pwrb, task);
drivers/scsi/be2iscsi/be_main.c
4699
ret = hwi_write_buffer(pwrb, task);
drivers/scsi/be2iscsi/be_main.c
4703
ret = hwi_write_buffer(pwrb, task);
drivers/scsi/be2iscsi/be_main.c
4707
ret = hwi_write_buffer(pwrb, task);
drivers/scsi/be2iscsi/be_main.c
4713
task->hdr->opcode & ISCSI_OPCODE_MASK);
drivers/scsi/be2iscsi/be_main.c
4735
static int beiscsi_task_xmit(struct iscsi_task *task)
drivers/scsi/be2iscsi/be_main.c
4737
struct beiscsi_io_task *io_task = task->dd_data;
drivers/scsi/be2iscsi/be_main.c
4738
struct scsi_cmnd *sc = task->sc;
drivers/scsi/be2iscsi/be_main.c
4754
task->hdr->exp_statsn = 0;
drivers/scsi/be2iscsi/be_main.c
4757
return beiscsi_mtask(task);
drivers/scsi/be2iscsi/be_main.c
4784
return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
drivers/scsi/be2iscsi/be_main.h
211
struct iscsi_task *task;
drivers/scsi/be2iscsi/be_main.h
417
struct iscsi_task *task;
drivers/scsi/be2iscsi/be_main.h
795
struct iscsi_task *task);
drivers/scsi/bnx2fc/bnx2fc.h
435
struct fcoe_task_ctx_entry *task;
drivers/scsi/bnx2fc/bnx2fc.h
477
struct fcoe_task_ctx_entry *task;
drivers/scsi/bnx2fc/bnx2fc.h
536
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc.h
539
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc.h
543
struct fcoe_task_ctx_entry *task);
drivers/scsi/bnx2fc/bnx2fc.h
545
struct fcoe_task_ctx_entry *task);
drivers/scsi/bnx2fc/bnx2fc.h
555
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc.h
558
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc.h
561
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc.h
564
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc.h
567
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc.h
585
struct fcoe_task_ctx_entry *task);
drivers/scsi/bnx2fc/bnx2fc.h
596
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc_els.c
608
rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
drivers/scsi/bnx2fc/bnx2fc_els.c
648
srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
drivers/scsi/bnx2fc/bnx2fc_els.c
682
struct fcoe_task_ctx_entry *task;
drivers/scsi/bnx2fc/bnx2fc_els.c
776
task = &(task_page[index]);
drivers/scsi/bnx2fc/bnx2fc_els.c
777
bnx2fc_init_mp_task(els_req, task);
drivers/scsi/bnx2fc/bnx2fc_els.c
806
struct fcoe_task_ctx_entry *task, u8 num_rq)
drivers/scsi/bnx2fc/bnx2fc_els.c
840
&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
drivers/scsi/bnx2fc/bnx2fc_els.c
846
task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
drivers/scsi/bnx2fc/bnx2fc_els.c
95
rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2640
work->num_rq, work->task);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
662
work->task);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1017
task = &task_page[index];
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1019
num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1049
num_rq, task);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1059
rq_data_buff, num_rq, task);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1489
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1506
memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1514
task->txwr_rxrd.const_ctx.tx_flags =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1518
task->txwr_rxrd.const_ctx.init_flags = task_type <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1520
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1522
task->rxwr_txrd.const_ctx.init_flags = context_id <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1524
task->rxwr_txrd.const_ctx.init_flags = context_id <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1527
task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1529
task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1530
task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1543
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1545
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1547
task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1549
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1551
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1555
sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1562
memset(&task->rxwr_only.rx_seq_ctx, 0,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1564
task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1565
task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1569
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1576
memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1580
task->txwr_rxrd.const_ctx.init_flags = task_type <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1582
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1585
task->txwr_rxrd.const_ctx.init_flags |=
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1589
task->txwr_rxrd.const_ctx.init_flags |=
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1592
task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1595
task->txwr_rxrd.const_ctx.tx_flags =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1600
task->rxwr_txrd.const_ctx.init_flags = context_id <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1602
task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1607
struct fcoe_task_ctx_entry *task)
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1627
memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1630
io_req->task = task;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1638
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1640
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1642
task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1647
task->txwr_rxrd.const_ctx.init_flags = task_type <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1650
task->txwr_rxrd.const_ctx.init_flags |=
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1654
task->txwr_rxrd.const_ctx.init_flags |=
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1657
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1661
task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1665
task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1668
task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1672
task->rxwr_txrd.const_ctx.init_flags = context_id <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1679
task->rxwr_txrd.var_ctx.rx_id = 0xffff;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1685
hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1693
sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1703
struct fcoe_task_ctx_entry *task)
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1719
memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1722
io_req->task = task;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1731
cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1736
task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1739
task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1742
task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1746
task->txwr_rxrd.const_ctx.init_flags |= 1 <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1749
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1751
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1753
task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1760
task->txwr_rxrd.const_ctx.init_flags |= task_type <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1763
task->txwr_rxrd.const_ctx.init_flags |=
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1769
task->txwr_rxrd.const_ctx.init_flags |=
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1772
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1775
task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1779
task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1787
task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1798
task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1801
task->rxwr_txrd.const_ctx.init_flags = context_id <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1806
task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1809
task->rxwr_txrd.var_ctx.rx_id = 0xffff;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1815
sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1826
task->txwr_rxrd.const_ctx.init_flags |= 1 <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1841
task->txwr_rxrd.const_ctx.init_flags |= 1 <<
drivers/scsi/bnx2fc/bnx2fc_hwi.c
861
struct fcoe_task_ctx_entry *task)
drivers/scsi/bnx2fc/bnx2fc_hwi.c
886
rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
drivers/scsi/bnx2fc/bnx2fc_hwi.c
894
bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
drivers/scsi/bnx2fc/bnx2fc_hwi.c
901
bnx2fc_process_abts_compl(io_req, task, num_rq);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
904
bnx2fc_process_cleanup_compl(io_req, task, num_rq);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
912
bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
927
bnx2fc_process_els_compl(io_req, task, num_rq);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
929
bnx2fc_process_abts_compl(io_req, task, num_rq);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
932
bnx2fc_process_cleanup_compl(io_req, task, num_rq);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
946
bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
972
struct fcoe_task_ctx_entry *task)
drivers/scsi/bnx2fc/bnx2fc_hwi.c
983
work->task = task;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
996
struct fcoe_task_ctx_entry *task;
drivers/scsi/bnx2fc/bnx2fc_io.c
1029
task = &(task_page[index]);
drivers/scsi/bnx2fc/bnx2fc_io.c
1034
bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
drivers/scsi/bnx2fc/bnx2fc_io.c
1286
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc_io.c
1321
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc_io.c
1353
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc_io.c
1399
r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;
drivers/scsi/bnx2fc/bnx2fc_io.c
1523
struct fcoe_task_ctx_entry *task, u8 num_rq,
drivers/scsi/bnx2fc/bnx2fc_io.c
1550
&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
drivers/scsi/bnx2fc/bnx2fc_io.c
1556
task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
drivers/scsi/bnx2fc/bnx2fc_io.c
1903
struct fcoe_task_ctx_entry *task,
drivers/scsi/bnx2fc/bnx2fc_io.c
1921
bnx2fc_process_cleanup_compl(io_req, task, num_rq);
drivers/scsi/bnx2fc/bnx2fc_io.c
1939
&(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
drivers/scsi/bnx2fc/bnx2fc_io.c
2024
struct fcoe_task_ctx_entry *task;
drivers/scsi/bnx2fc/bnx2fc_io.c
2070
task = &(task_page[index]);
drivers/scsi/bnx2fc/bnx2fc_io.c
2071
bnx2fc_init_task(io_req, task);
drivers/scsi/bnx2fc/bnx2fc_io.c
666
struct fcoe_task_ctx_entry *task;
drivers/scsi/bnx2fc/bnx2fc_io.c
759
task = &(task_page[index]);
drivers/scsi/bnx2fc/bnx2fc_io.c
760
bnx2fc_init_mp_task(io_req, task);
drivers/scsi/bnx2fc/bnx2fc_io.c
822
struct fcoe_task_ctx_entry *task;
drivers/scsi/bnx2fc/bnx2fc_io.c
878
fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);
drivers/scsi/bnx2fc/bnx2fc_io.c
895
task = &(task_page[index]);
drivers/scsi/bnx2fc/bnx2fc_io.c
896
bnx2fc_init_mp_task(abts_io_req, task);
drivers/scsi/bnx2fc/bnx2fc_io.c
929
struct fcoe_task_ctx_entry *task;
drivers/scsi/bnx2fc/bnx2fc_io.c
971
task = &(task_page[index]);
drivers/scsi/bnx2fc/bnx2fc_io.c
979
bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
drivers/scsi/bnx2fc/bnx2fc_io.c
996
struct fcoe_task_ctx_entry *task;
drivers/scsi/bnx2fc/bnx2fc_tgt.c
199
bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
drivers/scsi/bnx2fc/bnx2fc_tgt.c
233
bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
drivers/scsi/bnx2i/bnx2i_hwi.c
1344
struct iscsi_task *task;
drivers/scsi/bnx2i/bnx2i_hwi.c
1350
task = iscsi_itt_to_task(conn,
drivers/scsi/bnx2i/bnx2i_hwi.c
1352
if (!task)
drivers/scsi/bnx2i/bnx2i_hwi.c
1355
bnx2i_cmd = task->dd_data;
drivers/scsi/bnx2i/bnx2i_hwi.c
1382
hdr = (struct iscsi_scsi_rsp *)task->hdr;
drivers/scsi/bnx2i/bnx2i_hwi.c
1438
struct iscsi_task *task;
drivers/scsi/bnx2i/bnx2i_hwi.c
1446
task = iscsi_itt_to_task(conn,
drivers/scsi/bnx2i/bnx2i_hwi.c
1448
if (!task)
drivers/scsi/bnx2i/bnx2i_hwi.c
1462
resp_hdr->itt = task->hdr->itt;
drivers/scsi/bnx2i/bnx2i_hwi.c
1506
struct iscsi_task *task;
drivers/scsi/bnx2i/bnx2i_hwi.c
1514
task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
drivers/scsi/bnx2i/bnx2i_hwi.c
1515
if (!task)
drivers/scsi/bnx2i/bnx2i_hwi.c
1525
resp_hdr->itt = task->hdr->itt;
drivers/scsi/bnx2i/bnx2i_hwi.c
1527
resp_hdr->statsn = task->hdr->exp_statsn;
drivers/scsi/bnx2i/bnx2i_hwi.c
1567
struct iscsi_task *task;
drivers/scsi/bnx2i/bnx2i_hwi.c
1573
task = iscsi_itt_to_task(conn,
drivers/scsi/bnx2i/bnx2i_hwi.c
1575
if (!task)
drivers/scsi/bnx2i/bnx2i_hwi.c
1583
resp_hdr->itt = task->hdr->itt;
drivers/scsi/bnx2i/bnx2i_hwi.c
1606
struct iscsi_task *task;
drivers/scsi/bnx2i/bnx2i_hwi.c
1612
task = iscsi_itt_to_task(conn,
drivers/scsi/bnx2i/bnx2i_hwi.c
1614
if (!task)
drivers/scsi/bnx2i/bnx2i_hwi.c
1623
resp_hdr->itt = task->hdr->itt;
drivers/scsi/bnx2i/bnx2i_hwi.c
1624
resp_hdr->statsn = task->hdr->exp_statsn;
drivers/scsi/bnx2i/bnx2i_hwi.c
1653
struct iscsi_task *task;
drivers/scsi/bnx2i/bnx2i_hwi.c
1657
task = iscsi_itt_to_task(conn,
drivers/scsi/bnx2i/bnx2i_hwi.c
1659
if (task)
drivers/scsi/bnx2i/bnx2i_hwi.c
1660
__iscsi_put_task(task);
drivers/scsi/bnx2i/bnx2i_hwi.c
1693
struct iscsi_task *task;
drivers/scsi/bnx2i/bnx2i_hwi.c
1716
task = iscsi_itt_to_task(conn,
drivers/scsi/bnx2i/bnx2i_hwi.c
1718
if (task) {
drivers/scsi/bnx2i/bnx2i_hwi.c
1720
hdr->itt = task->hdr->itt;
drivers/scsi/bnx2i/bnx2i_hwi.c
1833
struct iscsi_task *task;
drivers/scsi/bnx2i/bnx2i_hwi.c
1837
task = iscsi_itt_to_task(conn,
drivers/scsi/bnx2i/bnx2i_hwi.c
1839
if (!task)
drivers/scsi/bnx2i/bnx2i_hwi.c
1906
struct iscsi_task *task;
drivers/scsi/bnx2i/bnx2i_hwi.c
1911
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
drivers/scsi/bnx2i/bnx2i_hwi.c
1913
if (!task || !task->sc) {
drivers/scsi/bnx2i/bnx2i_hwi.c
1917
sc = task->sc;
drivers/scsi/bnx2i/bnx2i_hwi.c
330
struct iscsi_task *task)
drivers/scsi/bnx2i/bnx2i_hwi.c
336
login_hdr = (struct iscsi_login_req *)task->hdr;
drivers/scsi/bnx2i/bnx2i_hwi.c
348
login_wqe->itt = task->itt |
drivers/scsi/bnx2i/bnx2i_hwi.c
531
struct iscsi_task *task,
drivers/scsi/bnx2i/bnx2i_hwi.c
538
nopout_hdr = (struct iscsi_nopout *)task->hdr;
drivers/scsi/bnx2i/bnx2i_hwi.c
551
nopout_wqe->itt = ((u16)task->itt |
drivers/scsi/bnx2i/bnx2i_hwi.c
589
struct iscsi_task *task)
drivers/scsi/bnx2i/bnx2i_hwi.c
594
logout_hdr = (struct iscsi_logout *)task->hdr;
drivers/scsi/bnx2i/bnx2i_hwi.c
604
logout_wqe->itt = ((u16)task->itt |
drivers/scsi/bnx2i/bnx2i_iscsi.c
100
if (task->imm_count == cmd_len)
drivers/scsi/bnx2i/bnx2i_iscsi.c
103
if (iscsi_task_has_unsol_data(task)) {
drivers/scsi/bnx2i/bnx2i_iscsi.c
1079
static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
drivers/scsi/bnx2i/bnx2i_iscsi.c
108
buffer_offset += task->unsol_r2t.data_length;
drivers/scsi/bnx2i/bnx2i_iscsi.c
1081
struct bnx2i_cmd *cmd = task->dd_data;
drivers/scsi/bnx2i/bnx2i_iscsi.c
1088
switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
drivers/scsi/bnx2i/bnx2i_iscsi.c
1090
bnx2i_send_iscsi_login(bnx2i_conn, task);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1096
rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
drivers/scsi/bnx2i/bnx2i_iscsi.c
1099
rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
drivers/scsi/bnx2i/bnx2i_iscsi.c
1103
rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1106
rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1109
rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1114
task->hdr->opcode);
drivers/scsi/bnx2i/bnx2i_iscsi.c
114
if ((start_bd_offset > task->conn->session->first_burst) ||
drivers/scsi/bnx2i/bnx2i_iscsi.c
1156
static void bnx2i_cleanup_task(struct iscsi_task *task)
drivers/scsi/bnx2i/bnx2i_iscsi.c
1158
struct iscsi_conn *conn = task->conn;
drivers/scsi/bnx2i/bnx2i_iscsi.c
1165
if (!task->sc || task->state == ISCSI_TASK_PENDING)
drivers/scsi/bnx2i/bnx2i_iscsi.c
1170
if (task->state == ISCSI_TASK_ABRT_TMF) {
drivers/scsi/bnx2i/bnx2i_iscsi.c
1171
bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1178
bnx2i_iscsi_unmap_sg_list(task->dd_data);
drivers/scsi/bnx2i/bnx2i_iscsi.c
118
iscsi_conn_printk(KERN_ALERT, task->conn,
drivers/scsi/bnx2i/bnx2i_iscsi.c
1187
bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
drivers/scsi/bnx2i/bnx2i_iscsi.c
1191
struct bnx2i_cmd *cmd = task->dd_data;
drivers/scsi/bnx2i/bnx2i_iscsi.c
1196
bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
drivers/scsi/bnx2i/bnx2i_iscsi.c
1200
ADD_STATS_64(hba, tx_bytes, task->data_count);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1202
if (task->data_count) {
drivers/scsi/bnx2i/bnx2i_iscsi.c
1203
memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
drivers/scsi/bnx2i/bnx2i_iscsi.c
1204
task->data_count);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1206
bnx2i_conn->gen_pdu.req_buf + task->data_count;
drivers/scsi/bnx2i/bnx2i_iscsi.c
1210
return bnx2i_iscsi_send_generic_request(task);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1219
static int bnx2i_task_xmit(struct iscsi_task *task)
drivers/scsi/bnx2i/bnx2i_iscsi.c
1221
struct iscsi_conn *conn = task->conn;
drivers/scsi/bnx2i/bnx2i_iscsi.c
1226
struct scsi_cmnd *sc = task->sc;
drivers/scsi/bnx2i/bnx2i_iscsi.c
1227
struct bnx2i_cmd *cmd = task->dd_data;
drivers/scsi/bnx2i/bnx2i_iscsi.c
1228
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
drivers/scsi/bnx2i/bnx2i_iscsi.c
1238
return bnx2i_mtask_xmit(conn, task);
drivers/scsi/bnx2i/bnx2i_iscsi.c
124
iscsi_conn_printk(KERN_ALERT, task->conn,
drivers/scsi/bnx2i/bnx2i_iscsi.c
1253
cmd->req.itt = task->itt |
drivers/scsi/bnx2i/bnx2i_iscsi.c
1255
bnx2i_setup_write_cmd_bd_info(task);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1259
cmd->req.itt = task->itt |
drivers/scsi/bnx2i/bnx2i_iscsi.c
473
struct iscsi_task *task = session->cmds[i];
drivers/scsi/bnx2i/bnx2i_iscsi.c
474
struct bnx2i_cmd *cmd = task->dd_data;
drivers/scsi/bnx2i/bnx2i_iscsi.c
498
struct iscsi_task *task = session->cmds[i];
drivers/scsi/bnx2i/bnx2i_iscsi.c
499
struct bnx2i_cmd *cmd = task->dd_data;
drivers/scsi/bnx2i/bnx2i_iscsi.c
501
task->hdr = &cmd->hdr;
drivers/scsi/bnx2i/bnx2i_iscsi.c
502
task->hdr_max = sizeof(struct iscsi_hdr);
drivers/scsi/bnx2i/bnx2i_iscsi.c
84
static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
drivers/scsi/bnx2i/bnx2i_iscsi.c
86
struct bnx2i_cmd *cmd = task->dd_data;
drivers/scsi/bnx2i/bnx2i_iscsi.c
95
if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
drivers/scsi/bnx2i/bnx2i_iscsi.c
99
buffer_offset += task->imm_count;
drivers/scsi/cxgbi/libcxgbi.c
1351
static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
drivers/scsi/cxgbi/libcxgbi.c
1353
struct scsi_cmnd *sc = task->sc;
drivers/scsi/cxgbi/libcxgbi.c
1354
struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
drivers/scsi/cxgbi/libcxgbi.c
1362
cdev, task, tag);
drivers/scsi/cxgbi/libcxgbi.c
1365
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
drivers/scsi/cxgbi/libcxgbi.c
1382
static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
drivers/scsi/cxgbi/libcxgbi.c
1384
struct scsi_cmnd *sc = task->sc;
drivers/scsi/cxgbi/libcxgbi.c
1385
struct iscsi_conn *conn = task->conn;
drivers/scsi/cxgbi/libcxgbi.c
1391
u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age);
drivers/scsi/cxgbi/libcxgbi.c
1396
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
drivers/scsi/cxgbi/libcxgbi.c
1407
cconn->cep->csk, task, tdata->dlen,
drivers/scsi/cxgbi/libcxgbi.c
1421
cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
drivers/scsi/cxgbi/libcxgbi.c
1540
struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
drivers/scsi/cxgbi/libcxgbi.c
1543
if (task && task->sc) {
drivers/scsi/cxgbi/libcxgbi.c
1544
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/cxgbi/libcxgbi.c
1812
static void cxgbi_task_data_sgl_check(struct iscsi_task *task)
drivers/scsi/cxgbi/libcxgbi.c
1814
struct scsi_cmnd *sc = task->sc;
drivers/scsi/cxgbi/libcxgbi.c
1815
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
drivers/scsi/cxgbi/libcxgbi.c
1839
cxgbi_task_data_sgl_read(struct iscsi_task *task, u32 offset, u32 count,
drivers/scsi/cxgbi/libcxgbi.c
1842
struct scsi_cmnd *sc = task->sc;
drivers/scsi/cxgbi/libcxgbi.c
1843
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
drivers/scsi/cxgbi/libcxgbi.c
1885
int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 op)
drivers/scsi/cxgbi/libcxgbi.c
1887
struct iscsi_conn *conn = task->conn;
drivers/scsi/cxgbi/libcxgbi.c
1888
struct iscsi_session *session = task->conn->session;
drivers/scsi/cxgbi/libcxgbi.c
1893
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/cxgbi/libcxgbi.c
1894
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
drivers/scsi/cxgbi/libcxgbi.c
1895
struct scsi_cmnd *sc = task->sc;
drivers/scsi/cxgbi/libcxgbi.c
1904
task, tcp_task, tdata);
drivers/scsi/cxgbi/libcxgbi.c
1908
pr_err("task 0x%p, csk gone.\n", task);
drivers/scsi/cxgbi/libcxgbi.c
1915
task->hdr = NULL;
drivers/scsi/cxgbi/libcxgbi.c
1930
if (task->state == ISCSI_TASK_PENDING)
drivers/scsi/cxgbi/libcxgbi.c
1934
cxgbi_task_data_sgl_check(task);
drivers/scsi/cxgbi/libcxgbi.c
1983
err = cxgbi_task_data_sgl_read(task,
drivers/scsi/cxgbi/libcxgbi.c
1990
task, tcp_task, tdata, err, count, dlimit);
drivers/scsi/cxgbi/libcxgbi.c
2006
task, tcp_task, tdata, err);
drivers/scsi/cxgbi/libcxgbi.c
2027
if (task->sc) {
drivers/scsi/cxgbi/libcxgbi.c
2028
task->hdr = (struct iscsi_hdr *)tdata->skb->data;
drivers/scsi/cxgbi/libcxgbi.c
2030
task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
drivers/scsi/cxgbi/libcxgbi.c
2031
if (!task->hdr) {
drivers/scsi/cxgbi/libcxgbi.c
2038
task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX;
drivers/scsi/cxgbi/libcxgbi.c
2045
task_reserve_itt(task, &task->hdr->itt);
drivers/scsi/cxgbi/libcxgbi.c
2049
task, op, tdata->skb, cdev->skb_tx_rsvd, headroom,
drivers/scsi/cxgbi/libcxgbi.c
2050
conn->max_xmit_dlength, be32_to_cpu(task->hdr->itt));
drivers/scsi/cxgbi/libcxgbi.c
2061
cxgbi_prep_iso_info(struct iscsi_task *task, struct sk_buff *skb,
drivers/scsi/cxgbi/libcxgbi.c
2066
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
drivers/scsi/cxgbi/libcxgbi.c
2067
struct iscsi_conn *conn = task->conn;
drivers/scsi/cxgbi/libcxgbi.c
2069
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/cxgbi/libcxgbi.c
2080
if (task->hdr->opcode == ISCSI_OP_SCSI_CMD && session->imm_data_en) {
drivers/scsi/cxgbi/libcxgbi.c
2085
dlength = ntoh24(task->hdr->dlength);
drivers/scsi/cxgbi/libcxgbi.c
2087
hton24(task->hdr->dlength, dlength);
drivers/scsi/cxgbi/libcxgbi.c
2091
if (iscsi_task_has_unsol_data(task))
drivers/scsi/cxgbi/libcxgbi.c
2092
r2t = &task->unsol_r2t;
drivers/scsi/cxgbi/libcxgbi.c
2100
count, tdata->count, num_pdu, task->hdr_len,
drivers/scsi/cxgbi/libcxgbi.c
2111
if (task->hdr->flags & ISCSI_FLAG_CMD_FINAL)
drivers/scsi/cxgbi/libcxgbi.c
2114
task->hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
drivers/scsi/cxgbi/libcxgbi.c
2116
iso_info->op = task->hdr->opcode;
drivers/scsi/cxgbi/libcxgbi.c
2117
iso_info->ahs = task->hdr->hlength;
drivers/scsi/cxgbi/libcxgbi.c
2121
iso_info->len = count + task->hdr_len;
drivers/scsi/cxgbi/libcxgbi.c
2124
cxgbi_skcb_tx_iscsi_hdrlen(skb) = task->hdr_len;
drivers/scsi/cxgbi/libcxgbi.c
2144
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
drivers/scsi/cxgbi/libcxgbi.c
2147
struct iscsi_conn *conn = task->conn;
drivers/scsi/cxgbi/libcxgbi.c
2148
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/cxgbi/libcxgbi.c
2149
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
drivers/scsi/cxgbi/libcxgbi.c
2151
struct scsi_cmnd *sc = task->sc;
drivers/scsi/cxgbi/libcxgbi.c
2160
task, task->sc, tcp_task,
drivers/scsi/cxgbi/libcxgbi.c
2168
task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
drivers/scsi/cxgbi/libcxgbi.c
2169
be32_to_cpu(task->cmdsn), be32_to_cpu(task->hdr->itt), offset, count);
drivers/scsi/cxgbi/libcxgbi.c
2171
skb_put(skb, task->hdr_len);
drivers/scsi/cxgbi/libcxgbi.c
2194
err = cxgbi_task_data_sgl_read(task, offset, count, &dlimit);
drivers/scsi/cxgbi/libcxgbi.c
2197
"dlimit %u, sgl err %d.\n", task, task->sc,
drivers/scsi/cxgbi/libcxgbi.c
2216
char *dst = skb->data + task->hdr_len;
drivers/scsi/cxgbi/libcxgbi.c
2244
pg = virt_to_head_page(task->data);
drivers/scsi/cxgbi/libcxgbi.c
2247
task->data - (char *)page_address(pg),
drivers/scsi/cxgbi/libcxgbi.c
2265
cxgbi_prep_iso_info(task, skb, count);
drivers/scsi/cxgbi/libcxgbi.c
2361
int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
drivers/scsi/cxgbi/libcxgbi.c
2363
struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
drivers/scsi/cxgbi/libcxgbi.c
2365
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/cxgbi/libcxgbi.c
2366
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
drivers/scsi/cxgbi/libcxgbi.c
2376
task, task->sc, tcp_task,
drivers/scsi/cxgbi/libcxgbi.c
2384
"task 0x%p, skb NULL.\n", task);
drivers/scsi/cxgbi/libcxgbi.c
2393
"task 0x%p, csk gone.\n", task);
drivers/scsi/cxgbi/libcxgbi.c
2407
task);
drivers/scsi/cxgbi/libcxgbi.c
2411
if (!task->sc)
drivers/scsi/cxgbi/libcxgbi.c
2412
memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);
drivers/scsi/cxgbi/libcxgbi.c
2419
task, task->sc, err);
drivers/scsi/cxgbi/libcxgbi.c
2421
if (task->conn->hdrdgst_en)
drivers/scsi/cxgbi/libcxgbi.c
2424
if (datalen && task->conn->datadgst_en)
drivers/scsi/cxgbi/libcxgbi.c
2427
task->conn->txdata_octets += pdulen;
drivers/scsi/cxgbi/libcxgbi.c
2444
task, skb, skb->len, skb->data_len, err);
drivers/scsi/cxgbi/libcxgbi.c
2462
task->itt, skb, skb->len, skb->data_len, err);
drivers/scsi/cxgbi/libcxgbi.c
2464
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
drivers/scsi/cxgbi/libcxgbi.c
2465
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
drivers/scsi/cxgbi/libcxgbi.c
2470
void cxgbi_cleanup_task(struct iscsi_task *task)
drivers/scsi/cxgbi/libcxgbi.c
2472
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/cxgbi/libcxgbi.c
2473
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
drivers/scsi/cxgbi/libcxgbi.c
2477
task, task->sc, tcp_task,
drivers/scsi/cxgbi/libcxgbi.c
2484
task, tdata->skb, task->hdr_itt);
drivers/scsi/cxgbi/libcxgbi.c
2488
if (!task->sc)
drivers/scsi/cxgbi/libcxgbi.c
2489
kfree(task->hdr);
drivers/scsi/cxgbi/libcxgbi.c
2490
task->hdr = NULL;
drivers/scsi/cxgbi/libcxgbi.c
2498
task_release_itt(task, task->hdr_itt);
drivers/scsi/cxgbi/libcxgbi.c
2501
iscsi_tcp_cleanup_task(task);
drivers/scsi/cxgbi/libcxgbi.h
555
#define iscsi_task_cxgbi_data(task) \
drivers/scsi/cxgbi/libcxgbi.h
556
((task)->dd_data + sizeof(struct iscsi_tcp_task))
drivers/scsi/cxgbi/libcxgbi.h
605
void cxgbi_cleanup_task(struct iscsi_task *task);
drivers/scsi/esas2r/esas2r.h
696
u8 task;
drivers/scsi/esas2r/esas2r_flash.c
1453
fc->task = FMTSK_ERASE_BOOT;
drivers/scsi/esas2r/esas2r_flash.c
381
switch (fc->task) {
drivers/scsi/esas2r/esas2r_flash.c
388
fc->task = FMTSK_WRTBIOS;
drivers/scsi/esas2r/esas2r_flash.c
402
fc->task = FMTSK_READBIOS;
drivers/scsi/esas2r/esas2r_flash.c
425
fc->task = FMTSK_WRTMAC;
drivers/scsi/esas2r/esas2r_flash.c
437
fc->task = FMTSK_READMAC;
drivers/scsi/esas2r/esas2r_flash.c
460
fc->task = FMTSK_WRTEFI;
drivers/scsi/esas2r/esas2r_flash.c
473
fc->task = FMTSK_READEFI;
drivers/scsi/esas2r/esas2r_flash.c
496
fc->task = FMTSK_WRTCFG;
drivers/scsi/esas2r/esas2r_flash.c
507
fc->task = FMTSK_READCFG;
drivers/scsi/hisi_sas/hisi_sas.h
247
struct sas_task *task;
drivers/scsi/hisi_sas/hisi_sas.h
616
struct ssp_command_iu task;
drivers/scsi/hisi_sas/hisi_sas.h
657
extern u8 hisi_sas_get_ata_protocol(struct sas_task *task);
drivers/scsi/hisi_sas/hisi_sas.h
659
extern void hisi_sas_sata_done(struct sas_task *task,
drivers/scsi/hisi_sas/hisi_sas.h
677
struct sas_task *task,
drivers/scsi/hisi_sas/hisi_sas_main.c
1132
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
drivers/scsi/hisi_sas/hisi_sas_main.c
1135
if (task) {
drivers/scsi/hisi_sas/hisi_sas_main.c
1139
ts = &task->task_status;
drivers/scsi/hisi_sas/hisi_sas_main.c
1143
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_main.c
1144
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
drivers/scsi/hisi_sas/hisi_sas_main.c
1145
if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
drivers/scsi/hisi_sas/hisi_sas_main.c
1146
task->task_state_flags |= SAS_TASK_STATE_DONE;
drivers/scsi/hisi_sas/hisi_sas_main.c
1147
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_main.c
1150
hisi_sas_slot_task_free(hisi_hba, task, slot, need_lock);
drivers/scsi/hisi_sas/hisi_sas_main.c
1161
hisi_sas_do_release_task(hisi_hba, slot->task, slot, false);
drivers/scsi/hisi_sas/hisi_sas_main.c
134
void hisi_sas_sata_done(struct sas_task *task,
drivers/scsi/hisi_sas/hisi_sas_main.c
137
struct task_status_struct *ts = &task->task_status;
drivers/scsi/hisi_sas/hisi_sas_main.c
1685
static int hisi_sas_abort_task(struct sas_task *task)
drivers/scsi/hisi_sas/hisi_sas_main.c
1688
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_main.c
1690
struct hisi_sas_slot *slot = task->lldd_task;
drivers/scsi/hisi_sas/hisi_sas_main.c
1699
hisi_hba = dev_to_hisi_hba(task->dev);
drivers/scsi/hisi_sas/hisi_sas_main.c
1702
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_main.c
1703
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
drivers/scsi/hisi_sas/hisi_sas_main.c
1714
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_main.c
1718
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
drivers/scsi/hisi_sas/hisi_sas_main.c
1719
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_main.c
1724
if (task->task_proto & SAS_PROTOCOL_SSP) {
drivers/scsi/hisi_sas/hisi_sas_main.c
1728
rc = sas_abort_task(task, tag);
drivers/scsi/hisi_sas/hisi_sas_main.c
1744
if (task->lldd_task)
drivers/scsi/hisi_sas/hisi_sas_main.c
1745
hisi_sas_do_release_task(hisi_hba, task, slot, true);
drivers/scsi/hisi_sas/hisi_sas_main.c
1747
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
drivers/scsi/hisi_sas/hisi_sas_main.c
1748
task->task_proto & SAS_PROTOCOL_STP) {
drivers/scsi/hisi_sas/hisi_sas_main.c
1749
if (task->dev->dev_type == SAS_SATA_DEV) {
drivers/scsi/hisi_sas/hisi_sas_main.c
1750
struct ata_queued_cmd *qc = task->uldd_task;
drivers/scsi/hisi_sas/hisi_sas_main.c
1765
hisi_sas_do_release_task(hisi_hba, task, slot, true);
drivers/scsi/hisi_sas/hisi_sas_main.c
1771
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
drivers/scsi/hisi_sas/hisi_sas_main.c
1780
task->lldd_task) {
drivers/scsi/hisi_sas/hisi_sas_main.c
1786
slot->task = NULL;
drivers/scsi/hisi_sas/hisi_sas_main.c
1988
static int hisi_sas_query_task(struct sas_task *task)
drivers/scsi/hisi_sas/hisi_sas_main.c
1992
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
drivers/scsi/hisi_sas/hisi_sas_main.c
1993
struct hisi_sas_slot *slot = task->lldd_task;
drivers/scsi/hisi_sas/hisi_sas_main.c
1996
rc = sas_query_task(task, tag);
drivers/scsi/hisi_sas/hisi_sas_main.c
2012
static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
drivers/scsi/hisi_sas/hisi_sas_main.c
2015
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_main.c
2033
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
drivers/scsi/hisi_sas/hisi_sas_main.c
2037
struct hisi_sas_slot *slot = task->lldd_task;
drivers/scsi/hisi_sas/hisi_sas_main.c
2049
slot->task = NULL;
drivers/scsi/hisi_sas/hisi_sas_main.c
239
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
drivers/scsi/hisi_sas/hisi_sas_main.c
245
if (task) {
drivers/scsi/hisi_sas/hisi_sas_main.c
248
if (!task->lldd_task)
drivers/scsi/hisi_sas/hisi_sas_main.c
251
task->lldd_task = NULL;
drivers/scsi/hisi_sas/hisi_sas_main.c
253
if (!sas_protocol_ata(task->task_proto)) {
drivers/scsi/hisi_sas/hisi_sas_main.c
255
if (task->task_proto & SAS_PROTOCOL_SSP)
drivers/scsi/hisi_sas/hisi_sas_main.c
256
dma_unmap_sg(dev, task->scatter,
drivers/scsi/hisi_sas/hisi_sas_main.c
257
task->num_scatter,
drivers/scsi/hisi_sas/hisi_sas_main.c
258
task->data_dir);
drivers/scsi/hisi_sas/hisi_sas_main.c
260
dma_unmap_sg(dev, &task->smp_task.smp_req,
drivers/scsi/hisi_sas/hisi_sas_main.c
264
struct sas_ssp_task *ssp_task = &task->ssp_task;
drivers/scsi/hisi_sas/hisi_sas_main.c
269
task->data_dir);
drivers/scsi/hisi_sas/hisi_sas_main.c
313
struct sas_task *task, int n_elem)
drivers/scsi/hisi_sas/hisi_sas_main.c
317
if (!sas_protocol_ata(task->task_proto) && n_elem) {
drivers/scsi/hisi_sas/hisi_sas_main.c
318
if (task->num_scatter) {
drivers/scsi/hisi_sas/hisi_sas_main.c
319
dma_unmap_sg(dev, task->scatter, task->num_scatter,
drivers/scsi/hisi_sas/hisi_sas_main.c
320
task->data_dir);
drivers/scsi/hisi_sas/hisi_sas_main.c
321
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
drivers/scsi/hisi_sas/hisi_sas_main.c
322
dma_unmap_sg(dev, &task->smp_task.smp_req,
drivers/scsi/hisi_sas/hisi_sas_main.c
329
struct sas_task *task, int *n_elem)
drivers/scsi/hisi_sas/hisi_sas_main.c
334
if (sas_protocol_ata(task->task_proto)) {
drivers/scsi/hisi_sas/hisi_sas_main.c
335
*n_elem = task->num_scatter;
drivers/scsi/hisi_sas/hisi_sas_main.c
339
if (task->num_scatter) {
drivers/scsi/hisi_sas/hisi_sas_main.c
340
*n_elem = dma_map_sg(dev, task->scatter,
drivers/scsi/hisi_sas/hisi_sas_main.c
341
task->num_scatter, task->data_dir);
drivers/scsi/hisi_sas/hisi_sas_main.c
346
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
drivers/scsi/hisi_sas/hisi_sas_main.c
347
*n_elem = dma_map_sg(dev, &task->smp_task.smp_req,
drivers/scsi/hisi_sas/hisi_sas_main.c
353
req_len = sg_dma_len(&task->smp_task.smp_req);
drivers/scsi/hisi_sas/hisi_sas_main.c
371
hisi_sas_dma_unmap(hisi_hba, task, *n_elem);
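
The hisi_sas_dma_map() fragments above (hisi_sas_main.c 329-371) show a three-way mapping policy: ATA tasks arrive with their scatter list presumably already mapped by libata, SSP tasks map task->scatter, and SMP tasks map the single smp_req entry. A hedged sketch of just that branch structure; the req_len sanity check at line 353 and the full error unwinding are reduced to comments.

#include <linux/dma-mapping.h>
#include <scsi/libsas.h>

static int dma_map_task(struct device *dev, struct sas_task *task, int *n_elem)
{
	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;		/* pre-mapped */
		return 0;
	}

	if (task->num_scatter) {			/* SSP data phase */
		*n_elem = dma_map_sg(dev, task->scatter,
				     task->num_scatter, task->data_dir);
		if (!*n_elem)
			return -ENOMEM;
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		*n_elem = dma_map_sg(dev, &task->smp_task.smp_req,
				     1, DMA_TO_DEVICE);
		if (!*n_elem)
			return -ENOMEM;
		/* the real code then validates sg_dma_len(&smp_req) */
	}
	return 0;
}
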
drivers/scsi/hisi_sas/hisi_sas_main.c
377
struct sas_task *task, int n_elem_dif)
drivers/scsi/hisi_sas/hisi_sas_main.c
382
struct sas_ssp_task *ssp_task = &task->ssp_task;
drivers/scsi/hisi_sas/hisi_sas_main.c
387
task->data_dir);
drivers/scsi/hisi_sas/hisi_sas_main.c
392
int *n_elem_dif, struct sas_task *task)
drivers/scsi/hisi_sas/hisi_sas_main.c
399
if (task->num_scatter) {
drivers/scsi/hisi_sas/hisi_sas_main.c
400
ssp_task = &task->ssp_task;
drivers/scsi/hisi_sas/hisi_sas_main.c
407
task->data_dir);
drivers/scsi/hisi_sas/hisi_sas_main.c
425
scsi_prot_sg_count(scsi_cmnd), task->data_dir);
drivers/scsi/hisi_sas/hisi_sas_main.c
437
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_main.c
458
task->lldd_task = slot;
drivers/scsi/hisi_sas/hisi_sas_main.c
465
switch (task->task_proto) {
drivers/scsi/hisi_sas/hisi_sas_main.c
493
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
drivers/scsi/hisi_sas/hisi_sas_main.c
496
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_main.c
499
bool internal_abort = sas_is_internal_abort(task);
drivers/scsi/hisi_sas/hisi_sas_main.c
509
struct task_status_struct *ts = &task->task_status;
drivers/scsi/hisi_sas/hisi_sas_main.c
518
task->task_done(task);
drivers/scsi/hisi_sas/hisi_sas_main.c
525
switch (task->task_proto) {
drivers/scsi/hisi_sas/hisi_sas_main.c
54
u8 hisi_sas_get_ata_protocol(struct sas_task *task)
drivers/scsi/hisi_sas/hisi_sas_main.c
559
rq = sas_task_find_rq(task);
drivers/scsi/hisi_sas/hisi_sas_main.c
56
struct host_to_dev_fis *fis = &task->ata_task.fis;
drivers/scsi/hisi_sas/hisi_sas_main.c
57
struct ata_queued_cmd *qc = task->uldd_task;
drivers/scsi/hisi_sas/hisi_sas_main.c
58
int direction = task->data_dir;
drivers/scsi/hisi_sas/hisi_sas_main.c
599
dq = &hisi_hba->dq[task->abort_task.qid];
drivers/scsi/hisi_sas/hisi_sas_main.c
603
task->task_proto);
drivers/scsi/hisi_sas/hisi_sas_main.c
607
rc = hisi_sas_dma_map(hisi_hba, task, &n_elem);
drivers/scsi/hisi_sas/hisi_sas_main.c
611
if (!sas_protocol_ata(task->task_proto)) {
drivers/scsi/hisi_sas/hisi_sas_main.c
612
rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
drivers/scsi/hisi_sas/hisi_sas_main.c
628
slot->task = task;
drivers/scsi/hisi_sas/hisi_sas_main.c
631
slot->tmf = task->tmf;
drivers/scsi/hisi_sas/hisi_sas_main.c
632
slot->is_internal = !!task->tmf || internal_abort;
drivers/scsi/hisi_sas/hisi_sas_main.c
640
if (!sas_protocol_ata(task->task_proto))
drivers/scsi/hisi_sas/hisi_sas_main.c
641
hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
drivers/scsi/hisi_sas/hisi_sas_main.c
643
hisi_sas_dma_unmap(hisi_hba, task, n_elem);
drivers/scsi/hisi_sas/hisi_sas_main.c
771
static void hisi_sas_tmf_aborted(struct sas_task *task)
drivers/scsi/hisi_sas/hisi_sas_main.c
773
struct hisi_sas_slot *slot = task->lldd_task;
drivers/scsi/hisi_sas/hisi_sas_main.c
774
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_main.c
786
slot->task = NULL;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1012
prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter,
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1015
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1023
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1025
buf_cmd[9] = task->ssp_task.task_attr;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1026
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1027
task->ssp_task.cmd->cmd_len);
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1046
struct sas_task *task,
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1049
struct task_status_struct *ts = &task->task_status;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1054
switch (task->task_proto) {
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1176
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1190
if (unlikely(!task || !task->lldd_task || !task->dev))
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1193
ts = &task->task_status;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1194
device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1197
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1198
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1199
task->task_state_flags |= SAS_TASK_STATE_DONE;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1200
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1254
slot_err_v1_hw(hisi_hba, task, slot);
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1256
if (dev_is_sata(device) && task->ata_task.use_ncq)
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1259
sas_task_abort(task);
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1266
switch (task->task_proto) {
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1274
sas_ssp_task_response(dev, task, iu);
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1279
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1308
hisi_sas_slot_task_free(hisi_hba, task, slot, true);
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1310
if (task->task_done)
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
1311
task->task_done(task);
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
916
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
918
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
926
sg_req = &task->smp_task.smp_req;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
954
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
956
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
959
struct sas_ssp_task *ssp_task = &task->ssp_task;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1698
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1700
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1708
sg_req = &task->smp_task.smp_req;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1710
req_len = sg_dma_len(&task->smp_task.smp_req);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1737
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1739
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1742
struct sas_ssp_task *ssp_task = &task->ssp_task;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1788
prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1791
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1798
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1800
buf_cmd[9] = task->ssp_task.task_attr;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1801
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
1802
task->ssp_task.cmd->cmd_len);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2015
struct sas_task *task,
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2019
struct task_status_struct *ts = &task->task_status;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2052
switch (task->task_proto) {
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2317
hisi_sas_sata_done(task, slot);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2328
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2342
if (unlikely(!task || !task->lldd_task || !task->dev))
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2345
ts = &task->task_status;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2346
device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2350
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2351
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2352
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2398
slot_err_v2_hw(hisi_hba, task, slot, 1);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2400
slot_err_v2_hw(hisi_hba, task, slot, 2);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2404
slot->idx, task, sas_dev->device_id,
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2411
if (dev_is_sata(device) && task->ata_task.use_ncq)
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2414
sas_task_abort(task);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2421
switch (task->task_proto) {
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2429
sas_ssp_task_response(dev, task, iu);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2434
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2451
hisi_sas_sata_done(task, slot);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2466
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2467
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2468
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2469
dev_info(dev, "slot complete: task(%p) aborted\n", task);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2472
task->task_state_flags |= SAS_TASK_STATE_DONE;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2473
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2474
hisi_sas_slot_task_free(hisi_hba, task, slot, true);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2476
if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2481
task);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2487
if (task->task_done)
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2488
task->task_done(task);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2494
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2495
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2500
struct sas_ata_task *ata_task = &task->ata_task;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2527
switch (task->data_dir) {
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2540
if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2541
(task->ata_task.fis.control & ATA_SRST))
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2544
dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2549
if (task->ata_task.use_ncq) {
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2550
struct ata_queued_cmd *qc = task->uldd_task;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2553
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2565
prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2568
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2574
if (likely(!task->ata_task.device_control_reg_update))
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2575
task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2577
memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2618
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2619
struct sas_internal_abort_task *abort = &task->abort_task;
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
2620
struct domain_device *dev = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1311
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1313
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1316
struct sas_ssp_task *ssp_task = &task->ssp_task;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1362
prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1377
memcpy(buf_cmd, &task->ssp_task.LUN, LUN_SIZE);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1422
len = (task->total_xfer_len >> ilog2_interval) *
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1429
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1435
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1437
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1446
sg_req = &task->smp_task.smp_req;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1476
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1477
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1498
switch (task->data_dir) {
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1511
if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1512
(task->ata_task.fis.control & ATA_SRST))
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1515
dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1518
if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1524
if (task->ata_task.use_ncq) {
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1525
struct ata_queued_cmd *qc = task->uldd_task;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1528
task->ata_task.fis.sector_count |=
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1541
prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1544
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1550
if (likely(!task->ata_task.device_control_reg_update))
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1551
task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1553
memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1559
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1560
struct sas_internal_abort_task *abort = &task->abort_task;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
1561
struct domain_device *dev = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2262
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2265
struct task_status_struct *ts = &task->task_status;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2278
switch (task->task_proto) {
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2305
if (task->ata_task.use_ncq) {
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2306
struct domain_device *device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2326
hisi_sas_sata_done(task, slot);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2340
struct sas_task *task = slot->task;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2354
if (unlikely(!task || !task->lldd_task || !task->dev))
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2357
ts = &task->task_status;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2358
device = task->dev;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2362
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2363
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2364
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2408
if (slot_err_v3_hw(hisi_hba, task, slot)) {
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2411
slot->idx, task, sas_dev->device_id,
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2417
if (dev_is_sata(device) && task->ata_task.use_ncq)
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2420
sas_task_abort(task);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2428
switch (task->task_proto) {
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2434
sas_ssp_task_response(dev, task, iu);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2438
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2454
hisi_sas_sata_done(task, slot);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2468
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2469
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2470
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2471
dev_info(dev, "slot complete: task(%p) aborted\n", task);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2474
task->task_state_flags |= SAS_TASK_STATE_DONE;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2475
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2476
hisi_sas_slot_task_free(hisi_hba, task, slot, true);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2478
if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2483
task);
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2489
if (task->task_done)
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2490
task->task_done(task);
drivers/scsi/isci/host.c
1078
void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
drivers/scsi/isci/host.c
1081
!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
drivers/scsi/isci/host.c
1086
__func__, ireq, task);
drivers/scsi/isci/host.c
1087
task->lldd_task = NULL;
drivers/scsi/isci/host.c
1088
task->task_done(task);
drivers/scsi/isci/host.c
1092
__func__, ireq, task);
drivers/scsi/isci/host.c
1093
if (sas_protocol_ata(task->task_proto))
drivers/scsi/isci/host.c
1094
task->lldd_task = NULL;
drivers/scsi/isci/host.c
1095
sas_task_abort(task);
drivers/scsi/isci/host.c
1098
task->lldd_task = NULL;
drivers/scsi/isci/host.h
478
void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task);
drivers/scsi/isci/remote_device.c
657
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/remote_device.c
671
if (task->ata_task.use_ncq)
drivers/scsi/isci/remote_device.c
681
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/remote_device.c
683
if (task->ata_task.use_ncq) {
drivers/scsi/isci/request.c
116
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
123
if (task->num_scatter > 0) {
drivers/scsi/isci/request.c
124
sg = task->scatter;
drivers/scsi/isci/request.c
1407
struct sas_task *task;
drivers/scsi/isci/request.c
1413
task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
1416
if (task->num_scatter > 0) {
drivers/scsi/isci/request.c
1417
sg = task->scatter;
drivers/scsi/isci/request.c
1431
BUG_ON(task->total_xfer_len < total_len);
drivers/scsi/isci/request.c
1432
memcpy(task->scatter, src_addr, total_len);
drivers/scsi/isci/request.c
154
task->scatter,
drivers/scsi/isci/request.c
155
task->total_xfer_len,
drivers/scsi/isci/request.c
156
task->data_dir);
drivers/scsi/isci/request.c
160
scu_sg->A.length = task->total_xfer_len;
drivers/scsi/isci/request.c
1613
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
1634
if (task->data_dir == DMA_NONE)
drivers/scsi/isci/request.c
1660
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
1667
if (task->data_dir == DMA_TO_DEVICE) {
drivers/scsi/isci/request.c
1680
memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
drivers/scsi/isci/request.c
1690
task_context->transfer_length_bytes = task->total_xfer_len;
drivers/scsi/isci/request.c
174
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
1763
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
1764
struct scatterlist *sg = &task->smp_task.smp_resp;
drivers/scsi/isci/request.c
178
memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
drivers/scsi/isci/request.c
184
cmd_iu->task_attr = task->ssp_task.task_attr;
drivers/scsi/isci/request.c
187
sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
drivers/scsi/isci/request.c
188
(task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
drivers/scsi/isci/request.c
1886
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
1930
if (task->data_dir == DMA_FROM_DEVICE) {
drivers/scsi/isci/request.c
1932
} else if (task->data_dir == DMA_TO_DEVICE) {
drivers/scsi/isci/request.c
194
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
201
memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
drivers/scsi/isci/request.c
2056
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
2060
if (task->data_dir == DMA_NONE) {
drivers/scsi/isci/request.c
2455
struct sas_task *task,
drivers/scsi/isci/request.c
2471
task->task_status.stat = resp_iu->status;
drivers/scsi/isci/request.c
2474
sas_ssp_task_response(dev, task, resp_iu);
drivers/scsi/isci/request.c
2491
struct sas_task *task,
drivers/scsi/isci/request.c
2500
task->task_status.open_rej_reason = open_rej_reason;
drivers/scsi/isci/request.c
2517
struct sas_task *task,
drivers/scsi/isci/request.c
2546
if (task->task_proto == SAS_PROTOCOL_SMP) {
drivers/scsi/isci/request.c
2607
request, task, response_ptr, status_ptr,
drivers/scsi/isci/request.c
2617
request, task, response_ptr, status_ptr,
drivers/scsi/isci/request.c
2624
request, task, response_ptr, status_ptr,
drivers/scsi/isci/request.c
2631
request, task, response_ptr, status_ptr,
drivers/scsi/isci/request.c
2638
request, task, response_ptr, status_ptr,
drivers/scsi/isci/request.c
2645
request, task, response_ptr, status_ptr,
drivers/scsi/isci/request.c
2652
request, task, response_ptr, status_ptr,
drivers/scsi/isci/request.c
2659
request, task, response_ptr, status_ptr,
drivers/scsi/isci/request.c
2666
request, task, response_ptr, status_ptr,
drivers/scsi/isci/request.c
2699
if (task->task_proto == SAS_PROTOCOL_SMP)
drivers/scsi/isci/request.c
2707
static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
drivers/scsi/isci/request.c
2709
struct task_status_struct *ts = &task->task_status;
drivers/scsi/isci/request.c
2729
struct sas_task *task = isci_request_access_task(request);
drivers/scsi/isci/request.c
2739
__func__, request, task, task->data_dir, completion_status);
drivers/scsi/isci/request.c
2749
__func__, request, task);
drivers/scsi/isci/request.c
2751
if (sas_protocol_ata(task->task_proto)) {
drivers/scsi/isci/request.c
2752
isci_process_stp_response(task, &request->stp.rsp);
drivers/scsi/isci/request.c
2753
} else if (SAS_PROTOCOL_SSP == task->task_proto) {
drivers/scsi/isci/request.c
2757
isci_request_process_response_iu(task, resp_iu,
drivers/scsi/isci/request.c
2760
} else if (SAS_PROTOCOL_SMP == task->task_proto) {
drivers/scsi/isci/request.c
2775
response = task->task_status.resp;
drivers/scsi/isci/request.c
2776
status = task->task_status.stat;
drivers/scsi/isci/request.c
2794
task->task_status.residual
drivers/scsi/isci/request.c
2795
= task->total_xfer_len - transferred_length;
drivers/scsi/isci/request.c
2800
if (task->task_status.residual != 0)
drivers/scsi/isci/request.c
2816
__func__, request, task);
drivers/scsi/isci/request.c
2835
task, &response,
drivers/scsi/isci/request.c
2847
spin_lock_irqsave(&task->task_state_lock, task_flags);
drivers/scsi/isci/request.c
2848
task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
drivers/scsi/isci/request.c
2849
spin_unlock_irqrestore(&task->task_state_lock, task_flags);
drivers/scsi/isci/request.c
2889
if (SAS_PROTOCOL_SMP == task->task_proto)
drivers/scsi/isci/request.c
2896
switch (task->task_proto) {
drivers/scsi/isci/request.c
2898
if (task->data_dir == DMA_NONE)
drivers/scsi/isci/request.c
2900
if (task->num_scatter == 0)
drivers/scsi/isci/request.c
2904
task->total_xfer_len, task->data_dir);
drivers/scsi/isci/request.c
2906
dma_unmap_sg(&ihost->pdev->dev, task->scatter,
drivers/scsi/isci/request.c
2907
task->num_scatter, task->data_dir);
drivers/scsi/isci/request.c
2910
struct scatterlist *sg = &task->smp_task.smp_req;
drivers/scsi/isci/request.c
2927
spin_lock_irqsave(&task->task_state_lock, task_flags);
drivers/scsi/isci/request.c
2929
task->task_status.resp = response;
drivers/scsi/isci/request.c
2930
task->task_status.stat = status;
drivers/scsi/isci/request.c
2934
task->task_state_flags |= SAS_TASK_STATE_DONE;
drivers/scsi/isci/request.c
2935
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
drivers/scsi/isci/request.c
2937
spin_unlock_irqrestore(&task->task_state_lock, task_flags);
drivers/scsi/isci/request.c
2948
ireq_done(ihost, request, task);
drivers/scsi/isci/request.c
2956
struct sas_task *task;
drivers/scsi/isci/request.c
2961
task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
drivers/scsi/isci/request.c
2966
if (!task && dev->dev_type == SAS_END_DEVICE) {
drivers/scsi/isci/request.c
2968
} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
drivers/scsi/isci/request.c
2970
} else if (task && sas_protocol_ata(task->task_proto) &&
drivers/scsi/isci/request.c
2971
!task->ata_task.use_ncq) {
drivers/scsi/isci/request.c
2973
task->ata_task.fis.command == ATA_CMD_PACKET) {
drivers/scsi/isci/request.c
2975
} else if (task->data_dir == DMA_NONE) {
drivers/scsi/isci/request.c
2977
} else if (task->ata_task.dma_xfer) {
drivers/scsi/isci/request.c
3143
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
3145
struct ata_queued_cmd *qc = task->uldd_task;
drivers/scsi/isci/request.c
3153
memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
drivers/scsi/isci/request.c
3154
if (!task->ata_task.device_control_reg_update)
drivers/scsi/isci/request.c
3175
struct sas_task *task)
drivers/scsi/isci/request.c
3177
struct scatterlist *sg = &task->smp_task.smp_req;
drivers/scsi/isci/request.c
3305
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
3309
status = sci_io_request_construct_smp(dev, ireq, task);
drivers/scsi/isci/request.c
3334
struct sas_task *task = isci_request_access_task(request);
drivers/scsi/isci/request.c
3342
task->num_scatter);
drivers/scsi/isci/request.c
3348
if (task->num_scatter &&
drivers/scsi/isci/request.c
3349
!sas_protocol_ata(task->task_proto) &&
drivers/scsi/isci/request.c
3350
!(SAS_PROTOCOL_SMP & task->task_proto)) {
drivers/scsi/isci/request.c
3354
task->scatter,
drivers/scsi/isci/request.c
3355
task->num_scatter,
drivers/scsi/isci/request.c
3356
task->data_dir
drivers/scsi/isci/request.c
3372
switch (task->task_proto) {
drivers/scsi/isci/request.c
3407
struct sas_task *task,
drivers/scsi/isci/request.c
3413
ireq->ttype_ptr.io_task_ptr = task;
drivers/scsi/isci/request.c
3415
task->lldd_task = ireq;
drivers/scsi/isci/request.c
3434
struct sas_task *task, struct isci_request *ireq)
drivers/scsi/isci/request.c
3453
if (isci_task_is_ncq_recovery(task)) {
drivers/scsi/isci/request.c
3499
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/isci/request.c
3500
task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
drivers/scsi/isci/request.c
3501
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/isci/request.c
3506
sas_task_abort(task);
drivers/scsi/isci/request.c
639
struct sas_task *task;
drivers/scsi/isci/request.c
650
task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
651
if (task->data_dir == DMA_NONE)
drivers/scsi/isci/request.c
652
task->total_xfer_len = 0;
drivers/scsi/isci/request.c
667
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
682
if (!sas_protocol_ata(task->task_proto)) {
drivers/scsi/isci/request.c
686
task->task_proto);
drivers/scsi/isci/request.c
693
task->ata_task.fis.command == ATA_CMD_PACKET) {
drivers/scsi/isci/request.c
699
if (task->data_dir == DMA_NONE) {
drivers/scsi/isci/request.c
705
if (task->ata_task.use_ncq) {
drivers/scsi/isci/request.c
713
if (task->ata_task.dma_xfer) {
drivers/scsi/isci/request.c
726
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
731
task->data_dir,
drivers/scsi/isci/request.c
732
task->total_xfer_len);
drivers/scsi/isci/request.c
756
struct sas_task *task = isci_request_access_task(ireq);
drivers/scsi/isci/request.c
760
copy = (task->data_dir == DMA_NONE) ? false : true;
drivers/scsi/isci/request.c
763
task->total_xfer_len,
drivers/scsi/isci/request.c
764
task->data_dir,
drivers/scsi/isci/request.h
294
struct sas_task *task, struct isci_request *ireq);
drivers/scsi/isci/request.h
296
struct sas_task *task,
drivers/scsi/isci/request.h
306
static inline int isci_task_is_ncq_recovery(struct sas_task *task)
drivers/scsi/isci/request.h
308
return (sas_protocol_ata(task->task_proto) &&
drivers/scsi/isci/request.h
309
task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
drivers/scsi/isci/request.h
310
task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
drivers/scsi/isci/task.c
101
#define for_each_sas_task(num, task) \
drivers/scsi/isci/task.c
103
task = list_entry(task->list.next, struct sas_task, list))
drivers/scsi/isci/task.c
107
struct sas_task *task)
drivers/scsi/isci/task.c
111
isci_task_is_ncq_recovery(task))
drivers/scsi/isci/task.c
123
int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags)
drivers/scsi/isci/task.c
125
struct isci_host *ihost = dev_to_ihost(task->dev);
drivers/scsi/isci/task.c
133
idev = isci_lookup_device(task->dev);
drivers/scsi/isci/task.c
134
io_ready = isci_device_io_ready(idev, task);
drivers/scsi/isci/task.c
140
task, task->dev, idev, idev ? idev->flags : 0,
drivers/scsi/isci/task.c
141
task->uldd_task);
drivers/scsi/isci/task.c
144
isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
drivers/scsi/isci/task.c
150
isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
drivers/scsi/isci/task.c
154
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/isci/task.c
156
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
drivers/scsi/isci/task.c
158
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/isci/task.c
160
isci_task_refuse(ihost, task,
drivers/scsi/isci/task.c
167
ireq = isci_io_request_from_tag(ihost, task, tag);
drivers/scsi/isci/task.c
168
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/isci/task.c
172
status = isci_request_execute(ihost, idev, task, ireq);
drivers/scsi/isci/task.c
179
isci_task_refuse(ihost, task,
drivers/scsi/isci/task.c
191
isci_task_refuse(ihost, task,
drivers/scsi/isci/task.c
474
int isci_task_abort_task(struct sas_task *task)
drivers/scsi/isci/task.c
476
struct isci_host *ihost = dev_to_ihost(task->dev);
drivers/scsi/isci/task.c
491
spin_lock(&task->task_state_lock);
drivers/scsi/isci/task.c
493
old_request = task->lldd_task;
drivers/scsi/isci/task.c
496
if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
drivers/scsi/isci/task.c
498
idev = isci_get_device(task->dev->lldd_dev);
drivers/scsi/isci/task.c
502
spin_unlock(&task->task_state_lock);
drivers/scsi/isci/task.c
508
(dev_is_sata(task->dev) ? "STP/SATA"
drivers/scsi/isci/task.c
509
: ((dev_is_expander(task->dev->dev_type))
drivers/scsi/isci/task.c
516
task, old_request);
drivers/scsi/isci/task.c
528
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/isci/task.c
529
task->task_state_flags |= SAS_TASK_STATE_DONE;
drivers/scsi/isci/task.c
530
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
drivers/scsi/isci/task.c
531
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/isci/task.c
537
__func__, task);
drivers/scsi/isci/task.c
546
__func__, idev, old_request, task);
drivers/scsi/isci/task.c
552
if (task->task_proto == SAS_PROTOCOL_SMP ||
drivers/scsi/isci/task.c
553
sas_protocol_ata(task->task_proto) ||
drivers/scsi/isci/task.c
567
((task->task_proto == SAS_PROTOCOL_SMP)
drivers/scsi/isci/task.c
569
: (sas_protocol_ata(task->task_proto)
drivers/scsi/isci/task.c
577
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/isci/task.c
578
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
drivers/scsi/isci/task.c
579
task->task_state_flags |= SAS_TASK_STATE_DONE;
drivers/scsi/isci/task.c
580
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/isci/task.c
598
__func__, idev, task, old_request);
drivers/scsi/isci/task.c
650
struct sas_task *task)
drivers/scsi/isci/task.c
653
if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
drivers/scsi/isci/task.c
76
static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
drivers/scsi/isci/task.c
85
__func__, task, response, status);
drivers/scsi/isci/task.c
87
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/isci/task.c
89
task->task_status.resp = response;
drivers/scsi/isci/task.c
90
task->task_status.stat = status;
drivers/scsi/isci/task.c
93
task->task_state_flags |= SAS_TASK_STATE_DONE;
drivers/scsi/isci/task.c
94
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
drivers/scsi/isci/task.c
95
task->lldd_task = NULL;
drivers/scsi/isci/task.c
96
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/isci/task.c
98
task->task_done(task);
drivers/scsi/isci/task.h
135
struct sas_task *task,
drivers/scsi/isci/task.h
139
struct sas_task *task);
drivers/scsi/isci/task.h
150
struct sas_task *task);
drivers/scsi/iscsi_tcp.c
402
static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
drivers/scsi/iscsi_tcp.c
404
struct iscsi_conn *conn = task->conn;
drivers/scsi/iscsi_tcp.c
538
static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
drivers/scsi/iscsi_tcp.c
541
struct iscsi_conn *conn = task->conn;
drivers/scsi/iscsi_tcp.c
544
iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
drivers/scsi/iscsi_tcp.c
549
if (!task->sc)
drivers/scsi/iscsi_tcp.c
550
iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
drivers/scsi/iscsi_tcp.c
552
struct scsi_data_buffer *sdb = &task->sc->sdb;
drivers/scsi/iscsi_tcp.c
566
static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
drivers/scsi/iscsi_tcp.c
568
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/iscsi_tcp.c
570
task->hdr = task->dd_data + sizeof(*tcp_task);
drivers/scsi/iscsi_tcp.c
571
task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
drivers/scsi/libiscsi.c
1019
struct iscsi_task *task;
drivers/scsi/libiscsi.c
1037
task = iscsi_alloc_mgmt_task(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
drivers/scsi/libiscsi.c
1038
if (!task)
drivers/scsi/libiscsi.c
1042
WRITE_ONCE(conn->ping_task, task);
drivers/scsi/libiscsi.c
1044
if (iscsi_send_mgmt_task(task)) {
drivers/scsi/libiscsi.c
1047
iscsi_put_task(task);
drivers/scsi/libiscsi.c
1069
static int iscsi_nop_out_rsp(struct iscsi_task *task,
drivers/scsi/libiscsi.c
1072
struct iscsi_conn *conn = task->conn;
drivers/scsi/libiscsi.c
1075
if (READ_ONCE(conn->ping_task) != task) {
drivers/scsi/libiscsi.c
1085
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
drivers/scsi/libiscsi.c
1145
struct iscsi_task *task;
drivers/scsi/libiscsi.c
1150
task = iscsi_itt_to_task(conn, rejected_pdu.itt);
drivers/scsi/libiscsi.c
1151
if (!task) {
drivers/scsi/libiscsi.c
1157
rc = iscsi_nop_out_rsp(task,
drivers/scsi/libiscsi.c
1217
struct iscsi_task *task;
drivers/scsi/libiscsi.c
1271
task = iscsi_itt_to_ctask(conn, hdr->itt);
drivers/scsi/libiscsi.c
1272
if (!task)
drivers/scsi/libiscsi.c
1274
task->last_xfer = jiffies;
drivers/scsi/libiscsi.c
1286
task = iscsi_itt_to_task(conn, hdr->itt);
drivers/scsi/libiscsi.c
1287
if (!task)
drivers/scsi/libiscsi.c
1296
iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
drivers/scsi/libiscsi.c
1299
iscsi_data_in_rsp(conn, hdr, task);
drivers/scsi/libiscsi.c
1325
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
drivers/scsi/libiscsi.c
1335
rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr,
drivers/scsi/libiscsi.c
1348
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
drivers/scsi/libiscsi.c
1408
struct iscsi_task *task;
drivers/scsi/libiscsi.c
1413
task = iscsi_itt_to_task(conn, itt);
drivers/scsi/libiscsi.c
1414
if (!task || !task->sc)
drivers/scsi/libiscsi.c
1417
if (iscsi_cmd(task->sc)->age != conn->session->age) {
drivers/scsi/libiscsi.c
1420
iscsi_cmd(task->sc)->age, conn->session->age);
drivers/scsi/libiscsi.c
1424
return task;
drivers/scsi/libiscsi.c
146
void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
drivers/scsi/libiscsi.c
149
struct iscsi_conn *conn = task->conn;
drivers/scsi/libiscsi.c
1501
static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
drivers/scsi/libiscsi.c
1506
if (!conn->task) {
drivers/scsi/libiscsi.c
1513
if (!iscsi_get_task(task)) {
drivers/scsi/libiscsi.c
1519
conn->task = NULL;
drivers/scsi/libiscsi.c
152
task->hdr_len = sizeof(struct iscsi_data);
drivers/scsi/libiscsi.c
1527
iscsi_put_task(task);
drivers/scsi/libiscsi.c
1538
conn->task = task;
drivers/scsi/libiscsi.c
1543
rc = conn->session->tt->xmit_task(task);
drivers/scsi/libiscsi.c
1547
task->last_xfer = jiffies;
drivers/scsi/libiscsi.c
1553
iscsi_get_task(task);
drivers/scsi/libiscsi.c
1554
conn->task = task;
drivers/scsi/libiscsi.c
1557
iscsi_put_task(task);
drivers/scsi/libiscsi.c
1567
void iscsi_requeue_task(struct iscsi_task *task)
drivers/scsi/libiscsi.c
1569
struct iscsi_conn *conn = task->conn;
drivers/scsi/libiscsi.c
1576
if (list_empty(&task->running)) {
drivers/scsi/libiscsi.c
1577
list_add_tail(&task->running, &conn->requeue);
drivers/scsi/libiscsi.c
1583
iscsi_put_task(task);
drivers/scsi/libiscsi.c
159
hdr->lun = task->lun;
drivers/scsi/libiscsi.c
160
hdr->itt = task->hdr_itt;
drivers/scsi/libiscsi.c
1601
struct iscsi_task *task;
drivers/scsi/libiscsi.c
1611
if (conn->task) {
drivers/scsi/libiscsi.c
1612
rc = iscsi_xmit_task(conn, conn->task, false);
drivers/scsi/libiscsi.c
1624
task = list_entry(conn->mgmtqueue.next, struct iscsi_task,
drivers/scsi/libiscsi.c
1626
list_del_init(&task->running);
drivers/scsi/libiscsi.c
1627
if (iscsi_prep_mgmt_task(conn, task)) {
drivers/scsi/libiscsi.c
1630
__iscsi_put_task(task);
drivers/scsi/libiscsi.c
1634
rc = iscsi_xmit_task(conn, task, false);
drivers/scsi/libiscsi.c
1647
task = list_entry(conn->requeue.next, struct iscsi_task,
drivers/scsi/libiscsi.c
1650
if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
drivers/scsi/libiscsi.c
1653
list_del_init(&task->running);
drivers/scsi/libiscsi.c
1654
rc = iscsi_xmit_task(conn, task, true);
drivers/scsi/libiscsi.c
1663
task = list_entry(conn->cmdqueue.next, struct iscsi_task,
drivers/scsi/libiscsi.c
1665
list_del_init(&task->running);
drivers/scsi/libiscsi.c
1667
fail_scsi_task(task, DID_IMM_RETRY);
drivers/scsi/libiscsi.c
1670
rc = iscsi_prep_scsi_cmd_pdu(task);
drivers/scsi/libiscsi.c
1673
fail_scsi_task(task, DID_IMM_RETRY);
drivers/scsi/libiscsi.c
1675
fail_scsi_task(task, DID_ABORT);
drivers/scsi/libiscsi.c
1678
rc = iscsi_xmit_task(conn, task, false);
drivers/scsi/libiscsi.c
1716
struct iscsi_task *task;
drivers/scsi/libiscsi.c
1719
(void *) &task, sizeof(void *)))
drivers/scsi/libiscsi.c
1723
iscsi_cmd(sc)->task = task;
drivers/scsi/libiscsi.c
1725
refcount_set(&task->refcount, 1);
drivers/scsi/libiscsi.c
1726
task->state = ISCSI_TASK_PENDING;
drivers/scsi/libiscsi.c
1727
task->conn = conn;
drivers/scsi/libiscsi.c
1728
task->sc = sc;
drivers/scsi/libiscsi.c
1729
task->have_checked_conn = false;
drivers/scsi/libiscsi.c
1730
task->last_timeout = jiffies;
drivers/scsi/libiscsi.c
1731
task->last_xfer = jiffies;
drivers/scsi/libiscsi.c
1732
task->protected = false;
drivers/scsi/libiscsi.c
1733
INIT_LIST_HEAD(&task->running);
drivers/scsi/libiscsi.c
1734
return task;
drivers/scsi/libiscsi.c
1758
struct iscsi_task *task = NULL;
drivers/scsi/libiscsi.c
176
static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
drivers/scsi/libiscsi.c
1761
iscsi_cmd(sc)->task = NULL;
drivers/scsi/libiscsi.c
178
unsigned exp_len = task->hdr_len + len;
drivers/scsi/libiscsi.c
180
if (exp_len > task->hdr_max) {
drivers/scsi/libiscsi.c
1835
task = iscsi_alloc_task(conn, sc);
drivers/scsi/libiscsi.c
1836
if (!task) {
drivers/scsi/libiscsi.c
1842
reason = iscsi_prep_scsi_cmd_pdu(task);
drivers/scsi/libiscsi.c
1852
if (session->tt->xmit_task(task)) {
drivers/scsi/libiscsi.c
1858
list_add_tail(&task->running, &conn->cmdqueue);
drivers/scsi/libiscsi.c
186
task->hdr_len = exp_len;
drivers/scsi/libiscsi.c
1868
iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
drivers/scsi/libiscsi.c
1878
iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
drivers/scsi/libiscsi.c
193
static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
drivers/scsi/libiscsi.c
195
struct scsi_cmnd *cmd = task->sc;
drivers/scsi/libiscsi.c
1967
struct iscsi_task *task;
drivers/scsi/libiscsi.c
1973
task = session->cmds[i];
drivers/scsi/libiscsi.c
1974
if (!task->sc || task->state == ISCSI_TASK_FREE)
drivers/scsi/libiscsi.c
1977
if (lun != -1 && lun != task->sc->device->lun)
drivers/scsi/libiscsi.c
1984
if (!iscsi_get_task(task)) {
drivers/scsi/libiscsi.c
1994
task->sc, task->itt, task->state);
drivers/scsi/libiscsi.c
1995
__fail_scsi_task(task, error);
drivers/scsi/libiscsi.c
1996
__iscsi_put_task(task);
drivers/scsi/libiscsi.c
201
ecdb_ahdr = iscsi_next_hdr(task);
drivers/scsi/libiscsi.c
2079
struct iscsi_task *task = NULL, *running_task;
drivers/scsi/libiscsi.c
209
rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
drivers/scsi/libiscsi.c
2092
task = iscsi_cmd(sc)->task;
drivers/scsi/libiscsi.c
2093
if (!task) {
drivers/scsi/libiscsi.c
2102
if (!iscsi_get_task(task)) {
drivers/scsi/libiscsi.c
2108
task = NULL;
drivers/scsi/libiscsi.c
2150
if (time_after(task->last_xfer, task->last_timeout)) {
drivers/scsi/libiscsi.c
2154
"%lu\n.", task->last_xfer, task->last_timeout);
drivers/scsi/libiscsi.c
2155
task->have_checked_conn = false;
drivers/scsi/libiscsi.c
2174
if (!running_task->sc || running_task == task ||
drivers/scsi/libiscsi.c
2183
task->sc->jiffies_at_alloc))
drivers/scsi/libiscsi.c
2186
if (time_after(running_task->last_xfer, task->last_timeout)) {
drivers/scsi/libiscsi.c
2202
task->last_xfer, running_task->last_xfer,
drivers/scsi/libiscsi.c
2203
task->last_timeout);
drivers/scsi/libiscsi.c
2212
if (task->have_checked_conn)
drivers/scsi/libiscsi.c
222
ISCSI_DBG_SESSION(task->conn->session,
drivers/scsi/libiscsi.c
2220
task->have_checked_conn = true;
drivers/scsi/libiscsi.c
2227
task->have_checked_conn = true;
drivers/scsi/libiscsi.c
2233
if (task) {
drivers/scsi/libiscsi.c
2234
task->last_timeout = jiffies;
drivers/scsi/libiscsi.c
2235
iscsi_put_task(task);
drivers/scsi/libiscsi.c
226
task->hdr_len);
drivers/scsi/libiscsi.c
2334
static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
drivers/scsi/libiscsi.c
2341
hdr->lun = task->lun;
drivers/scsi/libiscsi.c
2342
hdr->rtt = task->hdr_itt;
drivers/scsi/libiscsi.c
2343
hdr->refcmdsn = task->cmdsn;
drivers/scsi/libiscsi.c
2351
struct iscsi_task *task;
drivers/scsi/libiscsi.c
2367
if (!iscsi_cmd(sc)->task) {
drivers/scsi/libiscsi.c
2389
task = iscsi_cmd(sc)->task;
drivers/scsi/libiscsi.c
2390
if (!task || !task->sc) {
drivers/scsi/libiscsi.c
2400
if (!iscsi_get_task(task)) {
drivers/scsi/libiscsi.c
2409
ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
drivers/scsi/libiscsi.c
2416
if (task->state == ISCSI_TASK_PENDING) {
drivers/scsi/libiscsi.c
2417
fail_scsi_task(task, DID_ABORT);
drivers/scsi/libiscsi.c
2427
iscsi_prep_abort_task_pdu(task, hdr);
drivers/scsi/libiscsi.c
243
static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
drivers/scsi/libiscsi.c
2446
fail_scsi_task(task, DID_ABORT);
drivers/scsi/libiscsi.c
245
struct iscsi_session *session = task->conn->session;
drivers/scsi/libiscsi.c
2453
session->running_aborted_task = task;
drivers/scsi/libiscsi.c
2458
if (iscsi_task_is_completed(task)) {
drivers/scsi/libiscsi.c
2476
sc, task->itt);
drivers/scsi/libiscsi.c
2477
iscsi_put_task(task);
drivers/scsi/libiscsi.c
2486
task ? task->itt : 0);
drivers/scsi/libiscsi.c
2493
iscsi_put_task(task);
drivers/scsi/libiscsi.c
261
if (hdr_lun != task->sc->device->lun)
drivers/scsi/libiscsi.c
271
opcode, task->itt, task->hdr_itt);
drivers/scsi/libiscsi.c
281
opcode, task->itt, task->hdr_itt);
drivers/scsi/libiscsi.c
293
task->hdr_itt == tmf->rtt) {
drivers/scsi/libiscsi.c
297
"progress\n", task->itt,
drivers/scsi/libiscsi.c
298
task->hdr_itt);
drivers/scsi/libiscsi.c
3080
struct iscsi_task *task = session->cmds[cmd_i];
drivers/scsi/libiscsi.c
3083
task->dd_data = &task[1];
drivers/scsi/libiscsi.c
3084
task->itt = cmd_i;
drivers/scsi/libiscsi.c
3085
task->state = ISCSI_TASK_FREE;
drivers/scsi/libiscsi.c
3086
INIT_LIST_HEAD(&task->running);
drivers/scsi/libiscsi.c
314
static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
drivers/scsi/libiscsi.c
316
struct iscsi_conn *conn = task->conn;
drivers/scsi/libiscsi.c
318
struct scsi_cmnd *sc = task->sc;
drivers/scsi/libiscsi.c
324
rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD);
drivers/scsi/libiscsi.c
329
rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
drivers/scsi/libiscsi.c
333
hdr = (struct iscsi_scsi_req *)task->hdr;
drivers/scsi/libiscsi.c
3360
struct iscsi_task *task;
drivers/scsi/libiscsi.c
3364
task = conn->session->cmds[i];
drivers/scsi/libiscsi.c
3365
if (task->sc)
drivers/scsi/libiscsi.c
3368
if (task->state == ISCSI_TASK_FREE)
drivers/scsi/libiscsi.c
3373
task->itt, task->state);
drivers/scsi/libiscsi.c
3376
if (cleanup_queued_task(task)) {
drivers/scsi/libiscsi.c
338
hdr->itt = task->hdr_itt = itt;
drivers/scsi/libiscsi.c
3382
if (task->state == ISCSI_TASK_PENDING)
drivers/scsi/libiscsi.c
3384
iscsi_complete_task(task, state);
drivers/scsi/libiscsi.c
340
hdr->itt = task->hdr_itt = build_itt(task->itt,
drivers/scsi/libiscsi.c
341
task->conn->session->age);
drivers/scsi/libiscsi.c
342
task->hdr_len = 0;
drivers/scsi/libiscsi.c
343
rc = iscsi_add_hdr(task, sizeof(*hdr));
drivers/scsi/libiscsi.c
349
task->lun = hdr->lun;
drivers/scsi/libiscsi.c
355
rc = iscsi_prep_ecdb_ahs(task);
drivers/scsi/libiscsi.c
362
task->imm_count = 0;
drivers/scsi/libiscsi.c
364
task->protected = true;
drivers/scsi/libiscsi.c
369
struct iscsi_r2t_info *r2t = &task->unsol_r2t;
drivers/scsi/libiscsi.c
390
task->imm_count = min(session->first_burst,
drivers/scsi/libiscsi.c
393
task->imm_count = min(transfer_length,
drivers/scsi/libiscsi.c
395
hton24(hdr->dlength, task->imm_count);
drivers/scsi/libiscsi.c
402
task->imm_count;
drivers/scsi/libiscsi.c
403
r2t->data_offset = task->imm_count;
drivers/scsi/libiscsi.c
408
if (!task->unsol_r2t.data_length)
drivers/scsi/libiscsi.c
420
hdrlength = task->hdr_len - sizeof(*hdr);
drivers/scsi/libiscsi.c
427
hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
drivers/scsi/libiscsi.c
429
if (session->tt->init_task && session->tt->init_task(task))
drivers/scsi/libiscsi.c
432
task->state = ISCSI_TASK_RUNNING;
drivers/scsi/libiscsi.c
440
task->itt, transfer_length,
drivers/scsi/libiscsi.c
454
static void iscsi_free_task(struct iscsi_task *task)
drivers/scsi/libiscsi.c
456
struct iscsi_conn *conn = task->conn;
drivers/scsi/libiscsi.c
458
struct scsi_cmnd *sc = task->sc;
drivers/scsi/libiscsi.c
459
int oldstate = task->state;
drivers/scsi/libiscsi.c
462
task->itt, task->state, task->sc);
drivers/scsi/libiscsi.c
464
session->tt->cleanup_task(task);
drivers/scsi/libiscsi.c
465
task->state = ISCSI_TASK_FREE;
drivers/scsi/libiscsi.c
466
task->sc = NULL;
drivers/scsi/libiscsi.c
470
if (conn->login_task == task)
drivers/scsi/libiscsi.c
473
kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
drivers/scsi/libiscsi.c
477
iscsi_cmd(sc)->task = NULL;
drivers/scsi/libiscsi.c
487
bool iscsi_get_task(struct iscsi_task *task)
drivers/scsi/libiscsi.c
489
return refcount_inc_not_zero(&task->refcount);
drivers/scsi/libiscsi.c
499
void __iscsi_put_task(struct iscsi_task *task)
drivers/scsi/libiscsi.c
501
if (refcount_dec_and_test(&task->refcount))
drivers/scsi/libiscsi.c
502
iscsi_free_task(task);
drivers/scsi/libiscsi.c
506
void iscsi_put_task(struct iscsi_task *task)
drivers/scsi/libiscsi.c
508
struct iscsi_session *session = task->conn->session;
drivers/scsi/libiscsi.c
510
if (refcount_dec_and_test(&task->refcount)) {
drivers/scsi/libiscsi.c
512
iscsi_free_task(task);
drivers/scsi/libiscsi.c
525
static void iscsi_complete_task(struct iscsi_task *task, int state)
drivers/scsi/libiscsi.c
527
struct iscsi_conn *conn = task->conn;
drivers/scsi/libiscsi.c
531
task->itt, task->state, task->sc);
drivers/scsi/libiscsi.c
532
if (task->state == ISCSI_TASK_COMPLETED ||
drivers/scsi/libiscsi.c
533
task->state == ISCSI_TASK_ABRT_TMF ||
drivers/scsi/libiscsi.c
534
task->state == ISCSI_TASK_ABRT_SESS_RECOV ||
drivers/scsi/libiscsi.c
535
task->state == ISCSI_TASK_REQUEUE_SCSIQ)
drivers/scsi/libiscsi.c
537
WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
drivers/scsi/libiscsi.c
538
task->state = state;
drivers/scsi/libiscsi.c
540
if (READ_ONCE(conn->ping_task) == task)
drivers/scsi/libiscsi.c
544
__iscsi_put_task(task);
drivers/scsi/libiscsi.c
558
void iscsi_complete_scsi_task(struct iscsi_task *task,
drivers/scsi/libiscsi.c
561
struct iscsi_conn *conn = task->conn;
drivers/scsi/libiscsi.c
563
ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt);
drivers/scsi/libiscsi.c
567
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
drivers/scsi/libiscsi.c
574
static bool cleanup_queued_task(struct iscsi_task *task)
drivers/scsi/libiscsi.c
576
struct iscsi_conn *conn = task->conn;
drivers/scsi/libiscsi.c
584
if (task->state == ISCSI_TASK_COMPLETED)
drivers/scsi/libiscsi.c
587
if (!list_empty(&task->running)) {
drivers/scsi/libiscsi.c
588
list_del_init(&task->running);
drivers/scsi/libiscsi.c
593
if (task->state == ISCSI_TASK_RUNNING ||
drivers/scsi/libiscsi.c
594
task->state == ISCSI_TASK_COMPLETED)
drivers/scsi/libiscsi.c
595
__iscsi_put_task(task);
drivers/scsi/libiscsi.c
598
if (conn->session->running_aborted_task == task) {
drivers/scsi/libiscsi.c
600
__iscsi_put_task(task);
drivers/scsi/libiscsi.c
603
if (conn->task == task) {
drivers/scsi/libiscsi.c
604
conn->task = NULL;
drivers/scsi/libiscsi.c
605
__iscsi_put_task(task);
drivers/scsi/libiscsi.c
615
static void __fail_scsi_task(struct iscsi_task *task, int err)
drivers/scsi/libiscsi.c
617
struct iscsi_conn *conn = task->conn;
drivers/scsi/libiscsi.c
621
if (cleanup_queued_task(task))
drivers/scsi/libiscsi.c
624
if (task->state == ISCSI_TASK_PENDING) {
drivers/scsi/libiscsi.c
637
sc = task->sc;
drivers/scsi/libiscsi.c
640
iscsi_complete_task(task, state);
drivers/scsi/libiscsi.c
643
static void fail_scsi_task(struct iscsi_task *task, int err)
drivers/scsi/libiscsi.c
645
struct iscsi_session *session = task->conn->session;
drivers/scsi/libiscsi.c
648
__fail_scsi_task(task, err);
drivers/scsi/libiscsi.c
653
struct iscsi_task *task)
drivers/scsi/libiscsi.c
656
struct iscsi_hdr *hdr = task->hdr;
drivers/scsi/libiscsi.c
685
if (session->tt->init_task && session->tt->init_task(task))
drivers/scsi/libiscsi.c
691
task->state = ISCSI_TASK_RUNNING;
drivers/scsi/libiscsi.c
694
hdr->itt, task->data_count);
drivers/scsi/libiscsi.c
711
struct iscsi_task *task;
drivers/scsi/libiscsi.c
736
task = conn->login_task;
drivers/scsi/libiscsi.c
750
(void*)&task, sizeof(void*)))
drivers/scsi/libiscsi.c
758
refcount_set(&task->refcount, 1);
drivers/scsi/libiscsi.c
759
task->conn = conn;
drivers/scsi/libiscsi.c
760
task->sc = NULL;
drivers/scsi/libiscsi.c
761
INIT_LIST_HEAD(&task->running);
drivers/scsi/libiscsi.c
762
task->state = ISCSI_TASK_PENDING;
drivers/scsi/libiscsi.c
765
memcpy(task->data, data, data_size);
drivers/scsi/libiscsi.c
766
task->data_count = data_size;
drivers/scsi/libiscsi.c
768
task->data_count = 0;
drivers/scsi/libiscsi.c
771
if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
drivers/scsi/libiscsi.c
778
itt = task->hdr->itt;
drivers/scsi/libiscsi.c
779
task->hdr_len = sizeof(struct iscsi_hdr);
drivers/scsi/libiscsi.c
780
memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
drivers/scsi/libiscsi.c
784
task->hdr->itt = itt;
drivers/scsi/libiscsi.c
786
task->hdr->itt = build_itt(task->itt,
drivers/scsi/libiscsi.c
787
task->conn->session->age);
drivers/scsi/libiscsi.c
790
return task;
drivers/scsi/libiscsi.c
793
iscsi_put_task(task);
drivers/scsi/libiscsi.c
804
static int iscsi_send_mgmt_task(struct iscsi_task *task)
drivers/scsi/libiscsi.c
806
struct iscsi_conn *conn = task->conn;
drivers/scsi/libiscsi.c
812
rc = iscsi_prep_mgmt_task(conn, task);
drivers/scsi/libiscsi.c
816
rc = session->tt->xmit_task(task);
drivers/scsi/libiscsi.c
820
list_add_tail(&task->running, &conn->mgmtqueue);
drivers/scsi/libiscsi.c
830
struct iscsi_task *task;
drivers/scsi/libiscsi.c
833
task = iscsi_alloc_mgmt_task(conn, hdr, data, data_size);
drivers/scsi/libiscsi.c
834
if (!task)
drivers/scsi/libiscsi.c
837
rc = iscsi_send_mgmt_task(task);
drivers/scsi/libiscsi.c
839
iscsi_put_task(task);
drivers/scsi/libiscsi.c
870
struct iscsi_task *task, char *data,
drivers/scsi/libiscsi.c
875
struct scsi_cmnd *sc = task->sc;
drivers/scsi/libiscsi.c
882
if (task->protected) {
drivers/scsi/libiscsi.c
893
ascq = session->tt->check_protection(task, &sector);
drivers/scsi/libiscsi.c
950
sc, sc->result, task->itt);
drivers/scsi/libiscsi.c
952
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
drivers/scsi/libiscsi.c
966
struct iscsi_task *task)
drivers/scsi/libiscsi.c
969
struct scsi_cmnd *sc = task->sc;
drivers/scsi/libiscsi.c
991
sc, sc->result, task->itt);
drivers/scsi/libiscsi.c
993
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
drivers/scsi/libiscsi_tcp.c
1004
task->itt, task->imm_count, task->unsol_r2t.data_length);
drivers/scsi/libiscsi_tcp.c
1006
err = conn->session->tt->init_pdu(task, 0, task->imm_count);
drivers/scsi/libiscsi_tcp.c
1009
task->imm_count = 0;
drivers/scsi/libiscsi_tcp.c
1014
static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
drivers/scsi/libiscsi_tcp.c
1016
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/libiscsi_tcp.c
1019
if (iscsi_task_has_unsol_data(task))
drivers/scsi/libiscsi_tcp.c
1020
r2t = &task->unsol_r2t;
drivers/scsi/libiscsi_tcp.c
1027
ISCSI_DBG_TCP(task->conn,
drivers/scsi/libiscsi_tcp.c
1058
int iscsi_tcp_task_xmit(struct iscsi_task *task)
drivers/scsi/libiscsi_tcp.c
1060
struct iscsi_conn *conn = task->conn;
drivers/scsi/libiscsi_tcp.c
1067
rc = session->tt->xmit_pdu(task);
drivers/scsi/libiscsi_tcp.c
1072
if (!task->sc) {
drivers/scsi/libiscsi_tcp.c
1073
if (task->hdr->itt == RESERVED_ITT)
drivers/scsi/libiscsi_tcp.c
1074
iscsi_put_task(task);
drivers/scsi/libiscsi_tcp.c
1079
if (task->sc->sc_data_direction != DMA_TO_DEVICE)
drivers/scsi/libiscsi_tcp.c
1082
r2t = iscsi_tcp_get_curr_r2t(task);
drivers/scsi/libiscsi_tcp.c
1089
rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT);
drivers/scsi/libiscsi_tcp.c
1092
iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr);
drivers/scsi/libiscsi_tcp.c
1095
r2t, r2t->datasn - 1, task->hdr->itt,
drivers/scsi/libiscsi_tcp.c
1098
rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent,
drivers/scsi/libiscsi_tcp.c
1152
struct iscsi_task *task = session->cmds[cmd_i];
drivers/scsi/libiscsi_tcp.c
1153
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/libiscsi_tcp.c
1182
struct iscsi_task *task = session->cmds[i];
drivers/scsi/libiscsi_tcp.c
1183
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/libiscsi_tcp.c
1197
struct iscsi_task *task = session->cmds[i];
drivers/scsi/libiscsi_tcp.c
1198
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/libiscsi_tcp.c
443
void iscsi_tcp_cleanup_task(struct iscsi_task *task)
drivers/scsi/libiscsi_tcp.c
445
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/libiscsi_tcp.c
449
if (!task->sc)
drivers/scsi/libiscsi_tcp.c
457
ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n");
drivers/scsi/libiscsi_tcp.c
475
static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
drivers/scsi/libiscsi_tcp.c
478
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/libiscsi_tcp.c
481
unsigned total_in_length = task->sc->sdb.length;
drivers/scsi/libiscsi_tcp.c
525
struct iscsi_task *task;
drivers/scsi/libiscsi_tcp.c
532
task = iscsi_itt_to_ctask(conn, hdr->itt);
drivers/scsi/libiscsi_tcp.c
533
if (!task) {
drivers/scsi/libiscsi_tcp.c
536
} else if (task->sc->sc_data_direction != DMA_TO_DEVICE) {
drivers/scsi/libiscsi_tcp.c
544
if (task->state != ISCSI_TASK_RUNNING) {
drivers/scsi/libiscsi_tcp.c
549
task->last_xfer = jiffies;
drivers/scsi/libiscsi_tcp.c
550
if (!iscsi_get_task(task)) {
drivers/scsi/libiscsi_tcp.c
570
tcp_task = task->dd_data;
drivers/scsi/libiscsi_tcp.c
582
task->itt);
drivers/scsi/libiscsi_tcp.c
601
if (data_offset + data_length > task->sc->sdb.length) {
drivers/scsi/libiscsi_tcp.c
605
data_offset, task->sc->sdb.length);
drivers/scsi/libiscsi_tcp.c
634
iscsi_requeue_task(task);
drivers/scsi/libiscsi_tcp.c
638
iscsi_put_task(task);
drivers/scsi/libiscsi_tcp.c
682
struct iscsi_task *task;
drivers/scsi/libiscsi_tcp.c
710
task = iscsi_itt_to_ctask(conn, hdr->itt);
drivers/scsi/libiscsi_tcp.c
711
if (!task)
drivers/scsi/libiscsi_tcp.c
714
rc = iscsi_tcp_data_in(conn, task);
drivers/scsi/libiscsi_tcp.c
721
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/libiscsi_tcp.c
723
struct scsi_data_buffer *sdb = &task->sc->sdb;
drivers/scsi/libiscsi_tcp.c
741
task->last_xfer = jiffies;
drivers/scsi/libiscsi_tcp.c
982
int iscsi_tcp_task_init(struct iscsi_task *task)
drivers/scsi/libiscsi_tcp.c
984
struct iscsi_tcp_task *tcp_task = task->dd_data;
drivers/scsi/libiscsi_tcp.c
985
struct iscsi_conn *conn = task->conn;
drivers/scsi/libiscsi_tcp.c
986
struct scsi_cmnd *sc = task->sc;
drivers/scsi/libiscsi_tcp.c
994
ISCSI_DBG_TCP(conn, "mtask deq [itt 0x%x]\n", task->itt);
drivers/scsi/libiscsi_tcp.c
996
return conn->session->tt->init_pdu(task, 0, task->data_count);
drivers/scsi/libsas/sas_ata.c
152
sas_free_task(task);
drivers/scsi/libsas/sas_ata.c
158
struct sas_task *task;
drivers/scsi/libsas/sas_ata.c
175
task = sas_alloc_task(GFP_ATOMIC);
drivers/scsi/libsas/sas_ata.c
176
if (!task)
drivers/scsi/libsas/sas_ata.c
178
task->dev = dev;
drivers/scsi/libsas/sas_ata.c
179
task->task_proto = SAS_PROTOCOL_STP;
drivers/scsi/libsas/sas_ata.c
180
task->task_done = sas_ata_task_done;
drivers/scsi/libsas/sas_ata.c
186
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
drivers/scsi/libsas/sas_ata.c
187
task->uldd_task = qc;
drivers/scsi/libsas/sas_ata.c
189
memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
drivers/scsi/libsas/sas_ata.c
190
task->total_xfer_len = qc->nbytes;
drivers/scsi/libsas/sas_ata.c
191
task->num_scatter = qc->n_elem;
drivers/scsi/libsas/sas_ata.c
192
task->data_dir = qc->dma_dir;
drivers/scsi/libsas/sas_ata.c
194
task->data_dir = DMA_NONE;
drivers/scsi/libsas/sas_ata.c
199
task->total_xfer_len = xfer;
drivers/scsi/libsas/sas_ata.c
200
task->num_scatter = si;
drivers/scsi/libsas/sas_ata.c
201
task->data_dir = qc->dma_dir;
drivers/scsi/libsas/sas_ata.c
203
task->scatter = qc->sg;
drivers/scsi/libsas/sas_ata.c
204
qc->lldd_task = task;
drivers/scsi/libsas/sas_ata.c
206
task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
drivers/scsi/libsas/sas_ata.c
207
task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol);
drivers/scsi/libsas/sas_ata.c
210
task->ata_task.return_fis_on_success = 1;
drivers/scsi/libsas/sas_ata.c
213
ASSIGN_SAS_TASK(qc->scsicmd, task);
drivers/scsi/libsas/sas_ata.c
215
ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
drivers/scsi/libsas/sas_ata.c
221
sas_free_task(task);
drivers/scsi/libsas/sas_ata.c
447
static void sas_ata_internal_abort(struct sas_task *task)
drivers/scsi/libsas/sas_ata.c
449
struct sas_internal *si = dev_to_sas_internal(task->dev);
drivers/scsi/libsas/sas_ata.c
453
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_ata.c
454
if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
drivers/scsi/libsas/sas_ata.c
455
task->task_state_flags & SAS_TASK_STATE_DONE) {
drivers/scsi/libsas/sas_ata.c
456
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_ata.c
457
pr_debug("%s: Task %p already finished.\n", __func__, task);
drivers/scsi/libsas/sas_ata.c
460
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
drivers/scsi/libsas/sas_ata.c
461
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_ata.c
463
res = si->dft->lldd_abort_task(task);
drivers/scsi/libsas/sas_ata.c
465
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_ata.c
466
if (task->task_state_flags & SAS_TASK_STATE_DONE ||
drivers/scsi/libsas/sas_ata.c
468
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_ata.c
477
pr_warn("%s: Task %p leaked.\n", __func__, task);
drivers/scsi/libsas/sas_ata.c
479
if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
drivers/scsi/libsas/sas_ata.c
480
task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
drivers/scsi/libsas/sas_ata.c
481
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_ata.c
485
sas_free_task(task);
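
Annotation: the sas_ata.c entries above (lines 447-485) sketch the abort handshake against a racing completion: bail out under task_state_lock if the task is already ABORTED or DONE, otherwise mark it ABORTED, drop the lock for lldd_abort_task(), then re-check DONE afterwards and clear ABORTED again if the command never finished (the "Task leaked" warning path). A condensed sketch under those assumptions, with the TMF response checks and cleanup elided:

    /* Condensed abort handshake from sas_ata_internal_abort(). */
    spin_lock_irqsave(&task->task_state_lock, flags);
    if (task->task_state_flags &
        (SAS_TASK_STATE_ABORTED | SAS_TASK_STATE_DONE)) {
        spin_unlock_irqrestore(&task->task_state_lock, flags);
        return;                         /* already finished or aborting */
    }
    task->task_state_flags |= SAS_TASK_STATE_ABORTED;
    spin_unlock_irqrestore(&task->task_state_lock, flags);

    si->dft->lldd_abort_task(task);     /* may race with completion */

    spin_lock_irqsave(&task->task_state_lock, flags);
    if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
        task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;  /* leaked */
    spin_unlock_irqrestore(&task->task_state_lock, flags);
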
drivers/scsi/libsas/sas_ata.c
502
struct sas_task *task = qc->lldd_task;
drivers/scsi/libsas/sas_ata.c
505
if (!task)
drivers/scsi/libsas/sas_ata.c
507
task->uldd_task = NULL;
drivers/scsi/libsas/sas_ata.c
508
sas_ata_internal_abort(task);
drivers/scsi/libsas/sas_ata.c
624
void sas_ata_task_abort(struct sas_task *task)
drivers/scsi/libsas/sas_ata.c
626
struct ata_queued_cmd *qc = task->uldd_task;
drivers/scsi/libsas/sas_ata.c
73
static void sas_ata_task_done(struct sas_task *task)
drivers/scsi/libsas/sas_ata.c
75
struct ata_queued_cmd *qc = task->uldd_task;
drivers/scsi/libsas/sas_ata.c
76
struct domain_device *dev = task->dev;
drivers/scsi/libsas/sas_ata.c
77
struct task_status_struct *stat = &task->task_status;
drivers/scsi/libsas/sas_ata.c
87
task = NULL;
drivers/scsi/libsas/sas_ata.c
93
if (unlikely(!task))
drivers/scsi/libsas/sas_expander.c
101
i->dft->lldd_abort_task(task);
drivers/scsi/libsas/sas_expander.c
102
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
drivers/scsi/libsas/sas_expander.c
107
if (task->task_status.resp == SAS_TASK_COMPLETE &&
drivers/scsi/libsas/sas_expander.c
108
task->task_status.stat == SAS_SAM_STAT_GOOD) {
drivers/scsi/libsas/sas_expander.c
112
if (task->task_status.resp == SAS_TASK_COMPLETE &&
drivers/scsi/libsas/sas_expander.c
113
task->task_status.stat == SAS_DATA_UNDERRUN) {
drivers/scsi/libsas/sas_expander.c
116
res = task->task_status.residual;
drivers/scsi/libsas/sas_expander.c
119
if (task->task_status.resp == SAS_TASK_COMPLETE &&
drivers/scsi/libsas/sas_expander.c
120
task->task_status.stat == SAS_DATA_OVERRUN) {
drivers/scsi/libsas/sas_expander.c
124
if (task->task_status.resp == SAS_TASK_UNDELIVERED &&
drivers/scsi/libsas/sas_expander.c
125
task->task_status.stat == SAS_DEVICE_UNKNOWN)
drivers/scsi/libsas/sas_expander.c
131
task->task_status.resp,
drivers/scsi/libsas/sas_expander.c
132
task->task_status.stat);
drivers/scsi/libsas/sas_expander.c
133
sas_free_task(task);
drivers/scsi/libsas/sas_expander.c
134
task = NULL;
drivers/scsi/libsas/sas_expander.c
140
BUG_ON(retry == 3 && task != NULL);
drivers/scsi/libsas/sas_expander.c
141
sas_free_task(task);
drivers/scsi/libsas/sas_expander.c
60
struct sas_task *task = NULL;
drivers/scsi/libsas/sas_expander.c
73
task = sas_alloc_slow_task(GFP_KERNEL);
drivers/scsi/libsas/sas_expander.c
74
if (!task) {
drivers/scsi/libsas/sas_expander.c
78
task->dev = dev;
drivers/scsi/libsas/sas_expander.c
79
task->task_proto = dev->tproto;
drivers/scsi/libsas/sas_expander.c
80
task->smp_task.smp_req = *req;
drivers/scsi/libsas/sas_expander.c
81
task->smp_task.smp_resp = *resp;
drivers/scsi/libsas/sas_expander.c
83
task->task_done = sas_task_internal_done;
drivers/scsi/libsas/sas_expander.c
85
task->slow_task->timer.function = sas_task_internal_timedout;
drivers/scsi/libsas/sas_expander.c
86
task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
drivers/scsi/libsas/sas_expander.c
87
add_timer(&task->slow_task->timer);
drivers/scsi/libsas/sas_expander.c
89
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
drivers/scsi/libsas/sas_expander.c
92
timer_delete_sync(&task->slow_task->timer);
drivers/scsi/libsas/sas_expander.c
97
wait_for_completion(&task->slow_task->completion);
drivers/scsi/libsas/sas_expander.c
99
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
drivers/scsi/libsas/sas_init.c
29
struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
drivers/scsi/libsas/sas_init.c
31
if (task) {
drivers/scsi/libsas/sas_init.c
32
spin_lock_init(&task->task_state_lock);
drivers/scsi/libsas/sas_init.c
33
task->task_state_flags = SAS_TASK_STATE_PENDING;
drivers/scsi/libsas/sas_init.c
36
return task;
drivers/scsi/libsas/sas_init.c
41
struct sas_task *task = sas_alloc_task(flags);
drivers/scsi/libsas/sas_init.c
44
if (!task || !slow) {
drivers/scsi/libsas/sas_init.c
45
if (task)
drivers/scsi/libsas/sas_init.c
46
kmem_cache_free(sas_task_cache, task);
drivers/scsi/libsas/sas_init.c
51
task->slow_task = slow;
drivers/scsi/libsas/sas_init.c
52
slow->task = task;
drivers/scsi/libsas/sas_init.c
56
return task;
drivers/scsi/libsas/sas_init.c
59
void sas_free_task(struct sas_task *task)
drivers/scsi/libsas/sas_init.c
61
if (task) {
drivers/scsi/libsas/sas_init.c
62
kfree(task->slow_task);
drivers/scsi/libsas/sas_init.c
63
kmem_cache_free(sas_task_cache, task);
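
Annotation: the sas_init.c entries above (lines 29-63) define the allocation contract: sas_alloc_task() hands back a zeroed object from sas_task_cache with task_state_lock initialized and the state set to PENDING; sas_alloc_slow_task() additionally attaches the slow_task used for internal commands; sas_free_task() accepts NULL and releases the slow_task along with the task. A usage sketch restating that contract; the WARN_ON is illustrative, not kernel code.

    /* Allocation-contract sketch; error handling abbreviated. */
    struct sas_task *task = sas_alloc_task(GFP_ATOMIC);
    if (task) {
        /* fresh tasks start life PENDING, everything else zeroed */
        WARN_ON(task->task_state_flags != SAS_TASK_STATE_PENDING);
        sas_free_task(task);    /* also kfree()s task->slow_task */
    }
    sas_free_task(NULL);        /* safe no-op: NULL check is internal */
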
drivers/scsi/libsas/sas_internal.h
108
void sas_task_internal_done(struct sas_task *task);
drivers/scsi/libsas/sas_internal.h
215
void sas_ata_task_abort(struct sas_task *task);
drivers/scsi/libsas/sas_internal.h
234
static inline void sas_ata_task_abort(struct sas_task *task)
drivers/scsi/libsas/sas_internal.h
64
void sas_free_task(struct sas_task *task);
drivers/scsi/libsas/sas_scsi_host.c
100
static void sas_scsi_task_done(struct sas_task *task)
drivers/scsi/libsas/sas_scsi_host.c
1001
memcpy(&task->ata_task.fis, parameter, para_len);
drivers/scsi/libsas/sas_scsi_host.c
1003
memcpy(&task->ssp_task, parameter, para_len);
drivers/scsi/libsas/sas_scsi_host.c
1006
task->task_done = sas_task_internal_done;
drivers/scsi/libsas/sas_scsi_host.c
1007
task->tmf = tmf;
drivers/scsi/libsas/sas_scsi_host.c
1009
task->slow_task->timer.function = sas_task_internal_timedout;
drivers/scsi/libsas/sas_scsi_host.c
1010
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
drivers/scsi/libsas/sas_scsi_host.c
1011
add_timer(&task->slow_task->timer);
drivers/scsi/libsas/sas_scsi_host.c
1013
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
drivers/scsi/libsas/sas_scsi_host.c
1015
timer_delete_sync(&task->slow_task->timer);
drivers/scsi/libsas/sas_scsi_host.c
102
struct scsi_cmnd *sc = task->uldd_task;
drivers/scsi/libsas/sas_scsi_host.c
1021
wait_for_completion(&task->slow_task->completion);
drivers/scsi/libsas/sas_scsi_host.c
1028
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
drivers/scsi/libsas/sas_scsi_host.c
1029
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
drivers/scsi/libsas/sas_scsi_host.c
103
struct domain_device *dev = task->dev;
drivers/scsi/libsas/sas_scsi_host.c
1033
i->dft->lldd_tmf_aborted(task);
drivers/scsi/libsas/sas_scsi_host.c
1040
if (task->task_status.resp == SAS_TASK_COMPLETE &&
drivers/scsi/libsas/sas_scsi_host.c
1041
task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
drivers/scsi/libsas/sas_scsi_host.c
1046
if (task->task_status.resp == SAS_TASK_COMPLETE &&
drivers/scsi/libsas/sas_scsi_host.c
1047
task->task_status.stat == TMF_RESP_FUNC_SUCC) {
drivers/scsi/libsas/sas_scsi_host.c
1052
if (task->task_status.resp == SAS_TASK_COMPLETE &&
drivers/scsi/libsas/sas_scsi_host.c
1053
task->task_status.stat == SAS_DATA_UNDERRUN) {
drivers/scsi/libsas/sas_scsi_host.c
1059
task->task_status.resp,
drivers/scsi/libsas/sas_scsi_host.c
1060
task->task_status.stat);
drivers/scsi/libsas/sas_scsi_host.c
1061
res = task->task_status.residual;
drivers/scsi/libsas/sas_scsi_host.c
1065
if (task->task_status.resp == SAS_TASK_COMPLETE &&
drivers/scsi/libsas/sas_scsi_host.c
1066
task->task_status.stat == SAS_DATA_OVERRUN) {
drivers/scsi/libsas/sas_scsi_host.c
1073
if (task->task_status.resp == SAS_TASK_COMPLETE &&
drivers/scsi/libsas/sas_scsi_host.c
1074
task->task_status.stat == SAS_OPEN_REJECT) {
drivers/scsi/libsas/sas_scsi_host.c
1081
task->task_status.resp,
drivers/scsi/libsas/sas_scsi_host.c
1082
task->task_status.stat);
drivers/scsi/libsas/sas_scsi_host.c
1084
sas_free_task(task);
drivers/scsi/libsas/sas_scsi_host.c
1085
task = NULL;
drivers/scsi/libsas/sas_scsi_host.c
109
task = NULL;
drivers/scsi/libsas/sas_scsi_host.c
1091
sas_free_task(task);
drivers/scsi/libsas/sas_scsi_host.c
1139
int sas_query_task(struct sas_task *task, u16 tag)
drivers/scsi/libsas/sas_scsi_host.c
114
if (unlikely(!task)) {
drivers/scsi/libsas/sas_scsi_host.c
1145
struct scsi_cmnd *cmnd = task->uldd_task;
drivers/scsi/libsas/sas_scsi_host.c
1146
struct domain_device *dev = task->dev;
drivers/scsi/libsas/sas_scsi_host.c
1155
int sas_abort_task(struct sas_task *task, u16 tag)
drivers/scsi/libsas/sas_scsi_host.c
1161
struct scsi_cmnd *cmnd = task->uldd_task;
drivers/scsi/libsas/sas_scsi_host.c
1162
struct domain_device *dev = task->dev;
drivers/scsi/libsas/sas_scsi_host.c
1175
void sas_task_abort(struct sas_task *task)
drivers/scsi/libsas/sas_scsi_host.c
1177
struct scsi_cmnd *sc = task->uldd_task;
drivers/scsi/libsas/sas_scsi_host.c
1181
struct sas_task_slow *slow = task->slow_task;
drivers/scsi/libsas/sas_scsi_host.c
1191
if (dev_is_sata(task->dev))
drivers/scsi/libsas/sas_scsi_host.c
1192
sas_ata_task_abort(task);
drivers/scsi/libsas/sas_scsi_host.c
122
sas_free_task(task);
drivers/scsi/libsas/sas_scsi_host.c
126
sas_end_task(sc, task);
drivers/scsi/libsas/sas_scsi_host.c
134
struct sas_task *task = sas_alloc_task(gfp_flags);
drivers/scsi/libsas/sas_scsi_host.c
137
if (!task)
drivers/scsi/libsas/sas_scsi_host.c
140
task->uldd_task = cmd;
drivers/scsi/libsas/sas_scsi_host.c
141
ASSIGN_SAS_TASK(cmd, task);
drivers/scsi/libsas/sas_scsi_host.c
143
task->dev = dev;
drivers/scsi/libsas/sas_scsi_host.c
144
task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
drivers/scsi/libsas/sas_scsi_host.c
147
memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
drivers/scsi/libsas/sas_scsi_host.c
148
task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
drivers/scsi/libsas/sas_scsi_host.c
149
task->ssp_task.cmd = cmd;
drivers/scsi/libsas/sas_scsi_host.c
151
task->scatter = scsi_sglist(cmd);
drivers/scsi/libsas/sas_scsi_host.c
152
task->num_scatter = scsi_sg_count(cmd);
drivers/scsi/libsas/sas_scsi_host.c
153
task->total_xfer_len = scsi_bufflen(cmd);
drivers/scsi/libsas/sas_scsi_host.c
154
task->data_dir = cmd->sc_data_direction;
drivers/scsi/libsas/sas_scsi_host.c
156
task->task_done = sas_scsi_task_done;
drivers/scsi/libsas/sas_scsi_host.c
158
return task;
drivers/scsi/libsas/sas_scsi_host.c
166
struct sas_task *task;
drivers/scsi/libsas/sas_scsi_host.c
182
task = sas_create_task(cmd, dev, GFP_ATOMIC);
drivers/scsi/libsas/sas_scsi_host.c
183
if (!task)
drivers/scsi/libsas/sas_scsi_host.c
186
res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
drivers/scsi/libsas/sas_scsi_host.c
194
sas_free_task(task);
drivers/scsi/libsas/sas_scsi_host.c
209
struct sas_task *task = TO_SAS_TASK(cmd);
drivers/scsi/libsas/sas_scsi_host.c
215
sas_end_task(cmd, task);
drivers/scsi/libsas/sas_scsi_host.c
278
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
drivers/scsi/libsas/sas_scsi_host.c
283
to_sas_internal(task->dev->port->ha->shost->transportt);
drivers/scsi/libsas/sas_scsi_host.c
286
pr_notice("%s: aborting task 0x%p\n", __func__, task);
drivers/scsi/libsas/sas_scsi_host.c
287
res = si->dft->lldd_abort_task(task);
drivers/scsi/libsas/sas_scsi_host.c
289
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_scsi_host.c
290
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
drivers/scsi/libsas/sas_scsi_host.c
291
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_scsi_host.c
292
pr_debug("%s: task 0x%p is done\n", __func__, task);
drivers/scsi/libsas/sas_scsi_host.c
295
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_scsi_host.c
299
__func__, task);
drivers/scsi/libsas/sas_scsi_host.c
302
pr_notice("%s: querying task 0x%p\n", __func__, task);
drivers/scsi/libsas/sas_scsi_host.c
303
res = si->dft->lldd_query_task(task);
drivers/scsi/libsas/sas_scsi_host.c
307
task);
drivers/scsi/libsas/sas_scsi_host.c
311
__func__, task);
drivers/scsi/libsas/sas_scsi_host.c
315
__func__, task);
drivers/scsi/libsas/sas_scsi_host.c
319
__func__, task, res);
drivers/scsi/libsas/sas_scsi_host.c
38
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
drivers/scsi/libsas/sas_scsi_host.c
40
struct task_status_struct *ts = &task->task_status;
drivers/scsi/libsas/sas_scsi_host.c
430
struct sas_task *task = TO_SAS_TASK(cmd);
drivers/scsi/libsas/sas_scsi_host.c
447
if (task)
drivers/scsi/libsas/sas_scsi_host.c
448
res = i->dft->lldd_abort_task(task);
drivers/scsi/libsas/sas_scsi_host.c
538
struct sas_task *task;
drivers/scsi/libsas/sas_scsi_host.c
545
task = TO_SAS_TASK(cmd);
drivers/scsi/libsas/sas_scsi_host.c
548
if (!task)
drivers/scsi/libsas/sas_scsi_host.c
554
struct sas_task *task = TO_SAS_TASK(cmd);
drivers/scsi/libsas/sas_scsi_host.c
558
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_scsi_host.c
559
need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
drivers/scsi/libsas/sas_scsi_host.c
560
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_scsi_host.c
564
__func__, task);
drivers/scsi/libsas/sas_scsi_host.c
568
pr_debug("trying to find task 0x%p\n", task);
drivers/scsi/libsas/sas_scsi_host.c
569
res = sas_scsi_find_task(task);
drivers/scsi/libsas/sas_scsi_host.c
574
task);
drivers/scsi/libsas/sas_scsi_host.c
579
__func__, task);
drivers/scsi/libsas/sas_scsi_host.c
583
pr_info("task 0x%p is at LU: lu recover\n", task);
drivers/scsi/libsas/sas_scsi_host.c
585
tmf_resp = sas_recover_lu(task->dev, cmd);
drivers/scsi/libsas/sas_scsi_host.c
588
SAS_ADDR(task->dev),
drivers/scsi/libsas/sas_scsi_host.c
598
task);
drivers/scsi/libsas/sas_scsi_host.c
599
tmf_resp = sas_recover_I_T(task->dev);
drivers/scsi/libsas/sas_scsi_host.c
602
struct domain_device *dev = task->dev;
drivers/scsi/libsas/sas_scsi_host.c
604
SAS_ADDR(task->dev->sas_addr));
drivers/scsi/libsas/sas_scsi_host.c
612
struct asd_sas_port *port = task->dev->port;
drivers/scsi/libsas/sas_scsi_host.c
639
SAS_ADDR(task->dev->sas_addr),
drivers/scsi/libsas/sas_scsi_host.c
79
task->dev->port->ha->sas_ha_name);
drivers/scsi/libsas/sas_scsi_host.c
862
void sas_task_internal_done(struct sas_task *task)
drivers/scsi/libsas/sas_scsi_host.c
864
timer_delete(&task->slow_task->timer);
drivers/scsi/libsas/sas_scsi_host.c
865
complete(&task->slow_task->completion);
drivers/scsi/libsas/sas_scsi_host.c
871
struct sas_task *task = slow->task;
drivers/scsi/libsas/sas_scsi_host.c
875
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_scsi_host.c
876
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
drivers/scsi/libsas/sas_scsi_host.c
877
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
drivers/scsi/libsas/sas_scsi_host.c
880
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/libsas/sas_scsi_host.c
883
complete(&task->slow_task->completion);
drivers/scsi/libsas/sas_scsi_host.c
895
struct sas_task *task = NULL;
drivers/scsi/libsas/sas_scsi_host.c
899
task = sas_alloc_slow_task(GFP_KERNEL);
drivers/scsi/libsas/sas_scsi_host.c
900
if (!task)
drivers/scsi/libsas/sas_scsi_host.c
903
task->dev = device;
drivers/scsi/libsas/sas_scsi_host.c
904
task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
drivers/scsi/libsas/sas_scsi_host.c
905
task->task_done = sas_task_internal_done;
drivers/scsi/libsas/sas_scsi_host.c
906
task->slow_task->timer.function = sas_task_internal_timedout;
drivers/scsi/libsas/sas_scsi_host.c
907
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
drivers/scsi/libsas/sas_scsi_host.c
908
add_timer(&task->slow_task->timer);
drivers/scsi/libsas/sas_scsi_host.c
910
task->abort_task.tag = tag;
drivers/scsi/libsas/sas_scsi_host.c
911
task->abort_task.type = type;
drivers/scsi/libsas/sas_scsi_host.c
912
task->abort_task.qid = qid;
drivers/scsi/libsas/sas_scsi_host.c
914
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
drivers/scsi/libsas/sas_scsi_host.c
916
timer_delete_sync(&task->slow_task->timer);
drivers/scsi/libsas/sas_scsi_host.c
922
wait_for_completion(&task->slow_task->completion);
drivers/scsi/libsas/sas_scsi_host.c
926
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
drivers/scsi/libsas/sas_scsi_host.c
930
quit = i->dft->lldd_abort_timeout(task, data);
drivers/scsi/libsas/sas_scsi_host.c
939
if (task->task_status.resp == SAS_TASK_COMPLETE &&
drivers/scsi/libsas/sas_scsi_host.c
940
task->task_status.stat == SAS_SAM_STAT_GOOD) {
drivers/scsi/libsas/sas_scsi_host.c
945
if (task->task_status.resp == SAS_TASK_COMPLETE &&
drivers/scsi/libsas/sas_scsi_host.c
946
task->task_status.stat == TMF_RESP_FUNC_SUCC) {
drivers/scsi/libsas/sas_scsi_host.c
952
SAS_ADDR(device->sas_addr), task->task_status.resp,
drivers/scsi/libsas/sas_scsi_host.c
953
task->task_status.stat);
drivers/scsi/libsas/sas_scsi_host.c
954
sas_free_task(task);
drivers/scsi/libsas/sas_scsi_host.c
955
task = NULL;
drivers/scsi/libsas/sas_scsi_host.c
957
BUG_ON(retry == TASK_RETRY && task != NULL);
drivers/scsi/libsas/sas_scsi_host.c
958
sas_free_task(task);
drivers/scsi/libsas/sas_scsi_host.c
97
sas_free_task(task);
drivers/scsi/libsas/sas_scsi_host.c
982
struct sas_task *task;
drivers/scsi/libsas/sas_scsi_host.c
988
task = sas_alloc_slow_task(GFP_KERNEL);
drivers/scsi/libsas/sas_scsi_host.c
989
if (!task)
drivers/scsi/libsas/sas_scsi_host.c
992
task->dev = device;
drivers/scsi/libsas/sas_scsi_host.c
993
task->task_proto = device->tproto;
drivers/scsi/libsas/sas_scsi_host.c
996
task->ata_task.device_control_reg_update = 1;
drivers/scsi/libsas/sas_scsi_host.c
998
task->ata_task.force_phy = true;
drivers/scsi/libsas/sas_scsi_host.c
999
task->ata_task.force_phy_id = force_phy_id;
drivers/scsi/libsas/sas_task.c
11
void sas_ssp_task_response(struct device *dev, struct sas_task *task,
drivers/scsi/libsas/sas_task.c
14
struct task_status_struct *tstat = &task->task_status;
drivers/scsi/libsas/sas_task.c
34
SAS_ADDR(task->dev->sas_addr), iu->status);
drivers/scsi/mvsas/mv_sas.c
12
static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
drivers/scsi/mvsas/mv_sas.c
1294
int mvs_query_task(struct sas_task *task)
drivers/scsi/mvsas/mv_sas.c
1299
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
drivers/scsi/mvsas/mv_sas.c
1300
struct domain_device *dev = task->dev;
drivers/scsi/mvsas/mv_sas.c
1304
rc = mvs_find_tag(mvi, task, &tag);
drivers/scsi/mvsas/mv_sas.c
1310
rc = sas_query_task(task, tag);
drivers/scsi/mvsas/mv_sas.c
1325
int mvs_abort_task(struct sas_task *task)
drivers/scsi/mvsas/mv_sas.c
1327
struct domain_device *dev = task->dev;
drivers/scsi/mvsas/mv_sas.c
1341
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/mvsas/mv_sas.c
1342
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
drivers/scsi/mvsas/mv_sas.c
1343
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/mvsas/mv_sas.c
1347
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/mvsas/mv_sas.c
1349
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
drivers/scsi/mvsas/mv_sas.c
1350
rc = mvs_find_tag(mvi, task, &tag);
drivers/scsi/mvsas/mv_sas.c
1357
rc = sas_abort_task(task, tag);
drivers/scsi/mvsas/mv_sas.c
1364
if (task->lldd_task) {
drivers/scsi/mvsas/mv_sas.c
1365
slot = task->lldd_task;
drivers/scsi/mvsas/mv_sas.c
1373
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
drivers/scsi/mvsas/mv_sas.c
1374
task->task_proto & SAS_PROTOCOL_STP) {
drivers/scsi/mvsas/mv_sas.c
1376
struct mvs_slot_info *slot = task->lldd_task;
drivers/scsi/mvsas/mv_sas.c
1380
mvi, task, slot, slot_idx);
drivers/scsi/mvsas/mv_sas.c
1381
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
drivers/scsi/mvsas/mv_sas.c
1382
mvs_slot_task_free(mvi, task, slot, slot_idx);
drivers/scsi/mvsas/mv_sas.c
1394
static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
drivers/scsi/mvsas/mv_sas.c
1397
struct mvs_device *mvi_dev = task->dev->lldd_dev;
drivers/scsi/mvsas/mv_sas.c
1398
struct task_status_struct *tstat = &task->task_status;
drivers/scsi/mvsas/mv_sas.c
14
if (task->lldd_task) {
drivers/scsi/mvsas/mv_sas.c
1468
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
drivers/scsi/mvsas/mv_sas.c
1484
switch (task->task_proto) {
drivers/scsi/mvsas/mv_sas.c
1492
sas_ssp_task_response(mvi->dev, task, iu);
drivers/scsi/mvsas/mv_sas.c
1507
task->ata_task.use_ncq = 0;
drivers/scsi/mvsas/mv_sas.c
1509
mvs_sata_done(mvi, task, slot_idx, err_dw0);
drivers/scsi/mvsas/mv_sas.c
1523
struct sas_task *task = slot->task;
drivers/scsi/mvsas/mv_sas.c
1532
if (unlikely(!task || !task->lldd_task || !task->dev))
drivers/scsi/mvsas/mv_sas.c
1535
tstat = &task->task_status;
drivers/scsi/mvsas/mv_sas.c
1536
dev = task->dev;
drivers/scsi/mvsas/mv_sas.c
1539
spin_lock(&task->task_state_lock);
drivers/scsi/mvsas/mv_sas.c
1540
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
drivers/scsi/mvsas/mv_sas.c
1541
task->task_state_flags |= SAS_TASK_STATE_DONE;
drivers/scsi/mvsas/mv_sas.c
1543
aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
drivers/scsi/mvsas/mv_sas.c
1544
spin_unlock(&task->task_state_lock);
drivers/scsi/mvsas/mv_sas.c
1553
if (sas_protocol_ata(task->task_proto))
drivers/scsi/mvsas/mv_sas.c
1556
mvs_slot_task_free(mvi, task, slot, slot_idx);
drivers/scsi/mvsas/mv_sas.c
1578
tstat->stat = mvs_slot_err(mvi, task, slot_idx);
drivers/scsi/mvsas/mv_sas.c
1583
switch (task->task_proto) {
drivers/scsi/mvsas/mv_sas.c
1594
sas_ssp_task_response(mvi->dev, task, iu);
drivers/scsi/mvsas/mv_sas.c
16
slot = task->lldd_task;
drivers/scsi/mvsas/mv_sas.c
1600
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
drivers/scsi/mvsas/mv_sas.c
1613
tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
drivers/scsi/mvsas/mv_sas.c
1630
if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
drivers/scsi/mvsas/mv_sas.c
1633
mvs_slot_task_free(mvi, task, slot, slot_idx);
drivers/scsi/mvsas/mv_sas.c
1637
if (task->task_done)
drivers/scsi/mvsas/mv_sas.c
1638
task->task_done(task);
drivers/scsi/mvsas/mv_sas.c
1663
struct sas_task *task;
drivers/scsi/mvsas/mv_sas.c
1665
task = slot->task;
drivers/scsi/mvsas/mv_sas.c
1667
if (dev && task->dev != dev)
drivers/scsi/mvsas/mv_sas.c
1671
slot_idx, slot->slot_tag, task);
drivers/scsi/mvsas/mv_sas.c
293
struct sas_task *task = tei->task;
drivers/scsi/mvsas/mv_sas.c
295
struct domain_device *dev = task->dev;
drivers/scsi/mvsas/mv_sas.c
311
sg_req = &task->smp_task.smp_req;
drivers/scsi/mvsas/mv_sas.c
317
sg_resp = &task->smp_task.smp_resp;
drivers/scsi/mvsas/mv_sas.c
386
MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
drivers/scsi/mvsas/mv_sas.c
391
dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
drivers/scsi/mvsas/mv_sas.c
394
dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
drivers/scsi/mvsas/mv_sas.c
399
static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
drivers/scsi/mvsas/mv_sas.c
401
struct ata_queued_cmd *qc = task->uldd_task;
drivers/scsi/mvsas/mv_sas.c
420
struct sas_task *task = tei->task;
drivers/scsi/mvsas/mv_sas.c
421
struct domain_device *dev = task->dev;
drivers/scsi/mvsas/mv_sas.c
448
if (task->data_dir == DMA_FROM_DEVICE)
drivers/scsi/mvsas/mv_sas.c
453
if (task->ata_task.use_ncq)
drivers/scsi/mvsas/mv_sas.c
456
if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
drivers/scsi/mvsas/mv_sas.c
462
if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
drivers/scsi/mvsas/mv_sas.c
463
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
drivers/scsi/mvsas/mv_sas.c
469
hdr->data_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/mvsas/mv_sas.c
518
if (likely(!task->ata_task.device_control_reg_update))
drivers/scsi/mvsas/mv_sas.c
519
task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
drivers/scsi/mvsas/mv_sas.c
521
memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
drivers/scsi/mvsas/mv_sas.c
524
task->ata_task.atapi_packet, 16);
drivers/scsi/mvsas/mv_sas.c
534
MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
drivers/scsi/mvsas/mv_sas.c
536
if (task->data_dir == DMA_FROM_DEVICE)
drivers/scsi/mvsas/mv_sas.c
547
struct sas_task *task = tei->task;
drivers/scsi/mvsas/mv_sas.c
550
struct domain_device *dev = task->dev;
drivers/scsi/mvsas/mv_sas.c
582
hdr->data_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/mvsas/mv_sas.c
653
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
drivers/scsi/mvsas/mv_sas.c
656
buf_cmd[9] = task->ssp_task.task_attr;
drivers/scsi/mvsas/mv_sas.c
657
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
drivers/scsi/mvsas/mv_sas.c
658
task->ssp_task.cmd->cmd_len);
drivers/scsi/mvsas/mv_sas.c
674
MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
drivers/scsi/mvsas/mv_sas.c
679
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
drivers/scsi/mvsas/mv_sas.c
682
struct domain_device *dev = task->dev;
drivers/scsi/mvsas/mv_sas.c
691
struct task_status_struct *tsm = &task->task_status;
drivers/scsi/mvsas/mv_sas.c
700
task->task_done(task);
drivers/scsi/mvsas/mv_sas.c
717
if (sas_protocol_ata(task->task_proto)) {
drivers/scsi/mvsas/mv_sas.c
718
struct task_status_struct *ts = &task->task_status;
drivers/scsi/mvsas/mv_sas.c
724
task->task_done(task);
drivers/scsi/mvsas/mv_sas.c
727
struct task_status_struct *ts = &task->task_status;
drivers/scsi/mvsas/mv_sas.c
732
task->task_done(task);
drivers/scsi/mvsas/mv_sas.c
737
if (!sas_protocol_ata(task->task_proto)) {
drivers/scsi/mvsas/mv_sas.c
738
if (task->num_scatter) {
drivers/scsi/mvsas/mv_sas.c
740
task->scatter,
drivers/scsi/mvsas/mv_sas.c
741
task->num_scatter,
drivers/scsi/mvsas/mv_sas.c
742
task->data_dir);
drivers/scsi/mvsas/mv_sas.c
749
n_elem = task->num_scatter;
drivers/scsi/mvsas/mv_sas.c
752
rq = sas_task_find_rq(task);
drivers/scsi/mvsas/mv_sas.c
763
task->lldd_task = NULL;
drivers/scsi/mvsas/mv_sas.c
773
tei.task = task;
drivers/scsi/mvsas/mv_sas.c
777
switch (task->task_proto) {
drivers/scsi/mvsas/mv_sas.c
792
task->task_proto);
drivers/scsi/mvsas/mv_sas.c
801
slot->task = task;
drivers/scsi/mvsas/mv_sas.c
803
task->lldd_task = slot;
drivers/scsi/mvsas/mv_sas.c
819
if (!sas_protocol_ata(task->task_proto))
drivers/scsi/mvsas/mv_sas.c
821
dma_unmap_sg(mvi->dev, task->scatter, task->num_scatter,
drivers/scsi/mvsas/mv_sas.c
822
task->data_dir);
drivers/scsi/mvsas/mv_sas.c
827
int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
drivers/scsi/mvsas/mv_sas.c
833
struct sas_tmf_task *tmf = task->tmf;
drivers/scsi/mvsas/mv_sas.c
834
int is_tmf = !!task->tmf;
drivers/scsi/mvsas/mv_sas.c
836
mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
drivers/scsi/mvsas/mv_sas.c
839
rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
drivers/scsi/mvsas/mv_sas.c
857
static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
drivers/scsi/mvsas/mv_sas.c
862
if (!slot->task)
drivers/scsi/mvsas/mv_sas.c
864
if (!sas_protocol_ata(task->task_proto))
drivers/scsi/mvsas/mv_sas.c
866
dma_unmap_sg(mvi->dev, task->scatter,
drivers/scsi/mvsas/mv_sas.c
867
task->num_scatter, task->data_dir);
drivers/scsi/mvsas/mv_sas.c
869
switch (task->task_proto) {
drivers/scsi/mvsas/mv_sas.c
871
dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
drivers/scsi/mvsas/mv_sas.c
873
dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
drivers/scsi/mvsas/mv_sas.c
890
task->lldd_task = NULL;
drivers/scsi/mvsas/mv_sas.c
891
slot->task = NULL;
drivers/scsi/mvsas/mv_sas.h
309
struct sas_task *task;
drivers/scsi/mvsas/mv_sas.h
417
struct sas_task *task;
drivers/scsi/mvsas/mv_sas.h
434
int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags);
drivers/scsi/mvsas/mv_sas.h
435
int mvs_abort_task(struct sas_task *task);
drivers/scsi/mvsas/mv_sas.h
443
int mvs_query_task(struct sas_task *task);
drivers/scsi/pm8001/pm8001_hwi.c
1494
(ccb->task == t))
drivers/scsi/pm8001/pm8001_hwi.c
1562
(ccb->task == t))
drivers/scsi/pm8001/pm8001_hwi.c
1631
struct sas_task *task;
drivers/scsi/pm8001/pm8001_hwi.c
1637
task = ccb->task;
drivers/scsi/pm8001/pm8001_hwi.c
1638
ts = &task->task_status;
drivers/scsi/pm8001/pm8001_hwi.c
1640
if (task != NULL) {
drivers/scsi/pm8001/pm8001_hwi.c
1641
dev = task->dev;
drivers/scsi/pm8001/pm8001_hwi.c
1650
task->task_done(task);
drivers/scsi/pm8001/pm8001_hwi.c
1738
t = ccb->task;
drivers/scsi/pm8001/pm8001_hwi.c
1960
t = ccb->task;
drivers/scsi/pm8001/pm8001_hwi.c
2138
t = ccb->task;
drivers/scsi/pm8001/pm8001_hwi.c
2513
t = ccb->task;
drivers/scsi/pm8001/pm8001_hwi.c
2684
t = ccb->task;
drivers/scsi/pm8001/pm8001_hwi.c
3457
t = ccb->task;
drivers/scsi/pm8001/pm8001_hwi.c
3919
struct sas_task *task = ccb->task;
drivers/scsi/pm8001/pm8001_hwi.c
3920
struct domain_device *dev = task->dev;
drivers/scsi/pm8001/pm8001_hwi.c
3931
sg_req = &task->smp_task.smp_req;
drivers/scsi/pm8001/pm8001_hwi.c
3937
sg_resp = &task->smp_task.smp_resp;
drivers/scsi/pm8001/pm8001_hwi.c
3953
cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
drivers/scsi/pm8001/pm8001_hwi.c
3955
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
drivers/scsi/pm8001/pm8001_hwi.c
3957
cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
drivers/scsi/pm8001/pm8001_hwi.c
3959
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
drivers/scsi/pm8001/pm8001_hwi.c
3969
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
drivers/scsi/pm8001/pm8001_hwi.c
3972
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
drivers/scsi/pm8001/pm8001_hwi.c
3985
struct sas_task *task = ccb->task;
drivers/scsi/pm8001/pm8001_hwi.c
3986
struct domain_device *dev = task->dev;
drivers/scsi/pm8001/pm8001_hwi.c
3993
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
drivers/scsi/pm8001/pm8001_hwi.c
3995
cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for
drivers/scsi/pm8001/pm8001_hwi.c
3997
ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm8001_hwi.c
4000
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
drivers/scsi/pm8001/pm8001_hwi.c
4001
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
drivers/scsi/pm8001/pm8001_hwi.c
4002
task->ssp_task.cmd->cmd_len);
drivers/scsi/pm8001/pm8001_hwi.c
4005
if (task->num_scatter > 1) {
drivers/scsi/pm8001/pm8001_hwi.c
4006
pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
drivers/scsi/pm8001/pm8001_hwi.c
4011
} else if (task->num_scatter == 1) {
drivers/scsi/pm8001/pm8001_hwi.c
4012
u64 dma_addr = sg_dma_address(task->scatter);
drivers/scsi/pm8001/pm8001_hwi.c
4015
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm8001_hwi.c
4017
} else if (task->num_scatter == 0) {
drivers/scsi/pm8001/pm8001_hwi.c
4020
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
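
Annotation: the pm8001_hwi.c entries above (lines 4005-4020) show three scatter-gather encodings for an SSP command: more than one segment builds a PRD table via pm8001_chip_make_sg() and points the chip at it, exactly one segment is inlined as a bus address plus length, and zero segments send a zero address with the transfer length. A sketch of that branch; the esgl/addr field names are assumptions about the pm8001 request layout, not verified kernel identifiers.

    /* Three SG encodings for an SSP request; field names illustrative. */
    if (task->num_scatter > 1) {
        /* external SG list: the chip walks the PRD table in the ccb */
        pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
        ssp_cmd.esgl = cpu_to_le32(1U << 31);
    } else if (task->num_scatter == 1) {
        /* single segment: inline its DMA address and length */
        u64 dma_addr = sg_dma_address(task->scatter);

        ssp_cmd.addr_low  = cpu_to_le32(lower_32_bits(dma_addr));
        ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr));
        ssp_cmd.len       = cpu_to_le32(task->total_xfer_len);
    } else {
        /* num_scatter == 0: no data buffer to map */
        ssp_cmd.addr_low  = 0;
        ssp_cmd.addr_high = 0;
        ssp_cmd.len       = cpu_to_le32(task->total_xfer_len);
    }
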
drivers/scsi/pm8001/pm8001_hwi.c
4031
struct sas_task *task = ccb->task;
drivers/scsi/pm8001/pm8001_hwi.c
4032
struct domain_device *dev = task->dev;
drivers/scsi/pm8001/pm8001_hwi.c
4044
if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
drivers/scsi/pm8001/pm8001_hwi.c
4047
} else if (likely(!task->ata_task.device_control_reg_update)) {
drivers/scsi/pm8001/pm8001_hwi.c
4048
if (task->ata_task.use_ncq &&
drivers/scsi/pm8001/pm8001_hwi.c
4052
} else if (task->ata_task.dma_xfer) {
drivers/scsi/pm8001/pm8001_hwi.c
4060
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
drivers/scsi/pm8001/pm8001_hwi.c
4061
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
drivers/scsi/pm8001/pm8001_hwi.c
4064
dir = data_dir_flags[task->data_dir] << 8;
drivers/scsi/pm8001/pm8001_hwi.c
4067
sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm8001_hwi.c
4068
if (task->ata_task.return_fis_on_success)
drivers/scsi/pm8001/pm8001_hwi.c
4073
sata_cmd.sata_fis = task->ata_task.fis;
drivers/scsi/pm8001/pm8001_hwi.c
4074
if (likely(!task->ata_task.device_control_reg_update))
drivers/scsi/pm8001/pm8001_hwi.c
4078
if (task->num_scatter > 1) {
drivers/scsi/pm8001/pm8001_hwi.c
4079
pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
drivers/scsi/pm8001/pm8001_hwi.c
4084
} else if (task->num_scatter == 1) {
drivers/scsi/pm8001/pm8001_hwi.c
4085
u64 dma_addr = sg_dma_address(task->scatter);
drivers/scsi/pm8001/pm8001_hwi.c
4088
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm8001_hwi.c
4090
} else if (task->num_scatter == 0) {
drivers/scsi/pm8001/pm8001_hwi.c
4093
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm8001_hwi.c
4312
struct sas_task *task = ccb->task;
drivers/scsi/pm8001/pm8001_hwi.c
4313
struct sas_internal_abort_task *abort = &task->abort_task;
drivers/scsi/pm8001/pm8001_hwi.c
4343
struct sas_task *task = ccb->task;
drivers/scsi/pm8001/pm8001_hwi.c
4344
struct domain_device *dev = task->dev;
drivers/scsi/pm8001/pm8001_hwi.c
4353
memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
drivers/scsi/pm8001/pm8001_init.c
1283
pm8001_ha->ccb_info[i].task = NULL;
drivers/scsi/pm8001/pm8001_sas.c
1021
int pm8001_query_task(struct sas_task *task)
drivers/scsi/pm8001/pm8001_sas.c
1025
if (unlikely(!task || !task->lldd_task || !task->dev))
drivers/scsi/pm8001/pm8001_sas.c
1028
if (task->task_proto & SAS_PROTOCOL_SSP) {
drivers/scsi/pm8001/pm8001_sas.c
1029
struct scsi_cmnd *cmnd = task->uldd_task;
drivers/scsi/pm8001/pm8001_sas.c
1030
struct domain_device *dev = task->dev;
drivers/scsi/pm8001/pm8001_sas.c
1034
rc = pm8001_find_tag(task, &tag);
drivers/scsi/pm8001/pm8001_sas.c
104
static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op,
drivers/scsi/pm8001/pm8001_sas.c
1041
rc = sas_query_task(task, tag);
drivers/scsi/pm8001/pm8001_sas.c
1061
int pm8001_abort_task(struct sas_task *task)
drivers/scsi/pm8001/pm8001_sas.c
1063
struct pm8001_ccb_info *ccb = task->lldd_task;
drivers/scsi/pm8001/pm8001_sas.c
1073
if (!task->lldd_task || !task->dev)
drivers/scsi/pm8001/pm8001_sas.c
1076
dev = task->dev;
drivers/scsi/pm8001/pm8001_sas.c
1086
ret = pm8001_find_tag(task, &tag);
drivers/scsi/pm8001/pm8001_sas.c
1088
pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
drivers/scsi/pm8001/pm8001_sas.c
1091
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/pm8001/pm8001_sas.c
1092
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
drivers/scsi/pm8001/pm8001_sas.c
1093
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/pm8001/pm8001_sas.c
1096
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
drivers/scsi/pm8001/pm8001_sas.c
1097
if (task->slow_task == NULL) {
drivers/scsi/pm8001/pm8001_sas.c
1099
task->slow_task = &slow_task;
drivers/scsi/pm8001/pm8001_sas.c
1101
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/pm8001/pm8001_sas.c
1102
if (task->task_proto & SAS_PROTOCOL_SSP) {
drivers/scsi/pm8001/pm8001_sas.c
1103
rc = sas_abort_task(task, tag);
drivers/scsi/pm8001/pm8001_sas.c
1105
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
drivers/scsi/pm8001/pm8001_sas.c
1106
task->task_proto & SAS_PROTOCOL_STP) {
drivers/scsi/pm8001/pm8001_sas.c
114
if (!task)
drivers/scsi/pm8001/pm8001_sas.c
117
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/pm8001/pm8001_sas.c
118
if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED)))
drivers/scsi/pm8001/pm8001_sas.c
1182
&task->slow_task->completion,
drivers/scsi/pm8001/pm8001_sas.c
120
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/pm8001/pm8001_sas.c
1200
ccb->task = NULL;
drivers/scsi/pm8001/pm8001_sas.c
1204
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
drivers/scsi/pm8001/pm8001_sas.c
1210
spin_lock_irqsave(&task->task_state_lock, flags);
drivers/scsi/pm8001/pm8001_sas.c
1211
if (task->slow_task == &slow_task)
drivers/scsi/pm8001/pm8001_sas.c
1212
task->slow_task = NULL;
drivers/scsi/pm8001/pm8001_sas.c
1213
spin_unlock_irqrestore(&task->task_state_lock, flags);
drivers/scsi/pm8001/pm8001_sas.c
122
if (task->task_proto == SAS_PROTOCOL_STP) {
drivers/scsi/pm8001/pm8001_sas.c
125
qc = task->uldd_task;
drivers/scsi/pm8001/pm8001_sas.c
1258
void pm8001_tmf_aborted(struct sas_task *task)
drivers/scsi/pm8001/pm8001_sas.c
1260
struct pm8001_ccb_info *ccb = task->lldd_task;
drivers/scsi/pm8001/pm8001_sas.c
1263
ccb->task = NULL;
drivers/scsi/pm8001/pm8001_sas.c
148
struct sas_task *task = NULL;
drivers/scsi/pm8001/pm8001_sas.c
160
task = ccb->task;
drivers/scsi/pm8001/pm8001_sas.c
161
pm80xx_get_tag_opcodes(task, &ata_op, &ata_tag, &task_aborted);
drivers/scsi/pm8001/pm8001_sas.c
166
task, task_aborted,
drivers/scsi/pm8001/pm8001_sas.c
386
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
drivers/scsi/pm8001/pm8001_sas.c
388
struct ata_queued_cmd *qc = task->uldd_task;
drivers/scsi/pm8001/pm8001_sas.c
451
struct sas_task *task = ccb->task;
drivers/scsi/pm8001/pm8001_sas.c
452
enum sas_protocol task_proto = task->task_proto;
drivers/scsi/pm8001/pm8001_sas.c
453
struct sas_tmf_task *tmf = task->tmf;
drivers/scsi/pm8001/pm8001_sas.c
482
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
drivers/scsi/pm8001/pm8001_sas.c
484
struct task_status_struct *ts = &task->task_status;
drivers/scsi/pm8001/pm8001_sas.c
485
enum sas_protocol task_proto = task->task_proto;
drivers/scsi/pm8001/pm8001_sas.c
486
struct domain_device *dev = task->dev;
drivers/scsi/pm8001/pm8001_sas.c
488
bool internal_abort = sas_is_internal_abort(task);
drivers/scsi/pm8001/pm8001_sas.c
50
static int pm8001_find_tag(struct sas_task *task, u32 *tag)
drivers/scsi/pm8001/pm8001_sas.c
500
task->task_done(task);
drivers/scsi/pm8001/pm8001_sas.c
507
task->task_done(task);
drivers/scsi/pm8001/pm8001_sas.c
52
if (task->lldd_task) {
drivers/scsi/pm8001/pm8001_sas.c
523
task->task_done(task);
drivers/scsi/pm8001/pm8001_sas.c
526
task->task_done(task);
drivers/scsi/pm8001/pm8001_sas.c
533
ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
drivers/scsi/pm8001/pm8001_sas.c
54
ccb = task->lldd_task;
drivers/scsi/pm8001/pm8001_sas.c
540
if (task->num_scatter) {
drivers/scsi/pm8001/pm8001_sas.c
541
n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
drivers/scsi/pm8001/pm8001_sas.c
542
task->num_scatter, task->data_dir);
drivers/scsi/pm8001/pm8001_sas.c
549
n_elem = task->num_scatter;
drivers/scsi/pm8001/pm8001_sas.c
552
task->lldd_task = ccb;
drivers/scsi/pm8001/pm8001_sas.c
561
dma_unmap_sg(pm8001_ha->dev, task->scatter,
drivers/scsi/pm8001/pm8001_sas.c
562
task->num_scatter, task->data_dir);
drivers/scsi/pm8001/pm8001_sas.c
583
struct sas_task *task = ccb->task;
drivers/scsi/pm8001/pm8001_sas.c
587
if (!task)
drivers/scsi/pm8001/pm8001_sas.c
590
if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
drivers/scsi/pm8001/pm8001_sas.c
591
dma_unmap_sg(pm8001_ha->dev, task->scatter,
drivers/scsi/pm8001/pm8001_sas.c
592
task->num_scatter, task->data_dir);
drivers/scsi/pm8001/pm8001_sas.c
594
switch (task->task_proto) {
drivers/scsi/pm8001/pm8001_sas.c
596
dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
drivers/scsi/pm8001/pm8001_sas.c
598
dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
drivers/scsi/pm8001/pm8001_sas.c
610
if (sas_protocol_ata(task->task_proto)) {
drivers/scsi/pm8001/pm8001_sas.c
612
qc = task->uldd_task;
drivers/scsi/pm8001/pm8001_sas.c
621
task->lldd_task = NULL;
drivers/scsi/pm8001/pm8001_sas.c
817
struct sas_task *task;
drivers/scsi/pm8001/pm8001_sas.c
837
task = ccb->task;
drivers/scsi/pm8001/pm8001_sas.c
838
if (!task || !task->task_done)
drivers/scsi/pm8001/pm8001_sas.c
840
if (task_to_close && (task != task_to_close))
drivers/scsi/pm8001/pm8001_sas.c
842
ts = &task->task_status;
drivers/scsi/pm8001/pm8001_sas.c
849
spin_lock_irqsave(&task->task_state_lock, flags1);
drivers/scsi/pm8001/pm8001_sas.c
850
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
drivers/scsi/pm8001/pm8001_sas.c
851
task->task_state_flags |= SAS_TASK_STATE_DONE;
drivers/scsi/pm8001/pm8001_sas.c
852
if (unlikely((task->task_state_flags
drivers/scsi/pm8001/pm8001_sas.c
854
spin_unlock_irqrestore(&task->task_state_lock,
drivers/scsi/pm8001/pm8001_sas.c
858
spin_unlock_irqrestore(&task->task_state_lock,
drivers/scsi/pm8001/pm8001_sas.c
863
task->task_done(task);
drivers/scsi/pm8001/pm8001_sas.h
293
struct sas_task *task;
drivers/scsi/pm8001/pm8001_sas.h
642
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag);
drivers/scsi/pm8001/pm8001_sas.h
649
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags);
drivers/scsi/pm8001/pm8001_sas.h
650
int pm8001_abort_task(struct sas_task *task);
drivers/scsi/pm8001/pm8001_sas.h
657
int pm8001_query_task(struct sas_task *task);
drivers/scsi/pm8001/pm8001_sas.h
744
struct pm8001_device *dev, struct sas_task *task)
drivers/scsi/pm8001/pm8001_sas.h
750
if (task)
drivers/scsi/pm8001/pm8001_sas.h
751
rq = sas_task_find_rq(task);
drivers/scsi/pm8001/pm8001_sas.h
761
ccb->task = task;
drivers/scsi/pm8001/pm8001_sas.h
784
ccb->task = NULL;
drivers/scsi/pm8001/pm8001_sas.h
795
struct sas_task *task = ccb->task;
drivers/scsi/pm8001/pm8001_sas.h
799
task->task_done(task);
drivers/scsi/pm8001/pm8001_sas.h
802
void pm8001_tmf_aborted(struct sas_task *task);
drivers/scsi/pm8001/pm80xx_hwi.c
1844
t = ccb->task;
drivers/scsi/pm8001/pm80xx_hwi.c
2122
t = ccb->task;
drivers/scsi/pm8001/pm80xx_hwi.c
2314
t = ccb->task;
drivers/scsi/pm8001/pm80xx_hwi.c
2735
t = ccb->task;
drivers/scsi/pm8001/pm80xx_hwi.c
2925
t = ccb->task;
drivers/scsi/pm8001/pm80xx_hwi.c
4172
struct sas_task *task = ccb->task;
drivers/scsi/pm8001/pm80xx_hwi.c
4173
struct domain_device *dev = task->dev;
drivers/scsi/pm8001/pm80xx_hwi.c
4187
sg_req = &task->smp_task.smp_req;
drivers/scsi/pm8001/pm80xx_hwi.c
4193
sg_resp = &task->smp_task.smp_resp;
drivers/scsi/pm8001/pm80xx_hwi.c
4217
smp_req = &task->smp_task.smp_req;
drivers/scsi/pm8001/pm80xx_hwi.c
4231
(&task->smp_task.smp_req) + 4);
drivers/scsi/pm8001/pm80xx_hwi.c
4234
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
drivers/scsi/pm8001/pm80xx_hwi.c
4237
(&task->smp_task.smp_resp));
drivers/scsi/pm8001/pm80xx_hwi.c
4240
(&task->smp_task.smp_resp)-4);
drivers/scsi/pm8001/pm80xx_hwi.c
4244
(&task->smp_task.smp_req));
drivers/scsi/pm8001/pm80xx_hwi.c
4246
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
drivers/scsi/pm8001/pm80xx_hwi.c
4249
(&task->smp_task.smp_resp));
drivers/scsi/pm8001/pm80xx_hwi.c
4252
((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
drivers/scsi/pm8001/pm80xx_hwi.c
4281
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
drivers/scsi/pm8001/pm80xx_hwi.c
4284
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
drivers/scsi/pm8001/pm80xx_hwi.c
4289
static int check_enc_sas_cmd(struct sas_task *task)
drivers/scsi/pm8001/pm80xx_hwi.c
4291
u8 cmd = task->ssp_task.cmd->cmnd[0];
drivers/scsi/pm8001/pm80xx_hwi.c
4299
static int check_enc_sat_cmd(struct sas_task *task)
drivers/scsi/pm8001/pm80xx_hwi.c
4302
switch (task->ata_task.fis.command) {
drivers/scsi/pm8001/pm80xx_hwi.c
4322
static u32 pm80xx_chip_get_q_index(struct sas_task *task)
drivers/scsi/pm8001/pm80xx_hwi.c
4324
struct request *rq = sas_task_find_rq(task);
drivers/scsi/pm8001/pm80xx_hwi.c
4340
struct sas_task *task = ccb->task;
drivers/scsi/pm8001/pm80xx_hwi.c
4341
struct domain_device *dev = task->dev;
drivers/scsi/pm8001/pm80xx_hwi.c
4351
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
drivers/scsi/pm8001/pm80xx_hwi.c
4358
cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);
drivers/scsi/pm8001/pm80xx_hwi.c
4359
ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm80xx_hwi.c
4362
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
drivers/scsi/pm8001/pm80xx_hwi.c
4363
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
drivers/scsi/pm8001/pm80xx_hwi.c
4364
task->ssp_task.cmd->cmd_len);
drivers/scsi/pm8001/pm80xx_hwi.c
4365
q_index = pm80xx_chip_get_q_index(task);
drivers/scsi/pm8001/pm80xx_hwi.c
4369
!(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
drivers/scsi/pm8001/pm80xx_hwi.c
4372
task->ssp_task.cmd->cmnd[0]);
drivers/scsi/pm8001/pm80xx_hwi.c
4376
((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0);
drivers/scsi/pm8001/pm80xx_hwi.c
4379
if (task->num_scatter > 1) {
drivers/scsi/pm8001/pm80xx_hwi.c
4380
pm8001_chip_make_sg(task->scatter,
drivers/scsi/pm8001/pm80xx_hwi.c
4388
} else if (task->num_scatter == 1) {
drivers/scsi/pm8001/pm80xx_hwi.c
4389
u64 dma_addr = sg_dma_address(task->scatter);
drivers/scsi/pm8001/pm80xx_hwi.c
4395
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm80xx_hwi.c
4409
pm8001_chip_make_sg(task->scatter, 1,
drivers/scsi/pm8001/pm80xx_hwi.c
4418
} else if (task->num_scatter == 0) {
drivers/scsi/pm8001/pm80xx_hwi.c
4421
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm80xx_hwi.c
4429
ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) |
drivers/scsi/pm8001/pm80xx_hwi.c
4430
(task->ssp_task.cmd->cmnd[3] << 16) |
drivers/scsi/pm8001/pm80xx_hwi.c
4431
(task->ssp_task.cmd->cmnd[4] << 8) |
drivers/scsi/pm8001/pm80xx_hwi.c
4432
(task->ssp_task.cmd->cmnd[5]));
drivers/scsi/pm8001/pm80xx_hwi.c
4436
task->ssp_task.cmd->cmnd[0], q_index);
drivers/scsi/pm8001/pm80xx_hwi.c
4438
if (task->num_scatter > 1) {
drivers/scsi/pm8001/pm80xx_hwi.c
4439
pm8001_chip_make_sg(task->scatter, ccb->n_elem,
drivers/scsi/pm8001/pm80xx_hwi.c
4447
} else if (task->num_scatter == 1) {
drivers/scsi/pm8001/pm80xx_hwi.c
4448
u64 dma_addr = sg_dma_address(task->scatter);
drivers/scsi/pm8001/pm80xx_hwi.c
4453
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm80xx_hwi.c
4466
pm8001_chip_make_sg(task->scatter, 1,
drivers/scsi/pm8001/pm80xx_hwi.c
4475
} else if (task->num_scatter == 0) {
drivers/scsi/pm8001/pm80xx_hwi.c
4478
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm80xx_hwi.c
4490
struct sas_task *task = ccb->task;
drivers/scsi/pm8001/pm80xx_hwi.c
4491
struct domain_device *dev = task->dev;
drivers/scsi/pm8001/pm80xx_hwi.c
4493
struct ata_queued_cmd *qc = task->uldd_task;
drivers/scsi/pm8001/pm80xx_hwi.c
4504
q_index = pm80xx_chip_get_q_index(task);
drivers/scsi/pm8001/pm80xx_hwi.c
4506
if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
drivers/scsi/pm8001/pm80xx_hwi.c
4509
} else if (likely(!task->ata_task.device_control_reg_update)) {
drivers/scsi/pm8001/pm80xx_hwi.c
4510
if (task->ata_task.use_ncq &&
drivers/scsi/pm8001/pm80xx_hwi.c
4514
} else if (task->ata_task.dma_xfer) {
drivers/scsi/pm8001/pm80xx_hwi.c
4522
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
drivers/scsi/pm8001/pm80xx_hwi.c
4523
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
drivers/scsi/pm8001/pm80xx_hwi.c
4526
dir = data_dir_flags[task->data_dir] << 8;
drivers/scsi/pm8001/pm80xx_hwi.c
4529
sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm80xx_hwi.c
4530
if (task->ata_task.return_fis_on_success)
drivers/scsi/pm8001/pm80xx_hwi.c
4532
sata_cmd.sata_fis = task->ata_task.fis;
drivers/scsi/pm8001/pm80xx_hwi.c
4533
if (likely(!task->ata_task.device_control_reg_update))
drivers/scsi/pm8001/pm80xx_hwi.c
4539
!(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) {
drivers/scsi/pm8001/pm80xx_hwi.c
4549
if (task->num_scatter > 1) {
drivers/scsi/pm8001/pm80xx_hwi.c
4550
pm8001_chip_make_sg(task->scatter,
drivers/scsi/pm8001/pm80xx_hwi.c
4558
} else if (task->num_scatter == 1) {
drivers/scsi/pm8001/pm80xx_hwi.c
4559
u64 dma_addr = sg_dma_address(task->scatter);
drivers/scsi/pm8001/pm80xx_hwi.c
4565
sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm80xx_hwi.c
4578
pm8001_chip_make_sg(task->scatter, 1,
drivers/scsi/pm8001/pm80xx_hwi.c
4588
} else if (task->num_scatter == 0) {
drivers/scsi/pm8001/pm80xx_hwi.c
4591
sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm80xx_hwi.c
4615
if (task->num_scatter > 1) {
drivers/scsi/pm8001/pm80xx_hwi.c
4616
pm8001_chip_make_sg(task->scatter,
drivers/scsi/pm8001/pm80xx_hwi.c
4622
} else if (task->num_scatter == 1) {
drivers/scsi/pm8001/pm80xx_hwi.c
4623
u64 dma_addr = sg_dma_address(task->scatter);
drivers/scsi/pm8001/pm80xx_hwi.c
4627
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm80xx_hwi.c
4640
pm8001_chip_make_sg(task->scatter, 1,
drivers/scsi/pm8001/pm80xx_hwi.c
4647
} else if (task->num_scatter == 0) {
drivers/scsi/pm8001/pm80xx_hwi.c
4650
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
drivers/scsi/pm8001/pm80xx_hwi.c
4656
cpu_to_le32(((task->ata_task.atapi_packet[0]) |
drivers/scsi/pm8001/pm80xx_hwi.c
4657
(task->ata_task.atapi_packet[1] << 8) |
drivers/scsi/pm8001/pm80xx_hwi.c
4658
(task->ata_task.atapi_packet[2] << 16) |
drivers/scsi/pm8001/pm80xx_hwi.c
4659
(task->ata_task.atapi_packet[3] << 24)));
drivers/scsi/pm8001/pm80xx_hwi.c
4661
cpu_to_le32(((task->ata_task.atapi_packet[4]) |
drivers/scsi/pm8001/pm80xx_hwi.c
4662
(task->ata_task.atapi_packet[5] << 8) |
drivers/scsi/pm8001/pm80xx_hwi.c
4663
(task->ata_task.atapi_packet[6] << 16) |
drivers/scsi/pm8001/pm80xx_hwi.c
4664
(task->ata_task.atapi_packet[7] << 24)));
drivers/scsi/pm8001/pm80xx_hwi.c
4666
cpu_to_le32(((task->ata_task.atapi_packet[8]) |
drivers/scsi/pm8001/pm80xx_hwi.c
4667
(task->ata_task.atapi_packet[9] << 8) |
drivers/scsi/pm8001/pm80xx_hwi.c
4668
(task->ata_task.atapi_packet[10] << 16) |
drivers/scsi/pm8001/pm80xx_hwi.c
4669
(task->ata_task.atapi_packet[11] << 24)));
drivers/scsi/pm8001/pm80xx_hwi.c
4671
cpu_to_le32(((task->ata_task.atapi_packet[12]) |
drivers/scsi/pm8001/pm80xx_hwi.c
4672
(task->ata_task.atapi_packet[13] << 8) |
drivers/scsi/pm8001/pm80xx_hwi.c
4673
(task->ata_task.atapi_packet[14] << 16) |
drivers/scsi/pm8001/pm80xx_hwi.c
4674
(task->ata_task.atapi_packet[15] << 24)));
drivers/scsi/qedf/qedf.h
144
struct fcoe_task_context *task;
drivers/scsi/qedf/qedf_els.c
1050
htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);
drivers/scsi/qedf/qedf_els.c
124
task = qedf_get_task_mem(&qedf->tasks, xid);
drivers/scsi/qedf/qedf_els.c
125
qedf_init_mp_task(els_req, task, sqe);
drivers/scsi/qedf/qedf_els.c
19
struct fcoe_task_context *task;
drivers/scsi/qedf/qedf_els.c
314
htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);
drivers/scsi/qedf/qedf_io.c
2290
struct fcoe_task_context *task;
drivers/scsi/qedf/qedf_io.c
2339
task = qedf_get_task_mem(&qedf->tasks, xid);
drivers/scsi/qedf/qedf_io.c
2352
qedf_init_task(fcport, lport, io_req, task, sqe);
drivers/scsi/qedf/qedf_io.c
604
io_req->task = task_ctx;
drivers/scsi/qedf/qedf_io.c
699
io_req->task = task_ctx;
drivers/scsi/qedi/qedi_fw.c
1007
qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
drivers/scsi/qedi/qedi_fw.c
1008
login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
drivers/scsi/qedi/qedi_fw.c
1063
struct iscsi_task *task)
drivers/scsi/qedi/qedi_fw.c
1078
qedi_cmd = (struct qedi_cmd *)task->dd_data;
drivers/scsi/qedi/qedi_fw.c
1079
logout_hdr = (struct iscsi_logout *)task->hdr;
drivers/scsi/qedi/qedi_fw.c
1101
qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
drivers/scsi/qedi/qedi_fw.c
1102
logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
drivers/scsi/qedi/qedi_fw.c
1136
struct iscsi_task *task, bool in_recovery)
drivers/scsi/qedi/qedi_fw.c
1148
if (task) {
drivers/scsi/qedi/qedi_fw.c
1149
tmf_hdr = (struct iscsi_tm *)task->hdr;
drivers/scsi/qedi/qedi_fw.c
1173
ctask = cmd->task;
drivers/scsi/qedi/qedi_fw.c
1174
if (ctask == task)
drivers/scsi/qedi/qedi_fw.c
1253
struct iscsi_task *task)
drivers/scsi/qedi/qedi_fw.c
1275
rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
drivers/scsi/qedi/qedi_fw.c
1286
struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
1290
struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
drivers/scsi/qedi/qedi_fw.c
1326
mtask = qedi_cmd->task;
drivers/scsi/qedi/qedi_fw.c
1396
send_iscsi_tmf(qedi_conn, qedi_cmd->task, ctask);
drivers/scsi/qedi/qedi_fw.c
1536
struct iscsi_task *task)
drivers/scsi/qedi/qedi_fw.c
1555
qedi_cmd = (struct qedi_cmd *)task->dd_data;
drivers/scsi/qedi/qedi_fw.c
1556
text_hdr = (struct iscsi_text *)task->hdr;
drivers/scsi/qedi/qedi_fw.c
1579
qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
drivers/scsi/qedi/qedi_fw.c
1580
text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
drivers/scsi/qedi/qedi_fw.c
161
rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
drivers/scsi/qedi/qedi_fw.c
1635
struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
1654
qedi_cmd = (struct qedi_cmd *)task->dd_data;
drivers/scsi/qedi/qedi_fw.c
1655
nopout_hdr = (struct iscsi_nopout *)task->hdr;
drivers/scsi/qedi/qedi_fw.c
1685
qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
drivers/scsi/qedi/qedi_fw.c
1691
nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
drivers/scsi/qedi/qedi_fw.c
179
struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
1913
void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
1917
struct iscsi_conn *conn = task->conn;
drivers/scsi/qedi/qedi_fw.c
1919
struct scsi_cmnd *sc_cmd = task->sc;
drivers/scsi/qedi/qedi_fw.c
192
qedi_cmd = task->dd_data;
drivers/scsi/qedi/qedi_fw.c
1968
int qedi_iscsi_send_ioreq(struct iscsi_task *task)
drivers/scsi/qedi/qedi_fw.c
1970
struct iscsi_conn *conn = task->conn;
drivers/scsi/qedi/qedi_fw.c
1975
struct qedi_cmd *cmd = task->dd_data;
drivers/scsi/qedi/qedi_fw.c
1976
struct scsi_cmnd *sc = task->sc;
drivers/scsi/qedi/qedi_fw.c
1987
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
drivers/scsi/qedi/qedi_fw.c
2040
qedi_update_itt_map(qedi, tid, task->itt, cmd);
drivers/scsi/qedi/qedi_fw.c
2041
cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
drivers/scsi/qedi/qedi_fw.c
2128
int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
drivers/scsi/qedi/qedi_fw.c
2132
struct iscsi_conn *conn = task->conn;
drivers/scsi/qedi/qedi_fw.c
2134
struct qedi_cmd *cmd = task->dd_data;
drivers/scsi/qedi/qedi_fw.c
2140
cmd->task_id, get_itt(task->itt), task->state,
drivers/scsi/qedi/qedi_fw.c
220
tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
drivers/scsi/qedi/qedi_fw.c
259
struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
270
cmd = (struct qedi_cmd *)task->dd_data;
drivers/scsi/qedi/qedi_fw.c
32
struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
41
cmd = (struct qedi_cmd *)task->dd_data;
drivers/scsi/qedi/qedi_fw.c
410
struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
450
if (task) {
drivers/scsi/qedi/qedi_fw.c
451
cmd = task->dd_data;
drivers/scsi/qedi/qedi_fw.c
481
struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
535
struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
577
struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
581
struct qedi_cmd *cmd = task->dd_data;
drivers/scsi/qedi/qedi_fw.c
606
if (!iscsi_cmd(sc_cmd)->task) {
drivers/scsi/qedi/qedi_fw.c
621
hdr = (struct iscsi_scsi_rsp *)task->hdr;
drivers/scsi/qedi/qedi_fw.c
665
qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
drivers/scsi/qedi/qedi_fw.c
675
struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
687
qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
drivers/scsi/qedi/qedi_fw.c
690
qedi_process_login_resp(qedi, cqe, task, conn);
drivers/scsi/qedi/qedi_fw.c
693
qedi_process_tmf_resp(qedi, cqe, task, conn);
drivers/scsi/qedi/qedi_fw.c
696
qedi_process_text_resp(qedi, cqe, task, conn);
drivers/scsi/qedi/qedi_fw.c
699
qedi_process_logout_resp(qedi, cqe, task, conn);
drivers/scsi/qedi/qedi_fw.c
702
qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
drivers/scsi/qedi/qedi_fw.c
711
struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
716
struct qedi_cmd *cmd = task->dd_data;
drivers/scsi/qedi/qedi_fw.c
725
__iscsi_put_task(task);
drivers/scsi/qedi/qedi_fw.c
740
struct iscsi_task *mtask, *task;
drivers/scsi/qedi/qedi_fw.c
765
mtask = qedi_cmd->task;
drivers/scsi/qedi/qedi_fw.c
766
task = work->ctask;
drivers/scsi/qedi/qedi_fw.c
784
if (iscsi_task_is_completed(task)) {
drivers/scsi/qedi/qedi_fw.c
791
dbg_cmd = task->dd_data;
drivers/scsi/qedi/qedi_fw.c
795
get_itt(tmf_hdr->rtt), get_itt(task->itt), dbg_cmd->task_id,
drivers/scsi/qedi/qedi_fw.c
825
struct iscsi_task *task = NULL;
drivers/scsi/qedi/qedi_fw.c
83
struct iscsi_task *task,
drivers/scsi/qedi/qedi_fw.c
872
task = qedi_cmd->task;
drivers/scsi/qedi/qedi_fw.c
873
if (!task) {
drivers/scsi/qedi/qedi_fw.c
879
nopout_hdr = (struct iscsi_nopout *)task->hdr;
drivers/scsi/qedi/qedi_fw.c
883
task, q_conn);
drivers/scsi/qedi/qedi_fw.c
888
qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
drivers/scsi/qedi/qedi_fw.c
894
qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
drivers/scsi/qedi/qedi_fw.c
898
qedi_process_async_mesg(qedi, cqe, task, q_conn,
drivers/scsi/qedi/qedi_fw.c
902
qedi_process_reject_mesg(qedi, cqe, task, q_conn,
drivers/scsi/qedi/qedi_fw.c
94
cmd = (struct qedi_cmd *)task->dd_data;
drivers/scsi/qedi/qedi_fw.c
960
struct iscsi_task *task)
drivers/scsi/qedi/qedi_fw.c
977
qedi_cmd = (struct qedi_cmd *)task->dd_data;
drivers/scsi/qedi/qedi_fw.c
979
login_hdr = (struct iscsi_login_req *)task->hdr;
drivers/scsi/qedi/qedi_gbl.h
31
struct iscsi_task *task);
drivers/scsi/qedi/qedi_gbl.h
33
struct iscsi_task *task);
drivers/scsi/qedi/qedi_gbl.h
36
struct iscsi_task *task);
drivers/scsi/qedi/qedi_gbl.h
38
struct iscsi_task *task,
drivers/scsi/qedi/qedi_gbl.h
40
int qedi_iscsi_send_ioreq(struct iscsi_task *task);
drivers/scsi/qedi/qedi_gbl.h
43
int qedi_iscsi_cleanup_task(struct iscsi_task *task,
drivers/scsi/qedi/qedi_gbl.h
63
struct iscsi_task *task, bool in_recovery);
drivers/scsi/qedi/qedi_gbl.h
64
void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
drivers/scsi/qedi/qedi_gbl.h
71
struct iscsi_task *task);
drivers/scsi/qedi/qedi_iscsi.c
1449
static void qedi_cleanup_task(struct iscsi_task *task)
drivers/scsi/qedi/qedi_iscsi.c
1453
if (task->state == ISCSI_TASK_PENDING) {
drivers/scsi/qedi/qedi_iscsi.c
1455
refcount_read(&task->refcount));
drivers/scsi/qedi/qedi_iscsi.c
1459
if (task->sc)
drivers/scsi/qedi/qedi_iscsi.c
1460
qedi_iscsi_unmap_sg_list(task->dd_data);
drivers/scsi/qedi/qedi_iscsi.c
1462
cmd = task->dd_data;
drivers/scsi/qedi/qedi_iscsi.c
1464
qedi_clear_task_idx(iscsi_host_priv(task->conn->session->host),
drivers/scsi/qedi/qedi_iscsi.c
170
struct iscsi_task *task = session->cmds[i];
drivers/scsi/qedi/qedi_iscsi.c
171
struct qedi_cmd *cmd = task->dd_data;
drivers/scsi/qedi/qedi_iscsi.c
214
struct iscsi_task *task = session->cmds[i];
drivers/scsi/qedi/qedi_iscsi.c
215
struct qedi_cmd *cmd = task->dd_data;
drivers/scsi/qedi/qedi_iscsi.c
217
task->hdr = &cmd->hdr;
drivers/scsi/qedi/qedi_iscsi.c
218
task->hdr_max = sizeof(struct iscsi_hdr);
drivers/scsi/qedi/qedi_iscsi.c
771
static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
drivers/scsi/qedi/qedi_iscsi.c
773
struct qedi_cmd *cmd = task->dd_data;
drivers/scsi/qedi/qedi_iscsi.c
780
switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
drivers/scsi/qedi/qedi_iscsi.c
782
qedi_send_iscsi_login(qedi_conn, task);
drivers/scsi/qedi/qedi_iscsi.c
788
rc = qedi_send_iscsi_nopout(qedi_conn, task,
drivers/scsi/qedi/qedi_iscsi.c
791
rc = qedi_send_iscsi_nopout(qedi_conn, task,
drivers/scsi/qedi/qedi_iscsi.c
795
rc = qedi_send_iscsi_logout(qedi_conn, task);
drivers/scsi/qedi/qedi_iscsi.c
798
rc = qedi_send_iscsi_tmf(qedi_conn, task);
drivers/scsi/qedi/qedi_iscsi.c
801
rc = qedi_send_iscsi_text(qedi_conn, task);
drivers/scsi/qedi/qedi_iscsi.c
805
"unsupported op 0x%x\n", task->hdr->opcode);
drivers/scsi/qedi/qedi_iscsi.c
811
static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
drivers/scsi/qedi/qedi_iscsi.c
814
struct qedi_cmd *cmd = task->dd_data;
drivers/scsi/qedi/qedi_iscsi.c
818
qedi_conn->gen_pdu.req_buf_size = task->data_count;
drivers/scsi/qedi/qedi_iscsi.c
820
if (task->data_count) {
drivers/scsi/qedi/qedi_iscsi.c
821
memcpy(qedi_conn->gen_pdu.req_buf, task->data,
drivers/scsi/qedi/qedi_iscsi.c
822
task->data_count);
drivers/scsi/qedi/qedi_iscsi.c
824
qedi_conn->gen_pdu.req_buf + task->data_count;
drivers/scsi/qedi/qedi_iscsi.c
828
return qedi_iscsi_send_generic_request(task);
drivers/scsi/qedi/qedi_iscsi.c
831
static int qedi_task_xmit(struct iscsi_task *task)
drivers/scsi/qedi/qedi_iscsi.c
833
struct iscsi_conn *conn = task->conn;
drivers/scsi/qedi/qedi_iscsi.c
835
struct qedi_cmd *cmd = task->dd_data;
drivers/scsi/qedi/qedi_iscsi.c
836
struct scsi_cmnd *sc = task->sc;
drivers/scsi/qedi/qedi_iscsi.c
849
cmd->task = NULL;
drivers/scsi/qedi/qedi_iscsi.c
852
cmd->task = task;
drivers/scsi/qedi/qedi_iscsi.c
857
return qedi_mtask_xmit(conn, task);
drivers/scsi/qedi/qedi_iscsi.c
860
return qedi_iscsi_send_ioreq(task);
drivers/scsi/qedi/qedi_iscsi.h
191
struct iscsi_task *task;
drivers/scsi/qla2xxx/qla_fw.h
543
uint8_t task;
drivers/scsi/qla2xxx/qla_iocb.c
1642
cmd_pkt->task = TSK_SIMPLE;
drivers/scsi/qla2xxx/qla_iocb.c
2009
cmd_pkt->task = TSK_SIMPLE;
drivers/scsi/qla2xxx/qla_iocb.c
3612
cmd_pkt->task |= sp->fcport->fcp_prio << 3;
drivers/scsi/qla2xxx/qla_mr.h
43
uint8_t task;
drivers/scsi/qla4xxx/ql4_def.h
842
struct iscsi_task *task;
drivers/scsi/qla4xxx/ql4_glbl.h
156
int qla4xxx_send_passthru0(struct iscsi_task *task);
drivers/scsi/qla4xxx/ql4_iocb.c
383
int qla4xxx_send_passthru0(struct iscsi_task *task)
drivers/scsi/qla4xxx/ql4_iocb.c
386
struct iscsi_session *sess = task->conn->session;
drivers/scsi/qla4xxx/ql4_iocb.c
389
struct ql4_task_data *task_data = task->dd_data;
drivers/scsi/qla4xxx/ql4_iocb.c
406
passthru_iocb->handle = task->itt;
drivers/scsi/qla4xxx/ql4_iocb.c
413
sizeof(struct iscsi_hdr), task->data, task->data_count);
drivers/scsi/qla4xxx/ql4_iocb.c
420
cpu_to_le32(task->data_count +
drivers/scsi/qla4xxx/ql4_isr.c
366
struct iscsi_task *task;
drivers/scsi/qla4xxx/ql4_isr.c
388
task = iscsi_itt_to_task(conn, itt);
drivers/scsi/qla4xxx/ql4_isr.c
391
if (task == NULL) {
drivers/scsi/qla4xxx/ql4_isr.c
396
task_data = task->dd_data;
drivers/scsi/qla4xxx/ql4_os.c
3357
struct iscsi_task *task;
drivers/scsi/qla4xxx/ql4_os.c
3367
task = task_data->task;
drivers/scsi/qla4xxx/ql4_os.c
3376
conn = task->conn;
drivers/scsi/qla4xxx/ql4_os.c
3396
static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
drivers/scsi/qla4xxx/ql4_os.c
3404
sess = task->conn->session;
drivers/scsi/qla4xxx/ql4_os.c
3407
task_data = task->dd_data;
drivers/scsi/qla4xxx/ql4_os.c
3410
if (task->sc) {
drivers/scsi/qla4xxx/ql4_os.c
3418
task_data->task = task;
drivers/scsi/qla4xxx/ql4_os.c
3420
if (task->data_count) {
drivers/scsi/qla4xxx/ql4_os.c
3421
task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
drivers/scsi/qla4xxx/ql4_os.c
3422
task->data_count,
drivers/scsi/qla4xxx/ql4_os.c
3429
__func__, task->conn->max_recv_dlength, hdr_len));
drivers/scsi/qla4xxx/ql4_os.c
3431
task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
drivers/scsi/qla4xxx/ql4_os.c
3439
task_data->req_len = task->data_count + hdr_len;
drivers/scsi/qla4xxx/ql4_os.c
3447
task->hdr = task_data->req_buffer;
drivers/scsi/qla4xxx/ql4_os.c
3464
static void qla4xxx_task_cleanup(struct iscsi_task *task)
drivers/scsi/qla4xxx/ql4_os.c
3473
sess = task->conn->session;
drivers/scsi/qla4xxx/ql4_os.c
3476
task_data = task->dd_data;
drivers/scsi/qla4xxx/ql4_os.c
3478
if (task->data_count) {
drivers/scsi/qla4xxx/ql4_os.c
3480
task->data_count, DMA_TO_DEVICE);
drivers/scsi/qla4xxx/ql4_os.c
3484
__func__, task->conn->max_recv_dlength, hdr_len));
drivers/scsi/qla4xxx/ql4_os.c
3493
static int qla4xxx_task_xmit(struct iscsi_task *task)
drivers/scsi/qla4xxx/ql4_os.c
3495
struct scsi_cmnd *sc = task->sc;
drivers/scsi/qla4xxx/ql4_os.c
3496
struct iscsi_session *sess = task->conn->session;
drivers/scsi/qla4xxx/ql4_os.c
3501
return qla4xxx_send_passthru0(task);
drivers/scsi/smartpqi/smartpqi.h
759
struct task_struct *task;
drivers/scsi/sym53c8xx_2/sym_hipd.c
2965
sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task)
drivers/scsi/sym53c8xx_2/sym_hipd.c
2989
(task == -1 || cp->tag == task)) {
drivers/scsi/sym53c8xx_2/sym_hipd.c
3186
int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task)
drivers/scsi/sym53c8xx_2/sym_hipd.c
3211
(task != -1 &&
drivers/scsi/sym53c8xx_2/sym_hipd.c
3212
(cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) {
drivers/scsi/sym53c8xx_2/sym_hipd.c
3275
int target=-1, lun=-1, task;
drivers/scsi/sym53c8xx_2/sym_hipd.c
3540
task = -1;
drivers/scsi/sym53c8xx_2/sym_hipd.c
3564
task = np->abrt_msg[2];
drivers/scsi/sym53c8xx_2/sym_hipd.c
3573
sym_clear_tasks(np, DID_ABORT, target, lun, task);
drivers/scsi/sym53c8xx_2/sym_hipd.h
1052
int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task);
drivers/soc/ti/wkup_m3_ipc.c
615
struct task_struct *task;
drivers/soc/ti/wkup_m3_ipc.c
691
task = kthread_run(wkup_m3_rproc_boot_thread, m3_ipc,
drivers/soc/ti/wkup_m3_ipc.c
694
if (IS_ERR(task)) {
drivers/soc/ti/wkup_m3_ipc.c
696
ret = PTR_ERR(task);
drivers/spi/spi.c
2035
sched_set_fifo(ctlr->kworker->task);
drivers/ssb/driver_chipcommon_pmu.c
370
u8 task; /* SET | ADD | REMOVE */
drivers/ssb/driver_chipcommon_pmu.c
401
.task = PMU_RES_DEP_SET,
drivers/ssb/driver_chipcommon_pmu.c
415
.task = PMU_RES_DEP_ADD,
drivers/ssb/driver_chipcommon_pmu.c
490
switch (depend_tab[i].task) {
drivers/staging/greybus/loopback.c
1060
gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
drivers/staging/greybus/loopback.c
1061
if (IS_ERR(gb->task)) {
drivers/staging/greybus/loopback.c
1062
retval = PTR_ERR(gb->task);
drivers/staging/greybus/loopback.c
1106
if (!IS_ERR_OR_NULL(gb->task))
drivers/staging/greybus/loopback.c
1107
kthread_stop(gb->task);
drivers/staging/greybus/loopback.c
65
struct task_struct *task;
drivers/staging/media/imx/imx-ic-prpencvf.c
155
int ret, task = ic_priv->task_id;
drivers/staging/media/imx/imx-ic-prpencvf.c
157
ic = ipu_ic_get(ic_priv->ipu, task);
drivers/staging/media/imx/imx-ic-prpencvf.c
165
out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].out_ch);
drivers/staging/media/imx/imx-ic-prpencvf.c
168
prp_channel[task].out_ch);
drivers/staging/media/imx/imx-ic-prpencvf.c
174
rot_in_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_in_ch);
drivers/staging/media/imx/imx-ic-prpencvf.c
177
prp_channel[task].rot_in_ch);
drivers/staging/media/imx/imx-ic-prpencvf.c
183
rot_out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_out_ch);
drivers/staging/media/imx/imx-ic-prpencvf.c
186
prp_channel[task].rot_out_ch);
drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
702
s32 rtw_register_task_alive(struct adapter *padapter, u32 task)
drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
714
register_task_alive(pwrctrl, task);
drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
744
void rtw_unregister_task_alive(struct adapter *padapter, u32 task)
drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
762
unregister_task_alive(pwrctrl, task);
drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
218
s32 rtw_register_task_alive(struct adapter *, u32 task);
drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
219
void rtw_unregister_task_alive(struct adapter *, u32 task);
drivers/target/loopback/tcm_loop.c
192
u64 lun, int task, enum tcm_tmreq_table tmr)
drivers/target/loopback/tcm_loop.c
219
NULL, tmr, GFP_KERNEL, task,
drivers/tty/synclink_gt.c
1858
struct slgt_info *info = container_of(work, struct slgt_info, task);
drivers/tty/synclink_gt.c
2321
schedule_work(&port->task);
drivers/tty/synclink_gt.c
246
struct work_struct task;
drivers/tty/synclink_gt.c
3488
INIT_WORK(&info->task, bh_handler);
drivers/tty/synclink_gt.c
5036
bh_handler(&info->task);
drivers/tty/tty_ldsem.c
124
wake_up_process(waiter->task);
drivers/tty/tty_ldsem.c
183
waiter.task = current;
drivers/tty/tty_ldsem.c
196
if (!smp_load_acquire(&waiter.task))
drivers/tty/tty_ldsem.c
212
if (waiter.task) {
drivers/tty/tty_ldsem.c
217
put_task_struct(waiter.task);
drivers/tty/tty_ldsem.c
255
waiter.task = current;
drivers/tty/tty_ldsem.c
51
struct task_struct *task;
drivers/tty/tty_ldsem.c
95
tsk = waiter->task;
drivers/tty/tty_ldsem.c
96
smp_store_release(&waiter->task, NULL);
drivers/usb/atm/ueagle-atm.c
1026
struct uea_softc *sc = container_of(work, struct uea_softc, task);
drivers/usb/atm/ueagle-atm.c
127
struct work_struct task;
drivers/usb/atm/ueagle-atm.c
1862
schedule_work(&sc->task);
drivers/usb/atm/ueagle-atm.c
2073
schedule_work(&sc->task);
drivers/usb/atm/ueagle-atm.c
2080
schedule_work(&sc->task);
drivers/usb/atm/ueagle-atm.c
2141
INIT_WORK(&sc->task, uea_load_page_e4);
drivers/usb/atm/ueagle-atm.c
2148
INIT_WORK(&sc->task, uea_load_page_e1);
drivers/usb/atm/ueagle-atm.c
2222
flush_work(&sc->task);
drivers/usb/atm/ueagle-atm.c
901
struct uea_softc *sc = container_of(work, struct uea_softc, task);
drivers/usb/gadget/function/uvc_video.c
830
sched_set_fifo(video->kworker->task);
drivers/usb/typec/tcpm/tcpm.c
7842
sched_set_fifo(port->wq->task);
drivers/usb/usbip/usbip_common.h
326
int usbip_in_eh(struct task_struct *task);
drivers/usb/usbip/usbip_event.c
189
int usbip_in_eh(struct task_struct *task)
drivers/usb/usbip/usbip_event.c
191
if (task == worker_context)
drivers/vfio/vfio_iommu_type1.c
1239
put_task_struct(dma->task);
drivers/vfio/vfio_iommu_type1.c
1651
struct task_struct *task = current->group_leader;
drivers/vfio/vfio_iommu_type1.c
1661
ret = mm_lock_acct(task, mm, lock_cap, npage);
drivers/vfio/vfio_iommu_type1.c
1666
mm_lock_acct(dma->task, dma->mm, dma->lock_cap, -npage);
drivers/vfio/vfio_iommu_type1.c
1670
if (dma->task != task) {
drivers/vfio/vfio_iommu_type1.c
1671
put_task_struct(dma->task);
drivers/vfio/vfio_iommu_type1.c
1672
dma->task = get_task_struct(task);
drivers/vfio/vfio_iommu_type1.c
1777
dma->task = current->group_leader;
drivers/vfio/vfio_iommu_type1.c
431
static int mm_lock_acct(struct task_struct *task, struct mm_struct *mm,
drivers/vfio/vfio_iommu_type1.c
439
ret = __account_locked_vm(mm, abs(npage), npage > 0, task, lock_cap);
drivers/vfio/vfio_iommu_type1.c
456
ret = mm_lock_acct(dma->task, mm, dma->lock_cap, npage);
drivers/vfio/vfio_iommu_type1.c
866
dma->task->comm, task_pid_nr(dma->task),
drivers/vfio/vfio_iommu_type1.c
867
task_rlimit(dma->task, RLIMIT_MEMLOCK));
drivers/vfio/vfio_iommu_type1.c
98
struct task_struct *task;
drivers/vhost/vhost.c
787
struct task_struct *task;
drivers/vhost/vhost.c
791
task = kthread_create(vhost_run_work_kthread_list, worker, "%s", name);
drivers/vhost/vhost.c
792
if (IS_ERR(task))
drivers/vhost/vhost.c
793
return PTR_ERR(task);
drivers/vhost/vhost.c
795
worker->kthread_task = task;
drivers/vhost/vhost.c
796
wake_up_process(task);
drivers/video/fbdev/atmel_lcdfb.c
1160
INIT_WORK(&sinfo->task, atmel_lcdfb_task);
drivers/video/fbdev/atmel_lcdfb.c
1197
cancel_work_sync(&sinfo->task);
drivers/video/fbdev/atmel_lcdfb.c
1235
cancel_work_sync(&sinfo->task);
drivers/video/fbdev/atmel_lcdfb.c
44
struct work_struct task;
drivers/video/fbdev/atmel_lcdfb.c
826
schedule_work(&sinfo->task);
drivers/video/fbdev/atmel_lcdfb.c
838
container_of(work, struct atmel_lcdfb_info, task);
drivers/video/fbdev/ps3fb.c
1172
task = kthread_run(ps3fbd, info, DEVICE_NAME);
drivers/video/fbdev/ps3fb.c
1173
if (IS_ERR(task)) {
drivers/video/fbdev/ps3fb.c
1174
retval = PTR_ERR(task);
drivers/video/fbdev/ps3fb.c
1178
ps3fb.task = task;
drivers/video/fbdev/ps3fb.c
119
struct task_struct *task;
drivers/video/fbdev/ps3fb.c
1219
if (ps3fb.task) {
drivers/video/fbdev/ps3fb.c
1220
struct task_struct *task = ps3fb.task;
drivers/video/fbdev/ps3fb.c
1221
ps3fb.task = NULL;
drivers/video/fbdev/ps3fb.c
1222
kthread_stop(task);
drivers/video/fbdev/ps3fb.c
928
if (ps3fb.task && !ps3fb.is_blanked &&
drivers/video/fbdev/ps3fb.c
931
wake_up_process(ps3fb.task);
drivers/video/fbdev/ps3fb.c
977
struct task_struct *task;
drivers/video/fbdev/pxafb.c
130
schedule_work(&fbi->task);
drivers/video/fbdev/pxafb.c
1627
container_of(work, struct pxafb_info, task);
drivers/video/fbdev/pxafb.c
1832
INIT_WORK(&fbi->task, pxafb_task);
drivers/video/fbdev/pxafb.c
2389
cancel_work_sync(&fbi->task);
drivers/video/fbdev/pxafb.h
147
struct work_struct task;
drivers/video/fbdev/sa1100fb.c
1136
INIT_WORK(&fbi->task, sa1100fb_task);
drivers/video/fbdev/sa1100fb.c
241
schedule_work(&fbi->task);
drivers/video/fbdev/sa1100fb.c
963
struct sa1100fb_info *fbi = container_of(w, struct sa1100fb_info, task);
drivers/video/fbdev/sa1100fb.h
66
struct work_struct task;
drivers/video/fbdev/uvesafb.c
103
memcpy(&task->t, utask, sizeof(*utask));
drivers/video/fbdev/uvesafb.c
105
if (task->t.buf_len && task->buf)
drivers/video/fbdev/uvesafb.c
106
memcpy(task->buf, utask + 1, task->t.buf_len);
drivers/video/fbdev/uvesafb.c
108
complete(task->done);
drivers/video/fbdev/uvesafb.c
1105
struct uvesafb_ktask *task;
drivers/video/fbdev/uvesafb.c
1135
task = uvesafb_prep();
drivers/video/fbdev/uvesafb.c
1136
if (!task)
drivers/video/fbdev/uvesafb.c
1139
task->t.regs.eax = 0x4f10;
drivers/video/fbdev/uvesafb.c
1142
task->t.regs.ebx = 0x0001;
drivers/video/fbdev/uvesafb.c
1145
task->t.regs.ebx = 0x0101; /* standby */
drivers/video/fbdev/uvesafb.c
1148
task->t.regs.ebx = 0x0401; /* powerdown */
drivers/video/fbdev/uvesafb.c
1154
err = uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
1155
if (err || (task->t.regs.eax & 0xffff) != 0x004f)
drivers/video/fbdev/uvesafb.c
1157
out: uvesafb_free(task);
drivers/video/fbdev/uvesafb.c
1184
struct uvesafb_ktask *task = NULL;
drivers/video/fbdev/uvesafb.c
1194
task = uvesafb_prep();
drivers/video/fbdev/uvesafb.c
1195
if (!task)
drivers/video/fbdev/uvesafb.c
1199
task->t.regs.eax = 0x0003;
drivers/video/fbdev/uvesafb.c
1200
uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
1209
uvesafb_free(task);
drivers/video/fbdev/uvesafb.c
1216
struct uvesafb_ktask *task = NULL;
drivers/video/fbdev/uvesafb.c
1232
task = uvesafb_prep();
drivers/video/fbdev/uvesafb.c
1233
if (!task)
drivers/video/fbdev/uvesafb.c
1236
task->t.regs.eax = 0x4f02;
drivers/video/fbdev/uvesafb.c
1237
task->t.regs.ebx = mode->mode_id | 0x4000; /* use LFB */
drivers/video/fbdev/uvesafb.c
1241
task->t.regs.ebx |= 0x0800; /* use CRTC data */
drivers/video/fbdev/uvesafb.c
1242
task->t.flags = TF_BUF_ESDI;
drivers/video/fbdev/uvesafb.c
1273
task->t.buf_len = sizeof(struct vbe_crtc_ib);
drivers/video/fbdev/uvesafb.c
1274
task->buf = &par->crtc;
drivers/video/fbdev/uvesafb.c
1276
err = uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
1277
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
drivers/video/fbdev/uvesafb.c
1284
task->t.regs.eax, err);
drivers/video/fbdev/uvesafb.c
1285
uvesafb_reset(task);
drivers/video/fbdev/uvesafb.c
1292
task->t.regs.eax, err);
drivers/video/fbdev/uvesafb.c
1302
uvesafb_reset(task);
drivers/video/fbdev/uvesafb.c
1303
task->t.regs.eax = 0x4f08;
drivers/video/fbdev/uvesafb.c
1304
task->t.regs.ebx = 0x0800;
drivers/video/fbdev/uvesafb.c
1306
err = uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
1307
if (err || (task->t.regs.eax & 0xffff) != 0x004f ||
drivers/video/fbdev/uvesafb.c
1308
((task->t.regs.ebx & 0xff00) >> 8) != 8) {
drivers/video/fbdev/uvesafb.c
1321
uvesafb_free(task);
drivers/video/fbdev/uvesafb.c
144
static int uvesafb_exec(struct uvesafb_ktask *task)
drivers/video/fbdev/uvesafb.c
149
int len = sizeof(task->t) + task->t.buf_len;
drivers/video/fbdev/uvesafb.c
165
init_completion(task->done);
drivers/video/fbdev/uvesafb.c
173
memcpy(m + 1, &task->t, sizeof(task->t));
drivers/video/fbdev/uvesafb.c
176
memcpy((u8 *)(m + 1) + sizeof(task->t), task->buf, task->t.buf_len);
drivers/video/fbdev/uvesafb.c
182
task->ack = m->ack;
drivers/video/fbdev/uvesafb.c
1917
struct uvesafb_ktask *task;
drivers/video/fbdev/uvesafb.c
1920
task = uvesafb_prep();
drivers/video/fbdev/uvesafb.c
1921
if (task) {
drivers/video/fbdev/uvesafb.c
1922
task->t.flags = TF_EXIT;
drivers/video/fbdev/uvesafb.c
1923
uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
1924
uvesafb_free(task);
drivers/video/fbdev/uvesafb.c
194
uvfb_tasks[seq] = task;
drivers/video/fbdev/uvesafb.c
216
if (!err && !(task->t.flags & TF_EXIT))
drivers/video/fbdev/uvesafb.c
217
err = !wait_for_completion_timeout(task->done,
drivers/video/fbdev/uvesafb.c
235
static void uvesafb_free(struct uvesafb_ktask *task)
drivers/video/fbdev/uvesafb.c
237
if (task) {
drivers/video/fbdev/uvesafb.c
238
kfree(task->done);
drivers/video/fbdev/uvesafb.c
239
kfree(task);
drivers/video/fbdev/uvesafb.c
246
static void uvesafb_reset(struct uvesafb_ktask *task)
drivers/video/fbdev/uvesafb.c
248
struct completion *cpl = task->done;
drivers/video/fbdev/uvesafb.c
250
memset(task, 0, sizeof(*task));
drivers/video/fbdev/uvesafb.c
251
task->done = cpl;
drivers/video/fbdev/uvesafb.c
259
struct uvesafb_ktask *task;
drivers/video/fbdev/uvesafb.c
261
task = kzalloc_obj(*task);
drivers/video/fbdev/uvesafb.c
262
if (task) {
drivers/video/fbdev/uvesafb.c
263
task->done = kzalloc_obj(*task->done);
drivers/video/fbdev/uvesafb.c
264
if (!task->done) {
drivers/video/fbdev/uvesafb.c
265
kfree(task);
drivers/video/fbdev/uvesafb.c
266
task = NULL;
drivers/video/fbdev/uvesafb.c
269
return task;
drivers/video/fbdev/uvesafb.c
354
struct uvesafb_ktask *task;
drivers/video/fbdev/uvesafb.c
365
task = uvesafb_prep();
drivers/video/fbdev/uvesafb.c
366
if (!task) {
drivers/video/fbdev/uvesafb.c
371
task->t.regs.eax = 0x4f04;
drivers/video/fbdev/uvesafb.c
372
task->t.regs.ecx = 0x000f;
drivers/video/fbdev/uvesafb.c
373
task->t.regs.edx = 0x0001;
drivers/video/fbdev/uvesafb.c
374
task->t.flags = TF_BUF_RET | TF_BUF_ESBX;
drivers/video/fbdev/uvesafb.c
375
task->t.buf_len = par->vbe_state_size;
drivers/video/fbdev/uvesafb.c
376
task->buf = state;
drivers/video/fbdev/uvesafb.c
377
err = uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
379
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
drivers/video/fbdev/uvesafb.c
381
task->t.regs.eax, err);
drivers/video/fbdev/uvesafb.c
386
uvesafb_free(task);
drivers/video/fbdev/uvesafb.c
392
struct uvesafb_ktask *task;
drivers/video/fbdev/uvesafb.c
398
task = uvesafb_prep();
drivers/video/fbdev/uvesafb.c
399
if (!task)
drivers/video/fbdev/uvesafb.c
402
task->t.regs.eax = 0x4f04;
drivers/video/fbdev/uvesafb.c
403
task->t.regs.ecx = 0x000f;
drivers/video/fbdev/uvesafb.c
404
task->t.regs.edx = 0x0002;
drivers/video/fbdev/uvesafb.c
405
task->t.buf_len = par->vbe_state_size;
drivers/video/fbdev/uvesafb.c
406
task->t.flags = TF_BUF_ESBX;
drivers/video/fbdev/uvesafb.c
407
task->buf = state_buf;
drivers/video/fbdev/uvesafb.c
409
err = uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
410
if (err || (task->t.regs.eax & 0xffff) != 0x004f)
drivers/video/fbdev/uvesafb.c
412
task->t.regs.eax, err);
drivers/video/fbdev/uvesafb.c
414
uvesafb_free(task);
drivers/video/fbdev/uvesafb.c
417
static int uvesafb_vbe_getinfo(struct uvesafb_ktask *task,
drivers/video/fbdev/uvesafb.c
422
task->t.regs.eax = 0x4f00;
drivers/video/fbdev/uvesafb.c
423
task->t.flags = TF_VBEIB;
drivers/video/fbdev/uvesafb.c
424
task->t.buf_len = sizeof(struct vbe_ib);
drivers/video/fbdev/uvesafb.c
425
task->buf = &par->vbe_ib;
drivers/video/fbdev/uvesafb.c
428
err = uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
429
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
drivers/video/fbdev/uvesafb.c
431
(u32)task->t.regs.eax, err);
drivers/video/fbdev/uvesafb.c
454
((char *)task->buf) + par->vbe_ib.oem_vendor_name_ptr);
drivers/video/fbdev/uvesafb.c
458
((char *)task->buf) + par->vbe_ib.oem_product_name_ptr);
drivers/video/fbdev/uvesafb.c
462
((char *)task->buf) + par->vbe_ib.oem_product_rev_ptr);
drivers/video/fbdev/uvesafb.c
466
((char *)task->buf) + par->vbe_ib.oem_string_ptr);
drivers/video/fbdev/uvesafb.c
475
static int uvesafb_vbe_getmodes(struct uvesafb_ktask *task,
drivers/video/fbdev/uvesafb.c
499
uvesafb_reset(task);
drivers/video/fbdev/uvesafb.c
500
task->t.regs.eax = 0x4f01;
drivers/video/fbdev/uvesafb.c
501
task->t.regs.ecx = (u32) *mode;
drivers/video/fbdev/uvesafb.c
502
task->t.flags = TF_BUF_RET | TF_BUF_ESDI;
drivers/video/fbdev/uvesafb.c
503
task->t.buf_len = sizeof(struct vbe_mode_ib);
drivers/video/fbdev/uvesafb.c
504
task->buf = par->vbe_modes + off;
drivers/video/fbdev/uvesafb.c
506
err = uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
507
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
drivers/video/fbdev/uvesafb.c
509
*mode, (u32)task->t.regs.eax, err);
drivers/video/fbdev/uvesafb.c
515
mib = task->buf;
drivers/video/fbdev/uvesafb.c
552
static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
drivers/video/fbdev/uvesafb.c
557
uvesafb_reset(task);
drivers/video/fbdev/uvesafb.c
558
task->t.regs.eax = 0x4f0a;
drivers/video/fbdev/uvesafb.c
559
task->t.regs.ebx = 0x0;
drivers/video/fbdev/uvesafb.c
560
err = uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
564
if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
drivers/video/fbdev/uvesafb.c
567
par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
drivers/video/fbdev/uvesafb.c
568
+ task->t.regs.edi);
drivers/video/fbdev/uvesafb.c
572
(u16)task->t.regs.es, (u16)task->t.regs.edi);
drivers/video/fbdev/uvesafb.c
613
static int uvesafb_vbe_getedid(struct uvesafb_ktask *task, struct fb_info *info)
drivers/video/fbdev/uvesafb.c
621
task->t.regs.eax = 0x4f15;
drivers/video/fbdev/uvesafb.c
622
task->t.regs.ebx = 0;
drivers/video/fbdev/uvesafb.c
623
task->t.regs.ecx = 0;
drivers/video/fbdev/uvesafb.c
624
task->t.buf_len = 0;
drivers/video/fbdev/uvesafb.c
625
task->t.flags = 0;
drivers/video/fbdev/uvesafb.c
627
err = uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
629
if ((task->t.regs.eax & 0xffff) != 0x004f || err)
drivers/video/fbdev/uvesafb.c
632
if ((task->t.regs.ebx & 0x3) == 3) {
drivers/video/fbdev/uvesafb.c
634
} else if ((task->t.regs.ebx & 0x3) == 2) {
drivers/video/fbdev/uvesafb.c
636
} else if ((task->t.regs.ebx & 0x3) == 1) {
drivers/video/fbdev/uvesafb.c
643
task->t.regs.eax = 0x4f15;
drivers/video/fbdev/uvesafb.c
644
task->t.regs.ebx = 1;
drivers/video/fbdev/uvesafb.c
645
task->t.regs.ecx = task->t.regs.edx = 0;
drivers/video/fbdev/uvesafb.c
646
task->t.flags = TF_BUF_RET | TF_BUF_ESDI;
drivers/video/fbdev/uvesafb.c
647
task->t.buf_len = EDID_LENGTH;
drivers/video/fbdev/uvesafb.c
648
task->buf = kzalloc(EDID_LENGTH, GFP_KERNEL);
drivers/video/fbdev/uvesafb.c
649
if (!task->buf)
drivers/video/fbdev/uvesafb.c
652
err = uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
654
if ((task->t.regs.eax & 0xffff) == 0x004f && !err) {
drivers/video/fbdev/uvesafb.c
655
fb_edid_to_monspecs(task->buf, &info->monspecs);
drivers/video/fbdev/uvesafb.c
670
kfree(task->buf);
drivers/video/fbdev/uvesafb.c
674
static void uvesafb_vbe_getmonspecs(struct uvesafb_ktask *task,
drivers/video/fbdev/uvesafb.c
687
if (uvesafb_vbe_getedid(task, info)) {
drivers/video/fbdev/uvesafb.c
75
struct uvesafb_ktask *task;
drivers/video/fbdev/uvesafb.c
753
static void uvesafb_vbe_getstatesize(struct uvesafb_ktask *task,
drivers/video/fbdev/uvesafb.c
758
uvesafb_reset(task);
drivers/video/fbdev/uvesafb.c
764
task->t.regs.eax = 0x4f04;
drivers/video/fbdev/uvesafb.c
765
task->t.regs.ecx = 0x000f;
drivers/video/fbdev/uvesafb.c
766
task->t.regs.edx = 0x0000;
drivers/video/fbdev/uvesafb.c
767
task->t.flags = 0;
drivers/video/fbdev/uvesafb.c
769
err = uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
771
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
drivers/video/fbdev/uvesafb.c
773
task->t.regs.eax, err);
drivers/video/fbdev/uvesafb.c
778
par->vbe_state_size = 64 * (task->t.regs.ebx & 0xffff);
drivers/video/fbdev/uvesafb.c
783
struct uvesafb_ktask *task = NULL;
drivers/video/fbdev/uvesafb.c
787
task = uvesafb_prep();
drivers/video/fbdev/uvesafb.c
788
if (!task)
drivers/video/fbdev/uvesafb.c
791
err = uvesafb_vbe_getinfo(task, par);
drivers/video/fbdev/uvesafb.c
795
err = uvesafb_vbe_getmodes(task, par);
drivers/video/fbdev/uvesafb.c
809
uvesafb_vbe_getpmi(task, par);
drivers/video/fbdev/uvesafb.c
818
uvesafb_vbe_getmonspecs(task, info);
drivers/video/fbdev/uvesafb.c
819
uvesafb_vbe_getstatesize(task, par);
drivers/video/fbdev/uvesafb.c
821
out: uvesafb_free(task);
drivers/video/fbdev/uvesafb.c
84
task = uvfb_tasks[msg->seq];
drivers/video/fbdev/uvesafb.c
86
if (!task || msg->ack != task->ack) {
drivers/video/fbdev/uvesafb.c
925
struct uvesafb_ktask *task;
drivers/video/fbdev/uvesafb.c
94
if (task->t.buf_len < utask->buf_len ||
drivers/video/fbdev/uvesafb.c
966
task = uvesafb_prep();
drivers/video/fbdev/uvesafb.c
967
if (!task)
drivers/video/fbdev/uvesafb.c
970
task->t.regs.eax = 0x4f09;
drivers/video/fbdev/uvesafb.c
971
task->t.regs.ebx = 0x0;
drivers/video/fbdev/uvesafb.c
972
task->t.regs.ecx = count;
drivers/video/fbdev/uvesafb.c
973
task->t.regs.edx = start;
drivers/video/fbdev/uvesafb.c
974
task->t.flags = TF_BUF_ESDI;
drivers/video/fbdev/uvesafb.c
975
task->t.buf_len = sizeof(struct uvesafb_pal_entry) * count;
drivers/video/fbdev/uvesafb.c
976
task->buf = entries;
drivers/video/fbdev/uvesafb.c
978
err = uvesafb_exec(task);
drivers/video/fbdev/uvesafb.c
979
if ((task->t.regs.eax & 0xffff) != 0x004f)
drivers/video/fbdev/uvesafb.c
982
uvesafb_free(task);
drivers/watchdog/watchdog_dev.c
1232
sched_set_fifo(watchdog_kworker->task);
drivers/xen/balloon.c
726
struct task_struct *task;
drivers/xen/balloon.c
774
task = kthread_run(balloon_thread, NULL, "xen-balloon");
drivers/xen/balloon.c
775
if (IS_ERR(task)) {
drivers/xen/balloon.c
777
return PTR_ERR(task);
drivers/xen/xenbus/xenbus_xs.c
907
struct task_struct *task;
drivers/xen/xenbus/xenbus_xs.c
916
task = kthread_run(xenwatch_thread, NULL, "xenwatch");
drivers/xen/xenbus/xenbus_xs.c
917
if (IS_ERR(task))
drivers/xen/xenbus/xenbus_xs.c
918
return PTR_ERR(task);
fs/afs/fs_operation.c
112
struct afs_io_locker myself = { .task = current, };
fs/afs/fs_operation.c
54
struct task_struct *task;
fs/afs/fs_operation.c
72
wake_up_process(locker->task);
fs/afs/fs_operation.c
85
struct afs_io_locker myself = { .task = current, };
fs/aio.c
1351
if (!ret2 && !t.task)
fs/binfmt_elf.c
1684
struct task_struct *task;
fs/binfmt_elf.c
1706
static void do_thread_regset_writeback(struct task_struct *task,
fs/binfmt_elf.c
1710
regset->writeback(task, regset, 1);
fs/binfmt_elf.c
1733
fill_prstatus(&t->prstatus.common, t->task, signr);
fs/binfmt_elf.c
1734
regset_get(t->task, &view->regsets[0],
fs/binfmt_elf.c
1740
do_thread_regset_writeback(t->task, &view->regsets[0]);
fs/binfmt_elf.c
1755
do_thread_regset_writeback(t->task, regset);
fs/binfmt_elf.c
1758
if (regset->active && regset->active(t->task, regset) <= 0)
fs/binfmt_elf.c
1761
ret = regset_get_alloc(t->task, regset, ~0U, &data);
fs/binfmt_elf.c
1793
struct task_struct *p = t->task;
fs/binfmt_elf.c
1881
info->thread->task = dump_task;
fs/binfmt_elf.c
1887
t->task = ct->task;
fs/binfmt_elf_fdpic.c
1505
ct->task, &thread_status_size);
fs/bpf_fs_kfuncs.c
42
__bpf_kfunc struct file *bpf_get_task_exe_file(struct task_struct *task)
fs/bpf_fs_kfuncs.c
44
return get_task_exe_file(task);
fs/btrfs/dev-replace.c
1190
struct task_struct *task;
fs/btrfs/dev-replace.c
1235
task = kthread_run(btrfs_dev_replace_kthread, fs_info, "btrfs-devrepl");
fs/btrfs/dev-replace.c
1236
return PTR_ERR_OR_ZERO(task);
fs/btrfs/disk-io.c
2980
struct task_struct *task;
fs/btrfs/disk-io.c
2983
task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
fs/btrfs/disk-io.c
2984
if (IS_ERR(task)) {
fs/btrfs/disk-io.c
2987
return PTR_ERR(task);
fs/btrfs/uuid-tree.c
534
struct task_struct *task;
fs/btrfs/uuid-tree.c
560
task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
fs/btrfs/uuid-tree.c
561
if (IS_ERR(task)) {
fs/btrfs/uuid-tree.c
565
return PTR_ERR(task);
fs/coredump.c
527
core_state->dumper.task = tsk;
fs/coredump.c
543
wait_task_inactive(ptr->task, TASK_ANY);
fs/coredump.c
554
struct task_struct *task;
fs/coredump.c
565
task = curr->task;
fs/coredump.c
571
curr->task = NULL;
fs/coredump.c
572
wake_up_process(task);
fs/ecryptfs/ecryptfs_kernel.h
386
struct task_struct *task;
fs/ecryptfs/messaging.c
242
wake_up_process(msg_ctx->task);
fs/ecryptfs/messaging.c
397
ecryptfs_msg_ctx_arr[i].task = NULL;
fs/ecryptfs/messaging.c
55
(*msg_ctx)->task = current;
fs/erofs/zdata.c
317
sched_set_fifo_low(worker->task);
fs/f2fs/dir.c
393
F2FS_I(dir)->task = current;
fs/f2fs/dir.c
816
if (current != F2FS_I(dir)->task) {
fs/f2fs/dir.c
818
F2FS_I(dir)->task = NULL;
fs/f2fs/f2fs.h
956
struct task_struct *task; /* lookup and create consistency */
fs/file.c
1123
struct file *fget_task(struct task_struct *task, unsigned int fd)
fs/file.c
1127
task_lock(task);
fs/file.c
1128
if (task->files)
fs/file.c
1129
file = __fget_files(task->files, fd, 0);
fs/file.c
1130
task_unlock(task);
fs/file.c
1135
struct file *fget_task_next(struct task_struct *task, unsigned int *ret_fd)
fs/file.c
1142
task_lock(task);
fs/file.c
1143
files = task->files;
fs/file.c
1153
task_unlock(task);
fs/file_table.c
521
struct task_struct *task = current;
fs/file_table.c
528
if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
fs/file_table.c
530
if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
fs/gfs2/glock.c
2653
struct task_struct *task;
fs/gfs2/glock.c
2663
if (i->task)
fs/gfs2/glock.c
2664
put_task_struct(i->task);
fs/gfs2/glock.c
2668
i->task = NULL;
fs/gfs2/glock.c
2672
i->task = pid_task(pid, PIDTYPE_TGID);
fs/gfs2/glock.c
2673
if (!i->task) {
fs/gfs2/glock.c
2677
get_task_struct(i->task);
fs/gfs2/glock.c
2680
return i->task;
fs/gfs2/glock.c
2691
i->file = fget_task_next(i->task, &i->fd);
fs/gfs2/glock.c
2740
if (i->task)
fs/gfs2/glock.c
2741
put_task_struct(i->task);
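
The gfs2 iterator pins each task it visits: pid_task() yields a pointer that is only stable under RCU, so get_task_struct() must be taken before the read-side section ends, balanced later by the put_task_struct() calls seen above. Condensed into a hedged helper (pin_tgid_task is not a kernel function):

    #include <linux/pid.h>
    #include <linux/rcupdate.h>
    #include <linux/sched/task.h>

    static struct task_struct *pin_tgid_task(struct pid *pid)
    {
            struct task_struct *task;

            rcu_read_lock();
            task = pid_task(pid, PIDTYPE_TGID);
            if (task)
                    get_task_struct(task);  /* pin before rcu_read_unlock() */
            rcu_read_unlock();
            return task;    /* caller must put_task_struct() when done */
    }
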
fs/lockd/clntlock.c
214
struct task_struct *task;
fs/lockd/clntlock.c
218
task = kthread_run(reclaimer, host, "%s-reclaim", host->h_name);
fs/lockd/clntlock.c
219
if (IS_ERR(task))
fs/lockd/clntlock.c
222
"(%ld)\n", host->h_name, PTR_ERR(task));
fs/lockd/clntproc.c
373
struct rpc_task *task;
fs/lockd/clntproc.c
375
task = __nlm_async_call(req, proc, msg, tk_ops);
fs/lockd/clntproc.c
376
if (IS_ERR(task))
fs/lockd/clntproc.c
377
return PTR_ERR(task);
fs/lockd/clntproc.c
378
rpc_put_task(task);
fs/lockd/clntproc.c
417
struct rpc_task *task;
fs/lockd/clntproc.c
420
task = __nlm_async_call(req, proc, &msg, tk_ops);
fs/lockd/clntproc.c
421
if (IS_ERR(task))
fs/lockd/clntproc.c
422
return PTR_ERR(task);
fs/lockd/clntproc.c
423
err = rpc_wait_for_completion_task(task);
fs/lockd/clntproc.c
424
rpc_put_task(task);
fs/lockd/clntproc.c
730
static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
fs/lockd/clntproc.c
737
defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);
fs/lockd/clntproc.c
740
rpc_call_start(task);
fs/lockd/clntproc.c
743
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
fs/lockd/clntproc.c
748
if (RPC_SIGNALLED(task))
fs/lockd/clntproc.c
751
if (task->tk_status < 0) {
fs/lockd/clntproc.c
752
dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
fs/lockd/clntproc.c
753
switch (task->tk_status) {
fs/lockd/clntproc.c
762
rpc_delay(task, NLMCLNT_GRACE_WAIT);
fs/lockd/clntproc.c
772
rpc_restart_call(task);
fs/lockd/clntproc.c
811
static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
fs/lockd/clntproc.c
816
if (RPC_SIGNALLED(task))
fs/lockd/clntproc.c
819
if (task->tk_status < 0) {
fs/lockd/clntproc.c
821
task->tk_status);
fs/lockd/clntproc.c
847
rpc_restart_call(task);
fs/lockd/clntproc.c
848
rpc_delay(task, 30 * HZ);
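
Two rpc_task ownership styles appear in clntproc.c: fire-and-forget (rpc_put_task() immediately after the task is launched, the tk_ops callbacks finish the job) and synchronous (rpc_wait_for_completion_task() before the put). Both hinge on rpc_run_task() handing the caller its own reference. A hedged sketch, with my_ops standing in for the nlmclnt callback tables:

    #include <linux/sunrpc/clnt.h>
    #include <linux/sunrpc/sched.h>
    #include <linux/err.h>

    static int my_async_call(struct rpc_clnt *clnt, struct rpc_message *msg,
                             const struct rpc_call_ops *my_ops, bool wait)
    {
            struct rpc_task_setup setup = {
                    .rpc_client   = clnt,
                    .rpc_message  = msg,
                    .callback_ops = my_ops,
                    .flags        = RPC_TASK_ASYNC,
            };
            struct rpc_task *task;
            int status = 0;

            task = rpc_run_task(&setup);
            if (IS_ERR(task))
                    return PTR_ERR(task);
            if (wait) {
                    status = rpc_wait_for_completion_task(task);
                    if (status == 0)
                            status = task->tk_status;
            }
            rpc_put_task(task);     /* drop the caller's reference */
            return status;
    }
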
fs/lockd/svc4proc.c
269
static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
fs/lockd/svclock.c
932
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
fs/lockd/svclock.c
955
if (task->tk_status < 0) {
fs/lockd/svcproc.c
292
static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
fs/namespace.c
1376
struct task_struct *task = current;
fs/namespace.c
1377
if (likely(!(task->flags & PF_KTHREAD))) {
fs/namespace.c
1379
if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
fs/namespace.c
6404
static struct ns_common *mntns_get(struct task_struct *task)
fs/namespace.c
6409
task_lock(task);
fs/namespace.c
6410
nsproxy = task->nsproxy;
fs/namespace.c
6415
task_unlock(task);
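
mntns_get() dereferences task->nsproxy only under task_lock(), because a concurrent setns() or exit can switch or free the nsproxy; any reference the caller keeps must be taken before the lock drops. Sketched with a hypothetical take_ns_ref() standing in for the namespace-specific refcount bump done by the real code:

    #include <linux/nsproxy.h>
    #include <linux/sched/task.h>

    static struct nsproxy *snapshot_nsproxy(struct task_struct *task)
    {
            struct nsproxy *nsproxy;

            task_lock(task);
            nsproxy = task->nsproxy;       /* may be NULL for exiting tasks */
            if (nsproxy)
                    take_ns_ref(nsproxy);  /* hypothetical: pin while locked */
            task_unlock(task);
            return nsproxy;
    }
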
fs/nfs/blocklayout/blocklayout.c
211
struct rpc_task *task;
fs/nfs/blocklayout/blocklayout.c
214
task = container_of(work, struct rpc_task, u.tk_work);
fs/nfs/blocklayout/blocklayout.c
215
hdr = container_of(task, struct nfs_pgio_header, task);
fs/nfs/blocklayout/blocklayout.c
224
hdr->task.tk_status = hdr->pnfs_error;
fs/nfs/blocklayout/blocklayout.c
225
INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
fs/nfs/blocklayout/blocklayout.c
226
schedule_work(&hdr->task.u.tk_work);
fs/nfs/blocklayout/blocklayout.c
343
struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
fs/nfs/blocklayout/blocklayout.c
345
container_of(task, struct nfs_pgio_header, task);
fs/nfs/blocklayout/blocklayout.c
368
hdr->task.tk_status = hdr->pnfs_error;
fs/nfs/blocklayout/blocklayout.c
370
INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
fs/nfs/blocklayout/blocklayout.c
371
schedule_work(&hdr->task.u.tk_work);
fs/nfs/dir.c
2713
nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data)
fs/nfs/dir.c
2764
struct rpc_task *task;
fs/nfs/dir.c
2824
task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
fs/nfs/dir.c
2826
if (IS_ERR(task)) {
fs/nfs/dir.c
2829
error = PTR_ERR(task);
fs/nfs/dir.c
2833
error = rpc_wait_for_completion_task(task);
fs/nfs/dir.c
2835
((struct nfs_renamedata *)task->tk_calldata)->cancelled = 1;
fs/nfs/dir.c
2839
error = task->tk_status;
fs/nfs/dir.c
2840
rpc_put_task(task);
fs/nfs/dir.c
3067
static u64 nfs_access_login_time(const struct task_struct *task,
fs/nfs/dir.c
3076
parent = rcu_dereference(task->real_parent);
fs/nfs/dir.c
3078
if (parent == task || cred_fscmp(pcred, cred) != 0)
fs/nfs/dir.c
3080
task = parent;
fs/nfs/dir.c
3082
ret = task->start_time;
fs/nfs/direct.c
629
int status = data->task.tk_status;
fs/nfs/filelayout/filelayout.c
102
task->tk_status = pnfs_write_done_resend_to_mds(hdr);
fs/nfs/filelayout/filelayout.c
108
struct rpc_task *task = &hdr->task;
fs/nfs/filelayout/filelayout.c
113
hdr->task.tk_pid,
fs/nfs/filelayout/filelayout.c
119
task->tk_status = pnfs_read_done_resend_to_mds(hdr);
fs/nfs/filelayout/filelayout.c
123
static int filelayout_async_handle_error(struct rpc_task *task,
fs/nfs/filelayout/filelayout.c
133
if (task->tk_status >= 0)
fs/nfs/filelayout/filelayout.c
136
switch (task->tk_status) {
fs/nfs/filelayout/filelayout.c
146
"flags 0x%x\n", __func__, task->tk_status,
fs/nfs/filelayout/filelayout.c
148
nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
fs/nfs/filelayout/filelayout.c
152
rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
fs/nfs/filelayout/filelayout.c
165
task->tk_status);
fs/nfs/filelayout/filelayout.c
187
task->tk_status);
fs/nfs/filelayout/filelayout.c
196
task->tk_status);
fs/nfs/filelayout/filelayout.c
199
task->tk_status = 0;
fs/nfs/filelayout/filelayout.c
205
static int filelayout_read_done_cb(struct rpc_task *task,
fs/nfs/filelayout/filelayout.c
210
trace_nfs4_pnfs_read(hdr, task->tk_status);
fs/nfs/filelayout/filelayout.c
211
err = filelayout_async_handle_error(task, hdr->args.context->state,
fs/nfs/filelayout/filelayout.c
217
return task->tk_status;
fs/nfs/filelayout/filelayout.c
219
rpc_restart_call_prepare(task);
fs/nfs/filelayout/filelayout.c
268
static void filelayout_read_prepare(struct rpc_task *task, void *data)
fs/nfs/filelayout/filelayout.c
273
rpc_exit(task, -EIO);
fs/nfs/filelayout/filelayout.c
277
dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
fs/nfs/filelayout/filelayout.c
279
rpc_exit(task, 0);
fs/nfs/filelayout/filelayout.c
287
task))
fs/nfs/filelayout/filelayout.c
291
rpc_exit(task, -EIO); /* lost lock, terminate I/O */
fs/nfs/filelayout/filelayout.c
294
static void filelayout_read_call_done(struct rpc_task *task, void *data)
fs/nfs/filelayout/filelayout.c
299
task->tk_status == 0) {
fs/nfs/filelayout/filelayout.c
300
nfs41_sequence_done(task, &hdr->res.seq_res);
fs/nfs/filelayout/filelayout.c
305
hdr->mds_ops->rpc_call_done(task, data);
fs/nfs/filelayout/filelayout.c
308
static void filelayout_read_count_stats(struct rpc_task *task, void *data)
fs/nfs/filelayout/filelayout.c
312
rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics);
fs/nfs/filelayout/filelayout.c
315
static int filelayout_write_done_cb(struct rpc_task *task,
fs/nfs/filelayout/filelayout.c
320
trace_nfs4_pnfs_write(hdr, task->tk_status);
fs/nfs/filelayout/filelayout.c
321
err = filelayout_async_handle_error(task, hdr->args.context->state,
fs/nfs/filelayout/filelayout.c
327
return task->tk_status;
fs/nfs/filelayout/filelayout.c
329
rpc_restart_call_prepare(task);
fs/nfs/filelayout/filelayout.c
337
if (task->tk_status >= 0)
fs/nfs/filelayout/filelayout.c
343
static int filelayout_commit_done_cb(struct rpc_task *task,
fs/nfs/filelayout/filelayout.c
348
trace_nfs4_pnfs_commit_ds(data, task->tk_status);
fs/nfs/filelayout/filelayout.c
349
err = filelayout_async_handle_error(task, NULL, data->ds_clp,
fs/nfs/filelayout/filelayout.c
357
rpc_restart_call_prepare(task);
fs/nfs/filelayout/filelayout.c
366
static void filelayout_write_prepare(struct rpc_task *task, void *data)
fs/nfs/filelayout/filelayout.c
371
rpc_exit(task, -EIO);
fs/nfs/filelayout/filelayout.c
375
dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
fs/nfs/filelayout/filelayout.c
377
rpc_exit(task, 0);
fs/nfs/filelayout/filelayout.c
383
task))
fs/nfs/filelayout/filelayout.c
387
rpc_exit(task, -EIO); /* lost lock, terminate I/O */
fs/nfs/filelayout/filelayout.c
390
static void filelayout_write_call_done(struct rpc_task *task, void *data)
fs/nfs/filelayout/filelayout.c
395
task->tk_status == 0) {
fs/nfs/filelayout/filelayout.c
396
nfs41_sequence_done(task, &hdr->res.seq_res);
fs/nfs/filelayout/filelayout.c
401
hdr->mds_ops->rpc_call_done(task, data);
fs/nfs/filelayout/filelayout.c
404
static void filelayout_write_count_stats(struct rpc_task *task, void *data)
fs/nfs/filelayout/filelayout.c
408
rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics);
fs/nfs/filelayout/filelayout.c
411
static void filelayout_commit_prepare(struct rpc_task *task, void *data)
fs/nfs/filelayout/filelayout.c
418
task);
fs/nfs/filelayout/filelayout.c
421
static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
fs/nfs/filelayout/filelayout.c
425
rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics);
fs/nfs/filelayout/filelayout.c
91
struct rpc_task *task = &hdr->task;
fs/nfs/filelayout/filelayout.c
96
hdr->task.tk_pid,
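
The filelayout prepare callbacks above show the exits available to an rpc_call_prepare handler: abort the task with rpc_exit() (a negative status for a hard error, 0 to fall back to the MDS), park it on a session slot, or let it transmit with rpc_call_start(). A schematic version; hdr_is_stale() is a made-up predicate for the "layout no longer usable" checks in the real code:

    #include <linux/nfs_xdr.h>
    #include <linux/sunrpc/sched.h>

    static void my_read_prepare(struct rpc_task *task, void *data)
    {
            struct nfs_pgio_header *hdr = data;

            if (hdr_is_stale(hdr)) {        /* hypothetical predicate */
                    rpc_exit(task, -EIO);   /* complete without sending */
                    return;
            }
            rpc_call_start(task);           /* proceed to transmit */
    }
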
fs/nfs/flexfilelayout/flexfilelayout.c
1224
struct rpc_task *task = &hdr->task;
fs/nfs/flexfilelayout/flexfilelayout.c
1231
hdr->task.tk_pid,
fs/nfs/flexfilelayout/flexfilelayout.c
1244
hdr->task.tk_pid,
fs/nfs/flexfilelayout/flexfilelayout.c
1254
task->tk_status = pnfs_write_done_resend_to_mds(hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
1276
struct rpc_task *task = &hdr->task;
fs/nfs/flexfilelayout/flexfilelayout.c
1284
hdr->task.tk_pid,
fs/nfs/flexfilelayout/flexfilelayout.c
1294
task->tk_status = pnfs_read_done_resend_to_mds(hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
1298
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
1315
if (!task->tk_xprt)
fs/nfs/flexfilelayout/flexfilelayout.c
1317
xprt_force_disconnect(task->tk_xprt);
fs/nfs/flexfilelayout/flexfilelayout.c
1327
"flags 0x%x\n", __func__, task->tk_status,
fs/nfs/flexfilelayout/flexfilelayout.c
1329
nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
fs/nfs/flexfilelayout/flexfilelayout.c
1335
rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
fs/nfs/flexfilelayout/flexfilelayout.c
1347
task->tk_status);
fs/nfs/flexfilelayout/flexfilelayout.c
1362
switch (task->tk_status) {
fs/nfs/flexfilelayout/flexfilelayout.c
1378
task->tk_status);
fs/nfs/flexfilelayout/flexfilelayout.c
1391
task->tk_status);
fs/nfs/flexfilelayout/flexfilelayout.c
1395
task->tk_status = 0;
fs/nfs/flexfilelayout/flexfilelayout.c
1400
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
1413
if (!task->tk_xprt)
fs/nfs/flexfilelayout/flexfilelayout.c
1415
xprt_force_disconnect(task->tk_xprt);
fs/nfs/flexfilelayout/flexfilelayout.c
1432
switch (task->tk_status) {
fs/nfs/flexfilelayout/flexfilelayout.c
1451
task->tk_status);
fs/nfs/flexfilelayout/flexfilelayout.c
1459
task->tk_status = 0;
fs/nfs/flexfilelayout/flexfilelayout.c
1460
rpc_restart_call_prepare(task);
fs/nfs/flexfilelayout/flexfilelayout.c
1461
rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
fs/nfs/flexfilelayout/flexfilelayout.c
1465
static int ff_layout_async_handle_error(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
1474
if (task->tk_status >= 0) {
fs/nfs/flexfilelayout/flexfilelayout.c
1485
return ff_layout_async_handle_error_v3(task, op_status, clp,
fs/nfs/flexfilelayout/flexfilelayout.c
1488
return ff_layout_async_handle_error_v4(task, op_status, state,
fs/nfs/flexfilelayout/flexfilelayout.c
1562
static int ff_layout_read_done_cb(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
1572
if (task->tk_status < 0) {
fs/nfs/flexfilelayout/flexfilelayout.c
1577
task->tk_status);
fs/nfs/flexfilelayout/flexfilelayout.c
1578
trace_ff_layout_read_error(hdr, task->tk_status);
fs/nfs/flexfilelayout/flexfilelayout.c
1581
err = ff_layout_async_handle_error(task, hdr->res.op_status,
fs/nfs/flexfilelayout/flexfilelayout.c
1593
return task->tk_status;
fs/nfs/flexfilelayout/flexfilelayout.c
1596
return task->tk_status;
fs/nfs/flexfilelayout/flexfilelayout.c
1600
task->tk_status = -EIO;
fs/nfs/flexfilelayout/flexfilelayout.c
1606
rpc_restart_call_prepare(task);
fs/nfs/flexfilelayout/flexfilelayout.c
1638
static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
1658
task->tk_start);
fs/nfs/flexfilelayout/flexfilelayout.c
1661
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
1677
task,
fs/nfs/flexfilelayout/flexfilelayout.c
1685
static int ff_layout_read_prepare_common(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
1689
rpc_exit(task, -EIO);
fs/nfs/flexfilelayout/flexfilelayout.c
1694
rpc_exit(task, -EAGAIN);
fs/nfs/flexfilelayout/flexfilelayout.c
1698
ff_layout_read_record_layoutstats_start(task, hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
1707
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
1711
if (ff_layout_read_prepare_common(task, hdr))
fs/nfs/flexfilelayout/flexfilelayout.c
1714
rpc_call_start(task);
fs/nfs/flexfilelayout/flexfilelayout.c
1717
static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
1724
task))
fs/nfs/flexfilelayout/flexfilelayout.c
1727
ff_layout_read_prepare_common(task, hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
1730
static void ff_layout_read_call_done(struct rpc_task *task, void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
1735
task->tk_status == 0) {
fs/nfs/flexfilelayout/flexfilelayout.c
1736
nfs4_sequence_done(task, &hdr->res.seq_res);
fs/nfs/flexfilelayout/flexfilelayout.c
1741
hdr->mds_ops->rpc_call_done(task, hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
1744
static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
1748
ff_layout_read_record_layoutstats_done(task, hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
1749
rpc_count_iostats_metrics(task,
fs/nfs/flexfilelayout/flexfilelayout.c
1757
ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
1766
static int ff_layout_write_done_cb(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
1777
if (task->tk_status < 0) {
fs/nfs/flexfilelayout/flexfilelayout.c
1782
task->tk_status);
fs/nfs/flexfilelayout/flexfilelayout.c
1783
trace_ff_layout_write_error(hdr, task->tk_status);
fs/nfs/flexfilelayout/flexfilelayout.c
1786
err = ff_layout_async_handle_error(task, hdr->res.op_status,
fs/nfs/flexfilelayout/flexfilelayout.c
1798
return task->tk_status;
fs/nfs/flexfilelayout/flexfilelayout.c
1801
return task->tk_status;
fs/nfs/flexfilelayout/flexfilelayout.c
1805
task->tk_status = -EIO;
fs/nfs/flexfilelayout/flexfilelayout.c
1818
if (task->tk_status >= 0)
fs/nfs/flexfilelayout/flexfilelayout.c
1824
static int ff_layout_commit_done_cb(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
1831
if (task->tk_status < 0) {
fs/nfs/flexfilelayout/flexfilelayout.c
1835
task->tk_status);
fs/nfs/flexfilelayout/flexfilelayout.c
1836
trace_ff_layout_commit_error(data, task->tk_status);
fs/nfs/flexfilelayout/flexfilelayout.c
1839
err = ff_layout_async_handle_error(task, data->res.op_status,
fs/nfs/flexfilelayout/flexfilelayout.c
1852
rpc_restart_call_prepare(task);
fs/nfs/flexfilelayout/flexfilelayout.c
1855
task->tk_status = -EIO;
fs/nfs/flexfilelayout/flexfilelayout.c
1863
static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
1883
task->tk_start);
fs/nfs/flexfilelayout/flexfilelayout.c
1886
static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
1902
task,
fs/nfs/flexfilelayout/flexfilelayout.c
1911
static int ff_layout_write_prepare_common(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
1915
rpc_exit(task, -EIO);
fs/nfs/flexfilelayout/flexfilelayout.c
1920
rpc_exit(task, -EAGAIN);
fs/nfs/flexfilelayout/flexfilelayout.c
1924
ff_layout_write_record_layoutstats_start(task, hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
1928
static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
1932
if (ff_layout_write_prepare_common(task, hdr))
fs/nfs/flexfilelayout/flexfilelayout.c
1935
rpc_call_start(task);
fs/nfs/flexfilelayout/flexfilelayout.c
1938
static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
1945
task))
fs/nfs/flexfilelayout/flexfilelayout.c
1948
ff_layout_write_prepare_common(task, hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
1951
static void ff_layout_write_call_done(struct rpc_task *task, void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
1956
task->tk_status == 0) {
fs/nfs/flexfilelayout/flexfilelayout.c
1957
nfs4_sequence_done(task, &hdr->res.seq_res);
fs/nfs/flexfilelayout/flexfilelayout.c
1962
hdr->mds_ops->rpc_call_done(task, hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
1965
static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
1969
ff_layout_write_record_layoutstats_done(task, hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
1970
rpc_count_iostats_metrics(task,
fs/nfs/flexfilelayout/flexfilelayout.c
1978
ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
1987
static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
2000
0, task->tk_start);
fs/nfs/flexfilelayout/flexfilelayout.c
2003
static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
2013
if (task->tk_status == 0) {
fs/nfs/flexfilelayout/flexfilelayout.c
2020
nfs4_ff_layout_stat_io_end_write(task,
fs/nfs/flexfilelayout/flexfilelayout.c
2027
static int ff_layout_commit_prepare_common(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
2031
rpc_exit(task, -EAGAIN);
fs/nfs/flexfilelayout/flexfilelayout.c
2035
ff_layout_commit_record_layoutstats_start(task, cdata);
fs/nfs/flexfilelayout/flexfilelayout.c
2039
static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
2041
if (ff_layout_commit_prepare_common(task, data))
fs/nfs/flexfilelayout/flexfilelayout.c
2044
rpc_call_start(task);
fs/nfs/flexfilelayout/flexfilelayout.c
2047
static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
2054
task))
fs/nfs/flexfilelayout/flexfilelayout.c
2056
ff_layout_commit_prepare_common(task, data);
fs/nfs/flexfilelayout/flexfilelayout.c
2059
static void ff_layout_commit_done(struct rpc_task *task, void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
2061
pnfs_generic_write_commit_done(task, data);
fs/nfs/flexfilelayout/flexfilelayout.c
2064
static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
2068
ff_layout_commit_record_layoutstats_done(task, cdata);
fs/nfs/flexfilelayout/flexfilelayout.c
2069
rpc_count_iostats_metrics(task,
fs/nfs/flexfilelayout/flexfilelayout.c
2077
ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
fs/nfs/flexfilelayout/flexfilelayout.c
2188
hdr->task.tk_start = ktime_get();
fs/nfs/flexfilelayout/flexfilelayout.c
2189
ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
2273
hdr->task.tk_start = ktime_get();
fs/nfs/flexfilelayout/flexfilelayout.c
2274
ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
fs/nfs/flexfilelayout/flexfilelayout.c
2354
data->task.tk_start = ktime_get();
fs/nfs/flexfilelayout/flexfilelayout.c
2355
ff_layout_commit_record_layoutstats_start(&data->task, data);
fs/nfs/flexfilelayout/flexfilelayout.c
2378
static bool ff_layout_match_rw(const struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
2385
static bool ff_layout_match_commit(const struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
2392
static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
fs/nfs/flexfilelayout/flexfilelayout.c
2394
const struct rpc_call_ops *ops = task->tk_ops;
fs/nfs/flexfilelayout/flexfilelayout.c
2400
return ff_layout_match_rw(task, task->tk_calldata, data);
fs/nfs/flexfilelayout/flexfilelayout.c
2403
return ff_layout_match_commit(task, task->tk_calldata, data);
fs/nfs/flexfilelayout/flexfilelayout.c
42
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
797
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
806
ktime_get(), task->tk_start);
fs/nfs/flexfilelayout/flexfilelayout.c
836
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
fs/nfs/flexfilelayout/flexfilelayout.c
848
requested, completed, ktime_get(), task->tk_start);
fs/nfs/internal.h
592
extern void nfs_commit_prepare(struct rpc_task *task, void *calldata);
fs/nfs/localio.c
1021
data->task.tk_status = 0;
fs/nfs/localio.c
1025
data->task.tk_status = status;
fs/nfs/localio.c
1035
call_ops->rpc_call_done(&data->task, data);
fs/nfs/localio.c
1043
ctx->data->task.tk_ops);
fs/nfs/localio.c
519
call_ops->rpc_call_done(&hdr->task, hdr);
fs/nfs/localio.c
527
hdr->task.tk_ops = call_ops;
fs/nfs/localio.c
528
if (!hdr->task.tk_start)
fs/nfs/localio.c
529
hdr->task.tk_start = ktime_get();
fs/nfs/localio.c
540
if (hdr->task.tk_status == 0)
fs/nfs/localio.c
544
hdr->task.tk_status = status;
fs/nfs/localio.c
572
nfs_local_do_read(iocb, hdr->task.tk_ops);
fs/nfs/localio.c
576
nfs_local_do_write(iocb, hdr->task.tk_ops);
fs/nfs/localio.c
584
hdr->task.tk_status = status;
fs/nfs/localio.c
585
nfs_local_hdr_release(hdr, hdr->task.tk_ops);
fs/nfs/localio.c
592
struct rpc_task *task = &hdr->task;
fs/nfs/localio.c
594
task->tk_action = NULL;
fs/nfs/localio.c
595
task->tk_ops->rpc_call_done(task, hdr);
fs/nfs/localio.c
597
if (task->tk_action == NULL) {
fs/nfs/localio.c
599
task->tk_ops->rpc_release(hdr);
fs/nfs/localio.c
620
long status = hdr->task.tk_status;
fs/nfs/localio.c
814
long status = hdr->task.tk_status;
fs/nfs/localio.c
982
hdr->task.tk_status = status;
fs/nfs/localio.c
992
data->task.tk_ops = call_ops;
fs/nfs/nfs3proc.c
472
static void nfs3_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
fs/nfs/nfs3proc.c
474
rpc_call_start(task);
fs/nfs/nfs3proc.c
478
nfs3_proc_unlink_done(struct rpc_task *task, struct inode *dir)
fs/nfs/nfs3proc.c
481
if (nfs3_async_handle_jukebox(task, dir))
fs/nfs/nfs3proc.c
483
res = task->tk_msg.rpc_resp;
fs/nfs/nfs3proc.c
49
nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode)
fs/nfs/nfs3proc.c
497
static void nfs3_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
fs/nfs/nfs3proc.c
499
rpc_call_start(task);
fs/nfs/nfs3proc.c
503
nfs3_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
fs/nfs/nfs3proc.c
508
if (nfs3_async_handle_jukebox(task, old_dir))
fs/nfs/nfs3proc.c
51
if (task->tk_status != -EJUKEBOX)
fs/nfs/nfs3proc.c
510
res = task->tk_msg.rpc_resp;
fs/nfs/nfs3proc.c
54
task->tk_status = 0;
fs/nfs/nfs3proc.c
55
rpc_restart_call(task);
fs/nfs/nfs3proc.c
56
rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
fs/nfs/nfs3proc.c
890
static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
fs/nfs/nfs3proc.c
896
return hdr->pgio_done_cb(task, hdr);
fs/nfs/nfs3proc.c
898
if (nfs3_async_handle_jukebox(task, inode))
fs/nfs/nfs3proc.c
901
if (task->tk_status >= 0) {
fs/nfs/nfs3proc.c
919
static int nfs3_proc_pgio_rpc_prepare(struct rpc_task *task,
fs/nfs/nfs3proc.c
922
rpc_call_start(task);
fs/nfs/nfs3proc.c
926
static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
fs/nfs/nfs3proc.c
931
return hdr->pgio_done_cb(task, hdr);
fs/nfs/nfs3proc.c
933
if (nfs3_async_handle_jukebox(task, inode))
fs/nfs/nfs3proc.c
935
if (task->tk_status >= 0) {
fs/nfs/nfs3proc.c
949
static void nfs3_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
fs/nfs/nfs3proc.c
951
rpc_call_start(task);
fs/nfs/nfs3proc.c
954
static int nfs3_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
fs/nfs/nfs3proc.c
957
return data->commit_done_cb(task, data);
fs/nfs/nfs3proc.c
959
if (nfs3_async_handle_jukebox(task, data->inode))
fs/nfs/nfs3proc.c
980
static bool nfs3_nlm_unlock_prepare(struct rpc_task *task, void *data)
fs/nfs/nfs3proc.c
984
return nfs_async_iocounter_wait(task, l_ctx);
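
The nfs3proc.c:49-56 fragments above are the complete NFSv3 "jukebox" retry: when the server returns -EJUKEBOX (data temporarily offline), the error is erased, the call is rearmed with rpc_restart_call(), and rpc_delay() paces the retry. Reassembled from those fragments into one helper:

    static int my_handle_jukebox(struct rpc_task *task)
    {
            if (task->tk_status != -EJUKEBOX)
                    return 0;                /* not ours to handle */
            task->tk_status = 0;             /* forget the error */
            rpc_restart_call(task);          /* re-run the RPC ... */
            rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);  /* ... after a pause */
            return 1;                        /* caller should bail out */
    }
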
fs/nfs/nfs40proc.c
12
static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs40proc.c
16
data->seq_args, data->seq_res, task);
fs/nfs/nfs40proc.c
19
static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs40proc.c
22
nfs4_sequence_done(task, data->seq_res);
fs/nfs/nfs40proc.c
265
static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs40proc.c
270
&data->res.seq_res, task);
fs/nfs/nfs40proc.c
275
static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs40proc.c
280
nfs40_sequence_done(task, &data->res.seq_res);
fs/nfs/nfs40proc.c
282
switch (task->tk_status) {
fs/nfs/nfs40proc.c
292
if (nfs4_async_handle_error(task, server,
fs/nfs/nfs40proc.c
294
rpc_restart_call_prepare(task);
fs/nfs/nfs40proc.c
39
static int nfs40_sequence_done(struct rpc_task *task,
fs/nfs/nfs40proc.c
81
static void nfs4_renew_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs40proc.c
87
trace_nfs4_renew_async(clp, task->tk_status);
fs/nfs/nfs40proc.c
88
switch (task->tk_status) {
fs/nfs/nfs40proc.c
98
if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
fs/nfs/nfs42proc.c
1013
rpc_delay(task, HZ);
fs/nfs/nfs42proc.c
1014
rpc_restart_call_prepare(task);
fs/nfs/nfs42proc.c
1023
trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
fs/nfs/nfs42proc.c
1068
struct rpc_task *task;
fs/nfs/nfs42proc.c
1077
task = rpc_run_task(&task_setup);
fs/nfs/nfs42proc.c
1078
if (IS_ERR(task))
fs/nfs/nfs42proc.c
1079
return PTR_ERR(task);
fs/nfs/nfs42proc.c
1080
rpc_put_task(task);
fs/nfs/nfs42proc.c
1113
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs42proc.c
1124
rpc_exit(task, 0);
fs/nfs/nfs42proc.c
1132
&data->res.seq_res, task);
fs/nfs/nfs42proc.c
1136
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs42proc.c
1142
if (!nfs4_sequence_done(task, &data->res.seq_res))
fs/nfs/nfs42proc.c
1145
switch (task->tk_status) {
fs/nfs/nfs42proc.c
1182
rpc_delay(task, HZ);
fs/nfs/nfs42proc.c
1183
rpc_restart_call_prepare(task);
fs/nfs/nfs42proc.c
1193
task->tk_status);
fs/nfs/nfs42proc.c
1216
struct rpc_task *task;
fs/nfs/nfs42proc.c
1245
task = rpc_run_task(&task_setup);
fs/nfs/nfs42proc.c
1246
if (IS_ERR(task))
fs/nfs/nfs42proc.c
1247
return PTR_ERR(task);
fs/nfs/nfs42proc.c
1248
rpc_put_task(task);
fs/nfs/nfs42proc.c
607
static void nfs42_offload_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs42proc.c
613
&data->res.osr_seq_res, task);
fs/nfs/nfs42proc.c
616
static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs42proc.c
620
trace_nfs4_offload_cancel(&data->args, task->tk_status);
fs/nfs/nfs42proc.c
621
nfs41_sequence_done(task, &data->res.osr_seq_res);
fs/nfs/nfs42proc.c
622
if (task->tk_status &&
fs/nfs/nfs42proc.c
623
nfs4_async_handle_error(task, data->seq_server, NULL,
fs/nfs/nfs42proc.c
625
rpc_restart_call_prepare(task);
fs/nfs/nfs42proc.c
645
struct rpc_task *task;
fs/nfs/nfs42proc.c
675
task = rpc_run_task(&task_setup_data);
fs/nfs/nfs42proc.c
676
if (IS_ERR(task))
fs/nfs/nfs42proc.c
677
return PTR_ERR(task);
fs/nfs/nfs42proc.c
678
status = rpc_wait_for_completion_task(task);
fs/nfs/nfs42proc.c
681
rpc_put_task(task);
fs/nfs/nfs42proc.c
944
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs42proc.c
955
rpc_exit(task, 0);
fs/nfs/nfs42proc.c
961
&data->res.seq_res, task);
fs/nfs/nfs42proc.c
965
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs42proc.c
971
if (!nfs4_sequence_done(task, &data->res.seq_res))
fs/nfs/nfs42proc.c
974
switch (task->tk_status) {
fs/nfs/nfs4_fs.h
101
struct rpc_task *task;
fs/nfs/nfs4_fs.h
312
extern int nfs4_async_handle_error(struct rpc_task *task,
fs/nfs/nfs4_fs.h
531
extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
fs/nfs/nfs4_fs.h
539
struct rpc_task *task);
fs/nfs/nfs4_fs.h
540
extern int nfs4_sequence_done(struct rpc_task *task,
fs/nfs/nfs4proc.c
10011
.task = &data->task,
fs/nfs/nfs4proc.c
10018
struct rpc_task *task;
fs/nfs/nfs4proc.c
1003
data->seq_args, data->seq_res, task);
fs/nfs/nfs4proc.c
10036
task = rpc_run_task(&task_setup_data);
fs/nfs/nfs4proc.c
10037
if (IS_ERR(task))
fs/nfs/nfs4proc.c
10038
return PTR_ERR(task);
fs/nfs/nfs4proc.c
10040
status = task->tk_status;
fs/nfs/nfs4proc.c
10043
rpc_put_task(task);
fs/nfs/nfs4proc.c
1006
static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
1010
nfs41_sequence_done(task, data->seq_res);
fs/nfs/nfs4proc.c
10288
static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
10294
task);
fs/nfs/nfs4proc.c
10297
static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
10301
nfs41_sequence_done(task, &data->res.seq_res);
fs/nfs/nfs4proc.c
10303
switch (task->tk_status) {
fs/nfs/nfs4proc.c
10305
if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
fs/nfs/nfs4proc.c
10306
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
10351
struct rpc_task *task;
fs/nfs/nfs4proc.c
10373
task = rpc_run_task(&task_setup);
fs/nfs/nfs4proc.c
10374
if (IS_ERR(task))
fs/nfs/nfs4proc.c
10375
return PTR_ERR(task);
fs/nfs/nfs4proc.c
10376
rpc_put_task(task);
fs/nfs/nfs4proc.c
1045
static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
fs/nfs/nfs4proc.c
1049
return res->sr_slot_ops->process(task, res);
fs/nfs/nfs4proc.c
1052
int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
fs/nfs/nfs4proc.c
1056
return res->sr_slot_ops->done(task, res);
fs/nfs/nfs4proc.c
1064
struct rpc_task *task)
fs/nfs/nfs4proc.c
1095
rpc_call_start(task);
fs/nfs/nfs4proc.c
1100
rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
fs/nfs/nfs4proc.c
1103
rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
fs/nfs/nfs4proc.c
1109
rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
fs/nfs/nfs4proc.c
1112
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
fs/nfs/nfs4proc.c
1121
struct rpc_task *task;
fs/nfs/nfs4proc.c
1123
task = rpc_run_task(task_setup);
fs/nfs/nfs4proc.c
1124
if (IS_ERR(task))
fs/nfs/nfs4proc.c
1125
return PTR_ERR(task);
fs/nfs/nfs4proc.c
1127
ret = task->tk_status;
fs/nfs/nfs4proc.c
1128
rpc_put_task(task);
fs/nfs/nfs4proc.c
2381
static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
2386
&data->c_arg.seq_args, &data->c_res.seq_res, task);
fs/nfs/nfs4proc.c
2389
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
2393
data->c_res.seq_res.sr_slot_ops->done(task, &data->c_res.seq_res);
fs/nfs/nfs4proc.c
2395
data->rpc_status = task->tk_status;
fs/nfs/nfs4proc.c
2434
struct rpc_task *task;
fs/nfs/nfs4proc.c
2457
task = rpc_run_task(&task_setup_data);
fs/nfs/nfs4proc.c
2458
if (IS_ERR(task))
fs/nfs/nfs4proc.c
2459
return PTR_ERR(task);
fs/nfs/nfs4proc.c
2460
status = rpc_wait_for_completion_task(task);
fs/nfs/nfs4proc.c
2466
rpc_put_task(task);
fs/nfs/nfs4proc.c
2470
static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
2477
if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
fs/nfs/nfs4proc.c
2504
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
fs/nfs/nfs4proc.c
2510
task) != 0)
fs/nfs/nfs4proc.c
2529
task->tk_action = NULL;
fs/nfs/nfs4proc.c
2531
nfs4_sequence_done(task, &data->o_res.seq_res);
fs/nfs/nfs4proc.c
2534
static void nfs4_open_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
2538
data->rpc_status = task->tk_status;
fs/nfs/nfs4proc.c
2540
if (!nfs4_sequence_process(task, &data->o_res.seq_res))
fs/nfs/nfs4proc.c
2543
if (task->tk_status == 0) {
fs/nfs/nfs4proc.c
2602
struct rpc_task *task;
fs/nfs/nfs4proc.c
2635
task = rpc_run_task(&task_setup_data);
fs/nfs/nfs4proc.c
2636
if (IS_ERR(task))
fs/nfs/nfs4proc.c
2637
return PTR_ERR(task);
fs/nfs/nfs4proc.c
2638
status = rpc_wait_for_completion_task(task);
fs/nfs/nfs4proc.c
2644
rpc_put_task(task);
fs/nfs/nfs4proc.c
3431
nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
fs/nfs/nfs4proc.c
3436
return pnfs_wait_on_layoutreturn(inode, task);
fs/nfs/nfs4proc.c
3568
static void nfs4_close_done(struct rpc_task *task, void *data)
fs/nfs/nfs4proc.c
3581
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
fs/nfs/nfs4proc.c
3583
trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
fs/nfs/nfs4proc.c
3586
if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
fs/nfs/nfs4proc.c
3593
switch (task->tk_status) {
fs/nfs/nfs4proc.c
3617
task->tk_msg.rpc_cred);
fs/nfs/nfs4proc.c
3624
task->tk_status = nfs4_async_handle_exception(task,
fs/nfs/nfs4proc.c
3625
server, task->tk_status, &exception);
fs/nfs/nfs4proc.c
3633
task->tk_status = 0;
fs/nfs/nfs4proc.c
3636
dprintk("%s: ret = %d\n", __func__, task->tk_status);
fs/nfs/nfs4proc.c
3639
task->tk_status = 0;
fs/nfs/nfs4proc.c
3640
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
3644
static void nfs4_close_prepare(struct rpc_task *task, void *data)
fs/nfs/nfs4proc.c
3654
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
fs/nfs/nfs4proc.c
3657
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
fs/nfs/nfs4proc.c
3688
if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
fs/nfs/nfs4proc.c
3700
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
fs/nfs/nfs4proc.c
3724
task) != 0)
fs/nfs/nfs4proc.c
3728
task->tk_action = NULL;
fs/nfs/nfs4proc.c
3730
nfs4_sequence_done(task, &calldata->res.seq_res);
fs/nfs/nfs4proc.c
3757
struct rpc_task *task;
fs/nfs/nfs4proc.c
3810
task = rpc_run_task(&task_setup_data);
fs/nfs/nfs4proc.c
3811
if (IS_ERR(task))
fs/nfs/nfs4proc.c
3812
return PTR_ERR(task);
fs/nfs/nfs4proc.c
3815
status = rpc_wait_for_completion_task(task);
fs/nfs/nfs4proc.c
3816
rpc_put_task(task);
fs/nfs/nfs4proc.c
4392
static void nfs4_call_getattr_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
4396
data->seq_res, task);
fs/nfs/nfs4proc.c
4399
static void nfs4_call_getattr_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
4403
nfs4_sequence_process(task, data->seq_res);
fs/nfs/nfs4proc.c
4976
static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
fs/nfs/nfs4proc.c
4981
task);
fs/nfs/nfs4proc.c
4984
static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
fs/nfs/nfs4proc.c
4986
struct nfs_unlinkdata *data = task->tk_calldata;
fs/nfs/nfs4proc.c
4989
if (!nfs4_sequence_done(task, &res->seq_res))
fs/nfs/nfs4proc.c
4991
if (nfs4_async_handle_error(task, res->server, NULL,
fs/nfs/nfs4proc.c
4994
if (task->tk_status == 0)
fs/nfs/nfs4proc.c
5024
static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
fs/nfs/nfs4proc.c
5029
task);
fs/nfs/nfs4proc.c
5032
static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
fs/nfs/nfs4proc.c
5035
struct nfs_renamedata *data = task->tk_calldata;
fs/nfs/nfs4proc.c
5038
if (!nfs4_sequence_done(task, &res->seq_res))
fs/nfs/nfs4proc.c
5040
if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
fs/nfs/nfs4proc.c
5043
if (task->tk_status == 0) {
fs/nfs/nfs4proc.c
5592
static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
fs/nfs/nfs4proc.c
5596
trace_nfs4_read(hdr, task->tk_status);
fs/nfs/nfs4proc.c
5597
if (task->tk_status < 0) {
fs/nfs/nfs4proc.c
5604
task->tk_status = nfs4_async_handle_exception(task,
fs/nfs/nfs4proc.c
5605
server, task->tk_status, &exception);
fs/nfs/nfs4proc.c
5608
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
5613
if (task->tk_status > 0)
fs/nfs/nfs4proc.c
5618
static bool nfs4_read_stateid_changed(struct rpc_task *task,
fs/nfs/nfs4proc.c
5622
if (!nfs4_error_stateid_expired(task->tk_status) ||
fs/nfs/nfs4proc.c
5628
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
5632
static bool nfs4_read_plus_not_supported(struct rpc_task *task,
fs/nfs/nfs4proc.c
5636
struct rpc_message *msg = &task->tk_msg;
fs/nfs/nfs4proc.c
5639
task->tk_status == -ENOTSUPP) {
fs/nfs/nfs4proc.c
5642
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
5648
static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
fs/nfs/nfs4proc.c
5650
if (!nfs4_sequence_done(task, &hdr->res.seq_res))
fs/nfs/nfs4proc.c
5652
if (nfs4_read_stateid_changed(task, &hdr->args))
fs/nfs/nfs4proc.c
5654
if (nfs4_read_plus_not_supported(task, hdr))
fs/nfs/nfs4proc.c
5656
if (task->tk_status > 0)
fs/nfs/nfs4proc.c
5658
return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
fs/nfs/nfs4proc.c
5659
nfs4_read_done_cb(task, hdr);
fs/nfs/nfs4proc.c
5693
static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
fs/nfs/nfs4proc.c
5699
task))
fs/nfs/nfs4proc.c
5710
static int nfs4_write_done_cb(struct rpc_task *task,
fs/nfs/nfs4proc.c
5715
trace_nfs4_write(hdr, task->tk_status);
fs/nfs/nfs4proc.c
5716
if (task->tk_status < 0) {
fs/nfs/nfs4proc.c
5723
task->tk_status = nfs4_async_handle_exception(task,
fs/nfs/nfs4proc.c
5724
NFS_SERVER(inode), task->tk_status,
fs/nfs/nfs4proc.c
5728
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
5732
if (task->tk_status >= 0) {
fs/nfs/nfs4proc.c
5739
static bool nfs4_write_stateid_changed(struct rpc_task *task,
fs/nfs/nfs4proc.c
5743
if (!nfs4_error_stateid_expired(task->tk_status) ||
fs/nfs/nfs4proc.c
5749
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
5753
static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
fs/nfs/nfs4proc.c
5755
if (!nfs4_sequence_done(task, &hdr->res.seq_res))
fs/nfs/nfs4proc.c
5757
if (nfs4_write_stateid_changed(task, &hdr->args))
fs/nfs/nfs4proc.c
5759
return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
fs/nfs/nfs4proc.c
5760
nfs4_write_done_cb(task, hdr);
fs/nfs/nfs4proc.c
5837
static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
fs/nfs/nfs4proc.c
5842
task);
fs/nfs/nfs4proc.c
5845
static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
fs/nfs/nfs4proc.c
5849
trace_nfs4_commit(data, task->tk_status);
fs/nfs/nfs4proc.c
5850
if (nfs4_async_handle_error(task, NFS_SERVER(inode),
fs/nfs/nfs4proc.c
5852
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
5858
static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
fs/nfs/nfs4proc.c
5860
if (!nfs4_sequence_done(task, &data->res.seq_res))
fs/nfs/nfs4proc.c
5862
return data->commit_done_cb(task, data);
fs/nfs/nfs4proc.c
6528
static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
6532
if (task->tk_status == 0)
fs/nfs/nfs4proc.c
6533
sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
fs/nfs/nfs4proc.c
6664
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
6674
if (!nfs4_sequence_done(task, &data->res.seq_res))
fs/nfs/nfs4proc.c
6677
trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
fs/nfs/nfs4proc.c
6680
if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
fs/nfs/nfs4proc.c
6684
if (data->args.sattr_args && task->tk_status != 0) {
fs/nfs/nfs4proc.c
6708
switch (task->tk_status) {
fs/nfs/nfs4proc.c
6717
task->tk_msg.rpc_cred);
fs/nfs/nfs4proc.c
672
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
fs/nfs/nfs4proc.c
6722
task->tk_status = 0;
fs/nfs/nfs4proc.c
6740
task->tk_status = nfs4_async_handle_exception(task,
fs/nfs/nfs4proc.c
6741
data->res.server, task->tk_status,
fs/nfs/nfs4proc.c
6748
data->rpc_status = task->tk_status;
fs/nfs/nfs4proc.c
6751
task->tk_status = 0;
fs/nfs/nfs4proc.c
6752
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
6772
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
fs/nfs/nfs4proc.c
6779
if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
fs/nfs/nfs4proc.c
678
if ((task->tk_rpc_status == -ENETDOWN ||
fs/nfs/nfs4proc.c
6780
nfs4_sequence_done(task, &d_data->res.seq_res);
fs/nfs/nfs4proc.c
679
task->tk_rpc_status == -ENETUNREACH) &&
fs/nfs/nfs4proc.c
6793
task);
fs/nfs/nfs4proc.c
680
task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
fs/nfs/nfs4proc.c
6809
struct rpc_task *task;
fs/nfs/nfs4proc.c
6877
task = rpc_run_task(&task_setup_data);
fs/nfs/nfs4proc.c
6878
if (IS_ERR(task))
fs/nfs/nfs4proc.c
6879
return PTR_ERR(task);
fs/nfs/nfs4proc.c
6882
status = rpc_wait_for_completion_task(task);
fs/nfs/nfs4proc.c
6887
rpc_put_task(task);
fs/nfs/nfs4proc.c
694
rpc_delay(task, nfs4_update_delay(&exception->timeout));
fs/nfs/nfs4proc.c
700
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
fs/nfs/nfs4proc.c
702
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
fs/nfs/nfs4proc.c
7065
static void nfs4_locku_done(struct rpc_task *task, void *data)
fs/nfs/nfs4proc.c
7074
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
fs/nfs/nfs4proc.c
7076
switch (task->tk_status) {
fs/nfs/nfs4proc.c
7088
task->tk_msg.rpc_cred);
fs/nfs/nfs4proc.c
7094
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
7099
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
7102
task->tk_status = nfs4_async_handle_exception(task,
fs/nfs/nfs4proc.c
7103
calldata->server, task->tk_status,
fs/nfs/nfs4proc.c
7107
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
7112
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
fs/nfs/nfs4proc.c
7117
nfs_async_iocounter_wait(task, calldata->l_ctx))
fs/nfs/nfs4proc.c
7120
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
fs/nfs/nfs4proc.c
7130
task) != 0)
fs/nfs/nfs4proc.c
7134
task->tk_action = NULL;
fs/nfs/nfs4proc.c
7136
nfs4_sequence_done(task, &calldata->res.seq_res);
fs/nfs/nfs4proc.c
716
rpc_task_release_transport(task);
fs/nfs/nfs4proc.c
7197
struct rpc_task *task;
fs/nfs/nfs4proc.c
722
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
fs/nfs/nfs4proc.c
7228
task = nfs4_do_unlck(request,
fs/nfs/nfs4proc.c
7231
status = PTR_ERR(task);
fs/nfs/nfs4proc.c
7232
if (IS_ERR(task))
fs/nfs/nfs4proc.c
7234
status = rpc_wait_for_completion_task(task);
fs/nfs/nfs4proc.c
7235
rpc_put_task(task);
fs/nfs/nfs4proc.c
729
if (task->tk_status >= 0)
fs/nfs/nfs4proc.c
7293
static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
7298
if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
fs/nfs/nfs4proc.c
7302
if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
fs/nfs/nfs4proc.c
7316
task->tk_action = NULL;
fs/nfs/nfs4proc.c
7323
task) == 0)
fs/nfs/nfs4proc.c
733
task->tk_status = nfs4_async_handle_exception(task, server,
fs/nfs/nfs4proc.c
7330
nfs4_sequence_done(task, &data->res.seq_res);
fs/nfs/nfs4proc.c
7334
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
7339
if (!nfs4_sequence_done(task, &data->res.seq_res))
fs/nfs/nfs4proc.c
734
task->tk_status,
fs/nfs/nfs4proc.c
7342
data->rpc_status = task->tk_status;
fs/nfs/nfs4proc.c
7343
switch (task->tk_status) {
fs/nfs/nfs4proc.c
7383
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
7393
struct rpc_task *task;
fs/nfs/nfs4proc.c
7394
task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
fs/nfs/nfs4proc.c
7396
if (!IS_ERR(task))
fs/nfs/nfs4proc.c
7397
rpc_put_task_async(task);
fs/nfs/nfs4proc.c
7432
struct rpc_task *task;
fs/nfs/nfs4proc.c
7467
task = rpc_run_task(&task_setup_data);
fs/nfs/nfs4proc.c
7468
if (IS_ERR(task))
fs/nfs/nfs4proc.c
7469
return PTR_ERR(task);
fs/nfs/nfs4proc.c
7470
ret = rpc_wait_for_completion_task(task);
fs/nfs/nfs4proc.c
7479
rpc_put_task(task);
fs/nfs/nfs4proc.c
8412
nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
8414
struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
fs/nfs/nfs4proc.c
8415
struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
fs/nfs/nfs4proc.c
8418
switch (task->tk_status) {
fs/nfs/nfs4proc.c
842
struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
fs/nfs/nfs4proc.c
8422
task->tk_status);
fs/nfs/nfs4proc.c
8427
rpc_task_close_connection(task);
fs/nfs/nfs4proc.c
8429
rpc_restart_call(task);
fs/nfs/nfs4proc.c
843
if (!IS_ERR(task))
fs/nfs/nfs4proc.c
844
rpc_put_task_async(task);
fs/nfs/nfs4proc.c
847
static int nfs41_sequence_process(struct rpc_task *task,
fs/nfs/nfs4proc.c
8470
struct rpc_task *task;
fs/nfs/nfs4proc.c
8480
task = rpc_run_task(&task_setup_data);
fs/nfs/nfs4proc.c
8481
if (!IS_ERR(task)) {
fs/nfs/nfs4proc.c
8482
status = task->tk_status;
fs/nfs/nfs4proc.c
8483
rpc_put_task(task);
fs/nfs/nfs4proc.c
8485
status = PTR_ERR(task);
fs/nfs/nfs4proc.c
859
if (!RPC_WAS_SENT(task) || slot->seq_done)
fs/nfs/nfs4proc.c
868
if (task->tk_status == -NFS4ERR_DEADSESSION)
fs/nfs/nfs4proc.c
8790
struct rpc_task *task;
fs/nfs/nfs4proc.c
8796
task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
fs/nfs/nfs4proc.c
8797
if (IS_ERR(task))
fs/nfs/nfs4proc.c
8798
return PTR_ERR(task);
fs/nfs/nfs4proc.c
8800
argp = task->tk_msg.rpc_argp;
fs/nfs/nfs4proc.c
8801
resp = task->tk_msg.rpc_resp;
fs/nfs/nfs4proc.c
8802
status = task->tk_status;
fs/nfs/nfs4proc.c
8842
rpc_put_task(task);
fs/nfs/nfs4proc.c
8894
struct rpc_task *task;
fs/nfs/nfs4proc.c
8906
task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
fs/nfs/nfs4proc.c
8907
if (IS_ERR(task))
fs/nfs/nfs4proc.c
8910
status = task->tk_status;
fs/nfs/nfs4proc.c
8913
task->tk_msg.rpc_resp, xprt);
fs/nfs/nfs4proc.c
8923
rpc_put_task(task);
fs/nfs/nfs4proc.c
8999
static void nfs4_get_lease_time_prepare(struct rpc_task *task,
fs/nfs/nfs4proc.c
9010
task);
fs/nfs/nfs4proc.c
9017
static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
9022
if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
fs/nfs/nfs4proc.c
9024
switch (task->tk_status) {
fs/nfs/nfs4proc.c
9027
rpc_delay(task, NFS4_POLL_RETRY_MIN);
fs/nfs/nfs4proc.c
9028
task->tk_status = 0;
fs/nfs/nfs4proc.c
9031
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
932
if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
fs/nfs/nfs4proc.c
9323
static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
fs/nfs/nfs4proc.c
9325
switch(task->tk_status) {
fs/nfs/nfs4proc.c
9327
rpc_delay(task, NFS4_POLL_RETRY_MAX);
fs/nfs/nfs4proc.c
933
nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
fs/nfs/nfs4proc.c
9335
static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
fs/nfs/nfs4proc.c
9340
if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
fs/nfs/nfs4proc.c
9343
trace_nfs4_sequence(clp, task->tk_status);
fs/nfs/nfs4proc.c
9344
if (task->tk_status < 0 && clp->cl_cons_state >= 0) {
fs/nfs/nfs4proc.c
9345
dprintk("%s ERROR %d\n", __func__, task->tk_status);
fs/nfs/nfs4proc.c
9349
if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
fs/nfs/nfs4proc.c
9350
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
9354
dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
fs/nfs/nfs4proc.c
9357
static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
fs/nfs/nfs4proc.c
9364
args = task->tk_msg.rpc_argp;
fs/nfs/nfs4proc.c
9365
res = task->tk_msg.rpc_resp;
fs/nfs/nfs4proc.c
9367
nfs4_setup_sequence(clp, args, res, task);
fs/nfs/nfs4proc.c
9422
struct rpc_task *task;
fs/nfs/nfs4proc.c
9427
task = _nfs41_proc_sequence(clp, cred, NULL, false);
fs/nfs/nfs4proc.c
9428
if (IS_ERR(task))
fs/nfs/nfs4proc.c
9429
ret = PTR_ERR(task);
fs/nfs/nfs4proc.c
9431
rpc_put_task_async(task);
fs/nfs/nfs4proc.c
9438
struct rpc_task *task;
fs/nfs/nfs4proc.c
9441
task = _nfs41_proc_sequence(clp, cred, NULL, true);
fs/nfs/nfs4proc.c
9442
if (IS_ERR(task)) {
fs/nfs/nfs4proc.c
9443
ret = PTR_ERR(task);
fs/nfs/nfs4proc.c
9446
ret = rpc_wait_for_completion_task(task);
fs/nfs/nfs4proc.c
9448
ret = task->tk_status;
fs/nfs/nfs4proc.c
9449
rpc_put_task(task);
fs/nfs/nfs4proc.c
9461
static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
fs/nfs/nfs4proc.c
9468
task);
fs/nfs/nfs4proc.c
9471
static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
fs/nfs/nfs4proc.c
9473
switch(task->tk_status) {
fs/nfs/nfs4proc.c
9481
rpc_delay(task, NFS4_POLL_RETRY_MAX);
fs/nfs/nfs4proc.c
9486
__func__, task->tk_status, clp->cl_hostname);
fs/nfs/nfs4proc.c
9498
static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
fs/nfs/nfs4proc.c
9504
if (!nfs41_sequence_done(task, res))
fs/nfs/nfs4proc.c
9507
trace_nfs4_reclaim_complete(clp, task->tk_status);
fs/nfs/nfs4proc.c
9508
if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
fs/nfs/nfs4proc.c
9509
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
9563
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
9569
&lgp->res.seq_res, task);
fs/nfs/nfs4proc.c
9572
static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
9576
nfs41_sequence_process(task, &lgp->res.seq_res);
fs/nfs/nfs4proc.c
9580
nfs4_layoutget_handle_exception(struct rpc_task *task,
fs/nfs/nfs4proc.c
9586
int nfs4err = task->tk_status;
fs/nfs/nfs4proc.c
9590
dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
fs/nfs/nfs4proc.c
9698
struct rpc_task *task;
fs/nfs/nfs4proc.c
972
if (rpc_restart_call_prepare(task)) {
fs/nfs/nfs4proc.c
9720
task = rpc_run_task(&task_setup_data);
fs/nfs/nfs4proc.c
9721
if (IS_ERR(task))
fs/nfs/nfs4proc.c
9722
return ERR_CAST(task);
fs/nfs/nfs4proc.c
9724
status = rpc_wait_for_completion_task(task);
fs/nfs/nfs4proc.c
9728
if (task->tk_status < 0) {
fs/nfs/nfs4proc.c
9730
status = nfs4_layoutget_handle_exception(task, lgp, exception);
fs/nfs/nfs4proc.c
974
task->tk_status = 0;
fs/nfs/nfs4proc.c
9744
rpc_put_task(task);
fs/nfs/nfs4proc.c
9752
nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
9759
task);
fs/nfs/nfs4proc.c
9761
rpc_exit(task, 0);
fs/nfs/nfs4proc.c
9764
static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
9769
if (!nfs41_sequence_process(task, &lrp->res.seq_res))
fs/nfs/nfs4proc.c
9772
if (task->tk_rpc_status == -ETIMEDOUT) {
fs/nfs/nfs4proc.c
9781
if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
fs/nfs/nfs4proc.c
9787
switch (task->tk_status) {
fs/nfs/nfs4proc.c
979
if (!rpc_restart_call(task))
fs/nfs/nfs4proc.c
9795
task->tk_status = 0;
fs/nfs/nfs4proc.c
9804
task->tk_status);
fs/nfs/nfs4proc.c
9807
task->tk_status = 0;
fs/nfs/nfs4proc.c
981
rpc_delay(task, NFS4_POLL_RETRY_MAX);
fs/nfs/nfs4proc.c
9810
if (nfs4_async_handle_error(task, server, NULL, NULL) ==
fs/nfs/nfs4proc.c
9818
task->tk_status = 0;
fs/nfs/nfs4proc.c
9820
rpc_restart_call_prepare(task);
fs/nfs/nfs4proc.c
985
int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
fs/nfs/nfs4proc.c
9853
struct rpc_task *task;
fs/nfs/nfs4proc.c
987
if (!nfs41_sequence_process(task, res))
fs/nfs/nfs4proc.c
9885
task = rpc_run_task(&task_setup_data);
fs/nfs/nfs4proc.c
9886
if (IS_ERR(task))
fs/nfs/nfs4proc.c
9887
return PTR_ERR(task);
fs/nfs/nfs4proc.c
9889
status = task->tk_status;
fs/nfs/nfs4proc.c
9892
rpc_put_task(task);
fs/nfs/nfs4proc.c
9946
static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
9954
task);
fs/nfs/nfs4proc.c
9958
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
996
static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
fs/nfs/nfs4proc.c
9963
if (!nfs41_sequence_done(task, &data->res.seq_res))
fs/nfs/nfs4proc.c
9966
switch (task->tk_status) { /* Just ignore these failures */
fs/nfs/nfs4proc.c
9971
task->tk_status = 0;
fs/nfs/nfs4proc.c
9976
if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
fs/nfs/nfs4proc.c
9977
rpc_restart_call_prepare(task);
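
A recurring shape in the nfs4proc.c done callbacks above: first let the session code process (and possibly reschedule) the SEQUENCE result via nfs4_sequence_done(), and only then act on tk_status, converting -EAGAIN from the generic error handler into rpc_restart_call_prepare(). A condensed sketch, assuming it lives inside fs/nfs where nfs4_fs.h is visible; struct my_calldata and its fields are placeholders for the per-operation data structures:

    static void my_op_done(struct rpc_task *task, void *calldata)
    {
            struct my_calldata *data = calldata;

            /* If the SEQUENCE result says "retry", the task was already
             * rescheduled; do nothing until we are called again. */
            if (!nfs4_sequence_done(task, &data->res.seq_res))
                    return;
            if (nfs4_async_handle_error(task, data->server,
                                        NULL, NULL) == -EAGAIN) {
                    rpc_restart_call_prepare(task);
                    return;
            }
            data->rpc_status = task->tk_status;
    }
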
fs/nfs/nfs4session.c
357
static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
fs/nfs/nfs4session.c
359
struct nfs4_sequence_args *args = task->tk_msg.rpc_argp;
fs/nfs/nfs4session.c
360
struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
fs/nfs/nfs4state.c
1022
new->task = NULL;
fs/nfs/nfs4state.c
1037
rpc_wake_up_queued_task(&sequence->wait, next->task);
fs/nfs/nfs4state.c
1109
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
fs/nfs/nfs4state.c
1118
seqid->task = task;
fs/nfs/nfs4state.c
1123
rpc_sleep_on(&sequence->wait, task, NULL);
fs/nfs/nfs4state.c
1144
struct task_struct *task;
fs/nfs/nfs4state.c
1176
task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
fs/nfs/nfs4state.c
1177
if (IS_ERR(task)) {
fs/nfs/nfs4state.c
1179
__func__, PTR_ERR(task));
fs/nfs/nfs4state.c
1181
nfs_mark_client_ready(clp, PTR_ERR(task));
fs/nfs/nfs4super.c
106
new->task = current;
fs/nfs/nfs4super.c
78
const struct task_struct *task;
fs/nfs/nfs4super.c
90
if (p->task == current)
fs/nfs/nfs4trace.h
454
const struct rpc_task *task = rqstp->rq_task;
fs/nfs/nfs4trace.h
456
__entry->task_id = task->tk_pid;
fs/nfs/nfs4trace.h
457
__entry->client_id = task->tk_client->cl_clid;
fs/nfs/nfs4trace.h
489
const struct rpc_task *task = rqstp->rq_task;
fs/nfs/nfs4trace.h
491
__entry->task_id = task->tk_pid;
fs/nfs/nfs4trace.h
492
__entry->client_id = task->tk_client->cl_clid;
fs/nfs/nfstrace.h
1258
const struct rpc_task *task,
fs/nfs/nfstrace.h
1262
TP_ARGS(task, hdr),
fs/nfs/nfstrace.h
1281
__entry->error = task->tk_status;
fs/nfs/nfstrace.h
1304
const struct rpc_task *task,
fs/nfs/nfstrace.h
1308
TP_ARGS(task, hdr),
fs/nfs/nfstrace.h
1327
__entry->error = task->tk_status;
fs/nfs/nfstrace.h
1436
const struct rpc_task *task,
fs/nfs/nfstrace.h
1440
TP_ARGS(task, hdr),
fs/nfs/nfstrace.h
1461
__entry->error = task->tk_status;
fs/nfs/nfstrace.h
1628
const struct rpc_task *task,
fs/nfs/nfstrace.h
1632
TP_ARGS(task, data),
fs/nfs/nfstrace.h
1651
__entry->error = task->tk_status;
fs/nfs/nfstrace.h
1951
const struct rpc_task *task = rqstp->rq_task;
fs/nfs/nfstrace.h
1953
__entry->task_id = task->tk_pid;
fs/nfs/nfstrace.h
1954
__entry->client_id = task->tk_client->cl_clid;
fs/nfs/nfstrace.h
1956
__entry->version = task->tk_client->cl_vers;
fs/nfs/pagelist.c
171
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
fs/nfs/pagelist.c
177
rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
fs/nfs/pagelist.c
182
rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
fs/nfs/pagelist.c
724
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
fs/nfs/pagelist.c
728
err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
fs/nfs/pagelist.c
730
rpc_exit(task, err);
fs/nfs/pagelist.c
738
struct rpc_task *task;
fs/nfs/pagelist.c
746
.task = &hdr->task,
fs/nfs/pagelist.c
770
task = rpc_run_task(&task_setup_data);
fs/nfs/pagelist.c
771
if (IS_ERR(task))
fs/nfs/pagelist.c
772
return PTR_ERR(task);
fs/nfs/pagelist.c
773
rpc_put_task(task);
fs/nfs/pagelist.c
854
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
fs/nfs/pagelist.c
859
if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
fs/nfs/pagelist.c
861
if (task->tk_status < 0)
fs/nfs/pagelist.c
862
nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
fs/nfs/pagelist.c
864
hdr->rw_ops->rw_result(task, hdr);
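
nfs_async_iocounter_wait() (pagelist.c:171-182 above) illustrates how an async task waits without blocking a thread: it parks itself with rpc_sleep_on(), then rechecks the condition to close the race with a concurrent waker, who calls rpc_wake_up_queued_task(). Loosely paraphrased:

    static bool my_iocounter_wait(struct rpc_task *task,
                                  struct rpc_wait_queue *q,
                                  atomic_t *io_count)
    {
            if (atomic_read(io_count) == 0)
                    return false;             /* nothing outstanding */
            rpc_sleep_on(q, task, NULL);      /* park the task */
            if (atomic_read(io_count) == 0) {
                    /* count dropped while we queued: undo the sleep */
                    rpc_wake_up_queued_task(q, task);
                    return false;
            }
            return true;    /* caller returns; task resumes when woken */
    }
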
fs/nfs/pnfs.c
1669
int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
fs/nfs/pnfs.c
1684
if (task->tk_rpc_status == 0)
fs/nfs/pnfs.c
1690
if ((task->tk_rpc_status == -ENETDOWN ||
fs/nfs/pnfs.c
1691
task->tk_rpc_status == -ENETUNREACH) &&
fs/nfs/pnfs.c
1692
task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
fs/nfs/pnfs.c
1699
if (!RPC_WAS_SENT(task))
fs/nfs/pnfs.c
1759
bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
fs/nfs/pnfs.c
1770
rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
fs/nfs/pnfs.c
3012
hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
fs/nfs/pnfs.c
3023
hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
fs/nfs/pnfs.c
3139
hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
fs/nfs/pnfs.c
3148
hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
fs/nfs/pnfs.c
3208
hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
fs/nfs/pnfs.h
309
int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
fs/nfs/pnfs.h
314
bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task);
fs/nfs/pnfs.h
416
void pnfs_generic_write_commit_done(struct rpc_task *task, void *data);
fs/nfs/pnfs.h
784
pnfs_roc_done(struct rpc_task *task,
fs/nfs/pnfs.h
800
pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
fs/nfs/pnfs_nfs.c
38
data->task.tk_status = 0;
fs/nfs/pnfs_nfs.c
44
void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
fs/nfs/pnfs_nfs.c
49
wdata->mds_ops->rpc_call_done(task, data);
fs/nfs/proc.c
342
static void nfs_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
fs/nfs/proc.c
344
rpc_call_start(task);
fs/nfs/proc.c
347
static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir)
fs/nfs/proc.c
362
static void nfs_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
fs/nfs/proc.c
364
rpc_call_start(task);
fs/nfs/proc.c
368
nfs_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
fs/nfs/proc.c
603
static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
fs/nfs/proc.c
608
if (task->tk_status >= 0) {
fs/nfs/proc.c
626
static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
fs/nfs/proc.c
629
rpc_call_start(task);
fs/nfs/proc.c
633
static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
fs/nfs/proc.c
635
if (task->tk_status >= 0) {
fs/nfs/proc.c
651
static void nfs_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
fs/nfs/read.c
220
static int nfs_readpage_done(struct rpc_task *task,
fs/nfs/read.c
224
int status = NFS_PROTO(inode)->read_done(task, hdr);
fs/nfs/read.c
229
trace_nfs_readpage_done(task, hdr);
fs/nfs/read.c
231
if (task->tk_status == -ESTALE) {
fs/nfs/read.c
238
static void nfs_readpage_retry(struct rpc_task *task,
fs/nfs/read.c
246
trace_nfs_readpage_short(task, hdr);
fs/nfs/read.c
255
if (!task->tk_ops) {
fs/nfs/read.c
267
rpc_restart_call_prepare(task);
fs/nfs/read.c
270
static void nfs_readpage_result(struct rpc_task *task,
fs/nfs/read.c
283
nfs_readpage_retry(task, hdr);
fs/nfs/sysfs.c
222
static bool shutdown_match_client(const struct rpc_task *task, const void *data)
fs/nfs/unlink.c
103
struct rpc_task *task;
fs/nfs/unlink.c
116
task = rpc_run_task(&task_setup_data);
fs/nfs/unlink.c
117
if (!IS_ERR(task))
fs/nfs/unlink.c
118
rpc_put_task_async(task);
fs/nfs/unlink.c
265
static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
fs/nfs/unlink.c
273
new_dir, data->new_dentry, task->tk_status);
fs/nfs/unlink.c
274
if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
fs/nfs/unlink.c
275
rpc_restart_call_prepare(task);
fs/nfs/unlink.c
280
data->complete(task, data);
fs/nfs/unlink.c
317
static void nfs_rename_prepare(struct rpc_task *task, void *calldata)
fs/nfs/unlink.c
320
NFS_PROTO(data->old_dir)->rename_rpc_prepare(task, data);
fs/nfs/unlink.c
361
task_setup_data.task = &data->task;
fs/nfs/unlink.c
404
nfs_complete_sillyrename(struct rpc_task *task, struct nfs_renamedata *data)
fs/nfs/unlink.c
408
if (task->tk_status != 0) {
fs/nfs/unlink.c
451
struct rpc_task *task;
fs/nfs/unlink.c
46
static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
fs/nfs/unlink.c
497
task = nfs_async_rename(dir, dir, dentry, sdentry,
fs/nfs/unlink.c
499
if (IS_ERR(task)) {
fs/nfs/unlink.c
506
error = rpc_wait_for_completion_task(task);
fs/nfs/unlink.c
508
error = task->tk_status;
fs/nfs/unlink.c
51
trace_nfs_sillyrename_unlink(data, task->tk_status);
fs/nfs/unlink.c
52
if (!NFS_PROTO(dir)->unlink_done(task, dir))
fs/nfs/unlink.c
527
rpc_put_task(task);
fs/nfs/unlink.c
53
rpc_restart_call_prepare(task);
fs/nfs/unlink.c
76
static void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
fs/nfs/unlink.c
80
NFS_PROTO(dir)->unlink_rpc_prepare(task, data);
fs/nfs/write.c
1431
void nfs_commit_prepare(struct rpc_task *task, void *calldata)
fs/nfs/write.c
1435
NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
fs/nfs/write.c
1481
static int nfs_writeback_done(struct rpc_task *task,
fs/nfs/write.c
1494
status = NFS_PROTO(inode)->write_done(task, hdr);
fs/nfs/write.c
1499
trace_nfs_writeback_done(task, hdr);
fs/nfs/write.c
1501
if (task->tk_status >= 0) {
fs/nfs/write.c
1548
static void nfs_writeback_result(struct rpc_task *task,
fs/nfs/write.c
1569
task->tk_status = -EIO;
fs/nfs/write.c
1574
if (!task->tk_ops) {
fs/nfs/write.c
1594
rpc_restart_call_prepare(task);
fs/nfs/write.c
1631
struct rpc_task *task;
fs/nfs/write.c
1639
.task = &data->task,
fs/nfs/write.c
1661
task = rpc_run_task(&task_setup_data);
fs/nfs/write.c
1662
if (IS_ERR(task))
fs/nfs/write.c
1663
return PTR_ERR(task);
fs/nfs/write.c
1665
rpc_wait_for_completion_task(task);
fs/nfs/write.c
1666
rpc_put_task(task);
fs/nfs/write.c
1789
static void nfs_commit_done(struct rpc_task *task, void *calldata)
fs/nfs/write.c
1794
NFS_PROTO(data->inode)->commit_done(task, data);
fs/nfs/write.c
1795
trace_nfs_commit_done(task, data);
fs/nfs/write.c
1802
int status = data->task.tk_status;
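Note: fs/nfs/write.c lines 1661-1666 and fs/nfs/unlink.c lines 497-527 above show the synchronous variant of the same idiom: wait for the task, harvest tk_status, then put the task. A hedged sketch, with the helper name example_run_and_wait being illustrative:

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Sketch: run an RPC task to completion and report its result. */
static int example_run_and_wait(const struct rpc_task_setup *setup)
{
	struct rpc_task *task;
	int status;

	task = rpc_run_task(setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);	/* -ERESTARTSYS on signal */
	if (status == 0)
		status = task->tk_status;	/* RPC-level result */
	rpc_put_task(task);
	return status;
}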
fs/nfsd/nfs4callback.c
1093
static void nfsd4_requeue_cb(struct rpc_task *task, struct nfsd4_callback *cb)
fs/nfsd/nfs4callback.c
1099
task->tk_status = 0;
fs/nfsd/nfs4callback.c
1230
static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
fs/nfsd/nfs4callback.c
1234
if (task->tk_status)
fs/nfsd/nfs4callback.c
1302
static bool nfsd41_cb_get_slot(struct nfsd4_callback *cb, struct rpc_task *task)
fs/nfsd/nfs4callback.c
1311
rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
fs/nfsd/nfs4callback.c
1316
rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
fs/nfsd/nfs4callback.c
1448
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
fs/nfsd/nfs4callback.c
1461
if (minorversion && !nfsd41_cb_get_slot(cb, task))
fs/nfsd/nfs4callback.c
1463
rpc_call_start(task);
fs/nfsd/nfs4callback.c
1467
static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
fs/nfsd/nfs4callback.c
1476
trace_nfsd_cb_seq_status(task, cb);
fs/nfsd/nfs4callback.c
1511
if (RPC_SIGNALLED(task) || !rpc_restart_call(task))
fs/nfsd/nfs4callback.c
1513
rpc_delay(task, 2 * HZ);
fs/nfsd/nfs4callback.c
1529
trace_nfsd_cb_free_slot(task, cb);
fs/nfsd/nfs4callback.c
1537
if (!RPC_SIGNALLED(task)) {
fs/nfsd/nfs4callback.c
1538
if (rpc_restart_call_prepare(task))
fs/nfsd/nfs4callback.c
1543
nfsd4_requeue_cb(task, cb);
fs/nfsd/nfs4callback.c
1547
static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
fs/nfsd/nfs4callback.c
1564
if (RPC_SIGNALLED(task))
fs/nfsd/nfs4callback.c
1565
nfsd4_requeue_cb(task, cb);
fs/nfsd/nfs4callback.c
1566
} else if (!nfsd4_cb_sequence_done(task, cb)) {
fs/nfsd/nfs4callback.c
1571
WARN_ONCE(task->tk_status,
fs/nfsd/nfs4callback.c
1573
cb->cb_status, task->tk_status, cb->cb_ops->opcode);
fs/nfsd/nfs4callback.c
1574
task->tk_status = cb->cb_status;
fs/nfsd/nfs4callback.c
1577
switch (cb->cb_ops->done(cb, task)) {
fs/nfsd/nfs4callback.c
1579
task->tk_status = 0;
fs/nfsd/nfs4callback.c
1580
rpc_restart_call_prepare(task);
fs/nfsd/nfs4callback.c
1583
switch (task->tk_status) {
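Note: the fs/nfsd/nfs4callback.c hits above (lines 1577-1580 in particular) suggest the convention for the per-operation ->done callback: return 0 to have the generic code clear tk_status and restart the call, nonzero to complete it. A sketch of a done handler in that style; the opcode-specific handling is elided and the name example_cb_done is hypothetical:

/* Sketch: retry transient failures after a delay, finish otherwise. */
static int example_cb_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	switch (task->tk_status) {
	case 0:
		return 1;			/* done */
	case -EAGAIN:
		rpc_delay(task, 2 * HZ);	/* back off... */
		return 0;			/* ...then the caller restarts us */
	default:
		return 1;			/* hard error: give up */
	}
}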
fs/nfsd/nfs4layouts.c
667
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
fs/nfsd/nfs4layouts.c
676
trace_nfsd_cb_layout_done(&ls->ls_stid.sc_stateid, task);
fs/nfsd/nfs4layouts.c
677
switch (task->tk_status) {
fs/nfsd/nfs4layouts.c
693
cutoff = ktime_add_ns(task->tk_start,
fs/nfsd/nfs4layouts.c
697
rpc_delay(task, HZ/100); /* 10 milliseconds */
fs/nfsd/nfs4layouts.c
720
task->tk_status = 0;
fs/nfsd/nfs4proc.c
1901
struct rpc_task *task)
fs/nfsd/nfs4proc.c
1906
trace_nfsd_cb_offload_done(&cbo->co_res.cb_stateid, task);
fs/nfsd/nfs4proc.c
1907
switch (task->tk_status) {
fs/nfsd/nfs4proc.c
1910
rpc_delay(task, HZ / 5);
fs/nfsd/nfs4state.c
3195
struct rpc_task *task)
fs/nfsd/nfs4state.c
3197
trace_nfsd_cb_recall_any_done(cb, task);
fs/nfsd/nfs4state.c
3198
switch (task->tk_status) {
fs/nfsd/nfs4state.c
3200
rpc_delay(task, 2 * HZ);
fs/nfsd/nfs4state.c
3216
nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task)
fs/nfsd/nfs4state.c
3223
trace_nfsd_cb_getattr_done(&dp->dl_stid.sc_stateid, task);
fs/nfsd/nfs4state.c
3224
ncf->ncf_cb_status = task->tk_status;
fs/nfsd/nfs4state.c
3225
switch (task->tk_status) {
fs/nfsd/nfs4state.c
3227
rpc_delay(task, 2 * HZ);
fs/nfsd/nfs4state.c
372
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
fs/nfsd/nfs4state.c
374
trace_nfsd_cb_notify_lock_done(&zero_stateid, task);
fs/nfsd/nfs4state.c
381
switch (task->tk_status) {
fs/nfsd/nfs4state.c
383
rpc_delay(task, 1 * HZ);
fs/nfsd/nfs4state.c
5430
struct rpc_task *task)
fs/nfsd/nfs4state.c
5434
trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task);
fs/nfsd/nfs4state.c
5440
switch (task->tk_status) {
fs/nfsd/nfs4state.c
5444
rpc_delay(task, 2 * HZ);
fs/nfsd/nfs4state.c
5453
rpc_delay(task, 2 * HZ);
fs/nfsd/trace.h
1728
const struct rpc_task *task,
fs/nfsd/trace.h
1731
TP_ARGS(task, cb),
fs/nfsd/trace.h
1748
__entry->task_id = task->tk_pid;
fs/nfsd/trace.h
1749
__entry->client_id = task->tk_client ?
fs/nfsd/trace.h
1750
task->tk_client->cl_clid : -1;
fs/nfsd/trace.h
1755
__entry->tk_status = task->tk_status;
fs/nfsd/trace.h
1769
const struct rpc_task *task,
fs/nfsd/trace.h
1772
TP_ARGS(task, cb),
fs/nfsd/trace.h
1788
__entry->task_id = task->tk_pid;
fs/nfsd/trace.h
1789
__entry->client_id = task->tk_client ?
fs/nfsd/trace.h
1790
task->tk_client->cl_clid : -1;
fs/nfsd/trace.h
1926
const struct rpc_task *task
fs/nfsd/trace.h
1928
TP_ARGS(stp, task),
fs/nfsd/trace.h
1941
__entry->status = task->tk_status;
fs/nfsd/trace.h
1953
const struct rpc_task *task \
fs/nfsd/trace.h
1955
TP_ARGS(stp, task))
fs/nfsd/trace.h
1966
const struct rpc_task *task
fs/nfsd/trace.h
1968
TP_ARGS(cb, task),
fs/nfsd/trace.h
1975
__entry->status = task->tk_status;
fs/notify/fanotify/fanotify_user.c
121
struct task_struct *task;
fs/notify/fanotify/fanotify_user.c
152
task = find_task_by_pid_ns(event->recv_pid,
fs/notify/fanotify/fanotify_user.c
157
task ? task->comm : NULL,
fs/nsfs.c
370
int ns_get_name(char *buf, size_t size, struct task_struct *task,
fs/nsfs.c
376
ns = ns_ops->get(task);
fs/nsfs.c
81
struct task_struct *task;
fs/nsfs.c
88
return args->ns_ops->get(args->task);
fs/nsfs.c
91
int ns_get_path(struct path *path, struct task_struct *task,
fs/nsfs.c
96
.task = task,
fs/ocfs2/cluster/tcp.c
134
u32 msgkey, struct task_struct *task, u8 node)
fs/ocfs2/cluster/tcp.c
137
nst->st_task = task;
fs/ocfs2/ocfs2_trace.h
1541
TP_PROTO(void *task, void *dc_task, unsigned long long ino,
fs/ocfs2/ocfs2_trace.h
1543
TP_ARGS(task, dc_task, ino, flags),
fs/ocfs2/ocfs2_trace.h
1545
__field(void *, task)
fs/ocfs2/ocfs2_trace.h
1551
__entry->task = task;
fs/ocfs2/ocfs2_trace.h
1556
TP_printk("%p %p %llu %u", __entry->task, __entry->dc_task,
fs/pidfs.c
281
struct task_struct *task;
fs/pidfs.c
292
task = pid_task(pid, PIDTYPE_PID);
fs/pidfs.c
293
if (!task)
fs/pidfs.c
295
else if (task->exit_state && !delay_group_leader(task))
fs/pidfs.c
339
struct task_struct *task __free(put_task) = NULL;
fs/pidfs.c
387
task = get_pid_task(pid, PIDTYPE_PID);
fs/pidfs.c
388
if (!task) {
fs/pidfs.c
399
c = get_task_cred(task);
fs/pidfs.c
404
guard(task_lock)(task);
fs/pidfs.c
405
if (task->mm) {
fs/pidfs.c
406
unsigned long flags = __mm_flags_get_dumpable(task->mm);
fs/pidfs.c
433
cgrp = task_dfl_cgroup(task);
fs/pidfs.c
449
kinfo.ppid = task_ppid_vnr(task);
fs/pidfs.c
450
kinfo.tgid = task_tgid_vnr(task);
fs/pidfs.c
451
kinfo.pid = task_pid_vnr(task);
fs/pidfs.c
507
struct task_struct *task __free(put_task) = NULL;
fs/pidfs.c
526
task = get_pid_task(pidfd_pid(file), PIDTYPE_PID);
fs/pidfs.c
527
if (!task)
fs/pidfs.c
533
scoped_guard(task_lock, task) {
fs/pidfs.c
534
nsp = task->nsproxy;
fs/pidfs.c
545
if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
fs/pidfs.c
610
user_ns = task_cred_xxx(task, user_ns);
fs/pidfs.c
621
pid_ns = task_active_pid_ns(task);
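Note: fs/pidfs.c above uses both styles of pid-to-task resolution: pid_task() (line 292), which returns an unreferenced pointer valid only under RCU, and get_pid_task() (lines 387, 526), which takes a task reference that must be dropped with put_task_struct(). A small sketch of the refcounted form; the function name example_resolve is illustrative:

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/errno.h>

/* Sketch: translate a struct pid to the task's pid number as seen in
 * the caller's pid namespace, holding a task reference across the use. */
static pid_t example_resolve(struct pid *pid)
{
	struct task_struct *task;
	pid_t nr;

	task = get_pid_task(pid, PIDTYPE_PID);	/* takes a reference */
	if (!task)
		return -ESRCH;
	nr = task_pid_vnr(task);
	put_task_struct(task);			/* always pair the put */
	return nr;
}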
fs/proc/array.c
405
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
fs/proc/array.c
408
cpumask_pr_args(&task->cpus_mask));
fs/proc/array.c
410
cpumask_pr_args(&task->cpus_mask));
fs/proc/array.c
413
static inline void task_core_dumping(struct seq_file *m, struct task_struct *task)
fs/proc/array.c
415
seq_put_decimal_ull(m, "CoreDumping:\t", !!task->signal->core_state);
fs/proc/array.c
434
struct task_struct *task)
fs/proc/array.c
439
struct pid *pid, struct task_struct *task)
fs/proc/array.c
441
struct mm_struct *mm = get_task_mm(task);
fs/proc/array.c
444
proc_task_name(m, task, true);
fs/proc/array.c
447
task_state(m, ns, pid, task);
fs/proc/array.c
451
task_core_dumping(m, task);
fs/proc/array.c
456
task_sig(m, task);
fs/proc/array.c
457
task_cap(m, task);
fs/proc/array.c
458
task_seccomp(m, task);
fs/proc/array.c
459
task_cpus_allowed(m, task);
fs/proc/array.c
460
cpuset_task_status_allowed(m, task);
fs/proc/array.c
461
task_context_switch_counts(m, task);
fs/proc/array.c
462
arch_proc_pid_thread_features(m, task);
fs/proc/array.c
467
struct pid *pid, struct task_struct *task, int whole)
fs/proc/array.c
483
int exit_code = task->exit_code;
fs/proc/array.c
484
struct signal_struct *sig = task->signal;
fs/proc/array.c
486
state = *get_task_state(task);
fs/proc/array.c
488
permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
fs/proc/array.c
489
mm = get_task_mm(task);
fs/proc/array.c
501
if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE|PF_POSTCOREDUMP))) {
fs/proc/array.c
502
if (try_get_task_stack(task)) {
fs/proc/array.c
503
eip = KSTK_EIP(task);
fs/proc/array.c
504
esp = KSTK_ESP(task);
fs/proc/array.c
505
put_task_stack(task);
fs/proc/array.c
513
if (lock_task_sighand(task, &flags)) {
fs/proc/array.c
521
num_threads = get_nr_threads(task);
fs/proc/array.c
522
collect_sigign_sigcatch(task, &sigign, &sigcatch);
fs/proc/array.c
531
sid = task_session_nr_ns(task, ns);
fs/proc/array.c
532
ppid = task_ppid_nr_ns(task, ns);
fs/proc/array.c
533
pgid = task_pgrp_nr_ns(task, ns);
fs/proc/array.c
535
unlock_task_sighand(task, &flags);
fs/proc/array.c
539
wchan = !task_is_running(task);
fs/proc/array.c
566
thread_group_cputime_adjusted(task, &utime, &stime);
fs/proc/array.c
568
task_cputime_adjusted(task, &utime, &stime);
fs/proc/array.c
569
min_flt = task->min_flt;
fs/proc/array.c
570
maj_flt = task->maj_flt;
fs/proc/array.c
571
gtime = task_gtime(task);
fs/proc/array.c
576
priority = task_prio(task);
fs/proc/array.c
577
nice = task_nice(task);
fs/proc/array.c
581
nsec_to_clock_t(timens_add_boottime_ns(task->start_boottime));
fs/proc/array.c
585
proc_task_name(m, task, false);
fs/proc/array.c
593
seq_put_decimal_ull(m, " ", task->flags);
fs/proc/array.c
619
seq_put_decimal_ull(m, " ", task->pending.signal.sig[0] & 0x7fffffffUL);
fs/proc/array.c
620
seq_put_decimal_ull(m, " ", task->blocked.sig[0] & 0x7fffffffUL);
fs/proc/array.c
635
seq_put_decimal_ll(m, " ", task->exit_signal);
fs/proc/array.c
636
seq_put_decimal_ll(m, " ", task_cpu(task));
fs/proc/array.c
637
seq_put_decimal_ull(m, " ", task->rt_priority);
fs/proc/array.c
638
seq_put_decimal_ull(m, " ", task->policy);
fs/proc/array.c
639
seq_put_decimal_ull(m, " ", delayacct_blkio_ticks(task));
fs/proc/array.c
666
struct pid *pid, struct task_struct *task)
fs/proc/array.c
668
return do_task_stat(m, ns, pid, task, 0);
fs/proc/array.c
672
struct pid *pid, struct task_struct *task)
fs/proc/array.c
674
return do_task_stat(m, ns, pid, task, 1);
fs/proc/array.c
678
struct pid *pid, struct task_struct *task)
fs/proc/array.c
680
struct mm_struct *mm = get_task_mm(task);
fs/proc/array.c
716
struct task_struct *start, *task;
fs/proc/array.c
730
task = pid_task(pid_prev, PIDTYPE_PID);
fs/proc/array.c
731
if (task && task->real_parent == start &&
fs/proc/array.c
732
!(list_empty(&task->sibling))) {
fs/proc/array.c
733
if (list_is_last(&task->sibling, &start->children))
fs/proc/array.c
735
task = list_first_entry(&task->sibling,
fs/proc/array.c
737
pid = get_pid(task_pid(task));
fs/proc/array.c
757
list_for_each_entry(task, &start->children, sibling) {
fs/proc/array.c
759
pid = get_pid(task_pid(task));
fs/proc/base.c
1106
struct task_struct *task = get_proc_task(file_inode(file));
fs/proc/base.c
1111
if (!task)
fs/proc/base.c
1113
if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
fs/proc/base.c
1116
oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
fs/proc/base.c
1118
put_task_struct(task);
fs/proc/base.c
1128
struct task_struct *task;
fs/proc/base.c
1131
task = get_proc_task(file_inode(file));
fs/proc/base.c
1132
if (!task)
fs/proc/base.c
1137
if (oom_adj < task->signal->oom_score_adj &&
fs/proc/base.c
1147
current->comm, task_pid_nr(current), task_pid_nr(task),
fs/proc/base.c
1148
task_pid_nr(task));
fs/proc/base.c
1150
if ((short)oom_adj < task->signal->oom_score_adj_min &&
fs/proc/base.c
1162
if (!task->vfork_done) {
fs/proc/base.c
1163
struct task_struct *p = find_lock_task_mm(task);
fs/proc/base.c
1174
task->signal->oom_score_adj = oom_adj;
fs/proc/base.c
1176
task->signal->oom_score_adj_min = (short)oom_adj;
fs/proc/base.c
1177
trace_oom_score_adj_update(task);
fs/proc/base.c
1184
if (same_thread_group(task, p))
fs/proc/base.c
1204
put_task_struct(task);
fs/proc/base.c
1264
struct task_struct *task = get_proc_task(file_inode(file));
fs/proc/base.c
1269
if (!task)
fs/proc/base.c
1271
oom_score_adj = task->signal->oom_score_adj;
fs/proc/base.c
1272
put_task_struct(task);
fs/proc/base.c
1317
struct task_struct *task = get_proc_task(inode);
fs/proc/base.c
1321
if (!task)
fs/proc/base.c
1325
audit_get_loginuid(task)));
fs/proc/base.c
1326
put_task_struct(task);
fs/proc/base.c
1383
struct task_struct *task = get_proc_task(inode);
fs/proc/base.c
1387
if (!task)
fs/proc/base.c
1390
audit_get_sessionid(task));
fs/proc/base.c
1391
put_task_struct(task);
fs/proc/base.c
1405
struct task_struct *task = get_proc_task(file_inode(file));
fs/proc/base.c
1410
if (!task)
fs/proc/base.c
1412
make_it_fail = task->make_it_fail;
fs/proc/base.c
1413
put_task_struct(task);
fs/proc/base.c
1423
struct task_struct *task;
fs/proc/base.c
1441
task = get_proc_task(file_inode(file));
fs/proc/base.c
1442
if (!task)
fs/proc/base.c
1444
task->make_it_fail = make_it_fail;
fs/proc/base.c
1445
put_task_struct(task);
fs/proc/base.c
1459
struct task_struct *task;
fs/proc/base.c
1467
task = get_proc_task(file_inode(file));
fs/proc/base.c
1468
if (!task)
fs/proc/base.c
1470
task->fail_nth = n;
fs/proc/base.c
1471
put_task_struct(task);
fs/proc/base.c
1479
struct task_struct *task;
fs/proc/base.c
1483
task = get_proc_task(file_inode(file));
fs/proc/base.c
1484
if (!task)
fs/proc/base.c
1486
len = snprintf(numbuf, sizeof(numbuf), "%u\n", task->fail_nth);
fs/proc/base.c
1487
put_task_struct(task);
fs/proc/base.c
1782
struct task_struct *task;
fs/proc/base.c
1785
task = get_proc_task(d_inode(dentry));
fs/proc/base.c
1786
if (!task)
fs/proc/base.c
1788
exe_file = get_task_exe_file(task);
fs/proc/base.c
1789
put_task_struct(task);
fs/proc/base.c
1875
void task_dump_owner(struct task_struct *task, umode_t mode,
fs/proc/base.c
1885
if (unlikely(task->flags & PF_KTHREAD)) {
fs/proc/base.c
1893
cred = __task_cred(task);
fs/proc/base.c
1908
task_lock(task);
fs/proc/base.c
1909
mm = task->mm;
fs/proc/base.c
1927
task_unlock(task);
fs/proc/base.c
1945
struct task_struct *task, umode_t mode)
fs/proc/base.c
1967
pid = get_task_pid(task, PIDTYPE_PID);
fs/proc/base.c
1974
task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);
fs/proc/base.c
1975
security_task_to_inode(task, inode);
fs/proc/base.c
1998
struct task_struct *task, umode_t mode)
fs/proc/base.c
2004
inode = proc_pid_make_inode(sb, task, mode);
fs/proc/base.c
2023
struct task_struct *task;
fs/proc/base.c
2030
task = pid_task(proc_pid(inode), PIDTYPE_PID);
fs/proc/base.c
2031
if (task) {
fs/proc/base.c
2032
if (!has_pid_permissions(fs_info, task, HIDEPID_INVISIBLE)) {
fs/proc/base.c
2040
task_dump_owner(task, inode->i_mode, &stat->uid, &stat->gid);
fs/proc/base.c
2051
void pid_update_inode(struct task_struct *task, struct inode *inode)
fs/proc/base.c
2053
task_dump_owner(task, inode->i_mode, &inode->i_uid, &inode->i_gid);
fs/proc/base.c
2056
security_task_to_inode(task, inode);
fs/proc/base.c
2068
struct task_struct *task;
fs/proc/base.c
2075
task = pid_task(proc_pid(inode), PIDTYPE_PID);
fs/proc/base.c
2077
if (task) {
fs/proc/base.c
2078
pid_update_inode(task, inode);
fs/proc/base.c
208
static int get_task_root(struct task_struct *task, struct path *root)
fs/proc/base.c
212
task_lock(task);
fs/proc/base.c
2122
instantiate_t instantiate, struct task_struct *task, const void *ptr)
fs/proc/base.c
213
if (task->fs) {
fs/proc/base.c
214
get_fs_root(task->fs, root);
fs/proc/base.c
2141
res = instantiate(child, task, ptr);
fs/proc/base.c
217
task_unlock(task);
fs/proc/base.c
2207
struct task_struct *task;
fs/proc/base.c
2215
task = get_proc_task(inode);
fs/proc/base.c
2216
if (!task)
fs/proc/base.c
2219
mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
fs/proc/base.c
223
struct task_struct *task = get_proc_task(d_inode(dentry));
fs/proc/base.c
2235
task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);
fs/proc/base.c
2237
security_task_to_inode(task, inode);
fs/proc/base.c
2242
put_task_struct(task);
fs/proc/base.c
2257
struct task_struct *task;
fs/proc/base.c
226
if (task) {
fs/proc/base.c
2262
task = get_proc_task(d_inode(dentry));
fs/proc/base.c
2263
if (!task)
fs/proc/base.c
2266
mm = get_task_mm(task);
fs/proc/base.c
2267
put_task_struct(task);
fs/proc/base.c
227
task_lock(task);
fs/proc/base.c
228
if (task->fs) {
fs/proc/base.c
229
get_fs_pwd(task->fs, path);
fs/proc/base.c
232
task_unlock(task);
fs/proc/base.c
2327
struct task_struct *task, const void *ptr)
fs/proc/base.c
233
put_task_struct(task);
fs/proc/base.c
2333
inode = proc_pid_make_inode(dentry->d_sb, task, S_IFLNK |
fs/proc/base.c
2354
struct task_struct *task;
fs/proc/base.c
2359
task = get_proc_task(dir);
fs/proc/base.c
2360
if (!task)
fs/proc/base.c
2364
if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
fs/proc/base.c
2371
mm = get_task_mm(task);
fs/proc/base.c
2385
result = proc_map_files_instantiate(dentry, task,
fs/proc/base.c
2393
put_task_struct(task);
fs/proc/base.c
240
struct task_struct *task = get_proc_task(d_inode(dentry));
fs/proc/base.c
2408
struct task_struct *task;
fs/proc/base.c
2419
task = get_proc_task(file_inode(file));
fs/proc/base.c
2420
if (!task)
fs/proc/base.c
2424
if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
fs/proc/base.c
243
if (task) {
fs/proc/base.c
2431
mm = get_task_mm(task);
fs/proc/base.c
244
result = get_task_root(task, path);
fs/proc/base.c
245
put_task_struct(task);
fs/proc/base.c
2485
task,
fs/proc/base.c
2492
put_task_struct(task);
fs/proc/base.c
2507
struct task_struct *task;
fs/proc/base.c
2515
tp->task = get_pid_task(tp->pid, PIDTYPE_PID);
fs/proc/base.c
2516
if (!tp->task)
fs/proc/base.c
2520
return seq_hlist_start_rcu(&tp->task->signal->posix_timers, *pos);
fs/proc/base.c
2527
return seq_hlist_next_rcu(v, &tp->task->signal->posix_timers, pos);
fs/proc/base.c
2534
if (tp->task) {
fs/proc/base.c
2535
put_task_struct(tp->task);
fs/proc/base.c
2536
tp->task = NULL;
fs/proc/base.c
2691
struct task_struct *task, const void *ptr)
fs/proc/base.c
2697
inode = proc_pid_make_inode(dentry->d_sb, task, p->mode);
fs/proc/base.c
2709
pid_update_inode(task, inode);
fs/proc/base.c
2718
struct task_struct *task = get_proc_task(dir);
fs/proc/base.c
2721
if (!task)
fs/proc/base.c
2732
res = proc_pident_instantiate(dentry, task, p);
fs/proc/base.c
2736
put_task_struct(task);
fs/proc/base.c
2744
struct task_struct *task = get_proc_task(file_inode(file));
fs/proc/base.c
2747
if (!task)
fs/proc/base.c
2758
proc_pident_instantiate, task, p))
fs/proc/base.c
2763
put_task_struct(task);
fs/proc/base.c
2781
struct task_struct *task = get_proc_task(inode);
fs/proc/base.c
2783
if (!task)
fs/proc/base.c
2786
length = security_getprocattr(task, PROC_I(inode)->op.lsmid,
fs/proc/base.c
2789
put_task_struct(task);
fs/proc/base.c
2800
struct task_struct *task;
fs/proc/base.c
2809
task = pid_task(proc_pid(inode), PIDTYPE_PID);
fs/proc/base.c
2810
if (!task) {
fs/proc/base.c
2815
if (current != task) {
fs/proc/base.c
2956
struct task_struct *task = get_proc_task(file_inode(file));
fs/proc/base.c
2962
if (!task)
fs/proc/base.c
2966
mm = get_task_mm(task);
fs/proc/base.c
2977
put_task_struct(task);
fs/proc/base.c
2987
struct task_struct *task;
fs/proc/base.c
2999
task = get_proc_task(file_inode(file));
fs/proc/base.c
3000
if (!task)
fs/proc/base.c
3003
mm = get_task_mm(task);
fs/proc/base.c
3017
put_task_struct(task);
fs/proc/base.c
3032
static int do_io_accounting(struct task_struct *task, struct seq_file *m, int whole)
fs/proc/base.c
3037
result = down_read_killable(&task->signal->exec_update_lock);
fs/proc/base.c
3041
if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
fs/proc/base.c
3047
struct signal_struct *sig = task->signal;
fs/proc/base.c
3058
acct = task->ioac;
fs/proc/base.c
3079
up_read(&task->signal->exec_update_lock);
fs/proc/base.c
3084
struct pid *pid, struct task_struct *task)
fs/proc/base.c
3086
return do_io_accounting(task, m, 0);
fs/proc/base.c
3090
struct pid *pid, struct task_struct *task)
fs/proc/base.c
3092
return do_io_accounting(task, m, 1);
fs/proc/base.c
3101
struct task_struct *task;
fs/proc/base.c
3105
task = get_proc_task(inode);
fs/proc/base.c
3106
if (task) {
fs/proc/base.c
3108
ns = get_user_ns(task_cred_xxx(task, user_ns));
fs/proc/base.c
3110
put_task_struct(task);
fs/proc/base.c
3179
struct task_struct *task;
fs/proc/base.c
3183
task = get_proc_task(inode);
fs/proc/base.c
3184
if (task) {
fs/proc/base.c
3186
ns = get_user_ns(task_cred_xxx(task, user_ns));
fs/proc/base.c
3188
put_task_struct(task);
fs/proc/base.c
3229
struct pid *pid, struct task_struct *task)
fs/proc/base.c
3231
int err = lock_trace(task);
fs/proc/base.c
3233
seq_printf(m, "%08x\n", task->personality);
fs/proc/base.c
3234
unlock_trace(task);
fs/proc/base.c
3241
struct pid *pid, struct task_struct *task)
fs/proc/base.c
3243
seq_printf(m, "%d\n", task->patch_state);
fs/proc/base.c
3250
struct pid *pid, struct task_struct *task)
fs/proc/base.c
3254
mm = get_task_mm(task);
fs/proc/base.c
3263
struct pid *pid, struct task_struct *task)
fs/proc/base.c
3268
mm = get_task_mm(task);
fs/proc/base.c
3293
struct pid *pid, struct task_struct *task)
fs/proc/base.c
3296
(task->prev_lowest_stack & (THREAD_SIZE - 1));
fs/proc/base.c
3298
(task->lowest_stack & (THREAD_SIZE - 1));
fs/proc/base.c
3485
struct task_struct *task, const void *ptr)
fs/proc/base.c
3489
inode = proc_pid_make_base_inode(dentry->d_sb, task,
fs/proc/base.c
3499
pid_update_inode(task, inode);
fs/proc/base.c
3506
struct task_struct *task;
fs/proc/base.c
3519
task = find_task_by_pid_ns(tgid, ns);
fs/proc/base.c
3520
if (task)
fs/proc/base.c
3521
get_task_struct(task);
fs/proc/base.c
3523
if (!task)
fs/proc/base.c
3528
if (!has_pid_permissions(fs_info, task, HIDEPID_NO_ACCESS))
fs/proc/base.c
3532
result = proc_pid_instantiate(dentry, task, NULL);
fs/proc/base.c
3534
put_task_struct(task);
fs/proc/base.c
3545
struct task_struct *task;
fs/proc/base.c
3551
if (iter.task)
fs/proc/base.c
3552
put_task_struct(iter.task);
fs/proc/base.c
3555
iter.task = NULL;
fs/proc/base.c
3559
iter.task = pid_task(pid, PIDTYPE_TGID);
fs/proc/base.c
3560
if (!iter.task) {
fs/proc/base.c
3564
get_task_struct(iter.task);
fs/proc/base.c
3594
iter.task = NULL;
fs/proc/base.c
3596
iter.task;
fs/proc/base.c
3602
if (!has_pid_permissions(fs_info, iter.task, HIDEPID_INVISIBLE))
fs/proc/base.c
3608
proc_pid_instantiate, iter.task, NULL)) {
fs/proc/base.c
3609
put_task_struct(iter.task);
fs/proc/base.c
3633
struct task_struct *task;
fs/proc/base.c
3635
task = get_proc_task(inode);
fs/proc/base.c
3636
if (!task)
fs/proc/base.c
3638
is_same_tgroup = same_thread_group(current, task);
fs/proc/base.c
3639
put_task_struct(task);
fs/proc/base.c
3786
struct task_struct *task, const void *ptr)
fs/proc/base.c
3789
inode = proc_pid_make_base_inode(dentry->d_sb, task,
fs/proc/base.c
3799
pid_update_inode(task, inode);
fs/proc/base.c
3806
struct task_struct *task;
fs/proc/base.c
3823
task = find_task_by_pid_ns(tid, ns);
fs/proc/base.c
3824
if (task)
fs/proc/base.c
3825
get_task_struct(task);
fs/proc/base.c
3827
if (!task)
fs/proc/base.c
3829
if (!same_thread_group(leader, task))
fs/proc/base.c
3832
result = proc_task_instantiate(dentry, task, NULL);
fs/proc/base.c
3834
put_task_struct(task);
fs/proc/base.c
3856
struct task_struct *pos, *task;
fs/proc/base.c
3863
task = pid_task(pid, PIDTYPE_PID);
fs/proc/base.c
3864
if (!task)
fs/proc/base.c
3870
if (pos && same_thread_group(pos, task))
fs/proc/base.c
3875
if (nr >= get_nr_threads(task))
fs/proc/base.c
3881
for_each_thread(task, pos) {
fs/proc/base.c
3919
struct task_struct *task;
fs/proc/base.c
3935
for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns);
fs/proc/base.c
3936
task;
fs/proc/base.c
3937
task = next_tid(task), ctx->pos++) {
fs/proc/base.c
3941
tid = task_pid_nr_ns(task, ns);
fs/proc/base.c
3946
proc_task_instantiate, task, NULL)) {
fs/proc/base.c
3950
put_task_struct(task);
fs/proc/base.c
422
struct pid *pid, struct task_struct *task)
fs/proc/base.c
427
if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
fs/proc/base.c
430
wchan = get_wchan(task);
fs/proc/base.c
442
static int lock_trace(struct task_struct *task)
fs/proc/base.c
444
int err = down_read_killable(&task->signal->exec_update_lock);
fs/proc/base.c
447
if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
fs/proc/base.c
448
up_read(&task->signal->exec_update_lock);
fs/proc/base.c
454
static void unlock_trace(struct task_struct *task)
fs/proc/base.c
456
up_read(&task->signal->exec_update_lock);
fs/proc/base.c
464
struct pid *pid, struct task_struct *task)
fs/proc/base.c
488
err = lock_trace(task);
fs/proc/base.c
492
nr_entries = stack_trace_save_tsk(task, entries,
fs/proc/base.c
499
unlock_trace(task);
fs/proc/base.c
512
struct pid *pid, struct task_struct *task)
fs/proc/base.c
518
(unsigned long long)task->se.sum_exec_runtime,
fs/proc/base.c
519
(unsigned long long)task->sched_info.run_delay,
fs/proc/base.c
520
task->sched_info.pcount);
fs/proc/base.c
531
struct task_struct *task = get_proc_task(inode);
fs/proc/base.c
533
if (!task)
fs/proc/base.c
537
struct latency_record *lr = &task->latency_record[i];
fs/proc/base.c
553
put_task_struct(task);
fs/proc/base.c
565
struct task_struct *task = get_proc_task(file_inode(file));
fs/proc/base.c
567
if (!task)
fs/proc/base.c
569
clear_tsk_latency_tracing(task);
fs/proc/base.c
570
put_task_struct(task);
fs/proc/base.c
586
struct pid *pid, struct task_struct *task)
fs/proc/base.c
592
badness = oom_badness(task, totalpages);
fs/proc/base.c
632
struct pid *pid, struct task_struct *task)
fs/proc/base.c
639
if (!lock_task_sighand(task, &flags))
fs/proc/base.c
641
memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS);
fs/proc/base.c
642
unlock_task_sighand(task, &flags);
fs/proc/base.c
676
struct pid *pid, struct task_struct *task)
fs/proc/base.c
682
res = lock_trace(task);
fs/proc/base.c
686
if (task_current_syscall(task, &info))
fs/proc/base.c
697
unlock_trace(task);
fs/proc/base.c
710
struct task_struct *task;
fs/proc/base.c
716
task = get_proc_task(inode);
fs/proc/base.c
717
if (task) {
fs/proc/base.c
718
allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
fs/proc/base.c
719
put_task_struct(task);
fs/proc/base.c
746
struct task_struct *task,
fs/proc/base.c
755
return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
fs/proc/base.c
761
return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
fs/proc/base.c
769
struct task_struct *task;
fs/proc/base.c
772
task = get_proc_task(inode);
fs/proc/base.c
773
if (!task)
fs/proc/base.c
775
has_perms = has_pid_permissions(fs_info, task, HIDEPID_NO_ACCESS);
fs/proc/base.c
776
put_task_struct(task);
fs/proc/base.c
805
struct task_struct *task;
fs/proc/base.c
808
task = get_pid_task(pid, PIDTYPE_PID);
fs/proc/base.c
809
if (!task)
fs/proc/base.c
812
ret = PROC_I(inode)->op.proc_show(m, ns, pid, task);
fs/proc/base.c
814
put_task_struct(task);
fs/proc/base.c
839
struct task_struct *task = get_proc_task(inode);
fs/proc/base.c
842
if (!task)
fs/proc/base.c
845
mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
fs/proc/base.c
846
put_task_struct(task);
fs/proc/base.c
879
struct task_struct *task;
fs/proc/base.c
886
task = get_proc_task(file_inode(file));
fs/proc/base.c
887
if (task) {
fs/proc/base.c
888
ptrace_active = READ_ONCE(task->ptrace) &&
fs/proc/base.c
889
READ_ONCE(task->mm) == mm &&
fs/proc/base.c
890
READ_ONCE(task->parent) == current;
fs/proc/base.c
891
put_task_struct(task);
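Note: nearly every fs/proc/base.c hit above follows one handler shape: resolve the inode's task with get_proc_task(), optionally gate on ptrace_may_access(), do the work, and unconditionally put_task_struct(). A sketch of that shape; get_proc_task() is private to fs/proc (declared in fs/proc/internal.h), and example_proc_show is a hypothetical name:

/* Sketch: the canonical /proc per-task handler skeleton. */
static int example_proc_show(struct seq_file *m, struct inode *inode)
{
	struct task_struct *task = get_proc_task(inode);
	int ret = -ESRCH;

	if (!task)
		return ret;	/* task already exited and was reaped */
	if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		ret = 0;	/* ...emit data about 'task' into 'm' here... */
	else
		ret = -EACCES;
	put_task_struct(task);	/* balance get_proc_task() on every path */
	return ret;
}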
fs/proc/fd.c
115
static bool tid_fd_mode(struct task_struct *task, unsigned fd, fmode_t *mode)
fs/proc/fd.c
119
file = fget_task(task, fd);
fs/proc/fd.c
127
static void tid_fd_update_inode(struct task_struct *task, struct inode *inode,
fs/proc/fd.c
130
task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);
fs/proc/fd.c
140
security_task_to_inode(task, inode);
fs/proc/fd.c
146
struct task_struct *task;
fs/proc/fd.c
154
task = get_proc_task(inode);
fs/proc/fd.c
157
if (task) {
fs/proc/fd.c
159
if (tid_fd_mode(task, fd, &f_mode)) {
fs/proc/fd.c
160
tid_fd_update_inode(task, inode, f_mode);
fs/proc/fd.c
161
put_task_struct(task);
fs/proc/fd.c
164
put_task_struct(task);
fs/proc/fd.c
176
struct task_struct *task;
fs/proc/fd.c
179
task = get_proc_task(d_inode(dentry));
fs/proc/fd.c
180
if (task) {
fs/proc/fd.c
184
fd_file = fget_task(task, fd);
fs/proc/fd.c
191
put_task_struct(task);
fs/proc/fd.c
203
struct task_struct *task, const void *ptr)
fs/proc/fd.c
209
inode = proc_pid_make_inode(dentry->d_sb, task, S_IFLNK);
fs/proc/fd.c
220
tid_fd_update_inode(task, inode, data->mode);
fs/proc/fd.c
230
struct task_struct *task = get_proc_task(dir);
fs/proc/fd.c
234
if (!task)
fs/proc/fd.c
238
if (!tid_fd_mode(task, data.fd, &data.mode))
fs/proc/fd.c
241
result = instantiate(dentry, task, &data);
fs/proc/fd.c
243
put_task_struct(task);
fs/proc/fd.c
28
struct task_struct *task;
fs/proc/fd.c
30
task = get_proc_task(m->private);
fs/proc/fd.c
31
if (!task)
fs/proc/fd.c
34
task_lock(task);
fs/proc/fd.c
35
files = task->files;
fs/proc/fd.c
368
struct task_struct *task, const void *ptr)
fs/proc/fd.c
374
inode = proc_pid_make_inode(dentry->d_sb, task, S_IFREG | S_IRUGO);
fs/proc/fd.c
384
tid_fd_update_inode(task, inode, 0);
fs/proc/fd.c
51
task_unlock(task);
fs/proc/fd.c
52
put_task_struct(task);
fs/proc/fd.c
89
struct task_struct *task = get_proc_task(inode);
fs/proc/fd.c
91
if (!task)
fs/proc/fd.c
94
allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
fs/proc/fd.c
95
put_task_struct(task);
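Note: tid_fd_mode() above (fs/proc/fd.c lines 115-119) is built on fget_task(), which looks up an fd in another task's file table and returns a referenced struct file. A sketch of the same pattern; example_fd_mode is a hypothetical stand-in for tid_fd_mode():

#include <linux/file.h>
#include <linux/fs.h>

/* Sketch: report the open mode of task's fd, if it exists. */
static bool example_fd_mode(struct task_struct *task, unsigned int fd,
			    fmode_t *mode)
{
	struct file *file = fget_task(task, fd);	/* takes a file ref */

	if (!file)
		return false;
	*mode = file->f_mode;
	fput(file);					/* drop the ref */
	return true;
}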
fs/proc/internal.h
113
struct task_struct *task);
fs/proc/internal.h
152
void task_dump_owner(struct task_struct *task, umode_t mode,
fs/proc/internal.h
393
struct task_struct *task;
fs/proc/namespaces.c
105
inode = proc_pid_make_inode(dentry->d_sb, task, S_IFLNK | S_IRWXUGO);
fs/proc/namespaces.c
112
pid_update_inode(task, inode);
fs/proc/namespaces.c
119
struct task_struct *task = get_proc_task(file_inode(file));
fs/proc/namespaces.c
122
if (!task)
fs/proc/namespaces.c
134
proc_ns_instantiate, task, ops))
fs/proc/namespaces.c
140
put_task_struct(task);
fs/proc/namespaces.c
153
struct task_struct *task = get_proc_task(dir);
fs/proc/namespaces.c
158
if (!task)
fs/proc/namespaces.c
171
res = proc_ns_instantiate(dentry, task, *entry);
fs/proc/namespaces.c
173
put_task_struct(task);
fs/proc/namespaces.c
47
struct task_struct *task;
fs/proc/namespaces.c
54
task = get_proc_task(inode);
fs/proc/namespaces.c
55
if (!task)
fs/proc/namespaces.c
58
if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
fs/proc/namespaces.c
61
error = ns_get_path(&ns_path, task, ns_ops);
fs/proc/namespaces.c
67
put_task_struct(task);
fs/proc/namespaces.c
75
struct task_struct *task;
fs/proc/namespaces.c
79
task = get_proc_task(inode);
fs/proc/namespaces.c
80
if (!task)
fs/proc/namespaces.c
83
if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
fs/proc/namespaces.c
84
res = ns_get_name(name, sizeof(name), task, ns_ops);
fs/proc/namespaces.c
88
put_task_struct(task);
fs/proc/namespaces.c
99
struct task_struct *task, const void *ptr)
fs/proc/proc_net.c
270
struct task_struct *task;
fs/proc/proc_net.c
275
task = pid_task(proc_pid(dir), PIDTYPE_PID);
fs/proc/proc_net.c
276
if (task != NULL) {
fs/proc/proc_net.c
277
task_lock(task);
fs/proc/proc_net.c
278
ns = task->nsproxy;
fs/proc/proc_net.c
281
task_unlock(task);
fs/proc/task_mmu.c
113
struct task_struct *task = priv->task;
fs/proc/task_mmu.c
115
task_lock(task);
fs/proc/task_mmu.c
116
priv->task_mempolicy = get_task_policy(task);
fs/proc/task_mmu.c
118
task_unlock(task);
fs/proc/task_mmu.c
1407
priv->task = get_proc_task(priv->inode);
fs/proc/task_mmu.c
1408
if (!priv->task)
fs/proc/task_mmu.c
1513
put_task_struct(priv->task);
fs/proc/task_mmu.c
1514
priv->task = NULL;
fs/proc/task_mmu.c
1771
struct task_struct *task;
fs/proc/task_mmu.c
1790
task = get_proc_task(file_inode(file));
fs/proc/task_mmu.c
1791
if (!task)
fs/proc/task_mmu.c
1793
mm = get_task_mm(task);
fs/proc/task_mmu.c
1838
put_task_struct(task);
fs/proc/task_mmu.c
286
priv->task = get_proc_task(priv->inode);
fs/proc/task_mmu.c
287
if (!priv->task)
fs/proc/task_mmu.c
293
put_task_struct(priv->task);
fs/proc/task_mmu.c
294
priv->task = NULL;
fs/proc/task_mmu.c
300
put_task_struct(priv->task);
fs/proc/task_mmu.c
301
priv->task = NULL;
fs/proc/task_mmu.c
333
if (!priv->task)
fs/proc/task_mmu.c
339
put_task_struct(priv->task);
fs/proc/task_mmu.c
340
priv->task = NULL;
fs/proc/task_nommu.c
203
priv->task = get_proc_task(priv->inode);
fs/proc/task_nommu.c
204
if (!priv->task)
fs/proc/task_nommu.c
209
put_task_struct(priv->task);
fs/proc/task_nommu.c
210
priv->task = NULL;
fs/proc/task_nommu.c
216
put_task_struct(priv->task);
fs/proc/task_nommu.c
217
priv->task = NULL;
fs/proc/task_nommu.c
231
if (!priv->task)
fs/proc/task_nommu.c
236
put_task_struct(priv->task);
fs/proc/task_nommu.c
237
priv->task = NULL;
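Note: the fs/proc/task_mmu.c and task_nommu.c hits above lean on get_task_mm(), which returns the task's mm_struct with a reference held (or NULL for kernel threads and exited tasks); each successful call pairs with mmput(). A sketch, with example_total_vm as an illustrative name:

#include <linux/mm.h>
#include <linux/sched/mm.h>

/* Sketch: read a field of another task's mm under a proper reference. */
static unsigned long example_total_vm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	unsigned long total = 0;

	if (mm) {
		total = mm->total_vm;	/* pages mapped by this mm */
		mmput(mm);		/* balance get_task_mm() */
	}
	return total;
}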
fs/proc_namespace.c
237
struct task_struct *task = get_proc_task(inode);
fs/proc_namespace.c
245
if (!task)
fs/proc_namespace.c
248
task_lock(task);
fs/proc_namespace.c
249
nsp = task->nsproxy;
fs/proc_namespace.c
251
task_unlock(task);
fs/proc_namespace.c
252
put_task_struct(task);
fs/proc_namespace.c
257
if (!task->fs) {
fs/proc_namespace.c
258
task_unlock(task);
fs/proc_namespace.c
259
put_task_struct(task);
fs/proc_namespace.c
263
get_fs_root(task->fs, &root);
fs/proc_namespace.c
264
task_unlock(task);
fs/proc_namespace.c
265
put_task_struct(task);
fs/resctrl/rdtgroup.c
600
static void _update_task_closid_rmid(void *task)
fs/resctrl/rdtgroup.c
606
if (task == current)
fs/resctrl/rdtgroup.c
607
resctrl_arch_sched_in(task);
fs/resctrl/rdtgroup.c
718
static int rdtgroup_task_write_permission(struct task_struct *task,
fs/resctrl/rdtgroup.c
721
const struct cred *tcred = get_task_cred(task);
fs/resctrl/rdtgroup.c
732
rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
fs/smb/client/connect.c
1698
struct task_struct *task;
fs/smb/client/connect.c
1739
task = xchg(&server->tsk, NULL);
fs/smb/client/connect.c
1740
if (task)
fs/smb/client/connect.c
1741
send_sig(SIGKILL, task, 1);
fs/xfs/xfs_zone_space_resv.c
125
.task = current,
fs/xfs/xfs_zone_space_resv.c
33
struct task_struct *task;
fs/xfs/xfs_zone_space_resv.c
76
wake_up_process(reservation->task);
fs/xfs/xfs_zone_space_resv.c
99
wake_up_process(reservation->task);
include/asm-generic/current.h
8
#define get_current() (current_thread_info()->task)
include/asm-generic/syscall.h
115
void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
include/asm-generic/syscall.h
131
void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
include/asm-generic/syscall.h
147
void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
include/asm-generic/syscall.h
163
int syscall_get_arch(struct task_struct *task);
include/asm-generic/syscall.h
38
int syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
include/asm-generic/syscall.h
52
void syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr);
include/asm-generic/syscall.h
70
void syscall_rollback(struct task_struct *task, struct pt_regs *regs);
include/asm-generic/syscall.h
83
long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
include/asm-generic/syscall.h
97
long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
include/linux/audit.h
313
extern int audit_alloc(struct task_struct *task);
include/linux/audit.h
314
extern void __audit_free(struct task_struct *task);
include/linux/audit.h
332
static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx)
include/linux/audit.h
334
task->audit_context = ctx;
include/linux/audit.h
347
static inline void audit_free(struct task_struct *task)
include/linux/audit.h
349
if (unlikely(task->audit_context))
include/linux/audit.h
350
__audit_free(task);
include/linux/audit.h
595
static inline int audit_alloc(struct task_struct *task)
include/linux/audit.h
599
static inline void audit_free(struct task_struct *task)
include/linux/audit.h
615
static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx)
include/linux/bpf.h
2834
} task;
include/linux/bpf.h
3037
void bpf_task_storage_free(struct task_struct *task);
include/linux/bpf.h
3318
static inline void bpf_task_storage_free(struct task_struct *task)
include/linux/cgroup-defs.h
789
int (*can_fork)(struct task_struct *task,
include/linux/cgroup-defs.h
791
void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
include/linux/cgroup-defs.h
792
void (*fork)(struct task_struct *task);
include/linux/cgroup-defs.h
793
void (*exit)(struct task_struct *task);
include/linux/cgroup-defs.h
794
void (*release)(struct task_struct *task);
include/linux/cgroup.h
295
#define cgroup_taskset_for_each(task, dst_css, tset) \
include/linux/cgroup.h
296
for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
include/linux/cgroup.h
297
(task); \
include/linux/cgroup.h
298
(task) = cgroup_taskset_next((tset), &(dst_css)))
include/linux/cgroup.h
416
#define task_css_set_check(task, __c) \
include/linux/cgroup.h
417
rcu_dereference_check((task)->cgroups, \
include/linux/cgroup.h
421
((task)->flags & PF_EXITING) || (__c))
include/linux/cgroup.h
423
#define task_css_set_check(task, __c) \
include/linux/cgroup.h
424
rcu_dereference((task)->cgroups)
include/linux/cgroup.h
436
#define task_css_check(task, subsys_id, __c) \
include/linux/cgroup.h
437
task_css_set_check((task), (__c))->subsys[(subsys_id)]
include/linux/cgroup.h
445
static inline struct css_set *task_css_set(struct task_struct *task)
include/linux/cgroup.h
447
return task_css_set_check(task, false);
include/linux/cgroup.h
457
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
include/linux/cgroup.h
460
return task_css_check(task, subsys_id, false);
include/linux/cgroup.h
473
task_get_css(struct task_struct *task, int subsys_id)
include/linux/cgroup.h
479
css = task_css(task, subsys_id);
include/linux/cgroup.h
502
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
include/linux/cgroup.h
504
return task_css_check(task, subsys_id, true) ==
include/linux/cgroup.h
508
static inline struct cgroup *task_cgroup(struct task_struct *task,
include/linux/cgroup.h
511
return task_css(task, subsys_id)->cgroup;
include/linux/cgroup.h
514
static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
include/linux/cgroup.h
516
return task_css_set(task)->dfl_cgrp;
include/linux/cgroup.h
573
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
include/linux/cgroup.h
576
struct css_set *cset = task_css_set(task);
include/linux/cgroup.h
704
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
include/linux/cgroup.h
737
static inline void cgroup_account_cputime(struct task_struct *task,
include/linux/cgroup.h
742
cpuacct_charge(task, delta_exec);
include/linux/cgroup.h
744
cgrp = task_dfl_cgroup(task);
include/linux/cgroup.h
749
static inline void cgroup_account_cputime_field(struct task_struct *task,
include/linux/cgroup.h
755
cpuacct_account_field(task, index, delta_exec);
include/linux/cgroup.h
757
cgrp = task_dfl_cgroup(task);
include/linux/cgroup.h
764
static inline void cgroup_account_cputime(struct task_struct *task,
include/linux/cgroup.h
766
static inline void cgroup_account_cputime_field(struct task_struct *task,
include/linux/cgroup.h
801
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
include/linux/cgroup.h
804
static inline bool cgroup_task_frozen(struct task_struct *task)
include/linux/cgroup.h
806
return task->frozen;
include/linux/cgroup.h
813
static inline bool cgroup_task_frozen(struct task_struct *task)
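Note: task_css_set(), task_css() and task_dfl_cgroup() above all dereference the RCU-protected task->cgroups pointer, which is why the _check variants take an extra lockdep condition. Plain callers bracket the access in rcu_read_lock(); a sketch follows (example_dfl_cgroup is a hypothetical name):

#include <linux/cgroup.h>
#include <linux/rcupdate.h>

/* Sketch: snapshot the task's cgroup on the default hierarchy. The
 * returned pointer is only guaranteed stable while RCU is held, so a
 * real caller would take its own reference or finish its work inside
 * the read-side section. */
static struct cgroup *example_dfl_cgroup(struct task_struct *task)
{
	struct cgroup *cgrp;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	rcu_read_unlock();
	return cgrp;
}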
include/linux/cn_proc.h
23
void proc_fork_connector(struct task_struct *task);
include/linux/cn_proc.h
24
void proc_exec_connector(struct task_struct *task);
include/linux/cn_proc.h
25
void proc_id_connector(struct task_struct *task, int which_id);
include/linux/cn_proc.h
26
void proc_sid_connector(struct task_struct *task);
include/linux/cn_proc.h
27
void proc_ptrace_connector(struct task_struct *task, int which_id);
include/linux/cn_proc.h
28
void proc_comm_connector(struct task_struct *task);
include/linux/cn_proc.h
29
void proc_coredump_connector(struct task_struct *task);
include/linux/cn_proc.h
30
void proc_exit_connector(struct task_struct *task);
include/linux/cn_proc.h
32
static inline void proc_fork_connector(struct task_struct *task)
include/linux/cn_proc.h
35
static inline void proc_exec_connector(struct task_struct *task)
include/linux/cn_proc.h
38
static inline void proc_id_connector(struct task_struct *task,
include/linux/cn_proc.h
42
static inline void proc_sid_connector(struct task_struct *task)
include/linux/cn_proc.h
45
static inline void proc_comm_connector(struct task_struct *task)
include/linux/cn_proc.h
48
static inline void proc_ptrace_connector(struct task_struct *task,
include/linux/cn_proc.h
52
static inline void proc_coredump_connector(struct task_struct *task)
include/linux/cn_proc.h
55
static inline void proc_exit_connector(struct task_struct *task)
include/linux/cpuset.h
118
struct task_struct *task);
include/linux/cpuset.h
196
static inline void inc_dl_tasks_cs(struct task_struct *task) { }
include/linux/cpuset.h
197
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
include/linux/cpuset.h
251
struct task_struct *task)
include/linux/cpuset.h
75
extern void inc_dl_tasks_cs(struct task_struct *task);
include/linux/cpuset.h
76
extern void dec_dl_tasks_cs(struct task_struct *task);
include/linux/cred.h
317
#define __task_cred(task) \
include/linux/cred.h
318
rcu_dereference((task)->real_cred)
include/linux/cred.h
360
#define task_cred_xxx(task, xxx) \
include/linux/cred.h
364
___val = __task_cred((task))->xxx; \
include/linux/cred.h
369
#define task_uid(task) (task_cred_xxx((task), uid))
include/linux/cred.h
370
#define task_euid(task) (task_cred_xxx((task), euid))
include/linux/cred.h
371
#define task_ucounts(task) (task_cred_xxx((task), ucounts))
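Note: task_uid(), task_euid() and task_ucounts() above are thin wrappers over task_cred_xxx(), which copies a single field out of the task's objective credentials inside an RCU read-side section, so no long-lived reference on the cred is taken. A one-function sketch (example_same_uid is illustrative):

#include <linux/cred.h>
#include <linux/uidgid.h>

/* Sketch: compare the real uids of two tasks without pinning creds. */
static bool example_same_uid(struct task_struct *a, struct task_struct *b)
{
	return uid_eq(task_uid(a), task_uid(b));
}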
include/linux/debug_locks.h
52
extern void debug_show_held_locks(struct task_struct *task);
include/linux/debug_locks.h
60
static inline void debug_show_held_locks(struct task_struct *task)
include/linux/delayacct.h
109
extern void __delayacct_irq(struct task_struct *task, u32 delta);
include/linux/delayacct.h
244
static inline void delayacct_irq(struct task_struct *task, u32 delta)
include/linux/delayacct.h
249
if (task->delays)
include/linux/delayacct.h
250
__delayacct_irq(task, delta);
include/linux/delayacct.h
291
static inline void delayacct_irq(struct task_struct *task, u32 delta)
include/linux/file.h
68
extern struct file *fget_task(struct task_struct *task, unsigned int fd);
include/linux/file.h
69
extern struct file *fget_task_next(struct task_struct *task, unsigned int *fd);
include/linux/freezer.h
70
extern bool cgroup1_freezing(struct task_struct *task);
include/linux/freezer.h
72
static inline bool cgroup1_freezing(struct task_struct *task)
include/linux/fs.h
615
uncached_acl_sentinel(struct task_struct *task)
include/linux/fs.h
617
return (void *)task + 1;
include/linux/fsl/bestcomm/bestcomm_priv.h
238
extern int bcom_load_image(int task, u32 *task_image);
include/linux/fsl/bestcomm/bestcomm_priv.h
239
extern void bcom_set_initiator(int task, int initiator);
include/linux/fsl/bestcomm/bestcomm_priv.h
261
bcom_enable_task(int task)
include/linux/fsl/bestcomm/bestcomm_priv.h
264
reg = in_be16(&bcom_eng->regs->tcr[task]);
include/linux/fsl/bestcomm/bestcomm_priv.h
265
out_be16(&bcom_eng->regs->tcr[task], reg | TASK_ENABLE);
include/linux/fsl/bestcomm/bestcomm_priv.h
269
bcom_disable_task(int task)
include/linux/fsl/bestcomm/bestcomm_priv.h
271
u16 reg = in_be16(&bcom_eng->regs->tcr[task]);
include/linux/fsl/bestcomm/bestcomm_priv.h
272
out_be16(&bcom_eng->regs->tcr[task], reg & ~TASK_ENABLE);
include/linux/fsl/bestcomm/bestcomm_priv.h
277
bcom_task_desc(int task)
include/linux/fsl/bestcomm/bestcomm_priv.h
279
return bcom_sram_pa2va(bcom_eng->tdt[task].start);
include/linux/fsl/bestcomm/bestcomm_priv.h
283
bcom_task_num_descs(int task)
include/linux/fsl/bestcomm/bestcomm_priv.h
285
return (bcom_eng->tdt[task].stop - bcom_eng->tdt[task].start)/sizeof(u32) + 1;
include/linux/fsl/bestcomm/bestcomm_priv.h
289
bcom_task_var(int task)
include/linux/fsl/bestcomm/bestcomm_priv.h
291
return bcom_sram_pa2va(bcom_eng->tdt[task].var);
include/linux/fsl/bestcomm/bestcomm_priv.h
295
bcom_task_inc(int task)
include/linux/fsl/bestcomm/bestcomm_priv.h
297
return &bcom_task_var(task)[BCOM_MAX_VAR];
include/linux/fsl/bestcomm/bestcomm_priv.h
328
bcom_set_task_pragma(int task, int pragma)
include/linux/fsl/bestcomm/bestcomm_priv.h
330
u32 *fdt = &bcom_eng->tdt[task].fdt;
include/linux/fsl/bestcomm/bestcomm_priv.h
335
bcom_set_task_auto_start(int task, int next_task)
include/linux/fsl/bestcomm/bestcomm_priv.h
337
u16 __iomem *tcr = &bcom_eng->regs->tcr[task];
include/linux/fsl/bestcomm/bestcomm_priv.h
342
bcom_set_tcr_initiator(int task, int initiator)
include/linux/fsl/bestcomm/bestcomm_priv.h
344
u16 __iomem *tcr = &bcom_eng->regs->tcr[task];
include/linux/ftrace.h
1295
ftrace_graph_get_ret_stack(struct task_struct *task, int skip);
include/linux/ftrace.h
1296
unsigned long ftrace_graph_top_ret_addr(struct task_struct *task);
include/linux/ftrace.h
1298
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
include/linux/ftrace.h
1364
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
include/linux/hp_sdc.h
297
struct tasklet_struct task;
include/linux/hrtimer.h
94
struct task_struct *task;
include/linux/io_uring/cmd.h
161
return cmd_to_io_kiocb(cmd)->tctx->task;
include/linux/io_uring_types.h
115
struct task_struct *task;
include/linux/iocontext.h
120
void exit_io_context(struct task_struct *task);
include/linux/iocontext.h
131
static inline void exit_io_context(struct task_struct *task) { }
include/linux/ioprio.h
30
static inline int task_nice_ioprio(struct task_struct *task)
include/linux/ioprio.h
32
return (task_nice(task) + 20) / 5;
include/linux/ioprio.h
39
static inline int task_nice_ioclass(struct task_struct *task)
include/linux/ioprio.h
41
if (task->policy == SCHED_IDLE)
include/linux/ioprio.h
43
else if (rt_or_dl_task_policy(task))
include/linux/ioprio.h
87
extern int set_task_ioprio(struct task_struct *task, int ioprio);
include/linux/kasan.h
477
void kasan_unpoison_task_stack(struct task_struct *task);
include/linux/kasan.h
480
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
include/linux/kmsan.h
29
void kmsan_task_create(struct task_struct *task);
include/linux/kmsan.h
311
static inline void kmsan_task_create(struct task_struct *task)
include/linux/kmsan.h
315
static inline void kmsan_task_exit(struct task_struct *task)
include/linux/kmsan.h
35
void kmsan_task_exit(struct task_struct *task);
include/linux/kthread.h
149
struct task_struct *task;
include/linux/kthread.h
231
wake_up_process(__kw->task); \
include/linux/kthread.h
258
wake_up_process(kw->task);
include/linux/latencytop.h
31
void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
include/linux/latencytop.h
33
account_scheduler_latency(struct task_struct *task, int usecs, int inter)
include/linux/latencytop.h
36
__account_scheduler_latency(task, usecs, inter);
include/linux/latencytop.h
44
account_scheduler_latency(struct task_struct *task, int usecs, int inter)
include/linux/livepatch.h
182
void klp_update_patch_state(struct task_struct *task);
include/linux/livepatch.h
184
static inline bool klp_patch_pending(struct task_struct *task)
include/linux/livepatch.h
186
return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
include/linux/livepatch.h
222
static inline bool klp_patch_pending(struct task_struct *task) { return false; }
include/linux/livepatch.h
223
static inline void klp_update_patch_state(struct task_struct *task) {}
include/linux/lockdep.h
320
static inline void lockdep_init_task(struct task_struct *task)
include/linux/lockdep.h
332
static inline void lockdep_set_selftest_task(struct task_struct *task)
include/linux/lockdep.h
434
static inline void lockdep_free_task(struct task_struct *task) {}
include/linux/lockdep.h
93
extern void lockdep_set_selftest_task(struct task_struct *task);
include/linux/lockdep.h
95
extern void lockdep_init_task(struct task_struct *task);
include/linux/lsm_hook_defs.h
213
LSM_HOOK(int, 0, task_alloc, struct task_struct *task,
include/linux/lsm_hook_defs.h
215
LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task)
include/linux/mempolicy.h
297
static inline void mpol_put_task_policy(struct task_struct *task)
include/linux/mm.h
2048
#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
include/linux/mm.h
2142
static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
include/linux/mm.h
3008
const struct task_struct *task, bool bypass_rlim);
include/linux/mm.h
3018
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
include/linux/mm.h
3848
extern struct file *get_task_exe_file(struct task_struct *task);
include/linux/mmc/host.h
352
struct task_struct *task;
include/linux/nfs_xdr.h
1675
struct rpc_task task;
include/linux/nfs_xdr.h
1711
struct rpc_task task;
include/linux/nfs_xdr.h
1728
int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data);
include/linux/nfs_xdr.h
1752
struct rpc_task task;
include/linux/nfs_xdr.h
1810
void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
include/linux/nfs_xdr.h
1811
int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
include/linux/nfs_xdr.h
320
struct rpc_task task;
include/linux/nospec.h
68
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
include/linux/nospec.h
69
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
include/linux/nospec.h
72
void arch_seccomp_spec_mitigate(struct task_struct *task);
include/linux/nsfs.h
15
int ns_get_path(struct path *path, struct task_struct *task,
include/linux/nsfs.h
23
int ns_get_name(char *buf, size_t size, struct task_struct *task,
include/linux/perf_event.h
1040
struct task_struct *task;
include/linux/perf_event.h
1199
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
include/linux/perf_event.h
1201
return container_of(task_css_check(task, perf_event_cgrp_id,
include/linux/perf_event.h
1226
struct task_struct *task);
include/linux/perf_event.h
1231
extern void perf_event_free_task(struct task_struct *task);
include/linux/perf_event.h
1232
extern void perf_event_delayed_put(struct task_struct *task);
include/linux/perf_event.h
1253
struct task_struct *task,
include/linux/perf_event.h
1629
static inline void perf_event_task_migrate(struct task_struct *task)
include/linux/perf_event.h
1632
task->sched_migrated = 1;
include/linux/perf_event.h
1636
struct task_struct *task)
include/linux/perf_event.h
1639
__perf_event_task_sched_in(prev, task);
include/linux/perf_event.h
1642
task->sched_migrated) {
include/linux/perf_event.h
1644
task->sched_migrated = 0;
include/linux/perf_event.h
1817
struct task_struct *task);
include/linux/perf_event.h
1954
perf_event_task_migrate(struct task_struct *task) { }
include/linux/perf_event.h
1957
struct task_struct *task) { }
include/linux/perf_event.h
1964
static inline void perf_event_free_task(struct task_struct *task) { }
include/linux/perf_event.h
1965
static inline void perf_event_delayed_put(struct task_struct *task) { }
include/linux/perf_event.h
515
struct task_struct *task, bool sched_in);
include/linux/perf_regs.h
21
u64 perf_reg_abi(struct task_struct *task);
include/linux/perf_regs.h
38
static inline u64 perf_reg_abi(struct task_struct *task)
include/linux/pid.h
102
extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
include/linux/pid.h
107
extern void attach_pid(struct task_struct *task, enum pid_type);
include/linux/pid.h
108
void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type);
include/linux/pid.h
109
void change_pid(struct pid **pids, struct task_struct *task, enum pid_type,
include/linux/pid.h
111
extern void exchange_tids(struct task_struct *task, struct task_struct *old);
include/linux/pid.h
190
#define do_each_pid_task(pid, type, task) \
include/linux/pid.h
193
hlist_for_each_entry_rcu((task), \
include/linux/pid.h
200
#define while_each_pid_task(pid, type, task) \
include/linux/pid.h
206
#define do_each_pid_thread(pid, type, task) \
include/linux/pid.h
207
do_each_pid_task(pid, type, task) { \
include/linux/pid.h
208
struct task_struct *tg___ = task; \
include/linux/pid.h
209
for_each_thread(tg___, task) {
include/linux/pid.h
211
#define while_each_pid_thread(pid, type, task) \
include/linux/pid.h
213
task = tg___; \
include/linux/pid.h
214
} while_each_pid_task(pid, type, task)
include/linux/pid.h
216
static inline struct pid *task_pid(struct task_struct *task)
include/linux/pid.h
218
return task->thread_pid;
include/linux/pid.h
232
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
include/linux/pid.h
85
void do_notify_pidfd(struct task_struct *task);
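
An illustrative sketch (not part of the index): walking every thread reachable from a struct pid with the do_each_pid_thread()/while_each_pid_thread() macros quoted above. The helper name print_threads_for_pid is hypothetical; a kernel-module context is assumed, and the walk must run under rcu_read_lock().

	#include <linux/pid.h>
	#include <linux/sched.h>
	#include <linux/rcupdate.h>
	#include <linux/printk.h>

	/* Hypothetical helper: list all threads of the process with tgid @nr. */
	static void print_threads_for_pid(pid_t nr)
	{
		struct pid *pid = find_get_pid(nr);	/* takes a reference */
		struct task_struct *task;

		if (!pid)
			return;

		rcu_read_lock();
		do_each_pid_thread(pid, PIDTYPE_TGID, task) {
			pr_info("tgid %d -> thread %s (%d)\n",
				nr, task->comm, task->pid);
		} while_each_pid_thread(pid, PIDTYPE_TGID, task);
		rcu_read_unlock();

		put_pid(pid);
	}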
include/linux/posix-timers.h
226
void posix_cpu_timers_exit(struct task_struct *task);
include/linux/posix-timers.h
227
void posix_cpu_timers_exit_group(struct task_struct *task);
include/linux/posix-timers.h
228
void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
include/linux/posix-timers.h
231
int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
include/linux/proc_fs.h
163
struct pid *pid, struct task_struct *task);
include/linux/proc_fs.h
167
void arch_proc_pid_thread_features(struct seq_file *m, struct task_struct *task);
include/linux/proc_ns.h
20
struct ns_common *(*get)(struct task_struct *task);
include/linux/ptrace.h
117
static inline struct task_struct *ptrace_parent(struct task_struct *task)
include/linux/ptrace.h
119
if (unlikely(task->ptrace))
include/linux/ptrace.h
120
return rcu_dereference(task->parent);
include/linux/ptrace.h
133
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
include/linux/ptrace.h
135
return task->ptrace & PT_EVENT_FLAG(event);
include/linux/ptrace.h
227
static inline void ptrace_release_task(struct task_struct *task)
include/linux/ptrace.h
229
BUG_ON(!list_empty(&task->ptraced));
include/linux/ptrace.h
230
ptrace_unlink(task);
include/linux/ptrace.h
231
BUG_ON(!list_empty(&task->ptrace_entry));
include/linux/ptrace.h
287
static inline void user_enable_single_step(struct task_struct *task)
include/linux/ptrace.h
301
static inline void user_disable_single_step(struct task_struct *task)
include/linux/ptrace.h
330
static inline void user_enable_block_step(struct task_struct *task)
include/linux/ptrace.h
88
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
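
An illustrative sketch: gating access to another task's state with the ptrace_may_access() declaration quoted above, in the way /proc readers do. PTRACE_MODE_READ_FSCREDS is the usual mode for read-only introspection; the wrapper name may_inspect_task is hypothetical, and the caller is assumed to already hold a reference on @task.

	#include <linux/ptrace.h>
	#include <linux/sched.h>
	#include <linux/errno.h>

	static int may_inspect_task(struct task_struct *task)
	{
		/* false means the current credentials may not ptrace @task */
		if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
			return -EPERM;
		return 0;
	}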
include/linux/rcuwait.h
10
{ .task = NULL, }
include/linux/rcuwait.h
14
w->task = NULL;
include/linux/rcuwait.h
23
return !!rcu_access_pointer(w->task);
include/linux/rcuwait.h
36
rcu_assign_pointer(w->task, current);
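
An illustrative sketch: a single-waiter rendezvous built on struct rcuwait, matching the rcu_assign_pointer(w->task, current) internals quoted above. rcuwait supports at most one sleeper at a time; the function names waiter/waker and the work_done flag are hypothetical.

	#include <linux/rcuwait.h>
	#include <linux/sched.h>

	static struct rcuwait done_wait = __RCUWAIT_INITIALIZER(done_wait);
	static bool work_done;

	static void waiter(void)
	{
		/* Sleeps until work_done reads true; the condition is
		 * re-evaluated each time the task is woken. */
		rcuwait_wait_event(&done_wait, READ_ONCE(work_done),
				   TASK_UNINTERRUPTIBLE);
	}

	static void waker(void)
	{
		WRITE_ONCE(work_done, true);
		rcuwait_wake_up(&done_wait);	/* wakes the parked task, if any */
	}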
include/linux/resume_user_mode.h
21
static inline void set_notify_resume(struct task_struct *task)
include/linux/resume_user_mode.h
23
if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
include/linux/resume_user_mode.h
24
kick_process(task);
include/linux/sched.h
152
#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
include/linux/sched.h
154
#define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
include/linux/sched.h
155
#define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
include/linux/sched.h
156
#define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
include/linux/sched.h
1821
static __always_inline bool is_user_task(struct task_struct *task)
include/linux/sched.h
1823
return task->mm && !(task->flags & (PF_KTHREAD | PF_USER_WORKER));
include/linux/sched.h
1951
struct task_struct task;
include/linux/sched.h
1965
# define task_thread_info(task) (&(task)->thread_info)
include/linux/sched.h
1967
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
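
An illustrative sketch: the jobctl-based state tests quoted above. These are lockless READ_ONCE() snapshots, so the answer may already be stale when the caller acts on it; the helper name report_state is hypothetical.

	#include <linux/sched.h>
	#include <linux/printk.h>

	static void report_state(struct task_struct *p)
	{
		if (task_is_running(p))
			pr_info("%s/%d: on a runqueue\n", p->comm, p->pid);
		else if (task_is_stopped_or_traced(p))
			pr_info("%s/%d: stopped by a signal or a ptracer\n",
				p->comm, p->pid);
	}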
include/linux/sched/cputime.h
182
task_sched_runtime(struct task_struct *task);
include/linux/sched/debug.h
33
extern void show_stack(struct task_struct *task, unsigned long *sp,
include/linux/sched/jobctl.h
43
extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask);
include/linux/sched/jobctl.h
44
extern void task_clear_jobctl_trapping(struct task_struct *task);
include/linux/sched/jobctl.h
45
extern void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask);
include/linux/sched/mm.h
151
extern struct mm_struct *get_task_mm(struct task_struct *task);
include/linux/sched/mm.h
157
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
include/linux/sched/rt.h
68
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
include/linux/sched/signal.h
289
struct task_struct *task = current;
include/linux/sched/signal.h
294
spin_lock_irq(&task->sighand->siglock);
include/linux/sched/signal.h
295
ret = dequeue_signal(&task->blocked, &__info, &__type);
include/linux/sched/signal.h
296
spin_unlock_irq(&task->sighand->siglock);
include/linux/sched/signal.h
359
static inline bool __set_notify_signal(struct task_struct *task)
include/linux/sched/signal.h
361
return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
include/linux/sched/signal.h
362
!wake_up_state(task, TASK_INTERRUPTIBLE);
include/linux/sched/signal.h
369
static inline void set_notify_signal(struct task_struct *task)
include/linux/sched/signal.h
371
if (__set_notify_signal(task))
include/linux/sched/signal.h
372
kick_process(task);
include/linux/sched/signal.h
462
void task_join_group_stop(struct task_struct *task);
include/linux/sched/signal.h
486
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
include/linux/sched/signal.h
488
clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
include/linux/sched/signal.h
495
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
include/linux/sched/signal.h
497
return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
include/linux/sched/signal.h
515
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
include/linux/sched/signal.h
517
task->restore_sigmask = false;
include/linux/sched/signal.h
527
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
include/linux/sched/signal.h
529
return task->restore_sigmask;
include/linux/sched/signal.h
669
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
include/linux/sched/signal.h
673
pid = task_pid(task);
include/linux/sched/signal.h
675
pid = task->signal->pids[type];
include/linux/sched/signal.h
679
static inline struct pid *task_tgid(struct task_struct *task)
include/linux/sched/signal.h
681
return task->signal->pids[PIDTYPE_TGID];
include/linux/sched/signal.h
689
static inline struct pid *task_pgrp(struct task_struct *task)
include/linux/sched/signal.h
691
return task->signal->pids[PIDTYPE_PGID];
include/linux/sched/signal.h
694
static inline struct pid *task_session(struct task_struct *task)
include/linux/sched/signal.h
696
return task->signal->pids[PIDTYPE_SID];
include/linux/sched/signal.h
699
static inline int get_nr_threads(struct task_struct *task)
include/linux/sched/signal.h
701
return task->signal->nr_threads;
include/linux/sched/signal.h
740
extern struct sighand_struct *lock_task_sighand(struct task_struct *task,
include/linux/sched/signal.h
742
__acquires(&task->sighand->siglock);
include/linux/sched/signal.h
744
static inline void unlock_task_sighand(struct task_struct *task,
include/linux/sched/signal.h
746
__releases(&task->sighand->siglock)
include/linux/sched/signal.h
748
spin_unlock_irqrestore(&task->sighand->siglock, *flags);
include/linux/sched/signal.h
752
extern void lockdep_assert_task_sighand_held(struct task_struct *task);
include/linux/sched/signal.h
754
static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
include/linux/sched/signal.h
757
static inline unsigned long task_rlimit(const struct task_struct *task,
include/linux/sched/signal.h
760
return READ_ONCE(task->signal->rlim[limit].rlim_cur);
include/linux/sched/signal.h
763
static inline unsigned long task_rlimit_max(const struct task_struct *task,
include/linux/sched/signal.h
766
return READ_ONCE(task->signal->rlim[limit].rlim_max);
include/linux/sched/signal.h
77
struct task_struct *task;
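
An illustrative sketch: the signal-struct accessors quoted above. task_rlimit() reads a limit locklessly via READ_ONCE(), while work on shared signal state needs the lock_task_sighand()/unlock_task_sighand() pair, which fails (returns NULL) once the task has released its sighand on exit. The helper name inspect_signal_state is hypothetical.

	#include <linux/sched/signal.h>
	#include <linux/resource.h>
	#include <linux/printk.h>

	static void inspect_signal_state(struct task_struct *task)
	{
		struct sighand_struct *sighand;
		unsigned long flags;

		pr_info("%s/%d: RLIMIT_NOFILE=%lu, %d thread(s)\n",
			task->comm, task->pid,
			task_rlimit(task, RLIMIT_NOFILE),
			get_nr_threads(task));

		sighand = lock_task_sighand(task, &flags);
		if (!sighand)
			return;		/* task already past exit */
		/* ... siglock-protected work goes here ... */
		unlock_task_sighand(task, &flags);
	}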
include/linux/sched/task.h
170
void put_task_struct_rcu_user(struct task_struct *task);
include/linux/sched/task_stack.h
21
static __always_inline void *task_stack_page(const struct task_struct *task)
include/linux/sched/task_stack.h
23
return task->stack;
include/linux/sched/task_stack.h
28
static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
include/linux/sched/task_stack.h
31
return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
include/linux/sched/task_stack.h
33
return task->stack;
include/linux/sched/task_stack.h
39
#define task_stack_page(task) ((void *)(task)->stack)
include/linux/sched/task_stack.h
44
task_thread_info(p)->task = p;
include/linux/sched/task_stack.h
86
#define task_stack_end_corrupted(task) \
include/linux/sched/task_stack.h
87
(*(end_of_stack(task)) != STACK_END_MAGIC)
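
An illustrative sketch: using the end_of_stack() helper quoted above to check the STACK_END_MAGIC canary, i.e. the same test task_stack_end_corrupted() wraps. The helper name check_stack_canary is hypothetical.

	#include <linux/sched/task_stack.h>
	#include <linux/magic.h>	/* STACK_END_MAGIC */
	#include <linux/printk.h>

	static void check_stack_canary(struct task_struct *task)
	{
		unsigned long *end = end_of_stack(task);

		/* Equivalent to task_stack_end_corrupted(task) */
		if (*end != STACK_END_MAGIC)
			pr_emerg("%s/%d: kernel stack overrun, canary smashed\n",
				 task->comm, task->pid);
	}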
include/linux/sched/wake_q.h
62
extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
include/linux/sched/wake_q.h
63
extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
include/linux/seccomp.h
100
static inline long seccomp_get_metadata(struct task_struct *task,
include/linux/seccomp.h
114
struct pid *pid, struct task_struct *task);
include/linux/seccomp.h
90
extern long seccomp_get_filter(struct task_struct *task,
include/linux/seccomp.h
92
extern long seccomp_get_metadata(struct task_struct *task,
include/linux/seccomp.h
95
static inline long seccomp_get_filter(struct task_struct *task,
include/linux/security.h
1218
static inline int security_task_alloc(struct task_struct *task,
include/linux/security.h
1224
static inline void security_task_free(struct task_struct *task)
include/linux/security.h
492
int security_task_alloc(struct task_struct *task, u64 clone_flags);
include/linux/security.h
493
void security_task_free(struct task_struct *task);
include/linux/shm.h
19
void exit_shm(struct task_struct *task);
include/linux/shm.h
20
#define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist)
include/linux/shm.h
32
static inline void exit_shm(struct task_struct *task)
include/linux/shm.h
35
static inline void shm_init_task(struct task_struct *task)
include/linux/stacktrace.h
39
struct task_struct *task, struct pt_regs *regs);
include/linux/stacktrace.h
59
struct task_struct *task);
include/linux/stacktrace.h
72
unsigned int stack_trace_save_tsk(struct task_struct *task,
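
An illustrative sketch: capturing and printing another task's kernel stack with the stack_trace_save_tsk() declaration quoted above (available with CONFIG_STACKTRACE). The helper name dump_remote_stack and the buffer depth of 16 are arbitrary choices.

	#include <linux/stacktrace.h>
	#include <linux/sched.h>
	#include <linux/kernel.h>

	static void dump_remote_stack(struct task_struct *task)
	{
		unsigned long entries[16];
		unsigned int nr;

		/* skipnr = 0: keep every captured frame */
		nr = stack_trace_save_tsk(task, entries, ARRAY_SIZE(entries), 0);
		stack_trace_print(entries, nr, 1);	/* indent one space */
	}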
include/linux/stop_machine.h
40
extern void print_stop_info(const char *log_lvl, struct task_struct *task);
include/linux/stop_machine.h
86
static inline void print_stop_info(const char *log_lvl, struct task_struct *task) { }
include/linux/string_helpers.h
114
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
include/linux/sunrpc/auth.h
132
int (*crmarshal)(struct rpc_task *task,
include/linux/sunrpc/auth.h
135
int (*crvalidate)(struct rpc_task *task,
include/linux/sunrpc/auth.h
137
int (*crwrap_req)(struct rpc_task *task,
include/linux/sunrpc/auth.h
139
int (*crunwrap_resp)(struct rpc_task *task,
include/linux/sunrpc/auth.h
168
int rpcauth_marshcred(struct rpc_task *task,
include/linux/sunrpc/auth.h
170
int rpcauth_checkverf(struct rpc_task *task,
include/linux/sunrpc/auth.h
172
int rpcauth_wrap_req_encode(struct rpc_task *task,
include/linux/sunrpc/auth.h
174
int rpcauth_wrap_req(struct rpc_task *task,
include/linux/sunrpc/auth.h
176
int rpcauth_unwrap_resp_decode(struct rpc_task *task,
include/linux/sunrpc/auth.h
178
int rpcauth_unwrap_resp(struct rpc_task *task,
include/linux/sunrpc/auth.h
180
bool rpcauth_xmit_need_reencode(struct rpc_task *task);
include/linux/sunrpc/bc_xprt.h
23
void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task,
include/linux/sunrpc/clnt.h
258
const char *rpc_proc_name(const struct rpc_task *task);
include/linux/sunrpc/clnt.h
268
static inline int rpc_reply_expected(struct rpc_task *task)
include/linux/sunrpc/clnt.h
270
return (task->tk_msg.rpc_proc != NULL) &&
include/linux/sunrpc/clnt.h
271
(task->tk_msg.rpc_proc->p_decode != NULL);
include/linux/sunrpc/clnt.h
274
static inline void rpc_task_close_connection(struct rpc_task *task)
include/linux/sunrpc/clnt.h
276
if (task->tk_xprt)
include/linux/sunrpc/clnt.h
277
xprt_force_disconnect(task->tk_xprt);
include/linux/sunrpc/metrics.h
96
static inline void rpc_count_iostats(const struct rpc_task *task,
include/linux/sunrpc/metrics.h
98
static inline void rpc_count_iostats_metrics(const struct rpc_task *task,
include/linux/sunrpc/sched.h
117
struct rpc_task *task;
include/linux/sunrpc/sched.h
174
#define RPC_SIGNALLED(t) (READ_ONCE(task->tk_rpc_status) == -ERESTARTSYS)
include/linux/sunrpc/sched.h
225
bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
include/linux/sunrpc/sched.h
226
void rpc_task_try_cancel(struct rpc_task *task, int error);
include/linux/sunrpc/sched.h
240
unsigned long rpc_task_timeout(const struct rpc_task *task);
include/linux/sunrpc/sched.h
242
struct rpc_task *task,
include/linux/sunrpc/sched.h
248
struct rpc_task *task,
include/linux/sunrpc/sched.h
274
int rpc_wait_for_completion_task(struct rpc_task *task);
include/linux/sunrpc/sched.h
283
void rpc_prepare_task(struct rpc_task *task);
include/linux/sunrpc/xprt.h
154
int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
include/linux/sunrpc/xprt.h
155
void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
include/linux/sunrpc/xprt.h
156
void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
include/linux/sunrpc/xprt.h
159
void (*rpcbind)(struct rpc_task *task);
include/linux/sunrpc/xprt.h
161
void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
include/linux/sunrpc/xprt.h
165
int (*buf_alloc)(struct rpc_task *task);
include/linux/sunrpc/xprt.h
166
void (*buf_free)(struct rpc_task *task);
include/linux/sunrpc/xprt.h
171
void (*wait_for_reply_request)(struct rpc_task *task);
include/linux/sunrpc/xprt.h
172
void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
include/linux/sunrpc/xprt.h
173
void (*release_request)(struct rpc_task *task);
include/linux/sunrpc/xprt.h
377
void xprt_connect(struct rpc_task *task);
include/linux/sunrpc/xprt.h
381
void xprt_reserve(struct rpc_task *task);
include/linux/sunrpc/xprt.h
382
void xprt_retry_reserve(struct rpc_task *task);
include/linux/sunrpc/xprt.h
383
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
include/linux/sunrpc/xprt.h
384
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
include/linux/sunrpc/xprt.h
385
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
include/linux/sunrpc/xprt.h
388
bool xprt_prepare_transmit(struct rpc_task *task);
include/linux/sunrpc/xprt.h
389
void xprt_request_enqueue_transmit(struct rpc_task *task);
include/linux/sunrpc/xprt.h
390
int xprt_request_enqueue_receive(struct rpc_task *task);
include/linux/sunrpc/xprt.h
391
void xprt_request_wait_receive(struct rpc_task *task);
include/linux/sunrpc/xprt.h
392
void xprt_request_dequeue_xprt(struct rpc_task *task);
include/linux/sunrpc/xprt.h
393
bool xprt_request_need_retransmit(struct rpc_task *task);
include/linux/sunrpc/xprt.h
394
void xprt_transmit(struct rpc_task *task);
include/linux/sunrpc/xprt.h
395
void xprt_end_transmit(struct rpc_task *task);
include/linux/sunrpc/xprt.h
397
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
include/linux/sunrpc/xprt.h
398
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
include/linux/sunrpc/xprt.h
399
void xprt_release(struct rpc_task *task);
include/linux/sunrpc/xprt.h
406
void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
include/linux/sunrpc/xprt.h
428
void xprt_wait_for_reply_request_def(struct rpc_task *task);
include/linux/sunrpc/xprt.h
429
void xprt_wait_for_reply_request_rtt(struct rpc_task *task);
include/linux/sunrpc/xprt.h
433
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
include/linux/sunrpc/xprt.h
435
void xprt_update_rtt(struct rpc_task *task);
include/linux/sunrpc/xprt.h
436
void xprt_complete_rqst(struct rpc_task *task, int copied);
include/linux/sunrpc/xprt.h
439
void xprt_release_rqst_cong(struct rpc_task *task);
include/linux/swait.h
49
struct task_struct *task;
include/linux/swait.h
54
.task = current, \
include/linux/syscall_user_dispatch.h
19
int syscall_user_dispatch_get_config(struct task_struct *task, unsigned long size,
include/linux/syscall_user_dispatch.h
22
int syscall_user_dispatch_set_config(struct task_struct *task, unsigned long size,
include/linux/syscall_user_dispatch.h
37
static inline int syscall_user_dispatch_get_config(struct task_struct *task,
include/linux/syscall_user_dispatch.h
43
static inline int syscall_user_dispatch_set_config(struct task_struct *task,
include/linux/task_work.h
24
static inline bool task_work_pending(struct task_struct *task)
include/linux/task_work.h
26
return READ_ONCE(task->task_works);
include/linux/task_work.h
29
int task_work_add(struct task_struct *task, struct callback_head *twork,
include/linux/task_work.h
32
struct callback_head *task_work_cancel_match(struct task_struct *task,
include/linux/task_work.h
35
bool task_work_cancel(struct task_struct *task, struct callback_head *cb);
include/linux/task_work.h
38
static inline void exit_task_work(struct task_struct *task)
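
An illustrative sketch: queuing a deferred callback with the task_work_add() interface quoted above. The callback runs in the context of the target task, typically on its next return to user mode with TWA_RESUME; the names my_deferred_fn and queue_deferred are hypothetical.

	#include <linux/task_work.h>
	#include <linux/sched.h>
	#include <linux/printk.h>

	static void my_deferred_fn(struct callback_head *head)
	{
		/* Executes in the target task's context */
		pr_info("task_work ran in %s/%d\n", current->comm, current->pid);
	}

	static struct callback_head my_work;

	static int queue_deferred(struct task_struct *task)
	{
		init_task_work(&my_work, my_deferred_fn);
		/* Fails with -ESRCH once the target has started exiting. */
		return task_work_add(task, &my_work, TWA_RESUME);
	}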
include/linux/time_namespace.h
120
static inline int vdso_join_timens(struct task_struct *task,
include/linux/time_namespace.h
41
extern int vdso_join_timens(struct task_struct *task,
include/linux/trace_events.h
932
struct task_struct *task);
include/linux/trace_events.h
937
struct task_struct *task)
include/linux/trace_events.h
939
perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
include/linux/types.h
270
struct task_struct __rcu *task;
include/linux/unwind_deferred.h
23
void unwind_task_init(struct task_struct *task);
include/linux/unwind_deferred.h
24
void unwind_task_free(struct task_struct *task);
include/linux/unwind_deferred.h
32
void unwind_deferred_task_exit(struct task_struct *task);
include/linux/unwind_deferred.h
58
static inline void unwind_task_init(struct task_struct *task) {}
include/linux/unwind_deferred.h
59
static inline void unwind_task_free(struct task_struct *task) {}
include/linux/unwind_deferred.h
74
static inline void unwind_deferred_task_exit(struct task_struct *task) {}
include/linux/wait.h
559
if (!__t.task) { \
include/linux/workqueue.h
637
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
include/linux/workqueue.h
641
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
include/linux/ww_mutex.h
147
ctx->task = current;
include/linux/ww_mutex.h
57
struct task_struct *task;
include/net/cls_cgroup.h
48
static inline u32 __task_get_classid(struct task_struct *task)
include/net/cls_cgroup.h
50
return task_cls_state(task)->classid;
include/net/ip_vs.h
454
struct task_struct *task; /* task if running */
include/rdma/restrack.h
107
struct task_struct *task;
include/rv/ltl_monitor.h
107
ltl_trace_event(struct task_struct *task, struct ltl_monitor *mon, unsigned long *next_state)
include/rv/ltl_monitor.h
127
CONCATENATE(trace_event_, MONITOR_NAME)(task, states, atoms.buffer, next);
include/rv/ltl_monitor.h
130
static void ltl_validate(struct task_struct *task, struct ltl_monitor *mon)
include/rv/ltl_monitor.h
142
ltl_trace_event(task, mon, next_states);
include/rv/ltl_monitor.h
147
ltl_illegal_state(task, mon);
include/rv/ltl_monitor.h
150
static void ltl_atom_update(struct task_struct *task, enum ltl_atom atom, bool value)
include/rv/ltl_monitor.h
152
struct ltl_monitor *mon = ltl_get_monitor(task);
include/rv/ltl_monitor.h
155
ltl_atoms_fetch(task, mon);
include/rv/ltl_monitor.h
158
ltl_attempt_start(task, mon);
include/rv/ltl_monitor.h
162
ltl_validate(task, mon);
include/rv/ltl_monitor.h
165
static void __maybe_unused ltl_atom_pulse(struct task_struct *task, enum ltl_atom atom, bool value)
include/rv/ltl_monitor.h
167
struct ltl_monitor *mon = ltl_get_monitor(task);
include/rv/ltl_monitor.h
169
ltl_atom_update(task, atom, value);
include/rv/ltl_monitor.h
172
ltl_validate(task, mon);
include/rv/ltl_monitor.h
24
static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon);
include/rv/ltl_monitor.h
25
static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation);
include/rv/ltl_monitor.h
27
static struct ltl_monitor *ltl_get_monitor(struct task_struct *task)
include/rv/ltl_monitor.h
29
return &task->rv[ltl_monitor_slot].ltl_mon;
include/rv/ltl_monitor.h
32
static void ltl_task_init(struct task_struct *task, bool task_creation)
include/rv/ltl_monitor.h
34
struct ltl_monitor *mon = ltl_get_monitor(task);
include/rv/ltl_monitor.h
41
ltl_atoms_init(task, mon, task_creation);
include/rv/ltl_monitor.h
42
ltl_atoms_fetch(task, mon);
include/rv/ltl_monitor.h
45
static void handle_task_newtask(void *data, struct task_struct *task, u64 flags)
include/rv/ltl_monitor.h
47
ltl_task_init(task, true);
include/rv/ltl_monitor.h
84
static void ltl_illegal_state(struct task_struct *task, struct ltl_monitor *mon)
include/rv/ltl_monitor.h
86
CONCATENATE(trace_error_, MONITOR_NAME)(task);
include/rv/ltl_monitor.h
88
task->comm, task->pid);
include/rv/ltl_monitor.h
91
static void ltl_attempt_start(struct task_struct *task, struct ltl_monitor *mon)
include/rv/ltl_monitor.h
94
ltl_start(task, mon);
include/scsi/libiscsi.h
138
static inline int iscsi_task_has_unsol_data(struct iscsi_task *task)
include/scsi/libiscsi.h
140
return task->unsol_r2t.data_length > task->unsol_r2t.sent;
include/scsi/libiscsi.h
143
static inline void* iscsi_next_hdr(struct iscsi_task *task)
include/scsi/libiscsi.h
145
return (void*)task->hdr + task->hdr_len;
include/scsi/libiscsi.h
148
static inline bool iscsi_task_is_completed(struct iscsi_task *task)
include/scsi/libiscsi.h
150
return task->state == ISCSI_TASK_COMPLETED ||
include/scsi/libiscsi.h
151
task->state == ISCSI_TASK_ABRT_TMF ||
include/scsi/libiscsi.h
152
task->state == ISCSI_TASK_ABRT_SESS_RECOV;
include/scsi/libiscsi.h
157
struct iscsi_task *task;
include/scsi/libiscsi.h
205
struct iscsi_task *task; /* xmit task in progress */
include/scsi/libiscsi.h
470
extern void iscsi_prep_data_out_pdu(struct iscsi_task *task,
include/scsi/libiscsi.h
482
extern void iscsi_requeue_task(struct iscsi_task *task);
include/scsi/libiscsi.h
483
extern void iscsi_put_task(struct iscsi_task *task);
include/scsi/libiscsi.h
484
extern void __iscsi_put_task(struct iscsi_task *task);
include/scsi/libiscsi.h
485
extern bool iscsi_get_task(struct iscsi_task *task);
include/scsi/libiscsi.h
486
extern void iscsi_complete_scsi_task(struct iscsi_task *task,
include/scsi/libiscsi_tcp.h
88
extern void iscsi_tcp_cleanup_task(struct iscsi_task *task);
include/scsi/libiscsi_tcp.h
89
extern int iscsi_tcp_task_init(struct iscsi_task *task);
include/scsi/libiscsi_tcp.h
90
extern int iscsi_tcp_task_xmit(struct iscsi_task *task);
include/scsi/libsas.h
614
struct sas_task *task;
include/scsi/libsas.h
622
static inline bool sas_is_internal_abort(struct sas_task *task)
include/scsi/libsas.h
624
return task->task_proto == SAS_PROTOCOL_INTERNAL_ABORT;
include/scsi/libsas.h
627
static inline struct request *sas_task_find_rq(struct sas_task *task)
include/scsi/libsas.h
631
if (task->task_proto & SAS_PROTOCOL_STP_ALL) {
include/scsi/libsas.h
632
struct ata_queued_cmd *qc = task->uldd_task;
include/scsi/libsas.h
636
scmd = task->uldd_task;
include/scsi/libsas.h
668
void (*lldd_tmf_aborted)(struct sas_task *task);
include/scsi/libsas.h
669
bool (*lldd_abort_timeout)(struct sas_task *task, void *data);
include/scsi/libsas.h
719
extern void sas_ssp_task_response(struct device *dev, struct sas_task *task,
include/scsi/libsas.h
728
int sas_query_task(struct sas_task *task, u16 tag);
include/scsi/libsas.h
729
int sas_abort_task(struct sas_task *task, u16 tag);
include/scsi/scsi_transport_iscsi.h
110
int (*init_task) (struct iscsi_task *task);
include/scsi/scsi_transport_iscsi.h
111
int (*xmit_task) (struct iscsi_task *task);
include/scsi/scsi_transport_iscsi.h
112
void (*cleanup_task) (struct iscsi_task *task);
include/scsi/scsi_transport_iscsi.h
114
int (*alloc_pdu) (struct iscsi_task *task, uint8_t opcode);
include/scsi/scsi_transport_iscsi.h
115
int (*xmit_pdu) (struct iscsi_task *task);
include/scsi/scsi_transport_iscsi.h
116
int (*init_pdu) (struct iscsi_task *task, unsigned int offset,
include/scsi/scsi_transport_iscsi.h
158
u8 (*check_protection)(struct iscsi_task *task, sector_t *sector);
include/sound/compress_driver.h
175
int (*task_create) (struct snd_compr_stream *stream, struct snd_compr_task_runtime *task);
include/sound/compress_driver.h
176
int (*task_start) (struct snd_compr_stream *stream, struct snd_compr_task_runtime *task);
include/sound/compress_driver.h
177
int (*task_stop) (struct snd_compr_stream *stream, struct snd_compr_task_runtime *task);
include/sound/compress_driver.h
178
int (*task_free) (struct snd_compr_stream *stream, struct snd_compr_task_runtime *task);
include/sound/compress_driver.h
292
struct snd_compr_task_runtime *task);
include/trace/events/cgroup.h
123
struct task_struct *task, bool threadgroup),
include/trace/events/cgroup.h
125
TP_ARGS(dst_cgrp, path, task, threadgroup),
include/trace/events/cgroup.h
133
__string( comm, task->comm )
include/trace/events/cgroup.h
141
__entry->pid = task->pid;
include/trace/events/cgroup.h
153
struct task_struct *task, bool threadgroup),
include/trace/events/cgroup.h
155
TP_ARGS(dst_cgrp, path, task, threadgroup)
include/trace/events/cgroup.h
161
struct task_struct *task, bool threadgroup),
include/trace/events/cgroup.h
163
TP_ARGS(dst_cgrp, path, task, threadgroup)
include/trace/events/oom.h
101
__entry->pgtables = mm_pgtables_bytes(task->mm) >> 10;
include/trace/events/oom.h
102
__entry->oom_score_adj = task->signal->oom_score_adj;
include/trace/events/oom.h
14
TP_PROTO(struct task_struct *task),
include/trace/events/oom.h
16
TP_ARGS(task),
include/trace/events/oom.h
25
__entry->pid = task->pid;
include/trace/events/oom.h
26
memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
include/trace/events/oom.h
27
__entry->oom_score_adj = task->signal->oom_score_adj;
include/trace/events/oom.h
77
TP_PROTO(struct task_struct *task, uid_t uid),
include/trace/events/oom.h
79
TP_ARGS(task, uid),
include/trace/events/oom.h
83
__string(comm, task->comm)
include/trace/events/oom.h
94
__entry->pid = task->pid;
include/trace/events/oom.h
96
__entry->total_vm = PG_COUNT_TO_KB(task->mm->total_vm);
include/trace/events/oom.h
97
__entry->anon_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_ANONPAGES));
include/trace/events/oom.h
98
__entry->file_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_FILEPAGES));
include/trace/events/oom.h
99
__entry->shmem_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_SHMEMPAGES));
include/trace/events/rpcgss.h
100
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/rpcgss.h
113
const struct rpc_task *task, \
include/trace/events/rpcgss.h
116
TP_ARGS(task, maj_stat))
include/trace/events/rpcgss.h
344
const struct rpc_task *task
include/trace/events/rpcgss.h
347
TP_ARGS(task),
include/trace/events/rpcgss.h
355
__entry->task_id = task->tk_pid;
include/trace/events/rpcgss.h
356
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/rpcgss.h
365
const struct rpc_task *task,
include/trace/events/rpcgss.h
370
TP_ARGS(task, expected, received),
include/trace/events/rpcgss.h
380
__entry->task_id = task->tk_pid;
include/trace/events/rpcgss.h
381
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/rpcgss.h
394
const struct rpc_task *task
include/trace/events/rpcgss.h
397
TP_ARGS(task),
include/trace/events/rpcgss.h
407
const struct rpc_rqst *rqst = task->tk_rqstp;
include/trace/events/rpcgss.h
409
__entry->task_id = task->tk_pid;
include/trace/events/rpcgss.h
410
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/rpcgss.h
422
const struct rpc_task *task,
include/trace/events/rpcgss.h
427
TP_ARGS(task, seq_xmit, ret),
include/trace/events/rpcgss.h
439
__entry->task_id = task->tk_pid;
include/trace/events/rpcgss.h
440
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/rpcgss.h
441
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
include/trace/events/rpcgss.h
443
__entry->seqno = *task->tk_rqstp->rq_seqnos;
include/trace/events/rpcgss.h
456
const struct rpc_task *task,
include/trace/events/rpcgss.h
460
TP_ARGS(task, auth),
include/trace/events/rpcgss.h
473
__entry->task_id = task->tk_pid;
include/trace/events/rpcgss.h
474
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/rpcgss.h
475
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
include/trace/events/rpcgss.h
85
const struct rpc_task *task,
include/trace/events/rpcgss.h
89
TP_ARGS(task, maj_stat),
include/trace/events/rpcgss.h
99
__entry->task_id = task->tk_pid;
include/trace/events/rpcrdma.h
1082
const struct rpc_task *task = req->rl_slot.rq_task;
include/trace/events/rpcrdma.h
1084
__entry->task_id = task->tk_pid;
include/trace/events/rpcrdma.h
1085
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/rpcrdma.h
1256
const struct rpc_task *task,
include/trace/events/rpcrdma.h
1261
TP_ARGS(task, rep, credits),
include/trace/events/rpcrdma.h
1271
__entry->task_id = task->tk_pid;
include/trace/events/rpcrdma.h
1272
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/rpcrdma.h
1431
const struct rpc_task *task
include/trace/events/rpcrdma.h
1434
TP_ARGS(task),
include/trace/events/rpcrdma.h
1442
__entry->task_id = task->tk_pid;
include/trace/events/rpcrdma.h
1443
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/rpcrdma.h
392
const struct rpc_task *task,
include/trace/events/rpcrdma.h
398
TP_ARGS(task, pos, mr, nsegs),
include/trace/events/rpcrdma.h
412
__entry->task_id = task->tk_pid;
include/trace/events/rpcrdma.h
413
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/rpcrdma.h
434
const struct rpc_task *task, \
include/trace/events/rpcrdma.h
439
TP_ARGS(task, pos, mr, nsegs))
include/trace/events/rpcrdma.h
443
const struct rpc_task *task,
include/trace/events/rpcrdma.h
448
TP_ARGS(task, mr, nsegs),
include/trace/events/rpcrdma.h
461
__entry->task_id = task->tk_pid;
include/trace/events/rpcrdma.h
462
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/rpcrdma.h
482
const struct rpc_task *task, \
include/trace/events/rpcrdma.h
486
TP_ARGS(task, mr, nsegs))
include/trace/events/rpcrdma.h
522
const struct rpc_task *task = req->rl_slot.rq_task;
include/trace/events/rpcrdma.h
524
__entry->task_id = task->tk_pid;
include/trace/events/rpcrdma.h
525
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/sched.h
460
TP_PROTO(struct task_struct *task, struct linux_binprm *bprm),
include/trace/events/sched.h
462
TP_ARGS(task, bprm),
include/trace/events/sched.h
468
__string( comm, task->comm )
include/trace/events/sched.h
474
__entry->pid = task->pid;
include/trace/events/signal.h
52
TP_PROTO(int sig, struct kernel_siginfo *info, struct task_struct *task,
include/trace/events/signal.h
55
TP_ARGS(sig, info, task, group, result),
include/trace/events/signal.h
70
memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
include/trace/events/signal.h
71
__entry->pid = task->pid;
include/trace/events/sunrpc.h
100
TP_ARGS(task, xdr))
include/trace/events/sunrpc.h
1131
struct rpc_task *task = rqst->rq_task;
include/trace/events/sunrpc.h
1133
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
1134
__entry->client_id = task->tk_client ?
include/trace/events/sunrpc.h
1135
task->tk_client->cl_clid : -1;
include/trace/events/sunrpc.h
1138
__entry->timeout = task->tk_timeout;
include/trace/events/sunrpc.h
1140
__entry->version = task->tk_client->cl_vers;
include/trace/events/sunrpc.h
1175
const struct rpc_xprt *xprt, const struct rpc_task *task
include/trace/events/sunrpc.h
1178
TP_ARGS(xprt, task),
include/trace/events/sunrpc.h
1187
if (task) {
include/trace/events/sunrpc.h
1188
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
1189
__entry->client_id = task->tk_client ?
include/trace/events/sunrpc.h
1190
task->tk_client->cl_clid : -1;
include/trace/events/sunrpc.h
1212
const struct rpc_task *task \
include/trace/events/sunrpc.h
1214
TP_ARGS(xprt, task))
include/trace/events/sunrpc.h
1221
const struct rpc_xprt *xprt, const struct rpc_task *task
include/trace/events/sunrpc.h
1224
TP_ARGS(xprt, task),
include/trace/events/sunrpc.h
1236
if (task) {
include/trace/events/sunrpc.h
1237
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
1238
__entry->client_id = task->tk_client ?
include/trace/events/sunrpc.h
1239
task->tk_client->cl_clid : -1;
include/trace/events/sunrpc.h
1267
const struct rpc_task *task \
include/trace/events/sunrpc.h
1269
TP_ARGS(xprt, task))
include/trace/events/sunrpc.h
1376
const struct rpc_task *task,
include/trace/events/sunrpc.h
1380
TP_ARGS(clnt, task, bind_version),
include/trace/events/sunrpc.h
1389
__string(servername, task->tk_xprt->servername)
include/trace/events/sunrpc.h
1393
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
1397
__entry->protocol = task->tk_xprt->prot;
include/trace/events/sunrpc.h
1412
const struct rpc_task *task,
include/trace/events/sunrpc.h
1417
TP_ARGS(task, status, port),
include/trace/events/sunrpc.h
1427
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
1428
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/sunrpc.h
274
TP_PROTO(const struct rpc_task *task),
include/trace/events/sunrpc.h
276
TP_ARGS(task),
include/trace/events/sunrpc.h
285
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
286
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/sunrpc.h
287
__entry->status = task->tk_status;
include/trace/events/sunrpc.h
297
const struct rpc_task *task \
include/trace/events/sunrpc.h
299
TP_ARGS(task))
include/trace/events/sunrpc.h
308
TP_PROTO(const struct rpc_task *task),
include/trace/events/sunrpc.h
310
TP_ARGS(task),
include/trace/events/sunrpc.h
317
__string(progname, task->tk_client->cl_program->name)
include/trace/events/sunrpc.h
318
__string(procname, rpc_proc_name(task))
include/trace/events/sunrpc.h
322
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
323
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/sunrpc.h
324
__entry->version = task->tk_client->cl_vers;
include/trace/events/sunrpc.h
325
__entry->async = RPC_IS_ASYNC(task);
include/trace/events/sunrpc.h
366
TP_PROTO(const struct rpc_task *task, const void *action),
include/trace/events/sunrpc.h
368
TP_ARGS(task, action),
include/trace/events/sunrpc.h
380
__entry->client_id = task->tk_client ?
include/trace/events/sunrpc.h
381
task->tk_client->cl_clid : -1;
include/trace/events/sunrpc.h
382
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
384
__entry->runstate = task->tk_runstate;
include/trace/events/sunrpc.h
385
__entry->status = task->tk_status;
include/trace/events/sunrpc.h
386
__entry->flags = task->tk_flags;
include/trace/events/sunrpc.h
401
const struct rpc_task *task, \
include/trace/events/sunrpc.h
404
TP_ARGS(task, action))
include/trace/events/sunrpc.h
418
TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q),
include/trace/events/sunrpc.h
420
TP_ARGS(task, q),
include/trace/events/sunrpc.h
433
__entry->client_id = task->tk_client ?
include/trace/events/sunrpc.h
434
task->tk_client->cl_clid : -1;
include/trace/events/sunrpc.h
435
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
436
__entry->timeout = rpc_task_timeout(task);
include/trace/events/sunrpc.h
437
__entry->runstate = task->tk_runstate;
include/trace/events/sunrpc.h
438
__entry->status = task->tk_status;
include/trace/events/sunrpc.h
439
__entry->flags = task->tk_flags;
include/trace/events/sunrpc.h
456
const struct rpc_task *task, \
include/trace/events/sunrpc.h
459
TP_ARGS(task, q))
include/trace/events/sunrpc.h
466
TP_PROTO(const struct rpc_task *task),
include/trace/events/sunrpc.h
468
TP_ARGS(task),
include/trace/events/sunrpc.h
476
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
477
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/sunrpc.h
487
const struct rpc_task *task \
include/trace/events/sunrpc.h
489
TP_ARGS(task))
include/trace/events/sunrpc.h
497
const struct rpc_task *task
include/trace/events/sunrpc.h
500
TP_ARGS(task),
include/trace/events/sunrpc.h
506
__string(progname, task->tk_client->cl_program->name)
include/trace/events/sunrpc.h
508
__string(procname, rpc_proc_name(task))
include/trace/events/sunrpc.h
509
__string(servername, task->tk_xprt->servername)
include/trace/events/sunrpc.h
513
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
514
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/sunrpc.h
515
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
include/trace/events/sunrpc.h
517
__entry->version = task->tk_client->cl_vers;
include/trace/events/sunrpc.h
52
const struct rpc_task *task,
include/trace/events/sunrpc.h
532
const struct rpc_task *task \
include/trace/events/sunrpc.h
534
TP_ARGS(task))
include/trace/events/sunrpc.h
549
const struct rpc_task *task \
include/trace/events/sunrpc.h
551
TP_ARGS(task))
include/trace/events/sunrpc.h
56
TP_ARGS(task, xdr),
include/trace/events/sunrpc.h
561
const struct rpc_task *task,
include/trace/events/sunrpc.h
565
TP_ARGS(task, status),
include/trace/events/sunrpc.h
576
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
577
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/sunrpc.h
578
__entry->callsize = task->tk_rqstp->rq_callsize;
include/trace/events/sunrpc.h
579
__entry->recvsize = task->tk_rqstp->rq_rcvsize;
include/trace/events/sunrpc.h
592
const struct rpc_task *task,
include/trace/events/sunrpc.h
597
TP_ARGS(task, tk_status, rpc_status),
include/trace/events/sunrpc.h
607
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/sunrpc.h
608
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
622
const struct rpc_task *task,
include/trace/events/sunrpc.h
628
TP_ARGS(task, backlog, rtt, execute),
include/trace/events/sunrpc.h
635
__string(progname, task->tk_client->cl_program->name)
include/trace/events/sunrpc.h
636
__string(procname, rpc_proc_name(task))
include/trace/events/sunrpc.h
644
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/sunrpc.h
645
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
646
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
include/trace/events/sunrpc.h
647
__entry->version = task->tk_client->cl_vers;
include/trace/events/sunrpc.h
653
__entry->xprt_id = task->tk_xprt->id;
include/trace/events/sunrpc.h
694
const struct rpc_task *task = xdr->rqst->rq_task;
include/trace/events/sunrpc.h
696
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
697
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/sunrpc.h
699
__entry->version = task->tk_client->cl_vers;
include/trace/events/sunrpc.h
71
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
72
__entry->client_id = task->tk_client ?
include/trace/events/sunrpc.h
73
task->tk_client->cl_clid : -1;
include/trace/events/sunrpc.h
759
const struct rpc_task *task = xdr->rqst->rq_task;
include/trace/events/sunrpc.h
761
__entry->task_id = task->tk_pid;
include/trace/events/sunrpc.h
762
__entry->client_id = task->tk_client->cl_clid;
include/trace/events/sunrpc.h
764
__entry->version = task->tk_client->cl_vers;
include/trace/events/sunrpc.h
97
const struct rpc_task *task, \
include/trace/events/task.h
11
TP_PROTO(struct task_struct *task, u64 clone_flags),
include/trace/events/task.h
13
TP_ARGS(task, clone_flags),
include/trace/events/task.h
23
__entry->pid = task->pid;
include/trace/events/task.h
24
memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
include/trace/events/task.h
26
__entry->oom_score_adj = task->signal->oom_score_adj;
include/trace/events/task.h
3
#define TRACE_SYSTEM task
include/trace/events/task.h
36
TP_PROTO(struct task_struct *task, const char *comm),
include/trace/events/task.h
38
TP_ARGS(task, comm),
include/trace/events/task.h
48
__entry->pid = task->pid;
include/trace/events/task.h
49
memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
include/trace/events/task.h
51
__entry->oom_score_adj = task->signal->oom_score_adj;
include/uapi/linux/bpf.h
152
} task;
include/uapi/linux/bpf.h
6774
} task;
include/uapi/linux/perf_event.h
431
task : 1, /* trace fork/exit */
include/video/imx-ipu-v3.h
448
struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task);
io_uring/cancel.c
191
ret = io_async_cancel_one(node->task->io_uring, cd);
io_uring/cancel.c
491
struct io_uring_task *tctx = node->task->io_uring;
io_uring/fdinfo.c
238
task_work_pending(req->tctx->task));
io_uring/io-wq.c
1093
__set_notify_signal(worker->task);
io_uring/io-wq.c
126
struct task_struct *task;
io_uring/io-wq.c
1269
cpuset_cpus_allowed(data->task, wq->cpu_mask);
io_uring/io-wq.c
1288
wq->task = get_task_struct(data->task);
io_uring/io-wq.c
1293
put_task_struct(wq->task);
io_uring/io-wq.c
1324
while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
io_uring/io-wq.c
1342
if (!wq->task)
io_uring/io-wq.c
1376
put_task_struct(wq->task);
io_uring/io-wq.c
1377
wq->task = NULL;
io_uring/io-wq.c
1457
cpuset_cpus_allowed(tctx->io_wq->task, allowed_mask);
io_uring/io-wq.c
238
struct callback_head *cb = task_work_cancel_match(wq->task,
io_uring/io-wq.c
311
wake_up_process(worker->task);
io_uring/io-wq.c
410
if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
io_uring/io-wq.c
53
struct task_struct *task;
io_uring/io-wq.c
693
snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
io_uring/io-wq.c
790
worker->task = tsk;
io_uring/io-wq.c
945
if (worker->task)
io_uring/io-wq.c
967
__set_notify_signal(worker->task);
io_uring/io-wq.c
968
wake_up_process(worker->task);
io_uring/io-wq.c
974
if (!wq->task)
io_uring/io-wq.h
39
struct task_struct *task;
io_uring/io_uring.c
2375
ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
io_uring/io_uring.c
427
if (WARN_ON_ONCE(!same_thread_group(tctx->task, current)))
io_uring/io_uring.c
597
if (likely(tctx->task == current)) {
io_uring/io_uring.c
603
put_task_struct(tctx->task);
io_uring/io_uring.c
616
__cold void io_uring_drop_tctx_refs(struct task_struct *task)
io_uring/io_uring.c
618
struct io_uring_task *tctx = task->io_uring;
io_uring/io_uring.c
624
put_task_struct_many(task, refs);
io_uring/io_uring.h
193
__cold void io_uring_drop_tctx_refs(struct task_struct *task);
io_uring/msg_ring.c
225
struct task_struct *task = ctx->submitter_task;
io_uring/msg_ring.c
228
if (task_work_add(task, &msg->tw, TWA_SIGNAL))
io_uring/register.c
422
tctx = node->task->io_uring;
io_uring/rw.c
1294
if (timer.task)
io_uring/tctx.c
101
tctx->task = task;
io_uring/tctx.c
106
task->io_uring = tctx;
io_uring/tctx.c
146
node->task = current;
io_uring/tctx.c
17
struct task_struct *task)
io_uring/tctx.c
192
WARN_ON_ONCE(current != node->task);
io_uring/tctx.c
38
data.task = task;
io_uring/tctx.c
77
__cold int io_uring_alloc_task_context(struct task_struct *task,
io_uring/tctx.c
93
tctx->io_wq = io_init_wq_offload(ctx, task);
io_uring/tctx.h
5
struct task_struct *task;
io_uring/tctx.h
9
int io_uring_alloc_task_context(struct task_struct *task,
io_uring/tw.c
257
__set_notify_signal(tctx->task);
io_uring/tw.c
261
if (likely(!task_work_add(tctx->task, &tctx->task_work, ctx->notify_method)))
io_uring/waitid.c
316
iwa->wo.child_wait.private = req->tctx->task;
ipc/mqueue.c
1000
task = get_task_struct(this->task);
ipc/mqueue.c
1004
wake_q_add_safe(wake_q, task);
ipc/mqueue.c
1108
wait.task = current;
ipc/mqueue.c
1201
wait.task = current;
ipc/mqueue.c
127
struct task_struct *task;
ipc/mqueue.c
695
if (walk->task->prio <= current->prio) {
ipc/mqueue.c
791
struct task_struct *task;
ipc/mqueue.c
815
task = pid_task(info->notify_owner, PIDTYPE_TGID);
ipc/mqueue.c
816
if (task && task->self_exec_id ==
ipc/mqueue.c
819
&sig_i, task, PIDTYPE_TGID);
ipc/mqueue.c
997
struct task_struct *task;
ipc/namespace.c
214
static struct ns_common *ipcns_get(struct task_struct *task)
ipc/namespace.c
219
task_lock(task);
ipc/namespace.c
220
nsproxy = task->nsproxy;
ipc/namespace.c
223
task_unlock(task);
ipc/shm.c
445
void exit_shm(struct task_struct *task)
ipc/shm.c
451
task_lock(task);
ipc/shm.c
453
if (list_empty(&task->sysvshm.shm_clist)) {
ipc/shm.c
454
task_unlock(task);
ipc/shm.c
458
shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
ipc/shm.c
487
task_unlock(task);
ipc/shm.c
507
task_unlock(task);
kernel/audit.c
230
int auditd_test_task(struct task_struct *task)
kernel/audit.c
237
rc = (ac && ac->pid == task_tgid(task) ? 1 : 0);
kernel/audit.h
223
extern int auditd_test_task(struct task_struct *task);
kernel/bpf/bpf_task_storage.c
113
task = pid_task(pid, PIDTYPE_PID);
kernel/bpf/bpf_task_storage.c
114
if (!task) {
kernel/bpf/bpf_task_storage.c
120
task, (struct bpf_local_storage_map *)map, value, map_flags,
kernel/bpf/bpf_task_storage.c
129
static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
kernel/bpf/bpf_task_storage.c
133
sdata = task_storage_lookup(task, map, false);
kernel/bpf/bpf_task_storage.c
142
struct task_struct *task;
kernel/bpf/bpf_task_storage.c
156
task = pid_task(pid, PIDTYPE_PID);
kernel/bpf/bpf_task_storage.c
157
if (!task) {
kernel/bpf/bpf_task_storage.c
162
err = task_storage_delete(task, map);
kernel/bpf/bpf_task_storage.c
170
task, void *, value, u64, flags, gfp_t, gfp_flags)
kernel/bpf/bpf_task_storage.c
175
if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
kernel/bpf/bpf_task_storage.c
178
sdata = task_storage_lookup(task, map, true);
kernel/bpf/bpf_task_storage.c
183
if (refcount_read(&task->usage) &&
kernel/bpf/bpf_task_storage.c
186
task, (struct bpf_local_storage_map *)map, value,
kernel/bpf/bpf_task_storage.c
195
task)
kernel/bpf/bpf_task_storage.c
198
if (!task)
kernel/bpf/bpf_task_storage.c
205
return task_storage_delete(task, map);
kernel/bpf/bpf_task_storage.c
25
struct task_struct *task = owner;
kernel/bpf/bpf_task_storage.c
27
return &task->bpf_storage;
kernel/bpf/bpf_task_storage.c
31
task_storage_lookup(struct task_struct *task, struct bpf_map *map,
kernel/bpf/bpf_task_storage.c
38
rcu_dereference_check(task->bpf_storage, bpf_rcu_lock_held());
kernel/bpf/bpf_task_storage.c
46
void bpf_task_storage_free(struct task_struct *task)
kernel/bpf/bpf_task_storage.c
52
local_storage = rcu_dereference(task->bpf_storage);
kernel/bpf/bpf_task_storage.c
64
struct task_struct *task;
kernel/bpf/bpf_task_storage.c
78
task = pid_task(pid, PIDTYPE_PID);
kernel/bpf/bpf_task_storage.c
79
if (!task) {
kernel/bpf/bpf_task_storage.c
84
sdata = task_storage_lookup(task, map, true);
kernel/bpf/bpf_task_storage.c
96
struct task_struct *task;
kernel/bpf/helpers.c
227
struct task_struct *task = current;
kernel/bpf/helpers.c
229
if (unlikely(!task))
kernel/bpf/helpers.c
232
return (u64) task->tgid << 32 | task->pid;
kernel/bpf/helpers.c
243
struct task_struct *task = current;
kernel/bpf/helpers.c
247
if (unlikely(!task))
kernel/bpf/helpers.c
263
struct task_struct *task = current;
kernel/bpf/helpers.c
265
if (unlikely(!task))
kernel/bpf/helpers.c
2687
__bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
kernel/bpf/helpers.c
269
strscpy_pad(buf, task->comm, size);
kernel/bpf/helpers.c
2693
ret = task_under_cgroup_hierarchy(task, ancestor);
kernel/bpf/helpers.c
2731
bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
kernel/bpf/helpers.c
2733
struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);
kernel/bpf/helpers.c
4137
struct task_struct *task;
kernel/bpf/helpers.c
4157
if (ctx->task) {
kernel/bpf/helpers.c
4158
bpf_task_release(ctx->task);
kernel/bpf/helpers.c
4159
ctx->task = NULL;
kernel/bpf/helpers.c
4189
if (task_work_cancel(ctx->task, &ctx->work))
kernel/bpf/helpers.c
4239
err = task_work_add(ctx->task, &ctx->work, ctx->mode);
kernel/bpf/helpers.c
4329
static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work *tw,
kernel/bpf/helpers.c
4342
task = bpf_task_acquire(task);
kernel/bpf/helpers.c
4343
if (!task) {
kernel/bpf/helpers.c
4354
ctx->task = task;
kernel/bpf/helpers.c
4367
bpf_task_release(task);
kernel/bpf/helpers.c
4384
__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
kernel/bpf/helpers.c
4388
return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_SIGNAL);
kernel/bpf/helpers.c
4402
__bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw,
kernel/bpf/helpers.c
4406
return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_RESUME);
kernel/bpf/helpers.c
593
struct task_struct *task = current;
kernel/bpf/helpers.c
603
if (unlikely(!task))
kernel/bpf/helpers.c
606
pidns = task_active_pid_ns(task);
kernel/bpf/helpers.c
615
nsdata->pid = task_pid_nr_ns(task, pidns);
kernel/bpf/helpers.c
616
nsdata->tgid = task_tgid_nr_ns(task, pidns);
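
An illustrative sketch from the BPF side: consuming the bpf_get_current_pid_tgid() helper whose kernel half is quoted above ((u64)task->tgid << 32 | task->pid). A libbpf-style build is assumed, and the execve tracepoint is only an example attach point.

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("tracepoint/syscalls/sys_enter_execve")
	int trace_execve(void *ctx)
	{
		__u64 id = bpf_get_current_pid_tgid();
		__u32 tgid = id >> 32;		/* upper half: tgid ("PID" in ps) */
		__u32 tid  = (__u32)id;		/* lower half: the thread id */

		bpf_printk("execve: tgid=%u tid=%u", tgid, tid);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";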
kernel/bpf/stackmap.c
213
get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
kernel/bpf/stackmap.c
224
entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip,
kernel/bpf/stackmap.c
425
static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
kernel/bpf/stackmap.c
431
bool crosstask = task && task != current;
kernel/bpf/stackmap.c
450
if (task && user && !user_mode(regs))
kernel/bpf/stackmap.c
469
} else if (kernel && task) {
kernel/bpf/stackmap.c
470
trace = get_callchain_entry_for_task(task, max_depth);
kernel/bpf/stackmap.c
546
static long __bpf_get_task_stack(struct task_struct *task, void *buf, u32 size,
kernel/bpf/stackmap.c
552
if (!try_get_task_stack(task))
kernel/bpf/stackmap.c
555
regs = task_pt_regs(task);
kernel/bpf/stackmap.c
557
res = __bpf_get_stack(regs, task, NULL, buf, size, flags, may_fault);
kernel/bpf/stackmap.c
558
put_task_stack(task);
kernel/bpf/stackmap.c
563
BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
kernel/bpf/stackmap.c
566
return __bpf_get_task_stack(task, buf, size, flags, false /* !may_fault */);
kernel/bpf/stackmap.c
580
BPF_CALL_4(bpf_get_task_stack_sleepable, struct task_struct *, task, void *, buf,
kernel/bpf/stackmap.c
583
return __bpf_get_task_stack(task, buf, size, flags, true /* !may_fault */);
kernel/bpf/syscall.c
5579
struct task_struct *task;
kernel/bpf/syscall.c
5593
task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
kernel/bpf/syscall.c
5595
if (!task)
kernel/bpf/syscall.c
5599
file = fget_task(task, fd);
kernel/bpf/syscall.c
5600
put_task_struct(task);
kernel/bpf/task_iter.c
101
task = get_pid_task(pid, PIDTYPE_PID);
kernel/bpf/task_iter.c
1017
kit->task = next_task(kit->task);
kernel/bpf/task_iter.c
1018
if (kit->task == &init_task)
kernel/bpf/task_iter.c
1021
kit->pos = kit->task;
kernel/bpf/task_iter.c
106
return task;
kernel/bpf/task_iter.c
111
task = task_group_seq_get_next(common, tid, skip_if_dup_files);
kernel/bpf/task_iter.c
114
return task;
kernel/bpf/task_iter.c
122
task = get_pid_task(pid, PIDTYPE_PID);
kernel/bpf/task_iter.c
123
if (!task) {
kernel/bpf/task_iter.c
126
} else if (skip_if_dup_files && !thread_group_leader(task) &&
kernel/bpf/task_iter.c
127
task->files == task->group_leader->files) {
kernel/bpf/task_iter.c
128
put_task_struct(task);
kernel/bpf/task_iter.c
129
task = NULL;
kernel/bpf/task_iter.c
136
return task;
kernel/bpf/task_iter.c
142
struct task_struct *task;
kernel/bpf/task_iter.c
144
task = task_seq_get_next(&info->common, &info->tid, false);
kernel/bpf/task_iter.c
145
if (!task)
kernel/bpf/task_iter.c
150
return task;
kernel/bpf/task_iter.c
156
struct task_struct *task;
kernel/bpf/task_iter.c
161
task = task_seq_get_next(&info->common, &info->tid, false);
kernel/bpf/task_iter.c
162
if (!task)
kernel/bpf/task_iter.c
165
return task;
kernel/bpf/task_iter.c
170
__bpf_md_ptr(struct task_struct *, task);
kernel/bpf/task_iter.c
173
DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta, struct task_struct *task)
kernel/bpf/task_iter.c
175
static int __task_seq_show(struct seq_file *seq, struct task_struct *task,
kernel/bpf/task_iter.c
188
ctx.task = task;
kernel/bpf/task_iter.c
213
if ((!!linfo->task.tid + !!linfo->task.pid + !!linfo->task.pid_fd) > 1)
kernel/bpf/task_iter.c
216
aux->task.type = BPF_TASK_ITER_ALL;
kernel/bpf/task_iter.c
217
if (linfo->task.tid != 0) {
kernel/bpf/task_iter.c
218
aux->task.type = BPF_TASK_ITER_TID;
kernel/bpf/task_iter.c
219
aux->task.pid = linfo->task.tid;
kernel/bpf/task_iter.c
221
if (linfo->task.pid != 0) {
kernel/bpf/task_iter.c
222
aux->task.type = BPF_TASK_ITER_TGID;
kernel/bpf/task_iter.c
223
aux->task.pid = linfo->task.pid;
kernel/bpf/task_iter.c
225
if (linfo->task.pid_fd != 0) {
kernel/bpf/task_iter.c
226
aux->task.type = BPF_TASK_ITER_TGID;
kernel/bpf/task_iter.c
228
pid = pidfd_get_pid(linfo->task.pid_fd, &flags);
kernel/bpf/task_iter.c
233
aux->task.pid = tgid;
kernel/bpf/task_iter.c
252
struct task_struct *task;
kernel/bpf/task_iter.c
270
if (info->task) {
kernel/bpf/task_iter.c
271
curr_task = info->task;
kernel/bpf/task_iter.c
276
info->task = NULL;
kernel/bpf/task_iter.c
281
info->task = curr_task;
kernel/bpf/task_iter.c
299
info->task = NULL;
kernel/bpf/task_iter.c
303
info->task = NULL;
kernel/bpf/task_iter.c
314
info->task = NULL;
kernel/bpf/task_iter.c
334
__bpf_md_ptr(struct task_struct *, task);
kernel/bpf/task_iter.c
340
struct task_struct *task, u32 fd,
kernel/bpf/task_iter.c
357
ctx.task = info->task;
kernel/bpf/task_iter.c
376
put_task_struct(info->task);
kernel/bpf/task_iter.c
377
info->task = NULL;
kernel/bpf/task_iter.c
386
common->type = aux->task.type;
kernel/bpf/task_iter.c
387
common->pid = aux->task.pid;
kernel/bpf/task_iter.c
39
struct task_struct *task;
kernel/bpf/task_iter.c
411
struct task_struct *task;
kernel/bpf/task_iter.c
440
if (info->task) {
kernel/bpf/task_iter.c
441
curr_task = info->task;
kernel/bpf/task_iter.c
46
task = get_pid_task(pid, PIDTYPE_TGID);
kernel/bpf/task_iter.c
47
if (!task)
kernel/bpf/task_iter.c
53
return task;
kernel/bpf/task_iter.c
553
info->task = curr_task;
kernel/bpf/task_iter.c
563
info->task = NULL;
kernel/bpf/task_iter.c
571
info->task = NULL;
kernel/bpf/task_iter.c
599
__bpf_md_ptr(struct task_struct *, task);
kernel/bpf/task_iter.c
604
struct task_struct *task, struct vm_area_struct *vma)
kernel/bpf/task_iter.c
619
ctx.task = info->task;
kernel/bpf/task_iter.c
62
task = get_pid_task(pid, PIDTYPE_PID);
kernel/bpf/task_iter.c
64
return task;
kernel/bpf/task_iter.c
648
put_task_struct(info->task);
kernel/bpf/task_iter.c
649
info->task = NULL;
kernel/bpf/task_iter.c
669
switch (aux->task.type) {
kernel/bpf/task_iter.c
67
task = find_task_by_pid_ns(common->pid_visiting, common->ns);
kernel/bpf/task_iter.c
671
info->iter.task.tid = aux->task.pid;
kernel/bpf/task_iter.c
674
info->iter.task.pid = aux->task.pid;
kernel/bpf/task_iter.c
68
if (!task)
kernel/bpf/task_iter.c
684
seq_printf(seq, "task_type:\t%s\n", iter_task_type_names[aux->task.type]);
kernel/bpf/task_iter.c
685
if (aux->task.type == BPF_TASK_ITER_TID)
kernel/bpf/task_iter.c
686
seq_printf(seq, "tid:\t%u\n", aux->task.pid);
kernel/bpf/task_iter.c
687
else if (aux->task.type == BPF_TASK_ITER_TGID)
kernel/bpf/task_iter.c
688
seq_printf(seq, "pid:\t%u\n", aux->task.pid);
kernel/bpf/task_iter.c
697
{ offsetof(struct bpf_iter__task, task),
kernel/bpf/task_iter.c
718
{ offsetof(struct bpf_iter__task_file, task),
kernel/bpf/task_iter.c
72
task = __next_thread(task);
kernel/bpf/task_iter.c
73
if (!task)
kernel/bpf/task_iter.c
741
{ offsetof(struct bpf_iter__task_vma, task),
kernel/bpf/task_iter.c
751
BPF_CALL_5(bpf_find_vma, struct task_struct *, task, u64, start,
kernel/bpf/task_iter.c
76
next_tid = __task_pid_nr_ns(task, PIDTYPE_PID, common->ns);
kernel/bpf/task_iter.c
763
if (!task)
kernel/bpf/task_iter.c
766
mm = task->mm;
kernel/bpf/task_iter.c
778
callback_fn((u64)(long)task, (u64)(long)vma,
kernel/bpf/task_iter.c
798
struct task_struct *task;
kernel/bpf/task_iter.c
80
if (skip_if_dup_files && task->files == task->group_leader->files)
kernel/bpf/task_iter.c
819
struct task_struct *task, u64 addr)
kernel/bpf/task_iter.c
836
kit->data->task = get_task_struct(task);
kernel/bpf/task_iter.c
837
kit->data->mm = task->mm;
kernel/bpf/task_iter.c
84
get_task_struct(task);
kernel/bpf/task_iter.c
85
return task;
kernel/bpf/task_iter.c
854
if (kit->data->task)
kernel/bpf/task_iter.c
855
put_task_struct(kit->data->task);
kernel/bpf/task_iter.c
877
put_task_struct(kit->data->task);
kernel/bpf/task_iter.c
92
struct task_struct *task = NULL;
kernel/bpf/task_iter.c
949
struct task_struct *task;
kernel/bpf/task_iter.c
989
kit->task = task__nullable;
kernel/bpf/task_iter.c
991
kit->task = &init_task;
kernel/bpf/task_iter.c
992
kit->pos = kit->task;
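
Note: the bpf/task_iter.c entries at 217-233 above show how the iterator's scope is chosen at attach time: a tid selects one thread, a pid selects a whole thread group, and a pid_fd is resolved to a tgid. A condensed sketch of that attach path follows; the default type and the tgid extraction helpers (pid_nr_ns(), task_active_pid_ns()) are assumptions, not quoted above.

    /* Condensed scope selection in the bpf task iterator attach path.
     * The default type and the tgid extraction step are assumptions. */
    unsigned int flags;
    struct pid *pid;
    pid_t tgid;

    aux->task.type = BPF_TASK_ITER_ALL;         /* assumed default */
    if (linfo->task.tid != 0) {
        aux->task.type = BPF_TASK_ITER_TID;     /* iterate one thread */
        aux->task.pid = linfo->task.tid;
    }
    if (linfo->task.pid != 0) {
        aux->task.type = BPF_TASK_ITER_TGID;    /* whole thread group */
        aux->task.pid = linfo->task.pid;
    }
    if (linfo->task.pid_fd != 0) {
        aux->task.type = BPF_TASK_ITER_TGID;
        pid = pidfd_get_pid(linfo->task.pid_fd, &flags);
        if (IS_ERR(pid))
            return PTR_ERR(pid);
        tgid = pid_nr_ns(pid, task_active_pid_ns(current)); /* assumed */
        aux->task.pid = tgid;
        put_pid(pid);
    }
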
kernel/bpf/verifier.c
7308
struct task_struct *task;
kernel/cgroup/cgroup-internal.h
228
struct cgroup *task_cgroup_from_root(struct task_struct *task,
kernel/cgroup/cgroup-internal.h
259
void cgroup_procs_write_finish(struct task_struct *task,
kernel/cgroup/cgroup-v1.c
109
struct task_struct *task;
kernel/cgroup/cgroup-v1.c
141
task = css_task_iter_next(&it);
kernel/cgroup/cgroup-v1.c
142
} while (task && (task->flags & PF_EXITING));
kernel/cgroup/cgroup-v1.c
144
if (task)
kernel/cgroup/cgroup-v1.c
145
get_task_struct(task);
kernel/cgroup/cgroup-v1.c
148
if (task) {
kernel/cgroup/cgroup-v1.c
149
ret = cgroup_migrate(task, false, &mgctx);
kernel/cgroup/cgroup-v1.c
151
TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
kernel/cgroup/cgroup-v1.c
152
put_task_struct(task);
kernel/cgroup/cgroup-v1.c
154
} while (task && !ret);
kernel/cgroup/cgroup-v1.c
503
struct task_struct *task;
kernel/cgroup/cgroup-v1.c
512
task = cgroup_procs_write_start(buf, threadgroup, &lock_mode);
kernel/cgroup/cgroup-v1.c
513
ret = PTR_ERR_OR_ZERO(task);
kernel/cgroup/cgroup-v1.c
523
tcred = get_task_cred(task);
kernel/cgroup/cgroup-v1.c
532
ret = cgroup_attach_task(cgrp, task, threadgroup);
kernel/cgroup/cgroup-v1.c
535
cgroup_procs_write_finish(task, lock_mode);
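
Note: the cgroup-v1.c lines at 141-154 above come from the task-transfer loop: pick the next live task from the source css iterator (skipping PF_EXITING tasks), pin it past the iterator, migrate it, release it, and repeat until the iterator is exhausted or migration fails. A condensed sketch, assuming the iterator is restarted with css_task_iter_start()/css_task_iter_end() around each pick:

    do {
        css_task_iter_start(&from->self, 0, &it);   /* assumed setup */
        do {
            task = css_task_iter_next(&it);
        } while (task && (task->flags & PF_EXITING));   /* skip dying tasks */
        if (task)
            get_task_struct(task);      /* pin past the iterator */
        css_task_iter_end(&it);

        if (task) {
            ret = cgroup_migrate(task, false, &mgctx);
            if (!ret)
                TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
            put_task_struct(task);
        }
    } while (task && !ret);
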
kernel/cgroup/cgroup.c
1545
struct cgroup *task_cgroup_from_root(struct task_struct *task,
kernel/cgroup/cgroup.c
1552
return cset_cgroup_from_root(task_css_set(task), root);
kernel/cgroup/cgroup.c
2592
static void cgroup_migrate_add_task(struct task_struct *task,
kernel/cgroup/cgroup.c
2600
if (task->flags & PF_EXITING)
kernel/cgroup/cgroup.c
2604
WARN_ON_ONCE(list_empty(&task->cg_list));
kernel/cgroup/cgroup.c
2606
cset = task_css_set(task);
kernel/cgroup/cgroup.c
2612
css_set_skip_task_iters(cset, task);
kernel/cgroup/cgroup.c
2613
list_move_tail(&task->cg_list, &cset->mg_tasks);
kernel/cgroup/cgroup.c
2650
struct task_struct *task = tset->cur_task;
kernel/cgroup/cgroup.c
2653
if (!task)
kernel/cgroup/cgroup.c
2654
task = list_first_entry(&cset->mg_tasks,
kernel/cgroup/cgroup.c
2657
task = list_next_entry(task, cg_list);
kernel/cgroup/cgroup.c
2659
if (&task->cg_list != &cset->mg_tasks) {
kernel/cgroup/cgroup.c
2661
tset->cur_task = task;
kernel/cgroup/cgroup.c
2674
return task;
kernel/cgroup/cgroup.c
2678
task = NULL;
kernel/cgroup/cgroup.c
2697
struct task_struct *task, *tmp_task;
kernel/cgroup/cgroup.c
2722
list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
kernel/cgroup/cgroup.c
2723
struct css_set *from_cset = task_css_set(task);
kernel/cgroup/cgroup.c
2728
css_set_move_task(task, from_cset, to_cset, true);
kernel/cgroup/cgroup.c
2734
cgroup_freezer_migrate_task(task, from_cset->dfl_cgrp,
kernel/cgroup/cgroup.c
279
struct task_struct *task);
kernel/cgroup/cgroup.c
2993
struct task_struct *task;
kernel/cgroup/cgroup.c
3001
task = leader;
kernel/cgroup/cgroup.c
3003
cgroup_migrate_add_task(task, mgctx);
kernel/cgroup/cgroup.c
3006
} while_each_thread(leader, task);
kernel/cgroup/cgroup.c
3024
struct task_struct *task;
kernel/cgroup/cgroup.c
3029
task = leader;
kernel/cgroup/cgroup.c
3031
cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
kernel/cgroup/cgroup.c
3034
} while_each_thread(leader, task);
kernel/cgroup/cgroup.c
3127
void cgroup_procs_write_finish(struct task_struct *task,
kernel/cgroup/cgroup.c
3130
cgroup_attach_unlock(lock_mode, task);
kernel/cgroup/cgroup.c
3133
put_task_struct(task);
kernel/cgroup/cgroup.c
3233
struct task_struct *task, *ntask;
kernel/cgroup/cgroup.c
3236
list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
kernel/cgroup/cgroup.c
3237
cgroup_migrate_add_task(task, &mgctx);
kernel/cgroup/cgroup.c
4183
struct task_struct *task;
kernel/cgroup/cgroup.c
4192
while ((task = css_task_iter_next(&it))) {
kernel/cgroup/cgroup.c
4194
if (task->flags & PF_KTHREAD)
kernel/cgroup/cgroup.c
4198
if (__fatal_signal_pending(task))
kernel/cgroup/cgroup.c
4201
send_sig(SIGKILL, task, 0);
kernel/cgroup/cgroup.c
5067
struct task_struct *task)
kernel/cgroup/cgroup.c
5071
if (it->task_pos == &task->cg_list) {
kernel/cgroup/cgroup.c
5079
struct task_struct *task;
kernel/cgroup/cgroup.c
5112
task = list_entry(it->task_pos, struct task_struct, cg_list);
kernel/cgroup/cgroup.c
5117
if ((task->flags & PF_EXITING) && !atomic_read(&task->signal->live))
kernel/cgroup/cgroup.c
5122
if (!thread_group_leader(task))
kernel/cgroup/cgroup.c
5127
!atomic_read(&task->signal->live))
kernel/cgroup/cgroup.c
5371
struct task_struct *task;
kernel/cgroup/cgroup.c
5379
task = cgroup_procs_write_start(buf, threadgroup, &lock_mode);
kernel/cgroup/cgroup.c
5380
ret = PTR_ERR_OR_ZERO(task);
kernel/cgroup/cgroup.c
5386
src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
kernel/cgroup/cgroup.c
5401
ret = cgroup_attach_task(dst_cgrp, task, threadgroup);
kernel/cgroup/cgroup.c
5404
cgroup_procs_write_finish(task, lock_mode);
kernel/cgroup/cgroup.c
6258
struct task_struct *task;
kernel/cgroup/cgroup.c
6268
task = css_task_iter_next(&it);
kernel/cgroup/cgroup.c
6271
if (task)
kernel/cgroup/cgroup.c
7120
struct task_struct *task, *next;
kernel/cgroup/cgroup.c
7123
llist_for_each_entry_safe(task, next, lnode, cg_dead_lnode) {
kernel/cgroup/cgroup.c
7124
do_cgroup_task_dead(task);
kernel/cgroup/cgroup.c
7125
put_task_struct(task);
kernel/cgroup/cgroup.c
7140
void cgroup_task_dead(struct task_struct *task)
kernel/cgroup/cgroup.c
7142
get_task_struct(task);
kernel/cgroup/cgroup.c
7143
llist_add(&task->cg_dead_lnode, this_cpu_ptr(&cgrp_dead_tasks));
kernel/cgroup/cgroup.c
7149
void cgroup_task_dead(struct task_struct *task)
kernel/cgroup/cgroup.c
7151
do_cgroup_task_dead(task);
kernel/cgroup/cgroup.c
7155
void cgroup_task_release(struct task_struct *task)
kernel/cgroup/cgroup.c
7161
ss->release(task);
kernel/cgroup/cgroup.c
7165
void cgroup_task_free(struct task_struct *task)
kernel/cgroup/cgroup.c
7167
struct css_set *cset = task_css_set(task);
kernel/cgroup/cgroup.c
7169
if (!list_empty(&task->cg_list)) {
kernel/cgroup/cgroup.c
7171
css_set_skip_task_iters(task_css_set(task), task);
kernel/cgroup/cgroup.c
7172
list_del_init(&task->cg_list);
kernel/cgroup/cgroup.c
896
struct task_struct *task)
kernel/cgroup/cgroup.c
901
css_task_iter_skip(it, task);
kernel/cgroup/cgroup.c
919
static void css_set_move_task(struct task_struct *task,
kernel/cgroup/cgroup.c
929
WARN_ON_ONCE(list_empty(&task->cg_list));
kernel/cgroup/cgroup.c
931
css_set_skip_task_iters(from_cset, task);
kernel/cgroup/cgroup.c
932
list_del_init(&task->cg_list);
kernel/cgroup/cgroup.c
936
WARN_ON_ONCE(!list_empty(&task->cg_list));
kernel/cgroup/cgroup.c
946
WARN_ON_ONCE(task->flags & PF_EXITING);
kernel/cgroup/cgroup.c
948
cgroup_move_task(task, to_cset);
kernel/cgroup/cgroup.c
949
list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
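
Note: the cgroup.c lines quoted above at 919-949 belong to css_set_move_task(): detach the task's cg_list entry from its old css_set after telling live iterators to skip it, then append it to the destination's tasks or mg_tasks list. Reassembled from those lines; bookkeeping between the two halves is elided.

    static void css_set_move_task(struct task_struct *task,
                                  struct css_set *from_cset,
                                  struct css_set *to_cset,
                                  bool use_mg_tasks)
    {
        if (from_cset) {
            WARN_ON_ONCE(list_empty(&task->cg_list));
            css_set_skip_task_iters(from_cset, task);
            list_del_init(&task->cg_list);
        } else {
            WARN_ON_ONCE(!list_empty(&task->cg_list));
        }
        if (to_cset) {
            WARN_ON_ONCE(task->flags & PF_EXITING);
            cgroup_move_task(task, to_cset);
            list_add_tail(&task->cg_list, use_mg_tasks ?
                          &to_cset->mg_tasks : &to_cset->tasks);
        }
    }
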
kernel/cgroup/cpuset-internal.h
197
static inline struct cpuset *task_cs(struct task_struct *task)
kernel/cgroup/cpuset-internal.h
199
return css_cs(task_css(task, cpuset_cgrp_id));
kernel/cgroup/cpuset-v1.c
240
struct task_struct *task;
kernel/cgroup/cpuset-v1.c
243
while ((task = css_task_iter_next(&it)))
kernel/cgroup/cpuset-v1.c
244
cpuset1_update_task_spread_flags(cs, task);
kernel/cgroup/cpuset.c
1057
struct task_struct *task;
kernel/cgroup/cpuset.c
1061
while ((task = css_task_iter_next(&it))) {
kernel/cgroup/cpuset.c
1062
const struct cpumask *possible_mask = task_cpu_possible_mask(task);
kernel/cgroup/cpuset.c
1069
if (task->flags & (PF_KTHREAD | PF_NO_SETAFFINITY))
kernel/cgroup/cpuset.c
1075
set_cpus_allowed_ptr(task, new_cpus);
kernel/cgroup/cpuset.c
2619
struct task_struct *task;
kernel/cgroup/cpuset.c
2636
while ((task = css_task_iter_next(&it))) {
kernel/cgroup/cpuset.c
2640
cpuset_change_task_nodemask(task, &newmems);
kernel/cgroup/cpuset.c
2642
mm = get_task_mm(task);
kernel/cgroup/cpuset.c
2990
struct task_struct *task;
kernel/cgroup/cpuset.c
3025
cgroup_taskset_for_each(task, css, tset) {
kernel/cgroup/cpuset.c
3026
ret = task_can_attach(task);
kernel/cgroup/cpuset.c
3031
ret = security_task_setscheduler(task);
kernel/cgroup/cpuset.c
3036
if (dl_task(task)) {
kernel/cgroup/cpuset.c
3038
cs->sum_migrate_dl_bw += task->dl.dl_bw;
kernel/cgroup/cpuset.c
3101
static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
kernel/cgroup/cpuset.c
3106
guarantee_active_cpus(task, cpus_attach);
kernel/cgroup/cpuset.c
3108
cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
kernel/cgroup/cpuset.c
3114
WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
kernel/cgroup/cpuset.c
3116
cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
kernel/cgroup/cpuset.c
3117
cpuset1_update_task_spread_flags(cs, task);
kernel/cgroup/cpuset.c
3122
struct task_struct *task;
kernel/cgroup/cpuset.c
3152
cgroup_taskset_for_each(task, css, tset)
kernel/cgroup/cpuset.c
3153
cpuset_attach_task(cs, task);
kernel/cgroup/cpuset.c
3560
static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
kernel/cgroup/cpuset.c
3581
ret = task_can_attach(task);
kernel/cgroup/cpuset.c
3585
ret = security_task_setscheduler(task);
kernel/cgroup/cpuset.c
3599
static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
kernel/cgroup/cpuset.c
3619
static void cpuset_fork(struct task_struct *task)
kernel/cgroup/cpuset.c
3625
cs = task_cs(task);
kernel/cgroup/cpuset.c
3633
set_cpus_allowed_ptr(task, current->cpus_ptr);
kernel/cgroup/cpuset.c
3634
task->mems_allowed = current->mems_allowed;
kernel/cgroup/cpuset.c
3641
cpuset_attach_task(cs, task);
kernel/cgroup/cpuset.c
4373
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
kernel/cgroup/cpuset.c
4376
nodemask_pr_args(&task->mems_allowed));
kernel/cgroup/cpuset.c
4378
nodemask_pr_args(&task->mems_allowed));
kernel/cgroup/cpuset.c
916
struct task_struct *task;
kernel/cgroup/cpuset.c
923
while ((task = css_task_iter_next(&it)))
kernel/cgroup/cpuset.c
924
dl_add_task_root_domain(task);
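
Note: the cpuset.c entries at 2990-3038 above are the can-attach pass: every task in the taskset must clear both the generic and the LSM checks, and deadline tasks have their bandwidth summed so it can be reserved before the move. Condensed sketch; the nr_migrate_dl_tasks counter is an assumption, not quoted above.

    cgroup_taskset_for_each(task, css, tset) {
        ret = task_can_attach(task);
        if (ret)
            goto out_unlock;
        ret = security_task_setscheduler(task);
        if (ret)
            goto out_unlock;
        if (dl_task(task)) {
            cs->nr_migrate_dl_tasks++;          /* assumed counter */
            cs->sum_migrate_dl_bw += task->dl.dl_bw;
        }
    }
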
kernel/cgroup/debug.c
120
struct task_struct *task;
kernel/cgroup/debug.c
160
list_for_each_entry(task, &cset->tasks, cg_list) {
kernel/cgroup/debug.c
163
task_pid_vnr(task));
kernel/cgroup/debug.c
166
list_for_each_entry(task, &cset->mg_tasks, cg_list) {
kernel/cgroup/debug.c
169
task_pid_vnr(task));
kernel/cgroup/freezer.c
152
static void cgroup_freeze_task(struct task_struct *task, bool freeze)
kernel/cgroup/freezer.c
157
if (!lock_task_sighand(task, &flags))
kernel/cgroup/freezer.c
161
task->jobctl |= JOBCTL_TRAP_FREEZE;
kernel/cgroup/freezer.c
162
signal_wake_up(task, false);
kernel/cgroup/freezer.c
164
task->jobctl &= ~JOBCTL_TRAP_FREEZE;
kernel/cgroup/freezer.c
165
wake_up_process(task);
kernel/cgroup/freezer.c
168
unlock_task_sighand(task, &flags);
kernel/cgroup/freezer.c
177
struct task_struct *task;
kernel/cgroup/freezer.c
200
while ((task = css_task_iter_next(&it))) {
kernel/cgroup/freezer.c
205
if (task->flags & PF_KTHREAD)
kernel/cgroup/freezer.c
207
cgroup_freeze_task(task, freeze);
kernel/cgroup/freezer.c
225
void cgroup_freezer_migrate_task(struct task_struct *task,
kernel/cgroup/freezer.c
233
if (task->flags & PF_KTHREAD)
kernel/cgroup/freezer.c
242
!task->frozen)
kernel/cgroup/freezer.c
250
if (task->frozen) {
kernel/cgroup/freezer.c
260
cgroup_freeze_task(task, test_bit(CGRP_FREEZE, &dst->flags));
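
Note: the freezer.c lines at 152-168 above fit together into one small function: take the sighand lock (bailing out if the task is exiting), then either arm the JOBCTL_TRAP_FREEZE trap and kick the task toward signal delivery, or disarm it and wake the task. Reassembled from the quoted lines:

    static void cgroup_freeze_task(struct task_struct *task, bool freeze)
    {
        unsigned long flags;

        /* If the task is about to exit, the sighand lock is refused. */
        if (!lock_task_sighand(task, &flags))
            return;

        if (freeze) {
            task->jobctl |= JOBCTL_TRAP_FREEZE;
            signal_wake_up(task, false);    /* trap taken on signal path */
        } else {
            task->jobctl &= ~JOBCTL_TRAP_FREEZE;
            wake_up_process(task);
        }

        unlock_task_sighand(task, &flags);
    }
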
kernel/cgroup/legacy_freezer.c
157
struct task_struct *task;
kernel/cgroup/legacy_freezer.c
172
cgroup_taskset_for_each(task, new_css, tset) {
kernel/cgroup/legacy_freezer.c
176
__thaw_task(task);
kernel/cgroup/legacy_freezer.c
183
freeze_task(task);
kernel/cgroup/legacy_freezer.c
200
static void freezer_fork(struct task_struct *task)
kernel/cgroup/legacy_freezer.c
211
if (task_css_is_root(task, freezer_cgrp_id))
kernel/cgroup/legacy_freezer.c
217
freezer = task_freezer(task);
kernel/cgroup/legacy_freezer.c
219
freeze_task(task);
kernel/cgroup/legacy_freezer.c
246
struct task_struct *task;
kernel/cgroup/legacy_freezer.c
270
while ((task = css_task_iter_next(&it))) {
kernel/cgroup/legacy_freezer.c
271
if (freezing(task) && !frozen(task))
kernel/cgroup/legacy_freezer.c
310
struct task_struct *task;
kernel/cgroup/legacy_freezer.c
313
while ((task = css_task_iter_next(&it)))
kernel/cgroup/legacy_freezer.c
314
freeze_task(task);
kernel/cgroup/legacy_freezer.c
321
struct task_struct *task;
kernel/cgroup/legacy_freezer.c
324
while ((task = css_task_iter_next(&it)))
kernel/cgroup/legacy_freezer.c
325
__thaw_task(task);
kernel/cgroup/legacy_freezer.c
49
static inline struct freezer *task_freezer(struct task_struct *task)
kernel/cgroup/legacy_freezer.c
51
return css_freezer(task_css(task, freezer_cgrp_id));
kernel/cgroup/legacy_freezer.c
59
bool cgroup1_freezing(struct task_struct *task)
kernel/cgroup/legacy_freezer.c
64
ret = task_freezer(task)->state & CGROUP_FREEZING;
kernel/cgroup/namespace.c
112
static struct ns_common *cgroupns_get(struct task_struct *task)
kernel/cgroup/namespace.c
117
task_lock(task);
kernel/cgroup/namespace.c
118
nsproxy = task->nsproxy;
kernel/cgroup/namespace.c
123
task_unlock(task);
kernel/cgroup/pids.c
202
struct task_struct *task;
kernel/cgroup/pids.c
205
cgroup_taskset_for_each(task, dst_css, tset) {
kernel/cgroup/pids.c
215
old_css = task_css(task, pids_cgrp_id);
kernel/cgroup/pids.c
227
struct task_struct *task;
kernel/cgroup/pids.c
230
cgroup_taskset_for_each(task, dst_css, tset) {
kernel/cgroup/pids.c
235
old_css = task_css(task, pids_cgrp_id);
kernel/cgroup/pids.c
273
static int pids_can_fork(struct task_struct *task, struct css_set *cset)
kernel/cgroup/pids.c
286
static void pids_cancel_fork(struct task_struct *task, struct css_set *cset)
kernel/cgroup/pids.c
294
static void pids_release(struct task_struct *task)
kernel/cgroup/pids.c
296
struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
kernel/cred.c
126
const struct cred *get_task_cred(struct task_struct *task)
kernel/cred.c
133
cred = __task_cred((task));
kernel/cred.c
181
struct task_struct *task = current;
kernel/cred.c
191
old = task->cred;
kernel/cred.c
370
struct task_struct *task = current;
kernel/cred.c
371
const struct cred *old = task->real_cred;
kernel/cred.c
376
BUG_ON(task->cred != old);
kernel/cred.c
387
if (task->mm)
kernel/cred.c
388
set_dumpable(task->mm, suid_dumpable);
kernel/cred.c
389
task->pdeath_signal = 0;
kernel/cred.c
415
rcu_assign_pointer(task->real_cred, new);
kernel/cred.c
416
rcu_assign_pointer(task->cred, new);
kernel/cred.c
427
proc_id_connector(task, PROC_EVENT_UID);
kernel/cred.c
433
proc_id_connector(task, PROC_EVENT_GID);
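
Note: the cred.c line 133 above (`cred = __task_cred((task));`) sits inside a retry loop: a reference cannot be taken on a cred that is concurrently being freed, so get_task_cred() re-reads under RCU until the reference bump succeeds. The loop shape below is recalled from the kernel source rather than quoted above:

    const struct cred *get_task_cred(struct task_struct *task)
    {
        const struct cred *cred;

        rcu_read_lock();
        do {
            cred = __task_cred((task));
            BUG_ON(!cred);
        } while (!get_cred_rcu(cred));  /* fails if usage hit zero */
        rcu_read_unlock();

        return cred;
    }
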
kernel/debug/debug_core.c
603
kgdb_info[cpu].task = current;
kernel/debug/debug_core.c
646
kgdb_info[cpu].task = NULL;
kernel/debug/debug_core.c
667
(kgdb_info[cpu].task &&
kernel/debug/debug_core.c
668
kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
kernel/debug/debug_core.c
800
if (kgdb_info[sstep_cpu].task)
kernel/debug/debug_core.c
801
kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
kernel/debug/debug_core.c
811
kgdb_info[cpu].task = NULL;
kernel/debug/debug_core.h
37
struct task_struct *task;
kernel/debug/gdbstub.c
440
if (kgdb_info[-tid - 2].task)
kernel/debug/gdbstub.c
441
return kgdb_info[-tid - 2].task;
kernel/debug/gdbstub.c
501
thread = kgdb_info[ks->cpu].task;
kernel/debug/gdbstub.c
512
if (thread == kgdb_info[i].task)
kernel/debug/gdbstub.c
963
kgdb_usethread = kgdb_info[ks->cpu].task;
kernel/debug/gdbstub.c
964
ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
kernel/debug/kdb/kdb_debugger.c
41
kdb_current_task = kgdb_info[ks->cpu].task;
kernel/debug/kdb/kdb_private.h
212
#define KDB_TSK(cpu) kgdb_info[cpu].task
kernel/delayacct.c
305
void __delayacct_irq(struct task_struct *task, u32 delta)
kernel/delayacct.c
309
raw_spin_lock_irqsave(&task->delays->lock, flags);
kernel/delayacct.c
310
task->delays->irq_delay += delta;
kernel/delayacct.c
311
task->delays->irq_count++;
kernel/delayacct.c
312
if (delta > task->delays->irq_delay_max) {
kernel/delayacct.c
313
task->delays->irq_delay_max = delta;
kernel/delayacct.c
314
ktime_get_real_ts64(&task->delays->irq_delay_max_ts);
kernel/delayacct.c
316
if (delta && (!task->delays->irq_delay_min || delta < task->delays->irq_delay_min))
kernel/delayacct.c
317
task->delays->irq_delay_min = delta;
kernel/delayacct.c
318
raw_spin_unlock_irqrestore(&task->delays->lock, flags);
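
Note: the delayacct.c entries at 305-318 above are quoted almost in full; joined up, they show the per-task IRQ delay accounting updating the running sum, count, max (with a timestamp) and min under delays->lock:

    void __delayacct_irq(struct task_struct *task, u32 delta)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&task->delays->lock, flags);
        task->delays->irq_delay += delta;
        task->delays->irq_count++;
        if (delta > task->delays->irq_delay_max) {
            task->delays->irq_delay_max = delta;
            ktime_get_real_ts64(&task->delays->irq_delay_max_ts);
        }
        if (delta && (!task->delays->irq_delay_min ||
                      delta < task->delays->irq_delay_min))
            task->delays->irq_delay_min = delta;
        raw_spin_unlock_irqrestore(&task->delays->lock, flags);
    }
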
kernel/entry/syscall_user_dispatch.c
118
task->syscall_dispatch.selector = selector;
kernel/entry/syscall_user_dispatch.c
119
task->syscall_dispatch.offset = offset;
kernel/entry/syscall_user_dispatch.c
120
task->syscall_dispatch.len = len;
kernel/entry/syscall_user_dispatch.c
121
task->syscall_dispatch.on_dispatch = false;
kernel/entry/syscall_user_dispatch.c
124
set_task_syscall_work(task, SYSCALL_USER_DISPATCH);
kernel/entry/syscall_user_dispatch.c
126
clear_task_syscall_work(task, SYSCALL_USER_DISPATCH);
kernel/entry/syscall_user_dispatch.c
137
int syscall_user_dispatch_get_config(struct task_struct *task, unsigned long size,
kernel/entry/syscall_user_dispatch.c
140
struct syscall_user_dispatch *sd = &task->syscall_dispatch;
kernel/entry/syscall_user_dispatch.c
146
if (test_task_syscall_work(task, SYSCALL_USER_DISPATCH))
kernel/entry/syscall_user_dispatch.c
161
int syscall_user_dispatch_set_config(struct task_struct *task, unsigned long size,
kernel/entry/syscall_user_dispatch.c
172
return task_set_syscall_user_dispatch(task, cfg.mode, cfg.offset, cfg.len,
kernel/entry/syscall_user_dispatch.c
72
static int task_set_syscall_user_dispatch(struct task_struct *task, unsigned long mode,
kernel/events/core.c
10022
if (event->ctx->task) {
kernel/events/core.c
10040
if (event->ctx->task)
kernel/events/core.c
10050
static void perf_event_switch(struct task_struct *task,
kernel/events/core.c
10058
.task = task,
kernel/events/core.c
10071
if (!sched_in && task_is_runnable(task)) {
kernel/events/core.c
1043
static void perf_cgroup_switch(struct task_struct *task)
kernel/events/core.c
1055
cgrp = perf_cgroup_from_task(task, NULL);
kernel/events/core.c
11325
struct task_struct *task)
kernel/events/core.c
11335
rctx, task);
kernel/events/core.c
11384
struct task_struct *task)
kernel/events/core.c
11423
if (task && task != current) {
kernel/events/core.c
11427
ctx = rcu_dereference(task->perf_event_ctxp);
kernel/events/core.c
11834
struct task_struct *task = READ_ONCE(event->ctx->task);
kernel/events/core.c
11844
if (task == TASK_TOMBSTONE)
kernel/events/core.c
11848
mm = get_task_mm(task);
kernel/events/core.c
12051
if (!event->ctx->task)
kernel/events/core.c
1264
static void perf_cgroup_switch(struct task_struct *task)
kernel/events/core.c
12864
perf_event_exit_event(event, ctx, ctx->task, true);
kernel/events/core.c
13213
if (event->attr.task)
kernel/events/core.c
13268
struct task_struct *task,
kernel/events/core.c
13280
if (!task || cpu != -1)
kernel/events/core.c
13283
if (attr->sigtrap && !task) {
kernel/events/core.c
13340
if (task) {
kernel/events/core.c
13347
event->hw.target = get_task_struct(task);
kernel/events/core.c
13423
if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1))
kernel/events/core.c
13766
perf_check_permission(struct perf_event_attr *attr, struct task_struct *task)
kernel/events/core.c
13777
is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL);
kernel/events/core.c
13793
return is_capable || ptrace_may_access(task, ptrace_mode);
kernel/events/core.c
13815
struct task_struct *task = NULL;
kernel/events/core.c
1388
if (ctx->task && ctx->task != TASK_TOMBSTONE)
kernel/events/core.c
1389
put_task_struct(ctx->task);
kernel/events/core.c
13908
task = find_lively_task_by_vpid(pid);
kernel/events/core.c
13909
if (IS_ERR(task)) {
kernel/events/core.c
13910
err = PTR_ERR(task);
kernel/events/core.c
13915
if (task && group_leader &&
kernel/events/core.c
13924
event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
kernel/events/core.c
1393
if (ctx->task == TASK_TOMBSTONE)
kernel/events/core.c
13953
if (task) {
kernel/events/core.c
13954
err = down_read_interruptible(&task->signal->exec_update_lock);
kernel/events/core.c
13965
if (!perf_check_permission(&attr, task))
kernel/events/core.c
13972
ctx = find_get_context(task, event);
kernel/events/core.c
13980
if (ctx->task == TASK_TOMBSTONE) {
kernel/events/core.c
13985
if (!task) {
kernel/events/core.c
14168
if (task) {
kernel/events/core.c
14169
up_read(&task->signal->exec_update_lock);
kernel/events/core.c
14170
put_task_struct(task);
kernel/events/core.c
14194
if (task)
kernel/events/core.c
14195
up_read(&task->signal->exec_update_lock);
kernel/events/core.c
14199
if (task)
kernel/events/core.c
14200
put_task_struct(task);
kernel/events/core.c
14217
struct task_struct *task,
kernel/events/core.c
14239
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
kernel/events/core.c
14256
ctx = find_get_context(task, event);
kernel/events/core.c
14264
if (ctx->task == TASK_TOMBSTONE) {
kernel/events/core.c
14276
if (!task) {
kernel/events/core.c
14426
struct task_struct *task)
kernel/events/core.c
14432
if (task && task != TASK_TOMBSTONE)
kernel/events/core.c
14433
perf_event_read_event(child_event, task);
kernel/events/core.c
14451
struct task_struct *task,
kernel/events/core.c
14477
sync_child_event(event, task);
kernel/events/core.c
14515
static void perf_event_exit_task_context(struct task_struct *task, bool exit)
kernel/events/core.c
14520
ctx = perf_pin_task_context(task);
kernel/events/core.c
14549
RCU_INIT_POINTER(task->perf_event_ctxp, NULL);
kernel/events/core.c
14551
WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
kernel/events/core.c
14552
put_task_struct(task); /* cannot be last */
kernel/events/core.c
14566
perf_event_task(task, ctx, 0);
kernel/events/core.c
14569
perf_event_exit_event(child_event, ctx, exit ? task : NULL, false);
kernel/events/core.c
14599
void perf_event_exit_task(struct task_struct *task)
kernel/events/core.c
14603
WARN_ON_ONCE(task != current);
kernel/events/core.c
14605
mutex_lock(&task->perf_event_mutex);
kernel/events/core.c
14606
list_for_each_entry_safe(event, tmp, &task->perf_event_list,
kernel/events/core.c
14617
mutex_unlock(&task->perf_event_mutex);
kernel/events/core.c
14619
perf_event_exit_task_context(task, true);
kernel/events/core.c
14627
perf_event_task(task, NULL, 0);
kernel/events/core.c
14636
detach_task_ctx_data(task);
kernel/events/core.c
14646
void perf_event_free_task(struct task_struct *task)
kernel/events/core.c
14648
perf_event_exit_task_context(task, false);
kernel/events/core.c
14651
void perf_event_delayed_put(struct task_struct *task)
kernel/events/core.c
14653
WARN_ON_ONCE(task->perf_event_ctxp);
kernel/events/core.c
15339
struct task_struct *task = info;
kernel/events/core.c
15342
perf_cgroup_switch(task);
kernel/events/core.c
15350
struct task_struct *task;
kernel/events/core.c
15353
cgroup_taskset_for_each(task, css, tset)
kernel/events/core.c
15354
task_function_call(task, __perf_cgroup_move, task);
kernel/events/core.c
1569
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
kernel/events/core.c
1585
ctx = rcu_dereference(task->perf_event_ctxp);
kernel/events/core.c
1598
if (ctx != rcu_dereference(task->perf_event_ctxp)) {
kernel/events/core.c
1605
if (ctx->task == TASK_TOMBSTONE ||
kernel/events/core.c
1610
WARN_ON_ONCE(ctx->task != task);
kernel/events/core.c
1625
perf_pin_task_context(struct task_struct *task)
kernel/events/core.c
1630
ctx = perf_lock_task_context(task, &flags);
kernel/events/core.c
1716
if (!ctx->task)
kernel/events/core.c
2610
if (ctx->task && ctx->is_active) {
kernel/events/core.c
2623
if (ctx->task) {
kernel/events/core.c
296
if (ctx->task) {
kernel/events/core.c
297
if (ctx->task != current) {
kernel/events/core.c
3061
if (ctx->task) {
kernel/events/core.c
3065
reprogram = (ctx->task == current);
kernel/events/core.c
3074
if (task_curr(ctx->task) && !reprogram) {
kernel/events/core.c
3124
struct task_struct *task = READ_ONCE(ctx->task);
kernel/events/core.c
3150
if (ctx->task == TASK_TOMBSTONE) {
kernel/events/core.c
3159
if (!task) {
kernel/events/core.c
3167
if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
kernel/events/core.c
3202
if (!task_function_call(task, __perf_install_in_context, event))
kernel/events/core.c
3206
task = ctx->task;
kernel/events/core.c
3207
if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
kernel/events/core.c
3220
if (task_curr(task)) {
kernel/events/core.c
3262
if (ctx->task)
kernel/events/core.c
329
struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
kernel/events/core.c
346
if (!task) {
kernel/events/core.c
351
if (task == TASK_TOMBSTONE)
kernel/events/core.c
3533
if (ctx->task && !(ctx->is_active & EVENT_ALL)) {
kernel/events/core.c
355
if (!task_function_call(task, event_function, &efs))
kernel/events/core.c
3591
if (ctx->task)
kernel/events/core.c
3626
if (ctx->task) {
kernel/events/core.c
365
task = ctx->task;
kernel/events/core.c
366
if (task == TASK_TOMBSTONE)
kernel/events/core.c
3751
struct task_struct *task, bool sched_in)
kernel/events/core.c
3760
pmu_ctx->pmu->sched_task(pmu_ctx, task, sched_in);
kernel/events/core.c
3765
perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
kernel/events/core.c
3767
struct perf_event_context *ctx = task->perf_event_ctxp;
kernel/events/core.c
3820
WRITE_ONCE(ctx->task, next);
kernel/events/core.c
3821
WRITE_ONCE(next_ctx->task, task);
kernel/events/core.c
3823
perf_ctx_sched_task_cb(ctx, task, false);
kernel/events/core.c
3834
RCU_INIT_POINTER(task->perf_event_ctxp, next_ctx);
kernel/events/core.c
3852
perf_ctx_sched_task_cb(ctx, task, false);
kernel/events/core.c
387
struct task_struct *task = READ_ONCE(ctx->task);
kernel/events/core.c
3895
struct task_struct *task, bool sched_in)
kernel/events/core.c
3909
pmu->sched_task(cpc->task_epc, task, sched_in);
kernel/events/core.c
392
if (task) {
kernel/events/core.c
393
if (task == TASK_TOMBSTONE)
kernel/events/core.c
3930
static void perf_event_switch(struct task_struct *task,
kernel/events/core.c
3944
void __perf_event_task_sched_out(struct task_struct *task,
kernel/events/core.c
3948
perf_pmu_sched_task(task, next, false);
kernel/events/core.c
3951
perf_event_switch(task, next, false);
kernel/events/core.c
3953
perf_event_context_sched_out(task, next);
kernel/events/core.c
3992
if (!pmu_ctx->ctx->task)
kernel/events/core.c
401
task = ctx->task;
kernel/events/core.c
4019
if (!ctx->task) {
kernel/events/core.c
402
if (task == TASK_TOMBSTONE)
kernel/events/core.c
405
if (task) {
kernel/events/core.c
412
if (WARN_ON_ONCE(task != current))
kernel/events/core.c
4206
if (ctx->task) {
kernel/events/core.c
4247
static void perf_event_context_sched_in(struct task_struct *task)
kernel/events/core.c
4253
ctx = rcu_dereference(task->perf_event_ctxp);
kernel/events/core.c
4261
perf_ctx_sched_task_cb(ctx, task, true);
kernel/events/core.c
4292
perf_ctx_sched_task_cb(cpuctx->task_ctx, task, true);
kernel/events/core.c
4317
struct task_struct *task)
kernel/events/core.c
4319
perf_event_context_sched_in(task);
kernel/events/core.c
4322
perf_event_switch(task, prev, true);
kernel/events/core.c
4325
perf_pmu_sched_task(prev, task, true);
kernel/events/core.c
4556
if (!pmu_ctx->ctx->task) {
kernel/events/core.c
4730
struct task_struct *task,
kernel/events/core.c
4746
if (WARN_ON_ONCE(ctx->task != current))
kernel/events/core.c
4758
perf_event_exit_event(event, ctx, ctx->task, false);
kernel/events/core.c
4825
if (ctx->task && cpuctx->task_ctx != ctx)
kernel/events/core.c
5055
alloc_perf_context(struct task_struct *task)
kernel/events/core.c
5064
if (task)
kernel/events/core.c
5065
ctx->task = get_task_struct(task);
kernel/events/core.c
5073
struct task_struct *task;
kernel/events/core.c
5077
task = current;
kernel/events/core.c
5079
task = find_task_by_vpid(vpid);
kernel/events/core.c
5080
if (task)
kernel/events/core.c
5081
get_task_struct(task);
kernel/events/core.c
5084
if (!task)
kernel/events/core.c
5087
return task;
kernel/events/core.c
5094
find_get_context(struct task_struct *task, struct perf_event *event)
kernel/events/core.c
5101
if (!task) {
kernel/events/core.c
5119
ctx = perf_lock_task_context(task, &flags);
kernel/events/core.c
5129
ctx = alloc_perf_context(task);
kernel/events/core.c
5135
mutex_lock(&task->perf_event_mutex);
kernel/events/core.c
5140
if (task->flags & PF_EXITING)
kernel/events/core.c
5142
else if (task->perf_event_ctxp)
kernel/events/core.c
5147
rcu_assign_pointer(task->perf_event_ctxp, ctx);
kernel/events/core.c
5149
mutex_unlock(&task->perf_event_mutex);
kernel/events/core.c
5172
if (!ctx->task) {
kernel/events/core.c
5333
attr->task || attr->ksymbol ||
kernel/events/core.c
5412
attach_task_ctx_data(struct task_struct *task, struct kmem_cache *ctx_cache,
kernel/events/core.c
5422
if (try_cmpxchg(&task->perf_ctx_data, &old, cd)) {
kernel/events/core.c
5431
if (task->flags & PF_EXITING) {
kernel/events/core.c
5433
if (try_cmpxchg(&task->perf_ctx_data, &cd, NULL))
kernel/events/core.c
5512
struct task_struct *task = event->hw.target;
kernel/events/core.c
5519
if (task)
kernel/events/core.c
5520
return attach_task_ctx_data(task, ctx_cache, false);
kernel/events/core.c
5588
struct task_struct *task = event->hw.target;
kernel/events/core.c
5592
if (task)
kernel/events/core.c
5593
return detach_task_ctx_data(task);
kernel/events/core.c
5620
if (event->attr.task)
kernel/events/core.c
7551
if (WARN_ON_ONCE(event->ctx->task != current))
kernel/events/core.c
8510
bool crosstask = event->ctx->task && event->ctx->task != current;
kernel/events/core.c
8856
struct task_struct *task)
kernel/events/core.c
8866
.pid = perf_event_pid(event, task),
kernel/events/core.c
8867
.tid = perf_event_tid(event, task),
kernel/events/core.c
9100
struct task_struct *task;
kernel/events/core.c
9118
event->attr.task;
kernel/events/core.c
9127
struct task_struct *task = task_event->task;
kernel/events/core.c
9140
task_event->event_id.pid = perf_event_pid(event, task);
kernel/events/core.c
9141
task_event->event_id.tid = perf_event_tid(event, task);
kernel/events/core.c
9145
task->real_parent);
kernel/events/core.c
9147
task->real_parent);
kernel/events/core.c
9164
static void perf_event_task(struct task_struct *task,
kernel/events/core.c
9176
.task = task,
kernel/events/core.c
9244
void perf_event_fork(struct task_struct *task)
kernel/events/core.c
9246
perf_event_task(task, NULL, 1);
kernel/events/core.c
9247
perf_event_namespaces(task);
kernel/events/core.c
9248
perf_event_alloc_task_data(task, current);
kernel/events/core.c
9256
struct task_struct *task;
kernel/events/core.c
9292
comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
kernel/events/core.c
9293
comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
kernel/events/core.c
9312
strscpy(comm, comm_event->task->comm);
kernel/events/core.c
9325
void perf_event_comm(struct task_struct *task, bool exec)
kernel/events/core.c
9333
.task = task,
kernel/events/core.c
9355
struct task_struct *task;
kernel/events/core.c
9392
namespaces_event->task);
kernel/events/core.c
9394
namespaces_event->task);
kernel/events/core.c
9406
struct task_struct *task,
kernel/events/core.c
9413
error = ns_get_path(&ns_path, task, ns_ops);
kernel/events/core.c
9422
void perf_event_namespaces(struct task_struct *task)
kernel/events/core.c
9431
.task = task,
kernel/events/core.c
9448
task, &mntns_operations);
kernel/events/core.c
9452
task, &userns_operations);
kernel/events/core.c
9456
task, &netns_operations);
kernel/events/core.c
9460
task, &utsns_operations);
kernel/events/core.c
9464
task, &ipcns_operations);
kernel/events/core.c
9468
task, &pidns_operations);
kernel/events/core.c
9472
task, &cgroupns_operations);
kernel/events/core.c
9996
struct task_struct *task;
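
Note: the many ctx->task tests in the events/core.c entries share one idiom: the field is NULL for a per-CPU context, TASK_TOMBSTONE once the owning task has gone away, and a live pointer otherwise. The dispatch below is condensed from the lines quoted around 329-366; the retry under ctx->lock is only sketched.

    struct task_struct *task = READ_ONCE(ctx->task);

    if (!task) {
        /* per-CPU context: run the update via a cross-call */
        cpu_function_call(event->cpu, event_function, &efs);
        return;
    }
    if (task == TASK_TOMBSTONE)
        return;                     /* context is being torn down */

    if (!task_function_call(task, event_function, &efs))
        return;                     /* ran on the task's CPU */

    raw_spin_lock_irq(&ctx->lock);  /* missed it: re-check and retry */
    task = ctx->task;
    if (task == TASK_TOMBSTONE) {
        raw_spin_unlock_irq(&ctx->lock);
        return;
    }
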
kernel/events/hw_breakpoint.c
808
if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
kernel/exit.c
234
void put_task_struct_rcu_user(struct task_struct *task)
kernel/exit.c
236
if (refcount_dec_and_test(&task->rcu_users))
kernel/exit.c
237
call_rcu(&task->rcu, delayed_put_task_struct);
kernel/exit.c
317
struct task_struct *task;
kernel/exit.c
334
task = rcu_dereference(w->task);
kernel/exit.c
335
if (task)
kernel/exit.c
336
ret = wake_up_process(task);
kernel/exit.c
429
self.task = tsk;
kernel/exit.c
430
if (self.task->flags & PF_SIGNALED)
kernel/exit.c
433
self.task = NULL;
kernel/exit.c
443
if (!self.task) /* see coredump_finish() */
kernel/fork.c
1340
struct file *get_task_exe_file(struct task_struct *task)
kernel/fork.c
1345
if (task->flags & PF_KTHREAD)
kernel/fork.c
1348
task_lock(task);
kernel/fork.c
1349
mm = task->mm;
kernel/fork.c
1352
task_unlock(task);
kernel/fork.c
1366
struct mm_struct *get_task_mm(struct task_struct *task)
kernel/fork.c
1370
if (task->flags & PF_KTHREAD)
kernel/fork.c
1373
task_lock(task);
kernel/fork.c
1374
mm = task->mm;
kernel/fork.c
1377
task_unlock(task);
kernel/fork.c
1382
static bool may_access_mm(struct mm_struct *mm, struct task_struct *task, unsigned int mode)
kernel/fork.c
1386
if (ptrace_may_access(task, mode))
kernel/fork.c
1393
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
kernel/fork.c
1398
err = down_read_killable(&task->signal->exec_update_lock);
kernel/fork.c
1402
mm = get_task_mm(task);
kernel/fork.c
1405
} else if (!may_access_mm(mm, task, mode)) {
kernel/fork.c
1409
up_read(&task->signal->exec_update_lock);
kernel/fork.c
1799
static inline void init_task_pid_links(struct task_struct *task)
kernel/fork.c
1804
INIT_HLIST_NODE(&task->pid_links[type]);
kernel/fork.c
1808
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
kernel/fork.c
1811
task->thread_pid = pid;
kernel/fork.c
1813
task->signal->pids[type] = pid;
kernel/fork.c
2565
struct task_struct *task;
kernel/fork.c
2574
task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
kernel/fork.c
2575
if (!IS_ERR(task)) {
kernel/fork.c
2576
init_idle_pids(task);
kernel/fork.c
2577
init_idle(task, cpu);
kernel/fork.c
2580
return task;
kernel/fork.c
3255
struct task_struct *task = current;
kernel/fork.c
3263
old = task->files;
kernel/fork.c
3264
task_lock(task);
kernel/fork.c
3265
task->files = copy;
kernel/fork.c
3266
task_unlock(task);
kernel/fork.c
797
struct task_struct *task = container_of(rhp, struct task_struct, rcu);
kernel/fork.c
799
__put_task_struct(task);
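
Note: the fork.c lines at 1366-1377 above reassemble into get_task_mm(): kernel threads never own a user mm (they only borrow one), so they are filtered first, and the mm pointer is read and pinned under task_lock() so it cannot be swapped out from under the reader. The mmget() call is assumed from context rather than quoted:

    struct mm_struct *get_task_mm(struct task_struct *task)
    {
        struct mm_struct *mm;

        if (task->flags & PF_KTHREAD)
            return NULL;            /* kthreads have no mm of their own */

        task_lock(task);
        mm = task->mm;
        if (mm)
            mmget(mm);              /* assumed: pin the user reference */
        task_unlock(task);

        return mm;
    }
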
kernel/futex/core.c
892
struct task_struct *task)
kernel/futex/core.c
908
q->task = task;
kernel/futex/futex.h
195
struct task_struct *task;
kernel/futex/futex.h
265
struct task_struct *task);
kernel/futex/futex.h
293
struct task_struct *task);
kernel/futex/futex.h
312
struct task_struct *task)
kernel/futex/futex.h
315
__futex_queue(q, hb, task);
kernel/futex/futex.h
368
struct task_struct *task,
kernel/futex/pi.c
518
struct task_struct *task,
kernel/futex/pi.c
522
u32 uval, newval, vpid = task_pid_vnr(task);
kernel/futex/pi.c
587
raw_spin_lock_irq(&task->pi_lock);
kernel/futex/pi.c
588
__attach_to_pi_owner(task, key, ps);
kernel/futex/pi.c
589
raw_spin_unlock_irq(&task->pi_lock);
kernel/futex/pi.c
624
new_owner = top_waiter->task;
kernel/futex/requeue.c
233
struct task_struct *task;
kernel/futex/requeue.c
247
task = READ_ONCE(q->task);
kernel/futex/requeue.c
251
wake_up_state(task, TASK_NORMAL);
kernel/futex/requeue.c
333
ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
kernel/futex/requeue.c
645
this->task);
kernel/futex/requeue.c
732
if (timeout && !timeout->task)
kernel/futex/waitwake.c
136
struct task_struct *p = q->task;
kernel/futex/waitwake.c
357
if (!timeout || timeout->task)
kernel/futex/waitwake.c
511
if (to && !to->task)
kernel/futex/waitwake.c
562
if (to && !to->task)
kernel/futex/waitwake.c
593
struct task_struct *task)
kernel/futex/waitwake.c
658
if (task == current)
kernel/futex/waitwake.c
660
futex_queue(q, hb, task);
kernel/futex/waitwake.c
693
if (to && !to->task)
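
Note: the repeated `if (to && !to->task)` / `if (timeout && !timeout->task)` tests in the futex entries (and in rtmutex.c line 1624) rely on the hrtimer_sleeper convention that the expiry callback clears the sleeper's ->task before waking it, so a NULL ->task after returning from schedule() means the wait timed out. A hypothetical condensed check, with an assumed name:

    /* After waking: distinguish timer expiry from a real futex wakeup. */
    static inline bool futex_wait_timed_out(struct hrtimer_sleeper *to)
    {
        return to && !to->task;     /* expiry handler cleared ->task */
    }
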
kernel/hung_task.c
137
static void debug_show_blocker(struct task_struct *task, unsigned long timeout)
kernel/hung_task.c
145
blocker = READ_ONCE(task->blocker);
kernel/hung_task.c
178
task->comm, task->pid);
kernel/hung_task.c
182
task->comm, task->pid);
kernel/hung_task.c
187
task->comm, task->pid);
kernel/hung_task.c
201
task->comm, task->pid, t->comm, t->pid);
kernel/hung_task.c
205
task->comm, task->pid, t->comm, t->pid);
kernel/hung_task.c
210
task->comm, task->pid, rwsem_blocked_as, t->comm,
kernel/hung_task.c
221
static inline void debug_show_blocker(struct task_struct *task, unsigned long timeout)
kernel/kcmp.c
62
get_file_raw_ptr(struct task_struct *task, unsigned int idx)
kernel/kcmp.c
66
file = fget_task(task, idx);
kernel/kcsan/core.c
358
void kcsan_save_irqtrace(struct task_struct *task)
kernel/kcsan/core.c
361
task->kcsan_save_irqtrace = task->irqtrace;
kernel/kcsan/core.c
365
void kcsan_restore_irqtrace(struct task_struct *task)
kernel/kcsan/core.c
368
task->irqtrace = task->kcsan_save_irqtrace;
kernel/kcsan/kcsan.h
31
void kcsan_save_irqtrace(struct task_struct *task);
kernel/kcsan/kcsan.h
32
void kcsan_restore_irqtrace(struct task_struct *task);
kernel/kcsan/report.c
377
static void print_verbose_info(struct task_struct *task)
kernel/kcsan/report.c
380
if (!task)
kernel/kcsan/report.c
384
kcsan_restore_irqtrace(task);
kernel/kcsan/report.c
387
debug_show_held_locks(task);
kernel/kcsan/report.c
388
print_irqtrace_events(task);
kernel/kcsan/report.c
459
print_verbose_info(other_info->task);
kernel/kcsan/report.c
536
other_info->task = current;
kernel/kcsan/report.c
561
other_info->task = NULL;
kernel/kcsan/report.c
569
other_info->task == current);
kernel/kcsan/report.c
64
struct task_struct *task;
kernel/kthread.c
1006
worker->task = NULL;
kernel/kthread.c
1053
struct task_struct *task;
kernel/kthread.c
1061
task = __kthread_create_on_node(kthread_worker_fn, worker,
kernel/kthread.c
1063
if (IS_ERR(task))
kernel/kthread.c
1067
worker->task = task;
kernel/kthread.c
1073
return ERR_CAST(task);
kernel/kthread.c
1144
kthread_bind(worker->task, cpu);
kernel/kthread.c
116
kthread->task = p;
kernel/kthread.c
1183
if (!worker->current_work && likely(worker->task))
kernel/kthread.c
1184
wake_up_process(worker->task);
kernel/kthread.c
1597
struct task_struct *task;
kernel/kthread.c
1599
task = worker->task;
kernel/kthread.c
1600
if (WARN_ON(!task))
kernel/kthread.c
1604
kthread_stop(task);
kernel/kthread.c
217
void *kthread_func(struct task_struct *task)
kernel/kthread.c
219
struct kthread *kthread = tsk_is_kthread(task);
kernel/kthread.c
234
void *kthread_data(struct task_struct *task)
kernel/kthread.c
236
return to_kthread(task)->data;
kernel/kthread.c
249
void *kthread_probe_data(struct task_struct *task)
kernel/kthread.c
251
struct kthread *kthread = tsk_is_kthread(task);
kernel/kthread.c
482
struct task_struct *task;
kernel/kthread.c
493
task = ERR_PTR(-ENOMEM);
kernel/kthread.c
521
task = create->result;
kernel/kthread.c
524
return task;
kernel/kthread.c
555
struct task_struct *task;
kernel/kthread.c
559
task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
kernel/kthread.c
562
return task;
kernel/kthread.c
71
struct task_struct *task;
kernel/kthread.c
895
if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
kernel/kthread.c
896
kthread_is_per_cpu(k->task))) {
kernel/kthread.c
915
set_cpus_allowed_ptr(k->task, affinity);
kernel/kthread.c
994
WARN_ON(worker->task && worker->task != current);
kernel/kthread.c
995
worker->task = current;
kernel/livepatch/transition.c
122
for_each_process_thread(g, task) {
kernel/livepatch/transition.c
123
WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
kernel/livepatch/transition.c
124
task->patch_state = KLP_TRANSITION_IDLE;
kernel/livepatch/transition.c
129
task = idle_task(cpu);
kernel/livepatch/transition.c
130
WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
kernel/livepatch/transition.c
131
task->patch_state = KLP_TRANSITION_IDLE;
kernel/livepatch/transition.c
175
void klp_update_patch_state(struct task_struct *task)
kernel/livepatch/transition.c
195
if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
kernel/livepatch/transition.c
196
task->patch_state = READ_ONCE(klp_target_state);
kernel/livepatch/transition.c
254
static int klp_check_stack(struct task_struct *task, const char **oldname)
kernel/livepatch/transition.c
264
ret = stack_trace_save_tsk_reliable(task, entries, MAX_STACK_ENTRIES);
kernel/livepatch/transition.c
284
static int klp_check_and_switch_task(struct task_struct *task, void *arg)
kernel/livepatch/transition.c
288
if (task_curr(task) && task != current)
kernel/livepatch/transition.c
291
ret = klp_check_stack(task, arg);
kernel/livepatch/transition.c
295
clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
kernel/livepatch/transition.c
296
task->patch_state = klp_target_state;
kernel/livepatch/transition.c
305
static bool klp_try_switch_task(struct task_struct *task)
kernel/livepatch/transition.c
311
if (task->patch_state == klp_target_state)
kernel/livepatch/transition.c
326
if (task == current)
kernel/livepatch/transition.c
329
ret = task_call_func(task, klp_check_and_switch_task, &old_name);
kernel/livepatch/transition.c
337
__func__, task->comm, task->pid);
kernel/livepatch/transition.c
341
__func__, task->comm, task->pid);
kernel/livepatch/transition.c
345
__func__, task->comm, task->pid, old_name);
kernel/livepatch/transition.c
350
__func__, ret, task->comm, task->pid);
kernel/livepatch/transition.c
389
struct task_struct *g, *task;
kernel/livepatch/transition.c
395
for_each_process_thread(g, task) {
kernel/livepatch/transition.c
396
if (!klp_patch_pending(task))
kernel/livepatch/transition.c
405
if (task->flags & PF_KTHREAD) {
kernel/livepatch/transition.c
410
wake_up_state(task, TASK_INTERRUPTIBLE);
kernel/livepatch/transition.c
416
set_notify_signal(task);
kernel/livepatch/transition.c
433
struct task_struct *g, *task;
kernel/livepatch/transition.c
449
for_each_process_thread(g, task)
kernel/livepatch/transition.c
450
if (!klp_try_switch_task(task))
kernel/livepatch/transition.c
459
task = idle_task(cpu);
kernel/livepatch/transition.c
461
if (!klp_try_switch_task(task)) {
kernel/livepatch/transition.c
466
} else if (task->patch_state != klp_target_state) {
kernel/livepatch/transition.c
468
clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
kernel/livepatch/transition.c
469
task->patch_state = klp_target_state;
kernel/livepatch/transition.c
511
struct task_struct *g, *task;
kernel/livepatch/transition.c
526
for_each_process_thread(g, task)
kernel/livepatch/transition.c
527
if (task->patch_state != klp_target_state)
kernel/livepatch/transition.c
528
set_tsk_thread_flag(task, TIF_PATCH_PENDING);
kernel/livepatch/transition.c
537
task = idle_task(cpu);
kernel/livepatch/transition.c
538
if (task->patch_state != klp_target_state)
kernel/livepatch/transition.c
539
set_tsk_thread_flag(task, TIF_PATCH_PENDING);
kernel/livepatch/transition.c
554
struct task_struct *g, *task;
kernel/livepatch/transition.c
578
for_each_process_thread(g, task) {
kernel/livepatch/transition.c
579
WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE);
kernel/livepatch/transition.c
580
task->patch_state = initial_state;
kernel/livepatch/transition.c
588
task = idle_task(cpu);
kernel/livepatch/transition.c
589
WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE);
kernel/livepatch/transition.c
590
task->patch_state = initial_state;
kernel/livepatch/transition.c
630
struct task_struct *g, *task;
kernel/livepatch/transition.c
643
for_each_process_thread(g, task)
kernel/livepatch/transition.c
644
clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
kernel/livepatch/transition.c
709
struct task_struct *g, *task;
kernel/livepatch/transition.c
715
for_each_process_thread(g, task)
kernel/livepatch/transition.c
716
klp_update_patch_state(task);
kernel/livepatch/transition.c
85
struct task_struct *g, *task;
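
Note: the livepatch/transition.c entries at 511-539 above show how a transition is started: every thread of every process, plus each CPU's idle task, that is not yet in the target patch state gets TIF_PATCH_PENDING set, to be resolved later at a safe point. Condensed; the tasklist_lock bracketing is assumed:

    read_lock(&tasklist_lock);      /* assumed locking context */
    for_each_process_thread(g, task)
        if (task->patch_state != klp_target_state)
            set_tsk_thread_flag(task, TIF_PATCH_PENDING);
    read_unlock(&tasklist_lock);

    for_each_possible_cpu(cpu) {
        task = idle_task(cpu);
        if (task->patch_state != klp_target_state)
            set_tsk_thread_flag(task, TIF_PATCH_PENDING);
    }
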
kernel/locking/lockdep.c
456
void lockdep_init_task(struct task_struct *task)
kernel/locking/lockdep.c
458
task->lockdep_depth = 0; /* no locks held yet */
kernel/locking/lockdep.c
459
task->curr_chain_key = INITIAL_CHAIN_KEY;
kernel/locking/lockdep.c
460
task->lockdep_recursion = 0;
kernel/locking/lockdep.c
4680
static inline unsigned int task_irq_context(struct task_struct *task)
kernel/locking/lockdep.c
4683
LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
kernel/locking/lockdep.c
474
void lockdep_set_selftest_task(struct task_struct *task)
kernel/locking/lockdep.c
476
lockdep_selftest_task_struct = task;
kernel/locking/lockdep.c
4915
static inline unsigned int task_irq_context(struct task_struct *task)
kernel/locking/lockdep.c
6795
void debug_show_held_locks(struct task_struct *task)
kernel/locking/lockdep.c
6801
lockdep_print_held_locks(task);
kernel/locking/mutex-debug.c
52
struct task_struct *task)
kernel/locking/mutex-debug.c
57
DEBUG_LOCKS_WARN_ON(__get_task_blocked_on(task));
kernel/locking/mutex-debug.c
61
struct task_struct *task)
kernel/locking/mutex-debug.c
63
struct mutex *blocked_on = __get_task_blocked_on(task);
kernel/locking/mutex-debug.c
66
DEBUG_LOCKS_WARN_ON(waiter->task != task);
kernel/locking/mutex-debug.c
70
waiter->task = NULL;
kernel/locking/mutex.c
107
task = curr;
kernel/locking/mutex.c
110
if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
kernel/locking/mutex.c
111
if (task == curr)
kernel/locking/mutex.c
235
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
kernel/locking/mutex.c
246
new |= (unsigned long)task;
kernel/locking/mutex.c
247
if (task)
kernel/locking/mutex.c
640
waiter.task = current;
kernel/locking/mutex.c
91
unsigned long task = owner & ~MUTEX_FLAGS;
kernel/locking/mutex.c
93
if (task) {
kernel/locking/mutex.c
95
if (task != curr)
kernel/locking/mutex.c
971
next = waiter->task;
kernel/locking/mutex.h
16
struct task_struct *task;
kernel/locking/mutex.h
58
struct task_struct *task);
kernel/locking/mutex.h
60
struct task_struct *task);
kernel/locking/rtmutex.c
1000
waiter_clone_prio(waiter, task);
kernel/locking/rtmutex.c
1001
rt_mutex_enqueue_pi(task, waiter);
kernel/locking/rtmutex.c
1002
rt_mutex_adjust_prio(lock, task);
kernel/locking/rtmutex.c
1015
rt_mutex_dequeue_pi(task, waiter);
kernel/locking/rtmutex.c
1017
waiter_clone_prio(waiter, task);
kernel/locking/rtmutex.c
1018
rt_mutex_enqueue_pi(task, waiter);
kernel/locking/rtmutex.c
1019
rt_mutex_adjust_prio(lock, task);
kernel/locking/rtmutex.c
1037
next_lock = task_blocked_on_lock(task);
kernel/locking/rtmutex.c
1045
raw_spin_unlock(&task->pi_lock);
kernel/locking/rtmutex.c
1069
raw_spin_unlock_irq(&task->pi_lock);
kernel/locking/rtmutex.c
1071
put_task_struct(task);
kernel/locking/rtmutex.c
1087
try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
kernel/locking/rtmutex.c
1149
if (!rt_mutex_steal(task_to_waiter(task),
kernel/locking/rtmutex.c
1175
raw_spin_lock(&task->pi_lock);
kernel/locking/rtmutex.c
1176
task->pi_blocked_on = NULL;
kernel/locking/rtmutex.c
1183
rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
kernel/locking/rtmutex.c
1184
raw_spin_unlock(&task->pi_lock);
kernel/locking/rtmutex.c
1191
rt_mutex_set_owner(lock, task);
kernel/locking/rtmutex.c
1205
struct task_struct *task,
kernel/locking/rtmutex.c
1229
if (owner == task && !(build_ww_mutex() && ww_ctx))
kernel/locking/rtmutex.c
1232
raw_spin_lock(&task->pi_lock);
kernel/locking/rtmutex.c
1233
waiter->task = task;
kernel/locking/rtmutex.c
1235
waiter_update_prio(waiter, task);
kernel/locking/rtmutex.c
1236
waiter_clone_prio(waiter, task);
kernel/locking/rtmutex.c
1243
task->pi_blocked_on = waiter;
kernel/locking/rtmutex.c
1245
raw_spin_unlock(&task->pi_lock);
kernel/locking/rtmutex.c
1254
raw_spin_lock(&task->pi_lock);
kernel/locking/rtmutex.c
1256
task->pi_blocked_on = NULL;
kernel/locking/rtmutex.c
1257
raw_spin_unlock(&task->pi_lock);
kernel/locking/rtmutex.c
1299
next_lock, waiter, task);
kernel/locking/rtmutex.c
1624
if (timeout && !timeout->task) {
kernel/locking/rtmutex.c
349
static __always_inline int __waiter_prio(struct task_struct *task)
kernel/locking/rtmutex.c
351
int prio = task->prio;
kernel/locking/rtmutex.c
363
waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
kernel/locking/rtmutex.c
368
waiter->tree.prio = __waiter_prio(task);
kernel/locking/rtmutex.c
369
waiter->tree.deadline = task->dl.deadline;
kernel/locking/rtmutex.c
376
waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
kernel/locking/rtmutex.c
379
lockdep_assert_held(&task->pi_lock);
kernel/locking/rtmutex.c
508
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
kernel/locking/rtmutex.c
510
lockdep_assert_held(&task->pi_lock);
kernel/locking/rtmutex.c
512
rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less);
kernel/locking/rtmutex.c
516
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
kernel/locking/rtmutex.c
518
lockdep_assert_held(&task->pi_lock);
kernel/locking/rtmutex.c
523
rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters);
kernel/locking/rtmutex.c
537
pi_task = task_top_pi_waiter(p)->task;
kernel/locking/rtmutex.c
544
struct task_struct *task,
kernel/locking/rtmutex.c
550
get_task_struct(task);
kernel/locking/rtmutex.c
551
wqh->rtlock_task = task;
kernel/locking/rtmutex.c
553
wake_q_add(&wqh->head, task);
kernel/locking/rtmutex.c
560
rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state);
kernel/locking/rtmutex.c
678
static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
kernel/locking/rtmutex.c
717
put_task_struct(task);
kernel/locking/rtmutex.c
732
raw_spin_lock_irq(&task->pi_lock);
kernel/locking/rtmutex.c
737
waiter = task->pi_blocked_on;
kernel/locking/rtmutex.c
801
if (!task_has_pi_waiters(task))
kernel/locking/rtmutex.c
809
if (top_waiter != task_top_pi_waiter(task)) {
kernel/locking/rtmutex.c
824
if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
kernel/locking/rtmutex.c
846
raw_spin_unlock_irq(&task->pi_lock);
kernel/locking/rtmutex.c
889
raw_spin_unlock(&task->pi_lock);
kernel/locking/rtmutex.c
890
put_task_struct(task);
kernel/locking/rtmutex.c
902
task = get_task_struct(rt_mutex_owner(lock));
kernel/locking/rtmutex.c
903
raw_spin_lock(&task->pi_lock);
kernel/locking/rtmutex.c
911
next_lock = task_blocked_on_lock(task);
kernel/locking/rtmutex.c
918
raw_spin_unlock(&task->pi_lock);
kernel/locking/rtmutex.c
948
waiter_update_prio(waiter, task);
kernel/locking/rtmutex.c
959
raw_spin_unlock(&task->pi_lock);
kernel/locking/rtmutex.c
960
put_task_struct(task);
kernel/locking/rtmutex.c
977
wake_up_state(top_waiter->task, top_waiter->wake_state);
kernel/locking/rtmutex.c
988
task = get_task_struct(rt_mutex_owner(lock));
kernel/locking/rtmutex.c
989
raw_spin_lock(&task->pi_lock);
kernel/locking/rtmutex.c
999
rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
kernel/locking/rtmutex_api.c
313
struct task_struct *task,
kernel/locking/rtmutex_api.c
320
if (try_to_take_rt_mutex(lock, task, NULL))
kernel/locking/rtmutex_api.c
324
ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
kernel/locking/rtmutex_api.c
361
struct task_struct *task)
kernel/locking/rtmutex_api.c
367
ret = __rt_mutex_start_proxy_lock(lock, waiter, task, &wake_q);
kernel/locking/rtmutex_api.c
477
void __sched rt_mutex_adjust_pi(struct task_struct *task)
kernel/locking/rtmutex_api.c
483
raw_spin_lock_irqsave(&task->pi_lock, flags);
kernel/locking/rtmutex_api.c
485
waiter = task->pi_blocked_on;
kernel/locking/rtmutex_api.c
486
if (!waiter || rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
kernel/locking/rtmutex_api.c
487
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
kernel/locking/rtmutex_api.c
491
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
kernel/locking/rtmutex_api.c
494
get_task_struct(task);
kernel/locking/rtmutex_api.c
496
rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
kernel/locking/rtmutex_api.c
497
next_lock, NULL, task);
kernel/locking/rtmutex_api.c
509
void rt_mutex_debug_task_free(struct task_struct *task)
kernel/locking/rtmutex_api.c
511
DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
kernel/locking/rtmutex_api.c
512
DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
kernel/locking/rtmutex_common.h
209
waiter->task = NULL;
kernel/locking/rtmutex_common.h
55
struct task_struct *task;
kernel/locking/rtmutex_common.h
86
struct task_struct *task,
kernel/locking/rtmutex_common.h
90
struct task_struct *task);
kernel/locking/rwsem.c
1032
waiter.task = current;
kernel/locking/rwsem.c
1074
if (!smp_load_acquire(&waiter.task)) {
kernel/locking/rwsem.c
1080
if (waiter.task)
kernel/locking/rwsem.c
1126
waiter.task = current;
kernel/locking/rwsem.c
339
struct task_struct *task;
kernel/locking/rwsem.c
435
wake_q_add(wake_q, waiter->task);
kernel/locking/rwsem.c
481
owner = waiter->task;
kernel/locking/rwsem.c
551
tsk = waiter->task;
kernel/locking/rwsem.c
560
smp_store_release(&waiter->task, NULL);
kernel/locking/rwsem.c
633
if (has_handoff || (!rt_or_dl_task(waiter->task) &&
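
Note: the rwsem.c entries above implement a grant handshake on waiter->task: the waker saves the pointer, publishes the grant by writing NULL with release semantics, and only then queues the wakeup; the sleeper loops on schedule() until an acquire load of waiter.task observes NULL. Condensed from the quoted lines; the sleep state and the exit-safe wakeup queueing are simplified.

    /* waker side (condensed from rwsem_mark_wake) */
    tsk = waiter->task;
    smp_store_release(&waiter->task, NULL); /* grant: waiter may return */
    wake_q_add(wake_q, tsk);        /* kernel uses an exit-safe variant */

    /* sleeper side (condensed from the down_read slowpath) */
    waiter.task = current;
    for (;;) {
        set_current_state(TASK_UNINTERRUPTIBLE);    /* assumed state */
        if (!smp_load_acquire(&waiter.task))
            break;                  /* pairs with the release above */
        schedule();
    }
    __set_current_state(TASK_RUNNING);
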
kernel/locking/semaphore.c
243
struct task_struct *task;
kernel/locking/semaphore.c
258
waiter.task = current;
kernel/locking/semaphore.c
328
wake_q_add(wake_q, waiter->task);
kernel/locking/ww_mutex.h
237
int a_prio = a->task->prio;
kernel/locking/ww_mutex.h
238
int b_prio = b->task->prio;
kernel/locking/ww_mutex.h
251
if (dl_time_before(b->task->dl.deadline,
kernel/locking/ww_mutex.h
252
a->task->dl.deadline))
kernel/locking/ww_mutex.h
255
if (dl_time_before(a->task->dl.deadline,
kernel/locking/ww_mutex.h
256
b->task->dl.deadline))
kernel/locking/ww_mutex.h
292
__clear_task_blocked_on(waiter->task, lock);
kernel/locking/ww_mutex.h
293
wake_q_add(wake_q, waiter->task);
kernel/pid.c
373
static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
kernel/pid.c
376
&task->thread_pid :
kernel/pid.c
377
&task->signal->pids[type];
kernel/pid.c
383
void attach_pid(struct task_struct *task, enum pid_type type)
kernel/pid.c
389
pid = *task_pid_ptr(task, type);
kernel/pid.c
390
hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
kernel/pid.c
393
static void __change_pid(struct pid **pids, struct task_struct *task,
kernel/pid.c
401
pid_ptr = task_pid_ptr(task, type);
kernel/pid.c
404
hlist_del_rcu(&task->pid_links[type]);
kernel/pid.c
415
void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type type)
kernel/pid.c
417
__change_pid(pids, task, type, NULL);
kernel/pid.c
420
void change_pid(struct pid **pids, struct task_struct *task, enum pid_type type,
kernel/pid.c
423
__change_pid(pids, task, type, pid);
kernel/pid.c
424
attach_pid(task, type);
kernel/pid.c
488
struct task_struct *task;
kernel/pid.c
491
task = find_task_by_vpid(nr);
kernel/pid.c
492
if (task)
kernel/pid.c
493
get_task_struct(task);
kernel/pid.c
496
return task;
kernel/pid.c
499
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
kernel/pid.c
503
pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
kernel/pid.c
553
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
kernel/pid.c
561
nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
kernel/pid.c
617
struct task_struct *task;
kernel/pid.c
637
task = get_pid_task(pid, type);
kernel/pid.c
639
if (!task)
kernel/pid.c
643
return task;
kernel/pid.c
872
static struct file *__pidfd_fget(struct task_struct *task, int fd)
kernel/pid.c
877
ret = down_read_killable(&task->signal->exec_update_lock);
kernel/pid.c
881
if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
kernel/pid.c
882
file = fget_task(task, fd);
kernel/pid.c
886
up_read(&task->signal->exec_update_lock);
kernel/pid.c
901
if (task->flags & PF_EXITING)
kernel/pid.c
912
struct task_struct *task;
kernel/pid.c
916
task = get_pid_task(pid, PIDTYPE_PID);
kernel/pid.c
917
if (!task)
kernel/pid.c
920
file = __pidfd_fget(task, fd);
kernel/pid.c
921
put_task_struct(task);
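
Note: the pid.c entries at 872-921 above pair up: the pidfd is resolved to a task with get_pid_task(), and __pidfd_fget() then takes the target's exec_update_lock and performs a ptrace-style access check before fetching the file. Condensed sketch; the error codes are assumptions:

    static struct file *__pidfd_fget(struct task_struct *task, int fd)
    {
        struct file *file;
        int ret;

        ret = down_read_killable(&task->signal->exec_update_lock);
        if (ret)
            return ERR_PTR(ret);

        file = ERR_PTR(-EPERM);     /* assumed default */
        if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
            file = fget_task(task, fd);

        up_read(&task->signal->exec_update_lock);
        return file;
    }
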
kernel/pid_namespace.c
196
struct task_struct *task, *me = current;
kernel/pid_namespace.c
229
task = pid_task(pid, PIDTYPE_PID);
kernel/pid_namespace.c
230
if (task && !__fatal_signal_pending(task))
kernel/pid_namespace.c
231
group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX);
kernel/pid_namespace.c
348
static struct ns_common *pidns_get(struct task_struct *task)
kernel/pid_namespace.c
353
ns = task_active_pid_ns(task);
kernel/pid_namespace.c
361
static struct ns_common *pidns_for_children_get(struct task_struct *task)
kernel/pid_namespace.c
365
task_lock(task);
kernel/pid_namespace.c
366
if (task->nsproxy) {
kernel/pid_namespace.c
367
ns = task->nsproxy->pid_ns_for_children;
kernel/pid_namespace.c
370
task_unlock(task);
kernel/printk/printk_ringbuffer_kunit_test.c
173
struct task_struct *task;
kernel/printk/printk_ringbuffer_kunit_test.c
180
set_tsk_thread_flag(wakeup->task, TIF_NOTIFY_SIGNAL);
kernel/printk/printk_ringbuffer_kunit_test.c
181
wake_up_process(wakeup->task);
kernel/printk/printk_ringbuffer_kunit_test.c
193
wakeup.task = current;
kernel/ptrace.c
163
static bool looks_like_a_spurious_pid(struct task_struct *task)
kernel/ptrace.c
165
if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
kernel/ptrace.c
168
if (task_pid_vnr(task) == task->ptrace_message)
kernel/ptrace.c
184
static bool ptrace_freeze_traced(struct task_struct *task)
kernel/ptrace.c
189
if (task->jobctl & JOBCTL_LISTENING)
kernel/ptrace.c
192
spin_lock_irq(&task->sighand->siglock);
kernel/ptrace.c
193
if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
kernel/ptrace.c
194
!__fatal_signal_pending(task)) {
kernel/ptrace.c
195
task->jobctl |= JOBCTL_PTRACE_FROZEN;
kernel/ptrace.c
198
spin_unlock_irq(&task->sighand->siglock);
kernel/ptrace.c
203
static void ptrace_unfreeze_traced(struct task_struct *task)
kernel/ptrace.c
212
if (lock_task_sighand(task, &flags)) {
kernel/ptrace.c
213
task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
kernel/ptrace.c
214
if (__fatal_signal_pending(task)) {
kernel/ptrace.c
215
task->jobctl &= ~JOBCTL_TRACED;
kernel/ptrace.c
216
wake_up_state(task, __TASK_TRACED);
kernel/ptrace.c
218
unlock_task_sighand(task, &flags);
kernel/ptrace.c
276
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
kernel/ptrace.c
298
if (same_thread_group(task, current))
kernel/ptrace.c
316
tcred = __task_cred(task);
kernel/ptrace.c
340
mm = task->mm;
kernel/ptrace.c
346
return security_ptrace_access_check(task, mode);
kernel/ptrace.c
349
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
kernel/ptrace.c
352
task_lock(task);
kernel/ptrace.c
353
err = __ptrace_may_access(task, mode);
kernel/ptrace.c
354
task_unlock(task);
kernel/ptrace.c
378
static inline void ptrace_set_stopped(struct task_struct *task, bool seize)
kernel/ptrace.c
380
guard(spinlock)(&task->sighand->siglock);
kernel/ptrace.c
384
send_signal_locked(SIGSTOP, SEND_SIG_PRIV, task, PIDTYPE_PID);
kernel/ptrace.c
402
if (task_is_stopped(task) &&
kernel/ptrace.c
403
task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
kernel/ptrace.c
404
task->jobctl &= ~JOBCTL_STOPPED;
kernel/ptrace.c
405
signal_wake_up_state(task, __TASK_STOPPED);
kernel/ptrace.c
409
static int ptrace_attach(struct task_struct *task, long request,
kernel/ptrace.c
435
audit_ptrace(task);
kernel/ptrace.c
437
if (unlikely(task->flags & PF_KTHREAD))
kernel/ptrace.c
439
if (same_thread_group(task, current))
kernel/ptrace.c
448
&task->signal->cred_guard_mutex) {
kernel/ptrace.c
450
scoped_guard (task_lock, task) {
kernel/ptrace.c
451
retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
kernel/ptrace.c
457
if (unlikely(task->exit_state))
kernel/ptrace.c
459
if (task->ptrace)
kernel/ptrace.c
462
task->ptrace = flags;
kernel/ptrace.c
463
ptrace_link(task, current);
kernel/ptrace.c
464
ptrace_set_stopped(task, seize);
kernel/ptrace.c
475
wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
kernel/ptrace.c
476
proc_ptrace_connector(task, PTRACE_ATTACH);
kernel/ptrace.c
792
static long ptrace_get_rseq_configuration(struct task_struct *task,
kernel/ptrace.c
796
.rseq_abi_pointer = (u64)(uintptr_t)task->rseq.usrptr,
kernel/ptrace.c
797
.rseq_abi_size = task->rseq.len,
kernel/ptrace.c
798
.signature = task->rseq.sig,
kernel/ptrace.c
888
static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
kernel/ptrace.c
891
const struct user_regset_view *view = task_user_regset_view(task);
kernel/ptrace.c
903
return copy_regset_to_user(task, view, regset_no, 0,
kernel/ptrace.c
906
return copy_regset_from_user(task, view, regset_no, 0,
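
Several callers in this listing (pidfd, core_sched, mm) gate on ptrace_may_access() before touching another task, since it wraps the credential check in task_lock() as the ptrace.c lines above show. A hedged sketch of that gating; the mode and error code here are illustrative choices:

    #include <linux/errno.h>
    #include <linux/ptrace.h>
    #include <linux/sched.h>

    static int inspect_other_task(struct task_struct *task)
    {
        /* task_lock() is taken inside ptrace_may_access(). */
        if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
            return -EPERM;

        /* ... now allowed to read state a tracer could read ... */
        return 0;
    }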
kernel/rcu/refscale.c
106
struct task_struct *task;
kernel/rcu/refscale.c
1486
reader_tasks[i].task);
kernel/rcu/refscale.c
1590
reader_tasks[i].task);
kernel/rcu/tree.c
4198
sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
kernel/rcu/tree.c
4200
rcu_thread_affine_rnp(kworker->task, rnp);
kernel/rcu/tree.c
4201
wake_up_process(kworker->task);
kernel/rcu/tree.c
4217
sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
kernel/rcu/update.c
452
rcu_assign_pointer(w->task, NULL);
kernel/scftorture.c
554
torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
kernel/scftorture.c
677
scf_stats_p[i].task);
kernel/scftorture.c
82
struct task_struct *task;
kernel/sched/core.c
1018
static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
kernel/sched/core.c
1020
struct wake_q_node *node = &task->wake_q;
kernel/sched/core.c
1054
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
kernel/sched/core.c
1056
if (__wake_q_add(head, task))
kernel/sched/core.c
1057
get_task_struct(task);
kernel/sched/core.c
1077
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
kernel/sched/core.c
1079
if (!__wake_q_add(head, task))
kernel/sched/core.c
1080
put_task_struct(task);
kernel/sched/core.c
1088
struct task_struct *task;
kernel/sched/core.c
1090
task = container_of(node, struct task_struct, wake_q);
kernel/sched/core.c
1093
WRITE_ONCE(task->wake_q.next, NULL);
kernel/sched/core.c
1100
wake_up_process(task);
kernel/sched/core.c
1101
put_task_struct(task);
kernel/sched/core.c
2469
struct task_struct *task;
kernel/sched/core.c
2517
struct task_struct *p = arg->task;
kernel/sched/core.c
2916
.task = p,
kernel/sched/core.c
9265
struct task_struct *task;
kernel/sched/core.c
9271
cgroup_taskset_for_each(task, css, tset) {
kernel/sched/core.c
9272
if (!sched_rt_can_attach(css_tg(css), task))
kernel/sched/core.c
9282
struct task_struct *task;
kernel/sched/core.c
9285
cgroup_taskset_for_each(task, css, tset)
kernel/sched/core.c
9286
sched_move_task(task, false);
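
wake_q_add() above only links the task into the queue and takes a reference; no wakeup happens until wake_up_q() runs, typically after the caller has dropped its lock. A minimal sketch of that deferred-wakeup usage, reusing the illustrative my_waiter type from the rwsem sketch earlier (the lock and list are assumptions of the example):

    #include <linux/list.h>
    #include <linux/sched/wake_q.h>
    #include <linux/spinlock.h>

    static void wake_all_waiters(spinlock_t *lock, struct list_head *waiters)
    {
        DEFINE_WAKE_Q(wake_q);
        struct my_waiter *w;

        spin_lock(lock);
        list_for_each_entry(w, waiters, list)
            wake_q_add(&wake_q, w->task);   /* takes a task reference */
        spin_unlock(lock);

        /* Wakeups run without the lock held; references are dropped here. */
        wake_up_q(&wake_q);
    }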
kernel/sched/core_sched.c
135
struct task_struct *task, *p;
kernel/sched/core_sched.c
152
task = current;
kernel/sched/core_sched.c
154
task = find_task_by_vpid(pid);
kernel/sched/core_sched.c
155
if (!task) {
kernel/sched/core_sched.c
160
get_task_struct(task);
kernel/sched/core_sched.c
167
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
kernel/sched/core_sched.c
178
cookie = sched_core_clone_cookie(task);
kernel/sched/core_sched.c
203
cookie = sched_core_clone_cookie(task);
kernel/sched/core_sched.c
213
__sched_core_set(task, cookie);
kernel/sched/core_sched.c
218
grp = task_pid_type(task, type);
kernel/sched/core_sched.c
235
put_task_struct(task);
kernel/sched/deadline.c
2404
static int find_later_rq(struct task_struct *task);
kernel/sched/deadline.c
2715
static int find_later_rq(struct task_struct *task)
kernel/sched/deadline.c
2720
int cpu = task_cpu(task);
kernel/sched/deadline.c
2726
if (task->nr_cpus_allowed == 1)
kernel/sched/deadline.c
2733
if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
kernel/sched/deadline.c
2822
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
kernel/sched/deadline.c
2829
cpu = find_later_rq(task);
kernel/sched/deadline.c
2836
if (!dl_task_is_earliest_deadline(task, later_rq)) {
kernel/sched/deadline.c
2869
if (unlikely(is_migration_disabled(task) ||
kernel/sched/deadline.c
2870
!cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
kernel/sched/deadline.c
2871
(task->dl.dl_throttled &&
kernel/sched/deadline.c
2872
(task_rq(task) != rq ||
kernel/sched/deadline.c
2873
task_on_cpu(rq, task) ||
kernel/sched/deadline.c
2874
!dl_task(task) ||
kernel/sched/deadline.c
2875
!task_on_rq_queued(task))) ||
kernel/sched/deadline.c
2876
(!task->dl.dl_throttled &&
kernel/sched/deadline.c
2877
task != pick_next_pushable_dl_task(rq)))) {
kernel/sched/deadline.c
2890
if (dl_task_is_earliest_deadline(task, later_rq))
kernel/sched/deadline.c
2941
struct task_struct *task;
kernel/sched/deadline.c
2948
task = pick_next_pushable_dl_task(rq);
kernel/sched/deadline.c
2949
if (task == next_task) {
kernel/sched/deadline.c
2957
if (!task)
kernel/sched/deadline.c
2962
next_task = task;
kernel/sched/deadline.c
643
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
kernel/sched/ext.c
111
struct task_struct *task;
kernel/sched/ext.c
2153
finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id,
kernel/sched/ext.c
2411
.task = next,
kernel/sched/ext.c
355
#define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...) \
kernel/sched/ext.c
358
current->scx.kf_tasks[0] = task; \
kernel/sched/ext.c
359
SCX_CALL_OP((sch), mask, op, rq, task, ##args); \
kernel/sched/ext.c
363
#define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...) \
kernel/sched/ext.c
365
__typeof__((sch)->ops.op(task, ##args)) __ret; \
kernel/sched/ext.c
367
current->scx.kf_tasks[0] = task; \
kernel/sched/ext.c
368
__ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args); \
kernel/sched/ext.c
4967
sched_set_fifo(sch->helper->task);
kernel/sched/ext.c
5344
sched_set_fifo(w->task);
kernel/sched/ext.c
5918
.task = p,
kernel/sched/ext_internal.h
250
struct task_struct *task;
kernel/sched/fair.c
13933
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
kernel/sched/fair.c
13935
struct sched_entity *se = &task->se;
kernel/sched/fair.c
1759
int lim_dist, bool task)
kernel/sched/fair.c
1799
if (task)
kernel/sched/psi.c
1155
void cgroup_move_task(struct task_struct *task, struct css_set *to)
kernel/sched/psi.c
1166
rcu_assign_pointer(task->cgroups, to);
kernel/sched/psi.c
1170
rq = task_rq_lock(task, &rf);
kernel/sched/psi.c
1196
task_flags = task->psi_flags;
kernel/sched/psi.c
1199
psi_task_change(task, task_flags, 0);
kernel/sched/psi.c
1202
rcu_assign_pointer(task->cgroups, to);
kernel/sched/psi.c
1205
psi_task_change(task, 0, task_flags);
kernel/sched/psi.c
1207
task_rq_unlock(rq, task, &rf);
kernel/sched/psi.c
1366
struct task_struct *task;
kernel/sched/psi.c
1368
task = kthread_create(psi_rtpoll_worker, group, "psimon");
kernel/sched/psi.c
1369
if (IS_ERR(task)) {
kernel/sched/psi.c
1372
return ERR_CAST(task);
kernel/sched/psi.c
1375
wake_up_process(task);
kernel/sched/psi.c
1376
rcu_assign_pointer(group->rtpoll_task, task);
kernel/sched/psi.c
629
struct task_struct *task;
kernel/sched/psi.c
640
task = rcu_dereference(group->rtpoll_task);
kernel/sched/psi.c
645
if (likely(task))
kernel/sched/psi.c
885
static inline struct psi_group *task_psi_group(struct task_struct *task)
kernel/sched/psi.c
889
return cgroup_psi(task_dfl_cgroup(task));
kernel/sched/psi.c
894
static void psi_flags_change(struct task_struct *task, int clear, int set)
kernel/sched/psi.c
896
if (((task->psi_flags & set) ||
kernel/sched/psi.c
897
(task->psi_flags & clear) != clear) &&
kernel/sched/psi.c
900
task->pid, task->comm, task_cpu(task),
kernel/sched/psi.c
901
task->psi_flags, clear, set);
kernel/sched/psi.c
905
task->psi_flags &= ~clear;
kernel/sched/psi.c
906
task->psi_flags |= set;
kernel/sched/psi.c
909
void psi_task_change(struct task_struct *task, int clear, int set)
kernel/sched/psi.c
911
int cpu = task_cpu(task);
kernel/sched/psi.c
914
if (!task->pid)
kernel/sched/psi.c
917
psi_flags_change(task, clear, set);
kernel/sched/psi.c
921
for_each_group(group, task_psi_group(task))
kernel/sched/rt.c
1496
static int find_lowest_rq(struct task_struct *task);
kernel/sched/rt.c
1763
static int find_lowest_rq(struct task_struct *task)
kernel/sched/rt.c
1768
int cpu = task_cpu(task);
kernel/sched/rt.c
1775
if (task->nr_cpus_allowed == 1)
kernel/sched/rt.c
1784
ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
kernel/sched/rt.c
1785
task, lowest_mask,
kernel/sched/rt.c
1789
ret = cpupri_find(&task_rq(task)->rd->cpupri,
kernel/sched/rt.c
1790
task, lowest_mask);
kernel/sched/rt.c
1876
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
kernel/sched/rt.c
1883
cpu = find_lowest_rq(task);
kernel/sched/rt.c
1890
if (lowest_rq->rt.highest_prio.curr <= task->prio) {
kernel/sched/rt.c
1912
if (unlikely(is_migration_disabled(task) ||
kernel/sched/rt.c
1913
!cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
kernel/sched/rt.c
1914
task != pick_next_pushable_task(rq))) {
kernel/sched/rt.c
1923
if (lowest_rq->rt.highest_prio.curr > task->prio)
kernel/sched/rt.c
2014
struct task_struct *task;
kernel/sched/rt.c
2023
task = pick_next_pushable_task(rq);
kernel/sched/rt.c
2024
if (task == next_task) {
kernel/sched/rt.c
2034
if (!task)
kernel/sched/rt.c
2042
next_task = task;
kernel/sched/rt.c
2554
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
kernel/sched/rt.c
2559
if (task->policy == SCHED_RR)
kernel/sched/rt.c
2627
struct task_struct *task;
kernel/sched/rt.c
2638
while (!ret && (task = css_task_iter_next(&it)))
kernel/sched/rt.c
2639
ret |= rt_task(task);
kernel/sched/sched.h
2585
void (*task_woken)(struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h
2621
void (*switching_from)(struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h
2622
void (*switched_from) (struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h
2623
void (*switching_to) (struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h
2624
void (*switched_to) (struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h
2625
u64 (*get_prio) (struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h
2626
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
kernel/sched/sched.h
2633
void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
kernel/sched/sched.h
2640
struct task_struct *task);
kernel/sched/sched.h
4016
void move_queued_task_locked(struct rq *src_rq, struct rq *dst_rq, struct task_struct *task)
kernel/sched/sched.h
4021
deactivate_task(src_rq, task, 0);
kernel/sched/sched.h
4022
set_task_cpu(task, dst_rq->cpu);
kernel/sched/sched.h
4023
activate_task(dst_rq, task, 0);
kernel/sched/sched.h
4024
wakeup_preempt(dst_rq, task, 0);
kernel/sched/stats.h
110
void psi_task_change(struct task_struct *task, int clear, int set);
kernel/sched/swait.c
30
try_to_wake_up(curr->task, TASK_NORMAL, wake_flags);
kernel/sched/swait.c
72
wake_up_state(curr->task, TASK_NORMAL);
kernel/sched/swait.c
87
wait->task = current;
kernel/seccomp.c
1178
n.task = current;
kernel/seccomp.c
1591
unotif.pid = task_pid_vnr(knotif->task);
kernel/seccomp.c
2167
static struct seccomp_filter *get_nth_filter(struct task_struct *task,
kernel/seccomp.c
2177
spin_lock_irq(&task->sighand->siglock);
kernel/seccomp.c
2179
if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
kernel/seccomp.c
2180
spin_unlock_irq(&task->sighand->siglock);
kernel/seccomp.c
2184
orig = task->seccomp.filter;
kernel/seccomp.c
2186
spin_unlock_irq(&task->sighand->siglock);
kernel/seccomp.c
2213
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
kernel/seccomp.c
2225
filter = get_nth_filter(task, filter_off);
kernel/seccomp.c
2251
long seccomp_get_metadata(struct task_struct *task,
kernel/seccomp.c
2271
filter = get_nth_filter(task, kmd.filter_off);
kernel/seccomp.c
250
struct task_struct *task = current;
kernel/seccomp.c
251
struct pt_regs *regs = task_pt_regs(task);
kernel/seccomp.c
2531
struct pid *pid, struct task_struct *task)
kernel/seccomp.c
254
sd->nr = syscall_get_nr(task, regs);
kernel/seccomp.c
2543
if (!lock_task_sighand(task, &flags))
kernel/seccomp.c
2546
f = READ_ONCE(task->seccomp.filter);
kernel/seccomp.c
2548
unlock_task_sighand(task, &flags);
kernel/seccomp.c
255
sd->arch = syscall_get_arch(task);
kernel/seccomp.c
2554
unlock_task_sighand(task, &flags);
kernel/seccomp.c
256
syscall_get_arguments(task, regs, args);
kernel/seccomp.c
263
sd->instruction_pointer = KSTK_EIP(task);
kernel/seccomp.c
445
void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
kernel/seccomp.c
447
static inline void seccomp_assign_mode(struct task_struct *task,
kernel/seccomp.c
451
assert_spin_locked(&task->sighand->siglock);
kernel/seccomp.c
453
task->seccomp.mode = seccomp_mode;
kernel/seccomp.c
461
arch_seccomp_spec_mitigate(task);
kernel/seccomp.c
462
set_task_syscall_work(task, SECCOMP);
kernel/seccomp.c
63
struct task_struct *task;
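
get_nth_filter() above snapshots task->seccomp.filter under the sighand spinlock; many other entries in this listing take the same lock via lock_task_sighand(), which additionally fails cleanly if the task is already past exit. A hedged sketch of that form (the body comment marks where sighand-protected state would be touched):

    #include <linux/errno.h>
    #include <linux/sched/signal.h>

    static int with_sighand_locked(struct task_struct *task)
    {
        unsigned long flags;

        if (!lock_task_sighand(task, &flags))
            return -ESRCH;  /* task exited, sighand already gone */

        /* ... read or update sighand-protected state here:
         *     jobctl bits, the seccomp filter pointer, CPU timers ... */

        unlock_task_sighand(task, &flags);
        return 0;
    }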
kernel/signal.c
1391
void lockdep_assert_task_sighand_held(struct task_struct *task)
kernel/signal.c
1396
sighand = rcu_dereference(task->sighand);
kernel/signal.c
2151
void do_notify_pidfd(struct task_struct *task)
kernel/signal.c
2153
struct pid *pid = task_pid(task);
kernel/signal.c
2155
WARN_ON(task->exit_state == 0);
kernel/signal.c
280
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
kernel/signal.c
286
if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
kernel/signal.c
290
task->jobctl &= ~JOBCTL_STOP_SIGMASK;
kernel/signal.c
292
task->jobctl |= mask;
kernel/signal.c
308
void task_clear_jobctl_trapping(struct task_struct *task)
kernel/signal.c
310
if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
kernel/signal.c
311
task->jobctl &= ~JOBCTL_TRAPPING;
kernel/signal.c
313
wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
kernel/signal.c
332
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
kernel/signal.c
339
task->jobctl &= ~mask;
kernel/signal.c
341
if (!(task->jobctl & JOBCTL_PENDING_MASK))
kernel/signal.c
342
task_clear_jobctl_trapping(task);
kernel/signal.c
361
static bool task_participate_group_stop(struct task_struct *task)
kernel/signal.c
363
struct signal_struct *sig = task->signal;
kernel/signal.c
364
bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
kernel/signal.c
366
WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
kernel/signal.c
368
task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
kernel/signal.c
387
void task_join_group_stop(struct task_struct *task)
kernel/signal.c
399
task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
kernel/stacktrace.c
291
unsigned int stack_trace_save_tsk(struct task_struct *task,
kernel/stacktrace.c
299
.skip = skipnr + (current == task),
kernel/stacktrace.c
302
save_stack_trace_tsk(task, &trace);
kernel/stop_machine.c
52
void print_stop_info(const char *log_lvl, struct task_struct *task)
kernel/stop_machine.c
58
struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));
kernel/stop_machine.c
60
if (task != stopper->thread)
kernel/sys.c
1707
static int check_prlimit_permission(struct task_struct *task,
kernel/sys.c
1713
if (current == task)
kernel/sys.c
1716
tcred = __task_cred(task);
kernel/task_work.c
116
task_work_cancel_match(struct task_struct *task,
kernel/task_work.c
120
struct callback_head **pprev = &task->task_works;
kernel/task_work.c
124
if (likely(!task_work_pending(task)))
kernel/task_work.c
132
raw_spin_lock_irqsave(&task->pi_lock, flags);
kernel/task_work.c
141
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
kernel/task_work.c
163
task_work_cancel_func(struct task_struct *task, task_work_func_t func)
kernel/task_work.c
165
return task_work_cancel_match(task, task_work_func_match, func);
kernel/task_work.c
183
bool task_work_cancel(struct task_struct *task, struct callback_head *cb)
kernel/task_work.c
187
ret = task_work_cancel_match(task, task_work_match, cb);
kernel/task_work.c
202
struct task_struct *task = current;
kernel/task_work.c
210
work = READ_ONCE(task->task_works);
kernel/task_work.c
214
if (task->flags & PF_EXITING)
kernel/task_work.c
219
} while (!try_cmpxchg(&task->task_works, &work, head));
kernel/task_work.c
228
raw_spin_lock_irq(&task->pi_lock);
kernel/task_work.c
229
raw_spin_unlock_irq(&task->pi_lock);
kernel/task_work.c
59
int task_work_add(struct task_struct *task, struct callback_head *work,
kernel/task_work.c
65
if (WARN_ON_ONCE(task != current))
kernel/task_work.c
73
head = READ_ONCE(task->task_works);
kernel/task_work.c
78
} while (!try_cmpxchg(&task->task_works, &head, work));
kernel/task_work.c
84
set_notify_resume(task);
kernel/task_work.c
87
set_notify_signal(task);
kernel/task_work.c
90
__set_notify_signal(task);
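
task_work_add() above pushes a callback_head onto task->task_works with try_cmpxchg() and then nudges the task through one of the notify paths; the callback later runs in the task's own context. A hedged sketch of queueing against current (names are illustrative; a callback_head must not be re-queued while still pending):

    #include <linux/task_work.h>

    static void my_callback(struct callback_head *head)
    {
        /* Runs in the target task's context, e.g. on return to user space. */
    }

    static struct callback_head my_work;

    static int queue_my_work(void)
    {
        init_task_work(&my_work, my_callback);
        /* TWA_RESUME: run at the next return to user space;
         * fails with -ESRCH once the task can no longer run callbacks. */
        return task_work_add(current, &my_work, TWA_RESUME);
    }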
kernel/time/alarmtimer.c
683
struct task_struct *task = alarm->data;
kernel/time/alarmtimer.c
686
if (task)
kernel/time/alarmtimer.c
687
wake_up_process(task);
kernel/time/hrtimer.c
2017
struct task_struct *task = t->task;
kernel/time/hrtimer.c
2019
t->task = NULL;
kernel/time/hrtimer.c
2020
if (task)
kernel/time/hrtimer.c
2021
wake_up_process(task);
kernel/time/hrtimer.c
2079
sl->task = current;
kernel/time/hrtimer.c
2123
if (likely(t->task))
kernel/time/hrtimer.c
2129
} while (t->task && !signal_pending(current));
kernel/time/hrtimer.c
2133
if (!t->task)
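
The hrtimer lines above show the sleeper handshake from the other side: the timer callback NULLs t->task and wakes the task, so the sleeper keeps looping only while t->task is still set. A sketch of that wait loop, assuming the sleeper was set up elsewhere (timer initialized, expiry set, t->task = current as at line 2079 above); the error code is illustrative, as the real nanosleep path arranges a restart block instead:

    #include <linux/errno.h>
    #include <linux/hrtimer.h>
    #include <linux/sched.h>
    #include <linux/sched/signal.h>

    static int my_sleeper_wait(struct hrtimer_sleeper *t)
    {
        do {
            set_current_state(TASK_INTERRUPTIBLE);
            /* Mode must match how the sleeper's expiry was set up. */
            hrtimer_sleeper_start_expires(t, HRTIMER_MODE_ABS);

            if (likely(t->task))
                schedule();

            hrtimer_cancel(&t->timer);
        } while (t->task && !signal_pending(current));

        __set_current_state(TASK_RUNNING);

        return !t->task ? 0 : -ERESTARTSYS;
    }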
kernel/time/namespace.c
218
static void timens_set_vvar_page(struct task_struct *task,
kernel/time/namespace.c
264
static struct ns_common *timens_get(struct task_struct *task)
kernel/time/namespace.c
269
task_lock(task);
kernel/time/namespace.c
270
nsproxy = task->nsproxy;
kernel/time/namespace.c
275
task_unlock(task);
kernel/time/namespace.c
280
static struct ns_common *timens_for_children_get(struct task_struct *task)
kernel/time/namespace.c
285
task_lock(task);
kernel/time/namespace.c
286
nsproxy = task->nsproxy;
kernel/time/namespace.c
291
task_unlock(task);
kernel/time/posix-cpu-timers.c
42
int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
kernel/time/posix-cpu-timers.c
47
if (!lock_task_sighand(task, &irq_fl))
kernel/time/posix-cpu-timers.c
49
set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
kernel/time/posix-cpu-timers.c
50
unlock_task_sighand(task, &irq_fl);
kernel/time/sleep_timeout.c
20
struct task_struct *task;
kernel/time/sleep_timeout.c
215
if (likely(t.task))
kernel/time/sleep_timeout.c
223
return !t.task ? 0 : -EINTR;
kernel/time/sleep_timeout.c
27
wake_up_process(timeout->task);
kernel/time/sleep_timeout.c
95
timer.task = current;
kernel/trace/bpf_trace.c
2921
struct task_struct *task;
kernel/trace/bpf_trace.c
2947
if (umulti_link->task)
kernel/trace/bpf_trace.c
2948
put_task_struct(umulti_link->task);
kernel/trace/bpf_trace.c
2984
info->uprobe_multi.pid = umulti_link->task ?
kernel/trace/bpf_trace.c
2985
task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
kernel/trace/bpf_trace.c
3048
pid = umulti_link->task ?
kernel/trace/bpf_trace.c
3049
task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
kernel/trace/bpf_trace.c
3097
if (link->task && !same_thread_group(current, link->task))
kernel/trace/bpf_trace.c
3126
return uprobe->link->task->mm == mm;
kernel/trace/bpf_trace.c
3178
struct task_struct *task = NULL;
kernel/trace/bpf_trace.c
3237
task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
kernel/trace/bpf_trace.c
3239
if (!task) {
kernel/trace/bpf_trace.c
3286
link->task = task;
kernel/trace/bpf_trace.c
3316
if (task)
kernel/trace/bpf_trace.c
3317
put_task_struct(task);
kernel/trace/bpf_trace.c
3533
__bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type,
kernel/trace/bpf_trace.c
3539
return bpf_send_signal_common(sig, type, task, value);
kernel/trace/bpf_trace.c
778
BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
kernel/trace/bpf_trace.c
780
return (unsigned long) task_pt_regs(task);
kernel/trace/bpf_trace.c
796
struct task_struct *task;
kernel/trace/bpf_trace.c
813
group_send_sig_info(work->sig, siginfo, work->task, work->type);
kernel/trace/bpf_trace.c
814
put_task_struct(work->task);
kernel/trace/bpf_trace.c
817
static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value)
kernel/trace/bpf_trace.c
823
if (!task) {
kernel/trace/bpf_trace.c
824
task = current;
kernel/trace/bpf_trace.c
842
if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING)))
kernel/trace/bpf_trace.c
847
if (unlikely(is_global_init(task)))
kernel/trace/bpf_trace.c
865
work->task = get_task_struct(task);
kernel/trace/bpf_trace.c
875
return group_send_sig_info(sig, siginfo, task, type);
kernel/trace/fgraph.c
909
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
kernel/trace/fgraph.c
912
int offset = task->curr_ret_stack;
kernel/trace/fgraph.c
918
ret_stack = get_ret_stack(task, offset, &offset);
kernel/trace/fgraph.c
931
unsigned long ftrace_graph_top_ret_addr(struct task_struct *task)
kernel/trace/fgraph.c
935
int offset = task->curr_ret_stack;
kernel/trace/fgraph.c
941
ret_stack = get_ret_stack(task, offset, &offset);
kernel/trace/fgraph.c
967
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
kernel/trace/fgraph.c
980
i = *idx ? : task->curr_ret_stack;
kernel/trace/fgraph.c
982
ret_stack = get_ret_stack(task, i, &i);
kernel/trace/ftrace.c
8611
struct task_struct *task)
kernel/trace/ftrace.c
8618
trace_filter_add_remove_task(pid_list, self, task);
kernel/trace/ftrace.c
8621
trace_filter_add_remove_task(pid_list, self, task);
kernel/trace/ftrace.c
8625
ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
kernel/trace/ftrace.c
8632
trace_filter_add_remove_task(pid_list, NULL, task);
kernel/trace/ftrace.c
8635
trace_filter_add_remove_task(pid_list, NULL, task);
kernel/trace/preemptirq_delay_test.c
152
struct task_struct *task;
kernel/trace/preemptirq_delay_test.c
158
task = kthread_run(preemptirq_delay_run, NULL, task_name);
kernel/trace/preemptirq_delay_test.c
159
if (IS_ERR(task))
kernel/trace/preemptirq_delay_test.c
160
return PTR_ERR(task);
kernel/trace/preemptirq_delay_test.c
161
if (task) {
kernel/trace/preemptirq_delay_test.c
163
kthread_stop(task);
kernel/trace/rv/monitors/pagefault/pagefault.c
21
static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon)
kernel/trace/rv/monitors/pagefault/pagefault.c
29
ltl_atom_set(mon, LTL_RT, rt_or_dl_task(task));
kernel/trace/rv/monitors/pagefault/pagefault.c
32
static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation)
kernel/trace/rv/monitors/pagefault/pagefault.h
37
static void ltl_start(struct task_struct *task, struct ltl_monitor *mon)
kernel/trace/rv/monitors/pagefault/pagefault_trace.h
10
TP_ARGS(task, states, atoms, next));
kernel/trace/rv/monitors/pagefault/pagefault_trace.h
12
TP_PROTO(struct task_struct *task),
kernel/trace/rv/monitors/pagefault/pagefault_trace.h
13
TP_ARGS(task));
kernel/trace/rv/monitors/pagefault/pagefault_trace.h
9
TP_PROTO(struct task_struct *task, char *states, char *atoms, char *next),
kernel/trace/rv/monitors/sleep/sleep.c
101
ltl_atom_pulse(task, LTL_WOKEN_BY_HARDIRQ, true);
kernel/trace/rv/monitors/sleep/sleep.c
103
if (current->prio <= task->prio)
kernel/trace/rv/monitors/sleep/sleep.c
104
ltl_atom_pulse(task, LTL_WOKEN_BY_EQUAL_OR_HIGHER_PRIO, true);
kernel/trace/rv/monitors/sleep/sleep.c
106
ltl_atom_pulse(task, LTL_WOKEN_BY_NMI, true);
kernel/trace/rv/monitors/sleep/sleep.c
180
static void handle_kthread_stop(void *data, struct task_struct *task)
kernel/trace/rv/monitors/sleep/sleep.c
183
ltl_atom_update(task, LTL_KTHREAD_SHOULD_STOP, true);
kernel/trace/rv/monitors/sleep/sleep.c
25
static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon)
kernel/trace/rv/monitors/sleep/sleep.c
33
ltl_atom_set(mon, LTL_RT, rt_or_dl_task(task));
kernel/trace/rv/monitors/sleep/sleep.c
36
static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation)
kernel/trace/rv/monitors/sleep/sleep.c
56
if (task->flags & PF_KTHREAD) {
kernel/trace/rv/monitors/sleep/sleep.c
67
if (strstarts(task->comm, "migration/"))
kernel/trace/rv/monitors/sleep/sleep.c
72
if (strstarts(task->comm, "rcu"))
kernel/trace/rv/monitors/sleep/sleep.c
85
static void handle_sched_set_state(void *data, struct task_struct *task, int state)
kernel/trace/rv/monitors/sleep/sleep.c
88
ltl_atom_pulse(task, LTL_SLEEP, true);
kernel/trace/rv/monitors/sleep/sleep.c
90
ltl_atom_pulse(task, LTL_ABORT_SLEEP, true);
kernel/trace/rv/monitors/sleep/sleep.c
93
static void handle_sched_wakeup(void *data, struct task_struct *task)
kernel/trace/rv/monitors/sleep/sleep.c
95
ltl_atom_pulse(task, LTL_WAKE, true);
kernel/trace/rv/monitors/sleep/sleep.c
98
static void handle_sched_waking(void *data, struct task_struct *task)
kernel/trace/rv/monitors/sleep/sleep.h
76
static void ltl_start(struct task_struct *task, struct ltl_monitor *mon)
kernel/trace/rv/monitors/sleep/sleep_trace.h
10
TP_ARGS(task, states, atoms, next));
kernel/trace/rv/monitors/sleep/sleep_trace.h
12
TP_PROTO(struct task_struct *task),
kernel/trace/rv/monitors/sleep/sleep_trace.h
13
TP_ARGS(task));
kernel/trace/rv/monitors/sleep/sleep_trace.h
9
TP_PROTO(struct task_struct *task, char *states, char *atoms, char *next),
kernel/trace/rv/monitors/wip/wip.c
30
static void handle_sched_waking(void *data, struct task_struct *task)
kernel/trace/rv/rv_trace.h
135
TP_PROTO(struct task_struct *task, char *states, char *atoms, char *next),
kernel/trace/rv/rv_trace.h
137
TP_ARGS(task, states, atoms, next),
kernel/trace/rv/rv_trace.h
140
__string(comm, task->comm)
kernel/trace/rv/rv_trace.h
149
__entry->pid = task->pid;
kernel/trace/rv/rv_trace.h
161
TP_PROTO(struct task_struct *task),
kernel/trace/rv/rv_trace.h
163
TP_ARGS(task),
kernel/trace/rv/rv_trace.h
166
__string(comm, task->comm)
kernel/trace/rv/rv_trace.h
172
__entry->pid = task->pid;
kernel/trace/trace.h
789
struct task_struct *task);
kernel/trace/trace.h
792
struct task_struct *task);
kernel/trace/trace_events.c
1037
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
kernel/trace/trace_events.c
1044
trace_filter_add_remove_task(pid_list, NULL, task);
kernel/trace/trace_events.c
1047
trace_filter_add_remove_task(pid_list, NULL, task);
kernel/trace/trace_events.c
1053
struct task_struct *task)
kernel/trace/trace_events.c
1060
trace_filter_add_remove_task(pid_list, self, task);
kernel/trace/trace_events.c
1063
trace_filter_add_remove_task(pid_list, self, task);
kernel/trace/trace_events.c
1125
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
kernel/trace/trace_events.c
1139
trace_ignore_this_task(pid_list, no_pid_list, task));
kernel/trace/trace_events.c
1143
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
kernel/trace/trace_events_hist.c
1606
static inline void save_comm(char *comm, struct task_struct *task)
kernel/trace/trace_events_hist.c
1608
if (!task->pid) {
kernel/trace/trace_events_hist.c
1613
if (WARN_ON_ONCE(task->pid < 0)) {
kernel/trace/trace_events_hist.c
1618
strscpy(comm, task->comm, TASK_COMM_LEN);
kernel/trace/trace_output.c
1468
struct task_struct *task;
kernel/trace/trace_output.c
1474
task = find_task_by_vpid(field->tgid);
kernel/trace/trace_output.c
1475
if (task)
kernel/trace/trace_output.c
1476
mm = get_task_mm(task);
kernel/trace/trace_pid.c
31
struct task_struct *task)
kernel/trace/trace_pid.c
42
!trace_find_filtered_pid(filtered_pids, task->pid)) ||
kernel/trace/trace_pid.c
44
trace_find_filtered_pid(filtered_no_pids, task->pid));
kernel/trace/trace_pid.c
61
struct task_struct *task)
kernel/trace/trace_pid.c
74
trace_pid_list_set(pid_list, task->pid);
kernel/trace/trace_pid.c
76
trace_pid_list_clear(pid_list, task->pid);
kernel/trace/trace_sched_switch.c
375
void tracing_record_taskinfo(struct task_struct *task, int flags)
kernel/trace/trace_sched_switch.c
386
done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
kernel/trace/trace_sched_switch.c
387
done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
kernel/trace/trace_sched_switch.c
429
void tracing_record_cmdline(struct task_struct *task)
kernel/trace/trace_sched_switch.c
431
tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
kernel/trace/trace_sched_switch.c
434
void tracing_record_tgid(struct task_struct *task)
kernel/trace/trace_sched_switch.c
436
tracing_record_taskinfo(task, TRACE_RECORD_TGID);
kernel/trace/trace_sched_wakeup.c
379
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
kernel/trace/trace_sched_wakeup.c
381
if (task != wakeup_task)
kernel/trace/trace_syscalls.c
68
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
kernel/trace/trace_syscalls.c
73
return syscall_get_nr(task, regs);
kernel/trace/trace_syscalls.c
77
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
kernel/trace/trace_syscalls.c
79
return syscall_get_nr(task, regs);
kernel/unwind/deferred.c
148
static void process_unwind_deferred(struct task_struct *task)
kernel/unwind/deferred.c
150
struct unwind_task_info *info = &task->unwind_info;
kernel/unwind/deferred.c
192
void unwind_deferred_task_exit(struct task_struct *task)
kernel/unwind/deferred.c
199
process_unwind_deferred(task);
kernel/unwind/deferred.c
201
task_work_cancel(task, &info->work);
kernel/unwind/deferred.c
350
void unwind_task_init(struct task_struct *task)
kernel/unwind/deferred.c
352
struct unwind_task_info *info = &task->unwind_info;
kernel/unwind/deferred.c
359
void unwind_task_free(struct task_struct *task)
kernel/unwind/deferred.c
361
struct unwind_task_info *info = &task->unwind_info;
kernel/unwind/deferred.c
364
task_work_cancel(task, &info->work);
kernel/user_namespace.c
1327
static struct ns_common *userns_get(struct task_struct *task)
kernel/user_namespace.c
1332
user_ns = get_user_ns(__task_cred(task)->user_ns);
kernel/utsname.c
106
static struct ns_common *utsns_get(struct task_struct *task)
kernel/utsname.c
111
task_lock(task);
kernel/utsname.c
112
nsproxy = task->nsproxy;
kernel/utsname.c
117
task_unlock(task);
kernel/vhost_task.c
103
put_task_struct(vtsk->task);
kernel/vhost_task.c
152
vtsk->task = get_task_struct(tsk);
kernel/vhost_task.c
163
wake_up_new_task(vtsk->task);
kernel/vhost_task.c
22
struct task_struct *task;
kernel/vhost_task.c
78
wake_up_process(vtsk->task);
kernel/workqueue.c
1267
p = worker->task;
kernel/workqueue.c
1404
void wq_worker_running(struct task_struct *task)
kernel/workqueue.c
1406
struct worker *worker = kthread_data(task);
kernel/workqueue.c
1426
worker->current_at = worker->task->se.sum_exec_runtime;
kernel/workqueue.c
1438
void wq_worker_sleeping(struct task_struct *task)
kernel/workqueue.c
1440
struct worker *worker = kthread_data(task);
kernel/workqueue.c
1484
void wq_worker_tick(struct task_struct *task)
kernel/workqueue.c
1486
struct worker *worker = kthread_data(task);
kernel/workqueue.c
1511
worker->task->se.sum_exec_runtime - worker->current_at <
kernel/workqueue.c
1551
work_func_t wq_worker_last_func(struct task_struct *task)
kernel/workqueue.c
1553
struct worker *worker = kthread_data(task);
kernel/workqueue.c
2702
kthread_set_per_cpu(worker->task, pool->cpu);
kernel/workqueue.c
2706
set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));
kernel/workqueue.c
2718
kthread_set_per_cpu(worker->task, -1);
kernel/workqueue.c
2720
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
kernel/workqueue.c
2722
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
kernel/workqueue.c
2815
worker->task = kthread_create_on_node(worker_thread, worker,
kernel/workqueue.c
2817
if (IS_ERR(worker->task)) {
kernel/workqueue.c
2818
if (PTR_ERR(worker->task) == -EINTR) {
kernel/workqueue.c
2823
worker->task);
kernel/workqueue.c
2828
set_user_nice(worker->task, pool->attrs->nice);
kernel/workqueue.c
2829
kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
kernel/workqueue.c
2846
if (worker->task)
kernel/workqueue.c
2847
wake_up_process(worker->task);
kernel/workqueue.c
2873
kthread_stop_put(worker->task);
kernel/workqueue.c
2910
get_task_struct(worker->task);
kernel/workqueue.c
3016
wake_up_process(wq->rescuer->task);
kernel/workqueue.c
3205
if (worker->task)
kernel/workqueue.c
3206
worker->current_at = worker->task->se.sum_exec_runtime;
kernel/workqueue.c
3287
if (unlikely((worker->task && in_atomic()) ||
kernel/workqueue.c
3308
if (worker->task)
kernel/workqueue.c
3811
struct task_struct *task; /* purely informational */
kernel/workqueue.c
3869
barr->task = current;
kernel/workqueue.c
5670
rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", id_buf);
kernel/workqueue.c
5671
if (IS_ERR(rescuer->task)) {
kernel/workqueue.c
5672
ret = PTR_ERR(rescuer->task);
kernel/workqueue.c
5683
kthread_bind_mask(rescuer->task, wq_unbound_cpumask);
kernel/workqueue.c
5685
kthread_bind_mask(rescuer->task, cpu_possible_mask);
kernel/workqueue.c
5687
wake_up_process(rescuer->task);
kernel/workqueue.c
5972
kthread_stop(wq->rescuer->task);
kernel/workqueue.c
6225
void print_worker_info(const char *log_lvl, struct task_struct *task)
kernel/workqueue.c
6234
if (!(task->flags & PF_WQ_WORKER))
kernel/workqueue.c
6241
worker = kthread_probe_data(task);
kernel/workqueue.c
6282
pr_cont("%d%s", task_pid_nr(worker->task),
kernel/workqueue.c
6322
task_pid_nr(barr->task));
kernel/workqueue.c
6481
task_pid_nr(pool->manager->task));
kernel/workqueue.c
6548
void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
kernel/workqueue.c
6553
if (task->flags & PF_WQ_WORKER) {
kernel/workqueue.c
6554
struct worker *worker = kthread_data(task);
kernel/workqueue.c
6578
strscpy(buf, task->comm, size);
kernel/workqueue.c
6669
kthread_set_per_cpu(worker->task, pool->cpu);
kernel/workqueue.c
6670
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
kernel/workqueue.c
6730
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
kernel/workqueue.c
7607
sched_show_task(worker->task);
kernel/workqueue_internal.h
45
struct task_struct *task; /* I: worker task */
kernel/workqueue_internal.h
79
void wq_worker_running(struct task_struct *task);
kernel/workqueue_internal.h
80
void wq_worker_sleeping(struct task_struct *task);
kernel/workqueue_internal.h
81
void wq_worker_tick(struct task_struct *task);
kernel/workqueue_internal.h
82
work_func_t wq_worker_last_func(struct task_struct *task);
lib/closure.c
118
struct task_struct *task;
lib/closure.c
129
p = READ_ONCE(s->task);
lib/closure.c
137
struct closure_syncer s = { .task = current };
lib/closure.c
162
struct closure_syncer s = { .task = current };
lib/closure.c
190
struct closure_syncer s = { .task = current };
lib/fault-inject.c
79
static bool fail_task(struct fault_attr *attr, struct task_struct *task)
lib/fault-inject.c
81
return in_task() && task->make_it_fail;
lib/is_single_threaded.c
17
struct task_struct *task = current;
lib/is_single_threaded.c
18
struct mm_struct *mm = task->mm;
lib/is_single_threaded.c
22
if (atomic_read(&task->signal->live) != 1)
lib/is_single_threaded.c
33
if (unlikely(p == task->group_leader))
lib/string_helpers.c
680
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp)
lib/string_helpers.c
689
res = get_cmdline(task, buffer, PAGE_SIZE - 1);
lib/test_firmware.c
51
struct task_struct *task;
lib/test_firmware.c
911
req->task = NULL;
lib/test_firmware.c
956
req->task = kthread_run(test_fw_run_batch_request, req,
lib/test_firmware.c
958
if (!req->task || IS_ERR(req->task)) {
lib/test_firmware.c
960
req->task = NULL;
lib/test_firmware.c
979
if (req->task || req->sent)
lib/test_rhashtable.c
76
struct task_struct *task;
lib/test_rhashtable.c
770
tdata[i].task = kthread_run(threadfunc, &tdata[i],
lib/test_rhashtable.c
772
if (IS_ERR(tdata[i].task)) {
lib/test_rhashtable.c
785
if (IS_ERR(tdata[i].task))
lib/test_rhashtable.c
787
if ((err = kthread_stop(tdata[i].task))) {
lib/test_vmalloc.c
454
struct task_struct *task;
lib/test_vmalloc.c
577
t->task = kthread_run(test_func, t, "vmalloc_test/%d", i);
lib/test_vmalloc.c
579
if (!IS_ERR(t->task))
lib/test_vmalloc.c
605
if (!IS_ERR(t->task))
lib/test_vmalloc.c
606
kthread_stop(t->task);
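
The lib/test_*.c entries (and the bluetooth/ipvs ones below) all drive helper threads through the same lifecycle: kthread_run() returns either the task or an ERR_PTR, and a non-error task is later reaped with kthread_stop(). A minimal sketch of that lifecycle (the thread body is illustrative):

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static struct task_struct *worker;

    static int my_thread_fn(void *data)
    {
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
            schedule();     /* real workers do their work here */
            set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
    }

    static int start_worker(void)
    {
        worker = kthread_run(my_thread_fn, NULL, "my_worker");
        if (IS_ERR(worker)) {
            int err = PTR_ERR(worker);

            worker = NULL;  /* as the ipvs code below does on failure */
            return err;
        }
        return 0;
    }

    static void stop_worker(void)
    {
        if (worker)
            kthread_stop(worker);   /* blocks until my_thread_fn returns */
    }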
lib/vdso/datastore.c
115
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
lib/vdso/datastore.c
117
struct mm_struct *mm = task->mm;
mm/damon/vaddr.c
44
struct task_struct *task;
mm/damon/vaddr.c
47
task = damon_get_task_struct(t);
mm/damon/vaddr.c
48
if (!task)
mm/damon/vaddr.c
51
mm = get_task_mm(task);
mm/damon/vaddr.c
52
put_task_struct(task);
mm/damon/vaddr.c
761
struct task_struct *task;
mm/damon/vaddr.c
763
task = damon_get_task_struct(t);
mm/damon/vaddr.c
764
if (task) {
mm/damon/vaddr.c
765
put_task_struct(task);
mm/huge_memory.c
4588
struct task_struct *task;
mm/huge_memory.c
4596
task = find_get_task_by_vpid(pid);
mm/huge_memory.c
4597
if (!task) {
mm/huge_memory.c
4603
mm = get_task_mm(task);
mm/huge_memory.c
4604
put_task_struct(task);
mm/hugetlb_cgroup.c
63
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
mm/hugetlb_cgroup.c
65
return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
mm/kasan/common.c
107
void kasan_unpoison_task_stack(struct task_struct *task)
mm/kasan/common.c
109
void *base = task_stack_page(task);
mm/kmsan/core.c
40
void kmsan_internal_task_create(struct task_struct *task)
mm/kmsan/core.c
42
struct kmsan_ctx *ctx = &task->kmsan_ctx;
mm/kmsan/hooks.c
33
void kmsan_task_create(struct task_struct *task)
mm/kmsan/hooks.c
36
kmsan_internal_task_create(task);
mm/kmsan/hooks.c
40
void kmsan_task_exit(struct task_struct *task)
mm/kmsan/kmsan.h
162
void kmsan_internal_task_create(struct task_struct *task);
mm/madvise.c
2114
struct task_struct *task;
mm/madvise.c
2127
task = pidfd_get_task(pidfd, &f_flags);
mm/madvise.c
2128
if (IS_ERR(task)) {
mm/madvise.c
2129
ret = PTR_ERR(task);
mm/madvise.c
2134
mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
mm/madvise.c
2164
put_task_struct(task);
mm/memcontrol.c
1180
struct task_struct *task;
mm/memcontrol.c
1183
while (!ret && (task = css_task_iter_next(&it))) {
mm/memcontrol.c
1184
ret = fn(task, arg);
mm/memcontrol.c
4168
static void mem_cgroup_fork(struct task_struct *task)
mm/memcontrol.c
4176
task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
mm/memcontrol.c
4179
static void mem_cgroup_exit(struct task_struct *task)
mm/memcontrol.c
4181
struct obj_cgroup *objcg = task->objcg;
mm/memcontrol.c
4193
task->objcg = NULL;
mm/memcontrol.c
4199
struct task_struct *task;
mm/memcontrol.c
4203
cgroup_taskset_for_each_leader(task, css, tset)
mm/memcontrol.c
4206
if (!task)
mm/memcontrol.c
4209
task_lock(task);
mm/memcontrol.c
4210
if (task->mm && READ_ONCE(task->mm->owner) == task)
mm/memcontrol.c
4211
lru_gen_migrate_mm(task->mm);
mm/memcontrol.c
4212
task_unlock(task);
mm/memcontrol.c
4220
struct task_struct *task;
mm/memcontrol.c
4223
cgroup_taskset_for_each(task, css, tset) {
mm/memcontrol.c
4225
set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
mm/mempolicy.c
1865
struct task_struct *task;
mm/mempolicy.c
1888
task = pid ? find_task_by_vpid(pid) : current;
mm/mempolicy.c
1889
if (!task) {
mm/mempolicy.c
1894
get_task_struct(task);
mm/mempolicy.c
1902
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
mm/mempolicy.c
1909
task_nodes = cpuset_mems_allowed(task);
mm/mempolicy.c
1920
err = security_task_movememory(task);
mm/mempolicy.c
1924
mm = get_task_mm(task);
mm/mempolicy.c
1925
put_task_struct(task);
mm/mempolicy.c
1942
put_task_struct(task);
mm/mempolicy.c
3087
void mpol_put_task_policy(struct task_struct *task)
mm/mempolicy.c
3091
task_lock(task);
mm/mempolicy.c
3092
pol = task->mempolicy;
mm/mempolicy.c
3093
task->mempolicy = NULL;
mm/mempolicy.c
3094
task_unlock(task);
mm/migrate.c
2528
struct task_struct *task;
mm/migrate.c
2541
task = find_get_task_by_vpid(pid);
mm/migrate.c
2542
if (!task) {
mm/migrate.c
2550
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
mm/migrate.c
2555
mm = ERR_PTR(security_task_movememory(task));
mm/migrate.c
2558
*mem_nodes = cpuset_mems_allowed(task);
mm/migrate.c
2559
mm = get_task_mm(task);
mm/migrate.c
2561
put_task_struct(task);
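
mm/madvise.c, mm/migrate.c and mm/process_vm_access.c above follow one sequence: resolve a pid to a pinned task, convert that into an mm reference, then drop the task. A hedged sketch of the get_task_mm() variant; real callers such as the cited migrate.c also run ptrace_may_access() and security checks before touching the mm:

    #include <linux/err.h>
    #include <linux/sched/mm.h>
    #include <linux/sched/task.h>

    static struct mm_struct *mm_of_vpid(pid_t pid)
    {
        struct task_struct *task;
        struct mm_struct *mm;

        task = find_get_task_by_vpid(pid);  /* pinned task or NULL */
        if (!task)
            return ERR_PTR(-ESRCH);

        mm = get_task_mm(task);     /* NULL for kthreads or exited tasks */
        put_task_struct(task);      /* the mm reference outlives the task ref */

        return mm ?: ERR_PTR(-ESRCH);   /* caller releases with mmput() */
    }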
mm/oom_kill.c
1014
static int oom_kill_memcg_member(struct task_struct *task, void *message)
mm/oom_kill.c
1016
if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
mm/oom_kill.c
1017
!is_global_init(task)) {
mm/oom_kill.c
1018
get_task_struct(task);
mm/oom_kill.c
1019
__oom_kill_process(task, message);
mm/oom_kill.c
1215
struct task_struct *task;
mm/oom_kill.c
1224
task = pidfd_get_task(pidfd, &f_flags);
mm/oom_kill.c
1225
if (IS_ERR(task))
mm/oom_kill.c
1226
return PTR_ERR(task);
mm/oom_kill.c
1232
p = find_lock_task_mm(task);
mm/oom_kill.c
1268
put_task_struct(task);
mm/oom_kill.c
309
static int oom_evaluate_task(struct task_struct *task, void *arg)
mm/oom_kill.c
314
if (oom_unkillable_task(task))
mm/oom_kill.c
318
if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
mm/oom_kill.c
327
if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
mm/oom_kill.c
328
if (mm_flags_test(MMF_OOM_SKIP, task->signal->oom_mm))
mm/oom_kill.c
337
if (oom_task_origin(task)) {
mm/oom_kill.c
342
points = oom_badness(task, oc->totalpages);
mm/oom_kill.c
349
get_task_struct(task);
mm/oom_kill.c
350
oc->chosen = task;
mm/oom_kill.c
385
struct task_struct *task;
mm/oom_kill.c
394
task = find_lock_task_mm(p);
mm/oom_kill.c
395
if (!task) {
mm/oom_kill.c
404
task->pid, from_kuid(&init_user_ns, task_uid(task)),
mm/oom_kill.c
405
task->tgid, task->mm->total_vm, get_mm_rss_sum(task->mm),
mm/oom_kill.c
406
get_mm_counter_sum(task->mm, MM_ANONPAGES), get_mm_counter_sum(task->mm, MM_FILEPAGES),
mm/oom_kill.c
407
get_mm_counter_sum(task->mm, MM_SHMEMPAGES), mm_pgtables_bytes(task->mm),
mm/oom_kill.c
408
get_mm_counter_sum(task->mm, MM_SWAPENTS),
mm/oom_kill.c
409
task->signal->oom_score_adj, task->comm);
mm/oom_kill.c
410
task_unlock(task);
mm/oom_kill.c
853
static inline bool __task_will_free_mem(struct task_struct *task)
mm/oom_kill.c
855
struct signal_struct *sig = task->signal;
mm/oom_kill.c
868
if (thread_group_empty(task) && (task->flags & PF_EXITING))
mm/oom_kill.c
881
static bool task_will_free_mem(struct task_struct *task)
mm/oom_kill.c
883
struct mm_struct *mm = task->mm;
mm/oom_kill.c
895
if (!__task_will_free_mem(task))
mm/oom_kill.c
917
if (same_thread_group(task, p))
mm/page_alloc.c
1286
void __pgalloc_tag_add(struct page *page, struct task_struct *task,
mm/page_alloc.c
1293
alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
mm/page_alloc.c
1299
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
mm/page_alloc.c
1303
__pgalloc_tag_add(page, task, nr);
mm/page_alloc.c
1335
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
mm/process_vm_access.c
156
struct task_struct *task;
mm/process_vm_access.c
197
task = find_get_task_by_vpid(pid);
mm/process_vm_access.c
198
if (!task) {
mm/process_vm_access.c
203
mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
mm/process_vm_access.c
218
iter, process_pages, mm, task, vm_write);
mm/process_vm_access.c
232
put_task_struct(task);
mm/process_vm_access.c
78
struct task_struct *task,
mm/util.c
1009
res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
mm/util.c
1023
res += access_process_vm(task, env_start,
mm/util.c
507
const struct task_struct *task, bool bypass_rlim)
mm/util.c
517
limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
mm/util.c
528
pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
mm/util.c
530
locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
mm/util.c
986
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
mm/util.c
990
struct mm_struct *mm = get_task_mm(task);
mm/vmscan.c
291
static void set_task_reclaim_state(struct task_struct *task,
mm/vmscan.c
295
WARN_ON_ONCE(rs && task->reclaim_state);
mm/vmscan.c
2973
struct task_struct *task = rcu_dereference_protected(mm->owner, true);
mm/vmscan.c
2975
VM_WARN_ON_ONCE(task->mm != mm);
mm/vmscan.c
2976
lockdep_assert_held(&task->alloc_lock);
mm/vmscan.c
298
WARN_ON_ONCE(!rs && !task->reclaim_state);
mm/vmscan.c
2987
memcg = mem_cgroup_from_task(task);
mm/vmscan.c
300
task->reclaim_state = rs;
net/bluetooth/bnep/bnep.h
152
struct task_struct *task;
net/bluetooth/bnep/core.c
631
s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
net/bluetooth/bnep/core.c
632
if (IS_ERR(s->task)) {
net/bluetooth/bnep/core.c
637
err = PTR_ERR(s->task);
net/bluetooth/cmtp/capi.c
360
wake_up_process(session->task);
net/bluetooth/cmtp/cmtp.h
85
struct task_struct *task;
net/bluetooth/cmtp/core.c
384
session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
net/bluetooth/cmtp/core.c
386
if (IS_ERR(session->task)) {
net/bluetooth/cmtp/core.c
388
err = PTR_ERR(session->task);
net/bluetooth/hidp/core.c
1060
session->task = kthread_run(hidp_session_thread, session,
net/bluetooth/hidp/core.c
1062
if (IS_ERR(session->task))
net/bluetooth/hidp/core.c
1063
return PTR_ERR(session->task);
net/bluetooth/hidp/hidp.h
145
struct task_struct *task;
net/core/net_namespace.c
1512
static struct ns_common *netns_get(struct task_struct *task)
net/core/net_namespace.c
1517
task_lock(task);
net/core/net_namespace.c
1518
nsproxy = task->nsproxy;
net/core/net_namespace.c
1521
task_unlock(task);
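
timens_get(), utsns_get() and netns_get() above all read task->nsproxy under task_lock(), because nsproxy is swapped on unshare/setns and cleared as the task exits. A minimal sketch of that protected read for the UTS case, mirroring the cited utsns_get() shape:

    #include <linux/nsproxy.h>
    #include <linux/sched/task.h>
    #include <linux/utsname.h>

    static struct uts_namespace *get_task_utsns(struct task_struct *task)
    {
        struct uts_namespace *ns = NULL;

        task_lock(task);                /* stabilizes task->nsproxy */
        if (task->nsproxy) {            /* NULL once the task has exited */
            ns = task->nsproxy->uts_ns;
            get_uts_ns(ns);             /* take the ref under the lock */
        }
        task_unlock(task);

        return ns;      /* may be NULL; caller drops with put_uts_ns() */
    }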
net/core/pktgen.c
2348
if (likely(t.task))
net/core/pktgen.c
2352
} while (t.task && pkt_dev->running && !signal_pending(current));
net/netfilter/ipvs/ip_vs_ctl.c
265
if (!kd->task && !ip_vs_est_stopped(ipvs)) {
net/netfilter/ipvs/ip_vs_est.c
251
if (kd->task)
net/netfilter/ipvs/ip_vs_est.c
258
kd->task = kthread_create(ip_vs_estimation_kthread, kd, "ipvs-e:%d:%d",
net/netfilter/ipvs/ip_vs_est.c
260
if (IS_ERR(kd->task)) {
net/netfilter/ipvs/ip_vs_est.c
261
ret = PTR_ERR(kd->task);
net/netfilter/ipvs/ip_vs_est.c
262
kd->task = NULL;
net/netfilter/ipvs/ip_vs_est.c
266
set_user_nice(kd->task, sysctl_est_nice(ipvs));
net/netfilter/ipvs/ip_vs_est.c
268
kthread_affine_preferred(kd->task, sysctl_est_preferred_cpulist(ipvs));
net/netfilter/ipvs/ip_vs_est.c
271
wake_up_process(kd->task);
net/netfilter/ipvs/ip_vs_est.c
279
if (kd->task) {
net/netfilter/ipvs/ip_vs_est.c
281
kthread_stop(kd->task);
net/netfilter/ipvs/ip_vs_est.c
282
kd->task = NULL;
net/netfilter/ipvs/ip_vs_est.c
511
if (kd->task) {
net/netfilter/ipvs/ip_vs_est.c
513
kthread_stop(kd->task);
net/netfilter/ipvs/ip_vs_sync.c
1631
wake_up_process(ipvs->master_tinfo[id].task);
net/netfilter/ipvs/ip_vs_sync.c
1742
struct task_struct *task;
net/netfilter/ipvs/ip_vs_sync.c
1866
task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
net/netfilter/ipvs/ip_vs_sync.c
1867
if (IS_ERR(task)) {
net/netfilter/ipvs/ip_vs_sync.c
1868
result = PTR_ERR(task);
net/netfilter/ipvs/ip_vs_sync.c
1871
tinfo->task = task;
net/netfilter/ipvs/ip_vs_sync.c
1897
if (tinfo->task)
net/netfilter/ipvs/ip_vs_sync.c
1898
kthread_stop(tinfo->task);
net/netfilter/ipvs/ip_vs_sync.c
1965
task_pid_nr(tinfo->task));
net/netfilter/ipvs/ip_vs_sync.c
1967
ret = kthread_stop(tinfo->task);
net/netfilter/ipvs/ip_vs_sync.c
197
struct task_struct *task;
net/netfilter/ipvs/ip_vs_sync.c
1987
task_pid_nr(tinfo->task));
net/netfilter/ipvs/ip_vs_sync.c
1988
ret = kthread_stop(tinfo->task);
net/netfilter/ipvs/ip_vs_sync.c
377
wake_up_process(ipvs->master_tinfo[id].task);
net/sunrpc/auth.c
607
rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
net/sunrpc/auth.c
609
struct rpc_auth *auth = task->tk_client->cl_auth;
net/sunrpc/auth.c
615
if (RPC_IS_ASYNC(task))
net/sunrpc/auth.c
623
rpcauth_bind_machine_cred(struct rpc_task *task, int lookupflags)
net/sunrpc/auth.c
625
struct rpc_auth *auth = task->tk_client->cl_auth;
net/sunrpc/auth.c
627
.principal = task->tk_client->cl_principal,
net/sunrpc/auth.c
633
if (RPC_IS_ASYNC(task))
net/sunrpc/auth.c
639
rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags)
net/sunrpc/auth.c
641
struct rpc_auth *auth = task->tk_client->cl_auth;
net/sunrpc/auth.c
647
rpcauth_bindcred(struct rpc_task *task, const struct cred *cred, int flags)
net/sunrpc/auth.c
649
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/auth.c
652
struct rpc_auth *auth = task->tk_client->cl_auth;
net/sunrpc/auth.c
659
if (task->tk_op_cred)
net/sunrpc/auth.c
661
new = get_rpccred(task->tk_op_cred);
net/sunrpc/auth.c
665
new = rpcauth_bind_machine_cred(task, lookupflags);
net/sunrpc/auth.c
671
new = rpcauth_bind_root_cred(task, lookupflags);
net/sunrpc/auth.c
675
new = rpcauth_bind_new_cred(task, lookupflags);
net/sunrpc/auth.c
724
int rpcauth_marshcred(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth.c
726
const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
net/sunrpc/auth.c
728
return ops->crmarshal(task, xdr);
net/sunrpc/auth.c
739
int rpcauth_wrap_req_encode(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth.c
741
kxdreproc_t encode = task->tk_msg.rpc_proc->p_encode;
net/sunrpc/auth.c
743
encode(task->tk_rqstp, xdr, task->tk_msg.rpc_argp);
net/sunrpc/auth.c
757
int rpcauth_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth.c
759
const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
net/sunrpc/auth.c
761
return ops->crwrap_req(task, xdr);
net/sunrpc/auth.c
779
rpcauth_checkverf(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth.c
781
const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
net/sunrpc/auth.c
783
return ops->crvalidate(task, xdr);
net/sunrpc/auth.c
794
rpcauth_unwrap_resp_decode(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth.c
796
kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
net/sunrpc/auth.c
798
return decode(task->tk_rqstp, xdr, task->tk_msg.rpc_resp);
net/sunrpc/auth.c
810
rpcauth_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth.c
812
const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
net/sunrpc/auth.c
814
return ops->crunwrap_resp(task, xdr);
net/sunrpc/auth.c
818
rpcauth_xmit_need_reencode(struct rpc_task *task)
net/sunrpc/auth.c
820
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
net/sunrpc/auth.c
824
return cred->cr_ops->crneed_reencode(task);
net/sunrpc/auth.c
828
rpcauth_refreshcred(struct rpc_task *task)
net/sunrpc/auth.c
833
cred = task->tk_rqstp->rq_cred;
net/sunrpc/auth.c
835
err = rpcauth_bindcred(task, task->tk_msg.rpc_cred, task->tk_flags);
net/sunrpc/auth.c
838
cred = task->tk_rqstp->rq_cred;
net/sunrpc/auth.c
841
err = cred->cr_ops->crrefresh(task);
net/sunrpc/auth.c
844
task->tk_status = err;
net/sunrpc/auth.c
849
rpcauth_invalcred(struct rpc_task *task)
net/sunrpc/auth.c
851
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
net/sunrpc/auth.c
858
rpcauth_uptodatecred(struct rpc_task *task)
net/sunrpc/auth.c
860
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
net/sunrpc/auth_gss/auth_gss.c
1283
struct rpc_task *task;
net/sunrpc/auth_gss/auth_gss.c
1290
task = rpc_call_null(gss_auth->client, &new->gc_base,
net/sunrpc/auth_gss/auth_gss.c
1292
if (!IS_ERR(task))
net/sunrpc/auth_gss/auth_gss.c
1293
rpc_put_task(task);
net/sunrpc/auth_gss/auth_gss.c
1529
static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth_gss/auth_gss.c
1531
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/auth_gss/auth_gss.c
1559
trace_rpcgss_seqno(task);
net/sunrpc/auth_gss/auth_gss.c
1600
trace_rpcgss_get_mic(task, maj_stat);
net/sunrpc/auth_gss/auth_gss.c
1605
static int gss_renew_cred(struct rpc_task *task)
net/sunrpc/auth_gss/auth_gss.c
1607
struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
net/sunrpc/auth_gss/auth_gss.c
1622
task->tk_rqstp->rq_cred = new;
net/sunrpc/auth_gss/auth_gss.c
1648
gss_refresh(struct rpc_task *task)
net/sunrpc/auth_gss/auth_gss.c
1650
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
net/sunrpc/auth_gss/auth_gss.c
1658
ret = gss_renew_cred(task);
net/sunrpc/auth_gss/auth_gss.c
1661
cred = task->tk_rqstp->rq_cred;
net/sunrpc/auth_gss/auth_gss.c
1665
ret = gss_refresh_upcall(task);
net/sunrpc/auth_gss/auth_gss.c
1672
gss_refresh_null(struct rpc_task *task)
net/sunrpc/auth_gss/auth_gss.c
1694
gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth_gss/auth_gss.c
1696
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
net/sunrpc/auth_gss/auth_gss.c
1718
maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[0], seq, p, len);
net/sunrpc/auth_gss/auth_gss.c
1720
while (unlikely(maj_stat == GSS_S_BAD_SIG && i < task->tk_rqstp->rq_seqno_count))
net/sunrpc/auth_gss/auth_gss.c
1721
maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[i++], seq, p, len);
net/sunrpc/auth_gss/auth_gss.c
1741
trace_rpcgss_verify_mic(task, maj_stat);
net/sunrpc/auth_gss/auth_gss.c
1748
struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth_gss/auth_gss.c
1750
struct rpc_rqst *rqstp = task->tk_rqstp;
net/sunrpc/auth_gss/auth_gss.c
1762
if (rpcauth_wrap_req_encode(task, xdr))
net/sunrpc/auth_gss/auth_gss.c
1787
trace_rpcgss_get_mic(task, maj_stat);
net/sunrpc/auth_gss/auth_gss.c
1839
struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth_gss/auth_gss.c
1841
struct rpc_rqst *rqstp = task->tk_rqstp;
net/sunrpc/auth_gss/auth_gss.c
1857
if (rpcauth_wrap_req_encode(task, xdr))
net/sunrpc/auth_gss/auth_gss.c
1910
trace_rpcgss_wrap(task, maj_stat);
net/sunrpc/auth_gss/auth_gss.c
1914
static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth_gss/auth_gss.c
1916
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
net/sunrpc/auth_gss/auth_gss.c
1927
status = rpcauth_wrap_req_encode(task, xdr);
net/sunrpc/auth_gss/auth_gss.c
1932
status = rpcauth_wrap_req_encode(task, xdr);
net/sunrpc/auth_gss/auth_gss.c
1935
status = gss_wrap_req_integ(cred, ctx, task, xdr);
net/sunrpc/auth_gss/auth_gss.c
1938
status = gss_wrap_req_priv(cred, ctx, task, xdr);
net/sunrpc/auth_gss/auth_gss.c
1956
static void gss_update_rslack(struct rpc_task *task, struct rpc_cred *cred,
net/sunrpc/auth_gss/auth_gss.c
1964
trace_rpcgss_update_slack(task, auth);
net/sunrpc/auth_gss/auth_gss.c
1969
gss_unwrap_resp_auth(struct rpc_task *task, struct rpc_cred *cred)
net/sunrpc/auth_gss/auth_gss.c
1971
gss_update_rslack(task, cred, 0, 0);
net/sunrpc/auth_gss/auth_gss.c
1989
gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
net/sunrpc/auth_gss/auth_gss.c
2042
gss_update_rslack(task, cred, 2, 2 + 1 + XDR_QUADLEN(mic.len));
net/sunrpc/auth_gss/auth_gss.c
2050
trace_rpcgss_unwrap_failed(task);
net/sunrpc/auth_gss/auth_gss.c
2053
trace_rpcgss_bad_seqno(task, *rqstp->rq_seqnos, seqno);
net/sunrpc/auth_gss/auth_gss.c
2056
trace_rpcgss_verify_mic(task, maj_stat);
net/sunrpc/auth_gss/auth_gss.c
2061
gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
net/sunrpc/auth_gss/auth_gss.c
2093
gss_update_rslack(task, cred, 2 + ctx->gc_gss_ctx->align,
net/sunrpc/auth_gss/auth_gss.c
2098
trace_rpcgss_unwrap_failed(task);
net/sunrpc/auth_gss/auth_gss.c
2101
trace_rpcgss_bad_seqno(task, *rqstp->rq_seqnos, be32_to_cpup(--p));
net/sunrpc/auth_gss/auth_gss.c
2104
trace_rpcgss_unwrap(task, maj_stat);
net/sunrpc/auth_gss/auth_gss.c
2115
gss_xmit_need_reencode(struct rpc_task *task)
net/sunrpc/auth_gss/auth_gss.c
2117
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/auth_gss/auth_gss.c
2147
trace_rpcgss_need_reencode(task, seq_xmit, ret);
net/sunrpc/auth_gss/auth_gss.c
2152
gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth_gss/auth_gss.c
2154
struct rpc_rqst *rqstp = task->tk_rqstp;
net/sunrpc/auth_gss/auth_gss.c
2165
status = gss_unwrap_resp_auth(task, cred);
net/sunrpc/auth_gss/auth_gss.c
2168
status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
net/sunrpc/auth_gss/auth_gss.c
2171
status = gss_unwrap_resp_priv(task, cred, ctx, rqstp, xdr);
net/sunrpc/auth_gss/auth_gss.c
2178
status = rpcauth_unwrap_resp_decode(task, xdr);
net/sunrpc/auth_gss/auth_gss.c
393
gss_upcall_callback(struct rpc_task *task)
net/sunrpc/auth_gss/auth_gss.c
395
struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
net/sunrpc/auth_gss/auth_gss.c
403
task->tk_status = gss_msg->msg.errno;
net/sunrpc/auth_gss/auth_gss.c
597
gss_refresh_upcall(struct rpc_task *task)
net/sunrpc/auth_gss/auth_gss.c
599
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
net/sunrpc/auth_gss/auth_gss.c
614
task, NULL, jiffies + (15 * HZ));
net/sunrpc/auth_gss/auth_gss.c
625
rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
net/sunrpc/auth_gss/auth_gss.c
630
rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
net/sunrpc/auth_null.c
63
nul_marshal(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth_null.c
83
nul_refresh(struct rpc_task *task)
net/sunrpc/auth_null.c
85
set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags);
net/sunrpc/auth_null.c
90
nul_validate(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth_tls.c
115
static int tls_refresh(struct rpc_task *task)
net/sunrpc/auth_tls.c
117
set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags);
net/sunrpc/auth_tls.c
121
static int tls_validate(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth_tls.c
35
static void rpc_tls_probe_call_prepare(struct rpc_task *task, void *data)
net/sunrpc/auth_tls.c
37
task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT;
net/sunrpc/auth_tls.c
38
rpc_call_start(task);
net/sunrpc/auth_tls.c
41
static void rpc_tls_probe_call_done(struct rpc_task *task, void *data)
net/sunrpc/auth_tls.c
62
struct rpc_task *task;
net/sunrpc/auth_tls.c
65
task = rpc_run_task(&task_setup_data);
net/sunrpc/auth_tls.c
66
if (IS_ERR(task))
net/sunrpc/auth_tls.c
67
return PTR_ERR(task);
net/sunrpc/auth_tls.c
68
status = task->tk_status;
net/sunrpc/auth_tls.c
69
rpc_put_task(task);
net/sunrpc/auth_tls.c
99
static int tls_marshal(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth_unix.c
111
unx_marshal(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth_unix.c
113
struct rpc_clnt *clnt = task->tk_client;
net/sunrpc/auth_unix.c
114
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
net/sunrpc/auth_unix.c
166
unx_refresh(struct rpc_task *task)
net/sunrpc/auth_unix.c
168
set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags);
net/sunrpc/auth_unix.c
173
unx_validate(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/auth_unix.c
175
struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
net/sunrpc/clnt.c
1114
void rpc_task_release_transport(struct rpc_task *task)
net/sunrpc/clnt.c
1116
struct rpc_xprt *xprt = task->tk_xprt;
net/sunrpc/clnt.c
1119
task->tk_xprt = NULL;
net/sunrpc/clnt.c
1120
if (task->tk_client)
net/sunrpc/clnt.c
1121
rpc_task_release_xprt(task->tk_client, xprt);
net/sunrpc/clnt.c
1128
void rpc_task_release_client(struct rpc_task *task)
net/sunrpc/clnt.c
1130
struct rpc_clnt *clnt = task->tk_client;
net/sunrpc/clnt.c
1132
rpc_task_release_transport(task);
net/sunrpc/clnt.c
1136
list_del(&task->tk_task);
net/sunrpc/clnt.c
1138
task->tk_client = NULL;
net/sunrpc/clnt.c
1163
void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
net/sunrpc/clnt.c
1165
if (task->tk_xprt) {
net/sunrpc/clnt.c
1166
if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
net/sunrpc/clnt.c
1167
(task->tk_flags & RPC_TASK_MOVEABLE)))
net/sunrpc/clnt.c
1169
xprt_release(task);
net/sunrpc/clnt.c
1170
xprt_put(task->tk_xprt);
net/sunrpc/clnt.c
1172
if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
net/sunrpc/clnt.c
1173
task->tk_xprt = rpc_task_get_first_xprt(clnt);
net/sunrpc/clnt.c
1175
task->tk_xprt = rpc_task_get_next_xprt(clnt);
net/sunrpc/clnt.c
1179
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
net/sunrpc/clnt.c
1181
rpc_task_set_transport(task, clnt);
net/sunrpc/clnt.c
1182
task->tk_client = clnt;
net/sunrpc/clnt.c
1185
task->tk_flags |= RPC_TASK_SOFT;
net/sunrpc/clnt.c
1187
task->tk_flags |= RPC_TASK_TIMEOUT;
net/sunrpc/clnt.c
1189
task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
net/sunrpc/clnt.c
1191
task->tk_flags |= RPC_TASK_NETUNREACH_FATAL;
net/sunrpc/clnt.c
1196
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
net/sunrpc/clnt.c
1199
task->tk_msg.rpc_proc = msg->rpc_proc;
net/sunrpc/clnt.c
1200
task->tk_msg.rpc_argp = msg->rpc_argp;
net/sunrpc/clnt.c
1201
task->tk_msg.rpc_resp = msg->rpc_resp;
net/sunrpc/clnt.c
1202
task->tk_msg.rpc_cred = msg->rpc_cred;
net/sunrpc/clnt.c
1203
if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
net/sunrpc/clnt.c
1204
get_cred(task->tk_msg.rpc_cred);
net/sunrpc/clnt.c
1212
rpc_default_callback(struct rpc_task *task, void *data)
net/sunrpc/clnt.c
1226
struct rpc_task *task;
net/sunrpc/clnt.c
1228
task = rpc_new_task(task_setup_data);
net/sunrpc/clnt.c
1229
if (IS_ERR(task))
net/sunrpc/clnt.c
1230
return task;
net/sunrpc/clnt.c
1232
if (!RPC_IS_ASYNC(task))
net/sunrpc/clnt.c
1233
task->tk_flags |= RPC_TASK_CRED_NOREF;
net/sunrpc/clnt.c
1235
rpc_task_set_client(task, task_setup_data->rpc_client);
net/sunrpc/clnt.c
1236
rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
net/sunrpc/clnt.c
1238
if (task->tk_action == NULL)
net/sunrpc/clnt.c
1239
rpc_call_start(task);
net/sunrpc/clnt.c
1241
atomic_inc(&task->tk_count);
net/sunrpc/clnt.c
1242
rpc_execute(task);
net/sunrpc/clnt.c
1243
return task;
net/sunrpc/clnt.c
1255
struct rpc_task *task;
net/sunrpc/clnt.c
1271
task = rpc_run_task(&task_setup_data);
net/sunrpc/clnt.c
1272
if (IS_ERR(task))
net/sunrpc/clnt.c
1273
return PTR_ERR(task);
net/sunrpc/clnt.c
1274
status = task->tk_status;
net/sunrpc/clnt.c
1275
rpc_put_task(task);
net/sunrpc/clnt.c
1292
struct rpc_task *task;
net/sunrpc/clnt.c
1301
task = rpc_run_task(&task_setup_data);
net/sunrpc/clnt.c
1302
if (IS_ERR(task))
net/sunrpc/clnt.c
1303
return PTR_ERR(task);
net/sunrpc/clnt.c
1304
rpc_put_task(task);
net/sunrpc/clnt.c
1310
static void call_bc_encode(struct rpc_task *task);
net/sunrpc/clnt.c
1321
struct rpc_task *task;
net/sunrpc/clnt.c
1332
task = rpc_new_task(&task_setup_data);
net/sunrpc/clnt.c
1333
if (IS_ERR(task)) {
net/sunrpc/clnt.c
1335
return task;
net/sunrpc/clnt.c
1338
xprt_init_bc_request(req, task, timeout);
net/sunrpc/clnt.c
1340
task->tk_action = call_bc_encode;
net/sunrpc/clnt.c
1341
atomic_inc(&task->tk_count);
net/sunrpc/clnt.c
1342
WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
net/sunrpc/clnt.c
1343
rpc_execute(task);
net/sunrpc/clnt.c
1345
dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
net/sunrpc/clnt.c
1346
return task;
net/sunrpc/clnt.c
1371
rpc_call_start(struct rpc_task *task)
net/sunrpc/clnt.c
1373
task->tk_action = call_start;
net/sunrpc/clnt.c
1671
__rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *))
net/sunrpc/clnt.c
1673
task->tk_status = 0;
net/sunrpc/clnt.c
1674
task->tk_rpc_status = 0;
net/sunrpc/clnt.c
1675
task->tk_action = action;
net/sunrpc/clnt.c
1684
rpc_restart_call(struct rpc_task *task)
net/sunrpc/clnt.c
1686
return __rpc_restart_call(task, call_start);
net/sunrpc/clnt.c
1695
rpc_restart_call_prepare(struct rpc_task *task)
net/sunrpc/clnt.c
1697
if (task->tk_ops->rpc_call_prepare != NULL)
net/sunrpc/clnt.c
1698
return __rpc_restart_call(task, rpc_prepare_task);
net/sunrpc/clnt.c
1699
return rpc_restart_call(task);
net/sunrpc/clnt.c
1704
*rpc_proc_name(const struct rpc_task *task)
net/sunrpc/clnt.c
1706
const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
net/sunrpc/clnt.c
1718
__rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
net/sunrpc/clnt.c
1720
trace_rpc_call_rpcerror(task, tk_status, rpc_status);
net/sunrpc/clnt.c
1721
rpc_task_set_rpc_status(task, rpc_status);
net/sunrpc/clnt.c
1722
rpc_exit(task, tk_status);
net/sunrpc/clnt.c
1726
rpc_call_rpcerror(struct rpc_task *task, int status)
net/sunrpc/clnt.c
1728
__rpc_call_rpcerror(task, status, status);
net/sunrpc/clnt.c
1738
call_start(struct rpc_task *task)
net/sunrpc/clnt.c
1740
struct rpc_clnt *clnt = task->tk_client;
net/sunrpc/clnt.c
1741
int idx = task->tk_msg.rpc_proc->p_statidx;
net/sunrpc/clnt.c
1743
trace_rpc_request(task);
net/sunrpc/clnt.c
1745
if (task->tk_client->cl_shutdown) {
net/sunrpc/clnt.c
1746
rpc_call_rpcerror(task, -EIO);
net/sunrpc/clnt.c
1754
task->tk_action = call_reserve;
net/sunrpc/clnt.c
1755
rpc_task_set_transport(task, clnt);
net/sunrpc/clnt.c
1762
call_reserve(struct rpc_task *task)
net/sunrpc/clnt.c
1764
task->tk_status = 0;
net/sunrpc/clnt.c
1765
task->tk_action = call_reserveresult;
net/sunrpc/clnt.c
1766
xprt_reserve(task);
net/sunrpc/clnt.c
1769
static void call_retry_reserve(struct rpc_task *task);
net/sunrpc/clnt.c
1775
call_reserveresult(struct rpc_task *task)
net/sunrpc/clnt.c
1777
int status = task->tk_status;
net/sunrpc/clnt.c
1783
task->tk_status = 0;
net/sunrpc/clnt.c
1785
if (task->tk_rqstp) {
net/sunrpc/clnt.c
1786
task->tk_action = call_refresh;
net/sunrpc/clnt.c
1789
spin_lock(&task->tk_client->cl_lock);
net/sunrpc/clnt.c
1790
if (list_empty(&task->tk_task))
net/sunrpc/clnt.c
1791
list_add_tail(&task->tk_task, &task->tk_client->cl_tasks);
net/sunrpc/clnt.c
1792
spin_unlock(&task->tk_client->cl_lock);
net/sunrpc/clnt.c
1795
rpc_call_rpcerror(task, -EIO);
net/sunrpc/clnt.c
1801
rpc_delay(task, HZ >> 2);
net/sunrpc/clnt.c
1804
task->tk_action = call_retry_reserve;
net/sunrpc/clnt.c
1807
rpc_call_rpcerror(task, status);
net/sunrpc/clnt.c
1815
call_retry_reserve(struct rpc_task *task)
net/sunrpc/clnt.c
1817
task->tk_status = 0;
net/sunrpc/clnt.c
1818
task->tk_action = call_reserveresult;
net/sunrpc/clnt.c
1819
xprt_retry_reserve(task);
net/sunrpc/clnt.c
1826
call_refresh(struct rpc_task *task)
net/sunrpc/clnt.c
1828
task->tk_action = call_refreshresult;
net/sunrpc/clnt.c
1829
task->tk_status = 0;
net/sunrpc/clnt.c
1830
task->tk_client->cl_stats->rpcauthrefresh++;
net/sunrpc/clnt.c
1831
rpcauth_refreshcred(task);
net/sunrpc/clnt.c
1838
call_refreshresult(struct rpc_task *task)
net/sunrpc/clnt.c
1840
int status = task->tk_status;
net/sunrpc/clnt.c
1842
task->tk_status = 0;
net/sunrpc/clnt.c
1843
task->tk_action = call_refresh;
net/sunrpc/clnt.c
1846
if (rpcauth_uptodatecred(task)) {
net/sunrpc/clnt.c
1847
task->tk_action = call_allocate;
net/sunrpc/clnt.c
1855
rpc_delay(task, 3*HZ);
net/sunrpc/clnt.c
1859
if (!task->tk_cred_retry)
net/sunrpc/clnt.c
1861
task->tk_cred_retry--;
net/sunrpc/clnt.c
1862
trace_rpc_retry_refresh_status(task);
net/sunrpc/clnt.c
1867
rpc_delay(task, HZ >> 4);
net/sunrpc/clnt.c
1870
trace_rpc_refresh_status(task);
net/sunrpc/clnt.c
1871
rpc_call_rpcerror(task, status);
net/sunrpc/clnt.c
1879
call_allocate(struct rpc_task *task)
net/sunrpc/clnt.c
1881
const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
net/sunrpc/clnt.c
1882
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/clnt.c
1884
const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
net/sunrpc/clnt.c
1887
task->tk_status = 0;
net/sunrpc/clnt.c
1888
task->tk_action = call_encode;
net/sunrpc/clnt.c
1909
status = xprt->ops->buf_alloc(task);
net/sunrpc/clnt.c
1910
trace_rpc_buf_alloc(task, status);
net/sunrpc/clnt.c
1914
rpc_call_rpcerror(task, status);
net/sunrpc/clnt.c
1918
if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
net/sunrpc/clnt.c
1919
task->tk_action = call_allocate;
net/sunrpc/clnt.c
1920
rpc_delay(task, HZ>>4);
net/sunrpc/clnt.c
1924
rpc_call_rpcerror(task, -ERESTARTSYS);
net/sunrpc/clnt.c
1928
rpc_task_need_encode(struct rpc_task *task)
net/sunrpc/clnt.c
1930
return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
net/sunrpc/clnt.c
1931
(!(task->tk_flags & RPC_TASK_SENT) ||
net/sunrpc/clnt.c
1932
!(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
net/sunrpc/clnt.c
1933
xprt_request_need_retransmit(task));
net/sunrpc/clnt.c
1937
rpc_xdr_encode(struct rpc_task *task)
net/sunrpc/clnt.c
1939
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/clnt.c
1953
if (rpc_encode_header(task, &xdr))
net/sunrpc/clnt.c
1956
task->tk_status = rpcauth_wrap_req(task, &xdr);
net/sunrpc/clnt.c
1963
call_encode(struct rpc_task *task)
net/sunrpc/clnt.c
1965
if (!rpc_task_need_encode(task))
net/sunrpc/clnt.c
1969
xprt_request_dequeue_xprt(task);
net/sunrpc/clnt.c
1971
rpc_xdr_encode(task);
net/sunrpc/clnt.c
1973
if (task->tk_status == 0 && rpc_reply_expected(task))
net/sunrpc/clnt.c
1974
task->tk_status = xprt_request_enqueue_receive(task);
net/sunrpc/clnt.c
1976
if (task->tk_status != 0) {
net/sunrpc/clnt.c
1978
switch (task->tk_status) {
net/sunrpc/clnt.c
1981
rpc_delay(task, HZ >> 4);
net/sunrpc/clnt.c
1984
if (!task->tk_cred_retry) {
net/sunrpc/clnt.c
1985
rpc_call_rpcerror(task, task->tk_status);
net/sunrpc/clnt.c
1987
task->tk_action = call_refresh;
net/sunrpc/clnt.c
1988
task->tk_cred_retry--;
net/sunrpc/clnt.c
1989
trace_rpc_retry_refresh_status(task);
net/sunrpc/clnt.c
1993
rpc_call_rpcerror(task, task->tk_status);
net/sunrpc/clnt.c
1998
xprt_request_enqueue_transmit(task);
net/sunrpc/clnt.c
2000
task->tk_action = call_transmit;
net/sunrpc/clnt.c
2002
if (!xprt_bound(task->tk_xprt))
net/sunrpc/clnt.c
2003
task->tk_action = call_bind;
net/sunrpc/clnt.c
2004
else if (!xprt_connected(task->tk_xprt))
net/sunrpc/clnt.c
2005
task->tk_action = call_connect;
net/sunrpc/clnt.c
2013
rpc_task_transmitted(struct rpc_task *task)
net/sunrpc/clnt.c
2015
return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
net/sunrpc/clnt.c
2019
rpc_task_handle_transmitted(struct rpc_task *task)
net/sunrpc/clnt.c
2021
xprt_end_transmit(task);
net/sunrpc/clnt.c
2022
task->tk_action = call_transmit_status;
net/sunrpc/clnt.c
2029
call_bind(struct rpc_task *task)
net/sunrpc/clnt.c
2031
struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
net/sunrpc/clnt.c
2033
if (rpc_task_transmitted(task)) {
net/sunrpc/clnt.c
2034
rpc_task_handle_transmitted(task);
net/sunrpc/clnt.c
2039
task->tk_action = call_connect;
net/sunrpc/clnt.c
2043
task->tk_action = call_bind_status;
net/sunrpc/clnt.c
2044
if (!xprt_prepare_transmit(task))
net/sunrpc/clnt.c
2047
xprt->ops->rpcbind(task);
net/sunrpc/clnt.c
2054
call_bind_status(struct rpc_task *task)
net/sunrpc/clnt.c
2056
struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
net/sunrpc/clnt.c
2059
if (rpc_task_transmitted(task)) {
net/sunrpc/clnt.c
2060
rpc_task_handle_transmitted(task);
net/sunrpc/clnt.c
2064
if (task->tk_status >= 0)
net/sunrpc/clnt.c
2067
task->tk_status = 0;
net/sunrpc/clnt.c
2071
switch (task->tk_status) {
net/sunrpc/clnt.c
2073
rpc_delay(task, HZ >> 2);
net/sunrpc/clnt.c
2076
trace_rpcb_prog_unavail_err(task);
net/sunrpc/clnt.c
2078
if (task->tk_msg.rpc_proc->p_proc == 0) {
net/sunrpc/clnt.c
2082
rpc_delay(task, 3*HZ);
net/sunrpc/clnt.c
2085
rpc_delay(task, HZ >> 2);
net/sunrpc/clnt.c
2090
trace_rpcb_timeout_err(task);
net/sunrpc/clnt.c
2094
trace_rpcb_bind_version_err(task);
net/sunrpc/clnt.c
2097
trace_rpcb_bind_version_err(task);
net/sunrpc/clnt.c
2101
if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
net/sunrpc/clnt.c
2111
trace_rpcb_unreachable_err(task);
net/sunrpc/clnt.c
2112
if (!RPC_IS_SOFTCONN(task)) {
net/sunrpc/clnt.c
2113
rpc_delay(task, 5*HZ);
net/sunrpc/clnt.c
2116
status = task->tk_status;
net/sunrpc/clnt.c
2119
trace_rpcb_unrecognized_err(task);
net/sunrpc/clnt.c
2122
rpc_call_rpcerror(task, status);
net/sunrpc/clnt.c
2125
task->tk_action = call_connect;
net/sunrpc/clnt.c
2128
task->tk_status = 0;
net/sunrpc/clnt.c
2129
task->tk_action = call_bind;
net/sunrpc/clnt.c
2130
rpc_check_timeout(task);
net/sunrpc/clnt.c
2137
call_connect(struct rpc_task *task)
net/sunrpc/clnt.c
2139
struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
net/sunrpc/clnt.c
2141
if (rpc_task_transmitted(task)) {
net/sunrpc/clnt.c
2142
rpc_task_handle_transmitted(task);
net/sunrpc/clnt.c
2147
task->tk_action = call_transmit;
net/sunrpc/clnt.c
2151
task->tk_action = call_connect_status;
net/sunrpc/clnt.c
2152
if (task->tk_status < 0)
net/sunrpc/clnt.c
2154
if (task->tk_flags & RPC_TASK_NOCONNECT) {
net/sunrpc/clnt.c
2155
rpc_call_rpcerror(task, -ENOTCONN);
net/sunrpc/clnt.c
2158
if (!xprt_prepare_transmit(task))
net/sunrpc/clnt.c
2160
xprt_connect(task);
net/sunrpc/clnt.c
2167
call_connect_status(struct rpc_task *task)
net/sunrpc/clnt.c
2169
struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
net/sunrpc/clnt.c
2170
struct rpc_clnt *clnt = task->tk_client;
net/sunrpc/clnt.c
2171
int status = task->tk_status;
net/sunrpc/clnt.c
2173
if (rpc_task_transmitted(task)) {
net/sunrpc/clnt.c
2174
rpc_task_handle_transmitted(task);
net/sunrpc/clnt.c
2178
trace_rpc_connect_status(task);
net/sunrpc/clnt.c
2180
if (task->tk_status == 0) {
net/sunrpc/clnt.c
2185
task->tk_status = 0;
net/sunrpc/clnt.c
2189
task->tk_status = 0;
net/sunrpc/clnt.c
2193
if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
net/sunrpc/clnt.c
2201
if (RPC_IS_SOFTCONN(task))
net/sunrpc/clnt.c
2210
xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
net/sunrpc/clnt.c
2211
task->tk_rqstp->rq_connect_cookie);
net/sunrpc/clnt.c
2212
if (RPC_IS_SOFTCONN(task))
net/sunrpc/clnt.c
2215
rpc_delay(task, 3*HZ);
net/sunrpc/clnt.c
2221
if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) &&
net/sunrpc/clnt.c
2222
(task->tk_flags & RPC_TASK_MOVEABLE) &&
net/sunrpc/clnt.c
2224
struct rpc_xprt *saved = task->tk_xprt;
net/sunrpc/clnt.c
2231
xprt_release(task);
net/sunrpc/clnt.c
2237
task->tk_xprt = NULL;
net/sunrpc/clnt.c
2238
task->tk_action = call_start;
net/sunrpc/clnt.c
2241
if (!task->tk_xprt)
net/sunrpc/clnt.c
2246
rpc_delay(task, HZ >> 2);
net/sunrpc/clnt.c
2249
rpc_call_rpcerror(task, status);
net/sunrpc/clnt.c
2252
task->tk_action = call_transmit;
net/sunrpc/clnt.c
2256
task->tk_action = call_bind;
net/sunrpc/clnt.c
2258
rpc_check_timeout(task);
net/sunrpc/clnt.c
2265
call_transmit(struct rpc_task *task)
net/sunrpc/clnt.c
2267
if (rpc_task_transmitted(task)) {
net/sunrpc/clnt.c
2268
rpc_task_handle_transmitted(task);
net/sunrpc/clnt.c
2272
task->tk_action = call_transmit_status;
net/sunrpc/clnt.c
2273
if (!xprt_prepare_transmit(task))
net/sunrpc/clnt.c
2275
task->tk_status = 0;
net/sunrpc/clnt.c
2276
if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
net/sunrpc/clnt.c
2277
if (!xprt_connected(task->tk_xprt)) {
net/sunrpc/clnt.c
2278
task->tk_status = -ENOTCONN;
net/sunrpc/clnt.c
2281
xprt_transmit(task);
net/sunrpc/clnt.c
2283
xprt_end_transmit(task);
net/sunrpc/clnt.c
2290
call_transmit_status(struct rpc_task *task)
net/sunrpc/clnt.c
2292
task->tk_action = call_status;
net/sunrpc/clnt.c
2298
if (rpc_task_transmitted(task)) {
net/sunrpc/clnt.c
2299
task->tk_status = 0;
net/sunrpc/clnt.c
2300
xprt_request_wait_receive(task);
net/sunrpc/clnt.c
2304
switch (task->tk_status) {
net/sunrpc/clnt.c
2308
task->tk_status = 0;
net/sunrpc/clnt.c
2309
task->tk_action = call_encode;
net/sunrpc/clnt.c
2319
rpc_delay(task, HZ>>2);
net/sunrpc/clnt.c
2323
task->tk_action = call_transmit;
net/sunrpc/clnt.c
2324
task->tk_status = 0;
net/sunrpc/clnt.c
2333
if (RPC_IS_SOFTCONN(task)) {
net/sunrpc/clnt.c
2334
if (!task->tk_msg.rpc_proc->p_proc)
net/sunrpc/clnt.c
2335
trace_xprt_ping(task->tk_xprt,
net/sunrpc/clnt.c
2336
task->tk_status);
net/sunrpc/clnt.c
2337
rpc_call_rpcerror(task, task->tk_status);
net/sunrpc/clnt.c
2346
task->tk_action = call_bind;
net/sunrpc/clnt.c
2347
task->tk_status = 0;
net/sunrpc/clnt.c
2350
rpc_check_timeout(task);
net/sunrpc/clnt.c
2354
static void call_bc_transmit(struct rpc_task *task);
net/sunrpc/clnt.c
2355
static void call_bc_transmit_status(struct rpc_task *task);
net/sunrpc/clnt.c
2358
call_bc_encode(struct rpc_task *task)
net/sunrpc/clnt.c
2360
xprt_request_enqueue_transmit(task);
net/sunrpc/clnt.c
2361
task->tk_action = call_bc_transmit;
net/sunrpc/clnt.c
2369
call_bc_transmit(struct rpc_task *task)
net/sunrpc/clnt.c
2371
task->tk_action = call_bc_transmit_status;
net/sunrpc/clnt.c
2372
if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
net/sunrpc/clnt.c
2373
if (!xprt_prepare_transmit(task))
net/sunrpc/clnt.c
2375
task->tk_status = 0;
net/sunrpc/clnt.c
2376
xprt_transmit(task);
net/sunrpc/clnt.c
2378
xprt_end_transmit(task);
net/sunrpc/clnt.c
2382
call_bc_transmit_status(struct rpc_task *task)
net/sunrpc/clnt.c
2384
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/clnt.c
2386
if (rpc_task_transmitted(task))
net/sunrpc/clnt.c
2387
task->tk_status = 0;
net/sunrpc/clnt.c
2389
switch (task->tk_status) {
net/sunrpc/clnt.c
2404
rpc_delay(task, HZ>>2);
net/sunrpc/clnt.c
2408
task->tk_status = 0;
net/sunrpc/clnt.c
2409
task->tk_action = call_bc_transmit;
net/sunrpc/clnt.c
2420
"error: %d\n", task->tk_status);
net/sunrpc/clnt.c
2430
"error: %d\n", task->tk_status);
net/sunrpc/clnt.c
2433
task->tk_action = rpc_exit_task;
net/sunrpc/clnt.c
2441
call_status(struct rpc_task *task)
net/sunrpc/clnt.c
2443
struct rpc_clnt *clnt = task->tk_client;
net/sunrpc/clnt.c
2446
if (!task->tk_msg.rpc_proc->p_proc)
net/sunrpc/clnt.c
2447
trace_xprt_ping(task->tk_xprt, task->tk_status);
net/sunrpc/clnt.c
2449
status = task->tk_status;
net/sunrpc/clnt.c
2451
task->tk_action = call_decode;
net/sunrpc/clnt.c
2455
trace_rpc_call_status(task);
net/sunrpc/clnt.c
2456
task->tk_status = 0;
net/sunrpc/clnt.c
2460
if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
net/sunrpc/clnt.c
2466
if (RPC_IS_SOFTCONN(task))
net/sunrpc/clnt.c
2472
rpc_delay(task, 3*HZ);
net/sunrpc/clnt.c
2483
rpc_delay(task, 3*HZ);
net/sunrpc/clnt.c
2491
rpc_delay(task, HZ>>2);
net/sunrpc/clnt.c
2502
task->tk_action = call_encode;
net/sunrpc/clnt.c
2503
rpc_check_timeout(task);
net/sunrpc/clnt.c
2506
rpc_call_rpcerror(task, status);
net/sunrpc/clnt.c
2519
rpc_check_timeout(struct rpc_task *task)
net/sunrpc/clnt.c
2521
struct rpc_clnt *clnt = task->tk_client;
net/sunrpc/clnt.c
2523
if (RPC_SIGNALLED(task))
net/sunrpc/clnt.c
2526
if (xprt_adjust_timeout(task->tk_rqstp) == 0)
net/sunrpc/clnt.c
2529
trace_rpc_timeout_status(task);
net/sunrpc/clnt.c
2530
task->tk_timeouts++;
net/sunrpc/clnt.c
2532
if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
net/sunrpc/clnt.c
2533
rpc_call_rpcerror(task, -ETIMEDOUT);
net/sunrpc/clnt.c
2537
if (RPC_IS_SOFT(task)) {
net/sunrpc/clnt.c
2543
if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) &&
net/sunrpc/clnt.c
2544
rpc_check_connected(task->tk_rqstp))
net/sunrpc/clnt.c
2551
task->tk_xprt->servername);
net/sunrpc/clnt.c
2553
if (task->tk_flags & RPC_TASK_TIMEOUT)
net/sunrpc/clnt.c
2554
rpc_call_rpcerror(task, -ETIMEDOUT);
net/sunrpc/clnt.c
2556
__rpc_call_rpcerror(task, -EIO, -ETIMEDOUT);
net/sunrpc/clnt.c
2560
if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
net/sunrpc/clnt.c
2561
task->tk_flags |= RPC_CALL_MAJORSEEN;
net/sunrpc/clnt.c
2566
task->tk_xprt->servername);
net/sunrpc/clnt.c
2574
rpcauth_invalcred(task);
net/sunrpc/clnt.c
2581
call_decode(struct rpc_task *task)
net/sunrpc/clnt.c
2583
struct rpc_clnt *clnt = task->tk_client;
net/sunrpc/clnt.c
2584
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/clnt.c
2588
if (!task->tk_msg.rpc_proc->p_decode) {
net/sunrpc/clnt.c
2589
task->tk_action = rpc_exit_task;
net/sunrpc/clnt.c
2593
if (task->tk_flags & RPC_CALL_MAJORSEEN) {
net/sunrpc/clnt.c
2597
task->tk_xprt->servername);
net/sunrpc/clnt.c
2599
task->tk_flags &= ~RPC_CALL_MAJORSEEN;
net/sunrpc/clnt.c
2616
trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);
net/sunrpc/clnt.c
2624
err = rpc_decode_header(task, &xdr);
net/sunrpc/clnt.c
2628
task->tk_action = rpc_exit_task;
net/sunrpc/clnt.c
2629
task->tk_status = rpcauth_unwrap_resp(task, &xdr);
net/sunrpc/clnt.c
2633
task->tk_status = 0;
net/sunrpc/clnt.c
2634
if (task->tk_client->cl_discrtry)
net/sunrpc/clnt.c
2637
task->tk_action = call_encode;
net/sunrpc/clnt.c
2638
rpc_check_timeout(task);
net/sunrpc/clnt.c
2641
task->tk_action = call_reserve;
net/sunrpc/clnt.c
2642
rpc_check_timeout(task);
net/sunrpc/clnt.c
2643
rpcauth_invalcred(task);
net/sunrpc/clnt.c
2645
xprt_release(task);
net/sunrpc/clnt.c
2650
rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/clnt.c
2652
struct rpc_clnt *clnt = task->tk_client;
net/sunrpc/clnt.c
2653
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/clnt.c
2666
*p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc);
net/sunrpc/clnt.c
2668
error = rpcauth_marshcred(task, xdr);
net/sunrpc/clnt.c
2673
trace_rpc_bad_callhdr(task);
net/sunrpc/clnt.c
2674
rpc_call_rpcerror(task, error);
net/sunrpc/clnt.c
2679
rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
net/sunrpc/clnt.c
2681
struct rpc_clnt *clnt = task->tk_client;
net/sunrpc/clnt.c
2690
if (task->tk_rqstp->rq_rcv_buf.len & 3)
net/sunrpc/clnt.c
2702
error = rpcauth_checkverf(task, xdr);
net/sunrpc/clnt.c
2704
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
net/sunrpc/clnt.c
2707
rpcauth_invalcred(task);
net/sunrpc/clnt.c
2708
if (!task->tk_cred_retry)
net/sunrpc/clnt.c
2710
task->tk_cred_retry--;
net/sunrpc/clnt.c
2711
trace_rpc__stale_creds(task);
net/sunrpc/clnt.c
2724
trace_rpc__prog_unavail(task);
net/sunrpc/clnt.c
2728
trace_rpc__prog_mismatch(task);
net/sunrpc/clnt.c
2732
trace_rpc__proc_unavail(task);
net/sunrpc/clnt.c
2737
trace_rpc__garbage_args(task);
net/sunrpc/clnt.c
2746
if (task->tk_garb_retry) {
net/sunrpc/clnt.c
2747
task->tk_garb_retry--;
net/sunrpc/clnt.c
2748
task->tk_action = call_encode;
net/sunrpc/clnt.c
2752
rpc_call_rpcerror(task, error);
net/sunrpc/clnt.c
2756
trace_rpc__unparsable(task);
net/sunrpc/clnt.c
2761
trace_rpc_bad_verifier(task);
net/sunrpc/clnt.c
2769
task->tk_rqstp->rq_reply_bytes_recvd = 0;
net/sunrpc/clnt.c
2770
task->tk_status = xprt_request_enqueue_receive(task);
net/sunrpc/clnt.c
2771
task->tk_action = call_transmit_status;
net/sunrpc/clnt.c
2786
trace_rpc__mismatch(task);
net/sunrpc/clnt.c
2801
rpcauth_invalcred(task);
net/sunrpc/clnt.c
2802
if (!task->tk_cred_retry)
net/sunrpc/clnt.c
2804
task->tk_cred_retry--;
net/sunrpc/clnt.c
2805
trace_rpc__stale_creds(task);
net/sunrpc/clnt.c
2810
if (!task->tk_garb_retry)
net/sunrpc/clnt.c
2812
task->tk_garb_retry--;
net/sunrpc/clnt.c
2813
trace_rpc__bad_creds(task);
net/sunrpc/clnt.c
2814
task->tk_action = call_encode;
net/sunrpc/clnt.c
2817
trace_rpc__auth_tooweak(task);
net/sunrpc/clnt.c
2819
task->tk_xprt->servername);
net/sunrpc/clnt.c
2848
rpc_null_call_prepare(struct rpc_task *task, void *data)
net/sunrpc/clnt.c
2850
task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT;
net/sunrpc/clnt.c
2851
rpc_call_start(task);
net/sunrpc/clnt.c
2889
struct rpc_task *task;
net/sunrpc/clnt.c
2895
task = rpc_call_null_helper(clnt, NULL, NULL, 0, NULL, NULL);
net/sunrpc/clnt.c
2896
if (IS_ERR(task))
net/sunrpc/clnt.c
2897
return PTR_ERR(task);
net/sunrpc/clnt.c
2898
status = task->tk_status;
net/sunrpc/clnt.c
2899
rpc_put_task(task);
net/sunrpc/clnt.c
2914
struct rpc_task *task;
net/sunrpc/clnt.c
2917
task = rpc_run_task(&task_setup_data);
net/sunrpc/clnt.c
2918
if (IS_ERR(task))
net/sunrpc/clnt.c
2919
return PTR_ERR(task);
net/sunrpc/clnt.c
2920
status = task->tk_status;
net/sunrpc/clnt.c
2921
rpc_put_task(task);
net/sunrpc/clnt.c
2930
static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata)
net/sunrpc/clnt.c
2934
if (task->tk_status == 0)
net/sunrpc/clnt.c
2965
struct rpc_task *task;
net/sunrpc/clnt.c
2989
task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
net/sunrpc/clnt.c
2991
if (IS_ERR(task))
net/sunrpc/clnt.c
2992
return PTR_ERR(task);
net/sunrpc/clnt.c
2995
rpc_put_task(task);
net/sunrpc/clnt.c
3005
struct rpc_task *task;
net/sunrpc/clnt.c
3009
task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL);
net/sunrpc/clnt.c
3010
if (IS_ERR(task))
net/sunrpc/clnt.c
3011
return PTR_ERR(task);
net/sunrpc/clnt.c
3013
status = task->tk_status;
net/sunrpc/clnt.c
3014
rpc_put_task(task);
net/sunrpc/clnt.c
3348
const struct rpc_task *task)
net/sunrpc/clnt.c
3352
if (RPC_IS_QUEUED(task))
net/sunrpc/clnt.c
3353
rpc_waitq = rpc_qname(task->tk_waitqueue);
net/sunrpc/clnt.c
3356
task->tk_pid, task->tk_flags, task->tk_status,
net/sunrpc/clnt.c
3357
clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops,
net/sunrpc/clnt.c
3358
clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
net/sunrpc/clnt.c
3359
task->tk_action, rpc_waitq);
net/sunrpc/clnt.c
3365
struct rpc_task *task;
net/sunrpc/clnt.c
3372
list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
net/sunrpc/clnt.c
3377
rpc_show_task(clnt, task);
net/sunrpc/clnt.c
53
static void call_start(struct rpc_task *task);
net/sunrpc/clnt.c
54
static void call_reserve(struct rpc_task *task);
net/sunrpc/clnt.c
55
static void call_reserveresult(struct rpc_task *task);
net/sunrpc/clnt.c
56
static void call_allocate(struct rpc_task *task);
net/sunrpc/clnt.c
57
static void call_encode(struct rpc_task *task);
net/sunrpc/clnt.c
58
static void call_decode(struct rpc_task *task);
net/sunrpc/clnt.c
59
static void call_bind(struct rpc_task *task);
net/sunrpc/clnt.c
60
static void call_bind_status(struct rpc_task *task);
net/sunrpc/clnt.c
61
static void call_transmit(struct rpc_task *task);
net/sunrpc/clnt.c
62
static void call_status(struct rpc_task *task);
net/sunrpc/clnt.c
63
static void call_transmit_status(struct rpc_task *task);
net/sunrpc/clnt.c
64
static void call_refresh(struct rpc_task *task);
net/sunrpc/clnt.c
65
static void call_refreshresult(struct rpc_task *task);
net/sunrpc/clnt.c
66
static void call_connect(struct rpc_task *task);
net/sunrpc/clnt.c
67
static void call_connect_status(struct rpc_task *task);
net/sunrpc/clnt.c
69
static int rpc_encode_header(struct rpc_task *task,
net/sunrpc/clnt.c
71
static int rpc_decode_header(struct rpc_task *task,
net/sunrpc/clnt.c
75
static void rpc_check_timeout(struct rpc_task *task);
net/sunrpc/clnt.c
907
struct rpc_task *task;
net/sunrpc/clnt.c
916
list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
net/sunrpc/clnt.c
917
if (!RPC_IS_ACTIVATED(task))
net/sunrpc/clnt.c
919
if (!fnmatch(task, data))
net/sunrpc/clnt.c
921
rpc_task_try_cancel(task, error);
net/sunrpc/debugfs.c
23
struct rpc_task *task = v;
net/sunrpc/debugfs.c
24
struct rpc_clnt *clnt = task->tk_client;
net/sunrpc/debugfs.c
27
if (RPC_IS_QUEUED(task))
net/sunrpc/debugfs.c
28
rpc_waitq = rpc_qname(task->tk_waitqueue);
net/sunrpc/debugfs.c
30
if (task->tk_rqstp)
net/sunrpc/debugfs.c
31
xid = be32_to_cpu(task->tk_rqstp->rq_xid);
net/sunrpc/debugfs.c
34
task->tk_pid, task->tk_flags, task->tk_status,
net/sunrpc/debugfs.c
35
clnt->cl_clid, xid, rpc_task_timeout(task), task->tk_ops,
net/sunrpc/debugfs.c
36
clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
net/sunrpc/debugfs.c
37
task->tk_action, rpc_waitq);
net/sunrpc/debugfs.c
47
struct rpc_task *task;
net/sunrpc/debugfs.c
50
list_for_each_entry(task, &clnt->cl_tasks, tk_task)
net/sunrpc/debugfs.c
52
return task;
net/sunrpc/debugfs.c
60
struct rpc_task *task = v;
net/sunrpc/debugfs.c
61
struct list_head *next = task->tk_task.next;
net/sunrpc/rpcb_clnt.c
669
void rpcb_getport_async(struct rpc_task *task)
net/sunrpc/rpcb_clnt.c
684
clnt = rpcb_find_transport_owner(task->tk_client);
net/sunrpc/rpcb_clnt.c
686
xprt = xprt_get(task->tk_xprt);
net/sunrpc/rpcb_clnt.c
690
rpc_sleep_on_timeout(&xprt->binding, task,
net/sunrpc/rpcb_clnt.c
727
trace_rpcb_getport(clnt, task, bind_version);
net/sunrpc/rpcb_clnt.c
734
task->tk_client->cl_timeout);
net/sunrpc/rpcb_clnt.c
787
task->tk_status = status;
net/sunrpc/sched.c
100
list_del(&task->u.tk_wait.timer_list);
net/sunrpc/sched.c
1001
rpc_release_task(task);
net/sunrpc/sched.c
1015
void rpc_execute(struct rpc_task *task)
net/sunrpc/sched.c
1017
bool is_async = RPC_IS_ASYNC(task);
net/sunrpc/sched.c
1019
rpc_set_active(task);
net/sunrpc/sched.c
1020
rpc_make_runnable(rpciod_workqueue, task);
net/sunrpc/sched.c
1023
__rpc_execute(task);
net/sunrpc/sched.c
1053
int rpc_malloc(struct rpc_task *task)
net/sunrpc/sched.c
1055
struct rpc_rqst *rqst = task->tk_rqstp;
net/sunrpc/sched.c
1064
if (!buf && RPC_IS_ASYNC(task))
net/sunrpc/sched.c
1083
void rpc_free(struct rpc_task *task)
net/sunrpc/sched.c
1085
void *buffer = task->tk_rqstp->rq_buffer;
net/sunrpc/sched.c
1101
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
net/sunrpc/sched.c
1103
memset(task, 0, sizeof(*task));
net/sunrpc/sched.c
1104
atomic_set(&task->tk_count, 1);
net/sunrpc/sched.c
1105
task->tk_flags = task_setup_data->flags;
net/sunrpc/sched.c
1106
task->tk_ops = task_setup_data->callback_ops;
net/sunrpc/sched.c
1107
task->tk_calldata = task_setup_data->callback_data;
net/sunrpc/sched.c
1108
INIT_LIST_HEAD(&task->tk_task);
net/sunrpc/sched.c
1110
task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
net/sunrpc/sched.c
1111
task->tk_owner = current->tgid;
net/sunrpc/sched.c
1114
task->tk_workqueue = task_setup_data->workqueue;
net/sunrpc/sched.c
1116
task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
net/sunrpc/sched.c
1119
task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
net/sunrpc/sched.c
1121
if (task->tk_ops->rpc_call_prepare != NULL)
net/sunrpc/sched.c
1122
task->tk_action = rpc_prepare_task;
net/sunrpc/sched.c
1124
rpc_init_task_statistics(task);
net/sunrpc/sched.c
1129
struct rpc_task *task;
net/sunrpc/sched.c
1131
task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask());
net/sunrpc/sched.c
1132
if (task)
net/sunrpc/sched.c
1133
return task;
net/sunrpc/sched.c
1142
struct rpc_task *task = setup_data->task;
net/sunrpc/sched.c
1145
if (task == NULL) {
net/sunrpc/sched.c
1146
task = rpc_alloc_task();
net/sunrpc/sched.c
1147
if (task == NULL) {
net/sunrpc/sched.c
1155
rpc_init_task(task, setup_data);
net/sunrpc/sched.c
1156
task->tk_flags |= flags;
net/sunrpc/sched.c
1157
return task;
net/sunrpc/sched.c
1179
static void rpc_free_task(struct rpc_task *task)
net/sunrpc/sched.c
1181
unsigned short tk_flags = task->tk_flags;
net/sunrpc/sched.c
1183
put_rpccred(task->tk_op_cred);
net/sunrpc/sched.c
1184
rpc_release_calldata(task->tk_ops, task->tk_calldata);
net/sunrpc/sched.c
1187
mempool_free(task, rpc_task_mempool);
net/sunrpc/sched.c
1198
static void rpc_release_resources_task(struct rpc_task *task)
net/sunrpc/sched.c
1200
xprt_release(task);
net/sunrpc/sched.c
1201
if (task->tk_msg.rpc_cred) {
net/sunrpc/sched.c
1202
if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
net/sunrpc/sched.c
1203
put_cred(task->tk_msg.rpc_cred);
net/sunrpc/sched.c
1204
task->tk_msg.rpc_cred = NULL;
net/sunrpc/sched.c
1206
rpc_task_release_client(task);
net/sunrpc/sched.c
1209
static void rpc_final_put_task(struct rpc_task *task,
net/sunrpc/sched.c
121
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
net/sunrpc/sched.c
1213
INIT_WORK(&task->u.tk_work, rpc_async_release);
net/sunrpc/sched.c
1214
queue_work(q, &task->u.tk_work);
net/sunrpc/sched.c
1216
rpc_free_task(task);
net/sunrpc/sched.c
1219
static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
net/sunrpc/sched.c
1221
if (atomic_dec_and_test(&task->tk_count)) {
net/sunrpc/sched.c
1222
rpc_release_resources_task(task);
net/sunrpc/sched.c
1223
rpc_final_put_task(task, q);
net/sunrpc/sched.c
1227
void rpc_put_task(struct rpc_task *task)
net/sunrpc/sched.c
1229
rpc_do_put_task(task, NULL);
net/sunrpc/sched.c
1233
void rpc_put_task_async(struct rpc_task *task)
net/sunrpc/sched.c
1235
rpc_do_put_task(task, task->tk_workqueue);
net/sunrpc/sched.c
1239
static void rpc_release_task(struct rpc_task *task)
net/sunrpc/sched.c
124
task->tk_timeout = timeout;
net/sunrpc/sched.c
1241
WARN_ON_ONCE(RPC_IS_QUEUED(task));
net/sunrpc/sched.c
1243
rpc_release_resources_task(task);
net/sunrpc/sched.c
1250
if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
net/sunrpc/sched.c
1252
if (!rpc_complete_task(task))
net/sunrpc/sched.c
1255
if (!atomic_dec_and_test(&task->tk_count))
net/sunrpc/sched.c
1258
rpc_final_put_task(task, task->tk_workqueue);
net/sunrpc/sched.c
127
list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
net/sunrpc/sched.c
147
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
net/sunrpc/sched.c
152
if (t->tk_owner == task->tk_owner) {
net/sunrpc/sched.c
153
list_add_tail(&task->u.tk_wait.links,
net/sunrpc/sched.c
156
task->u.tk_wait.list.next = q;
net/sunrpc/sched.c
157
task->u.tk_wait.list.prev = NULL;
net/sunrpc/sched.c
161
INIT_LIST_HEAD(&task->u.tk_wait.links);
net/sunrpc/sched.c
162
list_add_tail(&task->u.tk_wait.list, q);
net/sunrpc/sched.c
169
__rpc_list_dequeue_task(struct rpc_task *task)
net/sunrpc/sched.c
174
if (task->u.tk_wait.list.prev == NULL) {
net/sunrpc/sched.c
175
list_del(&task->u.tk_wait.links);
net/sunrpc/sched.c
178
if (!list_empty(&task->u.tk_wait.links)) {
net/sunrpc/sched.c
179
t = list_first_entry(&task->u.tk_wait.links,
net/sunrpc/sched.c
185
list_del(&task->u.tk_wait.links);
net/sunrpc/sched.c
187
list_del(&task->u.tk_wait.list);
net/sunrpc/sched.c
194
struct rpc_task *task,
net/sunrpc/sched.c
199
__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
net/sunrpc/sched.c
206
struct rpc_task *task,
net/sunrpc/sched.c
209
INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
net/sunrpc/sched.c
211
__rpc_add_wait_queue_priority(queue, task, queue_priority);
net/sunrpc/sched.c
213
list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
net/sunrpc/sched.c
214
task->tk_waitqueue = queue;
net/sunrpc/sched.c
218
rpc_set_queued(task);
net/sunrpc/sched.c
224
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
net/sunrpc/sched.c
226
__rpc_list_dequeue_task(task);
net/sunrpc/sched.c
233
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
net/sunrpc/sched.c
235
__rpc_disable_timer(queue, task);
net/sunrpc/sched.c
237
__rpc_remove_wait_queue_priority(task);
net/sunrpc/sched.c
239
list_del(&task->u.tk_wait.list);
net/sunrpc/sched.c
286
static void rpc_task_set_debuginfo(struct rpc_task *task)
net/sunrpc/sched.c
288
struct rpc_clnt *clnt = task->tk_client;
net/sunrpc/sched.c
294
task->tk_pid = atomic_inc_return(&rpc_pid);
net/sunrpc/sched.c
298
task->tk_pid = atomic_inc_return(&clnt->cl_pid);
net/sunrpc/sched.c
301
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
net/sunrpc/sched.c
306
static void rpc_set_active(struct rpc_task *task)
net/sunrpc/sched.c
308
rpc_task_set_debuginfo(task);
net/sunrpc/sched.c
309
set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
net/sunrpc/sched.c
310
trace_rpc_task_begin(task, NULL);
net/sunrpc/sched.c
317
static int rpc_complete_task(struct rpc_task *task)
net/sunrpc/sched.c
319
void *m = &task->tk_runstate;
net/sunrpc/sched.c
325
trace_rpc_task_complete(task, NULL);
net/sunrpc/sched.c
328
clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
net/sunrpc/sched.c
329
ret = atomic_dec_and_test(&task->tk_count);
net/sunrpc/sched.c
343
int rpc_wait_for_completion_task(struct rpc_task *task)
net/sunrpc/sched.c
345
return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
net/sunrpc/sched.c
362
struct rpc_task *task)
net/sunrpc/sched.c
364
bool need_wakeup = !rpc_test_and_set_running(task);
net/sunrpc/sched.c
366
rpc_clear_queued(task);
net/sunrpc/sched.c
369
if (RPC_IS_ASYNC(task)) {
net/sunrpc/sched.c
370
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
net/sunrpc/sched.c
371
queue_work(wq, &task->u.tk_work);
net/sunrpc/sched.c
374
wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
net/sunrpc/sched.c
385
struct rpc_task *task,
net/sunrpc/sched.c
388
trace_rpc_task_sleep(task, q);
net/sunrpc/sched.c
390
__rpc_add_wait_queue(q, task, queue_priority);
net/sunrpc/sched.c
394
struct rpc_task *task,
net/sunrpc/sched.c
397
if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
net/sunrpc/sched.c
399
__rpc_do_sleep_on_priority(q, task, queue_priority);
net/sunrpc/sched.c
403
struct rpc_task *task, unsigned long timeout,
net/sunrpc/sched.c
406
if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
net/sunrpc/sched.c
409
__rpc_do_sleep_on_priority(q, task, queue_priority);
net/sunrpc/sched.c
410
__rpc_add_timer(q, task, timeout);
net/sunrpc/sched.c
412
task->tk_status = -ETIMEDOUT;
net/sunrpc/sched.c
415
static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
net/sunrpc/sched.c
417
if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
net/sunrpc/sched.c
418
task->tk_callback = action;
net/sunrpc/sched.c
421
static bool rpc_sleep_check_activated(struct rpc_task *task)
net/sunrpc/sched.c
424
if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
net/sunrpc/sched.c
425
task->tk_status = -EIO;
net/sunrpc/sched.c
426
rpc_put_task_async(task);
net/sunrpc/sched.c
432
void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
net/sunrpc/sched.c
435
if (!rpc_sleep_check_activated(task))
net/sunrpc/sched.c
438
rpc_set_tk_callback(task, action);
net/sunrpc/sched.c
444
__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
net/sunrpc/sched.c
449
void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
net/sunrpc/sched.c
45
static void rpc_release_task(struct rpc_task *task);
net/sunrpc/sched.c
452
if (!rpc_sleep_check_activated(task))
net/sunrpc/sched.c
455
rpc_set_tk_callback(task, action);
net/sunrpc/sched.c
457
WARN_ON_ONCE(task->tk_timeout != 0);
net/sunrpc/sched.c
462
__rpc_sleep_on_priority(q, task, task->tk_priority);
net/sunrpc/sched.c
468
struct rpc_task *task, unsigned long timeout, int priority)
net/sunrpc/sched.c
470
if (!rpc_sleep_check_activated(task))
net/sunrpc/sched.c
478
__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
net/sunrpc/sched.c
483
void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
net/sunrpc/sched.c
486
if (!rpc_sleep_check_activated(task))
net/sunrpc/sched.c
489
WARN_ON_ONCE(task->tk_timeout != 0);
net/sunrpc/sched.c
495
__rpc_sleep_on_priority(q, task, priority);
net/sunrpc/sched.c
510
struct rpc_task *task)
net/sunrpc/sched.c
513
if (!RPC_IS_ACTIVATED(task)) {
net/sunrpc/sched.c
514
printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
net/sunrpc/sched.c
518
trace_rpc_task_wakeup(task, queue);
net/sunrpc/sched.c
520
__rpc_remove_wait_queue(queue, task);
net/sunrpc/sched.c
522
rpc_make_runnable(wq, task);
net/sunrpc/sched.c
530
struct rpc_wait_queue *queue, struct rpc_task *task,
net/sunrpc/sched.c
533
if (RPC_IS_QUEUED(task)) {
net/sunrpc/sched.c
535
if (task->tk_waitqueue == queue) {
net/sunrpc/sched.c
536
if (action == NULL || action(task, data)) {
net/sunrpc/sched.c
537
__rpc_do_wake_up_task_on_wq(wq, queue, task);
net/sunrpc/sched.c
538
return task;
net/sunrpc/sched.c
549
struct rpc_task *task)
net/sunrpc/sched.c
552
task, NULL, NULL);
net/sunrpc/sched.c
558
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
net/sunrpc/sched.c
560
if (!RPC_IS_QUEUED(task))
net/sunrpc/sched.c
563
rpc_wake_up_task_queue_locked(queue, task);
net/sunrpc/sched.c
568
static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
net/sunrpc/sched.c
570
task->tk_status = *(int *)status;
net/sunrpc/sched.c
576
struct rpc_task *task, int status)
net/sunrpc/sched.c
579
task, rpc_task_action_set_status, &status);
net/sunrpc/sched.c
593
struct rpc_task *task, int status)
net/sunrpc/sched.c
595
if (!RPC_IS_QUEUED(task))
net/sunrpc/sched.c
598
rpc_wake_up_task_queue_set_status_locked(queue, task, status);
net/sunrpc/sched.c
608
struct rpc_task *task;
net/sunrpc/sched.c
615
task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
net/sunrpc/sched.c
625
task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
net/sunrpc/sched.c
638
task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
net/sunrpc/sched.c
649
return task;
net/sunrpc/sched.c
668
struct rpc_task *task = NULL;
net/sunrpc/sched.c
671
task = __rpc_find_next_queued(queue);
net/sunrpc/sched.c
672
if (task != NULL)
net/sunrpc/sched.c
673
task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
net/sunrpc/sched.c
674
task, func, data);
net/sunrpc/sched.c
677
return task;
net/sunrpc/sched.c
68
bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status)
net/sunrpc/sched.c
690
static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
net/sunrpc/sched.c
70
if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0)
net/sunrpc/sched.c
711
struct rpc_task *task;
net/sunrpc/sched.c
714
task = __rpc_find_next_queued(queue);
net/sunrpc/sched.c
715
if (task == NULL)
net/sunrpc/sched.c
717
rpc_wake_up_task_queue_locked(queue, task);
net/sunrpc/sched.c
742
struct rpc_task *task;
net/sunrpc/sched.c
745
task = __rpc_find_next_queued(queue);
net/sunrpc/sched.c
746
if (task == NULL)
net/sunrpc/sched.c
748
rpc_wake_up_task_queue_set_status_locked(queue, task, status);
net/sunrpc/sched.c
76
rpc_task_timeout(const struct rpc_task *task)
net/sunrpc/sched.c
772
struct rpc_task *task, *n;
net/sunrpc/sched.c
777
list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
net/sunrpc/sched.c
778
timeo = task->tk_timeout;
net/sunrpc/sched.c
78
unsigned long timeout = READ_ONCE(task->tk_timeout);
net/sunrpc/sched.c
780
trace_rpc_task_timeout(task, task->tk_action);
net/sunrpc/sched.c
781
task->tk_status = -ETIMEDOUT;
net/sunrpc/sched.c
782
rpc_wake_up_task_queue_locked(queue, task);
net/sunrpc/sched.c
793
static void __rpc_atrun(struct rpc_task *task)
net/sunrpc/sched.c
795
if (task->tk_status == -ETIMEDOUT)
net/sunrpc/sched.c
796
task->tk_status = 0;
net/sunrpc/sched.c
802
void rpc_delay(struct rpc_task *task, unsigned long delay)
net/sunrpc/sched.c
804
rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
net/sunrpc/sched.c
811
void rpc_prepare_task(struct rpc_task *task)
net/sunrpc/sched.c
813
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
net/sunrpc/sched.c
817
rpc_init_task_statistics(struct rpc_task *task)
net/sunrpc/sched.c
820
task->tk_garb_retry = 2;
net/sunrpc/sched.c
821
task->tk_cred_retry = 2;
net/sunrpc/sched.c
824
task->tk_start = ktime_get();
net/sunrpc/sched.c
828
rpc_reset_task_statistics(struct rpc_task *task)
net/sunrpc/sched.c
830
task->tk_timeouts = 0;
net/sunrpc/sched.c
831
task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
net/sunrpc/sched.c
832
rpc_init_task_statistics(task);
net/sunrpc/sched.c
838
void rpc_exit_task(struct rpc_task *task)
net/sunrpc/sched.c
840
trace_rpc_task_end(task, task->tk_action);
net/sunrpc/sched.c
841
task->tk_action = NULL;
net/sunrpc/sched.c
842
if (task->tk_ops->rpc_count_stats)
net/sunrpc/sched.c
843
task->tk_ops->rpc_count_stats(task, task->tk_calldata);
net/sunrpc/sched.c
844
else if (task->tk_client)
net/sunrpc/sched.c
845
rpc_count_iostats(task, task->tk_client->cl_metrics);
net/sunrpc/sched.c
846
if (task->tk_ops->rpc_call_done != NULL) {
net/sunrpc/sched.c
847
trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done);
net/sunrpc/sched.c
848
task->tk_ops->rpc_call_done(task, task->tk_calldata);
net/sunrpc/sched.c
849
if (task->tk_action != NULL) {
net/sunrpc/sched.c
851
xprt_release(task);
net/sunrpc/sched.c
852
rpc_reset_task_statistics(task);
net/sunrpc/sched.c
857
void rpc_signal_task(struct rpc_task *task)
net/sunrpc/sched.c
861
if (!RPC_IS_ACTIVATED(task))
net/sunrpc/sched.c
864
if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
net/sunrpc/sched.c
866
trace_rpc_task_signalled(task, task->tk_action);
net/sunrpc/sched.c
867
queue = READ_ONCE(task->tk_waitqueue);
net/sunrpc/sched.c
869
rpc_wake_up_queued_task(queue, task);
net/sunrpc/sched.c
872
void rpc_task_try_cancel(struct rpc_task *task, int error)
net/sunrpc/sched.c
876
if (!rpc_task_set_rpc_status(task, error))
net/sunrpc/sched.c
878
queue = READ_ONCE(task->tk_waitqueue);
net/sunrpc/sched.c
880
rpc_wake_up_queued_task(queue, task);
net/sunrpc/sched.c
883
void rpc_exit(struct rpc_task *task, int status)
net/sunrpc/sched.c
885
task->tk_status = status;
net/sunrpc/sched.c
886
task->tk_action = rpc_exit_task;
net/sunrpc/sched.c
887
rpc_wake_up_queued_task(task->tk_waitqueue, task);
net/sunrpc/sched.c
909
static void __rpc_execute(struct rpc_task *task)
net/sunrpc/sched.c
912
int task_is_async = RPC_IS_ASYNC(task);
net/sunrpc/sched.c
916
WARN_ON_ONCE(RPC_IS_QUEUED(task));
net/sunrpc/sched.c
917
if (RPC_IS_QUEUED(task))
net/sunrpc/sched.c
928
do_action = task->tk_action;
net/sunrpc/sched.c
931
(status = READ_ONCE(task->tk_rpc_status)) != 0) {
net/sunrpc/sched.c
932
task->tk_status = status;
net/sunrpc/sched.c
936
if (task->tk_callback) {
net/sunrpc/sched.c
937
do_action = task->tk_callback;
net/sunrpc/sched.c
938
task->tk_callback = NULL;
net/sunrpc/sched.c
942
if (RPC_IS_SWAPPER(task) ||
net/sunrpc/sched.c
943
xprt_needs_memalloc(task->tk_xprt, task))
net/sunrpc/sched.c
946
trace_rpc_task_run_action(task, do_action);
net/sunrpc/sched.c
947
do_action(task);
net/sunrpc/sched.c
95
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
net/sunrpc/sched.c
952
if (!RPC_IS_QUEUED(task)) {
net/sunrpc/sched.c
966
queue = task->tk_waitqueue;
net/sunrpc/sched.c
968
if (!RPC_IS_QUEUED(task)) {
net/sunrpc/sched.c
97
if (list_empty(&task->u.tk_wait.timer_list))
net/sunrpc/sched.c
973
if (READ_ONCE(task->tk_rpc_status) != 0) {
net/sunrpc/sched.c
974
rpc_wake_up_task_queue_locked(queue, task);
net/sunrpc/sched.c
978
rpc_clear_running(task);
net/sunrpc/sched.c
984
trace_rpc_task_sync_sleep(task, task->tk_action);
net/sunrpc/sched.c
985
status = out_of_line_wait_on_bit(&task->tk_runstate,
net/sunrpc/sched.c
99
task->tk_timeout = 0;
net/sunrpc/sched.c
995
rpc_signal_task(task);
net/sunrpc/sched.c
997
trace_rpc_task_sync_wake(task, task->tk_action);
net/sunrpc/stats.c
154
void rpc_count_iostats_metrics(const struct rpc_task *task,
net/sunrpc/stats.c
157
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/stats.c
169
op_metrics->om_timeouts += task->tk_timeouts;
net/sunrpc/stats.c
176
backlog = ktime_sub(req->rq_xtime, task->tk_start);
net/sunrpc/stats.c
182
execute = ktime_sub(now, task->tk_start);
net/sunrpc/stats.c
184
if (task->tk_status < 0)
net/sunrpc/stats.c
200
void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
net/sunrpc/stats.c
202
rpc_count_iostats_metrics(task,
net/sunrpc/stats.c
203
&stats[task->tk_msg.rpc_proc->p_statidx]);
net/sunrpc/svc.c
1647
struct rpc_task *task;
net/sunrpc/svc.c
1704
task = rpc_run_bc_task(req, &timeout);
net/sunrpc/svc.c
1707
if (IS_ERR(task))
net/sunrpc/svc.c
1710
WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
net/sunrpc/svc.c
1711
rpc_put_task(task);
net/sunrpc/svc.c
362
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
net/sunrpc/svc.c
378
set_cpus_allowed_ptr(task, cpumask_of(node));
net/sunrpc/svc.c
383
set_cpus_allowed_ptr(task, cpumask_of_node(node));
net/sunrpc/svc.c
778
struct task_struct *task;
net/sunrpc/svc.c
787
task = kthread_create_on_node(serv->sv_threadfn, rqstp,
net/sunrpc/svc.c
789
if (IS_ERR(task)) {
net/sunrpc/svc.c
790
err = PTR_ERR(task);
net/sunrpc/svc.c
794
rqstp->rq_task = task;
net/sunrpc/svc.c
796
svc_pool_map_set_cpumask(task, pool->sp_id);
net/sunrpc/svc.c
799
wake_up_process(task);
net/sunrpc/xprt.c
1127
xprt_request_data_received(struct rpc_task *task)
net/sunrpc/xprt.c
1129
return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
net/sunrpc/xprt.c
1130
READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
net/sunrpc/xprt.c
1134
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
net/sunrpc/xprt.c
1136
return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
net/sunrpc/xprt.c
1137
READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
net/sunrpc/xprt.c
1146
xprt_request_enqueue_receive(struct rpc_task *task)
net/sunrpc/xprt.c
1148
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1152
if (!xprt_request_need_enqueue_receive(task, req))
net/sunrpc/xprt.c
1155
ret = xprt_request_prepare(task->tk_rqstp, &req->rq_rcv_buf);
net/sunrpc/xprt.c
1166
set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
net/sunrpc/xprt.c
1181
xprt_request_dequeue_receive_locked(struct rpc_task *task)
net/sunrpc/xprt.c
1183
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1185
if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
net/sunrpc/xprt.c
1195
void xprt_update_rtt(struct rpc_task *task)
net/sunrpc/xprt.c
1197
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1198
struct rpc_rtt *rtt = task->tk_client->cl_rtt;
net/sunrpc/xprt.c
1199
unsigned int timer = task->tk_msg.rpc_proc->p_timer;
net/sunrpc/xprt.c
1217
void xprt_complete_rqst(struct rpc_task *task, int copied)
net/sunrpc/xprt.c
1219
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1231
xprt_request_dequeue_receive_locked(task);
net/sunrpc/xprt.c
1232
rpc_wake_up_queued_task(&xprt->pending, task);
net/sunrpc/xprt.c
1236
static void xprt_timer(struct rpc_task *task)
net/sunrpc/xprt.c
1238
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1241
if (task->tk_status != -ETIMEDOUT)
net/sunrpc/xprt.c
1244
trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
net/sunrpc/xprt.c
1247
xprt->ops->timer(xprt, task);
net/sunrpc/xprt.c
1249
task->tk_status = 0;
net/sunrpc/xprt.c
1261
void xprt_wait_for_reply_request_def(struct rpc_task *task)
net/sunrpc/xprt.c
1263
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1265
rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
net/sunrpc/xprt.c
1277
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
net/sunrpc/xprt.c
1279
int timer = task->tk_msg.rpc_proc->p_timer;
net/sunrpc/xprt.c
1280
struct rpc_clnt *clnt = task->tk_client;
net/sunrpc/xprt.c
1282
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1290
rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
net/sunrpc/xprt.c
1300
void xprt_request_wait_receive(struct rpc_task *task)
net/sunrpc/xprt.c
1302
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1305
if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
net/sunrpc/xprt.c
1313
if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
net/sunrpc/xprt.c
1314
xprt->ops->wait_for_reply_request(task);
net/sunrpc/xprt.c
1320
if (xprt_request_retransmit_after_disconnect(task))
net/sunrpc/xprt.c
1322
task, -ENOTCONN);
net/sunrpc/xprt.c
1328
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
net/sunrpc/xprt.c
1330
return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
net/sunrpc/xprt.c
1340
xprt_request_enqueue_transmit(struct rpc_task *task)
net/sunrpc/xprt.c
1342
struct rpc_rqst *pos, *req = task->tk_rqstp;
net/sunrpc/xprt.c
1346
if (xprt_request_need_enqueue_transmit(task, req)) {
net/sunrpc/xprt.c
1347
ret = xprt_request_prepare(task->tk_rqstp, &req->rq_snd_buf);
net/sunrpc/xprt.c
1349
task->tk_status = ret;
net/sunrpc/xprt.c
1370
if (pos->rq_task->tk_owner != task->tk_owner)
net/sunrpc/xprt.c
1381
set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
net/sunrpc/xprt.c
1394
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
net/sunrpc/xprt.c
1396
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1398
if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
net/sunrpc/xprt.c
1427
xprt_request_dequeue_transmit(struct rpc_task *task)
net/sunrpc/xprt.c
1429
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1433
xprt_request_dequeue_transmit_locked(task);
net/sunrpc/xprt.c
1445
xprt_request_dequeue_xprt(struct rpc_task *task)
net/sunrpc/xprt.c
1447
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1450
if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
net/sunrpc/xprt.c
1451
test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
net/sunrpc/xprt.c
1455
set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
net/sunrpc/xprt.c
1459
clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
net/sunrpc/xprt.c
1461
xprt_request_dequeue_transmit_locked(task);
net/sunrpc/xprt.c
1462
xprt_request_dequeue_receive_locked(task);
net/sunrpc/xprt.c
1494
xprt_request_need_retransmit(struct rpc_task *task)
net/sunrpc/xprt.c
1496
return xprt_request_retransmit_after_disconnect(task);
net/sunrpc/xprt.c
1504
bool xprt_prepare_transmit(struct rpc_task *task)
net/sunrpc/xprt.c
1506
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1509
if (!xprt_lock_write(xprt, task)) {
net/sunrpc/xprt.c
1511
if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
net/sunrpc/xprt.c
1513
task, 0);
net/sunrpc/xprt.c
1523
void xprt_end_transmit(struct rpc_task *task)
net/sunrpc/xprt.c
1525
struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
net/sunrpc/xprt.c
1528
xprt_release_write(xprt, task);
net/sunrpc/xprt.c
1545
struct rpc_task *task = req->rq_task;
net/sunrpc/xprt.c
1547
int is_retrans = RPC_WAS_SENT(task);
net/sunrpc/xprt.c
1554
if (xprt_request_data_received(task)) {
net/sunrpc/xprt.c
1559
if (rpcauth_xmit_need_reencode(task)) {
net/sunrpc/xprt.c
1563
if (RPC_SIGNALLED(task)) {
net/sunrpc/xprt.c
1576
trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
net/sunrpc/xprt.c
1586
task->tk_client->cl_stats->rpcretrans++;
net/sunrpc/xprt.c
1592
task->tk_flags |= RPC_TASK_SENT;
net/sunrpc/xprt.c
1605
xprt_request_dequeue_transmit(task);
net/sunrpc/xprt.c
1606
rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
net/sunrpc/xprt.c
1620
xprt_transmit(struct rpc_task *task)
net/sunrpc/xprt.c
1622
struct rpc_rqst *next, *req = task->tk_rqstp;
net/sunrpc/xprt.c
1634
status = xprt_request_transmit(next, task);
net/sunrpc/xprt.c
1640
if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
net/sunrpc/xprt.c
1641
task->tk_status = status;
net/sunrpc/xprt.c
1645
if (xprt_request_data_received(task) &&
net/sunrpc/xprt.c
1646
!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
net/sunrpc/xprt.c
1653
static void xprt_complete_request_init(struct rpc_task *task)
net/sunrpc/xprt.c
1655
if (task->tk_rqstp)
net/sunrpc/xprt.c
1656
xprt_request_init(task);
net/sunrpc/xprt.c
1659
void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprt.c
1662
rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
net/sunrpc/xprt.c
1666
static bool __xprt_set_rq(struct rpc_task *task, void *data)
net/sunrpc/xprt.c
1670
if (task->tk_rqstp == NULL) {
net/sunrpc/xprt.c
1672
task->tk_rqstp = req;
net/sunrpc/xprt.c
1688
static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprt.c
1696
xprt_add_backlog(xprt, task);
net/sunrpc/xprt.c
1732
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprt.c
1749
task->tk_status = -ENOMEM;
net/sunrpc/xprt.c
1752
xprt_add_backlog(xprt, task);
net/sunrpc/xprt.c
1756
task->tk_status = -EAGAIN;
net/sunrpc/xprt.c
1765
task->tk_status = 0;
net/sunrpc/xprt.c
1766
task->tk_rqstp = req;
net/sunrpc/xprt.c
1884
xprt_request_init(struct rpc_task *task)
net/sunrpc/xprt.c
1886
struct rpc_xprt *xprt = task->tk_xprt;
net/sunrpc/xprt.c
1887
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1889
req->rq_task = task;
net/sunrpc/xprt.c
1902
xprt_init_majortimeo(task, req, task->tk_client->cl_timeout);
net/sunrpc/xprt.c
1908
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprt.c
1910
xprt->ops->alloc_slot(xprt, task);
net/sunrpc/xprt.c
1911
if (task->tk_rqstp != NULL)
net/sunrpc/xprt.c
1912
xprt_request_init(task);
net/sunrpc/xprt.c
1923
void xprt_reserve(struct rpc_task *task)
net/sunrpc/xprt.c
1925
struct rpc_xprt *xprt = task->tk_xprt;
net/sunrpc/xprt.c
1927
task->tk_status = 0;
net/sunrpc/xprt.c
1928
if (task->tk_rqstp != NULL)
net/sunrpc/xprt.c
1931
task->tk_status = -EAGAIN;
net/sunrpc/xprt.c
1932
if (!xprt_throttle_congested(xprt, task))
net/sunrpc/xprt.c
1933
xprt_do_reserve(xprt, task);
net/sunrpc/xprt.c
1945
void xprt_retry_reserve(struct rpc_task *task)
net/sunrpc/xprt.c
1947
struct rpc_xprt *xprt = task->tk_xprt;
net/sunrpc/xprt.c
1949
task->tk_status = 0;
net/sunrpc/xprt.c
1950
if (task->tk_rqstp != NULL)
net/sunrpc/xprt.c
1953
task->tk_status = -EAGAIN;
net/sunrpc/xprt.c
1954
xprt_do_reserve(xprt, task);
net/sunrpc/xprt.c
1962
void xprt_release(struct rpc_task *task)
net/sunrpc/xprt.c
1965
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
1968
if (task->tk_client) {
net/sunrpc/xprt.c
1969
xprt = task->tk_xprt;
net/sunrpc/xprt.c
1970
xprt_release_write(xprt, task);
net/sunrpc/xprt.c
1976
xprt_request_dequeue_xprt(task);
net/sunrpc/xprt.c
1978
xprt->ops->release_xprt(xprt, task);
net/sunrpc/xprt.c
1980
xprt->ops->release_request(task);
net/sunrpc/xprt.c
1984
xprt->ops->buf_free(task);
net/sunrpc/xprt.c
1990
task->tk_rqstp = NULL;
net/sunrpc/xprt.c
1999
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task,
net/sunrpc/xprt.c
2004
task->tk_rqstp = req;
net/sunrpc/xprt.c
2005
req->rq_task = task;
net/sunrpc/xprt.c
2019
xprt_init_majortimeo(task, req, to);
net/sunrpc/xprt.c
265
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprt.c
267
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
270
if (task == xprt->snd_task)
net/sunrpc/xprt.c
276
xprt->snd_task = task;
net/sunrpc/xprt.c
279
trace_xprt_reserve_xprt(xprt, task);
net/sunrpc/xprt.c
285
task->tk_status = -EAGAIN;
net/sunrpc/xprt.c
286
if (RPC_IS_SOFT(task) || RPC_IS_SOFTCONN(task))
net/sunrpc/xprt.c
287
rpc_sleep_on_timeout(&xprt->sending, task, NULL,
net/sunrpc/xprt.c
290
rpc_sleep_on(&xprt->sending, task, NULL);
net/sunrpc/xprt.c
329
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprt.c
331
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
334
if (task == xprt->snd_task)
net/sunrpc/xprt.c
339
xprt->snd_task = task;
net/sunrpc/xprt.c
345
xprt->snd_task = task;
net/sunrpc/xprt.c
351
task->tk_status = -EAGAIN;
net/sunrpc/xprt.c
352
if (RPC_IS_SOFT(task) || RPC_IS_SOFTCONN(task))
net/sunrpc/xprt.c
353
rpc_sleep_on_timeout(&xprt->sending, task, NULL,
net/sunrpc/xprt.c
356
rpc_sleep_on(&xprt->sending, task, NULL);
net/sunrpc/xprt.c
359
trace_xprt_reserve_cong(xprt, task);
net/sunrpc/xprt.c
364
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprt.c
368
if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
net/sunrpc/xprt.c
371
retval = xprt->ops->reserve_xprt(xprt, task);
net/sunrpc/xprt.c
376
static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
net/sunrpc/xprt.c
380
xprt->snd_task = task;
net/sunrpc/xprt.c
419
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprt.c
421
if (xprt->snd_task == task) {
net/sunrpc/xprt.c
425
trace_xprt_release_xprt(xprt, task);
net/sunrpc/xprt.c
437
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprt.c
439
if (xprt->snd_task == task) {
net/sunrpc/xprt.c
443
trace_xprt_release_cong(xprt, task);
net/sunrpc/xprt.c
447
void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprt.c
449
if (xprt->snd_task != task)
net/sunrpc/xprt.c
452
xprt->ops->release_xprt(xprt, task);
net/sunrpc/xprt.c
518
void xprt_release_rqst_cong(struct rpc_task *task)
net/sunrpc/xprt.c
520
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
562
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
net/sunrpc/xprt.c
564
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
679
static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req,
net/sunrpc/xprt.c
688
time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
net/sunrpc/xprt.c
75
static void xprt_request_init(struct rpc_task *task);
net/sunrpc/xprt.c
806
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
net/sunrpc/xprt.c
808
struct rpc_rqst *req = task->tk_rqstp;
net/sunrpc/xprt.c
882
struct rpc_task *task,
net/sunrpc/xprt.c
890
if (xprt->snd_task != task)
net/sunrpc/xprt.c
923
void xprt_connect(struct rpc_task *task)
net/sunrpc/xprt.c
925
struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
net/sunrpc/xprt.c
930
task->tk_status = -EAGAIN;
net/sunrpc/xprt.c
933
if (!xprt_lock_write(xprt, task))
net/sunrpc/xprt.c
937
task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
net/sunrpc/xprt.c
938
rpc_sleep_on_timeout(&xprt->pending, task, NULL,
net/sunrpc/xprt.c
939
xprt_request_timeout(task->tk_rqstp));
net/sunrpc/xprt.c
948
xprt->ops->connect(xprt, task);
net/sunrpc/xprt.c
951
task->tk_status = 0;
net/sunrpc/xprt.c
952
rpc_wake_up_queued_task(&xprt->pending, task);
net/sunrpc/xprt.c
955
xprt_release_write(xprt, task);
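The net/sunrpc/xprt.c entries above cluster around a small state machine on task->tk_runstate: a request is enqueued for transmit or receive only while its RPC_TASK_NEED_XMIT / RPC_TASK_NEED_RECV bit is clear, and dequeued with test_and_clear_bit() so each transition fires exactly once even with concurrent callers. A minimal user-space sketch of that bit protocol (the struct, function names, and atomic helpers here are stand-ins, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define RPC_TASK_NEED_XMIT 0   /* bit positions, mirroring tk_runstate flags */
    #define RPC_TASK_NEED_RECV 1

    struct rpc_task_sketch { atomic_ulong tk_runstate; };

    /* set the bit iff not already queued; true when this caller enqueued */
    static bool enqueue_transmit(struct rpc_task_sketch *t)
    {
        unsigned long bit = 1UL << RPC_TASK_NEED_XMIT;
        return !(atomic_fetch_or(&t->tk_runstate, bit) & bit);
    }

    /* the test_and_clear_bit() shape: only one dequeuer can win */
    static bool dequeue_transmit(struct rpc_task_sketch *t)
    {
        unsigned long bit = 1UL << RPC_TASK_NEED_XMIT;
        return atomic_fetch_and(&t->tk_runstate, ~bit) & bit;
    }

    int main(void)
    {
        struct rpc_task_sketch t = { 0 };
        printf("enqueue: %d\n", enqueue_transmit(&t)); /* 1: first enqueue wins */
        printf("enqueue: %d\n", enqueue_transmit(&t)); /* 0: already queued */
        printf("dequeue: %d\n", dequeue_transmit(&t)); /* 1 */
        printf("dequeue: %d\n", dequeue_transmit(&t)); /* 0: already cleared */
        return 0;
    }

The same idempotence is why xprt_request_dequeue_xprt() can clear both bits under one lock without tracking which queues the request actually sat on.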
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
101
xprt_rdma_bc_allocate(struct rpc_task *task)
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
103
struct rpc_rqst *rqst = task->tk_rqstp;
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
127
xprt_rdma_bc_free(struct rpc_task *task)
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
129
struct rpc_rqst *rqst = task->tk_rqstp;
net/sunrpc/xprtrdma/transport.c
423
xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprtrdma/transport.c
473
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprtrdma/transport.c
479
WARN_ON_ONCE(!xprt_lock_connect(xprt, task, r_xprt));
net/sunrpc/xprtrdma/transport.c
500
xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprtrdma/transport.c
508
task->tk_rqstp = &req->rl_slot;
net/sunrpc/xprtrdma/transport.c
509
task->tk_status = 0;
net/sunrpc/xprtrdma/transport.c
513
task->tk_status = -ENOMEM;
net/sunrpc/xprtrdma/transport.c
514
xprt_add_backlog(xprt, task);
net/sunrpc/xprtrdma/transport.c
558
xprt_rdma_allocate(struct rpc_task *task)
net/sunrpc/xprtrdma/transport.c
560
struct rpc_rqst *rqst = task->tk_rqstp;
net/sunrpc/xprtrdma/transport.c
587
xprt_rdma_free(struct rpc_task *task)
net/sunrpc/xprtrdma/transport.c
589
struct rpc_rqst *rqst = task->tk_rqstp;
net/sunrpc/xprtrdma/transport.c
593
trace_xprtrdma_mrs_zap(task);
net/sunrpc/xprtsock.c
1383
struct rpc_task *task;
net/sunrpc/xprtsock.c
1408
task = rovr->rq_task;
net/sunrpc/xprtsock.c
1422
xprt_adjust_cwnd(xprt, task, copied);
net/sunrpc/xprtsock.c
1425
xprt_complete_rqst(task, copied);
net/sunrpc/xprtsock.c
1708
static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprtsock.c
1711
xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
net/sunrpc/xprtsock.c
1875
static void xs_local_rpcbind(struct rpc_task *task)
net/sunrpc/xprtsock.c
1877
xprt_set_bound(task->tk_xprt);
net/sunrpc/xprtsock.c
2074
static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprtsock.c
2082
if (RPC_IS_ASYNC(task)) {
net/sunrpc/xprtsock.c
2092
rpc_task_set_rpc_status(task, -ENOTCONN);
net/sunrpc/xprtsock.c
2096
if (ret && !RPC_IS_SOFTCONN(task))
net/sunrpc/xprtsock.c
2791
static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
net/sunrpc/xprtsock.c
2796
WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
net/sunrpc/xprtsock.c
2808
transport->clnt = task->tk_client;
net/sunrpc/xprtsock.c
2945
static int bc_malloc(struct rpc_task *task)
net/sunrpc/xprtsock.c
2947
struct rpc_rqst *rqst = task->tk_rqstp;
net/sunrpc/xprtsock.c
2973
static void bc_free(struct rpc_task *task)
net/sunrpc/xprtsock.c
2975
void *buffer = task->tk_rqstp->rq_buffer;
rust/helpers/pid_namespace.c
19
rust_helper_task_get_pid_ns(struct task_struct *task)
rust/helpers/pid_namespace.c
24
pid_ns = task_active_pid_ns(task);
rust/helpers/task.c
26
__rust_helper kuid_t rust_helper_task_uid(struct task_struct *task)
rust/helpers/task.c
28
return task_uid(task);
rust/helpers/task.c
31
__rust_helper kuid_t rust_helper_task_euid(struct task_struct *task)
rust/helpers/task.c
33
return task_euid(task);
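The rust/helpers/task.c entries show why these one-line shims exist: task_uid() and task_euid() are static inline C functions with no linkable symbol, so Rust bindings cannot call them directly; each rust_helper_* function re-exports one inline as a real symbol. A sketch of the same pattern for a made-up inline (the real helpers additionally carry the __rust_helper marker seen above):

    /* hypothetical inline, standing in for task_uid()/task_euid() */
    static inline int my_inline_helper(int x)
    {
        return x + 1;
    }

    /* linkable wrapper that Rust code can declare and call */
    int rust_helper_my_inline_helper(int x)
    {
        return my_inline_helper(x);
    }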
samples/bpf/test_lru_dist.c
266
static void do_test_lru_dist(int task, void *data)
samples/bpf/test_lru_dist.c
275
unsigned long long key_offset = task * dist_key_counts;
samples/bpf/test_lru_dist.c
297
task, pfect_lru.nr_unique, dist_key_counts, nr_misses,
samples/bpf/test_lru_dist.c
300
task, pfect_lru.nr_unique, pfect_lru.total,
samples/bpf/test_lru_dist.c
419
static void do_test_parallel_lru_loss(int task, void *data)
samples/bpf/test_lru_dist.c
431
stable_base = task * nr_repeats * 2 + 1;
samples/bpf/test_lru_dist.c
462
printf(" task:%d nr_losses:%u\n", task, nr_losses);
security/apparmor/include/resource.h
37
struct task_struct *task,
security/apparmor/include/task.h
13
static inline struct aa_task_ctx *task_ctx(struct task_struct *task)
security/apparmor/include/task.h
15
return task->security + apparmor_blob_sizes.lbs_task;
security/apparmor/include/task.h
36
struct aa_label *aa_get_task_label(struct task_struct *task);
security/apparmor/lsm.c
1003
static int apparmor_task_setrlimit(struct task_struct *task,
security/apparmor/lsm.c
1013
error = aa_task_setrlimit(current_cred(), label, task,
security/apparmor/lsm.c
109
static void apparmor_task_free(struct task_struct *task)
security/apparmor/lsm.c
112
aa_free_task_ctx(task_ctx(task));
security/apparmor/lsm.c
115
static int apparmor_task_alloc(struct task_struct *task,
security/apparmor/lsm.c
118
struct aa_task_ctx *new = task_ctx(task);
security/apparmor/lsm.c
821
static int apparmor_getprocattr(struct task_struct *task, const char *name,
security/apparmor/lsm.c
826
const struct cred *cred = get_task_cred(task);
security/apparmor/resource.c
115
struct task_struct *task,
security/apparmor/resource.c
123
peer = aa_get_newest_cred_label(__task_cred(task));
security/apparmor/task.c
30
struct aa_label *aa_get_task_label(struct task_struct *task)
security/apparmor/task.c
35
p = aa_get_newest_cred_label(__task_cred(task));
security/device_cgroup.c
54
static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
security/device_cgroup.c
56
return css_to_devcgroup(task_css(task, devices_cgrp_id));
security/landlock/cred.h
102
has_dom = !!landlock_get_task_domain(task);
security/landlock/cred.h
89
landlock_get_task_domain(const struct task_struct *const task)
security/landlock/cred.h
91
return landlock_cred(__task_cred(task))->domain;
security/landlock/cred.h
94
static inline bool landlocked(const struct task_struct *const task)
security/landlock/cred.h
98
if (task == current)
security/landlock/tsync.c
192
struct task_struct *task)
security/landlock/tsync.c
202
ctx->task = get_task_struct(task);
security/landlock/tsync.c
229
put_task_struct(ctx->task);
security/landlock/tsync.c
293
const struct task_struct *task)
security/landlock/tsync.c
298
if (s->works[i]->task == task)
security/landlock/tsync.c
314
if (WARN_ON_ONCE(!s->works[i]->task))
security/landlock/tsync.c
317
put_task_struct(s->works[i]->task);
security/landlock/tsync.c
450
if (WARN_ON_ONCE(!works->works[i]->task))
security/landlock/tsync.c
453
if (!task_work_cancel(works->works[i]->task,
security/landlock/tsync.c
61
struct task_struct *task;
security/lsm.h
36
int lsm_task_alloc(struct task_struct *task);
security/security.c
243
int lsm_task_alloc(struct task_struct *task)
security/security.c
245
return lsm_blob_alloc(&task->security, blob_sizes.lbs_task, GFP_KERNEL);
security/security.c
2681
int security_task_alloc(struct task_struct *task, u64 clone_flags)
security/security.c
2683
int rc = lsm_task_alloc(task);
security/security.c
2687
rc = call_int_hook(task_alloc, task, clone_flags);
security/security.c
2689
security_task_free(task);
security/security.c
2700
void security_task_free(struct task_struct *task)
security/security.c
2702
call_void_hook(task_free, task);
security/security.c
2704
kfree(task->security);
security/security.c
2705
task->security = NULL;
security/selinux/hooks.c
262
static inline u32 task_sid_obj(const struct task_struct *task)
security/selinux/hooks.c
267
sid = cred_sid(__task_cred(task));
security/selinux/hooks.c
4173
static int selinux_task_alloc(struct task_struct *task,
security/selinux/hooks.c
4178
struct task_security_struct *new_tsec = selinux_task(task);
security/selinux/include/objsec.h
188
selinux_task(const struct task_struct *task)
security/selinux/include/objsec.h
190
return task->security + selinux_blob_sizes.lbs_task;
security/smack/smack_lsm.c
323
static void init_task_smack(struct task_smack *tsp, struct smack_known *task,
security/smack/smack_lsm.c
326
tsp->smk_task = task;
security/tomoyo/common.c
944
const struct task_struct *task = current;
security/tomoyo/common.c
951
(!uid_eq(task->cred->uid, GLOBAL_ROOT_UID) ||
security/tomoyo/common.c
952
!uid_eq(task->cred->euid, GLOBAL_ROOT_UID)))
security/tomoyo/common.h
1222
static inline struct tomoyo_task *tomoyo_task(struct task_struct *task)
security/tomoyo/common.h
1224
return task->security + tomoyo_blob_sizes.lbs_task;
security/tomoyo/common.h
492
} task;
security/tomoyo/securityfs_if.c
25
return !tomoyo_pathcmp(r->param.task.domainname, acl->domainname);
security/tomoyo/securityfs_if.c
63
r.param.task.domainname = &name;
security/tomoyo/tomoyo.c
516
static int tomoyo_task_alloc(struct task_struct *task,
security/tomoyo/tomoyo.c
520
struct tomoyo_task *new = tomoyo_task(task);
security/tomoyo/tomoyo.c
533
static void tomoyo_task_free(struct task_struct *task)
security/tomoyo/tomoyo.c
535
struct tomoyo_task *s = tomoyo_task(task);
security/yama/yama_lsm.c
205
static void yama_task_free(struct task_struct *task)
security/yama/yama_lsm.c
207
yama_ptracer_del(task, task);
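A recurring shape across the security/ entries above: the LSM core allocates a single task->security blob (lsm_task_alloc), and each module's accessor — task_ctx(), selinux_task(), tomoyo_task() — is just pointer arithmetic to its fixed slice, at the offset recorded in that module's lbs_task field. A compressed user-space model of that layout (sizes, offsets, and struct names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    /* each LSM declares how many bytes it needs; the core assigns it a
     * fixed offset (lbs_task) into the one shared allocation */
    struct blob_sizes { size_t lbs_task; };

    static struct blob_sizes apparmor_blob = { .lbs_task = 0 };  /* bytes 0..15 */
    static struct blob_sizes selinux_blob  = { .lbs_task = 16 }; /* bytes 16..47 */
    static const size_t total_blob = 48;

    struct task_model { void *security; };

    /* mirrors lsm_task_alloc(): one zeroed blob covering every module */
    static int lsm_task_alloc_model(struct task_model *task)
    {
        task->security = calloc(1, total_blob);
        return task->security ? 0 : -1;
    }

    /* mirrors selinux_task()/task_ctx(): no per-module allocation */
    static void *selinux_slice(struct task_model *task)
    {
        return (char *)task->security + selinux_blob.lbs_task;
    }

    int main(void)
    {
        struct task_model t;
        if (lsm_task_alloc_model(&t))
            return 1;
        memset(selinux_slice(&t), 0, 32); /* a module touches only its slice */
        free(t.security);                 /* security_task_free() frees once */
        (void)apparmor_blob;
        return 0;
    }

One allocation per task keeps the hot accessors branch-free and lets security_task_free() tear everything down with a single kfree(), as the security/security.c lines above show.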
sound/core/compress_offload.c
1034
struct snd_compr_task_runtime *task;
sound/core/compress_offload.c
1036
list_for_each_entry(task, &stream->runtime->tasks, list) {
sound/core/compress_offload.c
1037
if (task->seqno == seqno)
sound/core/compress_offload.c
1038
return task;
sound/core/compress_offload.c
1043
static void snd_compr_task_free(struct snd_compr_task_runtime *task)
sound/core/compress_offload.c
1045
if (task->output)
sound/core/compress_offload.c
1046
dma_buf_put(task->output);
sound/core/compress_offload.c
1047
if (task->input)
sound/core/compress_offload.c
1048
dma_buf_put(task->input);
sound/core/compress_offload.c
1049
kfree(task);
sound/core/compress_offload.c
1062
struct snd_compr_task_runtime *task;
sound/core/compress_offload.c
1069
task = kzalloc_obj(*task);
sound/core/compress_offload.c
1070
if (task == NULL)
sound/core/compress_offload.c
1072
task->seqno = utask->seqno = snd_compr_seqno_next(stream);
sound/core/compress_offload.c
1073
task->input_size = utask->input_size;
sound/core/compress_offload.c
1074
retval = stream->ops->task_create(stream, task);
sound/core/compress_offload.c
1079
if (!task->input || !task->input->file || !task->output || !task->output->file) {
sound/core/compress_offload.c
1092
get_dma_buf(task->input);
sound/core/compress_offload.c
1093
get_dma_buf(task->output);
sound/core/compress_offload.c
1094
fd_install(fd_i, task->input->file);
sound/core/compress_offload.c
1095
fd_install(fd_o, task->output->file);
sound/core/compress_offload.c
1098
list_add_tail(&task->list, &stream->runtime->tasks);
sound/core/compress_offload.c
1102
snd_compr_task_free(task);
sound/core/compress_offload.c
1113
struct snd_compr_task *task __free(kfree) =
sound/core/compress_offload.c
1114
memdup_user((void __user *)arg, sizeof(*task));
sound/core/compress_offload.c
1115
if (IS_ERR(task))
sound/core/compress_offload.c
1116
return PTR_ERR(task);
sound/core/compress_offload.c
1117
retval = snd_compr_task_new(stream, task);
sound/core/compress_offload.c
1119
if (copy_to_user((void __user *)arg, task, sizeof(*task)))
sound/core/compress_offload.c
1124
static int snd_compr_task_start_prepare(struct snd_compr_task_runtime *task,
sound/core/compress_offload.c
1127
if (task == NULL)
sound/core/compress_offload.c
1129
if (task->state >= SND_COMPRESS_TASK_STATE_FINISHED)
sound/core/compress_offload.c
1131
if (utask->input_size > task->input->size)
sound/core/compress_offload.c
1133
task->flags = utask->flags;
sound/core/compress_offload.c
1134
task->input_size = utask->input_size;
sound/core/compress_offload.c
1135
task->state = SND_COMPRESS_TASK_STATE_IDLE;
sound/core/compress_offload.c
1141
struct snd_compr_task_runtime *task;
sound/core/compress_offload.c
1145
task = snd_compr_find_task(stream, utask->origin_seqno);
sound/core/compress_offload.c
1146
retval = snd_compr_task_start_prepare(task, utask);
sound/core/compress_offload.c
1149
task->seqno = utask->seqno = snd_compr_seqno_next(stream);
sound/core/compress_offload.c
1151
list_move_tail(&task->list, &stream->runtime->tasks);
sound/core/compress_offload.c
1153
task = snd_compr_find_task(stream, utask->seqno);
sound/core/compress_offload.c
1154
if (task && task->state != SND_COMPRESS_TASK_STATE_IDLE)
sound/core/compress_offload.c
1156
retval = snd_compr_task_start_prepare(task, utask);
sound/core/compress_offload.c
1160
retval = stream->ops->task_start(stream, task);
sound/core/compress_offload.c
1162
task->state = SND_COMPRESS_TASK_STATE_ACTIVE;
sound/core/compress_offload.c
1175
struct snd_compr_task *task __free(kfree) =
sound/core/compress_offload.c
1176
memdup_user((void __user *)arg, sizeof(*task));
sound/core/compress_offload.c
1177
if (IS_ERR(task))
sound/core/compress_offload.c
1178
return PTR_ERR(task);
sound/core/compress_offload.c
1179
retval = snd_compr_task_start(stream, task);
sound/core/compress_offload.c
1181
if (copy_to_user((void __user *)arg, task, sizeof(*task)))
sound/core/compress_offload.c
1187
struct snd_compr_task_runtime *task)
sound/core/compress_offload.c
1189
if (task->state != SND_COMPRESS_TASK_STATE_ACTIVE)
sound/core/compress_offload.c
1191
stream->ops->task_stop(stream, task);
sound/core/compress_offload.c
1194
list_move_tail(&task->list, &stream->runtime->tasks);
sound/core/compress_offload.c
1195
task->state = SND_COMPRESS_TASK_STATE_IDLE;
sound/core/compress_offload.c
1199
struct snd_compr_task_runtime *task)
sound/core/compress_offload.c
1201
snd_compr_task_stop_one(stream, task);
sound/core/compress_offload.c
1202
stream->ops->task_free(stream, task);
sound/core/compress_offload.c
1203
list_del(&task->list);
sound/core/compress_offload.c
1204
snd_compr_task_free(task);
sound/core/compress_offload.c
1210
struct snd_compr_task_runtime *task, *temp;
sound/core/compress_offload.c
1212
list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
sound/core/compress_offload.c
1213
snd_compr_task_free_one(stream, task);
sound/core/compress_offload.c
1217
struct snd_compr_task_runtime *task);
sound/core/compress_offload.c
1222
struct snd_compr_task_runtime *task, *temp;
sound/core/compress_offload.c
1233
list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
sound/core/compress_offload.c
1234
fcn(stream, task);
sound/core/compress_offload.c
1236
task = snd_compr_find_task(stream, seqno);
sound/core/compress_offload.c
1237
if (task == NULL) {
sound/core/compress_offload.c
1240
fcn(stream, task);
sound/core/compress_offload.c
1249
struct snd_compr_task_runtime *task;
sound/core/compress_offload.c
1251
task = snd_compr_find_task(stream, status->seqno);
sound/core/compress_offload.c
1252
if (task == NULL)
sound/core/compress_offload.c
1254
status->input_size = task->input_size;
sound/core/compress_offload.c
1255
status->output_size = task->output_size;
sound/core/compress_offload.c
1256
status->state = task->state;
sound/core/compress_offload.c
1286
struct snd_compr_task_runtime *task)
sound/core/compress_offload.c
1291
task->state = SND_COMPRESS_TASK_STATE_FINISHED;
sound/core/compress_offload.c
457
struct snd_compr_task_runtime *task;
sound/core/compress_offload.c
460
task = list_first_entry_or_null(&runtime->tasks,
sound/core/compress_offload.c
463
if (task && task->state == SND_COMPRESS_TASK_STATE_FINISHED)
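The compress_offload entries walk one offload task through a small lifecycle: it is created and given a sequence number, prepared back to IDLE by snd_compr_task_start_prepare(), moved to ACTIVE when the driver's task_start succeeds, and ends FINISHED, after which the prepare step refuses to reuse it. A reduced model of those state checks (state names mirror the SND_COMPRESS_TASK_STATE_* constants; the errno values and the omitted driver ops are illustrative):

    #include <stdio.h>

    enum task_state { STATE_CREATED, STATE_IDLE, STATE_ACTIVE, STATE_FINISHED };

    struct task_model { unsigned int seqno; enum task_state state; };

    /* mirrors snd_compr_task_start_prepare(): reject missing or finished
     * tasks, otherwise reset to IDLE before (re)starting */
    static int start_prepare(struct task_model *t)
    {
        if (!t)
            return -22;                     /* illustrative -EINVAL */
        if (t->state >= STATE_FINISHED)
            return -16;                     /* illustrative: cannot restart */
        t->state = STATE_IDLE;
        return 0;
    }

    static int start(struct task_model *t)
    {
        int err = start_prepare(t);
        if (err)
            return err;
        t->state = STATE_ACTIVE;            /* driver's task_start succeeded */
        return 0;
    }

    int main(void)
    {
        struct task_model t = { .seqno = 1, .state = STATE_CREATED };
        printf("start:   %d\n", start(&t)); /* 0 */
        t.state = STATE_FINISHED;
        printf("restart: %d\n", start(&t)); /* rejected: finished stays dead */
        return 0;
    }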
sound/core/oss/pcm_oss.c
2464
static int snd_task_name(struct task_struct *task, char *name, size_t size)
sound/core/oss/pcm_oss.c
2468
if (snd_BUG_ON(!task || !name || size < 2))
sound/core/oss/pcm_oss.c
2470
for (idx = 0; idx < sizeof(task->comm) && idx + 1 < size; idx++)
sound/core/oss/pcm_oss.c
2471
name[idx] = task->comm[idx];
sound/soc/fsl/fsl_asrc_m2m.c
186
static int asrc_m2m_device_run(struct fsl_asrc_pair *pair, struct snd_compr_task_runtime *task)
sound/soc/fsl/fsl_asrc_m2m.c
212
in_buf_len = task->input_size;
sound/soc/fsl/fsl_asrc_m2m.c
289
task->output_size = out_dma_len;
sound/soc/fsl/fsl_asrc_m2m.c
461
struct snd_compr_task_runtime *task)
sound/soc/fsl/fsl_asrc_m2m.c
475
task->input = dma_buf_export(&exp_info_in);
sound/soc/fsl/fsl_asrc_m2m.c
476
if (IS_ERR(task->input)) {
sound/soc/fsl/fsl_asrc_m2m.c
477
ret = PTR_ERR(task->input);
sound/soc/fsl/fsl_asrc_m2m.c
485
task->output = dma_buf_export(&exp_info_out);
sound/soc/fsl/fsl_asrc_m2m.c
486
if (IS_ERR(task->output)) {
sound/soc/fsl/fsl_asrc_m2m.c
487
ret = PTR_ERR(task->output);
sound/soc/fsl/fsl_asrc_m2m.c
533
struct snd_compr_task_runtime *task)
sound/soc/fsl/fsl_asrc_m2m.c
538
return asrc_m2m_device_run(pair, task);
sound/soc/fsl/fsl_asrc_m2m.c
542
struct snd_compr_task_runtime *task)
sound/soc/fsl/fsl_asrc_m2m.c
548
struct snd_compr_task_runtime *task)
sound/soc/intel/atom/sst-mfld-dsp.h
394
u8 task;
sound/soc/intel/atom/sst-mfld-platform-pcm.c
202
str_params->task = map[index].task_id;
sound/soc/intel/atom/sst-mfld-platform-pcm.c
215
str_params->task = map[index].task_id;
sound/soc/intel/atom/sst/sst_stream.c
73
sst_drv_ctx->streams[str_id].task_id = str_params->task;
tools/accounting/delaytop.c
70
#define TASK_AVG(task, field) average_ms((task).field##_delay_total, (task).field##_count)
tools/bpf/bpftool/link.c
210
if (info->iter.task.tid)
tools/bpf/bpftool/link.c
211
jsonw_uint_field(wtr, "tid", info->iter.task.tid);
tools/bpf/bpftool/link.c
212
else if (info->iter.task.pid)
tools/bpf/bpftool/link.c
213
jsonw_uint_field(wtr, "pid", info->iter.task.pid);
tools/bpf/bpftool/link.c
686
if (info->iter.task.tid)
tools/bpf/bpftool/link.c
687
printf("tid %u ", info->iter.task.tid);
tools/bpf/bpftool/link.c
688
else if (info->iter.task.pid)
tools/bpf/bpftool/link.c
689
printf("pid %u ", info->iter.task.pid);
tools/bpf/bpftool/skeleton/pid_iter.bpf.c
102
e.pid = task->tgid;
tools/bpf/bpftool/skeleton/pid_iter.bpf.c
118
task->group_leader->comm);
tools/bpf/bpftool/skeleton/pid_iter.bpf.c
70
struct task_struct *task = ctx->task;
tools/bpf/bpftool/skeleton/pid_iter.bpf.c
74
if (!file || !task)
tools/include/uapi/linux/bpf.h
152
} task;
tools/include/uapi/linux/bpf.h
6774
} task;
tools/include/uapi/linux/perf_event.h
431
task : 1, /* trace fork/exit */
tools/perf/builtin-kvm.c
1517
attr->task = 0;
tools/perf/builtin-lock.c
1742
int broken = fails->task + fails->stack + fails->time + fails->data;
tools/perf/builtin-lock.c
1756
fprintf(lock_output, " %10s: %d\n", "task", fails->task);
tools/perf/builtin-lock.c
1767
bad = fails->task + fails->stack + fails->time + fails->data;
tools/perf/builtin-lock.c
1776
fprintf(lock_output, "%s bad_%s=%d", sep, "task", fails->task);
tools/perf/builtin-report.c
1043
struct thread_list *task;
tools/perf/builtin-report.c
1049
list_for_each_entry(task, &tasks, list)
tools/perf/builtin-report.c
1050
task__print_level(machine, task->thread, fp);
tools/perf/builtin-sched.c
362
get_new_event(struct task_desc *task, u64 timestamp)
tools/perf/builtin-sched.c
365
unsigned long idx = task->nr_events;
tools/perf/builtin-sched.c
371
task->nr_events++;
tools/perf/builtin-sched.c
372
size = sizeof(struct sched_atom *) * task->nr_events;
tools/perf/builtin-sched.c
373
task->atoms = realloc(task->atoms, size);
tools/perf/builtin-sched.c
374
BUG_ON(!task->atoms);
tools/perf/builtin-sched.c
376
task->atoms[idx] = event;
tools/perf/builtin-sched.c
381
static struct sched_atom *last_event(struct task_desc *task)
tools/perf/builtin-sched.c
383
if (!task->nr_events)
tools/perf/builtin-sched.c
386
return task->atoms[task->nr_events - 1];
tools/perf/builtin-sched.c
389
static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
tools/perf/builtin-sched.c
392
struct sched_atom *event, *curr_event = last_event(task);
tools/perf/builtin-sched.c
404
event = get_new_event(task, timestamp);
tools/perf/builtin-sched.c
412
static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
tools/perf/builtin-sched.c
417
event = get_new_event(task, timestamp);
tools/perf/builtin-sched.c
438
static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
tools/perf/builtin-sched.c
441
struct sched_atom *event = get_new_event(task, timestamp);
tools/perf/builtin-sched.c
451
struct task_desc *task;
tools/perf/builtin-sched.c
466
task = sched->pid_to_task[pid];
tools/perf/builtin-sched.c
468
if (task)
tools/perf/builtin-sched.c
469
return task;
tools/perf/builtin-sched.c
471
task = zalloc(sizeof(*task));
tools/perf/builtin-sched.c
472
task->pid = pid;
tools/perf/builtin-sched.c
473
task->nr = sched->nr_tasks;
tools/perf/builtin-sched.c
474
strcpy(task->comm, comm);
tools/perf/builtin-sched.c
479
add_sched_event_sleep(sched, task, 0);
tools/perf/builtin-sched.c
481
sched->pid_to_task[pid] = task;
tools/perf/builtin-sched.c
485
sched->tasks[task->nr] = task;
tools/perf/builtin-sched.c
490
return task;
tools/perf/builtin-sched.c
496
struct task_desc *task;
tools/perf/builtin-sched.c
500
task = sched->tasks[i];
tools/perf/builtin-sched.c
502
task->nr, task->comm, task->pid, task->nr_events);
tools/perf/builtin-sched.c
614
struct task_desc *task;
tools/perf/builtin-sched.c
622
struct task_desc *this_task = parms->task;
tools/perf/builtin-sched.c
664
struct task_desc *task;
tools/perf/builtin-sched.c
679
parms->task = task = sched->tasks[i];
tools/perf/builtin-sched.c
682
sem_init(&task->ready_for_work, 0, 0);
tools/perf/builtin-sched.c
683
sem_init(&task->work_done_sem, 0, 0);
tools/perf/builtin-sched.c
684
task->curr_event = 0;
tools/perf/builtin-sched.c
685
err = pthread_create(&task->thread, &attr, thread_func, parms);
tools/perf/builtin-sched.c
694
struct task_desc *task;
tools/perf/builtin-sched.c
702
task = sched->tasks[i];
tools/perf/builtin-sched.c
703
err = pthread_join(task->thread, NULL);
tools/perf/builtin-sched.c
705
sem_destroy(&task->ready_for_work);
tools/perf/builtin-sched.c
706
sem_destroy(&task->work_done_sem);
tools/perf/builtin-sched.c
715
struct task_desc *task;
tools/perf/builtin-sched.c
723
task = sched->tasks[i];
tools/perf/builtin-sched.c
724
ret = sem_wait(&task->ready_for_work);
tools/perf/builtin-sched.c
726
sem_init(&task->ready_for_work, 0, 0);
tools/perf/builtin-sched.c
735
task = sched->tasks[i];
tools/perf/builtin-sched.c
736
ret = sem_wait(&task->work_done_sem);
tools/perf/builtin-sched.c
738
sem_init(&task->work_done_sem, 0, 0);
tools/perf/builtin-sched.c
739
sched->cpu_usage += task->cpu_usage;
tools/perf/builtin-sched.c
740
task->cpu_usage = 0;
tools/perf/builtin-sched.c
757
task = sched->tasks[i];
tools/perf/builtin-sched.c
758
task->curr_event = 0;
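builtin-sched's replay machinery above pairs each task_desc with two semaphores: every worker thread posts ready_for_work once it is up, the coordinator waits on all of them, then waits again on each work_done_sem after the replayed burst. A self-contained miniature of that handshake (two fixed workers instead of sched->nr_tasks, and no replayed events):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    #define NR_TASKS 2

    struct task_desc {
        pthread_t thread;
        sem_t ready_for_work, work_done_sem;
    };
    static struct task_desc tasks[NR_TASKS];

    static void *thread_func(void *arg)
    {
        struct task_desc *task = arg;

        sem_post(&task->ready_for_work);  /* tell the coordinator we're up */
        /* ... replayed sched events would run here ... */
        sem_post(&task->work_done_sem);   /* report the burst finished */
        return NULL;
    }

    int main(void)
    {
        for (int i = 0; i < NR_TASKS; i++) {
            sem_init(&tasks[i].ready_for_work, 0, 0);
            sem_init(&tasks[i].work_done_sem, 0, 0);
            pthread_create(&tasks[i].thread, NULL, thread_func, &tasks[i]);
        }
        for (int i = 0; i < NR_TASKS; i++)
            sem_wait(&tasks[i].ready_for_work); /* all workers ready */
        for (int i = 0; i < NR_TASKS; i++)
            sem_wait(&tasks[i].work_done_sem);  /* all bursts done */
        for (int i = 0; i < NR_TASKS; i++) {
            pthread_join(tasks[i].thread, NULL);
            sem_destroy(&tasks[i].ready_for_work);
            sem_destroy(&tasks[i].work_done_sem);
        }
        puts("replay handshake complete");
        return 0;
    }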
tools/perf/tests/task-exit.c
89
evsel->core.attr.task = 1;
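attr->task is the perf_event_attr bit documented in the uapi line above ("trace fork/exit"): with it set, the kernel emits PERF_RECORD_FORK and PERF_RECORD_EXIT sideband records into the event's ring buffer, which is what the task-exit test relies on. A minimal opener, assuming a Linux host with perf_event_open permitted (dummy software event, no sampling or mmap shown):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_DUMMY; /* no counting, sideband only */
        attr.task = 1;                     /* request fork/exit records */
        attr.disabled = 1;

        /* pid=0, cpu=-1: monitor this process on any CPU */
        fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }
        printf("perf fd %d opened with attr.task=1\n", fd);
        close(fd);
        return 0;
    }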
tools/perf/util/bpf_lock_contention.c
573
struct contention_task_data task;
tools/perf/util/bpf_lock_contention.c
582
!bpf_map_lookup_elem(task_fd, &pid, &task) &&
tools/perf/util/bpf_lock_contention.c
583
thread__set_comm(t, task.comm, /*timestamp=*/0)) {
tools/perf/util/bpf_lock_contention.c
584
snprintf(name_buf, sizeof(name_buf), "%s", task.comm);
tools/perf/util/bpf_lock_contention.c
731
con->fails.task = skel->bss->task_fail;
tools/perf/util/bpf_skel/bperf_follower.bpf.c
110
child_key = task->pid;
tools/perf/util/bpf_skel/bperf_follower.bpf.c
114
child_key = task->tgid;
tools/perf/util/bpf_skel/bperf_follower.bpf.c
140
int BPF_PROG(on_exittask, struct task_struct *task)
tools/perf/util/bpf_skel/bperf_follower.bpf.c
154
pid = task->pid;
tools/perf/util/bpf_skel/bperf_follower.bpf.c
98
int BPF_PROG(on_newtask, struct task_struct *task, __u64 clone_flags)
tools/perf/util/bpf_skel/kwork_top.bpf.c
105
static __always_inline void update_task_info(struct task_struct *task, __u32 cpu)
tools/perf/util/bpf_skel/kwork_top.bpf.c
108
.pid = task->pid,
tools/perf/util/bpf_skel/kwork_top.bpf.c
114
.tgid = task->tgid,
tools/perf/util/bpf_skel/kwork_top.bpf.c
115
.is_kthread = task->flags & PF_KTHREAD ? 1 : 0,
tools/perf/util/bpf_skel/kwork_top.bpf.c
117
BPF_CORE_READ_STR_INTO(&data.comm, task, comm);
tools/perf/util/bpf_skel/kwork_top.bpf.c
139
static void on_sched_out(struct task_struct *task, __u64 ts, __u32 cpu)
tools/perf/util/bpf_skel/kwork_top.bpf.c
144
pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL, 0);
tools/perf/util/bpf_skel/kwork_top.bpf.c
152
.pid = task->pid,
tools/perf/util/bpf_skel/kwork_top.bpf.c
153
.task_p = (__u64)task,
tools/perf/util/bpf_skel/kwork_top.bpf.c
157
update_task_info(task, cpu);
tools/perf/util/bpf_skel/kwork_top.bpf.c
160
static void on_sched_in(struct task_struct *task, __u64 ts)
tools/perf/util/bpf_skel/kwork_top.bpf.c
164
pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL,
tools/perf/util/bpf_skel/kwork_top.bpf.c
197
struct task_struct *task;
tools/perf/util/bpf_skel/kwork_top.bpf.c
209
task = (struct task_struct *)bpf_get_current_task();
tools/perf/util/bpf_skel/kwork_top.bpf.c
210
if (!task)
tools/perf/util/bpf_skel/kwork_top.bpf.c
215
.pid = BPF_CORE_READ(task, pid),
tools/perf/util/bpf_skel/kwork_top.bpf.c
216
.task_p = (__u64)task,
tools/perf/util/bpf_skel/kwork_top.bpf.c
232
struct task_struct *task;
tools/perf/util/bpf_skel/kwork_top.bpf.c
245
task = (struct task_struct *)bpf_get_current_task();
tools/perf/util/bpf_skel/kwork_top.bpf.c
246
if (!task)
tools/perf/util/bpf_skel/kwork_top.bpf.c
251
.pid = BPF_CORE_READ(task, pid),
tools/perf/util/bpf_skel/kwork_top.bpf.c
252
.task_p = (__u64)task,
tools/perf/util/bpf_skel/kwork_top.bpf.c
269
struct task_struct *task;
tools/perf/util/bpf_skel/kwork_top.bpf.c
281
task = (struct task_struct *)bpf_get_current_task();
tools/perf/util/bpf_skel/kwork_top.bpf.c
282
if (!task)
tools/perf/util/bpf_skel/kwork_top.bpf.c
287
.pid = BPF_CORE_READ(task, pid),
tools/perf/util/bpf_skel/kwork_top.bpf.c
288
.task_p = (__u64)task,
tools/perf/util/bpf_skel/kwork_top.bpf.c
304
struct task_struct *task;
tools/perf/util/bpf_skel/kwork_top.bpf.c
317
task = (struct task_struct *)bpf_get_current_task();
tools/perf/util/bpf_skel/kwork_top.bpf.c
318
if (!task)
tools/perf/util/bpf_skel/kwork_top.bpf.c
323
.pid = BPF_CORE_READ(task, pid),
tools/perf/util/bpf_skel/kwork_top.bpf.c
324
.task_p = (__u64)task,
tools/perf/util/bpf_skel/lock_contention.bpf.c
219
struct task_struct *task;
tools/perf/util/bpf_skel/lock_contention.bpf.c
225
task = bpf_get_current_task_btf();
tools/perf/util/bpf_skel/lock_contention.bpf.c
236
cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup);
tools/perf/util/bpf_skel/lock_contention.bpf.c
301
static inline int update_task_data(struct task_struct *task)
tools/perf/util/bpf_skel/lock_contention.bpf.c
306
err = bpf_core_read(&pid, sizeof(pid), &task->pid);
tools/perf/util/bpf_skel/lock_contention.bpf.c
314
BPF_CORE_READ_STR_INTO(&data.comm, task, comm);
tools/perf/util/bpf_skel/lock_contention.bpf.c
328
struct task_struct *task;
tools/perf/util/bpf_skel/lock_contention.bpf.c
359
task = (void *)(owner & ~7UL);
tools/perf/util/bpf_skel/lock_contention.bpf.c
360
return task;
tools/perf/util/bpf_skel/lock_contention.bpf.c
545
struct task_struct *task;
tools/perf/util/bpf_skel/lock_contention.bpf.c
551
task = get_lock_owner(pelem->lock, pelem->flags);
tools/perf/util/bpf_skel/lock_contention.bpf.c
552
if (!task)
tools/perf/util/bpf_skel/lock_contention.bpf.c
555
owner_pid = BPF_CORE_READ(task, pid);
tools/perf/util/bpf_skel/lock_contention.bpf.c
566
task = bpf_task_from_pid(owner_pid);
tools/perf/util/bpf_skel/lock_contention.bpf.c
567
if (!task)
tools/perf/util/bpf_skel/lock_contention.bpf.c
570
bpf_get_task_stack(task, buf, max_stack * sizeof(unsigned long), 0);
tools/perf/util/bpf_skel/lock_contention.bpf.c
571
bpf_task_release(task);
tools/perf/util/bpf_skel/lock_contention.bpf.c
612
struct task_struct *task;
tools/perf/util/bpf_skel/lock_contention.bpf.c
615
task = get_lock_owner(pelem->lock, pelem->flags);
tools/perf/util/bpf_skel/lock_contention.bpf.c
618
if (task)
tools/perf/util/bpf_skel/lock_contention.bpf.c
619
pelem->flags = BPF_CORE_READ(task, pid);
tools/perf/util/bpf_skel/lock_contention.bpf.c
624
task = bpf_get_current_task_btf();
tools/perf/util/bpf_skel/lock_contention.bpf.c
627
if (task) {
tools/perf/util/bpf_skel/lock_contention.bpf.c
628
if (update_task_data(task) < 0 && lock_owner)
tools/perf/util/bpf_skel/lock_contention.bpf.c
716
struct task_struct *task;
tools/perf/util/bpf_skel/lock_contention.bpf.c
721
task = bpf_task_from_pid(otdata->pid);
tools/perf/util/bpf_skel/lock_contention.bpf.c
722
if (!task)
tools/perf/util/bpf_skel/lock_contention.bpf.c
725
bpf_get_task_stack(task, buf,
tools/perf/util/bpf_skel/lock_contention.bpf.c
727
bpf_task_release(task);
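The lock_contention.bpf.c lines show the acquire/release contract on task kfuncs: bpf_task_from_pid() hands back a referenced pointer that must be NULL-checked and eventually passed to bpf_task_release(), or the verifier rejects the program. A hedged BPF-side sketch of that shape (attach point, pid, and stack depth are placeholders; build against libbpf with a vmlinux.h):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
    void bpf_task_release(struct task_struct *p) __ksym;

    #define MAX_STACK 8
    static __u64 stack_buf[MAX_STACK];

    SEC("tp_btf/sched_switch")              /* illustrative attach point */
    int sketch_owner_stack(void *ctx)
    {
        struct task_struct *task = bpf_task_from_pid(1); /* pid 1 as a demo */

        if (!task)                  /* the verifier requires this check */
            return 0;
        bpf_get_task_stack(task, stack_buf, sizeof(stack_buf), 0);
        bpf_task_release(task);     /* every acquired ref must be released */
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";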
tools/perf/util/bpf_skel/off_cpu.bpf.c
328
struct task_struct *task;
tools/perf/util/bpf_skel/off_cpu.bpf.c
336
task = (struct task_struct *)bpf_get_current_task();
tools/perf/util/bpf_skel/off_cpu.bpf.c
338
pid = BPF_CORE_READ(task, tgid);
tools/perf/util/bpf_skel/off_cpu.bpf.c
342
task = (struct task_struct *)ctx[0];
tools/perf/util/bpf_skel/off_cpu.bpf.c
345
pid = task->tgid;
tools/perf/util/bpf_skel/syscall_summary.bpf.c
45
struct task_struct *task;
tools/perf/util/bpf_skel/syscall_summary.bpf.c
51
task = bpf_get_current_task_btf();
tools/perf/util/bpf_skel/syscall_summary.bpf.c
62
cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup);
tools/perf/util/evsel.c
1630
attr->task = track;
tools/perf/util/evsel.c
165
WRITE_ASS(task, "d");
tools/perf/util/lock-contention.h
137
int task;
tools/perf/util/perf_event_attr_fprintf.c
322
PRINT_ATTRf(task, p_unsigned);
tools/perf/util/python.c
1003
task = 0,
tools/perf/util/python.c
1018
&enable_on_exec, &task, &watermark,
tools/perf/util/python.c
1046
attr.task = task;
tools/sched_ext/scx_userland.bpf.c
185
struct scx_userland_enqueued_task task = {};
tools/sched_ext/scx_userland.bpf.c
187
task.pid = p->pid;
tools/sched_ext/scx_userland.bpf.c
188
task.sum_exec_runtime = p->se.sum_exec_runtime;
tools/sched_ext/scx_userland.bpf.c
189
task.weight = p->scx.weight;
tools/sched_ext/scx_userland.bpf.c
191
if (bpf_map_push_elem(&enqueued, &task, 0)) {
tools/sched_ext/scx_userland.c
149
static __u32 task_pid(const struct enqueued_task *task)
tools/sched_ext/scx_userland.c
151
return ((uintptr_t)task - (uintptr_t)tasks) / sizeof(*task);
tools/sched_ext/scx_userland.c
237
struct scx_userland_enqueued_task task;
tools/sched_ext/scx_userland.c
240
if (bpf_map_lookup_and_delete_elem(enqueued_fd, NULL, &task)) {
tools/sched_ext/scx_userland.c
246
err = vruntime_enqueue(&task);
tools/sched_ext/scx_userland.c
249
task.pid, strerror(err));
tools/sched_ext/scx_userland.c
261
struct enqueued_task *task;
tools/sched_ext/scx_userland.c
265
task = LIST_FIRST(&vruntime_head);
tools/sched_ext/scx_userland.c
266
if (!task)
tools/sched_ext/scx_userland.c
269
min_vruntime = task->vruntime;
tools/sched_ext/scx_userland.c
270
pid = task_pid(task);
tools/sched_ext/scx_userland.c
271
LIST_REMOVE(task, entries);
tools/sched_ext/scx_userland.c
279
LIST_INSERT_HEAD(&vruntime_head, task, entries);
tools/testing/selftests/bpf/bpf_experimental.h
165
struct task_struct *task,
tools/testing/selftests/bpf/bpf_experimental.h
206
extern struct file *bpf_get_task_exe_file(struct task_struct *task) __ksym;
tools/testing/selftests/bpf/bpf_experimental.h
571
struct task_struct *task, unsigned int flags) __weak __ksym;
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
1639
linfo.task.tid = getpid();
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
1702
linfo.task.pid = getpid();
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
1708
linfo.task.pid = 0;
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
1709
linfo.task.tid = getpid();
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
187
linfo.task.tid = getpid();
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
198
ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
261
linfo.task.tid = sys_gettid();
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
266
linfo.task.tid = 0;
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
267
linfo.task.pid = getpid();
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
296
linfo.task.pid = getpid();
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
314
linfo.task.pid_fd = pidfd;
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
444
linfo.task.tid = getpid();
tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
28
struct task_struct *task = ctx->task;
tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
33
if (!task || !file)
tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
44
*sock_tgid = task->tgid;
tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
19
struct task_struct *task = ctx->task;
tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
25
ptr.ptr = task;
tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
17
struct task_struct *task = ctx->task;
tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
21
if (task == (void *)0 || file == (void *)0)
tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
29
if (tgid == task->tgid && task->tgid != task->pid)
tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
32
if (last_tgid != task->tgid) {
tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
33
last_tgid = task->tgid;
tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
37
BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd,
tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
16
struct task_struct *task = ctx->task;
tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
19
if (task == (void *)0)
tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
22
retlen = bpf_get_task_stack(task, entries,
tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
27
BPF_SEQ_PRINTF(seq, "pid: %8u num_entries: %8u\n", task->pid,
tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
44
struct task_struct *task = ctx->task;
tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
48
if (task == (void *)0)
tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
51
res = bpf_get_task_stack(task, entries,
tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
30
struct task_struct *task = ctx->task;
tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
34
if (task == (void *)0 || vma == (void *)0)
tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
38
if (task->tgid != (pid_t)pid) {
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
107
ptr, task, BPF_F_PAD_ZEROS);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
116
ret = bpf_copy_from_user_task_str((char *)task_str2, 10, user_ptr, task, 0);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
118
if (bpf_strncmp(task_str2, 10, "test_data\0") != 0 || ret != 10 || task->tgid != pid) {
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
124
ret = bpf_copy_from_user_task_str((char *)task_str3, 2, user_ptr, task, 0);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
131
ret = bpf_copy_from_user_task_str((char *)task_str4, 20, user_ptr, task, 0);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
139
ret = bpf_copy_from_user_task_str((char *)task_str4, 20, user_ptr, task, BPF_F_PAD_ZEROS);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
147
ret = bpf_copy_from_user_task_str(big_str1, 5000, user_ptr, task, 0);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
154
ret = bpf_copy_from_user_task_str(big_str1, 5000, user_ptr_long, task, BPF_F_PAD_ZEROS);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
176
ret = bpf_copy_from_user_task_str(big_str2, 5005, user_ptr_long, task, BPF_F_PAD_ZEROS);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
184
ret = bpf_copy_from_user_task_str(big_str3, 4996, user_ptr_long, task, 0);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
196
BPF_SEQ_PRINTF(seq, "%8d %8d %8d\n", task->tgid, task->pid, user_data);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
24
struct task_struct *task = ctx->task;
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
27
if (task == (void *)0) {
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
32
if (task->pid != (pid_t)tid)
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
40
BPF_SEQ_PRINTF(seq, "%8d %8d\n", task->tgid, task->pid);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
53
struct task_struct *task = ctx->task;
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
63
if (task == (void *)0) {
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
70
ret = bpf_copy_from_user_task(&user_data, sizeof(uint32_t), ptr, task, 0);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
81
regs = (struct pt_regs *)bpf_task_pt_regs(task);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
88
ret = bpf_copy_from_user_task(&user_data, sizeof(uint32_t), ptr, task, 0);
tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
98
ret = bpf_copy_from_user_task_str((char *)task_str1, sizeof(task_str1), ptr, task, 0);
tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
12
struct task_struct *task = ctx->task;
tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
15
tgid = task->tgid;
tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c
20
struct task_struct *task = ctx->task;
tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c
22
if (task == NULL || vma == NULL)
tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c
25
if (last_tgid != task->tgid)
tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c
27
last_tgid = task->tgid;
tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c
29
if (task->tgid != pid)
tools/testing/selftests/bpf/progs/bpf_mod_race.c
20
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/bpf_mod_race.c
22
return task->tgid == bpf_mod_race_config.tgid;
tools/testing/selftests/bpf/progs/bpf_smc.c
78
struct task_struct *task;
tools/testing/selftests/bpf/progs/bpf_smc.c
89
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/bpf_smc.c
91
if (!task || !task->nsproxy->net_ns->smc.hs_ctrl)
tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c
67
struct task_struct *task, bool threadgroup)
tools/testing/selftests/bpf/progs/cgrp_ls_negative.c
20
struct task_struct *task;
tools/testing/selftests/bpf/progs/cgrp_ls_negative.c
22
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/cgrp_ls_negative.c
23
(void)bpf_cgrp_storage_get(&map_a, (struct cgroup *)task, 0,
tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
27
struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
46
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
50
cgrp = bpf_task_get_cgroup1(task, target_hid);
tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
59
__on_update(task->cgroups->dfl_cgrp);
tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
79
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
83
cgrp = bpf_task_get_cgroup1(task, target_hid);
tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
92
__on_enter(regs, id, task->cgroups->dfl_cgrp);
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
103
cgrp = bpf_task_get_cgroup1(task, target_hid);
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
118
cgrp = task->cgroups->dfl_cgrp;
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
22
struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
59
struct task_struct *task;
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
62
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
63
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
67
cgrp = bpf_task_get_cgroup1(task, target_hid);
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
79
struct task_struct *task;
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
81
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
82
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
86
__no_rcu_lock(task->cgroups->dfl_cgrp);
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
93
struct task_struct *task;
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
97
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
98
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
107
struct task_struct *task;
tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
110
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
111
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
115
cgrp = bpf_task_get_cgroup1(task, target_hid);
tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
124
__on_exit(regs, id, task->cgroups->dfl_cgrp);
tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
33
struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
69
struct task_struct *task;
tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
72
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
73
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
77
cgrp = bpf_task_get_cgroup1(task, target_hid);
tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
86
__on_enter(regs, id, task->cgroups->dfl_cgrp);
tools/testing/selftests/bpf/progs/cpumask_failure.c
111
int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_failure.c
121
int BPF_PROG(test_global_mask_out_of_rcu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_failure.c
155
int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_failure.c
183
int BPF_PROG(test_global_mask_rcu_no_null_check, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_failure.c
208
int BPF_PROG(test_invalid_nested_array, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_failure.c
228
int BPF_PROG(test_populate_invalid_destination, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_failure.c
243
int BPF_PROG(test_populate_invalid_source, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_failure.c
36
int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_failure.c
49
int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_failure.c
64
int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_failure.c
69
cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr);
tools/testing/selftests/bpf/progs/cpumask_failure.c
77
int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_failure.c
80
bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr);
tools/testing/selftests/bpf/progs/cpumask_failure.c
87
int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
139
int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
155
int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
184
int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
213
int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
252
int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
284
int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
316
int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
363
int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
405
int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
459
int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
474
int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
504
int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
537
int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
635
int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
641
int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
647
int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
673
int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
692
int BPF_PROG(test_global_mask_nested_deep_array_rcu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
709
int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
752
int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
774
int BPF_PROG(test_populate_reject_small_mask, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
805
int BPF_PROG(test_populate_reject_unaligned, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/cpumask_success.c
838
int BPF_PROG(test_populate, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/dynptr_success.c
1112
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/dynptr_success.c
1114
return bpf_copy_from_user_task_dynptr(dptr, off, size, unsafe_ptr, task);
tools/testing/selftests/bpf/progs/dynptr_success.c
1120
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/dynptr_success.c
1122
return bpf_copy_from_user_task_str_dynptr(dptr, off, size, unsafe_ptr, task);
tools/testing/selftests/bpf/progs/exhandler_kern.c
19
int BPF_PROG(trace_task_newtask, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/exhandler_kern.c
38
work = task->task_works;
tools/testing/selftests/bpf/progs/file_reader.c
68
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/file_reader.c
80
bpf_task_work_schedule_signal(task, &work->tw, &arrmap, task_work_callback);
tools/testing/selftests/bpf/progs/file_reader.c
87
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/file_reader.c
88
struct file *file = bpf_get_task_exe_file(task);
tools/testing/selftests/bpf/progs/file_reader_fail.c
20
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/file_reader_fail.c
21
struct file *file = bpf_get_task_exe_file(task);
tools/testing/selftests/bpf/progs/find_vma.c
23
static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
tools/testing/selftests/bpf/progs/find_vma.c
40
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/find_vma.c
43
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/find_vma.c
46
find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);
tools/testing/selftests/bpf/progs/find_vma.c
49
find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);
tools/testing/selftests/bpf/progs/find_vma.c
56
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/find_vma.c
59
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/find_vma.c
62
find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);
tools/testing/selftests/bpf/progs/find_vma.c
67
find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);
tools/testing/selftests/bpf/progs/find_vma_fail1.c
13
static long write_vma(struct task_struct *task, struct vm_area_struct *vma,
tools/testing/selftests/bpf/progs/find_vma_fail1.c
25
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/find_vma_fail1.c
28
bpf_find_vma(task, 0, write_vma, &data, 0);
tools/testing/selftests/bpf/progs/find_vma_fail2.c
12
static long write_task(struct task_struct *task, struct vm_area_struct *vma,
tools/testing/selftests/bpf/progs/find_vma_fail2.c
16
task->mm = NULL;
tools/testing/selftests/bpf/progs/find_vma_fail2.c
24
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/find_vma_fail2.c
27
bpf_find_vma(task, 0, write_task, &data, 0);
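
The find_vma entries above revolve around the bpf_find_vma() helper: it looks up the VMA containing an address in a task's mm and invokes a callback on it, and the _fail variants are rejected for writing through the callback's task/vma pointers. A hedged sketch of the read-only pattern (attach point and names are illustrative; headers/license as in the task_newtask sketch above):

    static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
                          void *data)
    {
            __u64 *found = data;

            *found = 1;     /* runs at most once, on the VMA containing addr;
                             * task and vma must be treated as read-only */
            return 0;
    }

    SEC("raw_tp/sys_enter")
    int find_vma_example(void *ctx)
    {
            struct task_struct *task = bpf_get_current_task_btf();
            __u64 found = 0;

            /* flags must currently be 0; &found must point into the stack */
            bpf_find_vma(task, /* illustrative addr */ 0x1000, check_vma,
                         &found, 0);
            return 0;
    }
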
tools/testing/selftests/bpf/progs/iters_css_task.c
27
struct task_struct *task;
tools/testing/selftests/bpf/progs/iters_css_task.c
41
bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS)
tools/testing/selftests/bpf/progs/iters_css_task.c
42
if (task->pid == target_pid)
tools/testing/selftests/bpf/progs/iters_css_task.c
61
struct task_struct *task;
tools/testing/selftests/bpf/progs/iters_css_task.c
77
bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
tools/testing/selftests/bpf/progs/iters_css_task.c
78
if (task->pid == target_pid)
tools/testing/selftests/bpf/progs/iters_css_task.c
91
struct task_struct *task;
tools/testing/selftests/bpf/progs/iters_css_task.c
97
bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
tools/testing/selftests/bpf/progs/iters_task.c
29
bpf_for_each(task, pos, NULL, ~0U) {
tools/testing/selftests/bpf/progs/iters_task.c
34
bpf_for_each(task, pos, NULL, BPF_TASK_ITER_PROC_THREADS) {
tools/testing/selftests/bpf/progs/iters_task.c
39
bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS)
tools/testing/selftests/bpf/progs/iters_task.c
43
bpf_for_each(task, pos, cur_task, BPF_TASK_ITER_PROC_THREADS)
tools/testing/selftests/bpf/progs/iters_task.c
46
bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_THREADS)
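
The iters_task.c entries use the open-coded task iterator: bpf_for_each(task, ...) from the selftests' bpf_experimental.h expands to the bpf_iter_task_{new,next,destroy} kfuncs, and iters_task_failure.c shows the verifier rejecting them outside an RCU critical section. A sketch of the guarded form (attach point illustrative; flag names from vmlinux.h; headers/license as in the task_newtask sketch):

    SEC("raw_tp/sys_enter")
    int iter_tasks_example(void *ctx)
    {
            struct task_struct *pos;
            int nr_procs = 0;

            bpf_rcu_read_lock();    /* required: the iterator is RCU-protected */
            bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) {
                    /* NULL starting task + ALL_PROCS walks every process */
                    nr_procs++;
            }
            bpf_rcu_read_unlock();
            return 0;
    }
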
tools/testing/selftests/bpf/progs/iters_task_failure.c
100
bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
tools/testing/selftests/bpf/progs/iters_task_failure.c
23
bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) {
tools/testing/selftests/bpf/progs/iters_task_failure.c
55
bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) {
tools/testing/selftests/bpf/progs/iters_task_failure.c
94
struct task_struct *task;
tools/testing/selftests/bpf/progs/iters_task_vma.c
20
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/iters_task_vma.c
24
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/iters_task_vma.c
30
bpf_for_each(task_vma, vma, task, 0) {
tools/testing/selftests/bpf/progs/local_storage.c
66
struct task_struct *task;
tools/testing/selftests/bpf/progs/local_storage.c
72
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/local_storage.c
73
if (!task)
tools/testing/selftests/bpf/progs/local_storage.c
78
storage = bpf_task_storage_get(&task_storage_map, task, 0, 0);
tools/testing/selftests/bpf/progs/local_storage.c
85
storage = bpf_task_storage_get(&task_storage_map2, task, 0,
tools/testing/selftests/bpf/progs/local_storage.c
90
if (bpf_task_storage_delete(&task_storage_map2, task))
tools/testing/selftests/bpf/progs/local_storage.c
93
if (bpf_task_storage_delete(&task_storage_map, task))
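
The local_storage.c entries show the full task-local-storage round trip. A sketch of the map shape and the get/delete pair (map and program names illustrative; headers/license as above). TASK_STORAGE maps must set BPF_F_NO_PREALLOC and key on int:

    struct {
            __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
            __uint(map_flags, BPF_F_NO_PREALLOC);
            __type(key, int);
            __type(value, __u64);
    } task_storage_map SEC(".maps");

    SEC("tp_btf/task_newtask")
    int BPF_PROG(storage_example, struct task_struct *task, u64 clone_flags)
    {
            __u64 *val;

            /* NULL initial value + F_CREATE yields zero-initialized storage */
            val = bpf_task_storage_get(&task_storage_map, task, 0,
                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
            if (!val)
                    return 0;
            *val += 1;
            bpf_task_storage_delete(&task_storage_map, task);
            return 0;
    }
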
tools/testing/selftests/bpf/progs/local_storage_bench.c
46
struct task_struct *task;
tools/testing/selftests/bpf/progs/local_storage_bench.c
69
bpf_task_storage_get(inner_map, lctx->task, &idx,
tools/testing/selftests/bpf/progs/local_storage_bench.c
95
lctx.task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c
25
struct task_struct *task;
tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c
30
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c
31
s = bpf_task_storage_get(&task_storage, task, &idx,
tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c
37
bpf_task_storage_delete(&task_storage, task);
tools/testing/selftests/bpf/progs/lsm.c
160
int BPF_PROG(test_task_free, struct task_struct *task)
tools/testing/selftests/bpf/progs/map_kptr.c
232
struct task_struct *task;
tools/testing/selftests/bpf/progs/map_kptr.c
235
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/map_kptr.c
236
if (!task)
tools/testing/selftests/bpf/progs/map_kptr.c
238
v = bpf_task_storage_get(&task_ls_map, task, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c
15
struct task_struct *task;
tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c
19
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c
20
idata = task->nameidata;
tools/testing/selftests/bpf/progs/nested_trust_failure.c
28
int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/nested_trust_failure.c
30
bpf_cpumask_test_cpu(0, task->user_cpus_ptr);
tools/testing/selftests/bpf/progs/nested_trust_success.c
22
int BPF_PROG(test_read_cpumask, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/nested_trust_success.c
24
bpf_cpumask_test_cpu(0, task->cpus_ptr);
tools/testing/selftests/bpf/progs/nested_trust_success.c
38
int BPF_PROG(test_nested_offset, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/nested_trust_success.c
40
bpf_cpumask_first_zero(&task->cpus_mask);
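
The nested_trust pair above draws the line for nested pointers: task->cpus_ptr (and the embedded task->cpus_mask) are trusted cpumask pointers the cpumask kfuncs accept, while task->user_cpus_ptr in the failure test is not. A sketch of the accepted read (headers/license as above; the kfunc is declared __ksym as in the selftests):

    bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;

    SEC("tp_btf/task_newtask")
    int BPF_PROG(cpumask_read_example, struct task_struct *task,
                 u64 clone_flags)
    {
            /* trusted nested pointer: accepted by the verifier */
            bpf_cpumask_test_cpu(0, task->cpus_ptr);
            return 0;
    }
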
tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
25
struct task_struct *task;
tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
29
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
30
e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,
tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
50
struct task_struct *task;
tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
55
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
56
e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
79
struct task_struct *task;
tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
88
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
89
e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
tools/testing/selftests/bpf/progs/profiler.inc.h
178
static INLINE void populate_ancestors(struct task_struct* task,
tools/testing/selftests/bpf/progs/profiler.inc.h
181
struct task_struct* parent = task;
tools/testing/selftests/bpf/progs/profiler.inc.h
250
struct task_struct* task,
tools/testing/selftests/bpf/progs/profiler.inc.h
254
BPF_CORE_READ(task, nsproxy, cgroup_ns, root_cset, dfl_cgrp, kn);
tools/testing/selftests/bpf/progs/profiler.inc.h
255
struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
tools/testing/selftests/bpf/progs/profiler.inc.h
266
BPF_CORE_READ(task, cgroups, subsys[i]);
tools/testing/selftests/bpf/progs/profiler.inc.h
331
struct task_struct* task,
tools/testing/selftests/bpf/progs/profiler.inc.h
339
metadata->exec_id = BPF_CORE_READ(task, self_exec_id);
tools/testing/selftests/bpf/progs/profiler.inc.h
340
metadata->start_time = BPF_CORE_READ(task, start_time);
tools/testing/selftests/bpf/progs/profiler.inc.h
343
size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
tools/testing/selftests/bpf/progs/profiler.inc.h
360
struct task_struct* task = (struct task_struct*)bpf_get_current_task();
tools/testing/selftests/bpf/progs/profiler.inc.h
362
void* payload = populate_var_metadata(&kill_data->meta, task, spid, kill_data->payload);
tools/testing/selftests/bpf/progs/profiler.inc.h
363
payload = populate_cgroup_info(&kill_data->cgroup_data, task, payload);
tools/testing/selftests/bpf/progs/profiler.inc.h
366
populate_ancestors(task, &kill_data->ancestors_info);
tools/testing/selftests/bpf/progs/profiler.inc.h
561
struct task_struct* task = (struct task_struct*)bpf_get_current_task();
tools/testing/selftests/bpf/progs/profiler.inc.h
563
void* payload = populate_var_metadata(&sysctl_data->meta, task, pid, sysctl_data->payload);
tools/testing/selftests/bpf/progs/profiler.inc.h
564
payload = populate_cgroup_info(&sysctl_data->cgroup_data, task, payload);
tools/testing/selftests/bpf/progs/profiler.inc.h
566
populate_ancestors(task, &sysctl_data->ancestors_info);
tools/testing/selftests/bpf/progs/profiler.inc.h
626
struct task_struct* task = (struct task_struct*)bpf_get_current_task();
tools/testing/selftests/bpf/progs/profiler.inc.h
627
struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
tools/testing/selftests/bpf/progs/profiler.inc.h
647
size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
tools/testing/selftests/bpf/progs/profiler.inc.h
698
struct task_struct* task = (struct task_struct*)bpf_get_current_task();
tools/testing/selftests/bpf/progs/profiler.inc.h
704
void* payload = populate_var_metadata(&proc_exec_data->meta, task, pid,
tools/testing/selftests/bpf/progs/profiler.inc.h
706
payload = populate_cgroup_info(&proc_exec_data->cgroup_data, task, payload);
tools/testing/selftests/bpf/progs/profiler.inc.h
708
struct task_struct* parent_task = BPF_CORE_READ(task, real_parent);
tools/testing/selftests/bpf/progs/profiler.inc.h
722
void* arg_start = (void*)BPF_CORE_READ(task, mm, arg_start);
tools/testing/selftests/bpf/progs/profiler.inc.h
723
void* arg_end = (void*)BPF_CORE_READ(task, mm, arg_end);
tools/testing/selftests/bpf/progs/profiler.inc.h
733
void* env_start = (void*)BPF_CORE_READ(task, mm, env_start);
tools/testing/selftests/bpf/progs/profiler.inc.h
734
void* env_end = (void*)BPF_CORE_READ(task, mm, env_end);
tools/testing/selftests/bpf/progs/profiler.inc.h
787
struct task_struct* task = (struct task_struct*)bpf_get_current_task();
tools/testing/selftests/bpf/progs/profiler.inc.h
799
void* payload = populate_var_metadata(&filemod_data->meta, task, pid,
tools/testing/selftests/bpf/progs/profiler.inc.h
801
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
tools/testing/selftests/bpf/progs/profiler.inc.h
840
struct task_struct* task = (struct task_struct*)bpf_get_current_task();
tools/testing/selftests/bpf/progs/profiler.inc.h
852
void* payload = populate_var_metadata(&filemod_data->meta, task, pid,
tools/testing/selftests/bpf/progs/profiler.inc.h
854
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
tools/testing/selftests/bpf/progs/profiler.inc.h
895
struct task_struct* task = (struct task_struct*)bpf_get_current_task();
tools/testing/selftests/bpf/progs/profiler.inc.h
907
void* payload = populate_var_metadata(&filemod_data->meta, task, pid,
tools/testing/selftests/bpf/progs/profiler.inc.h
909
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
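
profiler.inc.h reads long pointer chains off the current task with BPF_CORE_READ(), which rewrites each -> hop into a relocatable bpf_probe_read_kernel(). A condensed sketch of the idiom (attach point illustrative; headers/license as above, plus <bpf/bpf_core_read.h>):

    SEC("raw_tp/sys_enter")
    int core_read_example(void *ctx)
    {
            struct task_struct *task = (struct task_struct *)bpf_get_current_task();
            unsigned long arg_start, arg_end;
            char comm[16];

            /* each hop below is one CO-RE-relocated probe read */
            arg_start = BPF_CORE_READ(task, mm, arg_start);
            arg_end = BPF_CORE_READ(task, mm, arg_end);
            bpf_core_read_str(comm, sizeof(comm), &task->comm);
            return 0;
    }
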
tools/testing/selftests/bpf/progs/pyperf.h
248
struct task_struct* task = (struct task_struct*)bpf_get_current_task();
tools/testing/selftests/bpf/progs/pyperf.h
249
void* tls_base = (void*)task;
tools/testing/selftests/bpf/progs/raw_tp_null.c
16
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/raw_tp_null.c
18
if (task->pid != tid)
tools/testing/selftests/bpf/progs/rcu_read_lock.c
105
struct task_struct *task, *real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
108
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
112
real_parent = task->real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
124
struct task_struct *task, *real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
126
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
128
real_parent = task->real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
140
struct task_struct *task, *real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
143
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
147
real_parent = task->real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
159
struct task_struct *task, *real_parent, *gparent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
161
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
163
real_parent = task->real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
187
struct task_struct *task;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
190
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
192
(void)bpf_task_storage_get(&map_a, task, 0, 0);
tools/testing/selftests/bpf/progs/rcu_read_lock.c
201
struct task_struct *task;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
204
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
206
(void)bpf_task_storage_get(&map_a, task, 0, 0);
tools/testing/selftests/bpf/progs/rcu_read_lock.c
213
struct task_struct *task, *real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
215
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
218
real_parent = task->real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
231
struct task_struct *task, *real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
236
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
239
real_parent = task->real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
247
(void)bpf_copy_from_user_task(&value, sizeof(uint32_t), ptr, task, 0);
tools/testing/selftests/bpf/progs/rcu_read_lock.c
275
struct task_struct *task, *real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
278
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
281
real_parent = task->real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
294
struct task_struct *task, *real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
297
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
300
real_parent = task->real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
314
struct task_struct *task, *real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
317
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
321
real_parent = task->real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
334
struct task_struct *task, *group_leader;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
336
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
339
group_leader = task->real_parent->group_leader;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
348
struct task_struct *task, *real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
350
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
352
real_parent = task->real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
362
struct task_struct *task, *real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
365
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
367
real_parent = task->real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
43
struct task_struct *task;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
46
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
47
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/rcu_read_lock.c
52
cgroups = task->cgroups;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
64
struct task_struct *task, *real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
68
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
69
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/rcu_read_lock.c
74
real_parent = task->real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
93
struct task_struct *task, *real_parent;
tools/testing/selftests/bpf/progs/rcu_read_lock.c
96
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/rcu_read_lock.c
97
real_parent = task->real_parent;
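
The rcu_read_lock.c entries all circle one rule: task->real_parent is RCU-protected, so it may only be dereferenced inside a bpf_rcu_read_lock()/bpf_rcu_read_unlock() pair, and the pointer must not escape that section. A sketch (attach point illustrative; headers/license as above; the kfunc declarations normally come from bpf_experimental.h):

    void bpf_rcu_read_lock(void) __ksym;
    void bpf_rcu_read_unlock(void) __ksym;

    SEC("raw_tp/sys_enter")
    int rcu_parent_example(void *ctx)
    {
            struct task_struct *task, *real_parent;
            int ppid;

            task = bpf_get_current_task_btf();
            bpf_rcu_read_lock();
            real_parent = task->real_parent;    /* valid only inside the CS */
            ppid = real_parent->pid;
            bpf_rcu_read_unlock();
            return ppid;
    }
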
tools/testing/selftests/bpf/progs/strobemeta.h
512
static void *read_strobe_meta(struct task_struct *task,
tools/testing/selftests/bpf/progs/strobemeta.h
532
tls_base = (void *)task;
tools/testing/selftests/bpf/progs/strobemeta.h
601
struct task_struct *task;
tools/testing/selftests/bpf/progs/strobemeta.h
615
task = (struct task_struct *)bpf_get_current_task();
tools/testing/selftests/bpf/progs/strobemeta.h
616
sample_end = read_strobe_meta(task, &sample->metadata);
tools/testing/selftests/bpf/progs/struct_ops_assoc.c
28
struct task_struct *task;
tools/testing/selftests/bpf/progs/struct_ops_assoc.c
31
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/struct_ops_assoc.c
32
if (!test_pid || task->pid != test_pid)
tools/testing/selftests/bpf/progs/struct_ops_assoc.c
75
struct task_struct *task;
tools/testing/selftests/bpf/progs/struct_ops_assoc.c
78
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/struct_ops_assoc.c
79
if (!test_pid || task->pid != test_pid)
tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping1.c
29
struct task_struct *task;
tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping1.c
32
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping1.c
33
if (!test_pid || task->pid != test_pid)
tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping2.c
29
struct task_struct *task;
tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping2.c
32
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping2.c
33
if (!test_pid || task->pid != test_pid)
tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
16
struct task_struct *task, struct cgroup *cgrp)
tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
19
bpf_task_release(task);
tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
22
return task;
tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c
17
struct task_struct *task, struct cgroup *cgrp)
tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c
19
bpf_task_release(task);
tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c
18
struct task_struct *task, struct cgroup *cgrp)
tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c
22
bpf_task_release(task);
tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c
24
t = bpf_obj_new(typeof(*task));
tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c
17
struct task_struct *task, struct cgroup *cgrp)
tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c
19
return (struct task_struct *)&task->jobctl;
tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c
17
struct task_struct *task, struct cgroup *cgrp)
tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c
22
bpf_task_release(task);
tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
17
struct task_struct *task)
tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
19
if (task)
tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
20
tgid = task->tgid;
tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
13
struct task_struct *task)
tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
15
tgid = task->tgid;
tools/testing/selftests/bpf/progs/struct_ops_module.c
49
int (*test_maybe_null)(int dummy, struct task_struct *task);
tools/testing/selftests/bpf/progs/struct_ops_module.c
61
int (*test_maybe_null)(int dummy, struct task_struct *task);
tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
17
int BPF_PROG(refcounted, int dummy, struct task_struct *task)
tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
20
bpf_task_release(task);
tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
22
bpf_task_release(task);
tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c
12
struct task_struct *task = (struct task_struct *)ctx[1];
tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c
15
bpf_task_release(task);
tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c
29
struct task_struct *task = (struct task_struct *)ctx[1];
tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c
31
bpf_task_release(task);
tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__ref_leak.c
14
struct task_struct *task)
tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c
24
struct task_struct *task = (struct task_struct *)ctx[1];
tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c
26
bpf_task_release(task);
tools/testing/selftests/bpf/progs/task_kfunc_common.h
13
struct task_struct __kptr * task;
tools/testing/selftests/bpf/progs/task_kfunc_common.h
53
local.task = NULL;
tools/testing/selftests/bpf/progs/task_kfunc_common.h
68
old = bpf_kptr_xchg(&v->task, acquired);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
104
int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
119
int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
123
acquired = bpf_task_acquire(task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
133
int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
138
v = insert_lookup_task(task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
142
kptr = bpf_kptr_xchg(&v->task, NULL);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
153
int BPF_PROG(task_kfunc_acquire_release_no_null_check, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
157
acquired = bpf_task_acquire(task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
166
int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
170
v = insert_lookup_task(task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
175
bpf_task_release(v->task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
182
int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
19
static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *task)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
194
int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
201
status = bpf_probe_read_kernel(&pid, sizeof(pid), &task->pid);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
205
local.task = NULL;
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
214
acquired = bpf_task_acquire(task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
218
old = bpf_kptr_xchg(&v->task, acquired);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
228
int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
23
status = tasks_kfunc_map_insert(task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
231
bpf_task_release(task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
238
int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
242
acquired = bpf_task_from_pid(task->pid);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
252
int BPF_PROG(task_kfunc_from_vpid_no_null_check, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
256
acquired = bpf_task_from_vpid(task->pid);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
266
int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
27
return tasks_kfunc_map_value_lookup(task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
271
acquired = bpf_task_acquire(task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
281
int BPF_PROG(task_access_comm1, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
283
bpf_strncmp(task->comm, 17, "foo");
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
289
int BPF_PROG(task_access_comm2, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
291
bpf_strncmp(task->comm + 1, 16, "foo");
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
297
int BPF_PROG(task_access_comm3, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
299
bpf_probe_read_kernel(task->comm, 16, task->comm);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
305
int BPF_PROG(task_access_comm4, struct task_struct *task, const char *buf, bool exec)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
311
bpf_strncmp(task->comm, 16, "foo");
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
317
int BPF_PROG(task_kfunc_release_in_map, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
32
int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
322
if (tasks_kfunc_map_insert(task))
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
325
v = tasks_kfunc_map_value_lookup(task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
330
local = v->task;
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
37
v = insert_lookup_task(task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
42
acquired = bpf_task_acquire(v->task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
53
int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
69
int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
74
acquired = bpf_task_acquire(task);
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
84
int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe_rcu, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
89
if (!task) {
tools/testing/selftests/bpf/progs/task_kfunc_failure.c
94
acquired = bpf_task_acquire(task);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
111
int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_success.c
116
return test_acquire_release(task);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
120
int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_success.c
129
int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_success.c
136
status = tasks_kfunc_map_insert(task);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
144
int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_success.c
154
status = tasks_kfunc_map_insert(task);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
160
v = tasks_kfunc_map_value_lookup(task);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
166
kptr = bpf_kptr_xchg(&v->task, NULL);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
179
kptr = bpf_kptr_xchg(&local->task, kptr);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
187
kptr = bpf_kptr_xchg(&local->task, NULL);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
204
acquired = bpf_kptr_xchg(&local->task, acquired);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
228
int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_success.c
23
struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
tools/testing/selftests/bpf/progs/task_kfunc_success.c
237
status = tasks_kfunc_map_insert(task);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
243
v = tasks_kfunc_map_value_lookup(task);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
250
kptr = v->task;
tools/testing/selftests/bpf/progs/task_kfunc_success.c
266
int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_success.c
299
int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_success.c
304
lookup_compare_pid(task);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
309
int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_success.c
332
int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_success.c
337
bpf_strncmp(task->comm, 12, "foo");
tools/testing/selftests/bpf/progs/task_kfunc_success.c
338
bpf_strncmp(task->comm, 16, "foo");
tools/testing/selftests/bpf/progs/task_kfunc_success.c
339
bpf_strncmp(&task->comm[8], 4, "foo");
tools/testing/selftests/bpf/progs/task_kfunc_success.c
355
int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_success.c
360
acquired = bpf_task_acquire(task->group_leader);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
39
static int test_acquire_release(struct task_struct *task)
tools/testing/selftests/bpf/progs/task_kfunc_success.c
57
acquired = bpf_task_acquire(task);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
67
int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/task_kfunc_success.c
73
acquired = bpf_task_acquire___one(task);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
79
acquired = bpf_task_acquire___two(task, &fake_ctx);
tools/testing/selftests/bpf/progs/task_kfunc_success.c
97
int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)
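
The task_kfunc_success.c/task_kfunc_failure.c pair above pins down the reference-counting contract: bpf_task_acquire() may return NULL, and every acquired reference must be released exactly once; the failure tests are rejected for leaking, double-releasing, or releasing pointers they never acquired. The core pattern, sketched (headers/license as above; kfuncs declared __ksym as in the selftests):

    struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
    void bpf_task_release(struct task_struct *p) __ksym;

    SEC("tp_btf/task_newtask")
    int BPF_PROG(acquire_release_example, struct task_struct *task,
                 u64 clone_flags)
    {
            struct task_struct *acquired;

            acquired = bpf_task_acquire(task);
            if (!acquired)          /* acquire can fail; the check is mandatory */
                    return 0;
            /* ... use the reference ... */
            bpf_task_release(acquired);     /* exactly one release per acquire */
            return 0;
    }
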
tools/testing/selftests/bpf/progs/task_local_data.bpf.h
145
static int tld_object_init(struct task_struct *task, struct tld_object *tld_obj)
tools/testing/selftests/bpf/progs/task_local_data.bpf.h
149
tld_obj->data_map = bpf_task_storage_get(&tld_data_map, task, 0, 0);
tools/testing/selftests/bpf/progs/task_local_data.bpf.h
154
tld_obj->key_map = bpf_task_storage_get(&tld_key_map, task, 0,
tools/testing/selftests/bpf/progs/task_local_storage.c
27
struct task_struct *task;
tools/testing/selftests/bpf/progs/task_local_storage.c
30
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_local_storage.c
31
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/task_local_storage.c
34
ptr = bpf_task_storage_get(&enter_id, task, 0,
tools/testing/selftests/bpf/progs/task_local_storage.c
48
struct task_struct *task;
tools/testing/selftests/bpf/progs/task_local_storage.c
51
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_local_storage.c
52
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/task_local_storage.c
55
ptr = bpf_task_storage_get(&enter_id, task, 0,
tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c
22
int BPF_PROG(trace_exit_creds, struct task_struct *task)
tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c
26
ptr = bpf_task_storage_get(&task_storage, task, 0,
tools/testing/selftests/bpf/progs/task_ls_recursion.c
33
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_ls_recursion.c
36
if (!test_pid || task->pid != test_pid)
tools/testing/selftests/bpf/progs/task_ls_recursion.c
40
ptr = bpf_task_storage_get(&map_a, task, 0,
tools/testing/selftests/bpf/progs/task_ls_recursion.c
46
err = bpf_task_storage_delete(&map_a, task);
tools/testing/selftests/bpf/progs/task_ls_recursion.c
52
ptr = bpf_task_storage_get(&map_b, task, 0,
tools/testing/selftests/bpf/progs/task_ls_recursion.c
63
struct task_struct *task;
tools/testing/selftests/bpf/progs/task_ls_recursion.c
66
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_ls_recursion.c
67
if (!test_pid || task->pid != test_pid)
tools/testing/selftests/bpf/progs/task_ls_recursion.c
70
ptr = bpf_task_storage_get(&map_a, task, 0,
tools/testing/selftests/bpf/progs/task_ls_recursion.c
75
ptr = bpf_task_storage_get(&map_b, task, 0,
tools/testing/selftests/bpf/progs/task_ls_uptr.c
25
struct task_struct *task, *data_task;
tools/testing/selftests/bpf/progs/task_ls_uptr.c
30
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_ls_uptr.c
31
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c
25
struct task_struct *task;
tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c
32
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c
33
value = bpf_task_storage_get(&task_storage, task, &zero,
tools/testing/selftests/bpf/progs/task_work.c
104
bpf_task_work_schedule_resume(task, &work->tw, &lrumap, process_work);
tools/testing/selftests/bpf/progs/task_work.c
58
struct task_struct *task;
tools/testing/selftests/bpf/progs/task_work.c
61
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_work.c
68
bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work);
tools/testing/selftests/bpf/progs/task_work.c
76
struct task_struct *task;
tools/testing/selftests/bpf/progs/task_work.c
78
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_work.c
82
bpf_task_work_schedule_signal(task, &work->tw, &arrmap, process_work);
tools/testing/selftests/bpf/progs/task_work.c
91
struct task_struct *task;
tools/testing/selftests/bpf/progs/task_work.c
94
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_work_fail.c
50
struct task_struct *task;
tools/testing/selftests/bpf/progs/task_work_fail.c
52
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_work_fail.c
56
bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work);
tools/testing/selftests/bpf/progs/task_work_fail.c
64
struct task_struct *task;
tools/testing/selftests/bpf/progs/task_work_fail.c
67
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_work_fail.c
68
bpf_task_work_schedule_resume(task, &tw, &hmap, process_work);
tools/testing/selftests/bpf/progs/task_work_fail.c
76
struct task_struct *task;
tools/testing/selftests/bpf/progs/task_work_fail.c
78
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_work_fail.c
79
bpf_task_work_schedule_resume(task, NULL, &hmap, process_work);
tools/testing/selftests/bpf/progs/task_work_fail.c
88
struct task_struct *task;
tools/testing/selftests/bpf/progs/task_work_fail.c
90
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/task_work_fail.c
94
bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work);
tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c
13
struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c
20
struct task_struct *task;
tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c
26
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c
29
if (task->pid != target_pid)
tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c
32
cgrp = bpf_task_get_cgroup1(task, target_hid);
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
106
&out->comm, task,
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
53
struct task_struct *task = (void *)bpf_get_current_task();
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
62
if (CORE_READ(&pid, &task->pid) ||
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
63
CORE_READ(&tgid, &task->tgid))
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
70
out->valid[1] = BPF_CORE_READ(task,
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
72
out->valid[2] = BPF_CORE_READ(task,
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
75
out->valid[3] = BPF_CORE_READ(task,
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
78
out->valid[4] = BPF_CORE_READ(task,
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
81
out->valid[5] = BPF_CORE_READ(task,
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
85
out->valid[6] = BPF_CORE_READ(task,
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
89
out->valid[7] = BPF_CORE_READ(task,
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
93
out->valid[8] = BPF_CORE_READ(task,
tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
98
out->valid[9] = BPF_CORE_READ(task,
tools/testing/selftests/bpf/progs/test_core_reloc_module.c
40
struct task_struct *task,
tools/testing/selftests/bpf/progs/test_core_reloc_module.c
52
if (BPF_CORE_READ(task, pid) != real_pid || BPF_CORE_READ(task, tgid) != real_tgid)
tools/testing/selftests/bpf/progs/test_core_reloc_module.c
64
out->comm_len = BPF_CORE_READ_STR_INTO(&out->comm, task, comm);
tools/testing/selftests/bpf/progs/test_core_reloc_module.c
74
struct task_struct *task,
tools/testing/selftests/bpf/progs/test_core_reloc_module.c
86
if (task->pid != real_pid || task->tgid != real_tgid)
tools/testing/selftests/bpf/progs/test_core_reloc_module.c
98
out->comm_len = BPF_CORE_READ_STR_INTO(&out->comm, task, comm);
tools/testing/selftests/bpf/progs/test_core_retro.c
28
struct task_struct *task = (void *)bpf_get_current_task();
tools/testing/selftests/bpf/progs/test_core_retro.c
29
int tgid = BPF_CORE_READ(task, tgid);
tools/testing/selftests/bpf/progs/test_module_attach.c
14
struct task_struct *task, struct bpf_testmod_test_read_ctx *read_ctx)
tools/testing/selftests/bpf/progs/test_module_attach.c
24
struct task_struct *task, struct bpf_testmod_test_write_ctx *write_ctx)
tools/testing/selftests/bpf/progs/test_module_attach.c
48
struct task_struct *task, struct bpf_testmod_test_read_ctx *read_ctx)
tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c
12
int BPF_PROG(rename, struct task_struct *task, char *comm)
tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c
16
if ((__u64) task == 0x1234ULL && (__u64) comm == 0x5678ULL) {
tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c
18
return (long)task + (long)comm;
tools/testing/selftests/bpf/progs/test_send_signal_kern.c
9
int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type, u64 value) __ksym;
tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
55
struct task_struct *task;
tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
65
task = (struct task_struct *)bpf_get_current_task();
tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
66
bpf_core_read_str(&stg->comm, sizeof(stg->comm), &task->comm);
tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
67
bpf_core_read_str(&task_comm, sizeof(task_comm), &task->comm);
tools/testing/selftests/bpf/progs/test_skb_helpers.c
20
struct task_struct *task;
tools/testing/selftests/bpf/progs/test_skb_helpers.c
24
task = (struct task_struct *)bpf_get_current_task();
tools/testing/selftests/bpf/progs/test_skb_helpers.c
25
bpf_probe_read_kernel(&tpid , sizeof(tpid), &task->tgid);
tools/testing/selftests/bpf/progs/test_skb_helpers.c
26
bpf_probe_read_kernel_str(&comm, sizeof(comm), &task->comm);
tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c
29
struct task_struct *task = (struct task_struct *)bpf_get_current_task();
tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c
39
bpf_probe_read_kernel(&tpid , sizeof(tpid), &task->tgid);
tools/testing/selftests/bpf/progs/test_task_local_data.c
32
struct task_struct *task;
tools/testing/selftests/bpf/progs/test_task_local_data.c
35
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/test_task_local_data.c
36
err = tld_object_init(task, &tld_obj);
tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
11
long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;
tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
21
int BPF_PROG(tp_btf_run, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
29
acquired = bpf_task_acquire(task);
tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
55
struct task_struct *task;
tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
58
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
59
if (local_pid != task->pid)
tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
69
if (!bpf_task_under_cgroup(task, cgrp))
tools/testing/selftests/bpf/progs/type_cast.c
62
struct task_struct *task, *task_dup;
tools/testing/selftests/bpf/progs/type_cast.c
64
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/type_cast.c
65
task_dup = bpf_core_cast(task, struct task_struct);
tools/testing/selftests/bpf/progs/uptr_failure.c
21
struct task_struct *task;
tools/testing/selftests/bpf/progs/uptr_failure.c
24
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/uptr_failure.c
25
v = bpf_task_storage_get(&datamap, task, 0,
tools/testing/selftests/bpf/progs/uptr_failure.c
38
struct task_struct *task;
tools/testing/selftests/bpf/progs/uptr_failure.c
41
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/uptr_failure.c
42
v = bpf_task_storage_get(&datamap, task, 0,
tools/testing/selftests/bpf/progs/uptr_failure.c
55
struct task_struct *task;
tools/testing/selftests/bpf/progs/uptr_failure.c
58
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/uptr_failure.c
59
v = bpf_task_storage_get(&datamap, task, 0,
tools/testing/selftests/bpf/progs/uptr_failure.c
73
struct task_struct *task;
tools/testing/selftests/bpf/progs/uptr_failure.c
76
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/uptr_failure.c
77
v = bpf_task_storage_get(&datamap, task, 0,
tools/testing/selftests/bpf/progs/uptr_update_failure.c
20
struct task_struct *task;
tools/testing/selftests/bpf/progs/uptr_update_failure.c
23
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/uptr_update_failure.c
24
ptr = bpf_task_storage_get(&datamap, task, 0, 0);
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
148
struct task_struct *task;
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
155
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
156
if (!task)
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
159
bpf_task_work_schedule_resume(task, &val->tw, &task_work_map, task_work_cb);
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
168
struct task_struct *task;
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
175
task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
176
if (!task)
tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
179
bpf_task_work_schedule_resume(task, &val->tw, &task_work_map, task_work_cb);
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
108
struct task_struct___local *task __arg_trusted __arg_nullable)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
112
if (!task)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
115
return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0);
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
129
__weak int subprog_nonnull_task_flavor(struct task_struct___local *task __arg_trusted)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
133
return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0);
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
147
__weak int subprog_trusted_destroy(struct task_struct *task __arg_trusted)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
149
bpf_task_release(task); /* should be rejected */
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
15
__weak int subprog_trusted_task_nullable(struct task_struct *task __arg_trusted __arg_nullable)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
157
int BPF_PROG(trusted_destroy_fail, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
159
return subprog_trusted_destroy(task);
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
162
__weak int subprog_trusted_acq_rel(struct task_struct *task __arg_trusted)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
166
owned = bpf_task_acquire(task);
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
17
if (!task)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
177
int BPF_PROG(trusted_acq_rel, struct task_struct *task, u64 clone_flags)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
179
return subprog_trusted_acq_rel(task);
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
182
__weak int subprog_untrusted_bad_tags(struct task_struct *task __arg_untrusted __arg_nullable)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
184
return task->pid;
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
19
return task->pid + task->tgid;
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
210
__weak int subprog_untrusted(const volatile struct task_struct *restrict task __arg_untrusted)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
212
return task->pid;
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
22
__weak int subprog_trusted_task_nullable_extra_layer(struct task_struct *task __arg_trusted __arg_nullable)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
24
return subprog_trusted_task_nullable(task) + subprog_trusted_task_nullable(NULL);
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
249
__weak int subprog_untrusted2(struct task_struct *task __arg_untrusted)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
251
return subprog_trusted_task_nullable(task);
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
59
__weak int subprog_trusted_task_nonnull(struct task_struct *task __arg_trusted)
tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
61
return task->pid + task->tgid;
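
verifier_global_ptr_args.c exercises the decl-tag annotations for global subprogram arguments: __arg_trusted promises the caller passes a trusted pointer, __arg_nullable additionally allows NULL (forcing a check), and __arg_untrusted accepts anything but only permits probe-read-style access. A sketch, assuming the btf_decl_tag expansions the selftests' bpf_misc.h uses:

    /* assumed expansions, per the selftests' bpf_misc.h:
     *   #define __arg_trusted  __attribute__((btf_decl_tag("arg:trusted")))
     *   #define __arg_nullable __attribute__((btf_decl_tag("arg:nullable")))
     */
    __weak int subprog_example(struct task_struct *task
                               __arg_trusted __arg_nullable)
    {
            if (!task)      /* nullable: must be checked before any deref */
                    return 0;
            return task->pid + task->tgid;
    }
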
tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
149
static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data)
tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
158
struct task_struct *task = bpf_get_current_task_btf();
tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
161
bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0);
tools/testing/selftests/bpf/progs/verifier_vfs_accept.c
45
struct task_struct *task)
tools/testing/selftests/bpf/progs/verifier_vfs_accept.c
49
acquired = bpf_get_task_exe_file(task);
tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
102
int BPF_PROG(path_d_path_kfunc_untrusted_from_argument, struct task_struct *task)
tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
109
root = &task->fs->root;
tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
36
struct task_struct *task;
tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
38
task = (struct task_struct *)&x;
tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
40
acquired = bpf_get_task_exe_file(task);
tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h
13
TP_PROTO(struct task_struct *task, struct bpf_testmod_test_read_ctx *ctx),
tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h
14
TP_ARGS(task, ctx),
tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h
22
__entry->pid = task->pid;
tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h
23
memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h
33
TP_PROTO(struct task_struct *task, struct bpf_testmod_test_write_ctx *ctx),
tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h
34
TP_ARGS(task, ctx)
tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
38
int (*test_maybe_null)(int dummy, struct task_struct *task);
tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
41
int (*test_refcounted)(int dummy, struct task_struct *task);
tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
43
struct task_struct *(*test_return_ref_kptr)(int dummy, struct task_struct *task,
tools/testing/selftests/bpf/test_maps.c
129
static void test_hashmap_sizes(unsigned int task, void *data)
tools/testing/selftests/bpf/test_maps.c
1352
void (*fn)(unsigned int task, void *data),
tools/testing/selftests/bpf/test_maps.c
148
static void test_hashmap_percpu(unsigned int task, void *data)
tools/testing/selftests/bpf/test_maps.c
286
static void test_hashmap_walk(unsigned int task, void *data)
tools/testing/selftests/bpf/test_maps.c
33
static void test_hashmap(unsigned int task, void *data)
tools/testing/selftests/bpf/test_maps.c
357
static void test_arraymap(unsigned int task, void *data)
tools/testing/selftests/bpf/test_maps.c
411
static void test_arraymap_percpu(unsigned int task, void *data)
tools/testing/selftests/bpf/test_maps.c
507
static void test_devmap(unsigned int task, void *data)
tools/testing/selftests/bpf/test_maps.c
521
static void test_devmap_hash(unsigned int task, void *data)
tools/testing/selftests/bpf/test_maps.c
535
static void test_queuemap(unsigned int task, void *data)
tools/testing/selftests/bpf/test_maps.c
591
static void test_stackmap(unsigned int task, void *data)
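
In the test_maps.c signatures above, `task` is just a worker index handed out by the harness, not a task_struct. A hedged reconstruction of the fork-based runner those signatures imply (plain C; the helper name is an assumption, the callback parameter it matches shows up at line 1352):

    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static void run_parallel(unsigned int tasks,
                             void (*fn)(unsigned int task, void *data),
                             void *data)
    {
            pid_t pid[tasks];
            unsigned int i;

            for (i = 0; i < tasks; i++) {
                    pid[i] = fork();
                    if (pid[i] == 0) {
                            fn(i, data);    /* child runs as worker `task` i */
                            exit(0);
                    }
            }
            for (i = 0; i < tasks; i++)
                    waitpid(pid[i], NULL, 0);
    }
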
tools/testing/selftests/kvm/include/kvm_util.h
1089
int __pin_task_to_cpu(pthread_t task, int cpu);
tools/testing/selftests/kvm/include/kvm_util.h
1091
static inline void pin_task_to_cpu(pthread_t task, int cpu)
tools/testing/selftests/kvm/include/kvm_util.h
1095
r = __pin_task_to_cpu(task, cpu);
tools/testing/selftests/kvm/include/kvm_util.h
1099
static inline int pin_task_to_any_cpu(pthread_t task)
tools/testing/selftests/kvm/include/kvm_util.h
1103
pin_task_to_cpu(task, cpu);
tools/testing/selftests/kvm/lib/kvm_util.c
629
int __pin_task_to_cpu(pthread_t task, int cpu)
tools/testing/selftests/kvm/lib/kvm_util.c
636
return pthread_setaffinity_np(task, sizeof(cpuset), &cpuset);
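
Here `task` is a pthread_t: __pin_task_to_cpu() (kvm_util.c:629) builds a single-CPU cpuset and hands it to pthread_setaffinity_np(), which returns 0 or a positive errno; the inline wrappers in kvm_util.h assert on failure. Reconstructed from the lines above (the CPU_ZERO/CPU_SET body is the standard idiom, assumed rather than quoted):

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>

    int __pin_task_to_cpu(pthread_t task, int cpu)
    {
            cpu_set_t cpuset;

            CPU_ZERO(&cpuset);
            CPU_SET(cpu, &cpuset);
            /* 0 on success, positive errno on failure */
            return pthread_setaffinity_np(task, sizeof(cpuset), &cpuset);
    }
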
tools/testing/selftests/net/bench/page_pool/time_bench.c
362
c->task = kthread_run(invoke_test_on_cpu_func, c,
tools/testing/selftests/net/bench/page_pool/time_bench.c
364
if (IS_ERR(c->task)) {
tools/testing/selftests/net/bench/page_pool/time_bench.c
388
kthread_stop(c->task);
tools/testing/selftests/net/bench/page_pool/time_bench.h
66
struct task_struct *task;
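
time_bench.c drives each benchmark from a kthread: kthread_run() returns the task_struct (or an ERR_PTR on failure, hence the IS_ERR() check at line 364) and kthread_stop() at line 388 joins it. A kernel-side sketch of that lifecycle; the ctx struct is a hypothetical stand-in for whatever holds the ->task member at time_bench.h:66:

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    struct bench_ctx {                      /* hypothetical container */
            struct task_struct *task;
    };

    static int bench_thread_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    schedule();             /* benchmark body elided */
            }
            __set_current_state(TASK_RUNNING);
            return 0;
    }

    static int bench_start_stop(struct bench_ctx *c, int cpu)
    {
            c->task = kthread_run(bench_thread_fn, c, "time_bench/%d", cpu);
            if (IS_ERR(c->task))
                    return PTR_ERR(c->task);

            kthread_stop(c->task);          /* blocks until the thread exits */
            return 0;
    }
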
tools/testing/selftests/prctl/disable-tsc-on-off-stress-test.c
90
task();
tools/testing/selftests/sched_ext/peek_dsq.bpf.c
83
struct task_struct *task;
tools/testing/selftests/sched_ext/peek_dsq.bpf.c
92
task = __COMPAT_scx_bpf_dsq_peek(dsq_id);
tools/testing/selftests/sched_ext/peek_dsq.bpf.c
93
if (task) {
tools/testing/selftests/sched_ext/peek_dsq.bpf.c
95
record_peek_result(task->pid);
tools/verification/rvgen/rvgen/templates/ltl2k/main.c
26
static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon)
tools/verification/rvgen/rvgen/templates/ltl2k/main.c
39
static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation)
tools/verification/rvgen/rvgen/templates/ltl2k/trace.h
10
TP_ARGS(task, states, atoms, next));
tools/verification/rvgen/rvgen/templates/ltl2k/trace.h
12
TP_PROTO(struct task_struct *task),
tools/verification/rvgen/rvgen/templates/ltl2k/trace.h
13
TP_ARGS(task));
tools/verification/rvgen/rvgen/templates/ltl2k/trace.h
9
TP_PROTO(struct task_struct *task, char *states, char *atoms, char *next),
virt/kvm/kvm_main.c
3867
struct task_struct *task = NULL;
virt/kvm/kvm_main.c
3874
task = get_pid_task(target->pid, PIDTYPE_PID);
virt/kvm/kvm_main.c
3878
if (!task)
virt/kvm/kvm_main.c
3880
ret = yield_to(task, 1);
virt/kvm/kvm_main.c
3881
put_task_struct(task);
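
The closing kvm_main.c entries are KVM's directed yield: resolve the target vCPU's pid to a task (taking a reference), yield_to() it with the preempt flag set as in the call above, and drop the reference. Condensed from those lines into a standalone kernel-side sketch (function name assumed):

    #include <linux/pid.h>
    #include <linux/sched.h>
    #include <linux/sched/task.h>

    static int yield_to_vcpu_task(struct pid *pid)
    {
            struct task_struct *task;
            int ret;

            task = get_pid_task(pid, PIDTYPE_PID); /* takes a task reference */
            if (!task)
                    return 0;

            ret = yield_to(task, 1);        /* preempt flag, as in the call above */
            put_task_struct(task);          /* balances get_pid_task() */
            return ret;
    }
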