arch/arc/include/asm/dsp-impl.h
100
long unsigned int *saveto = &prev->thread.dsp.ACC0_GLO;
arch/arc/include/asm/dsp-impl.h
101
long unsigned int *readfrom = &next->thread.dsp.ACC0_GLO;
arch/arc/include/asm/unwind.h
64
#define STACK_BOTTOM_UNW(tsk) STACK_LIMIT((tsk)->thread.ksp)
arch/arc/include/asm/unwind.h
65
#define STACK_TOP_UNW(tsk) ((tsk)->thread.ksp)
arch/arc/kernel/asm-offsets.c
19
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
arch/arc/kernel/fpu.c
34
unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l;
arch/arc/kernel/fpu.c
35
unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l;
arch/arc/kernel/fpu.c
71
struct arc_fpu *save = &prev->thread.fpu;
arch/arc/kernel/fpu.c
72
struct arc_fpu *restore = &next->thread.fpu;
arch/arc/kernel/kgdb.c
114
current->thread.callee_reg,
arch/arc/kernel/kgdb.c
60
current->thread.callee_reg);
arch/arc/kernel/kgdb.c
66
current->thread.callee_reg);
arch/arc/kernel/kgdb.c
74
(struct callee_regs *) task->thread.callee_reg);
arch/arc/kernel/kprobes.c
171
(struct callee_regs *) current->thread.callee_reg,
arch/arc/kernel/ptrace.c
148
membuf_store(&to, target->thread.fault_address); // efa
arch/arc/kernel/ptrace.c
151
stop_pc_val = target->thread.fault_address;
arch/arc/kernel/ptrace.c
98
struct callee_regs *tmp = (struct callee_regs *)tsk->thread.callee_reg;
arch/arc/kernel/traps.c
45
tsk->thread.fault_address = (__force unsigned int)addr;
arch/arc/kernel/troubleshoot.c
118
address = current->thread.fault_address;
arch/arc/kernel/troubleshoot.c
173
struct callee_regs *cregs = (struct callee_regs *)tsk->thread.callee_reg;
arch/arc/kernel/troubleshoot.c
190
regs->ecr.full, current->thread.fault_address, regs->ret);
arch/arc/kernel/troubleshoot.c
219
current->thread.fault_address = address;
arch/arc/mm/fault.c
184
tsk->thread.fault_address = address;
arch/arm/include/asm/thread_notify.h
29
static inline void thread_notify(unsigned long rc, struct thread_info *thread)
arch/arm/include/asm/thread_notify.h
32
atomic_notifier_call_chain(&thread_notify_head, rc, thread);
arch/arm/include/asm/tls.h
75
struct thread_info *thread;
arch/arm/include/asm/tls.h
77
thread = current_thread_info();
arch/arm/include/asm/tls.h
79
thread->tp_value[0] = val;
arch/arm/kernel/process.c
220
struct thread_info *thread = current_thread_info();
arch/arm/kernel/process.c
225
memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
arch/arm/kernel/process.c
226
memset(&thread->fpstate, 0, sizeof(union fp_state));
arch/arm/kernel/process.c
230
thread_notify(THREAD_NOTIFY_FLUSH, thread);
arch/arm/kernel/process.c
240
struct thread_info *thread = task_thread_info(p);
arch/arm/kernel/process.c
243
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
arch/arm/kernel/process.c
252
thread->cpu_domain = get_domain();
arch/arm/kernel/process.c
262
thread->cpu_context.r4 = (unsigned long)args->fn_arg;
arch/arm/kernel/process.c
263
thread->cpu_context.r5 = (unsigned long)args->fn;
arch/arm/kernel/process.c
266
thread->cpu_context.pc = (unsigned long)ret_from_fork;
arch/arm/kernel/process.c
267
thread->cpu_context.sp = (unsigned long)childregs;
arch/arm/kernel/process.c
272
thread->tp_value[0] = tls;
arch/arm/kernel/process.c
273
thread->tp_value[1] = get_tpuser();
arch/arm/kernel/process.c
275
thread_notify(THREAD_NOTIFY_COPY, thread);
arch/arm/kernel/ptrace.c
296
struct thread_info *thread = task_thread_info(tsk);
arch/arm/kernel/ptrace.c
298
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
arch/arm/kernel/ptrace.c
300
iwmmxt_task_disable(thread); /* force it to ram */
arch/arm/kernel/ptrace.c
301
return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
arch/arm/kernel/ptrace.c
310
struct thread_info *thread = task_thread_info(tsk);
arch/arm/kernel/ptrace.c
312
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
arch/arm/kernel/ptrace.c
314
iwmmxt_task_release(thread); /* force a reload */
arch/arm/kernel/ptrace.c
315
return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
arch/arm/kernel/ptrace.c
359
if (current->thread.debug.hbp[i] == bp)
arch/arm/kernel/ptrace.c
374
memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
arch/arm/kernel/ptrace.c
384
struct thread_struct *t = &tsk->thread;
arch/arm/kernel/ptrace.c
448
bp = tsk->thread.debug.hbp[idx];
arch/arm/kernel/ptrace.c
504
bp = tsk->thread.debug.hbp[idx];
arch/arm/kernel/ptrace.c
511
tsk->thread.debug.hbp[idx] = bp;
arch/arm/kernel/ptrace.c
585
struct thread_info *thread = task_thread_info(target);
arch/arm/kernel/ptrace.c
588
&thread->fpstate,
arch/arm/kernel/ptrace.c
615
struct thread_info *thread = task_thread_info(target);
arch/arm/kernel/ptrace.c
616
struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
arch/arm/kernel/ptrace.c
619
vfp_sync_hwstate(thread);
arch/arm/kernel/ptrace.c
637
struct thread_info *thread = task_thread_info(target);
arch/arm/kernel/ptrace.c
642
vfp_sync_hwstate(thread);
arch/arm/kernel/ptrace.c
643
new_vfp = thread->vfpstate.hard;
arch/arm/kernel/ptrace.c
663
thread->vfpstate.hard = new_vfp;
arch/arm/kernel/ptrace.c
664
vfp_flush_hwstate(thread);
arch/arm/kernel/signal.c
276
.trap_no = current->thread.trap_no,
arch/arm/kernel/signal.c
277
.error_code = current->thread.error_code,
arch/arm/kernel/signal.c
278
.fault_address = current->thread.address,
arch/arm/kernel/thumbee.c
32
struct thread_info *thread = t;
arch/arm/kernel/thumbee.c
40
teehbr_write(thread->thumbee_state);
arch/arm/kernel/traps.c
282
ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
arch/arm/kernel/traps.c
378
current->thread.error_code = err;
arch/arm/kernel/traps.c
379
current->thread.trap_no = trap;
arch/arm/kernel/xscale-cp0.c
35
struct thread_info *thread = t;
arch/arm/kernel/xscale-cp0.c
39
thread->cpu_context.extra[0] = 0;
arch/arm/kernel/xscale-cp0.c
40
thread->cpu_context.extra[1] = 0;
arch/arm/kernel/xscale-cp0.c
45
dsp_load_state(thread->cpu_context.extra);
arch/arm/kernel/xscale-cp0.c
60
struct thread_info *thread = t;
arch/arm/kernel/xscale-cp0.c
73
iwmmxt_task_release(thread);
arch/arm/kernel/xscale-cp0.c
77
iwmmxt_task_switch(thread);
arch/arm/mm/context.c
107
struct thread_info *thread = t;
arch/arm/mm/context.c
112
pid = task_pid_nr(thread_task(thread)) << ASID_BITS;
arch/arm/mm/fault.c
218
tsk->thread.address = addr;
arch/arm/mm/fault.c
219
tsk->thread.error_code = fsr;
arch/arm/mm/fault.c
220
tsk->thread.trap_no = 14;
arch/arm/nwfpe/fpmodule.c
50
struct thread_info *thread = v;
arch/arm/nwfpe/fpmodule.c
53
nwfpe_init_fpa(&thread->fpstate);
arch/arm/probes/uprobes/core.c
140
utask->autask.saved_trap_no = current->thread.trap_no;
arch/arm/probes/uprobes/core.c
141
current->thread.trap_no = UPROBE_TRAP_NR;
arch/arm/probes/uprobes/core.c
151
WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);
arch/arm/probes/uprobes/core.c
153
current->thread.trap_no = utask->autask.saved_trap_no;
arch/arm/probes/uprobes/core.c
164
if (t->thread.trap_no != UPROBE_TRAP_NR)
arch/arm/probes/uprobes/core.c
174
current->thread.trap_no = utask->autask.saved_trap_no;
arch/arm/vfp/vfpmodule.c
104
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
arch/arm/vfp/vfpmodule.c
106
if (vfp_state_in_hw(cpu, thread)) {
arch/arm/vfp/vfpmodule.c
111
thread->vfpstate.hard.cpu = NR_CPUS;
arch/arm/vfp/vfpmodule.c
118
static void vfp_thread_flush(struct thread_info *thread)
arch/arm/vfp/vfpmodule.c
120
union vfp_state *vfp = &thread->vfpstate;
arch/arm/vfp/vfpmodule.c
146
static void vfp_thread_exit(struct thread_info *thread)
arch/arm/vfp/vfpmodule.c
149
union vfp_state *vfp = &thread->vfpstate;
arch/arm/vfp/vfpmodule.c
157
static void vfp_thread_copy(struct thread_info *thread)
arch/arm/vfp/vfpmodule.c
162
thread->vfpstate = parent->vfpstate;
arch/arm/vfp/vfpmodule.c
164
thread->vfpstate.hard.cpu = NR_CPUS;
arch/arm/vfp/vfpmodule.c
189
struct thread_info *thread = v;
arch/arm/vfp/vfpmodule.c
200
cpu = thread->cpu;
arch/arm/vfp/vfpmodule.c
219
vfp_thread_flush(thread);
arch/arm/vfp/vfpmodule.c
223
vfp_thread_exit(thread);
arch/arm/vfp/vfpmodule.c
227
vfp_thread_copy(thread);
arch/arm/vfp/vfpmodule.c
248
current->thread.error_code = 0;
arch/arm/vfp/vfpmodule.c
249
current->thread.trap_no = 6;
arch/arm/vfp/vfpmodule.c
547
void vfp_sync_hwstate(struct thread_info *thread)
arch/arm/vfp/vfpmodule.c
551
if (vfp_state_in_hw(raw_smp_processor_id(), thread)) {
arch/arm/vfp/vfpmodule.c
558
vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
arch/arm/vfp/vfpmodule.c
566
void vfp_flush_hwstate(struct thread_info *thread)
arch/arm/vfp/vfpmodule.c
570
vfp_force_reload(cpu, thread);
arch/arm/vfp/vfpmodule.c
582
struct thread_info *thread = current_thread_info();
arch/arm/vfp/vfpmodule.c
583
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
arch/arm/vfp/vfpmodule.c
586
vfp_sync_hwstate(thread);
arch/arm/vfp/vfpmodule.c
607
vfp_flush_hwstate(thread);
arch/arm/vfp/vfpmodule.c
620
struct thread_info *thread = current_thread_info();
arch/arm/vfp/vfpmodule.c
621
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
arch/arm/vfp/vfpmodule.c
625
vfp_flush_hwstate(thread);
arch/arm/vfp/vfpmodule.c
868
struct thread_info *thread = current_thread_info();
arch/arm/vfp/vfpmodule.c
890
if (vfp_state_in_hw(cpu, thread))
arch/arm/vfp/vfpmodule.c
891
vfp_save_state(&thread->vfpstate, fpexc);
arch/arm/vfp/vfpmodule.c
90
static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
arch/arm/vfp/vfpmodule.c
93
if (thread->vfpstate.hard.cpu != cpu)
arch/arm/vfp/vfpmodule.c
96
return vfp_current_hw_state[cpu] == &thread->vfpstate;
arch/arm64/include/asm/compat.h
103
static inline int is_compat_thread(struct thread_info *thread)
arch/arm64/include/asm/compat.h
94
static inline int is_compat_thread(struct thread_info *thread)
arch/arm64/include/asm/compat.h
96
return test_ti_thread_flag(thread, TIF_32BIT);
arch/arm64/include/asm/fpsimd.h
104
static inline bool thread_sm_enabled(struct thread_struct *thread)
arch/arm64/include/asm/fpsimd.h
106
return system_supports_sme() && (thread->svcr & SVCR_SM_MASK);
arch/arm64/include/asm/fpsimd.h
109
static inline bool thread_za_enabled(struct thread_struct *thread)
arch/arm64/include/asm/fpsimd.h
111
return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK);
arch/arm64/include/asm/fpsimd.h
125
static inline void *sve_pffr(struct thread_struct *thread)
arch/arm64/include/asm/fpsimd.h
129
if (system_supports_sme() && thread_sm_enabled(thread))
arch/arm64/include/asm/fpsimd.h
130
vl = thread_get_sme_vl(thread);
arch/arm64/include/asm/fpsimd.h
132
vl = thread_get_sve_vl(thread);
arch/arm64/include/asm/fpsimd.h
134
return (char *)thread->sve_state + sve_ffr_offset(vl);
arch/arm64/include/asm/fpsimd.h
137
static inline void *thread_zt_state(struct thread_struct *thread)
arch/arm64/include/asm/fpsimd.h
140
unsigned int sme_vq = sve_vq_from_vl(thread_get_sme_vl(thread));
arch/arm64/include/asm/fpsimd.h
141
return thread->sme_state + ZA_SIG_REGS_SIZE(sme_vq);
arch/arm64/include/asm/gcs.h
61
return task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
arch/arm64/include/asm/gcs.h
73
unsigned long cur_val = task->thread.gcs_el0_mode;
arch/arm64/include/asm/gcs.h
75
cur_val &= task->thread.gcs_el0_locked;
arch/arm64/include/asm/gcs.h
76
new_val &= task->thread.gcs_el0_locked;
arch/arm64/include/asm/mte.h
243
(task->thread.sctlr_user & (1UL << SCTLR_EL1_TCF0_SHIFT)))
arch/arm64/include/asm/pointer_auth.h
117
ptrauth_keys_install_user(&current->thread.keys_user)
arch/arm64/include/asm/pointer_auth.h
121
ptrauth_keys_init_user(&current->thread.keys_user); \
arch/arm64/include/asm/pointer_auth.h
131
ptrauth_keys_install_user(&(tsk)->thread.keys_user)
arch/arm64/include/asm/pointer_auth.h
145
ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
arch/arm64/include/asm/pointer_auth.h
147
ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
arch/arm64/include/asm/processor.h
204
static inline unsigned int thread_get_vl(struct thread_struct *thread,
arch/arm64/include/asm/processor.h
207
return thread->vl[type];
arch/arm64/include/asm/processor.h
210
static inline unsigned int thread_get_sve_vl(struct thread_struct *thread)
arch/arm64/include/asm/processor.h
212
return thread_get_vl(thread, ARM64_VEC_SVE);
arch/arm64/include/asm/processor.h
215
static inline unsigned int thread_get_sme_vl(struct thread_struct *thread)
arch/arm64/include/asm/processor.h
217
return thread_get_vl(thread, ARM64_VEC_SME);
arch/arm64/include/asm/processor.h
220
static inline unsigned int thread_get_cur_vl(struct thread_struct *thread)
arch/arm64/include/asm/processor.h
222
if (system_supports_sme() && (thread->svcr & SVCR_SM_MASK))
arch/arm64/include/asm/processor.h
223
return thread_get_sme_vl(thread);
arch/arm64/include/asm/processor.h
225
return thread_get_sve_vl(thread);
arch/arm64/include/asm/processor.h
286
__tls = &(t)->thread.uw.tp2_value; \
arch/arm64/include/asm/processor.h
288
__tls = &(t)->thread.uw.tp_value; \
arch/arm64/include/asm/processor.h
292
#define task_user_tls(t) (&(t)->thread.uw.tp_value)
arch/arm64/include/asm/thread_info.h
49
((unsigned long)(tsk->thread.cpu_context.pc))
arch/arm64/include/asm/thread_info.h
51
((unsigned long)(tsk->thread.cpu_context.sp))
arch/arm64/include/asm/thread_info.h
53
((unsigned long)(tsk->thread.cpu_context.fp))
arch/arm64/kernel/asm-offsets.c
44
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
arch/arm64/kernel/asm-offsets.c
45
DEFINE(THREAD_SCTLR_USER, offsetof(struct task_struct, thread.sctlr_user));
arch/arm64/kernel/asm-offsets.c
47
DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
arch/arm64/kernel/asm-offsets.c
50
DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
arch/arm64/kernel/asm-offsets.c
53
DEFINE(THREAD_MTE_CTRL, offsetof(struct task_struct, thread.mte_ctrl));
arch/arm64/kernel/fpsimd.c
1178
if (task->thread.sme_state) {
arch/arm64/kernel/fpsimd.c
1180
memset(task->thread.sme_state, 0,
arch/arm64/kernel/fpsimd.c
1186
task->thread.sme_state =
arch/arm64/kernel/fpsimd.c
1192
kfree(task->thread.sme_state);
arch/arm64/kernel/fpsimd.c
1193
task->thread.sme_state = NULL;
arch/arm64/kernel/fpsimd.c
1314
current->thread.fp_type = FP_STATE_SVE;
arch/arm64/kernel/fpsimd.c
1339
if (!current->thread.sve_state) {
arch/arm64/kernel/fpsimd.c
1391
if (!current->thread.sve_state || !current->thread.sme_state) {
arch/arm64/kernel/fpsimd.c
1466
if (last->st == task->thread.kernel_fpsimd_state &&
arch/arm64/kernel/fpsimd.c
1467
task->thread.kernel_fpsimd_cpu == smp_processor_id())
arch/arm64/kernel/fpsimd.c
1470
fpsimd_load_state(task->thread.kernel_fpsimd_state);
arch/arm64/kernel/fpsimd.c
1476
.st = task->thread.kernel_fpsimd_state,
arch/arm64/kernel/fpsimd.c
1482
fpsimd_save_state(task->thread.kernel_fpsimd_state);
arch/arm64/kernel/fpsimd.c
1485
task->thread.kernel_fpsimd_cpu = smp_processor_id();
arch/arm64/kernel/fpsimd.c
1535
&next->thread.uw.fpsimd_state;
arch/arm64/kernel/fpsimd.c
1536
wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();
arch/arm64/kernel/fpsimd.c
1589
memset(&current->thread.uw.fpsimd_state, 0,
arch/arm64/kernel/fpsimd.c
1590
sizeof(current->thread.uw.fpsimd_state));
arch/arm64/kernel/fpsimd.c
1596
sve_state = current->thread.sve_state;
arch/arm64/kernel/fpsimd.c
1597
current->thread.sve_state = NULL;
arch/arm64/kernel/fpsimd.c
1606
sme_state = current->thread.sme_state;
arch/arm64/kernel/fpsimd.c
1607
current->thread.sme_state = NULL;
arch/arm64/kernel/fpsimd.c
1610
current->thread.svcr = 0;
arch/arm64/kernel/fpsimd.c
1614
current->thread.uw.fpmr = 0;
arch/arm64/kernel/fpsimd.c
1616
current->thread.fp_type = FP_STATE_FPSIMD;
arch/arm64/kernel/fpsimd.c
1647
last->st = &current->thread.uw.fpsimd_state;
arch/arm64/kernel/fpsimd.c
1648
last->sve_state = current->thread.sve_state;
arch/arm64/kernel/fpsimd.c
1649
last->sme_state = current->thread.sme_state;
arch/arm64/kernel/fpsimd.c
1652
last->svcr = &current->thread.svcr;
arch/arm64/kernel/fpsimd.c
1653
last->fpmr = &current->thread.uw.fpmr;
arch/arm64/kernel/fpsimd.c
1654
last->fp_type = &current->thread.fp_type;
arch/arm64/kernel/fpsimd.c
1656
current->thread.fpsimd_cpu = smp_processor_id();
arch/arm64/kernel/fpsimd.c
1734
current->thread.uw.fpsimd_state = *state;
arch/arm64/kernel/fpsimd.c
1735
if (current->thread.fp_type == FP_STATE_SVE)
arch/arm64/kernel/fpsimd.c
1752
t->thread.fpsimd_cpu = NR_CPUS;
arch/arm64/kernel/fpsimd.c
1753
t->thread.kernel_fpsimd_state = NULL;
arch/arm64/kernel/fpsimd.c
1860
WARN_ON(current->thread.kernel_fpsimd_state != NULL);
arch/arm64/kernel/fpsimd.c
1861
current->thread.kernel_fpsimd_state = state;
arch/arm64/kernel/fpsimd.c
1902
WARN_ON(current->thread.kernel_fpsimd_state != state);
arch/arm64/kernel/fpsimd.c
1903
current->thread.kernel_fpsimd_state = NULL;
arch/arm64/kernel/fpsimd.c
257
return task->thread.vl[type];
arch/arm64/kernel/fpsimd.c
263
task->thread.vl[type] = vl;
arch/arm64/kernel/fpsimd.c
269
return task->thread.vl_onexec[type];
arch/arm64/kernel/fpsimd.c
275
task->thread.vl_onexec[type] = vl;
arch/arm64/kernel/fpsimd.c
369
switch (current->thread.fp_type) {
arch/arm64/kernel/fpsimd.c
375
if (!thread_sm_enabled(&current->thread))
arch/arm64/kernel/fpsimd.c
407
write_sysreg_s(current->thread.svcr, SYS_SVCR);
arch/arm64/kernel/fpsimd.c
409
if (thread_za_enabled(&current->thread))
arch/arm64/kernel/fpsimd.c
410
sme_load_state(current->thread.sme_state,
arch/arm64/kernel/fpsimd.c
413
if (thread_sm_enabled(&current->thread))
arch/arm64/kernel/fpsimd.c
418
write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR);
arch/arm64/kernel/fpsimd.c
421
WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE);
arch/arm64/kernel/fpsimd.c
422
sve_load_state(sve_pffr(&current->thread),
arch/arm64/kernel/fpsimd.c
423
&current->thread.uw.fpsimd_state.fpsr,
arch/arm64/kernel/fpsimd.c
426
WARN_ON_ONCE(current->thread.fp_type != FP_STATE_FPSIMD);
arch/arm64/kernel/fpsimd.c
427
fpsimd_load_state(¤t->thread.uw.fpsimd_state);
arch/arm64/kernel/fpsimd.c
664
void *sst = task->thread.sve_state;
arch/arm64/kernel/fpsimd.c
665
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
arch/arm64/kernel/fpsimd.c
670
vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
arch/arm64/kernel/fpsimd.c
688
void const *sst = task->thread.sve_state;
arch/arm64/kernel/fpsimd.c
689
struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
arch/arm64/kernel/fpsimd.c
696
vl = thread_get_cur_vl(&task->thread);
arch/arm64/kernel/fpsimd.c
714
if (!thread_sm_enabled(&task->thread))
arch/arm64/kernel/fpsimd.c
717
__fpsimd_zero_vregs(&task->thread.uw.fpsimd_state);
arch/arm64/kernel/fpsimd.c
718
task->thread.uw.fpsimd_state.fpsr = 0x0800009f;
arch/arm64/kernel/fpsimd.c
720
task->thread.uw.fpmr = 0;
arch/arm64/kernel/fpsimd.c
722
task->thread.svcr &= ~SVCR_SM_MASK;
arch/arm64/kernel/fpsimd.c
723
task->thread.fp_type = FP_STATE_FPSIMD;
arch/arm64/kernel/fpsimd.c
735
kfree(task->thread.sve_state);
arch/arm64/kernel/fpsimd.c
736
task->thread.sve_state = NULL;
arch/arm64/kernel/fpsimd.c
751
if (task->thread.sve_state) {
arch/arm64/kernel/fpsimd.c
753
memset(task->thread.sve_state, 0,
arch/arm64/kernel/fpsimd.c
759
task->thread.sve_state =
arch/arm64/kernel/fpsimd.c
772
if (task->thread.fp_type == FP_STATE_SVE)
arch/arm64/kernel/fpsimd.c
787
void *sst = task->thread.sve_state;
arch/arm64/kernel/fpsimd.c
788
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
arch/arm64/kernel/fpsimd.c
790
if (task->thread.fp_type != FP_STATE_SVE)
arch/arm64/kernel/fpsimd.c
793
vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
arch/arm64/kernel/fpsimd.c
843
kfree(task->thread.sve_state);
arch/arm64/kernel/fpsimd.c
844
task->thread.sve_state = sve_state;
arch/arm64/kernel/fpsimd.c
848
task->thread.svcr &= ~SVCR_ZA_MASK;
arch/arm64/kernel/fpsimd.c
849
kfree(task->thread.sme_state);
arch/arm64/kernel/fpsimd.c
850
task->thread.sme_state = sme_state;
arch/arm64/kernel/hw_breakpoint.c
229
struct debug_info *debug_info = &current->thread.debug;
arch/arm64/kernel/hw_breakpoint.c
633
debug_info = &current->thread.debug;
arch/arm64/kernel/hw_breakpoint.c
764
debug_info = &current->thread.debug;
arch/arm64/kernel/hw_breakpoint.c
849
struct debug_info *debug_info = &current->thread.debug;
arch/arm64/kernel/hw_breakpoint.c
917
current_debug_info = &current->thread.debug;
arch/arm64/kernel/hw_breakpoint.c
918
next_debug_info = &next->thread.debug;
arch/arm64/kernel/kgdb.c
131
struct cpu_context *cpu_context = &task->thread.cpu_context;
arch/arm64/kernel/mte.c
211
unsigned long sctlr = task->thread.sctlr_user;
arch/arm64/kernel/mte.c
212
unsigned long mte_ctrl = task->thread.mte_ctrl;
arch/arm64/kernel/mte.c
237
task->thread.sctlr_user = sctlr;
arch/arm64/kernel/mte.c
250
((task->thread.mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
arch/arm64/kernel/mte.c
399
task->thread.mte_ctrl = mte_ctrl;
arch/arm64/kernel/mte.c
404
update_sctlr_el1(task->thread.sctlr_user);
arch/arm64/kernel/mte.c
414
u64 mte_ctrl = task->thread.mte_ctrl;
arch/arm64/kernel/pointer_auth.c
103
if (tsk->thread.sctlr_user & SCTLR_ELx_ENIA)
arch/arm64/kernel/pointer_auth.c
105
if (tsk->thread.sctlr_user & SCTLR_ELx_ENIB)
arch/arm64/kernel/pointer_auth.c
107
if (tsk->thread.sctlr_user & SCTLR_ELx_ENDA)
arch/arm64/kernel/pointer_auth.c
109
if (tsk->thread.sctlr_user & SCTLR_ELx_ENDB)
arch/arm64/kernel/pointer_auth.c
13
struct ptrauth_keys_user *keys = &tsk->thread.keys_user;
arch/arm64/kernel/pointer_auth.c
82
sctlr = tsk->thread.sctlr_user;
arch/arm64/kernel/pointer_auth.c
85
tsk->thread.sctlr_user = sctlr;
arch/arm64/kernel/probes/uprobes.c
106
return t->thread.fault_code != UPROBE_INV_FAULT_CODE;
arch/arm64/kernel/probes/uprobes.c
76
current->thread.fault_code = UPROBE_INV_FAULT_CODE;
arch/arm64/kernel/probes/uprobes.c
90
WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);
arch/arm64/kernel/process.c
258
current->thread.uw.tp_value = 0;
arch/arm64/kernel/process.c
291
current->thread.gcspr_el0 = 0;
arch/arm64/kernel/process.c
292
current->thread.gcs_base = 0;
arch/arm64/kernel/process.c
293
current->thread.gcs_size = 0;
arch/arm64/kernel/process.c
294
current->thread.gcs_el0_mode = 0;
arch/arm64/kernel/process.c
295
current->thread.gcs_el0_locked = 0;
arch/arm64/kernel/process.c
308
p->thread.gcs_base = 0;
arch/arm64/kernel/process.c
309
p->thread.gcs_size = 0;
arch/arm64/kernel/process.c
311
p->thread.gcs_el0_mode = current->thread.gcs_el0_mode;
arch/arm64/kernel/process.c
312
p->thread.gcs_el0_locked = current->thread.gcs_el0_locked;
arch/arm64/kernel/process.c
363
dst->thread.fp_type = FP_STATE_FPSIMD;
arch/arm64/kernel/process.c
364
dst->thread.sve_state = NULL;
arch/arm64/kernel/process.c
374
dst->thread.sme_state = NULL;
arch/arm64/kernel/process.c
376
dst->thread.svcr &= ~SVCR_ZA_MASK;
arch/arm64/kernel/process.c
386
if (!thread_za_enabled(&src->thread))
arch/arm64/kernel/process.c
389
dst->thread.sve_state = kzalloc(sve_state_size(src),
arch/arm64/kernel/process.c
391
if (!dst->thread.sve_state)
arch/arm64/kernel/process.c
394
dst->thread.sme_state = kmemdup(src->thread.sme_state,
arch/arm64/kernel/process.c
397
if (!dst->thread.sme_state) {
arch/arm64/kernel/process.c
398
kfree(dst->thread.sve_state);
arch/arm64/kernel/process.c
399
dst->thread.sve_state = NULL;
arch/arm64/kernel/process.c
404
dst->thread.svcr |= SVCR_ZA_MASK;
arch/arm64/kernel/process.c
419
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
arch/arm64/kernel/process.c
443
p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
arch/arm64/kernel/process.c
470
p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
arch/arm64/kernel/process.c
475
p->thread.tpidr2_el0 = 0;
arch/arm64/kernel/process.c
476
WARN_ON_ONCE(p->thread.svcr & SVCR_ZA_MASK);
arch/arm64/kernel/process.c
485
p->thread.uw.tp_value = tls;
arch/arm64/kernel/process.c
502
p->thread.cpu_context.x19 = (unsigned long)args->fn;
arch/arm64/kernel/process.c
503
p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;
arch/arm64/kernel/process.c
506
p->thread.por_el0 = POR_EL0_INIT;
arch/arm64/kernel/process.c
508
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
arch/arm64/kernel/process.c
509
p->thread.cpu_context.sp = (unsigned long)childregs;
arch/arm64/kernel/process.c
514
p->thread.cpu_context.fp = (unsigned long)&childregs->stackframe;
arch/arm64/kernel/process.c
525
current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
arch/arm64/kernel/process.c
533
write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
arch/arm64/kernel/process.c
539
write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
arch/arm64/kernel/process.c
583
current->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
arch/arm64/kernel/process.c
593
write_sysreg_s(next->thread.gcspr_el0, SYS_GCSPR_EL0);
arch/arm64/kernel/process.c
595
if (current->thread.gcs_el0_mode != next->thread.gcs_el0_mode)
arch/arm64/kernel/process.c
673
current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
arch/arm64/kernel/process.c
674
if (current->thread.por_el0 != next->thread.por_el0) {
arch/arm64/kernel/process.c
675
write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
arch/arm64/kernel/process.c
738
if (prev->thread.sctlr_user != next->thread.sctlr_user)
arch/arm64/kernel/process.c
739
update_sctlr_el1(next->thread.sctlr_user);
arch/arm64/kernel/ptrace.c
1002
target->thread.fp_type = FP_STATE_SVE;
arch/arm64/kernel/ptrace.c
1016
target->thread.sve_state,
arch/arm64/kernel/ptrace.c
1032
&target->thread.uw.fpsimd_state.fpsr,
arch/arm64/kernel/ptrace.c
1099
if (thread_za_enabled(&target->thread))
arch/arm64/kernel/ptrace.c
1113
if (thread_za_enabled(&target->thread)) {
arch/arm64/kernel/ptrace.c
1116
membuf_write(&to, target->thread.sme_state, end - start);
arch/arm64/kernel/ptrace.c
1164
if (!target->thread.sve_state) {
arch/arm64/kernel/ptrace.c
1166
if (!target->thread.sve_state) {
arch/arm64/kernel/ptrace.c
1176
sme_alloc(target, !thread_za_enabled(&target->thread));
arch/arm64/kernel/ptrace.c
1177
if (!target->thread.sme_state)
arch/arm64/kernel/ptrace.c
1182
target->thread.svcr &= ~SVCR_ZA_MASK;
arch/arm64/kernel/ptrace.c
1200
target->thread.sme_state,
arch/arm64/kernel/ptrace.c
1207
target->thread.svcr |= SVCR_ZA_MASK;
arch/arm64/kernel/ptrace.c
1225
if (thread_za_enabled(&target->thread))
arch/arm64/kernel/ptrace.c
1226
membuf_write(&to, thread_zt_state(&target->thread),
arch/arm64/kernel/ptrace.c
1246
if (!target->thread.sve_state)
arch/arm64/kernel/ptrace.c
1249
if (!thread_za_enabled(&target->thread)) {
arch/arm64/kernel/ptrace.c
1251
if (!target->thread.sme_state)
arch/arm64/kernel/ptrace.c
1256
thread_zt_state(&target->thread),
arch/arm64/kernel/ptrace.c
1259
target->thread.svcr |= SVCR_ZA_MASK;
arch/arm64/kernel/ptrace.c
1362
struct ptrauth_keys_user *keys = &target->thread.keys_user;
arch/arm64/kernel/ptrace.c
1378
struct ptrauth_keys_user *keys = &target->thread.keys_user;
arch/arm64/kernel/ptrace.c
1411
struct ptrauth_keys_user *keys = &target->thread.keys_user;
arch/arm64/kernel/ptrace.c
1427
struct ptrauth_keys_user *keys = &target->thread.keys_user;
arch/arm64/kernel/ptrace.c
1488
current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
arch/arm64/kernel/ptrace.c
1490
return membuf_write(&to, &target->thread.por_el0,
arch/arm64/kernel/ptrace.c
1491
sizeof(target->thread.por_el0));
arch/arm64/kernel/ptrace.c
1505
ctrl = target->thread.por_el0;
arch/arm64/kernel/ptrace.c
1511
target->thread.por_el0 = ctrl;
arch/arm64/kernel/ptrace.c
1521
user_gcs->features_enabled = target->thread.gcs_el0_mode;
arch/arm64/kernel/ptrace.c
1522
user_gcs->features_locked = target->thread.gcs_el0_locked;
arch/arm64/kernel/ptrace.c
1523
user_gcs->gcspr_el0 = target->thread.gcspr_el0;
arch/arm64/kernel/ptrace.c
1529
target->thread.gcs_el0_mode = user_gcs->features_enabled;
arch/arm64/kernel/ptrace.c
1530
target->thread.gcs_el0_locked = user_gcs->features_locked;
arch/arm64/kernel/ptrace.c
1531
target->thread.gcspr_el0 = user_gcs->gcspr_el0;
arch/arm64/kernel/ptrace.c
183
if (current->thread.debug.hbp_break[i] == bp) {
arch/arm64/kernel/ptrace.c
190
if (current->thread.debug.hbp_watch[i] == bp) {
arch/arm64/kernel/ptrace.c
1902
uregs = &target->thread.uw.fpsimd_state;
arch/arm64/kernel/ptrace.c
1929
uregs = &target->thread.uw.fpsimd_state;
arch/arm64/kernel/ptrace.c
1952
return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
arch/arm64/kernel/ptrace.c
1961
compat_ulong_t tls = target->thread.uw.tp_value;
arch/arm64/kernel/ptrace.c
1967
target->thread.uw.tp_value = tls;
arch/arm64/kernel/ptrace.c
210
struct thread_struct *t = &tsk->thread;
arch/arm64/kernel/ptrace.c
2269
ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
arch/arm64/kernel/ptrace.c
229
memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
arch/arm64/kernel/ptrace.c
243
bp = tsk->thread.debug.hbp_break[idx];
arch/arm64/kernel/ptrace.c
249
bp = tsk->thread.debug.hbp_watch[idx];
arch/arm64/kernel/ptrace.c
269
tsk->thread.debug.hbp_break[idx] = bp;
arch/arm64/kernel/ptrace.c
276
tsk->thread.debug.hbp_watch[idx] = bp;
arch/arm64/kernel/ptrace.c
599
uregs = &target->thread.uw.fpsimd_state;
arch/arm64/kernel/ptrace.c
631
newstate = target->thread.uw.fpsimd_state;
arch/arm64/kernel/ptrace.c
638
target->thread.uw.fpsimd_state = newstate;
arch/arm64/kernel/ptrace.c
670
ret = membuf_store(&to, target->thread.uw.tp_value);
arch/arm64/kernel/ptrace.c
672
ret = membuf_store(&to, target->thread.tpidr2_el0);
arch/arm64/kernel/ptrace.c
686
tls[0] = target->thread.uw.tp_value;
arch/arm64/kernel/ptrace.c
688
tls[1] = target->thread.tpidr2_el0;
arch/arm64/kernel/ptrace.c
694
target->thread.uw.tp_value = tls[0];
arch/arm64/kernel/ptrace.c
696
target->thread.tpidr2_el0 = tls[1];
arch/arm64/kernel/ptrace.c
710
return membuf_store(&to, target->thread.uw.fpmr);
arch/arm64/kernel/ptrace.c
723
fpmr = target->thread.uw.fpmr;
arch/arm64/kernel/ptrace.c
729
target->thread.uw.fpmr = fpmr;
arch/arm64/kernel/ptrace.c
772
if (thread_sm_enabled(&target->thread))
arch/arm64/kernel/ptrace.c
778
if (active && target->thread.fp_type == FP_STATE_SVE)
arch/arm64/kernel/ptrace.c
849
membuf_write(&to, target->thread.sve_state, end - start);
arch/arm64/kernel/ptrace.c
861
membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
arch/arm64/kernel/ptrace.c
952
if (!target->thread.sme_state)
arch/arm64/kernel/ptrace.c
959
if (!target->thread.sve_state)
arch/arm64/kernel/ptrace.c
973
target->thread.svcr &= ~SVCR_SM_MASK;
arch/arm64/kernel/ptrace.c
977
target->thread.svcr |= SVCR_SM_MASK;
arch/arm64/kernel/ptrace.c
986
memset(&current->thread.uw.fpsimd_state, 0,
arch/arm64/kernel/ptrace.c
987
sizeof(current->thread.uw.fpsimd_state));
arch/arm64/kernel/ptrace.c
994
target->thread.fp_type = FP_STATE_FPSIMD;
arch/arm64/kernel/signal.c
1051
if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
arch/arm64/kernel/signal.c
1153
if (add_all || current->thread.fault_code) {
arch/arm64/kernel/signal.c
1161
if (system_supports_gcs() && (add_all || current->thread.gcspr_el0)) {
arch/arm64/kernel/signal.c
1172
if (add_all || current->thread.fp_type == FP_STATE_SVE ||
arch/arm64/kernel/signal.c
1173
thread_sm_enabled(&current->thread)) {
arch/arm64/kernel/signal.c
1177
vl = thread_get_cur_vl(&current->thread);
arch/arm64/kernel/signal.c
1204
if (thread_za_enabled(&current->thread))
arch/arm64/kernel/signal.c
1214
if (add_all || thread_za_enabled(&current->thread)) {
arch/arm64/kernel/signal.c
1257
__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
arch/arm64/kernel/signal.c
1274
__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
arch/arm64/kernel/signal.c
1498
current->thread.svcr &= ~SVCR_ZA_MASK;
arch/arm64/kernel/signal.c
254
&current->thread.uw.fpsimd_state;
arch/arm64/kernel/signal.c
299
current->thread.svcr &= ~SVCR_SM_MASK;
arch/arm64/kernel/signal.c
300
current->thread.fp_type = FP_STATE_FPSIMD;
arch/arm64/kernel/signal.c
313
__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);
arch/arm64/kernel/signal.c
328
current->thread.uw.fpmr = fpmr;
arch/arm64/kernel/signal.c
371
if (thread_sm_enabled(&current->thread)) {
arch/arm64/kernel/signal.c
375
} else if (current->thread.fp_type == FP_STATE_SVE) {
arch/arm64/kernel/signal.c
391
current->thread.sve_state,
arch/arm64/kernel/signal.c
454
if (!current->thread.sme_state)
arch/arm64/kernel/signal.c
459
if (!current->thread.sve_state) {
arch/arm64/kernel/signal.c
465
current->thread.svcr |= SVCR_SM_MASK;
arch/arm64/kernel/signal.c
468
current->thread.svcr &= ~SVCR_SM_MASK;
arch/arm64/kernel/signal.c
472
current->thread.fp_type = FP_STATE_SVE;
arch/arm64/kernel/signal.c
474
err = __copy_from_user(current->thread.sve_state,
arch/arm64/kernel/signal.c
540
if (thread_za_enabled(&current->thread))
arch/arm64/kernel/signal.c
556
current->thread.sme_state,
arch/arm64/kernel/signal.c
580
current->thread.svcr &= ~SVCR_ZA_MASK;
arch/arm64/kernel/signal.c
590
if (!current->thread.sve_state)
arch/arm64/kernel/signal.c
594
if (!current->thread.sme_state) {
arch/arm64/kernel/signal.c
595
current->thread.svcr &= ~SVCR_ZA_MASK;
arch/arm64/kernel/signal.c
600
err = __copy_from_user(current->thread.sme_state,
arch/arm64/kernel/signal.c
608
current->thread.svcr |= SVCR_ZA_MASK;
arch/arm64/kernel/signal.c
618
if (WARN_ON(!thread_za_enabled(&current->thread)))
arch/arm64/kernel/signal.c
631
thread_zt_state(&current->thread),
arch/arm64/kernel/signal.c
643
if (!thread_za_enabled(&current->thread))
arch/arm64/kernel/signal.c
655
err = __copy_from_user(thread_zt_state(&current->thread),
arch/arm64/kernel/signal.c
698
__put_user_error(current->thread.gcs_el0_mode,
arch/arm64/kernel/signal.c
734
current->thread.gcs_el0_mode = enabled;
arch/arm64/kernel/signal32.c
180
current->thread.uw.fpsimd_state = fpsimd;
arch/arm64/kernel/signal32.c
387
__put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) <<
arch/arm64/kernel/signal32.c
389
__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
arch/arm64/kernel/signal32.c
95
&current->thread.uw.fpsimd_state;
arch/arm64/kernel/sys_compat.c
91
current->thread.uw.tp_value = regs->regs[0];
arch/arm64/kernel/traps.c
242
unsigned long esr = tsk->thread.fault_code;
arch/arm64/kernel/traps.c
297
current->thread.fault_address = 0;
arch/arm64/kernel/traps.c
298
current->thread.fault_code = err;
arch/arm64/kernel/traps.c
893
current->thread.fault_address = 0;
arch/arm64/kernel/traps.c
894
current->thread.fault_code = esr;
arch/arm64/mm/fault.c
414
current->thread.fault_address = address;
arch/arm64/mm/fault.c
428
if (!is_ttbr0_addr(current->thread.fault_address)) {
arch/arm64/mm/fault.c
466
current->thread.fault_code = esr;
arch/arm64/mm/gcs.c
143
if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE)
arch/arm64/mm/gcs.c
146
if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_WRITE)
arch/arm64/mm/gcs.c
149
if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_PUSH)
arch/arm64/mm/gcs.c
163
if (task->thread.gcs_base)
arch/arm64/mm/gcs.c
164
vm_munmap(task->thread.gcs_base, task->thread.gcs_size);
arch/arm64/mm/gcs.c
166
task->thread.gcspr_el0 = 0;
arch/arm64/mm/gcs.c
167
task->thread.gcs_base = 0;
arch/arm64/mm/gcs.c
168
task->thread.gcs_size = 0;
arch/arm64/mm/gcs.c
194
if (task->thread.gcs_base || task->thread.gcspr_el0)
arch/arm64/mm/gcs.c
205
task->thread.gcspr_el0 = gcs + size - sizeof(u64);
arch/arm64/mm/gcs.c
206
task->thread.gcs_base = gcs;
arch/arm64/mm/gcs.c
207
task->thread.gcs_size = size;
arch/arm64/mm/gcs.c
209
write_sysreg_s(task->thread.gcspr_el0,
arch/arm64/mm/gcs.c
213
task->thread.gcs_el0_mode = arg;
arch/arm64/mm/gcs.c
229
return put_user(task->thread.gcs_el0_mode, arg);
arch/arm64/mm/gcs.c
245
task->thread.gcs_el0_locked |= arg;
arch/arm64/mm/gcs.c
53
tsk->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
arch/arm64/mm/gcs.c
64
tsk->thread.gcs_base = addr;
arch/arm64/mm/gcs.c
65
tsk->thread.gcs_size = size;
arch/arm64/mm/gcs.c
66
tsk->thread.gcspr_el0 = addr + size - sizeof(u64);
arch/csky/include/asm/switch_to.h
12
save_to_user_fp(&prev->thread.user_fp);
arch/csky/include/asm/switch_to.h
13
restore_from_user_fp(&next->thread.user_fp);
arch/csky/include/asm/thread_info.h
37
((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r8))
arch/csky/include/asm/thread_info.h
40
((unsigned long)(tsk->thread.sp))
arch/csky/include/asm/thread_info.h
43
((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r15))
arch/csky/kernel/asm-offsets.c
16
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
arch/csky/kernel/probes/uprobes.c
105
current->thread.trap_no = utask->autask.saved_trap_no;
arch/csky/kernel/probes/uprobes.c
52
utask->autask.saved_trap_no = current->thread.trap_no;
arch/csky/kernel/probes/uprobes.c
53
current->thread.trap_no = UPROBE_TRAP_NR;
arch/csky/kernel/probes/uprobes.c
66
WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);
arch/csky/kernel/probes/uprobes.c
67
current->thread.trap_no = utask->autask.saved_trap_no;
arch/csky/kernel/probes/uprobes.c
78
if (t->thread.trap_no != UPROBE_TRAP_NR)
arch/csky/kernel/process.c
42
save_to_user_fp(&p->thread.user_fp);
arch/csky/kernel/process.c
49
p->thread.sp = (unsigned long)childstack;
arch/csky/kernel/process.c
75
memcpy(fpu, &current->thread.user_fp, sizeof(*fpu));
arch/csky/kernel/ptrace.c
117
struct user_fp *regs = (struct user_fp *)&target->thread.user_fp;
arch/csky/kernel/ptrace.c
143
struct user_fp *regs = (struct user_fp *)&target->thread.user_fp;
arch/csky/kernel/traps.c
135
current->thread.trap_no = trap_no(regs);
arch/csky/kernel/traps.c
163
current->thread.trap_no = trap_no(regs);
arch/csky/kernel/traps.c
190
current->thread.trap_no = trap_no(regs);
arch/csky/mm/fault.c
202
if (kprobe_page_fault(regs, tsk->thread.trap_no))
arch/csky/mm/fault.c
56
current->thread.trap_no = trap_no(regs);
arch/csky/mm/fault.c
75
current->thread.trap_no = trap_no(regs);
arch/hexagon/kernel/asm-offsets.c
86
OFFSET(_TASK_STRUCT_THREAD, task_struct, thread);
arch/hexagon/kernel/process.c
134
fp = ((struct hexagon_switch_stack *)p->thread.switch_sp)->fp;
arch/hexagon/kernel/process.c
77
p->thread.switch_sp = ss;
arch/hexagon/kernel/smp.c
176
struct thread_info *thread = (struct thread_info *)idle->stack;
arch/hexagon/kernel/smp.c
179
thread->cpu = cpu;
arch/hexagon/kernel/smp.c
182
stack_start = ((void *) thread) + THREAD_SIZE;
arch/hexagon/kernel/traps.c
101
task->thread.switch_sp)->fp;
arch/loongarch/include/asm/asmmacro.h
108
.macro fpu_save_cc thread tmp0 tmp1
arch/loongarch/include/asm/asmmacro.h
129
.macro fpu_restore_cc thread tmp0 tmp1
arch/loongarch/include/asm/asmmacro.h
150
.macro fpu_save_cc thread tmp0 tmp1
arch/loongarch/include/asm/asmmacro.h
170
.macro fpu_restore_cc thread tmp0 tmp1
arch/loongarch/include/asm/asmmacro.h
191
.macro fpu_save_double thread tmp
arch/loongarch/include/asm/asmmacro.h
20
.macro cpu_save_nonscratch thread
arch/loongarch/include/asm/asmmacro.h
228
.macro fpu_restore_double thread tmp
arch/loongarch/include/asm/asmmacro.h
265
.macro lsx_save_data thread tmp
arch/loongarch/include/asm/asmmacro.h
302
.macro lsx_restore_data thread tmp
arch/loongarch/include/asm/asmmacro.h
339
.macro lsx_save_all thread tmp0 tmp1
arch/loongarch/include/asm/asmmacro.h
345
.macro lsx_restore_all thread tmp0 tmp1
arch/loongarch/include/asm/asmmacro.h
35
.macro cpu_restore_nonscratch thread
arch/loongarch/include/asm/asmmacro.h
356
.macro lsx_save_all_upper thread base tmp
arch/loongarch/include/asm/asmmacro.h
398
.macro lsx_restore_all_upper thread base tmp
arch/loongarch/include/asm/asmmacro.h
475
.macro lasx_save_data thread tmp
arch/loongarch/include/asm/asmmacro.h
50
.macro fpu_save_csr thread tmp
arch/loongarch/include/asm/asmmacro.h
512
.macro lasx_restore_data thread tmp
arch/loongarch/include/asm/asmmacro.h
549
.macro lasx_save_all thread tmp0 tmp1
arch/loongarch/include/asm/asmmacro.h
555
.macro lasx_restore_all thread tmp0 tmp1
arch/loongarch/include/asm/asmmacro.h
565
.macro lasx_save_all_upper thread base tmp
arch/loongarch/include/asm/asmmacro.h
574
.macro lasx_restore_all_upper thread base tmp
arch/loongarch/include/asm/asmmacro.h
70
.macro fpu_restore_csr thread tmp0 tmp1
arch/loongarch/include/asm/elf.h
273
current->thread.vdso = &vdso_info; \
arch/loongarch/include/asm/elf.h
290
current->thread.vdso = &vdso_info; \
arch/loongarch/include/asm/fpu.h
121
_restore_fp(&current->thread.fpu);
arch/loongarch/include/asm/fpu.h
137
_save_fp(&tsk->thread.fpu);
arch/loongarch/include/asm/fpu.h
165
unsigned int fcsr = current->thread.fpu.fcsr;
arch/loongarch/include/asm/fpu.h
175
_save_fp(&tsk->thread.fpu);
arch/loongarch/include/asm/fpu.h
181
_restore_fp(&tsk->thread.fpu);
arch/loongarch/include/asm/fpu.h
195
_save_lasx(&current->thread.fpu);
arch/loongarch/include/asm/fpu.h
200
_save_lsx(&current->thread.fpu);
arch/loongarch/include/asm/fpu.h
204
_save_fp(&current->thread.fpu);
arch/loongarch/include/asm/fpu.h
232
_save_lsx(&t->thread.fpu);
arch/loongarch/include/asm/fpu.h
238
_restore_lsx(&t->thread.fpu);
arch/loongarch/include/asm/fpu.h
250
_restore_lsx_upper(&t->thread.fpu);
arch/loongarch/include/asm/fpu.h
280
_save_lasx(&t->thread.fpu);
arch/loongarch/include/asm/fpu.h
286
_restore_lasx(&t->thread.fpu);
arch/loongarch/include/asm/fpu.h
298
_restore_lasx_upper(&t->thread.fpu);
arch/loongarch/include/asm/lbt.h
63
_restore_lbt(&current->thread.lbt);
arch/loongarch/include/asm/lbt.h
78
_save_lbt(&tsk->thread.lbt);
arch/loongarch/include/asm/processor.h
145
#define thread_saved_ra(tsk) (tsk->thread.sched_ra)
arch/loongarch/include/asm/processor.h
146
#define thread_saved_fp(tsk) (tsk->thread.sched_cfa)
arch/loongarch/kernel/asm-offsets.c
103
OFFSET(THREAD_REG01, task_struct, thread.reg01);
arch/loongarch/kernel/asm-offsets.c
104
OFFSET(THREAD_REG03, task_struct, thread.reg03);
arch/loongarch/kernel/asm-offsets.c
105
OFFSET(THREAD_REG22, task_struct, thread.reg22);
arch/loongarch/kernel/asm-offsets.c
106
OFFSET(THREAD_REG23, task_struct, thread.reg23);
arch/loongarch/kernel/asm-offsets.c
107
OFFSET(THREAD_REG24, task_struct, thread.reg24);
arch/loongarch/kernel/asm-offsets.c
108
OFFSET(THREAD_REG25, task_struct, thread.reg25);
arch/loongarch/kernel/asm-offsets.c
109
OFFSET(THREAD_REG26, task_struct, thread.reg26);
arch/loongarch/kernel/asm-offsets.c
110
OFFSET(THREAD_REG27, task_struct, thread.reg27);
arch/loongarch/kernel/asm-offsets.c
111
OFFSET(THREAD_REG28, task_struct, thread.reg28);
arch/loongarch/kernel/asm-offsets.c
112
OFFSET(THREAD_REG29, task_struct, thread.reg29);
arch/loongarch/kernel/asm-offsets.c
113
OFFSET(THREAD_REG30, task_struct, thread.reg30);
arch/loongarch/kernel/asm-offsets.c
114
OFFSET(THREAD_REG31, task_struct, thread.reg31);
arch/loongarch/kernel/asm-offsets.c
115
OFFSET(THREAD_SCHED_RA, task_struct, thread.sched_ra);
arch/loongarch/kernel/asm-offsets.c
116
OFFSET(THREAD_SCHED_CFA, task_struct, thread.sched_cfa);
arch/loongarch/kernel/asm-offsets.c
118
thread.csr_crmd);
arch/loongarch/kernel/asm-offsets.c
120
thread.csr_prmd);
arch/loongarch/kernel/asm-offsets.c
122
thread.csr_euen);
arch/loongarch/kernel/asm-offsets.c
124
thread.csr_ecfg);
arch/loongarch/kernel/asm-offsets.c
126
OFFSET(THREAD_FPU, task_struct, thread.fpu);
arch/loongarch/kernel/asm-offsets.c
129
thread.csr_badvaddr);
arch/loongarch/kernel/asm-offsets.c
131
thread.error_code);
arch/loongarch/kernel/asm-offsets.c
132
OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
arch/loongarch/kernel/hw_breakpoint.c
159
memset(tsk->thread.hbp_break, 0, sizeof(tsk->thread.hbp_break));
arch/loongarch/kernel/hw_breakpoint.c
160
memset(tsk->thread.hbp_watch, 0, sizeof(tsk->thread.hbp_watch));
arch/loongarch/kernel/hw_breakpoint.c
169
struct thread_struct *t = &tsk->thread;
arch/loongarch/kernel/kfpu.c
54
_save_lasx(&current->thread.fpu);
arch/loongarch/kernel/kfpu.c
59
_save_lsx(&current->thread.fpu);
arch/loongarch/kernel/kfpu.c
63
_save_fp(&current->thread.fpu);
arch/loongarch/kernel/kfpu.c
79
_restore_lasx(&current->thread.fpu);
arch/loongarch/kernel/kfpu.c
84
_restore_lsx(&current->thread.fpu);
arch/loongarch/kernel/kfpu.c
88
_restore_fp(&current->thread.fpu);
arch/loongarch/kernel/kgdb.c
135
memcpy(mem, (void *)&current->thread.fpu.fcsr, reg_size);
arch/loongarch/kernel/kgdb.c
138
memcpy(mem, (void *)&current->thread.fpu.fcc + reg_offset, reg_size);
arch/loongarch/kernel/kgdb.c
141
memcpy(mem, (void *)&current->thread.fpu.fpr[reg_offset], reg_size);
arch/loongarch/kernel/kgdb.c
176
memcpy((void *)&current->thread.fpu.fcsr, mem, reg_size);
arch/loongarch/kernel/kgdb.c
179
memcpy((void *)&current->thread.fpu.fcc + reg_offset, mem, reg_size);
arch/loongarch/kernel/kgdb.c
182
memcpy((void *)&current->thread.fpu.fpr[reg_offset], mem, reg_size);
arch/loongarch/kernel/kgdb.c
202
gdb_regs[DBG_LOONGARCH_RA] = p->thread.reg01;
arch/loongarch/kernel/kgdb.c
204
gdb_regs[DBG_LOONGARCH_SP] = p->thread.reg03;
arch/loongarch/kernel/kgdb.c
207
gdb_regs[DBG_LOONGARCH_S0] = p->thread.reg23;
arch/loongarch/kernel/kgdb.c
208
gdb_regs[DBG_LOONGARCH_S1] = p->thread.reg24;
arch/loongarch/kernel/kgdb.c
209
gdb_regs[DBG_LOONGARCH_S2] = p->thread.reg25;
arch/loongarch/kernel/kgdb.c
210
gdb_regs[DBG_LOONGARCH_S3] = p->thread.reg26;
arch/loongarch/kernel/kgdb.c
211
gdb_regs[DBG_LOONGARCH_S4] = p->thread.reg27;
arch/loongarch/kernel/kgdb.c
212
gdb_regs[DBG_LOONGARCH_S5] = p->thread.reg28;
arch/loongarch/kernel/kgdb.c
213
gdb_regs[DBG_LOONGARCH_S6] = p->thread.reg29;
arch/loongarch/kernel/kgdb.c
214
gdb_regs[DBG_LOONGARCH_S7] = p->thread.reg30;
arch/loongarch/kernel/kgdb.c
215
gdb_regs[DBG_LOONGARCH_S8] = p->thread.reg31;
arch/loongarch/kernel/kgdb.c
220
gdb_regs[DBG_LOONGARCH_PC] = p->thread.reg01;
arch/loongarch/kernel/process.c
139
memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));
arch/loongarch/kernel/process.c
141
memcpy(dst, src, offsetof(struct task_struct, thread.lbt.scr0));
arch/loongarch/kernel/process.c
144
memcpy(&dst->thread.lbt, &src->thread.lbt, sizeof(struct loongarch_lbt));
arch/loongarch/kernel/process.c
184
p->thread.sched_cfa = 0;
arch/loongarch/kernel/process.c
185
p->thread.csr_euen = 0;
arch/loongarch/kernel/process.c
186
p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
arch/loongarch/kernel/process.c
187
p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
arch/loongarch/kernel/process.c
188
p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
arch/loongarch/kernel/process.c
191
p->thread.reg03 = childksp;
arch/loongarch/kernel/process.c
192
p->thread.reg23 = (unsigned long)args->fn;
arch/loongarch/kernel/process.c
193
p->thread.reg24 = (unsigned long)args->fn_arg;
arch/loongarch/kernel/process.c
194
p->thread.reg01 = (unsigned long)ret_from_kernel_thread_asm;
arch/loongarch/kernel/process.c
195
p->thread.sched_ra = (unsigned long)ret_from_kernel_thread_asm;
arch/loongarch/kernel/process.c
197
childregs->csr_euen = p->thread.csr_euen;
arch/loongarch/kernel/process.c
198
childregs->csr_crmd = p->thread.csr_crmd;
arch/loongarch/kernel/process.c
199
childregs->csr_prmd = p->thread.csr_prmd;
arch/loongarch/kernel/process.c
200
childregs->csr_ecfg = p->thread.csr_ecfg;
arch/loongarch/kernel/process.c
210
p->thread.reg03 = (unsigned long) childregs;
arch/loongarch/kernel/process.c
211
p->thread.reg01 = (unsigned long) ret_from_fork_asm;
arch/loongarch/kernel/process.c
212
p->thread.sched_ra = (unsigned long) ret_from_fork_asm;
arch/loongarch/kernel/process.c
322
if (current->thread.vdso) {
arch/loongarch/kernel/process.c
324
top -= PAGE_ALIGN(current->thread.vdso->size);
arch/loongarch/kernel/process.c
92
current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
arch/loongarch/kernel/ptrace.c
1045
struct thread_struct *thread = &tsk->thread;
arch/loongarch/kernel/ptrace.c
1047
bp = thread->hbp_break[0];
arch/loongarch/kernel/ptrace.c
1060
thread->hbp_break[0] = bp;
arch/loongarch/kernel/ptrace.c
1087
task->thread.single_step = task_pt_regs(task)->csr_era;
arch/loongarch/kernel/ptrace.c
123
return membuf_write(to, &target->thread.fpu.fpr,
arch/loongarch/kernel/ptrace.c
134
fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
arch/loongarch/kernel/ptrace.c
153
if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
arch/loongarch/kernel/ptrace.c
158
r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
arch/loongarch/kernel/ptrace.c
159
r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
arch/loongarch/kernel/ptrace.c
169
&target->thread.fpu.fpr,
arch/loongarch/kernel/ptrace.c
187
set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
arch/loongarch/kernel/ptrace.c
212
if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
arch/loongarch/kernel/ptrace.c
220
&target->thread.fpu.fcc, fcc_start,
arch/loongarch/kernel/ptrace.c
223
&target->thread.fpu.fcsr, fcsr_start,
arch/loongarch/kernel/ptrace.c
271
membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
arch/loongarch/kernel/ptrace.c
297
} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
arch/loongarch/kernel/ptrace.c
299
membuf_write(&to, &target->thread.fpu.fpr, wr_size);
arch/loongarch/kernel/ptrace.c
302
copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
arch/loongarch/kernel/ptrace.c
319
if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
arch/loongarch/kernel/ptrace.c
322
&target->thread.fpu.fpr,
arch/loongarch/kernel/ptrace.c
327
sizeof(target->thread.fpu.fpr[0]));
arch/loongarch/kernel/ptrace.c
332
&target->thread.fpu.fpr[i],
arch/loongarch/kernel/ptrace.c
349
r = membuf_write(&to, &target->thread.lbt.scr0, sizeof(target->thread.lbt.scr0));
arch/loongarch/kernel/ptrace.c
350
r = membuf_write(&to, &target->thread.lbt.scr1, sizeof(target->thread.lbt.scr1));
arch/loongarch/kernel/ptrace.c
351
r = membuf_write(&to, &target->thread.lbt.scr2, sizeof(target->thread.lbt.scr2));
arch/loongarch/kernel/ptrace.c
352
r = membuf_write(&to, &target->thread.lbt.scr3, sizeof(target->thread.lbt.scr3));
arch/loongarch/kernel/ptrace.c
353
r = membuf_write(&to, &target->thread.lbt.eflags, sizeof(u32));
arch/loongarch/kernel/ptrace.c
354
r = membuf_write(&to, &target->thread.fpu.ftop, sizeof(u32));
arch/loongarch/kernel/ptrace.c
365
const int eflags_start = 4 * sizeof(target->thread.lbt.scr0);
arch/loongarch/kernel/ptrace.c
369
&target->thread.lbt.scr0,
arch/loongarch/kernel/ptrace.c
370
0, 4 * sizeof(target->thread.lbt.scr0));
arch/loongarch/kernel/ptrace.c
372
&target->thread.lbt.eflags,
arch/loongarch/kernel/ptrace.c
375
&target->thread.fpu.ftop,
arch/loongarch/kernel/ptrace.c
395
if (current->thread.hbp_break[i] == bp)
arch/loongarch/kernel/ptrace.c
399
if (current->thread.hbp_watch[i] == bp)
arch/loongarch/kernel/ptrace.c
416
bp = tsk->thread.hbp_break[idx];
arch/loongarch/kernel/ptrace.c
422
bp = tsk->thread.hbp_watch[idx];
arch/loongarch/kernel/ptrace.c
439
tsk->thread.hbp_break[idx] = bp;
arch/loongarch/kernel/ptrace.c
445
tsk->thread.hbp_watch[idx] = bp;
arch/loongarch/kernel/ptrace.c
57
memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
arch/loongarch/kernel/signal.c
1012
ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);
arch/loongarch/kernel/signal.c
109
set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
arch/loongarch/kernel/signal.c
111
err |= __get_user(current->thread.fpu.fcc, fcc);
arch/loongarch/kernel/signal.c
112
err |= __get_user(current->thread.fpu.fcsr, fcsr);
arch/loongarch/kernel/signal.c
126
err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
arch/loongarch/kernel/signal.c
128
err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
arch/loongarch/kernel/signal.c
131
err |= __put_user(current->thread.fpu.fcc, fcc);
arch/loongarch/kernel/signal.c
132
err |= __put_user(current->thread.fpu.fcsr, fcsr);
arch/loongarch/kernel/signal.c
148
set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
arch/loongarch/kernel/signal.c
150
set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
arch/loongarch/kernel/signal.c
152
err |= __get_user(current->thread.fpu.fcc, fcc);
arch/loongarch/kernel/signal.c
153
err |= __get_user(current->thread.fpu.fcsr, fcsr);
arch/loongarch/kernel/signal.c
167
err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
arch/loongarch/kernel/signal.c
169
err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
arch/loongarch/kernel/signal.c
171
err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 2),
arch/loongarch/kernel/signal.c
173
err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 3),
arch/loongarch/kernel/signal.c
176
err |= __put_user(current->thread.fpu.fcc, fcc);
arch/loongarch/kernel/signal.c
177
err |= __put_user(current->thread.fpu.fcsr, fcsr);
arch/loongarch/kernel/signal.c
193
set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
arch/loongarch/kernel/signal.c
195
set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
arch/loongarch/kernel/signal.c
197
set_fpr64(&current->thread.fpu.fpr[i], 2, fpr_val);
arch/loongarch/kernel/signal.c
199
set_fpr64(&current->thread.fpu.fpr[i], 3, fpr_val);
arch/loongarch/kernel/signal.c
201
err |= __get_user(current->thread.fpu.fcc, fcc);
arch/loongarch/kernel/signal.c
202
err |= __get_user(current->thread.fpu.fcsr, fcsr);
arch/loongarch/kernel/signal.c
214
err |= __put_user(current->thread.lbt.scr0, &regs[0]);
arch/loongarch/kernel/signal.c
215
err |= __put_user(current->thread.lbt.scr1, &regs[1]);
arch/loongarch/kernel/signal.c
216
err |= __put_user(current->thread.lbt.scr2, &regs[2]);
arch/loongarch/kernel/signal.c
217
err |= __put_user(current->thread.lbt.scr3, &regs[3]);
arch/loongarch/kernel/signal.c
218
err |= __put_user(current->thread.lbt.eflags, eflags);
arch/loongarch/kernel/signal.c
229
err |= __get_user(current->thread.lbt.scr0, &regs[0]);
arch/loongarch/kernel/signal.c
230
err |= __get_user(current->thread.lbt.scr1, &regs[1]);
arch/loongarch/kernel/signal.c
231
err |= __get_user(current->thread.lbt.scr2, &regs[2]);
arch/loongarch/kernel/signal.c
232
err |= __get_user(current->thread.lbt.scr3, &regs[3]);
arch/loongarch/kernel/signal.c
233
err |= __get_user(current->thread.lbt.eflags, eflags);
arch/loongarch/kernel/signal.c
242
return __put_user(current->thread.fpu.ftop, ftop);
arch/loongarch/kernel/signal.c
249
return __get_user(current->thread.fpu.ftop, ftop);
arch/loongarch/kernel/signal.c
807
switch (current->thread.error_code) {
arch/loongarch/kernel/signal.c
89
__put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
arch/loongarch/kernel/signal.c
92
err |= __put_user(current->thread.fpu.fcc, fcc);
arch/loongarch/kernel/signal.c
93
err |= __put_user(current->thread.fpu.fcsr, fcsr);
arch/loongarch/kernel/stacktrace.c
58
regs->regs[22] = task->thread.reg22;
arch/loongarch/kernel/traps.c
147
regs.csr_era = task->thread.reg01;
arch/loongarch/kernel/traps.c
149
regs.regs[3] = task->thread.reg03;
arch/loongarch/kernel/traps.c
150
regs.regs[22] = task->thread.reg22;
arch/loongarch/kernel/traps.c
408
current->thread.trap_nr, SIGSEGV);
arch/loongarch/kernel/traps.c
514
if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
arch/loongarch/kernel/traps.c
640
current->thread.trap_nr = read_csr_excode();
arch/loongarch/kernel/traps.c
765
current->thread.trap_nr = read_csr_excode();
arch/loongarch/kernel/traps.c
767
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
arch/loongarch/kernel/traps.c
829
if (pc == current->thread.single_step) {
arch/loongarch/kernel/traps.c
864
current->thread.trap_nr = read_csr_excode();
arch/loongarch/kernel/traps.c
866
if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
arch/loongarch/kernel/traps.c
874
current->thread.error_code = 1;
arch/loongarch/kernel/unaligned.c
454
set_fpr64(&current->thread.fpu.fpr[insn.reg3_format.rd], 0, value);
arch/loongarch/kernel/unaligned.c
464
value = get_fpr64(&current->thread.fpu.fpr[insn.reg3_format.rd], 0);
arch/loongarch/kernel/uprobes.c
42
utask->autask.saved_trap_nr = current->thread.trap_nr;
arch/loongarch/kernel/uprobes.c
43
current->thread.trap_nr = UPROBE_TRAP_NR;
arch/loongarch/kernel/uprobes.c
53
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
arch/loongarch/kernel/uprobes.c
54
current->thread.trap_nr = utask->autask.saved_trap_nr;
arch/loongarch/kernel/uprobes.c
64
current->thread.trap_nr = utask->autask.saved_trap_nr;
arch/loongarch/kernel/uprobes.c
70
if (t->thread.trap_nr != UPROBE_TRAP_NR)
arch/loongarch/kernel/vdso.c
86
struct loongarch_vdso_info *info = current->thread.vdso;
arch/loongarch/mm/fault.c
127
current->thread.csr_badvaddr = address;
arch/loongarch/mm/fault.c
128
current->thread.trap_nr = read_csr_excode();
arch/loongarch/mm/fault.c
145
current->thread.csr_badvaddr = address;
arch/loongarch/mm/fault.c
147
current->thread.error_code = 1;
arch/loongarch/mm/fault.c
149
current->thread.error_code = 2;
arch/loongarch/mm/fault.c
150
current->thread.trap_nr = read_csr_excode();
arch/loongarch/mm/fault.c
185
if (kprobe_page_fault(regs, current->thread.trap_nr))
arch/m68k/include/asm/math-emu.h
128
#define FPDATA ((struct fp_data *)current->thread.fp)
arch/m68k/include/asm/mmu_context.h
106
mmuar = task->thread.ksp;
arch/m68k/include/asm/processor.h
169
if ((tsk)->thread.esp0 > PAGE_SIZE && \
arch/m68k/include/asm/processor.h
170
(virt_addr_valid((tsk)->thread.esp0))) \
arch/m68k/include/asm/processor.h
171
eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
arch/m68k/include/asm/processor.h
173
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
arch/m68k/include/asm/processor.h
175
#define task_pt_regs(tsk) ((struct pt_regs *) ((tsk)->thread.esp0))
arch/m68k/kernel/asm-offsets.c
27
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
arch/m68k/kernel/process.c
154
p->thread.ksp = (unsigned long)frame;
arch/m68k/kernel/process.c
155
p->thread.esp0 = (unsigned long)&frame->regs;
arch/m68k/kernel/process.c
161
p->thread.fc = USER_DATA;
arch/m68k/kernel/process.c
170
p->thread.usp = 0;
arch/m68k/kernel/process.c
177
p->thread.usp = usp ?: rdusp();
arch/m68k/kernel/process.c
185
asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
arch/m68k/kernel/process.c
187
if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
arch/m68k/kernel/process.c
194
: "m" (p->thread.fp[0]),
arch/m68k/kernel/process.c
195
"m" (p->thread.fpcntl[0]),
arch/m68k/kernel/process.c
196
"m" (p->thread.fpcntl[1]),
arch/m68k/kernel/process.c
197
"m" (p->thread.fpcntl[2])
arch/m68k/kernel/process.c
203
: "m" (p->thread.fp[0]),
arch/m68k/kernel/process.c
204
"m" (p->thread.fpcntl[0])
arch/m68k/kernel/process.c
210
asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
arch/m68k/kernel/process.c
223
memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
arch/m68k/kernel/process.c
224
memcpy(fpu->fpregs, current->thread.fp, 96);
arch/m68k/kernel/process.c
275
fp = ((struct switch_stack *)p->thread.ksp)->a6;
arch/m68k/kernel/process.c
96
current->thread.fc = USER_DATA;
arch/m68k/kernel/ptrace.c
107
addr = &task->thread.usp;
arch/m68k/kernel/ptrace.c
109
addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
arch/m68k/kernel/ptrace.c
114
long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
arch/m68k/kernel/ptrace.c
181
tmp = child->thread.fp[regno - 21];
arch/m68k/kernel/ptrace.c
222
child->thread.fp[regno - 21] = data;
arch/m68k/kernel/ptrace.c
252
if (copy_to_user(datap, &child->thread.fp,
arch/m68k/kernel/ptrace.c
258
if (copy_from_user(&child->thread.fp, datap,
arch/m68k/kernel/ptrace.c
82
addr = &task->thread.usp;
arch/m68k/kernel/ptrace.c
84
addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
arch/m68k/kernel/ptrace.c
89
long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
arch/m68k/kernel/signal.c
1097
current->thread.esp0 = (unsigned long) regs;
arch/m68k/kernel/signal.c
263
memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
arch/m68k/kernel/signal.c
264
memcpy(current->thread.fp, sc->sc_fpregs, 24);
arch/m68k/kernel/signal.c
342
if (__copy_from_user(current->thread.fpcntl,
arch/m68k/kernel/signal.c
346
if (__copy_from_user(current->thread.fp,
arch/m68k/kernel/signal.c
435
memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
arch/m68k/kernel/signal.c
436
memcpy(sc->sc_fpregs, current->thread.fp, 24);
arch/m68k/kernel/signal.c
493
current->thread.fpcntl, 12);
arch/m68k/kernel/signal.c
496
current->thread.fp, 96);
arch/m68k/kernel/signal.c
668
current->thread.esp0 = (unsigned long)&new->ptregs;
arch/m68k/kernel/traps.c
1141
current->thread.esp0 = ssp;
arch/m68k/kernel/traps.c
234
if (wba != current->thread.faddr)
arch/m68k/kernel/traps.c
352
current->thread.signo = SIGBUS;
arch/m68k/kernel/traps.c
353
current->thread.faddr = fp->un.fmt7.faddr;
arch/m68k/kernel/traps.c
757
current->thread.esp0 = (unsigned long) fp;
arch/m68k/kernel/traps.c
940
stack = (unsigned long *)task->thread.esp0;
arch/m68k/mm/fault.c
191
current->thread.signo = SIGBUS;
arch/m68k/mm/fault.c
192
current->thread.faddr = address;
arch/m68k/mm/fault.c
196
current->thread.signo = SIGBUS;
arch/m68k/mm/fault.c
197
current->thread.code = BUS_ADRERR;
arch/m68k/mm/fault.c
198
current->thread.faddr = address;
arch/m68k/mm/fault.c
204
current->thread.signo = SIGSEGV;
arch/m68k/mm/fault.c
205
current->thread.code = SEGV_MAPERR;
arch/m68k/mm/fault.c
206
current->thread.faddr = address;
arch/m68k/mm/fault.c
210
current->thread.signo = SIGSEGV;
arch/m68k/mm/fault.c
211
current->thread.code = SEGV_ACCERR;
arch/m68k/mm/fault.c
212
current->thread.faddr = address;
arch/m68k/mm/fault.c
29
signo = current->thread.signo;
arch/m68k/mm/fault.c
30
si_code = current->thread.code;
arch/m68k/mm/fault.c
31
addr = (void __user *)current->thread.faddr;
arch/microblaze/include/asm/mmu_context_mm.h
117
tsk->thread.pgdir = next->pgd;
arch/microblaze/include/asm/mmu_context_mm.h
130
current->thread.pgdir = mm->pgd;
arch/microblaze/kernel/asm-offsets.c
80
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
arch/microblaze/kernel/kgdb.c
97
unsigned long *pt_regb = (unsigned long *)(p->thread.regs);
arch/microblaze/mm/fault.c
175
struct pt_regs *uregs = current->thread.regs;
arch/mips/cavium-octeon/cpu.c
31
prefetch(&current->thread.cp2);
arch/mips/cavium-octeon/cpu.c
36
octeon_cop2_restore(&(current->thread.cp2));
arch/mips/cavium-octeon/octeon-crypto.c
36
octeon_cop2_save(&(current->thread.cp2));
arch/mips/include/asm/asmmacro-32.h
16
.macro fpu_save_single thread tmp=t0
arch/mips/include/asm/asmmacro-32.h
40
.macro fpu_restore_single thread tmp=t0
arch/mips/include/asm/asmmacro-32.h
64
.macro cpu_save_nonscratch thread
arch/mips/include/asm/asmmacro-32.h
77
.macro cpu_restore_nonscratch thread
arch/mips/include/asm/asmmacro-64.h
17
.macro cpu_save_nonscratch thread
arch/mips/include/asm/asmmacro-64.h
30
.macro cpu_restore_nonscratch thread
arch/mips/include/asm/asmmacro.h
108
.macro fpu_save_16odd thread
arch/mips/include/asm/asmmacro.h
132
.macro fpu_save_double thread status tmp
arch/mips/include/asm/asmmacro.h
143
.macro fpu_restore_16even thread tmp=t0
arch/mips/include/asm/asmmacro.h
167
.macro fpu_restore_16odd thread
arch/mips/include/asm/asmmacro.h
191
.macro fpu_restore_double thread status tmp
arch/mips/include/asm/asmmacro.h
524
.macro msa_save_all thread
arch/mips/include/asm/asmmacro.h
568
.macro msa_restore_all thread
arch/mips/include/asm/asmmacro.h
84
.macro fpu_save_16even thread tmp=t0
arch/mips/include/asm/cop2.h
19
#define cop2_save(r) octeon_cop2_save(&(r)->thread.cp2)
arch/mips/include/asm/cop2.h
20
#define cop2_restore(r) octeon_cop2_restore(&(r)->thread.cp2)
arch/mips/include/asm/dsp.h
41
tsk->thread.dsp.dspr[0] = mfhi1(); \
arch/mips/include/asm/dsp.h
42
tsk->thread.dsp.dspr[1] = mflo1(); \
arch/mips/include/asm/dsp.h
43
tsk->thread.dsp.dspr[2] = mfhi2(); \
arch/mips/include/asm/dsp.h
44
tsk->thread.dsp.dspr[3] = mflo2(); \
arch/mips/include/asm/dsp.h
45
tsk->thread.dsp.dspr[4] = mfhi3(); \
arch/mips/include/asm/dsp.h
46
tsk->thread.dsp.dspr[5] = mflo3(); \
arch/mips/include/asm/dsp.h
47
tsk->thread.dsp.dspcontrol = rddsp(DSP_MASK); \
arch/mips/include/asm/dsp.h
58
mthi1(tsk->thread.dsp.dspr[0]); \
arch/mips/include/asm/dsp.h
59
mtlo1(tsk->thread.dsp.dspr[1]); \
arch/mips/include/asm/dsp.h
60
mthi2(tsk->thread.dsp.dspr[2]); \
arch/mips/include/asm/dsp.h
61
mtlo2(tsk->thread.dsp.dspr[3]); \
arch/mips/include/asm/dsp.h
62
mthi3(tsk->thread.dsp.dspr[4]); \
arch/mips/include/asm/dsp.h
63
mtlo3(tsk->thread.dsp.dspr[5]); \
arch/mips/include/asm/dsp.h
64
wrdsp(tsk->thread.dsp.dspcontrol, DSP_MASK); \
arch/mips/include/asm/dsp.h
78
tsk->thread.dsp.dspr; \
arch/mips/include/asm/elf.h
336
current->thread.abi = &mips_abi; \
arch/mips/include/asm/elf.h
354
current->thread.abi = &mips_abi_n32; \
arch/mips/include/asm/elf.h
369
current->thread.abi = &mips_abi_32; \
arch/mips/include/asm/elf.h
404
current->thread.abi = &mips_abi; \
arch/mips/include/asm/fpu.h
132
if (current->thread.fpu.fcr31 & FPU_CSR_NAN2008) {
arch/mips/include/asm/fpu.h
184
tsk->thread.fpu.fcr31 =
arch/mips/include/asm/fpu.h
228
memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
arch/mips/include/asm/fpu.h
263
return tsk->thread.fpu.fpr;
arch/mips/include/asm/switch_to.h
60
prev->cpus_mask = prev->thread.user_cpus_allowed; \
arch/mips/include/asm/switch_to.h
62
next->thread.emulated_fp = 0; \
arch/mips/include/asm/switch_to.h
90
unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
arch/mips/include/asm/switch_to.h
95
next->thread.fpu.fcr31 &= ~fcr31; \
arch/mips/kernel/asm-offsets.c
124
OFFSET(THREAD_REG16, task_struct, thread.reg16);
arch/mips/kernel/asm-offsets.c
125
OFFSET(THREAD_REG17, task_struct, thread.reg17);
arch/mips/kernel/asm-offsets.c
126
OFFSET(THREAD_REG18, task_struct, thread.reg18);
arch/mips/kernel/asm-offsets.c
127
OFFSET(THREAD_REG19, task_struct, thread.reg19);
arch/mips/kernel/asm-offsets.c
128
OFFSET(THREAD_REG20, task_struct, thread.reg20);
arch/mips/kernel/asm-offsets.c
129
OFFSET(THREAD_REG21, task_struct, thread.reg21);
arch/mips/kernel/asm-offsets.c
130
OFFSET(THREAD_REG22, task_struct, thread.reg22);
arch/mips/kernel/asm-offsets.c
131
OFFSET(THREAD_REG23, task_struct, thread.reg23);
arch/mips/kernel/asm-offsets.c
132
OFFSET(THREAD_REG29, task_struct, thread.reg29);
arch/mips/kernel/asm-offsets.c
133
OFFSET(THREAD_REG30, task_struct, thread.reg30);
arch/mips/kernel/asm-offsets.c
134
OFFSET(THREAD_REG31, task_struct, thread.reg31);
arch/mips/kernel/asm-offsets.c
136
thread.cp0_status);
arch/mips/kernel/asm-offsets.c
139
thread.cp0_badvaddr);
arch/mips/kernel/asm-offsets.c
141
thread.cp0_baduaddr);
arch/mips/kernel/asm-offsets.c
143
thread.error_code);
arch/mips/kernel/asm-offsets.c
144
OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
arch/mips/kernel/asm-offsets.c
152
OFFSET(THREAD_FPU, task_struct, thread.fpu);
arch/mips/kernel/asm-offsets.c
154
OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
arch/mips/kernel/asm-offsets.c
155
OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
arch/mips/kernel/asm-offsets.c
156
OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]);
arch/mips/kernel/asm-offsets.c
157
OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]);
arch/mips/kernel/asm-offsets.c
158
OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]);
arch/mips/kernel/asm-offsets.c
159
OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]);
arch/mips/kernel/asm-offsets.c
160
OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]);
arch/mips/kernel/asm-offsets.c
161
OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]);
arch/mips/kernel/asm-offsets.c
162
OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]);
arch/mips/kernel/asm-offsets.c
163
OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]);
arch/mips/kernel/asm-offsets.c
164
OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]);
arch/mips/kernel/asm-offsets.c
165
OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]);
arch/mips/kernel/asm-offsets.c
166
OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]);
arch/mips/kernel/asm-offsets.c
167
OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]);
arch/mips/kernel/asm-offsets.c
168
OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]);
arch/mips/kernel/asm-offsets.c
169
OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]);
arch/mips/kernel/asm-offsets.c
170
OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]);
arch/mips/kernel/asm-offsets.c
171
OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]);
arch/mips/kernel/asm-offsets.c
172
OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]);
arch/mips/kernel/asm-offsets.c
173
OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]);
arch/mips/kernel/asm-offsets.c
174
OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]);
arch/mips/kernel/asm-offsets.c
175
OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]);
arch/mips/kernel/asm-offsets.c
176
OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]);
arch/mips/kernel/asm-offsets.c
177
OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]);
arch/mips/kernel/asm-offsets.c
178
OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]);
arch/mips/kernel/asm-offsets.c
179
OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]);
arch/mips/kernel/asm-offsets.c
180
OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]);
arch/mips/kernel/asm-offsets.c
181
OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]);
arch/mips/kernel/asm-offsets.c
182
OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]);
arch/mips/kernel/asm-offsets.c
183
OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]);
arch/mips/kernel/asm-offsets.c
184
OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
arch/mips/kernel/asm-offsets.c
185
OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
arch/mips/kernel/asm-offsets.c
187
OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
arch/mips/kernel/asm-offsets.c
188
OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
arch/mips/kernel/asm-offsets.c
327
OFFSET(THREAD_CP2, task_struct, thread.cp2);
arch/mips/kernel/asm-offsets.c
330
OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg);
arch/mips/kernel/branch.c
156
fcr31 = current->thread.fpu.fcr31;
arch/mips/kernel/branch.c
696
bit = get_fpr32(&current->thread.fpu.fpr[reg], 0) & 0x1;
arch/mips/kernel/branch.c
714
fcr31 = current->thread.fpu.fcr31;
arch/mips/kernel/elf.c
318
t->thread.fpu.fcr31 = c->fpu_csr31;
arch/mips/kernel/elf.c
322
t->thread.fpu.fcr31 &= ~FPU_CSR_NAN2008;
arch/mips/kernel/elf.c
324
t->thread.fpu.fcr31 &= ~FPU_CSR_ABS2008;
arch/mips/kernel/elf.c
328
t->thread.fpu.fcr31 |= FPU_CSR_NAN2008;
arch/mips/kernel/elf.c
330
t->thread.fpu.fcr31 |= FPU_CSR_ABS2008;
arch/mips/kernel/kgdb.c
146
memcpy((void *)&current->thread.fpu.fcr31, mem,
arch/mips/kernel/kgdb.c
154
memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
arch/mips/kernel/kgdb.c
181
memcpy(mem, (void *)&current->thread.fpu.fcr31,
arch/mips/kernel/kgdb.c
190
memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
arch/mips/kernel/kgdb.c
238
*(ptr++) = p->thread.reg16;
arch/mips/kernel/kgdb.c
239
*(ptr++) = p->thread.reg17;
arch/mips/kernel/kgdb.c
240
*(ptr++) = p->thread.reg18;
arch/mips/kernel/kgdb.c
241
*(ptr++) = p->thread.reg19;
arch/mips/kernel/kgdb.c
242
*(ptr++) = p->thread.reg20;
arch/mips/kernel/kgdb.c
243
*(ptr++) = p->thread.reg21;
arch/mips/kernel/kgdb.c
244
*(ptr++) = p->thread.reg22;
arch/mips/kernel/kgdb.c
245
*(ptr++) = p->thread.reg23;
arch/mips/kernel/kgdb.c
252
*(ptr++) = p->thread.reg29;
arch/mips/kernel/kgdb.c
253
*(ptr++) = p->thread.reg30;
arch/mips/kernel/kgdb.c
254
*(ptr++) = p->thread.reg31;
arch/mips/kernel/kgdb.c
256
*(ptr++) = p->thread.cp0_status;
arch/mips/kernel/kgdb.c
274
*(ptr++) = p->thread.reg31;
arch/mips/kernel/mips-mt-fpaff.c
116
cpumask_copy(&p->thread.user_cpus_allowed, new_mask);
arch/mips/kernel/mips-mt-fpaff.c
181
cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
arch/mips/kernel/mips-r2-to-r6-emul.c
1178
err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
arch/mips/kernel/mips-r2-to-r6-emul.c
1185
*fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
arch/mips/kernel/mips-r2-to-r6-emul.c
1186
current->thread.fpu.fcr31 &= ~res;
arch/mips/kernel/mips-r2-to-r6-emul.c
1199
current->thread.cp0_baduaddr = (unsigned long)fault_addr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1209
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1282
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1356
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1426
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1501
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1620
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1739
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1857
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1969
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
1974
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
2025
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
2030
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
204
csr = current->thread.fpu.fcr31;
arch/mips/kernel/mips-r2-to-r6-emul.c
2088
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
2093
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
2149
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
2154
current->thread.cp0_baduaddr = vaddr;
arch/mips/kernel/mips-r2-to-r6-emul.c
227
csr = current->thread.fpu.fcr31;
arch/mips/kernel/process.c
123
p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
arch/mips/kernel/process.c
139
unsigned long status = p->thread.cp0_status;
arch/mips/kernel/process.c
141
p->thread.reg16 = (unsigned long)args->fn;
arch/mips/kernel/process.c
142
p->thread.reg17 = (unsigned long)args->fn_arg;
arch/mips/kernel/process.c
143
p->thread.reg29 = childksp;
arch/mips/kernel/process.c
144
p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
arch/mips/kernel/process.c
162
p->thread.reg29 = (unsigned long) childregs;
arch/mips/kernel/process.c
163
p->thread.reg31 = (unsigned long) ret_from_fork;
arch/mips/kernel/process.c
168
atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
arch/mips/kernel/process.c
531
struct thread_struct *t = &tsk->thread;
arch/mips/kernel/process.c
64
atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
arch/mips/kernel/process.c
673
sp = task->thread.reg29 + schedule_mfi.frame_size;
arch/mips/kernel/process.c
693
if (current->thread.abi) {
arch/mips/kernel/process.c
694
top -= PAGE_ALIGN(current->thread.abi->vdso->size);
arch/mips/kernel/ptrace.c
1138
tmp = child->thread.fpu.fcr31;
arch/mips/kernel/ptrace.c
1183
tmp = child->thread.dsp.dspcontrol;
arch/mips/kernel/ptrace.c
1271
child->thread.dsp.dspcontrol = data;
arch/mips/kernel/ptrace.c
148
__put_user(child->thread.watch.mips3264.watchlo[i],
arch/mips/kernel/ptrace.c
150
__put_user(child->thread.watch.mips3264.watchhi[i] &
arch/mips/kernel/ptrace.c
200
child->thread.watch.mips3264.watchlo[i] = lt[i];
arch/mips/kernel/ptrace.c
202
child->thread.watch.mips3264.watchhi[i] = ht[i];
arch/mips/kernel/ptrace.c
354
fcr31 = child->thread.fpu.fcr31;
arch/mips/kernel/ptrace.c
356
child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
arch/mips/kernel/ptrace.c
376
__put_user(child->thread.fpu.fcr31, data + 64);
arch/mips/kernel/ptrace.c
416
membuf_write(to, &target->thread.fpu,
arch/mips/kernel/ptrace.c
432
membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
arch/mips/kernel/ptrace.c
444
if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
arch/mips/kernel/ptrace.c
449
membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
arch/mips/kernel/ptrace.c
464
&target->thread.fpu,
arch/mips/kernel/ptrace.c
489
set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
arch/mips/kernel/ptrace.c
524
if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
arch/mips/kernel/ptrace.c
616
membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
arch/mips/kernel/ptrace.c
629
.fcsr = target->thread.fpu.fcr31,
arch/mips/kernel/ptrace.c
631
.msacsr = target->thread.fpu.msacsr,
arch/mips/kernel/ptrace.c
640
} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
arch/mips/kernel/ptrace.c
642
membuf_write(&to, &target->thread.fpu.fpr, wr_size);
arch/mips/kernel/ptrace.c
646
sizeof(target->thread.fpu.fpr[0]));
arch/mips/kernel/ptrace.c
664
if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
arch/mips/kernel/ptrace.c
667
&target->thread.fpu.fpr,
arch/mips/kernel/ptrace.c
672
sizeof(target->thread.fpu.fpr[0]));
arch/mips/kernel/ptrace.c
677
&target->thread.fpu.fpr[i],
arch/mips/kernel/ptrace.c
686
target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
arch/mips/kernel/ptrace.c
687
target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
arch/mips/kernel/ptrace.c
713
dspregs[i] = target->thread.dsp.dspr[i];
arch/mips/kernel/ptrace.c
714
dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
arch/mips/kernel/ptrace.c
749
target->thread.dsp.dspr[i] = (s32)dspregs[i];
arch/mips/kernel/ptrace.c
752
target->thread.dsp.dspcontrol = (s32)dspregs[i];
arch/mips/kernel/ptrace.c
779
dspregs[i] = target->thread.dsp.dspr[i];
arch/mips/kernel/ptrace.c
780
dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
arch/mips/kernel/ptrace.c
815
target->thread.dsp.dspr[i] = dspregs[i];
arch/mips/kernel/ptrace.c
818
target->thread.dsp.dspcontrol = dspregs[i];
arch/mips/kernel/ptrace32.c
117
tmp = child->thread.fpu.fcr31;
arch/mips/kernel/ptrace32.c
157
tmp = child->thread.dsp.dspcontrol;
arch/mips/kernel/ptrace32.c
215
memset(&child->thread.fpu, ~0,
arch/mips/kernel/ptrace32.c
216
sizeof(child->thread.fpu));
arch/mips/kernel/ptrace32.c
217
child->thread.fpu.fcr31 = 0;
arch/mips/kernel/ptrace32.c
233
child->thread.fpu.fcr31 = data;
arch/mips/kernel/ptrace32.c
262
child->thread.dsp.dspcontrol = data;
arch/mips/kernel/signal.c
102
set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
arch/mips/kernel/signal.c
104
err |= __get_user(current->thread.fpu.fcr31, csr);
arch/mips/kernel/signal.c
128
struct mips_abi *abi = current->thread.abi;
arch/mips/kernel/signal.c
137
struct mips_abi *abi = current->thread.abi;
arch/mips/kernel/signal.c
195
err = __put_user(current->thread.fpu.msacsr, &msa->csr);
arch/mips/kernel/signal.c
198
val = get_fpr64(&current->thread.fpu.fpr[i], 1);
arch/mips/kernel/signal.c
241
current->thread.fpu.msacsr = csr;
arch/mips/kernel/signal.c
245
set_fpr64(&current->thread.fpu.fpr[i], 1, val);
arch/mips/kernel/signal.c
327
struct mips_abi *abi = current->thread.abi;
arch/mips/kernel/signal.c
380
struct mips_abi *abi = current->thread.abi;
arch/mips/kernel/signal.c
73
struct mips_abi *abi = current->thread.abi;
arch/mips/kernel/signal.c
82
__put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
arch/mips/kernel/signal.c
820
struct mips_abi *abi = current->thread.abi;
arch/mips/kernel/signal.c
85
err |= __put_user(current->thread.fpu.fcr31, csr);
arch/mips/kernel/signal.c
885
regs->regs[2] = current->thread.abi->restart;
arch/mips/kernel/signal.c
92
struct mips_abi *abi = current->thread.abi;
arch/mips/kernel/stacktrace.c
86
regs->regs[29] = tsk->thread.reg29;
arch/mips/kernel/stacktrace.c
88
regs->cp0_epc = tsk->thread.reg31;
arch/mips/kernel/traps.c
1037
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
arch/mips/kernel/traps.c
1079
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
arch/mips/kernel/traps.c
1085
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
arch/mips/kernel/traps.c
1091
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
arch/mips/kernel/traps.c
1097
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
arch/mips/kernel/traps.c
1125
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
arch/mips/kernel/traps.c
1180
&current->thread.cp0_baduaddr,
arch/mips/kernel/traps.c
1189
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
arch/mips/kernel/traps.c
1191
if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
arch/mips/kernel/traps.c
1295
write_msa_csr(current->thread.fpu.msacsr);
arch/mips/kernel/traps.c
1349
write_msa_csr(current->thread.fpu.msacsr);
arch/mips/kernel/traps.c
1386
current->thread.fpu.fcr31);
arch/mips/kernel/traps.c
1478
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
arch/mips/kernel/traps.c
1485
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
arch/mips/kernel/traps.c
1486
current->thread.fpu.fcr31 &= ~fcr31;
arch/mips/kernel/traps.c
1514
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
arch/mips/kernel/traps.c
1516
current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
arch/mips/kernel/traps.c
221
regs.regs[29] = task->thread.reg29;
arch/mips/kernel/traps.c
223
regs.cp0_epc = task->thread.reg31;
arch/mips/kernel/traps.c
403
if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
arch/mips/kernel/traps.c
489
if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
arch/mips/kernel/traps.c
854
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
arch/mips/kernel/traps.c
861
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
arch/mips/kernel/traps.c
862
current->thread.fpu.fcr31 &= ~fcr31;
arch/mips/kernel/traps.c
883
if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
arch/mips/kernel/traps.c
906
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
arch/mips/kernel/traps.c
913
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
arch/mips/kernel/traps.c
914
current->thread.fpu.fcr31 &= ~fcr31;
arch/mips/kernel/traps.c
939
((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
arch/mips/kernel/traps.c
948
current->thread.user_cpus_allowed
arch/mips/kernel/traps.c
975
if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
arch/mips/kernel/traps.c
980
if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
arch/mips/kernel/unaligned.c
1535
current->thread.cp0_baduaddr = regs->cp0_badvaddr;
arch/mips/kernel/unaligned.c
447
res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
arch/mips/kernel/unaligned.c
479
fpr = &current->thread.fpu.fpr[wd];
arch/mips/kernel/unaligned.c
968
res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
arch/mips/kernel/uprobes.c
119
utask->autask.saved_trap_nr = current->thread.trap_nr;
arch/mips/kernel/uprobes.c
120
current->thread.trap_nr = UPROBE_TRAP_NR;
arch/mips/kernel/uprobes.c
130
current->thread.trap_nr = utask->autask.saved_trap_nr;
arch/mips/kernel/uprobes.c
148
if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
arch/mips/kernel/uprobes.c
194
current->thread.trap_nr = utask->autask.saved_trap_nr;
arch/mips/kernel/vdso.c
79
struct mips_vdso_image *image = current->thread.abi->vdso;
arch/mips/kernel/watch.c
20
struct mips3264_watch_reg_state *watches = &t->thread.watch.mips3264;
arch/mips/kernel/watch.c
53
&current->thread.watch.mips3264;
arch/mips/loongson64/cop2-ex.c
131
value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);
arch/mips/loongson64/cop2-ex.c
137
value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0);
arch/mips/loongson64/cop2-ex.c
203
set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
arch/mips/loongson64/cop2-ex.c
219
set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
arch/mips/loongson64/cop2-ex.c
283
value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
arch/mips/loongson64/cop2-ex.c
301
value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
arch/mips/loongson64/cop2-ex.c
98
set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0, value);
arch/mips/loongson64/cop2-ex.c
99
set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
arch/mips/math-emu/cp1emu.c
1191
fpr = &current->thread.fpu.fpr[MIPSInst_RT(ir)];
arch/mips/math-emu/cp1emu.c
706
fpr = &current->thread.fpu.fpr[insn.i_format.rt];
arch/mips/math-emu/cp1emu.c
736
fcr31 = current->thread.fpu.fcr31;
arch/mips/math-emu/dsemul.c
154
fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);
arch/mips/math-emu/dsemul.c
180
fr_idx = atomic_read(&current->thread.bd_emu_frame);
arch/mips/math-emu/dsemul.c
193
regs->cp0_epc = current->thread.bd_emu_branch_pc;
arch/mips/math-emu/dsemul.c
195
regs->cp0_epc = current->thread.bd_emu_cont_pc;
arch/mips/math-emu/dsemul.c
197
atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
arch/mips/math-emu/dsemul.c
246
fr_idx = atomic_read(&current->thread.bd_emu_frame);
arch/mips/math-emu/dsemul.c
282
current->thread.bd_emu_branch_pc = branch_pc;
arch/mips/math-emu/dsemul.c
283
current->thread.bd_emu_cont_pc = cont_pc;
arch/mips/math-emu/dsemul.c
284
atomic_set(&current->thread.bd_emu_frame, fr_idx);
arch/mips/math-emu/dsemul.c
301
xcp->cp0_epc = current->thread.bd_emu_cont_pc;
arch/mips/math-emu/ieee754.h
170
#define ieee754_csr (*(struct _ieee754_csr *)(&current->thread.fpu.fcr31))
arch/mips/mm/fault.c
197
tsk->thread.cp0_badvaddr = address;
arch/mips/mm/fault.c
198
tsk->thread.error_code = write;
arch/mips/mm/fault.c
215
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
arch/mips/mm/fault.c
223
current->thread.cp0_baduaddr = address;
arch/mips/mm/fault.c
270
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
arch/mips/mm/fault.c
271
tsk->thread.cp0_badvaddr = address;
arch/mips/mm/fault.c
63
current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
arch/nios2/include/asm/processor.h
70
#define KSTK_EIP(tsk) ((tsk)->thread.kregs->ea)
arch/nios2/include/asm/processor.h
71
#define KSTK_ESP(tsk) ((tsk)->thread.kregs->sp)
arch/nios2/kernel/asm-offsets.c
18
OFFSET(TASK_THREAD, task_struct, thread);
arch/nios2/kernel/kgdb.c
100
gdb_regs[GDB_SP] = p->thread.kregs->sp;
arch/nios2/kernel/kgdb.c
101
gdb_regs[GDB_PC] = p->thread.kregs->ea;
arch/nios2/kernel/process.c
123
p->thread.ksp = (unsigned long) childstack;
arch/nios2/kernel/process.c
124
p->thread.kregs = childregs;
arch/nios2/kernel/process.c
136
p->thread.kregs = childregs;
arch/nios2/kernel/process.c
137
p->thread.ksp = (unsigned long) childstack;
arch/nios2/kernel/process.c
228
fp = ((struct switch_stack *)p->thread.ksp)->fp; /* ;dgt2 */
arch/nios2/kernel/setup.c
169
init_task.thread.kregs = &fake_regs;
arch/nios2/kernel/signal.c
240
current->thread.kregs = regs;
arch/nios2/kernel/traps.c
68
stack = (unsigned long *)task->thread.ksp;
arch/openrisc/include/asm/fpu.h
10
task->thread.fpcsr = mfspr(SPR_FPCSR);
arch/openrisc/include/asm/fpu.h
15
mtspr(SPR_FPCSR, task->thread.fpcsr);
arch/openrisc/kernel/asm-offsets.c
43
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
arch/openrisc/kernel/ptrace.c
101
return membuf_store(&to, target->thread.fpcsr);
arch/openrisc/kernel/ptrace.c
111
&target->thread.fpcsr, 0, 4);
arch/openrisc/kernel/signal.c
48
err = __copy_from_user(&current->thread.fpcsr, &sc->fpcsr, sizeof(unsigned long));
arch/openrisc/kernel/signal.c
65
err = __copy_to_user(&sc->fpcsr, &current->thread.fpcsr, sizeof(unsigned long));
arch/openrisc/kernel/traps.c
189
fpcsr = current->thread.fpcsr;
arch/openrisc/kernel/traps.c
203
current->thread.fpcsr &= ~SPR_FPCSR_ALLF;
arch/parisc/include/asm/elf.h
238
current->thread.map_base = DEFAULT_MAP_BASE; \
arch/parisc/include/asm/elf.h
239
current->thread.task_size = DEFAULT_TASK_SIZE; \
arch/parisc/include/asm/elf.h
248
current->thread.map_base = DEFAULT_MAP_BASE32; \
arch/parisc/include/asm/elf.h
249
current->thread.task_size = DEFAULT_TASK_SIZE32; \
arch/parisc/include/asm/processor.h
112
#define task_pt_regs(tsk) ((struct pt_regs *)&((tsk)->thread.regs))
arch/parisc/include/asm/processor.h
124
(task)->thread.flags = (((task)->thread.flags & ~PARISC_UAC_MASK) \
arch/parisc/include/asm/processor.h
132
put_user(((task)->thread.flags & PARISC_UAC_MASK) \
arch/parisc/include/asm/processor.h
27
#define TASK_SIZE_OF(tsk) ((tsk)->thread.task_size)
arch/parisc/include/asm/processor.h
274
#define KSTK_EIP(tsk) ((tsk)->thread.regs.iaoq[0])
arch/parisc/include/asm/processor.h
275
#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
arch/parisc/include/asm/processor.h
29
#define TASK_UNMAPPED_BASE (current->thread.map_base)
arch/parisc/kernel/asm-offsets.c
100
DEFINE(TASK_PT_FR17, offsetof(struct task_struct, thread.regs.fr[17]));
arch/parisc/kernel/asm-offsets.c
101
DEFINE(TASK_PT_FR18, offsetof(struct task_struct, thread.regs.fr[18]));
arch/parisc/kernel/asm-offsets.c
102
DEFINE(TASK_PT_FR19, offsetof(struct task_struct, thread.regs.fr[19]));
arch/parisc/kernel/asm-offsets.c
103
DEFINE(TASK_PT_FR20, offsetof(struct task_struct, thread.regs.fr[20]));
arch/parisc/kernel/asm-offsets.c
104
DEFINE(TASK_PT_FR21, offsetof(struct task_struct, thread.regs.fr[21]));
arch/parisc/kernel/asm-offsets.c
105
DEFINE(TASK_PT_FR22, offsetof(struct task_struct, thread.regs.fr[22]));
arch/parisc/kernel/asm-offsets.c
106
DEFINE(TASK_PT_FR23, offsetof(struct task_struct, thread.regs.fr[23]));
arch/parisc/kernel/asm-offsets.c
107
DEFINE(TASK_PT_FR24, offsetof(struct task_struct, thread.regs.fr[24]));
arch/parisc/kernel/asm-offsets.c
108
DEFINE(TASK_PT_FR25, offsetof(struct task_struct, thread.regs.fr[25]));
arch/parisc/kernel/asm-offsets.c
109
DEFINE(TASK_PT_FR26, offsetof(struct task_struct, thread.regs.fr[26]));
arch/parisc/kernel/asm-offsets.c
110
DEFINE(TASK_PT_FR27, offsetof(struct task_struct, thread.regs.fr[27]));
arch/parisc/kernel/asm-offsets.c
111
DEFINE(TASK_PT_FR28, offsetof(struct task_struct, thread.regs.fr[28]));
arch/parisc/kernel/asm-offsets.c
112
DEFINE(TASK_PT_FR29, offsetof(struct task_struct, thread.regs.fr[29]));
arch/parisc/kernel/asm-offsets.c
113
DEFINE(TASK_PT_FR30, offsetof(struct task_struct, thread.regs.fr[30]));
arch/parisc/kernel/asm-offsets.c
114
DEFINE(TASK_PT_FR31, offsetof(struct task_struct, thread.regs.fr[31]));
arch/parisc/kernel/asm-offsets.c
115
DEFINE(TASK_PT_SR0, offsetof(struct task_struct, thread.regs.sr[ 0]));
arch/parisc/kernel/asm-offsets.c
116
DEFINE(TASK_PT_SR1, offsetof(struct task_struct, thread.regs.sr[ 1]));
arch/parisc/kernel/asm-offsets.c
117
DEFINE(TASK_PT_SR2, offsetof(struct task_struct, thread.regs.sr[ 2]));
arch/parisc/kernel/asm-offsets.c
118
DEFINE(TASK_PT_SR3, offsetof(struct task_struct, thread.regs.sr[ 3]));
arch/parisc/kernel/asm-offsets.c
119
DEFINE(TASK_PT_SR4, offsetof(struct task_struct, thread.regs.sr[ 4]));
arch/parisc/kernel/asm-offsets.c
120
DEFINE(TASK_PT_SR5, offsetof(struct task_struct, thread.regs.sr[ 5]));
arch/parisc/kernel/asm-offsets.c
121
DEFINE(TASK_PT_SR6, offsetof(struct task_struct, thread.regs.sr[ 6]));
arch/parisc/kernel/asm-offsets.c
122
DEFINE(TASK_PT_SR7, offsetof(struct task_struct, thread.regs.sr[ 7]));
arch/parisc/kernel/asm-offsets.c
123
DEFINE(TASK_PT_IASQ0, offsetof(struct task_struct, thread.regs.iasq[0]));
arch/parisc/kernel/asm-offsets.c
124
DEFINE(TASK_PT_IASQ1, offsetof(struct task_struct, thread.regs.iasq[1]));
arch/parisc/kernel/asm-offsets.c
125
DEFINE(TASK_PT_IAOQ0, offsetof(struct task_struct, thread.regs.iaoq[0]));
arch/parisc/kernel/asm-offsets.c
126
DEFINE(TASK_PT_IAOQ1, offsetof(struct task_struct, thread.regs.iaoq[1]));
arch/parisc/kernel/asm-offsets.c
127
DEFINE(TASK_PT_CR27, offsetof(struct task_struct, thread.regs.cr27));
arch/parisc/kernel/asm-offsets.c
128
DEFINE(TASK_PT_ORIG_R28, offsetof(struct task_struct, thread.regs.orig_r28));
arch/parisc/kernel/asm-offsets.c
129
DEFINE(TASK_PT_KSP, offsetof(struct task_struct, thread.regs.ksp));
arch/parisc/kernel/asm-offsets.c
130
DEFINE(TASK_PT_KPC, offsetof(struct task_struct, thread.regs.kpc));
arch/parisc/kernel/asm-offsets.c
131
DEFINE(TASK_PT_SAR, offsetof(struct task_struct, thread.regs.sar));
arch/parisc/kernel/asm-offsets.c
132
DEFINE(TASK_PT_IIR, offsetof(struct task_struct, thread.regs.iir));
arch/parisc/kernel/asm-offsets.c
133
DEFINE(TASK_PT_ISR, offsetof(struct task_struct, thread.regs.isr));
arch/parisc/kernel/asm-offsets.c
134
DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
arch/parisc/kernel/asm-offsets.c
50
DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
arch/parisc/kernel/asm-offsets.c
51
DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
arch/parisc/kernel/asm-offsets.c
52
DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1]));
arch/parisc/kernel/asm-offsets.c
53
DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2]));
arch/parisc/kernel/asm-offsets.c
54
DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3]));
arch/parisc/kernel/asm-offsets.c
55
DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4]));
arch/parisc/kernel/asm-offsets.c
56
DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5]));
arch/parisc/kernel/asm-offsets.c
57
DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6]));
arch/parisc/kernel/asm-offsets.c
58
DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7]));
arch/parisc/kernel/asm-offsets.c
59
DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8]));
arch/parisc/kernel/asm-offsets.c
60
DEFINE(TASK_PT_GR9, offsetof(struct task_struct, thread.regs.gr[ 9]));
arch/parisc/kernel/asm-offsets.c
61
DEFINE(TASK_PT_GR10, offsetof(struct task_struct, thread.regs.gr[10]));
arch/parisc/kernel/asm-offsets.c
62
DEFINE(TASK_PT_GR11, offsetof(struct task_struct, thread.regs.gr[11]));
arch/parisc/kernel/asm-offsets.c
63
DEFINE(TASK_PT_GR12, offsetof(struct task_struct, thread.regs.gr[12]));
arch/parisc/kernel/asm-offsets.c
64
DEFINE(TASK_PT_GR13, offsetof(struct task_struct, thread.regs.gr[13]));
arch/parisc/kernel/asm-offsets.c
65
DEFINE(TASK_PT_GR14, offsetof(struct task_struct, thread.regs.gr[14]));
arch/parisc/kernel/asm-offsets.c
66
DEFINE(TASK_PT_GR15, offsetof(struct task_struct, thread.regs.gr[15]));
arch/parisc/kernel/asm-offsets.c
67
DEFINE(TASK_PT_GR16, offsetof(struct task_struct, thread.regs.gr[16]));
arch/parisc/kernel/asm-offsets.c
68
DEFINE(TASK_PT_GR17, offsetof(struct task_struct, thread.regs.gr[17]));
arch/parisc/kernel/asm-offsets.c
69
DEFINE(TASK_PT_GR18, offsetof(struct task_struct, thread.regs.gr[18]));
arch/parisc/kernel/asm-offsets.c
70
DEFINE(TASK_PT_GR19, offsetof(struct task_struct, thread.regs.gr[19]));
arch/parisc/kernel/asm-offsets.c
71
DEFINE(TASK_PT_GR20, offsetof(struct task_struct, thread.regs.gr[20]));
arch/parisc/kernel/asm-offsets.c
72
DEFINE(TASK_PT_GR21, offsetof(struct task_struct, thread.regs.gr[21]));
arch/parisc/kernel/asm-offsets.c
73
DEFINE(TASK_PT_GR22, offsetof(struct task_struct, thread.regs.gr[22]));
arch/parisc/kernel/asm-offsets.c
74
DEFINE(TASK_PT_GR23, offsetof(struct task_struct, thread.regs.gr[23]));
arch/parisc/kernel/asm-offsets.c
75
DEFINE(TASK_PT_GR24, offsetof(struct task_struct, thread.regs.gr[24]));
arch/parisc/kernel/asm-offsets.c
76
DEFINE(TASK_PT_GR25, offsetof(struct task_struct, thread.regs.gr[25]));
arch/parisc/kernel/asm-offsets.c
77
DEFINE(TASK_PT_GR26, offsetof(struct task_struct, thread.regs.gr[26]));
arch/parisc/kernel/asm-offsets.c
78
DEFINE(TASK_PT_GR27, offsetof(struct task_struct, thread.regs.gr[27]));
arch/parisc/kernel/asm-offsets.c
79
DEFINE(TASK_PT_GR28, offsetof(struct task_struct, thread.regs.gr[28]));
arch/parisc/kernel/asm-offsets.c
80
DEFINE(TASK_PT_GR29, offsetof(struct task_struct, thread.regs.gr[29]));
arch/parisc/kernel/asm-offsets.c
81
DEFINE(TASK_PT_GR30, offsetof(struct task_struct, thread.regs.gr[30]));
arch/parisc/kernel/asm-offsets.c
82
DEFINE(TASK_PT_GR31, offsetof(struct task_struct, thread.regs.gr[31]));
arch/parisc/kernel/asm-offsets.c
83
DEFINE(TASK_PT_FR0, offsetof(struct task_struct, thread.regs.fr[ 0]));
arch/parisc/kernel/asm-offsets.c
84
DEFINE(TASK_PT_FR1, offsetof(struct task_struct, thread.regs.fr[ 1]));
arch/parisc/kernel/asm-offsets.c
85
DEFINE(TASK_PT_FR2, offsetof(struct task_struct, thread.regs.fr[ 2]));
arch/parisc/kernel/asm-offsets.c
86
DEFINE(TASK_PT_FR3, offsetof(struct task_struct, thread.regs.fr[ 3]));
arch/parisc/kernel/asm-offsets.c
87
DEFINE(TASK_PT_FR4, offsetof(struct task_struct, thread.regs.fr[ 4]));
arch/parisc/kernel/asm-offsets.c
88
DEFINE(TASK_PT_FR5, offsetof(struct task_struct, thread.regs.fr[ 5]));
arch/parisc/kernel/asm-offsets.c
89
DEFINE(TASK_PT_FR6, offsetof(struct task_struct, thread.regs.fr[ 6]));
arch/parisc/kernel/asm-offsets.c
90
DEFINE(TASK_PT_FR7, offsetof(struct task_struct, thread.regs.fr[ 7]));
arch/parisc/kernel/asm-offsets.c
91
DEFINE(TASK_PT_FR8, offsetof(struct task_struct, thread.regs.fr[ 8]));
arch/parisc/kernel/asm-offsets.c
92
DEFINE(TASK_PT_FR9, offsetof(struct task_struct, thread.regs.fr[ 9]));
arch/parisc/kernel/asm-offsets.c
93
DEFINE(TASK_PT_FR10, offsetof(struct task_struct, thread.regs.fr[10]));
arch/parisc/kernel/asm-offsets.c
94
DEFINE(TASK_PT_FR11, offsetof(struct task_struct, thread.regs.fr[11]));
arch/parisc/kernel/asm-offsets.c
95
DEFINE(TASK_PT_FR12, offsetof(struct task_struct, thread.regs.fr[12]));
arch/parisc/kernel/asm-offsets.c
96
DEFINE(TASK_PT_FR13, offsetof(struct task_struct, thread.regs.fr[13]));
arch/parisc/kernel/asm-offsets.c
97
DEFINE(TASK_PT_FR14, offsetof(struct task_struct, thread.regs.fr[14]));
arch/parisc/kernel/asm-offsets.c
98
DEFINE(TASK_PT_FR15, offsetof(struct task_struct, thread.regs.fr[15]));
arch/parisc/kernel/asm-offsets.c
99
DEFINE(TASK_PT_FR16, offsetof(struct task_struct, thread.regs.fr[16]));
arch/parisc/kernel/process.c
210
struct pt_regs *cregs = &(p->thread.regs);
arch/parisc/kernel/traps.c
254
if (current->thread.flags & PARISC_KERNEL_DEATH) {
arch/parisc/kernel/traps.c
259
current->thread.flags |= PARISC_KERNEL_DEATH;
arch/parisc/kernel/unaligned.c
386
if (current->thread.flags & PARISC_UAC_SIGBUS) {
arch/parisc/kernel/unaligned.c
390
if (!(current->thread.flags & PARISC_UAC_NOPRINT) &&
arch/parisc/kernel/unaligned.c
405
if (!(current->thread.flags & PARISC_UAC_NOPRINT) &&
arch/parisc/kernel/unwind.c
408
struct pt_regs *r = &t->thread.regs;
arch/powerpc/include/asm/book3s/32/kup.h
107
current->thread.kuap = (__force u32)to;
arch/powerpc/include/asm/book3s/32/kup.h
113
u32 kuap = current->thread.kuap;
arch/powerpc/include/asm/book3s/32/kup.h
120
current->thread.kuap = KUAP_NONE;
arch/powerpc/include/asm/book3s/32/kup.h
126
unsigned long flags = current->thread.kuap;
arch/powerpc/include/asm/book3s/32/kup.h
129
current->thread.kuap = KUAP_NONE;
arch/powerpc/include/asm/book3s/32/kup.h
139
current->thread.kuap = flags;
arch/powerpc/include/asm/book3s/32/kup.h
60
unsigned long kuap = current->thread.kuap;
arch/powerpc/include/asm/book3s/32/kup.h
66
current->thread.kuap = KUAP_NONE;
arch/powerpc/include/asm/book3s/32/kup.h
78
current->thread.kuap = KUAP_NONE;
arch/powerpc/include/asm/book3s/32/kup.h
85
current->thread.kuap = regs->kuap;
arch/powerpc/include/asm/book3s/32/kup.h
92
unsigned long kuap = current->thread.kuap;
arch/powerpc/include/asm/book3s/64/kup.h
218
if (current->thread.regs)
arch/powerpc/include/asm/book3s/64/kup.h
219
return current->thread.regs->amr;
arch/powerpc/include/asm/book3s/64/kup.h
225
if (current->thread.regs)
arch/powerpc/include/asm/book3s/64/kup.h
226
return current->thread.regs->iamr;
arch/powerpc/include/asm/cputhreads.h
107
void book3e_start_thread(int thread, unsigned long addr);
arch/powerpc/include/asm/cputhreads.h
108
void book3e_stop_thread(int thread);
arch/powerpc/include/asm/interrupt.h
144
unsigned long dbcr0 = current->thread.debug.dbcr0;
arch/powerpc/include/asm/nohash/kup-booke.h
41
mtspr(SPRN_PID, current->thread.pid);
arch/powerpc/include/asm/nohash/kup-booke.h
49
mtspr(SPRN_PID, current->thread.pid);
arch/powerpc/include/asm/nohash/kup-booke.h
78
uaccess_begin_booke(current->thread.pid);
arch/powerpc/include/asm/nohash/kup-booke.h
98
uaccess_begin_booke(current->thread.pid);
arch/powerpc/include/asm/processor.h
311
#define task_pt_regs(tsk) ((tsk)->thread.regs)
arch/powerpc/include/asm/processor.h
315
#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
arch/powerpc/include/asm/processor.h
316
#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
arch/powerpc/include/asm/ps3.h
503
void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
arch/powerpc/include/asm/reg_booke.h
341
#define dbcr_dac(task) ((task)->thread.debug.dbcr0)
arch/powerpc/include/asm/reg_booke.h
375
#define dbcr_iac_range(task) ((task)->thread.debug.dbcr1)
arch/powerpc/include/asm/sfp-machine.h
108
#define __FPU_FPSCR (current->thread.spefscr)
arch/powerpc/include/asm/sfp-machine.h
128
#define __FPU_FPSCR (current->thread.fp_state.fpscr)
arch/powerpc/include/asm/switch_to.h
116
t->thread.ebbrr = 0;
arch/powerpc/include/asm/switch_to.h
117
t->thread.ebbhr = 0;
arch/powerpc/include/asm/switch_to.h
118
t->thread.bescr = 0;
arch/powerpc/include/asm/switch_to.h
119
t->thread.mmcr2 = 0;
arch/powerpc/include/asm/switch_to.h
120
t->thread.mmcr0 = 0;
arch/powerpc/include/asm/switch_to.h
121
t->thread.siar = 0;
arch/powerpc/include/asm/switch_to.h
122
t->thread.sdar = 0;
arch/powerpc/include/asm/switch_to.h
123
t->thread.sier = 0;
arch/powerpc/include/asm/switch_to.h
124
t->thread.used_ebb = 0;
arch/powerpc/include/asm/tm.h
13
extern void tm_reclaim(struct thread_struct *thread,
arch/powerpc/include/asm/tm.h
16
extern void tm_recheckpoint(struct thread_struct *thread);
arch/powerpc/include/asm/tm.h
17
extern void tm_save_sprs(struct thread_struct *thread);
arch/powerpc/include/asm/tm.h
18
extern void tm_restore_sprs(struct thread_struct *thread);
arch/powerpc/kernel/align.c
117
unsigned long *evr = ¤t->thread.evr[reg];
arch/powerpc/kernel/asm-offsets.c
80
OFFSET(THREAD, task_struct, thread);
arch/powerpc/kernel/dexcr.c
117
task->thread.dexcr_onexec |= aspect;
arch/powerpc/kernel/dexcr.c
119
task->thread.dexcr_onexec &= ~aspect;
arch/powerpc/kernel/dexcr.c
19
current->thread.dexcr_onexec = mfspr(SPRN_DEXCR);
arch/powerpc/kernel/dexcr.c
70
if (aspect & task->thread.dexcr_onexec)
arch/powerpc/kernel/hw_breakpoint.c
583
struct thread_struct *t = &tsk->thread;
arch/powerpc/kernel/interrupt.c
84
unsigned long dbcr0 = current->thread.debug.dbcr0;
arch/powerpc/kernel/kgdb.c
193
struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
arch/powerpc/kernel/kgdb.c
214
PACK64(ptr, p->thread.evr[reg]);
arch/powerpc/kernel/kgdb.c
334
memcpy(mem, ¤t->thread.evr[regno-32],
arch/powerpc/kernel/kgdb.c
359
memcpy(¤t->thread.evr[regno-32], mem,
arch/powerpc/kernel/process.c
1023
extern void __tm_recheckpoint(struct thread_struct *thread);
arch/powerpc/kernel/process.c
1025
void tm_recheckpoint(struct thread_struct *thread)
arch/powerpc/kernel/process.c
1029
if (!(thread->regs->msr & MSR_TM))
arch/powerpc/kernel/process.c
1042
tm_restore_sprs(thread);
arch/powerpc/kernel/process.c
1044
__tm_recheckpoint(thread);
arch/powerpc/kernel/process.c
1065
if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
arch/powerpc/kernel/process.c
1066
tm_restore_sprs(&new->thread);
arch/powerpc/kernel/process.c
1071
new->pid, new->thread.regs->msr);
arch/powerpc/kernel/process.c
1073
tm_recheckpoint(&new->thread);
arch/powerpc/kernel/process.c
1080
new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
arch/powerpc/kernel/process.c
1095
prev->thread.load_tm++;
arch/powerpc/kernel/process.c
1097
if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
arch/powerpc/kernel/process.c
1098
prev->thread.regs->msr &= ~MSR_TM;
arch/powerpc/kernel/process.c
1133
msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
arch/powerpc/kernel/process.c
1138
current->thread.load_fp = 1;
arch/powerpc/kernel/process.c
1141
current->thread.load_vec = 1;
arch/powerpc/kernel/process.c
1197
if (!current->thread.regs)
arch/powerpc/kernel/process.c
1200
usermsr = current->thread.regs->msr;
arch/powerpc/kernel/process.c
1210
current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
arch/powerpc/kernel/process.c
1211
current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
arch/powerpc/kernel/process.c
1212
current->thread.tm_texasr = mfspr(SPRN_TEXASR);
arch/powerpc/kernel/process.c
1213
current->thread.regs->msr &= ~MSR_TM;
arch/powerpc/kernel/process.c
1221
save_sprs(¤t->thread);
arch/powerpc/kernel/process.c
1285
new_thread = &new->thread;
arch/powerpc/kernel/process.c
1286
old_thread = ¤t->thread;
arch/powerpc/kernel/process.c
1306
switch_booke_debug_regs(&new->thread.debug);
arch/powerpc/kernel/process.c
1321
save_sprs(&prev->thread);
arch/powerpc/kernel/process.c
1366
if (current->thread.regs)
arch/powerpc/kernel/process.c
1367
restore_math(current->thread.regs);
arch/powerpc/kernel/process.c
156
msr = tsk->thread.regs->msr;
arch/powerpc/kernel/process.c
1593
set_debug_reg_defaults(¤t->thread);
arch/powerpc/kernel/process.c
160
regs_set_return_msr(tsk->thread.regs, msr);
arch/powerpc/kernel/process.c
1608
if (!current->thread.regs) {
arch/powerpc/kernel/process.c
1610
current->thread.regs = regs - 1;
arch/powerpc/kernel/process.c
1614
current->thread.regs->amr = default_amr;
arch/powerpc/kernel/process.c
1615
current->thread.regs->iamr = default_iamr;
arch/powerpc/kernel/process.c
1620
current->thread.dexcr = current->thread.dexcr_onexec;
arch/powerpc/kernel/process.c
1621
mtspr(SPRN_DEXCR, current->thread.dexcr);
arch/powerpc/kernel/process.c
1668
if (t->thread.tidr)
arch/powerpc/kernel/process.c
1671
t->thread.tidr = (u16)task_pid_nr(t);
arch/powerpc/kernel/process.c
1672
mtspr(SPRN_TIDR, t->thread.tidr);
arch/powerpc/kernel/process.c
1722
p->thread.ksp_vsid = sp_vsid;
arch/powerpc/kernel/process.c
1756
p->thread.regs = NULL; /* no user register state */
arch/powerpc/kernel/process.c
179
if (tsk->thread.regs) {
arch/powerpc/kernel/process.c
1810
p->thread.regs = childregs;
arch/powerpc/kernel/process.c
1835
p->thread.ksp = sp;
arch/powerpc/kernel/process.c
1839
p->thread.ptrace_bps[i] = NULL;
arch/powerpc/kernel/process.c
1843
p->thread.fp_save_area = NULL;
arch/powerpc/kernel/process.c
1846
p->thread.vr_save_area = NULL;
arch/powerpc/kernel/process.c
1849
p->thread.kuap = KUAP_NONE;
arch/powerpc/kernel/process.c
1852
p->thread.pid = MMU_NO_CONTEXT;
arch/powerpc/kernel/process.c
1859
p->thread.dscr_inherit = current->thread.dscr_inherit;
arch/powerpc/kernel/process.c
1860
p->thread.dscr = mfspr(SPRN_DSCR);
arch/powerpc/kernel/process.c
1863
p->thread.tidr = 0;
arch/powerpc/kernel/process.c
1867
p->thread.hashkeyr = current->thread.hashkeyr;
arch/powerpc/kernel/process.c
1870
p->thread.dexcr = mfspr(SPRN_DEXCR);
arch/powerpc/kernel/process.c
189
if (tsk->thread.regs->msr & MSR_FP) {
arch/powerpc/kernel/process.c
1955
current->thread.used_vsr = 0;
arch/powerpc/kernel/process.c
1957
current->thread.load_slb = 0;
arch/powerpc/kernel/process.c
1958
current->thread.load_fp = 0;
arch/powerpc/kernel/process.c
1960
memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state));
arch/powerpc/kernel/process.c
1961
current->thread.fp_save_area = NULL;
arch/powerpc/kernel/process.c
1964
memset(¤t->thread.vr_state, 0, sizeof(current->thread.vr_state));
arch/powerpc/kernel/process.c
1965
current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
arch/powerpc/kernel/process.c
1966
current->thread.vr_save_area = NULL;
arch/powerpc/kernel/process.c
1967
current->thread.vrsave = 0;
arch/powerpc/kernel/process.c
1968
current->thread.used_vr = 0;
arch/powerpc/kernel/process.c
1969
current->thread.load_vec = 0;
arch/powerpc/kernel/process.c
1972
memset(current->thread.evr, 0, sizeof(current->thread.evr));
arch/powerpc/kernel/process.c
1973
current->thread.acc = 0;
arch/powerpc/kernel/process.c
1974
current->thread.spefscr = 0;
arch/powerpc/kernel/process.c
1975
current->thread.used_spe = 0;
arch/powerpc/kernel/process.c
1978
current->thread.tm_tfhar = 0;
arch/powerpc/kernel/process.c
1979
current->thread.tm_texasr = 0;
arch/powerpc/kernel/process.c
1980
current->thread.tm_tfiar = 0;
arch/powerpc/kernel/process.c
1981
current->thread.load_tm = 0;
arch/powerpc/kernel/process.c
1985
current->thread.hashkeyr = get_random_long();
arch/powerpc/kernel/process.c
1986
mtspr(SPRN_HASHKEYR, current->thread.hashkeyr);
arch/powerpc/kernel/process.c
1997
struct pt_regs *regs = tsk->thread.regs;
arch/powerpc/kernel/process.c
2018
tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
arch/powerpc/kernel/process.c
2019
tsk->thread.fpexc_mode = val &
arch/powerpc/kernel/process.c
2035
tsk->thread.fpexc_mode = __pack_fe01(val);
arch/powerpc/kernel/process.c
2038
| tsk->thread.fpexc_mode);
arch/powerpc/kernel/process.c
2047
if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
arch/powerpc/kernel/process.c
2062
tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
arch/powerpc/kernel/process.c
2063
val = tsk->thread.fpexc_mode;
arch/powerpc/kernel/process.c
2068
val = __unpack_fe01(tsk->thread.fpexc_mode);
arch/powerpc/kernel/process.c
2075
struct pt_regs *regs = tsk->thread.regs;
arch/powerpc/kernel/process.c
2096
struct pt_regs *regs = tsk->thread.regs;
arch/powerpc/kernel/process.c
2119
tsk->thread.align_ctl = val;
arch/powerpc/kernel/process.c
2125
return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
arch/powerpc/kernel/process.c
213
if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
arch/powerpc/kernel/process.c
223
MSR_TM_ACTIVE(current->thread.regs->msr))
arch/powerpc/kernel/process.c
2231
sp = p->thread.ksp;
arch/powerpc/kernel/process.c
2301
sp = tsk->thread.ksp;
arch/powerpc/kernel/process.c
239
msr = tsk->thread.regs->msr;
arch/powerpc/kernel/process.c
243
regs_set_return_msr(tsk->thread.regs, msr);
arch/powerpc/kernel/process.c
264
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
arch/powerpc/kernel/process.c
274
MSR_TM_ACTIVE(current->thread.regs->msr))
arch/powerpc/kernel/process.c
287
if (tsk->thread.regs) {
arch/powerpc/kernel/process.c
289
if (tsk->thread.regs->msr & MSR_VEC) {
arch/powerpc/kernel/process.c
302
unsigned long msr = tsk->thread.regs->msr;
arch/powerpc/kernel/process.c
334
if (current->thread.regs &&
arch/powerpc/kernel/process.c
335
(current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
arch/powerpc/kernel/process.c
345
MSR_TM_ACTIVE(current->thread.regs->msr))
arch/powerpc/kernel/process.c
354
if (tsk->thread.regs) {
arch/powerpc/kernel/process.c
356
if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
arch/powerpc/kernel/process.c
383
if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
arch/powerpc/kernel/process.c
392
if (tsk->thread.regs) {
arch/powerpc/kernel/process.c
394
if (tsk->thread.regs->msr & MSR_SPE) {
arch/powerpc/kernel/process.c
396
tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
arch/powerpc/kernel/process.c
425
if (!tsk->thread.regs)
arch/powerpc/kernel/process.c
430
usermsr = tsk->thread.regs->msr;
arch/powerpc/kernel/process.c
454
if (current->thread.load_fp) {
arch/powerpc/kernel/process.c
455
current->thread.load_fp++;
arch/powerpc/kernel/process.c
463
load_fp_state(¤t->thread.fp_state);
arch/powerpc/kernel/process.c
473
if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
arch/powerpc/kernel/process.c
474
current->thread.load_vec++;
arch/powerpc/kernel/process.c
482
load_vr_state(¤t->thread.vr_state);
arch/powerpc/kernel/process.c
483
current->thread.used_vr = 1;
arch/powerpc/kernel/process.c
499
current->thread.used_vsr = 1;
arch/powerpc/kernel/process.c
547
fpexc_mode = current->thread.fpexc_mode;
arch/powerpc/kernel/process.c
567
if (!tsk->thread.regs)
arch/powerpc/kernel/process.c
570
usermsr = tsk->thread.regs->msr;
arch/powerpc/kernel/process.c
593
if (tsk->thread.regs) {
arch/powerpc/kernel/process.c
597
if (tsk->thread.regs->msr & MSR_SPE)
arch/powerpc/kernel/process.c
598
tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
arch/powerpc/kernel/process.c
611
current->thread.trap_nr = TRAP_HWBKPT;
arch/powerpc/kernel/process.c
638
current->thread.hw_brk[0] = null_brk;
arch/powerpc/kernel/process.c
639
current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED;
arch/powerpc/kernel/process.c
647
info = ¤t->thread.hw_brk[i];
arch/powerpc/kernel/process.c
653
current->thread.hw_brk[i] = null_brk;
arch/powerpc/kernel/process.c
654
current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED;
arch/powerpc/kernel/process.c
661
current->thread.trap_nr = TRAP_HWBKPT;
arch/powerpc/kernel/process.c
690
static void set_debug_reg_defaults(struct thread_struct *thread)
arch/powerpc/kernel/process.c
692
thread->debug.iac1 = thread->debug.iac2 = 0;
arch/powerpc/kernel/process.c
694
thread->debug.iac3 = thread->debug.iac4 = 0;
arch/powerpc/kernel/process.c
696
thread->debug.dac1 = thread->debug.dac2 = 0;
arch/powerpc/kernel/process.c
698
thread->debug.dvc1 = thread->debug.dvc2 = 0;
arch/powerpc/kernel/process.c
700
thread->debug.dbcr0 = 0;
arch/powerpc/kernel/process.c
705
thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
arch/powerpc/kernel/process.c
711
thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
arch/powerpc/kernel/process.c
713
thread->debug.dbcr1 = 0;
arch/powerpc/kernel/process.c
751
if ((current->thread.debug.dbcr0 & DBCR0_IDM)
arch/powerpc/kernel/process.c
765
static void set_debug_reg_defaults(struct thread_struct *thread)
arch/powerpc/kernel/process.c
771
thread->hw_brk[i] = null_brk;
arch/powerpc/kernel/process.c
773
set_breakpoint(i, &thread->hw_brk[i]);
arch/powerpc/kernel/process.c
796
&new->thread.hw_brk[i])))
arch/powerpc/kernel/process.c
799
__set_breakpoint(i, &new->thread.hw_brk[i]);
arch/powerpc/kernel/process.c
91
if (tsk == current && tsk->thread.regs &&
arch/powerpc/kernel/process.c
92
MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
arch/powerpc/kernel/process.c
928
return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
arch/powerpc/kernel/process.c
94
regs_set_return_msr(&tsk->thread.ckpt_regs,
arch/powerpc/kernel/process.c
95
tsk->thread.regs->msr);
arch/powerpc/kernel/process.c
951
giveup_all(container_of(thr, struct task_struct, thread));
arch/powerpc/kernel/process.c
978
tm_reclaim_thread(¤t->thread, cause);
arch/powerpc/kernel/process.c
993
struct thread_struct *thr = &tsk->thread;
arch/powerpc/kernel/prom.c
780
init_task.thread.fscr = mfspr(SPRN_FSCR);
arch/powerpc/kernel/ptrace/ptrace-adv.c
10
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-adv.c
110
task->thread.debug.dac1 = data & ~0x3UL;
arch/powerpc/kernel/ptrace/ptrace-adv.c
112
if (task->thread.debug.dac1 == 0) {
arch/powerpc/kernel/ptrace/ptrace-adv.c
114
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
arch/powerpc/kernel/ptrace/ptrace-adv.c
115
task->thread.debug.dbcr1)) {
arch/powerpc/kernel/ptrace/ptrace-adv.c
117
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
arch/powerpc/kernel/ptrace/ptrace-adv.c
128
task->thread.debug.dbcr0 |= DBCR0_IDM;
arch/powerpc/kernel/ptrace/ptrace-adv.c
13
task->thread.debug.dbcr0 &= ~DBCR0_BT;
arch/powerpc/kernel/ptrace/ptrace-adv.c
14
task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
arch/powerpc/kernel/ptrace/ptrace-adv.c
144
int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
arch/powerpc/kernel/ptrace/ptrace-adv.c
145
int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
arch/powerpc/kernel/ptrace/ptrace-adv.c
146
int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
arch/powerpc/kernel/ptrace/ptrace-adv.c
147
int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
arch/powerpc/kernel/ptrace/ptrace-adv.c
165
child->thread.debug.iac1 = bp_info->addr;
arch/powerpc/kernel/ptrace/ptrace-adv.c
166
child->thread.debug.iac2 = bp_info->addr2;
arch/powerpc/kernel/ptrace/ptrace-adv.c
167
child->thread.debug.dbcr0 |= DBCR0_IAC1;
arch/powerpc/kernel/ptrace/ptrace-adv.c
176
child->thread.debug.iac3 = bp_info->addr;
arch/powerpc/kernel/ptrace/ptrace-adv.c
177
child->thread.debug.iac4 = bp_info->addr2;
arch/powerpc/kernel/ptrace/ptrace-adv.c
178
child->thread.debug.dbcr0 |= DBCR0_IAC3;
arch/powerpc/kernel/ptrace/ptrace-adv.c
199
child->thread.debug.iac1 = bp_info->addr;
arch/powerpc/kernel/ptrace/ptrace-adv.c
200
child->thread.debug.dbcr0 |= DBCR0_IAC1;
arch/powerpc/kernel/ptrace/ptrace-adv.c
206
child->thread.debug.iac2 = bp_info->addr;
arch/powerpc/kernel/ptrace/ptrace-adv.c
207
child->thread.debug.dbcr0 |= DBCR0_IAC2;
arch/powerpc/kernel/ptrace/ptrace-adv.c
211
child->thread.debug.iac3 = bp_info->addr;
arch/powerpc/kernel/ptrace/ptrace-adv.c
212
child->thread.debug.dbcr0 |= DBCR0_IAC3;
arch/powerpc/kernel/ptrace/ptrace-adv.c
215
child->thread.debug.iac4 = bp_info->addr;
arch/powerpc/kernel/ptrace/ptrace-adv.c
216
child->thread.debug.dbcr0 |= DBCR0_IAC4;
arch/powerpc/kernel/ptrace/ptrace-adv.c
22
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-adv.c
223
child->thread.debug.dbcr0 |= DBCR0_IDM;
arch/powerpc/kernel/ptrace/ptrace-adv.c
224
regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
arch/powerpc/kernel/ptrace/ptrace-adv.c
233
if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
arch/powerpc/kernel/ptrace/ptrace-adv.c
238
child->thread.debug.iac2 = 0;
arch/powerpc/kernel/ptrace/ptrace-adv.c
241
child->thread.debug.iac1 = 0;
arch/powerpc/kernel/ptrace/ptrace-adv.c
242
child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
arch/powerpc/kernel/ptrace/ptrace-adv.c
245
if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
arch/powerpc/kernel/ptrace/ptrace-adv.c
25
task->thread.debug.dbcr0 &= ~DBCR0_IC;
arch/powerpc/kernel/ptrace/ptrace-adv.c
251
child->thread.debug.iac2 = 0;
arch/powerpc/kernel/ptrace/ptrace-adv.c
252
child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
arch/powerpc/kernel/ptrace/ptrace-adv.c
256
if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
arch/powerpc/kernel/ptrace/ptrace-adv.c
26
task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
arch/powerpc/kernel/ptrace/ptrace-adv.c
261
child->thread.debug.iac4 = 0;
arch/powerpc/kernel/ptrace/ptrace-adv.c
264
child->thread.debug.iac3 = 0;
arch/powerpc/kernel/ptrace/ptrace-adv.c
265
child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
arch/powerpc/kernel/ptrace/ptrace-adv.c
268
if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
arch/powerpc/kernel/ptrace/ptrace-adv.c
274
child->thread.debug.iac4 = 0;
arch/powerpc/kernel/ptrace/ptrace-adv.c
275
child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
arch/powerpc/kernel/ptrace/ptrace-adv.c
305
child->thread.debug.dac1 = (unsigned long)bp_info->addr;
arch/powerpc/kernel/ptrace/ptrace-adv.c
308
child->thread.debug.dvc1 =
arch/powerpc/kernel/ptrace/ptrace-adv.c
310
child->thread.debug.dbcr2 |=
arch/powerpc/kernel/ptrace/ptrace-adv.c
316
} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
arch/powerpc/kernel/ptrace/ptrace-adv.c
326
child->thread.debug.dac2 = (unsigned long)bp_info->addr;
arch/powerpc/kernel/ptrace/ptrace-adv.c
329
child->thread.debug.dvc2 =
arch/powerpc/kernel/ptrace/ptrace-adv.c
331
child->thread.debug.dbcr2 |=
arch/powerpc/kernel/ptrace/ptrace-adv.c
339
child->thread.debug.dbcr0 |= DBCR0_IDM;
arch/powerpc/kernel/ptrace/ptrace-adv.c
34
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-adv.c
340
regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
arch/powerpc/kernel/ptrace/ptrace-adv.c
351
child->thread.debug.dac1 = 0;
arch/powerpc/kernel/ptrace/ptrace-adv.c
354
if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
arch/powerpc/kernel/ptrace/ptrace-adv.c
355
child->thread.debug.dac2 = 0;
arch/powerpc/kernel/ptrace/ptrace-adv.c
356
child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
arch/powerpc/kernel/ptrace/ptrace-adv.c
358
child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
arch/powerpc/kernel/ptrace/ptrace-adv.c
361
child->thread.debug.dvc1 = 0;
arch/powerpc/kernel/ptrace/ptrace-adv.c
368
if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
arch/powerpc/kernel/ptrace/ptrace-adv.c
371
child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
arch/powerpc/kernel/ptrace/ptrace-adv.c
374
child->thread.debug.dvc2 = 0;
arch/powerpc/kernel/ptrace/ptrace-adv.c
376
child->thread.debug.dac2 = 0;
arch/powerpc/kernel/ptrace/ptrace-adv.c
418
if (child->thread.debug.dbcr0 &
arch/powerpc/kernel/ptrace/ptrace-adv.c
423
child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
arch/powerpc/kernel/ptrace/ptrace-adv.c
425
child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
arch/powerpc/kernel/ptrace/ptrace-adv.c
426
child->thread.debug.dac1 = bp_info->addr;
arch/powerpc/kernel/ptrace/ptrace-adv.c
427
child->thread.debug.dac2 = bp_info->addr2;
arch/powerpc/kernel/ptrace/ptrace-adv.c
429
child->thread.debug.dbcr2 |= DBCR2_DAC12M;
arch/powerpc/kernel/ptrace/ptrace-adv.c
43
task->thread.debug.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
arch/powerpc/kernel/ptrace/ptrace-adv.c
431
child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
arch/powerpc/kernel/ptrace/ptrace-adv.c
433
child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
arch/powerpc/kernel/ptrace/ptrace-adv.c
434
regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
arch/powerpc/kernel/ptrace/ptrace-adv.c
47
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
arch/powerpc/kernel/ptrace/ptrace-adv.c
48
task->thread.debug.dbcr1)) {
arch/powerpc/kernel/ptrace/ptrace-adv.c
486
if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
arch/powerpc/kernel/ptrace/ptrace-adv.c
487
child->thread.debug.dbcr1)) {
arch/powerpc/kernel/ptrace/ptrace-adv.c
488
child->thread.debug.dbcr0 &= ~DBCR0_IDM;
arch/powerpc/kernel/ptrace/ptrace-adv.c
489
regs_set_return_msr(child->thread.regs,
arch/powerpc/kernel/ptrace/ptrace-adv.c
490
child->thread.regs->msr & ~MSR_DE);
arch/powerpc/kernel/ptrace/ptrace-adv.c
52
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
arch/powerpc/kernel/ptrace/ptrace-adv.c
80
return put_user(child->thread.debug.dac1, datalp);
arch/powerpc/kernel/ptrace/ptrace-adv.c
85
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-adv.c
88
struct thread_struct *thread = &task->thread;
arch/powerpc/kernel/ptrace/ptrace-altivec.c
104
vrsave.word = target->thread.vrsave;
arch/powerpc/kernel/ptrace/ptrace-altivec.c
111
target->thread.vrsave = vrsave.word;
arch/powerpc/kernel/ptrace/ptrace-altivec.c
26
return target->thread.used_vr ? regset->n : 0;
arch/powerpc/kernel/ptrace/ptrace-altivec.c
56
membuf_write(&to, &target->thread.vr_state, 33 * sizeof(vector128));
arch/powerpc/kernel/ptrace/ptrace-altivec.c
61
vrsave.word = target->thread.vrsave;
arch/powerpc/kernel/ptrace/ptrace-altivec.c
91
&target->thread.vr_state, 0,
arch/powerpc/kernel/ptrace/ptrace-fpu.c
23
*data = ((u32 *)child->thread.fp_state.fpr)[fpidx];
arch/powerpc/kernel/ptrace/ptrace-fpu.c
25
memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
arch/powerpc/kernel/ptrace/ptrace-fpu.c
27
*data = child->thread.fp_state.fpscr;
arch/powerpc/kernel/ptrace/ptrace-fpu.c
49
((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
arch/powerpc/kernel/ptrace/ptrace-fpu.c
51
memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
arch/powerpc/kernel/ptrace/ptrace-fpu.c
53
child->thread.fp_state.fpscr = data;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
12
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
121
bp = thread->ptrace_bps[0];
arch/powerpc/kernel/ptrace/ptrace-noadv.c
125
thread->ptrace_bps[0] = NULL;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
142
thread->ptrace_bps[0] = bp;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
143
thread->hw_brk[0] = hw_brk;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
154
thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
arch/powerpc/kernel/ptrace/ptrace-noadv.c
157
thread->ptrace_bps[0] = NULL;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
165
task->thread.hw_brk[0] = hw_brk;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
170
static int find_empty_ptrace_bp(struct thread_struct *thread)
arch/powerpc/kernel/ptrace/ptrace-noadv.c
175
if (!thread->ptrace_bps[i])
arch/powerpc/kernel/ptrace/ptrace-noadv.c
182
static int find_empty_hw_brk(struct thread_struct *thread)
arch/powerpc/kernel/ptrace/ptrace-noadv.c
187
if (!thread->hw_brk[i].address)
arch/powerpc/kernel/ptrace/ptrace-noadv.c
198
struct thread_struct *thread = &child->thread;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
21
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
233
i = find_empty_ptrace_bp(thread);
arch/powerpc/kernel/ptrace/ptrace-noadv.c
244
thread->ptrace_bps[i] = bp;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
246
thread->ptrace_bps[i] = NULL;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
256
i = find_empty_hw_brk(&child->thread);
arch/powerpc/kernel/ptrace/ptrace-noadv.c
263
child->thread.hw_brk[i] = brk;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
272
struct thread_struct *thread = &child->thread;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
279
bp = thread->ptrace_bps[data - 1];
arch/powerpc/kernel/ptrace/ptrace-noadv.c
282
thread->ptrace_bps[data - 1] = NULL;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
288
if (!(child->thread.hw_brk[data - 1].flags & HW_BRK_FLAG_DISABLED) &&
arch/powerpc/kernel/ptrace/ptrace-noadv.c
289
child->thread.hw_brk[data - 1].address == 0)
arch/powerpc/kernel/ptrace/ptrace-noadv.c
292
child->thread.hw_brk[data - 1].address = 0;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
293
child->thread.hw_brk[data - 1].type = 0;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
294
child->thread.hw_brk[data - 1].flags = 0;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
30
struct pt_regs *regs = task->thread.regs;
arch/powerpc/kernel/ptrace/ptrace-noadv.c
68
dabr_fake = ((child->thread.hw_brk[0].address & (~HW_BRK_TYPE_DABR)) |
arch/powerpc/kernel/ptrace/ptrace-noadv.c
69
(child->thread.hw_brk[0].type & HW_BRK_TYPE_DABR));
arch/powerpc/kernel/ptrace/ptrace-noadv.c
82
struct thread_struct *thread = &task->thread;
arch/powerpc/kernel/ptrace/ptrace-novsx.c
30
return membuf_write(&to, &target->thread.fp_state, 33 * sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-novsx.c
60
&target->thread.fp_state, 0, -1);
arch/powerpc/kernel/ptrace/ptrace-spe.c
22
return target->thread.used_spe ? regset->n : 0;
arch/powerpc/kernel/ptrace/ptrace-spe.c
30
membuf_write(&to, &target->thread.evr, sizeof(target->thread.evr));
arch/powerpc/kernel/ptrace/ptrace-spe.c
35
return membuf_write(&to, &target->thread.acc,
arch/powerpc/kernel/ptrace/ptrace-spe.c
48
&target->thread.evr,
arch/powerpc/kernel/ptrace/ptrace-spe.c
49
0, sizeof(target->thread.evr));
arch/powerpc/kernel/ptrace/ptrace-spe.c
56
&target->thread.acc,
arch/powerpc/kernel/ptrace/ptrace-spe.c
57
sizeof(target->thread.evr), -1);
arch/powerpc/kernel/ptrace/ptrace-tm.c
104
membuf_write(&to, &target->thread.ckpt_regs, sizeof(struct user_pt_regs));
arch/powerpc/kernel/ptrace/ptrace-tm.c
144
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
152
&target->thread.ckpt_regs,
arch/powerpc/kernel/ptrace/ptrace-tm.c
168
&target->thread.ckpt_regs.orig_gpr3,
arch/powerpc/kernel/ptrace/ptrace-tm.c
205
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
238
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
247
buf[i] = target->thread.TS_CKFPR(i);
arch/powerpc/kernel/ptrace/ptrace-tm.c
248
buf[32] = target->thread.ckfp_state.fpscr;
arch/powerpc/kernel/ptrace/ptrace-tm.c
28
tm_save_sprs(&tsk->thread);
arch/powerpc/kernel/ptrace/ptrace-tm.c
283
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
291
buf[i] = target->thread.TS_CKFPR(i);
arch/powerpc/kernel/ptrace/ptrace-tm.c
292
buf[32] = target->thread.ckfp_state.fpscr;
arch/powerpc/kernel/ptrace/ptrace-tm.c
299
target->thread.TS_CKFPR(i) = buf[i];
arch/powerpc/kernel/ptrace/ptrace-tm.c
300
target->thread.ckfp_state.fpscr = buf[32];
arch/powerpc/kernel/ptrace/ptrace-tm.c
317
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
34
return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
arch/powerpc/kernel/ptrace/ptrace-tm.c
354
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
362
membuf_write(&to, &target->thread.ckvr_state, 33 * sizeof(vector128));
arch/powerpc/kernel/ptrace/ptrace-tm.c
367
vrsave.word = target->thread.ckvrsave;
arch/powerpc/kernel/ptrace/ptrace-tm.c
39
task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
arch/powerpc/kernel/ptrace/ptrace-tm.c
40
task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
arch/powerpc/kernel/ptrace/ptrace-tm.c
404
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
411
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.ckvr_state,
arch/powerpc/kernel/ptrace/ptrace-tm.c
422
vrsave.word = target->thread.ckvrsave;
arch/powerpc/kernel/ptrace/ptrace-tm.c
426
target->thread.ckvrsave = vrsave.word;
arch/powerpc/kernel/ptrace/ptrace-tm.c
445
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
449
return target->thread.used_vsr ? regset->n : 0;
arch/powerpc/kernel/ptrace/ptrace-tm.c
46
set_trap(&task->thread.ckpt_regs, trap);
arch/powerpc/kernel/ptrace/ptrace-tm.c
478
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
488
buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
arch/powerpc/kernel/ptrace/ptrace-tm.c
522
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
532
buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
arch/powerpc/kernel/ptrace/ptrace-tm.c
538
target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
arch/powerpc/kernel/ptrace/ptrace-tm.c
591
membuf_write(&to, &target->thread.tm_tfhar, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-tm.c
593
membuf_write(&to, &target->thread.tm_texasr, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-tm.c
595
return membuf_write(&to, &target->thread.tm_tfiar, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-tm.c
63
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
637
&target->thread.tm_tfhar, 0, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-tm.c
642
&target->thread.tm_texasr, sizeof(u64),
arch/powerpc/kernel/ptrace/ptrace-tm.c
648
&target->thread.tm_tfiar,
arch/powerpc/kernel/ptrace/ptrace-tm.c
658
if (MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
670
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
673
return membuf_write(&to, &target->thread.tm_tar, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-tm.c
685
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
689
&target->thread.tm_tar, 0, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-tm.c
698
if (MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
711
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
714
return membuf_write(&to, &target->thread.tm_ppr, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-tm.c
726
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
730
&target->thread.tm_ppr, 0, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-tm.c
739
if (MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
751
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
754
return membuf_write(&to, &target->thread.tm_dscr, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-tm.c
766
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-tm.c
770
&target->thread.tm_dscr, 0, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-tm.c
778
&target->thread.ckpt_regs.gpr[0]);
arch/powerpc/kernel/ptrace/ptrace-tm.c
787
&target->thread.ckpt_regs.gpr[0]);
arch/powerpc/kernel/ptrace/ptrace-tm.c
97
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
arch/powerpc/kernel/ptrace/ptrace-view.c
111
return task->thread.regs->msr | task->thread.fpexc_mode;
arch/powerpc/kernel/ptrace/ptrace-view.c
116
unsigned long newmsr = (task->thread.regs->msr & ~MSR_DEBUGCHANGE) |
arch/powerpc/kernel/ptrace/ptrace-view.c
118
regs_set_return_msr(task->thread.regs, newmsr);
arch/powerpc/kernel/ptrace/ptrace-view.c
125
*data = task->thread.dscr;
arch/powerpc/kernel/ptrace/ptrace-view.c
131
task->thread.dscr = dscr;
arch/powerpc/kernel/ptrace/ptrace-view.c
132
task->thread.dscr_inherit = 1;
arch/powerpc/kernel/ptrace/ptrace-view.c
153
set_trap(task->thread.regs, trap);
arch/powerpc/kernel/ptrace/ptrace-view.c
164
if (task->thread.regs == NULL || !data)
arch/powerpc/kernel/ptrace/ptrace-view.c
188
*data = ((unsigned long *)task->thread.regs)[regno];
arch/powerpc/kernel/ptrace/ptrace-view.c
200
if (task->thread.regs == NULL)
arch/powerpc/kernel/ptrace/ptrace-view.c
212
((unsigned long *)task->thread.regs)[regno] = data;
arch/powerpc/kernel/ptrace/ptrace-view.c
225
if (target->thread.regs == NULL)
arch/powerpc/kernel/ptrace/ptrace-view.c
228
membuf_write(&to, target->thread.regs, sizeof(struct user_pt_regs));
arch/powerpc/kernel/ptrace/ptrace-view.c
245
if (target->thread.regs == NULL)
arch/powerpc/kernel/ptrace/ptrace-view.c
249
target->thread.regs,
arch/powerpc/kernel/ptrace/ptrace-view.c
265
&target->thread.regs->orig_gpr3,
arch/powerpc/kernel/ptrace/ptrace-view.c
293
if (!target->thread.regs)
arch/powerpc/kernel/ptrace/ptrace-view.c
296
return membuf_write(&to, &target->thread.regs->ppr, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-view.c
303
if (!target->thread.regs)
arch/powerpc/kernel/ptrace/ptrace-view.c
307
&target->thread.regs->ppr, 0, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-view.c
313
return membuf_write(&to, &target->thread.dscr, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-view.c
320
&target->thread.dscr, 0, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-view.c
327
return membuf_write(&to, &target->thread.tar, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-view.c
334
&target->thread.tar, 0, sizeof(u64));
arch/powerpc/kernel/ptrace/ptrace-view.c
342
if (target->thread.used_ebb)
arch/powerpc/kernel/ptrace/ptrace-view.c
358
if (!target->thread.used_ebb)
arch/powerpc/kernel/ptrace/ptrace-view.c
361
return membuf_write(&to, &target->thread.ebbrr, 3 * sizeof(unsigned long));
arch/powerpc/kernel/ptrace/ptrace-view.c
377
if (target->thread.used_ebb)
arch/powerpc/kernel/ptrace/ptrace-view.c
380
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.ebbrr,
arch/powerpc/kernel/ptrace/ptrace-view.c
385
&target->thread.ebbhr, sizeof(unsigned long),
arch/powerpc/kernel/ptrace/ptrace-view.c
390
&target->thread.bescr, 2 * sizeof(unsigned long),
arch/powerpc/kernel/ptrace/ptrace-view.c
415
return membuf_write(&to, &target->thread.siar, 5 * sizeof(unsigned long));
arch/powerpc/kernel/ptrace/ptrace-view.c
433
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.siar,
arch/powerpc/kernel/ptrace/ptrace-view.c
438
&target->thread.sdar, sizeof(unsigned long),
arch/powerpc/kernel/ptrace/ptrace-view.c
443
&target->thread.sier, 2 * sizeof(unsigned long),
arch/powerpc/kernel/ptrace/ptrace-view.c
448
&target->thread.mmcr2, 3 * sizeof(unsigned long),
arch/powerpc/kernel/ptrace/ptrace-view.c
453
&target->thread.mmcr0, 4 * sizeof(unsigned long),
arch/powerpc/kernel/ptrace/ptrace-view.c
472
membuf_store(&to, (u64)lower_32_bits(target->thread.dexcr));
arch/powerpc/kernel/ptrace/ptrace-view.c
496
return membuf_store(&to, target->thread.hashkeyr);
arch/powerpc/kernel/ptrace/ptrace-view.c
506
return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.hashkeyr,
arch/powerpc/kernel/ptrace/ptrace-view.c
528
membuf_store(&to, target->thread.regs->amr);
arch/powerpc/kernel/ptrace/ptrace-view.c
529
membuf_store(&to, target->thread.regs->iamr);
arch/powerpc/kernel/ptrace/ptrace-view.c
562
target->thread.regs->amr = (new_amr & default_uamor) |
arch/powerpc/kernel/ptrace/ptrace-view.c
563
(target->thread.regs->amr & ~default_uamor);
arch/powerpc/kernel/ptrace/ptrace-view.c
822
if (target->thread.regs == NULL)
arch/powerpc/kernel/ptrace/ptrace-view.c
826
&target->thread.regs->gpr[0]);
arch/powerpc/kernel/ptrace/ptrace-view.c
834
if (target->thread.regs == NULL)
arch/powerpc/kernel/ptrace/ptrace-view.c
838
&target->thread.regs->gpr[0]);
arch/powerpc/kernel/ptrace/ptrace-vsx.c
109
buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
arch/powerpc/kernel/ptrace/ptrace-vsx.c
139
buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
arch/powerpc/kernel/ptrace/ptrace-vsx.c
145
target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
arch/powerpc/kernel/ptrace/ptrace-vsx.c
31
buf[i] = target->thread.TS_FPR(i);
arch/powerpc/kernel/ptrace/ptrace-vsx.c
32
buf[32] = target->thread.fp_state.fpscr;
arch/powerpc/kernel/ptrace/ptrace-vsx.c
59
buf[i] = target->thread.TS_FPR(i);
arch/powerpc/kernel/ptrace/ptrace-vsx.c
60
buf[32] = target->thread.fp_state.fpscr;
arch/powerpc/kernel/ptrace/ptrace-vsx.c
68
target->thread.TS_FPR(i) = buf[i];
arch/powerpc/kernel/ptrace/ptrace-vsx.c
69
target->thread.fp_state.fpscr = buf[32];
arch/powerpc/kernel/ptrace/ptrace-vsx.c
82
return target->thread.used_vsr ? regset->n : 0;
arch/powerpc/kernel/ptrace/ptrace.c
55
if ((addr & (sizeof(long) - 1)) || !child->thread.regs)
arch/powerpc/kernel/ptrace/ptrace.c
76
if ((addr & (sizeof(long) - 1)) || !child->thread.regs)
arch/powerpc/kernel/ptrace/ptrace32.c
138
tmp = child->thread.fp_state.fpr[numReg - PT_FPR0][0];
arch/powerpc/kernel/ptrace/ptrace32.c
197
((unsigned int *)child->thread.fp_state.fpr)
arch/powerpc/kernel/ptrace/ptrace32.c
240
tmp = &child->thread.fp_state.fpr[numReg - PT_FPR0][0];
arch/powerpc/kernel/ptrace/ptrace32.c
257
ret = put_user(child->thread.debug.dac1, (u32 __user *)data);
arch/powerpc/kernel/ptrace/ptrace32.c
260
(child->thread.hw_brk[0].address & (~HW_BRK_TYPE_DABR)) |
arch/powerpc/kernel/ptrace/ptrace32.c
261
(child->thread.hw_brk[0].type & HW_BRK_TYPE_DABR));
arch/powerpc/kernel/ptrace/ptrace32.c
97
tmp = ((unsigned int *)child->thread.fp_state.fpr)
arch/powerpc/kernel/setup_64.c
233
init_task.thread.fscr &= ~FSCR_SCV;
arch/powerpc/kernel/setup_64.c
239
init_task.thread.fscr &= ~FSCR_SCV;
arch/powerpc/kernel/signal.c
102
task->thread.TS_CKFPR(i) = buf[i];
arch/powerpc/kernel/signal.c
103
task->thread.ckfp_state.fpscr = buf[i];
arch/powerpc/kernel/signal.c
116
buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
arch/powerpc/kernel/signal.c
129
task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
arch/powerpc/kernel/signal.c
254
check_syscall_restart(tsk->thread.regs, &ksig.ka, ksig.sig > 0);
arch/powerpc/kernel/signal.c
259
set_trap_norestart(tsk->thread.regs);
arch/powerpc/kernel/signal.c
272
if (tsk->thread.hw_brk[i].address && tsk->thread.hw_brk[i].type)
arch/powerpc/kernel/signal.c
273
__set_breakpoint(i, &tsk->thread.hw_brk[i]);
arch/powerpc/kernel/signal.c
278
thread_change_pc(tsk, tsk->thread.regs);
arch/powerpc/kernel/signal.c
280
rseq_signal_deliver(&ksig, tsk->thread.regs);
arch/powerpc/kernel/signal.c
291
set_trap_norestart(tsk->thread.regs);
arch/powerpc/kernel/signal.c
304
BUG_ON(regs != current->thread.regs);
arch/powerpc/kernel/signal.c
335
struct pt_regs *regs = tsk->thread.regs;
arch/powerpc/kernel/signal.c
34
buf[i] = task->thread.TS_FPR(i);
arch/powerpc/kernel/signal.c
345
ret = tsk->thread.ckpt_regs.gpr[1];
arch/powerpc/kernel/signal.c
35
buf[i] = task->thread.fp_state.fpscr;
arch/powerpc/kernel/signal.c
48
task->thread.TS_FPR(i) = buf[i];
arch/powerpc/kernel/signal.c
49
task->thread.fp_state.fpscr = buf[i];
arch/powerpc/kernel/signal.c
62
buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
arch/powerpc/kernel/signal.c
75
task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
arch/powerpc/kernel/signal.c
88
buf[i] = task->thread.TS_CKFPR(i);
arch/powerpc/kernel/signal.c
89
buf[i] = task->thread.ckfp_state.fpscr;
arch/powerpc/kernel/signal.h
105
unsafe_put_user(__t->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET], \
arch/powerpc/kernel/signal.h
115
unsafe_get_user(__t->thread.TS_CKFPR(i), &buf[i], label);\
arch/powerpc/kernel/signal.h
116
unsafe_get_user(__t->thread.ckfp_state.fpscr, &buf[i], failed); \
arch/powerpc/kernel/signal.h
125
unsafe_get_user(__t->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET], \
arch/powerpc/kernel/signal.h
132
unsafe_copy_to_user(to, (task)->thread.fp_state.fpr, \
arch/powerpc/kernel/signal.h
136
unsafe_copy_from_user((task)->thread.fp_state.fpr, from, \
arch/powerpc/kernel/signal.h
142
return __copy_to_user(to, task->thread.fp_state.fpr,
arch/powerpc/kernel/signal.h
149
return __copy_from_user(task->thread.fp_state.fpr, from,
arch/powerpc/kernel/signal.h
155
unsafe_copy_to_user(to, (task)->thread.ckfp_state.fpr, \
arch/powerpc/kernel/signal.h
160
return __copy_to_user(to, task->thread.ckfp_state.fpr,
arch/powerpc/kernel/signal.h
167
return __copy_from_user(task->thread.ckfp_state.fpr, from,
arch/powerpc/kernel/signal.h
54
unsafe_put_user(__t->thread.TS_FPR(i), &buf[i], label); \
arch/powerpc/kernel/signal.h
55
unsafe_put_user(__t->thread.fp_state.fpscr, &buf[i], label); \
arch/powerpc/kernel/signal.h
64
unsafe_put_user(__t->thread.fp_state.fpr[i][TS_VSRLOWOFFSET], \
arch/powerpc/kernel/signal.h
74
unsafe_get_user(__t->thread.TS_FPR(i), &buf[i], label); \
arch/powerpc/kernel/signal.h
75
unsafe_get_user(__t->thread.fp_state.fpscr, &buf[i], label); \
arch/powerpc/kernel/signal.h
84
unsafe_get_user(__t->thread.fp_state.fpr[i][TS_VSRLOWOFFSET], \
arch/powerpc/kernel/signal.h
95
unsafe_put_user(__t->thread.TS_CKFPR(i), &buf[i], label);\
arch/powerpc/kernel/signal.h
96
unsafe_put_user(__t->thread.ckfp_state.fpscr, &buf[i], label); \
arch/powerpc/kernel/signal_32.c
1201
unsigned long new_dbcr0 = current->thread.debug.dbcr0;
arch/powerpc/kernel/signal_32.c
1216
current->thread.debug.dbcr1)) {
arch/powerpc/kernel/signal_32.c
1251
current->thread.debug.dbcr0 = new_dbcr0;
arch/powerpc/kernel/signal_32.c
252
if (current->thread.used_vr)
arch/powerpc/kernel/signal_32.c
255
current->thread.vrsave = mfspr(SPRN_VRSAVE);
arch/powerpc/kernel/signal_32.c
258
if (current->thread.used_vsr && ctx_has_vsx_region)
arch/powerpc/kernel/signal_32.c
262
if (current->thread.used_spe)
arch/powerpc/kernel/signal_32.c
278
if (current->thread.used_vr) {
arch/powerpc/kernel/signal_32.c
279
unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
arch/powerpc/kernel/signal_32.c
293
unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
arch/powerpc/kernel/signal_32.c
310
if (current->thread.used_vsr && ctx_has_vsx_region) {
arch/powerpc/kernel/signal_32.c
317
if (current->thread.used_spe) {
arch/powerpc/kernel/signal_32.c
318
unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
arch/powerpc/kernel/signal_32.c
327
unsafe_put_user(current->thread.spefscr,
arch/powerpc/kernel/signal_32.c
365
current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
arch/powerpc/kernel/signal_32.c
373
unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
arch/powerpc/kernel/signal_32.c
385
if (current->thread.used_vr) {
arch/powerpc/kernel/signal_32.c
386
unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
arch/powerpc/kernel/signal_32.c
390
&current->thread.vr_state,
arch/powerpc/kernel/signal_32.c
394
&current->thread.ckvr_state,
arch/powerpc/kernel/signal_32.c
408
unsafe_put_user(current->thread.ckvrsave,
arch/powerpc/kernel/signal_32.c
411
unsafe_put_user(current->thread.vrsave,
arch/powerpc/kernel/signal_32.c
414
unsafe_put_user(current->thread.ckvrsave,
arch/powerpc/kernel/signal_32.c
429
if (current->thread.used_vsr) {
arch/powerpc/kernel/signal_32.c
501
unsafe_copy_from_user(¤t->thread.vr_state, &sr->mc_vregs,
arch/powerpc/kernel/signal_32.c
503
current->thread.used_vr = true;
arch/powerpc/kernel/signal_32.c
504
} else if (current->thread.used_vr)
arch/powerpc/kernel/signal_32.c
505
memset(¤t->thread.vr_state, 0,
arch/powerpc/kernel/signal_32.c
509
unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
arch/powerpc/kernel/signal_32.c
511
mtspr(SPRN_VRSAVE, current->thread.vrsave);
arch/powerpc/kernel/signal_32.c
527
current->thread.used_vsr = true;
arch/powerpc/kernel/signal_32.c
528
} else if (current->thread.used_vsr)
arch/powerpc/kernel/signal_32.c
530
current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
arch/powerpc/kernel/signal_32.c
544
BUILD_BUG_ON(sizeof(current->thread.spe) != ELF_NEVRREG * sizeof(u32));
arch/powerpc/kernel/signal_32.c
548
unsafe_copy_from_user(¤t->thread.spe, &sr->mc_vregs,
arch/powerpc/kernel/signal_32.c
549
sizeof(current->thread.spe), failed);
arch/powerpc/kernel/signal_32.c
550
current->thread.used_spe = true;
arch/powerpc/kernel/signal_32.c
551
} else if (current->thread.used_spe)
arch/powerpc/kernel/signal_32.c
552
memset(&current->thread.spe, 0, sizeof(current->thread.spe));
arch/powerpc/kernel/signal_32.c
555
unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
arch/powerpc/kernel/signal_32.c
591
unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
arch/powerpc/kernel/signal_32.c
592
unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
arch/powerpc/kernel/signal_32.c
601
unsafe_copy_from_user(¤t->thread.ckvr_state, &sr->mc_vregs,
arch/powerpc/kernel/signal_32.c
603
current->thread.used_vr = true;
arch/powerpc/kernel/signal_32.c
604
} else if (current->thread.used_vr) {
arch/powerpc/kernel/signal_32.c
605
memset(¤t->thread.vr_state, 0,
arch/powerpc/kernel/signal_32.c
607
memset(¤t->thread.ckvr_state, 0,
arch/powerpc/kernel/signal_32.c
612
unsafe_get_user(current->thread.ckvrsave,
arch/powerpc/kernel/signal_32.c
615
mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
arch/powerpc/kernel/signal_32.c
628
current->thread.used_vsr = true;
arch/powerpc/kernel/signal_32.c
629
} else if (current->thread.used_vsr)
arch/powerpc/kernel/signal_32.c
631
current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
arch/powerpc/kernel/signal_32.c
632
current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
arch/powerpc/kernel/signal_32.c
644
unsafe_copy_from_user(¤t->thread.vr_state, &tm_sr->mc_vregs,
arch/powerpc/kernel/signal_32.c
648
unsafe_get_user(current->thread.vrsave,
arch/powerpc/kernel/signal_32.c
659
current->thread.used_vsr = true;
arch/powerpc/kernel/signal_32.c
695
current->thread.tm_texasr |= TEXASR_FS;
arch/powerpc/kernel/signal_32.c
697
tm_recheckpoint(&current->thread);
arch/powerpc/kernel/signal_32.c
702
load_fp_state(&current->thread.fp_state);
arch/powerpc/kernel/signal_32.c
703
regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
arch/powerpc/kernel/signal_32.c
706
load_vr_state(&current->thread.vr_state);
arch/powerpc/kernel/signal_32.c
744
struct pt_regs *regs = tsk->thread.regs;
arch/powerpc/kernel/signal_32.c
803
tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
arch/powerpc/kernel/signal_32.c
844
struct pt_regs *regs = tsk->thread.regs;
arch/powerpc/kernel/signal_32.c
894
tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
arch/powerpc/kernel/signal_64.c
100
if (tsk->thread.used_vsr)
arch/powerpc/kernel/signal_64.c
129
struct pt_regs *regs = tsk->thread.regs;
arch/powerpc/kernel/signal_64.c
140
if (tsk->thread.used_vr) {
arch/powerpc/kernel/signal_64.c
142
unsafe_copy_to_user(v_regs, &tsk->thread.vr_state,
arch/powerpc/kernel/signal_64.c
152
unsafe_put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
arch/powerpc/kernel/signal_64.c
170
if (tsk->thread.used_vsr && ctx_has_vsx_region) {
arch/powerpc/kernel/signal_64.c
224
struct pt_regs *regs = tsk->thread.regs;
arch/powerpc/kernel/signal_64.c
237
msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
arch/powerpc/kernel/signal_64.c
244
if (tsk->thread.used_vr) {
arch/powerpc/kernel/signal_64.c
246
err |= __copy_to_user(v_regs, &tsk->thread.ckvr_state,
arch/powerpc/kernel/signal_64.c
253
&tsk->thread.vr_state,
arch/powerpc/kernel/signal_64.c
257
&tsk->thread.ckvr_state,
arch/powerpc/kernel/signal_64.c
269
tsk->thread.ckvrsave = mfspr(SPRN_VRSAVE);
arch/powerpc/kernel/signal_64.c
270
err |= __put_user(tsk->thread.ckvrsave, (u32 __user *)&v_regs[33]);
arch/powerpc/kernel/signal_64.c
272
err |= __put_user(tsk->thread.vrsave,
arch/powerpc/kernel/signal_64.c
275
err |= __put_user(tsk->thread.ckvrsave,
arch/powerpc/kernel/signal_64.c
296
if (tsk->thread.used_vsr) {
arch/powerpc/kernel/signal_64.c
318
&tsk->thread.ckpt_regs, GP_REGS_SIZE);
arch/powerpc/kernel/signal_64.c
345
struct pt_regs *regs = tsk->thread.regs;
arch/powerpc/kernel/signal_64.c
395
unsafe_copy_from_user(&tsk->thread.vr_state, v_regs,
arch/powerpc/kernel/signal_64.c
397
tsk->thread.used_vr = true;
arch/powerpc/kernel/signal_64.c
398
} else if (tsk->thread.used_vr) {
arch/powerpc/kernel/signal_64.c
399
memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
arch/powerpc/kernel/signal_64.c
403
unsafe_get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
arch/powerpc/kernel/signal_64.c
405
tsk->thread.vrsave = 0;
arch/powerpc/kernel/signal_64.c
407
mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
arch/powerpc/kernel/signal_64.c
420
tsk->thread.used_vsr = true;
arch/powerpc/kernel/signal_64.c
423
tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
arch/powerpc/kernel/signal_64.c
446
struct pt_regs *regs = tsk->thread.regs;
arch/powerpc/kernel/signal_64.c
458
err |= __copy_from_user(&tsk->thread.ckpt_regs, sc->gp_regs,
arch/powerpc/kernel/signal_64.c
470
err |= __get_user(tsk->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);
arch/powerpc/kernel/signal_64.c
486
err |= __get_user(tsk->thread.ckpt_regs.ctr,
arch/powerpc/kernel/signal_64.c
488
err |= __get_user(tsk->thread.ckpt_regs.link,
arch/powerpc/kernel/signal_64.c
490
err |= __get_user(tsk->thread.ckpt_regs.xer,
arch/powerpc/kernel/signal_64.c
492
err |= __get_user(tsk->thread.ckpt_regs.ccr,
arch/powerpc/kernel/signal_64.c
519
err |= __copy_from_user(&tsk->thread.ckvr_state, v_regs,
arch/powerpc/kernel/signal_64.c
521
err |= __copy_from_user(&tsk->thread.vr_state, tm_v_regs,
arch/powerpc/kernel/signal_64.c
523
current->thread.used_vr = true;
arch/powerpc/kernel/signal_64.c
525
else if (tsk->thread.used_vr) {
arch/powerpc/kernel/signal_64.c
526
memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
arch/powerpc/kernel/signal_64.c
527
memset(&tsk->thread.ckvr_state, 0, 33 * sizeof(vector128));
arch/powerpc/kernel/signal_64.c
531
err |= __get_user(tsk->thread.ckvrsave,
arch/powerpc/kernel/signal_64.c
533
err |= __get_user(tsk->thread.vrsave,
arch/powerpc/kernel/signal_64.c
537
tsk->thread.vrsave = 0;
arch/powerpc/kernel/signal_64.c
538
tsk->thread.ckvrsave = 0;
arch/powerpc/kernel/signal_64.c
541
mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
arch/powerpc/kernel/signal_64.c
557
tsk->thread.used_vsr = true;
arch/powerpc/kernel/signal_64.c
560
tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
arch/powerpc/kernel/signal_64.c
561
tsk->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
arch/powerpc/kernel/signal_64.c
567
tsk->thread.tm_texasr |= TEXASR_FS;
arch/powerpc/kernel/signal_64.c
597
tm_recheckpoint(&tsk->thread);
arch/powerpc/kernel/signal_64.c
601
load_fp_state(&tsk->thread.fp_state);
arch/powerpc/kernel/signal_64.c
602
regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));
arch/powerpc/kernel/signal_64.c
605
load_vr_state(&tsk->thread.vr_state);
arch/powerpc/kernel/signal_64.c
830
regs_set_return_msr(current->thread.regs,
arch/powerpc/kernel/signal_64.c
831
current->thread.regs->msr & ~MSR_TS_MASK);
arch/powerpc/kernel/signal_64.c
863
struct pt_regs *regs = tsk->thread.regs;
arch/powerpc/kernel/signal_64.c
91
if (tsk->thread.used_vr)
arch/powerpc/kernel/signal_64.c
921
tsk->thread.fp_state.fpscr = 0;
arch/powerpc/kernel/signal_64.c
94
tsk->thread.vrsave = mfspr(SPRN_VRSAVE);
arch/powerpc/kernel/stacktrace.c
41
sp = task->thread.ksp;
arch/powerpc/kernel/stacktrace.c
87
sp = task->thread.ksp;
arch/powerpc/kernel/syscalls.c
115
regs_set_return_msr(current->thread.regs,
arch/powerpc/kernel/syscalls.c
116
current->thread.regs->msr ^ MSR_LE);
arch/powerpc/kernel/sysfs.c
161
if (!current->thread.dscr_inherit) {
arch/powerpc/kernel/sysfs.c
162
current->thread.dscr = *(unsigned long *)val;
arch/powerpc/kernel/traps.c
1202
code = __parse_fpscr(current->thread.fp_state.fpscr);
arch/powerpc/kernel/traps.c
1431
current->thread.dscr = regs->gpr[rd];
arch/powerpc/kernel/traps.c
1432
current->thread.dscr_inherit = 1;
arch/powerpc/kernel/traps.c
1433
mtspr(SPRN_DSCR, current->thread.dscr);
arch/powerpc/kernel/traps.c
1463
code = __parse_fpscr(current->thread.fp_state.fpscr);
arch/powerpc/kernel/traps.c
1658
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
arch/powerpc/kernel/traps.c
1728
current->thread.load_tm++;
arch/powerpc/kernel/traps.c
1731
tm_restore_sprs(&current->thread);
arch/powerpc/kernel/traps.c
1808
current->thread.dscr = regs->gpr[rd];
arch/powerpc/kernel/traps.c
1809
current->thread.dscr_inherit = 1;
arch/powerpc/kernel/traps.c
1810
current->thread.fscr |= FSCR_DSCR;
arch/powerpc/kernel/traps.c
1811
mtspr(SPRN_FSCR, current->thread.fscr);
arch/powerpc/kernel/traps.c
1883
current->thread.load_fp = 1;
arch/powerpc/kernel/traps.c
1888
tm_recheckpoint(&current->thread);
arch/powerpc/kernel/traps.c
1901
current->thread.load_vec = 1;
arch/powerpc/kernel/traps.c
1902
tm_recheckpoint(&current->thread);
arch/powerpc/kernel/traps.c
1903
current->thread.used_vr = 1;
arch/powerpc/kernel/traps.c
1919
current->thread.used_vsr = 1;
arch/powerpc/kernel/traps.c
1924
current->thread.load_vec = 1;
arch/powerpc/kernel/traps.c
1925
current->thread.load_fp = 1;
arch/powerpc/kernel/traps.c
1927
tm_recheckpoint(&current->thread);
arch/powerpc/kernel/traps.c
1978
current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
arch/powerpc/kernel/traps.c
1989
current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
arch/powerpc/kernel/traps.c
1995
current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
arch/powerpc/kernel/traps.c
2000
current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
arch/powerpc/kernel/traps.c
2006
current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
arch/powerpc/kernel/traps.c
2016
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
arch/powerpc/kernel/traps.c
2017
current->thread.debug.dbcr1))
arch/powerpc/kernel/traps.c
2021
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
arch/powerpc/kernel/traps.c
2024
mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
arch/powerpc/kernel/traps.c
2031
current->thread.debug.dbsr = debug_status;
arch/powerpc/kernel/traps.c
2048
current->thread.debug.dbcr0 &= ~DBCR0_BT;
arch/powerpc/kernel/traps.c
2049
current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
arch/powerpc/kernel/traps.c
2083
current->thread.debug.dbcr0 &= ~DBCR0_IC;
arch/powerpc/kernel/traps.c
2084
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
arch/powerpc/kernel/traps.c
2085
current->thread.debug.dbcr1))
arch/powerpc/kernel/traps.c
2089
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
arch/powerpc/kernel/traps.c
2127
current->thread.vr_state.vscr.u[3] |= 0x10000;
arch/powerpc/kernel/traps.c
2159
spefscr = current->thread.spefscr;
arch/powerpc/kernel/traps.c
2160
fpexc_mode = current->thread.fpexc_mode;
arch/powerpc/kernel/traps.c
354
current->thread.trap_nr = code;
arch/powerpc/kernel/traps.c
581
#define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC)
arch/powerpc/kernel/traps.c
582
#define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC)
arch/powerpc/kernel/traps.c
925
vdst = (u8 *)&current->thread.vr_state.vr[t];
arch/powerpc/kernel/traps.c
927
vdst = (u8 *)&current->thread.fp_state.fpr[t][0];
arch/powerpc/kernel/uprobes.c
116
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
arch/powerpc/kernel/uprobes.c
118
current->thread.trap_nr = utask->autask.saved_trap_nr;
arch/powerpc/kernel/uprobes.c
172
current->thread.trap_nr = utask->autask.saved_trap_nr;
arch/powerpc/kernel/uprobes.c
68
autask->saved_trap_nr = current->thread.trap_nr;
arch/powerpc/kernel/uprobes.c
69
current->thread.trap_nr = UPROBE_TRAP_NR;
arch/powerpc/kernel/uprobes.c
99
if (t->thread.trap_nr != UPROBE_TRAP_NR)
arch/powerpc/kernel/vecemu.c
280
vrs = current->thread.vr_state.vr;
arch/powerpc/kernel/vecemu.c
329
&current->thread.vr_state.vscr.u[3]);
arch/powerpc/kernel/vecemu.c
334
&current->thread.vr_state.vscr.u[3]);
arch/powerpc/kvm/book3s_hv.c
5111
if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
arch/powerpc/kvm/book3s_hv.c
5112
(current->thread.regs->msr & MSR_TM)) {
arch/powerpc/kvm/book3s_hv.c
5113
if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
arch/powerpc/kvm/book3s_hv_p9_entry.c
169
current->thread.tidr != vcpu->arch.tid)
arch/powerpc/kvm/book3s_hv_p9_entry.c
170
mtspr(SPRN_TIDR, current->thread.tidr);
arch/powerpc/kvm/book3s_hv_p9_entry.c
177
if (current->thread.fscr != vcpu->arch.fscr)
arch/powerpc/kvm/book3s_hv_p9_entry.c
178
mtspr(SPRN_FSCR, current->thread.fscr);
arch/powerpc/kvm/book3s_hv_p9_entry.c
179
if (current->thread.dscr != vcpu->arch.dscr)
arch/powerpc/kvm/book3s_hv_p9_entry.c
18
current->thread.vrsave != vcpu->arch.vrsave)
arch/powerpc/kvm/book3s_hv_p9_entry.c
180
mtspr(SPRN_DSCR, current->thread.dscr);
arch/powerpc/kvm/book3s_hv_p9_entry.c
190
vcpu->arch.vrsave != current->thread.vrsave)
arch/powerpc/kvm/book3s_hv_p9_entry.c
191
mtspr(SPRN_VRSAVE, current->thread.vrsave);
arch/powerpc/kvm/book3s_hv_p9_entry.c
194
if (vcpu->arch.bescr != current->thread.bescr)
arch/powerpc/kvm/book3s_hv_p9_entry.c
195
mtspr(SPRN_BESCR, current->thread.bescr);
arch/powerpc/kvm/book3s_hv_p9_entry.c
196
if (vcpu->arch.ebbhr != current->thread.ebbhr)
arch/powerpc/kvm/book3s_hv_p9_entry.c
197
mtspr(SPRN_EBBHR, current->thread.ebbhr);
arch/powerpc/kvm/book3s_hv_p9_entry.c
198
if (vcpu->arch.ebbrr != current->thread.ebbrr)
arch/powerpc/kvm/book3s_hv_p9_entry.c
199
mtspr(SPRN_EBBRR, current->thread.ebbrr);
arch/powerpc/kvm/book3s_hv_p9_entry.c
214
if (vcpu->arch.tar != current->thread.tar)
arch/powerpc/kvm/book3s_hv_p9_entry.c
215
mtspr(SPRN_TAR, current->thread.tar);
arch/powerpc/kvm/book3s_hv_p9_entry.c
23
if (current->thread.ebbhr != vcpu->arch.ebbhr)
arch/powerpc/kvm/book3s_hv_p9_entry.c
25
if (current->thread.ebbrr != vcpu->arch.ebbrr)
arch/powerpc/kvm/book3s_hv_p9_entry.c
27
if (current->thread.bescr != vcpu->arch.bescr)
arch/powerpc/kvm/book3s_hv_p9_entry.c
32
current->thread.tidr != vcpu->arch.tid)
arch/powerpc/kvm/book3s_hv_p9_entry.c
40
if (current->thread.fscr != vcpu->arch.fscr)
arch/powerpc/kvm/book3s_hv_p9_entry.c
42
if (current->thread.dscr != vcpu->arch.dscr)
arch/powerpc/kvm/book3s_pr.c
1012
current->thread.tar = mfspr(SPRN_TAR);
arch/powerpc/kvm/book3s_pr.c
145
if (cpu_has_feature(CPU_FTR_ARCH_300) && (current->thread.fscr & FSCR_SCV))
arch/powerpc/kvm/book3s_pr.c
152
current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
arch/powerpc/kvm/book3s_pr.c
176
if (cpu_has_feature(CPU_FTR_ARCH_300) && (current->thread.fscr & FSCR_SCV))
arch/powerpc/kvm/book3s_pr.c
799
struct thread_struct *t = &current->thread;
arch/powerpc/kvm/book3s_pr.c
829
if (current->thread.regs->msr & MSR_VEC)
arch/powerpc/kvm/book3s_pr.c
851
mtspr(SPRN_TAR, current->thread.tar);
arch/powerpc/kvm/book3s_pr.c
862
struct thread_struct *t = &current->thread;
arch/powerpc/kvm/book3s_pr.c
934
lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
arch/powerpc/kvm/book3s_pr.c
954
current->thread.regs->msr |= lost_ext;
arch/powerpc/kvm/booke.c
160
if (!(current->thread.regs->msr & MSR_FP)) {
arch/powerpc/kvm/booke.c
164
current->thread.fp_save_area = &vcpu->arch.fp;
arch/powerpc/kvm/booke.c
165
current->thread.regs->msr |= MSR_FP;
arch/powerpc/kvm/booke.c
177
if (current->thread.regs->msr & MSR_FP)
arch/powerpc/kvm/booke.c
179
current->thread.fp_save_area = NULL;
arch/powerpc/kvm/booke.c
202
if (!(current->thread.regs->msr & MSR_VEC)) {
arch/powerpc/kvm/booke.c
206
current->thread.vr_save_area = &vcpu->arch.vr;
arch/powerpc/kvm/booke.c
207
current->thread.regs->msr |= MSR_VEC;
arch/powerpc/kvm/booke.c
2112
current->thread.kvm_vcpu = vcpu;
arch/powerpc/kvm/booke.c
2117
current->thread.kvm_vcpu = NULL;
arch/powerpc/kvm/booke.c
221
if (current->thread.regs->msr & MSR_VEC)
arch/powerpc/kvm/booke.c
223
current->thread.vr_save_area = NULL;
arch/powerpc/kvm/booke.c
802
debug = current->thread.debug;
arch/powerpc/kvm/booke.c
803
current->thread.debug = vcpu->arch.dbg_reg;
arch/powerpc/kvm/booke.c
815
current->thread.debug = debug;
arch/powerpc/kvm/booke_emulate.c
373
current->thread.debug = vcpu->arch.dbg_reg;
arch/powerpc/kvm/mpic.c
116
struct kvm_vcpu *vcpu = current->thread.kvm_vcpu;
arch/powerpc/lib/sstep.c
1029
buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
arch/powerpc/lib/sstep.c
1030
buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
arch/powerpc/lib/sstep.c
1042
buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
arch/powerpc/lib/sstep.c
614
current->thread.TS_FPR(rn) = u.l[0];
arch/powerpc/lib/sstep.c
621
current->thread.TS_FPR(rn) = u.l[1];
arch/powerpc/lib/sstep.c
650
u.l[0] = current->thread.TS_FPR(rn);
arch/powerpc/lib/sstep.c
662
u.l[1] = current->thread.TS_FPR(rn);
arch/powerpc/lib/sstep.c
703
current->thread.vr_state.vr[rn] = u.v;
arch/powerpc/lib/sstep.c
729
u.v = current->thread.vr_state.vr[rn];
arch/powerpc/lib/sstep.c
983
current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
arch/powerpc/lib/sstep.c
984
current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
arch/powerpc/lib/sstep.c
996
current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
arch/powerpc/math-emu/math.c
332
op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f);
arch/powerpc/math-emu/math.c
333
op1 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f);
arch/powerpc/math-emu/math.c
334
op2 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f);
arch/powerpc/math-emu/math.c
338
op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f);
arch/powerpc/math-emu/math.c
339
op1 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f);
arch/powerpc/math-emu/math.c
340
op2 = (void *)¤t->thread.TS_FPR((insn >> 6) & 0x1f);
arch/powerpc/math-emu/math.c
344
op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f);
arch/powerpc/math-emu/math.c
345
op1 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f);
arch/powerpc/math-emu/math.c
346
op2 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f);
arch/powerpc/math-emu/math.c
347
op3 = (void *)¤t->thread.TS_FPR((insn >> 6) & 0x1f);
arch/powerpc/math-emu/math.c
353
op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f);
arch/powerpc/math-emu/math.c
363
op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f);
arch/powerpc/math-emu/math.c
368
op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f);
arch/powerpc/math-emu/math.c
372
op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f);
arch/powerpc/math-emu/math.c
373
op1 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f);
arch/powerpc/math-emu/math.c
377
op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f);
arch/powerpc/math-emu/math.c
378
op1 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f);
arch/powerpc/math-emu/math.c
383
op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f);
arch/powerpc/math-emu/math.c
392
op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f);
arch/powerpc/math-emu/math.c
400
op2 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f);
arch/powerpc/math-emu/math.c
401
op3 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f);
arch/powerpc/math-emu/math.c
421
op1 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f);
arch/powerpc/math-emu/math_efp.c
200
vc.wp[0] = current->thread.evr[fc];
arch/powerpc/math-emu/math_efp.c
202
va.wp[0] = current->thread.evr[fa];
arch/powerpc/math-emu/math_efp.c
204
vb.wp[0] = current->thread.evr[fb];
arch/powerpc/math-emu/math_efp.c
680
&= ~(FP_EX_INVALID | FP_EX_UNDERFLOW) | current->thread.spefscr_last;
arch/powerpc/math-emu/math_efp.c
683
current->thread.spefscr_last = __FPU_FPSCR;
arch/powerpc/math-emu/math_efp.c
685
current->thread.evr[fc] = vc.wp[0];
arch/powerpc/math-emu/math_efp.c
695
if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
arch/powerpc/math-emu/math_efp.c
697
&& (current->thread.fpexc_mode & PR_FP_EXC_DIV))
arch/powerpc/math-emu/math_efp.c
700
&& (current->thread.fpexc_mode & PR_FP_EXC_OVF))
arch/powerpc/math-emu/math_efp.c
703
&& (current->thread.fpexc_mode & PR_FP_EXC_UND))
arch/powerpc/math-emu/math_efp.c
706
&& (current->thread.fpexc_mode & PR_FP_EXC_RES))
arch/powerpc/math-emu/math_efp.c
709
&& (current->thread.fpexc_mode & PR_FP_EXC_INV))
arch/powerpc/math-emu/math_efp.c
756
s_hi = current->thread.evr[fc] & SIGN_BIT_S;
arch/powerpc/math-emu/math_efp.c
757
fgpr.wp[0] = current->thread.evr[fc];
arch/powerpc/math-emu/math_efp.c
802
s_hi = current->thread.evr[fb] & SIGN_BIT_S;
arch/powerpc/math-emu/math_efp.c
811
s_hi = current->thread.evr[fb] & SIGN_BIT_S;
arch/powerpc/math-emu/math_efp.c
883
current->thread.evr[fc] = fgpr.wp[0];
arch/powerpc/math-emu/math_efp.c
888
if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
arch/powerpc/math-emu/math_efp.c
889
return (current->thread.fpexc_mode & PR_FP_EXC_RES) ? 1 : 0;
arch/powerpc/mm/book3s32/kuap.c
12
current->thread.sr0 |= SR_KS;
arch/powerpc/mm/book3s32/mmu.c
333
if (!current->thread.regs)
arch/powerpc/mm/book3s32/mmu.c
337
if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
arch/powerpc/mm/book3s64/hash_utils.c
2144
trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
arch/powerpc/mm/book3s64/hash_utils.c
2170
if (local && cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
arch/powerpc/mm/book3s64/hash_utils.c
2171
MSR_TM_ACTIVE(current->thread.regs->msr)) {
arch/powerpc/mm/book3s64/pkeys.c
320
current->thread.regs->amr = old_amr | new_amr_bits;
arch/powerpc/mm/book3s64/pkeys.c
331
current->thread.regs->iamr = old_iamr | new_iamr_bits;
arch/powerpc/mm/book3s64/slb.c
436
tsk->thread.load_slb++;
arch/powerpc/mm/book3s64/slb.c
437
if (!tsk->thread.load_slb) {
arch/powerpc/mm/fault.c
146
current->thread.trap_nr = BUS_ADRERR;
arch/powerpc/mm/mmu_context.c
20
tsk->thread.pgdir = mm->pgd;
arch/powerpc/mm/mmu_context.c
22
tsk->thread.sr0 = mm->context.sr0;
arch/powerpc/mm/mmu_context.c
25
tsk->thread.pid = mm->context.id;
arch/powerpc/mm/mmu_context.c
35
tsk->thread.pid = mm->context.id;
arch/powerpc/mm/nohash/mmu_context.c
299
tsk->thread.pid = id;
arch/powerpc/mm/pgtable.c
42
return current->thread.regs && TRAP(current->thread.regs) == 0x400;
arch/powerpc/perf/core-book3s.c
614
if (!is_ebb_event(event) || current->thread.used_ebb)
arch/powerpc/perf/core-book3s.c
623
current->thread.used_ebb = 1;
arch/powerpc/perf/core-book3s.c
624
current->thread.mmcr0 |= MMCR0_PMXE;
arch/powerpc/perf/core-book3s.c
632
current->thread.siar = mfspr(SPRN_SIAR);
arch/powerpc/perf/core-book3s.c
633
current->thread.sier = mfspr(SPRN_SIER);
arch/powerpc/perf/core-book3s.c
634
current->thread.sdar = mfspr(SPRN_SDAR);
arch/powerpc/perf/core-book3s.c
635
current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
arch/powerpc/perf/core-book3s.c
636
current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
arch/powerpc/perf/core-book3s.c
638
current->thread.mmcr3 = mfspr(SPRN_MMCR3);
arch/powerpc/perf/core-book3s.c
639
current->thread.sier2 = mfspr(SPRN_SIER2);
arch/powerpc/perf/core-book3s.c
640
current->thread.sier3 = mfspr(SPRN_SIER3);
arch/powerpc/perf/core-book3s.c
659
mmcr0 |= current->thread.mmcr0;
arch/powerpc/perf/core-book3s.c
666
if (!(current->thread.mmcr0 & MMCR0_PMXE))
arch/powerpc/perf/core-book3s.c
669
mtspr(SPRN_SIAR, current->thread.siar);
arch/powerpc/perf/core-book3s.c
670
mtspr(SPRN_SIER, current->thread.sier);
arch/powerpc/perf/core-book3s.c
671
mtspr(SPRN_SDAR, current->thread.sdar);
arch/powerpc/perf/core-book3s.c
680
mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2 | current->thread.mmcr2);
arch/powerpc/perf/core-book3s.c
683
mtspr(SPRN_MMCR3, current->thread.mmcr3);
arch/powerpc/perf/core-book3s.c
684
mtspr(SPRN_SIER2, current->thread.sier2);
arch/powerpc/perf/core-book3s.c
685
mtspr(SPRN_SIER3, current->thread.sier3);
arch/powerpc/perf/core-book3s.c
731
if ((current->thread.mmcr0 & (MMCR0_PMAO | MMCR0_PMAO_SYNC)) != MMCR0_PMAO)
arch/powerpc/perf/core-book3s.c
735
if (ebb && !(current->thread.bescr & BESCR_GE))
arch/powerpc/platforms/book3s/vas-api.c
361
struct pt_regs *regs = current->thread.regs;
arch/powerpc/platforms/powernv/idle.c
259
unsigned long thread = 1UL << cpu_thread_in_core(cpu);
arch/powerpc/platforms/powernv/idle.c
266
BUG_ON(s & thread);
arch/powerpc/platforms/powernv/idle.c
269
new = s | thread;
arch/powerpc/platforms/powernv/idle.c
319
unsigned long thread = 1UL << cpu_thread_in_core(cpu);
arch/powerpc/platforms/powernv/idle.c
330
BUG_ON(!(*state & thread));
arch/powerpc/platforms/powernv/idle.c
331
*state &= ~thread;
arch/powerpc/platforms/powernv/idle.c
414
*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
arch/powerpc/platforms/powernv/idle.c
430
if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) {
arch/powerpc/platforms/powernv/idle.c
431
*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
arch/powerpc/platforms/powernv/idle.c
437
WARN_ON(*state & thread);
arch/powerpc/platforms/pseries/hotplug-cpu.c
301
u32 thread;
arch/powerpc/platforms/pseries/hotplug-cpu.c
311
thread = be32_to_cpu(intserv[i]);
arch/powerpc/platforms/pseries/hotplug-cpu.c
313
if (get_hard_smp_processor_id(cpu) != thread)
arch/powerpc/platforms/pseries/hotplug-cpu.c
323
"with physical id 0x%x\n", thread);
arch/powerpc/platforms/pseries/hotplug-cpu.c
334
u32 thread;
arch/powerpc/platforms/pseries/hotplug-cpu.c
344
thread = be32_to_cpu(intserv[i]);
arch/powerpc/platforms/pseries/hotplug-cpu.c
346
if (get_hard_smp_processor_id(cpu) != thread)
arch/powerpc/platforms/pseries/hotplug-cpu.c
374
thread);
arch/powerpc/platforms/pseries/hotplug-cpu.c
390
u32 thread;
arch/powerpc/platforms/pseries/hotplug-cpu.c
400
thread = be32_to_cpu(intserv[i]);
arch/powerpc/platforms/pseries/hotplug-cpu.c
402
if (get_hard_smp_processor_id(cpu) != thread)
arch/powerpc/platforms/pseries/hotplug-cpu.c
425
"with physical id 0x%x\n", thread);
arch/powerpc/sysdev/fsl_rcpm.c
137
int thread = cpu_thread_in_core(cpu);
arch/powerpc/sysdev/fsl_rcpm.c
139
book3e_stop_thread(thread);
arch/powerpc/xmon/xmon.c
3262
tsk->thread.ksp, tsk->thread.regs,
arch/riscv/include/asm/compat.h
22
static inline int is_compat_thread(struct thread_info *thread)
arch/riscv/include/asm/compat.h
27
return test_ti_thread_flag(thread, TIF_32BIT);
arch/riscv/include/asm/entry-common.h
19
riscv_v_vstate_restore(¤t->thread.vstate, regs);
arch/riscv/include/asm/simd.h
43
if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE) && current->thread.kernel_vstate.datap) {
arch/riscv/include/asm/switch_to.h
108
#define __set_prev_cpu(thread) ((thread).prev_cpu = smp_processor_id())
arch/riscv/include/asm/switch_to.h
110
#define __set_prev_cpu(thread)
arch/riscv/include/asm/switch_to.h
117
__set_prev_cpu(__prev->thread); \
arch/riscv/include/asm/switch_to.h
78
envcfg = (task->thread.envcfg & ~mask) | val;
arch/riscv/include/asm/switch_to.h
79
task->thread.envcfg = envcfg;
arch/riscv/include/asm/switch_to.h
88
:: "r" (next->thread.envcfg) : "memory");
arch/riscv/include/asm/switch_to.h
98
bool stale_thread = task->thread.force_icache_flush;
arch/riscv/include/asm/switch_to.h
99
bool thread_migrated = smp_processor_id() != task->thread.prev_cpu;
arch/riscv/include/asm/vector.h
336
return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_DIRTY);
arch/riscv/include/asm/vector.h
341
return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_NEED_RESTORE);
arch/riscv/include/asm/vector.h
347
task->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_DIRTY;
arch/riscv/include/asm/vector.h
353
task->thread.riscv_v_flags |= RISCV_PREEMPT_V_NEED_RESTORE;
arch/riscv/include/asm/vector.h
358
return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V);
arch/riscv/include/asm/vector.h
376
WARN_ON(prev->thread.riscv_v_flags & RISCV_V_CTX_DEPTH_MASK);
arch/riscv/include/asm/vector.h
378
prev->thread.riscv_v_flags |= RISCV_PREEMPT_V_IN_SCHEDULE;
arch/riscv/include/asm/vector.h
381
__riscv_v_vstate_save(&prev->thread.kernel_vstate,
arch/riscv/include/asm/vector.h
382
prev->thread.kernel_vstate.datap);
arch/riscv/include/asm/vector.h
387
riscv_v_vstate_save(&prev->thread.vstate, regs);
arch/riscv/include/asm/vector.h
391
if (next->thread.riscv_v_flags & RISCV_PREEMPT_V_IN_SCHEDULE) {
arch/riscv/include/asm/vector.h
392
next->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_IN_SCHEDULE;
arch/riscv/include/asm/vector.h
58
return READ_ONCE(current->thread.riscv_v_flags);
arch/riscv/kernel/asm-offsets.c
24
OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
arch/riscv/kernel/asm-offsets.c
25
OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
arch/riscv/kernel/asm-offsets.c
26
OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
arch/riscv/kernel/asm-offsets.c
27
OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
arch/riscv/kernel/asm-offsets.c
28
OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
arch/riscv/kernel/asm-offsets.c
29
OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
arch/riscv/kernel/asm-offsets.c
30
OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
arch/riscv/kernel/asm-offsets.c
300
offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
301
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
304
offsetof(struct task_struct, thread.sp)
arch/riscv/kernel/asm-offsets.c
305
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
308
offsetof(struct task_struct, thread.s[0])
arch/riscv/kernel/asm-offsets.c
309
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
31
OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
arch/riscv/kernel/asm-offsets.c
312
offsetof(struct task_struct, thread.s[1])
arch/riscv/kernel/asm-offsets.c
313
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
316
offsetof(struct task_struct, thread.s[2])
arch/riscv/kernel/asm-offsets.c
317
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
32
OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
arch/riscv/kernel/asm-offsets.c
320
offsetof(struct task_struct, thread.s[3])
arch/riscv/kernel/asm-offsets.c
321
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
324
offsetof(struct task_struct, thread.s[4])
arch/riscv/kernel/asm-offsets.c
325
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
328
offsetof(struct task_struct, thread.s[5])
arch/riscv/kernel/asm-offsets.c
329
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
33
OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]);
arch/riscv/kernel/asm-offsets.c
332
offsetof(struct task_struct, thread.s[6])
arch/riscv/kernel/asm-offsets.c
333
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
336
offsetof(struct task_struct, thread.s[7])
arch/riscv/kernel/asm-offsets.c
337
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
34
OFFSET(TASK_THREAD_S8, task_struct, thread.s[8]);
arch/riscv/kernel/asm-offsets.c
340
offsetof(struct task_struct, thread.s[8])
arch/riscv/kernel/asm-offsets.c
341
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
344
offsetof(struct task_struct, thread.s[9])
arch/riscv/kernel/asm-offsets.c
345
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
348
offsetof(struct task_struct, thread.s[10])
arch/riscv/kernel/asm-offsets.c
349
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
35
OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
arch/riscv/kernel/asm-offsets.c
352
offsetof(struct task_struct, thread.s[11])
arch/riscv/kernel/asm-offsets.c
353
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
356
offsetof(struct task_struct, thread.sum)
arch/riscv/kernel/asm-offsets.c
357
- offsetof(struct task_struct, thread.ra)
arch/riscv/kernel/asm-offsets.c
36
OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
arch/riscv/kernel/asm-offsets.c
361
offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
362
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
365
offsetof(struct task_struct, thread.fstate.f[1])
arch/riscv/kernel/asm-offsets.c
366
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
369
offsetof(struct task_struct, thread.fstate.f[2])
arch/riscv/kernel/asm-offsets.c
37
OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
arch/riscv/kernel/asm-offsets.c
370
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
373
offsetof(struct task_struct, thread.fstate.f[3])
arch/riscv/kernel/asm-offsets.c
374
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
377
offsetof(struct task_struct, thread.fstate.f[4])
arch/riscv/kernel/asm-offsets.c
378
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
38
OFFSET(TASK_THREAD_SUM, task_struct, thread.sum);
arch/riscv/kernel/asm-offsets.c
381
offsetof(struct task_struct, thread.fstate.f[5])
arch/riscv/kernel/asm-offsets.c
382
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
385
offsetof(struct task_struct, thread.fstate.f[6])
arch/riscv/kernel/asm-offsets.c
386
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
389
offsetof(struct task_struct, thread.fstate.f[7])
arch/riscv/kernel/asm-offsets.c
390
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
393
offsetof(struct task_struct, thread.fstate.f[8])
arch/riscv/kernel/asm-offsets.c
394
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
397
offsetof(struct task_struct, thread.fstate.f[9])
arch/riscv/kernel/asm-offsets.c
398
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
401
offsetof(struct task_struct, thread.fstate.f[10])
arch/riscv/kernel/asm-offsets.c
402
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
405
offsetof(struct task_struct, thread.fstate.f[11])
arch/riscv/kernel/asm-offsets.c
406
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
409
offsetof(struct task_struct, thread.fstate.f[12])
arch/riscv/kernel/asm-offsets.c
410
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
413
offsetof(struct task_struct, thread.fstate.f[13])
arch/riscv/kernel/asm-offsets.c
414
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
417
offsetof(struct task_struct, thread.fstate.f[14])
arch/riscv/kernel/asm-offsets.c
418
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
421
offsetof(struct task_struct, thread.fstate.f[15])
arch/riscv/kernel/asm-offsets.c
422
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
425
offsetof(struct task_struct, thread.fstate.f[16])
arch/riscv/kernel/asm-offsets.c
426
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
429
offsetof(struct task_struct, thread.fstate.f[17])
arch/riscv/kernel/asm-offsets.c
430
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
433
offsetof(struct task_struct, thread.fstate.f[18])
arch/riscv/kernel/asm-offsets.c
434
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
437
offsetof(struct task_struct, thread.fstate.f[19])
arch/riscv/kernel/asm-offsets.c
438
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
441
offsetof(struct task_struct, thread.fstate.f[20])
arch/riscv/kernel/asm-offsets.c
442
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
445
offsetof(struct task_struct, thread.fstate.f[21])
arch/riscv/kernel/asm-offsets.c
446
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
449
offsetof(struct task_struct, thread.fstate.f[22])
arch/riscv/kernel/asm-offsets.c
450
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
453
offsetof(struct task_struct, thread.fstate.f[23])
arch/riscv/kernel/asm-offsets.c
454
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
457
offsetof(struct task_struct, thread.fstate.f[24])
arch/riscv/kernel/asm-offsets.c
458
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
461
offsetof(struct task_struct, thread.fstate.f[25])
arch/riscv/kernel/asm-offsets.c
462
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
465
offsetof(struct task_struct, thread.fstate.f[26])
arch/riscv/kernel/asm-offsets.c
466
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
469
offsetof(struct task_struct, thread.fstate.f[27])
arch/riscv/kernel/asm-offsets.c
470
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
473
offsetof(struct task_struct, thread.fstate.f[28])
arch/riscv/kernel/asm-offsets.c
474
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
477
offsetof(struct task_struct, thread.fstate.f[29])
arch/riscv/kernel/asm-offsets.c
478
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
481
offsetof(struct task_struct, thread.fstate.f[30])
arch/riscv/kernel/asm-offsets.c
482
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
485
offsetof(struct task_struct, thread.fstate.f[31])
arch/riscv/kernel/asm-offsets.c
486
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
489
offsetof(struct task_struct, thread.fstate.fcsr)
arch/riscv/kernel/asm-offsets.c
490
- offsetof(struct task_struct, thread.fstate.f[0])
arch/riscv/kernel/asm-offsets.c
58
OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
arch/riscv/kernel/asm-offsets.c
59
OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
arch/riscv/kernel/asm-offsets.c
60
OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
arch/riscv/kernel/asm-offsets.c
61
OFFSET(TASK_THREAD_F3, task_struct, thread.fstate.f[3]);
arch/riscv/kernel/asm-offsets.c
62
OFFSET(TASK_THREAD_F4, task_struct, thread.fstate.f[4]);
arch/riscv/kernel/asm-offsets.c
63
OFFSET(TASK_THREAD_F5, task_struct, thread.fstate.f[5]);
arch/riscv/kernel/asm-offsets.c
64
OFFSET(TASK_THREAD_F6, task_struct, thread.fstate.f[6]);
arch/riscv/kernel/asm-offsets.c
65
OFFSET(TASK_THREAD_F7, task_struct, thread.fstate.f[7]);
arch/riscv/kernel/asm-offsets.c
66
OFFSET(TASK_THREAD_F8, task_struct, thread.fstate.f[8]);
arch/riscv/kernel/asm-offsets.c
67
OFFSET(TASK_THREAD_F9, task_struct, thread.fstate.f[9]);
arch/riscv/kernel/asm-offsets.c
68
OFFSET(TASK_THREAD_F10, task_struct, thread.fstate.f[10]);
arch/riscv/kernel/asm-offsets.c
69
OFFSET(TASK_THREAD_F11, task_struct, thread.fstate.f[11]);
arch/riscv/kernel/asm-offsets.c
70
OFFSET(TASK_THREAD_F12, task_struct, thread.fstate.f[12]);
arch/riscv/kernel/asm-offsets.c
71
OFFSET(TASK_THREAD_F13, task_struct, thread.fstate.f[13]);
arch/riscv/kernel/asm-offsets.c
72
OFFSET(TASK_THREAD_F14, task_struct, thread.fstate.f[14]);
arch/riscv/kernel/asm-offsets.c
73
OFFSET(TASK_THREAD_F15, task_struct, thread.fstate.f[15]);
arch/riscv/kernel/asm-offsets.c
74
OFFSET(TASK_THREAD_F16, task_struct, thread.fstate.f[16]);
arch/riscv/kernel/asm-offsets.c
75
OFFSET(TASK_THREAD_F17, task_struct, thread.fstate.f[17]);
arch/riscv/kernel/asm-offsets.c
76
OFFSET(TASK_THREAD_F18, task_struct, thread.fstate.f[18]);
arch/riscv/kernel/asm-offsets.c
77
OFFSET(TASK_THREAD_F19, task_struct, thread.fstate.f[19]);
arch/riscv/kernel/asm-offsets.c
78
OFFSET(TASK_THREAD_F20, task_struct, thread.fstate.f[20]);
arch/riscv/kernel/asm-offsets.c
79
OFFSET(TASK_THREAD_F21, task_struct, thread.fstate.f[21]);
arch/riscv/kernel/asm-offsets.c
80
OFFSET(TASK_THREAD_F22, task_struct, thread.fstate.f[22]);
arch/riscv/kernel/asm-offsets.c
81
OFFSET(TASK_THREAD_F23, task_struct, thread.fstate.f[23]);
arch/riscv/kernel/asm-offsets.c
82
OFFSET(TASK_THREAD_F24, task_struct, thread.fstate.f[24]);
arch/riscv/kernel/asm-offsets.c
83
OFFSET(TASK_THREAD_F25, task_struct, thread.fstate.f[25]);
arch/riscv/kernel/asm-offsets.c
84
OFFSET(TASK_THREAD_F26, task_struct, thread.fstate.f[26]);
arch/riscv/kernel/asm-offsets.c
85
OFFSET(TASK_THREAD_F27, task_struct, thread.fstate.f[27]);
arch/riscv/kernel/asm-offsets.c
86
OFFSET(TASK_THREAD_F28, task_struct, thread.fstate.f[28]);
arch/riscv/kernel/asm-offsets.c
87
OFFSET(TASK_THREAD_F29, task_struct, thread.fstate.f[29]);
arch/riscv/kernel/asm-offsets.c
88
OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
arch/riscv/kernel/asm-offsets.c
89
OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
arch/riscv/kernel/asm-offsets.c
90
OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
arch/riscv/kernel/compat_signal.c
56
err = __copy_from_user(¤t->thread.fstate, state, sizeof(*state));
arch/riscv/kernel/compat_signal.c
84
err = __copy_to_user(state, ¤t->thread.fstate, sizeof(*state));
arch/riscv/kernel/cpufeature.c
1174
current->thread.envcfg |= ENVCFG_CBZE;
arch/riscv/kernel/cpufeature.c
1179
current->thread.envcfg |= ENVCFG_CBCFE;
arch/riscv/kernel/kernel_mode_vector.c
128
kvstate = ¤t->thread.kernel_vstate;
arch/riscv/kernel/kernel_mode_vector.c
147
uvstate = ¤t->thread.vstate;
arch/riscv/kernel/kernel_mode_vector.c
171
struct __riscv_v_ext_state *vstate = ¤t->thread.kernel_vstate;
arch/riscv/kernel/kernel_mode_vector.c
218
riscv_v_vstate_save(¤t->thread.vstate, task_pt_regs(current));
arch/riscv/kernel/kernel_mode_vector.c
23
WRITE_ONCE(current->thread.riscv_v_flags, flags);
arch/riscv/kernel/kernel_mode_vector.c
86
return ¤t->thread.riscv_v_flags;
arch/riscv/kernel/kgdb.c
237
gdb_regs[DBG_REG_SP_OFF] = task->thread.sp;
arch/riscv/kernel/kgdb.c
238
gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0];
arch/riscv/kernel/kgdb.c
239
gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1];
arch/riscv/kernel/kgdb.c
240
gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2];
arch/riscv/kernel/kgdb.c
241
gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3];
arch/riscv/kernel/kgdb.c
242
gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4];
arch/riscv/kernel/kgdb.c
243
gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5];
arch/riscv/kernel/kgdb.c
244
gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
arch/riscv/kernel/kgdb.c
245
gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
arch/riscv/kernel/kgdb.c
246
gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
arch/riscv/kernel/kgdb.c
247
gdb_regs[DBG_REG_S9_OFF] = task->thread.s[9];
arch/riscv/kernel/kgdb.c
248
gdb_regs[DBG_REG_S10_OFF] = task->thread.s[10];
arch/riscv/kernel/kgdb.c
249
gdb_regs[DBG_REG_S11_OFF] = task->thread.s[11];
arch/riscv/kernel/kgdb.c
250
gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
arch/riscv/kernel/probes/uprobes.c
112
current->thread.bad_cause = utask->autask.saved_cause;
arch/riscv/kernel/probes/uprobes.c
63
utask->autask.saved_cause = current->thread.bad_cause;
arch/riscv/kernel/probes/uprobes.c
64
current->thread.bad_cause = UPROBE_TRAP_NR;
arch/riscv/kernel/probes/uprobes.c
75
WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);
arch/riscv/kernel/probes/uprobes.c
76
current->thread.bad_cause = utask->autask.saved_cause;
arch/riscv/kernel/probes/uprobes.c
85
if (t->thread.bad_cause != UPROBE_TRAP_NR)
arch/riscv/kernel/process.c
191
memset(¤t->thread.fstate, 0, sizeof(current->thread.fstate));
arch/riscv/kernel/process.c
197
kfree(current->thread.vstate.datap);
arch/riscv/kernel/process.c
198
memset(¤t->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
arch/riscv/kernel/process.c
219
memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
arch/riscv/kernel/process.c
220
memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
arch/riscv/kernel/process.c
250
memset(&p->thread.s, 0, sizeof(p->thread.s));
arch/riscv/kernel/process.c
259
p->thread.s[0] = (unsigned long)args->fn;
arch/riscv/kernel/process.c
260
p->thread.s[1] = (unsigned long)args->fn_arg;
arch/riscv/kernel/process.c
261
p->thread.ra = (unsigned long)ret_from_fork_kernel_asm;
arch/riscv/kernel/process.c
279
p->thread.ra = (unsigned long)ret_from_fork_user_asm;
arch/riscv/kernel/process.c
281
p->thread.riscv_v_flags = 0;
arch/riscv/kernel/process.c
284
p->thread.sp = (unsigned long)childregs; /* kernel sp */
arch/riscv/kernel/process.c
386
switch (task->thread.envcfg & ENVCFG_PMM) {
arch/riscv/kernel/process.c
55
tsk->thread.align_ctl = val;
arch/riscv/kernel/process.c
64
return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
arch/riscv/kernel/ptrace.c
114
riscv_v_vstate_save(¤t->thread.vstate, task_pt_regs(current));
arch/riscv/kernel/ptrace.c
223
struct __riscv_v_ext_state *vstate = &target->thread.vstate;
arch/riscv/kernel/ptrace.c
64
struct __riscv_d_ext_state *fstate = &target->thread.fstate;
arch/riscv/kernel/ptrace.c
80
struct __riscv_d_ext_state *fstate = &target->thread.fstate;
arch/riscv/kernel/ptrace.c
99
struct __riscv_v_ext_state *vstate = &target->thread.vstate;
arch/riscv/kernel/signal.c
102
err |= __copy_to_user(datap, current->thread.vstate.datap, riscv_v_vsize);
arch/riscv/kernel/signal.c
129
err = __copy_from_user(¤t->thread.vstate, &state->v_state,
arch/riscv/kernel/signal.c
142
return copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize);
arch/riscv/kernel/signal.c
50
err = __copy_from_user(¤t->thread.fstate, state, sizeof(*state));
arch/riscv/kernel/signal.c
65
err = __copy_to_user(state, ¤t->thread.fstate, sizeof(*state));
arch/riscv/kernel/signal.c
93
riscv_v_vstate_save(¤t->thread.vstate, regs);
arch/riscv/kernel/signal.c
97
err = __copy_to_user(&state->v_state, ¤t->thread.vstate,
arch/riscv/kernel/stacktrace.c
121
sp = task->thread.sp;
arch/riscv/kernel/stacktrace.c
122
pc = task->thread.ra;
arch/riscv/kernel/stacktrace.c
66
fp = task->thread.s[0];
arch/riscv/kernel/stacktrace.c
67
sp = task->thread.sp;
arch/riscv/kernel/stacktrace.c
68
pc = task->thread.ra;
arch/riscv/kernel/traps.c
135
current->thread.bad_cause = regs->cause;
arch/riscv/kernel/traps.c
291
current->thread.bad_cause = regs->cause;
arch/riscv/kernel/traps_misaligned.c
235
if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
arch/riscv/kernel/traps_misaligned.c
346
if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
arch/riscv/kernel/usercfi.c
100
task->thread.envcfg |= ENVCFG_LPE;
arch/riscv/kernel/usercfi.c
102
task->thread.envcfg &= ~ENVCFG_LPE;
arch/riscv/kernel/usercfi.c
104
csr_write(CSR_ENVCFG, task->thread.envcfg);
arch/riscv/kernel/usercfi.c
70
task->thread.envcfg |= ENVCFG_SSE;
arch/riscv/kernel/usercfi.c
72
task->thread.envcfg &= ~ENVCFG_SSE;
arch/riscv/kernel/usercfi.c
74
csr_write(CSR_ENVCFG, task->thread.envcfg);
arch/riscv/kernel/vector.c
133
riscv_v_thread_ctx_alloc(riscv_v_kernel_cachep, &tsk->thread.kernel_vstate);
arch/riscv/kernel/vector.c
139
if (tsk->thread.vstate.datap)
arch/riscv/kernel/vector.c
140
kmem_cache_free(riscv_v_user_cachep, tsk->thread.vstate.datap);
arch/riscv/kernel/vector.c
142
if (tsk->thread.kernel_vstate.datap)
arch/riscv/kernel/vector.c
143
kmem_cache_free(riscv_v_kernel_cachep, tsk->thread.kernel_vstate.datap);
arch/riscv/kernel/vector.c
153
return VSTATE_CTRL_GET_CUR(tsk->thread.vstate_ctrl);
arch/riscv/kernel/vector.c
158
return VSTATE_CTRL_GET_NEXT(tsk->thread.vstate_ctrl);
arch/riscv/kernel/vector.c
163
return VSTATE_CTRL_GET_INHERIT(tsk->thread.vstate_ctrl);
arch/riscv/kernel/vector.c
175
tsk->thread.vstate_ctrl &= ~PR_RISCV_V_VSTATE_CTRL_MASK;
arch/riscv/kernel/vector.c
176
tsk->thread.vstate_ctrl |= ctrl;
arch/riscv/kernel/vector.c
212
WARN_ON(current->thread.vstate.datap);
arch/riscv/kernel/vector.c
219
if (riscv_v_thread_ctx_alloc(riscv_v_user_cachep, ¤t->thread.vstate)) {
arch/riscv/kernel/vector.c
260
return current->thread.vstate_ctrl & PR_RISCV_V_VSTATE_CTRL_MASK;
arch/riscv/mm/cacheflush.c
256
current->thread.force_icache_flush = true;
arch/riscv/mm/cacheflush.c
270
current->thread.force_icache_flush = false;
arch/riscv/mm/fault.c
326
tsk->thread.bad_cause = cause;
arch/riscv/mm/fault.c
357
tsk->thread.bad_cause = cause;
arch/riscv/mm/fault.c
384
tsk->thread.bad_cause = cause;
arch/riscv/mm/fault.c
396
tsk->thread.bad_cause = cause;
arch/riscv/mm/fault.c
438
tsk->thread.bad_cause = cause;
arch/s390/include/asm/fpu.h
160
struct thread_struct *thread = ¤t->thread;
arch/s390/include/asm/fpu.h
162
if (!thread->ufpu_flags)
arch/s390/include/asm/fpu.h
164
load_fpu_state(&thread->ufpu, thread->ufpu_flags);
arch/s390/include/asm/fpu.h
165
thread->ufpu_flags = 0;
arch/s390/include/asm/fpu.h
168
static __always_inline void __save_user_fpu_regs(struct thread_struct *thread, int flags)
arch/s390/include/asm/fpu.h
170
save_fpu_state(&thread->ufpu, flags);
arch/s390/include/asm/fpu.h
171
__atomic_or(flags, &thread->ufpu_flags);
arch/s390/include/asm/fpu.h
176
struct thread_struct *thread = &current->thread;
arch/s390/include/asm/fpu.h
179
mask = __atomic_or(KERNEL_FPC | KERNEL_VXR, &thread->kfpu_flags);
arch/s390/include/asm/fpu.h
180
flags = ~READ_ONCE(thread->ufpu_flags) & (KERNEL_FPC | KERNEL_VXR);
arch/s390/include/asm/fpu.h
182
__save_user_fpu_regs(thread, flags);
arch/s390/include/asm/fpu.h
184
WRITE_ONCE(thread->kfpu_flags, mask);
arch/s390/include/asm/fpu.h
189
struct thread_struct *thread = &current->thread;
arch/s390/include/asm/fpu.h
192
mask = __atomic_or(flags, &thread->kfpu_flags);
arch/s390/include/asm/fpu.h
194
uflags = READ_ONCE(thread->ufpu_flags);
arch/s390/include/asm/fpu.h
196
__save_user_fpu_regs(thread, ~uflags & flags);
arch/s390/include/asm/fpu.h
208
WRITE_ONCE(current->thread.kfpu_flags, mask);
arch/s390/include/asm/fpu.h
247
static inline void save_kernel_fpu_regs(struct thread_struct *thread)
arch/s390/include/asm/fpu.h
249
if (!thread->kfpu_flags)
arch/s390/include/asm/fpu.h
251
save_fpu_state(&thread->kfpu, thread->kfpu_flags);
arch/s390/include/asm/fpu.h
254
static inline void restore_kernel_fpu_regs(struct thread_struct *thread)
arch/s390/include/asm/fpu.h
256
if (!thread->kfpu_flags)
arch/s390/include/asm/fpu.h
258
load_fpu_state(&thread->kfpu, thread->kfpu_flags);
arch/s390/include/asm/processor.h
257
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
arch/s390/include/asm/stacktrace.h
94
return (unsigned long)task->thread.ksp;
arch/s390/kernel/asm-offsets.c
22
OFFSET(__TASK_thread, task_struct, thread);
arch/s390/kernel/guarded_storage.c
101
if (!sibling->thread.gs_bc_cb)
arch/s390/kernel/guarded_storage.c
18
kfree(tsk->thread.gs_cb);
arch/s390/kernel/guarded_storage.c
19
kfree(tsk->thread.gs_bc_cb);
arch/s390/kernel/guarded_storage.c
26
if (!current->thread.gs_cb) {
arch/s390/kernel/guarded_storage.c
34
current->thread.gs_cb = gs_cb;
arch/s390/kernel/guarded_storage.c
42
if (current->thread.gs_cb) {
arch/s390/kernel/guarded_storage.c
44
kfree(current->thread.gs_cb);
arch/s390/kernel/guarded_storage.c
45
current->thread.gs_cb = NULL;
arch/s390/kernel/guarded_storage.c
56
gs_cb = current->thread.gs_bc_cb;
arch/s390/kernel/guarded_storage.c
61
current->thread.gs_bc_cb = gs_cb;
arch/s390/kernel/guarded_storage.c
72
gs_cb = current->thread.gs_bc_cb;
arch/s390/kernel/guarded_storage.c
73
current->thread.gs_bc_cb = NULL;
arch/s390/kernel/guarded_storage.c
84
gs_cb = current->thread.gs_bc_cb;
arch/s390/kernel/guarded_storage.c
86
kfree(current->thread.gs_cb);
arch/s390/kernel/guarded_storage.c
87
current->thread.gs_bc_cb = NULL;
arch/s390/kernel/guarded_storage.c
90
current->thread.gs_cb = gs_cb;
arch/s390/kernel/irq.c
158
current->thread.last_break = regs->last_break;
arch/s390/kernel/irq.c
197
current->thread.last_break = regs->last_break;
arch/s390/kernel/perf_regs.c
22
fp = *(freg_t *)(current->thread.ufpu.vxrs + idx);
arch/s390/kernel/process.c
100
dst->thread.gs_cb = NULL;
arch/s390/kernel/process.c
101
dst->thread.gs_bc_cb = NULL;
arch/s390/kernel/process.c
118
p->thread.ksp = (unsigned long) frame;
arch/s390/kernel/process.c
120
save_access_regs(&p->thread.acrs[0]);
arch/s390/kernel/process.c
123
memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
arch/s390/kernel/process.c
124
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
arch/s390/kernel/process.c
126
p->thread.per_flags = 0;
arch/s390/kernel/process.c
128
p->thread.user_timer = 0;
arch/s390/kernel/process.c
129
p->thread.guest_timer = 0;
arch/s390/kernel/process.c
130
p->thread.system_timer = 0;
arch/s390/kernel/process.c
131
p->thread.hardirq_timer = 0;
arch/s390/kernel/process.c
132
p->thread.softirq_timer = 0;
arch/s390/kernel/process.c
133
p->thread.last_break = 1;
arch/s390/kernel/process.c
168
p->thread.acrs[0] = (unsigned int)(tls >> 32);
arch/s390/kernel/process.c
169
p->thread.acrs[1] = (unsigned int)tls;
arch/s390/kernel/process.c
182
current->thread.ufpu.fpc = 0;
arch/s390/kernel/process.c
189
save_kernel_fpu_regs(&prev->thread);
arch/s390/kernel/process.c
190
save_access_regs(&prev->thread.acrs[0]);
arch/s390/kernel/process.c
191
save_ri_cb(prev->thread.ri_cb);
arch/s390/kernel/process.c
192
save_gs_cb(prev->thread.gs_cb);
arch/s390/kernel/process.c
194
restore_kernel_fpu_regs(&next->thread);
arch/s390/kernel/process.c
195
restore_access_regs(&next->thread.acrs[0]);
arch/s390/kernel/process.c
196
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);
arch/s390/kernel/process.c
197
restore_gs_cb(next->thread.gs_cb);
arch/s390/kernel/process.c
90
dst->thread.kfpu_flags = 0;
arch/s390/kernel/process.c
99
dst->thread.ri_cb = NULL;
arch/s390/kernel/ptrace.c
144
memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
arch/s390/kernel/ptrace.c
145
memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
arch/s390/kernel/ptrace.c
148
task->thread.per_flags = 0;
arch/s390/kernel/ptrace.c
159
PER_EVENT_IFETCH : child->thread.per_user.control;
arch/s390/kernel/ptrace.c
163
0 : child->thread.per_user.start;
arch/s390/kernel/ptrace.c
167
-1UL : child->thread.per_user.end;
arch/s390/kernel/ptrace.c
174
return child->thread.per_user.start;
arch/s390/kernel/ptrace.c
177
return child->thread.per_user.end;
arch/s390/kernel/ptrace.c
181
child->thread.per_event.cause << (BITS_PER_LONG - 16);
arch/s390/kernel/ptrace.c
184
return child->thread.per_event.address;
arch/s390/kernel/ptrace.c
188
child->thread.per_event.paid << (BITS_PER_LONG - 8);
arch/s390/kernel/ptrace.c
227
tmp = ((unsigned long) child->thread.acrs[15]) << 32;
arch/s390/kernel/ptrace.c
229
tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
arch/s390/kernel/ptrace.c
248
tmp = child->thread.ufpu.fpc;
arch/s390/kernel/ptrace.c
256
tmp = *(addr_t *)((addr_t)child->thread.ufpu.vxrs + 2 * offset);
arch/s390/kernel/ptrace.c
307
child->thread.per_user.control =
arch/s390/kernel/ptrace.c
311
child->thread.per_user.start = data;
arch/s390/kernel/ptrace.c
314
child->thread.per_user.end = data;
arch/s390/kernel/ptrace.c
367
child->thread.acrs[15] = (unsigned int) (data >> 32);
arch/s390/kernel/ptrace.c
369
*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
arch/s390/kernel/ptrace.c
390
child->thread.ufpu.fpc = data >> (BITS_PER_LONG - 32);
arch/s390/kernel/ptrace.c
397
*(addr_t *)((addr_t)child->thread.ufpu.vxrs + 2 * offset) = data;
arch/s390/kernel/ptrace.c
43
struct thread_struct *thread = &task->thread;
arch/s390/kernel/ptrace.c
469
return put_user(child->thread.last_break, (unsigned long __user *)data);
arch/s390/kernel/ptrace.c
473
child->thread.per_flags &= ~PER_FLAG_NO_TE;
arch/s390/kernel/ptrace.c
478
child->thread.per_flags |= PER_FLAG_NO_TE;
arch/s390/kernel/ptrace.c
479
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
arch/s390/kernel/ptrace.c
482
if (!machine_has_tx() || (child->thread.per_flags & PER_FLAG_NO_TE))
arch/s390/kernel/ptrace.c
486
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
arch/s390/kernel/ptrace.c
489
child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
arch/s390/kernel/ptrace.c
490
child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
arch/s390/kernel/ptrace.c
493
child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
arch/s390/kernel/ptrace.c
494
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
arch/s390/kernel/ptrace.c
515
save_access_regs(target->thread.acrs);
arch/s390/kernel/ptrace.c
530
save_access_regs(target->thread.acrs);
arch/s390/kernel/ptrace.c
553
restore_access_regs(target->thread.acrs);
arch/s390/kernel/ptrace.c
567
fp_regs.fpc = target->thread.ufpu.fpc;
arch/s390/kernel/ptrace.c
568
fpregs_store(&fp_regs, &target->thread.ufpu);
arch/s390/kernel/ptrace.c
583
convert_vx_to_fp(fprs, target->thread.ufpu.vxrs);
arch/s390/kernel/ptrace.c
585
u32 ufpc[2] = { target->thread.ufpu.fpc, 0 };
arch/s390/kernel/ptrace.c
592
target->thread.ufpu.fpc = ufpc[0];
arch/s390/kernel/ptrace.c
600
convert_fp_to_vx(target->thread.ufpu.vxrs, fprs);
arch/s390/kernel/ptrace.c
608
return membuf_store(&to, target->thread.last_break);
arch/s390/kernel/ptrace.c
628
size = sizeof(target->thread.trap_tdb.data);
arch/s390/kernel/ptrace.c
629
return membuf_write(&to, target->thread.trap_tdb.data, size);
arch/s390/kernel/ptrace.c
64
if (task->thread.per_flags & PER_FLAG_NO_TE)
arch/s390/kernel/ptrace.c
652
vxrs[i] = target->thread.ufpu.vxrs[i].low;
arch/s390/kernel/ptrace.c
670
vxrs[i] = target->thread.ufpu.vxrs[i].low;
arch/s390/kernel/ptrace.c
675
target->thread.ufpu.vxrs[i].low = vxrs[i];
arch/s390/kernel/ptrace.c
68
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
arch/s390/kernel/ptrace.c
688
return membuf_write(&to, target->thread.ufpu.vxrs + __NUM_VXRS_LOW,
arch/s390/kernel/ptrace.c
69
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
arch/s390/kernel/ptrace.c
705
target->thread.ufpu.vxrs + __NUM_VXRS_LOW, 0, -1);
arch/s390/kernel/ptrace.c
713
return membuf_store(&to, target->thread.system_call);
arch/s390/kernel/ptrace.c
721
unsigned int *data = &target->thread.system_call;
arch/s390/kernel/ptrace.c
730
struct gs_cb *data = target->thread.gs_cb;
arch/s390/kernel/ptrace.c
751
if (!target->thread.gs_cb) {
arch/s390/kernel/ptrace.c
756
if (!target->thread.gs_cb)
arch/s390/kernel/ptrace.c
761
gs_cb = *target->thread.gs_cb;
arch/s390/kernel/ptrace.c
769
if (!target->thread.gs_cb)
arch/s390/kernel/ptrace.c
770
target->thread.gs_cb = data;
arch/s390/kernel/ptrace.c
771
*target->thread.gs_cb = gs_cb;
arch/s390/kernel/ptrace.c
774
restore_gs_cb(target->thread.gs_cb);
arch/s390/kernel/ptrace.c
78
if (task->thread.gs_cb)
arch/s390/kernel/ptrace.c
784
struct gs_cb *data = target->thread.gs_bc_cb;
arch/s390/kernel/ptrace.c
798
struct gs_cb *data = target->thread.gs_bc_cb;
arch/s390/kernel/ptrace.c
806
target->thread.gs_bc_cb = data;
arch/s390/kernel/ptrace.c
842
struct runtime_instr_cb *data = target->thread.ri_cb;
arch/s390/kernel/ptrace.c
863
if (!target->thread.ri_cb) {
arch/s390/kernel/ptrace.c
869
if (target->thread.ri_cb) {
arch/s390/kernel/ptrace.c
873
ri_cb = *target->thread.ri_cb;
arch/s390/kernel/ptrace.c
89
new.control.val = thread->per_user.control;
arch/s390/kernel/ptrace.c
893
if (!target->thread.ri_cb)
arch/s390/kernel/ptrace.c
894
target->thread.ri_cb = data;
arch/s390/kernel/ptrace.c
895
*target->thread.ri_cb = ri_cb;
arch/s390/kernel/ptrace.c
897
load_runtime_instr_cb(target->thread.ri_cb);
arch/s390/kernel/ptrace.c
90
new.start.val = thread->per_user.start;
arch/s390/kernel/ptrace.c
91
new.end.val = thread->per_user.end;
arch/s390/kernel/runtime_instr.c
28
kfree(tsk->thread.ri_cb);
arch/s390/kernel/runtime_instr.c
36
if (!task->thread.ri_cb)
arch/s390/kernel/runtime_instr.c
41
kfree(task->thread.ri_cb);
arch/s390/kernel/runtime_instr.c
42
task->thread.ri_cb = NULL;
arch/s390/kernel/runtime_instr.c
85
if (!current->thread.ri_cb) {
arch/s390/kernel/runtime_instr.c
90
cb = current->thread.ri_cb;
arch/s390/kernel/runtime_instr.c
98
current->thread.ri_cb = cb;
arch/s390/kernel/signal.c
110
save_access_regs(current->thread.acrs);
arch/s390/kernel/signal.c
117
restore_access_regs(current->thread.acrs);
arch/s390/kernel/signal.c
131
memcpy(&user_sregs.regs.acrs, current->thread.acrs,
arch/s390/kernel/signal.c
133
fpregs_store(&user_sregs.fpregs, &current->thread.ufpu);
arch/s390/kernel/signal.c
164
memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
arch/s390/kernel/signal.c
165
sizeof(current->thread.acrs));
arch/s390/kernel/signal.c
167
fpregs_load(&user_sregs.fpregs, &current->thread.ufpu);
arch/s390/kernel/signal.c
183
vxrs[i] = current->thread.ufpu.vxrs[i].low;
arch/s390/kernel/signal.c
187
current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
arch/s390/kernel/signal.c
204
__copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
arch/s390/kernel/signal.c
209
current->thread.ufpu.vxrs[i].low = vxrs[i];
arch/s390/kernel/signal.c
350
regs->gprs[6] = current->thread.last_break;
arch/s390/kernel/signal.c
411
regs->gprs[5] = current->thread.last_break;
arch/s390/kernel/signal.c
449
current->thread.system_call =
arch/s390/kernel/signal.c
454
if (current->thread.system_call) {
arch/s390/kernel/signal.c
455
regs->int_code = current->thread.system_call;
arch/s390/kernel/signal.c
486
if (current->thread.system_call) {
arch/s390/kernel/signal.c
487
regs->int_code = current->thread.system_call;
arch/s390/kernel/smp.c
277
lc->user_timer = tsk->thread.user_timer;
arch/s390/kernel/smp.c
278
lc->guest_timer = tsk->thread.guest_timer;
arch/s390/kernel/smp.c
279
lc->system_timer = tsk->thread.system_timer;
arch/s390/kernel/smp.c
280
lc->hardirq_timer = tsk->thread.hardirq_timer;
arch/s390/kernel/smp.c
281
lc->softirq_timer = tsk->thread.softirq_timer;
arch/s390/kernel/syscall.c
106
current->thread.last_break = regs->last_break;
arch/s390/kernel/traps.c
183
vic = (current->thread.ufpu.fpc & 0xf00) >> 8;
arch/s390/kernel/traps.c
209
if (current->thread.ufpu.fpc & FPC_DXC_MASK)
arch/s390/kernel/traps.c
210
do_fp_trap(regs, current->thread.ufpu.fpc);
arch/s390/kernel/traps.c
348
current->thread.gmap_teid.val = regs->int_parm_long;
arch/s390/kernel/traps.c
349
current->thread.gmap_int_code = regs->int_code & 0xffff;
arch/s390/kernel/traps.c
359
current->thread.last_break = regs->last_break;
arch/s390/kernel/traps.c
363
current->thread.trap_tdb = lc->pgm_tdb;
arch/s390/kernel/traps.c
367
struct per_event *ev = &current->thread.per_event;
arch/s390/kernel/traps.c
40
address = current->thread.trap_tdb.data[3];
arch/s390/kernel/traps.c
78
force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __force __user *)current->thread.per_event.address);
arch/s390/kernel/unwind_bc.c
151
sp = task->thread.ksp;
arch/s390/kernel/uprobes.c
100
current->thread.per_user.control, regs)) {
arch/s390/kernel/uprobes.c
102
current->thread.per_event.address = utask->vaddr;
arch/s390/kernel/uprobes.c
139
current->thread.per_event.address = current->utask->vaddr;
arch/s390/kernel/uprobes.c
242
if (!(current->thread.per_user.control & PER_EVENT_STORE))
arch/s390/kernel/uprobes.c
244
if ((void *)current->thread.per_user.start > (addr + len))
arch/s390/kernel/uprobes.c
246
if ((void *)current->thread.per_user.end < addr)
arch/s390/kernel/uprobes.c
248
current->thread.per_event.address = regs->psw.addr;
arch/s390/kernel/uprobes.c
249
current->thread.per_event.cause = PER_EVENT_STORE >> 16;
arch/s390/kernel/uprobes.c
69
regs->psw.addr >= current->thread.per_user.start &&
arch/s390/kernel/uprobes.c
70
regs->psw.addr <= current->thread.per_user.end)
arch/s390/kernel/uprobes.c
99
if (check_per_event(current->thread.per_event.cause,
arch/s390/kernel/vtime.c
143
user = update_tsk_timer(&tsk->thread.user_timer, lc->user_timer);
arch/s390/kernel/vtime.c
144
guest = update_tsk_timer(&tsk->thread.guest_timer, lc->guest_timer);
arch/s390/kernel/vtime.c
145
system = update_tsk_timer(&tsk->thread.system_timer, lc->system_timer);
arch/s390/kernel/vtime.c
146
hardirq = update_tsk_timer(&tsk->thread.hardirq_timer, lc->hardirq_timer);
arch/s390/kernel/vtime.c
147
softirq = update_tsk_timer(&tsk->thread.softirq_timer, lc->softirq_timer);
arch/s390/kernel/vtime.c
176
prev->thread.user_timer = lc->user_timer;
arch/s390/kernel/vtime.c
177
prev->thread.guest_timer = lc->guest_timer;
arch/s390/kernel/vtime.c
178
prev->thread.system_timer = lc->system_timer;
arch/s390/kernel/vtime.c
179
prev->thread.hardirq_timer = lc->hardirq_timer;
arch/s390/kernel/vtime.c
180
prev->thread.softirq_timer = lc->softirq_timer;
arch/s390/kernel/vtime.c
181
lc->user_timer = current->thread.user_timer;
arch/s390/kernel/vtime.c
182
lc->guest_timer = current->thread.guest_timer;
arch/s390/kernel/vtime.c
183
lc->system_timer = current->thread.system_timer;
arch/s390/kernel/vtime.c
184
lc->hardirq_timer = current->thread.hardirq_timer;
arch/s390/kernel/vtime.c
185
lc->softirq_timer = current->thread.softirq_timer;
arch/s390/kernel/wti.c
158
.store = &wti_state.thread,
arch/s390/kernel/wti.c
180
sched_setscheduler(st->thread, SCHED_FIFO, &wti_sched_param);
arch/s390/kernel/wti.c
34
struct task_struct *thread;
arch/s390/kernel/wti.c
99
wake_up_process(st->thread);
arch/s390/kvm/intercept.c
219
if (current->thread.per_flags & PER_FLAG_NO_TE)
arch/s390/kvm/interrupt.c
536
save_gs_cb(current->thread.gs_cb);
arch/s390/kvm/kvm-s390.c
4439
hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr);
arch/s390/kvm/kvm-s390.c
4443
return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch);
arch/s390/kvm/kvm-s390.c
4478
current->thread.gmap_int_code = 0;
arch/s390/kvm/kvm-s390.c
4524
KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
arch/s390/kvm/kvm-s390.c
4526
current->thread.gmap_int_code, current->thread.gmap_teid.val);
arch/s390/kvm/kvm-s390.c
4556
gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
arch/s390/kvm/kvm-s390.c
4560
switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) {
arch/s390/kvm/kvm-s390.c
4583
current->thread.gmap_int_code, current->comm,
arch/s390/kvm/kvm-s390.c
4613
current->thread.gmap_int_code, current->thread.gmap_teid.val);
arch/s390/kvm/kvm-s390.c
4816
if (current->thread.gs_cb) {
arch/s390/kvm/kvm-s390.c
4817
vcpu->arch.host_gscb = current->thread.gs_cb;
arch/s390/kvm/kvm-s390.c
4821
current->thread.gs_cb = (struct gs_cb *)
arch/s390/kvm/kvm-s390.c
4823
restore_gs_cb(current->thread.gs_cb);
arch/s390/kvm/kvm-s390.c
4883
save_gs_cb(current->thread.gs_cb);
arch/s390/kvm/kvm-s390.c
4884
current->thread.gs_cb = vcpu->arch.host_gscb;
arch/s390/kvm/kvm-s390.h
586
if (current->thread.gmap_int_code == PGM_PROTECTION)
arch/s390/kvm/kvm-s390.h
588
return test_facility(75) && (current->thread.gmap_teid.fsi == TEID_FSI_STORE);
arch/s390/kvm/priv.c
62
current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
arch/s390/kvm/priv.c
63
restore_gs_cb(current->thread.gs_cb);
arch/s390/kvm/vsie.c
1153
current->thread.gmap_int_code = 0;
arch/s390/kvm/vsie.c
1191
else if (current->thread.gmap_int_code)
arch/s390/kvm/vsie.c
933
if ((current->thread.gmap_int_code & PGM_INT_CODE_MASK) == PGM_PROTECTION)
arch/s390/kvm/vsie.c
936
current->thread.gmap_teid.addr * PAGE_SIZE, 1);
arch/s390/kvm/vsie.c
938
rc = gaccess_shadow_fault(vcpu, sg, current->thread.gmap_teid.addr * PAGE_SIZE, NULL, wr);
arch/s390/kvm/vsie.c
941
current->thread.gmap_teid.addr * PAGE_SIZE, wr);
arch/s390/kvm/vsie.c
943
vsie_page->fault_addr = current->thread.gmap_teid.addr * PAGE_SIZE;
arch/s390/mm/pfault.c
145
if (tsk->thread.pfault_wait == 1) {
arch/s390/mm/pfault.c
153
tsk->thread.pfault_wait = 0;
arch/s390/mm/pfault.c
154
list_del(&tsk->thread.list);
arch/s390/mm/pfault.c
168
tsk->thread.pfault_wait = -1;
arch/s390/mm/pfault.c
174
if (tsk->thread.pfault_wait == 1) {
arch/s390/mm/pfault.c
177
} else if (tsk->thread.pfault_wait == -1) {
arch/s390/mm/pfault.c
183
tsk->thread.pfault_wait = 0;
arch/s390/mm/pfault.c
193
tsk->thread.pfault_wait = 1;
arch/s390/mm/pfault.c
194
list_add(&tsk->thread.list, &pfault_list);
arch/s390/mm/pfault.c
212
struct thread_struct *thread, *next;
arch/s390/mm/pfault.c
216
list_for_each_entry_safe(thread, next, &pfault_list, list) {
arch/s390/mm/pfault.c
217
thread->pfault_wait = 0;
arch/s390/mm/pfault.c
218
list_del(&thread->list);
arch/s390/mm/pfault.c
219
tsk = container_of(thread, struct task_struct, thread);
arch/sh/include/asm/fpu.h
47
tsk->thread.fpu_counter = 0;
arch/sh/include/asm/processor_32.h
168
#define thread_saved_pc(tsk) (tsk->thread.pc)
arch/sh/include/asm/switch_to_32.h
13
(u32 *)&tsk->thread.dsp_status; \
arch/sh/include/asm/switch_to_32.h
36
(u32 *)&tsk->thread.dsp_status + 14; \
arch/sh/include/asm/switch_to_32.h
8
(!!(tsk->thread.dsp_status.status & SR_DSP))
arch/sh/include/asm/switch_to_32.h
85
__ts1 = (u32 *)&prev->thread.sp; \
arch/sh/include/asm/switch_to_32.h
86
__ts2 = (u32 *)&prev->thread.pc; \
arch/sh/include/asm/switch_to_32.h
89
__ts6 = (u32 *)&next->thread.sp; \
arch/sh/include/asm/switch_to_32.h
90
__ts7 = next->thread.pc; \
arch/sh/kernel/cpu/fpu.c
22
if (!tsk->thread.xstate) {
arch/sh/kernel/cpu/fpu.c
23
tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
arch/sh/kernel/cpu/fpu.c
25
if (!tsk->thread.xstate)
arch/sh/kernel/cpu/fpu.c
30
struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
arch/sh/kernel/cpu/fpu.c
34
struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
arch/sh/kernel/cpu/fpu.c
51
tsk->thread.fpu_counter++;
arch/sh/kernel/cpu/sh2a/fpu.c
456
if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) {
arch/sh/kernel/cpu/sh2a/fpu.c
458
denormal_to_double (&tsk->thread.xstate->hardfpu,
arch/sh/kernel/cpu/sh2a/fpu.c
473
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
arch/sh/kernel/cpu/sh2a/fpu.c
474
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
arch/sh/kernel/cpu/sh2a/fpu.c
475
fpscr = tsk->thread.xstate->hardfpu.fpscr;
arch/sh/kernel/cpu/sh2a/fpu.c
485
| tsk->thread.xstate->hardfpu.fp_regs[n+1];
arch/sh/kernel/cpu/sh2a/fpu.c
487
| tsk->thread.xstate->hardfpu.fp_regs[m+1];
arch/sh/kernel/cpu/sh2a/fpu.c
492
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
arch/sh/kernel/cpu/sh2a/fpu.c
493
tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
arch/sh/kernel/cpu/sh2a/fpu.c
502
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
arch/sh/kernel/cpu/sh2a/fpu.c
516
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
arch/sh/kernel/cpu/sh2a/fpu.c
517
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
arch/sh/kernel/cpu/sh2a/fpu.c
518
fpscr = tsk->thread.xstate->hardfpu.fpscr;
arch/sh/kernel/cpu/sh2a/fpu.c
52
: "0" ((char *)(&tsk->thread.xstate->hardfpu.status)),
arch/sh/kernel/cpu/sh2a/fpu.c
528
| tsk->thread.xstate->hardfpu.fp_regs[n+1];
arch/sh/kernel/cpu/sh2a/fpu.c
530
| tsk->thread.xstate->hardfpu.fp_regs[m+1];
arch/sh/kernel/cpu/sh2a/fpu.c
535
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
arch/sh/kernel/cpu/sh2a/fpu.c
536
tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
arch/sh/kernel/cpu/sh2a/fpu.c
545
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
arch/sh/kernel/cpu/sh2a/fpu.c
563
tsk->thread.xstate->hardfpu.fpscr &=
arch/sh/kernel/cpu/sh2a/fpu.c
84
: "0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
arch/sh/kernel/cpu/sh4/fpu.c
134
:"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
arch/sh/kernel/cpu/sh4/fpu.c
230
if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))
arch/sh/kernel/cpu/sh4/fpu.c
232
denormal_to_double(&tsk->thread.xstate->hardfpu,
arch/sh/kernel/cpu/sh4/fpu.c
248
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
arch/sh/kernel/cpu/sh4/fpu.c
249
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
arch/sh/kernel/cpu/sh4/fpu.c
250
fpscr = tsk->thread.xstate->hardfpu.fpscr;
arch/sh/kernel/cpu/sh4/fpu.c
260
| tsk->thread.xstate->hardfpu.fp_regs[n + 1];
arch/sh/kernel/cpu/sh4/fpu.c
262
| tsk->thread.xstate->hardfpu.fp_regs[m + 1];
arch/sh/kernel/cpu/sh4/fpu.c
264
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
arch/sh/kernel/cpu/sh4/fpu.c
265
tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
arch/sh/kernel/cpu/sh4/fpu.c
271
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
arch/sh/kernel/cpu/sh4/fpu.c
286
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
arch/sh/kernel/cpu/sh4/fpu.c
287
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
arch/sh/kernel/cpu/sh4/fpu.c
288
fpscr = tsk->thread.xstate->hardfpu.fpscr;
arch/sh/kernel/cpu/sh4/fpu.c
298
| tsk->thread.xstate->hardfpu.fp_regs[n + 1];
arch/sh/kernel/cpu/sh4/fpu.c
300
| tsk->thread.xstate->hardfpu.fp_regs[m + 1];
arch/sh/kernel/cpu/sh4/fpu.c
305
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
arch/sh/kernel/cpu/sh4/fpu.c
306
tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
arch/sh/kernel/cpu/sh4/fpu.c
315
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
arch/sh/kernel/cpu/sh4/fpu.c
330
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
arch/sh/kernel/cpu/sh4/fpu.c
331
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
arch/sh/kernel/cpu/sh4/fpu.c
332
fpscr = tsk->thread.xstate->hardfpu.fpscr;
arch/sh/kernel/cpu/sh4/fpu.c
342
| tsk->thread.xstate->hardfpu.fp_regs[n + 1];
arch/sh/kernel/cpu/sh4/fpu.c
344
| tsk->thread.xstate->hardfpu.fp_regs[m + 1];
arch/sh/kernel/cpu/sh4/fpu.c
348
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
arch/sh/kernel/cpu/sh4/fpu.c
349
tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
arch/sh/kernel/cpu/sh4/fpu.c
355
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
arch/sh/kernel/cpu/sh4/fpu.c
368
hx = tsk->thread.xstate->hardfpu.fp_regs[m];
arch/sh/kernel/cpu/sh4/fpu.c
370
if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)
arch/sh/kernel/cpu/sh4/fpu.c
375
llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32)
arch/sh/kernel/cpu/sh4/fpu.c
376
| tsk->thread.xstate->hardfpu.fp_regs[m + 1];
arch/sh/kernel/cpu/sh4/fpu.c
378
tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx);
arch/sh/kernel/cpu/sh4/fpu.c
397
int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr);
arch/sh/kernel/cpu/sh4/fpu.c
409
tsk->thread.xstate->hardfpu.fpscr &=
arch/sh/kernel/cpu/sh4/fpu.c
411
tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags;
arch/sh/kernel/cpu/sh4/fpu.c
414
tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10);
arch/sh/kernel/cpu/sh4/fpu.c
418
if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) &
arch/sh/kernel/cpu/sh4/fpu.c
84
:"0"((char *)(&tsk->thread.xstate->hardfpu.status)),
arch/sh/kernel/dumpstack.c
150
sp = (unsigned long *)tsk->thread.sp;
arch/sh/kernel/hw_breakpoint.c
265
struct thread_struct *t = &tsk->thread;
arch/sh/kernel/kgdb.c
242
gdb_regs[GDB_R15] = p->thread.sp;
arch/sh/kernel/kgdb.c
243
gdb_regs[GDB_PC] = p->thread.pc;
arch/sh/kernel/process.c
29
if (src->thread.xstate) {
arch/sh/kernel/process.c
30
dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
arch/sh/kernel/process.c
32
if (!dst->thread.xstate)
arch/sh/kernel/process.c
34
memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
arch/sh/kernel/process.c
42
if (tsk->thread.xstate) {
arch/sh/kernel/process.c
43
kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
arch/sh/kernel/process.c
44
tsk->thread.xstate = NULL;
arch/sh/kernel/process_32.c
106
p->thread.dsp_status = tsk->thread.dsp_status;
arch/sh/kernel/process_32.c
110
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
arch/sh/kernel/process_32.c
113
p->thread.sp = (unsigned long) childregs;
arch/sh/kernel/process_32.c
116
p->thread.pc = (unsigned long) ret_from_kernel_thread;
arch/sh/kernel/process_32.c
124
p->thread.fpu_counter = 0;
arch/sh/kernel/process_32.c
136
p->thread.pc = (unsigned long) ret_from_fork;
arch/sh/kernel/process_32.c
147
struct thread_struct *next_t = &next->thread;
arch/sh/kernel/process_32.c
156
if (next->thread.fpu_counter > 5)
arch/sh/kernel/process_32.c
174
if (next->thread.fpu_counter > 5)
arch/sh/kernel/process_32.c
191
unsigned long schedule_frame = (unsigned long)p->thread.sp;
arch/sh/kernel/ptrace_32.c
177
return membuf_write(&to, target->thread.xstate,
arch/sh/kernel/ptrace_32.c
196
&target->thread.xstate->hardfpu, 0, -1);
arch/sh/kernel/ptrace_32.c
199
&target->thread.xstate->softfpu, 0, -1);
arch/sh/kernel/ptrace_32.c
215
(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
arch/sh/kernel/ptrace_32.c
226
(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
arch/sh/kernel/ptrace_32.c
369
tmp = ((unsigned long *)child->thread.xstate)
arch/sh/kernel/ptrace_32.c
404
((unsigned long *)child->thread.xstate)
arch/sh/kernel/ptrace_32.c
77
struct thread_struct *thread = &tsk->thread;
arch/sh/kernel/ptrace_32.c
81
bp = thread->ptrace_bps[0];
arch/sh/kernel/ptrace_32.c
94
thread->ptrace_bps[0] = bp;
arch/sh/kernel/signal_32.c
109
return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu,
arch/sh/kernel/signal_32.c
85
return __copy_from_user(&tsk->thread.xstate->hardfpu, &sc->sc_fpregs[0],
arch/sh/kernel/smp.c
221
stack_start.sp = tsk->thread.sp;
arch/sh/kernel/stacktrace.c
75
unsigned long *sp = (unsigned long *)tsk->thread.sp;
arch/sh/kernel/traps_32.c
642
current->thread.dsp_status.status |= SR_DSP;
arch/sh/math-emu/math.c
497
struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
arch/sh/mm/alignment.c
72
if (current->thread.flags & SH_THREAD_UAC_SIGBUS) {
arch/sh/mm/alignment.c
77
if (current->thread.flags & SH_THREAD_UAC_NOPRINT)
arch/sh/mm/alignment.c
85
return put_user(tsk->thread.flags & SH_THREAD_UAC_MASK,
arch/sh/mm/alignment.c
91
tsk->thread.flags = (tsk->thread.flags & ~SH_THREAD_UAC_MASK) |
arch/sparc/include/asm/processor_32.h
85
#define task_pt_regs(tsk) ((tsk)->thread.kregs)
arch/sparc/include/asm/processor_32.h
86
#define KSTK_EIP(tsk) ((tsk)->thread.kregs->pc)
arch/sparc/include/asm/processor_32.h
87
#define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
arch/sparc/include/asm/ptrace.h
47
struct thread_info *thread;
arch/sparc/include/asm/sfp-machine_32.h
185
#define FP_ROUNDMODE ((current->thread.fsr >> 30) & 0x3)
arch/sparc/include/asm/sfp-machine_32.h
187
#define FP_ROUNDMODE ((last_task_used_math->thread.fsr >> 30) & 0x3)
arch/sparc/include/asm/sfp-machine_32.h
201
#define FP_INHIBIT_RESULTS ((current->thread.fsr >> 23) & _fex)
arch/sparc/include/asm/sfp-machine_32.h
203
#define FP_INHIBIT_RESULTS ((last_task_used_math->thread.fsr >> 23) & _fex)
arch/sparc/include/asm/sfp-machine_32.h
207
#define FP_TRAPPING_EXCEPTIONS ((current->thread.fsr >> 23) & 0x1f)
arch/sparc/include/asm/sfp-machine_32.h
209
#define FP_TRAPPING_EXCEPTIONS ((last_task_used_math->thread.fsr >> 23) & 0x1f)
arch/sparc/include/asm/switch_to_32.h
21
fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
arch/sparc/include/asm/switch_to_32.h
22
&(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
arch/sparc/include/asm/switch_to_32.h
24
(prv)->thread.kregs->psr &= ~PSR_EF; \
arch/sparc/include/asm/switch_to_32.h
34
(nxt)->thread.kregs->psr&=~PSR_EF; \
arch/sparc/include/asm/switch_to_64.h
25
trap_block[current_thread_info()->cpu].thread = \
arch/sparc/include/asm/trap_block.h
28
struct thread_info *thread;
arch/sparc/kernel/asm-offsets.c
52
DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
arch/sparc/kernel/process_32.c
186
fpsave(&tsk->thread.float_regs[0], &tsk->thread.fsr,
arch/sparc/kernel/process_32.c
187
&tsk->thread.fpqueue[0], &tsk->thread.fpqdepth);
arch/sparc/kernel/process_32.c
207
fpsave(&current->thread.float_regs[0], &current->thread.fsr,
arch/sparc/kernel/process_32.c
208
&current->thread.fpqueue[0], &current->thread.fpqdepth);
arch/sparc/kernel/process_32.c
278
fpsave(&p->thread.float_regs[0], &p->thread.fsr,
arch/sparc/kernel/process_32.c
279
&p->thread.fpqueue[0], &p->thread.fpqdepth);
arch/sparc/kernel/process_32.c
300
p->thread.kregs = childregs;
arch/sparc/kernel/process_32.c
317
ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
arch/sparc/kernel/process_32.c
318
ti->kwim = current->thread.fork_kwim;
arch/sparc/kernel/process_64.c
221
rp->thread = tp;
arch/sparc/kernel/process_64.c
233
while (!gp->thread && ++limit < 100) {
arch/sparc/kernel/process_64.c
270
tp = gp->thread;
arch/sparc/kernel/prom_64.c
384
int cpu, unsigned int *thread)
arch/sparc/kernel/prom_64.c
417
if (thread) {
arch/sparc/kernel/prom_64.c
429
*thread = proc_id;
arch/sparc/kernel/ptrace_32.c
111
struct pt_regs *regs = target->thread.kregs;
arch/sparc/kernel/ptrace_32.c
175
membuf_write(&to, target->thread.float_regs, 32 * sizeof(u32));
arch/sparc/kernel/ptrace_32.c
177
membuf_write(&to, &target->thread.fsr, sizeof(u32));
arch/sparc/kernel/ptrace_32.c
187
unsigned long *fpregs = target->thread.float_regs;
arch/sparc/kernel/ptrace_32.c
203
&target->thread.fsr,
arch/sparc/kernel/ptrace_32.c
248
const struct pt_regs *regs = target->thread.kregs;
arch/sparc/kernel/ptrace_32.c
265
struct pt_regs *regs = target->thread.kregs;
arch/sparc/kernel/ptrace_32.c
295
membuf_write(&to, &target->thread.float_regs, 32 * sizeof(u32));
arch/sparc/kernel/ptrace_32.c
296
membuf_write(&to, &target->thread.fsr, sizeof(u32));
arch/sparc/kernel/ptrace_32.c
305
unsigned long *fpregs = target->thread.float_regs;
arch/sparc/kernel/ptrace_32.c
318
&target->thread.fsr,
arch/sparc/kernel/ptrace_32.c
363
unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4];
arch/sparc/kernel/ptrace_32.c
87
const struct pt_regs *regs = target->thread.kregs;
arch/sparc/kernel/sigutil_32.c
21
fpsave(&current->thread.float_regs[0], &current->thread.fsr,
arch/sparc/kernel/sigutil_32.c
22
&current->thread.fpqueue[0], &current->thread.fpqdepth);
arch/sparc/kernel/sigutil_32.c
29
fpsave(&current->thread.float_regs[0], &current->thread.fsr,
arch/sparc/kernel/sigutil_32.c
30
&current->thread.fpqueue[0], &current->thread.fpqdepth);
arch/sparc/kernel/sigutil_32.c
36
&current->thread.float_regs[0],
arch/sparc/kernel/sigutil_32.c
38
err |= __put_user(current->thread.fsr, &fpu->si_fsr);
arch/sparc/kernel/sigutil_32.c
39
err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
arch/sparc/kernel/sigutil_32.c
40
if (current->thread.fpqdepth != 0)
arch/sparc/kernel/sigutil_32.c
42
&current->thread.fpqueue[0],
arch/sparc/kernel/sigutil_32.c
71
err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
arch/sparc/kernel/sigutil_32.c
73
err |= __get_user(current->thread.fsr, &fpu->si_fsr);
arch/sparc/kernel/sigutil_32.c
74
err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
arch/sparc/kernel/sigutil_32.c
75
if (current->thread.fpqdepth != 0)
arch/sparc/kernel/sigutil_32.c
76
err |= __copy_from_user(&current->thread.fpqueue[0],
arch/sparc/kernel/traps_32.c
172
fpsave(&fptask->thread.float_regs[0], &fptask->thread.fsr,
arch/sparc/kernel/traps_32.c
173
&fptask->thread.fpqueue[0], &fptask->thread.fpqdepth);
arch/sparc/kernel/traps_32.c
177
fpload(&current->thread.float_regs[0], &current->thread.fsr);
arch/sparc/kernel/traps_32.c
188
fpload(&current->thread.float_regs[0], &current->thread.fsr);
arch/sparc/kernel/traps_32.c
225
fpsave(&fpt->thread.float_regs[0], &fpt->thread.fsr,
arch/sparc/kernel/traps_32.c
226
&fpt->thread.fpqueue[0], &fpt->thread.fpqdepth);
arch/sparc/kernel/traps_32.c
228
printk("Hmm, FP exception, fsr was %016lx\n", fpt->thread.fsr);
arch/sparc/kernel/traps_32.c
231
switch ((fpt->thread.fsr & 0x1c000)) {
arch/sparc/kernel/traps_32.c
256
fpload(&current->thread.float_regs[0], &current->thread.fsr);
arch/sparc/kernel/traps_32.c
279
fsr = fpt->thread.fsr;
arch/sparc/kernel/traps_64.c
2831
p->thread = t;
arch/sparc/kernel/traps_64.c
2869
thread) ||
arch/sparc/kernel/unaligned_32.c
61
die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
arch/sparc/math-emu/math_32.c
172
printk("fpqdepth is %ld\n", fpt->thread.fpqdepth);
arch/sparc/math-emu/math_32.c
173
for (i = 0; i < fpt->thread.fpqdepth; i++)
arch/sparc/math-emu/math_32.c
174
printk("%d: %08lx at %08lx\n", i, fpt->thread.fpqueue[i].insn,
arch/sparc/math-emu/math_32.c
175
(unsigned long)fpt->thread.fpqueue[i].insn_addr);
arch/sparc/math-emu/math_32.c
178
if (fpt->thread.fpqdepth == 0) { /* no queue, guilty insn is at regs->pc */
arch/sparc/math-emu/math_32.c
183
retcode = do_one_mathemu(insn, &fpt->thread.fsr, fpt->thread.float_regs);
arch/sparc/math-emu/math_32.c
194
for (i = 0; i < fpt->thread.fpqdepth; i++) {
arch/sparc/math-emu/math_32.c
195
retcode = do_one_mathemu(fpt->thread.fpqueue[i].insn, &(fpt->thread.fsr), fpt->thread.float_regs);
arch/sparc/math-emu/math_32.c
201
fpt->thread.fsr &= ~(0x3000 | FSR_CEXC_MASK);
arch/sparc/math-emu/math_32.c
203
fpt->thread.fsr &= ~0x3000;
arch/sparc/math-emu/math_32.c
204
fpt->thread.fpqdepth = 0;
arch/sparc/mm/fault_32.c
337
__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
arch/sparc/mm/fault_32.c
342
__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
arch/um/drivers/chan_user.c
299
int pid, thread, count, thread_fd = -1;
arch/um/drivers/chan_user.c
312
thread = winch_tramp(fd, port, &thread_fd, &stack);
arch/um/drivers/chan_user.c
313
if (thread < 0)
arch/um/drivers/chan_user.c
316
register_winch_irq(thread_fd, fd, thread, port, stack);
arch/um/include/asm/processor-generic.h
31
} thread;
arch/um/include/asm/processor-generic.h
85
#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
arch/um/include/asm/uaccess.h
52
current->thread.segv_continue = NULL; \
arch/um/include/asm/uaccess.h
64
current->thread.segv_continue = NULL; \
arch/um/kernel/exec.c
25
arch_flush_thread(&current->thread.arch);
arch/um/kernel/process.c
115
if (current->thread.prev_sched != NULL)
arch/um/kernel/process.c
116
schedule_tail(current->thread.prev_sched);
arch/um/kernel/process.c
117
current->thread.prev_sched = NULL;
arch/um/kernel/process.c
119
fn = current->thread.request.thread.proc;
arch/um/kernel/process.c
120
arg = current->thread.request.thread.arg;
arch/um/kernel/process.c
126
userspace(&current->thread.regs.regs);
arch/um/kernel/process.c
132
schedule_tail(current->thread.prev_sched);
arch/um/kernel/process.c
141
current->thread.prev_sched = NULL;
arch/um/kernel/process.c
143
userspace(&current->thread.regs.regs);
arch/um/kernel/process.c
154
p->thread = (struct thread_struct) INIT_THREAD;
arch/um/kernel/process.c
157
memcpy(&p->thread.regs.regs, current_pt_regs(),
arch/um/kernel/process.c
158
sizeof(p->thread.regs.regs));
arch/um/kernel/process.c
159
PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
arch/um/kernel/process.c
161
REGS_SP(p->thread.regs.regs.gp) = sp;
arch/um/kernel/process.c
165
arch_copy_thread(¤t->thread.arch, &p->thread.arch);
arch/um/kernel/process.c
167
get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
arch/um/kernel/process.c
168
p->thread.request.thread.proc = args->fn;
arch/um/kernel/process.c
169
p->thread.request.thread.arg = args->fn_arg;
arch/um/kernel/process.c
173
new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
arch/um/kernel/process.c
289
sp = p->thread.switch_buf->JB_SP;
arch/um/kernel/process.c
75
to->thread.prev_sched = from;
arch/um/kernel/process.c
78
switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
arch/um/kernel/process.c
81
return current->thread.prev_sched;
arch/um/kernel/process.c
86
struct pt_regs *regs = &current->thread.regs;
arch/um/kernel/skas/process.c
39
init_task.thread.request.thread.proc = start_kernel_proc;
arch/um/kernel/skas/process.c
40
init_task.thread.request.thread.arg = NULL;
arch/um/kernel/skas/process.c
42
&init_task.thread.switch_buf);
arch/um/kernel/smp.c
157
idle->thread.request.thread.proc = start_secondary;
arch/um/kernel/smp.c
158
idle->thread.request.thread.arg = NULL;
arch/um/kernel/smp.c
160
new_thread(task_stack_page(idle), &idle->thread.switch_buf,
arch/um/kernel/smp.c
162
os_start_secondary(opaque, &idle->thread.switch_buf);
arch/um/kernel/stacktrace.c
22
struct pt_regs *segv_regs = tsk->thread.segv_regs;
arch/um/kernel/sysrq.c
32
struct pt_regs *segv_regs = current->thread.segv_regs;
arch/um/kernel/trap.c
262
current->thread.arch.faultinfo = fi;
arch/um/kernel/trap.c
269
do_signal(¤t->thread.regs);
arch/um/kernel/trap.c
317
current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
arch/um/kernel/trap.c
337
if (!current->thread.segv_continue) {
arch/um/kernel/trap.c
341
mc_set_rip(mc, current->thread.segv_continue);
arch/um/kernel/trap.c
342
current->thread.segv_continue = NULL;
arch/um/kernel/trap.c
382
current->thread.arch.faultinfo = fi;
arch/um/kernel/trap.c
386
current->thread.arch.faultinfo = fi;
arch/um/kernel/trap.c
392
current->thread.segv_regs = NULL;
arch/um/kernel/trap.c
417
current->thread.arch.faultinfo = *fi;
arch/x86/coco/sev/vc-handle.c
79
struct thread_struct *t = &current->thread;
arch/x86/entry/vsyscall/vsyscall_64.c
101
struct thread_struct *thread = &current->thread;
arch/x86/entry/vsyscall/vsyscall_64.c
103
thread->error_code = X86_PF_USER | X86_PF_WRITE;
arch/x86/entry/vsyscall/vsyscall_64.c
104
thread->cr2 = ptr;
arch/x86/entry/vsyscall/vsyscall_64.c
105
thread->trap_nr = X86_TRAP_PF;
arch/x86/events/intel/p4.c
1227
static int p4_next_cntr(int thread, unsigned long *used_mask,
arch/x86/events/intel/p4.c
1233
j = bind->cntr[thread][i];
arch/x86/events/intel/p4.c
1248
unsigned int i, thread, num;
arch/x86/events/intel/p4.c
1259
thread = p4_ht_thread(cpu);
arch/x86/events/intel/p4.c
1272
escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
arch/x86/events/intel/p4.c
1283
cntr_idx = p4_next_cntr(thread, used_mask, bind);
arch/x86/events/intel/p4.c
955
int thread = p4_ht_config_thread(hwc->config);
arch/x86/events/intel/p4.c
962
escr_addr = bind->escr_msr[thread];
arch/x86/hyperv/hv_vtl.c
129
rsp = (unsigned long)idle->thread.sp;
arch/x86/include/asm/elf.h
167
elf_common_init(&current->thread, _r, 0)
arch/x86/include/asm/elf.h
170
elf_common_init(&current->thread, regs, __USER_DS)
arch/x86/include/asm/stacktrace.h
79
return &((struct inactive_task_frame *)task->thread.sp)->bp;
arch/x86/include/asm/stacktrace.h
98
return (unsigned long *)task->thread.sp;
arch/x86/include/asm/suspend_64.h
58
#define loaddebug(thread,register) \
arch/x86/include/asm/suspend_64.h
59
set_debugreg((thread)->debugreg##register, register)
arch/x86/include/asm/switch_to.h
57
static inline void refresh_sysenter_cs(struct thread_struct *thread)
arch/x86/include/asm/switch_to.h
60
if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
arch/x86/include/asm/switch_to.h
63
this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
arch/x86/include/asm/switch_to.h
64
wrmsrq(MSR_IA32_SYSENTER_CS, thread->sysenter_cs);
arch/x86/include/asm/switch_to.h
73
this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
arch/x86/include/asm/uv/uv_geo.h
50
thread:4;
arch/x86/kernel/acpi/sleep.c
131
current->thread.sp = (unsigned long)temp_stack + sizeof(temp_stack);
arch/x86/kernel/asm-offsets.c
45
OFFSET(TASK_threadsp, task_struct, thread.sp);
arch/x86/kernel/cet.c
64
tsk->thread.error_code = error_code;
arch/x86/kernel/cet.c
65
tsk->thread.trap_nr = X86_TRAP_CP;
arch/x86/kernel/cpu/bus_lock.c
288
current->thread.error_code = 0;
arch/x86/kernel/cpu/bus_lock.c
289
current->thread.trap_nr = X86_TRAP_AC;
arch/x86/kernel/cpu/common.c
2481
memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
arch/x86/kernel/cpu/proc.c
196
dump_x86_features(m, task->thread.features);
arch/x86/kernel/cpu/proc.c
200
dump_x86_features(m, task->thread.features_locked);
arch/x86/kernel/dumpstack.c
435
current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
arch/x86/kernel/fpu/core.c
750
*offset = sizeof(struct task_struct) - offsetof(struct task_struct, thread);
arch/x86/kernel/fpu/regset.c
171
ret = copy_uabi_from_kernel_to_xstate(fpu->fpstate, kbuf ?: tmpbuf, &target->thread.pkru);
arch/x86/kernel/fpu/regset.c
181
if (target->thread.features & ARCH_SHSTK_SHSTK)
arch/x86/kernel/fpu/regset.c
354
env->fos = tsk->thread.ds;
arch/x86/kernel/fpu/xstate.c
1274
tsk->thread.pkru, copy_mode);
arch/x86/kernel/fpu/xstate.c
1413
return copy_uabi_to_xstate(x86_task_fpu(tsk)->fpstate, NULL, ubuf, &tsk->thread.pkru);
arch/x86/kernel/fpu/xstate.c
1653
struct task_struct *thread, *leader = current->group_leader;
arch/x86/kernel/fpu/xstate.c
1661
for_each_thread(leader, thread) {
arch/x86/kernel/fpu/xstate.c
1662
if (thread->sas_ss_size && thread->sas_ss_size < framesize)
arch/x86/kernel/hw_breakpoint.c
473
struct thread_struct *t = &tsk->thread;
arch/x86/kernel/hw_breakpoint.c
571
if ((current->thread.virtual_dr6 & DR_TRAP_BITS) ||
arch/x86/kernel/ioport.c
181
struct thread_struct *t = &current->thread;
arch/x86/kernel/ioport.c
25
if (current->thread.io_bitmap) {
arch/x86/kernel/ioport.c
30
refcount_inc(&current->thread.io_bitmap->refcnt);
arch/x86/kernel/ioport.c
31
tsk->thread.io_bitmap = current->thread.io_bitmap;
arch/x86/kernel/ioport.c
39
struct thread_struct *t = &tsk->thread;
arch/x86/kernel/ioport.c
55
struct io_bitmap *iobm = tsk->thread.io_bitmap;
arch/x86/kernel/ioport.c
57
tsk->thread.io_bitmap = NULL;
arch/x86/kernel/ioport.c
73
struct thread_struct *t = &current->thread;
arch/x86/kernel/kgdb.c
153
gdb_regs[GDB_BP] = ((struct inactive_task_frame *)p->thread.sp)->bp;
arch/x86/kernel/kgdb.c
176
gdb_regs[GDB_SP] = p->thread.sp;
arch/x86/kernel/kgdb.c
634
tsk->thread.virtual_dr6 |= (DR_TRAP0 << i);
arch/x86/kernel/process.c
111
dst->thread.vm86 = NULL;
arch/x86/kernel/process.c
130
struct thread_struct *t = &tsk->thread;
arch/x86/kernel/process.c
187
p->thread.sp = (unsigned long) fork_frame;
arch/x86/kernel/process.c
188
p->thread.io_bitmap = NULL;
arch/x86/kernel/process.c
190
p->thread.iopl_warn = 0;
arch/x86/kernel/process.c
191
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
arch/x86/kernel/process.c
195
p->thread.fsindex = current->thread.fsindex;
arch/x86/kernel/process.c
196
p->thread.fsbase = current->thread.fsbase;
arch/x86/kernel/process.c
197
p->thread.gsindex = current->thread.gsindex;
arch/x86/kernel/process.c
198
p->thread.gsbase = current->thread.gsbase;
arch/x86/kernel/process.c
200
savesegment(es, p->thread.es);
arch/x86/kernel/process.c
201
savesegment(ds, p->thread.ds);
arch/x86/kernel/process.c
206
p->thread.sp0 = (unsigned long) (childregs + 1);
arch/x86/kernel/process.c
207
savesegment(gs, p->thread.gs);
arch/x86/kernel/process.c
230
p->thread.pkru = pkru_get_init_value();
arch/x86/kernel/process.c
240
p->thread.pkru = read_pkru();
arch/x86/kernel/process.c
289
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
arch/x86/kernel/process.c
473
struct thread_struct *t = &current->thread;
arch/x86/kernel/process_32.c
157
struct thread_struct *prev = &prev_p->thread,
arch/x86/kernel/process_32.c
158
*next = &next_p->thread;
arch/x86/kernel/process_64.c
270
prev_p->thread.fsbase = 0;
arch/x86/kernel/process_64.c
272
prev_p->thread.gsbase = 0;
arch/x86/kernel/process_64.c
278
savesegment(fs, task->thread.fsindex);
arch/x86/kernel/process_64.c
279
savesegment(gs, task->thread.gsindex);
arch/x86/kernel/process_64.c
286
task->thread.fsbase = rdfsbase();
arch/x86/kernel/process_64.c
287
task->thread.gsbase = __rdgsbase_inactive();
arch/x86/kernel/process_64.c
289
save_base_legacy(task, task->thread.fsindex, FS);
arch/x86/kernel/process_64.c
290
save_base_legacy(task, task->thread.gsindex, GS);
arch/x86/kernel/process_64.c
430
base = get_desc_base(&task->thread.tls_array[idx]);
arch/x86/kernel/process_64.c
492
(task->thread.fsindex == 0))
arch/x86/kernel/process_64.c
493
fsbase = task->thread.fsbase;
arch/x86/kernel/process_64.c
495
fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);
arch/x86/kernel/process_64.c
507
(task->thread.gsindex == 0))
arch/x86/kernel/process_64.c
508
gsbase = task->thread.gsbase;
arch/x86/kernel/process_64.c
510
gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);
arch/x86/kernel/process_64.c
519
task->thread.fsbase = fsbase;
arch/x86/kernel/process_64.c
526
task->thread.gsbase = gsbase;
arch/x86/kernel/process_64.c
612
struct thread_struct *prev = &prev_p->thread;
arch/x86/kernel/process_64.c
613
struct thread_struct *next = &next_p->thread;
arch/x86/kernel/process_64.c
890
task->thread.gsbase = arg2;
arch/x86/kernel/process_64.c
893
task->thread.gsindex = 0;
arch/x86/kernel/process_64.c
920
task->thread.fsbase = arg2;
arch/x86/kernel/process_64.c
922
task->thread.fsindex = 0;
arch/x86/kernel/ptrace.c
1412
tsk->thread.trap_nr = X86_TRAP_DB;
arch/x86/kernel/ptrace.c
1413
tsk->thread.error_code = error_code;
arch/x86/kernel/ptrace.c
195
retval = task->thread.gs;
arch/x86/kernel/ptrace.c
233
task->thread.gs = value;
arch/x86/kernel/ptrace.c
263
return task->thread.fsindex;
arch/x86/kernel/ptrace.c
269
return task->thread.gsindex;
arch/x86/kernel/ptrace.c
275
return task->thread.ds;
arch/x86/kernel/ptrace.c
281
return task->thread.es;
arch/x86/kernel/ptrace.c
310
task->thread.fsindex = value;
arch/x86/kernel/ptrace.c
313
task->thread.gsindex = value;
arch/x86/kernel/ptrace.c
316
task->thread.ds = value;
arch/x86/kernel/ptrace.c
319
task->thread.es = value;
arch/x86/kernel/ptrace.c
476
struct thread_struct *thread = &(current->thread);
arch/x86/kernel/ptrace.c
483
if (thread->ptrace_bps[i] == bp)
arch/x86/kernel/ptrace.c
487
thread->virtual_dr6 |= (DR_TRAP0 << i);
arch/x86/kernel/ptrace.c
562
struct thread_struct *thread = &tsk->thread;
arch/x86/kernel/ptrace.c
568
old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
arch/x86/kernel/ptrace.c
575
struct perf_event *bp = thread->ptrace_bps[i];
arch/x86/kernel/ptrace.c
588
thread->ptrace_bps[i] = bp;
arch/x86/kernel/ptrace.c
613
struct thread_struct *thread = &tsk->thread;
arch/x86/kernel/ptrace.c
618
struct perf_event *bp = thread->ptrace_bps[index];
arch/x86/kernel/ptrace.c
623
val = thread->virtual_dr6 ^ DR6_RESERVED; /* Flip back to arch polarity */
arch/x86/kernel/ptrace.c
625
val = thread->ptrace_dr7;
arch/x86/kernel/ptrace.c
633
struct thread_struct *t = &tsk->thread;
arch/x86/kernel/ptrace.c
672
struct thread_struct *thread = &tsk->thread;
arch/x86/kernel/ptrace.c
679
thread->virtual_dr6 = val ^ DR6_RESERVED; /* Flip to positive polarity */
arch/x86/kernel/ptrace.c
684
thread->ptrace_dr7 = val;
arch/x86/kernel/ptrace.c
696
struct io_bitmap *iobm = target->thread.io_bitmap;
arch/x86/kernel/ptrace.c
705
struct io_bitmap *iobm = target->thread.io_bitmap;
arch/x86/kernel/ptrace.c
896
child->thread.fsbase =
arch/x86/kernel/ptrace.c
905
child->thread.gsbase =
arch/x86/kernel/shstk.c
159
struct thread_shstk *shstk = &current->thread.shstk;
arch/x86/kernel/shstk.c
189
memset(&current->thread.shstk, 0, sizeof(struct thread_shstk));
arch/x86/kernel/shstk.c
190
current->thread.features = 0;
arch/x86/kernel/shstk.c
191
current->thread.features_locked = 0;
arch/x86/kernel/shstk.c
197
struct thread_shstk *shstk = &tsk->thread.shstk;
arch/x86/kernel/shstk.c
33
return current->thread.features & features;
arch/x86/kernel/shstk.c
38
current->thread.features |= features;
arch/x86/kernel/shstk.c
43
current->thread.features &= ~features;
arch/x86/kernel/shstk.c
448
struct thread_shstk *shstk = &tsk->thread.shstk;
arch/x86/kernel/shstk.c
581
return put_user(task->thread.features, (unsigned long __user *)arg2);
arch/x86/kernel/shstk.c
585
task->thread.features_locked |= features;
arch/x86/kernel/shstk.c
592
task->thread.features_locked &= ~features;
arch/x86/kernel/shstk.c
599
if (features & task->thread.features_locked)
arch/x86/kernel/signal_32.c
234
unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault);
arch/x86/kernel/signal_32.c
235
unsafe_put_user(current->thread.error_code, &sc->err, Efault);
arch/x86/kernel/signal_32.c
246
unsafe_put_user(current->thread.cr2, &sc->cr2, Efault);
arch/x86/kernel/signal_64.c
119
unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault);
arch/x86/kernel/signal_64.c
120
unsafe_put_user(current->thread.error_code, &sc->err, Efault);
arch/x86/kernel/signal_64.c
132
unsafe_put_user(current->thread.cr2, &sc->cr2, Efault);
arch/x86/kernel/smpboot.c
1008
idle->thread.sp = (unsigned long)task_pt_regs(idle);
arch/x86/kernel/smpboot.c
1013
initial_stack = idle->thread.sp;
arch/x86/kernel/tls.c
106
if (t == &current->thread)
arch/x86/kernel/tls.c
175
if (p->thread.fsindex == modified_sel)
arch/x86/kernel/tls.c
176
p->thread.fsbase = info.base_addr;
arch/x86/kernel/tls.c
178
if (p->thread.gsindex == modified_sel)
arch/x86/kernel/tls.c
179
p->thread.gsbase = info.base_addr;
arch/x86/kernel/tls.c
231
fill_user_desc(&info, idx, &p->thread.tls_array[index]);
arch/x86/kernel/tls.c
24
struct thread_struct *t = &current->thread;
arch/x86/kernel/tls.c
246
struct thread_struct *t = &target->thread;
arch/x86/kernel/tls.c
260
for (pos = 0, tls = target->thread.tls_array; to.left; pos++, tls++) {
arch/x86/kernel/tls.c
88
struct thread_struct *t = &p->thread;
arch/x86/kernel/traps.c
1322
current->thread.virtual_dr6 = (dr6 & DR_STEP);
arch/x86/kernel/traps.c
1354
dr6 |= current->thread.virtual_dr6;
arch/x86/kernel/traps.c
1437
task->thread.error_code = 0;
arch/x86/kernel/traps.c
1438
task->thread.trap_nr = trapnr;
arch/x86/kernel/traps.c
1452
task->thread.trap_nr = trapnr;
arch/x86/kernel/traps.c
1453
task->thread.error_code = 0;
arch/x86/kernel/traps.c
296
tsk->thread.error_code = error_code;
arch/x86/kernel/traps.c
297
tsk->thread.trap_nr = trapnr;
arch/x86/kernel/traps.c
313
tsk->thread.error_code = error_code;
arch/x86/kernel/traps.c
314
tsk->thread.trap_nr = trapnr;
arch/x86/kernel/traps.c
665
tsk->thread.error_code = error_code;
arch/x86/kernel/traps.c
666
tsk->thread.trap_nr = X86_TRAP_DF;
arch/x86/kernel/traps.c
804
struct thread_struct *t = &current->thread;
arch/x86/kernel/traps.c
890
current->thread.error_code = error_code;
arch/x86/kernel/traps.c
891
current->thread.trap_nr = trapnr;
arch/x86/kernel/traps.c
907
current->thread.error_code = error_code;
arch/x86/kernel/traps.c
908
current->thread.trap_nr = trapnr;
arch/x86/kernel/umip.c
316
tsk->thread.cr2 = (unsigned long)addr;
arch/x86/kernel/umip.c
317
tsk->thread.error_code = X86_PF_USER | X86_PF_WRITE;
arch/x86/kernel/umip.c
318
tsk->thread.trap_nr = X86_TRAP_PF;
arch/x86/kernel/unwind_orc.c
734
struct inactive_task_frame *frame = (void *)task->thread.sp;
arch/x86/kernel/unwind_orc.c
736
state->sp = task->thread.sp + sizeof(*frame);
arch/x86/kernel/uprobes.c
1616
utask->autask.saved_trap_nr = current->thread.trap_nr;
arch/x86/kernel/uprobes.c
1617
current->thread.trap_nr = UPROBE_TRAP_NR;
arch/x86/kernel/uprobes.c
1639
if (t->thread.trap_nr != UPROBE_TRAP_NR)
arch/x86/kernel/uprobes.c
1658
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
arch/x86/kernel/uprobes.c
1659
current->thread.trap_nr = utask->autask.saved_trap_nr;
arch/x86/kernel/uprobes.c
1732
current->thread.trap_nr = utask->autask.saved_trap_nr;
arch/x86/kernel/vm86_32.c
100
struct vm86 *vm86 = current->thread.vm86;
arch/x86/kernel/vm86_32.c
145
tsk->thread.sp0 = vm86->saved_sp0;
arch/x86/kernel/vm86_32.c
146
tsk->thread.sysenter_cs = __KERNEL_CS;
arch/x86/kernel/vm86_32.c
148
refresh_sysenter_cs(&tsk->thread);
arch/x86/kernel/vm86_32.c
202
struct vm86 *vm86 = tsk->thread.vm86;
arch/x86/kernel/vm86_32.c
237
tsk->thread.vm86 = vm86;
arch/x86/kernel/vm86_32.c
326
vm86->saved_sp0 = tsk->thread.sp0;
arch/x86/kernel/vm86_32.c
331
tsk->thread.sp0 += 16;
arch/x86/kernel/vm86_32.c
334
tsk->thread.sysenter_cs = 0;
arch/x86/kernel/vm86_32.c
335
refresh_sysenter_cs(&tsk->thread);
arch/x86/kernel/vm86_32.c
379
set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
arch/x86/kernel/vm86_32.c
389
set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
arch/x86/kernel/vm86_32.c
404
return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
arch/x86/kernel/vm86_32.c
499
struct vm86 *vm86 = current->thread.vm86;
arch/x86/kernel/vm86_32.c
529
struct vm86 *vm86 = current->thread.vm86;
arch/x86/kernel/vm86_32.c
541
current->thread.trap_nr = trapno;
arch/x86/kernel/vm86_32.c
542
current->thread.error_code = error_code;
arch/x86/kernel/vm86_32.c
554
struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;
arch/x86/kernel/vm86_32.c
87
#define VFLAGS (*(unsigned short *)&(current->thread.vm86->veflags))
arch/x86/kernel/vm86_32.c
88
#define VEFLAGS (current->thread.vm86->veflags)
arch/x86/kvm/vmx/tdx.c
802
vt->msr_host_kernel_gs_base = current->thread.gsbase;
arch/x86/kvm/vmx/vmx.c
1399
fs_sel = current->thread.fsindex;
arch/x86/kvm/vmx/vmx.c
1400
gs_sel = current->thread.gsindex;
arch/x86/kvm/vmx/vmx.c
1401
fs_base = current->thread.fsbase;
arch/x86/kvm/vmx/vmx.c
1402
vt->msr_host_kernel_gs_base = current->thread.gsbase;
arch/x86/math-emu/fpu_entry.c
232
current->thread.trap_nr = X86_TRAP_MF;
arch/x86/math-emu/fpu_entry.c
233
current->thread.error_code = 0;
arch/x86/math-emu/fpu_entry.c
625
current->thread.trap_nr = X86_TRAP_MF;
arch/x86/math-emu/fpu_entry.c
626
current->thread.error_code = 0;
arch/x86/mm/fault.c
633
tsk->thread.trap_nr = X86_TRAP_PF;
arch/x86/mm/fault.c
634
tsk->thread.error_code = error_code | X86_PF_USER;
arch/x86/mm/fault.c
635
tsk->thread.cr2 = address;
arch/x86/um/asm/processor.h
20
(address + 65536 + 32 * sizeof(unsigned long) >= UPT_SP(&current->thread.regs.regs))
arch/x86/um/asm/processor.h
39
#define task_pt_regs(t) (&(t)->thread.regs)
arch/x86/um/asm/processor_32.h
38
static inline void arch_flush_thread(struct arch_thread *thread)
arch/x86/um/asm/processor_32.h
41
memset(&thread->tls_array, 0, sizeof(thread->tls_array));
arch/x86/um/asm/processor_64.h
22
static inline void arch_flush_thread(struct arch_thread *thread)
arch/x86/um/ptrace.c
117
struct user_fxsr_struct *fxsave = (void *)target->thread.regs.regs.fp;
arch/x86/um/ptrace.c
152
struct user_fxsr_struct *fxsave = (void *)target->thread.regs.regs.fp;
arch/x86/um/ptrace_32.c
113
child->thread.arch.debugregs[addr] = data;
arch/x86/um/ptrace_32.c
148
return mask & child->thread.regs.regs.gp[reg_offsets[regno]];
arch/x86/um/ptrace_32.c
167
tmp = child->thread.arch.debugregs[addr];
arch/x86/um/ptrace_32.c
67
UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
arch/x86/um/ptrace_32.c
91
child->thread.regs.regs.gp[HOST_EFLAGS] |= value;
arch/x86/um/ptrace_32.c
96
child->thread.regs.regs.gp[reg_offsets[regno]] = value;
arch/x86/um/ptrace_64.c
101
child->thread.regs.regs.gp[HOST_EFLAGS] |= value;
arch/x86/um/ptrace_64.c
108
child->thread.regs.regs.gp[reg_offsets[regno >> 3]] = value;
arch/x86/um/ptrace_64.c
125
child->thread.arch.debugregs[addr] = data;
arch/x86/um/ptrace_64.c
169
return mask & child->thread.regs.regs.gp[reg_offsets[regno >> 3]];
arch/x86/um/ptrace_64.c
187
tmp = child->thread.arch.debugregs[addr];
arch/x86/um/ptrace_64.c
79
UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
arch/x86/um/shared/sysdep/faultinfo_32.h
41
"=m" (current->thread.segv_continue) :: \
arch/x86/um/shared/sysdep/faultinfo_64.h
41
"=m" (current->thread.segv_continue) :: \
arch/x86/um/signal.c
115
struct faultinfo * fi = &current->thread.arch.faultinfo;
arch/x86/um/signal.c
222
err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, 0);
arch/x86/um/signal.c
341
unsigned long sp = PT_REGS_SP(&current->thread.regs);
arch/x86/um/signal.c
353
if (copy_sc_from_user(&current->thread.regs, sc))
arch/x86/um/signal.c
357
PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
arch/x86/um/signal.c
358
return PT_REGS_SYSCALL_RET(&current->thread.regs);
arch/x86/um/signal.c
449
unsigned long sp = PT_REGS_SP(&current->thread.regs);
arch/x86/um/signal.c
460
if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
arch/x86/um/signal.c
464
PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
arch/x86/um/signal.c
465
return PT_REGS_SYSCALL_RET(&current->thread.regs);
arch/x86/um/syscalls_64.c
23
current->thread.regs.regs.gp[FS_BASE / sizeof(unsigned long)] =
arch/x86/um/syscalls_64.c
28
current->thread.regs.regs.gp[GS_BASE / sizeof(unsigned long)] =
arch/x86/um/syscalls_64.c
33
ret = put_user(current->thread.regs.regs.gp[FS_BASE / sizeof(unsigned long)], arg2);
arch/x86/um/syscalls_64.c
36
ret = put_user(current->thread.regs.regs.gp[GS_BASE / sizeof(unsigned long)], arg2);
arch/x86/um/tls_32.c
133
&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
arch/x86/um/tls_32.c
157
&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
arch/x86/um/tls_32.c
200
struct thread_struct *t = &task->thread;
arch/x86/um/tls_32.c
234
struct thread_struct *t = &task->thread;
arch/x86/um/tls_32.c
62
struct thread_struct *t = &task->thread;
arch/x86/um/tls_32.c
93
&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
arch/x86/um/tls_64.c
15
t->thread.regs.regs.gp[FS_BASE / sizeof(unsigned long)] = tls;
arch/xtensa/include/asm/stacktrace.h
27
sp = task->thread.sp;
arch/xtensa/kernel/asm-offsets.c
79
DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
arch/xtensa/kernel/asm-offsets.c
97
DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
arch/xtensa/kernel/asm-offsets.c
98
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
arch/xtensa/kernel/hw_breakpoint.c
237
struct thread_struct *t = &tsk->thread;
arch/xtensa/kernel/hw_breakpoint.c
260
memset(tsk->thread.ptrace_bp, 0, sizeof(tsk->thread.ptrace_bp));
arch/xtensa/kernel/hw_breakpoint.c
261
memset(tsk->thread.ptrace_wp, 0, sizeof(tsk->thread.ptrace_wp));
arch/xtensa/kernel/process.c
284
p->thread.sp = (unsigned long)childregs;
arch/xtensa/kernel/process.c
287
p->thread.sp = (unsigned long)childregs - 16;
arch/xtensa/kernel/process.c
297
p->thread.ra = MAKE_RA_FOR_CALL(
arch/xtensa/kernel/process.c
334
p->thread.ra = MAKE_RA_FOR_CALL(
arch/xtensa/kernel/process.c
351
((unsigned long *)p->thread.sp)[0] = (unsigned long)args->fn;
arch/xtensa/kernel/process.c
352
((unsigned long *)p->thread.sp)[1] = (unsigned long)args->fn_arg;
arch/xtensa/kernel/process.c
383
sp = p->thread.sp;
arch/xtensa/kernel/process.c
384
pc = MAKE_PC_FROM_RA(p->thread.ra, _text);
arch/xtensa/kernel/ptrace.c
373
if (current->thread.ptrace_bp[i] == bp)
arch/xtensa/kernel/ptrace.c
378
if (current->thread.ptrace_wp[i] == bp)
arch/xtensa/kernel/ptrace.c
427
bp = child->thread.ptrace_wp[idx];
arch/xtensa/kernel/ptrace.c
429
bp = child->thread.ptrace_bp[idx];
arch/xtensa/kernel/ptrace.c
466
bp = child->thread.ptrace_wp[idx];
arch/xtensa/kernel/ptrace.c
472
bp = child->thread.ptrace_bp[idx];
arch/xtensa/kernel/ptrace.c
482
child->thread.ptrace_wp[idx] = bp;
arch/xtensa/kernel/ptrace.c
484
child->thread.ptrace_bp[idx] = bp;
crypto/algboss.c
157
thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe");
crypto/algboss.c
158
if (IS_ERR(thread))
crypto/algboss.c
189
struct task_struct *thread;
crypto/algboss.c
206
thread = kthread_run(cryptomgr_test, param, "cryptomgr_test");
crypto/algboss.c
207
if (IS_ERR(thread))
crypto/algboss.c
77
struct task_struct *thread;
drivers/acpi/acpi_dbg.c
402
acpi_aml_io.thread = NULL;
drivers/acpi/acpi_dbg.c
434
acpi_aml_io.thread = t;
drivers/acpi/acpi_dbg.c
52
struct task_struct *thread;
drivers/acpi/acpi_dbg.c
69
return acpi_aml_io.thread ? true : false;
drivers/acpi/acpi_dbg.c
86
current != acpi_aml_io.thread)
drivers/acpi/acpica/acdispat.h
175
acpi_ds_call_control_method(struct acpi_thread_state *thread,
drivers/acpi/acpica/acdispat.h
300
*thread);
drivers/acpi/acpica/acdispat.h
317
*thread);
drivers/acpi/acpica/acdispat.h
321
struct acpi_thread_state *thread);
drivers/acpi/acpica/acdispat.h
326
*thread);
drivers/acpi/acpica/acinterp.h
261
void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread);
drivers/acpi/acpica/aclocal.h
686
struct acpi_thread_state thread;
drivers/acpi/acpica/acstruct.h
90
struct acpi_thread_state *thread;
drivers/acpi/acpica/dbobject.c
37
struct acpi_thread_state *thread;
drivers/acpi/acpica/dbobject.c
66
thread = walk_state->thread;
drivers/acpi/acpica/dbobject.c
67
if (!thread) {
drivers/acpi/acpica/dsdebug.c
120
thread = walk_state->thread;
drivers/acpi/acpica/dsdebug.c
121
if (!thread) {
drivers/acpi/acpica/dsdebug.c
137
next_walk_state = thread->walk_state_list;
drivers/acpi/acpica/dsdebug.c
94
struct acpi_thread_state *thread;
drivers/acpi/acpica/dsmethod.c
344
&& (walk_state->thread->current_sync_level >
drivers/acpi/acpica/dsmethod.c
350
walk_state->thread->current_sync_level));
drivers/acpi/acpica/dsmethod.c
361
(walk_state->thread->thread_id !=
drivers/acpi/acpica/dsmethod.c
380
walk_state->thread->current_sync_level;
drivers/acpi/acpica/dsmethod.c
383
walk_state->thread->thread_id;
drivers/acpi/acpica/dsmethod.c
394
walk_state->thread->current_sync_level =
drivers/acpi/acpica/dsmethod.c
456
acpi_ds_call_control_method(struct acpi_thread_state *thread,
drivers/acpi/acpica/dsmethod.c
512
thread);
drivers/acpi/acpica/dsmethod.c
588
acpi_ds_pop_walk_state(thread);
drivers/acpi/acpica/dsmethod.c
770
walk_state->thread->current_sync_level =
drivers/acpi/acpica/dswstate.c
413
*thread)
drivers/acpi/acpica/dswstate.c
417
if (!thread) {
drivers/acpi/acpica/dswstate.c
422
thread->walk_state_list));
drivers/acpi/acpica/dswstate.c
424
return (thread->walk_state_list);
drivers/acpi/acpica/dswstate.c
442
struct acpi_thread_state *thread)
drivers/acpi/acpica/dswstate.c
446
walk_state->next = thread->walk_state_list;
drivers/acpi/acpica/dswstate.c
447
thread->walk_state_list = walk_state;
drivers/acpi/acpica/dswstate.c
466
struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
drivers/acpi/acpica/dswstate.c
472
walk_state = thread->walk_state_list;
drivers/acpi/acpica/dswstate.c
478
thread->walk_state_list = walk_state->next;
drivers/acpi/acpica/dswstate.c
512
*thread)
drivers/acpi/acpica/dswstate.c
527
walk_state->thread = thread;
drivers/acpi/acpica/dswstate.c
539
if (thread) {
drivers/acpi/acpica/dswstate.c
540
acpi_ds_push_walk_state(walk_state, thread);
drivers/acpi/acpica/exmutex.c
205
if (!walk_state->thread) {
drivers/acpi/acpica/exmutex.c
21
struct acpi_thread_state *thread);
drivers/acpi/acpica/exmutex.c
216
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
drivers/acpi/acpica/exmutex.c
221
walk_state->thread->current_sync_level));
drivers/acpi/acpica/exmutex.c
229
walk_state->thread->current_sync_level,
drivers/acpi/acpica/exmutex.c
231
walk_state->thread));
drivers/acpi/acpica/exmutex.c
235
walk_state->thread->thread_id);
drivers/acpi/acpica/exmutex.c
241
obj_desc->mutex.owner_thread = walk_state->thread;
drivers/acpi/acpica/exmutex.c
243
walk_state->thread->current_sync_level;
drivers/acpi/acpica/exmutex.c
244
walk_state->thread->current_sync_level =
drivers/acpi/acpica/exmutex.c
249
acpi_ex_link_mutex(obj_desc, walk_state->thread);
drivers/acpi/acpica/exmutex.c
255
walk_state->thread->current_sync_level,
drivers/acpi/acpica/exmutex.c
365
if (!walk_state->thread) {
drivers/acpi/acpica/exmutex.c
37
struct acpi_thread_state *thread = obj_desc->mutex.owner_thread;
drivers/acpi/acpica/exmutex.c
376
if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
drivers/acpi/acpica/exmutex.c
380
(u32)walk_state->thread->thread_id,
drivers/acpi/acpica/exmutex.c
39
if (!thread) {
drivers/acpi/acpica/exmutex.c
399
walk_state->thread->current_sync_level));
drivers/acpi/acpica/exmutex.c
415
walk_state->thread->current_sync_level,
drivers/acpi/acpica/exmutex.c
418
walk_state->thread));
drivers/acpi/acpica/exmutex.c
436
walk_state->thread->current_sync_level,
drivers/acpi/acpica/exmutex.c
461
void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
drivers/acpi/acpica/exmutex.c
463
union acpi_operand_object *next = thread->acquired_mutex_list;
drivers/acpi/acpica/exmutex.c
491
thread->current_sync_level =
drivers/acpi/acpica/exmutex.c
61
thread->acquired_mutex_list = obj_desc->mutex.next;
drivers/acpi/acpica/exmutex.c
80
struct acpi_thread_state *thread)
drivers/acpi/acpica/exmutex.c
84
list_head = thread->acquired_mutex_list;
drivers/acpi/acpica/exmutex.c
99
thread->acquired_mutex_list = obj_desc;
drivers/acpi/acpica/psparse.c
411
struct acpi_thread_state *thread;
drivers/acpi/acpica/psparse.c
428
thread = acpi_ut_create_thread_state();
drivers/acpi/acpica/psparse.c
429
if (!thread) {
drivers/acpi/acpica/psparse.c
443
walk_state->thread = thread;
drivers/acpi/acpica/psparse.c
450
walk_state->thread->current_sync_level =
drivers/acpi/acpica/psparse.c
454
acpi_ds_push_walk_state(walk_state, thread);
drivers/acpi/acpica/psparse.c
460
acpi_gbl_current_walk_list = thread;
drivers/acpi/acpica/psparse.c
503
acpi_ds_call_control_method(thread, walk_state,
drivers/acpi/acpica/psparse.c
514
walk_state = acpi_ds_get_current_walk_state(thread);
drivers/acpi/acpica/psparse.c
553
walk_state = acpi_ds_pop_walk_state(thread);
drivers/acpi/acpica/psparse.c
585
walk_state = acpi_ds_get_current_walk_state(thread);
drivers/acpi/acpica/psparse.c
682
acpi_ex_release_all_mutexes(thread);
drivers/acpi/acpica/psparse.c
684
(union acpi_generic_state, thread));
drivers/acpi/acpica/utstate.c
129
state->thread.thread_id = acpi_os_get_thread_id();
drivers/acpi/acpica/utstate.c
133
if (!state->thread.thread_id) {
drivers/acpi/acpica/utstate.c
135
state->thread.thread_id = (acpi_thread_id) 1;
drivers/android/binder.c
1526
static void binder_thread_dec_tmpref(struct binder_thread *thread)
drivers/android/binder.c
1532
binder_inner_proc_lock(thread->proc);
drivers/android/binder.c
1533
atomic_dec(&thread->tmp_ref);
drivers/android/binder.c
1534
if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
drivers/android/binder.c
1535
binder_inner_proc_unlock(thread->proc);
drivers/android/binder.c
1536
binder_free_thread(thread);
drivers/android/binder.c
1539
binder_inner_proc_unlock(thread->proc);
drivers/android/binder.c
2028
struct binder_thread *thread,
drivers/android/binder.c
2192
if (thread)
drivers/android/binder.c
2193
thread->looper_need_return = true;
drivers/android/binder.c
2207
struct binder_thread *thread,
drivers/android/binder.c
2216
binder_transaction_buffer_release(proc, thread, buffer,
drivers/android/binder.c
2222
struct binder_thread *thread)
drivers/android/binder.c
2225
struct binder_proc *proc = thread->proc;
drivers/android/binder.c
2238
proc->pid, thread->pid, (u64)fp->binder,
drivers/android/binder.c
2251
&thread->todo, &rdata);
drivers/android/binder.c
2275
struct binder_thread *thread)
drivers/android/binder.c
2277
struct binder_proc *proc = thread->proc;
drivers/android/binder.c
2287
proc->pid, thread->pid, fp->handle);
drivers/android/binder.c
2348
struct binder_thread *thread,
drivers/android/binder.c
2351
struct binder_proc *proc = thread->proc;
drivers/android/binder.c
2364
proc->pid, thread->pid,
drivers/android/binder.c
2374
proc->pid, thread->pid, fd);
drivers/android/binder.c
2659
struct binder_thread *thread,
drivers/android/binder.c
2665
struct binder_proc *proc = thread->proc;
drivers/android/binder.c
2674
proc->pid, thread->pid, (u64)fda->num_fds);
drivers/android/binder.c
2681
proc->pid, thread->pid, (u64)fda->num_fds);
drivers/android/binder.c
2699
proc->pid, thread->pid);
drivers/android/binder.c
2713
ret = binder_translate_fd(fd, offset, t, thread,
drivers/android/binder.c
2723
struct binder_thread *thread,
drivers/android/binder.c
2732
struct binder_proc *proc = thread->proc;
drivers/android/binder.c
2746
proc->pid, thread->pid);
drivers/android/binder.c
2755
proc->pid, thread->pid);
drivers/android/binder.c
2763
proc->pid, thread->pid);
drivers/android/binder.c
2845
struct binder_thread *thread)
drivers/android/binder.c
2856
BUG_ON(thread);
drivers/android/binder.c
2871
(thread && thread->is_dead)) {
drivers/android/binder.c
2877
if (!thread && !pending_async)
drivers/android/binder.c
2878
thread = binder_select_thread_ilocked(proc);
drivers/android/binder.c
2880
if (thread) {
drivers/android/binder.c
2881
binder_enqueue_thread_work_ilocked(thread, &t->work);
drivers/android/binder.c
2900
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
drivers/android/binder.c
3056
struct binder_thread *thread,
drivers/android/binder.c
3094
e->from_thread = thread->pid;
drivers/android/binder.c
3101
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
drivers/android/binder.c
3107
thread->pid, proc->pid);
drivers/android/binder.c
3119
t->from_tid = thread->pid;
drivers/android/binder.c
3128
t->from = thread;
drivers/android/binder.c
3132
in_reply_to = thread->transaction_stack;
drivers/android/binder.c
3136
proc->pid, thread->pid);
drivers/android/binder.c
3142
if (in_reply_to->to_thread != thread) {
drivers/android/binder.c
3145
proc->pid, thread->pid, in_reply_to->debug_id,
drivers/android/binder.c
3158
thread->transaction_stack = in_reply_to->to_parent;
drivers/android/binder.c
3166
thread->pid, proc->pid);
drivers/android/binder.c
3173
proc->pid, thread->pid,
drivers/android/binder.c
3208
proc->pid, thread->pid, tr->target.handle);
drivers/android/binder.c
3224
proc->pid, thread->pid);
drivers/android/binder.c
3233
proc->pid, thread->pid);
drivers/android/binder.c
3242
thread->pid, proc->pid);
drivers/android/binder.c
3251
thread->pid, proc->pid);
drivers/android/binder.c
3259
w = list_first_entry_or_null(&thread->todo,
drivers/android/binder.c
3273
proc->pid, thread->pid);
drivers/android/binder.c
3281
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
drivers/android/binder.c
3284
tmp = thread->transaction_stack;
drivers/android/binder.c
3285
if (tmp->to_thread != thread) {
drivers/android/binder.c
3288
proc->pid, thread->pid, tmp->debug_id,
drivers/android/binder.c
3326
thread->pid, proc->pid);
drivers/android/binder.c
3337
proc->pid, thread->pid, t->debug_id,
drivers/android/binder.c
3344
proc->pid, thread->pid, t->debug_id,
drivers/android/binder.c
3357
thread->pid, proc->pid);
drivers/android/binder.c
3367
thread->pid, proc->pid);
drivers/android/binder.c
3429
proc->pid, thread->pid);
drivers/android/binder.c
3437
proc->pid, thread->pid, (u64)tr->offsets_size);
drivers/android/binder.c
3445
proc->pid, thread->pid,
drivers/android/binder.c
3473
thread->pid, proc->pid);
drivers/android/binder.c
3493
proc->pid, thread->pid);
drivers/android/binder.c
3503
proc->pid, thread->pid,
drivers/android/binder.c
3526
ret = binder_translate_binder(fp, t, thread);
drivers/android/binder.c
3534
thread->pid, proc->pid);
drivers/android/binder.c
3546
ret = binder_translate_handle(fp, t, thread);
drivers/android/binder.c
3553
thread->pid, proc->pid);
drivers/android/binder.c
3566
thread, in_reply_to);
drivers/android/binder.c
3575
thread->pid, proc->pid);
drivers/android/binder.c
3599
proc->pid, thread->pid);
drivers/android/binder.c
3612
proc->pid, thread->pid);
drivers/android/binder.c
3627
proc->pid, thread->pid,
drivers/android/binder.c
3638
thread, in_reply_to);
drivers/android/binder.c
3646
thread->pid, proc->pid);
drivers/android/binder.c
3664
proc->pid, thread->pid);
drivers/android/binder.c
3675
thread->pid, proc->pid);
drivers/android/binder.c
3688
thread, bp,
drivers/android/binder.c
3699
thread->pid, proc->pid);
drivers/android/binder.c
3710
proc->pid, thread->pid, hdr->type);
drivers/android/binder.c
3724
proc->pid, thread->pid);
drivers/android/binder.c
3735
proc->pid, thread->pid);
drivers/android/binder.c
3750
binder_enqueue_thread_work(thread, tcomplete);
drivers/android/binder.c
3774
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
drivers/android/binder.c
3775
t->from_parent = thread->transaction_stack;
drivers/android/binder.c
3776
thread->transaction_stack = t;
drivers/android/binder.c
3782
binder_pop_transaction_ilocked(thread, t);
drivers/android/binder.c
3808
binder_enqueue_thread_work(thread, tcomplete);
drivers/android/binder.c
3828
proc->pid, thread->pid,
drivers/android/binder.c
3874
proc->pid, thread->pid, reply ? "reply" :
drivers/android/binder.c
3904
BUG_ON(thread->return_error.cmd != BR_OK);
drivers/android/binder.c
3908
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
drivers/android/binder.c
3909
binder_enqueue_thread_work(thread, &thread->return_error.work);
drivers/android/binder.c
3913
binder_set_extended_error(&thread->ee, t_debug_id,
drivers/android/binder.c
3916
thread->return_error.cmd = return_error;
drivers/android/binder.c
3917
binder_enqueue_thread_work(thread, &thread->return_error.work);
drivers/android/binder.c
3923
struct binder_thread *thread,
drivers/android/binder.c
3936
proc->pid, thread->pid, handle_cookie->handle);
drivers/android/binder.c
3945
proc->pid, thread->pid);
drivers/android/binder.c
3976
struct binder_thread *thread,
drivers/android/binder.c
3986
proc->pid, thread->pid, handle_cookie->handle);
drivers/android/binder.c
3995
proc->pid, thread->pid);
drivers/android/binder.c
4004
proc->pid, thread->pid, (u64)freeze->cookie,
drivers/android/binder.c
4035
struct binder_thread *thread,
drivers/android/binder.c
4053
proc->pid, thread->pid, (u64)cookie);
drivers/android/binder.c
4082
struct binder_thread *thread,
drivers/android/binder.c
4111
binder_release_entire_buffer(proc, thread, buffer, is_failure);
drivers/android/binder.c
4116
struct binder_thread *thread,
drivers/android/binder.c
4126
while (ptr < end && thread->return_error.cmd == BR_OK) {
drivers/android/binder.c
4136
atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
drivers/android/binder.c
4162
proc->pid, thread->pid);
drivers/android/binder.c
4178
proc->pid, thread->pid,
drivers/android/binder.c
4198
proc->pid, thread->pid, debug_string,
drivers/android/binder.c
4204
proc->pid, thread->pid, debug_string,
drivers/android/binder.c
4225
proc->pid, thread->pid,
drivers/android/binder.c
4234
proc->pid, thread->pid,
drivers/android/binder.c
4246
proc->pid, thread->pid,
drivers/android/binder.c
4256
proc->pid, thread->pid,
drivers/android/binder.c
4269
proc->pid, thread->pid,
drivers/android/binder.c
4298
proc->pid, thread->pid,
drivers/android/binder.c
4303
proc->pid, thread->pid,
drivers/android/binder.c
4310
proc->pid, thread->pid,
drivers/android/binder.c
4314
binder_free_buf(proc, thread, buffer, false);
drivers/android/binder.c
4325
binder_transaction(proc, thread, &tr.transaction_data,
drivers/android/binder.c
4336
binder_transaction(proc, thread, &tr,
drivers/android/binder.c
4344
proc->pid, thread->pid);
drivers/android/binder.c
4346
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
drivers/android/binder.c
4347
thread->looper |= BINDER_LOOPER_STATE_INVALID;
drivers/android/binder.c
4349
proc->pid, thread->pid);
drivers/android/binder.c
4351
thread->looper |= BINDER_LOOPER_STATE_INVALID;
drivers/android/binder.c
4353
proc->pid, thread->pid);
drivers/android/binder.c
4358
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
drivers/android/binder.c
4364
proc->pid, thread->pid);
drivers/android/binder.c
4365
if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
drivers/android/binder.c
4366
thread->looper |= BINDER_LOOPER_STATE_INVALID;
drivers/android/binder.c
4368
proc->pid, thread->pid);
drivers/android/binder.c
4370
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
drivers/android/binder.c
4375
proc->pid, thread->pid);
drivers/android/binder.c
4376
thread->looper |= BINDER_LOOPER_STATE_EXITED;
drivers/android/binder.c
4399
WARN_ON(thread->return_error.cmd !=
drivers/android/binder.c
4401
thread->return_error.cmd = BR_ERROR;
drivers/android/binder.c
4403
thread,
drivers/android/binder.c
4404
&thread->return_error.work);
drivers/android/binder.c
4408
proc->pid, thread->pid);
drivers/android/binder.c
4416
proc->pid, thread->pid,
drivers/android/binder.c
4428
proc->pid, thread->pid,
drivers/android/binder.c
4440
proc->pid, thread->pid);
drivers/android/binder.c
4462
proc->pid, thread->pid);
drivers/android/binder.c
4470
proc->pid, thread->pid,
drivers/android/binder.c
4481
if (thread->looper &
drivers/android/binder.c
4485
thread,
drivers/android/binder.c
4527
proc->pid, thread->pid, (u64)cookie,
drivers/android/binder.c
4531
proc->pid, thread->pid, (u64)cookie);
drivers/android/binder.c
4538
if (thread->looper &
drivers/android/binder.c
4542
thread, &death->work);
drivers/android/binder.c
4560
error = binder_request_freeze_notification(proc, thread,
drivers/android/binder.c
4573
error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
drivers/android/binder.c
4586
error = binder_freeze_notification_done(proc, thread, cookie);
drivers/android/binder.c
4593
proc->pid, thread->pid, cmd);
drivers/android/binder.c
4602
struct binder_thread *thread, uint32_t cmd)
drivers/android/binder.c
4608
atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
drivers/android/binder.c
4613
struct binder_thread *thread,
drivers/android/binder.c
463
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
drivers/android/binder.c
4634
binder_stat_br(proc, thread, cmd);
drivers/android/binder.c
4636
proc->pid, thread->pid, cmd_name, node_debug_id,
drivers/android/binder.c
4643
static int binder_wait_for_work(struct binder_thread *thread,
drivers/android/binder.c
4647
struct binder_proc *proc = thread->proc;
drivers/android/binder.c
4652
prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
drivers/android/binder.c
4653
if (binder_has_work_ilocked(thread, do_proc_work))
drivers/android/binder.c
4656
list_add(&thread->waiting_thread_node,
drivers/android/binder.c
466
WARN_ON(!list_empty(&thread->waiting_thread_node));
drivers/android/binder.c
4661
list_del_init(&thread->waiting_thread_node);
drivers/android/binder.c
4667
finish_wait(&thread->wait, &wait);
drivers/android/binder.c
467
binder_enqueue_work_ilocked(work, &thread->todo);
drivers/android/binder.c
4730
struct binder_thread *thread,
drivers/android/binder.c
4749
wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
drivers/android/binder.c
4752
thread->looper |= BINDER_LOOPER_STATE_WAITING;
drivers/android/binder.c
4755
!!thread->transaction_stack,
drivers/android/binder.c
4756
!binder_worklist_empty(proc, &thread->todo));
drivers/android/binder.c
4758
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
drivers/android/binder.c
4761
proc->pid, thread->pid, thread->looper);
drivers/android/binder.c
4769
if (!binder_has_work(thread, wait_for_proc_work))
drivers/android/binder.c
4772
ret = binder_wait_for_work(thread, wait_for_proc_work);
drivers/android/binder.c
4775
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
drivers/android/binder.c
4791
if (!binder_worklist_empty_ilocked(&thread->todo))
drivers/android/binder.c
4792
list = &thread->todo;
drivers/android/binder.c
4800
if (ptr - buffer == 4 && !thread->looper_need_return)
drivers/android/binder.c
481
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
drivers/android/binder.c
4810
if (binder_worklist_empty_ilocked(&thread->todo))
drivers/android/binder.c
4811
thread->process_todo = false;
drivers/android/binder.c
4830
binder_stat_br(proc, thread, cmd);
drivers/android/binder.c
484
WARN_ON(!list_empty(&thread->waiting_thread_node));
drivers/android/binder.c
4849
binder_stat_br(proc, thread, cmd);
drivers/android/binder.c
485
binder_enqueue_work_ilocked(work, &thread->todo);
drivers/android/binder.c
4852
proc->pid, thread->pid);
drivers/android/binder.c
4890
proc->pid, thread->pid,
drivers/android/binder.c
4913
proc, thread, &ptr, node_ptr,
drivers/android/binder.c
4918
proc, thread, &ptr, node_ptr,
drivers/android/binder.c
492
if (thread->looper & BINDER_LOOPER_STATE_POLL &&
drivers/android/binder.c
4923
proc, thread, &ptr, node_ptr,
drivers/android/binder.c
4928
proc, thread, &ptr, node_ptr,
drivers/android/binder.c
493
thread->pid == current->pid && !thread->process_todo)
drivers/android/binder.c
4934
proc->pid, thread->pid,
drivers/android/binder.c
494
wake_up_interruptible_sync(&thread->wait);
drivers/android/binder.c
4957
proc->pid, thread->pid,
drivers/android/binder.c
496
thread->process_todo = true;
drivers/android/binder.c
4978
binder_stat_br(proc, thread, cmd);
drivers/android/binder.c
5001
binder_stat_br(proc, thread, BR_FROZEN_BINDER);
drivers/android/binder.c
5019
binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
drivers/android/binder.c
5025
proc->pid, thread->pid, w->type);
drivers/android/binder.c
5077
binder_free_buf(proc, thread, buffer, true);
drivers/android/binder.c
508
binder_enqueue_thread_work(struct binder_thread *thread,
drivers/android/binder.c
5080
proc->pid, thread->pid,
drivers/android/binder.c
5089
binder_stat_br(proc, thread, cmd);
drivers/android/binder.c
511
binder_inner_proc_lock(thread->proc);
drivers/android/binder.c
512
binder_enqueue_thread_work_ilocked(thread, work);
drivers/android/binder.c
5128
binder_stat_br(proc, thread, cmd);
drivers/android/binder.c
513
binder_inner_proc_unlock(thread->proc);
drivers/android/binder.c
5131
proc->pid, thread->pid,
drivers/android/binder.c
5143
binder_inner_proc_lock(thread->proc);
drivers/android/binder.c
5144
t->to_parent = thread->transaction_stack;
drivers/android/binder.c
5145
t->to_thread = thread;
drivers/android/binder.c
5146
thread->transaction_stack = t;
drivers/android/binder.c
5147
binder_inner_proc_unlock(thread->proc);
drivers/android/binder.c
5159
list_empty(&thread->proc->waiting_threads) &&
drivers/android/binder.c
5161
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
drivers/android/binder.c
5168
proc->pid, thread->pid);
drivers/android/binder.c
5171
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
drivers/android/binder.c
5251
struct binder_thread *thread = NULL;
drivers/android/binder.c
5257
thread = rb_entry(parent, struct binder_thread, rb_node);
drivers/android/binder.c
5259
if (current->pid < thread->pid)
drivers/android/binder.c
5261
else if (current->pid > thread->pid)
drivers/android/binder.c
5264
return thread;
drivers/android/binder.c
5268
thread = new_thread;
drivers/android/binder.c
5270
thread->proc = proc;
drivers/android/binder.c
5271
thread->pid = current->pid;
drivers/android/binder.c
5272
atomic_set(&thread->tmp_ref, 0);
drivers/android/binder.c
5273
init_waitqueue_head(&thread->wait);
drivers/android/binder.c
5274
INIT_LIST_HEAD(&thread->todo);
drivers/android/binder.c
5275
rb_link_node(&thread->rb_node, parent, p);
drivers/android/binder.c
5276
rb_insert_color(&thread->rb_node, &proc->threads);
drivers/android/binder.c
5277
thread->looper_need_return = true;
drivers/android/binder.c
5278
thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
drivers/android/binder.c
5279
thread->return_error.cmd = BR_OK;
drivers/android/binder.c
5280
thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
drivers/android/binder.c
5281
thread->reply_error.cmd = BR_OK;
drivers/android/binder.c
5282
thread->ee.command = BR_OK;
drivers/android/binder.c
5284
return thread;
drivers/android/binder.c
5289
struct binder_thread *thread;
drivers/android/binder.c
5293
thread = binder_get_thread_ilocked(proc, NULL);
drivers/android/binder.c
5295
if (!thread) {
drivers/android/binder.c
5296
new_thread = kzalloc_obj(*thread);
drivers/android/binder.c
5300
thread = binder_get_thread_ilocked(proc, new_thread);
drivers/android/binder.c
5302
if (thread != new_thread)
drivers/android/binder.c
5305
return thread;
drivers/android/binder.c
5331
static void binder_free_thread(struct binder_thread *thread)
drivers/android/binder.c
5333
BUG_ON(!list_empty(&thread->todo));
drivers/android/binder.c
5335
binder_proc_dec_tmpref(thread->proc);
drivers/android/binder.c
5336
kfree(thread);
drivers/android/binder.c
5340
struct binder_thread *thread)
drivers/android/binder.c
5347
binder_inner_proc_lock(thread->proc);
drivers/android/binder.c
5359
atomic_inc(&thread->tmp_ref);
drivers/android/binder.c
5360
rb_erase(&thread->rb_node, &proc->threads);
drivers/android/binder.c
5361
t = thread->transaction_stack;
drivers/android/binder.c
5364
if (t->to_thread == thread)
drivers/android/binder.c
5369
thread->is_dead = true;
drivers/android/binder.c
5376
proc->pid, thread->pid,
drivers/android/binder.c
5378
(t->to_thread == thread) ? "in" : "out");
drivers/android/binder.c
5380
if (t->to_thread == thread) {
drivers/android/binder.c
5381
thread->proc->outstanding_txns--;
drivers/android/binder.c
5389
} else if (t->from == thread) {
drivers/android/binder.c
5407
if (thread->looper & BINDER_LOOPER_STATE_POLL)
drivers/android/binder.c
5408
wake_up_pollfree(&thread->wait);
drivers/android/binder.c
5410
binder_inner_proc_unlock(thread->proc);
drivers/android/binder.c
5419
if (thread->looper & BINDER_LOOPER_STATE_POLL)
drivers/android/binder.c
5424
binder_release_work(proc, &thread->todo);
drivers/android/binder.c
5425
binder_thread_dec_tmpref(thread);
drivers/android/binder.c
5433
struct binder_thread *thread = NULL;
drivers/android/binder.c
5436
thread = binder_get_thread(proc);
drivers/android/binder.c
5437
if (!thread)
drivers/android/binder.c
5440
binder_inner_proc_lock(thread->proc);
drivers/android/binder.c
5441
thread->looper |= BINDER_LOOPER_STATE_POLL;
drivers/android/binder.c
5442
wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
drivers/android/binder.c
5444
binder_inner_proc_unlock(thread->proc);
drivers/android/binder.c
5446
poll_wait(filp, &thread->wait, wait);
drivers/android/binder.c
5448
if (binder_has_work(thread, wait_for_proc_work))
drivers/android/binder.c
5455
struct binder_thread *thread)
drivers/android/binder.c
5467
proc->pid, thread->pid,
drivers/android/binder.c
5472
ret = binder_thread_write(proc, thread,
drivers/android/binder.c
5483
ret = binder_thread_read(proc, thread, bwr.read_buffer,
drivers/android/binder.c
5497
proc->pid, thread->pid,
drivers/android/binder.c
551
static void binder_free_thread(struct binder_thread *thread);
drivers/android/binder.c
555
static bool binder_has_work_ilocked(struct binder_thread *thread,
drivers/android/binder.c
558
return thread->process_todo ||
drivers/android/binder.c
559
thread->looper_need_return ||
drivers/android/binder.c
561
!binder_worklist_empty_ilocked(&thread->proc->todo));
drivers/android/binder.c
5612
struct binder_thread *thread;
drivers/android/binder.c
5618
thread = rb_entry(n, struct binder_thread, rb_node);
drivers/android/binder.c
5619
if (thread->transaction_stack)
drivers/android/binder.c
564
static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
drivers/android/binder.c
568
binder_inner_proc_lock(thread->proc);
drivers/android/binder.c
569
has_work = binder_has_work_ilocked(thread, do_proc_work);
drivers/android/binder.c
570
binder_inner_proc_unlock(thread->proc);
drivers/android/binder.c
575
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
drivers/android/binder.c
5757
static int binder_ioctl_get_extended_error(struct binder_thread *thread,
drivers/android/binder.c
5762
binder_inner_proc_lock(thread->proc);
drivers/android/binder.c
5763
ee = thread->ee;
drivers/android/binder.c
5764
binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
drivers/android/binder.c
5765
binder_inner_proc_unlock(thread->proc);
drivers/android/binder.c
577
return !thread->transaction_stack &&
drivers/android/binder.c
5777
struct binder_thread *thread;
drivers/android/binder.c
578
binder_worklist_empty_ilocked(&thread->todo);
drivers/android/binder.c
5786
thread = binder_get_thread(proc);
drivers/android/binder.c
5787
if (thread == NULL) {
drivers/android/binder.c
5794
ret = binder_ioctl_write_read(filp, arg, thread);
drivers/android/binder.c
5830
proc->pid, thread->pid);
drivers/android/binder.c
5831
binder_thread_release(proc, thread);
drivers/android/binder.c
5832
thread = NULL;
drivers/android/binder.c
585
struct binder_thread *thread;
drivers/android/binder.c
588
thread = rb_entry(n, struct binder_thread, rb_node);
drivers/android/binder.c
589
if (thread->looper & BINDER_LOOPER_STATE_POLL &&
drivers/android/binder.c
590
binder_available_for_proc_work_ilocked(thread)) {
drivers/android/binder.c
592
wake_up_interruptible_sync(&thread->wait);
drivers/android/binder.c
594
wake_up_interruptible(&thread->wait);
drivers/android/binder.c
5971
ret = binder_ioctl_get_extended_error(thread, ubuf);
drivers/android/binder.c
5981
if (thread)
drivers/android/binder.c
5982
thread->looper_need_return = false;
drivers/android/binder.c
614
struct binder_thread *thread;
drivers/android/binder.c
6164
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
drivers/android/binder.c
6166
thread->looper_need_return = true;
drivers/android/binder.c
6167
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
drivers/android/binder.c
6168
wake_up_interruptible(&thread->wait);
drivers/android/binder.c
617
thread = list_first_entry_or_null(&proc->waiting_threads,
drivers/android/binder.c
621
if (thread)
drivers/android/binder.c
622
list_del_init(&thread->waiting_thread_node);
drivers/android/binder.c
624
return thread;
drivers/android/binder.c
6293
struct binder_thread *thread;
drivers/android/binder.c
6295
thread = rb_entry(n, struct binder_thread, rb_node);
drivers/android/binder.c
6298
active_transactions += binder_thread_release(proc, thread);
drivers/android/binder.c
644
struct binder_thread *thread,
drivers/android/binder.c
649
if (thread) {
drivers/android/binder.c
6490
struct binder_thread *thread,
drivers/android/binder.c
6499
thread->pid, thread->looper,
drivers/android/binder.c
6500
thread->looper_need_return,
drivers/android/binder.c
6501
atomic_read(&thread->tmp_ref));
drivers/android/binder.c
6503
t = thread->transaction_stack;
drivers/android/binder.c
6505
if (t->from == thread) {
drivers/android/binder.c
6506
print_binder_transaction_ilocked(m, thread->proc,
drivers/android/binder.c
6509
} else if (t->to_thread == thread) {
drivers/android/binder.c
651
wake_up_interruptible_sync(&thread->wait);
drivers/android/binder.c
6510
print_binder_transaction_ilocked(m, thread->proc,
drivers/android/binder.c
6514
print_binder_transaction_ilocked(m, thread->proc,
drivers/android/binder.c
6519
list_for_each_entry(w, &thread->todo, entry) {
drivers/android/binder.c
6520
print_binder_work_ilocked(m, thread->proc, " ",
drivers/android/binder.c
653
wake_up_interruptible(&thread->wait);
drivers/android/binder.c
675
struct binder_thread *thread = binder_select_thread_ilocked(proc);
drivers/android/binder.c
677
binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
drivers/android/binder.c
6787
struct binder_thread *thread;
drivers/android/binder.c
6801
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
drivers/android/binder.c
844
struct binder_thread *thread = container_of(target_list,
drivers/android/binder.c
847
BUG_ON(&thread->todo != target_list);
drivers/android/binder.c
848
binder_enqueue_deferred_thread_work_ilocked(thread,
drivers/android/binder/rust_binder_events.h
34
TP_PROTO(bool reply, rust_binder_transaction t, struct task_struct *thread),
drivers/android/binder/rust_binder_events.h
35
TP_ARGS(reply, t, thread),
drivers/android/binder/rust_binder_events.h
52
__entry->to_thread = thread ? thread->pid : 0;
drivers/base/devtmpfs.c
120
wake_up_process(thread);
drivers/base/devtmpfs.c
133
if (!thread)
drivers/base/devtmpfs.c
160
if (!thread)
drivers/base/devtmpfs.c
185
d_inode(dentry)->i_private = &thread;
drivers/base/devtmpfs.c
246
d_inode(dentry)->i_private = &thread;
drivers/base/devtmpfs.c
261
if (d_inode(dentry)->i_private == &thread)
drivers/base/devtmpfs.c
299
if (inode->i_private != &thread)
drivers/base/devtmpfs.c
367
if (!thread)
drivers/base/devtmpfs.c
40
static struct task_struct *thread;
drivers/base/devtmpfs.c
497
thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
drivers/base/devtmpfs.c
498
if (!IS_ERR(thread)) {
drivers/base/devtmpfs.c
501
err = PTR_ERR(thread);
drivers/base/devtmpfs.c
502
thread = NULL;
drivers/base/devtmpfs.c
508
thread = NULL;
drivers/bluetooth/btmrvl_main.c
585
struct btmrvl_thread *thread = data;
drivers/bluetooth/btmrvl_main.c
586
struct btmrvl_private *priv = thread->priv;
drivers/bluetooth/btmrvl_main.c
595
add_wait_queue(&thread->wait_q, &wait);
drivers/bluetooth/btmrvl_main.c
613
remove_wait_queue(&thread->wait_q, &wait);
drivers/char/ipmi/ipmi_ipmb.c
394
if (iidev->thread) {
drivers/char/ipmi/ipmi_ipmb.c
395
struct task_struct *t = iidev->thread;
drivers/char/ipmi/ipmi_ipmb.c
397
iidev->thread = NULL;
drivers/char/ipmi/ipmi_ipmb.c
533
iidev->thread = kthread_run(ipmi_ipmb_thread, iidev,
drivers/char/ipmi/ipmi_ipmb.c
535
if (IS_ERR(iidev->thread)) {
drivers/char/ipmi/ipmi_ipmb.c
536
rv = PTR_ERR(iidev->thread);
drivers/char/ipmi/ipmi_ipmb.c
58
struct task_struct *thread;
drivers/char/ipmi/ipmi_si_intf.c
1203
new_smi->thread = kthread_run(ipmi_thread, new_smi,
drivers/char/ipmi/ipmi_si_intf.c
1205
if (IS_ERR(new_smi->thread)) {
drivers/char/ipmi/ipmi_si_intf.c
1208
PTR_ERR(new_smi->thread));
drivers/char/ipmi/ipmi_si_intf.c
1209
new_smi->thread = NULL;
drivers/char/ipmi/ipmi_si_intf.c
1868
if (smi_info->thread != NULL) {
drivers/char/ipmi/ipmi_si_intf.c
1869
kthread_stop(smi_info->thread);
drivers/char/ipmi/ipmi_si_intf.c
1870
smi_info->thread = NULL;
drivers/char/ipmi/ipmi_si_intf.c
249
struct task_struct *thread;
drivers/char/ipmi/ipmi_si_intf.c
364
if (smi_info->thread)
drivers/char/ipmi/ipmi_si_intf.c
365
wake_up_process(smi_info->thread);
drivers/char/ipmi/ipmi_si_intf.c
899
if (smi_info->thread)
drivers/char/ipmi/ipmi_si_intf.c
900
wake_up_process(smi_info->thread);
drivers/char/ipmi/ipmi_ssif.c
1271
if (ssif_info->thread)
drivers/char/ipmi/ipmi_ssif.c
1272
kthread_stop(ssif_info->thread);
drivers/char/ipmi/ipmi_ssif.c
1886
ssif_info->thread = kthread_run(ipmi_ssif_thread, ssif_info,
drivers/char/ipmi/ipmi_ssif.c
1888
if (IS_ERR(ssif_info->thread)) {
drivers/char/ipmi/ipmi_ssif.c
1889
rv = PTR_ERR(ssif_info->thread);
drivers/char/ipmi/ipmi_ssif.c
261
struct task_struct *thread;
drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
29
PM_INFO_REGSET_ENTRY(pm.thread, MIN_PWR_ACK_PENDING),
drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
190
__u32 thread;
drivers/crypto/mxs-dcp.c
1153
sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
drivers/crypto/mxs-dcp.c
1155
if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
drivers/crypto/mxs-dcp.c
1157
ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
drivers/crypto/mxs-dcp.c
1161
sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
drivers/crypto/mxs-dcp.c
1163
if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
drivers/crypto/mxs-dcp.c
1165
ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
drivers/crypto/mxs-dcp.c
1211
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
drivers/crypto/mxs-dcp.c
1214
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
drivers/crypto/mxs-dcp.c
1232
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
drivers/crypto/mxs-dcp.c
1233
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
drivers/crypto/mxs-dcp.c
487
wake_up_process(sdcp->thread[actx->chan]);
drivers/crypto/mxs-dcp.c
81
struct task_struct *thread[DCP_MAX_CHANS];
drivers/crypto/mxs-dcp.c
834
wake_up_process(sdcp->thread[actx->chan]);
drivers/dma/dmatest.c
1000
init_waitqueue_head(&thread->done_wait);
drivers/dma/dmatest.c
1002
thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
drivers/dma/dmatest.c
1004
if (IS_ERR(thread->task)) {
drivers/dma/dmatest.c
1007
kfree(thread);
drivers/dma/dmatest.c
1012
get_task_struct(thread->task);
drivers/dma/dmatest.c
1013
list_add_tail(&thread->node, &dtc->threads);
drivers/dma/dmatest.c
1014
thread->pending = true;
drivers/dma/dmatest.c
1138
struct dmatest_thread *thread;
drivers/dma/dmatest.c
1141
list_for_each_entry(thread, &dtc->threads, node) {
drivers/dma/dmatest.c
1142
wake_up_process(thread->task);
drivers/dma/dmatest.c
1326
struct dmatest_thread *thread;
drivers/dma/dmatest.c
1329
list_for_each_entry(thread, &dtc->threads, node) {
drivers/dma/dmatest.c
257
struct dmatest_thread *thread;
drivers/dma/dmatest.c
259
list_for_each_entry(thread, &dtc->threads, node) {
drivers/dma/dmatest.c
260
if (!thread->done && !thread->pending)
drivers/dma/dmatest.c
273
struct dmatest_thread *thread;
drivers/dma/dmatest.c
275
list_for_each_entry(thread, &dtc->threads, node) {
drivers/dma/dmatest.c
276
if (thread->pending)
drivers/dma/dmatest.c
437
struct dmatest_thread *thread =
drivers/dma/dmatest.c
439
if (!thread->done) {
drivers/dma/dmatest.c
576
struct dmatest_thread *thread = data;
drivers/dma/dmatest.c
577
struct dmatest_done *done = &thread->test_done;
drivers/dma/dmatest.c
611
thread->pending = false;
drivers/dma/dmatest.c
612
info = thread->info;
drivers/dma/dmatest.c
614
chan = thread->chan;
drivers/dma/dmatest.c
618
src = &thread->src;
drivers/dma/dmatest.c
619
dst = &thread->dst;
drivers/dma/dmatest.c
620
if (thread->type == DMA_MEMCPY) {
drivers/dma/dmatest.c
624
} else if (thread->type == DMA_MEMSET) {
drivers/dma/dmatest.c
629
} else if (thread->type == DMA_XOR) {
drivers/dma/dmatest.c
635
} else if (thread->type == DMA_PQ) {
drivers/dma/dmatest.c
793
if (thread->type == DMA_MEMCPY)
drivers/dma/dmatest.c
797
else if (thread->type == DMA_MEMSET)
drivers/dma/dmatest.c
802
else if (thread->type == DMA_XOR)
drivers/dma/dmatest.c
807
else if (thread->type == DMA_PQ) {
drivers/dma/dmatest.c
844
wait_event_freezable_timeout(thread->done_wait,
drivers/dma/dmatest.c
942
thread->done = true;
drivers/dma/dmatest.c
950
struct dmatest_thread *thread;
drivers/dma/dmatest.c
954
list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
drivers/dma/dmatest.c
955
ret = kthread_stop(thread->task);
drivers/dma/dmatest.c
957
thread->task->comm, ret);
drivers/dma/dmatest.c
958
list_del(&thread->node);
drivers/dma/dmatest.c
959
put_task_struct(thread->task);
drivers/dma/dmatest.c
960
kfree(thread);
drivers/dma/dmatest.c
973
struct dmatest_thread *thread;
drivers/dma/dmatest.c
990
thread = kzalloc_obj(struct dmatest_thread);
drivers/dma/dmatest.c
991
if (!thread) {
drivers/dma/dmatest.c
996
thread->info = info;
drivers/dma/dmatest.c
997
thread->chan = dtc->chan;
drivers/dma/dmatest.c
998
thread->type = type;
drivers/dma/dmatest.c
999
thread->test_done.wait = &thread->done_wait;
drivers/dma/img-mdc-dma.c
123
unsigned int thread;
drivers/dma/img-mdc-dma.c
216
(mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
drivers/dma/img-mdc-dma.c
217
(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
drivers/dma/img-mdc-dma.c
218
(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
drivers/dma/img-mdc-dma.c
549
val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
drivers/dma/img-mdc-dma.c
550
(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
drivers/dma/img-mdc-dma.c
551
(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
drivers/dma/img-mdc-dma.c
823
mchan->thread = dma_spec->args[2];
drivers/dma/pl330.c
2052
ret = pl330_submit_req(pch->thread, desc);
drivers/dma/pl330.c
2089
spin_lock(&pch->thread->dmac->lock);
drivers/dma/pl330.c
2090
_stop(pch->thread);
drivers/dma/pl330.c
2091
spin_unlock(&pch->thread->dmac->lock);
drivers/dma/pl330.c
2096
spin_lock(&pch->thread->dmac->lock);
drivers/dma/pl330.c
2097
pl330_start_thread(pch->thread);
drivers/dma/pl330.c
2098
spin_unlock(&pch->thread->dmac->lock);
drivers/dma/pl330.c
2114
spin_lock(&pch->thread->dmac->lock);
drivers/dma/pl330.c
2115
pl330_start_thread(pch->thread);
drivers/dma/pl330.c
2116
spin_unlock(&pch->thread->dmac->lock);
drivers/dma/pl330.c
2170
pch->thread = pl330_request_channel(pl330);
drivers/dma/pl330.c
2171
if (!pch->thread) {
drivers/dma/pl330.c
2289
_stop(pch->thread);
drivers/dma/pl330.c
2290
pch->thread->req[0].desc = NULL;
drivers/dma/pl330.c
2291
pch->thread->req[1].desc = NULL;
drivers/dma/pl330.c
2292
pch->thread->req_running = -1;
drivers/dma/pl330.c
2338
_stop(pch->thread);
drivers/dma/pl330.c
2362
pl330_release_channel(pch->thread);
drivers/dma/pl330.c
2363
pch->thread = NULL;
drivers/dma/pl330.c
2376
struct pl330_thread *thrd = pch->thread;
drivers/dma/pl330.c
2419
spin_lock(&pch->thread->dmac->lock);
drivers/dma/pl330.c
2421
if (pch->thread->req_running != -1)
drivers/dma/pl330.c
2422
running = pch->thread->req[pch->thread->req_running].desc;
drivers/dma/pl330.c
2424
last_enq = pch->thread->req[pch->thread->lstenq].desc;
drivers/dma/pl330.c
2465
spin_unlock(&pch->thread->dmac->lock);
drivers/dma/pl330.c
2935
if (!pch->thread || thrd->id != pch->thread->id)
drivers/dma/pl330.c
3109
pch->thread = NULL;
drivers/dma/pl330.c
3185
if (pch->thread) {
drivers/dma/pl330.c
3228
if (pch->thread) {
drivers/dma/pl330.c
447
struct pl330_thread *thread;
drivers/firmware/psci/psci_checker.c
386
struct task_struct *thread;
drivers/firmware/psci/psci_checker.c
397
thread = kthread_create_on_cpu(suspend_test_thread,
drivers/firmware/psci/psci_checker.c
400
if (IS_ERR(thread))
drivers/firmware/psci/psci_checker.c
403
threads[nb_threads++] = thread;
drivers/firmware/tegra/bpmp-tegra186.c
297
unsigned int index = bpmp->soc->channels.thread.offset + i;
drivers/firmware/tegra/bpmp-tegra210.c
194
unsigned int index = bpmp->soc->channels.thread.offset + i;
drivers/firmware/tegra/bpmp.c
250
unsigned long timeout = bpmp->soc->channels.thread.timeout;
drivers/firmware/tegra/bpmp.c
251
unsigned int count = bpmp->soc->channels.thread.count;
drivers/firmware/tegra/bpmp.c
388
timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
drivers/firmware/tegra/bpmp.c
677
count = bpmp->soc->channels.thread.count;
drivers/firmware/tegra/bpmp.c
719
bpmp->threaded.count = bpmp->soc->channels.thread.count;
drivers/firmware/tegra/bpmp.c
78
count = bpmp->soc->channels.thread.count;
drivers/firmware/tegra/bpmp.c
847
.thread = {
drivers/firmware/tegra/bpmp.c
870
.thread = {
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1140
uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1152
thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1173
adev->gfx.funcs->read_wave_vgprs(adev, 0, simd, wave, thread, offset, size>>2, data);
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
435
adev->gfx.funcs->read_wave_vgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, rd->id.gpr.thread, *pos, size>>2, data);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
343
uint32_t wave, uint32_t thread, uint32_t start,
drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
52
u32 thread, vpgr_or_sgpr;
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
4486
uint32_t thread, uint32_t regno,
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
4492
(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
4538
uint32_t wave, uint32_t thread,
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
4543
adev, wave, thread,
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
1036
uint32_t wave, uint32_t thread,
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
1041
adev, wave, thread,
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
986
uint32_t thread, uint32_t regno,
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
992
(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
825
uint32_t thread, uint32_t regno,
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
831
(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
888
uint32_t wave, uint32_t thread,
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
893
adev, wave, thread,
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
638
uint32_t thread, uint32_t regno,
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
644
(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
701
uint32_t wave, uint32_t thread,
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
705
wave_read_regs(adev, xcc_id, wave, thread,
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
2954
uint32_t wave, uint32_t thread,
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
2961
(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
4016
uint32_t wave, uint32_t thread,
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
4023
(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
5166
uint32_t wave, uint32_t thread,
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
5173
(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1941
uint32_t wave, uint32_t thread,
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1948
(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1986
uint32_t wave, uint32_t thread,
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1991
adev, simd, wave, thread,
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
727
uint32_t wave, uint32_t thread,
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
734
(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
773
uint32_t wave, uint32_t thread,
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
777
wave_read_regs(adev, xcc_id, simd, wave, thread,
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
2939
struct task_struct *thread = NULL;
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
2964
thread = get_pid_task(pid, PIDTYPE_PID);
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
2965
if (!thread) {
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
2970
mm = get_task_mm(thread);
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
2980
create_process = thread && thread != current && ptrace_parent(thread) == current;
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
2983
target = create_process ? kfd_create_process(thread) :
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
3150
if (thread)
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
3151
put_task_struct(thread);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1059
struct kfd_process *create_process(const struct task_struct *thread, bool primary);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1063
struct kfd_process *kfd_create_process(struct task_struct *thread);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1006
p = find_process_by_mm(thread->mm);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1589
struct kfd_process *create_process(const struct task_struct *thread, bool primary)
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1601
process->mm = thread->mm;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
1602
process->lead_thread = thread->group_leader;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
68
static struct kfd_process *find_process(const struct task_struct *thread,
drivers/gpu/drm/amd/amdkfd/kfd_process.c
923
struct kfd_process *kfd_create_process(struct task_struct *thread)
drivers/gpu/drm/amd/amdkfd/kfd_process.c
928
if (!(thread->mm && mmget_not_zero(thread->mm)))
drivers/gpu/drm/amd/amdkfd/kfd_process.c
961
process = find_process(thread, true);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
965
process = create_process(thread, true);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
982
mmput(thread->mm);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
999
static struct kfd_process *find_process(const struct task_struct *thread,
drivers/gpu/drm/i915/gt/selftest_migrate.c
687
struct threaded_migrate *thread;
drivers/gpu/drm/i915/gt/selftest_migrate.c
692
thread = kzalloc_objs(*thread, n_cpus);
drivers/gpu/drm/i915/gt/selftest_migrate.c
693
if (!thread)
drivers/gpu/drm/i915/gt/selftest_migrate.c
699
thread[i].migrate = migrate;
drivers/gpu/drm/i915/gt/selftest_migrate.c
700
thread[i].prng =
drivers/gpu/drm/i915/gt/selftest_migrate.c
703
tsk = kthread_run(fn, &thread[i], "igt-%d", i);
drivers/gpu/drm/i915/gt/selftest_migrate.c
710
thread[i].tsk = tsk;
drivers/gpu/drm/i915/gt/selftest_migrate.c
723
struct task_struct *tsk = thread[i].tsk;
drivers/gpu/drm/i915/gt/selftest_migrate.c
734
kfree(thread);
drivers/gpu/drm/i915/gt/selftest_slpc.c
490
struct slpc_thread *thread = container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/gt/selftest_slpc.c
492
thread->result = run_test(thread->gt, TILE_INTERACTION);
drivers/gpu/drm/i915/gvt/scheduler.c
1263
kthread_stop(scheduler->thread[i]);
drivers/gpu/drm/i915/gvt/scheduler.c
1281
scheduler->thread[i] = kthread_run(workload_thread, engine,
drivers/gpu/drm/i915/gvt/scheduler.c
1283
if (IS_ERR(scheduler->thread[i])) {
drivers/gpu/drm/i915/gvt/scheduler.c
1285
ret = PTR_ERR(scheduler->thread[i]);
drivers/gpu/drm/i915/gvt/scheduler.h
55
struct task_struct *thread[I915_NUM_ENGINES];
drivers/gpu/drm/i915/selftests/i915_request.c
1467
struct parallel_thread *thread =
drivers/gpu/drm/i915/selftests/i915_request.c
1468
container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
1469
struct intel_engine_cs *engine = thread->engine;
drivers/gpu/drm/i915/selftests/i915_request.c
1500
thread->result = err;
drivers/gpu/drm/i915/selftests/i915_request.c
1505
struct parallel_thread *thread =
drivers/gpu/drm/i915/selftests/i915_request.c
1506
container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
1507
struct intel_engine_cs *engine = thread->engine;
drivers/gpu/drm/i915/selftests/i915_request.c
1529
thread->result = err;
drivers/gpu/drm/i915/selftests/i915_request.c
1557
struct parallel_thread *thread =
drivers/gpu/drm/i915/selftests/i915_request.c
1558
container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
1559
struct intel_engine_cs *engine = thread->engine;
drivers/gpu/drm/i915/selftests/i915_request.c
1572
thread->result = -ENOMEM;
drivers/gpu/drm/i915/selftests/i915_request.c
1606
thread->result = err;
drivers/gpu/drm/i915/selftests/i915_request.c
2965
struct p_thread *thread = container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
2966
struct perf_stats *p = &thread->p;
drivers/gpu/drm/i915/selftests/i915_request.c
2976
thread->result = PTR_ERR(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
2983
thread->result = err;
drivers/gpu/drm/i915/selftests/i915_request.c
3034
thread->result = err;
drivers/gpu/drm/i915/selftests/i915_request.c
3039
struct p_thread *thread = container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
3040
struct perf_stats *p = &thread->p;
drivers/gpu/drm/i915/selftests/i915_request.c
3051
thread->result = PTR_ERR(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
3058
thread->result = err;
drivers/gpu/drm/i915/selftests/i915_request.c
3111
thread->result = err;
drivers/gpu/drm/i915/selftests/i915_request.c
3116
struct p_thread *thread = container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
3117
struct perf_stats *p = &thread->p;
drivers/gpu/drm/i915/selftests/i915_request.c
3127
thread->result = PTR_ERR(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
313
struct smoke_thread *thread = container_of(work, typeof(*thread), work);
drivers/gpu/drm/i915/selftests/i915_request.c
3134
thread->result = err;
drivers/gpu/drm/i915/selftests/i915_request.c
314
struct smoketest *t = thread->t;
drivers/gpu/drm/i915/selftests/i915_request.c
3176
thread->result = err;
drivers/gpu/drm/i915/selftests/i915_request.c
334
thread->result = -ENOMEM;
drivers/gpu/drm/i915/selftests/i915_request.c
344
while (!READ_ONCE(thread->stop)) {
drivers/gpu/drm/i915/selftests/i915_request.c
452
thread->result = err;
drivers/gpu/drm/imagination/pvr_rogue_meta.h
86
#define SP_ACCESS(thread) META_CR_CORE_REG(thread, 0, META_CR_TXUA0_ID)
drivers/gpu/drm/imagination/pvr_rogue_meta.h
87
#define PC_ACCESS(thread) META_CR_CORE_REG(thread, 0, META_CR_TXUPC_ID)
drivers/gpu/drm/vc4/vc4_gem.c
374
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
drivers/gpu/drm/vc4/vc4_gem.c
381
V3D_WRITE(V3D_CTNCA(thread), start);
drivers/gpu/drm/vc4/vc4_gem.c
382
V3D_WRITE(V3D_CTNEA(thread), end);
drivers/hv/hv_balloon.c
1993
dm_device.thread =
drivers/hv/hv_balloon.c
1995
if (IS_ERR(dm_device.thread)) {
drivers/hv/hv_balloon.c
1996
ret = PTR_ERR(dm_device.thread);
drivers/hv/hv_balloon.c
2006
dm_device.thread = NULL;
drivers/hv/hv_balloon.c
2031
kthread_stop(dm->thread);
drivers/hv/hv_balloon.c
2067
if (dm->thread) {
drivers/hv/hv_balloon.c
2068
kthread_stop(dm->thread);
drivers/hv/hv_balloon.c
2069
dm->thread = NULL;
drivers/hv/hv_balloon.c
2089
dm_device.thread =
drivers/hv/hv_balloon.c
2091
if (IS_ERR(dm_device.thread)) {
drivers/hv/hv_balloon.c
2092
ret = PTR_ERR(dm_device.thread);
drivers/hv/hv_balloon.c
2093
dm_device.thread = NULL;
drivers/hv/hv_balloon.c
540
struct task_struct *thread;
drivers/iio/buffer/industrialio-triggered-buffer.c
120
irqreturn_t (*thread)(int irq, void *p),
drivers/iio/buffer/industrialio-triggered-buffer.c
127
ret = iio_triggered_buffer_setup_ext(indio_dev, h, thread, direction,
drivers/iio/buffer/industrialio-triggered-buffer.c
41
irqreturn_t (*thread)(int irq, void *p),
drivers/iio/buffer/industrialio-triggered-buffer.c
66
thread,
drivers/iio/industrialio-trigger.c
300
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
drivers/iio/industrialio-trigger.c
366
irqreturn_t (*thread)(int irq, void *p),
drivers/iio/industrialio-trigger.c
386
pf->thread = thread;
drivers/iio/industrialio-triggered-event.c
32
irqreturn_t (*thread)(int irq, void *p))
drivers/iio/industrialio-triggered-event.c
35
thread,
drivers/infiniband/hw/hfi1/driver.c
1001
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
drivers/infiniband/hw/hfi1/driver.c
1051
last = skip_rcv_packet(&packet, thread);
drivers/infiniband/hw/hfi1/driver.c
1056
last = process_rcv_packet(&packet, thread);
drivers/infiniband/hw/hfi1/driver.c
665
static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
drivers/infiniband/hw/hfi1/driver.c
667
if (thread) {
drivers/infiniband/hw/hfi1/driver.c
679
static inline int check_max_packet(struct hfi1_packet *packet, int thread)
drivers/infiniband/hw/hfi1/driver.c
684
ret = max_packet_exceeded(packet, thread);
drivers/infiniband/hw/hfi1/driver.c
688
static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
drivers/infiniband/hw/hfi1/driver.c
699
ret = check_max_packet(packet, thread);
drivers/infiniband/hw/hfi1/driver.c
741
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
drivers/infiniband/hw/hfi1/driver.c
782
ret = check_max_packet(packet, thread);
drivers/infiniband/hw/hfi1/driver.c
852
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
drivers/infiniband/hw/hfi1/driver.c
866
last = process_rcv_packet(&packet, thread);
drivers/infiniband/hw/hfi1/driver.c
878
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
drivers/infiniband/hw/hfi1/driver.c
895
last = process_rcv_packet(&packet, thread);
drivers/infiniband/hw/hfi1/hfi.h
1438
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
drivers/infiniband/hw/hfi1/hfi.h
1439
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
drivers/infiniband/hw/hfi1/hfi.h
1440
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
drivers/infiniband/hw/hfi1/msix.c
127
irq_handler_t thread,
drivers/infiniband/hw/hfi1/msix.c
130
int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
drivers/infiniband/hw/hfi1/msix.c
74
irq_handler_t handler, irq_handler_t thread,
drivers/infiniband/hw/hfi1/msix.c
97
ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
drivers/isdn/mISDN/stack.c
301
task_cputime(st->thread, &utime, &stime);
drivers/isdn/mISDN/stack.c
307
dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
drivers/isdn/mISDN/stack.c
316
st->thread = NULL;
drivers/isdn/mISDN/stack.c
402
newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
drivers/isdn/mISDN/stack.c
404
if (IS_ERR(newst->thread)) {
drivers/isdn/mISDN/stack.c
405
err = PTR_ERR(newst->thread);
drivers/isdn/mISDN/stack.c
629
if (st->thread) {
drivers/macintosh/therm_adt746x.c
556
th->thread = kthread_run(monitor_task, th, "kfand");
drivers/macintosh/therm_adt746x.c
557
if (th->thread == ERR_PTR(-ENOMEM)) {
drivers/macintosh/therm_adt746x.c
559
th->thread = NULL;
drivers/macintosh/therm_adt746x.c
575
if (th->thread != NULL)
drivers/macintosh/therm_adt746x.c
576
kthread_stop(th->thread);
drivers/macintosh/therm_adt746x.c
84
struct task_struct *thread;
drivers/mailbox/mtk-cmdq-mailbox.c
186
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
drivers/mailbox/mtk-cmdq-mailbox.c
190
writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);
drivers/mailbox/mtk-cmdq-mailbox.c
193
if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
drivers/mailbox/mtk-cmdq-mailbox.c
196
if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
drivers/mailbox/mtk-cmdq-mailbox.c
199
(u32)(thread->base - cmdq->base));
drivers/mailbox/mtk-cmdq-mailbox.c
206
static void cmdq_thread_resume(struct cmdq_thread *thread)
drivers/mailbox/mtk-cmdq-mailbox.c
208
writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
drivers/mailbox/mtk-cmdq-mailbox.c
226
static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
drivers/mailbox/mtk-cmdq-mailbox.c
230
writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
drivers/mailbox/mtk-cmdq-mailbox.c
231
if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
drivers/mailbox/mtk-cmdq-mailbox.c
235
(u32)(thread->base - cmdq->base));
drivers/mailbox/mtk-cmdq-mailbox.c
242
static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
drivers/mailbox/mtk-cmdq-mailbox.c
244
cmdq_thread_reset(cmdq, thread);
drivers/mailbox/mtk-cmdq-mailbox.c
245
writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
drivers/mailbox/mtk-cmdq-mailbox.c
249
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
drivers/mailbox/mtk-cmdq-mailbox.c
251
writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
drivers/mailbox/mtk-cmdq-mailbox.c
252
thread->base + CMDQ_THR_CURR_ADDR);
drivers/mailbox/mtk-cmdq-mailbox.c
258
struct cmdq_thread *thread = task->thread;
drivers/mailbox/mtk-cmdq-mailbox.c
260
&thread->task_busy_list, typeof(*task), list_entry);
drivers/mailbox/mtk-cmdq-mailbox.c
271
cmdq_thread_invalidate_fetched_data(thread);
drivers/mailbox/mtk-cmdq-mailbox.c
274
static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
drivers/mailbox/mtk-cmdq-mailbox.c
276
return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
drivers/mailbox/mtk-cmdq-mailbox.c
285
mbox_chan_received_data(task->thread->chan, &data);
drivers/mailbox/mtk-cmdq-mailbox.c
292
struct cmdq_thread *thread = task->thread;
drivers/mailbox/mtk-cmdq-mailbox.c
297
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
drivers/mailbox/mtk-cmdq-mailbox.c
298
next_task = list_first_entry_or_null(&thread->task_busy_list,
drivers/mailbox/mtk-cmdq-mailbox.c
302
thread->base + CMDQ_THR_CURR_ADDR);
drivers/mailbox/mtk-cmdq-mailbox.c
303
cmdq_thread_resume(thread);
drivers/mailbox/mtk-cmdq-mailbox.c
307
struct cmdq_thread *thread)
drivers/mailbox/mtk-cmdq-mailbox.c
314
irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
drivers/mailbox/mtk-cmdq-mailbox.c
315
writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);
drivers/mailbox/mtk-cmdq-mailbox.c
323
if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
drivers/mailbox/mtk-cmdq-mailbox.c
333
gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
drivers/mailbox/mtk-cmdq-mailbox.c
336
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
drivers/mailbox/mtk-cmdq-mailbox.c
355
if (list_empty(&thread->task_busy_list))
drivers/mailbox/mtk-cmdq-mailbox.c
356
cmdq_thread_disable(cmdq, thread);
drivers/mailbox/mtk-cmdq-mailbox.c
370
struct cmdq_thread *thread = &cmdq->thread[bit];
drivers/mailbox/mtk-cmdq-mailbox.c
372
spin_lock_irqsave(&thread->chan->lock, flags);
drivers/mailbox/mtk-cmdq-mailbox.c
373
cmdq_thread_irq_handler(cmdq, thread);
drivers/mailbox/mtk-cmdq-mailbox.c
374
spin_unlock_irqrestore(&thread->chan->lock, flags);
drivers/mailbox/mtk-cmdq-mailbox.c
407
struct cmdq_thread *thread;
drivers/mailbox/mtk-cmdq-mailbox.c
414
thread = &cmdq->thread[i];
drivers/mailbox/mtk-cmdq-mailbox.c
415
if (!list_empty(&thread->task_busy_list)) {
drivers/mailbox/mtk-cmdq-mailbox.c
450
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
drivers/mailbox/mtk-cmdq-mailbox.c
466
task->thread = thread;
drivers/mailbox/mtk-cmdq-mailbox.c
469
if (list_empty(&thread->task_busy_list)) {
drivers/mailbox/mtk-cmdq-mailbox.c
476
WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
drivers/mailbox/mtk-cmdq-mailbox.c
479
writel(gce_addr, thread->base + CMDQ_THR_CURR_ADDR);
drivers/mailbox/mtk-cmdq-mailbox.c
481
writel(gce_addr, thread->base + CMDQ_THR_END_ADDR);
drivers/mailbox/mtk-cmdq-mailbox.c
483
writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
drivers/mailbox/mtk-cmdq-mailbox.c
484
writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
drivers/mailbox/mtk-cmdq-mailbox.c
485
writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
drivers/mailbox/mtk-cmdq-mailbox.c
487
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
drivers/mailbox/mtk-cmdq-mailbox.c
488
gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
drivers/mailbox/mtk-cmdq-mailbox.c
490
gce_addr = readl(thread->base + CMDQ_THR_END_ADDR);
drivers/mailbox/mtk-cmdq-mailbox.c
497
thread->base + CMDQ_THR_CURR_ADDR);
drivers/mailbox/mtk-cmdq-mailbox.c
503
thread->base + CMDQ_THR_END_ADDR);
drivers/mailbox/mtk-cmdq-mailbox.c
504
cmdq_thread_resume(thread);
drivers/mailbox/mtk-cmdq-mailbox.c
506
list_move_tail(&task->list_entry, &thread->task_busy_list);
drivers/mailbox/mtk-cmdq-mailbox.c
518
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
drivers/mailbox/mtk-cmdq-mailbox.c
525
spin_lock_irqsave(&thread->chan->lock, flags);
drivers/mailbox/mtk-cmdq-mailbox.c
526
if (list_empty(&thread->task_busy_list))
drivers/mailbox/mtk-cmdq-mailbox.c
529
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
drivers/mailbox/mtk-cmdq-mailbox.c
532
cmdq_thread_irq_handler(cmdq, thread);
drivers/mailbox/mtk-cmdq-mailbox.c
533
if (list_empty(&thread->task_busy_list))
drivers/mailbox/mtk-cmdq-mailbox.c
536
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
drivers/mailbox/mtk-cmdq-mailbox.c
542
cmdq_thread_disable(cmdq, thread);
drivers/mailbox/mtk-cmdq-mailbox.c
551
spin_unlock_irqrestore(&thread->chan->lock, flags);
drivers/mailbox/mtk-cmdq-mailbox.c
559
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
drivers/mailbox/mtk-cmdq-mailbox.c
571
spin_lock_irqsave(&thread->chan->lock, flags);
drivers/mailbox/mtk-cmdq-mailbox.c
572
if (list_empty(&thread->task_busy_list))
drivers/mailbox/mtk-cmdq-mailbox.c
575
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
drivers/mailbox/mtk-cmdq-mailbox.c
576
if (!cmdq_thread_is_in_wfe(thread))
drivers/mailbox/mtk-cmdq-mailbox.c
579
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
drivers/mailbox/mtk-cmdq-mailbox.c
583
mbox_chan_received_data(task->thread->chan, &data);
drivers/mailbox/mtk-cmdq-mailbox.c
588
cmdq_thread_resume(thread);
drivers/mailbox/mtk-cmdq-mailbox.c
589
cmdq_thread_disable(cmdq, thread);
drivers/mailbox/mtk-cmdq-mailbox.c
592
spin_unlock_irqrestore(&thread->chan->lock, flags);
drivers/mailbox/mtk-cmdq-mailbox.c
599
cmdq_thread_resume(thread);
drivers/mailbox/mtk-cmdq-mailbox.c
600
spin_unlock_irqrestore(&thread->chan->lock, flags);
drivers/mailbox/mtk-cmdq-mailbox.c
601
if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
drivers/mailbox/mtk-cmdq-mailbox.c
604
(u32)(thread->base - cmdq->base));
drivers/mailbox/mtk-cmdq-mailbox.c
624
struct cmdq_thread *thread;
drivers/mailbox/mtk-cmdq-mailbox.c
629
thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
drivers/mailbox/mtk-cmdq-mailbox.c
630
thread->priority = sp->args[1];
drivers/mailbox/mtk-cmdq-mailbox.c
631
thread->chan = &mbox->chans[ind];
drivers/mailbox/mtk-cmdq-mailbox.c
735
cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
drivers/mailbox/mtk-cmdq-mailbox.c
736
sizeof(*cmdq->thread), GFP_KERNEL);
drivers/mailbox/mtk-cmdq-mailbox.c
737
if (!cmdq->thread)
drivers/mailbox/mtk-cmdq-mailbox.c
741
cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
drivers/mailbox/mtk-cmdq-mailbox.c
743
INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
drivers/mailbox/mtk-cmdq-mailbox.c
744
cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
drivers/mailbox/mtk-cmdq-mailbox.c
80
struct cmdq_thread *thread;
drivers/mailbox/mtk-cmdq-mailbox.c
90
struct cmdq_thread *thread;
drivers/md/bcache/btree.c
2094
check_state.infos[i].thread =
drivers/md/bcache/btree.c
2098
if (IS_ERR(check_state.infos[i].thread)) {
drivers/md/bcache/btree.c
2101
kthread_stop(check_state.infos[i].thread);
drivers/md/bcache/btree.h
225
struct task_struct *thread;
drivers/md/bcache/writeback.c
1029
state.infos[i].thread =
drivers/md/bcache/writeback.c
1032
if (IS_ERR(state.infos[i].thread)) {
drivers/md/bcache/writeback.c
1036
kthread_stop(state.infos[i].thread);
drivers/md/bcache/writeback.h
34
struct task_struct *thread;
drivers/md/dm-raid.c
3790
md_wakeup_thread(mddev->thread);
drivers/md/dm-vdo/dedupe.c
2030
struct vdo_thread *thread = vdo_get_work_queue_owner(vdo_get_current_work_queue());
drivers/md/dm-vdo/dedupe.c
2032
vdo_register_allocating_thread(&thread->allocating_thread, NULL);
drivers/md/dm-vdo/funnel-workqueue.c
318
struct task_struct *thread = NULL;
drivers/md/dm-vdo/funnel-workqueue.c
349
thread = kthread_run(work_queue_runner, queue, "%s:%s", thread_name_prefix,
drivers/md/dm-vdo/funnel-workqueue.c
351
if (IS_ERR(thread)) {
drivers/md/dm-vdo/funnel-workqueue.c
353
return (int) PTR_ERR(thread);
drivers/md/dm-vdo/funnel-workqueue.c
356
queue->thread = thread;
drivers/md/dm-vdo/funnel-workqueue.c
452
if (queue->thread == NULL)
drivers/md/dm-vdo/funnel-workqueue.c
456
kthread_stop(queue->thread);
drivers/md/dm-vdo/funnel-workqueue.c
457
queue->thread = NULL;
drivers/md/dm-vdo/funnel-workqueue.c
489
if (queue->thread != NULL) {
drivers/md/dm-vdo/funnel-workqueue.c
490
task_state_report = task_state_to_char(queue->thread);
drivers/md/dm-vdo/funnel-workqueue.c
62
struct task_struct *thread;
drivers/md/dm-vdo/indexer/funnel-requestqueue.c
223
&queue->thread);
drivers/md/dm-vdo/indexer/funnel-requestqueue.c
273
vdo_join_threads(queue->thread);
drivers/md/dm-vdo/indexer/funnel-requestqueue.c
59
struct thread *thread;
drivers/md/dm-vdo/indexer/index.c
57
struct thread *thread;
drivers/md/dm-vdo/indexer/index.c
733
struct thread *writer_thread = NULL;
drivers/md/dm-vdo/indexer/index.c
736
if (writer->thread != NULL) {
drivers/md/dm-vdo/indexer/index.c
737
writer_thread = writer->thread;
drivers/md/dm-vdo/indexer/index.c
738
writer->thread = NULL;
drivers/md/dm-vdo/indexer/index.c
797
result = vdo_create_thread(close_chapters, writer, "writer", &writer->thread);
drivers/md/dm-vdo/indexer/volume.c
1629
result = vdo_allocate(config->read_threads, struct thread *, "reader threads",
drivers/md/dm-vdo/indexer/volume.h
117
struct thread **reader_threads;
drivers/md/dm-vdo/thread-registry.c
28
struct registered_thread *thread;
drivers/md/dm-vdo/thread-registry.c
36
list_for_each_entry(thread, &registry->links, links) {
drivers/md/dm-vdo/thread-registry.c
37
if (thread->task == current) {
drivers/md/dm-vdo/thread-registry.c
39
list_del_rcu(&thread->links);
drivers/md/dm-vdo/thread-registry.c
51
INIT_LIST_HEAD(&thread->links);
drivers/md/dm-vdo/thread-registry.c
57
struct registered_thread *thread;
drivers/md/dm-vdo/thread-registry.c
61
list_for_each_entry(thread, &registry->links, links) {
drivers/md/dm-vdo/thread-registry.c
62
if (thread->task == current) {
drivers/md/dm-vdo/thread-registry.c
63
list_del_rcu(&thread->links);
drivers/md/dm-vdo/thread-registry.c
74
INIT_LIST_HEAD(&thread->links);
drivers/md/dm-vdo/thread-registry.c
80
struct registered_thread *thread;
drivers/md/dm-vdo/thread-registry.c
84
list_for_each_entry_rcu(thread, &registry->links, links) {
drivers/md/dm-vdo/thread-registry.c
85
if (thread->task == current) {
drivers/md/dm-vdo/thread-registry.c
86
result = thread->pointer;
drivers/md/dm-vdo/thread-utils.c
101
while (wait_for_completion_interruptible(&thread->thread_done))
drivers/md/dm-vdo/thread-utils.c
105
hlist_del(&thread->thread_links);
drivers/md/dm-vdo/thread-utils.c
107
vdo_free(thread);
drivers/md/dm-vdo/thread-utils.c
37
struct thread *thread = arg;
drivers/md/dm-vdo/thread-utils.c
39
thread->thread_task = current;
drivers/md/dm-vdo/thread-utils.c
41
hlist_add_head(&thread->thread_links, &thread_list);
drivers/md/dm-vdo/thread-utils.c
44
thread->thread_function(thread->thread_data);
drivers/md/dm-vdo/thread-utils.c
46
complete(&thread->thread_done);
drivers/md/dm-vdo/thread-utils.c
51
const char *name, struct thread **new_thread)
drivers/md/dm-vdo/thread-utils.c
56
struct thread *thread;
drivers/md/dm-vdo/thread-utils.c
59
result = vdo_allocate(1, struct thread, __func__, &thread);
drivers/md/dm-vdo/thread-utils.c
65
thread->thread_function = thread_function;
drivers/md/dm-vdo/thread-utils.c
66
thread->thread_data = thread_data;
drivers/md/dm-vdo/thread-utils.c
67
init_completion(&thread->thread_done);
drivers/md/dm-vdo/thread-utils.c
83
task = kthread_run(thread_starter, thread, "%.*s:%s",
drivers/md/dm-vdo/thread-utils.c
87
task = kthread_run(thread_starter, thread, "%s", name);
drivers/md/dm-vdo/thread-utils.c
91
vdo_free(thread);
drivers/md/dm-vdo/thread-utils.c
95
*new_thread = thread;
drivers/md/dm-vdo/thread-utils.c
99
void vdo_join_threads(struct thread *thread)
drivers/md/dm-vdo/thread-utils.h
13
struct thread;
drivers/md/dm-vdo/thread-utils.h
17
const char *name, struct thread **new_thread);
drivers/md/dm-vdo/thread-utils.h
18
void vdo_join_threads(struct thread *thread);
drivers/md/dm-vdo/vdo.c
1008
.next = thread->listeners,
drivers/md/dm-vdo/vdo.c
1011
thread->listeners = read_only_listener;
drivers/md/dm-vdo/vdo.c
1146
struct vdo_thread *thread = &vdo->threads[thread_id];
drivers/md/dm-vdo/vdo.c
1148
thread->is_read_only = true;
drivers/md/dm-vdo/vdo.c
1149
listener = thread->listeners;
drivers/md/dm-vdo/vdo.c
1247
struct vdo_thread *thread;
drivers/md/dm-vdo/vdo.c
1250
thread = &vdo->threads[thread_id];
drivers/md/dm-vdo/vdo.c
1251
if (thread->is_read_only) {
drivers/md/dm-vdo/vdo.c
1257
thread->is_read_only = true;
drivers/md/dm-vdo/vdo.c
132
struct vdo_thread *thread = vdo_get_work_queue_owner(vdo_get_current_work_queue());
drivers/md/dm-vdo/vdo.c
134
vdo_register_allocating_thread(&thread->allocating_thread,
drivers/md/dm-vdo/vdo.c
135
&thread->vdo->allocations_allowed);
drivers/md/dm-vdo/vdo.c
1603
struct vdo_thread *thread;
drivers/md/dm-vdo/vdo.c
1609
thread = vdo_get_work_queue_owner(queue);
drivers/md/dm-vdo/vdo.c
1610
thread_id = thread->thread_id;
drivers/md/dm-vdo/vdo.c
1613
BUG_ON(thread_id >= thread->vdo->thread_config.thread_count);
drivers/md/dm-vdo/vdo.c
1614
BUG_ON(thread != &thread->vdo->threads[thread_id]);
drivers/md/dm-vdo/vdo.c
414
struct vdo_thread *thread = &vdo->threads[thread_id];
drivers/md/dm-vdo/vdo.c
420
if (thread->queue != NULL) {
drivers/md/dm-vdo/vdo.c
421
return VDO_ASSERT(vdo_work_queue_type_is(thread->queue, type),
drivers/md/dm-vdo/vdo.c
426
thread->vdo = vdo;
drivers/md/dm-vdo/vdo.c
427
thread->thread_id = thread_id;
drivers/md/dm-vdo/vdo.c
429
return vdo_make_work_queue(vdo->thread_name_prefix, queue_name, thread,
drivers/md/dm-vdo/vdo.c
430
type, queue_count, contexts, &thread->queue);
drivers/md/dm-vdo/vdo.c
641
static void free_listeners(struct vdo_thread *thread)
drivers/md/dm-vdo/vdo.c
645
for (listener = vdo_forget(thread->listeners); listener != NULL; listener = next) {
drivers/md/dm-vdo/vdo.c
991
struct vdo_thread *thread = &vdo->threads[thread_id];
drivers/md/dm-vdo/vio.h
179
thread_id_t thread)
drivers/md/dm-vdo/vio.h
182
vdo_set_completion_callback(&vio->completion, callback, thread);
drivers/md/md-bitmap.c
1487
struct md_thread *thread;
drivers/md/md-bitmap.c
1490
thread = rcu_dereference(mddev->thread);
drivers/md/md-bitmap.c
1492
if (!thread)
drivers/md/md-bitmap.c
1495
if (force || thread->timeout < MAX_SCHEDULE_TIMEOUT)
drivers/md/md-bitmap.c
1496
thread->timeout = timeout;
drivers/md/md-bitmap.c
2251
md_wakeup_thread(mddev->thread);
drivers/md/md-bitmap.c
2674
md_wakeup_thread(mddev->thread);
drivers/md/md-bitmap.c
2761
md_wakeup_thread(mddev->thread);
drivers/md/md-cluster.c
1084
md_wakeup_thread(mddev->thread);
drivers/md/md-cluster.c
295
static void recover_bitmaps(struct md_thread *thread)
drivers/md/md-cluster.c
297
struct mddev *mddev = thread->mddev;
drivers/md/md-cluster.c
352
md_wakeup_thread(mddev->thread);
drivers/md/md-cluster.c
468
md_wakeup_thread(mddev->thread);
drivers/md/md-cluster.c
546
struct md_thread *thread;
drivers/md/md-cluster.c
552
thread = rcu_dereference_protected(mddev->thread, true);
drivers/md/md-cluster.c
553
if (!thread) {
drivers/md/md-cluster.c
559
wait_event(thread->wqueue,
drivers/md/md-cluster.c
576
md_wakeup_thread(mddev->thread);
drivers/md/md-cluster.c
651
static void recv_daemon(struct md_thread *thread)
drivers/md/md-cluster.c
653
struct md_cluster_info *cinfo = thread->mddev->cluster_info;
drivers/md/md-cluster.c
669
ret = process_recvd_msg(thread->mddev, &msg);
drivers/md/md-cluster.c
731
md_wakeup_thread(mddev->thread);
drivers/md/md-llbitmap.c
677
md_wakeup_thread(mddev->thread);
drivers/md/md-llbitmap.c
681
md_wakeup_thread(mddev->thread);
drivers/md/md.c
102
static void md_wakeup_thread_directly(struct md_thread __rcu **thread);
drivers/md/md.c
10435
md_wakeup_thread(rdev->mddev->thread);
drivers/md/md.c
10606
md_wakeup_thread(mddev->thread);
drivers/md/md.c
4216
if (!mddev->thread)
drivers/md/md.c
4670
md_wakeup_thread(mddev->thread);
drivers/md/md.c
5210
md_wakeup_thread(mddev->thread);
drivers/md/md.c
5329
md_wakeup_thread(mddev->thread);
drivers/md/md.c
538
md_wakeup_thread(mddev->thread);
drivers/md/md.c
6445
md_wakeup_thread(mddev->thread);
drivers/md/md.c
6895
md_unregister_thread(mddev, &mddev->thread);
drivers/md/md.c
7539
if (!mddev->thread)
drivers/md/md.c
7605
if (!mddev->thread)
drivers/md/md.c
7628
if (!mddev->pers->quiesce || !mddev->thread)
drivers/md/md.c
7977
if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
drivers/md/md.c
8473
struct md_thread *thread = arg;
drivers/md/md.c
8499
(thread->wqueue,
drivers/md/md.c
8500
test_bit(THREAD_WAKEUP, &thread->flags)
drivers/md/md.c
8502
thread->timeout);
drivers/md/md.c
8504
clear_bit(THREAD_WAKEUP, &thread->flags);
drivers/md/md.c
8508
thread->run(thread);
drivers/md/md.c
8514
static void md_wakeup_thread_directly(struct md_thread __rcu **thread)
drivers/md/md.c
8519
t = rcu_dereference(*thread);
drivers/md/md.c
8525
void __md_wakeup_thread(struct md_thread __rcu *thread)
drivers/md/md.c
8529
t = rcu_dereference(thread);
drivers/md/md.c
8542
struct md_thread *thread;
drivers/md/md.c
8544
thread = kzalloc_obj(struct md_thread);
drivers/md/md.c
8545
if (!thread)
drivers/md/md.c
8548
init_waitqueue_head(&thread->wqueue);
drivers/md/md.c
8550
thread->run = run;
drivers/md/md.c
8551
thread->mddev = mddev;
drivers/md/md.c
8552
thread->timeout = MAX_SCHEDULE_TIMEOUT;
drivers/md/md.c
8553
thread->tsk = kthread_run(md_thread, thread,
drivers/md/md.c
8555
mdname(thread->mddev),
drivers/md/md.c
8557
if (IS_ERR(thread->tsk)) {
drivers/md/md.c
8558
kfree(thread);
drivers/md/md.c
8561
return thread;
drivers/md/md.c
8567
struct md_thread *thread = rcu_dereference_protected(*threadp,
drivers/md/md.c
8570
if (!thread)
drivers/md/md.c
8576
pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
drivers/md/md.c
8577
kthread_stop(thread->tsk);
drivers/md/md.c
8578
kfree(thread);
drivers/md/md.c
8601
md_wakeup_thread(mddev->thread);
drivers/md/md.c
9088
md_wakeup_thread(mddev->thread);
drivers/md/md.c
9111
md_wakeup_thread(mddev->thread);
drivers/md/md.c
9127
md_wakeup_thread(mddev->thread);
drivers/md/md.c
9164
md_wakeup_thread(mddev->thread);
drivers/md/md.c
9490
void md_do_sync(struct md_thread *thread)
drivers/md/md.c
9492
struct mddev *mddev = thread->mddev;
drivers/md/md.c
956
md_wakeup_thread(mddev->thread);
drivers/md/md.c
9815
md_wakeup_thread(mddev->thread);
drivers/md/md.h
467
struct md_thread __rcu *thread; /* management thread */
drivers/md/md.h
864
void (*run) (struct md_thread *thread);
drivers/md/md.h
885
#define md_wakeup_thread(thread) do { \
drivers/md/md.h
887
__md_wakeup_thread(thread); \
drivers/md/md.h
900
void (*run)(struct md_thread *thread),
drivers/md/md.h
904
extern void __md_wakeup_thread(struct md_thread __rcu *thread);
drivers/md/md.h
929
extern void md_do_sync(struct md_thread *thread);
drivers/md/md.h
974
md_wakeup_thread(mddev->thread);
drivers/md/raid1.c
1280
md_wakeup_thread(mddev->thread);
drivers/md/raid1.c
1669
md_wakeup_thread(mddev->thread);
drivers/md/raid1.c
2604
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid1.c
2655
static void raid1d(struct md_thread *thread)
drivers/md/raid1.c
2657
struct mddev *mddev = thread->mddev;
drivers/md/raid1.c
290
md_wakeup_thread(mddev->thread);
drivers/md/raid1.c
3163
rcu_assign_pointer(conf->thread,
drivers/md/raid1.c
3165
if (!conf->thread)
drivers/md/raid1.c
3234
md_unregister_thread(mddev, &conf->thread);
drivers/md/raid1.c
3251
md_unregister_thread(mddev, &conf->thread);
drivers/md/raid1.c
3270
rcu_assign_pointer(mddev->thread, conf->thread);
drivers/md/raid1.c
3271
rcu_assign_pointer(conf->thread, NULL);
drivers/md/raid1.c
3279
md_unregister_thread(mddev, &mddev->thread);
drivers/md/raid1.c
3421
md_wakeup_thread(mddev->thread);
drivers/md/raid1.h
109
struct md_thread __rcu *thread;
drivers/md/raid10.c
1096
md_wakeup_thread(mddev->thread);
drivers/md/raid10.c
1284
md_wakeup_thread(mddev->thread);
drivers/md/raid10.c
1389
md_wakeup_thread(mddev->thread);
drivers/md/raid10.c
2950
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid10.c
2960
static void raid10d(struct md_thread *thread)
drivers/md/raid10.c
2962
struct mddev *mddev = thread->mddev;
drivers/md/raid10.c
312
md_wakeup_thread(mddev->thread);
drivers/md/raid10.c
3906
rcu_assign_pointer(conf->thread,
drivers/md/raid10.c
3908
if (!conf->thread)
drivers/md/raid10.c
3969
rcu_assign_pointer(mddev->thread, conf->thread);
drivers/md/raid10.c
3970
rcu_assign_pointer(conf->thread, NULL);
drivers/md/raid10.c
4115
md_unregister_thread(mddev, &mddev->thread);
drivers/md/raid10.c
4690
md_wakeup_thread(mddev->thread);
drivers/md/raid10.c
957
struct md_thread *thread;
drivers/md/raid10.c
974
thread = rcu_dereference_protected(conf->mddev->thread, true);
drivers/md/raid10.c
980
if (thread->tsk == current) {
drivers/md/raid10.h
98
struct md_thread __rcu *thread;
drivers/md/raid5-cache.c
1491
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5-cache.c
1519
md_wakeup_thread(log->rdev->mddev->thread);
drivers/md/raid5-cache.c
1546
static void r5l_reclaim_thread(struct md_thread *thread)
drivers/md/raid5-cache.c
1548
struct mddev *mddev = thread->mddev;
drivers/md/raid5-cache.c
1577
struct md_thread *thread = rcu_dereference_protected(
drivers/md/raid5-cache.c
1583
kthread_park(thread->tsk);
drivers/md/raid5-cache.c
1587
kthread_unpark(thread->tsk);
drivers/md/raid5-cache.c
2768
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5-cache.c
2823
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5-cache.c
3050
struct md_thread *thread;
drivers/md/raid5-cache.c
3109
thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev,
drivers/md/raid5-cache.c
3111
if (!thread)
drivers/md/raid5-cache.c
3114
thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
drivers/md/raid5-cache.c
3115
rcu_assign_pointer(log->reclaim_thread, thread);
drivers/md/raid5-cache.c
602
md_wakeup_thread(log->rdev->mddev->thread);
drivers/md/raid5-ppl.c
604
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5.c
203
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5.c
276
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5.c
282
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5.c
364
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5.c
3681
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5.c
4078
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5.c
410
if (unlikely(!conf->mddev->thread) ||
drivers/md/raid5.c
415
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5.c
5295
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5.c
5362
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5.c
5840
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5.c
6007
md_wakeup_thread(mddev->thread);
drivers/md/raid5.c
6383
md_wakeup_thread(mddev->thread);
drivers/md/raid5.c
6491
md_wakeup_thread(mddev->thread);
drivers/md/raid5.c
6764
static void raid5d(struct md_thread *thread)
drivers/md/raid5.c
6766
struct mddev *mddev = thread->mddev;
drivers/md/raid5.c
7698
rcu_assign_pointer(conf->thread,
drivers/md/raid5.c
7700
if (!conf->thread) {
drivers/md/raid5.c
7983
rcu_assign_pointer(mddev->thread, conf->thread);
drivers/md/raid5.c
7984
rcu_assign_pointer(conf->thread, NULL);
drivers/md/raid5.c
8100
md_unregister_thread(mddev, &mddev->thread);
drivers/md/raid5.c
8814
md_wakeup_thread(mddev->thread);
drivers/md/raid5.c
994
md_wakeup_thread(conf->mddev->thread);
drivers/md/raid5.h
677
struct md_thread __rcu *thread;
drivers/media/common/videobuf2/videobuf2-core.c
3173
struct task_struct *thread;
drivers/media/common/videobuf2/videobuf2-core.c
3270
threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
drivers/media/common/videobuf2/videobuf2-core.c
3271
if (IS_ERR(threadio->thread)) {
drivers/media/common/videobuf2/videobuf2-core.c
3272
ret = PTR_ERR(threadio->thread);
drivers/media/common/videobuf2/videobuf2-core.c
3273
threadio->thread = NULL;
drivers/media/common/videobuf2/videobuf2-core.c
3296
err = kthread_stop(threadio->thread);
drivers/media/common/videobuf2/videobuf2-core.c
3298
threadio->thread = NULL;
drivers/media/dvb-core/dvb_ca_en50221.c
1027
wake_up_process(ca->thread);
drivers/media/dvb-core/dvb_ca_en50221.c
135
struct task_struct *thread;
drivers/media/dvb-core/dvb_ca_en50221.c
1930
ca->thread = kthread_run(dvb_ca_en50221_thread, ca, "kdvb-ca-%i:%i",
drivers/media/dvb-core/dvb_ca_en50221.c
1932
if (IS_ERR(ca->thread)) {
drivers/media/dvb-core/dvb_ca_en50221.c
1933
ret = PTR_ERR(ca->thread);
drivers/media/dvb-core/dvb_ca_en50221.c
1973
kthread_stop(ca->thread);
drivers/media/dvb-core/dvb_frontend.c
101
struct task_struct *thread;
drivers/media/dvb-core/dvb_frontend.c
2685
if (fepriv->thread)
drivers/media/dvb-core/dvb_frontend.c
2694
if (fepriv->thread)
drivers/media/dvb-core/dvb_frontend.c
2703
if (fepriv->thread)
drivers/media/dvb-core/dvb_frontend.c
2712
if (fepriv->thread)
drivers/media/dvb-core/dvb_frontend.c
2802
mfepriv->thread)) {
drivers/media/dvb-core/dvb_frontend.c
2815
mfepriv->thread) {
drivers/media/dvb-core/dvb_frontend.c
796
fepriv->thread = NULL;
drivers/media/dvb-core/dvb_frontend.c
819
if (!fepriv->thread)
drivers/media/dvb-core/dvb_frontend.c
822
kthread_stop(fepriv->thread);
drivers/media/dvb-core/dvb_frontend.c
828
if (fepriv->thread)
drivers/media/dvb-core/dvb_frontend.c
831
fepriv->thread);
drivers/media/dvb-core/dvb_frontend.c
864
if (fepriv->thread) {
drivers/media/dvb-core/dvb_frontend.c
878
fepriv->thread = NULL;
drivers/media/dvb-core/dvb_frontend.c
891
fepriv->thread = fe_thread;
drivers/media/i2c/tvaudio.c
135
struct task_struct *thread;
drivers/media/i2c/tvaudio.c
1835
if (chip->thread)
drivers/media/i2c/tvaudio.c
1836
wake_up_process(chip->thread);
drivers/media/i2c/tvaudio.c
1882
if (chip->thread) {
drivers/media/i2c/tvaudio.c
2048
chip->thread = NULL;
drivers/media/i2c/tvaudio.c
2059
chip->thread = kthread_run(chip_thread, chip, "%s",
drivers/media/i2c/tvaudio.c
2061
if (IS_ERR(chip->thread)) {
drivers/media/i2c/tvaudio.c
2063
chip->thread = NULL;
drivers/media/i2c/tvaudio.c
2075
if (chip->thread) {
drivers/media/i2c/tvaudio.c
2077
kthread_stop(chip->thread);
drivers/media/i2c/tvaudio.c
2078
chip->thread = NULL;
drivers/media/i2c/tvaudio.c
322
wake_up_process(chip->thread);
drivers/media/pci/pt3/pt3.c
439
adap->thread->comm);
drivers/media/pci/pt3/pt3.c
454
adap->thread->comm);
drivers/media/pci/pt3/pt3.c
460
struct task_struct *thread;
drivers/media/pci/pt3/pt3.c
463
thread = kthread_run(pt3_fetch_thread, adap, "pt3-ad%i-dmx%i",
drivers/media/pci/pt3/pt3.c
465
if (IS_ERR(thread)) {
drivers/media/pci/pt3/pt3.c
466
int ret = PTR_ERR(thread);
drivers/media/pci/pt3/pt3.c
468
adap->thread = NULL;
drivers/media/pci/pt3/pt3.c
474
adap->thread = thread;
drivers/media/pci/pt3/pt3.c
490
ret = kthread_stop(adap->thread);
drivers/media/pci/pt3/pt3.c
491
adap->thread = NULL;
drivers/media/pci/pt3/pt3.c
518
if (adap->num_feeds > 0 || !adap->thread)
drivers/media/pci/pt3/pt3.c
604
if (adap->thread)
drivers/media/pci/pt3/pt3.h
121
struct task_struct *thread;
drivers/media/pci/saa7134/saa7134-tvaudio.c
1014
dev->thread.thread = NULL;
drivers/media/pci/saa7134/saa7134-tvaudio.c
1015
dev->thread.scan1 = dev->thread.scan2 = 0;
drivers/media/pci/saa7134/saa7134-tvaudio.c
1019
dev->thread.thread = kthread_run(my_thread, dev, "%s", dev->name);
drivers/media/pci/saa7134/saa7134-tvaudio.c
1020
if (IS_ERR(dev->thread.thread)) {
drivers/media/pci/saa7134/saa7134-tvaudio.c
1041
if (dev->thread.thread && !dev->thread.stopped)
drivers/media/pci/saa7134/saa7134-tvaudio.c
1042
kthread_stop(dev->thread.thread);
drivers/media/pci/saa7134/saa7134-tvaudio.c
1054
} else if (dev->thread.thread) {
drivers/media/pci/saa7134/saa7134-tvaudio.c
1055
dev->thread.mode = UNSET;
drivers/media/pci/saa7134/saa7134-tvaudio.c
1056
dev->thread.scan2++;
drivers/media/pci/saa7134/saa7134-tvaudio.c
1058
if (!dev->insuspend && !dev->thread.stopped)
drivers/media/pci/saa7134/saa7134-tvaudio.c
1059
wake_up_process(dev->thread.thread);
drivers/media/pci/saa7134/saa7134-tvaudio.c
304
if (dev->thread.scan1 == dev->thread.scan2 &&
drivers/media/pci/saa7134/saa7134-tvaudio.c
314
return dev->thread.scan1 != dev->thread.scan2;
drivers/media/pci/saa7134/saa7134-tvaudio.c
477
dev->thread.scan1 = dev->thread.scan2;
drivers/media/pci/saa7134/saa7134-tvaudio.c
479
dev->thread.scan1);
drivers/media/pci/saa7134/saa7134-tvaudio.c
518
if (dev->thread.scan1 != dev->thread.scan2)
drivers/media/pci/saa7134/saa7134-tvaudio.c
592
if (UNSET == dev->thread.mode) {
drivers/media/pci/saa7134/saa7134-tvaudio.c
596
mode = dev->thread.mode;
drivers/media/pci/saa7134/saa7134-tvaudio.c
606
dev->thread.stopped = 1;
drivers/media/pci/saa7134/saa7134-tvaudio.c
773
dev->thread.scan1 = dev->thread.scan2;
drivers/media/pci/saa7134/saa7134-tvaudio.c
775
dev->thread.scan1);
drivers/media/pci/saa7134/saa7134-tvaudio.c
851
dev->thread.stopped = 1;
drivers/media/pci/saa7134/saa7134-video.c
1403
mode = dev->thread.mode;
drivers/media/pci/saa7134/saa7134-video.c
1409
dev->thread.mode = t->audmode;
drivers/media/pci/saa7134/saa7134.h
455
struct task_struct *thread;
drivers/media/pci/saa7134/saa7134.h
641
struct saa7134_thread thread;
drivers/media/rc/rc-core-priv.h
50
struct task_struct *thread;
drivers/media/rc/rc-ir-raw.c
229
if (!dev->raw || !dev->raw->thread)
drivers/media/rc/rc-ir-raw.c
232
wake_up_process(dev->raw->thread);
drivers/media/rc/rc-ir-raw.c
634
struct task_struct *thread;
drivers/media/rc/rc-ir-raw.c
636
thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
drivers/media/rc/rc-ir-raw.c
637
if (IS_ERR(thread))
drivers/media/rc/rc-ir-raw.c
638
return PTR_ERR(thread);
drivers/media/rc/rc-ir-raw.c
640
dev->raw->thread = thread;
drivers/media/rc/rc-ir-raw.c
665
kthread_stop(dev->raw->thread);
drivers/media/usb/pvrusb2/pvrusb2-dvb.c
100
kthread_stop(adap->thread);
drivers/media/usb/pvrusb2/pvrusb2-dvb.c
101
adap->thread = NULL;
drivers/media/usb/pvrusb2/pvrusb2-dvb.c
170
adap->thread = kthread_run(pvr2_dvb_feed_thread, adap, "pvrusb2-dvb");
drivers/media/usb/pvrusb2/pvrusb2-dvb.c
172
if (IS_ERR(adap->thread)) {
drivers/media/usb/pvrusb2/pvrusb2-dvb.c
173
ret = PTR_ERR(adap->thread);
drivers/media/usb/pvrusb2/pvrusb2-dvb.c
174
adap->thread = NULL;
drivers/media/usb/pvrusb2/pvrusb2-dvb.c
99
if (adap->thread) {
drivers/media/usb/pvrusb2/pvrusb2-dvb.h
29
struct task_struct *thread;
drivers/misc/amd-sbi/rmi-core.c
121
input->thread = thread_id << 1;
drivers/misc/amd-sbi/rmi-core.c
151
u16 thread)
drivers/misc/amd-sbi/rmi-core.c
157
if (thread > 127) {
drivers/misc/amd-sbi/rmi-core.c
158
thread -= 128;
drivers/misc/amd-sbi/rmi-core.c
166
prepare_cpuid_input_message(&input, thread,
drivers/misc/amd-sbi/rmi-core.c
175
u16 thread)
drivers/misc/amd-sbi/rmi-core.c
179
prepare_cpuid_input_message_ext(&input, thread,
drivers/misc/amd-sbi/rmi-core.c
193
u16 thread;
drivers/misc/amd-sbi/rmi-core.c
204
thread = msg->cpu_in_out >> CPUID_MCA_THRD_INDEX;
drivers/misc/amd-sbi/rmi-core.c
212
ret = rmi_cpuid_input(data, msg, thread);
drivers/misc/amd-sbi/rmi-core.c
217
ret = rmi_cpuid_input_ext(data, msg, thread);
drivers/misc/amd-sbi/rmi-core.c
265
u16 thread)
drivers/misc/amd-sbi/rmi-core.c
271
if (thread > 127) {
drivers/misc/amd-sbi/rmi-core.c
272
thread -= 128;
drivers/misc/amd-sbi/rmi-core.c
280
prepare_mca_msr_input_message(&input, thread,
drivers/misc/amd-sbi/rmi-core.c
288
u16 thread)
drivers/misc/amd-sbi/rmi-core.c
292
prepare_mca_msr_input_message_ext(&input, thread,
drivers/misc/amd-sbi/rmi-core.c
306
u16 thread;
drivers/misc/amd-sbi/rmi-core.c
317
thread = msg->mcamsr_in_out >> CPUID_MCA_THRD_INDEX;
drivers/misc/amd-sbi/rmi-core.c
325
ret = rmi_mcamsr_input(data, msg, thread);
drivers/misc/amd-sbi/rmi-core.c
330
ret = rmi_mcamsr_input_ext(data, msg, thread);
drivers/misc/amd-sbi/rmi-core.c
58
u8 thread; /* thread number */
drivers/misc/amd-sbi/rmi-core.c
97
input->thread = thread_id << 1;
drivers/misc/ocxl/file.c
130
ctx->tidr = current->thread.tidr;
drivers/mtd/nand/raw/cadence-nand-controller.c
1039
u8 thread)
drivers/mtd/nand/raw/cadence-nand-controller.c
1047
BIT(thread), true);
drivers/mtd/nand/raw/cadence-nand-controller.c
1061
reg |= FIELD_PREP(CMD_REG0_TN, thread);
drivers/mtd/nand/raw/cadence-nand-controller.c
1071
u8 thread)
drivers/mtd/nand/raw/cadence-nand-controller.c
1076
irq_mask.trd_status = BIT(thread);
drivers/mtd/nand/raw/cadence-nand-controller.c
1077
irq_mask.trd_error = BIT(thread);
drivers/mtd/nand/raw/cadence-nand-controller.c
1082
status = cadence_nand_cdma_send(cdns_ctrl, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
1408
unsigned int thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1004
unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1007
if (test_bit(thread, &port->priv->lock_map))
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1008
spin_lock_irqsave(&port->bm_lock[thread], flags);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1022
mvpp2_thread_write_relaxed(port->priv, thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1031
mvpp2_thread_write_relaxed(port->priv, thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1033
mvpp2_thread_write_relaxed(port->priv, thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1036
if (test_bit(thread, &port->priv->lock_map))
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1037
spin_unlock_irqrestore(&port->bm_lock[thread], flags);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1431
u32 thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1437
thread = mvpp2_cpu_to_thread(port->priv, cpu);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1439
mvpp2_thread_write(port->priv, thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1441
mvpp2_thread_write(port->priv, thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1453
u32 val, thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1459
thread = mvpp2_cpu_to_thread(port->priv, cpu);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1466
mvpp2_thread_write(port->priv, thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1468
mvpp2_thread_write(port->priv, thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
158
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
161
writel(data, priv->swth_base[thread] + offset);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
164
static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
167
return readl(priv->swth_base[thread] + offset);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
170
static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
173
writel_relaxed(data, priv->swth_base[thread] + offset);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
176
static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
179
return readl_relaxed(priv->swth_base[thread] + offset);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2493
unsigned int thread =
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2496
MVPP2_AGGR_TXQ_STATUS_REG(thread));
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2515
unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2520
mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2522
val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2536
unsigned int thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2547
for (thread = 0; thread < port->priv->nthreads; thread++) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2550
txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2726
unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2731
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2732
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2742
unsigned int thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2750
for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2751
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2752
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2864
if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2880
unsigned int thread)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2891
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2908
unsigned int thread, struct mvpp2 *priv)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2923
MVPP2_AGGR_TXQ_INDEX_REG(thread));
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2934
mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2935
mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2946
unsigned int thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2965
thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2966
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2971
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2972
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2973
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3057
unsigned int thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3082
thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3083
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3084
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3085
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3094
unsigned int thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3110
thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3111
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3112
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3114
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3116
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3117
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3119
val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3121
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3132
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3151
for (thread = 0; thread < port->priv->nthreads; thread++) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3152
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3184
unsigned int thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3186
for (thread = 0; thread < port->priv->nthreads; thread++) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3187
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3213
thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3214
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3215
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3216
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3225
unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3228
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3229
val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3231
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3247
pending = mvpp2_thread_read(port->priv, thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3253
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3256
for (thread = 0; thread < port->priv->nthreads; thread++) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3257
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3639
unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3646
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3648
aggr_txq = &port->priv->aggr_txqs[thread];
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3670
unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3682
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3683
aggr_txq = &port->priv->aggr_txqs[thread];
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4086
unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4087
struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4192
unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4193
struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4365
unsigned int thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4370
thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4374
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4375
aggr_txq = &port->priv->aggr_txqs[thread];
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4377
if (test_bit(thread, &port->priv->lock_map))
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4378
spin_lock_irqsave(&port->tx_lock[thread], flags);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4433
struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4465
struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4475
if (test_bit(thread, &port->priv->lock_map))
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4476
spin_unlock_irqrestore(&port->tx_lock[thread], flags);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4497
unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4520
mvpp2_thread_write(port->priv, thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
466
unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
468
*dma_addr = mvpp2_thread_read(priv, thread,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
470
*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
476
val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4888
unsigned int thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4903
for (thread = 0; thread < port->priv->nthreads; thread++) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4904
port_pcpu = per_cpu_ptr(port->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
5988
unsigned int thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6044
for (thread = 0; thread < priv->nthreads; thread++) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6045
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6046
txq_pcpu->thread = thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
620
unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
626
mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6837
unsigned int ntxqs, nrxqs, thread;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
7008
for (thread = 0; thread < priv->nthreads; thread++) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
7009
port_pcpu = per_cpu_ptr(port->pcpu, thread);
drivers/net/ethernet/qualcomm/qca_spi.c
684
struct task_struct *thread;
drivers/net/ethernet/qualcomm/qca_spi.c
693
thread = kthread_run((void *)qcaspi_spi_thread,
drivers/net/ethernet/qualcomm/qca_spi.c
696
if (IS_ERR(thread)) {
drivers/net/ethernet/qualcomm/qca_spi.c
699
return PTR_ERR(thread);
drivers/net/ethernet/qualcomm/qca_spi.c
702
qca->spi_thread = thread;
drivers/net/ethernet/realtek/8139too.c
1015
INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
drivers/net/ethernet/realtek/8139too.c
1106
cancel_delayed_work_sync(&tp->thread);
drivers/net/ethernet/realtek/8139too.c
1596
container_of(work, struct rtl8139_private, thread.work);
drivers/net/ethernet/realtek/8139too.c
1612
schedule_delayed_work(&tp->thread, thr_delay);
drivers/net/ethernet/realtek/8139too.c
1628
schedule_delayed_work(&tp->thread, next_tick);
drivers/net/ethernet/realtek/8139too.c
1642
container_of(work, struct rtl8139_private, thread.work);
drivers/net/ethernet/realtek/8139too.c
1696
INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
drivers/net/ethernet/realtek/8139too.c
1697
schedule_delayed_work(&tp->thread, next_tick);
drivers/net/ethernet/realtek/8139too.c
612
struct delayed_work thread;
drivers/net/wireless/rsi/rsi_common.h
60
struct rsi_thread *thread,
drivers/net/wireless/rsi/rsi_common.h
64
init_completion(&thread->completion);
drivers/net/wireless/rsi/rsi_common.h
65
atomic_set(&thread->thread_done, 0);
drivers/net/wireless/rsi/rsi_common.h
66
thread->task = kthread_run(func_ptr, common, "%s", name);
drivers/net/wireless/rsi/rsi_common.h
67
if (IS_ERR(thread->task))
drivers/net/wireless/rsi/rsi_common.h
68
return (int)PTR_ERR(thread->task);
drivers/net/wwan/t7xx/t7xx_port.h
132
struct task_struct *thread;
drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
248
port->thread = kthread_run(port_ctl_rx_thread, port, "%s", port_conf->name);
drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
249
if (IS_ERR(port->thread)) {
drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
251
return PTR_ERR(port->thread);
drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
263
if (port->thread)
drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
264
kthread_stop(port->thread);
drivers/of/cpu.c
119
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
drivers/of/cpu.c
124
if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
drivers/of/cpu.c
14
u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread)
drivers/of/cpu.c
21
if (!cell || !ac || ((sizeof(*cell) * ac * (thread + 1)) > len))
drivers/of/cpu.c
24
cell += ac * thread;
drivers/of/cpu.c
53
const char *prop_name, int cpu, unsigned int *thread)
drivers/of/cpu.c
69
if (thread)
drivers/of/cpu.c
70
*thread = tid;
drivers/of/cpu.c
85
int cpu, unsigned int *thread)
drivers/of/cpu.c
94
cpu, thread))
drivers/of/cpu.c
97
return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
drivers/pcmcia/cs.c
176
if (!socket->thread) {
drivers/pcmcia/cs.c
213
if (socket->thread)
drivers/pcmcia/cs.c
214
kthread_stop(socket->thread);
drivers/pcmcia/cs.c
580
skt->thread = current;
drivers/pcmcia/cs.c
589
skt->thread = NULL;
drivers/pcmcia/cs.c
686
if (s->thread) {
drivers/pcmcia/cs.c
691
wake_up_process(s->thread);
drivers/pcmcia/cs.c
711
if (s->thread) {
drivers/pcmcia/cs.c
716
wake_up_process(s->thread);
drivers/platform/surface/aggregator/ssh_packet_layer.c
1156
ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "ssam_serial_hub-tx");
drivers/platform/surface/aggregator/ssh_packet_layer.c
1157
if (IS_ERR(ptl->tx.thread))
drivers/platform/surface/aggregator/ssh_packet_layer.c
1158
return PTR_ERR(ptl->tx.thread);
drivers/platform/surface/aggregator/ssh_packet_layer.c
1173
if (!IS_ERR_OR_NULL(ptl->tx.thread)) {
drivers/platform/surface/aggregator/ssh_packet_layer.c
1186
status = kthread_stop(ptl->tx.thread);
drivers/platform/surface/aggregator/ssh_packet_layer.c
1187
ptl->tx.thread = NULL;
drivers/platform/surface/aggregator/ssh_packet_layer.c
1847
if (ptl->rx.thread)
drivers/platform/surface/aggregator/ssh_packet_layer.c
1850
ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl,
drivers/platform/surface/aggregator/ssh_packet_layer.c
1852
if (IS_ERR(ptl->rx.thread))
drivers/platform/surface/aggregator/ssh_packet_layer.c
1853
return PTR_ERR(ptl->rx.thread);
drivers/platform/surface/aggregator/ssh_packet_layer.c
1868
if (ptl->rx.thread) {
drivers/platform/surface/aggregator/ssh_packet_layer.c
1869
status = kthread_stop(ptl->rx.thread);
drivers/platform/surface/aggregator/ssh_packet_layer.c
1870
ptl->rx.thread = NULL;
drivers/platform/surface/aggregator/ssh_packet_layer.c
2041
ptl->tx.thread = NULL;
drivers/platform/surface/aggregator/ssh_packet_layer.c
2047
ptl->rx.thread = NULL;
drivers/platform/surface/aggregator/ssh_packet_layer.h
101
struct task_struct *thread;
drivers/platform/surface/aggregator/ssh_packet_layer.h
94
struct task_struct *thread;
drivers/ps3/ps3-lpm.c
1043
void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
drivers/s390/net/qeth_core_main.c
1028
unsigned long thread)
drivers/s390/net/qeth_core_main.c
1034
if (!(card->thread_allowed_mask & thread))
drivers/s390/net/qeth_core_main.c
1036
else if (card->thread_start_mask & thread)
drivers/s390/net/qeth_core_main.c
1039
card->thread_start_mask |= thread;
drivers/s390/net/qeth_core_main.c
1046
unsigned long thread)
drivers/s390/net/qeth_core_main.c
1051
card->thread_start_mask &= ~thread;
drivers/s390/net/qeth_core_main.c
1057
unsigned long thread)
drivers/s390/net/qeth_core_main.c
1062
card->thread_running_mask &= ~thread;
drivers/s390/net/qeth_core_main.c
1067
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
drivers/s390/net/qeth_core_main.c
1073
if (card->thread_start_mask & thread) {
drivers/s390/net/qeth_core_main.c
1074
if ((card->thread_allowed_mask & thread) &&
drivers/s390/net/qeth_core_main.c
1075
!(card->thread_running_mask & thread)) {
drivers/s390/net/qeth_core_main.c
1077
card->thread_start_mask &= ~thread;
drivers/s390/net/qeth_core_main.c
1078
card->thread_running_mask |= thread;
drivers/s390/net/qeth_core_main.c
1086
static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
drivers/s390/net/qeth_core_main.c
1091
(rc = __qeth_do_run_thread(card, thread)) >= 0);
drivers/s390/net/qeth_core_main.c
1553
static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
drivers/s390/net/qeth_core_main.c
1563
rc = (card->thread_start_mask & thread);
drivers/s390/scsi/zfcp_erp.c
1643
struct task_struct *thread;
drivers/s390/scsi/zfcp_erp.c
1645
thread = kthread_run(zfcp_erp_thread, adapter, "zfcperp%s",
drivers/s390/scsi/zfcp_erp.c
1647
if (IS_ERR(thread)) {
drivers/s390/scsi/zfcp_erp.c
1650
return PTR_ERR(thread);
drivers/s390/scsi/zfcp_erp.c
1653
adapter->erp_thread = thread;
drivers/scsi/aacraid/aacraid.h
1599
struct task_struct *thread;
drivers/scsi/aacraid/commctrl.c
332
kthread_stop(dev->thread);
drivers/scsi/aacraid/commctrl.c
335
dev->thread = kthread_run(aac_command_thread, dev,
drivers/scsi/aacraid/commsup.c
1489
if (aac->thread && aac->thread->pid != current->pid) {
drivers/scsi/aacraid/commsup.c
1491
kthread_stop(aac->thread);
drivers/scsi/aacraid/commsup.c
1492
aac->thread = NULL;
drivers/scsi/aacraid/commsup.c
1576
aac->thread = kthread_run(aac_command_thread, aac, "%s",
drivers/scsi/aacraid/commsup.c
1578
if (IS_ERR(aac->thread)) {
drivers/scsi/aacraid/commsup.c
1579
retval = PTR_ERR(aac->thread);
drivers/scsi/aacraid/commsup.c
1580
aac->thread = NULL;
drivers/scsi/aacraid/linit.c
1531
kthread_stop(aac->thread);
drivers/scsi/aacraid/linit.c
1532
aac->thread = NULL;
drivers/scsi/aacraid/linit.c
1705
aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
drivers/scsi/aacraid/linit.c
1706
if (IS_ERR(aac->thread)) {
drivers/scsi/aacraid/linit.c
1708
error = PTR_ERR(aac->thread);
drivers/scsi/aacraid/linit.c
1709
aac->thread = NULL;
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2608
struct task_struct *thread;
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2612
thread = kthread_create_on_cpu(bnx2fc_percpu_io_thread,
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2614
if (IS_ERR(thread))
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2615
return PTR_ERR(thread);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2617
p->iothread = thread;
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2618
wake_up_process(thread);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2625
struct task_struct *thread;
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2633
thread = p->iothread;
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2646
if (thread)
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2647
kthread_stop(thread);
drivers/scsi/bnx2i/bnx2i_init.c
414
struct task_struct *thread;
drivers/scsi/bnx2i/bnx2i_init.c
418
thread = kthread_create_on_cpu(bnx2i_percpu_io_thread, (void *)p,
drivers/scsi/bnx2i/bnx2i_init.c
420
if (IS_ERR(thread))
drivers/scsi/bnx2i/bnx2i_init.c
421
return PTR_ERR(thread);
drivers/scsi/bnx2i/bnx2i_init.c
423
p->iothread = thread;
drivers/scsi/bnx2i/bnx2i_init.c
424
wake_up_process(thread);
drivers/scsi/bnx2i/bnx2i_init.c
431
struct task_struct *thread;
drivers/scsi/bnx2i/bnx2i_init.c
437
thread = p->iothread;
drivers/scsi/bnx2i/bnx2i_init.c
449
if (thread)
drivers/scsi/bnx2i/bnx2i_init.c
450
kthread_stop(thread);
drivers/scsi/qedi/qedi_main.c
1952
struct task_struct *thread;
drivers/scsi/qedi/qedi_main.c
1954
thread = kthread_create_on_cpu(qedi_percpu_io_thread, (void *)p,
drivers/scsi/qedi/qedi_main.c
1956
if (IS_ERR(thread))
drivers/scsi/qedi/qedi_main.c
1957
return PTR_ERR(thread);
drivers/scsi/qedi/qedi_main.c
1959
p->iothread = thread;
drivers/scsi/qedi/qedi_main.c
1960
wake_up_process(thread);
drivers/scsi/qedi/qedi_main.c
1968
struct task_struct *thread;
drivers/scsi/qedi/qedi_main.c
1972
thread = p->iothread;
drivers/scsi/qedi/qedi_main.c
1983
if (thread)
drivers/scsi/qedi/qedi_main.c
1984
kthread_stop(thread);
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
201
int thread)
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
207
if ((thread >= SH_CSS_MAX_SP_THREADS) || (thread < 0) ||
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
210
q = &css_queues.host2sp_buffer_queue_handles[thread][id];
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
74
int thread
drivers/staging/media/av7110/av7110.c
2332
struct task_struct *thread;
drivers/staging/media/av7110/av7110.c
2656
thread = kthread_run(arm_thread, (void *)av7110, "arm_mon");
drivers/staging/media/av7110/av7110.c
2657
if (IS_ERR(thread)) {
drivers/staging/media/av7110/av7110.c
2658
ret = PTR_ERR(thread);
drivers/staging/media/av7110/av7110.c
2661
av7110->arm_thread = thread;
drivers/staging/media/ipu3/ipu3-abi.h
277
#define IMGU_ABI_EVENT_BUFFER_ENQUEUED(thread, queue) \
drivers/staging/media/ipu3/ipu3-abi.h
278
(0 << 24 | (thread) << 16 | (queue) << 8)
drivers/staging/media/ipu3/ipu3-css.c
1058
static u8 imgu_css_queue_pos(struct imgu_css *css, int queue, int thread)
drivers/staging/media/ipu3/ipu3-css.c
1066
return queue >= 0 ? readb(&q->host2sp_bufq_info[thread][queue].end) :
drivers/staging/media/ipu3/ipu3-css.c
1072
int queue, int thread, u32 data)
drivers/staging/media/ipu3/ipu3-css.c
1082
size = readb(&q->host2sp_bufq_info[thread][queue].size);
drivers/staging/media/ipu3/ipu3-css.c
1083
start = readb(&q->host2sp_bufq_info[thread][queue].start);
drivers/staging/media/ipu3/ipu3-css.c
1084
end = readb(&q->host2sp_bufq_info[thread][queue].end);
drivers/staging/media/ipu3/ipu3-css.c
1099
writel(data, &q->host2sp_bufq[thread][queue][end]);
drivers/staging/media/ipu3/ipu3-css.c
1100
writeb(end2, &q->host2sp_bufq_info[thread][queue].end);
drivers/tty/mips_ejtag_fdc.c
1037
kthread_stop(priv->thread);
drivers/tty/mips_ejtag_fdc.c
1067
kthread_stop(priv->thread);
drivers/tty/mips_ejtag_fdc.c
1099
priv->thread = kthread_run_on_cpu(mips_ejtag_fdc_put, priv,
drivers/tty/mips_ejtag_fdc.c
1101
if (IS_ERR(priv->thread)) {
drivers/tty/mips_ejtag_fdc.c
1102
ret = PTR_ERR(priv->thread);
drivers/tty/mips_ejtag_fdc.c
150
struct task_struct *thread;
drivers/tty/mips_ejtag_fdc.c
964
priv->thread = kthread_run_on_cpu(mips_ejtag_fdc_put, priv,
drivers/tty/mips_ejtag_fdc.c
966
if (IS_ERR(priv->thread)) {
drivers/tty/mips_ejtag_fdc.c
967
ret = PTR_ERR(priv->thread);
drivers/usb/atm/usbatm.c
1068
instance->thread = NULL;
drivers/usb/atm/usbatm.c
1223
if (instance->thread != NULL)
drivers/usb/atm/usbatm.c
1224
send_sig(SIGTERM, instance->thread, 1);
drivers/usb/atm/usbatm.c
969
instance->thread = NULL;
drivers/usb/atm/usbatm.c
987
instance->thread = t;
drivers/usb/atm/usbatm.h
147
struct task_struct *thread;
drivers/vfio/virqfd.c
100
if (virqfd->thread)
drivers/vfio/virqfd.c
101
virqfd->thread(virqfd->opaque, virqfd->data);
drivers/vfio/virqfd.c
113
void (*thread)(void *, void *),
drivers/vfio/virqfd.c
128
virqfd->thread = thread;
drivers/vfio/virqfd.c
180
if ((!handler || handler(opaque, data)) && thread)
drivers/vfio/virqfd.c
220
if (*pvirqfd && (*pvirqfd)->thread)
drivers/vfio/virqfd.c
52
virqfd->thread)
drivers/virt/acrn/acrn_drv.h
131
struct task_struct *thread;
drivers/virt/acrn/ioreq.c
442
client->thread = kthread_run(ioreq_task, client, "VM%u-%s",
drivers/virt/acrn/ioreq.c
444
if (IS_ERR(client->thread)) {
drivers/virt/acrn/ioreq.c
477
kthread_stop(client->thread);
drivers/w1/w1.c
233
wake_up_process(md->thread);
drivers/w1/w1_int.c
135
dev->thread = kthread_run(&w1_process, dev, "%s", dev->name);
drivers/w1/w1_int.c
136
if (IS_ERR(dev->thread)) {
drivers/w1/w1_int.c
137
retval = PTR_ERR(dev->thread);
drivers/w1/w1_int.c
158
kthread_stop(dev->thread);
drivers/w1/w1_int.c
179
kthread_stop(dev->thread);
drivers/w1/w1_netlink.c
689
wake_up_process(dev->thread);
fs/binfmt_elf.c
1690
struct elf_thread_core_info *thread;
fs/binfmt_elf.c
1877
info->thread = kzalloc_flex(*info->thread, notes, info->thread_notes);
fs/binfmt_elf.c
1878
if (unlikely(!info->thread))
fs/binfmt_elf.c
1881
info->thread->task = dump_task;
fs/binfmt_elf.c
1888
t->next = info->thread->next;
fs/binfmt_elf.c
1889
info->thread->next = t;
fs/binfmt_elf.c
1895
for (t = info->thread; t != NULL; t = t->next)
fs/binfmt_elf.c
1925
struct elf_thread_core_info *t = info->thread;
fs/binfmt_elf.c
1957
struct elf_thread_core_info *threads = info->thread;
fs/ceph/super.h
892
struct task_struct *thread;
fs/ceph/super.h
898
.thread = current, \
fs/ceph/super.h
924
if (ctx->thread == current) {
fs/hfs/catalog.c
204
fd->search_key->cat.ParID = rec.thread.ParID;
fs/hfs/catalog.c
205
len = fd->search_key->cat.CName.len = rec.thread.CName.len;
fs/hfs/catalog.c
210
memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len);
fs/hfs/catalog.c
70
memset(rec->thread.reserved, 0, sizeof(rec->thread.reserved));
fs/hfs/catalog.c
71
rec->thread.ParID = cpu_to_be32(parentid);
fs/hfs/catalog.c
72
hfs_asc2mac(sb, &rec->thread.CName, name);
fs/hfs/dir.c
94
be32_to_cpu(entry.thread.ParID), DT_DIR))
fs/hfsplus/catalog.c
183
entry->thread.reserved = 0;
fs/hfsplus/catalog.c
184
entry->thread.parentID = cpu_to_be32(parentid);
fs/hfsplus/catalog.c
185
err = hfsplus_asc2uni(sb, &entry->thread.nodeName, HFSPLUS_MAX_STRLEN,
fs/hfsplus/catalog.c
190
return 10 + be16_to_cpu(entry->thread.nodeName.length) * 2;
fs/hfsplus/catalog.c
212
if (be16_to_cpu(tmp.thread.nodeName.length) > 255) {
fs/hfsplus/catalog.c
218
be32_to_cpu(tmp.thread.parentID),
fs/hfsplus/catalog.c
219
&tmp.thread.nodeName);
fs/hfsplus/dir.c
182
be32_to_cpu(entry.thread.parentID), DT_DIR))
fs/resctrl/pseudo_lock.c
692
struct task_struct *thread;
fs/resctrl/pseudo_lock.c
719
thread = kthread_run_on_cpu(resctrl_arch_measure_cycles_lat_fn,
fs/resctrl/pseudo_lock.c
722
thread = kthread_run_on_cpu(resctrl_arch_measure_l2_residency,
fs/resctrl/pseudo_lock.c
725
thread = kthread_run_on_cpu(resctrl_arch_measure_l3_residency,
fs/resctrl/pseudo_lock.c
730
if (IS_ERR(thread)) {
fs/resctrl/pseudo_lock.c
731
ret = PTR_ERR(thread);
fs/resctrl/pseudo_lock.c
800
struct task_struct *thread;
fs/resctrl/pseudo_lock.c
823
thread = kthread_run_on_cpu(resctrl_arch_pseudo_lock_fn, plr,
fs/resctrl/pseudo_lock.c
825
if (IS_ERR(thread)) {
fs/resctrl/pseudo_lock.c
826
ret = PTR_ERR(thread);
include/linux/cpu.h
43
int cpu, unsigned int *thread);
include/linux/hfs_common.h
446
struct hfs_cat_thread thread;
include/linux/hfs_common.h
585
struct hfsplus_cat_thread thread;
include/linux/iio/trigger_consumer.h
33
irqreturn_t (*thread)(int irq, void *p);
include/linux/iio/trigger_consumer.h
43
irqreturn_t (*thread)(int irq, void *p),
include/linux/iio/triggered_buffer.h
14
irqreturn_t (*thread)(int irq, void *p),
include/linux/iio/triggered_buffer.h
20
#define iio_triggered_buffer_setup(indio_dev, h, thread, setup_ops) \
include/linux/iio/triggered_buffer.h
21
iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), \
include/linux/iio/triggered_buffer.h
28
irqreturn_t (*thread)(int irq, void *p),
include/linux/iio/triggered_buffer.h
33
#define devm_iio_triggered_buffer_setup(dev, indio_dev, h, thread, setup_ops) \
include/linux/iio/triggered_buffer.h
34
devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), \
include/linux/iio/triggered_event.h
9
irqreturn_t (*thread)(int irq, void *p));
include/linux/interrupt.h
132
struct task_struct *thread;
include/linux/mISDNif.h
508
struct task_struct *thread;
include/linux/netdevice.h
406
struct task_struct *thread;
include/linux/of.h
368
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
include/linux/of.h
374
extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread);
include/linux/of.h
686
unsigned int *thread)
include/linux/sched.h
1640
struct thread_struct thread;
include/linux/sched/task.h
191
*size = arch_task_struct_size - offsetof(struct task_struct, thread);
include/linux/vfio.h
378
void (*thread)(void *, void *);
include/linux/vfio.h
389
void (*thread)(void *, void *), void *data,
include/linux/w1.h
232
struct task_struct *thread;
include/pcmcia/ss.h
184
struct task_struct *thread;
include/soc/tegra/bpmp.h
27
} cpu_tx, thread, cpu_rx;
init/init_task.c
154
.thread = INIT_THREAD,
io_uring/fdinfo.c
182
tsk = rcu_dereference(sq->thread);
io_uring/io_uring.c
2591
if (unlikely(ctx->sq_data->thread == NULL)) {
io_uring/sqpoll.c
305
rcu_assign_pointer(sqd->thread, NULL);
io_uring/sqpoll.c
418
rcu_assign_pointer(sqd->thread, NULL);
io_uring/sqpoll.c
485
ret = (attached && !sqd->thread) ? -ENXIO : 0;
io_uring/sqpoll.c
524
rcu_assign_pointer(sqd->thread, tsk);
io_uring/sqpoll.h
11
struct task_struct __rcu *thread;
io_uring/sqpoll.h
36
return rcu_dereference_protected(sqd->thread,
kernel/cpu.c
1151
if (!st->thread)
kernel/cpu.c
1202
.store = &cpuhp_state.thread,
kernel/cpu.c
1225
kthread_unpark(this_cpu_read(cpuhp_state.thread));
kernel/cpu.c
1308
kthread_park(st->thread);
kernel/cpu.c
1321
kthread_unpark(st->thread);
kernel/cpu.c
71
struct task_struct *thread;
kernel/cpu.c
778
wake_up_process(st->thread);
kernel/cpu.c
808
kthread_unpark(st->thread);
kernel/debug/gdbstub.c
495
struct task_struct *thread;
kernel/debug/gdbstub.c
499
thread = kgdb_usethread;
kernel/debug/gdbstub.c
500
if (!thread) {
kernel/debug/gdbstub.c
501
thread = kgdb_info[ks->cpu].task;
kernel/debug/gdbstub.c
512
if (thread == kgdb_info[i].task)
kernel/debug/gdbstub.c
532
sleeping_thread_to_gdb_regs(gdb_regs, thread);
kernel/debug/gdbstub.c
813
struct task_struct *thread;
kernel/debug/gdbstub.c
820
thread = getthread(ks->linux_regs, ks->threadid);
kernel/debug/gdbstub.c
821
if (!thread && ks->threadid > 0) {
kernel/debug/gdbstub.c
825
kgdb_usethread = thread;
kernel/debug/gdbstub.c
835
thread = getthread(ks->linux_regs, ks->threadid);
kernel/debug/gdbstub.c
836
if (!thread && ks->threadid > 0) {
kernel/debug/gdbstub.c
840
kgdb_contthread = thread;
kernel/debug/gdbstub.c
851
struct task_struct *thread;
kernel/debug/gdbstub.c
854
thread = getthread(ks->linux_regs, ks->threadid);
kernel/debug/gdbstub.c
855
if (thread)
kernel/debug/kdb/kdb_main.c
2204
(void *)(&p->thread),
kernel/exit.c
638
struct task_struct *thread, *reaper;
kernel/exit.c
640
thread = find_alive_thread(father);
kernel/exit.c
641
if (thread)
kernel/exit.c
642
return thread;
kernel/exit.c
661
thread = find_alive_thread(reaper);
kernel/exit.c
662
if (thread)
kernel/exit.c
663
return thread;
kernel/fork.c
846
*offset += offsetof(struct task_struct, thread);
kernel/irq/handle.c
144
wake_up_state(action->thread, TASK_INTERRUPTIBLE);
kernel/irq/handle.c
68
if (action->thread->flags & PF_EXITING)
kernel/irq/manage.c
1233
if (!action || !action->thread)
kernel/irq/manage.c
1236
wake_up_process(action->thread);
kernel/irq/manage.c
1304
if (action->thread)
kernel/irq/manage.c
1422
new->thread = get_task_struct(t);
kernel/irq/manage.c
1837
if (new->thread) {
kernel/irq/manage.c
1838
struct task_struct *t = new->thread;
kernel/irq/manage.c
1840
new->thread = NULL;
kernel/irq/manage.c
1843
if (new->secondary && new->secondary->thread) {
kernel/irq/manage.c
1844
struct task_struct *t = new->secondary->thread;
kernel/irq/manage.c
1846
new->secondary->thread = NULL;
kernel/irq/manage.c
1957
if (action->thread) {
kernel/irq/manage.c
1958
kthread_stop_put(action->thread);
kernel/irq/manage.c
1959
if (action->secondary && action->secondary->thread)
kernel/irq/manage.c
1960
kthread_stop_put(action->secondary->thread);
kernel/irq/manage.c
204
if (action->thread) {
kernel/irq/manage.c
206
wake_up_process(action->thread);
kernel/irq/manage.c
208
if (action->secondary && action->secondary->thread) {
kernel/irq/manage.c
210
wake_up_process(action->secondary->thread);
kernel/printk/printk_ringbuffer_kunit_test.c
258
struct task_struct *thread;
kernel/printk/printk_ringbuffer_kunit_test.c
299
thread = kthread_run_on_cpu(prbtest_writer, thread_data, cpu,
kernel/printk/printk_ringbuffer_kunit_test.c
301
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, thread);
kernel/printk/printk_ringbuffer_kunit_test.c
302
prbtest_add_kthread_cleanup(test, thread);
kernel/sched/cpufreq_schedutil.c
35
struct task_struct *thread;
kernel/sched/cpufreq_schedutil.c
657
struct task_struct *thread;
kernel/sched/cpufreq_schedutil.c
681
thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
kernel/sched/cpufreq_schedutil.c
684
if (IS_ERR(thread)) {
kernel/sched/cpufreq_schedutil.c
685
pr_err("failed to create sugov thread: %pe\n", thread);
kernel/sched/cpufreq_schedutil.c
686
return PTR_ERR(thread);
kernel/sched/cpufreq_schedutil.c
689
ret = sched_setattr_nocheck(thread, &attr);
kernel/sched/cpufreq_schedutil.c
691
kthread_stop(thread);
kernel/sched/cpufreq_schedutil.c
696
sg_policy->thread = thread;
kernel/sched/cpufreq_schedutil.c
698
set_cpus_allowed_ptr(thread, policy->related_cpus);
kernel/sched/cpufreq_schedutil.c
700
kthread_bind_mask(thread, policy->related_cpus);
kernel/sched/cpufreq_schedutil.c
705
wake_up_process(thread);
kernel/sched/cpufreq_schedutil.c
717
kthread_stop(sg_policy->thread);
kernel/seccomp.c
490
struct task_struct *thread, *caller;
kernel/seccomp.c
497
for_each_thread(caller, thread) {
kernel/seccomp.c
501
if (thread == caller)
kernel/seccomp.c
504
if (thread->flags & PF_EXITING)
kernel/seccomp.c
507
if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
kernel/seccomp.c
508
(thread->seccomp.mode == SECCOMP_MODE_FILTER &&
kernel/seccomp.c
509
is_ancestor(thread->seccomp.filter,
kernel/seccomp.c
514
failed = task_pid_vnr(thread);
kernel/seccomp.c
600
struct task_struct *thread, *caller;
kernel/seccomp.c
614
for_each_thread(caller, thread) {
kernel/seccomp.c
616
if (thread == caller)
kernel/seccomp.c
623
if (thread->flags & PF_EXITING)
kernel/seccomp.c
634
__seccomp_filter_release(thread->seccomp.filter);
kernel/seccomp.c
637
smp_store_release(&thread->seccomp.filter,
kernel/seccomp.c
639
atomic_set(&thread->seccomp.filter_count,
kernel/seccomp.c
649
task_set_no_new_privs(thread);
kernel/seccomp.c
657
if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
kernel/seccomp.c
658
seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
kernel/stop_machine.c
107
wake_up_process(stopper->thread);
kernel/stop_machine.c
317
wake_up_process(stopper1->thread);
kernel/stop_machine.c
318
wake_up_process(stopper2->thread);
kernel/stop_machine.c
38
struct task_struct *thread;
kernel/stop_machine.c
536
kthread_park(stopper->thread);
kernel/stop_machine.c
541
sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
kernel/stop_machine.c
556
kthread_unpark(stopper->thread);
kernel/stop_machine.c
560
.store = &cpu_stopper.thread,
kernel/stop_machine.c
60
if (task != stopper->thread)
kernel/time/posix-cpu-timers.c
59
const bool thread = !!CPUCLOCK_PERTHREAD(clock);
kernel/time/posix-cpu-timers.c
71
return thread ? task_pid(current) : task_tgid(current);
kernel/time/posix-cpu-timers.c
77
if (thread) {
kernel/trace/trace_osnoise.c
1132
osn_var->thread.arrival_time = time_get();
kernel/trace/trace_osnoise.c
1134
set_int_safe_time(osn_var, &osn_var->thread.delta_start);
kernel/trace/trace_osnoise.c
1136
osn_var->thread.count++;
kernel/trace/trace_osnoise.c
1157
duration = get_int_safe_duration(osn_var, &osn_var->thread.delta_start);
kernel/trace/trace_osnoise.c
1159
trace_thread_noise(t, osn_var->thread.arrival_time, duration);
kernel/trace/trace_osnoise.c
1161
osn_var->thread.arrival_time = 0;
kernel/trace/trace_osnoise.c
1319
s->thread_count = osn_var->thread.count;
kernel/trace/trace_osnoise.c
1335
s->thread_count = osn_var->thread.count - s->thread_count;
kernel/trace/trace_osnoise.c
1738
osn_var->thread.arrival_time = time_get();
kernel/trace/trace_osnoise.c
1762
copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
kernel/trace/trace_osnoise.c
1768
copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
kernel/trace/trace_osnoise.c
215
struct osn_thread thread;
kernel/trace/trace_osnoise.c
374
osn_var->thread.delta_start = 0;
kernel/trace/trace_osnoise.c
375
osn_var->thread.arrival_time = 0;
kernel/trace/trace_osnoise.c
734
if (osn_var->thread.delta_start)
kernel/trace/trace_osnoise.c
735
osn_var->thread.delta_start += duration;
net/core/dev.c
13205
napi->thread = this_cpu_read(backlog_napi);
net/core/dev.c
1656
n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
net/core/dev.c
1658
if (IS_ERR(n->thread)) {
net/core/dev.c
1659
err = PTR_ERR(n->thread);
net/core/dev.c
1661
n->thread = NULL;
net/core/dev.c
4945
struct task_struct *thread;
net/core/dev.c
4956
thread = READ_ONCE(napi->thread);
net/core/dev.c
4957
if (thread) {
net/core/dev.c
4958
if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
net/core/dev.c
4962
wake_up_process(thread);
net/core/dev.c
7165
kthread_stop(napi->thread);
net/core/dev.c
7166
napi->thread = NULL;
net/core/dev.c
7183
if (!napi->thread) {
net/core/dev.c
7200
if (!threaded && napi->thread) {
net/core/dev.c
7221
if (!napi->thread) {
net/core/dev.c
7642
if (n->dev->threaded && n->thread)
net/core/dev.c
7688
if (napi->thread) {
net/core/dev.c
7689
kthread_stop(napi->thread);
net/core/dev.c
7690
napi->thread = NULL;
net/core/netdev-genl.c
191
if (napi->thread) {
net/core/netdev-genl.c
192
pid = task_pid_nr(napi->thread);
security/landlock/tsync.c
334
const struct task_struct *caller, *thread;
security/landlock/tsync.c
341
for_each_thread(caller, thread) {
security/landlock/tsync.c
343
if (thread == caller)
security/landlock/tsync.c
347
if (thread->flags & PF_EXITING)
security/landlock/tsync.c
351
if (tsync_works_contains_task(works, thread))
security/landlock/tsync.c
374
struct task_struct *thread;
security/landlock/tsync.c
382
for_each_thread(caller, thread) {
security/landlock/tsync.c
384
if (thread == caller)
security/landlock/tsync.c
388
if (thread->flags & PF_EXITING)
security/landlock/tsync.c
392
if (tsync_works_contains_task(works, thread))
security/landlock/tsync.c
402
ctx = tsync_works_provide(works, thread);
security/landlock/tsync.c
418
err = task_work_add(thread, &ctx->work, TWA_SIGNAL);
tools/lib/perf/evlist.c
336
int cpu_map_idx, int thread, u64 id)
tools/lib/perf/evlist.c
339
struct perf_sample_id *sid = SID(evsel, cpu_map_idx, thread);
tools/lib/perf/evlist.c
357
int cpu_map_idx, int thread, u64 id)
tools/lib/perf/evlist.c
359
if (!SID(evsel, cpu_map_idx, thread))
tools/lib/perf/evlist.c
362
perf_evlist__id_hash(evlist, evsel, cpu_map_idx, thread, id);
tools/lib/perf/evlist.c
368
int cpu_map_idx, int thread, int fd)
tools/lib/perf/evlist.c
375
if (!SID(evsel, cpu_map_idx, thread))
tools/lib/perf/evlist.c
406
perf_evlist__id_add(evlist, evsel, cpu_map_idx, thread, id);
tools/lib/perf/evlist.c
491
static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
tools/lib/perf/evlist.c
493
struct perf_sample_id *sid = SID(evsel, cpu, thread);
tools/lib/perf/evlist.c
497
sid->tid = perf_thread_map__pid(evsel->threads, thread);
tools/lib/perf/evlist.c
542
int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
tools/lib/perf/evlist.c
554
if (evsel->system_wide && thread)
tools/lib/perf/evlist.c
573
fd = FD(evsel, cpu, thread);
tools/lib/perf/evlist.c
623
if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
tools/lib/perf/evlist.c
626
perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
tools/lib/perf/evlist.c
639
int cpu, thread, idx = 0;
tools/lib/perf/evlist.c
646
for (thread = 0; thread < nr_threads; thread++, idx++) {
tools/lib/perf/evlist.c
650
if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
tools/lib/perf/evlist.c
682
int cpu, thread;
tools/lib/perf/evlist.c
690
for (thread = 0; thread < nr_threads; thread++) {
tools/lib/perf/evlist.c
692
thread, &output, &output_overwrite, &nr_mmaps))
tools/lib/perf/evsel.c
100
static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd)
tools/lib/perf/evsel.c
117
fd = FD(leader, cpu_map_idx, thread);
tools/lib/perf/evsel.c
130
int idx, thread, err = 0;
tools/lib/perf/evsel.c
161
for (thread = 0; thread < threads->nr; thread++) {
tools/lib/perf/evsel.c
164
evsel_fd = FD(evsel, idx, thread);
tools/lib/perf/evsel.c
170
err = get_group_fd(evsel, idx, thread, &group_fd);
tools/lib/perf/evsel.c
175
threads->map[thread].pid,
tools/lib/perf/evsel.c
195
int thread;
tools/lib/perf/evsel.c
197
for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
tools/lib/perf/evsel.c
198
int *fd = FD(evsel, cpu_map_idx, thread);
tools/lib/perf/evsel.c
238
int idx, thread;
tools/lib/perf/evsel.c
244
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
tools/lib/perf/evsel.c
245
int *fd = FD(evsel, idx, thread);
tools/lib/perf/evsel.c
250
perf_mmap__munmap(MMAP(evsel, idx, thread));
tools/lib/perf/evsel.c
260
int ret, idx, thread;
tools/lib/perf/evsel.c
273
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
tools/lib/perf/evsel.c
274
int *fd = FD(evsel, idx, thread);
tools/lib/perf/evsel.c
281
map = MMAP(evsel, idx, thread);
tools/lib/perf/evsel.c
295
void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread)
tools/lib/perf/evsel.c
297
int *fd = FD(evsel, cpu_map_idx, thread);
tools/lib/perf/evsel.c
299
if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL)
tools/lib/perf/evsel.c
302
return MMAP(evsel, cpu_map_idx, thread)->base;
tools/lib/perf/evsel.c
335
int thread, struct perf_counts_values *count)
tools/lib/perf/evsel.c
338
int *fd = FD(evsel, cpu_map_idx, thread);
tools/lib/perf/evsel.c
400
int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
tools/lib/perf/evsel.c
404
int *fd = FD(evsel, cpu_map_idx, thread);
tools/lib/perf/evsel.c
414
return perf_evsel__read_group(evsel, cpu_map_idx, thread, count);
tools/lib/perf/evsel.c
416
if (MMAP(evsel, cpu_map_idx, thread) &&
tools/lib/perf/evsel.c
418
!perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count))
tools/lib/perf/evsel.c
429
int cpu_map_idx, int thread)
tools/lib/perf/evsel.c
431
int *fd = FD(evsel, cpu_map_idx, thread);
tools/lib/perf/evsel.c
443
int thread;
tools/lib/perf/evsel.c
445
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
tools/lib/perf/evsel.c
446
int err = perf_evsel__ioctl(evsel, ioc, arg, cpu_map_idx, thread);
tools/lib/perf/evsel.c
460
int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
tools/lib/perf/evsel.c
467
err = perf_evsel__ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, idx, thread);
tools/lib/perf/evsel.c
70
int idx, thread;
tools/lib/perf/evsel.c
73
for (thread = 0; thread < nthreads; thread++) {
tools/lib/perf/evsel.c
74
int *fd = FD(evsel, idx, thread);
tools/lib/perf/include/internal/evlist.h
129
int cpu_map_idx, int thread, u64 id);
tools/lib/perf/include/internal/evlist.h
133
int cpu_map_idx, int thread, int fd);
tools/lib/perf/include/perf/event.h
432
__u32 thread;
tools/lib/perf/include/perf/evsel.h
36
LIBPERF_API void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread);
tools/lib/perf/include/perf/evsel.h
37
LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
tools/lib/perf/include/perf/evsel.h
41
LIBPERF_API int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread);
tools/perf/arch/arm/tests/dwarf-unwind.c
14
struct thread *thread, u64 *regs)
tools/perf/arch/arm/tests/dwarf-unwind.c
29
map = maps__find(thread__maps(thread), (u64)sp);
tools/perf/arch/arm/tests/dwarf-unwind.c
46
struct thread *thread)
tools/perf/arch/arm/tests/dwarf-unwind.c
62
return sample_ustack(sample, thread, buf);
tools/perf/arch/arm64/tests/dwarf-unwind.c
14
struct thread *thread, u64 *regs)
tools/perf/arch/arm64/tests/dwarf-unwind.c
29
map = maps__find(thread__maps(thread), (u64)sp);
tools/perf/arch/arm64/tests/dwarf-unwind.c
46
struct thread *thread)
tools/perf/arch/arm64/tests/dwarf-unwind.c
62
return sample_ustack(sample, thread, buf);
tools/perf/arch/powerpc/tests/dwarf-unwind.c
14
struct thread *thread, u64 *regs)
tools/perf/arch/powerpc/tests/dwarf-unwind.c
29
map = maps__find(thread__maps(thread), (u64)sp);
tools/perf/arch/powerpc/tests/dwarf-unwind.c
46
struct thread *thread)
tools/perf/arch/powerpc/tests/dwarf-unwind.c
62
return sample_ustack(sample, thread, buf);
tools/perf/arch/powerpc/util/skip-callchain-idx.c
212
int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
tools/perf/arch/powerpc/util/skip-callchain-idx.c
226
thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
tools/perf/arch/x86/tests/dwarf-unwind.c
14
struct thread *thread, u64 *regs)
tools/perf/arch/x86/tests/dwarf-unwind.c
29
map = maps__find(thread__maps(thread), (u64)sp);
tools/perf/arch/x86/tests/dwarf-unwind.c
54
struct thread *thread)
tools/perf/arch/x86/tests/dwarf-unwind.c
78
return sample_ustack(sample, thread, buf);
tools/perf/bench/epoll-ctl.c
276
ret = pthread_create(&w->thread, attrp, workerfn,
tools/perf/bench/epoll-ctl.c
392
ret = pthread_join(worker[i].thread, NULL);
tools/perf/bench/epoll-ctl.c
69
pthread_t thread;
tools/perf/bench/epoll-wait.c
121
pthread_t thread;
tools/perf/bench/epoll-wait.c
365
ret = pthread_create(&w->thread, attrp, workerfn,
tools/perf/bench/futex-hash.c
201
ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
tools/perf/bench/futex-hash.c
222
ret = pthread_join(worker[i].thread, NULL);
tools/perf/bench/futex-hash.c
47
pthread_t thread;
tools/perf/bench/futex-lock-pi.c
158
if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {
tools/perf/bench/futex-lock-pi.c
226
ret = pthread_join(worker[i].thread, NULL);
tools/perf/bench/futex-lock-pi.c
30
pthread_t thread;
tools/perf/bench/sched-messaging.c
164
ret = pthread_create(&worker->thread, &attr, func, ctx);
tools/perf/bench/sched-messaging.c
205
pthread_join(worker->thread, &thread_status);
tools/perf/bench/sched-messaging.c
60
pthread_t thread;
tools/perf/builtin-c2c.c
1179
return scnprintf(hpp->buf, hpp->size, "%*d", width, thread__pid(he->thread));
tools/perf/builtin-c2c.c
1186
return thread__pid(left->thread) - thread__pid(right->thread);
tools/perf/builtin-c2c.c
338
thread__set_lbr_stitch_enable(al.thread, true);
tools/perf/builtin-inject.c
1012
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
tools/perf/builtin-inject.c
1013
if (thread == NULL) {
tools/perf/builtin-inject.c
1019
if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
tools/perf/builtin-inject.c
1024
sample__for_each_callchain_node(thread, evsel, sample, PERF_MAX_STACK_DEPTH,
tools/perf/builtin-inject.c
1027
thread__put(thread);
tools/perf/builtin-inject.c
398
struct thread *thread;
tools/perf/builtin-inject.c
417
thread = machine__find_thread(machine, sample->tid, sample->pid);
tools/perf/builtin-inject.c
418
if (thread == NULL)
tools/perf/builtin-inject.c
422
ret = thread__resolve_callchain(thread, cursor, evsel, sample,
tools/perf/builtin-inject.c
425
thread__put(thread);
tools/perf/builtin-inject.c
475
struct thread *thread;
tools/perf/builtin-inject.c
481
thread = machine__findnew_thread(machine, pid, tid);
tools/perf/builtin-inject.c
482
if (thread == NULL) {
tools/perf/builtin-inject.c
488
nsi = nsinfo__get(thread__nsinfo(thread));
tools/perf/builtin-inject.c
501
dso = machine__findnew_vdso(machine, thread);
tools/perf/builtin-inject.c
513
thread__put(thread);
tools/perf/builtin-inject.c
997
struct thread *thread;
tools/perf/builtin-kmem.c
411
al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
tools/perf/builtin-kmem.c
967
struct thread *thread = machine__findnew_thread(machine, sample->pid,
tools/perf/builtin-kmem.c
970
if (thread == NULL) {
tools/perf/builtin-kmem.c
979
dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread__tid(thread));
tools/perf/builtin-kmem.c
986
thread__put(thread);
tools/perf/builtin-kvm.c
1140
struct thread *thread;
tools/perf/builtin-kvm.c
1152
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
tools/perf/builtin-kvm.c
1153
if (thread == NULL) {
tools/perf/builtin-kvm.c
1159
if (!handle_kvm_event(kvm, thread, evsel, sample))
tools/perf/builtin-kvm.c
1162
thread__put(thread);
tools/perf/builtin-kvm.c
919
struct vcpu_event_record *per_vcpu_record(struct thread *thread,
tools/perf/builtin-kvm.c
924
if (!thread__priv(thread) && kvm_entry_event(evsel)) {
tools/perf/builtin-kvm.c
926
struct machine *machine = maps__machine(thread__maps(thread));
tools/perf/builtin-kvm.c
927
uint16_t e_machine = thread__e_machine(thread, machine, /*e_flags=*/NULL);
tools/perf/builtin-kvm.c
936
thread__set_priv(thread, vcpu_record);
tools/perf/builtin-kvm.c
939
return thread__priv(thread);
tools/perf/builtin-kvm.c
943
struct thread *thread,
tools/perf/builtin-kvm.c
951
vcpu_record = per_vcpu_record(thread, evsel, sample);
tools/perf/builtin-kwork.c
695
struct thread *thread;
tools/perf/builtin-kwork.c
703
thread = machine__findnew_thread(machine, sample->pid, sample->pid);
tools/perf/builtin-kwork.c
704
if (thread == NULL) {
tools/perf/builtin-kwork.c
711
if (thread__resolve_callchain(thread, cursor, evsel, sample,
tools/perf/builtin-kwork.c
735
thread__put(thread);
tools/perf/builtin-lock.c
1305
struct thread *t;
tools/perf/builtin-lock.c
1342
struct thread *t;
tools/perf/builtin-lock.c
1437
struct thread *thread = machine__findnew_thread(machine, sample->pid,
tools/perf/builtin-lock.c
1440
if (thread == NULL) {
tools/perf/builtin-lock.c
1451
thread__put(thread);
tools/perf/builtin-lock.c
1630
struct thread *t;
tools/perf/builtin-lock.c
1681
struct thread *t;
tools/perf/builtin-lock.c
847
struct thread *thread;
tools/perf/builtin-lock.c
858
thread = machine__findnew_thread(machine, -1, sample->pid);
tools/perf/builtin-lock.c
859
if (thread == NULL)
tools/perf/builtin-lock.c
865
ret = thread__resolve_callchain(thread, cursor, evsel, sample,
tools/perf/builtin-lock.c
868
thread__put(thread);
tools/perf/builtin-lock.c
873
thread__put(thread);
tools/perf/builtin-lock.c
903
struct thread *thread;
tools/perf/builtin-lock.c
908
thread = machine__findnew_thread(machine, -1, sample->pid);
tools/perf/builtin-lock.c
909
if (thread == NULL)
tools/perf/builtin-lock.c
914
ret = thread__resolve_callchain(thread, cursor, evsel, sample,
tools/perf/builtin-lock.c
916
thread__put(thread);
tools/perf/builtin-record.c
123
static __thread struct record_thread *thread;
tools/perf/builtin-record.c
1579
!bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits,
tools/perf/builtin-record.c
1580
thread->mask->affinity.nbits)) {
tools/perf/builtin-record.c
1581
bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits);
tools/perf/builtin-record.c
1582
bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits,
tools/perf/builtin-record.c
1583
map->affinity_mask.bits, thread->mask->affinity.nbits);
tools/perf/builtin-record.c
1584
sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
tools/perf/builtin-record.c
1585
(cpu_set_t *)thread->mask->affinity.bits);
tools/perf/builtin-record.c
1587
pr_debug("threads[%d]: running on cpu%d: ", thread->tid, sched_getcpu());
tools/perf/builtin-record.c
1588
mmap_cpu_mask__scnprintf(&thread->mask->affinity, "affinity");
tools/perf/builtin-record.c
1625
thread->bytes_transferred += src_size;
tools/perf/builtin-record.c
1626
thread->bytes_compressed += compressed;
tools/perf/builtin-record.c
1649
nr_mmaps = thread->nr_mmaps;
tools/perf/builtin-record.c
1650
maps = overwrite ? thread->overwrite_maps : thread->maps;
tools/perf/builtin-record.c
1746
thread = arg;
tools/perf/builtin-record.c
1747
thread->tid = gettid();
tools/perf/builtin-record.c
1749
err = write(thread->pipes.ack[1], &msg, sizeof(msg));
tools/perf/builtin-record.c
1751
pr_warning("threads[%d]: failed to notify on start: %m\n", thread->tid);
tools/perf/builtin-record.c
1753
pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
tools/perf/builtin-record.c
1755
pollfd = &thread->pollfd;
tools/perf/builtin-record.c
1756
ctlfd_pos = thread->ctlfd_pos;
tools/perf/builtin-record.c
1759
unsigned long long hits = thread->samples;
tools/perf/builtin-record.c
1761
if (record__mmap_read_all(thread->rec, false) < 0 || terminate)
tools/perf/builtin-record.c
1764
if (hits == thread->samples) {
tools/perf/builtin-record.c
1773
thread->waking++;
tools/perf/builtin-record.c
1782
close(thread->pipes.msg[0]);
tools/perf/builtin-record.c
1783
thread->pipes.msg[0] = -1;
tools/perf/builtin-record.c
1790
record__mmap_read_all(thread->rec, true);
tools/perf/builtin-record.c
1792
err = write(thread->pipes.ack[1], &msg, sizeof(msg));
tools/perf/builtin-record.c
1794
pr_warning("threads[%d]: failed to notify on termination: %m\n", thread->tid);
tools/perf/builtin-record.c
2319
thread->tid, tid);
tools/perf/builtin-record.c
2332
thread = &thread_data[0];
tools/perf/builtin-record.c
2368
thread->tid, rec->thread_data[t].tid);
tools/perf/builtin-record.c
2371
sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
tools/perf/builtin-record.c
2372
(cpu_set_t *)thread->mask->affinity.bits);
tools/perf/builtin-record.c
2374
pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
tools/perf/builtin-record.c
267
thread->bytes_written += size;
tools/perf/builtin-record.c
2727
unsigned long long hits = thread->samples;
tools/perf/builtin-record.c
2782
thread->waking = 0;
tools/perf/builtin-record.c
2796
if (hits == thread->samples) {
tools/perf/builtin-record.c
2799
err = fdarray__poll(&thread->pollfd, -1);
tools/perf/builtin-record.c
2806
thread->waking++;
tools/perf/builtin-record.c
2808
if (fdarray__filter(&thread->pollfd, POLLERR | POLLHUP,
tools/perf/builtin-record.c
2812
err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread);
tools/perf/builtin-record.c
665
thread->samples++;
tools/perf/builtin-record.c
678
thread->samples++;
tools/perf/builtin-report.c
1004
struct thread *parent;
tools/perf/builtin-report.c
1050
task__print_level(machine, task->thread, fp);
tools/perf/builtin-report.c
302
thread__set_lbr_stitch_enable(al.thread, true);
tools/perf/builtin-report.c
904
static int thread_level(struct machine *machine, const struct thread *thread)
tools/perf/builtin-report.c
906
struct thread *parent_thread;
tools/perf/builtin-report.c
909
if (thread__tid(thread) <= 0)
tools/perf/builtin-report.c
912
if (thread__ppid(thread) <= 0)
tools/perf/builtin-report.c
915
parent_thread = machine__find_thread(machine, -1, thread__ppid(thread));
tools/perf/builtin-report.c
917
pr_err("Missing parent thread of %d\n", thread__tid(thread));
tools/perf/builtin-report.c
925
static void task__print_level(struct machine *machine, struct thread *thread, FILE *fp)
tools/perf/builtin-report.c
927
int level = thread_level(machine, thread);
tools/perf/builtin-report.c
929
thread__pid(thread), thread__tid(thread),
tools/perf/builtin-report.c
930
thread__ppid(thread), level, "");
tools/perf/builtin-report.c
932
fprintf(fp, "%s\n", thread__comm_str(thread));
tools/perf/builtin-report.c
934
maps__fprintf_task(thread__maps(thread), comm_indent, fp);
tools/perf/builtin-report.c
952
struct thread *a = task_a->thread;
tools/perf/builtin-report.c
953
struct thread *b = task_b->thread;
tools/perf/builtin-report.c
981
struct thread *parent = machine__find_thread(machine, -1, thread__ppid(a));
tools/perf/builtin-report.c
992
struct thread *parent = machine__find_thread(machine, -1, thread__ppid(b));
tools/perf/builtin-sched.c
1004
BUG_ON(!RC_CHK_EQUAL(thread, atoms->thread));
tools/perf/builtin-sched.c
1039
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
tools/perf/builtin-sched.c
1047
atoms->thread = thread__get(thread);
tools/perf/builtin-sched.c
1132
thread__zput(atoms->thread);
tools/perf/builtin-sched.c
1145
struct thread *sched_out, *sched_in;
tools/perf/builtin-sched.c
116
struct thread *thread;
tools/perf/builtin-sched.c
1213
struct thread *thread = machine__findnew_thread(machine, -1, pid);
tools/perf/builtin-sched.c
1214
struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
tools/perf/builtin-sched.c
1218
if (thread == NULL)
tools/perf/builtin-sched.c
1223
if (thread_atoms_insert(sched, thread))
tools/perf/builtin-sched.c
1225
atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
tools/perf/builtin-sched.c
1237
thread__put(thread);
tools/perf/builtin-sched.c
1249
struct thread *wakee;
tools/perf/builtin-sched.c
1311
struct thread *migrant;
tools/perf/builtin-sched.c
1364
if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
tools/perf/builtin-sched.c
1371
ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread),
tools/perf/builtin-sched.c
1374
ret = printf(" %s:%d ", thread__comm_str(work_list->thread),
tools/perf/builtin-sched.c
1375
thread__tid(work_list->thread));
tools/perf/builtin-sched.c
1396
if (RC_CHK_EQUAL(l->thread, r->thread))
tools/perf/builtin-sched.c
1398
l_tid = thread__tid(l->thread);
tools/perf/builtin-sched.c
1399
r_tid = thread__tid(r->thread);
tools/perf/builtin-sched.c
1404
return (int)(RC_CHK_ACCESS(l->thread) - RC_CHK_ACCESS(r->thread));
tools/perf/builtin-sched.c
1542
static bool thread__has_color(struct thread *thread)
tools/perf/builtin-sched.c
1544
return thread__priv(thread) != NULL;
tools/perf/builtin-sched.c
1547
static struct thread*
tools/perf/builtin-sched.c
1550
struct thread *thread = machine__findnew_thread(machine, pid, tid);
tools/perf/builtin-sched.c
1553
if (!sched->map.color_pids || !thread || thread__priv(thread))
tools/perf/builtin-sched.c
1554
return thread;
tools/perf/builtin-sched.c
1559
thread__set_priv(thread, color ? ((void*)1) : NULL);
tools/perf/builtin-sched.c
1560
return thread;
tools/perf/builtin-sched.c
1586
struct thread *curr_thread = sched->curr_thread[cpu.cpu];
tools/perf/builtin-sched.c
1587
struct thread *curr_out_thread = sched->curr_out_thread[cpu.cpu];
tools/perf/builtin-sched.c
1592
struct thread *thread_to_check = sched_out ? curr_out_thread : curr_thread;
tools/perf/builtin-sched.c
1634
struct thread *sched_in, *sched_out;
tools/perf/builtin-sched.c
185
struct thread **curr_thread;
tools/perf/builtin-sched.c
186
struct thread **curr_out_thread;
tools/perf/builtin-sched.c
1888
struct thread *thread;
tools/perf/builtin-sched.c
1896
thread = machine__find_thread(machine, sample->pid, sample->tid);
tools/perf/builtin-sched.c
1897
if (!thread) {
tools/perf/builtin-sched.c
1902
tr = thread__get_runtime(thread);
tools/perf/builtin-sched.c
1904
thread__put(thread);
tools/perf/builtin-sched.c
1909
thread__put(thread);
tools/perf/builtin-sched.c
2046
static char *timehist_get_commstr(struct thread *thread)
tools/perf/builtin-sched.c
2049
const char *comm = thread__comm_str(thread);
tools/perf/builtin-sched.c
2050
pid_t tid = thread__tid(thread);
tools/perf/builtin-sched.c
2051
pid_t pid = thread__pid(thread);
tools/perf/builtin-sched.c
2072
struct thread *thread,
tools/perf/builtin-sched.c
2077
struct thread_runtime *tr = thread__priv(thread);
tools/perf/builtin-sched.c
2167
struct thread *thread,
tools/perf/builtin-sched.c
2170
struct thread_runtime *tr = thread__priv(thread);
tools/perf/builtin-sched.c
2192
c = (thread__tid(thread) == 0) ? 'i' : 's';
tools/perf/builtin-sched.c
2200
if (!thread__comm_set(thread)) {
tools/perf/builtin-sched.c
2202
thread__set_comm(thread, prev_comm, sample->time);
tools/perf/builtin-sched.c
2205
printf(" %-*s ", comm_width, timehist_get_commstr(thread));
tools/perf/builtin-sched.c
2208
printf(" %-*s ", MAX_PRIO_STR_LEN, timehist_get_priostr(evsel, thread, sample));
tools/perf/builtin-sched.c
2219
printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state);
tools/perf/builtin-sched.c
2229
if (thread__tid(thread) == 0)
tools/perf/builtin-sched.c
2333
struct thread *thread;
tools/perf/builtin-sched.c
2336
thread = machine__findnew_thread(machine, sample->pid, sample->pid);
tools/perf/builtin-sched.c
2337
if (thread == NULL) {
tools/perf/builtin-sched.c
2343
thread__put(thread);
tools/perf/builtin-sched.c
2349
if (thread__resolve_callchain(thread, cursor, evsel, sample,
tools/perf/builtin-sched.c
2354
thread__put(thread);
tools/perf/builtin-sched.c
2359
thread__put(thread);
tools/perf/builtin-sched.c
2381
static int init_idle_thread(struct thread *thread)
tools/perf/builtin-sched.c
2385
thread__set_comm(thread, idle_comm, 0);
tools/perf/builtin-sched.c
2395
thread__set_priv(thread, itr);
tools/perf/builtin-sched.c
2408
idle_threads = zalloc(ncpu * sizeof(struct thread *));
tools/perf/builtin-sched.c
2436
struct thread *idle = idle_threads[i];
tools/perf/builtin-sched.c
2452
static struct thread *get_idle_thread(int cpu)
tools/perf/builtin-sched.c
2462
p = realloc(idle_threads, j * sizeof(struct thread *));
tools/perf/builtin-sched.c
2466
idle_threads = (struct thread **) p;
tools/perf/builtin-sched.c
2502
static struct thread *timehist_get_thread(struct perf_sched *sched,
tools/perf/builtin-sched.c
2507
struct thread *thread;
tools/perf/builtin-sched.c
2510
thread = get_idle_thread(sample->cpu);
tools/perf/builtin-sched.c
2511
if (thread == NULL)
tools/perf/builtin-sched.c
2516
thread = machine__findnew_thread(machine, sample->pid,
tools/perf/builtin-sched.c
2518
if (thread == NULL) {
tools/perf/builtin-sched.c
2525
struct thread *idle;
tools/perf/builtin-sched.c
2539
itr->last_thread = thread__get(thread);
tools/perf/builtin-sched.c
2547
return thread;
tools/perf/builtin-sched.c
2551
struct thread *thread,
tools/perf/builtin-sched.c
2559
if (thread__is_filtered(thread)) {
tools/perf/builtin-sched.c
2571
tr = thread__get_runtime(thread);
tools/perf/builtin-sched.c
2598
struct thread *awakened)
tools/perf/builtin-sched.c
2600
struct thread *thread;
tools/perf/builtin-sched.c
2603
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
tools/perf/builtin-sched.c
2604
if (thread == NULL)
tools/perf/builtin-sched.c
2608
if (timehist_skip_sample(sched, thread, evsel, sample) &&
tools/perf/builtin-sched.c
2610
thread__put(thread);
tools/perf/builtin-sched.c
2619
printf(" %-*s ", comm_width, timehist_get_commstr(thread));
tools/perf/builtin-sched.c
2628
thread__put(thread);
tools/perf/builtin-sched.c
2647
struct thread *thread;
tools/perf/builtin-sched.c
2652
thread = machine__findnew_thread(machine, 0, pid);
tools/perf/builtin-sched.c
2653
if (thread == NULL)
tools/perf/builtin-sched.c
2656
tr = thread__get_runtime(thread);
tools/perf/builtin-sched.c
2658
thread__put(thread);
tools/perf/builtin-sched.c
2668
timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
tools/perf/builtin-sched.c
2670
thread__put(thread);
tools/perf/builtin-sched.c
2678
struct thread *migrated)
tools/perf/builtin-sched.c
2680
struct thread *thread;
tools/perf/builtin-sched.c
2692
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
tools/perf/builtin-sched.c
2693
if (thread == NULL)
tools/perf/builtin-sched.c
2696
if (timehist_skip_sample(sched, thread, evsel, sample) &&
tools/perf/builtin-sched.c
2698
thread__put(thread);
tools/perf/builtin-sched.c
2717
printf(" %-*s ", comm_width, timehist_get_commstr(thread));
tools/perf/builtin-sched.c
2726
thread__put(thread);
tools/perf/builtin-sched.c
2736
struct thread *thread;
tools/perf/builtin-sched.c
2741
thread = machine__findnew_thread(machine, 0, pid);
tools/perf/builtin-sched.c
2742
if (thread == NULL)
tools/perf/builtin-sched.c
2745
tr = thread__get_runtime(thread);
tools/perf/builtin-sched.c
2747
thread__put(thread);
tools/perf/builtin-sched.c
2757
machine, thread);
tools/perf/builtin-sched.c
2759
thread__put(thread);
tools/perf/builtin-sched.c
2768
struct thread *thread;
tools/perf/builtin-sched.c
2774
thread = get_idle_thread(sample->cpu);
tools/perf/builtin-sched.c
2776
thread = machine__findnew_thread(machine, -1, next_pid);
tools/perf/builtin-sched.c
2778
if (thread == NULL)
tools/perf/builtin-sched.c
2781
tr = thread__get_runtime(thread);
tools/perf/builtin-sched.c
2785
thread__put(thread);
tools/perf/builtin-sched.c
2797
struct thread *thread = NULL;
tools/perf/builtin-sched.c
2814
thread = timehist_get_thread(sched, sample, machine, evsel);
tools/perf/builtin-sched.c
2815
if (thread == NULL) {
tools/perf/builtin-sched.c
2820
if (timehist_skip_sample(sched, thread, evsel, sample))
tools/perf/builtin-sched.c
2823
tr = thread__get_runtime(thread);
tools/perf/builtin-sched.c
2859
if (!sched->idle_hist || thread__tid(thread) == 0) {
tools/perf/builtin-sched.c
287
struct thread *last_thread;
tools/perf/builtin-sched.c
2894
timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
tools/perf/builtin-sched.c
2921
thread__put(thread);
tools/perf/builtin-sched.c
294
static struct thread **idle_threads;
tools/perf/builtin-sched.c
2950
static void print_thread_runtime(struct thread *t,
tools/perf/builtin-sched.c
2973
static void print_thread_waittime(struct thread *t,
tools/perf/builtin-sched.c
2998
static int show_thread_runtime(struct thread *t, void *priv)
tools/perf/builtin-sched.c
3081
struct thread *t;
tools/perf/builtin-sched.c
3426
const char *comm = thread__comm_str(data->thread), *this_comm;
tools/perf/builtin-sched.c
3435
this_comm = thread__comm_str(this->thread);
tools/perf/builtin-sched.c
685
err = pthread_create(&task->thread, &attr, thread_func, parms);
tools/perf/builtin-sched.c
703
err = pthread_join(task->thread, NULL);
tools/perf/builtin-sched.c
75
pthread_t thread;
tools/perf/builtin-sched.c
898
struct thread *child, *parent;
tools/perf/builtin-sched.c
939
static struct thread_runtime *thread__init_runtime(struct thread *thread)
tools/perf/builtin-sched.c
949
thread__set_priv(thread, r);
tools/perf/builtin-sched.c
954
static struct thread_runtime *thread__get_runtime(struct thread *thread)
tools/perf/builtin-sched.c
958
tr = thread__priv(thread);
tools/perf/builtin-sched.c
960
tr = thread__init_runtime(thread);
tools/perf/builtin-sched.c
986
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
tools/perf/builtin-sched.c
990
struct work_atoms key = { .thread = thread };
tools/perf/builtin-script.c
1000
thread__find_map_fb(thread, sample->cpumode, to, &alt);
tools/perf/builtin-script.c
1017
struct thread *thread,
tools/perf/builtin-script.c
1036
thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
tools/perf/builtin-script.c
1037
thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
tools/perf/builtin-script.c
1055
struct thread *thread,
tools/perf/builtin-script.c
1074
if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
tools/perf/builtin-script.c
1078
if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
tools/perf/builtin-script.c
1098
struct machine *machine, struct thread *thread,
tools/perf/builtin-script.c
1136
if (!thread__find_map(thread, *cpumode, start, &al) || (dso = map__dso(al.map)) == NULL) {
tools/perf/builtin-script.c
1205
static int print_srccode(struct thread *thread, u8 cpumode, uint64_t addr)
tools/perf/builtin-script.c
1211
thread__find_map(thread, cpumode, addr, &al);
tools/perf/builtin-script.c
1215
thread__srccode_state(thread));
tools/perf/builtin-script.c
1229
int printed = fprintf_insn_asm(x->machine, x->thread, x->cpumode, x->is64bit,
tools/perf/builtin-script.c
1250
struct thread *thread,
tools/perf/builtin-script.c
1266
thread__find_map(thread, x->cpumode, ip, &al);
tools/perf/builtin-script.c
1310
static int ip__fprintf_sym(uint64_t addr, struct thread *thread,
tools/perf/builtin-script.c
1318
thread__find_map(thread, cpumode, addr, &al);
tools/perf/builtin-script.c
1352
struct thread *thread,
tools/perf/builtin-script.c
1373
x.thread = thread;
tools/perf/builtin-script.c
1385
machine, thread, &x.is64bit, &x.cpumode, false);
tools/perf/builtin-script.c
1387
printed += ip__fprintf_sym(entries[nr - 1].from, thread,
tools/perf/builtin-script.c
1391
evsel, thread, br_cntr);
tools/perf/builtin-script.c
1393
printed += print_srccode(thread, x.cpumode, entries[nr - 1].from);
tools/perf/builtin-script.c
1405
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, false);
tools/perf/builtin-script.c
1410
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, false);
tools/perf/builtin-script.c
1419
printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, evsel, fp);
tools/perf/builtin-script.c
1424
&total_cycles, evsel, thread, br_cntr);
tools/perf/builtin-script.c
1426
printed += print_srccode(thread, x.cpumode, ip);
tools/perf/builtin-script.c
1438
print_srccode(thread, x.cpumode, ip);
tools/perf/builtin-script.c
1469
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true);
tools/perf/builtin-script.c
1470
printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, evsel, fp);
tools/perf/builtin-script.c
1474
machine, thread, &x.is64bit, &x.cpumode, false);
tools/perf/builtin-script.c
1484
print_srccode(thread, x.cpumode, sample->ip);
tools/perf/builtin-script.c
1506
print_srccode(thread, x.cpumode, start + off);
tools/perf/builtin-script.c
1513
struct thread *thread,
tools/perf/builtin-script.c
1523
thread__resolve(thread, &al, sample);
tools/perf/builtin-script.c
1542
struct thread *thread,
tools/perf/builtin-script.c
1551
if (!addr_al->thread)
tools/perf/builtin-script.c
1552
thread__resolve(thread, addr_al, sample);
tools/perf/builtin-script.c
1571
struct thread *thread,
tools/perf/builtin-script.c
1576
size_t depth = thread_stack__depth(thread, sample->cpu);
tools/perf/builtin-script.c
1587
if (thread__ts(thread) && sample->flags & PERF_IP_FLAG_RETURN)
tools/perf/builtin-script.c
1590
name = resolve_branch_sym(sample, evsel, thread, al, addr_al, &ip);
tools/perf/builtin-script.c
1622
struct thread *thread,
tools/perf/builtin-script.c
1628
perf_sample__fetch_insn(sample, thread, machine);
tools/perf/builtin-script.c
1638
printed += sample__fprintf_insn_asm(sample, thread, machine, fp, al);
tools/perf/builtin-script.c
1641
printed += perf_sample__fprintf_brstackinsn(sample, evsel, thread, attr, machine, fp);
tools/perf/builtin-script.c
1662
struct thread *thread,
tools/perf/builtin-script.c
1673
printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, addr_al, fp);
tools/perf/builtin-script.c
1682
if (thread__resolve_callchain(al->thread, cursor, evsel,
tools/perf/builtin-script.c
1705
printed += perf_sample__fprintf_addr(sample, thread, evsel, fp);
tools/perf/builtin-script.c
1713
printed += perf_sample__fprintf_insn(sample, evsel, attr, thread, machine, fp, al);
tools/perf/builtin-script.c
1717
thread__srccode_state(thread));
tools/perf/builtin-script.c
2093
struct thread *thread;
tools/perf/builtin-script.c
2107
perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
tools/perf/builtin-script.c
2122
perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
tools/perf/builtin-script.c
2276
static void perf_sample__fprint_metric(struct thread *thread,
tools/perf/builtin-script.c
2287
.thread = thread,
tools/perf/builtin-script.c
2376
struct thread *thread,
tools/perf/builtin-script.c
2380
int depth = thread_stack__depth(thread, sample->cpu);
tools/perf/builtin-script.c
2385
if (thread__filter(thread)) {
tools/perf/builtin-script.c
2386
if (depth <= thread__filter_entry_depth(thread)) {
tools/perf/builtin-script.c
2387
thread__set_filter(thread, false);
tools/perf/builtin-script.c
2394
const char *name = resolve_branch_sym(sample, evsel, thread, al, addr_al,
tools/perf/builtin-script.c
2404
thread__set_filter(thread, true);
tools/perf/builtin-script.c
2405
thread__set_filter_entry_depth(thread, depth);
tools/perf/builtin-script.c
2422
struct thread *thread = al->thread;
tools/perf/builtin-script.c
2435
perf_sample__fprintf_start(script, sample, thread, evsel,
tools/perf/builtin-script.c
2454
perf_sample__fprintf_bts(sample, evsel, thread, al, addr_al, machine, fp);
tools/perf/builtin-script.c
2472
perf_sample__fprintf_addr(sample, thread, evsel, fp);
tools/perf/builtin-script.c
2501
thread__set_lbr_stitch_enable(al->thread, true);
tools/perf/builtin-script.c
2505
if (thread__resolve_callchain(al->thread, cursor, evsel,
tools/perf/builtin-script.c
2517
thread__e_machine(thread, machine, &e_flags),
tools/perf/builtin-script.c
2524
thread__e_machine(thread, machine, &e_flags),
tools/perf/builtin-script.c
2530
perf_sample__fprintf_brstack(sample, thread, evsel, fp);
tools/perf/builtin-script.c
2532
perf_sample__fprintf_brstacksym(sample, thread, evsel, fp);
tools/perf/builtin-script.c
2534
perf_sample__fprintf_brstackoff(sample, thread, evsel, fp);
tools/perf/builtin-script.c
2538
perf_sample__fprintf_insn(sample, evsel, attr, thread, machine, fp, al);
tools/perf/builtin-script.c
2555
thread__srccode_state(thread)))
tools/perf/builtin-script.c
2560
perf_sample__fprint_metric(thread, evsel, sample, fp);
tools/perf/builtin-script.c
2571
int idx, thread;
tools/perf/builtin-script.c
2581
for (thread = 0; thread < nthreads; thread++) {
tools/perf/builtin-script.c
2585
counts = perf_counts(counter->counts, idx, thread);
tools/perf/builtin-script.c
2589
perf_thread_map__pid(counter->core.threads, thread),
tools/perf/builtin-script.c
2681
if (!al.thread && machine__resolve(machine, &al, sample) < 0) {
tools/perf/builtin-script.c
2691
if (!show_event(sample, evsel, al.thread, &al, &addr_al))
tools/perf/builtin-script.c
2709
if (!addr_al.thread)
tools/perf/builtin-script.c
2710
thread__resolve(al.thread, &addr_al, sample);
tools/perf/builtin-script.c
2773
if (!show_event(sample, evsel, al.thread, &al, NULL))
tools/perf/builtin-script.c
2779
perf_sample__fprintf_start(scr, sample, al.thread, evsel,
tools/perf/builtin-script.c
2789
if (thread__resolve_callchain(al.thread, cursor, evsel,
tools/perf/builtin-script.c
2892
struct thread *thread = NULL;
tools/perf/builtin-script.c
2905
thread = machine__findnew_thread(machine, pid, tid);
tools/perf/builtin-script.c
2908
perf_sample__fprintf_start(script, sample, thread, evsel,
tools/perf/builtin-script.c
2914
thread__put(thread);
tools/perf/builtin-script.c
818
struct thread *thread,
tools/perf/builtin-script.c
848
const char *comm = thread ? thread__comm_str(thread) : ":-1";
tools/perf/builtin-script.c
978
struct thread *thread,
tools/perf/builtin-script.c
999
thread__find_map_fb(thread, sample->cpumode, from, &alf);
tools/perf/builtin-stat.c
163
bool node, socket, die, cluster, cache, core, thread, no_aggr;
tools/perf/builtin-stat.c
183
if (opt_mode->thread)
tools/perf/builtin-stat.c
2514
OPT_BOOLEAN(0, "per-thread", &opt_mode.thread, "aggregate counts per thread"),
tools/perf/builtin-stat.c
271
static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread,
tools/perf/builtin-stat.c
274
struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread);
tools/perf/builtin-stat.c
277
return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
tools/perf/builtin-stat.c
281
static int read_single_counter(struct evsel *counter, int cpu_map_idx, int thread)
tools/perf/builtin-stat.c
283
int err = evsel__read_counter(counter, cpu_map_idx, thread);
tools/perf/builtin-stat.c
293
perf_counts(counter->counts, cpu_map_idx, thread);
tools/perf/builtin-stat.c
298
old_count = perf_counts(counter->prev_raw_counts, cpu_map_idx, thread);
tools/perf/builtin-stat.c
325
int thread;
tools/perf/builtin-stat.c
330
for (thread = 0; thread < nthreads; thread++) {
tools/perf/builtin-stat.c
333
count = perf_counts(counter->counts, cpu_map_idx, thread);
tools/perf/builtin-stat.c
339
if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
tools/perf/builtin-stat.c
340
read_single_counter(counter, cpu_map_idx, thread)) {
tools/perf/builtin-stat.c
342
perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
tools/perf/builtin-stat.c
343
perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
tools/perf/builtin-stat.c
347
perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false);
tools/perf/builtin-stat.c
350
if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) {
tools/perf/builtin-timechart.c
555
if (thread__find_symbol(al.thread, cpumode, ip, &tal))
tools/perf/builtin-top.c
1253
pthread_t thread, thread_process;
tools/perf/builtin-top.c
1343
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
tools/perf/builtin-top.c
1380
pthread_join(thread, NULL);
tools/perf/builtin-top.c
782
thread__set_lbr_stitch_enable(al.thread, true);
tools/perf/builtin-trace.c
1616
static struct thread_trace *thread__trace(struct thread *thread, struct trace *trace)
tools/perf/builtin-trace.c
162
struct thread *current;
tools/perf/builtin-trace.c
1620
if (thread == NULL)
tools/perf/builtin-trace.c
1623
if (thread__priv(thread) == NULL)
tools/perf/builtin-trace.c
1624
thread__set_priv(thread, thread_trace__new(trace));
tools/perf/builtin-trace.c
1626
if (thread__priv(thread) == NULL)
tools/perf/builtin-trace.c
1629
ttrace = thread__priv(thread);
tools/perf/builtin-trace.c
1643
struct thread_trace *ttrace = thread__priv(arg->thread);
tools/perf/builtin-trace.c
1689
struct file *thread__files_entry(struct thread *thread, int fd)
tools/perf/builtin-trace.c
1691
return thread_trace__files_entry(thread__priv(thread), fd);
tools/perf/builtin-trace.c
1694
static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
tools/perf/builtin-trace.c
1696
struct thread_trace *ttrace = thread__priv(thread);
tools/perf/builtin-trace.c
1712
static int thread__read_fd_path(struct thread *thread, int fd)
tools/perf/builtin-trace.c
1718
if (thread__pid(thread) == thread__tid(thread)) {
tools/perf/builtin-trace.c
1720
"/proc/%d/fd/%d", thread__pid(thread), fd);
tools/perf/builtin-trace.c
1724
thread__pid(thread), thread__tid(thread), fd);
tools/perf/builtin-trace.c
1736
return trace__set_fd_pathname(thread, fd, pathname);
tools/perf/builtin-trace.c
1739
static const char *thread__fd_path(struct thread *thread, int fd,
tools/perf/builtin-trace.c
1742
struct thread_trace *ttrace = thread__priv(thread);
tools/perf/builtin-trace.c
1754
if (thread__read_fd_path(thread, fd))
tools/perf/builtin-trace.c
1765
const char *path = thread__fd_path(arg->thread, fd, arg->trace);
tools/perf/builtin-trace.c
1776
struct thread *thread = machine__find_thread(trace->host, pid, pid);
tools/perf/builtin-trace.c
1778
if (thread) {
tools/perf/builtin-trace.c
1779
const char *path = thread__fd_path(thread, fd, trace);
tools/perf/builtin-trace.c
1784
thread__put(thread);
tools/perf/builtin-trace.c
1795
struct thread_trace *ttrace = thread__priv(arg->thread);
tools/perf/builtin-trace.c
1803
static void thread__set_filename_pos(struct thread *thread, const char *bf,
tools/perf/builtin-trace.c
1806
struct thread_trace *ttrace = thread__priv(thread);
tools/perf/builtin-trace.c
1839
thread__set_filename_pos(arg->thread, bf, ptr);
tools/perf/builtin-trace.c
1911
static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
tools/perf/builtin-trace.c
1917
printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
tools/perf/builtin-trace.c
1918
printed += fprintf(fp, "%d ", thread__tid(thread));
tools/perf/builtin-trace.c
1924
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
tools/perf/builtin-trace.c
1933
return printed + trace__fprintf_comm_tid(trace, thread, fp);
tools/perf/builtin-trace.c
2407
struct trace *trace, struct thread *thread)
tools/perf/builtin-trace.c
2421
.thread = thread,
tools/perf/builtin-trace.c
2424
struct thread_trace *ttrace = thread__priv(thread);
tools/perf/builtin-trace.c
2635
static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
tools/perf/builtin-trace.c
2680
thread__comm_str(thread), thread__pid(thread),
tools/perf/builtin-trace.c
2681
thread__tid(thread));
tools/perf/builtin-trace.c
2722
struct perf_sample *sample, struct thread *thread)
tools/perf/builtin-trace.c
2731
thread__comm_str(thread),
tools/perf/builtin-trace.c
2784
struct thread *thread;
tools/perf/builtin-trace.c
2791
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
tools/perf/builtin-trace.c
2792
e_machine = thread__e_machine(thread, trace->host, /*e_flags=*/NULL);
tools/perf/builtin-trace.c
2796
ttrace = thread__trace(thread, trace);
tools/perf/builtin-trace.c
2800
trace__fprintf_sample(trace, evsel, sample, thread);
tools/perf/builtin-trace.c
2829
args, augmented_args, augmented_args_size, trace, thread);
tools/perf/builtin-trace.c
2835
trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
tools/perf/builtin-trace.c
2847
if (trace->current != thread) {
tools/perf/builtin-trace.c
2849
trace->current = thread__get(thread);
tools/perf/builtin-trace.c
2853
thread__put(thread);
tools/perf/builtin-trace.c
2861
struct thread *thread;
tools/perf/builtin-trace.c
2870
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
tools/perf/builtin-trace.c
2871
e_machine = thread__e_machine(thread, trace->host, /*e_flags=*/NULL);
tools/perf/builtin-trace.c
2875
ttrace = thread__trace(thread, trace);
tools/perf/builtin-trace.c
2885
printed += syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
tools/perf/builtin-trace.c
2889
thread__put(thread);
tools/perf/builtin-trace.c
2907
err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
tools/perf/builtin-trace.c
2930
struct thread *thread;
tools/perf/builtin-trace.c
2936
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
tools/perf/builtin-trace.c
2937
e_machine = thread__e_machine(thread, trace->host, /*e_flags=*/NULL);
tools/perf/builtin-trace.c
2941
ttrace = thread__trace(thread, trace);
tools/perf/builtin-trace.c
2945
trace__fprintf_sample(trace, evsel, sample, thread);
tools/perf/builtin-trace.c
2950
thread__update_stats(thread, ttrace, id, sample, ret, trace);
tools/perf/builtin-trace.c
2953
trace__set_fd_pathname(thread, ret, ttrace->filename.name);
tools/perf/builtin-trace.c
2980
trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
tools/perf/builtin-trace.c
3020
.thread = thread,
tools/perf/builtin-trace.c
3029
struct thread *child = machine__find_thread(trace->host, ret, ret);
tools/perf/builtin-trace.c
3057
thread__put(thread);
tools/perf/builtin-trace.c
3065
struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
tools/perf/builtin-trace.c
3072
if (!thread)
tools/perf/builtin-trace.c
3075
ttrace = thread__priv(thread);
tools/perf/builtin-trace.c
3117
thread__put(thread);
tools/perf/builtin-trace.c
3128
struct thread *thread = machine__findnew_thread(trace->host,
tools/perf/builtin-trace.c
3131
struct thread_trace *ttrace = thread__trace(thread, trace);
tools/perf/builtin-trace.c
3139
thread__put(thread);
tools/perf/builtin-trace.c
3185
struct thread *thread, void *augmented_args, int augmented_args_size)
tools/perf/builtin-trace.c
3203
.thread = thread,
tools/perf/builtin-trace.c
3258
struct thread *thread;
tools/perf/builtin-trace.c
3264
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
tools/perf/builtin-trace.c
3283
if (thread)
tools/perf/builtin-trace.c
3284
trace__fprintf_comm_tid(trace, thread, trace->output);
tools/perf/builtin-trace.c
3288
int e_machine = thread
tools/perf/builtin-trace.c
3289
? thread__e_machine(thread, trace->host, /*e_flags=*/NULL)
tools/perf/builtin-trace.c
3321
trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
tools/perf/builtin-trace.c
3341
thread__put(thread);
tools/perf/builtin-trace.c
3367
struct thread *thread;
tools/perf/builtin-trace.c
3375
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
tools/perf/builtin-trace.c
3388
ttrace = thread__trace(thread, trace);
tools/perf/builtin-trace.c
3403
thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
tools/perf/builtin-trace.c
3405
trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
tools/perf/builtin-trace.c
3415
thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
tools/perf/builtin-trace.c
3418
thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
tools/perf/builtin-trace.c
3439
thread__put(thread);
tools/perf/builtin-trace.c
3468
struct thread *thread;
tools/perf/builtin-trace.c
3473
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
tools/perf/builtin-trace.c
3474
if (thread && thread__is_filtered(thread))
tools/perf/builtin-trace.c
3484
thread__put(thread);
tools/perf/builtin-trace.c
4121
struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
tools/perf/builtin-trace.c
4123
while (thread && nr < ARRAY_SIZE(pids)) {
tools/perf/builtin-trace.c
4124
struct thread *parent = machine__find_thread(trace->host,
tools/perf/builtin-trace.c
4125
thread__ppid(thread),
tools/perf/builtin-trace.c
4126
thread__ppid(thread));
tools/perf/builtin-trace.c
4137
thread__put(thread);
tools/perf/builtin-trace.c
4138
thread = parent;
tools/perf/builtin-trace.c
4140
thread__put(thread);
tools/perf/builtin-trace.c
4917
static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
tools/perf/builtin-trace.c
4920
struct thread_trace *ttrace = thread__priv(thread);
tools/perf/builtin-trace.c
4921
int e_machine = thread__e_machine(thread, trace->host, /*e_flags=*/NULL);
tools/perf/builtin-trace.c
4929
printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread__tid(thread));
tools/perf/builtin-trace.c
4957
unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread));
tools/perf/builtin-trace.c
4958
unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread));
tools/perf/builtin-trace.c
4964
return thread__tid(a->thread) < thread__tid(b->thread)
tools/perf/builtin-trace.c
4966
: (thread__tid(a->thread) > thread__tid(b->thread) ? 1 : 0);
tools/perf/builtin-trace.c
4980
printed += trace__fprintf_thread(fp, pos->thread, trace);
tools/perf/scripts/python/Perf-Trace-Util/Context.c
93
if (c->sample->ip && !c->sample->insn_len && thread__maps(c->al->thread)) {
tools/perf/scripts/python/Perf-Trace-Util/Context.c
94
struct machine *machine = maps__machine(thread__maps(c->al->thread));
tools/perf/scripts/python/Perf-Trace-Util/Context.c
96
perf_sample__fetch_insn(c->sample, c->al->thread, machine);
tools/perf/tests/code-reading.c
378
struct thread *thread,
tools/perf/tests/code-reading.c
396
if (!thread__find_map(thread, cpumode, addr, &al) || !map__dso(al.map)) {
tools/perf/tests/code-reading.c
446
ret_len = dso__data_read_offset(dso, maps__machine(thread__maps(thread)),
tools/perf/tests/code-reading.c
534
struct thread *thread;
tools/perf/tests/code-reading.c
545
thread = machine__findnew_thread(machine, sample.pid, sample.tid);
tools/perf/tests/code-reading.c
546
if (!thread) {
tools/perf/tests/code-reading.c
552
ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread,
tools/perf/tests/code-reading.c
554
thread__put(thread);
tools/perf/tests/code-reading.c
687
struct thread *thread;
tools/perf/tests/code-reading.c
759
thread = machine__findnew_thread(machine, pid, pid);
tools/perf/tests/code-reading.c
760
if (!thread) {
tools/perf/tests/code-reading.c
844
thread__put(thread);
tools/perf/tests/dwarf-unwind.c
101
if (test__arch_unwind_sample(&sample, thread)) {
tools/perf/tests/dwarf-unwind.c
106
err = unwind__get_entries(unwind_entry, &cnt, thread,
tools/perf/tests/dwarf-unwind.c
128
struct thread *thread = *(struct thread **)p1;
tools/perf/tests/dwarf-unwind.c
134
global_unwind_retval = test_dwarf_unwind__thread(thread);
tools/perf/tests/dwarf-unwind.c
137
global_unwind_retval = test_dwarf_unwind__thread(thread);
tools/perf/tests/dwarf-unwind.c
144
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)
tools/perf/tests/dwarf-unwind.c
146
struct thread *array[2] = {thread, thread};
tools/perf/tests/dwarf-unwind.c
158
_bsearch(array, &thread, 2, sizeof(struct thread **),
tools/perf/tests/dwarf-unwind.c
163
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)
tools/perf/tests/dwarf-unwind.c
167
ret = test_dwarf_unwind__krava_3(thread);
tools/perf/tests/dwarf-unwind.c
172
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)
tools/perf/tests/dwarf-unwind.c
176
ret = test_dwarf_unwind__krava_2(thread);
tools/perf/tests/dwarf-unwind.c
186
struct thread *thread;
tools/perf/tests/dwarf-unwind.c
208
thread = machine__find_thread(machine, pid, pid);
tools/perf/tests/dwarf-unwind.c
209
if (!thread) {
tools/perf/tests/dwarf-unwind.c
214
err = test_dwarf_unwind__krava_1(thread);
tools/perf/tests/dwarf-unwind.c
215
thread__put(thread);
tools/perf/tests/dwarf-unwind.c
47
int test_dwarf_unwind__thread(struct thread *thread);
tools/perf/tests/dwarf-unwind.c
49
int test_dwarf_unwind__krava_3(struct thread *thread);
tools/perf/tests/dwarf-unwind.c
50
int test_dwarf_unwind__krava_2(struct thread *thread);
tools/perf/tests/dwarf-unwind.c
51
int test_dwarf_unwind__krava_1(struct thread *thread);
tools/perf/tests/dwarf-unwind.c
94
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)
tools/perf/tests/hists_common.c
100
if (thread == NULL)
tools/perf/tests/hists_common.c
103
thread__set_comm(thread, fake_threads[i].comm, 0);
tools/perf/tests/hists_common.c
104
thread__put(thread);
tools/perf/tests/hists_common.c
185
i, thread__comm_str(he->thread),
tools/perf/tests/hists_common.c
214
i, thread__comm_str(he->thread), thread__tid(he->thread),
tools/perf/tests/hists_common.c
96
struct thread *thread;
tools/perf/tests/hists_common.c
98
thread = machine__findnew_thread(machine, fake_threads[i].pid,
tools/perf/tests/hists_cumulate.c
114
thread__put(fake_samples[i].thread);
tools/perf/tests/hists_cumulate.c
115
fake_samples[i].thread = thread__get(al.thread);
tools/perf/tests/hists_cumulate.c
160
thread__zput(fake_samples[i].thread);
tools/perf/tests/hists_cumulate.c
166
#define COMM(he) (thread__comm_str(he->thread))
tools/perf/tests/hists_cumulate.c
20
struct thread *thread;
tools/perf/tests/hists_filter.c
180
hists->thread_filter = fake_samples[9].thread;
tools/perf/tests/hists_filter.c
19
struct thread *thread;
tools/perf/tests/hists_filter.c
302
hists->thread_filter = fake_samples[1].thread;
tools/perf/tests/hists_filter.c
92
thread__put(fake_samples[i].thread);
tools/perf/tests/hists_filter.c
93
fake_samples[i].thread = thread__get(al.thread);
tools/perf/tests/hists_link.c
118
thread__put(fake_samples[i][k].thread);
tools/perf/tests/hists_link.c
119
fake_samples[i][k].thread = thread__get(al.thread);
tools/perf/tests/hists_link.c
148
struct thread *t, struct map *m, struct symbol *s)
tools/perf/tests/hists_link.c
151
if (RC_CHK_EQUAL(samples->thread, t) &&
tools/perf/tests/hists_link.c
183
he->thread, he->ms.map, he->ms.sym)) {
tools/perf/tests/hists_link.c
20
struct thread *thread;
tools/perf/tests/hists_link.c
235
he->thread, he->ms.map, he->ms.sym) &&
tools/perf/tests/hists_link.c
238
he->thread, he->ms.map, he->ms.sym)) {
tools/perf/tests/hists_link.c
98
thread__put(fake_common_samples[k].thread);
tools/perf/tests/hists_link.c
99
fake_common_samples[k].thread = thread__get(al.thread);
tools/perf/tests/hists_output.c
131
#define COMM(he) (thread__comm_str(he->thread))
tools/perf/tests/hists_output.c
135
#define PID(he) (thread__tid(he->thread))
tools/perf/tests/hists_output.c
21
struct thread *thread;
tools/perf/tests/hists_output.c
80
fake_samples[i].thread = al.thread;
tools/perf/tests/mmap-thread-lookup.c
191
struct thread *thread;
tools/perf/tests/mmap-thread-lookup.c
194
thread = machine__findnew_thread(machine, getpid(), td->tid);
tools/perf/tests/mmap-thread-lookup.c
198
thread__find_map(thread, PERF_RECORD_MISC_USER,
tools/perf/tests/mmap-thread-lookup.c
201
thread__put(thread);
tools/perf/tests/stat.c
75
TEST_ASSERT_VAL("wrong thread", st->thread == 2);
tools/perf/tests/symbols.c
106
PROT_EXEC, /*flags=*/0, filename, ti->thread);
tools/perf/tests/symbols.c
19
struct thread *thread;
tools/perf/tests/symbols.c
33
ti->thread = machine__findnew_thread(ti->machine, 100, 100);
tools/perf/tests/symbols.c
34
if (!ti->thread) {
tools/perf/tests/symbols.c
45
thread__put(ti->thread);
tools/perf/tests/tests.h
205
struct thread;
tools/perf/tests/tests.h
208
struct thread *thread);
tools/perf/tests/thread-maps-share.c
13
struct thread *leader;
tools/perf/tests/thread-maps-share.c
14
struct thread *t1, *t2, *t3;
tools/perf/tests/thread-maps-share.c
18
struct thread *other, *other_leader;
tools/perf/trace/beauty/beauty.h
113
struct thread *thread;
tools/perf/trace/beauty/beauty.h
38
struct thread;
tools/perf/trace/beauty/beauty.h
45
struct file *thread__files_entry(struct thread *thread, int fd);
tools/perf/trace/beauty/ioctl.c
179
struct file *file = thread__files_entry(arg->thread, fd);
tools/perf/trace/beauty/pid.c
10
if (thread != NULL) {
tools/perf/trace/beauty/pid.c
11
if (!thread__comm_set(thread))
tools/perf/trace/beauty/pid.c
12
thread__set_comm_from_proc(thread);
tools/perf/trace/beauty/pid.c
14
if (thread__comm_set(thread))
tools/perf/trace/beauty/pid.c
16
" (%s)", thread__comm_str(thread));
tools/perf/trace/beauty/pid.c
17
thread__put(thread);
tools/perf/trace/beauty/pid.c
8
struct thread *thread = machine__findnew_thread(trace->host, pid, pid);
tools/perf/ui/browsers/annotate.c
1201
err = thread__get_arch(ms->thread, &browser.arch);
tools/perf/ui/browsers/annotate.c
1211
annotate_he.thread = thread__get(he->thread);
tools/perf/ui/browsers/annotate.c
1264
thread__zput(annotate_he.thread);
tools/perf/ui/browsers/annotate.c
604
target_ms.thread = ms->thread;
tools/perf/ui/browsers/hists.c
2317
static struct thread *hist_browser__selected_thread(struct hist_browser *browser)
tools/perf/ui/browsers/hists.c
2319
return browser->he_selection->thread;
tools/perf/ui/browsers/hists.c
2458
struct thread *thread;
tools/perf/ui/browsers/hists.c
2575
struct thread *thread = act->thread;
tools/perf/ui/browsers/hists.c
2577
if ((!hists__has(browser->hists, thread) &&
tools/perf/ui/browsers/hists.c
2578
!hists__has(browser->hists, comm)) || thread == NULL)
tools/perf/ui/browsers/hists.c
2588
thread__comm_set(thread) ? thread__comm_str(thread) : "";
tools/perf/ui/browsers/hists.c
2590
if (hists__has(browser->hists, thread)) {
tools/perf/ui/browsers/hists.c
2592
comm_set_str, thread__tid(thread));
tools/perf/ui/browsers/hists.c
2598
browser->hists->thread_filter = thread__get(thread);
tools/perf/ui/browsers/hists.c
2610
char **optstr, struct thread *thread)
tools/perf/ui/browsers/hists.c
2615
if ((!hists__has(browser->hists, thread) &&
tools/perf/ui/browsers/hists.c
2616
!hists__has(browser->hists, comm)) || thread == NULL)
tools/perf/ui/browsers/hists.c
2620
comm_set_str = thread__comm_set(thread) ? thread__comm_str(thread) : "";
tools/perf/ui/browsers/hists.c
2621
if (hists__has(browser->hists, thread)) {
tools/perf/ui/browsers/hists.c
2623
in_out, comm_set_str, thread__tid(thread));
tools/perf/ui/browsers/hists.c
2630
act->thread = thread;
tools/perf/ui/browsers/hists.c
2736
if (act->thread)
tools/perf/ui/browsers/hists.c
2737
len += strlen(thread__comm_str(act->thread));
tools/perf/ui/browsers/hists.c
2745
if (act->thread) {
tools/perf/ui/browsers/hists.c
2747
thread__comm_str(act->thread));
tools/perf/ui/browsers/hists.c
2785
struct thread *thread, struct symbol *sym,
tools/perf/ui/browsers/hists.c
2789
if (thread) {
tools/perf/ui/browsers/hists.c
2791
thread__comm_str(thread), tstr) < 0)
tools/perf/ui/browsers/hists.c
2802
act->thread = thread;
tools/perf/ui/browsers/hists.c
2811
struct thread *thread, struct symbol *sym)
tools/perf/ui/browsers/hists.c
2816
n = add_script_opt_2(act, optstr, thread, sym, "");
tools/perf/ui/browsers/hists.c
2830
n += add_script_opt_2(act, optstr, thread, sym, tstr);
tools/perf/ui/browsers/hists.c
3083
struct thread *thread = NULL;
tools/perf/ui/browsers/hists.c
3094
thread = hist_browser__selected_thread(browser);
tools/perf/ui/browsers/hists.c
3192
maps__machine(thread__maps(browser->selection->thread)
tools/perf/ui/browsers/hists.c
3202
actions->thread = thread;
tools/perf/ui/browsers/hists.c
3222
actions->thread = NULL;
tools/perf/ui/browsers/hists.c
3305
actions->thread = thread;
tools/perf/ui/browsers/hists.c
3372
&options[nr_options], thread);
tools/perf/ui/browsers/hists.c
3388
if (hists__has(hists, thread) && thread) {
tools/perf/ui/browsers/hists.c
3392
thread, NULL);
tools/perf/ui/hist.c
330
if ((a->thread == NULL ? NULL : RC_CHK_ACCESS(a->thread)) !=
tools/perf/ui/hist.c
331
(b->thread == NULL ? NULL : RC_CHK_ACCESS(b->thread)) ||
tools/perf/ui/stdio/hist.c
905
maps__fprintf(thread__maps(h->thread), fp);
tools/perf/util/addr_location.c
31
thread__zput(al->thread);
tools/perf/util/addr_location.c
36
thread__put(dst->thread);
tools/perf/util/addr_location.c
39
dst->thread = thread__get(src->thread);
tools/perf/util/addr_location.c
9
al->thread = NULL;
tools/perf/util/addr_location.h
13
struct thread *thread;
tools/perf/util/addr_location.h
7
struct thread;
tools/perf/util/annotate-arch/annotate-loongarch.c
54
if (maps__find_ams(thread__maps(ms->thread), &target) == 0 &&
tools/perf/util/annotate-arch/annotate-loongarch.c
98
if (maps__find_ams(thread__maps(ms->thread), &target) == 0 &&
tools/perf/util/annotate-arch/annotate-s390.c
53
if (maps__find_ams(thread__maps(ms->thread), &target) == 0 &&
tools/perf/util/annotate-data.c
732
sym = thread__find_symbol_fb(dloc->thread, dloc->cpumode,
tools/perf/util/annotate-data.h
121
struct thread *thread;
tools/perf/util/annotate-data.h
22
struct thread;
tools/perf/util/annotate.c
1022
err = thread__get_arch(ms->thread, &arch);
tools/perf/util/annotate.c
1270
thread__get_arch(ms->thread, &apd.arch);
tools/perf/util/annotate.c
1375
thread__get_arch(apd->he->ms.thread, &apd->arch);
tools/perf/util/annotate.c
2833
.thread = he->thread,
tools/perf/util/annotate.c
986
int thread__get_arch(struct thread *thread, const struct arch **parch)
tools/perf/util/annotate.c
993
if (!thread) {
tools/perf/util/annotate.c
998
machine = maps__machine(thread__maps(thread));
tools/perf/util/annotate.c
999
e_machine = thread__e_machine(thread, machine, &e_flags);
tools/perf/util/annotate.h
589
int thread__get_arch(struct thread *thread, const struct arch **parch);
tools/perf/util/arm-spe.c
119
struct thread *thread;
tools/perf/util/arm-spe.c
1579
thread__zput(speq->thread);
tools/perf/util/arm-spe.c
294
thread__zput(speq->thread);
tools/perf/util/arm-spe.c
298
if ((!speq->thread) && (speq->tid != -1)) {
tools/perf/util/arm-spe.c
299
speq->thread = machine__find_thread(spe->machine, -1,
tools/perf/util/arm-spe.c
303
if (speq->thread) {
tools/perf/util/arm-spe.c
304
speq->pid = thread__pid(speq->thread);
tools/perf/util/arm-spe.c
306
speq->cpu = thread__cpu(speq->thread);
tools/perf/util/arm64-frame-pointer-unwind-support.c
38
u64 get_leaf_frame_caller_aarch64(struct perf_sample *sample, struct thread *thread, int usr_idx)
tools/perf/util/arm64-frame-pointer-unwind-support.c
64
ret = unwind__get_entries(add_entry, &entries, thread, sample, 2, true);
tools/perf/util/arm64-frame-pointer-unwind-support.h
10
u64 get_leaf_frame_caller_aarch64(struct perf_sample *sample, struct thread *thread, int user_idx);
tools/perf/util/arm64-frame-pointer-unwind-support.h
8
struct thread;
tools/perf/util/bpf_lock_contention.c
579
struct thread *t = machine__findnew_thread(machine, /*pid=*/-1, pid);
tools/perf/util/bpf_lock_contention.c
743
struct thread *idle = machine__findnew_thread(machine,
tools/perf/util/build-id.c
62
struct thread *thread = machine__findnew_thread(machine, sample->pid,
tools/perf/util/build-id.c
65
if (thread == NULL) {
tools/perf/util/build-id.c
72
if (thread__find_map(thread, sample->cpumode, sample->ip, &al))
tools/perf/util/build-id.c
77
sample__for_each_callchain_node(thread, evsel, sample, PERF_MAX_STACK_DEPTH,
tools/perf/util/build-id.c
81
thread__put(thread);
tools/perf/util/callchain.c
1046
.thread = thread__get(list->ms.thread),
tools/perf/util/callchain.c
1134
return thread__resolve_callchain(al->thread, cursor, evsel, sample,
tools/perf/util/callchain.c
1153
if (node->ms.thread)
tools/perf/util/callchain.c
1154
machine = maps__machine(thread__maps(node->ms.thread));
tools/perf/util/callchain.c
1168
if (maps__equal(thread__maps(al->thread), machine__kernel_maps(machine))) {
tools/perf/util/callchain.c
1809
int sample__for_each_callchain_node(struct thread *thread, struct evsel *evsel,
tools/perf/util/callchain.c
1820
ret = __thread__resolve_callchain(thread, cursor, evsel, sample,
tools/perf/util/callchain.h
15
struct thread;
tools/perf/util/callchain.h
277
int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain);
tools/perf/util/callchain.h
279
static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
tools/perf/util/callchain.h
317
int sample__for_each_callchain_node(struct thread *thread, struct evsel *evsel,
tools/perf/util/capstone.c
188
static size_t print_insn_x86(struct thread *thread, u8 cpumode, struct cs_insn *insn,
tools/perf/util/capstone.c
199
thread__find_symbol(thread, cpumode, op->imm, &al)) {
tools/perf/util/capstone.c
216
struct thread *thread __maybe_unused,
tools/perf/util/capstone.c
237
printed = print_insn_x86(thread, cpumode, &insn[0], print_opts, fp);
tools/perf/util/capstone.c
376
if (capstone_init(maps__machine(thread__maps(args->ms->thread)), &handle, is_64bit,
tools/perf/util/capstone.c
487
if (capstone_init(maps__machine(thread__maps(args->ms->thread)), &handle, is_64bit,
tools/perf/util/capstone.h
15
struct thread;
tools/perf/util/capstone.h
18
ssize_t capstone__fprintf_insn_asm(struct machine *machine, struct thread *thread, u8 cpumode,
tools/perf/util/capstone.h
28
struct thread *thread __maybe_unused,
tools/perf/util/counts.h
20
perf_counts(struct perf_counts *counts, int cpu_map_idx, int thread)
tools/perf/util/counts.h
22
return xyarray__entry(counts->values, cpu_map_idx, thread);
tools/perf/util/counts.h
26
perf_counts__is_loaded(struct perf_counts *counts, int cpu_map_idx, int thread)
tools/perf/util/counts.h
28
return *((bool *) xyarray__entry(counts->loaded, cpu_map_idx, thread));
tools/perf/util/counts.h
32
perf_counts__set_loaded(struct perf_counts *counts, int cpu_map_idx, int thread, bool loaded)
tools/perf/util/counts.h
34
*((bool *) xyarray__entry(counts->loaded, cpu_map_idx, thread)) = loaded;
tools/perf/util/cs-etm.c
1126
if (!thread__find_map(tidq->thread, cpumode, address, &al))
tools/perf/util/cs-etm.c
1141
len = dso__data_read_offset(dso, maps__machine(thread__maps(tidq->thread)),
tools/perf/util/cs-etm.c
1482
thread__zput(tidq->thread);
tools/perf/util/cs-etm.c
1483
tidq->thread = machine__find_thread(machine, -1, tid);
tools/perf/util/cs-etm.c
1487
if (!tidq->thread)
tools/perf/util/cs-etm.c
1488
tidq->thread = machine__idle_thread(machine);
tools/perf/util/cs-etm.c
1580
sample.pid = thread__pid(tidq->thread);
tools/perf/util/cs-etm.c
1581
sample.tid = thread__tid(tidq->thread);
tools/perf/util/cs-etm.c
2605
if (tid == -1 || thread__tid(tidq->thread) == tid)
tools/perf/util/cs-etm.c
2747
struct thread *th;
tools/perf/util/cs-etm.c
2772
struct thread *th;
tools/perf/util/cs-etm.c
618
tidq->thread = machine__findnew_thread(&etm->session->machines.host, -1,
tools/perf/util/cs-etm.c
767
tidq->prev_packet_thread = thread__get(tidq->thread);
tools/perf/util/cs-etm.c
88
struct thread *thread;
tools/perf/util/cs-etm.c
89
struct thread *prev_packet_thread;
tools/perf/util/cs-etm.c
940
thread__zput(tidq->thread);
tools/perf/util/data-convert-json.c
192
output_json_key_format(out, true, 3, "pid", "%i", thread__pid(al.thread));
tools/perf/util/data-convert-json.c
193
output_json_key_format(out, true, 3, "tid", "%i", thread__tid(al.thread));
tools/perf/util/data-convert-json.c
197
else if (thread__cpu(al.thread) >= 0)
tools/perf/util/data-convert-json.c
198
output_json_key_format(out, true, 3, "cpu", "%i", thread__cpu(al.thread));
tools/perf/util/data-convert-json.c
200
output_json_key_string(out, true, 3, "comm", thread__comm_str(al.thread));
tools/perf/util/data-convert-json.c
237
ok = thread__find_symbol(al.thread, cpumode, ip, &tal);
tools/perf/util/db-export.c
109
struct thread *main_thread)
tools/perf/util/db-export.c
134
struct thread *thread)
tools/perf/util/db-export.c
141
return dbe->export_comm_thread(dbe, db_id, comm, thread);
tools/perf/util/db-export.c
184
err = db_export__dso(dbe, dso, maps__machine(thread__maps(al->thread)));
tools/perf/util/db-export.c
211
struct thread *thread,
tools/perf/util/db-export.c
231
err = thread__resolve_callchain(thread, cursor, evsel,
tools/perf/util/db-export.c
258
al.thread = thread__get(thread);
tools/perf/util/db-export.c
294
static int db_export__threads(struct db_export *dbe, struct thread *thread,
tools/perf/util/db-export.c
295
struct thread *main_thread,
tools/perf/util/db-export.c
323
if (thread != main_thread) {
tools/perf/util/db-export.c
328
bool export_comm_thread = comm && !thread__db_id(thread);
tools/perf/util/db-export.c
330
err = db_export__thread(dbe, thread, machine, main_thread);
tools/perf/util/db-export.c
335
err = db_export__comm_thread(dbe, comm, thread);
tools/perf/util/db-export.c
341
curr_comm = thread__comm(thread);
tools/perf/util/db-export.c
343
return db_export__comm(dbe, curr_comm, thread);
tools/perf/util/db-export.c
352
struct thread *thread = al->thread;
tools/perf/util/db-export.c
359
struct thread *main_thread;
tools/perf/util/db-export.c
364
if (thread__maps(thread))
tools/perf/util/db-export.c
365
machine = maps__machine(thread__maps(thread));
tools/perf/util/db-export.c
377
main_thread = thread__main_thread(machine, thread);
tools/perf/util/db-export.c
379
err = db_export__threads(dbe, thread, main_thread, machine, &comm);
tools/perf/util/db-export.c
394
thread, sample,
tools/perf/util/db-export.c
408
err = thread_stack__process(thread, comm, sample, al,
tools/perf/util/db-export.c
536
struct thread *thread = machine__find_thread(machine, pid, tid);
tools/perf/util/db-export.c
537
struct thread *main_thread;
tools/perf/util/db-export.c
540
if (!thread || !thread__comm_set(thread))
tools/perf/util/db-export.c
543
*is_idle = !thread__pid(thread) && !thread__tid(thread);
tools/perf/util/db-export.c
545
main_thread = thread__main_thread(machine, thread);
tools/perf/util/db-export.c
547
err = db_export__threads(dbe, thread, main_thread, machine, comm_ptr);
tools/perf/util/db-export.c
549
*db_id = thread__db_id(thread);
tools/perf/util/db-export.c
553
thread__put(thread);
tools/perf/util/db-export.c
62
int db_export__thread(struct db_export *dbe, struct thread *thread,
tools/perf/util/db-export.c
63
struct machine *machine, struct thread *main_thread)
tools/perf/util/db-export.c
67
if (thread__db_id(thread))
tools/perf/util/db-export.c
70
thread__set_db_id(thread, ++dbe->thread_last_db_id);
tools/perf/util/db-export.c
76
return dbe->export_thread(dbe, thread, main_thread_db_id,
tools/perf/util/db-export.c
83
struct thread *thread)
tools/perf/util/db-export.c
88
return dbe->export_comm(dbe, comm, thread);
tools/perf/util/db-export.c
94
struct thread *thread)
tools/perf/util/db-export.c
99
return __db_export__comm(dbe, comm, thread);
tools/perf/util/db-export.h
15
struct thread;
tools/perf/util/db-export.h
44
int (*export_thread)(struct db_export *dbe, struct thread *thread,
tools/perf/util/db-export.h
47
struct thread *thread);
tools/perf/util/db-export.h
49
struct comm *comm, struct thread *thread);
tools/perf/util/db-export.h
84
int db_export__thread(struct db_export *dbe, struct thread *thread,
tools/perf/util/db-export.h
85
struct machine *machine, struct thread *main_thread);
tools/perf/util/db-export.h
87
struct thread *thread);
tools/perf/util/db-export.h
89
struct thread *main_thread);
tools/perf/util/db-export.h
91
struct thread *thread);
tools/perf/util/debug.c
314
struct thread *thread = NULL;
tools/perf/util/debug.c
321
thread = machine__find_thread(machine, pid, pid);
tools/perf/util/debug.c
324
if (!machine || !thread) {
tools/perf/util/debug.c
342
if (thread && thread__find_map(thread, PERF_RECORD_MISC_USER, addr, &al)) {
tools/perf/util/debug.c
356
thread__put(thread);
tools/perf/util/disasm.c
1070
if (!maps__find_ams(thread__maps(args->ms->thread), &target) &&
tools/perf/util/disasm.c
273
if (maps__find_ams(thread__maps(ms->thread), &target) == 0 &&
tools/perf/util/disasm.c
407
if (maps__find_ams(thread__maps(ms->thread), &target) == 0 &&
tools/perf/util/dlfilter.c
109
d_al->comm = al->thread ? thread__comm_str(al->thread) : ":-1";
tools/perf/util/dlfilter.c
128
if (!addr_al->thread) {
tools/perf/util/dlfilter.c
129
struct thread *thread = get_thread(d);
tools/perf/util/dlfilter.c
131
if (!thread)
tools/perf/util/dlfilter.c
133
thread__resolve(thread, addr_al, d->sample);
tools/perf/util/dlfilter.c
169
struct thread *thread;
tools/perf/util/dlfilter.c
175
thread = get_thread(d);
tools/perf/util/dlfilter.c
176
if (!thread)
tools/perf/util/dlfilter.c
180
thread__find_symbol_fb(thread, d->sample->cpumode, address, &al);
tools/perf/util/dlfilter.c
230
if (!al->thread && machine__resolve(d->machine, al, d->sample) < 0)
tools/perf/util/dlfilter.c
233
if (thread__maps(al->thread)) {
tools/perf/util/dlfilter.c
234
struct machine *machine = maps__machine(thread__maps(al->thread));
tools/perf/util/dlfilter.c
237
perf_sample__fetch_insn(d->sample, al->thread, machine);
tools/perf/util/dlfilter.c
317
thread__find_map_fb(al->thread, d->sample->cpumode, ip, &a);
tools/perf/util/dlfilter.c
77
if (!al->thread && machine__resolve(d->machine, al, d->sample) < 0)
tools/perf/util/dlfilter.c
82
static struct thread *get_thread(struct dlfilter *d)
tools/perf/util/dlfilter.c
86
return al ? al->thread : NULL;
tools/perf/util/drm_pmu.c
663
int evsel__drm_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
tools/perf/util/drm_pmu.c
667
int pid = perf_thread_map__pid(evsel->core.threads, thread);
tools/perf/util/drm_pmu.c
676
old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
tools/perf/util/drm_pmu.c
678
count = perf_counts(evsel->counts, cpu_map_idx, thread);
tools/perf/util/drm_pmu.h
37
int evsel__drm_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread);
tools/perf/util/dump-insn.h
13
struct thread *thread;
tools/perf/util/dump-insn.h
9
struct thread;
tools/perf/util/event.c
442
struct thread *thread = machine__findnew_thread(machine,
tools/perf/util/event.c
449
if (thread) {
tools/perf/util/event.c
450
machine__remove_thread(machine, thread);
tools/perf/util/event.c
451
thread__put(thread);
tools/perf/util/event.c
694
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
tools/perf/util/event.c
697
struct maps *maps = thread__maps(thread);
tools/perf/util/event.c
702
thread__zput(al->thread);
tools/perf/util/event.c
703
al->thread = thread__get(thread);
tools/perf/util/event.c
757
struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
tools/perf/util/event.c
760
struct map *map = thread__find_map(thread, cpumode, addr, al);
tools/perf/util/event.c
761
struct machine *machine = maps__machine(thread__maps(thread));
tools/perf/util/event.c
767
return thread__find_map(thread, addr_cpumode, addr, al);
tools/perf/util/event.c
770
struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
tools/perf/util/event.c
774
if (thread__find_map(thread, cpumode, addr, al))
tools/perf/util/event.c
779
struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
tools/perf/util/event.c
783
if (thread__find_map_fb(thread, cpumode, addr, al))
tools/perf/util/event.c
808
struct thread *thread;
tools/perf/util/event.c
812
thread = machine__findnew_guest_code(machine, sample->pid);
tools/perf/util/event.c
814
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
tools/perf/util/event.c
815
if (thread == NULL)
tools/perf/util/event.c
818
dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread__tid(thread));
tools/perf/util/event.c
819
thread__find_map(thread, sample->cpumode, sample->ip, al);
tools/perf/util/event.c
826
if (thread__is_filtered(thread))
tools/perf/util/event.c
829
thread__put(thread);
tools/perf/util/event.c
830
thread = NULL;
tools/perf/util/event.c
924
void thread__resolve(struct thread *thread, struct addr_location *al,
tools/perf/util/event.c
927
thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
tools/perf/util/evlist.h
85
} thread;
tools/perf/util/evsel.c
1895
void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
tools/perf/util/evsel.c
1903
tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
tools/perf/util/evsel.c
1904
*perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
tools/perf/util/evsel.c
1911
static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
tools/perf/util/evsel.c
1913
struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread);
tools/perf/util/evsel.c
1915
return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
tools/perf/util/evsel.c
1918
static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
tools/perf/util/evsel.c
1923
count = perf_counts(counter->counts, cpu_map_idx, thread);
tools/perf/util/evsel.c
1926
evsel__tpebs_read(counter, cpu_map_idx, thread);
tools/perf/util/evsel.c
1927
perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
tools/perf/util/evsel.c
1936
perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
tools/perf/util/evsel.c
1993
static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
tools/perf/util/evsel.c
2021
evsel__set_count(counter, cpu_map_idx, thread, v->value, ena, run, lost);
tools/perf/util/evsel.c
2027
static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
tools/perf/util/evsel.c
2048
if (FD(leader, cpu_map_idx, thread) < 0)
tools/perf/util/evsel.c
2051
if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0)
tools/perf/util/evsel.c
2054
return evsel__process_group_data(leader, cpu_map_idx, thread, data);
tools/perf/util/evsel.c
2078
int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
tools/perf/util/evsel.c
2081
return evsel__tool_pmu_read(evsel, cpu_map_idx, thread);
tools/perf/util/evsel.c
2084
return evsel__hwmon_pmu_read(evsel, cpu_map_idx, thread);
tools/perf/util/evsel.c
2087
return evsel__drm_pmu_read(evsel, cpu_map_idx, thread);
tools/perf/util/evsel.c
2090
return evsel__tpebs_read(evsel, cpu_map_idx, thread);
tools/perf/util/evsel.c
2093
return evsel__read_group(evsel, cpu_map_idx, thread);
tools/perf/util/evsel.c
2095
return evsel__read_one(evsel, cpu_map_idx, thread);
tools/perf/util/evsel.c
2098
int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale)
tools/perf/util/evsel.c
2103
if (FD(evsel, cpu_map_idx, thread) < 0)
tools/perf/util/evsel.c
2109
if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0)
tools/perf/util/evsel.c
2112
evsel__compute_deltas(evsel, cpu_map_idx, thread, &count);
tools/perf/util/evsel.c
2114
*perf_counts(evsel->counts, cpu_map_idx, thread) = count;
tools/perf/util/evsel.c
2139
static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
tools/perf/util/evsel.c
2157
fd = FD(leader, cpu_map_idx, thread);
tools/perf/util/evsel.c
2170
for (int thread = thread_idx; thread < nr_threads - 1; thread++)
tools/perf/util/evsel.c
2171
FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
tools/perf/util/evsel.c
2201
int thread, int err)
tools/perf/util/evsel.c
2203
pid_t ignore_pid = perf_thread_map__pid(threads, thread);
tools/perf/util/evsel.c
2224
if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
tools/perf/util/evsel.c
2227
if (thread_map__remove(threads, thread))
tools/perf/util/evsel.c
2786
int idx, thread, nthreads;
tools/perf/util/evsel.c
2839
for (thread = 0; thread < nthreads; thread++) {
tools/perf/util/evsel.c
2842
if (thread >= nthreads)
tools/perf/util/evsel.c
2846
pid = perf_thread_map__pid(threads, thread);
tools/perf/util/evsel.c
2848
group_fd = get_group_fd(evsel, idx, thread);
tools/perf/util/evsel.c
2863
FD(evsel, idx, thread) = fd;
tools/perf/util/evsel.c
2917
idx, threads, thread, err)) {
tools/perf/util/evsel.c
2940
threads->err_thread = thread;
tools/perf/util/evsel.c
2944
while (--thread >= 0) {
tools/perf/util/evsel.c
2945
if (FD(evsel, idx, thread) >= 0)
tools/perf/util/evsel.c
2946
close(FD(evsel, idx, thread));
tools/perf/util/evsel.c
2947
FD(evsel, idx, thread) = -1;
tools/perf/util/evsel.c
2949
thread = nthreads;
tools/perf/util/evsel.c
4136
int cpu_map_idx, thread;
tools/perf/util/evsel.c
4145
for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
tools/perf/util/evsel.c
4146
thread++) {
tools/perf/util/evsel.c
4147
int fd = FD(evsel, cpu_map_idx, thread);
tools/perf/util/evsel.c
4150
cpu_map_idx, thread, fd) < 0)
tools/perf/util/evsel.h
243
void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
tools/perf/util/evsel.h
394
int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread);
tools/perf/util/evsel.h
396
int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale);
tools/perf/util/evsel.h
405
static inline int evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread)
tools/perf/util/evsel.h
407
return __evsel__read_on_cpu(evsel, cpu_map_idx, thread, false);
tools/perf/util/evsel.h
417
static inline int evsel__read_on_cpu_scaled(struct evsel *evsel, int cpu_map_idx, int thread)
tools/perf/util/evsel.h
419
return __evsel__read_on_cpu(evsel, cpu_map_idx, thread, true);
tools/perf/util/hist.c
1023
thread__put(al->thread);
tools/perf/util/hist.c
1024
al->thread = thread__get(bi[i].to.ms.thread);
tools/perf/util/hist.c
110
len = thread__comm_len(h->thread);
tools/perf/util/hist.c
1231
.thread = al->thread,
tools/perf/util/hist.c
1232
.comm = thread__comm(al->thread),
tools/perf/util/hist.c
1235
.thread = al->thread,
tools/perf/util/hist.c
1464
thread__zput(he->thread);
tools/perf/util/hist.c
2286
!RC_CHK_EQUAL(he->thread, hists->thread_filter)) {
tools/perf/util/hist.c
254
struct cgroup *cgrp = cgroup__find(maps__machine(thread__maps(h->ms.thread))->env,
tools/perf/util/hist.c
2914
struct thread *thread = hists->thread_filter;
tools/perf/util/hist.c
2966
if (thread) {
tools/perf/util/hist.c
2967
if (hists__has(hists, thread)) {
tools/perf/util/hist.c
2970
(thread__comm_set(thread) ? thread__comm_str(thread) : ""),
tools/perf/util/hist.c
2971
thread__tid(thread));
tools/perf/util/hist.c
2975
(thread__comm_set(thread) ? thread__comm_str(thread) : ""));
tools/perf/util/hist.c
539
he->ms.thread = thread__get(he->ms.thread);
tools/perf/util/hist.c
555
he->branch_info->from.ms.thread = thread__get(he->branch_info->from.ms.thread);
tools/perf/util/hist.c
557
he->branch_info->to.ms.thread = thread__get(he->branch_info->to.ms.thread);
tools/perf/util/hist.c
590
he->thread = thread__get(he->thread);
tools/perf/util/hist.c
803
struct namespaces *ns = thread__namespaces(al->thread);
tools/perf/util/hist.c
805
.thread = al->thread,
tools/perf/util/hist.c
806
.comm = thread__comm(al->thread),
tools/perf/util/hist.c
813
.thread = al->thread,
tools/perf/util/hist.c
893
.thread = al->thread,
tools/perf/util/hist.h
102
struct thread;
tools/perf/util/hist.h
123
struct thread *thread_filter;
tools/perf/util/hist.h
249
struct thread *thread;
tools/perf/util/hist.h
523
int thread;
tools/perf/util/hwmon_pmu.c
766
int idx = 0, thread = 0, nthreads, err = 0;
tools/perf/util/hwmon_pmu.c
774
for (thread = 0; thread < nthreads; thread++) {
tools/perf/util/hwmon_pmu.c
782
FD(evsel, idx, thread) = fd;
tools/perf/util/hwmon_pmu.c
793
threads->err_thread = thread;
tools/perf/util/hwmon_pmu.c
796
while (--thread >= 0) {
tools/perf/util/hwmon_pmu.c
797
if (FD(evsel, idx, thread) >= 0)
tools/perf/util/hwmon_pmu.c
798
close(FD(evsel, idx, thread));
tools/perf/util/hwmon_pmu.c
799
FD(evsel, idx, thread) = -1;
tools/perf/util/hwmon_pmu.c
801
thread = nthreads;
tools/perf/util/hwmon_pmu.c
807
int evsel__hwmon_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
tools/perf/util/hwmon_pmu.c
815
old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
tools/perf/util/hwmon_pmu.c
817
count = perf_counts(evsel->counts, cpu_map_idx, thread);
tools/perf/util/hwmon_pmu.c
818
fd = FD(evsel, cpu_map_idx, thread);
tools/perf/util/hwmon_pmu.h
165
int evsel__hwmon_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread);
tools/perf/util/intel-bts.c
323
struct thread *thread;
tools/perf/util/intel-bts.c
329
thread = machine__find_thread(machine, -1, btsq->tid);
tools/perf/util/intel-bts.c
330
if (!thread)
tools/perf/util/intel-bts.c
333
len = thread__memcpy(thread, machine, buf, ip, INTEL_PT_INSN_BUF_SZ, &x86_64);
tools/perf/util/intel-bts.c
342
thread__put(thread);
tools/perf/util/intel-bts.c
410
struct thread *thread)
tools/perf/util/intel-bts.c
433
thread_stack__event(thread, btsq->cpu, btsq->sample_flags,
tools/perf/util/intel-bts.c
451
struct thread *thread;
tools/perf/util/intel-bts.c
458
thread = machine__find_thread(btsq->bts->machine, -1,
tools/perf/util/intel-bts.c
460
if (thread)
tools/perf/util/intel-bts.c
461
btsq->pid = thread__pid(thread);
tools/perf/util/intel-bts.c
463
thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
tools/perf/util/intel-bts.c
502
!btsq->bts->synth_opts.thread_stack && thread &&
tools/perf/util/intel-bts.c
505
thread_stack__set_trace_nr(thread, btsq->cpu, buffer->buffer_nr + 1);
tools/perf/util/intel-bts.c
507
err = intel_bts_process_buffer(btsq, buffer, thread);
tools/perf/util/intel-bts.c
520
thread__put(thread);
tools/perf/util/intel-pt.c
1005
thread = ptq->thread;
tools/perf/util/intel-pt.c
1006
if (!thread)
tools/perf/util/intel-pt.c
1010
if (!thread__find_map(thread, cpumode, ip, &al) || !map__dso(al.map))
tools/perf/util/intel-pt.c
1240
struct thread *thread = machine__findnew_thread(pt->machine,
tools/perf/util/intel-pt.c
1244
thread_stack__sample_late(thread, sample->cpu, pt->chain,
tools/perf/util/intel-pt.c
1278
struct thread *thread = machine__findnew_thread(pt->machine,
tools/perf/util/intel-pt.c
1282
thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
tools/perf/util/intel-pt.c
1287
thread__put(thread);
tools/perf/util/intel-pt.c
1406
thread__zput(ptq->thread);
tools/perf/util/intel-pt.c
1457
vcpu = ptq->thread ? thread__guest_cpu(ptq->thread) : -1;
tools/perf/util/intel-pt.c
1489
thread__zput(ptq->thread);
tools/perf/util/intel-pt.c
1492
if (!ptq->thread && ptq->tid != -1)
tools/perf/util/intel-pt.c
1493
ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
tools/perf/util/intel-pt.c
1495
if (ptq->thread) {
tools/perf/util/intel-pt.c
1496
ptq->pid = thread__pid(ptq->thread);
tools/perf/util/intel-pt.c
1498
ptq->cpu = thread__cpu(ptq->thread);
tools/perf/util/intel-pt.c
1827
thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
tools/perf/util/intel-pt.c
1834
thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
tools/perf/util/intel-pt.c
202
struct thread *thread;
tools/perf/util/intel-pt.c
204
struct thread *guest_thread;
tools/perf/util/intel-pt.c
205
struct thread *unknown_guest_thread;
tools/perf/util/intel-pt.c
2466
thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
tools/perf/util/intel-pt.c
2495
thread_stack__br_sample(ptq->thread, ptq->cpu,
tools/perf/util/intel-pt.c
2896
thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
tools/perf/util/intel-pt.c
2902
thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
tools/perf/util/intel-pt.c
3301
thread__zput(ptq->thread);
tools/perf/util/intel-pt.c
3307
ptq->thread = machine__find_thread(m, -1, ptq->tid);
tools/perf/util/intel-pt.c
3308
if (ptq->thread)
tools/perf/util/intel-pt.c
3309
ptq->pid = thread__pid(ptq->thread);
tools/perf/util/intel-pt.c
3313
ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
tools/perf/util/intel-pt.c
3632
static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
tools/perf/util/intel-pt.c
3636
if (!thread__find_map(thread, cpumode, addr, al))
tools/perf/util/intel-pt.c
3650
struct thread *thread = pt->unknown_thread;
tools/perf/util/intel-pt.c
3664
if (intel_pt_find_map(thread, cpumode, addr, &al)) {
tools/perf/util/intel-pt.c
69
struct thread *unknown_thread;
tools/perf/util/intel-pt.c
755
struct thread *thread;
tools/perf/util/intel-pt.c
791
thread = ptq->guest_thread;
tools/perf/util/intel-pt.c
792
if (!thread) {
tools/perf/util/intel-pt.c
798
thread = ptq->unknown_guest_thread;
tools/perf/util/intel-pt.c
801
thread = ptq->thread;
tools/perf/util/intel-pt.c
802
if (!thread) {
tools/perf/util/intel-pt.c
808
thread = ptq->pt->unknown_thread;
tools/perf/util/intel-pt.c
815
if (!thread__find_map(thread, cpumode, *ip, &al) || !map__dso(al.map)) {
tools/perf/util/intel-pt.c
988
struct thread *thread;
tools/perf/util/intel-tpebs.c
561
int evsel__tpebs_read(struct evsel *evsel, int cpu_map_idx, int thread)
tools/perf/util/intel-tpebs.c
569
if (cpu_map_idx != 0 || thread != 0)
tools/perf/util/intel-tpebs.c
573
old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
tools/perf/util/intel-tpebs.c
575
count = perf_counts(evsel->counts, cpu_map_idx, thread);
tools/perf/util/intel-tpebs.h
23
int evsel__tpebs_read(struct evsel *evsel, int cpu_map_idx, int thread);
tools/perf/util/jitdump.c
820
struct thread *thread = machine__findnew_thread(machine, pid, pid);
tools/perf/util/jitdump.c
822
if (!thread) {
tools/perf/util/jitdump.c
827
thread__set_priv(thread, (void *)true);
tools/perf/util/jitdump.c
828
thread__put(thread);
tools/perf/util/jitdump.c
833
struct thread *thread = machine__find_thread(machine, pid, pid);
tools/perf/util/jitdump.c
836
if (!thread)
tools/perf/util/jitdump.c
839
priv = thread__priv(thread);
tools/perf/util/jitdump.c
840
thread__put(thread);
tools/perf/util/jitdump.c
853
struct thread *thread;
tools/perf/util/jitdump.c
860
thread = machine__findnew_thread(machine, pid, tid);
tools/perf/util/jitdump.c
861
if (thread == NULL) {
tools/perf/util/jitdump.c
866
nsi = nsinfo__get(thread__nsinfo(thread));
tools/perf/util/jitdump.c
867
thread__put(thread);
tools/perf/util/machine.c
110
struct thread *thread = machine__findnew_thread(machine, -1,
tools/perf/util/machine.c
113
if (thread == NULL)
tools/perf/util/machine.c
116
thread__set_guest_comm(thread, pid);
tools/perf/util/machine.c
117
thread__put(thread);
tools/perf/util/machine.c
1732
struct thread *thread;
tools/perf/util/machine.c
1766
thread = machine__findnew_thread(machine, event->mmap2.pid,
tools/perf/util/machine.c
1768
if (thread == NULL)
tools/perf/util/machine.c
1775
event->mmap2.filename, thread);
tools/perf/util/machine.c
1780
ret = thread__insert_map(thread, map);
tools/perf/util/machine.c
1784
thread__put(thread);
tools/perf/util/machine.c
1791
thread__put(thread);
tools/perf/util/machine.c
1800
struct thread *thread;
tools/perf/util/machine.c
1823
thread = machine__findnew_thread(machine, event->mmap.pid,
tools/perf/util/machine.c
1825
if (thread == NULL)
tools/perf/util/machine.c
1833
&dso_id_empty, prot, /*flags=*/0, event->mmap.filename, thread);
tools/perf/util/machine.c
1838
ret = thread__insert_map(thread, map);
tools/perf/util/machine.c
1842
thread__put(thread);
tools/perf/util/machine.c
1849
thread__put(thread);
tools/perf/util/machine.c
1855
void machine__remove_thread(struct machine *machine, struct thread *th)
tools/perf/util/machine.c
1863
struct thread *thread = machine__find_thread(machine,
tools/perf/util/machine.c
1866
struct thread *parent = machine__findnew_thread(machine,
tools/perf/util/machine.c
1891
if (thread != NULL) {
tools/perf/util/machine.c
1892
machine__remove_thread(machine, thread);
tools/perf/util/machine.c
1893
thread__put(thread);
tools/perf/util/machine.c
1896
thread = machine__findnew_thread(machine, event->fork.pid,
tools/perf/util/machine.c
1915
if (thread == NULL || parent == NULL ||
tools/perf/util/machine.c
1916
thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
tools/perf/util/machine.c
1920
thread__put(thread);
tools/perf/util/machine.c
1929
struct thread *thread = machine__find_thread(machine,
tools/perf/util/machine.c
1938
if (thread != NULL) {
tools/perf/util/machine.c
1940
thread__set_exited(thread, /*exited=*/true);
tools/perf/util/machine.c
1942
machine__remove_thread(machine, thread);
tools/perf/util/machine.c
1944
thread__put(thread);
tools/perf/util/machine.c
2000
static void ip__resolve_ams(struct thread *thread,
tools/perf/util/machine.c
2014
thread__find_cpumode_addr_location(thread, ip, /*symbols=*/true, &al);
tools/perf/util/machine.c
2019
ams->ms.thread = thread__get(al.thread);
tools/perf/util/machine.c
2027
static void ip__resolve_data(struct thread *thread,
tools/perf/util/machine.c
2035
thread__find_symbol(thread, m, addr, &al);
tools/perf/util/machine.c
2040
ams->ms.thread = thread__get(al.thread);
tools/perf/util/machine.c
2056
ip__resolve_ams(al->thread, mem_info__iaddr(mi), sample->ip);
tools/perf/util/machine.c
2057
ip__resolve_data(al->thread, al->cpumode, mem_info__daddr(mi),
tools/perf/util/machine.c
2123
.thread = thread__get(ms->thread),
tools/perf/util/machine.c
2146
static int add_callchain_ip(struct thread *thread,
tools/perf/util/machine.c
2169
thread__find_cpumode_addr_location(thread, ip, symbols, &al);
tools/perf/util/machine.c
2197
thread__find_symbol(thread, *cpumode, ip, &al);
tools/perf/util/machine.c
2199
thread__find_map(thread, *cpumode, ip, &al);
tools/perf/util/machine.c
2223
ms.thread = thread__get(al.thread);
tools/perf/util/machine.c
2254
ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
tools/perf/util/machine.c
2255
ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
tools/perf/util/machine.c
2327
static int lbr_callchain_add_kernel_ip(struct thread *thread,
tools/perf/util/machine.c
2342
err = add_callchain_ip(thread, cursor, parent,
tools/perf/util/machine.c
2353
err = add_callchain_ip(thread, cursor, parent,
tools/perf/util/machine.c
2364
static void save_lbr_cursor_node(struct thread *thread,
tools/perf/util/machine.c
2368
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
tools/perf/util/machine.c
2386
lbr_stitch->prev_lbr_cursor[idx].ms.thread = thread__get(cursor->curr->ms.thread);
tools/perf/util/machine.c
2393
static int lbr_callchain_add_lbr_ip(struct thread *thread,
tools/perf/util/machine.c
2415
if (thread__lbr_stitch(thread)) {
tools/perf/util/machine.c
2436
err = add_callchain_ip(thread, cursor, parent,
tools/perf/util/machine.c
2449
if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
tools/perf/util/machine.c
2461
err = add_callchain_ip(thread, cursor, parent,
tools/perf/util/machine.c
2467
save_lbr_cursor_node(thread, cursor, i);
tools/perf/util/machine.c
2476
err = add_callchain_ip(thread, cursor, parent,
tools/perf/util/machine.c
2482
save_lbr_cursor_node(thread, cursor, i);
tools/perf/util/machine.c
2496
err = add_callchain_ip(thread, cursor, parent,
tools/perf/util/machine.c
2507
static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
tools/perf/util/machine.c
2510
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
tools/perf/util/machine.c
2532
static struct stitch_list *get_stitch_node(struct thread *thread)
tools/perf/util/machine.c
2534
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
tools/perf/util/machine.c
2548
static bool has_stitched_lbr(struct thread *thread,
tools/perf/util/machine.c
2558
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
tools/perf/util/machine.c
2604
stitch_node = get_stitch_node(thread);
tools/perf/util/machine.c
2611
stitch_node->cursor.ms.thread =
tools/perf/util/machine.c
2612
thread__get(lbr_stitch->prev_lbr_cursor[i].ms.thread);
tools/perf/util/machine.c
2624
static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
tools/perf/util/machine.c
2626
if (thread__lbr_stitch(thread))
tools/perf/util/machine.c
2629
thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
tools/perf/util/machine.c
2630
if (!thread__lbr_stitch(thread))
tools/perf/util/machine.c
2633
thread__lbr_stitch(thread)->prev_lbr_cursor =
tools/perf/util/machine.c
2635
if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
tools/perf/util/machine.c
2638
thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1;
tools/perf/util/machine.c
2640
INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
tools/perf/util/machine.c
2641
INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
tools/perf/util/machine.c
2646
free(thread__lbr_stitch(thread));
tools/perf/util/machine.c
2647
thread__set_lbr_stitch(thread, NULL);
tools/perf/util/machine.c
2650
thread__set_lbr_stitch_enable(thread, false);
tools/perf/util/machine.c
2661
static int resolve_lbr_callchain_sample(struct thread *thread,
tools/perf/util/machine.c
2687
if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
tools/perf/util/machine.c
2688
(max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
tools/perf/util/machine.c
2689
lbr_stitch = thread__lbr_stitch(thread);
tools/perf/util/machine.c
2691
stitched_lbr = has_stitched_lbr(thread, sample,
tools/perf/util/machine.c
2708
err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
tools/perf/util/machine.c
2714
err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
tools/perf/util/machine.c
2720
err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
tools/perf/util/machine.c
2727
err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
tools/perf/util/machine.c
2731
err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
tools/perf/util/machine.c
2737
err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
tools/perf/util/machine.c
2749
static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
tools/perf/util/machine.c
2761
err = add_callchain_ip(thread, cursor, parent,
tools/perf/util/machine.c
2771
struct thread *thread, int usr_idx)
tools/perf/util/machine.c
2773
if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
tools/perf/util/machine.c
2774
return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
tools/perf/util/machine.c
2779
static int thread__resolve_callchain_sample(struct thread *thread,
tools/perf/util/machine.c
2804
err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
tools/perf/util/machine.c
2816
skip_idx = arch_skip_callchain_idx(thread, chain);
tools/perf/util/machine.c
2868
err = add_callchain_ip(thread, cursor, parent,
tools/perf/util/machine.c
2875
err = add_callchain_ip(thread, cursor, parent, root_al,
tools/perf/util/machine.c
2894
err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
tools/perf/util/machine.c
2916
err = find_prev_cpumode(chain, thread, cursor, parent,
tools/perf/util/machine.c
2934
leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);
tools/perf/util/machine.c
2943
err = add_callchain_ip(thread, cursor, parent,
tools/perf/util/machine.c
2951
err = add_callchain_ip(thread, cursor, parent,
tools/perf/util/machine.c
2987
static int thread__resolve_callchain_unwind(struct thread *thread,
tools/perf/util/machine.c
3007
thread, sample, max_stack, false);
tools/perf/util/machine.c
3010
int __thread__resolve_callchain(struct thread *thread,
tools/perf/util/machine.c
3027
ret = thread__resolve_callchain_sample(thread, cursor,
tools/perf/util/machine.c
3033
ret = thread__resolve_callchain_unwind(thread, cursor,
tools/perf/util/machine.c
3037
ret = thread__resolve_callchain_unwind(thread, cursor,
tools/perf/util/machine.c
3042
ret = thread__resolve_callchain_sample(thread, cursor,
tools/perf/util/machine.c
3052
int (*fn)(struct thread *thread, void *p),
tools/perf/util/machine.c
3059
int (*fn)(struct thread *thread, void *p),
tools/perf/util/machine.c
3080
static int thread_list_cb(struct thread *thread, void *data)
tools/perf/util/machine.c
3088
entry->thread = thread__get(thread);
tools/perf/util/machine.c
3103
thread__zput(pos->thread);
tools/perf/util/machine.c
3120
struct thread *thread;
tools/perf/util/machine.c
3134
thread = machine__findnew_thread(machine, pid, tid);
tools/perf/util/machine.c
3135
if (!thread)
tools/perf/util/machine.c
3138
thread__set_cpu(thread, cpu);
tools/perf/util/machine.c
3139
thread__put(thread);
tools/perf/util/machine.c
386
static struct thread *findnew_guest_code(struct machine *machine,
tools/perf/util/machine.c
390
struct thread *host_thread;
tools/perf/util/machine.c
391
struct thread *thread;
tools/perf/util/machine.c
397
thread = machine__findnew_thread(machine, -1, pid);
tools/perf/util/machine.c
398
if (!thread)
tools/perf/util/machine.c
402
if (!maps__empty(thread__maps(thread)))
tools/perf/util/machine.c
403
return thread;
tools/perf/util/machine.c
409
thread__set_guest_comm(thread, pid);
tools/perf/util/machine.c
415
err = maps__copy_from(thread__maps(thread), thread__maps(host_thread));
tools/perf/util/machine.c
420
return thread;
tools/perf/util/machine.c
423
thread__zput(thread);
tools/perf/util/machine.c
427
struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
tools/perf/util/machine.c
435
struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
tools/perf/util/machine.c
476
struct thread *th, pid_t pid)
tools/perf/util/machine.c
478
struct thread *leader;
tools/perf/util/machine.c
526
static struct thread *__machine__findnew_thread(struct machine *machine,
tools/perf/util/machine.c
531
struct thread *th = threads__find(&machine->threads, tid);
tools/perf/util/machine.c
563
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
tools/perf/util/machine.c
568
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
tools/perf/util/machine.c
581
struct thread *machine__idle_thread(struct machine *machine)
tools/perf/util/machine.c
583
struct thread *thread = machine__findnew_thread(machine, 0, 0);
tools/perf/util/machine.c
585
if (!thread || thread__set_comm(thread, "swapper", 0) ||
tools/perf/util/machine.c
586
thread__set_namespaces(thread, 0, NULL))
tools/perf/util/machine.c
589
return thread;
tools/perf/util/machine.c
593
struct thread *thread)
tools/perf/util/machine.c
596
return thread__exec_comm(thread);
tools/perf/util/machine.c
598
return thread__comm(thread);
tools/perf/util/machine.c
604
struct thread *thread = machine__findnew_thread(machine,
tools/perf/util/machine.c
616
if (thread == NULL ||
tools/perf/util/machine.c
617
__thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
tools/perf/util/machine.c
622
thread__put(thread);
tools/perf/util/machine.c
631
struct thread *thread = machine__findnew_thread(machine,
tools/perf/util/machine.c
647
if (thread == NULL ||
tools/perf/util/machine.c
648
thread__set_namespaces(thread, sample->time, &event->namespaces)) {
tools/perf/util/machine.c
653
thread__put(thread);
tools/perf/util/machine.c
67
static void thread__set_guest_comm(struct thread *thread, pid_t pid)
tools/perf/util/machine.c
72
thread__set_comm(thread, comm, 0);
tools/perf/util/machine.c
939
static int machine_fprintf_cb(struct thread *thread, void *data)
tools/perf/util/machine.c
944
args->printed += thread__fprintf(thread, args->fp);
tools/perf/util/machine.h
105
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
tools/perf/util/machine.h
107
struct thread *machine__idle_thread(struct machine *machine);
tools/perf/util/machine.h
109
struct thread *thread);
tools/perf/util/machine.h
166
struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid);
tools/perf/util/machine.h
167
struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid);
tools/perf/util/machine.h
179
void machine__remove_thread(struct machine *machine, struct thread *th);
tools/perf/util/machine.h
188
int __thread__resolve_callchain(struct thread *thread,
tools/perf/util/machine.h
197
static inline int thread__resolve_callchain(struct thread *thread,
tools/perf/util/machine.h
20
struct thread;
tools/perf/util/machine.h
205
return __thread__resolve_callchain(thread,
tools/perf/util/machine.h
234
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
tools/perf/util/machine.h
286
int (*fn)(struct thread *thread, void *p),
tools/perf/util/machine.h
289
int (*fn)(struct thread *thread, void *p),
tools/perf/util/machine.h
294
struct thread *thread;
tools/perf/util/map.c
125
char *filename, struct thread *thread)
tools/perf/util/map.c
142
nsi = nsinfo__get(thread__nsinfo(thread));
tools/perf/util/map.c
167
dso = machine__findnew_vdso(machine, thread);
tools/perf/util/map.h
145
struct thread;
tools/perf/util/map.h
179
char *filename, struct thread *thread);
tools/perf/util/map_symbol.c
20
dst->thread = thread__get(src->thread);
tools/perf/util/map_symbol.c
9
thread__zput(ms->thread);
tools/perf/util/map_symbol.h
13
struct thread *thread;
tools/perf/util/map_symbol.h
7
struct thread;
tools/perf/util/powerpc-vpadtl.c
45
struct thread *thread;
tools/perf/util/print_insn.c
45
ssize_t fprintf_insn_asm(struct machine *machine, struct thread *thread, u8 cpumode,
tools/perf/util/print_insn.c
49
return capstone__fprintf_insn_asm(machine, thread, cpumode, is64bit, code, code_size,
tools/perf/util/print_insn.c
53
size_t sample__fprintf_insn_asm(struct perf_sample *sample, struct thread *thread,
tools/perf/util/print_insn.c
60
printed = fprintf_insn_asm(machine, thread, sample->cpumode, is64bit,
tools/perf/util/print_insn.h
15
size_t sample__fprintf_insn_asm(struct perf_sample *sample, struct thread *thread,
tools/perf/util/print_insn.h
18
ssize_t fprintf_insn_asm(struct machine *machine, struct thread *thread, u8 cpumode,
tools/perf/util/print_insn.h
9
struct thread;
tools/perf/util/python.c
1148
int cpu = 0, cpu_idx, thread = 0, thread_idx;
tools/perf/util/python.c
1156
if (!PyArg_ParseTuple(args, "ii", &cpu, &thread))
tools/perf/util/python.c
1164
thread_idx = perf_thread_map__idx(evsel->core.threads, thread);
tools/perf/util/python.c
1167
thread);
tools/perf/util/python.c
1410
int ret, cpu = 0, cpu_idx = 0, thread = 0, thread_idx = 0;
tools/perf/util/python.c
1418
if (!PyArg_ParseTuple(args, "sii", &metric, &cpu, &thread))
tools/perf/util/python.c
1445
thread_idx = perf_thread_map__idx(pos2->core.threads, thread);
tools/perf/util/python.c
1457
metric, cpu, thread);
tools/perf/util/sample.c
101
e_machine = thread__e_machine(thread, machine, /*e_flags=*/NULL);
tools/perf/util/sample.c
103
len = thread__memcpy(thread, machine, sample->insn,
tools/perf/util/sample.c
91
struct thread *thread,
tools/perf/util/sample.h
134
struct thread *thread,
tools/perf/util/sample.h
9
struct thread;
tools/perf/util/scripting-engines/trace-event-perl.c
275
if (thread__resolve_callchain(al->thread, cursor, evsel,
tools/perf/util/scripting-engines/trace-event-perl.c
346
struct thread *thread = al->thread;
tools/perf/util/scripting-engines/trace-event-perl.c
356
const char *comm = thread__comm_str(thread);
tools/perf/util/scripting-engines/trace-event-python.c
1180
static int python_export_thread(struct db_export *dbe, struct thread *thread,
tools/perf/util/scripting-engines/trace-event-python.c
1188
tuple_set_d64(t, 0, thread__db_id(thread));
tools/perf/util/scripting-engines/trace-event-python.c
1191
tuple_set_s32(t, 3, thread__pid(thread));
tools/perf/util/scripting-engines/trace-event-python.c
1192
tuple_set_s32(t, 4, thread__tid(thread));
tools/perf/util/scripting-engines/trace-event-python.c
1202
struct thread *thread)
tools/perf/util/scripting-engines/trace-event-python.c
1211
tuple_set_d64(t, 2, thread__db_id(thread));
tools/perf/util/scripting-engines/trace-event-python.c
1223
struct comm *comm, struct thread *thread)
tools/perf/util/scripting-engines/trace-event-python.c
1232
tuple_set_d64(t, 2, thread__db_id(thread));
tools/perf/util/scripting-engines/trace-event-python.c
1316
tuple_set_d64(t, 2, maps__machine(thread__maps(es->al->thread))->db_id);
tools/perf/util/scripting-engines/trace-event-python.c
1317
tuple_set_d64(t, 3, thread__db_id(es->al->thread));
tools/perf/util/scripting-engines/trace-event-python.c
1410
tuple_set_d64(t, 1, thread__db_id(cr->thread));
tools/perf/util/scripting-engines/trace-event-python.c
1661
process_stat(struct evsel *counter, struct perf_cpu cpu, int thread, u64 tstamp,
tools/perf/util/scripting-engines/trace-event-python.c
1682
PyTuple_SetItem(t, n++, _PyLong_FromLong(thread));
tools/perf/util/scripting-engines/trace-event-python.c
1703
for (int thread = 0; thread < perf_thread_map__nr(threads); thread++) {
tools/perf/util/scripting-engines/trace-event-python.c
1709
perf_thread_map__pid(threads, thread), tstamp,
tools/perf/util/scripting-engines/trace-event-python.c
1710
perf_counts(counter->counts, idx, thread));
tools/perf/util/scripting-engines/trace-event-python.c
1877
SET_TABLE_HANDLER(thread);
tools/perf/util/scripting-engines/trace-event-python.c
407
if (thread__resolve_callchain(al->thread, cursor, evsel,
tools/perf/util/scripting-engines/trace-event-python.c
485
struct thread *thread)
tools/perf/util/scripting-engines/trace-event-python.c
524
thread__find_map_fb(thread, sample->cpumode,
tools/perf/util/scripting-engines/trace-event-python.c
530
thread__find_map_fb(thread, sample->cpumode,
tools/perf/util/scripting-engines/trace-event-python.c
573
struct thread *thread)
tools/perf/util/scripting-engines/trace-event-python.c
597
thread__find_symbol_fb(thread, sample->cpumode,
tools/perf/util/scripting-engines/trace-event-python.c
603
thread__find_symbol_fb(thread, sample->cpumode,
tools/perf/util/scripting-engines/trace-event-python.c
887
_PyUnicode_FromString(thread__comm_str(al->thread)));
tools/perf/util/scripting-engines/trace-event-python.c
893
brstack = python_process_brstack(sample, al->thread);
tools/perf/util/scripting-engines/trace-event-python.c
896
brstacksym = python_process_brstacksym(sample, al->thread);
tools/perf/util/scripting-engines/trace-event-python.c
928
if (al->thread)
tools/perf/util/scripting-engines/trace-event-python.c
929
e_machine = thread__e_machine(al->thread, /*machine=*/NULL, &e_flags);
tools/perf/util/scripting-engines/trace-event-python.c
954
const char *comm = thread__comm_str(al->thread);
tools/perf/util/session.c
1127
struct thread *thread = machine__find_thread(machine, sample->pid, sample->pid);
tools/perf/util/session.c
1129
e_machine = thread__e_machine(thread, machine, &e_flags);
tools/perf/util/session.c
1884
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
tools/perf/util/session.c
1891
struct thread *thread = machine__idle_thread(&session->machines.host);
tools/perf/util/session.c
1894
thread__put(thread);
tools/perf/util/session.c
1895
return thread ? 0 : -1;
tools/perf/util/session.c
2015
static int perf_session__flush_thread_stack(struct thread *thread,
tools/perf/util/session.c
2018
return thread_stack__flush(thread);
tools/perf/util/session.c
2825
struct thread *thread;
tools/perf/util/session.c
2832
thread = machine__idle_thread(machine);
tools/perf/util/session.c
2833
if (!thread)
tools/perf/util/session.c
2835
thread__put(thread);
tools/perf/util/session.c
2846
struct thread *thread = machine__findnew_thread(machine, pid, tid);
tools/perf/util/session.c
2848
if (!thread)
tools/perf/util/session.c
2850
thread__set_guest_cpu(thread, guest_cpu);
tools/perf/util/session.c
2851
thread__put(thread);
tools/perf/util/session.c
2972
static int perf_session__e_machine_cb(struct thread *thread, void *_args)
tools/perf/util/session.c
2976
args->e_machine = thread__e_machine(thread, /*machine=*/NULL, &args->e_flags);
tools/perf/util/session.c
674
event->stat.thread = bswap_32(event->stat.thread);
tools/perf/util/session.h
140
struct thread *thread,
tools/perf/util/session.h
165
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid);
tools/perf/util/session.h
18
struct thread;
tools/perf/util/sideband_evlist.c
130
evlist->thread.done = 0;
tools/perf/util/sideband_evlist.c
131
if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
tools/perf/util/sideband_evlist.c
146
evlist->thread.done = 1;
tools/perf/util/sideband_evlist.c
147
pthread_join(evlist->thread.th, NULL);
tools/perf/util/sideband_evlist.c
52
if (evlist->thread.done)
tools/perf/util/sort.c
1019
struct cgroup *cgrp = cgroup__find(maps__machine(thread__maps(he->ms.thread))->env,
tools/perf/util/sort.c
113
return thread__tid(right->thread) - thread__tid(left->thread);
tools/perf/util/sort.c
119
const char *comm = thread__comm_str(he->thread);
tools/perf/util/sort.c
122
return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
tools/perf/util/sort.c
128
const struct thread *th = arg;
tools/perf/util/sort.c
133
return th && !RC_CHK_EQUAL(he->thread, th);
tools/perf/util/sort.c
149
return thread__pid(right->thread) - thread__pid(left->thread);
tools/perf/util/sort.c
155
int tgid = thread__pid(he->thread);
tools/perf/util/sort.c
159
if (thread__pid(he->thread) == thread__tid(he->thread)) {
tools/perf/util/sort.c
160
comm = thread__comm_str(he->thread);
tools/perf/util/sort.c
162
struct maps *maps = thread__maps(he->thread);
tools/perf/util/sort.c
163
struct thread *leader = machine__find_thread(maps__machine(maps),
tools/perf/util/sort.c
1765
if (thread__pid(left->thread) > thread__pid(right->thread))
tools/perf/util/sort.c
1767
if (thread__pid(left->thread) < thread__pid(right->thread))
tools/perf/util/sort.c
2832
MK_SORT_ENTRY_CHK(thread)
tools/perf/util/sort.c
3579
list->thread = 1;
tools/perf/util/stat.c
251
for (int thread = 0; thread < nthreads; thread++) {
tools/perf/util/stat.c
253
*perf_counts(evsel->counts, idx, thread) =
tools/perf/util/stat.c
254
*perf_counts(evsel->prev_raw_counts, idx, thread);
tools/perf/util/stat.c
387
int cpu_map_idx, int thread,
tools/perf/util/stat.c
403
evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
tools/perf/util/stat.c
407
struct perf_counts_values *aggr_counts = &ps->aggr[thread].counts;
tools/perf/util/stat.c
416
ps->aggr[thread].nr++;
tools/perf/util/stat.c
466
int idx, thread;
tools/perf/util/stat.c
468
for (thread = 0; thread < nthreads; thread++) {
tools/perf/util/stat.c
470
if (process_counter_values(config, counter, idx, thread,
tools/perf/util/stat.c
471
perf_counts(counter->counts, idx, thread)))
tools/perf/util/stat.c
671
ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
tools/perf/util/stat.c
674
st->cpu, st->thread, evsel__name(counter));
tools/perf/util/stat.c
688
st->id, st->cpu, st->thread);
tools/perf/util/synthetic-events.c
1420
struct perf_cpu cpu, u32 thread, u64 id,
tools/perf/util/synthetic-events.c
1433
event.thread = thread;
tools/perf/util/synthetic-events.c
876
int err = -1, thread, j;
tools/perf/util/synthetic-events.c
897
for (thread = 0; thread < threads->nr; ++thread) {
tools/perf/util/synthetic-events.c
900
perf_thread_map__pid(threads, thread), 0,
tools/perf/util/synthetic-events.c
911
if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
tools/perf/util/synthetic-events.h
88
int perf_event__synthesize_stat(const struct perf_tool *tool, struct perf_cpu cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine);
tools/perf/util/thread-stack.c
1010
err = thread_stack__call_return(thread, ts, --ts->cnt,
tools/perf/util/thread-stack.c
1101
int thread_stack__process(struct thread *thread, struct comm *comm,
tools/perf/util/thread-stack.c
1107
struct thread_stack *ts = thread__stack(thread, sample->cpu);
tools/perf/util/thread-stack.c
1113
thread_stack__reset(thread, ts);
tools/perf/util/thread-stack.c
1118
ts = thread_stack__new(thread, sample->cpu, crp, true, 0);
tools/perf/util/thread-stack.c
1129
if (ts->comm != comm && thread__pid(thread) == thread__tid(thread)) {
tools/perf/util/thread-stack.c
113
static inline bool thread_stack__per_cpu(struct thread *thread)
tools/perf/util/thread-stack.c
1130
err = __thread_stack__flush(thread, ts);
tools/perf/util/thread-stack.c
115
return !(thread__tid(thread) || thread__pid(thread));
tools/perf/util/thread-stack.c
1185
return thread_stack__pop_ks(thread, ts, sample, ref);
tools/perf/util/thread-stack.c
1196
err = thread_stack__pop_cp(thread, ts, sample->addr,
tools/perf/util/thread-stack.c
1201
err = thread_stack__no_call_return(thread, ts, sample,
tools/perf/util/thread-stack.c
1205
err = thread_stack__trace_begin(thread, ts, sample->time, ref);
tools/perf/util/thread-stack.c
1232
size_t thread_stack__depth(struct thread *thread, int cpu)
tools/perf/util/thread-stack.c
1234
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
136
static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
tools/perf/util/thread-stack.c
158
if (thread__maps(thread) && maps__machine(thread__maps(thread))) {
tools/perf/util/thread-stack.c
159
struct machine *machine = maps__machine(thread__maps(thread));
tools/perf/util/thread-stack.c
160
uint16_t e_machine = thread__e_machine(thread, machine, /*e_flags=*/NULL);
tools/perf/util/thread-stack.c
173
static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
tools/perf/util/thread-stack.c
178
struct thread_stack *ts = thread__ts(thread), *new_ts;
tools/perf/util/thread-stack.c
182
if (thread_stack__per_cpu(thread) && cpu > 0)
tools/perf/util/thread-stack.c
192
free(thread__ts(thread));
tools/perf/util/thread-stack.c
193
thread__set_ts(thread, new_ts);
tools/perf/util/thread-stack.c
197
if (thread_stack__per_cpu(thread) && cpu > 0 &&
tools/perf/util/thread-stack.c
202
thread_stack__init(ts, thread, crp, callstack, br_stack_sz))
tools/perf/util/thread-stack.c
208
static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
tools/perf/util/thread-stack.c
210
struct thread_stack *ts = thread__ts(thread);
tools/perf/util/thread-stack.c
226
static inline struct thread_stack *thread__stack(struct thread *thread,
tools/perf/util/thread-stack.c
229
if (!thread)
tools/perf/util/thread-stack.c
232
if (thread_stack__per_cpu(thread))
tools/perf/util/thread-stack.c
233
return thread__cpu_stack(thread, cpu);
tools/perf/util/thread-stack.c
235
return thread__ts(thread);
tools/perf/util/thread-stack.c
298
static int thread_stack__call_return(struct thread *thread,
tools/perf/util/thread-stack.c
305
.thread = thread,
tools/perf/util/thread-stack.c
338
static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
tools/perf/util/thread-stack.c
352
err = thread_stack__call_return(thread, ts, --ts->cnt,
tools/perf/util/thread-stack.c
364
int thread_stack__flush(struct thread *thread)
tools/perf/util/thread-stack.c
366
struct thread_stack *ts = thread__ts(thread);
tools/perf/util/thread-stack.c
372
int ret = __thread_stack__flush(thread, ts + pos);
tools/perf/util/thread-stack.c
406
int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
tools/perf/util/thread-stack.c
410
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
412
if (!thread)
tools/perf/util/thread-stack.c
416
ts = thread_stack__new(thread, cpu, NULL, callstack, br_stack_sz);
tools/perf/util/thread-stack.c
432
__thread_stack__flush(thread, ts);
tools/perf/util/thread-stack.c
473
void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr)
tools/perf/util/thread-stack.c
475
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
482
__thread_stack__flush(thread, ts);
tools/perf/util/thread-stack.c
487
static void __thread_stack__free(struct thread *thread, struct thread_stack *ts)
tools/perf/util/thread-stack.c
489
__thread_stack__flush(thread, ts);
tools/perf/util/thread-stack.c
494
static void thread_stack__reset(struct thread *thread, struct thread_stack *ts)
tools/perf/util/thread-stack.c
498
__thread_stack__free(thread, ts);
tools/perf/util/thread-stack.c
503
void thread_stack__free(struct thread *thread)
tools/perf/util/thread-stack.c
505
struct thread_stack *ts = thread__ts(thread);
tools/perf/util/thread-stack.c
510
__thread_stack__free(thread, ts + pos);
tools/perf/util/thread-stack.c
511
free(thread__ts(thread));
tools/perf/util/thread-stack.c
512
thread__set_ts(thread, NULL);
tools/perf/util/thread-stack.c
521
void thread_stack__sample(struct thread *thread, int cpu,
tools/perf/util/thread-stack.c
525
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
564
void thread_stack__sample_late(struct thread *thread, int cpu,
tools/perf/util/thread-stack.c
568
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
617
void thread_stack__br_sample(struct thread *thread, int cpu,
tools/perf/util/thread-stack.c
620
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
678
void thread_stack__br_sample_late(struct thread *thread, int cpu,
tools/perf/util/thread-stack.c
682
struct thread_stack *ts = thread__stack(thread, cpu);
tools/perf/util/thread-stack.c
814
static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
tools/perf/util/thread-stack.c
827
return thread_stack__call_return(thread, ts, --ts->cnt,
tools/perf/util/thread-stack.c
833
return thread_stack__call_return(thread, ts, --ts->cnt,
tools/perf/util/thread-stack.c
844
err = thread_stack__call_return(thread, ts,
tools/perf/util/thread-stack.c
851
return thread_stack__call_return(thread, ts, --ts->cnt,
tools/perf/util/thread-stack.c
886
static int thread_stack__pop_ks(struct thread *thread, struct thread_stack *ts,
tools/perf/util/thread-stack.c
894
err = thread_stack__call_return(thread, ts, --ts->cnt,
tools/perf/util/thread-stack.c
903
static int thread_stack__no_call_return(struct thread *thread,
tools/perf/util/thread-stack.c
922
err = thread_stack__pop_ks(thread, ts, sample, ref);
tools/perf/util/thread-stack.c
934
err = thread_stack__pop_ks(thread, ts, sample, ref);
tools/perf/util/thread-stack.c
951
err = thread_stack__call_return(thread, ts, --ts->cnt,
tools/perf/util/thread-stack.c
994
return thread_stack__call_return(thread, ts, --ts->cnt, tm, ref, false);
tools/perf/util/thread-stack.c
997
static int thread_stack__trace_begin(struct thread *thread,
tools/perf/util/thread-stack.h
104
int thread_stack__process(struct thread *thread, struct comm *comm,
tools/perf/util/thread-stack.h
14
struct thread;
tools/perf/util/thread-stack.h
54
struct thread *thread;
tools/perf/util/thread-stack.h
82
int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
tools/perf/util/thread-stack.h
85
void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr);
tools/perf/util/thread-stack.h
86
void thread_stack__sample(struct thread *thread, int cpu, struct ip_callchain *chain,
tools/perf/util/thread-stack.h
88
void thread_stack__sample_late(struct thread *thread, int cpu,
tools/perf/util/thread-stack.h
91
void thread_stack__br_sample(struct thread *thread, int cpu,
tools/perf/util/thread-stack.h
93
void thread_stack__br_sample_late(struct thread *thread, int cpu,
tools/perf/util/thread-stack.h
96
int thread_stack__flush(struct thread *thread);
tools/perf/util/thread-stack.h
97
void thread_stack__free(struct thread *thread);
tools/perf/util/thread-stack.h
98
size_t thread_stack__depth(struct thread *thread, int cpu);
tools/perf/util/thread.c
100
if (thread__maps(thread)) {
tools/perf/util/thread.c
101
maps__put(thread__maps(thread));
tools/perf/util/thread.c
102
thread__set_maps(thread, NULL);
tools/perf/util/thread.c
104
down_write(thread__namespaces_lock(thread));
tools/perf/util/thread.c
106
thread__namespaces_list(thread), list) {
tools/perf/util/thread.c
110
up_write(thread__namespaces_lock(thread));
tools/perf/util/thread.c
112
down_write(thread__comm_lock(thread));
tools/perf/util/thread.c
113
list_for_each_entry_safe(comm, tmp_comm, thread__comm_list(thread), list) {
tools/perf/util/thread.c
117
up_write(thread__comm_lock(thread));
tools/perf/util/thread.c
119
nsinfo__zput(RC_CHK_ACCESS(thread)->nsinfo);
tools/perf/util/thread.c
120
srccode_state_free(thread__srccode_state(thread));
tools/perf/util/thread.c
122
exit_rwsem(thread__namespaces_lock(thread));
tools/perf/util/thread.c
123
exit_rwsem(thread__comm_lock(thread));
tools/perf/util/thread.c
124
thread__free_stitch_list(thread);
tools/perf/util/thread.c
127
thread__priv_destructor(thread__priv(thread));
tools/perf/util/thread.c
129
RC_CHK_FREE(thread);
tools/perf/util/thread.c
132
struct thread *thread__get(struct thread *thread)
tools/perf/util/thread.c
134
struct thread *result;
tools/perf/util/thread.c
136
if (RC_CHK_GET(result, thread))
tools/perf/util/thread.c
137
refcount_inc(thread__refcnt(thread));
tools/perf/util/thread.c
142
void thread__put(struct thread *thread)
tools/perf/util/thread.c
144
if (thread && refcount_dec_and_test(thread__refcnt(thread)))
tools/perf/util/thread.c
145
thread__delete(thread);
tools/perf/util/thread.c
147
RC_CHK_PUT(thread);
tools/perf/util/thread.c
150
static struct namespaces *__thread__namespaces(struct thread *thread)
tools/perf/util/thread.c
152
if (list_empty(thread__namespaces_list(thread)))
tools/perf/util/thread.c
155
return list_first_entry(thread__namespaces_list(thread), struct namespaces, list);
tools/perf/util/thread.c
158
struct namespaces *thread__namespaces(struct thread *thread)
tools/perf/util/thread.c
162
down_read(thread__namespaces_lock(thread));
tools/perf/util/thread.c
163
ns = __thread__namespaces(thread);
tools/perf/util/thread.c
164
up_read(thread__namespaces_lock(thread));
tools/perf/util/thread.c
169
static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
tools/perf/util/thread.c
172
struct namespaces *new, *curr = __thread__namespaces(thread);
tools/perf/util/thread.c
178
list_add(&new->list, thread__namespaces_list(thread));
tools/perf/util/thread.c
193
int thread__set_namespaces(struct thread *thread, u64 timestamp,
tools/perf/util/thread.c
198
down_write(thread__namespaces_lock(thread));
tools/perf/util/thread.c
199
ret = __thread__set_namespaces(thread, timestamp, event);
tools/perf/util/thread.c
200
up_write(thread__namespaces_lock(thread));
tools/perf/util/thread.c
204
static struct comm *__thread__comm(struct thread *thread)
tools/perf/util/thread.c
205
SHARED_LOCKS_REQUIRED(thread__comm_lock(thread))
tools/perf/util/thread.c
207
if (list_empty(thread__comm_list(thread)))
tools/perf/util/thread.c
210
return list_first_entry(thread__comm_list(thread), struct comm, list);
tools/perf/util/thread.c
213
struct comm *thread__comm(struct thread *thread)
tools/perf/util/thread.c
217
down_read(thread__comm_lock(thread));
tools/perf/util/thread.c
218
res = __thread__comm(thread);
tools/perf/util/thread.c
219
up_read(thread__comm_lock(thread));
tools/perf/util/thread.c
223
struct comm *thread__exec_comm(struct thread *thread)
tools/perf/util/thread.c
227
down_read(thread__comm_lock(thread));
tools/perf/util/thread.c
228
list_for_each_entry(comm, thread__comm_list(thread), list) {
tools/perf/util/thread.c
230
up_read(thread__comm_lock(thread));
tools/perf/util/thread.c
236
up_read(thread__comm_lock(thread));
tools/perf/util/thread.c
244
if (second_last && !last->start && thread__pid(thread) == thread__tid(thread))
tools/perf/util/thread.c
25
int thread__init_maps(struct thread *thread, struct machine *machine)
tools/perf/util/thread.c
250
static int ____thread__set_comm(struct thread *thread, const char *str,
tools/perf/util/thread.c
252
EXCLUSIVE_LOCKS_REQUIRED(thread__comm_lock(thread))
tools/perf/util/thread.c
254
struct comm *new, *curr = __thread__comm(thread);
tools/perf/util/thread.c
257
if (!thread__comm_set(thread)) {
tools/perf/util/thread.c
265
list_add(&new->list, thread__comm_list(thread));
tools/perf/util/thread.c
268
unwind__flush_access(thread__maps(thread));
tools/perf/util/thread.c
27
pid_t pid = thread__pid(thread);
tools/perf/util/thread.c
271
thread__set_comm_set(thread, true);
tools/perf/util/thread.c
276
int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
tools/perf/util/thread.c
281
down_write(thread__comm_lock(thread));
tools/perf/util/thread.c
282
ret = ____thread__set_comm(thread, str, timestamp, exec);
tools/perf/util/thread.c
283
up_write(thread__comm_lock(thread));
tools/perf/util/thread.c
287
int thread__set_comm_from_proc(struct thread *thread)
tools/perf/util/thread.c
29
if (pid == thread__tid(thread) || pid == -1) {
tools/perf/util/thread.c
295
thread__pid(thread), thread__tid(thread)) >= (int)sizeof(path)) &&
tools/perf/util/thread.c
298
err = thread__set_comm(thread, comm, 0);
tools/perf/util/thread.c
30
thread__set_maps(thread, maps__new(machine));
tools/perf/util/thread.c
304
static const char *__thread__comm_str(struct thread *thread)
tools/perf/util/thread.c
305
SHARED_LOCKS_REQUIRED(thread__comm_lock(thread))
tools/perf/util/thread.c
307
const struct comm *comm = __thread__comm(thread);
tools/perf/util/thread.c
315
const char *thread__comm_str(struct thread *thread)
tools/perf/util/thread.c
319
down_read(thread__comm_lock(thread));
tools/perf/util/thread.c
32
struct thread *leader = machine__findnew_thread(machine, pid, pid);
tools/perf/util/thread.c
320
str = __thread__comm_str(thread);
tools/perf/util/thread.c
321
up_read(thread__comm_lock(thread));
tools/perf/util/thread.c
326
static int __thread__comm_len(struct thread *thread, const char *comm)
tools/perf/util/thread.c
330
thread__set_comm_len(thread, strlen(comm));
tools/perf/util/thread.c
332
return thread__var_comm_len(thread);
tools/perf/util/thread.c
336
int thread__comm_len(struct thread *thread)
tools/perf/util/thread.c
338
int comm_len = thread__var_comm_len(thread);
tools/perf/util/thread.c
343
down_read(thread__comm_lock(thread));
tools/perf/util/thread.c
344
comm = __thread__comm_str(thread);
tools/perf/util/thread.c
345
comm_len = __thread__comm_len(thread, comm);
tools/perf/util/thread.c
346
up_read(thread__comm_lock(thread));
tools/perf/util/thread.c
35
thread__set_maps(thread, maps__get(thread__maps(leader)));
tools/perf/util/thread.c
352
size_t thread__fprintf(struct thread *thread, FILE *fp)
tools/perf/util/thread.c
354
return fprintf(fp, "Thread %d %s\n", thread__tid(thread), thread__comm_str(thread)) +
tools/perf/util/thread.c
355
maps__fprintf(thread__maps(thread), fp);
tools/perf/util/thread.c
358
int thread__insert_map(struct thread *thread, struct map *map)
tools/perf/util/thread.c
362
ret = unwind__prepare_access(thread__maps(thread), map, NULL);
tools/perf/util/thread.c
366
return maps__fixup_overlap_and_insert(thread__maps(thread), map);
tools/perf/util/thread.c
384
static int thread__prepare_access(struct thread *thread)
tools/perf/util/thread.c
391
args.maps = thread__maps(thread);
tools/perf/util/thread.c
392
maps__for_each_map(thread__maps(thread), thread__prepare_access_maps_cb, &args);
tools/perf/util/thread.c
398
static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
tools/perf/util/thread.c
40
return thread__maps(thread) ? 0 : -1;
tools/perf/util/thread.c
401
if (thread__pid(thread) == thread__pid(parent))
tools/perf/util/thread.c
402
return thread__prepare_access(thread);
tools/perf/util/thread.c
404
if (maps__equal(thread__maps(thread), thread__maps(parent))) {
tools/perf/util/thread.c
406
thread__pid(thread), thread__tid(thread),
tools/perf/util/thread.c
411
return do_maps_clone ? maps__copy_from(thread__maps(thread), thread__maps(parent)) : 0;
tools/perf/util/thread.c
414
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
tools/perf/util/thread.c
421
err = thread__set_comm(thread, comm, timestamp);
tools/perf/util/thread.c
426
thread__set_ppid(thread, thread__tid(parent));
tools/perf/util/thread.c
427
return thread__clone_maps(thread, parent, do_maps_clone);
tools/perf/util/thread.c
43
struct thread *thread__new(pid_t pid, pid_t tid)
tools/perf/util/thread.c
430
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
tools/perf/util/thread.c
443
thread__find_symbol(thread, cpumodes[i], addr, al);
tools/perf/util/thread.c
445
thread__find_map(thread, cpumodes[i], addr, al);
tools/perf/util/thread.c
46
RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
tools/perf/util/thread.c
47
struct thread *thread;
tools/perf/util/thread.c
485
uint16_t thread__e_machine(struct thread *thread, struct machine *machine, uint32_t *e_flags)
tools/perf/util/thread.c
488
uint16_t e_machine = RC_CHK_ACCESS(thread)->e_machine;
tools/perf/util/thread.c
49
if (ADD_RC_CHK(thread, _thread) != NULL) {
tools/perf/util/thread.c
498
*e_flags = thread__e_flags(thread);
tools/perf/util/thread.c
503
struct maps *maps = thread__maps(thread);
tools/perf/util/thread.c
507
tid = thread__tid(thread);
tools/perf/util/thread.c
508
pid = thread__pid(thread);
tools/perf/util/thread.c
510
struct thread *parent = machine__findnew_thread(machine, pid, pid);
tools/perf/util/thread.c
520
maps__for_each_map(thread__maps(thread), thread__e_machine_callback, &args);
tools/perf/util/thread.c
53
thread__set_pid(thread, pid);
tools/perf/util/thread.c
54
thread__set_tid(thread, tid);
tools/perf/util/thread.c
543
thread__set_e_machine(thread, e_machine);
tools/perf/util/thread.c
544
thread__set_e_flags(thread, local_e_flags);
tools/perf/util/thread.c
55
thread__set_ppid(thread, -1);
tools/perf/util/thread.c
554
struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
tools/perf/util/thread.c
556
if (thread__pid(thread) == thread__tid(thread))
tools/perf/util/thread.c
557
return thread__get(thread);
tools/perf/util/thread.c
559
if (thread__pid(thread) == -1)
tools/perf/util/thread.c
56
thread__set_cpu(thread, -1);
tools/perf/util/thread.c
562
return machine__find_thread(machine, thread__pid(thread), thread__pid(thread));
tools/perf/util/thread.c
565
int thread__memcpy(struct thread *thread, struct machine *machine,
tools/perf/util/thread.c
57
thread__set_guest_cpu(thread, -1);
tools/perf/util/thread.c
577
if (!thread__find_map(thread, cpumode, ip, &al)) {
tools/perf/util/thread.c
58
thread__set_e_machine(thread, EM_NONE);
tools/perf/util/thread.c
59
thread__set_lbr_stitch_enable(thread, false);
tools/perf/util/thread.c
598
void thread__free_stitch_list(struct thread *thread)
tools/perf/util/thread.c
60
INIT_LIST_HEAD(thread__namespaces_list(thread));
tools/perf/util/thread.c
600
struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
tools/perf/util/thread.c
61
INIT_LIST_HEAD(thread__comm_list(thread));
tools/perf/util/thread.c
62
init_rwsem(thread__namespaces_lock(thread));
tools/perf/util/thread.c
621
free(thread__lbr_stitch(thread));
tools/perf/util/thread.c
622
thread__set_lbr_stitch(thread, NULL);
tools/perf/util/thread.c
63
init_rwsem(thread__comm_lock(thread));
tools/perf/util/thread.c
70
list_add(&comm->list, thread__comm_list(thread));
tools/perf/util/thread.c
71
refcount_set(thread__refcnt(thread), 1);
tools/perf/util/thread.c
73
RC_CHK_ACCESS(thread)->nsinfo = nsinfo__new(pid);
tools/perf/util/thread.c
74
srccode_state_init(thread__srccode_state(thread));
tools/perf/util/thread.c
77
return thread;
tools/perf/util/thread.c
80
thread__delete(thread);
tools/perf/util/thread.c
93
void thread__delete(struct thread *thread)
tools/perf/util/thread.c
98
thread_stack__free(thread);
tools/perf/util/thread.h
102
int __thread__set_comm(struct thread *thread, const char *comm, u64 timestamp,
tools/perf/util/thread.h
104
static inline int thread__set_comm(struct thread *thread, const char *comm,
tools/perf/util/thread.h
107
return __thread__set_comm(thread, comm, timestamp, false);
tools/perf/util/thread.h
110
int thread__set_comm_from_proc(struct thread *thread);
tools/perf/util/thread.h
112
int thread__comm_len(struct thread *thread);
tools/perf/util/thread.h
113
struct comm *thread__comm(struct thread *thread);
tools/perf/util/thread.h
114
struct comm *thread__exec_comm(struct thread *thread);
tools/perf/util/thread.h
115
const char *thread__comm_str(struct thread *thread);
tools/perf/util/thread.h
116
int thread__insert_map(struct thread *thread, struct map *map);
tools/perf/util/thread.h
117
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone);
tools/perf/util/thread.h
118
size_t thread__fprintf(struct thread *thread, FILE *fp);
tools/perf/util/thread.h
120
struct thread *thread__main_thread(struct machine *machine, struct thread *thread);
tools/perf/util/thread.h
122
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
tools/perf/util/thread.h
124
struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
tools/perf/util/thread.h
127
struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
tools/perf/util/thread.h
129
struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
tools/perf/util/thread.h
132
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
tools/perf/util/thread.h
135
int thread__memcpy(struct thread *thread, struct machine *machine,
tools/perf/util/thread.h
138
static inline struct maps *thread__maps(struct thread *thread)
tools/perf/util/thread.h
140
return RC_CHK_ACCESS(thread)->maps;
tools/perf/util/thread.h
143
static inline void thread__set_maps(struct thread *thread, struct maps *maps)
tools/perf/util/thread.h
145
RC_CHK_ACCESS(thread)->maps = maps;
tools/perf/util/thread.h
148
static inline pid_t thread__pid(const struct thread *thread)
tools/perf/util/thread.h
150
return RC_CHK_ACCESS(thread)->pid_;
tools/perf/util/thread.h
153
static inline void thread__set_pid(struct thread *thread, pid_t pid_)
tools/perf/util/thread.h
155
RC_CHK_ACCESS(thread)->pid_ = pid_;
tools/perf/util/thread.h
158
static inline pid_t thread__tid(const struct thread *thread)
tools/perf/util/thread.h
160
return RC_CHK_ACCESS(thread)->tid;
tools/perf/util/thread.h
163
static inline void thread__set_tid(struct thread *thread, pid_t tid)
tools/perf/util/thread.h
165
RC_CHK_ACCESS(thread)->tid = tid;
tools/perf/util/thread.h
168
static inline pid_t thread__ppid(const struct thread *thread)
tools/perf/util/thread.h
170
return RC_CHK_ACCESS(thread)->ppid;
tools/perf/util/thread.h
173
static inline void thread__set_ppid(struct thread *thread, pid_t ppid)
tools/perf/util/thread.h
175
RC_CHK_ACCESS(thread)->ppid = ppid;
tools/perf/util/thread.h
178
static inline int thread__cpu(const struct thread *thread)
tools/perf/util/thread.h
180
return RC_CHK_ACCESS(thread)->cpu;
tools/perf/util/thread.h
183
static inline void thread__set_cpu(struct thread *thread, int cpu)
tools/perf/util/thread.h
185
RC_CHK_ACCESS(thread)->cpu = cpu;
tools/perf/util/thread.h
188
static inline int thread__guest_cpu(const struct thread *thread)
tools/perf/util/thread.h
190
return RC_CHK_ACCESS(thread)->guest_cpu;
tools/perf/util/thread.h
193
static inline void thread__set_guest_cpu(struct thread *thread, int guest_cpu)
tools/perf/util/thread.h
195
RC_CHK_ACCESS(thread)->guest_cpu = guest_cpu;
tools/perf/util/thread.h
198
static inline refcount_t *thread__refcnt(struct thread *thread)
tools/perf/util/thread.h
200
return &RC_CHK_ACCESS(thread)->refcnt;
tools/perf/util/thread.h
203
static inline void thread__set_exited(struct thread *thread, bool exited)
tools/perf/util/thread.h
205
RC_CHK_ACCESS(thread)->exited = exited;
tools/perf/util/thread.h
208
static inline bool thread__comm_set(const struct thread *thread)
tools/perf/util/thread.h
210
return RC_CHK_ACCESS(thread)->comm_set;
tools/perf/util/thread.h
213
static inline void thread__set_comm_set(struct thread *thread, bool set)
tools/perf/util/thread.h
215
RC_CHK_ACCESS(thread)->comm_set = set;
tools/perf/util/thread.h
218
static inline int thread__var_comm_len(const struct thread *thread)
tools/perf/util/thread.h
220
return RC_CHK_ACCESS(thread)->comm_len;
tools/perf/util/thread.h
223
static inline void thread__set_comm_len(struct thread *thread, int len)
tools/perf/util/thread.h
225
RC_CHK_ACCESS(thread)->comm_len = len;
tools/perf/util/thread.h
228
static inline struct list_head *thread__namespaces_list(struct thread *thread)
tools/perf/util/thread.h
230
return &RC_CHK_ACCESS(thread)->namespaces_list;
tools/perf/util/thread.h
233
static inline int thread__namespaces_list_empty(const struct thread *thread)
tools/perf/util/thread.h
235
return list_empty(&RC_CHK_ACCESS(thread)->namespaces_list);
tools/perf/util/thread.h
238
static inline struct rw_semaphore *thread__namespaces_lock(struct thread *thread)
tools/perf/util/thread.h
240
return &RC_CHK_ACCESS(thread)->namespaces_lock;
tools/perf/util/thread.h
243
static inline struct rw_semaphore *thread__comm_lock(struct thread *thread)
tools/perf/util/thread.h
245
return &RC_CHK_ACCESS(thread)->comm_lock;
tools/perf/util/thread.h
248
static inline struct list_head *thread__comm_list(struct thread *thread)
tools/perf/util/thread.h
249
SHARED_LOCKS_REQUIRED(thread__comm_lock(thread))
tools/perf/util/thread.h
251
return &RC_CHK_ACCESS(thread)->comm_list;
tools/perf/util/thread.h
254
static inline u64 thread__db_id(const struct thread *thread)
tools/perf/util/thread.h
256
return RC_CHK_ACCESS(thread)->db_id;
tools/perf/util/thread.h
259
static inline void thread__set_db_id(struct thread *thread, u64 db_id)
tools/perf/util/thread.h
261
RC_CHK_ACCESS(thread)->db_id = db_id;
tools/perf/util/thread.h
264
static inline void *thread__priv(struct thread *thread)
tools/perf/util/thread.h
266
return RC_CHK_ACCESS(thread)->priv;
tools/perf/util/thread.h
269
static inline void thread__set_priv(struct thread *thread, void *p)
tools/perf/util/thread.h
271
RC_CHK_ACCESS(thread)->priv = p;
tools/perf/util/thread.h
274
static inline struct thread_stack *thread__ts(struct thread *thread)
tools/perf/util/thread.h
276
return RC_CHK_ACCESS(thread)->ts;
tools/perf/util/thread.h
279
static inline void thread__set_ts(struct thread *thread, struct thread_stack *ts)
tools/perf/util/thread.h
281
RC_CHK_ACCESS(thread)->ts = ts;
tools/perf/util/thread.h
284
static inline struct nsinfo *thread__nsinfo(struct thread *thread)
tools/perf/util/thread.h
286
return RC_CHK_ACCESS(thread)->nsinfo;
tools/perf/util/thread.h
289
static inline struct srccode_state *thread__srccode_state(struct thread *thread)
tools/perf/util/thread.h
291
return &RC_CHK_ACCESS(thread)->srccode_state;
tools/perf/util/thread.h
294
static inline bool thread__filter(const struct thread *thread)
tools/perf/util/thread.h
296
return RC_CHK_ACCESS(thread)->filter;
tools/perf/util/thread.h
299
static inline void thread__set_filter(struct thread *thread, bool filter)
tools/perf/util/thread.h
301
RC_CHK_ACCESS(thread)->filter = filter;
tools/perf/util/thread.h
304
static inline int thread__filter_entry_depth(const struct thread *thread)
tools/perf/util/thread.h
306
return RC_CHK_ACCESS(thread)->filter_entry_depth;
tools/perf/util/thread.h
309
static inline void thread__set_filter_entry_depth(struct thread *thread, int depth)
tools/perf/util/thread.h
311
RC_CHK_ACCESS(thread)->filter_entry_depth = depth;
tools/perf/util/thread.h
314
uint16_t thread__e_machine(struct thread *thread, struct machine *machine, uint32_t *e_flags);
tools/perf/util/thread.h
316
static inline void thread__set_e_machine(struct thread *thread, uint16_t e_machine)
tools/perf/util/thread.h
318
RC_CHK_ACCESS(thread)->e_machine = e_machine;
tools/perf/util/thread.h
32
DECLARE_RC_STRUCT(thread) {
tools/perf/util/thread.h
321
static inline uint32_t thread__e_flags(const struct thread *thread)
tools/perf/util/thread.h
323
return RC_CHK_ACCESS(thread)->e_flags;
tools/perf/util/thread.h
326
static inline void thread__set_e_flags(struct thread *thread, uint32_t e_flags)
tools/perf/util/thread.h
328
RC_CHK_ACCESS(thread)->e_flags = e_flags;
tools/perf/util/thread.h
332
static inline bool thread__lbr_stitch_enable(const struct thread *thread)
tools/perf/util/thread.h
334
return RC_CHK_ACCESS(thread)->lbr_stitch_enable;
tools/perf/util/thread.h
337
static inline void thread__set_lbr_stitch_enable(struct thread *thread, bool en)
tools/perf/util/thread.h
339
RC_CHK_ACCESS(thread)->lbr_stitch_enable = en;
tools/perf/util/thread.h
342
static inline struct lbr_stitch *thread__lbr_stitch(struct thread *thread)
tools/perf/util/thread.h
344
return RC_CHK_ACCESS(thread)->lbr_stitch;
tools/perf/util/thread.h
347
static inline void thread__set_lbr_stitch(struct thread *thread, struct lbr_stitch *lbrs)
tools/perf/util/thread.h
349
RC_CHK_ACCESS(thread)->lbr_stitch = lbrs;
tools/perf/util/thread.h
352
static inline bool thread__is_filtered(struct thread *thread)
tools/perf/util/thread.h
355
!strlist__has_entry(symbol_conf.comm_list, thread__comm_str(thread))) {
tools/perf/util/thread.h
360
!intlist__has_entry(symbol_conf.pid_list, thread__pid(thread))) {
tools/perf/util/thread.h
365
!intlist__has_entry(symbol_conf.tid_list, thread__tid(thread))) {
tools/perf/util/thread.h
372
void thread__free_stitch_list(struct thread *thread);
tools/perf/util/thread.h
374
void thread__resolve(struct thread *thread, struct addr_location *al,
tools/perf/util/thread.h
81
struct thread *thread__new(pid_t pid, pid_t tid);
tools/perf/util/thread.h
82
int thread__init_maps(struct thread *thread, struct machine *machine);
tools/perf/util/thread.h
83
void thread__delete(struct thread *thread);
tools/perf/util/thread.h
87
struct thread *thread__get(struct thread *thread);
tools/perf/util/thread.h
88
void thread__put(struct thread *thread);
tools/perf/util/thread.h
90
static inline void __thread__zput(struct thread **thread)
tools/perf/util/thread.h
92
thread__put(*thread);
tools/perf/util/thread.h
93
*thread = NULL;
tools/perf/util/thread.h
96
#define thread__zput(thread) __thread__zput(&thread)
tools/perf/util/thread.h
98
struct namespaces *thread__namespaces(struct thread *thread);
tools/perf/util/thread.h
99
int thread__set_namespaces(struct thread *thread, u64 timestamp,
tools/perf/util/threads.c
109
struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created)
tools/perf/util/threads.c
112
struct thread *res = NULL;
tools/perf/util/threads.c
145
struct thread *old_value;
tools/perf/util/threads.c
154
void threads__remove(struct threads *threads, struct thread *thread)
tools/perf/util/threads.c
156
struct threads_table_entry *table = threads__table(threads, thread__tid(thread));
tools/perf/util/threads.c
157
struct thread *old_value;
tools/perf/util/threads.c
160
if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
tools/perf/util/threads.c
163
hashmap__delete(&table->shard, thread__tid(thread), /*old_key=*/NULL, &old_value);
tools/perf/util/threads.c
169
int (*fn)(struct thread *thread, void *data),
tools/perf/util/threads.c
179
int rc = fn((struct thread *)cur->pvalue, data);
tools/perf/util/threads.c
64
static struct thread *__threads_table_entry__get_last_match(struct threads_table_entry *table,
tools/perf/util/threads.c
67
struct thread *th, *res = NULL;
tools/perf/util/threads.c
78
struct thread *th)
tools/perf/util/threads.c
85
struct thread *th)
tools/perf/util/threads.c
92
struct thread *threads__find(struct threads *threads, pid_t tid)
tools/perf/util/threads.c
95
struct thread *res;
tools/perf/util/threads.h
17
struct thread *last_match;
tools/perf/util/threads.h
27
struct thread *threads__find(struct threads *threads, pid_t tid);
tools/perf/util/threads.h
28
struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created);
tools/perf/util/threads.h
30
void threads__remove(struct threads *threads, struct thread *thread);
tools/perf/util/threads.h
32
int (*fn)(struct thread *thread, void *data),
tools/perf/util/threads.h
8
struct thread;
tools/perf/util/tool_pmu.c
227
int pid = -1, idx = 0, thread = 0, nthreads, err = 0, old_errno;
tools/perf/util/tool_pmu.c
244
for (thread = 0; thread < nthreads; thread++) {
tools/perf/util/tool_pmu.c
246
pid = perf_thread_map__pid(threads, thread);
tools/perf/util/tool_pmu.c
267
FD(evsel, idx, thread) = fd;
tools/perf/util/tool_pmu.c
272
start_time = xyarray__entry(evsel->start_times, idx, thread);
tools/perf/util/tool_pmu.c
292
threads->err_thread = thread;
tools/perf/util/tool_pmu.c
296
while (--thread >= 0) {
tools/perf/util/tool_pmu.c
297
if (FD(evsel, idx, thread) >= 0)
tools/perf/util/tool_pmu.c
298
close(FD(evsel, idx, thread));
tools/perf/util/tool_pmu.c
299
FD(evsel, idx, thread) = -1;
tools/perf/util/tool_pmu.c
301
thread = nthreads;
tools/perf/util/tool_pmu.c
471
int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
tools/perf/util/tool_pmu.c
479
count = perf_counts(evsel->counts, cpu_map_idx, thread);
tools/perf/util/tool_pmu.c
481
old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
tools/perf/util/tool_pmu.c
497
if (cpu_map_idx == 0 && thread == 0) {
tools/perf/util/tool_pmu.c
516
if (cpu_map_idx == 0 && thread == 0)
tools/perf/util/tool_pmu.c
524
int fd = FD(evsel, cpu_map_idx, thread);
tools/perf/util/tool_pmu.c
526
start_time = xyarray__entry(evsel->start_times, cpu_map_idx, thread);
tools/perf/util/tool_pmu.c
536
if (thread == 0) {
tools/perf/util/tool_pmu.h
59
int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread);
tools/perf/util/trace-event.h
15
struct thread;
tools/perf/util/unwind-libdw.c
164
e->ms.thread = thread__get(al.thread);
tools/perf/util/unwind-libdw.c
194
if (!thread__find_map(ui->thread, PERF_RECORD_MISC_USER, addr, &al)) {
tools/perf/util/unwind-libdw.c
253
static bool libdw_set_initial_registers(Dwfl_Thread *thread, void *arg)
tools/perf/util/unwind-libdw.c
300
dwfl_thread_state_register_pc(thread, val);
tools/perf/util/unwind-libdw.c
302
ret = dwfl_thread_state_registers(thread, 0, max_dwarf_reg + 1, dwarf_regs);
tools/perf/util/unwind-libdw.c
343
struct thread *thread,
tools/perf/util/unwind-libdw.c
348
struct maps *maps = thread__maps(thread);
tools/perf/util/unwind-libdw.c
351
uint16_t e_machine = thread__e_machine(thread, machine, &e_flags);
tools/perf/util/unwind-libdw.c
367
.thread = thread,
tools/perf/util/unwind-libdw.c
403
dwfl_attach_state(dwfl, /*elf=*/NULL, thread__tid(thread), &callbacks,
tools/perf/util/unwind-libdw.c
407
err = dwfl_getthread_frames(dwfl, thread__tid(thread), frame_callback,
tools/perf/util/unwind-libdw.c
79
thread__find_symbol(ui->thread, PERF_RECORD_MISC_USER, ip, al);
tools/perf/util/unwind-libdw.h
10
struct thread;
tools/perf/util/unwind-libdw.h
18
struct thread *thread;
tools/perf/util/unwind-libunwind-local.c
346
maps__for_each_map(thread__maps(ui->thread), read_unwind_spec_eh_frame_maps_cb, &args);
tools/perf/util/unwind-libunwind-local.c
439
thread__find_map(ui->thread, PERF_RECORD_MISC_USER, ip, &al);
tools/perf/util/unwind-libunwind-local.c
587
perf_arch_reg_sp(thread__e_machine(ui->thread,
tools/perf/util/unwind-libunwind-local.c
660
static int entry(u64 ip, struct thread *thread,
tools/perf/util/unwind-libunwind-local.c
668
e.ms.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
tools/perf/util/unwind-libunwind-local.c
671
e.ms.thread = thread__get(al.thread);
tools/perf/util/unwind-libunwind-local.c
738
uint16_t e_machine = thread__e_machine(ui->thread, ui->machine, /*e_flags=*/NULL);
tools/perf/util/unwind-libunwind-local.c
757
WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
tools/perf/util/unwind-libunwind-local.c
758
addr_space = maps__addr_space(thread__maps(ui->thread));
tools/perf/util/unwind-libunwind-local.c
794
ret = ips[j] ? entry(ips[j], ui->thread, cb, arg) : 0;
tools/perf/util/unwind-libunwind-local.c
801
struct thread *thread,
tools/perf/util/unwind-libunwind-local.c
807
.thread = thread,
tools/perf/util/unwind-libunwind-local.c
808
.machine = maps__machine(thread__maps(thread)),
tools/perf/util/unwind-libunwind-local.c
98
struct thread *thread;
tools/perf/util/unwind-libunwind.c
83
struct thread *thread,
tools/perf/util/unwind-libunwind.c
87
const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(thread__maps(thread));
tools/perf/util/unwind-libunwind.c
90
return ops->get_entries(cb, arg, thread, data, max_stack, best_effort);
tools/perf/util/unwind.h
11
struct thread;
tools/perf/util/unwind.h
25
struct thread *thread,
tools/perf/util/unwind.h
36
struct thread *thread,
tools/perf/util/unwind.h
64
struct thread *thread __maybe_unused,
tools/perf/util/vdso.c
159
struct thread *thread)
tools/perf/util/vdso.c
166
maps__for_each_map(thread__maps(thread), machine__thread_dso_type_maps_cb, &args);
tools/perf/util/vdso.c
265
struct thread *thread,
tools/perf/util/vdso.c
271
dso_type = machine__thread_dso_type(machine, thread);
tools/perf/util/vdso.c
299
struct thread *thread)
tools/perf/util/vdso.c
304
dso_type = machine__thread_dso_type(machine, thread);
tools/perf/util/vdso.c
331
struct thread *thread)
tools/perf/util/vdso.c
344
dso = machine__find_vdso(machine, thread);
tools/perf/util/vdso.c
349
if (__machine__findnew_vdso_compat(machine, thread, vdso_info, &dso))
tools/perf/util/vdso.h
25
struct thread;
tools/perf/util/vdso.h
27
struct dso *machine__findnew_vdso(struct machine *machine, struct thread *thread);
tools/power/acpi/os_specific/service_layers/osunixxf.c
1248
pthread_t thread;
tools/power/acpi/os_specific/service_layers/osunixxf.c
1250
thread = pthread_self();
tools/power/acpi/os_specific/service_layers/osunixxf.c
1251
return (ACPI_CAST_PTHREAD_T(thread));
tools/power/acpi/os_specific/service_layers/osunixxf.c
1272
pthread_t thread;
tools/power/acpi/os_specific/service_layers/osunixxf.c
1276
pthread_create(&thread, NULL, (PTHREAD_CALLBACK) function, context);
tools/testing/selftests/alsa/pcm-test.c
29
pthread_t thread;
tools/testing/selftests/alsa/pcm-test.c
648
ret = pthread_create(&card->thread, NULL, card_thread, card);
tools/testing/selftests/alsa/pcm-test.c
657
ret = pthread_join(card->thread, &thread_ret);
tools/testing/selftests/arm64/gcs/libc-gcs.c
101
ret = pthread_join(thread, &thread_ret);
tools/testing/selftests/arm64/gcs/libc-gcs.c
92
pthread_t thread;
tools/testing/selftests/arm64/gcs/libc-gcs.c
96
ret = pthread_create(&thread, NULL, gcs_test_thread, NULL);
tools/testing/selftests/bpf/bench.c
456
static void set_thread_affinity(pthread_t thread, int cpu)
tools/testing/selftests/bpf/bench.c
463
err = pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset);
tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
117
struct thread *t = &threads[i];
tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
148
struct thread *t = &threads[(long)(input)];
tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
175
struct thread *t = &threads[(long)(input)];
tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
19
static struct thread *threads;
tools/testing/selftests/bpf/network_helpers.c
1219
r = pthread_create(&ctx->thread, NULL, traffic_monitor_thread, ctx);
tools/testing/selftests/bpf/network_helpers.c
1273
pthread_join(ctx->thread, NULL);
tools/testing/selftests/bpf/network_helpers.c
822
pthread_t thread;
tools/testing/selftests/bpf/prog_tests/recursive_attach.c
176
pthread_t thread;
tools/testing/selftests/bpf/prog_tests/recursive_attach.c
183
err = pthread_create(&thread, NULL,
tools/testing/selftests/bpf/prog_tests/recursive_attach.c
213
err = pthread_join(thread, NULL);
tools/testing/selftests/bpf/prog_tests/ringbuf.c
145
pthread_t thread;
tools/testing/selftests/bpf/prog_tests/ringbuf.c
291
err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000);
tools/testing/selftests/bpf/prog_tests/ringbuf.c
306
err = pthread_tryjoin_np(thread, (void **)&bg_ret);
tools/testing/selftests/bpf/prog_tests/ringbuf.c
329
err = pthread_tryjoin_np(thread, (void **)&bg_ret);
tools/testing/selftests/bpf/prog_tests/ringbuf.c
350
err = pthread_tryjoin_np(thread, (void **)&bg_ret);
tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
54
#define xpthread_create(thread, attr, func, arg) \
tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
56
int __ret = pthread_create((thread), (attr), (func), (arg)); \
tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
63
#define xpthread_join(thread, retval) \
tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
65
int __ret = pthread_join((thread), (retval)); \
tools/testing/selftests/bpf/prog_tests/tailcalls.c
1150
pthread_t thread;
tools/testing/selftests/bpf/prog_tests/tailcalls.c
1162
err = pthread_create(&thread, NULL, poke_update, call);
tools/testing/selftests/bpf/prog_tests/tailcalls.c
1188
ASSERT_OK(pthread_join(thread, NULL), "pthread_join");
tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
125
pthread_t thread[TEST_BASIC_THREAD_NUM];
tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
171
err = pthread_create(&thread[i], NULL, test_task_local_data_basic_thread, skel);
tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
178
pthread_join(thread[i], NULL);
tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
223
pthread_t thread[TEST_RACE_THREAD_NUM];
tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
249
err = pthread_create(&thread[i], NULL, test_task_local_data_race_thread,
tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
257
pthread_join(thread[i], &ret);
tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
178
err = pthread_create(&child->thread, NULL, child_thread, child);
tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
217
if (!child || !child->thread) {
tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
53
pthread_t thread;
tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
65
if (child->thread) {
tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
66
pthread_join(child->thread, NULL);
tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
67
child->thread = 0;
tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
602
pthread_t thread;
tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
604
return pthread_create(&thread, NULL, kick_kernel_cb, NULL);
tools/testing/selftests/cgroup/lib/cgroup_util.c
573
ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size)
tools/testing/selftests/cgroup/lib/cgroup_util.c
580
thread ? "thread-self" : "self", item);
tools/testing/selftests/cgroup/lib/cgroup_util.c
588
int proc_read_strstr(int pid, bool thread, const char *item, const char *needle)
tools/testing/selftests/cgroup/lib/cgroup_util.c
592
if (proc_read_text(pid, thread, item, buf, sizeof(buf)) < 0)
tools/testing/selftests/cgroup/lib/include/cgroup_util.h
91
extern ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size);
tools/testing/selftests/cgroup/lib/include/cgroup_util.h
92
extern int proc_read_strstr(int pid, bool thread, const char *item, const char *needle);
tools/testing/selftests/coredump/coredump_test_helpers.c
52
pthread_t thread;
tools/testing/selftests/coredump/coredump_test_helpers.c
56
pthread_create(&thread, NULL, do_nothing, NULL);
tools/testing/selftests/drivers/ntsync/ntsync.c
1004
ret = pthread_tryjoin_np(thread, NULL);
tools/testing/selftests/drivers/ntsync/ntsync.c
1036
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
1044
ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
tools/testing/selftests/drivers/ntsync/ntsync.c
1047
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
1055
ret = wait_for_thread(thread, 200);
tools/testing/selftests/drivers/ntsync/ntsync.c
1071
pthread_t thread;
tools/testing/selftests/drivers/ntsync/ntsync.c
1127
ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
tools/testing/selftests/drivers/ntsync/ntsync.c
1130
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
1136
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
1182
pthread_t thread;
tools/testing/selftests/drivers/ntsync/ntsync.c
1224
ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
tools/testing/selftests/drivers/ntsync/ntsync.c
1227
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
1233
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
735
static int wait_for_thread(pthread_t thread, unsigned int ms)
tools/testing/selftests/drivers/ntsync/ntsync.c
743
return pthread_timedjoin_np(thread, NULL, &timeout);
tools/testing/selftests/drivers/ntsync/ntsync.c
755
pthread_t thread;
tools/testing/selftests/drivers/ntsync/ntsync.c
780
ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
tools/testing/selftests/drivers/ntsync/ntsync.c
783
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
792
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
806
ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
tools/testing/selftests/drivers/ntsync/ntsync.c
809
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
816
ret = pthread_tryjoin_np(thread, NULL);
tools/testing/selftests/drivers/ntsync/ntsync.c
824
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
839
ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
tools/testing/selftests/drivers/ntsync/ntsync.c
842
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
850
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
856
ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
tools/testing/selftests/drivers/ntsync/ntsync.c
859
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
867
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
880
ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
tools/testing/selftests/drivers/ntsync/ntsync.c
883
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
891
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
901
ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
tools/testing/selftests/drivers/ntsync/ntsync.c
904
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
912
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
921
ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
tools/testing/selftests/drivers/ntsync/ntsync.c
924
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
930
ret = wait_for_thread(thread, 200);
tools/testing/selftests/drivers/ntsync/ntsync.c
948
pthread_t thread;
tools/testing/selftests/drivers/ntsync/ntsync.c
980
ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
tools/testing/selftests/drivers/ntsync/ntsync.c
983
ret = wait_for_thread(thread, 100);
tools/testing/selftests/drivers/ntsync/ntsync.c
991
ret = pthread_tryjoin_np(thread, NULL);
tools/testing/selftests/futex/functional/futex_wait_timeout.c
131
pthread_t thread;
tools/testing/selftests/futex/functional/futex_wait_timeout.c
136
pthread_create(&thread, NULL, get_pi_lock, NULL);
tools/testing/selftests/kvm/hardware_disable_test.c
58
static inline void check_create_thread(pthread_t *thread, pthread_attr_t *attr,
tools/testing/selftests/kvm/hardware_disable_test.c
63
r = pthread_create(thread, attr, f, arg);
tools/testing/selftests/kvm/hardware_disable_test.c
67
static inline void check_set_affinity(pthread_t thread, cpu_set_t *cpu_set)
tools/testing/selftests/kvm/hardware_disable_test.c
71
r = pthread_setaffinity_np(thread, sizeof(cpu_set_t), cpu_set);
tools/testing/selftests/kvm/hardware_disable_test.c
75
static inline void check_join(pthread_t thread, void **retval)
tools/testing/selftests/kvm/hardware_disable_test.c
79
r = pthread_join(thread, retval);
tools/testing/selftests/kvm/lib/memstress.c
26
pthread_t thread;
tools/testing/selftests/kvm/lib/memstress.c
301
pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
tools/testing/selftests/kvm/lib/memstress.c
319
pthread_join(vcpu_threads[i].thread, NULL);
tools/testing/selftests/kvm/s390/memop.c
676
pthread_t thread;
tools/testing/selftests/kvm/s390/memop.c
681
pthread_create(&thread, NULL, run_guest, &t.vcpu);
tools/testing/selftests/kvm/s390/memop.c
704
pthread_join(thread, NULL);
tools/testing/selftests/kvm/steal_time.c
441
pthread_t thread;
tools/testing/selftests/kvm/steal_time.c
488
pthread_create(&thread, &attr, do_steal_time, NULL);
tools/testing/selftests/kvm/steal_time.c
492
pthread_join(thread, NULL);
tools/testing/selftests/kvm/x86/hyperv_ipi.c
225
static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
tools/testing/selftests/kvm/x86/hyperv_ipi.c
230
r = pthread_cancel(thread);
tools/testing/selftests/kvm/x86/hyperv_ipi.c
234
r = pthread_join(thread, &retval);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
562
static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
567
r = pthread_cancel(thread);
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
571
r = pthread_join(thread, &retval);
tools/testing/selftests/kvm/x86/recalc_apic_map_test.c
40
pthread_t thread;
tools/testing/selftests/kvm/x86/recalc_apic_map_test.c
60
TEST_ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0);
tools/testing/selftests/kvm/x86/recalc_apic_map_test.c
68
TEST_ASSERT_EQ(pthread_cancel(thread), 0);
tools/testing/selftests/kvm/x86/recalc_apic_map_test.c
69
TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);
tools/testing/selftests/kvm/x86/sync_regs_test.c
162
pthread_t thread;
tools/testing/selftests/kvm/x86/sync_regs_test.c
184
TEST_ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0);
tools/testing/selftests/kvm/x86/sync_regs_test.c
202
TEST_ASSERT_EQ(pthread_cancel(thread), 0);
tools/testing/selftests/kvm/x86/sync_regs_test.c
203
TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);
tools/testing/selftests/kvm/x86/xapic_ipi_test.c
231
static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
tools/testing/selftests/kvm/x86/xapic_ipi_test.c
236
r = pthread_cancel(thread);
tools/testing/selftests/kvm/x86/xapic_ipi_test.c
241
r = pthread_join(thread, &retval);
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
438
pthread_t thread;
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
897
ret = pthread_create(&thread, NULL, &juggle_shinfo_state, (void *)vm);
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
952
ret = pthread_cancel(thread);
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
955
ret = pthread_join(thread, 0);
tools/testing/selftests/landlock/audit_test.c
219
TEST_F(audit, thread)
tools/testing/selftests/landlock/audit_test.c
227
pthread_t thread;
tools/testing/selftests/landlock/audit_test.c
244
ASSERT_EQ(0, pthread_create(&thread, NULL, thread_audit_test,
tools/testing/selftests/landlock/audit_test.c
271
ASSERT_EQ(0, pthread_join(thread, NULL));
tools/testing/selftests/mm/hmm-tests.c
1411
pthread_t thread;
tools/testing/selftests/mm/hmm-tests.c
1435
rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
tools/testing/selftests/mm/hmm-tests.c
1452
pthread_join(thread, &ret);
tools/testing/selftests/mm/uffd-unit-tests.c
284
pthread_t thread;
tools/testing/selftests/mm/uffd-unit-tests.c
292
if (pthread_create(&thread, NULL, fork_event_consumer, &args))
tools/testing/selftests/mm/uffd-unit-tests.c
328
if (pthread_join(thread, NULL))
tools/testing/selftests/namespaces/ns_active_ref_test.c
2178
pthread_t thread;
tools/testing/selftests/namespaces/ns_active_ref_test.c
2196
ret = pthread_create(&thread, NULL, thread_create_namespace, &info);
tools/testing/selftests/namespaces/ns_active_ref_test.c
2205
pthread_join(thread, NULL);
tools/testing/selftests/namespaces/ns_active_ref_test.c
2237
ASSERT_EQ(pthread_join(thread, NULL), 0);
tools/testing/selftests/namespaces/ns_active_ref_test.c
2262
pthread_t thread;
tools/testing/selftests/namespaces/ns_active_ref_test.c
2280
ret = pthread_create(&thread, NULL, thread_create_namespace, &info);
tools/testing/selftests/namespaces/ns_active_ref_test.c
2289
pthread_join(thread, NULL);
tools/testing/selftests/namespaces/ns_active_ref_test.c
2320
pthread_join(thread, NULL);
tools/testing/selftests/pidfd/pidfd_info_test.c
240
pthread_t thread;
tools/testing/selftests/pidfd/pidfd_info_test.c
258
if (pthread_create(&thread, NULL, pidfd_info_pause_thread, &ipc_sockets[1]))
tools/testing/selftests/pidfd/pidfd_info_test.c
418
pthread_t thread;
tools/testing/selftests/pidfd/pidfd_info_test.c
436
if (pthread_create(&thread, NULL, pidfd_info_thread_exec, &ipc_sockets[1]))
tools/testing/selftests/pidfd/pidfd_info_test.c
567
pthread_t thread;
tools/testing/selftests/pidfd/pidfd_info_test.c
585
if (pthread_create(&thread, NULL, pidfd_info_thread_exec_sane, &ipc_sockets[1]))
tools/testing/selftests/pidfd/pidfd_test.c
127
if (pthread_create(&thread, NULL, send_signal_worker,
tools/testing/selftests/pidfd/pidfd_test.c
131
if (pthread_join(thread, &thread_res))
tools/testing/selftests/pidfd/pidfd_test.c
90
pthread_t thread;
tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c
54
pthread_t thread;
tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c
68
FAIL_IF(pthread_create(&thread, NULL, dscr_explicit_lockstep_thread, (void *)semaphores));
tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c
82
FAIL_IF(pthread_join(thread, NULL));
tools/testing/selftests/powerpc/tm/tm-tmspr.c
104
thread = malloc(thread_num * sizeof(pthread_t));
tools/testing/selftests/powerpc/tm/tm-tmspr.c
105
if (thread == NULL)
tools/testing/selftests/powerpc/tm/tm-tmspr.c
110
if (pthread_create(&thread[i], NULL, (void *)tfiar_tfhar,
tools/testing/selftests/powerpc/tm/tm-tmspr.c
116
if (pthread_create(&thread[i], NULL, (void *)texasr, (void *)i))
tools/testing/selftests/powerpc/tm/tm-tmspr.c
121
if (pthread_join(thread[i], NULL) != 0)
tools/testing/selftests/powerpc/tm/tm-tmspr.c
125
free(thread);
tools/testing/selftests/powerpc/tm/tm-tmspr.c
94
pthread_t *thread;
tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
100
if (!thread)
tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
104
pthread_create(&thread[i], NULL, &worker, NULL);
tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
107
pthread_join(thread[i], NULL);
tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
109
free(thread);
tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
91
pthread_t *thread;
tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
99
thread = malloc(sizeof(pthread_t)*threads);
tools/testing/selftests/ptrace/vmaccess.c
32
pthread_create(&pt, NULL, thread, NULL);
tools/testing/selftests/ptrace/vmaccess.c
53
pthread_create(&pt, NULL, thread, NULL);
tools/testing/selftests/seccomp/seccomp_bpf.c
4064
pthread_t thread;
tools/testing/selftests/seccomp/seccomp_bpf.c
4089
if (pthread_create(&thread, NULL, do_thread, NULL) ||
tools/testing/selftests/seccomp/seccomp_bpf.c
4090
pthread_join(thread, NULL))
tools/testing/selftests/seccomp/seccomp_bpf.c
4093
if (pthread_create(&thread, NULL, do_thread, NULL) ||
tools/testing/selftests/seccomp/seccomp_bpf.c
4094
pthread_join(thread, NULL))
tools/testing/selftests/seccomp/seccomp_bpf.c
826
pthread_t thread;
tools/testing/selftests/seccomp/seccomp_bpf.c
869
ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)false));
tools/testing/selftests/seccomp/seccomp_bpf.c
870
ASSERT_EQ(0, pthread_join(thread, &status));
tools/testing/selftests/seccomp/seccomp_bpf.c
874
ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)true));
tools/testing/selftests/seccomp/seccomp_bpf.c
875
ASSERT_EQ(0, pthread_join(thread, &status));
tools/testing/selftests/timens/clock_nanosleep.c
47
pthread_t thread;
tools/testing/selftests/timens/clock_nanosleep.c
71
ret = pthread_create(&thread, NULL, call_nanosleep, &args);
tools/testing/selftests/timens/clock_nanosleep.c
84
pthread_kill(thread, SIGUSR1);
tools/testing/selftests/timens/clock_nanosleep.c
95
pthread_kill(thread, SIGUSR2);
tools/testing/selftests/timens/clock_nanosleep.c
96
pthread_join(thread, NULL);
tools/testing/selftests/timers/posix_timers.c
258
static void check_sig_ign(int thread)
tools/testing/selftests/timers/posix_timers.c
269
if (thread) {
tools/testing/selftests/timers/posix_timers.c
291
if (thread) {
tools/testing/selftests/timers/posix_timers.c
314
if (thread) {
tools/testing/selftests/timers/posix_timers.c
335
if (!thread) {
tools/testing/selftests/timers/threadtest.c
129
void *(*thread)(void *) = shared_thread;
tools/testing/selftests/timers/threadtest.c
144
thread = independent_thread;
tools/testing/selftests/timers/threadtest.c
170
pthread_create(&pth[i], 0, thread, 0);
tools/testing/selftests/ublk/kublk.c
1159
pthread_create(&tinfo[i].thread, NULL,
tools/testing/selftests/ublk/kublk.c
1191
pthread_join(tinfo[i].thread, &thread_ret);
tools/testing/selftests/ublk/kublk.c
956
pthread_t thread;
tools/testing/selftests/x86/fsgsbase.c
571
pthread_t thread;
tools/testing/selftests/x86/fsgsbase.c
613
if (pthread_create(&thread, 0, threadproc, 0) != 0)
tools/testing/selftests/x86/fsgsbase.c
654
if (pthread_join(thread, NULL) != 0)
tools/testing/selftests/x86/ldt_gdt.c
521
pthread_t thread;
tools/testing/selftests/x86/ldt_gdt.c
549
if (pthread_create(&thread, 0, threadproc, 0) != 0)
tools/testing/selftests/x86/ldt_gdt.c
605
if (pthread_join(thread, NULL) != 0)
tools/testing/selftests/x86/sysret_ss_attrs.c
61
pthread_t thread;
tools/testing/selftests/x86/sysret_ss_attrs.c
62
if (pthread_create(&thread, 0, threadproc, 0) != 0)
tools/testing/selftests/x86/test_shadow_stack.c
487
pthread_t thread;
tools/testing/selftests/x86/test_shadow_stack.c
515
if (pthread_create(&thread, NULL, &uffd_thread, &uffd))
tools/testing/selftests/x86/test_shadow_stack.c
521
if (pthread_join(thread, &res))
tools/testing/selftests/x86/xstate.c
175
if (pthread_create(&finfo[i].thread, NULL, check_xstate, &finfo[i]))
tools/testing/selftests/x86/xstate.c
187
err = pthread_join(finfo[i].thread, &thread_retval);
tools/testing/selftests/x86/xstate.c
56
pthread_t thread;
tools/tracing/rtla/src/timerlat_aa.c
244
unsigned long long thread;
tools/tracing/rtla/src/timerlat_aa.c
249
tep_get_field_val(s, event, "context", record, &thread, 1);
tools/tracing/rtla/src/timerlat_aa.c
250
if (!thread)
tools/tracing/rtla/src/timerlat_hist.c
110
data->hist[cpu].thread = calloc(1, sizeof(*data->hist->thread) * (entries + 1));
tools/tracing/rtla/src/timerlat_hist.c
111
if (!data->hist[cpu].thread)
tools/tracing/rtla/src/timerlat_hist.c
159
hist = data->hist[cpu].thread;
tools/tracing/rtla/src/timerlat_hist.c
219
data->hist[j].thread[i] = value_thread[j];
tools/tracing/rtla/src/timerlat_hist.c
23
int *thread;
tools/tracing/rtla/src/timerlat_hist.c
276
data->hist[i].thread[data->entries] = value_thread[i];
tools/tracing/rtla/src/timerlat_hist.c
63
if (data->hist[cpu].thread)
tools/tracing/rtla/src/timerlat_hist.c
64
free(data->hist[cpu].thread);
tools/tracing/rtla/src/timerlat_hist.c
642
total += data->hist[cpu].thread[bucket];
tools/tracing/rtla/src/timerlat_hist.c
644
data->hist[cpu].thread[bucket]);
tools/tracing/rtla/src/timerlat_hist.c
679
data->hist[cpu].thread[data->entries]);
tools/tracing/rtla/src/timerlat_top.c
131
unsigned long long thread,
tools/tracing/rtla/src/timerlat_top.c
141
if (!thread) {
tools/tracing/rtla/src/timerlat_top.c
147
} else if (thread == 1) {
tools/tracing/rtla/src/timerlat_top.c
170
unsigned long long latency, thread;
tools/tracing/rtla/src/timerlat_top.c
177
tep_get_field_val(s, event, "context", record, &thread, 1);
tools/tracing/rtla/src/timerlat_top.c
180
timerlat_top_update(top, cpu, thread, latency);
tools/usb/ffs-test.c
334
struct thread;
tools/usb/ffs-test.c
336
static ssize_t read_wrap(struct thread *t, void *buf, size_t nbytes);
tools/usb/ffs-test.c
337
static ssize_t write_wrap(struct thread *t, const void *buf, size_t nbytes);
tools/usb/ffs-test.c
338
static ssize_t ep0_consume(struct thread *t, const void *buf, size_t nbytes);
tools/usb/ffs-test.c
339
static ssize_t fill_in_buf(struct thread *t, void *buf, size_t nbytes);
tools/usb/ffs-test.c
340
static ssize_t empty_out_buf(struct thread *t, const void *buf, size_t nbytes);
tools/usb/ffs-test.c
347
ssize_t (*in)(struct thread *, void *, size_t);
tools/usb/ffs-test.c
350
ssize_t (*out)(struct thread *, const void *, size_t);
tools/usb/ffs-test.c
379
static void init_thread(struct thread *t)
tools/usb/ffs-test.c
390
struct thread *t = arg;
tools/usb/ffs-test.c
422
struct thread *t = arg;
tools/usb/ffs-test.c
464
static void start_thread(struct thread *t)
tools/usb/ffs-test.c
472
static void join_thread(struct thread *t)
tools/usb/ffs-test.c
483
static ssize_t read_wrap(struct thread *t, void *buf, size_t nbytes)
tools/usb/ffs-test.c
488
static ssize_t write_wrap(struct thread *t, const void *buf, size_t nbytes)
tools/usb/ffs-test.c
501
fill_in_buf(struct thread *ignore, void *buf, size_t nbytes)
tools/usb/ffs-test.c
526
empty_out_buf(struct thread *ignore, const void *buf, size_t nbytes)
tools/usb/ffs-test.c
588
ep0_consume(struct thread *ignore, const void *buf, size_t nbytes)
tools/usb/ffs-test.c
626
static void ep0_init(struct thread *t, bool legacy_descriptors)
tools/usb/testusb.c
124
pthread_t thread;
tools/usb/testusb.c
505
status = pthread_create (&entry->thread, 0, handle_testdev, entry);
tools/usb/testusb.c
528
if (pthread_join (entry->thread, &retval))