Symbol: rseq

fs/binfmt_elf.c:289: NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end));
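
This auxiliary-vector entry is how the kernel advertises the size of the rseq feature set it supports to each new ELF process; offsetof(struct rseq, end) is everything up to the flexible end[] marker of the kernel's struct rseq (the same expression drives the allocation alignment at include/linux/rseq.h:158 below). A minimal sketch of reading it from userspace with glibc's getauxval(); the fallback #define is the value from the uapi <linux/auxvec.h> header, in case the installed headers predate it:

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef AT_RSEQ_FEATURE_SIZE
    #define AT_RSEQ_FEATURE_SIZE 27    /* from uapi <linux/auxvec.h> */
    #endif

    int main(void)
    {
        /* Zero means the entry is absent, i.e. the kernel predates
         * extensible rseq. */
        unsigned long fs = getauxval(AT_RSEQ_FEATURE_SIZE);

        printf("rseq feature size: %lu bytes\n", fs);
        return 0;
    }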
include/linux/rseq.h:16: if (current->rseq.event.slowpath)
include/linux/rseq.h:20: if (current->rseq.event.sched_switch & current->rseq.event.has_rseq)
include/linux/rseq.h:35: if (current->rseq.event.has_rseq & current->rseq.event.user_irq)
include/linux/rseq.h:38: if (current->rseq.event.has_rseq)
include/linux/rseq.h:51: struct rseq_event *ev = &t->rseq.event;
include/linux/rseq.h:69: t->rseq.event.sched_switch = true;
include/linux/rseq.h:84: t->rseq.event.ids_changed = true;
include/linux/rseq.h:90: if (current->rseq.event.has_rseq) {
include/linux/rseq.h:91: current->rseq.event.ids_changed = true;
include/linux/rseq.h:92: current->rseq.event.sched_switch = true;
include/linux/rseq.h:116: current->rseq.event.sched_switch)
include/linux/rseq.h:122: memset(&t->rseq, 0, sizeof(t->rseq));
include/linux/rseq.h:123: t->rseq.ids.cpu_id = RSEQ_CPU_ID_UNINITIALIZED;
include/linux/rseq.h:146: t->rseq = current->rseq;
include/linux/rseq.h:158: return 1U << get_count_order(offsetof(struct rseq, end));
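
Every rseq.h hit above manipulates current->rseq.event, which reads like a set of one-bit flags (has_rseq, sched_switch, ids_changed, user_irq, slowpath) overlaid with whole-word accessors (event.all at kernel/rseq.c:296 and event.events at rseq_entry.h:671 below). A hedged reconstruction of that shape; the field widths and ordering are guesses, not the actual kernel layout in include/linux/rseq_types.h:

    #include <stdint.h>

    /* Illustrative sketch only: a bitfield/word union in the style the
     * references suggest. Names match the accesses in the listing; the
     * layout is assumed. */
    struct rseq_event {
        union {
            uint64_t all;                /* all state, cleared at once */
            struct {
                union {
                    uint32_t events;     /* just the event bits */
                    struct {
                        uint8_t sched_switch : 1;
                        uint8_t ids_changed  : 1;
                        uint8_t user_irq     : 1;
                    };
                };
                uint8_t has_rseq  : 1;
                uint8_t slowpath  : 1;
                uint8_t error     : 1;
                uint8_t fatal     : 1;
            };
        };
    };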
include/linux/rseq_entry.h:100: if (likely(!current->rseq.slice.state.granted))
include/linux/rseq_entry.h:108: if (IS_ENABLED(CONFIG_RSEQ_STATS) && t->rseq.slice.state.granted)
include/linux/rseq_entry.h:110: t->rseq.slice.state.granted = false;
include/linux/rseq_entry.h:118: struct rseq __user *rseq;
include/linux/rseq_entry.h:124: state = curr->rseq.slice.state;
include/linux/rseq_entry.h:125: state.enabled &= curr->rseq.event.user_irq;
include/linux/rseq_entry.h:129: rseq = curr->rseq.usrptr;
include/linux/rseq_entry.h:130: scoped_user_rw_access(rseq, efault) {
include/linux/rseq_entry.h:144: unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
include/linux/rseq_entry.h:149: unsafe_get_user(usr_ctrl.all, &rseq->slice_ctrl.all, efault);
include/linux/rseq_entry.h:156: unsafe_put_user(usr_ctrl.all, &rseq->slice_ctrl.all, efault);
include/linux/rseq_entry.h:161: curr->rseq.slice.state.granted = true;
include/linux/rseq_entry.h:163: curr->rseq.slice.expires = data_race(rseq_slice_ext_nsecs) + ktime_get_mono_fast_ns();
include/linux/rseq_entry.h:231: current->rseq.event.user_irq = true;
include/linux/rseq_entry.h:318: if (unlikely(usig != t->rseq.sig))
include/linux/rseq_entry.h:324: if (unlikely(!t->rseq.event.user_irq))
include/linux/rseq_entry.h:327: unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
include/linux/rseq_entry.h:332: unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
include/linux/rseq_entry.h:341: t->rseq.event.fatal = true;
include/linux/rseq_entry.h:352: struct rseq __user *rseq = t->rseq.usrptr;
include/linux/rseq_entry.h:359: node_id = t->rseq.ids.cpu_id != RSEQ_CPU_ID_UNINITIALIZED ?
include/linux/rseq_entry.h:360: cpu_to_node(t->rseq.ids.cpu_id) : 0;
include/linux/rseq_entry.h:362: scoped_user_read_access(rseq, efault) {
include/linux/rseq_entry.h:363: unsafe_get_user(cpu_id, &rseq->cpu_id_start, efault);
include/linux/rseq_entry.h:364: if (cpu_id != t->rseq.ids.cpu_id)
include/linux/rseq_entry.h:366: unsafe_get_user(uval, &rseq->cpu_id, efault);
include/linux/rseq_entry.h:369: unsafe_get_user(uval, &rseq->node_id, efault);
include/linux/rseq_entry.h:372: unsafe_get_user(uval, &rseq->mm_cid, efault);
include/linux/rseq_entry.h:373: if (uval != t->rseq.ids.mm_cid)
include/linux/rseq_entry.h:378: t->rseq.event.fatal = true;
include/linux/rseq_entry.h:403: t->rseq.event.fatal = true;
include/linux/rseq_entry.h:445: if (unlikely(usig != t->rseq.sig))
include/linux/rseq_entry.h:449: unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
include/linux/rseq_entry.h:455: unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
include/linux/rseq_entry.h:464: t->rseq.event.fatal = true;
include/linux/rseq_entry.h:495: struct rseq __user *rseq = t->rseq.usrptr;
include/linux/rseq_entry.h:502: scoped_user_rw_access(rseq, efault) {
include/linux/rseq_entry.h:503: unsafe_put_user(ids->cpu_id, &rseq->cpu_id_start, efault);
include/linux/rseq_entry.h:504: unsafe_put_user(ids->cpu_id, &rseq->cpu_id, efault);
include/linux/rseq_entry.h:505: unsafe_put_user(node_id, &rseq->node_id, efault);
include/linux/rseq_entry.h:506: unsafe_put_user(ids->mm_cid, &rseq->mm_cid, efault);
include/linux/rseq_entry.h:508: unsafe_get_user(*csaddr, &rseq->rseq_cs, efault);
include/linux/rseq_entry.h:513: unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
include/linux/rseq_entry.h:519: t->rseq.ids.cpu_cid = ids->cpu_cid;
include/linux/rseq_entry.h:546: if (likely(!t->rseq.event.user_irq))
include/linux/rseq_entry.h:605: if (likely(!t->rseq.event.ids_changed)) {
include/linux/rseq_entry.h:606: struct rseq __user *rseq = t->rseq.usrptr;
include/linux/rseq_entry.h:613: scoped_user_rw_access(rseq, efault) {
include/linux/rseq_entry.h:614: unsafe_get_user(csaddr, &rseq->rseq_cs, efault);
include/linux/rseq_entry.h:619: unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
include/linux/rseq_entry.h:664: if (unlikely((t->rseq.event.sched_switch))) {
include/linux/rseq_entry.h:671: t->rseq.event.events = 0;
include/linux/rseq_entry.h:697: current->rseq.event.slowpath = true;
include/linux/rseq_entry.h:719: struct rseq_event *ev = &current->rseq.event;
include/linux/rseq_entry.h:732: struct rseq_event *ev = &current->rseq.event;
include/linux/rseq_entry.h:749: struct rseq_event *ev = &current->rseq.event;
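
A recurring pattern in the rseq_entry.h hits is the kernel clearing rseq->rseq_cs with put_user(0ULL, ...) and rejecting abort handlers whose preceding 4-byte signature does not match t->rseq.sig (lines 318 and 445). Both sides of that handshake belong to the rseq critical-section ABI: userspace publishes a struct rseq_cs descriptor through the rseq_cs field, and the kernel consults and clears it when it has to abort the section. A sketch of the userspace side, assuming the current uapi where rseq_cs is a plain __u64:

    #include <stdint.h>
    #include <linux/rseq.h>    /* struct rseq, struct rseq_cs */

    /* One descriptor per critical section, typically in .data.
     * start_ip .. start_ip + post_commit_offset is the restartable
     * region; abort_ip must be preceded in the text section by the
     * 4-byte signature passed to sys_rseq(), which is what the
     * usig != t->rseq.sig checks above verify before redirecting. */
    static struct rseq_cs cs_desc = {
        .version            = 0,
        .flags              = 0,
        .start_ip           = 0,    /* filled with real code addresses */
        .post_commit_offset = 0,
        .abort_ip           = 0,
    };

    /* Arm the section for the current thread; 'rs' is the thread's
     * registered rseq area (assumed obtained elsewhere, e.g. via
     * glibc's __rseq_offset). The kernel resets this field to zero
     * when it aborts or leaves the section, matching the
     * put_user(0ULL, ...) lines above. */
    static inline void rseq_arm(volatile struct rseq *rs)
    {
        rs->rseq_cs = (uint64_t)(uintptr_t)&cs_desc;
    }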
include/linux/rseq_types.h:10: struct rseq;
include/linux/rseq_types.h:111: struct rseq __user *usrptr;
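
rseq_types.h is where the kernel-side per-task container, struct rseq_data, lives (it is embedded into task_struct via the sched.h entry below). Collecting the member accesses across this listing gives a rough picture of its shape; this is an inference from the references here, not the actual definition:

    /* Inferred shape only; field types, widths and order are
     * assumptions. See include/linux/rseq_types.h for the real thing. */
    struct rseq_data {
        struct rseq __user  *usrptr;   /* registered user-space area */
        u32                 len;       /* registered length */
        u32                 sig;       /* abort-handler signature */
        struct rseq_event   event;     /* has_rseq, sched_switch, ... */
        struct rseq_ids     ids;       /* cpu_id, mm_cid, cpu_cid */
        struct rseq_slice   slice;     /* state, expires, yielded */
    };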
include/linux/sched.h:1409: struct rseq_data rseq;
include/linux/syscalls.h:66: struct rseq;
include/linux/syscalls.h:961: asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len,
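
syscalls.h:961 is the registration syscall's prototype: area, length, flags, signature. glibc 2.35+ registers an rseq area for every thread on its own, so a manual registration like the sketch below only succeeds where that is disabled or unavailable (otherwise the kernel reports the existing registration, per the checks in kernel/rseq.c below). The length 32 is the original fixed-size ABI, ORIG_RSEQ_SIZE in kernel/rseq.c:

    #define _GNU_SOURCE
    #include <linux/rseq.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define RSEQ_SIG 0x53053053    /* arbitrary, but fixed per process */

    /* The ABI requires a 32-byte aligned area, one per thread. */
    static __thread struct rseq rs __attribute__((aligned(32)));

    int main(void)
    {
        if (syscall(__NR_rseq, &rs, 32, 0, RSEQ_SIG)) {
            perror("rseq register");    /* e.g. EBUSY: already registered */
            return 1;
        }
        printf("running on cpu %u\n", rs.cpu_id);
        return 0;
    }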
include/trace/events/rseq.h:3: #define TRACE_SYSTEM rseq
include/trace/events/rseq.h:24: __entry->cpu_id = t->rseq.ids.cpu_id;
include/trace/events/rseq.h:26: __entry->mm_cid = t->rseq.ids.mm_cid;
kernel/ptrace.c:796: .rseq_abi_pointer = (u64)(uintptr_t)task->rseq.usrptr,
kernel/ptrace.c:797: .rseq_abi_size = task->rseq.len,
kernel/ptrace.c:798: .signature = task->rseq.sig,
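
These three assignments fill the record a tracer retrieves with PTRACE_GET_RSEQ_CONFIGURATION. A sketch of the tracer side; the local struct rseq_config is our own copy of the uapi layout (struct ptrace_rseq_configuration in <linux/ptrace.h>) to stay independent of header versions, and the tracee must already be attached and stopped:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    #ifndef PTRACE_GET_RSEQ_CONFIGURATION
    #define PTRACE_GET_RSEQ_CONFIGURATION 0x420f
    #endif

    /* Mirrors uapi struct ptrace_rseq_configuration. */
    struct rseq_config {
        uint64_t rseq_abi_pointer;
        uint32_t rseq_abi_size;
        uint32_t signature;
        uint32_t flags;
        uint32_t pad;
    };

    /* addr carries the buffer size, data the buffer; on success the
     * call returns the number of bytes written. */
    static int dump_rseq_config(pid_t pid)
    {
        struct rseq_config conf;

        if (ptrace(PTRACE_GET_RSEQ_CONFIGURATION, pid,
                   sizeof(conf), &conf) < 0)
            return -1;

        printf("rseq area %#llx, size %u, signature %#x\n",
               (unsigned long long)conf.rseq_abi_pointer,
               conf.rseq_abi_size, conf.signature);
        return 0;
    }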
kernel/rseq.c:246: struct rseq __user *urseq = t->rseq.usrptr;
kernel/rseq.c:295: event = t->rseq.event.sched_switch;
kernel/rseq.c:296: t->rseq.event.all &= evt_mask.all;
kernel/rseq.c:311: t->rseq.event.error = 0;
kernel/rseq.c:349: current->rseq.event.error = 0;
kernel/rseq.c:363: if (!t->rseq.event.has_rseq)
kernel/rseq.c:365: if (get_user(csaddr, &t->rseq.usrptr->rseq_cs))
kernel/rseq.c:410: SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig)
kernel/rseq.c:418: if (current->rseq.usrptr != rseq || !current->rseq.usrptr)
kernel/rseq.c:420: if (rseq_len != current->rseq.len)
kernel/rseq.c:422: if (current->rseq.sig != sig)
kernel/rseq.c:433: if (current->rseq.usrptr) {
kernel/rseq.c:439: if (current->rseq.usrptr != rseq || rseq_len != current->rseq.len)
kernel/rseq.c:441: if (current->rseq.sig != sig)
kernel/rseq.c:459: (rseq_len == ORIG_RSEQ_SIZE && !IS_ALIGNED((unsigned long)rseq, ORIG_RSEQ_SIZE)) ||
kernel/rseq.c:460: (rseq_len != ORIG_RSEQ_SIZE && (!IS_ALIGNED((unsigned long)rseq, rseq_alloc_align()) ||
kernel/rseq.c:461: rseq_len < offsetof(struct rseq, end))))
kernel/rseq.c:463: if (!access_ok(rseq, rseq_len))
kernel/rseq.c:473: scoped_user_write_access(rseq, efault) {
kernel/rseq.c:481: unsafe_put_user(0UL, &rseq->rseq_cs, efault);
kernel/rseq.c:482: unsafe_put_user(rseqfl, &rseq->flags, efault);
kernel/rseq.c:484: unsafe_put_user(RSEQ_CPU_ID_UNINITIALIZED, &rseq->cpu_id_start, efault);
kernel/rseq.c:485: unsafe_put_user(RSEQ_CPU_ID_UNINITIALIZED, &rseq->cpu_id, efault);
kernel/rseq.c:486: unsafe_put_user(0U, &rseq->node_id, efault);
kernel/rseq.c:487: unsafe_put_user(0U, &rseq->mm_cid, efault);
kernel/rseq.c:488: unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
kernel/rseq.c:495: current->rseq.usrptr = rseq;
kernel/rseq.c:496: current->rseq.len = rseq_len;
kernel/rseq.c:497: current->rseq.sig = sig;
kernel/rseq.c:500: current->rseq.slice.state.enabled = !!(rseqfl & RSEQ_CS_FLAG_SLICE_EXT_ENABLED);
kernel/rseq.c:508: current->rseq.event.has_rseq = true;
kernel/rseq.c:545: if (st->cookie == current && current->rseq.slice.state.granted) {
kernel/rseq.c:566: if ((unlikely(curr->rseq.slice.expires < ktime_get_mono_fast_ns()))) {
kernel/rseq.c:577: hrtimer_start(&st->timer, curr->rseq.slice.expires, HRTIMER_MODE_ABS_PINNED_HARD);
kernel/rseq.c:630: u32 __user *sctrl = &current->rseq.usrptr->slice_ctrl.all;
kernel/rseq.c:650: struct rseq_slice_ctrl ctrl = { .granted = curr->rseq.slice.state.granted };
kernel/rseq.c:677: if (!curr->rseq.event.sched_switch) {
kernel/rseq.c:683: curr->rseq.slice.yielded = 1;
kernel/rseq.c:693: curr->rseq.slice.state.granted = false;
kernel/rseq.c:694: if (put_user(0U, &curr->rseq.usrptr->slice_ctrl.all))
kernel/rseq.c:704: return current->rseq.slice.state.enabled ? PR_RSEQ_SLICE_EXT_ENABLE : 0;
kernel/rseq.c:714: if (!current->rseq.usrptr)
kernel/rseq.c:718: if (enable == !!current->rseq.slice.state.enabled)
kernel/rseq.c:721: if (get_user(rflags, &current->rseq.usrptr->flags))
kernel/rseq.c:724: if (current->rseq.slice.state.enabled)
kernel/rseq.c:735: if (put_user(rflags, &current->rseq.usrptr->flags))
kernel/rseq.c:738: current->rseq.slice.state.enabled = enable;
kernel/rseq.c:764: int yielded = !!current->rseq.slice.yielded;
kernel/rseq.c:766: current->rseq.slice.yielded = 0;
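
The comparisons at kernel/rseq.c:418-422 and 439-441 spell out the unregistration and re-registration contract: the pointer, length and signature must match the original registration exactly. Unregistering, continuing the registration sketch above:

    #include <linux/rseq.h>     /* RSEQ_FLAG_UNREGISTER */
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Must pass the exact area, length and signature used to register,
     * per the usrptr/len/sig checks in sys_rseq() above. */
    static int rseq_unregister(struct rseq *rs, uint32_t len, uint32_t sig)
    {
        return syscall(__NR_rseq, rs, len, RSEQ_FLAG_UNREGISTER, sig);
    }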
kernel/sys_ni.c:392: COND_SYSCALL(rseq);