rseq
NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end));
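User space typically discovers the kernel-supported rseq feature size through this auxv entry. A minimal sketch of that lookup follows; getauxval() and AT_RSEQ_FEATURE_SIZE are standard interfaces (<sys/auxv.h>, <linux/auxvec.h>), while the 20-byte fallback for kernels that predate the entry is an assumption made here.

#include <stdio.h>
#include <sys/auxv.h>

#ifndef AT_RSEQ_FEATURE_SIZE
#define AT_RSEQ_FEATURE_SIZE	27	/* from <linux/auxvec.h> */
#endif

int main(void)
{
	/* 0 means the kernel predates the entry; assume the original 20-byte feature area. */
	unsigned long fsz = getauxval(AT_RSEQ_FEATURE_SIZE);

	printf("rseq feature size: %lu\n", fsz ? fsz : 20UL);
	return 0;
}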
current->rseq.event.sched_switch)
memset(&t->rseq, 0, sizeof(t->rseq));
t->rseq.ids.cpu_id = RSEQ_CPU_ID_UNINITIALIZED;
t->rseq = current->rseq;
return 1U << get_count_order(offsetof(struct rseq, end));
if (current->rseq.event.slowpath)
if (current->rseq.event.sched_switch & current->rseq.event.has_rseq)
if (current->rseq.event.has_rseq & current->rseq.event.user_irq)
if (current->rseq.event.has_rseq)
struct rseq_event *ev = &t->rseq.event;
t->rseq.event.sched_switch = true;
t->rseq.event.ids_changed = true;
if (current->rseq.event.has_rseq) {
current->rseq.event.ids_changed = true;
current->rseq.event.sched_switch = true;
if (likely(!current->rseq.slice.state.granted))
if (IS_ENABLED(CONFIG_RSEQ_STATS) && t->rseq.slice.state.granted)
t->rseq.slice.state.granted = false;
struct rseq __user *rseq;
state = curr->rseq.slice.state;
state.enabled &= curr->rseq.event.user_irq;
rseq = curr->rseq.usrptr;
scoped_user_rw_access(rseq, efault) {
unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
unsafe_get_user(usr_ctrl.all, &rseq->slice_ctrl.all, efault);
unsafe_put_user(usr_ctrl.all, &rseq->slice_ctrl.all, efault);
curr->rseq.slice.state.granted = true;
curr->rseq.slice.expires = data_race(rseq_slice_ext_nsecs) + ktime_get_mono_fast_ns();
current->rseq.event.user_irq = true;
if (unlikely(usig != t->rseq.sig))
if (unlikely(!t->rseq.event.user_irq))
unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
t->rseq.event.fatal = true;
struct rseq __user *rseq = t->rseq.usrptr;
node_id = t->rseq.ids.cpu_id != RSEQ_CPU_ID_UNINITIALIZED ?
cpu_to_node(t->rseq.ids.cpu_id) : 0;
scoped_user_read_access(rseq, efault) {
unsafe_get_user(cpu_id, &rseq->cpu_id_start, efault);
if (cpu_id != t->rseq.ids.cpu_id)
unsafe_get_user(uval, &rseq->cpu_id, efault);
unsafe_get_user(uval, &rseq->node_id, efault);
unsafe_get_user(uval, &rseq->mm_cid, efault);
if (uval != t->rseq.ids.mm_cid)
t->rseq.event.fatal = true;
t->rseq.event.fatal = true;
if (unlikely(usig != t->rseq.sig))
unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
t->rseq.event.fatal = true;
struct rseq __user *rseq = t->rseq.usrptr;
scoped_user_rw_access(rseq, efault) {
unsafe_put_user(ids->cpu_id, &rseq->cpu_id_start, efault);
unsafe_put_user(ids->cpu_id, &rseq->cpu_id, efault);
unsafe_put_user(node_id, &rseq->node_id, efault);
unsafe_put_user(ids->mm_cid, &rseq->mm_cid, efault);
unsafe_get_user(*csaddr, &rseq->rseq_cs, efault);
unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
t->rseq.ids.cpu_cid = ids->cpu_cid;
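The ID fields written above are what a thread can read back from its registered area. A hedged user-side sketch, assuming glibc >= 2.35 (__rseq_offset and __rseq_size from <sys/rseq.h>) and a compiler that provides __builtin_thread_pointer():

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/rseq.h>	/* __rseq_offset, __rseq_size, struct rseq (glibc >= 2.35) */

int main(void)
{
	if (!__rseq_size) {
		puts("no rseq area registered by glibc");
		return 1;
	}
	/* The registered area sits at a fixed offset from the thread pointer. */
	struct rseq *rs = (struct rseq *)((char *)__builtin_thread_pointer() + __rseq_offset);

	/* cpu_id is part of the original 32-byte ABI, so it is always present. */
	printf("cpu_id=%u\n", rs->cpu_id);
	return 0;
}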
if (likely(!t->rseq.event.user_irq))
if (likely(!t->rseq.event.ids_changed)) {
struct rseq __user *rseq = t->rseq.usrptr;
scoped_user_rw_access(rseq, efault) {
unsafe_get_user(csaddr, &rseq->rseq_cs, efault);
unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
if (unlikely(t->rseq.event.sched_switch)) {
t->rseq.event.events = 0;
current->rseq.event.slowpath = true;
struct rseq_event *ev = &current->rseq.event;
struct rseq_event *ev = &current->rseq.event;
struct rseq_event *ev = &current->rseq.event;
struct rseq;
struct rseq __user *usrptr;
struct rseq_data rseq;
struct rseq;
asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len,
__entry->cpu_id = t->rseq.ids.cpu_id;
__entry->mm_cid = t->rseq.ids.mm_cid;
#define TRACE_SYSTEM rseq
.rseq_abi_pointer = (u64)(uintptr_t)task->rseq.usrptr,
.rseq_abi_size = task->rseq.len,
.signature = task->rseq.sig,
struct rseq __user *urseq = t->rseq.usrptr;
event = t->rseq.event.sched_switch;
t->rseq.event.all &= evt_mask.all;
t->rseq.event.error = 0;
current->rseq.event.error = 0;
if (!t->rseq.event.has_rseq)
if (get_user(csaddr, &t->rseq.usrptr->rseq_cs))
SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig)
if (current->rseq.usrptr != rseq || !current->rseq.usrptr)
if (rseq_len != current->rseq.len)
if (current->rseq.sig != sig)
if (current->rseq.usrptr) {
if (current->rseq.usrptr != rseq || rseq_len != current->rseq.len)
if (current->rseq.sig != sig)
(rseq_len == ORIG_RSEQ_SIZE && !IS_ALIGNED((unsigned long)rseq, ORIG_RSEQ_SIZE)) ||
(rseq_len != ORIG_RSEQ_SIZE && (!IS_ALIGNED((unsigned long)rseq, rseq_alloc_align()) ||
rseq_len < offsetof(struct rseq, end))))
if (!access_ok(rseq, rseq_len))
scoped_user_write_access(rseq, efault) {
unsafe_put_user(0UL, &rseq->rseq_cs, efault);
unsafe_put_user(rseqfl, &rseq->flags, efault);
unsafe_put_user(RSEQ_CPU_ID_UNINITIALIZED, &rseq->cpu_id_start, efault);
unsafe_put_user(RSEQ_CPU_ID_UNINITIALIZED, &rseq->cpu_id, efault);
unsafe_put_user(0U, &rseq->node_id, efault);
unsafe_put_user(0U, &rseq->mm_cid, efault);
unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
current->rseq.usrptr = rseq;
current->rseq.len = rseq_len;
current->rseq.sig = sig;
current->rseq.slice.state.enabled = !!(rseqfl & RSEQ_CS_FLAG_SLICE_EXT_ENABLED);
current->rseq.event.has_rseq = true;
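For illustration, a minimal user-space registration sketch against this syscall path; it assumes glibc has not already registered an area for the thread (the raw syscall then fails with EBUSY), and RSEQ_SIG is just an example signature value.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/rseq.h>

#define RSEQ_SIG 0x53053053	/* example signature; must match on abort and unregister */

/* sizeof(struct rseq) is 32 and the UAPI type carries aligned(32), satisfying the kernel's checks. */
static __thread struct rseq rs;

int main(void)
{
	if (syscall(__NR_rseq, &rs, sizeof(rs), 0, RSEQ_SIG)) {
		perror("rseq");	/* EBUSY if glibc already registered this thread */
		return 1;
	}
	printf("registered, cpu_id=%u\n", rs.cpu_id);
	return 0;
}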
if (st->cookie == current && current->rseq.slice.state.granted) {
if (unlikely(curr->rseq.slice.expires < ktime_get_mono_fast_ns())) {
hrtimer_start(&st->timer, curr->rseq.slice.expires, HRTIMER_MODE_ABS_PINNED_HARD);
u32 __user *sctrl = &current->rseq.usrptr->slice_ctrl.all;
struct rseq_slice_ctrl ctrl = { .granted = curr->rseq.slice.state.granted };
if (!curr->rseq.event.sched_switch) {
curr->rseq.slice.yielded = 1;
curr->rseq.slice.state.granted = false;
if (put_user(0U, &curr->rseq.usrptr->slice_ctrl.all))
return current->rseq.slice.state.enabled ? PR_RSEQ_SLICE_EXT_ENABLE : 0;
if (!current->rseq.usrptr)
if (enable == !!current->rseq.slice.state.enabled)
if (get_user(rflags, &current->rseq.usrptr->flags))
if (current->rseq.slice.state.enabled)
if (put_user(rflags, &current->rseq.usrptr->flags))
current->rseq.slice.state.enabled = enable;
int yielded = !!current->rseq.slice.yielded;
current->rseq.slice.yielded = 0;
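A heavily hedged sketch of enabling the slice extension from user space via prctl(); the command name PR_RSEQ_SLICE_EXTENSION and the numeric values below are placeholders inferred from the listing (which uses PR_RSEQ_SLICE_EXT_ENABLE), not settled UAPI.

#include <stdio.h>
#include <sys/prctl.h>

/* Placeholder definitions: the real constants come from the kernel headers of the
 * slice-extension series; the names follow the listing above, the values are assumptions. */
#ifndef PR_RSEQ_SLICE_EXTENSION
#define PR_RSEQ_SLICE_EXTENSION		79
#define PR_RSEQ_SLICE_EXT_ENABLE	(1U << 0)
#endif

int main(void)
{
	/* Ask the kernel to allow short time-slice extensions for this thread's rseq
	 * critical sections; expected to fail if no rseq area is registered or the
	 * kernel does not support the feature. */
	if (prctl(PR_RSEQ_SLICE_EXTENSION, PR_RSEQ_SLICE_EXT_ENABLE, 0, 0, 0))
		perror("prctl(PR_RSEQ_SLICE_EXTENSION)");
	return 0;
}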
COND_SYSCALL(rseq);