#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H
#include <uapi/linux/sched.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <linux/thread_info.h>
#include <linux/preempt.h>
#include <linux/cpumask_types.h>
#include <linux/cache.h>
#include <linux/irqflags_types.h>
#include <linux/smp_types.h>
#include <linux/pid_types.h>
#include <linux/sem_types.h>
#include <linux/shm.h>
#include <linux/kmsan_types.h>
#include <linux/mutex_types.h>
#include <linux/plist_types.h>
#include <linux/hrtimer_types.h>
#include <linux/timer_types.h>
#include <linux/seccomp_types.h>
#include <linux/nodemask_types.h>
#include <linux/refcount_types.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/spinlock.h>
#include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h>
#include <linux/netdevice_xmit.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers_types.h>
#include <linux/restart_block.h>
#include <linux/rseq_types.h>
#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
#include <linux/uidgid_types.h>
#include <linux/tracepoint-defs.h>
#include <linux/unwind_deferred_types.h>
#include <asm/kmap_size.h>
#include <linux/time64.h>
#ifndef COMPILE_OFFSETS
#include <generated/rq-offsets.h>
#endif
struct audit_context;
struct bio_list;
struct blk_plug;
struct bpf_local_storage;
struct bpf_run_ctx;
struct bpf_net_context;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct perf_ctx_data;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_dl_entity;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
struct task_struct;
struct timespec64;
struct user_event_mm;
#include <linux/sched/ext.h>
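/*
 * Task state bitmasks.
 *
 * All TASK_* values are used in tsk->__state; EXIT_DEAD, EXIT_ZOMBIE and
 * EXIT_TRACE are used in tsk->exit_state instead.  The __TASK_* names are
 * the raw bits; the composite names further below build on them.
 */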
#define TASK_RUNNING 0x00000000
#define TASK_INTERRUPTIBLE 0x00000001
#define TASK_UNINTERRUPTIBLE 0x00000002
#define __TASK_STOPPED 0x00000004
#define __TASK_TRACED 0x00000008
#define EXIT_DEAD 0x00000010
#define EXIT_ZOMBIE 0x00000020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
#define TASK_PARKED 0x00000040
#define TASK_DEAD 0x00000080
#define TASK_WAKEKILL 0x00000100
#define TASK_WAKING 0x00000200
#define TASK_NOLOAD 0x00000400
#define TASK_NEW 0x00000800
#define TASK_RTLOCK_WAIT 0x00001000
#define TASK_FREEZABLE 0x00002000
#define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
#define TASK_FROZEN 0x00008000
#define TASK_STATE_MAX 0x00010000
#define TASK_ANY (TASK_STATE_MAX-1)
#define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)
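/* Convenience macros for the sake of set_current_state() and wake_up(): */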
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED __TASK_TRACED
#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)
#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
#define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
#define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
#define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
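/*
 * Special states are those that do not use the normal wait-loop pattern;
 * they must only ever be set through set_special_state() (see below) so
 * that the store cannot race with a concurrent wakeup.
 */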
#define is_special_task_state(state) \
((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | \
TASK_DEAD | TASK_FROZEN))
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define debug_normal_state_change(state_value) \
do { \
WARN_ON_ONCE(is_special_task_state(state_value)); \
current->task_state_change = _THIS_IP_; \
} while (0)
# define debug_special_state_change(state_value) \
do { \
WARN_ON_ONCE(!is_special_task_state(state_value)); \
current->task_state_change = _THIS_IP_; \
} while (0)
# define debug_rtlock_wait_set_state() \
do { \
current->saved_state_change = current->task_state_change;\
current->task_state_change = _THIS_IP_; \
} while (0)
# define debug_rtlock_wait_restore_state() \
do { \
current->task_state_change = current->saved_state_change;\
} while (0)
#else
# define debug_normal_state_change(cond) do { } while (0)
# define debug_special_state_change(cond) do { } while (0)
# define debug_rtlock_wait_set_state() do { } while (0)
# define debug_rtlock_wait_restore_state() do { } while (0)
#endif
#define trace_set_current_state(state_value) \
do { \
if (tracepoint_enabled(sched_set_state_tp)) \
__trace_set_current_state(state_value); \
} while (0)
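/*
 * set_current_state() includes a full barrier (smp_store_mb()) so the state
 * write is ordered against the subsequent test of the sleep condition:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (CONDITION)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * try_to_wake_up() issues a matching full barrier before reading ->__state,
 * so either the sleeper sees CONDITION or the waker sees the new state and
 * wakes the task.  __set_current_state() is a plain WRITE_ONCE() and is only
 * safe when that ordering is not needed (e.g. restoring TASK_RUNNING, or
 * when a lock already serializes against the waker).
 */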
#define __set_current_state(state_value) \
do { \
debug_normal_state_change((state_value)); \
trace_set_current_state(state_value); \
WRITE_ONCE(current->__state, (state_value)); \
} while (0)
#define set_current_state(state_value) \
do { \
debug_normal_state_change((state_value)); \
trace_set_current_state(state_value); \
smp_store_mb(current->__state, (state_value)); \
} while (0)
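/*
 * set_special_state() is for states that cannot use the wait-loop pattern
 * above (stopped, traced, parked, dead, frozen).  Taking ->pi_lock
 * serializes the store against concurrent wakeups, so an in-flight
 * TASK_RUNNING store from try_to_wake_up() cannot clobber the new state.
 */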
#define set_special_state(state_value) \
do { \
unsigned long flags; \
\
raw_spin_lock_irqsave(&current->pi_lock, flags); \
debug_special_state_change((state_value)); \
trace_set_current_state(state_value); \
WRITE_ONCE(current->__state, (state_value)); \
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
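/*
 * PREEMPT_RT variants for "sleeping" spin/rwlocks: the task's current
 * ->__state is stashed in ->saved_state while it blocks in
 * TASK_RTLOCK_WAIT, and restored once the lock is acquired, so a regular
 * wakeup arriving in between is not lost.
 */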
#define current_save_and_set_rtlock_wait_state() \
do { \
lockdep_assert_irqs_disabled(); \
raw_spin_lock(&current->pi_lock); \
current->saved_state = current->__state; \
debug_rtlock_wait_set_state(); \
trace_set_current_state(TASK_RTLOCK_WAIT); \
WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
raw_spin_unlock(&current->pi_lock); \
} while (0);
#define current_restore_rtlock_saved_state() \
do { \
lockdep_assert_irqs_disabled(); \
raw_spin_lock(&current->pi_lock); \
debug_rtlock_wait_restore_state(); \
trace_set_current_state(current->saved_state); \
WRITE_ONCE(current->__state, current->saved_state); \
current->saved_state = TASK_RUNNING; \
raw_spin_unlock(&current->pi_lock); \
} while (0);
#define get_current_state() READ_ONCE(current->__state)
enum {
TASK_COMM_LEN = 16,
};
extern void sched_tick(void);
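/* schedule_timeout() and friends sleep for at most @timeout jiffies; MAX_SCHEDULE_TIMEOUT means "no timeout". */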
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);
#ifdef CONFIG_PREEMPT_RT
extern void schedule_rtlock(void);
#endif
extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
DECLARE_TRACEPOINT(sched_set_state_tp);
extern void __trace_set_current_state(int state_value);
DECLARE_TRACEPOINT(sched_set_need_resched_tp);
extern void __trace_set_need_resched(struct task_struct *curr, int tif);
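/*
 * struct prev_cputime - snapshot of previously reported user/system time,
 * used by cputime_adjust() to guarantee monotonic utime/stime reporting.
 */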
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
u64 utime;
u64 stime;
raw_spinlock_t lock;
#endif
};
enum vtime_state {
VTIME_INACTIVE = 0,
VTIME_IDLE,
VTIME_SYS,
VTIME_USER,
VTIME_GUEST,
};
struct vtime {
seqcount_t seqcount;
unsigned long long starttime;
enum vtime_state state;
unsigned int cpu;
u64 utime;
u64 stime;
u64 gtime;
};
enum uclamp_id {
UCLAMP_MIN = 0,
UCLAMP_MAX,
UCLAMP_CNT
};
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
extern void sched_domains_mutex_lock(void);
extern void sched_domains_mutex_unlock(void);
struct sched_param {
int sched_priority;
};
struct sched_info {
#ifdef CONFIG_SCHED_INFO
unsigned long pcount;
unsigned long long run_delay;
unsigned long long max_run_delay;
unsigned long long min_run_delay;
unsigned long long last_arrival;
unsigned long long last_queued;
struct timespec64 max_run_delay_ts;
#endif
};
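/*
 * Fixed-point arithmetic range used by the scheduler's integer metrics
 * (load weights, utilization, CPU capacity).
 */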
# define SCHED_FIXEDPOINT_SHIFT 10
# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
struct load_weight {
unsigned long weight;
u32 inv_weight;
};
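/*
 * Per-entity load tracking (PELT) signals: geometrically decayed sums and
 * the averages derived from them (see kernel/sched/pelt.c).
 */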
struct sched_avg {
u64 last_update_time;
u64 load_sum;
u64 runnable_sum;
u32 util_sum;
u32 period_contrib;
unsigned long load_avg;
unsigned long runnable_avg;
unsigned long util_avg;
unsigned int util_est;
} ____cacheline_aligned;
#define UTIL_EST_WEIGHT_SHIFT 2
#define UTIL_AVG_UNCHANGED 0x80000000
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
u64 wait_max;
u64 wait_count;
u64 wait_sum;
u64 iowait_count;
u64 iowait_sum;
u64 sleep_start;
u64 sleep_max;
s64 sum_sleep_runtime;
u64 block_start;
u64 block_max;
s64 sum_block_runtime;
s64 exec_max;
u64 slice_max;
u64 nr_migrations_cold;
u64 nr_failed_migrations_affine;
u64 nr_failed_migrations_running;
u64 nr_failed_migrations_hot;
u64 nr_forced_migrations;
u64 nr_wakeups;
u64 nr_wakeups_sync;
u64 nr_wakeups_migrate;
u64 nr_wakeups_local;
u64 nr_wakeups_remote;
u64 nr_wakeups_affine;
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
#ifdef CONFIG_SCHED_CORE
u64 core_forceidle_sum;
#endif
#endif
} ____cacheline_aligned;
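/*
 * Fair-class (EEVDF) scheduling entity; represents a task or, with
 * CONFIG_FAIR_GROUP_SCHED, a task group's presence on a cfs_rq.
 */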
struct sched_entity {
struct load_weight load;
struct rb_node run_node;
u64 deadline;
u64 min_vruntime;
u64 min_slice;
u64 max_slice;
struct list_head group_node;
unsigned char on_rq;
unsigned char sched_delayed;
unsigned char rel_deadline;
unsigned char custom_slice;
u64 exec_start;
u64 sum_exec_runtime;
u64 prev_sum_exec_runtime;
u64 vruntime;
s64 vlag;
u64 vprot;
u64 slice;
u64 nr_migrations;
#ifdef CONFIG_FAIR_GROUP_SCHED
int depth;
struct sched_entity *parent;
struct cfs_rq *cfs_rq;
struct cfs_rq *my_q;
unsigned long runnable_weight;
#endif
struct sched_avg avg;
};
struct sched_rt_entity {
struct list_head run_list;
unsigned long timeout;
unsigned long watchdog_stamp;
unsigned int time_slice;
unsigned short on_rq;
unsigned short on_list;
struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
struct sched_rt_entity *parent;
struct rt_rq *rt_rq;
struct rt_rq *my_q;
#endif
} __randomize_layout;
struct rq_flags;
typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *, struct rq_flags *rf);
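/*
 * SCHED_DEADLINE entity.  dl_runtime/dl_deadline/dl_period mirror the
 * parameters passed via sched_setattr(); runtime/deadline hold the current
 * replenishment state.  The dl_* bitfields track throttling, yielding, the
 * inactive ("non contending") timer and the dl-server mode used to run
 * fair tasks out of a deadline reservation.
 */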
struct sched_dl_entity {
struct rb_node rb_node;
u64 dl_runtime;
u64 dl_deadline;
u64 dl_period;
u64 dl_bw;
u64 dl_density;
s64 runtime;
u64 deadline;
unsigned int flags;
unsigned int dl_throttled : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
unsigned int dl_server : 1;
unsigned int dl_server_active : 1;
unsigned int dl_defer : 1;
unsigned int dl_defer_armed : 1;
unsigned int dl_defer_running : 1;
unsigned int dl_defer_idle : 1;
struct hrtimer dl_timer;
struct hrtimer inactive_timer;
struct rq *rq;
dl_server_pick_f server_pick_task;
#ifdef CONFIG_RT_MUTEXES
struct sched_dl_entity *pi_se;
#endif
};
#ifdef CONFIG_UCLAMP_TASK
#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
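/*
 * Utilization clamp (uclamp) value for a scheduling entity: the requested
 * clamp value, the bucket it maps to, and whether it is currently active
 * and/or explicitly set from user space.
 */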
struct uclamp_se {
unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
unsigned int active : 1;
unsigned int user_defined : 1;
};
#endif
union rcu_special {
struct {
u8 blocked;
u8 need_qs;
u8 exp_hint;
u8 need_mb;
} b;
u32 s;
};
enum perf_event_task_context {
perf_invalid_context = -1,
perf_hw_context = 0,
perf_sw_context,
perf_nr_task_contexts,
};
#define PERF_NR_CONTEXTS 4
struct wake_q_node {
struct wake_q_node *next;
};
struct kmap_ctrl {
#ifdef CONFIG_KMAP_LOCAL
int idx;
pte_t pteval[KM_MAX_IDX];
#endif
};
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
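/*
 * For reasons of header soup (see current_thread_info()), this
 * must be the first element of task_struct.
 */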
struct thread_info thread_info;
#endif
unsigned int __state;
unsigned int saved_state;
randomized_struct_fields_start
void *stack;
refcount_t usage;
unsigned int flags;
unsigned int ptrace;
#ifdef CONFIG_MEM_ALLOC_PROFILING
struct alloc_tag *alloc_tag;
#endif
int on_cpu;
struct __call_single_node wake_entry;
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
int recent_used_cpu;
int wake_cpu;
int on_rq;
int prio;
int static_prio;
int normal_prio;
unsigned int rt_priority;
struct sched_entity se;
struct sched_rt_entity rt;
struct sched_dl_entity dl;
struct sched_dl_entity *dl_server;
#ifdef CONFIG_SCHED_CLASS_EXT
struct sched_ext_entity scx;
#endif
const struct sched_class *sched_class;
#ifdef CONFIG_SCHED_CORE
struct rb_node core_node;
unsigned long core_cookie;
unsigned int core_occupation;
#endif
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#ifdef CONFIG_CFS_BANDWIDTH
struct callback_head sched_throttle_work;
struct list_head throttle_node;
bool throttled;
#endif
#endif
#ifdef CONFIG_UCLAMP_TASK
struct uclamp_se uclamp_req[UCLAMP_CNT];
struct uclamp_se uclamp[UCLAMP_CNT];
#endif
struct sched_statistics stats;
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct hlist_head preempt_notifiers;
#endif
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
unsigned int policy;
unsigned long max_allowed_capacity;
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t *user_cpus_ptr;
cpumask_t cpus_mask;
void *migration_pending;
unsigned short migration_disabled;
unsigned short migration_flags;
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
union rcu_special rcu_read_unlock_special;
struct list_head rcu_node_entry;
struct rcu_node *rcu_blocked_node;
#endif
#ifdef CONFIG_TASKS_RCU
unsigned long rcu_tasks_nvcsw;
u8 rcu_tasks_holdout;
u8 rcu_tasks_idx;
int rcu_tasks_idle_cpu;
struct list_head rcu_tasks_holdout_list;
int rcu_tasks_exit_cpu;
struct list_head rcu_tasks_exit_list;
#endif
#ifdef CONFIG_TASKS_TRACE_RCU
int trc_reader_nesting;
struct srcu_ctr __percpu *trc_reader_scp;
#endif
struct sched_info sched_info;
struct list_head tasks;
struct plist_node pushable_tasks;
struct rb_node pushable_dl_tasks;
struct mm_struct *mm;
struct mm_struct *active_mm;
int exit_state;
int exit_code;
int exit_signal;
int pdeath_signal;
unsigned long jobctl;
unsigned int personality;
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
unsigned sched_task_hot:1;
unsigned :0;
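/*
 * The zero-width bitfield above forces alignment to the next word:
 * sched_remote_wakeup can be written during wakelist queueing and therefore
 * must not share a word with the scheduler-serialized bits above it.
 */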
unsigned sched_remote_wakeup:1;
#ifdef CONFIG_RT_MUTEXES
unsigned sched_rt_mutex:1;
#endif
unsigned in_execve:1;
unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG_V1
unsigned in_user_fault:1;
#endif
#ifdef CONFIG_LRU_GEN
unsigned in_lru_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
unsigned no_cgroup_migration:1;
unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
unsigned use_memdelay:1;
#endif
#ifdef CONFIG_PSI
unsigned in_memstall:1;
#endif
#ifdef CONFIG_PAGE_OWNER
unsigned in_page_owner:1;
#endif
#ifdef CONFIG_EVENTFD
unsigned in_eventfd:1;
#endif
#ifdef CONFIG_ARCH_HAS_CPU_PASID
unsigned pasid_activated:1;
#endif
#ifdef CONFIG_X86_BUS_LOCK_DETECT
unsigned reported_split_lock:1;
#endif
#ifdef CONFIG_TASK_DELAY_ACCT
unsigned in_thrashing:1;
#endif
unsigned in_nf_duplicate:1;
#ifdef CONFIG_PREEMPT_RT
struct netdev_xmit net_xmit;
#endif
unsigned long atomic_flags;
struct restart_block restart_block;
pid_t pid;
pid_t tgid;
#ifdef CONFIG_STACKPROTECTOR
unsigned long stack_canary;
#endif
struct task_struct __rcu *real_parent;
struct task_struct __rcu *parent;
struct list_head children;
struct list_head sibling;
struct task_struct *group_leader;
struct list_head ptraced;
struct list_head ptrace_entry;
struct pid *thread_pid;
struct hlist_node pid_links[PIDTYPE_MAX];
struct list_head thread_node;
struct completion *vfork_done;
int __user *set_child_tid;
int __user *clear_child_tid;
void *worker_private;
u64 utime;
u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
u64 utimescaled;
u64 stimescaled;
#endif
u64 gtime;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
struct vtime vtime;
#endif
#ifdef CONFIG_NO_HZ_FULL
atomic_t tick_dep_mask;
#endif
unsigned long nvcsw;
unsigned long nivcsw;
u64 start_time;
u64 start_boottime;
unsigned long min_flt;
unsigned long maj_flt;
struct posix_cputimers posix_cputimers;
#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
struct posix_cputimers_work posix_cputimers_work;
#endif
const struct cred __rcu *ptracer_cred;
const struct cred __rcu *real_cred;
const struct cred __rcu *cred;
#ifdef CONFIG_KEYS
struct key *cached_requested_key;
#endif
char comm[TASK_COMM_LEN];
struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
struct sysv_sem sysvsem;
struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
unsigned long last_switch_count;
unsigned long last_switch_time;
#endif
struct fs_struct *fs;
struct files_struct *files;
#ifdef CONFIG_IO_URING
struct io_uring_task *io_uring;
struct io_restriction *io_uring_restrict;
#endif
struct nsproxy *nsproxy;
struct signal_struct *signal;
struct sighand_struct __rcu *sighand;
sigset_t blocked;
sigset_t real_blocked;
sigset_t saved_sigmask;
struct sigpending pending;
unsigned long sas_ss_sp;
size_t sas_ss_size;
unsigned int sas_ss_flags;
struct callback_head *task_works;
#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
struct audit_context *audit_context;
#endif
kuid_t loginuid;
unsigned int sessionid;
#endif
struct seccomp seccomp;
struct syscall_user_dispatch syscall_dispatch;
u64 parent_exec_id;
u64 self_exec_id;
spinlock_t alloc_lock;
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
#ifdef CONFIG_RT_MUTEXES
struct rb_root_cached pi_waiters;
struct task_struct *pi_top_task;
struct rt_mutex_waiter *pi_blocked_on;
#endif
struct mutex *blocked_on;
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
unsigned long blocker;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
int non_block_count;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
struct irqtrace_events irqtrace;
unsigned int hardirq_threaded;
u64 hardirq_chain_key;
int softirqs_enabled;
int softirq_context;
int irq_config;
#endif
#ifdef CONFIG_PREEMPT_RT
int softirq_disable_cnt;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif
#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
unsigned int in_ubsan;
#endif
void *journal_info;
struct bio_list *bio_list;
struct blk_plug *plug;
struct reclaim_state *reclaim_state;
struct io_context *io_context;
#ifdef CONFIG_COMPACTION
struct capture_control *capture_control;
#endif
unsigned long ptrace_message;
kernel_siginfo_t *last_siginfo;
struct task_io_accounting ioac;
#ifdef CONFIG_PSI
unsigned int psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
u64 acct_rss_mem1;
u64 acct_vm_mem1;
u64 acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
nodemask_t mems_allowed;
seqcount_spinlock_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
struct css_set __rcu *cgroups;
struct list_head cg_list;
#ifdef CONFIG_PREEMPT_RT
struct llist_node cg_dead_lnode;
#endif
#endif
#ifdef CONFIG_X86_CPU_RESCTRL
u32 closid;
u32 rmid;
#endif
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
struct compat_robust_list_head __user *compat_robust_list;
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
struct mutex futex_exit_mutex;
unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
u8 perf_recursion[PERF_NR_CONTEXTS];
struct perf_event_context *perf_event_ctxp;
struct mutex perf_event_mutex;
struct list_head perf_event_list;
struct perf_ctx_data __rcu *perf_ctx_data;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
struct mempolicy *mempolicy;
short il_prev;
u8 il_weight;
short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
int numa_scan_seq;
unsigned int numa_scan_period;
unsigned int numa_scan_period_max;
int numa_preferred_nid;
unsigned long numa_migrate_retry;
u64 node_stamp;
u64 last_task_numa_placement;
u64 last_sum_exec_runtime;
struct callback_head numa_work;
struct numa_group __rcu *numa_group;
unsigned long *numa_faults;
unsigned long total_numa_faults;
unsigned long numa_faults_locality[3];
unsigned long numa_pages_migrated;
#endif
struct rseq_data rseq;
struct sched_mm_cid mm_cid;
struct tlbflush_unmap_batch tlb_ubc;
struct pipe_inode_info *splice_pipe;
struct page_frag task_frag;
#ifdef CONFIG_ARCH_HAS_LAZY_MMU_MODE
struct lazy_mmu_state lazy_mmu_state;
#endif
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
int make_it_fail;
unsigned int fail_nth;
#endif
int nr_dirtied;
int nr_dirtied_pause;
unsigned long dirty_paused_when;
#ifdef CONFIG_LATENCYTOP
int latency_record_count;
struct latency_record latency_record[LT_SAVECOUNT];
#endif
u64 timer_slack_ns;
u64 default_timer_slack_ns;
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
unsigned int kasan_depth;
#endif
#ifdef CONFIG_KCSAN
struct kcsan_ctx kcsan_ctx;
#ifdef CONFIG_TRACE_IRQFLAGS
struct irqtrace_events kcsan_save_irqtrace;
#endif
#ifdef CONFIG_KCSAN_WEAK_MEMORY
int kcsan_stack_depth;
#endif
#endif
#ifdef CONFIG_KMSAN
struct kmsan_ctx kmsan_ctx;
#endif
#if IS_ENABLED(CONFIG_KUNIT)
struct kunit *kunit_test;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int curr_ret_stack;
int curr_ret_depth;
unsigned long *ret_stack;
unsigned long long ftrace_timestamp;
unsigned long long ftrace_sleeptime;
atomic_t trace_overrun;
atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
unsigned long trace_recursion;
#endif
#ifdef CONFIG_KCOV
unsigned int kcov_mode;
unsigned int kcov_size;
void *kcov_area;
struct kcov *kcov;
u64 kcov_handle;
int kcov_sequence;
unsigned int kcov_softirq;
#endif
#ifdef CONFIG_MEMCG_V1
struct mem_cgroup *memcg_in_oom;
#endif
#ifdef CONFIG_MEMCG
unsigned int memcg_nr_pages_over_high;
struct mem_cgroup *active_memcg;
struct obj_cgroup *objcg;
#endif
#ifdef CONFIG_BLK_CGROUP
struct gendisk *throttle_disk;
#endif
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
struct kmap_ctrl kmap_ctrl;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
# ifdef CONFIG_PREEMPT_RT
unsigned long saved_state_change;
# endif
#endif
struct rcu_head rcu;
refcount_t rcu_users;
int pagefault_disabled;
#ifdef CONFIG_MMU
struct task_struct *oom_reaper_list;
struct timer_list oom_reaper_timer;
#endif
#ifdef CONFIG_VMAP_STACK
struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
refcount_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
int patch_state;
#endif
#ifdef CONFIG_SECURITY
void *security;
#endif
#ifdef CONFIG_BPF_SYSCALL
struct bpf_local_storage __rcu *bpf_storage;
struct bpf_run_ctx *bpf_ctx;
#endif
struct bpf_net_context *bpf_net_context;
#ifdef CONFIG_KSTACK_ERASE
unsigned long lowest_stack;
#endif
#ifdef CONFIG_KSTACK_ERASE_METRICS
unsigned long prev_lowest_stack;
#endif
#ifdef CONFIG_X86_MCE
void __user *mce_vaddr;
__u64 mce_kflags;
u64 mce_addr;
__u64 mce_ripv : 1,
mce_whole_page : 1,
__mce_reserved : 62;
struct callback_head mce_kill_me;
int mce_count;
#endif
#ifdef CONFIG_KRETPROBES
struct llist_head kretprobe_instances;
#endif
#ifdef CONFIG_RETHOOK
struct llist_head rethooks;
#endif
#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
struct callback_head l1d_flush_kill;
#endif
#ifdef CONFIG_RV
union rv_task_monitor rv[CONFIG_RV_PER_TASK_MONITORS];
#endif
#ifdef CONFIG_USER_EVENTS
struct user_event_mm *user_event_mm;
#endif
#ifdef CONFIG_UNWIND_USER
struct unwind_task_info unwind_info;
#endif
struct thread_struct thread;
randomized_struct_fields_end
} __attribute__ ((aligned (64)));
#ifdef CONFIG_SCHED_PROXY_EXEC
DECLARE_STATIC_KEY_TRUE(__sched_proxy_exec);
static inline bool sched_proxy_exec(void)
{
return static_branch_likely(&__sched_proxy_exec);
}
#else
static inline bool sched_proxy_exec(void)
{
return false;
}
#endif
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
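/*
 * Map a task's state bits onto an index into the "RSDTtXZPI" character set
 * reported via /proc and the sched tracepoints.  TASK_IDLE is reported as
 * its own 'I' state, while TASK_RTLOCK_WAIT and TASK_FROZEN are reported
 * as plain uninterruptible sleep ('D') to avoid exposing new states to
 * user space.
 */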
static inline unsigned int __task_state_index(unsigned int tsk_state,
unsigned int tsk_exit_state)
{
unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
if ((tsk_state & TASK_IDLE) == TASK_IDLE)
state = TASK_REPORT_IDLE;
if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN))
state = TASK_UNINTERRUPTIBLE;
return fls(state);
}
static inline unsigned int task_state_index(struct task_struct *tsk)
{
return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
}
static inline char task_index_to_char(unsigned int state)
{
static const char state_char[] = "RSDTtXZPI";
BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1));
return state_char[state];
}
static inline char task_state_to_char(struct task_struct *tsk)
{
return task_index_to_char(task_state_index(tsk));
}
#ifdef CONFIG_ARCH_HAS_LAZY_MMU_MODE
static inline bool __task_lazy_mmu_mode_active(struct task_struct *tsk)
{
struct lazy_mmu_state *state = &tsk->lazy_mmu_state;
return state->enable_count > 0 && state->pause_count == 0;
}
static inline bool is_lazy_mmu_mode_active(void)
{
if (in_interrupt())
return false;
return __task_lazy_mmu_mode_active(current);
}
#endif
extern struct pid *cad_pid;
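/*
 * Per-process flags, kept in task_struct::flags.  Only 'current' modifies
 * its own flags; other tasks may read them (e.g. via tsk_used_math()).
 */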
#define PF_VCPU 0x00000001
#define PF_IDLE 0x00000002
#define PF_EXITING 0x00000004
#define PF_POSTCOREDUMP 0x00000008
#define PF_IO_WORKER 0x00000010
#define PF_WQ_WORKER 0x00000020
#define PF_FORKNOEXEC 0x00000040
#define PF_MCE_PROCESS 0x00000080
#define PF_SUPERPRIV 0x00000100
#define PF_DUMPCORE 0x00000200
#define PF_SIGNALED 0x00000400
#define PF_MEMALLOC 0x00000800
#define PF_NPROC_EXCEEDED 0x00001000
#define PF_USED_MATH 0x00002000
#define PF_USER_WORKER 0x00004000
#define PF_NOFREEZE 0x00008000
#define PF_KCOMPACTD 0x00010000
#define PF_KSWAPD 0x00020000
#define PF_MEMALLOC_NOFS 0x00040000
#define PF_MEMALLOC_NOIO 0x00080000
#define PF_LOCAL_THROTTLE 0x00100000
#define PF_KTHREAD 0x00200000
#define PF_RANDOMIZE 0x00400000
#define PF__HOLE__00800000 0x00800000
#define PF__HOLE__01000000 0x01000000
#define PF__HOLE__02000000 0x02000000
#define PF_NO_SETAFFINITY 0x04000000
#define PF_MCE_EARLY 0x08000000
#define PF_MEMALLOC_PIN 0x10000000
#define PF_BLOCK_TS 0x20000000
#define PF__HOLE__40000000 0x40000000
#define PF_SUSPEND_TASK 0x80000000
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
static __always_inline bool is_percpu_thread(void)
{
return (current->flags & PF_NO_SETAFFINITY) &&
(current->nr_cpus_allowed == 1);
}
static __always_inline bool is_user_task(struct task_struct *task)
{
return task->mm && !(task->flags & (PF_KTHREAD | PF_USER_WORKER));
}
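/* Per-process atomic flags, kept in task_struct::atomic_flags and accessed with the bitop helpers below. */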
#define PFA_NO_NEW_PRIVS 0
#define PFA_SPREAD_PAGE 1
#define PFA_SPREAD_SLAB 2
#define PFA_SPEC_SSB_DISABLE 3
#define PFA_SPEC_SSB_FORCE_DISABLE 4
#define PFA_SPEC_IB_DISABLE 5
#define PFA_SPEC_IB_FORCE_DISABLE 6
#define PFA_SPEC_SSB_NOEXEC 7
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func) \
static inline void task_set_##func(struct task_struct *p) \
{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func) \
static inline void task_clear_##func(struct task_struct *p) \
{ clear_bit(PFA_##name, &p->atomic_flags); }
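/*
 * Note that NO_NEW_PRIVS and the SPEC_*_FORCE_DISABLE flags deliberately
 * get no TASK_PFA_CLEAR() accessor: once set they must stay set.
 */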
TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
current->flags &= ~flags;
current->flags |= orig_flags & flags;
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);
extern void set_cpus_allowed_force(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
extern void release_user_cpus_ptr(struct task_struct *p);
extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
static inline int task_nice(const struct task_struct *p)
{
return PRIO_TO_NICE((p)->static_prio);
}
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
extern void sched_set_fifo_secondary(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
static __always_inline bool is_idle_task(const struct task_struct *p)
{
return !!(p->flags & PF_IDLE);
}
extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
struct task_struct task;
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
#ifndef CONFIG_THREAD_INFO_IN_TASK
extern struct thread_info init_thread_info;
#endif
extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_THREAD_INFO_IN_TASK
# define task_thread_info(task) (&(task)->thread_info)
#else
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
extern struct task_struct *find_get_task_by_vpid(pid_t nr);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
extern void kick_process(struct task_struct *tsk);
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
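/*
 * set_task_comm()/get_task_comm() are macros so that BUILD_BUG_ON() can
 * check the buffer size at compile time; they must be passed a char array,
 * not a pointer.
 */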
#define set_task_comm(tsk, from) ({ \
BUILD_BUG_ON(sizeof(from) != TASK_COMM_LEN); \
__set_task_comm(tsk, from, false); \
})
#define get_task_comm(buf, tsk) ({ \
BUILD_BUG_ON(sizeof(buf) < TASK_COMM_LEN); \
strscpy_pad(buf, (tsk)->comm); \
buf; \
})
static __always_inline void scheduler_ipi(void)
{
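/*
 * Fold TIF_NEED_RESCHED into the preempt_count; any CPU setting
 * TIF_NEED_RESCHED remotely also sends this IPI, so nothing else
 * needs to be done here.
 */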
preempt_fold_need_resched();
}
extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
bool value)
{
update_ti_thread_flag(task_thread_info(tsk), flag, value);
}
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
if (tracepoint_enabled(sched_set_need_resched_tp) &&
!test_tsk_thread_flag(tsk, TIF_NEED_RESCHED))
__trace_set_need_resched(tsk, TIF_NEED_RESCHED);
set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
atomic_long_andnot(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY,
(atomic_long_t *)&task_thread_info(tsk)->flags);
}
static inline int test_tsk_need_resched(struct task_struct *tsk)
{
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
static inline void set_need_resched_current(void)
{
lockdep_assert_irqs_disabled();
set_tsk_need_resched(current);
set_preempt_need_resched();
}
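/*
 * cond_resched() and cond_resched_lock(): latency reduction via explicit
 * rescheduling in places that are safe.  The return value indicates whether
 * a reschedule was done.  cond_resched_lock() drops the lock before
 * rescheduling and reacquires it afterwards.  On fully preemptible kernels
 * (without PREEMPT_DYNAMIC) _cond_resched() collapses to a no-op.
 */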
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
extern int __cond_resched(void);
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DECLARE_STATIC_CALL(cond_resched, __cond_resched);
static __always_inline int _cond_resched(void)
{
return static_call_mod(cond_resched)();
}
#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
extern int dynamic_cond_resched(void);
static __always_inline int _cond_resched(void)
{
return dynamic_cond_resched();
}
#else
static inline int _cond_resched(void)
{
return __cond_resched();
}
#endif
#else
static inline int _cond_resched(void)
{
return 0;
}
#endif
#define cond_resched() ({ \
__might_resched(__FILE__, __LINE__, 0); \
_cond_resched(); \
})
extern int __cond_resched_lock(spinlock_t *lock) __must_hold(lock);
extern int __cond_resched_rwlock_read(rwlock_t *lock) __must_hold_shared(lock);
extern int __cond_resched_rwlock_write(rwlock_t *lock) __must_hold(lock);
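/*
 * On PREEMPT_RT, lock-held sections remain preemptible and may be entered
 * with RCU read-side critical sections held, so the offsets handed to
 * __might_resched() encode the expected rcu_preempt_depth() in the bits
 * above MIGHT_RESCHED_RCU_SHIFT.
 */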
#define MIGHT_RESCHED_RCU_SHIFT 8
#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
#ifndef CONFIG_PREEMPT_RT
# define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
#else
# define PREEMPT_LOCK_RESCHED_OFFSETS \
(PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
#endif
#define cond_resched_lock(lock) ({ \
__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
__cond_resched_lock(lock); \
})
#define cond_resched_rwlock_read(lock) ({ \
__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
__cond_resched_rwlock_read(lock); \
})
#define cond_resched_rwlock_write(lock) ({ \
__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
__cond_resched_rwlock_write(lock); \
})
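/*
 * Helpers for tracking which mutex (if any) a task is blocked on.  Updates
 * are serialized by the mutex's ->wait_lock.  On PREEMPT_RT mutexes are
 * substituted by rtmutexes, which track their waiters differently, so only
 * the clear operations exist there as stubs.
 */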
#ifndef CONFIG_PREEMPT_RT
static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
{
struct mutex *m = p->blocked_on;
if (m)
lockdep_assert_held_once(&m->wait_lock);
return m;
}
static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
struct mutex *blocked_on = READ_ONCE(p->blocked_on);
WARN_ON_ONCE(!m);
WARN_ON_ONCE(p != current);
lockdep_assert_held_once(&m->wait_lock);
WARN_ON_ONCE(blocked_on && blocked_on != m);
WRITE_ONCE(p->blocked_on, m);
}
static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
guard(raw_spinlock_irqsave)(&m->wait_lock);
__set_task_blocked_on(p, m);
}
static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
if (m) {
struct mutex *blocked_on = READ_ONCE(p->blocked_on);
lockdep_assert_held_once(&m->wait_lock);
WARN_ON_ONCE(blocked_on && blocked_on != m);
}
WRITE_ONCE(p->blocked_on, NULL);
}
static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
guard(raw_spinlock_irqsave)(&m->wait_lock);
__clear_task_blocked_on(p, m);
}
#else
static inline void __clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
{
}
static inline void clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
{
}
#endif
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
}
#ifdef CONFIG_SMP
static inline unsigned int task_cpu(const struct task_struct *p)
{
return READ_ONCE(task_thread_info(p)->cpu);
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
#else
static inline unsigned int task_cpu(const struct task_struct *p)
{
return 0;
}
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}
#endif
static inline bool task_is_runnable(struct task_struct *p)
{
return p->on_rq && !p->se.sched_delayed;
}
extern bool sched_task_on_rq(struct task_struct *p);
extern unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_curr_snapshot(int cpu);
#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
return false;
}
#endif
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
static inline bool owner_on_cpu(struct task_struct *owner)
{
return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
}
unsigned long sched_cpu_util(int cpu);
#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
extern void sched_core_fork(struct task_struct *p);
extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
unsigned long uaddr);
extern int sched_core_idle_cpu(int cpu);
#else
static inline void sched_core_free(struct task_struct *tsk) { }
static inline void sched_core_fork(struct task_struct *p) { }
static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
#endif
extern void sched_set_stop_task(int cpu, struct task_struct *stop);
#ifdef CONFIG_MEM_ALLOC_PROFILING
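/*
 * Swap the tag that memory-allocation profiling charges allocations made by
 * 'current' to; alloc_tag_restore() puts the previous tag back (and, with
 * the DEBUG option, warns if the tag changed in between).
 */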
static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
{
swap(current->alloc_tag, tag);
return tag;
}
static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
{
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
#endif
current->alloc_tag = old;
}
#else
#define alloc_tag_save(_tag) NULL
#define alloc_tag_restore(_tag, _old) do {} while (0)
#endif
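/*
 * task_mm_cid(): compact per-mm concurrency ID exposed to user space via
 * rseq.  Without CONFIG_SCHED_MM_CID it simply falls back to the CPU
 * number.
 */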
#ifdef CONFIG_SCHED_MM_CID
void sched_mm_cid_before_execve(struct task_struct *t);
void sched_mm_cid_after_execve(struct task_struct *t);
void sched_mm_cid_exit(struct task_struct *t);
static __always_inline int task_mm_cid(struct task_struct *t)
{
return t->mm_cid.cid & ~(MM_CID_ONCPU | MM_CID_TRANSIT);
}
#else
static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
static inline void sched_mm_cid_exit(struct task_struct *t) { }
static __always_inline int task_mm_cid(struct task_struct *t)
{
return task_cpu(t);
}
#endif
#ifndef MODULE
#ifndef COMPILE_OFFSETS
extern void ___migrate_enable(void);
struct rq;
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#ifdef CONFIG_SMP
#define this_rq_raw() arch_raw_cpu_ptr(&runqueues)
#else
#define this_rq_raw() PERCPU_PTR(&runqueues)
#endif
#define this_rq_pinned() (*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))
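/*
 * Inline fast paths for migrate_disable()/migrate_enable().  The per-task
 * migration_disabled count nests; only the outermost pair updates the
 * runqueue's pinned count, and migrate_enable() calls out to
 * ___migrate_enable() when the affinity was restricted while migration was
 * disabled (cpus_ptr != &cpus_mask).
 */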
static inline void __migrate_enable(void)
{
struct task_struct *p = current;
#ifdef CONFIG_DEBUG_PREEMPT
if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
return;
#endif
if (p->migration_disabled > 1) {
p->migration_disabled--;
return;
}
guard(preempt)();
if (unlikely(p->cpus_ptr != &p->cpus_mask))
___migrate_enable();
barrier();
p->migration_disabled = 0;
this_rq_pinned()--;
}
static inline void __migrate_disable(void)
{
struct task_struct *p = current;
if (p->migration_disabled) {
#ifdef CONFIG_DEBUG_PREEMPT
WARN_ON_ONCE((s16)p->migration_disabled < 0);
#endif
p->migration_disabled++;
return;
}
guard(preempt)();
this_rq_pinned()++;
p->migration_disabled = 1;
}
#else
static inline void __migrate_disable(void) { }
static inline void __migrate_enable(void) { }
#endif
#ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE
static __always_inline void migrate_disable(void)
{
__migrate_disable();
}
static __always_inline void migrate_enable(void)
{
__migrate_enable();
}
#else
extern void migrate_disable(void);
extern void migrate_enable(void);
#endif
#else
extern void migrate_disable(void);
extern void migrate_enable(void);
#endif
DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
#endif