#ifndef __SCX_COMPAT_BPF_H
#define __SCX_COMPAT_BPF_H

/*
 * BPF-side compatibility helpers for sched_ext schedulers. Newer kfuncs and
 * enum entries are declared __weak and probed at load time so that a
 * scheduler built against a current vmlinux.h keeps working on older
 * kernels.
 */

/*
 * Evaluate to the value of enum entry @__ent if the running kernel defines
 * it, and to zero otherwise. Useful for flags added after the initial
 * sched_ext release.
 */
#define __COMPAT_ENUM_OR_ZERO(__type, __ent)				\
({									\
	__type __ret = 0;						\
	if (bpf_core_enum_value_exists(__type, __ent))			\
		__ret = __ent;						\
	__ret;								\
})
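
/*
 * Usage sketch (illustrative): pass a kick flag that may not exist on older
 * kernels, degrading to 0 (no flag) where it doesn't.
 *
 *	u64 idle_flag = __COMPAT_ENUM_OR_ZERO(enum scx_kick_flags, SCX_KICK_IDLE);
 *
 *	scx_bpf_kick_cpu(cpu, idle_flag);
 */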

/*
 * scx_bpf_task_cgroup() is only available on kernels with sched_ext cgroup
 * support. Evaluates to NULL on kernels without it.
 */
struct cgroup *scx_bpf_task_cgroup___new(struct task_struct *p) __ksym __weak;

#define scx_bpf_task_cgroup(p)						\
	(bpf_ksym_exists(scx_bpf_task_cgroup___new) ?			\
	 scx_bpf_task_cgroup___new((p)) : NULL)
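
/*
 * Example (sketch): the returned cgroup is reference-counted and must be
 * released; NULL indicates a kernel without cgroup support.
 *
 *	struct cgroup *cgrp = scx_bpf_task_cgroup(p);
 *
 *	if (cgrp) {
 *		u64 cgid = cgrp->kn->id;
 *		bpf_cgroup_release(cgrp);
 *	}
 */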

/*
 * The dispatch-to-DSQ kfuncs were renamed for consistency:
 *
 * - scx_bpf_consume()				-> scx_bpf_dsq_move_to_local()
 * - scx_bpf_dispatch_from_dsq_set_slice()	-> scx_bpf_dsq_move_set_slice()
 * - scx_bpf_dispatch_from_dsq_set_vtime()	-> scx_bpf_dsq_move_set_vtime()
 * - scx_bpf_dispatch_from_dsq()		-> scx_bpf_dsq_move()
 * - scx_bpf_dispatch_vtime_from_dsq()		-> scx_bpf_dsq_move_vtime()
 *
 * Both sets are declared __weak and the wrappers below pick whichever the
 * running kernel provides.
 */
bool scx_bpf_dsq_move_to_local___new(u64 dsq_id) __ksym __weak;
void scx_bpf_dsq_move_set_slice___new(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dsq_move_set_vtime___new(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dsq_move___new(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dsq_move_vtime___new(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;

bool scx_bpf_consume___old(u64 dsq_id) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_slice___old(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_vtime___old(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dispatch_from_dsq___old(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dispatch_vtime_from_dsq___old(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;

#define scx_bpf_dsq_move_to_local(dsq_id)				\
	(bpf_ksym_exists(scx_bpf_dsq_move_to_local___new) ?		\
	 scx_bpf_dsq_move_to_local___new((dsq_id)) :			\
	 scx_bpf_consume___old((dsq_id)))

#define scx_bpf_dsq_move_set_slice(it__iter, slice)			\
	(bpf_ksym_exists(scx_bpf_dsq_move_set_slice___new) ?		\
	 scx_bpf_dsq_move_set_slice___new((it__iter), (slice)) :	\
	 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___old) ?	\
	  scx_bpf_dispatch_from_dsq_set_slice___old((it__iter), (slice)) : \
	  (void)0))

#define scx_bpf_dsq_move_set_vtime(it__iter, vtime)			\
	(bpf_ksym_exists(scx_bpf_dsq_move_set_vtime___new) ?		\
	 scx_bpf_dsq_move_set_vtime___new((it__iter), (vtime)) :	\
	 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___old) ?	\
	  scx_bpf_dispatch_from_dsq_set_vtime___old((it__iter), (vtime)) : \
	  (void)0))

#define scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags)		\
	(bpf_ksym_exists(scx_bpf_dsq_move___new) ?			\
	 scx_bpf_dsq_move___new((it__iter), (p), (dsq_id), (enq_flags)) : \
	 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq___old) ?		\
	  scx_bpf_dispatch_from_dsq___old((it__iter), (p), (dsq_id), (enq_flags)) : \
	  false))

#define scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags)		\
	(bpf_ksym_exists(scx_bpf_dsq_move_vtime___new) ?		\
	 scx_bpf_dsq_move_vtime___new((it__iter), (p), (dsq_id), (enq_flags)) : \
	 (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___old) ?	\
	  scx_bpf_dispatch_vtime_from_dsq___old((it__iter), (p), (dsq_id), (enq_flags)) : \
	  false))
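
/*
 * Usage sketch (illustrative ops.dispatch() path; SHARED_DSQ is an assumed
 * scheduler-defined DSQ id and BPF_FOR_EACH_ITER names the implicit
 * bpf_for_each() iterator):
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		struct task_struct *p;
 *
 *		bpf_for_each(scx_dsq, p, SHARED_DSQ, 0) {
 *			scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, SCX_SLICE_DFL);
 *			if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p,
 *					     SCX_DSQ_LOCAL_ON | cpu, 0))
 *				return;
 *		}
 *
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 *	}
 */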

/*
 * bpf_cpumask_populate() is a newer kfunc for bulk-initializing a cpumask
 * from a raw bitmap. Evaluates to -EOPNOTSUPP on kernels without it.
 */
int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;

#define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz)		\
	(bpf_ksym_exists(bpf_cpumask_populate) ?			\
	 (bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP)
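
/*
 * Example (sketch; @mask is assumed to have been created with
 * bpf_cpumask_create(), and the source buffer is assumed to meet the
 * kfunc's size and alignment requirements):
 *
 *	u64 bits[1] = { 0xf };		// CPUs 0-3
 *
 *	if (__COMPAT_bpf_cpumask_populate((struct cpumask *)mask, bits,
 *					  sizeof(bits)) < 0)
 *		bpf_cpumask_set_cpu(0, mask);	// minimal fallback
 */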

/*
 * scx_bpf_dsq_peek() is a newer kfunc. On older kernels, emulate it by
 * taking the first task from a DSQ iterator, which is functionally
 * equivalent but slower.
 */
static inline struct task_struct *__COMPAT_scx_bpf_dsq_peek(u64 dsq_id)
{
	struct task_struct *p = NULL;
	struct bpf_iter_scx_dsq it;

	if (bpf_ksym_exists(scx_bpf_dsq_peek))
		return scx_bpf_dsq_peek(dsq_id);

	if (!bpf_iter_scx_dsq_new(&it, dsq_id, 0))
		p = bpf_iter_scx_dsq_next(&it);
	bpf_iter_scx_dsq_destroy(&it);
	return p;
}
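
/*
 * Example (sketch; SHARED_DSQ and vtime_now are assumed scheduler-side
 * names): inspect the head of a DSQ without consuming it.
 *
 *	struct task_struct *first = __COMPAT_scx_bpf_dsq_peek(SHARED_DSQ);
 *
 *	if (first && (s64)(first->scx.dsq_vtime - vtime_now) < 0)
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 */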

/*
 * SCX_ENQ_CPU_SELECTED was added to enum scx_enq_flags after the initial
 * sched_ext release. HAVE_SCX_ENQ_CPU_SELECTED is defined by the build when
 * the vmlinux.h being compiled against has the entry; the kernel the program
 * eventually runs on is still probed at load time with CO-RE. On kernels
 * without the flag, ops.enqueue() on the wakeup path is always preceded by
 * CPU selection, so default to true.
 */
static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags)
{
#ifdef HAVE_SCX_ENQ_CPU_SELECTED
	/*
	 * Suspend macro expansion of SCX_ENQ_CPU_SELECTED so that the CO-RE
	 * helpers below see the enum entry itself even if a macro of the
	 * same name is defined.
	 */
#pragma push_macro("SCX_ENQ_CPU_SELECTED")
#undef SCX_ENQ_CPU_SELECTED
	u64 flag;

	/* Running on an older kernel without the flag entry? */
	if (!bpf_core_enum_value_exists(enum scx_enq_flags,
					SCX_ENQ_CPU_SELECTED))
		return true;

	flag = bpf_core_enum_value(enum scx_enq_flags, SCX_ENQ_CPU_SELECTED);
	return enq_flags & flag;
#pragma pop_macro("SCX_ENQ_CPU_SELECTED")
#else
	return true;
#endif	/* HAVE_SCX_ENQ_CPU_SELECTED */
}

/*
 * scx_bpf_now() is a newer kfunc returning the cached rq clock. Fall back to
 * bpf_ktime_get_ns() on older kernels. Naming the macro after the kfunc is
 * safe because function-like macros are not expanded recursively.
 */
#define scx_bpf_now()							\
	(bpf_ksym_exists(scx_bpf_now) ?					\
	 scx_bpf_now() :						\
	 bpf_ktime_get_ns())

/*
 * scx_bpf_events() is a newer kfunc for reading core event counters. No-op
 * on kernels without it.
 */
#define __COMPAT_scx_bpf_events(events, size)				\
	(bpf_ksym_exists(scx_bpf_events) ?				\
	 scx_bpf_events(events, size) : ({}))

/*
 * The NUMA-aware idle-CPU kfuncs are newer additions. On older kernels, fall
 * back to the flat variants which ignore the node argument: a single node,
 * the global idle cpumasks, and flat idle-CPU picking.
 */
#define __COMPAT_scx_bpf_nr_node_ids()					\
	(bpf_ksym_exists(scx_bpf_nr_node_ids) ?				\
	 scx_bpf_nr_node_ids() : 1U)

#define __COMPAT_scx_bpf_cpu_node(cpu)					\
	(bpf_ksym_exists(scx_bpf_cpu_node) ?				\
	 scx_bpf_cpu_node(cpu) : 0)

#define __COMPAT_scx_bpf_get_idle_cpumask_node(node)			\
	(bpf_ksym_exists(scx_bpf_get_idle_cpumask_node) ?		\
	 scx_bpf_get_idle_cpumask_node(node) :				\
	 scx_bpf_get_idle_cpumask())

#define __COMPAT_scx_bpf_get_idle_smtmask_node(node)			\
	(bpf_ksym_exists(scx_bpf_get_idle_smtmask_node) ?		\
	 scx_bpf_get_idle_smtmask_node(node) :				\
	 scx_bpf_get_idle_smtmask())

#define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags)	\
	(bpf_ksym_exists(scx_bpf_pick_idle_cpu_node) ?			\
	 scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) :	\
	 scx_bpf_pick_idle_cpu(cpus_allowed, flags))

#define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags)	\
	(bpf_ksym_exists(scx_bpf_pick_any_cpu_node) ?			\
	 scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) :		\
	 scx_bpf_pick_any_cpu(cpus_allowed, flags))
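
/*
 * Usage sketch (illustrative): prefer an idle CPU on the previous CPU's
 * node; on older kernels this transparently degrades to flat picking.
 *
 *	int node = __COMPAT_scx_bpf_cpu_node(prev_cpu);
 *	s32 cpu = __COMPAT_scx_bpf_pick_idle_cpu_node(p->cpus_ptr, node, 0);
 *
 *	if (cpu >= 0)
 *		return cpu;
 */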

/*
 * scx_bpf_cpu_curr() is a newer kfunc. On older kernels, read rq->curr from
 * scx_bpf_cpu_rq() instead.
 */
static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)
{
	struct rq *rq;

	if (bpf_ksym_exists(scx_bpf_cpu_curr))
		return scx_bpf_cpu_curr(cpu);

	rq = scx_bpf_cpu_rq(cpu);
	return rq ? rq->curr : NULL;
}
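
/*
 * Example (sketch; call from a context where RCU protects the returned
 * pointer, e.g. inside an ops callback):
 *
 *	struct task_struct *curr = __COMPAT_scx_bpf_cpu_curr(cpu);
 *
 *	if (curr && curr->pid)
 *		;	// @cpu is running a non-idle task
 */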

/*
 * scx_bpf_select_cpu_and() and scx_bpf_dsq_insert_vtime() went through
 * multiple revisions. Current kernels pass the scalar arguments packed in an
 * args struct via __scx_bpf_select_cpu_and() and __scx_bpf_dsq_insert_vtime()
 * (declared alongside the current kfuncs); the ___compat declarations below
 * are the older direct-argument forms. Probe the args struct types with
 * CO-RE and call whichever generation the running kernel provides.
 */
s32 scx_bpf_select_cpu_and___compat(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
				    const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;

static inline s32
scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
		       const struct cpumask *cpus_allowed, u64 flags)
{
	if (bpf_core_type_exists(struct scx_bpf_select_cpu_and_args)) {
		struct scx_bpf_select_cpu_and_args args = {
			.prev_cpu = prev_cpu,
			.wake_flags = wake_flags,
			.flags = flags,
		};

		return __scx_bpf_select_cpu_and(p, cpus_allowed, &args);
	} else {
		return scx_bpf_select_cpu_and___compat(p, prev_cpu, wake_flags,
						       cpus_allowed, flags);
	}
}
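
/*
 * Example (sketch of an ops.select_cpu() built on the wrapper above):
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		s32 cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags,
 *						 p->cpus_ptr, 0);
 *
 *		return cpu >= 0 ? cpu : prev_cpu;
 *	}
 */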

/*
 * The older scx_bpf_dsq_insert_vtime() variants return void and cannot
 * report failure; treat them as always succeeding.
 */
static inline bool
scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime,
			 u64 enq_flags)
{
	if (bpf_core_type_exists(struct scx_bpf_dsq_insert_vtime_args)) {
		struct scx_bpf_dsq_insert_vtime_args args = {
			.dsq_id = dsq_id,
			.slice = slice,
			.vtime = vtime,
			.enq_flags = enq_flags,
		};

		return __scx_bpf_dsq_insert_vtime(p, &args);
	} else if (bpf_ksym_exists(scx_bpf_dsq_insert_vtime___compat)) {
		scx_bpf_dsq_insert_vtime___compat(p, dsq_id, slice, vtime,
						  enq_flags);
		return true;
	} else {
		scx_bpf_dispatch_vtime___compat(p, dsq_id, slice, vtime,
						enq_flags);
		return true;
	}
}

/*
 * Likewise, scx_bpf_dsq_insert() has three generations: the current variant
 * which returns whether the insertion succeeded, the earlier void variant,
 * and the original scx_bpf_dispatch().
 */
bool scx_bpf_dsq_insert___v2___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert___v1(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;

static inline bool
scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags)
{
	if (bpf_ksym_exists(scx_bpf_dsq_insert___v2___compat)) {
		return scx_bpf_dsq_insert___v2___compat(p, dsq_id, slice, enq_flags);
	} else if (bpf_ksym_exists(scx_bpf_dsq_insert___v1)) {
		scx_bpf_dsq_insert___v1(p, dsq_id, slice, enq_flags);
		return true;
	} else {
		scx_bpf_dispatch___compat(p, dsq_id, slice, enq_flags);
		return true;
	}
}
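
/*
 * Example (sketch of an ops.enqueue() using the wrappers above; SHARED_DSQ
 * is an assumed scheduler-defined DSQ id):
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL,
 *					 p->scx.dsq_vtime, enq_flags);
 *	}
 */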

/*
 * Newer kernels provide kfuncs for updating a task's slice and DSQ vtime.
 * On older kernels, write the p->scx fields directly.
 */
bool scx_bpf_task_set_slice___new(struct task_struct *p, u64 slice) __ksym __weak;
bool scx_bpf_task_set_dsq_vtime___new(struct task_struct *p, u64 vtime) __ksym __weak;

static inline void scx_bpf_task_set_slice(struct task_struct *p, u64 slice)
{
	if (bpf_ksym_exists(scx_bpf_task_set_slice___new))
		scx_bpf_task_set_slice___new(p, slice);
	else
		p->scx.slice = slice;
}

static inline void scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime)
{
	if (bpf_ksym_exists(scx_bpf_task_set_dsq_vtime___new))
		scx_bpf_task_set_dsq_vtime___new(p, vtime);
	else
		p->scx.dsq_vtime = vtime;
}
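
/*
 * Example (sketch): extend the slice of a task that should keep running.
 *
 *	scx_bpf_task_set_slice(p, SCX_SLICE_DFL * 2);
 */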

/*
 * The original scx_bpf_reenqueue_local() returns the number of re-enqueued
 * tasks and may only be called from ops.cpu_release(). The newer variant
 * returns void and may be called from anywhere.
 * __COMPAT_scx_bpf_reenqueue_local_from_anywhere() reports which one the
 * running kernel has.
 */
u32 scx_bpf_reenqueue_local___v1(void) __ksym __weak;
void scx_bpf_reenqueue_local___v2___compat(void) __ksym __weak;

static inline bool __COMPAT_scx_bpf_reenqueue_local_from_anywhere(void)
{
	return bpf_ksym_exists(scx_bpf_reenqueue_local___v2___compat);
}

static inline void scx_bpf_reenqueue_local(void)
{
	if (__COMPAT_scx_bpf_reenqueue_local_from_anywhere())
		scx_bpf_reenqueue_local___v2___compat();
	else
		scx_bpf_reenqueue_local___v1();
}
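
/*
 * Example (sketch of an ops.cpu_release(), valid on both generations):
 *
 *	void BPF_STRUCT_OPS(example_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}
 */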

/*
 * Define sched_ext_ops. This may be expanded to define multiple variants
 * for backward compatibility.
 */
#define SCX_OPS_DEFINE(__name, ...)					\
	SEC(".struct_ops.link")						\
	struct sched_ext_ops __name = {					\
		__VA_ARGS__,						\
	};
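
/*
 * Usage sketch (names are illustrative):
 *
 *	SCX_OPS_DEFINE(example_ops,
 *		       .select_cpu	= (void *)example_select_cpu,
 *		       .enqueue		= (void *)example_enqueue,
 *		       .dispatch	= (void *)example_dispatch,
 *		       .name		= "example");
 */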

#endif	/* __SCX_COMPAT_BPF_H */