diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index a63a98a96b86..19459dedde41 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -101,7 +101,6 @@ struct rq *scx_bpf_locked_rq(void) __ksym;
 struct task_struct *scx_bpf_cpu_curr(s32 cpu) __ksym __weak;
 u64 scx_bpf_now(void) __ksym __weak;
 void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;
-bool scx_bpf_sub_dispatch(u64 cgroup_id) __ksym __weak;
 
 /*
  * Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from
diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h
index 83b3425e63b2..654b566bd94a 100644
--- a/tools/sched_ext/include/scx/compat.bpf.h
+++ b/tools/sched_ext/include/scx/compat.bpf.h
@@ -108,6 +108,19 @@ static inline struct task_struct *__COMPAT_scx_bpf_dsq_peek(u64 dsq_id)
 	return p;
 }
 
+/*
+ * v7.1: scx_bpf_sub_dispatch() for sub-sched dispatch. Preserve until
+ * we drop the compat layer for older kernels that lack the kfunc.
+ */
+bool scx_bpf_sub_dispatch___compat(u64 cgroup_id) __ksym __weak;
+
+static inline bool scx_bpf_sub_dispatch(u64 cgroup_id)
+{
+	if (bpf_ksym_exists(scx_bpf_sub_dispatch___compat))
+		return scx_bpf_sub_dispatch___compat(cgroup_id);
+	return false;
+}
+
 /**
  * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
  * in a compatible way. We will preserve this __COMPAT helper until v6.16.