1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4 *
5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8 */
9#ifdef CONFIG_SCHED_CLASS_EXT
10
/*
 * Hooks the core scheduler calls into sched_ext when
 * CONFIG_SCHED_CLASS_EXT is enabled; implementations live outside this
 * header. No-op fallbacks for the disabled case are defined below.
 * NOTE(review): init_scx_entity() and task_should_scx() have no
 * !CONFIG_SCHED_CLASS_EXT stubs — presumably only referenced from code
 * compiled under this config; confirm against callers.
 */
void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
int scx_fork(struct task_struct *p);
void scx_post_fork(struct task_struct *p);
void scx_cancel_fork(struct task_struct *p);
bool scx_can_stop_tick(struct rq *rq);
void scx_rq_activate(struct rq *rq);
void scx_rq_deactivate(struct rq *rq);
int scx_check_setscheduler(struct task_struct *p, int policy);
bool task_should_scx(int policy);
bool scx_allow_ttwu_queue(const struct task_struct *p);
void init_sched_ext_class(void);
24
25static inline u32 scx_cpuperf_target(s32 cpu)
26{
27 if (scx_enabled())
28 return cpu_rq(cpu)->scx.cpuperf_target;
29 else
30 return 0;
31}
32
33static inline bool task_on_scx(const struct task_struct *p)
34{
35 return scx_enabled() && p->sched_class == &ext_sched_class;
36}
37
#ifdef CONFIG_SCHED_CORE
/*
 * Core-scheduling comparison hook for two sched_ext tasks — presumably
 * ranks @a vs @b for SMT core selection; confirm semantics in the
 * implementation.
 */
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
		   bool in_fi);
#endif
42
43#else /* CONFIG_SCHED_CLASS_EXT */
44
/*
 * !CONFIG_SCHED_CLASS_EXT: no-op/identity stubs so callers in the core
 * scheduler need no #ifdefs. Return values pick the permissive default:
 * fork/setscheduler succeed (0), the tick may be stopped (true), ttwu
 * queueing is allowed (true), no task is on scx (false), and the
 * cpuperf target is 0.
 */
static inline void scx_tick(struct rq *rq) {}
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
static inline void scx_post_fork(struct task_struct *p) {}
static inline void scx_cancel_fork(struct task_struct *p) {}
static inline u32 scx_cpuperf_target(s32 cpu) { return 0; }
static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
static inline void scx_rq_activate(struct rq *rq) {}
static inline void scx_rq_deactivate(struct rq *rq) {}
static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline bool scx_allow_ttwu_queue(const struct task_struct *p) { return true; }
static inline void init_sched_ext_class(void) {}
58
59#endif /* CONFIG_SCHED_CLASS_EXT */
60
61#ifdef CONFIG_SCHED_CLASS_EXT
62void __scx_update_idle(struct rq *rq, bool idle, bool do_notify);
63
64static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify)
65{
66 if (scx_enabled())
67 __scx_update_idle(rq, idle, do_notify);
68}
69#else
70static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
71#endif
72
#ifdef CONFIG_CGROUP_SCHED
#ifdef CONFIG_EXT_GROUP_SCHED
/*
 * Cgroup (task_group) lifecycle and attribute hooks for sched_ext;
 * implementations live outside this header.
 */
void scx_tg_init(struct task_group *tg);
int scx_tg_online(struct task_group *tg);
void scx_tg_offline(struct task_group *tg);
int scx_cgroup_can_attach(struct cgroup_taskset *tset);
void scx_cgroup_move_task(struct task_struct *p);
void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
void scx_group_set_idle(struct task_group *tg, bool idle);
void scx_group_set_bandwidth(struct task_group *tg, u64 period_us, u64 quota_us, u64 burst_us);
#else	/* CONFIG_EXT_GROUP_SCHED */
/* !CONFIG_EXT_GROUP_SCHED: no-op stubs; online/can_attach succeed (0). */
static inline void scx_tg_init(struct task_group *tg) {}
static inline int scx_tg_online(struct task_group *tg) { return 0; }
static inline void scx_tg_offline(struct task_group *tg) {}
static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
static inline void scx_cgroup_move_task(struct task_struct *p) {}
static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {}
static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {}
static inline void scx_group_set_idle(struct task_group *tg, bool idle) {}
static inline void scx_group_set_bandwidth(struct task_group *tg, u64 period_us, u64 quota_us, u64 burst_us) {}
#endif	/* CONFIG_EXT_GROUP_SCHED */
#endif	/* CONFIG_CGROUP_SCHED */
96