mirror of https://github.com/torvalds/linux.git
8195136669
Add sched_ext_ops operations to init/exit cgroups, and track task migrations and config changes. A BPF scheduler may implement only a subset of the cgroup features, or none at all. The implemented features can be indicated using %SCX_OPS_HAS_CGROUP_* flags. If the cgroup configuration makes use of features that are not implemented, a warning is triggered.

While a BPF scheduler is being enabled and disabled, relevant cgroup operations are locked out using scx_cgroup_rwsem. This avoids situations like task prep taking place while the task is being moved across cgroups, making things easier for BPF schedulers.

v7: - cgroup interface file visibility toggling is dropped in favor of just warning messages. Dynamically changing interface visibility caused more confusion than it helped.

v6: - Updated to reflect the removal of SCX_KF_SLEEPABLE.

    - Updated to use CONFIG_GROUP_SCHED_WEIGHT and fixed builds with !CONFIG_FAIR_GROUP_SCHED && CONFIG_EXT_GROUP_SCHED.

v5: - Flipped the locking order between scx_cgroup_rwsem and cpus_read_lock() to avoid a locking order conflict with cpuset. Better documentation around locking.

    - sched_move_task() takes an early exit if the source and destination are identical. This triggered the warning in scx_cgroup_can_attach() as it left p->scx.cgrp_moving_from uncleared. Updated the cgroup migration path so that ops.cgroup_prep_move() is skipped for identity migrations and its invocations always match ops.cgroup_move() one-to-one.

v4: - Example schedulers moved into their own patches.

    - Fix build failure when !CONFIG_CGROUP_SCHED, reported by Andrea Righi.

v3: - Make scx_example_pair switch all tasks by default.

    - Convert to BPF inline iterators.

    - scx_bpf_task_cgroup() is added to determine the current cgroup from the CPU controller's POV. This allows BPF schedulers to accurately track CPU cgroup membership.

    - scx_example_flatcg added. This demonstrates a flattened-hierarchy implementation of CPU cgroup control and shows a significant performance improvement when cgroups nested multiple levels deep compete with each other.

v2: - Build fixes for different CONFIG combinations.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: David Vernet <dvernet@meta.com>
Acked-by: Josh Don <joshdon@google.com>
Acked-by: Hao Luo <haoluo@google.com>
Acked-by: Barret Rhoden <brho@google.com>
Reported-by: kernel test robot <lkp@intel.com>
Cc: Andrea Righi <andrea.righi@canonical.com>
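The cgroup operations themselves live on the BPF side. The fragment below is a minimal, hypothetical sketch (not part of this patch) of a scheduler that supports only the cgroup weight feature and advertises it via %SCX_OPS_HAS_CGROUP_WEIGHT. It assumes the headers and helper macros used by the in-tree example schedulers (scx/common.bpf.h, BPF_STRUCT_OPS*); the sketch_* names and the map are illustrative only, and a real scheduler would also implement enqueue/dispatch and consult the cached weights when making decisions.

/* Hypothetical sketch: cache cpu.weight per cgroup so dispatch logic could
 * consult it. Only a fragment, not a complete scheduler. */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

/* per-cgroup weight cache keyed by cgroup ID (illustrative) */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 16384);
	__type(key, u64);
	__type(value, u32);
} cgrp_weight SEC(".maps");

s32 BPF_STRUCT_OPS_SLEEPABLE(sketch_cgroup_init, struct cgroup *cgrp,
			     struct scx_cgroup_init_args *args)
{
	u64 cgid = cgrp->kn->id;
	u32 weight = args->weight;

	/* record the weight the cgroup starts out with */
	return bpf_map_update_elem(&cgrp_weight, &cgid, &weight, BPF_ANY);
}

void BPF_STRUCT_OPS(sketch_cgroup_exit, struct cgroup *cgrp)
{
	u64 cgid = cgrp->kn->id;

	bpf_map_delete_elem(&cgrp_weight, &cgid);
}

void BPF_STRUCT_OPS(sketch_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
{
	u64 cgid = cgrp->kn->id;

	/* track cpu.weight changes as they happen */
	bpf_map_update_elem(&cgrp_weight, &cgid, &weight, BPF_ANY);
}

SEC(".struct_ops.link")
struct sched_ext_ops sketch_ops = {
	.cgroup_init		= (void *)sketch_cgroup_init,
	.cgroup_exit		= (void *)sketch_cgroup_exit,
	.cgroup_set_weight	= (void *)sketch_cgroup_set_weight,
	.flags			= SCX_OPS_HAS_CGROUP_WEIGHT,
	.name			= "cgrp_sketch",
};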
92 lines · 3.4 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifdef CONFIG_SCHED_CLASS_EXT

void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
int scx_fork(struct task_struct *p);
void scx_post_fork(struct task_struct *p);
void scx_cancel_fork(struct task_struct *p);
bool scx_can_stop_tick(struct rq *rq);
void scx_rq_activate(struct rq *rq);
void scx_rq_deactivate(struct rq *rq);
int scx_check_setscheduler(struct task_struct *p, int policy);
bool task_should_scx(struct task_struct *p);
void init_sched_ext_class(void);

static inline u32 scx_cpuperf_target(s32 cpu)
{
	if (scx_enabled())
		return cpu_rq(cpu)->scx.cpuperf_target;
	else
		return 0;
}

static inline bool task_on_scx(const struct task_struct *p)
{
	return scx_enabled() && p->sched_class == &ext_sched_class;
}

#ifdef CONFIG_SCHED_CORE
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
		   bool in_fi);
#endif

#else	/* CONFIG_SCHED_CLASS_EXT */

static inline void scx_tick(struct rq *rq) {}
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
static inline void scx_post_fork(struct task_struct *p) {}
static inline void scx_cancel_fork(struct task_struct *p) {}
static inline u32 scx_cpuperf_target(s32 cpu) { return 0; }
static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
static inline void scx_rq_activate(struct rq *rq) {}
static inline void scx_rq_deactivate(struct rq *rq) {}
static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline void init_sched_ext_class(void) {}

#endif	/* CONFIG_SCHED_CLASS_EXT */

#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
void __scx_update_idle(struct rq *rq, bool idle);

static inline void scx_update_idle(struct rq *rq, bool idle)
{
	if (scx_enabled())
		__scx_update_idle(rq, idle);
}
#else
static inline void scx_update_idle(struct rq *rq, bool idle) {}
#endif

#ifdef CONFIG_CGROUP_SCHED
#ifdef CONFIG_EXT_GROUP_SCHED
int scx_tg_online(struct task_group *tg);
void scx_tg_offline(struct task_group *tg);
int scx_cgroup_can_attach(struct cgroup_taskset *tset);
void scx_move_task(struct task_struct *p);
void scx_cgroup_finish_attach(void);
void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
void scx_group_set_idle(struct task_group *tg, bool idle);
#else	/* CONFIG_EXT_GROUP_SCHED */
static inline int scx_tg_online(struct task_group *tg) { return 0; }
static inline void scx_tg_offline(struct task_group *tg) {}
static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
static inline void scx_move_task(struct task_struct *p) {}
static inline void scx_cgroup_finish_attach(void) {}
static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {}
static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {}
static inline void scx_group_set_idle(struct task_group *tg, bool idle) {}
#endif	/* CONFIG_EXT_GROUP_SCHED */
#endif	/* CONFIG_CGROUP_SCHED */
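For reference, the cgroup hooks declared above are consumed by the cpu controller paths in kernel/sched/core.c. The fragment below only illustrates that wiring under the scheme the commit message describes (prep on can_attach, per-task move, then finish or cancel); it is not copied from the patch and the actual call sites may differ in detail.

static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
	/* give the BPF scheduler a chance to prep each task (ops.cgroup_prep_move()) */
	return scx_cgroup_can_attach(tset);
}

static void cpu_cgroup_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	cgroup_taskset_for_each(task, css, tset)
		sched_move_task(task);	/* ends up calling scx_move_task() */

	/* all moves in this taskset are done */
	scx_cgroup_finish_attach();
}

static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
	/* migration aborted; undo the prep (ops.cgroup_cancel_move()) */
	scx_cgroup_cancel_attach(tset);
}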