Merge tag 'sched-urgent-2022-08-06' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Various fixes: a deadline scheduler fix, a migration fix, a Sparse fix
  and a comment fix"

* tag 'sched-urgent-2022-08-06' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Do not requeue task on CPU excluded from cpus_mask
  sched/rt: Fix Sparse warnings due to undefined rt.c declarations
  exit: Fix typo in comment: s/sub-theads/sub-threads
  sched, cpuset: Fix dl_cpu_busy() panic due to empty cs->cpus_allowed
commit cac03ac368
Linus Torvalds, 2022-08-06 17:34:06 -07:00
5 changed files with 18 additions and 11 deletions

include/linux/sched.h

@@ -1825,7 +1825,7 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
 }
 
 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
+extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);

kernel/cgroup/cpuset.c

@@ -2239,7 +2239,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
 		goto out_unlock;
 
 	cgroup_taskset_for_each(task, css, tset) {
-		ret = task_can_attach(task, cs->cpus_allowed);
+		ret = task_can_attach(task, cs->effective_cpus);
 		if (ret)
 			goto out_unlock;
 		ret = security_task_setscheduler(task);

kernel/exit.c

@@ -1051,7 +1051,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
		 * p->signal fields because the whole thread group is dead
		 * and nobody can change them.
		 *
-		 * psig->stats_lock also protects us from our sub-theads
+		 * psig->stats_lock also protects us from our sub-threads
		 * which can reap other children at the same time. Until
		 * we change k_getrusage()-like users to rely on this lock
		 * we have to take ->siglock as well.

kernel/sched/core.c

@@ -3802,7 +3802,7 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 }
 
-static inline bool ttwu_queue_cond(int cpu)
+static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
 {
 	/*
 	 * Do not complicate things with the async wake_list while the CPU is
@@ -3811,6 +3811,10 @@ static inline bool ttwu_queue_cond(int cpu)
 	if (!cpu_active(cpu))
 		return false;
 
+	/* Ensure the task will still be allowed to run on the CPU. */
+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+		return false;
+
 	/*
 	 * If the CPU does not share cache, then queue the task on the
 	 * remote rqs wakelist to avoid accessing remote data.
@@ -3840,7 +3844,7 @@ static inline bool ttwu_queue_cond(int cpu)
 
 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
 {
-	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu)) {
+	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
 		__ttwu_queue_wakelist(p, cpu, wake_flags);
 		return true;
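
The cpumask_test_cpu() check added to ttwu_queue_cond() above keeps a wakeup from being queued on a CPU that has since been removed from the task's affinity mask. Below is a minimal userspace sketch of the same check, using glibc's cpu_set_t in place of the kernel's struct cpumask; can_queue_on() is an illustrative helper, not a kernel or libc API:

#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static bool can_queue_on(cpu_set_t *cpus_ptr, int cpu)
{
	/* Mirrors: if (!cpumask_test_cpu(cpu, p->cpus_ptr)) return false; */
	return CPU_ISSET(cpu, cpus_ptr);
}

int main(void)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);	/* the task is allowed on CPU 0 only */

	printf("CPU 0: %s\n", can_queue_on(&mask, 0) ? "queue" : "skip");
	printf("CPU 1: %s\n", can_queue_on(&mask, 1) ? "queue" : "skip");
	return 0;
}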
@@ -9012,7 +9016,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
 }
 
 int task_can_attach(struct task_struct *p,
-		    const struct cpumask *cs_cpus_allowed)
+		    const struct cpumask *cs_effective_cpus)
 {
 	int ret = 0;
 
@@ -9031,9 +9035,11 @@ int task_can_attach(struct task_struct *p,
 	}
 
 	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
-					      cs_cpus_allowed)) {
-		int cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
+					      cs_effective_cpus)) {
+		int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);
 
+		if (unlikely(cpu >= nr_cpu_ids))
+			return -EINVAL;
 		ret = dl_cpu_busy(cpu, p);
 	}
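
In task_can_attach() above, cpumask_any_and() yields a CPU number greater than or equal to nr_cpu_ids when the two masks do not intersect, which is exactly what an empty cs->effective_cpus produces; the new unlikely() check returns -EINVAL instead of handing that out-of-range value to dl_cpu_busy(). A userspace sketch of that sentinel convention, with any_and() and NR_CPU_IDS as illustrative stand-ins for the kernel's cpumask_any_and() and nr_cpu_ids:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define NR_CPU_IDS 64	/* stand-in for the kernel's nr_cpu_ids */

/* Like cpumask_any_and(): first CPU set in both masks, or
 * NR_CPU_IDS as the "no eligible CPU" sentinel. */
static int any_and(cpu_set_t *a, cpu_set_t *b)
{
	for (int cpu = 0; cpu < NR_CPU_IDS; cpu++)
		if (CPU_ISSET(cpu, a) && CPU_ISSET(cpu, b))
			return cpu;
	return NR_CPU_IDS;
}

int main(void)
{
	cpu_set_t active, effective;
	int cpu;

	CPU_ZERO(&active);
	CPU_ZERO(&effective);
	CPU_SET(2, &active);	/* only CPU 2 is active */
	/* 'effective' stays empty: models an empty cs->effective_cpus */

	cpu = any_and(&active, &effective);
	if (cpu >= NR_CPU_IDS) {	/* the bail-out the fix adds */
		fprintf(stderr, "no eligible CPU, returning -EINVAL\n");
		return 1;
	}
	printf("would check deadline bandwidth on CPU %d\n", cpu);
	return 0;
}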

kernel/sched/sched.h

@@ -481,9 +481,6 @@ extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 
-extern void unregister_rt_sched_group(struct task_group *tg);
-extern void free_rt_sched_group(struct task_group *tg);
-extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 		struct sched_rt_entity *rt_se, int cpu,
 		struct sched_rt_entity *parent);
@@ -521,6 +518,10 @@ struct cfs_bandwidth { };
 
 #endif /* CONFIG_CGROUP_SCHED */
 
+extern void unregister_rt_sched_group(struct task_group *tg);
+extern void free_rt_sched_group(struct task_group *tg);
+extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
+
 /*
  * u64_u32_load/u64_u32_store
  *
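
The move above takes the three rt_sched_group declarations out of the CONFIG_CGROUP_SCHED block because kernel/sched/rt.c defines those functions unconditionally; with the option disabled, Sparse saw definitions with no preceding prototype and warned "symbol was not declared. Should it be static?". A single-file sketch of that warning, with helper() and fixed_helper() as illustrative names:

/* Compile with: sparse demo.c */

/* No prior declaration, so Sparse warns:
 *   "symbol 'helper' was not declared. Should it be static?" */
void helper(void)
{
}

/* Declared first, normally via a header that is included
 * unconditionally, as the hunk above arranges, so Sparse is happy. */
void fixed_helper(void);

void fixed_helper(void)
{
}

int main(void)
{
	helper();
	fixed_helper();
	return 0;
}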