Mirror of https://github.com/torvalds/linux.git (synced 2024-11-26 06:02:05 +00:00)
sched/deadline: Make find_later_rq() choose a closer CPU in topology

When cpudl_find() returns any among free_cpus, the CPU might not be
closer than others, considering sched domain. For example:

   this_cpu: 15
   free_cpus: 0, 1,..., 14 (== later_mask)
   best_cpu: 0

   topology:

   0 --+
       +--+
   1 --+  |
          +-- ... --+
   2 --+  |         |
       +--+         |
   3 --+            |

   ...             ...

   12 --+           |
        +--+        |
   13 --+  |        |
           +-- ... -+
   14 --+  |
        +--+
   15 --+

In this case, it would be best to select 14 since it's a free CPU and
closest to 15 (this_cpu). However, currently the code selects 0
(best_cpu) even though that's just any among free_cpus. Fix it.

This (re)aligns the deadline behaviour with the rt behaviour.

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <bristot@redhat.com>
Cc: <juri.lelli@gmail.com>
Cc: <kernel-team@lge.com>
Cc: <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1495504859-10960-2-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent b5dd77c8bd
commit b18c3ca11c
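For illustration only, below is a small userspace C sketch of the selection rule this patch introduces: rather than returning whatever CPU cpudl_find() happened to pick, walk outward through nested domain spans around this_cpu and take the first free CPU found in the smallest span. The mask values, span layout and helper names (first_and(), pick_closest_free_cpu()) are hypothetical stand-ins that model the kernel's for_each_domain()/cpumask_first_and()/sched_domain_span() machinery; this is not the actual kernel code.

/*
 * Toy model of topology-aware CPU selection (hypothetical, userspace).
 * "Domains" are nested CPU masks around this_cpu, smallest span first,
 * mimicking how for_each_domain() walks sched domains outward. The
 * closest free CPU is the first one found in the smallest span that
 * intersects the free mask.
 */
#include <stdint.h>
#include <stdio.h>

static int first_and(uint32_t a, uint32_t b)
{
	uint32_t both = a & b;

	if (!both)
		return -1;			/* no CPU in both masks */
	return __builtin_ctz(both);		/* lowest set bit = CPU id */
}

static int pick_closest_free_cpu(uint32_t free_mask,
				 const uint32_t *domain_span, int nr_domains)
{
	for (int d = 0; d < nr_domains; d++) {	/* smallest span first */
		int cpu = first_and(free_mask, domain_span[d]);

		if (cpu >= 0)
			return cpu;
	}
	return -1;
}

int main(void)
{
	/* 16 CPUs; this_cpu = 15; CPUs 0..14 are free (bit i = CPU i). */
	uint32_t free_mask = 0x7fff;
	/* Hypothetical spans around CPU 15: {14,15}, {12..15}, {0..15}. */
	uint32_t spans[] = { 0xc000, 0xf000, 0xffff };

	/* Prints 14: the free CPU closest to 15, not just "any" free CPU. */
	printf("%d\n", pick_closest_free_cpu(free_mask, spans, 3));
	return 0;
}

With the example from the changelog (free CPUs 0..14, this_cpu 15), this picks 14 because the smallest span around CPU 15 that contains a free CPU is searched first, which is exactly the effect of testing later_mask against each sched_domain_span() from the innermost domain outward.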
@@ -1798,7 +1798,7 @@ static int find_later_rq(struct task_struct *task)
 	struct sched_domain *sd;
 	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
 	int this_cpu = smp_processor_id();
-	int best_cpu, cpu = task_cpu(task);
+	int cpu = task_cpu(task);
 
 	/* Make sure the mask is initialized first */
 	if (unlikely(!later_mask))
@@ -1811,17 +1811,14 @@ static int find_later_rq(struct task_struct *task)
 	 * We have to consider system topology and task affinity
 	 * first, then we can look for a suitable cpu.
 	 */
-	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
-			task, later_mask);
-	if (best_cpu == -1)
+	if (cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask) == -1)
 		return -1;
 
 	/*
-	 * If we are here, some target has been found,
-	 * the most suitable of which is cached in best_cpu.
-	 * This is, among the runqueues where the current tasks
-	 * have later deadlines than the task's one, the rq
-	 * with the latest possible one.
+	 * If we are here, some targets have been found, including
+	 * the most suitable which is, among the runqueues where the
+	 * current tasks have later deadlines than the task's one, the
+	 * rq with the latest possible one.
 	 *
 	 * Now we check how well this matches with task's
 	 * affinity and system topology.
@@ -1841,6 +1838,7 @@ static int find_later_rq(struct task_struct *task)
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
 		if (sd->flags & SD_WAKE_AFFINE) {
+			int best_cpu;
 
 			/*
 			 * If possible, preempting this_cpu is
@@ -1852,12 +1850,15 @@ static int find_later_rq(struct task_struct *task)
 				return this_cpu;
 			}
 
+			best_cpu = cpumask_first_and(later_mask,
+					sched_domain_span(sd));
 			/*
-			 * Last chance: if best_cpu is valid and is
-			 * in the mask, that becomes our choice.
+			 * Last chance: if a cpu being in both later_mask
+			 * and current sd span is valid, that becomes our
+			 * choice. Of course, the latest possible cpu is
+			 * already under consideration through later_mask.
 			 */
-			if (best_cpu < nr_cpu_ids &&
-			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
+			if (best_cpu < nr_cpu_ids) {
 				rcu_read_unlock();
 				return best_cpu;
 			}