[PATCH] sched: remove SMT nice

Remove the SMT-nice feature, which idles sibling cpus on SMT cpus to
facilitate nice working properly where cpu power is shared.  Idling cpus
in the presence of runnable tasks is considered too fragile and too easy
for outside code to break, and managing this system would become
unworkably complex if an architecture came along with many logical cores
sharing cpu power.

Remove the associated per_cpu_gain variable in struct sched_domain; it was
used only by this code.
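
For context, per_cpu_gain expressed the cpu % a domain's cpus gain from
sharing power (25 for SMT siblings, 100 where nothing is shared), and the
SMT-nice code used it to estimate how much of a task's timeslice is lost
to a busy sibling.  A minimal standalone sketch of that arithmetic,
mirroring the smt_slice() removed below (illustrative, not kernel code):

	/* Timeslice a task cannot fully use while a sibling runs.
	 * per_cpu_gain was 25 in the SMT sibling domain defaults. */
	static unsigned long smt_slice(unsigned long time_slice,
				       unsigned int per_cpu_gain)
	{
		return time_slice * (100 - per_cpu_gain) / 100;
	}

	/* e.g. smt_slice(100, 25) == 75: with only a 25% throughput
	 * gain from the sibling, 75 ticks of a 100-tick slice are
	 * effectively lost to it. */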

Also:

  The reason is that with dynticks enabled this code breaks without further
  tweaks, so dynticks brought on the rapid demise of this code.  So either
  we tweak this code or kill it off entirely.  It was Ingo's preference to
  kill it off.  Either way this needs to happen for 2.6.21 since dynticks
  has gone in.

Signed-off-by: Con Kolivas <kernel@kolivas.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 69f7c0a1be
parent 759b9775c2
Author: Con Kolivas <kernel@kolivas.org>
Date:   2007-03-05 00:30:29 -0800
Commit: Linus Torvalds <torvalds@linux-foundation.org>

 8 files changed, 1 insertion(+), 165 deletions(-)

diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h

@@ -85,7 +85,6 @@ static inline int node_to_first_cpu(int node)
 	.idle_idx		= 1,			\
 	.newidle_idx		= 2,			\
 	.wake_idx		= 1,			\
-	.per_cpu_gain		= 100,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_EXEC	\
 				| SD_BALANCE_FORK	\

diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h

@@ -65,7 +65,6 @@ void build_cpu_to_node_map(void);
 	.max_interval		= 4,			\
 	.busy_factor		= 64,			\
 	.imbalance_pct		= 125,			\
-	.per_cpu_gain		= 100,			\
 	.cache_nice_tries	= 2,			\
 	.busy_idx		= 2,			\
 	.idle_idx		= 1,			\
@@ -97,7 +96,6 @@ void build_cpu_to_node_map(void);
 	.newidle_idx		= 0, /* unused */	\
 	.wake_idx		= 1,			\
 	.forkexec_idx		= 1,			\
-	.per_cpu_gain		= 100,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_EXEC	\
 				| SD_BALANCE_FORK	\

diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h

@@ -28,7 +28,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
 	.busy_factor		= 32,			\
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 1,			\
-	.per_cpu_gain		= 100,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_EXEC	\
 				| SD_WAKE_BALANCE,	\

diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h

@@ -57,7 +57,6 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 	.busy_factor		= 32,			\
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 1,			\
-	.per_cpu_gain		= 100,			\
 	.busy_idx		= 3,			\
 	.idle_idx		= 1,			\
 	.newidle_idx		= 2,			\

diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h

@@ -43,7 +43,6 @@ extern int __node_distance(int, int);
 	.newidle_idx		= 0,			\
 	.wake_idx		= 1,			\
 	.forkexec_idx		= 1,			\
-	.per_cpu_gain		= 100,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_FORK	\
 				| SD_BALANCE_EXEC	\

diff --git a/include/linux/sched.h b/include/linux/sched.h

@@ -684,7 +684,6 @@ struct sched_domain {
 	unsigned int imbalance_pct;	/* No balance until over watermark */
 	unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
-	unsigned int per_cpu_gain;	/* CPU % gained by adding domain cpus */
 	unsigned int busy_idx;
 	unsigned int idle_idx;
 	unsigned int newidle_idx;

diff --git a/include/linux/topology.h b/include/linux/topology.h

@@ -96,7 +96,6 @@
 	.busy_factor		= 64,			\
 	.imbalance_pct		= 110,			\
 	.cache_nice_tries	= 0,			\
-	.per_cpu_gain		= 25,			\
 	.busy_idx		= 0,			\
 	.idle_idx		= 0,			\
 	.newidle_idx		= 1,			\
@@ -128,7 +127,6 @@
 	.busy_factor		= 64,			\
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 1,			\
-	.per_cpu_gain		= 100,			\
 	.busy_idx		= 2,			\
 	.idle_idx		= 1,			\
 	.newidle_idx		= 2,			\
@@ -159,7 +157,6 @@
 	.busy_factor		= 64,			\
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 1,			\
-	.per_cpu_gain		= 100,			\
 	.busy_idx		= 2,			\
 	.idle_idx		= 1,			\
 	.newidle_idx		= 2,			\
@@ -193,7 +190,6 @@
 	.newidle_idx		= 0, /* unused */	\
 	.wake_idx		= 0, /* unused */	\
 	.forkexec_idx		= 0, /* unused */	\
-	.per_cpu_gain		= 100,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_SERIALIZE,		\
 	.last_balance		= jiffies,		\

diff --git a/kernel/sched.c b/kernel/sched.c

@@ -3006,23 +3006,6 @@ static inline void idle_balance(int cpu, struct rq *rq)
 }
 #endif
 
-static inline void wake_priority_sleeper(struct rq *rq)
-{
-#ifdef CONFIG_SCHED_SMT
-	if (!rq->nr_running)
-		return;
-
-	spin_lock(&rq->lock);
-	/*
-	 * If an SMT sibling task has been put to sleep for priority
-	 * reasons reschedule the idle task to see if it can now run.
-	 */
-	if (rq->nr_running)
-		resched_task(rq->idle);
-	spin_unlock(&rq->lock);
-#endif
-}
-
 DEFINE_PER_CPU(struct kernel_stat, kstat);
 
 EXPORT_PER_CPU_SYMBOL(kstat);
@@ -3239,10 +3222,7 @@ void scheduler_tick(void)
 
 	update_cpu_clock(p, rq, now);
 
-	if (p == rq->idle)
-		/* Task on the idle queue */
-		wake_priority_sleeper(rq);
-	else
+	if (p != rq->idle)
 		task_running_tick(rq, p);
 #ifdef CONFIG_SMP
 	update_load(rq);
@@ -3251,136 +3231,6 @@
 #endif
 }
 
-#ifdef CONFIG_SCHED_SMT
-static inline void wakeup_busy_runqueue(struct rq *rq)
-{
-	/* If an SMT runqueue is sleeping due to priority reasons wake it up */
-	if (rq->curr == rq->idle && rq->nr_running)
-		resched_task(rq->idle);
-}
-
-/*
- * Called with interrupt disabled and this_rq's runqueue locked.
- */
-static void wake_sleeping_dependent(int this_cpu)
-{
-	struct sched_domain *tmp, *sd = NULL;
-	int i;
-
-	for_each_domain(this_cpu, tmp) {
-		if (tmp->flags & SD_SHARE_CPUPOWER) {
-			sd = tmp;
-			break;
-		}
-	}
-
-	if (!sd)
-		return;
-
-	for_each_cpu_mask(i, sd->span) {
-		struct rq *smt_rq = cpu_rq(i);
-
-		if (i == this_cpu)
-			continue;
-		if (unlikely(!spin_trylock(&smt_rq->lock)))
-			continue;
-
-		wakeup_busy_runqueue(smt_rq);
-		spin_unlock(&smt_rq->lock);
-	}
-}
-
-/*
- * number of 'lost' timeslices this task wont be able to fully
- * utilize, if another task runs on a sibling. This models the
- * slowdown effect of other tasks running on siblings:
- */
-static inline unsigned long
-smt_slice(struct task_struct *p, struct sched_domain *sd)
-{
-	return p->time_slice * (100 - sd->per_cpu_gain) / 100;
-}
-
-/*
- * To minimise lock contention and not have to drop this_rq's runlock we only
- * trylock the sibling runqueues and bypass those runqueues if we fail to
- * acquire their lock. As we only trylock the normal locking order does not
- * need to be obeyed.
- */
-static int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
-	struct sched_domain *tmp, *sd = NULL;
-	int ret = 0, i;
-
-	/* kernel/rt threads do not participate in dependent sleeping */
-	if (!p->mm || rt_task(p))
-		return 0;
-
-	for_each_domain(this_cpu, tmp) {
-		if (tmp->flags & SD_SHARE_CPUPOWER) {
-			sd = tmp;
-			break;
-		}
-	}
-
-	if (!sd)
-		return 0;
-
-	for_each_cpu_mask(i, sd->span) {
-		struct task_struct *smt_curr;
-		struct rq *smt_rq;
-
-		if (i == this_cpu)
-			continue;
-
-		smt_rq = cpu_rq(i);
-		if (unlikely(!spin_trylock(&smt_rq->lock)))
-			continue;
-
-		smt_curr = smt_rq->curr;
-		if (!smt_curr->mm)
-			goto unlock;
-
-		/*
-		 * If a user task with lower static priority than the
-		 * running task on the SMT sibling is trying to schedule,
-		 * delay it till there is proportionately less timeslice
-		 * left of the sibling task to prevent a lower priority
-		 * task from using an unfair proportion of the
-		 * physical cpu's resources. -ck
-		 */
-		if (rt_task(smt_curr)) {
-			/*
-			 * With real time tasks we run non-rt tasks only
-			 * per_cpu_gain% of the time.
-			 */
-			if ((jiffies % DEF_TIMESLICE) >
-				(sd->per_cpu_gain * DEF_TIMESLICE / 100))
-					ret = 1;
-		} else {
-			if (smt_curr->static_prio < p->static_prio &&
-				!TASK_PREEMPTS_CURR(p, smt_rq) &&
-				smt_slice(smt_curr, sd) > task_timeslice(p))
-					ret = 1;
-		}
-unlock:
-		spin_unlock(&smt_rq->lock);
-	}
-	return ret;
-}
-#else
-static inline void wake_sleeping_dependent(int this_cpu)
-{
-}
-static inline int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
-	return 0;
-}
-#endif
-
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
 
 void fastcall add_preempt_count(int val)
@@ -3507,7 +3357,6 @@ need_resched_nonpreemptible:
 		if (!rq->nr_running) {
 			next = rq->idle;
 			rq->expired_timestamp = 0;
-			wake_sleeping_dependent(cpu);
 			goto switch_tasks;
 		}
 	}
@@ -3547,8 +3396,6 @@
 		}
 	}
 	next->sleep_type = SLEEP_NORMAL;
-	if (rq->nr_running == 1 && dependent_sleeper(cpu, rq, next))
-		next = rq->idle;
 switch_tasks:
 	if (next == rq->idle)
 		schedstat_inc(rq, sched_goidle);
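
For reference, the two throttling tests that the removed dependent_sleeper()
applied can be read in isolation.  A minimal standalone sketch (illustrative
names; DEF_TIMESLICE stands in for the kernel's default timeslice, and the
kernel additionally checked TASK_PREEMPTS_CURR before sleeping a task):

	#include <stdbool.h>

	#define DEF_TIMESLICE 100	/* stand-in value for illustration */

	/* rt branch: while an rt task runs on the sibling, a non-rt task
	 * may run only during the first per_cpu_gain% of each
	 * DEF_TIMESLICE window, i.e. ~25% of the time with the SMT
	 * default of 25. */
	static bool throttled_by_rt_sibling(unsigned long jiffies_now,
					    unsigned int per_cpu_gain)
	{
		return (jiffies_now % DEF_TIMESLICE) >
		       (per_cpu_gain * DEF_TIMESLICE / 100);
	}

	/* non-rt branch: the incoming task is put to sleep when the
	 * sibling runs a task with better (numerically lower) static_prio
	 * and that task's "lost" slice, smt_slice(), still exceeds the
	 * incoming task's own timeslice. */
	static bool throttled_by_nicer_sibling(int sibling_static_prio,
					       int incoming_static_prio,
					       unsigned long sibling_lost_slice,
					       unsigned long incoming_slice)
	{
		return sibling_static_prio < incoming_static_prio &&
		       sibling_lost_slice > incoming_slice;
	}

With per_cpu_gain == 25, throttled_by_rt_sibling() is true for 75 of every
100 jiffies, matching the "per_cpu_gain% of the time" comment in the removed
code.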