74f5187ac8
Chase reported that due to us decrementing calc_load_task prematurely
(before the next LOAD_FREQ sample), the load average could be skewed
by as much as the number of CPUs in the machine.

This patch, based on Chase's patch, cures the problem by keeping the
delta of the CPU going into NO_HZ idle separately and folding that in
on the next LOAD_FREQ update. This restores the balance and we get
strict LOAD_FREQ period samples.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Chase Douglas <chase.douglas@canonical.com>
LKML-Reference: <1271934490.1776.343.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
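To make the folding idea concrete, the sketch below is a minimal, single-threaded C model of the mechanism the message describes. It is not the kernel implementation (no per-CPU runqueues, no atomics, no NO_HZ machinery), and the names account_idle_delta and fold_idle_and_sample are illustrative only.

/*
 * Simplified model of the fix described above: a CPU entering NO_HZ idle
 * parks its task-count delta in a separate accumulator instead of applying
 * it to the global count immediately; the next LOAD_FREQ sample folds the
 * parked deltas back in, so samples stay on strict LOAD_FREQ boundaries.
 */
#include <stdio.h>

static long calc_load_tasks;        /* global active count, sampled per LOAD_FREQ */
static long calc_load_tasks_idle;   /* deltas parked by CPUs going NO_HZ idle */

/* A CPU going NO_HZ idle parks its delta rather than applying it now. */
static void account_idle_delta(long delta)
{
        calc_load_tasks_idle += delta;
}

/* At the LOAD_FREQ tick, fold the parked deltas into the global count. */
static long fold_idle_and_sample(void)
{
        calc_load_tasks += calc_load_tasks_idle;
        calc_load_tasks_idle = 0;
        return calc_load_tasks;     /* value fed into the load-average estimate */
}

int main(void)
{
        calc_load_tasks = 4;        /* four active tasks at the previous sample */
        account_idle_delta(-1);     /* one CPU idles: delta parked, not applied */
        printf("tasks at next LOAD_FREQ sample: %ld\n", fold_idle_and_sample());
        return 0;
}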
116 lines
2.6 KiB
C
/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched_fair.c)
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
{
        return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
        resched_task(rq->idle);
}

static struct task_struct *pick_next_task_idle(struct rq *rq)
{
        schedstat_inc(rq, sched_goidle);
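        /*
         * Account this CPU's load-average delta separately while it goes
         * NO_HZ idle, so it is folded in on the next LOAD_FREQ sample
         * (see the commit message above).
         */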
        calc_load_account_idle(rq);
        return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
        raw_spin_unlock_irq(&rq->lock);
        printk(KERN_ERR "bad: scheduling from the idle thread!\n");
        dump_stack();
        raw_spin_lock_irq(&rq->lock);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_idle(struct rq *rq)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p,
                             int running)
{
        /* Can this actually happen?? */
        if (running)
                resched_task(rq->curr);
        else
                check_preempt_curr(rq, p, 0);
}

static void prio_changed_idle(struct rq *rq, struct task_struct *p,
                              int oldprio, int running)
{
        /* This can happen for hot plug CPUs */

        /*
         * Reschedule if we are currently running on this runqueue and
         * our priority decreased, or if we are not currently running on
         * this runqueue and our priority is higher than the current's
         */
        if (running) {
                if (p->prio > oldprio)
                        resched_task(rq->curr);
        } else
                check_preempt_curr(rq, p, 0);
}

static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
        return 0;
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
static const struct sched_class idle_sched_class = {
        /* .next is NULL */
        /* no enqueue/yield_task for idle tasks */

        /* dequeue is not valid, we print a debug message there: */
        .dequeue_task           = dequeue_task_idle,

        .check_preempt_curr     = check_preempt_curr_idle,

        .pick_next_task         = pick_next_task_idle,
        .put_prev_task          = put_prev_task_idle,

#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_idle,
#endif

        .set_curr_task          = set_curr_task_idle,
        .task_tick              = task_tick_idle,

        .get_rr_interval        = get_rr_interval_idle,

        .prio_changed           = prio_changed_idle,
        .switched_to            = switched_to_idle,

        /* no .task_new for idle tasks */
};