#include "sched.h"

/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched/fair.c)
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_task(rq->idle);
}

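/*
 * The idle class never has queued tasks: put back whatever ran before and
 * hand back this CPU's idle thread.
 */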
static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev)
{
	if (prev)
		prev->sched_class->put_prev_task(rq, prev);

	schedstat_inc(rq, sched_goidle);
#ifdef CONFIG_SMP
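	/*
	 * Account the time spent up to now as run time in the rq's load,
	 * so the runnable_avg stays correct even when RT activity and
	 * short idle periods alternate (see idle_exit_fair() below).
	 */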
	idle_enter_fair(rq);
#endif
	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
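	/*
	 * The idle task is being switched out: account the time this CPU
	 * spent idle as idle time in the rq's load.
	 */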
	idle_exit_fair(rq);
	rq_last_tick_reset(rq);
#endif
}

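/*
 * Nothing needs to be done when the scheduler tick hits the idle task:
 */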
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_idle(struct rq *rq)
{
}

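/*
 * No task ever switches to the idle class and the idle task's priority
 * never changes, so reaching either of these hooks is a bug:
 */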
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

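/*
 * The idle class has no timeslice to report:
 */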
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
};