sched/preempt: Optimize preemption operations on __schedule() callers

__schedule() disables preemption and some of its callers
(the preempt_schedule*() family) also set PREEMPT_ACTIVE.

So we have two preempt_count() modifications that could be performed
at once.

Let's remove the preemption disablement from __schedule() and pull that
responsibility up to its callers, so that the preempt_count() operations
can be combined in a single place.
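
To illustrate, here is a sketch of the resulting pattern in
preempt_schedule_common() (using only names from the patch below; not a
literal copy of the final code):

	/* Before: the caller and __schedule() update preempt_count() separately */
	__preempt_count_add(PREEMPT_ACTIVE);	/* in the preempt_schedule*() caller    */
	__schedule();				/* does its own preempt_disable()/enable */
	__preempt_count_sub(PREEMPT_ACTIVE);

	/* After: one combined update on each side of __schedule() */
	preempt_active_enter();		/* PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET in one go */
	__schedule();			/* no longer touches preempt_count() itself          */
	preempt_active_exit();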

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1431441711-29753-5-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Frederic Weisbecker, 2015-05-12 16:41:49 +02:00; committed by Ingo Molnar
parent 90b62b5129
commit b30f0e3ffe
2 changed files with 21 additions and 20 deletions

include/linux/preempt.h

@@ -137,6 +137,18 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
+#define preempt_active_enter() \
+do { \
+	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+	barrier(); \
+} while (0)
+
+#define preempt_active_exit() \
+do { \
+	barrier(); \
+	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+} while (0)
+
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \

kernel/sched/core.c

@@ -2773,9 +2773,7 @@ again:
  * - return from syscall or exception to user-space
  * - return from interrupt-handler to user-space
  *
- * WARNING: all callers must re-check need_resched() afterward and reschedule
- * accordingly in case an event triggered the need for rescheduling (such as
- * an interrupt waking up a task) while preemption was disabled in __schedule().
+ * WARNING: must be called with preemption disabled!
  */
 static void __sched __schedule(void)
 {
@@ -2784,7 +2782,6 @@ static void __sched __schedule(void)
 	struct rq *rq;
 	int cpu;
 
-	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch();
@@ -2848,8 +2845,6 @@ static void __sched __schedule(void)
 		raw_spin_unlock_irq(&rq->lock);
 
 	post_schedule(rq);
-
-	sched_preempt_enable_no_resched();
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2870,7 +2865,9 @@ asmlinkage __visible void __sched schedule(void)
 
 	sched_submit_work(tsk);
 	do {
+		preempt_disable();
 		__schedule();
+		sched_preempt_enable_no_resched();
 	} while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
@@ -2909,15 +2906,14 @@ void __sched schedule_preempt_disabled(void)
 static void __sched notrace preempt_schedule_common(void)
 {
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		__schedule();
-		__preempt_count_sub(PREEMPT_ACTIVE);
+		preempt_active_exit();
 
 		/*
 		 * Check again in case we missed a preemption opportunity
 		 * between schedule and now.
 		 */
-		barrier();
 	} while (need_resched());
 }
 
@@ -2964,7 +2960,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 		return;
 
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		/*
 		 * Needs preempt disabled in case user_exit() is traced
 		 * and the tracer calls preempt_enable_notrace() causing
@@ -2974,8 +2970,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 		__schedule();
 		exception_exit(prev_ctx);
 
-		__preempt_count_sub(PREEMPT_ACTIVE);
-		barrier();
+		preempt_active_exit();
 	} while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_context);
@@ -2999,17 +2994,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 	prev_state = exception_enter();
 
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		local_irq_enable();
 		__schedule();
 		local_irq_disable();
-		__preempt_count_sub(PREEMPT_ACTIVE);
-
-		/*
-		 * Check again in case we missed a preemption opportunity
-		 * between schedule and now.
-		 */
-		barrier();
+		preempt_active_exit();
 	} while (need_resched());
 
 	exception_exit(prev_state);