Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Two smaller fixes - plus a context tracking tracing fix that is a bit
  bigger"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tracing/context-tracking: Add preempt_schedule_context() for tracing
  sched: Fix clear NOHZ_BALANCE_KICK
  sched/x86: Construct all sibling maps if smt
@@ -70,6 +70,46 @@ void user_enter(void)
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_PREEMPT
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+void __sched notrace preempt_schedule_context(void)
+{
+	struct thread_info *ti = current_thread_info();
+	enum ctx_state prev_ctx;
+
+	if (likely(ti->preempt_count || irqs_disabled()))
+		return;
+
+	/*
+	 * Need to disable preemption in case user_exit() is traced
+	 * and the tracer calls preempt_enable_notrace() causing
+	 * an infinite recursion.
+	 */
+	preempt_disable_notrace();
+	prev_ctx = exception_enter();
+	preempt_enable_no_resched_notrace();
+
+	preempt_schedule();
+
+	preempt_disable_notrace();
+	exception_exit(prev_ctx);
+	preempt_enable_notrace();
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_PREEMPT */
+
 /**
  * user_exit - Inform the context tracking that the CPU is
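The comment above says that preempt_enable_notrace() is meant to call preempt_schedule_context() instead of preempt_schedule() when context tracking is built in. The header side of that wiring is not among the hunks shown in this view; the following is only a rough sketch, in kernel-header style, of how such a dispatch can be structured. The helper name preempt_check_resched_context() is chosen here for illustration.

/*
 * Sketch only - not one of the hunks shown in this commit view.
 * Assumes the usual <linux/preempt.h> / <linux/thread_info.h>
 * environment; preempt_check_resched_context() is an illustrative name.
 */
#ifdef CONFIG_CONTEXT_TRACKING
void preempt_schedule_context(void);

#define preempt_check_resched_context() \
do { \
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
		preempt_schedule_context(); \
} while (0)
#else
#define preempt_check_resched_context() preempt_check_resched()
#endif

/* preempt_enable_notrace() then ends with the context-aware check: */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	barrier(); \
	preempt_check_resched_context(); \
} while (0)

With CONFIG_CONTEXT_TRACKING disabled, the plain resched check is unchanged; the extra exception_enter()/exception_exit() round trip only happens when context tracking is active.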
@@ -633,7 +633,19 @@ void wake_up_nohz_cpu(int cpu)
 static inline bool got_nohz_idle_kick(void)
 {
 	int cpu = smp_processor_id();
-	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+
+	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+		return false;
+
+	if (idle_cpu(cpu) && !need_resched())
+		return true;
+
+	/*
+	 * We can't run Idle Load Balance on this CPU for this time so we
+	 * cancel it and clear NOHZ_BALANCE_KICK
+	 */
+	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+	return false;
 }
 
 #else /* CONFIG_NO_HZ_COMMON */
@@ -1393,8 +1405,9 @@ static void sched_ttwu_pending(void)
 
 void scheduler_ipi(void)
 {
-	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
-			&& !tick_nohz_full_cpu(smp_processor_id()))
+	if (llist_empty(&this_rq()->wake_list)
+			&& !tick_nohz_full_cpu(smp_processor_id())
+			&& !got_nohz_idle_kick())
 		return;
 
 	/*
@@ -1417,7 +1430,7 @@ void scheduler_ipi(void)
 	/*
 	 * Check if someone kicked us for doing the nohz idle load balance.
 	 */
-	if (unlikely(got_nohz_idle_kick() && !need_resched())) {
+	if (unlikely(got_nohz_idle_kick())) {
 		this_rq()->idle_balance = 1;
 		raise_softirq_irqoff(SCHED_SOFTIRQ);
 	}
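For context on the NOHZ_BALANCE_KICK hunks: the kick originates on a busy CPU, which sets the flag on a chosen idle CPU and sends it a scheduler IPI, but never clears the flag itself. If the kicked CPU cannot run the idle load balance (it is no longer idle, or already needs to reschedule) and simply ignores the kick, the flag stays set and later kicks to that CPU are suppressed. The hunks above therefore move the need_resched() test into got_nohz_idle_kick() so the declining CPU clears the flag itself. A simplified, paraphrased sketch of the kick side follows (roughly what nohz_balancer_kick() in kernel/sched/fair.c of this era does; not one of the hunks shown here).

/*
 * Paraphrased sketch of the kick side - simplified for illustration,
 * not part of this commit view.
 */
static void nohz_balancer_kick(int cpu)
{
	int ilb_cpu = find_new_ilb(cpu);	/* pick an idle CPU to run the balance */

	if (ilb_cpu >= nr_cpu_ids)
		return;

	/* Flag the target; if a kick is already pending there, do nothing. */
	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
		return;

	/*
	 * The scheduler IPI drives the target through scheduler_ipi() and,
	 * via got_nohz_idle_kick(), into the SCHED_SOFTIRQ balance pass.
	 */
	smp_send_reschedule(ilb_cpu);
}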