rcu: Reduce dyntick-idle state space

Both extended-quiescent-state entry and exit first update the nesting
counter and then adjust the dyntick-idle state.  This means that there
are four states: (1) Both nesting and dyntick idle indicate idle,
(2) Nesting indicates idle but dyntick idle does not, (3) Nesting indicates
non-idle but dyntick idle does, and (4) Both nesting and dyntick
idle indicate non-idle.  This commit simplifies the state space by
eliminating #3 by reversing the order of updates on exit from extended
quiescent state.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

commit a0eb22bf64
parent 2e672ab2d4
Author: Paul E. McKenney
Date:   2017-10-02 19:45:10 -07:00

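For readers skimming the log, here is a minimal stand-alone sketch (plain user-space C with made-up variable names, not the kernel's struct rcu_dynticks code) of the ordering argument: the old exit order passes through state #3 (nesting non-idle while the dyntick-idle state still says idle), whereas the new order passes through state #2, which the entry path already produces, so #3 disappears from the state space.

/* eqs_order_sketch.c -- illustrative only, not kernel code */
#include <stdio.h>

static long nesting;           /* 0 == idle as seen by the nesting counter    */
static int dynticks_idle = 1;  /* 1 == idle as seen by the dyntick-idle state */

static void show(const char *when)
{
	printf("%-18s nesting=%s dynticks=%s\n", when,
	       nesting ? "non-idle" : "idle",
	       dynticks_idle ? "idle" : "non-idle");
}

static void eqs_exit_old_order(void)
{
	nesting = 1;               /* step 1: nesting counter first            */
	show("old order, step 1"); /* transient state #3: nesting non-idle,
				    * dynticks still idle                      */
	dynticks_idle = 0;         /* step 2: dyntick-idle state               */
	show("old order, step 2"); /* state #4                                 */
}

static void eqs_exit_new_order(void)
{
	dynticks_idle = 0;         /* step 1: dyntick-idle state first         */
	show("new order, step 1"); /* state #2, already reachable via entry    */
	nesting = 1;               /* step 2: nesting counter                  */
	show("new order, step 2"); /* state #4                                 */
}

int main(void)
{
	eqs_exit_old_order();
	nesting = 0;
	dynticks_idle = 1;         /* reset to state #1 (fully idle)           */
	eqs_exit_new_order();
	return 0;
}

The hunks below make exactly that swap: rcu_eqs_exit() and rcu_irq_enter() now call rcu_eqs_exit_common() (which updates the dynticks state) before raising ->dynticks_nesting.
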
@@ -928,21 +928,21 @@ void rcu_irq_exit_irqson(void)
  * we really have exited idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_eqs_exit_common(long long oldval, int user)
+static void rcu_eqs_exit_common(long long newval, int user)
 {
 	RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
 
 	rcu_dynticks_task_exit();
 	rcu_dynticks_eqs_exit();
 	rcu_cleanup_after_idle();
-	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
+	trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, newval);
 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 	    !user && !is_idle_task(current)) {
 		struct task_struct *idle __maybe_unused =
 			idle_task(smp_processor_id());
 
 		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
-				  oldval, rdtp->dynticks_nesting);
+				  rdtp->dynticks_nesting, newval);
 		rcu_ftrace_dump(DUMP_ORIG);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 			  current->pid, current->comm,
@@ -967,8 +967,8 @@ static void rcu_eqs_exit(bool user)
 		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
 	} else {
 		__this_cpu_inc(disable_rcu_irq_enter);
-		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-		rcu_eqs_exit_common(oldval, user);
+		rcu_eqs_exit_common(DYNTICK_TASK_EXIT_IDLE, user);
+		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 		__this_cpu_dec(disable_rcu_irq_enter);
 	}
 }
@@ -1037,7 +1037,7 @@ void rcu_user_exit(void)
 void rcu_irq_enter(void)
 {
 	struct rcu_dynticks *rdtp;
-	long long oldval;
+	long long newval;
 
 	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
@@ -1046,14 +1046,13 @@ void rcu_irq_enter(void)
 	if (rdtp->dynticks_nmi_nesting)
 		return;
 
-	oldval = rdtp->dynticks_nesting;
-	rdtp->dynticks_nesting++;
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     rdtp->dynticks_nesting == 0);
-	if (oldval)
-		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
+	newval = rdtp->dynticks_nesting + 1;
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && newval == 0);
+	if (rdtp->dynticks_nesting)
+		trace_rcu_dyntick(TPS("++="), rdtp->dynticks_nesting, newval);
 	else
-		rcu_eqs_exit_common(oldval, true);
+		rcu_eqs_exit_common(newval, true);
+	rdtp->dynticks_nesting++;
 }
 
 /*