Mirror of https://github.com/torvalds/linux.git
Synced 2024-12-02 00:51:44 +00:00
Commit 91d1aa43d3
Create a new subsystem that probes on kernel boundaries to keep track
of the transitions between level contexts with two basic initial
contexts: user or kernel.

This is an abstraction of some RCU code that uses such tracking to
implement its userspace extended quiescent state.

We need to pull this up from RCU into this new level of indirection
because this tracking is also going to be used to implement an "on
demand" generic virtual cputime accounting, a necessary step to shut
down the tick while still accounting the cputime.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
[ paulmck: fix whitespace error and email address. ]
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
84 lines
2.1 KiB
C
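To illustrate how the boundary probes described in the commit message are meant to be wired up, here is a minimal sketch of hypothetical architecture glue. It is not part of this commit or of the file below; the hook names arch_syscall_entry_work()/arch_syscall_exit_work() are stand-ins, assumed for illustration, for whatever slow-path entry/exit work an architecture runs when TIF_NOHZ is set:

#include <linux/context_tracking.h>

/*
 * Illustrative sketch only: an architecture opting in to context
 * tracking would call user_exit() on every entry from userspace and
 * user_enter() on every return to it. The function names here are
 * hypothetical stand-ins for the arch's real entry/exit work.
 */
static void arch_syscall_entry_work(void)
{
	user_exit();	/* userspace -> kernel transition */
	/* ... the arch's usual entry work (tracing, audit, ...) ... */
}

static void arch_syscall_exit_work(void)
{
	/* ... the arch's usual exit work ... */
	user_enter();	/* kernel -> userspace transition */
}

With hooks like these on every boundary, the per-CPU state below always reflects whether the CPU is running user or kernel code, which is what lets RCU (and later cputime accounting) treat userspace as a quiescent region.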
#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

struct context_tracking {
	/*
	 * When active is false, hooks are not set to
	 * minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	enum {
		IN_KERNEL = 0,
		IN_USER,
	} state;
};

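/*
 * Per-CPU tracker, inactive by default; CONFIG_CONTEXT_TRACKING_FORCE
 * turns it on for every CPU at build time.
 */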
static DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
	.active = true,
#endif
};

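/*
 * user_enter() - called when the task is about to resume userspace:
 * flip the per-CPU state to IN_USER and let RCU enter its userspace
 * extended quiescent state.
 */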
void user_enter(void)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	WARN_ON_ONCE(!current->mm);

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.active) &&
	    __this_cpu_read(context_tracking.state) != IN_USER) {
		__this_cpu_write(context_tracking.state, IN_USER);
		rcu_user_enter();
	}
	local_irq_restore(flags);
}

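/*
 * user_exit() - called when the task enters the kernel from userspace:
 * flip the per-CPU state back to IN_KERNEL and pull RCU out of its
 * userspace extended quiescent state.
 */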
void user_exit(void)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) == IN_USER) {
		__this_cpu_write(context_tracking.state, IN_KERNEL);
		rcu_user_exit();
	}
	local_irq_restore(flags);
}

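/*
 * context_tracking_task_switch() - hand the TIF_NOHZ flag over from
 * the scheduled-out task to the scheduled-in one, so that on an
 * actively tracked CPU only the currently running task carries it.
 */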
void context_tracking_task_switch(struct task_struct *prev,
				  struct task_struct *next)
{
	if (__this_cpu_read(context_tracking.active)) {
		clear_tsk_thread_flag(prev, TIF_NOHZ);
		set_tsk_thread_flag(next, TIF_NOHZ);
	}
}