Merge branch 'rcu-tasks.2014.09.10a' into HEAD
rcu-tasks.2014.09.10a: Add RCU-tasks flavor of RCU.
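As a usage sketch (not part of the diff below): the call_rcu_tasks() and synchronize_rcu_tasks() entry points added here would let a tracer defer freeing a trampoline until no task can still be preempted inside it. The trampoline structure and the disconnect_trampoline() helper are hypothetical, invented only for illustration.

    /* Hypothetical tracer teardown: the trampoline may not be freed until
     * every task that might have been preempted inside it has passed through
     * a voluntary context switch, user-space execution, or idle, which is
     * exactly what an RCU-tasks grace period waits for. */
    struct trampoline {
            struct rcu_head rh;     /* Queued on the RCU-tasks callback list. */
            void *insns;            /* Code that tasks may be stopped inside. */
    };

    static void trampoline_free(struct rcu_head *rhp)
    {
            struct trampoline *tp = container_of(rhp, struct trampoline, rh);

            kfree(tp->insns);       /* Sketch only; real trampolines live in
                                     * executable memory with its own allocator. */
            kfree(tp);
    }

    static void trampoline_unregister(struct trampoline *tp)
    {
            disconnect_trampoline(tp);      /* Hypothetical: stop new entries. */
            /* Asynchronously free after an RCU-tasks grace period... */
            call_rcu_tasks(&tp->rh, trampoline_free);
            /* ...or synchronously: synchronize_rcu_tasks(); then free directly. */
    }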
@@ -47,6 +47,8 @@
 #include <linux/hardirq.h>
 #include <linux/delay.h>
 #include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/tick.h>
 
 #define CREATE_TRACE_POINTS
 
@@ -91,7 +93,7 @@ void __rcu_read_unlock(void)
 		barrier(); /* critical section before exit code. */
 		t->rcu_read_lock_nesting = INT_MIN;
 		barrier(); /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special.s)))
 			rcu_read_unlock_special(t);
 		barrier(); /* ->rcu_read_unlock_special load before assign */
 		t->rcu_read_lock_nesting = 0;
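The one-line change above reads the new ->rcu_read_unlock_special.s member, which suggests that field has become a union giving both per-flag and all-flags-at-once views, so the unlock fast path can test every special-handling flag with a single load. A minimal sketch of that pattern, assuming a layout along the lines of the kernel's union rcu_special (the individual members are an assumption, not shown in this diff):

    union rcu_special {
            struct {
                    bool blocked;   /* Reader was preempted in its critical section. */
                    bool need_qs;   /* Grace period wants a quiescent state from us. */
            } b;                    /* Set and clear the flags individually... */
            short s;                /* ...but test them all with one load of ->s. */
    };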
@@ -379,3 +381,312 @@ static int __init check_cpu_stall_init(void)
 early_initcall(check_cpu_stall_init);
 
 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
+
+#ifdef CONFIG_TASKS_RCU
+
+/*
+ * Simple variant of RCU whose quiescent states are voluntary context switch,
+ * user-space execution, and idle. As such, grace periods can take one good
+ * long time. There are no read-side primitives similar to rcu_read_lock()
+ * and rcu_read_unlock() because this implementation is intended to get
+ * the system into a safe state for some of the manipulations involved in
+ * tracing and the like. Finally, this implementation does not support
+ * high call_rcu_tasks() rates from multiple CPUs. If this is required,
+ * per-CPU callback lists will be needed.
+ */
+
+/* Global list of callbacks and associated lock. */
+static struct rcu_head *rcu_tasks_cbs_head;
+static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
+static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
+static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
+
+/* Track exiting tasks in order to allow them to be waited for. */
+DEFINE_SRCU(tasks_rcu_exit_srcu);
+
+/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
+static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
+module_param(rcu_task_stall_timeout, int, 0644);
+
+static void rcu_spawn_tasks_kthread(void);
+
+/*
+ * Post an RCU-tasks callback. First call must be from process context
+ * after the scheduler is fully operational.
+ */
+void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp))
+{
+	unsigned long flags;
+	bool needwake;
+
+	rhp->next = NULL;
+	rhp->func = func;
+	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
+	needwake = !rcu_tasks_cbs_head;
+	*rcu_tasks_cbs_tail = rhp;
+	rcu_tasks_cbs_tail = &rhp->next;
+	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
+	if (needwake) {
+		rcu_spawn_tasks_kthread();
+		wake_up(&rcu_tasks_cbs_wq);
+	}
+}
+EXPORT_SYMBOL_GPL(call_rcu_tasks);
+
+/**
+ * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-tasks
+ * grace period has elapsed, in other words after all currently
+ * executing rcu-tasks read-side critical sections have completed. These
+ * read-side critical sections are delimited by calls to schedule(),
+ * cond_resched_rcu_qs(), idle execution, userspace execution, calls
+ * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
+ *
+ * This is a very specialized primitive, intended only for a few uses in
+ * tracing and other situations requiring manipulation of function
+ * preambles and profiling hooks. The synchronize_rcu_tasks() function
+ * is not (yet) intended for heavy use from multiple CPUs.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since the
+ * end of its last RCU-tasks read-side critical section whose beginning
+ * preceded the call to synchronize_rcu_tasks(). In addition, each CPU
+ * having an RCU-tasks read-side critical section that extends beyond
+ * the return from synchronize_rcu_tasks() is guaranteed to have executed
+ * a full memory barrier after the beginning of synchronize_rcu_tasks()
+ * and before the beginning of that RCU-tasks read-side critical section.
+ * Note that these guarantees include CPUs that are offline, idle, or
+ * executing in user mode, as well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
+ * (but again only if the system has more than one CPU).
+ */
+void synchronize_rcu_tasks(void)
+{
+	/* Complain if the scheduler has not started. */
+	rcu_lockdep_assert(rcu_scheduler_active,
+			   "synchronize_rcu_tasks called too soon");
+
+	/* Wait for the grace period. */
+	wait_rcu_gp(call_rcu_tasks);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
+
+/**
+ * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
+ *
+ * Although the current implementation is guaranteed to wait, it is not
+ * obligated to do so, for example, if there are no pending callbacks.
+ */
+void rcu_barrier_tasks(void)
+{
+	/* There is only one callback queue, so this is easy. ;-) */
+	synchronize_rcu_tasks();
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
+
+/* See if tasks are still holding out, complain if so. */
+static void check_holdout_task(struct task_struct *t,
+			       bool needreport, bool *firstreport)
+{
+	int cpu;
+
+	if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||
+	    t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) ||
+	    !ACCESS_ONCE(t->on_rq) ||
+	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
+	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
+		ACCESS_ONCE(t->rcu_tasks_holdout) = false;
+		list_del_init(&t->rcu_tasks_holdout_list);
+		put_task_struct(t);
+		return;
+	}
+	if (!needreport)
+		return;
+	if (*firstreport) {
+		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
+		*firstreport = false;
+	}
+	cpu = task_cpu(t);
+	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
+		 t, ".I"[is_idle_task(t)],
+		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
+		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
+		 t->rcu_tasks_idle_cpu, cpu);
+	sched_show_task(t);
+}
+
+/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
+static int __noreturn rcu_tasks_kthread(void *arg)
+{
+	unsigned long flags;
+	struct task_struct *g, *t;
+	unsigned long lastreport;
+	struct rcu_head *list;
+	struct rcu_head *next;
+	LIST_HEAD(rcu_tasks_holdouts);
+
+	/* FIXME: Add housekeeping affinity. */
+
+	/*
+	 * Each pass through the following loop makes one check for
+	 * newly arrived callbacks, and, if there are some, waits for
+	 * one RCU-tasks grace period and then invokes the callbacks.
+	 * This loop is terminated by the system going down. ;-)
+	 */
+	for (;;) {
+
+		/* Pick up any new callbacks. */
+		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
+		list = rcu_tasks_cbs_head;
+		rcu_tasks_cbs_head = NULL;
+		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
+		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
+
+		/* If there were none, wait a bit and start over. */
+		if (!list) {
+			wait_event_interruptible(rcu_tasks_cbs_wq,
+						 rcu_tasks_cbs_head);
+			if (!rcu_tasks_cbs_head) {
+				WARN_ON(signal_pending(current));
+				schedule_timeout_interruptible(HZ/10);
+			}
+			continue;
+		}
+
+		/*
+		 * Wait for all pre-existing t->on_rq and t->nvcsw
+		 * transitions to complete. Invoking synchronize_sched()
+		 * suffices because all these transitions occur with
+		 * interrupts disabled. Without this synchronize_sched(),
+		 * a read-side critical section that started before the
+		 * grace period might be incorrectly seen as having started
+		 * after the grace period.
+		 *
+		 * This synchronize_sched() also dispenses with the
+		 * need for a memory barrier on the first store to
+		 * ->rcu_tasks_holdout, as it forces the store to happen
+		 * after the beginning of the grace period.
+		 */
+		synchronize_sched();
+
+		/*
+		 * There were callbacks, so we need to wait for an
+		 * RCU-tasks grace period. Start off by scanning
+		 * the task list for tasks that are not already
+		 * voluntarily blocked. Mark these tasks and make
+		 * a list of them in rcu_tasks_holdouts.
+		 */
+		rcu_read_lock();
+		for_each_process_thread(g, t) {
+			if (t != current && ACCESS_ONCE(t->on_rq) &&
+			    !is_idle_task(t)) {
+				get_task_struct(t);
+				t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
+				ACCESS_ONCE(t->rcu_tasks_holdout) = true;
+				list_add(&t->rcu_tasks_holdout_list,
+					 &rcu_tasks_holdouts);
+			}
+		}
+		rcu_read_unlock();
+
+		/*
+		 * Wait for tasks that are in the process of exiting.
+		 * This does only part of the job, ensuring that all
+		 * tasks that were previously exiting reach the point
+		 * where they have disabled preemption, allowing the
+		 * later synchronize_sched() to finish the job.
+		 */
+		synchronize_srcu(&tasks_rcu_exit_srcu);
+
+		/*
+		 * Each pass through the following loop scans the list
+		 * of holdout tasks, removing any that are no longer
+		 * holdouts. When the list is empty, we are done.
+		 */
+		lastreport = jiffies;
+		while (!list_empty(&rcu_tasks_holdouts)) {
+			bool firstreport;
+			bool needreport;
+			int rtst;
+			struct task_struct *t1;
+
+			schedule_timeout_interruptible(HZ);
+			rtst = ACCESS_ONCE(rcu_task_stall_timeout);
+			needreport = rtst > 0 &&
+				     time_after(jiffies, lastreport + rtst);
+			if (needreport)
+				lastreport = jiffies;
+			firstreport = true;
+			WARN_ON(signal_pending(current));
+			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
+						 rcu_tasks_holdout_list) {
+				check_holdout_task(t, needreport, &firstreport);
+				cond_resched();
+			}
+		}
+
+		/*
+		 * Because ->on_rq and ->nvcsw are not guaranteed
+		 * to have full memory barriers prior to them in the
+		 * schedule() path, memory reordering on other CPUs could
+		 * cause their RCU-tasks read-side critical sections to
+		 * extend past the end of the grace period. However,
+		 * because these ->nvcsw updates are carried out with
+		 * interrupts disabled, we can use synchronize_sched()
+		 * to force the needed ordering on all such CPUs.
+		 *
+		 * This synchronize_sched() also confines all
+		 * ->rcu_tasks_holdout accesses to be within the grace
+		 * period, avoiding the need for memory barriers for
+		 * ->rcu_tasks_holdout accesses.
+		 *
+		 * In addition, this synchronize_sched() waits for exiting
+		 * tasks to complete their final preempt_disable() region
+		 * of execution, cleaning up after the synchronize_srcu()
+		 * above.
+		 */
+		synchronize_sched();
+
+		/* Invoke the callbacks. */
+		while (list) {
+			next = list->next;
+			local_bh_disable();
+			list->func(list);
+			local_bh_enable();
+			list = next;
+			cond_resched();
+		}
+		schedule_timeout_uninterruptible(HZ/10);
+	}
+}
+
+/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
+static void rcu_spawn_tasks_kthread(void)
+{
+	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
+	static struct task_struct *rcu_tasks_kthread_ptr;
+	struct task_struct *t;
+
+	if (ACCESS_ONCE(rcu_tasks_kthread_ptr)) {
+		smp_mb(); /* Ensure caller sees full kthread. */
+		return;
+	}
+	mutex_lock(&rcu_tasks_kthread_mutex);
+	if (rcu_tasks_kthread_ptr) {
+		mutex_unlock(&rcu_tasks_kthread_mutex);
+		return;
+	}
+	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
+	BUG_ON(IS_ERR(t));
+	smp_mb(); /* Ensure others see full kthread. */
+	ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
+	mutex_unlock(&rcu_tasks_kthread_mutex);
+}
+
+#endif /* #ifdef CONFIG_TASKS_RCU */
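For reference, the callback queue above is a singly linked list with a tail pointer-to-pointer: call_rcu_tasks() appends in O(1) through the tail, and the kthread detaches the entire pending list in O(1) by resetting head and tail. A self-contained user-space sketch of just that structure (names invented for illustration; the serialization that rcu_tasks_cbs_lock provides in the kernel is omitted):

    #include <stdio.h>

    struct cb {
            struct cb *next;
            void (*func)(struct cb *cbp);
    };

    static struct cb *cbs_head;
    static struct cb **cbs_tail = &cbs_head; /* Points at the last ->next (or at head). */

    /* O(1) append: store through the tail pointer, then advance it. */
    static void cb_post(struct cb *cbp, void (*func)(struct cb *cbp))
    {
            cbp->next = NULL;
            cbp->func = func;
            *cbs_tail = cbp;
            cbs_tail = &cbp->next;
    }

    /* O(1) detach of the whole list, leaving an empty queue behind. */
    static struct cb *cb_detach_all(void)
    {
            struct cb *list = cbs_head;

            cbs_head = NULL;
            cbs_tail = &cbs_head;
            return list;
    }

    static void hello(struct cb *cbp)
    {
            printf("invoked %p\n", (void *)cbp);
    }

    int main(void)
    {
            struct cb a, b;
            struct cb *list, *next;

            cb_post(&a, hello);
            cb_post(&b, hello);
            for (list = cb_detach_all(); list; list = next) {
                    next = list->next;
                    list->func(list); /* Mirrors the kthread's invocation loop. */
            }
            return 0;
    }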