rcu-tasks: Scan running tasks for RCU Tasks Trace readers
A running task might be within an RCU Tasks Trace read-side critical section for any length of time, but it will never be placed on any of the per-CPU rcu_tasks_percpu structure's ->rtp_blkd_tasks lists. Therefore, any RCU Tasks Trace grace-period processing that does not scan the full task list must interact with the running tasks.

This commit therefore causes the rcu_tasks_trace_pregp_step() function to IPI each CPU in order to place the corresponding task on the holdouts list and to record whether or not it was in an RCU Tasks Trace read-side critical section. Yes, it is possible to avoid adding a task to that list if it is not a reader, but doing so would prevent the system from remembering that this task was in a quiescent state, which is why running tasks are unconditionally added to the holdouts list.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: KP Singh <kpsingh@kernel.org>
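For context, an RCU Tasks Trace read-side critical section is delimited by rcu_read_lock_trace() and rcu_read_unlock_trace(), and a task can execute inside one indefinitely without ever blocking, which is why it never lands on a ->rtp_blkd_tasks list. A minimal sketch of such a reader (the protected-work function is hypothetical):

	#include <linux/rcupdate_trace.h>

	static void example_trace_reader(void)
	{
		rcu_read_lock_trace();		/* enter read-side critical section */
		access_traced_structure();	/* hypothetical work; may run at length */
		rcu_read_unlock_trace();	/* exit; task is no longer a reader */
	}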
commit 7460ade1fc
parent 19415004d5
@@ -14,7 +14,7 @@
 struct rcu_tasks;
 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
-typedef void (*pregp_func_t)(void);
+typedef void (*pregp_func_t)(struct list_head *hop);
 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
 typedef void (*postscan_func_t)(struct list_head *hop);
 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
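For context (not part of this hunk), each RCU Tasks flavor installs these callbacks on its struct rcu_tasks instance in its init code elsewhere in kernel/rcu/tasks.h, so the new signature threads the grace period's holdout list into the pre-grace-period step. A sketch of that wiring, following the pattern of the existing flavor-init functions:

	/* Sketch: flavor init assigns the callbacks; after this change,
	 * the pregp_func callback must accept the holdout list. */
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;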
@@ -661,7 +661,7 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 	struct task_struct *t;
 
 	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
-	rtp->pregp_func();
+	rtp->pregp_func(&holdouts);
 
 	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
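The holdouts argument here is the on-stack list declared near the top of rcu_tasks_wait_gp(); passing it into the pre-grace-period step lets that step seed the list before the per-task scan runs. As it appears earlier in the same function:

	LIST_HEAD(holdouts);	/* tasks still blocking this grace period */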
@@ -791,7 +791,7 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 // disabling.
 
 /* Pre-grace-period preparation. */
-static void rcu_tasks_pregp_step(void)
+static void rcu_tasks_pregp_step(struct list_head *hop)
 {
 	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
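Note that this classic RCU Tasks flavor does not itself use the new hop argument; as the context lines show, its body is unchanged. The parameter is added only to keep every flavor's pregp_func_t signature uniform, since the RCU Tasks Trace flavor below does consume it.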
@@ -1449,24 +1449,48 @@ static void trc_wait_for_one_reader(struct task_struct *t,
 	}
 }
 
-/* Do first-round processing for the specified task. */
-static void rcu_tasks_trace_pertask(struct task_struct *t,
-				    struct list_head *hop)
+/*
+ * Initialize for first-round processing for the specified task.
+ * Return false if task is NULL or already taken care of, true otherwise.
+ */
+static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
 {
 	// During early boot when there is only the one boot CPU, there
 	// is no idle task for the other CPUs. Also, the grace-period
 	// kthread is always in a quiescent state. In addition, just return
 	// if this task is already on the list.
-	if (unlikely(t == NULL) || t == current || !list_empty(&t->trc_holdout_list))
-		return;
+	if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
+		return false;
 
 	rcu_st_need_qs(t, 0);
 	t->trc_ipi_to_cpu = -1;
-	trc_wait_for_one_reader(t, hop);
+	return true;
+}
+
+/* Do first-round processing for the specified task. */
+static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
+{
+	if (rcu_tasks_trace_pertask_prep(t, true))
+		trc_wait_for_one_reader(t, hop);
+}
+
+/*
+ * Get the current CPU's current task on the holdout list.
+ * Calls to this function must be serialized.
+ */
+static void rcu_tasks_trace_pertask_handler(void *hop_in)
+{
+	struct list_head *hop = hop_in;
+	struct task_struct *t = current;
+
+	// Pull in the currently running task, but only if it is currently
+	// in an RCU tasks trace read-side critical section.
+	if (rcu_tasks_trace_pertask_prep(t, false))
+		trc_add_holdout(t, hop);
+}
 
 /* Initialize for a new RCU-tasks-trace grace period. */
-static void rcu_tasks_trace_pregp_step(void)
+static void rcu_tasks_trace_pregp_step(struct list_head *hop)
 {
 	int cpu;
 
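An observation that may help when reading the handler above (not part of the diff): smp_call_function_single() runs the handler on the target CPU, so current there is exactly the task being scanned, and the prep call must therefore pass notself=false. With notself false, the skip condition in rcu_tasks_trace_pertask_prep() effectively reduces to:

	/* What the handler's prep check tests when notself == false:
	 * skip only missing tasks and tasks already on the holdout list. */
	if (unlikely(t == NULL) || !list_empty(&t->trc_holdout_list))
		return false;

The tasklist-scan path keeps notself=true so that the grace-period kthread, which is current during that scan and always quiescent, is never queued.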
@@ -1474,9 +1498,14 @@ static void rcu_tasks_trace_pregp_step(void)
 	for_each_possible_cpu(cpu)
 		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
 
-	// Disable CPU hotplug across the tasklist scan.
+	// Disable CPU hotplug across the CPU scan.
 	// This also waits for all readers in CPU-hotplug code paths.
 	cpus_read_lock();
+
+	// These smp_call_function_single() calls are serialized to
+	// allow safe access to the hop list.
+	for_each_online_cpu(cpu)
+		smp_call_function_single(cpu, rcu_tasks_trace_pertask_handler, hop, 1);
 }
 
 /*
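The serialized, synchronous cross-call pattern used above can be seen in isolation in the following sketch (all names hypothetical): the wait=1 argument makes each smp_call_function_single() return only after the handler has run, so the handlers execute one at a time and can safely append to a shared list without extra locking.

	#include <linux/cpu.h>
	#include <linux/list.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>

	struct scan_item {
		struct list_head entry;
		int cpu;
	};

	static DEFINE_PER_CPU(struct scan_item, scan_item);

	/* Runs on the target CPU with interrupts disabled; the caller's
	 * serialization makes the unlocked list_add() safe. */
	static void scan_handler(void *info)
	{
		struct list_head *list = info;
		struct scan_item *si = this_cpu_ptr(&scan_item);

		si->cpu = smp_processor_id();
		list_add(&si->entry, list);
	}

	static void scan_all_cpus(struct list_head *list)
	{
		int cpu;

		cpus_read_lock();	/* keep the set of online CPUs stable */
		for_each_online_cpu(cpu)
			smp_call_function_single(cpu, scan_handler, list, 1);
		cpus_read_unlock();
	}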