mirror of https://github.com/torvalds/linux.git
Merge branch 'rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU update from Paul E. McKenney:

 "[...] one late-breaking commit. This one was requested for 3.15 by Peter Zijlstra. It is low risk because it adds a new in-kernel API with minimal changes to the existing code. Those minimal changes are the addition of memory barriers and ACCESS_ONCE() macro calls, neither of which should be able to break things. This commit has passed significant rcutorture testing, with the corresponding additions to rcutorture slated for 3.16. This commit has also been exposed to -next testing."

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 7de700e680
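The API added below lets an updater snapshot the RCU grace-period state, go off and do other work, and then pay for a full synchronize_rcu() only if no grace period happened to elapse in the meantime. A minimal caller sketch of that pattern follows; the struct foo, remove_element(), do_other_work(), and free_element() names are hypothetical stand-ins, not part of this commit:

	/* Hypothetical updater that piggybacks on a concurrent grace period. */
	static void example_deferred_free(struct foo *p)
	{
		unsigned long rcu_cookie;

		remove_element(p);                        /* unlink p from an RCU-protected structure */
		rcu_cookie = get_state_synchronize_rcu(); /* snapshot current RCU state */

		do_other_work();                          /* time-consuming work unrelated to p */

		/* Blocks only if no full grace period elapsed since the snapshot. */
		cond_synchronize_rcu(rcu_cookie);
		free_element(p);                          /* now safe: pre-existing readers are done */
	}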
@@ -27,6 +27,16 @@
 
 #include <linux/cache.h>
 
+static inline unsigned long get_state_synchronize_rcu(void)
+{
+	return 0;
+}
+
+static inline void cond_synchronize_rcu(unsigned long oldstate)
+{
+	might_sleep();
+}
+
 static inline void rcu_barrier_bh(void)
 {
 	wait_rcu_gp(call_rcu_bh);
@@ -76,6 +76,8 @@ static inline void synchronize_rcu_bh_expedited(void)
 void rcu_barrier(void);
 void rcu_barrier_bh(void);
 void rcu_barrier_sched(void);
+unsigned long get_state_synchronize_rcu(void);
+void cond_synchronize_rcu(unsigned long oldstate);
 
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
@@ -1421,13 +1421,14 @@ static int rcu_gp_init(struct rcu_state *rsp)
 
 	/* Advance to a new grace period and initialize state. */
 	record_gp_stall_check_time(rsp);
-	smp_wmb(); /* Record GP times before starting GP. */
-	rsp->gpnum++;
+	/* Record GP times before starting GP, hence smp_store_release(). */
+	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
 	raw_spin_unlock_irq(&rnp->lock);
 
 	/* Exclude any concurrent CPU-hotplug operations. */
 	mutex_lock(&rsp->onoff_mutex);
+	smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
 
 	/*
 	 * Set the quiescent-state-needed bits in all the rcu_node
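The hunk above swaps smp_wmb() plus a plain increment for smp_store_release(), so the ->gpnum update can pair with the smp_load_acquire() that get_state_synchronize_rcu() performs later in this patch: the release orders the grace-period bookkeeping before the counter becomes visible, and the acquire orders the snapshot before the caller's subsequent work. A generic sketch of that release/acquire pairing, using illustrative names that are not taken from the kernel:

	static unsigned long gen;	/* generation counter, published with a release store */
	static int payload;		/* state that must be visible before 'gen' advances */

	static void publisher(void)
	{
		payload = 42;				/* prepare state first */
		smp_store_release(&gen, gen + 1);	/* orders the store above before this one */
	}

	static unsigned long take_snapshot(void)
	{
		/* Acquire: whatever the caller does afterwards is ordered after this load. */
		return smp_load_acquire(&gen);
	}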
@@ -1555,10 +1556,11 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	}
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock_irq(&rnp->lock);
-	smp_mb__after_unlock_lock();
+	smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
 	rcu_nocb_gp_set(rnp, nocb);
 
-	rsp->completed = rsp->gpnum; /* Declare grace period done. */
+	/* Declare grace period done. */
+	ACCESS_ONCE(rsp->completed) = rsp->gpnum;
 	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
 	rsp->fqs_state = RCU_GP_IDLE;
 	rdp = this_cpu_ptr(rsp->rda);
@@ -2637,6 +2639,58 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
+/**
+ * get_state_synchronize_rcu - Snapshot current RCU state
+ *
+ * Returns a cookie that is used by a later call to cond_synchronize_rcu()
+ * to determine whether or not a full grace period has elapsed in the
+ * meantime.
+ */
+unsigned long get_state_synchronize_rcu(void)
+{
+	/*
+	 * Any prior manipulation of RCU-protected data must happen
+	 * before the load from ->gpnum.
+	 */
+	smp_mb();  /* ^^^ */
+
+	/*
+	 * Make sure this load happens before the purportedly
+	 * time-consuming work between get_state_synchronize_rcu()
+	 * and cond_synchronize_rcu().
+	 */
+	return smp_load_acquire(&rcu_state->gpnum);
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
+
+/**
+ * cond_synchronize_rcu - Conditionally wait for an RCU grace period
+ *
+ * @oldstate: return value from earlier call to get_state_synchronize_rcu()
+ *
+ * If a full RCU grace period has elapsed since the earlier call to
+ * get_state_synchronize_rcu(), just return. Otherwise, invoke
+ * synchronize_rcu() to wait for a full grace period.
+ *
+ * Yes, this function does not take counter wrap into account. But
+ * counter wrap is harmless. If the counter wraps, we have waited for
+ * more than 2 billion grace periods (and way more on a 64-bit system!),
+ * so waiting for one additional grace period should be just fine.
+ */
+void cond_synchronize_rcu(unsigned long oldstate)
+{
+	unsigned long newstate;
+
+	/*
+	 * Ensure that this load happens before any RCU-destructive
+	 * actions the caller might carry out after we return.
+	 */
+	newstate = smp_load_acquire(&rcu_state->completed);
+	if (ULONG_CMP_GE(oldstate, newstate))
+		synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
+
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
 	/*
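The counter-wrap argument in the cond_synchronize_rcu() kernel-doc above can be made concrete with modular unsigned arithmetic. ULONG_CMP_GE() is RCU's wrap-tolerant "at or after" comparison on unsigned long counters; the helper below is an illustrative stand-in written for this note, not copied from the kernel sources:

	/* Illustrative wrap-tolerant comparison: true when a is at or after b, modulo 2^BITS_PER_LONG. */
	static inline bool ulong_cmp_ge_sketch(unsigned long a, unsigned long b)
	{
		return (a - b) <= ULONG_MAX / 2;
	}

With a 32-bit unsigned long, such a comparison can only misjudge "grace period not yet elapsed" after the counter has advanced by more than about two billion grace periods between the snapshot and the check, and even then the only cost is one unnecessary synchronize_rcu().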