Mirror of https://github.com/torvalds/linux.git
Merge branches 'bigrtm.2012.07.04a', 'doctorture.2012.07.02a', 'fixes.2012.07.06a' and 'fnh.2012.07.02a' into HEAD
bigrtm: First steps towards getting RCU out of the way of tens-of-microseconds real-time response on systems compiled with NR_CPUS=4096. Also cleanups for and increased concurrency of the rcu_barrier() family of primitives.
doctorture: rcutorture and documentation improvements.
fixes: Miscellaneous fixes.
fnh: RCU_FAST_NO_HZ fixes and improvements.
commit 02a0677b0b
@@ -162,9 +162,9 @@ over a rather long period of time, but improvements are always welcome!
  when publicizing a pointer to a structure that can
  be traversed by an RCU read-side critical section.

- 5. If call_rcu(), or a related primitive such as call_rcu_bh() or
- call_rcu_sched(), is used, the callback function must be
- written to be called from softirq context. In particular,
+ 5. If call_rcu(), or a related primitive such as call_rcu_bh(),
+ call_rcu_sched(), or call_srcu() is used, the callback function
+ must be written to be called from softirq context. In particular,
  it cannot block.

  6. Since synchronize_rcu() can block, it cannot be called from
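For orientation, a minimal sketch (not part of this patch; struct foo and its helpers are hypothetical) of the pattern this checklist item constrains: the function handed to call_rcu() runs from softirq context after a grace period, so it must not block and typically just frees memory.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int data;
        struct rcu_head rcu;            /* embedded for call_rcu() */
};

/* Invoked from softirq context after a grace period: must not block. */
static void foo_reclaim(struct rcu_head *head)
{
        kfree(container_of(head, struct foo, rcu));
}

static void foo_remove(struct foo *fp)
{
        /* ... unlink fp from the reader-visible structure first ... */
        call_rcu(&fp->rcu, foo_reclaim);        /* deferred, non-blocking free */
}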
@@ -202,11 +202,12 @@ over a rather long period of time, but improvements are always welcome!
  updater uses call_rcu_sched() or synchronize_sched(), then
  the corresponding readers must disable preemption, possibly
  by calling rcu_read_lock_sched() and rcu_read_unlock_sched().
- If the updater uses synchronize_srcu(), the the corresponding
- readers must use srcu_read_lock() and srcu_read_unlock(),
- and with the same srcu_struct. The rules for the expedited
- primitives are the same as for their non-expedited counterparts.
- Mixing things up will result in confusion and broken kernels.
+ If the updater uses synchronize_srcu() or call_srcu(),
+ the the corresponding readers must use srcu_read_lock() and
+ srcu_read_unlock(), and with the same srcu_struct. The rules for
+ the expedited primitives are the same as for their non-expedited
+ counterparts. Mixing things up will result in confusion and
+ broken kernels.

  One exception to this rule: rcu_read_lock() and rcu_read_unlock()
  may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh()
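A hedged sketch of the pairing rule above (not part of the patch; gbl_foo is hypothetical and struct foo is reused from the previous sketch): an updater relying on synchronize_sched() must be matched by readers that disable preemption.

#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct foo __rcu *gbl_foo;       /* hypothetical RCU-protected pointer */

static int foo_read_sched(void)
{
        struct foo *fp;
        int val = -1;

        rcu_read_lock_sched();                  /* disables preemption */
        fp = rcu_dereference_sched(gbl_foo);
        if (fp)
                val = fp->data;
        rcu_read_unlock_sched();
        return val;
}

static void foo_update_sched(struct foo *new_fp)
{
        struct foo *old_fp = rcu_dereference_protected(gbl_foo, 1);

        rcu_assign_pointer(gbl_foo, new_fp);
        synchronize_sched();    /* waits only for preempt-disabled readers */
        kfree(old_fp);
}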
@@ -333,14 +334,14 @@ over a rather long period of time, but improvements are always welcome!
  victim CPU from ever going offline.)

  14. SRCU (srcu_read_lock(), srcu_read_unlock(), srcu_dereference(),
- synchronize_srcu(), and synchronize_srcu_expedited()) may only
- be invoked from process context. Unlike other forms of RCU, it
- -is- permissible to block in an SRCU read-side critical section
- (demarked by srcu_read_lock() and srcu_read_unlock()), hence the
- "SRCU": "sleepable RCU". Please note that if you don't need
- to sleep in read-side critical sections, you should be using
- RCU rather than SRCU, because RCU is almost always faster and
- easier to use than is SRCU.
+ synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu())
+ may only be invoked from process context. Unlike other forms of
+ RCU, it -is- permissible to block in an SRCU read-side critical
+ section (demarked by srcu_read_lock() and srcu_read_unlock()),
+ hence the "SRCU": "sleepable RCU". Please note that if you
+ don't need to sleep in read-side critical sections, you should be
+ using RCU rather than SRCU, because RCU is almost always faster
+ and easier to use than is SRCU.

  If you need to enter your read-side critical section in a
  hardirq or exception handler, and then exit that same read-side
@@ -353,8 +354,8 @@ over a rather long period of time, but improvements are always welcome!
  cleanup_srcu_struct(). These are passed a "struct srcu_struct"
  that defines the scope of a given SRCU domain. Once initialized,
  the srcu_struct is passed to srcu_read_lock(), srcu_read_unlock()
- synchronize_srcu(), and synchronize_srcu_expedited(). A given
- synchronize_srcu() waits only for SRCU read-side critical
+ synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu().
+ A given synchronize_srcu() waits only for SRCU read-side critical
  sections governed by srcu_read_lock() and srcu_read_unlock()
  calls that have been passed the same srcu_struct. This property
  is what makes sleeping read-side critical sections tolerable --
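A hedged SRCU sketch of the scoping rule described here (not part of the patch; my_srcu, my_foo, and struct foo are hypothetical; init_srcu_struct(&my_srcu) is assumed to have run at module init, with cleanup_srcu_struct() at exit):

#include <linux/srcu.h>
#include <linux/slab.h>

static struct srcu_struct my_srcu;      /* hypothetical SRCU domain */
static struct foo __rcu *my_foo;        /* hypothetical pointer, struct foo as above */

static int foo_read_srcu(void)
{
        struct foo *fp;
        int idx, val = -1;

        idx = srcu_read_lock(&my_srcu);         /* sleeping is permitted inside */
        fp = srcu_dereference(my_foo, &my_srcu);
        if (fp)
                val = fp->data;
        srcu_read_unlock(&my_srcu, idx);
        return val;
}

static void foo_update_srcu(struct foo *new_fp)
{
        struct foo *old_fp = rcu_dereference_protected(my_foo, 1);

        rcu_assign_pointer(my_foo, new_fp);
        synchronize_srcu(&my_srcu);     /* waits only for my_srcu readers */
        kfree(old_fp);
}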
@@ -374,7 +375,7 @@ over a rather long period of time, but improvements are always welcome!
  requiring SRCU's read-side deadlock immunity or low read-side
  realtime latency.

- Note that, rcu_assign_pointer() relates to SRCU just as they do
+ Note that, rcu_assign_pointer() relates to SRCU just as it does
  to other forms of RCU.

  15. The whole point of call_rcu(), synchronize_rcu(), and friends
@@ -79,8 +79,6 @@ complete. Pseudo-code using rcu_barrier() is as follows:
  2. Execute rcu_barrier().
  3. Allow the module to be unloaded.

- Quick Quiz #1: Why is there no srcu_barrier()?
-
  The rcutorture module makes use of rcu_barrier in its exit function
  as follows:

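A hedged sketch of the three-step unload sequence above (not part of the patch; my_stop_posting_callbacks() and struct foo are hypothetical). The point is that rcu_barrier() runs after callback posting stops and before the module text containing the callback can disappear.

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static void my_rcu_cb(struct rcu_head *head)
{
        kfree(container_of(head, struct foo, rcu));     /* hypothetical struct foo */
}

static void __exit my_exit(void)
{
        my_stop_posting_callbacks();    /* 1. hypothetical: stop new call_rcu(..., my_rcu_cb) */
        rcu_barrier();                  /* 2. wait for all already-posted callbacks */
        /* 3. returning now lets the module, and my_rcu_cb, be unloaded safely */
}
module_exit(my_exit);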
@@ -162,7 +160,7 @@ for any pre-existing callbacks to complete.
  Then lines 55-62 print status and do operation-specific cleanup, and
  then return, permitting the module-unload operation to be completed.

- Quick Quiz #2: Is there any other situation where rcu_barrier() might
+ Quick Quiz #1: Is there any other situation where rcu_barrier() might
  be required?

  Your module might have additional complications. For example, if your
@@ -242,7 +240,7 @@ reaches zero, as follows:
  4 complete(&rcu_barrier_completion);
  5 }

- Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
+ Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
  immediately (thus incrementing rcu_barrier_cpu_count to the
  value one), but the other CPU's rcu_barrier_func() invocations
  are delayed for a full grace period? Couldn't this result in
@@ -259,12 +257,7 @@ so that your module may be safely unloaded.

  Answers to Quick Quizzes

- Quick Quiz #1: Why is there no srcu_barrier()?
-
- Answer: Since there is no call_srcu(), there can be no outstanding SRCU
- callbacks. Therefore, there is no need to wait for them.
-
- Quick Quiz #2: Is there any other situation where rcu_barrier() might
+ Quick Quiz #1: Is there any other situation where rcu_barrier() might
  be required?

  Answer: Interestingly enough, rcu_barrier() was not originally
@@ -278,7 +271,7 @@ Answer: Interestingly enough, rcu_barrier() was not originally
  implementing rcutorture, and found that rcu_barrier() solves
  this problem as well.

- Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
+ Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
  immediately (thus incrementing rcu_barrier_cpu_count to the
  value one), but the other CPU's rcu_barrier_func() invocations
  are delayed for a full grace period? Couldn't this result in
@@ -174,11 +174,20 @@ torture_type The type of RCU to test, with string values as follows:
  and synchronize_rcu_bh_expedited().

  "srcu": srcu_read_lock(), srcu_read_unlock() and
+ call_srcu().
+
+ "srcu_sync": srcu_read_lock(), srcu_read_unlock() and
  synchronize_srcu().

  "srcu_expedited": srcu_read_lock(), srcu_read_unlock() and
  synchronize_srcu_expedited().

  "srcu_raw": srcu_read_lock_raw(), srcu_read_unlock_raw(),
+ and call_srcu().
+
+ "srcu_raw_sync": srcu_read_lock_raw(), srcu_read_unlock_raw(),
  and synchronize_srcu().

  "sched": preempt_disable(), preempt_enable(), and
  call_rcu_sched().
@@ -833,9 +833,9 @@ sched: Critical sections Grace period Barrier

  SRCU:                 Critical sections       Grace period                    Barrier

- srcu_read_lock        synchronize_srcu        N/A
- srcu_read_unlock      synchronize_srcu_expedited
- srcu_read_lock_raw
+ srcu_read_lock        synchronize_srcu        srcu_barrier
+ srcu_read_unlock      call_srcu
+ srcu_read_lock_raw    synchronize_srcu_expedited
  srcu_read_unlock_raw
  srcu_dereference

@@ -168,8 +168,8 @@ extern struct cred init_cred;
  .children = LIST_HEAD_INIT(tsk.children), \
  .sibling = LIST_HEAD_INIT(tsk.sibling), \
  .group_leader = &tsk, \
- RCU_INIT_POINTER(.real_cred, &init_cred), \
- RCU_INIT_POINTER(.cred, &init_cred), \
+ RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
+ RCU_POINTER_INITIALIZER(cred, &init_cred), \
  .comm = INIT_TASK_COMM, \
  .thread = INIT_THREAD, \
  .fs = &init_fs, \
@@ -303,7 +303,9 @@ static inline bool key_is_instantiated(const struct key *key)
  rwsem_is_locked(&((struct key *)(KEY))->sem)))

  #define rcu_assign_keypointer(KEY, PAYLOAD) \
- (rcu_assign_pointer((KEY)->payload.rcudata, PAYLOAD))
+ do { \
+ rcu_assign_pointer((KEY)->payload.rcudata, (PAYLOAD)); \
+ } while (0)

  #ifdef CONFIG_SYSCTL
  extern ctl_table key_sysctls[];
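A hedged aside on the do { } while (0) wrapper adopted here: rcu_assign_pointer() is becoming a statement rather than an expression in this series, and wrapping rcu_assign_keypointer() the same way keeps it usable wherever a single statement is expected, for example (hypothetical caller):

#include <linux/key.h>
#include <linux/printk.h>

/* Hypothetical helper: the statement form keeps the else bound to the if. */
static void maybe_update_key(struct key *key, void *payload, bool do_update)
{
        if (do_update)
                rcu_assign_keypointer(key, payload);
        else
                pr_debug("key payload left unchanged\n");
}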
@@ -147,6 +147,7 @@ extern void synchronize_sched(void);

  extern void __rcu_read_lock(void);
  extern void __rcu_read_unlock(void);
+ extern void rcu_read_unlock_special(struct task_struct *t);
  void synchronize_rcu(void);

  /*
@@ -255,6 +256,10 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
  }
  #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

+ #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
+ extern int rcu_is_cpu_idle(void);
+ #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
+
  #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
  bool rcu_lockdep_current_cpu_online(void);
  #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
@@ -266,15 +271,6 @@ static inline bool rcu_lockdep_current_cpu_online(void)

  #ifdef CONFIG_DEBUG_LOCK_ALLOC

- #ifdef CONFIG_PROVE_RCU
- extern int rcu_is_cpu_idle(void);
- #else /* !CONFIG_PROVE_RCU */
- static inline int rcu_is_cpu_idle(void)
- {
- return 0;
- }
- #endif /* else !CONFIG_PROVE_RCU */
-
  static inline void rcu_lock_acquire(struct lockdep_map *map)
  {
  lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
@@ -513,10 +509,10 @@ static inline void rcu_preempt_sleep_check(void)
  (_________p1); \
  })
  #define __rcu_assign_pointer(p, v, space) \
- ({ \
+ do { \
  smp_wmb(); \
  (p) = (typeof(*v) __force space *)(v); \
- })
+ } while (0)


  /**
@@ -851,7 +847,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  *
  * Assigns the specified value to the specified RCU-protected
  * pointer, ensuring that any concurrent RCU readers will see
- * any prior initialization. Returns the value assigned.
+ * any prior initialization.
  *
  * Inserts memory barriers on architectures that require them
  * (which is most of them), and also prevents the compiler from
@@ -903,25 +899,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  * the reader-accessible portions of the linked structure.
  */
  #define RCU_INIT_POINTER(p, v) \
- p = (typeof(*v) __force __rcu *)(v)
+ do { \
+ p = (typeof(*v) __force __rcu *)(v); \
+ } while (0)

- static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
- {
- return offset < 4096;
- }
-
- static __always_inline
- void __kfree_rcu(struct rcu_head *head, unsigned long offset)
- {
- typedef void (*rcu_callback)(struct rcu_head *);
-
- BUILD_BUG_ON(!__builtin_constant_p(offset));
-
- /* See the kfree_rcu() header comment. */
- BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
-
- kfree_call_rcu(head, (rcu_callback)offset);
- }
+ /**
+ * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
+ *
+ * GCC-style initialization for an RCU-protected pointer in a structure field.
+ */
+ #define RCU_POINTER_INITIALIZER(p, v) \
+ .p = (typeof(*v) __force __rcu *)(v)

  /*
  * Does the specified offset indicate that the corresponding rcu_head
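A hedged illustration of the new RCU_POINTER_INITIALIZER() with hypothetical types, mirroring its use in the init_task.h hunk earlier in this commit:

#include <linux/rcupdate.h>

struct bar {
        int x;                          /* hypothetical payload */
};

struct bar_holder {
        struct bar __rcu *cur;          /* RCU-protected field */
        int refs;
};

static struct bar default_bar;

/* GCC-style designated initializer for the __rcu-annotated field. */
static struct bar_holder holder = {
        RCU_POINTER_INITIALIZER(cur, &default_bar),
        .refs = 1,
};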
@@ -935,7 +923,7 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
  #define __kfree_rcu(head, offset) \
  do { \
  BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
- call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
+ kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
  } while (0)

  /**
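For context (not part of the patch), __kfree_rcu() is the backend of kfree_rcu(); a hedged usage sketch with a hypothetical type:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct baz {
        int data;
        struct rcu_head rcu;    /* must be embedded in the object being freed */
};

static void baz_release(struct baz *bp)
{
        /* Frees bp after a grace period; no separate callback function needed. */
        kfree_rcu(bp, rcu);
}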
@@ -53,6 +53,50 @@

  #ifdef CONFIG_PREEMPT_RCU

+ /*
+ * Preemptible RCU implementation for rcu_read_lock().
+ * Just increment ->rcu_read_lock_nesting, shared state will be updated
+ * if we block.
+ */
+ void __rcu_read_lock(void)
+ {
+ current->rcu_read_lock_nesting++;
+ barrier(); /* critical section after entry code. */
+ }
+ EXPORT_SYMBOL_GPL(__rcu_read_lock);
+
+ /*
+ * Preemptible RCU implementation for rcu_read_unlock().
+ * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
+ * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ */
+ void __rcu_read_unlock(void)
+ {
+ struct task_struct *t = current;
+
+ if (t->rcu_read_lock_nesting != 1) {
+ --t->rcu_read_lock_nesting;
+ } else {
+ barrier(); /* critical section before exit code. */
+ t->rcu_read_lock_nesting = INT_MIN;
+ barrier(); /* assign before ->rcu_read_unlock_special load */
+ if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+ rcu_read_unlock_special(t);
+ barrier(); /* ->rcu_read_unlock_special load before assign */
+ t->rcu_read_lock_nesting = 0;
+ }
+ #ifdef CONFIG_PROVE_LOCKING
+ {
+ int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+ WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+ }
+ #endif /* #ifdef CONFIG_PROVE_LOCKING */
+ }
+ EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+
  /*
  * Check for a task exiting while in a preemptible-RCU read-side
  * critical section, clean up if so. No need to issue warnings,
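For orientation (not part of the patch), these are the out-of-line halves of rcu_read_lock()/rcu_read_unlock() under CONFIG_PREEMPT_RCU; the nesting counter is what makes a sketch like this legal:

#include <linux/rcupdate.h>

static void nested_reader(void)
{
        rcu_read_lock();        /* ->rcu_read_lock_nesting: 0 -> 1 */
        rcu_read_lock();        /* nesting: 1 -> 2 */
        /* ... access RCU-protected data ... */
        rcu_read_unlock();      /* nesting: 2 -> 1, fast path */
        rcu_read_unlock();      /* nesting: 1 -> 0, may run rcu_read_unlock_special() */
}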
@@ -172,7 +172,7 @@ void rcu_irq_enter(void)
  local_irq_restore(flags);
  }

- #ifdef CONFIG_PROVE_RCU
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC

  /*
  * Test whether RCU thinks that the current CPU is idle.
@@ -183,7 +183,7 @@ int rcu_is_cpu_idle(void)
  }
  EXPORT_SYMBOL(rcu_is_cpu_idle);

- #endif /* #ifdef CONFIG_PROVE_RCU */
+ #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

  /*
  * Test whether the current CPU was interrupted from idle. Nested
@@ -132,7 +132,6 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
  RCU_TRACE(.rcb.name = "rcu_preempt")
  };

- static void rcu_read_unlock_special(struct task_struct *t);
  static int rcu_preempted_readers_exp(void);
  static void rcu_report_exp_done(void);

@@ -526,24 +525,12 @@ void rcu_preempt_note_context_switch(void)
  local_irq_restore(flags);
  }

- /*
- * Tiny-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
- void __rcu_read_lock(void)
- {
- current->rcu_read_lock_nesting++;
- barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */
- }
- EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
  /*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
- static noinline void rcu_read_unlock_special(struct task_struct *t)
+ void rcu_read_unlock_special(struct task_struct *t)
  {
  int empty;
  int empty_exp;
@@ -626,38 +613,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
  local_irq_restore(flags);
  }

- /*
- * Tiny-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
- void __rcu_read_unlock(void)
- {
- struct task_struct *t = current;
-
- barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
- if (t->rcu_read_lock_nesting != 1)
- --t->rcu_read_lock_nesting;
- else {
- t->rcu_read_lock_nesting = INT_MIN;
- barrier(); /* assign before ->rcu_read_unlock_special load */
- if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
- rcu_read_unlock_special(t);
- barrier(); /* ->rcu_read_unlock_special load before assign */
- t->rcu_read_lock_nesting = 0;
- }
- #ifdef CONFIG_PROVE_LOCKING
- {
- int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
- WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
- }
- #endif /* #ifdef CONFIG_PROVE_LOCKING */
- }
- EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
  /*
  * Check for a quiescent state from the current CPU. When a task blocks,
  * the task is recorded in the rcu_preempt_ctrlblk structure, which is
@@ -846,8 +801,6 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  */
  int rcu_preempt_needs_cpu(void)
  {
- if (!rcu_preempt_running_reader())
- rcu_preempt_cpu_qs();
  return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
  }

@@ -206,6 +206,7 @@ static unsigned long boost_starttime; /* jiffies of next boost test start. */
  DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
  /* and boost task create/destroy. */
  static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */
+ static bool barrier_phase; /* Test phase. */
  static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
  static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
  static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
@@ -635,6 +636,17 @@ static void srcu_torture_synchronize(void)
  synchronize_srcu(&srcu_ctl);
  }

+ static void srcu_torture_call(struct rcu_head *head,
+ void (*func)(struct rcu_head *head))
+ {
+ call_srcu(&srcu_ctl, head, func);
+ }
+
+ static void srcu_torture_barrier(void)
+ {
+ srcu_barrier(&srcu_ctl);
+ }
+
  static int srcu_torture_stats(char *page)
  {
  int cnt = 0;
@@ -661,8 +673,8 @@ static struct rcu_torture_ops srcu_ops = {
  .completed = srcu_torture_completed,
  .deferred_free = srcu_torture_deferred_free,
  .sync = srcu_torture_synchronize,
- .call = NULL,
- .cb_barrier = NULL,
+ .call = srcu_torture_call,
+ .cb_barrier = srcu_torture_barrier,
  .stats = srcu_torture_stats,
  .name = "srcu"
  };
@@ -1013,7 +1025,11 @@ rcu_torture_fakewriter(void *arg)
  do {
  schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
  udelay(rcu_random(&rand) & 0x3ff);
- cur_ops->sync();
+ if (cur_ops->cb_barrier != NULL &&
+ rcu_random(&rand) % (nfakewriters * 8) == 0)
+ cur_ops->cb_barrier();
+ else
+ cur_ops->sync();
  rcu_stutter_wait("rcu_torture_fakewriter");
  } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

@@ -1631,6 +1647,7 @@ void rcu_torture_barrier_cbf(struct rcu_head *rcu)
  static int rcu_torture_barrier_cbs(void *arg)
  {
  long myid = (long)arg;
+ bool lastphase = 0;
  struct rcu_head rcu;

  init_rcu_head_on_stack(&rcu);
@@ -1638,9 +1655,11 @@ static int rcu_torture_barrier_cbs(void *arg)
  set_user_nice(current, 19);
  do {
  wait_event(barrier_cbs_wq[myid],
- atomic_read(&barrier_cbs_count) == n_barrier_cbs ||
+ barrier_phase != lastphase ||
  kthread_should_stop() ||
  fullstop != FULLSTOP_DONTSTOP);
+ lastphase = barrier_phase;
+ smp_mb(); /* ensure barrier_phase load before ->call(). */
  if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
  break;
  cur_ops->call(&rcu, rcu_torture_barrier_cbf);
@@ -1665,7 +1684,8 @@ static int rcu_torture_barrier(void *arg)
  do {
  atomic_set(&barrier_cbs_invoked, 0);
  atomic_set(&barrier_cbs_count, n_barrier_cbs);
- /* wake_up() path contains the required barriers. */
+ smp_mb(); /* Ensure barrier_phase after prior assignments. */
+ barrier_phase = !barrier_phase;
  for (i = 0; i < n_barrier_cbs; i++)
  wake_up(&barrier_cbs_wq[i]);
  wait_event(barrier_wq,
@@ -1684,7 +1704,7 @@ static int rcu_torture_barrier(void *arg)
  schedule_timeout_interruptible(HZ / 10);
  } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
  VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping");
- rcutorture_shutdown_absorb("rcu_torture_barrier_cbs");
+ rcutorture_shutdown_absorb("rcu_torture_barrier");
  while (!kthread_should_stop())
  schedule_timeout_interruptible(1);
  return 0;
@@ -1908,8 +1928,8 @@ rcu_torture_init(void)
  static struct rcu_torture_ops *torture_ops[] =
  { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
  &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
- &srcu_ops, &srcu_sync_ops, &srcu_raw_ops,
- &srcu_raw_sync_ops, &srcu_expedited_ops,
+ &srcu_ops, &srcu_sync_ops, &srcu_expedited_ops,
+ &srcu_raw_ops, &srcu_raw_sync_ops,
  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };

  mutex_lock(&fullstop_mutex);
kernel/rcutree.c
@@ -359,7 +359,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
  struct task_struct *idle = idle_task(smp_processor_id());

  trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
- ftrace_dump(DUMP_ALL);
+ ftrace_dump(DUMP_ORIG);
  WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
  current->pid, current->comm,
  idle->pid, idle->comm); /* must be idle task! */
@@ -469,7 +469,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)

  trace_rcu_dyntick("Error on exit: not idle task",
  oldval, rdtp->dynticks_nesting);
- ftrace_dump(DUMP_ALL);
+ ftrace_dump(DUMP_ORIG);
  WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
  current->pid, current->comm,
  idle->pid, idle->comm); /* must be idle task! */
@@ -586,8 +586,6 @@ void rcu_nmi_exit(void)
  WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
  }

- #ifdef CONFIG_PROVE_RCU
-
  /**
  * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
  *
@@ -605,7 +603,7 @@ int rcu_is_cpu_idle(void)
  }
  EXPORT_SYMBOL(rcu_is_cpu_idle);

- #ifdef CONFIG_HOTPLUG_CPU
+ #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

  /*
  * Is the current CPU online? Disable preemption to avoid false positives
@@ -646,9 +644,7 @@ bool rcu_lockdep_current_cpu_online(void)
  }
  EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

- #endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
- #endif /* #ifdef CONFIG_PROVE_RCU */
+ #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

  /**
  * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
@@ -734,7 +730,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
  int cpu;
  long delta;
  unsigned long flags;
- int ndetected;
+ int ndetected = 0;
  struct rcu_node *rnp = rcu_get_root(rsp);

  /* Only let one CPU complain about others per time interval. */
@@ -775,7 +771,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
  */
  rnp = rcu_get_root(rsp);
  raw_spin_lock_irqsave(&rnp->lock, flags);
- ndetected = rcu_print_task_stall(rnp);
+ ndetected += rcu_print_task_stall(rnp);
  raw_spin_unlock_irqrestore(&rnp->lock, flags);

  print_cpu_stall_info_end();
@@ -938,6 +934,18 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
  return ret;
  }

+ /*
+ * Initialize the specified rcu_data structure's callback list to empty.
+ */
+ static void init_callback_list(struct rcu_data *rdp)
+ {
+ int i;
+
+ rdp->nxtlist = NULL;
+ for (i = 0; i < RCU_NEXT_SIZE; i++)
+ rdp->nxttail[i] = &rdp->nxtlist;
+ }
+
  /*
  * Advance this CPU's callbacks, but only if the current grace period
  * has ended. This may be called only from the CPU to whom the rdp
@@ -1330,8 +1338,6 @@ static void
  rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
  struct rcu_node *rnp, struct rcu_data *rdp)
  {
- int i;
-
  /*
  * Orphan the callbacks. First adjust the counts. This is safe
  * because ->onofflock excludes _rcu_barrier()'s adoption of
@@ -1342,7 +1348,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
  rsp->qlen += rdp->qlen;
  rdp->n_cbs_orphaned += rdp->qlen;
  rdp->qlen_lazy = 0;
- rdp->qlen = 0;
+ ACCESS_ONCE(rdp->qlen) = 0;
  }

  /*
@@ -1371,9 +1377,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
  }

  /* Finally, initialize the rcu_data structure's list to empty. */
- rdp->nxtlist = NULL;
- for (i = 0; i < RCU_NEXT_SIZE; i++)
- rdp->nxttail[i] = &rdp->nxtlist;
+ init_callback_list(rdp);
  }

  /*
@@ -1507,6 +1511,9 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
  raw_spin_unlock_irqrestore(&rnp->lock, flags);
  if (need_report & RCU_OFL_TASKS_EXP_GP)
  rcu_report_exp_rnp(rsp, rnp, true);
+ WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
+ "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
+ cpu, rdp->qlen, rdp->nxtlist);
  }

  #else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -1594,7 +1601,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  }
  smp_mb(); /* List handling before counting for rcu_barrier(). */
  rdp->qlen_lazy -= count_lazy;
- rdp->qlen -= count;
+ ACCESS_ONCE(rdp->qlen) -= count;
  rdp->n_cbs_invoked += count;

  /* Reinstate batch limit if we have worked down the excess. */
@@ -1607,6 +1614,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  rdp->n_force_qs_snap = rsp->n_force_qs;
  } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
  rdp->qlen_last_fqs_check = rdp->qlen;
+ WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));

  local_irq_restore(flags);

@@ -1858,50 +1866,22 @@ static void invoke_rcu_core(void)
  raise_softirq(RCU_SOFTIRQ);
  }

- static void
- __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
- struct rcu_state *rsp, bool lazy)
+ /*
+ * Handle any core-RCU processing required by a call_rcu() invocation.
+ */
+ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
+ struct rcu_head *head, unsigned long flags)
  {
- unsigned long flags;
- struct rcu_data *rdp;
-
- WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
- debug_rcu_head_queue(head);
- head->func = func;
- head->next = NULL;
-
- smp_mb(); /* Ensure RCU update seen before callback registry. */
-
  /*
- * Opportunistically note grace-period endings and beginnings.
- * Note that we might see a beginning right after we see an
- * end, but never vice versa, since this CPU has to pass through
- * a quiescent state betweentimes.
+ * If called from an extended quiescent state, invoke the RCU
+ * core in order to force a re-evaluation of RCU's idleness.
  */
- local_irq_save(flags);
- rdp = this_cpu_ptr(rsp->rda);
+ if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+ invoke_rcu_core();

- /* Add the callback to our list. */
- rdp->qlen++;
- if (lazy)
- rdp->qlen_lazy++;
- else
- rcu_idle_count_callbacks_posted();
- smp_mb(); /* Count before adding callback for rcu_barrier(). */
- *rdp->nxttail[RCU_NEXT_TAIL] = head;
- rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
-
- if (__is_kfree_rcu_offset((unsigned long)func))
- trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
- rdp->qlen_lazy, rdp->qlen);
- else
- trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
-
- /* If interrupts were disabled, don't dive into RCU core. */
- if (irqs_disabled_flags(flags)) {
- local_irq_restore(flags);
+ /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
+ if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
  return;
- }

  /*
  * Force the grace period if too many callbacks or too long waiting.
@@ -1934,6 +1914,49 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
  }
  } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
  force_quiescent_state(rsp, 1);
  }

+ static void
+ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
+ struct rcu_state *rsp, bool lazy)
+ {
+ unsigned long flags;
+ struct rcu_data *rdp;
+
+ WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
+ debug_rcu_head_queue(head);
+ head->func = func;
+ head->next = NULL;
+
+ smp_mb(); /* Ensure RCU update seen before callback registry. */
+
+ /*
+ * Opportunistically note grace-period endings and beginnings.
+ * Note that we might see a beginning right after we see an
+ * end, but never vice versa, since this CPU has to pass through
+ * a quiescent state betweentimes.
+ */
+ local_irq_save(flags);
+ rdp = this_cpu_ptr(rsp->rda);
+
+ /* Add the callback to our list. */
+ ACCESS_ONCE(rdp->qlen)++;
+ if (lazy)
+ rdp->qlen_lazy++;
+ else
+ rcu_idle_count_callbacks_posted();
+ smp_mb(); /* Count before adding callback for rcu_barrier(). */
+ *rdp->nxttail[RCU_NEXT_TAIL] = head;
+ rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
+
+ if (__is_kfree_rcu_offset((unsigned long)func))
+ trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
+ rdp->qlen_lazy, rdp->qlen);
+ else
+ trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
+
+ /* Go handle any RCU core processing required. */
+ __call_rcu_core(rsp, rdp, head, flags);
+ local_irq_restore(flags);
+ }

@@ -1963,28 +1986,16 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
  * occasionally incorrectly indicate that there are multiple CPUs online
  * when there was in fact only one the whole time, as this just adds
  * some overhead: RCU still operates correctly.
- *
- * Of course, sampling num_online_cpus() with preemption enabled can
- * give erroneous results if there are concurrent CPU-hotplug operations.
- * For example, given a demonic sequence of preemptions in num_online_cpus()
- * and CPU-hotplug operations, there could be two or more CPUs online at
- * all times, but num_online_cpus() might well return one (or even zero).
- *
- * However, all such demonic sequences require at least one CPU-offline
- * operation. Furthermore, rcu_blocking_is_gp() giving the wrong answer
- * is only a problem if there is an RCU read-side critical section executing
- * throughout. But RCU-sched and RCU-bh read-side critical sections
- * disable either preemption or bh, which prevents a CPU from going offline.
- * Therefore, the only way that rcu_blocking_is_gp() can incorrectly return
- * that there is only one CPU when in fact there was more than one throughout
- * is when there were no RCU readers in the system. If there are no
- * RCU readers, the grace period by definition can be of zero length,
- * regardless of the number of online CPUs.
  */
  static inline int rcu_blocking_is_gp(void)
  {
+ int ret;
+
  might_sleep(); /* Check for RCU read-side critical section. */
- return num_online_cpus() <= 1;
+ preempt_disable();
+ ret = num_online_cpus() <= 1;
+ preempt_enable();
+ return ret;
  }

  /**
@@ -2473,18 +2484,15 @@ static void __init
  rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
  {
  unsigned long flags;
- int i;
  struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  struct rcu_node *rnp = rcu_get_root(rsp);

  /* Set up local state, ensuring consistent view of global state. */
  raw_spin_lock_irqsave(&rnp->lock, flags);
  rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
- rdp->nxtlist = NULL;
- for (i = 0; i < RCU_NEXT_SIZE; i++)
- rdp->nxttail[i] = &rdp->nxtlist;
+ init_callback_list(rdp);
  rdp->qlen_lazy = 0;
- rdp->qlen = 0;
+ ACCESS_ONCE(rdp->qlen) = 0;
  rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
  WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
  WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
@@ -100,6 +100,7 @@ struct rcu_dynticks {
  /* # times non-lazy CBs posted to CPU. */
  unsigned long nonlazy_posted_snap;
  /* idle-period nonlazy_posted snapshot. */
+ int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
  #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
  };

@@ -83,7 +83,6 @@ struct rcu_state rcu_preempt_state =
  DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
  static struct rcu_state *rcu_state = &rcu_preempt_state;

- static void rcu_read_unlock_special(struct task_struct *t);
  static int rcu_preempted_readers_exp(struct rcu_node *rnp);

  /*
@@ -237,18 +236,6 @@ static void rcu_preempt_note_context_switch(int cpu)
  local_irq_restore(flags);
  }

- /*
- * Tree-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
- void __rcu_read_lock(void)
- {
- current->rcu_read_lock_nesting++;
- barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
- }
- EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
  /*
  * Check for preempted RCU readers blocking the current grace period
  * for the specified rcu_node structure. If the caller needs a reliable
@@ -315,7 +302,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
- static noinline void rcu_read_unlock_special(struct task_struct *t)
+ void rcu_read_unlock_special(struct task_struct *t)
  {
  int empty;
  int empty_exp;
@@ -423,38 +410,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
  }
  }

- /*
- * Tree-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
- void __rcu_read_unlock(void)
- {
- struct task_struct *t = current;
-
- if (t->rcu_read_lock_nesting != 1)
- --t->rcu_read_lock_nesting;
- else {
- barrier(); /* critical section before exit code. */
- t->rcu_read_lock_nesting = INT_MIN;
- barrier(); /* assign before ->rcu_read_unlock_special load */
- if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
- rcu_read_unlock_special(t);
- barrier(); /* ->rcu_read_unlock_special load before assign */
- t->rcu_read_lock_nesting = 0;
- }
- #ifdef CONFIG_PROVE_LOCKING
- {
- int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
- WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
- }
- #endif /* #ifdef CONFIG_PROVE_LOCKING */
- }
- EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
  #ifdef CONFIG_RCU_CPU_STALL_VERBOSE

  /*
@@ -1856,9 +1811,11 @@ static void rcu_idle_count_callbacks_posted(void)
  */
  #define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */
  #define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */
- #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */
+ #define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period. */
  #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */

+ extern int tick_nohz_enabled;
+
  /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU? Both RCU flavor and CPU are specified by the
@@ -1935,10 +1892,13 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
  return 1;
  }
  /* Set up for the possibility that RCU will post a timer. */
- if (rcu_cpu_has_nonlazy_callbacks(cpu))
- *delta_jiffies = RCU_IDLE_GP_DELAY;
- else
- *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+ if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+ *delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
+ RCU_IDLE_GP_DELAY) - jiffies;
+ } else {
+ *delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
+ *delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
+ }
  return 0;
  }

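A hedged aside on the rounding idiom above (hypothetical numbers, not part of the patch): snapping the wakeup target to a multiple of the delay lets idle CPUs wake at the same jiffy and share one grace-period computation.

#include <linux/kernel.h>       /* round_up(), power-of-two alignment */
#include <linux/jiffies.h>

/* Example with jiffies == 1003 and delay == 4: round_up(1007, 4) == 1008, so delta == 5. */
static unsigned long coalesced_delta(unsigned long delay)
{
        return round_up(jiffies + delay, delay) - jiffies;
}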
@@ -1997,6 +1957,7 @@ static void rcu_cleanup_after_idle(int cpu)

  del_timer(&rdtp->idle_gp_timer);
  trace_rcu_prep_idle("Cleanup after idle");
+ rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
  }

  /*
@@ -2022,6 +1983,18 @@ static void rcu_prepare_for_idle(int cpu)
  {
  struct timer_list *tp;
  struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+ int tne;
+
+ /* Handle nohz enablement switches conservatively. */
+ tne = ACCESS_ONCE(tick_nohz_enabled);
+ if (tne != rdtp->tick_nohz_enabled_snap) {
+ if (rcu_cpu_has_callbacks(cpu))
+ invoke_rcu_core(); /* force nohz to see update. */
+ rdtp->tick_nohz_enabled_snap = tne;
+ return;
+ }
+ if (!tne)
+ return;

  /*
  * If this is an idle re-entry, for example, due to use of
@@ -2075,10 +2048,11 @@ static void rcu_prepare_for_idle(int cpu)
  if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
  trace_rcu_prep_idle("Dyntick with callbacks");
  rdtp->idle_gp_timer_expires =
- jiffies + RCU_IDLE_GP_DELAY;
+ round_up(jiffies + RCU_IDLE_GP_DELAY,
+ RCU_IDLE_GP_DELAY);
  } else {
  rdtp->idle_gp_timer_expires =
- jiffies + RCU_IDLE_LAZY_GP_DELAY;
+ round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
  trace_rcu_prep_idle("Dyntick with lazy callbacks");
  }
  tp = &rdtp->idle_gp_timer;
@@ -2157,6 +2131,7 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu)

  static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
  {
+ *cp = '\0';
  }

  #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
@@ -105,7 +105,7 @@ static ktime_t tick_init_jiffy_update(void)
  /*
  * NO HZ enabled ?
  */
- static int tick_nohz_enabled __read_mostly = 1;
+ int tick_nohz_enabled __read_mostly = 1;

  /*
  * Enable / Disable tickless mode