rcu: Add support for consolidated-RCU reader checking

This commit adds RCU-reader checks to list_for_each_entry_rcu() and
hlist_for_each_entry_rcu().  These checks are optional, and are indicated
by a lockdep expression passed to a new optional argument to these two
macros.  If this optional lockdep expression is omitted, these two macros
act as before, checking for an RCU read-side critical section.

Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
[ paulmck: Update to eliminate return within macro and update comment. ]
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Author:    Joel Fernandes (Google) <joel@joelfernandes.org>
Date:      2019-07-16 18:12:22 -04:00
Committer: Paul E. McKenney
Commit:    28875945ba (parent 9147089bee)

4 changed files with 108 additions and 38 deletions
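
Before wading into the diff, here is a sketch of how a caller might use the
new optional argument.  This is illustrative only: struct foo, mylock, mylist,
and sum_foos() are invented names; lockdep_is_held() is the stock lockdep
helper.

	struct foo {
		int val;
		struct list_head node;
	};

	static DEFINE_SPINLOCK(mylock);	/* guards updates to mylist */
	static LIST_HEAD(mylist);

	static int sum_foos(void)
	{
		struct foo *f;
		int sum = 0;

		/* Reader path: unchanged usage, now checked against
		 * rcu_read_lock_any_held() under CONFIG_PROVE_RCU_LIST. */
		rcu_read_lock();
		list_for_each_entry_rcu(f, &mylist, node)
			sum += f->val;
		rcu_read_unlock();

		/* Updater path: no RCU reader, but mylock is held, so pass
		 * a lockdep expression via the new optional argument. */
		spin_lock(&mylock);
		list_for_each_entry_rcu(f, &mylist, node,
					lockdep_is_held(&mylock))
			sum += f->val;
		spin_unlock(&mylock);

		return sum;
	}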

diff --git a/include/linux/rculist.h b/include/linux/rculist.h

@@ -40,6 +40,24 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
  */
 #define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))
 
+/*
+ * Check during list traversal that we are within an RCU reader
+ */
+
+#define check_arg_count_one(dummy)
+
+#ifdef CONFIG_PROVE_RCU_LIST
+#define __list_check_rcu(dummy, cond, extra...)				\
+	({								\
+	check_arg_count_one(extra);					\
+	RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(),		\
+			 "RCU-list traversed in non-reader section!");	\
+	})
+#else
+#define __list_check_rcu(dummy, cond, extra...)				\
+	({ check_arg_count_one(extra); })
+#endif
+
 /*
  * Insert a new entry between two known consecutive entries.
  *
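
As I read the guard above: the traversal macros (next hunks) invoke
__list_check_rcu(dummy, ## cond, 0), so the trailing 0 lands in extra...
whenever a cond is supplied, and check_arg_count_one() accepts exactly one
argument.  The net effect, with hypothetical conditions c1 and c2:

	list_for_each_entry_rcu(f, &mylist, node);		/* OK: cond omitted */
	list_for_each_entry_rcu(f, &mylist, node, c1);		/* OK: one cond */
	list_for_each_entry_rcu(f, &mylist, node, c1, c2);	/* compile error:
				check_arg_count_one(c2, 0) gets two arguments */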
@@ -343,14 +361,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
  * @member:	the name of the list_head within the struct.
+ * @cond:	optional lockdep expression if called from non-RCU protection.
  *
  * This list-traversal primitive may safely run concurrently with
  * the _rcu list-mutation primitives such as list_add_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-#define list_for_each_entry_rcu(pos, head, member) \
-	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
-		&pos->member != (head); \
+#define list_for_each_entry_rcu(pos, head, member, cond...) \
+	for (__list_check_rcu(dummy, ## cond, 0), \
+	     pos = list_entry_rcu((head)->next, typeof(*pos), member); \
+		&pos->member != (head); \
 		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 
 /**
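
A design note, as I read the new initializer: __list_check_rcu() is joined to
the pos assignment with the comma operator, so the check runs exactly once per
traversal rather than once per iteration, and the macro still expands to a
single for statement usable anywhere the old one was.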
@@ -616,13 +636,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
  * @member:	the name of the hlist_node within the struct.
+ * @cond:	optional lockdep expression if called from non-RCU protection.
  *
  * This list-traversal primitive may safely run concurrently with
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-#define hlist_for_each_entry_rcu(pos, head, member)			\
-	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
+#define hlist_for_each_entry_rcu(pos, head, member, cond...)		\
+	for (__list_check_rcu(dummy, ## cond, 0),			\
+	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
 			typeof(*(pos)), member);			\
 		pos;							\
 		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h

@@ -221,6 +221,7 @@ int debug_lockdep_rcu_enabled(void);
 int rcu_read_lock_held(void);
 int rcu_read_lock_bh_held(void);
 int rcu_read_lock_sched_held(void);
+int rcu_read_lock_any_held(void);
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -241,6 +242,12 @@ static inline int rcu_read_lock_sched_held(void)
 {
 	return !preemptible();
 }
+
+static inline int rcu_read_lock_any_held(void)
+{
+	return !preemptible();
+}
+
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 #ifdef CONFIG_PROVE_RCU
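
An aside on the !CONFIG_DEBUG_LOCK_ALLOC stub above (my reading, mirroring the
existing rcu_read_lock_sched_held() stub): without lockdep there is nothing to
consult, so !preemptible() is the best available guess, given that with the
consolidated RCU flavors a context that cannot be preempted can be treated as
an RCU reader.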

diff --git a/kernel/rcu/Kconfig.debug b/kernel/rcu/Kconfig.debug

@@ -8,6 +8,17 @@ menu "RCU Debugging"
 config PROVE_RCU
 	def_bool PROVE_LOCKING
 
+config PROVE_RCU_LIST
+	bool "RCU list lockdep debugging"
+	depends on PROVE_RCU && RCU_EXPERT
+	default n
+	help
+	  Enable RCU lockdep checking for list usages. By default it is
+	  turned off since there are several list RCU users that still
+	  need to be converted to pass a lockdep expression. To prevent
+	  false-positive splats, we keep it default disabled but once all
+	  users are converted, we can remove this config option.
+
 config TORTURE_TEST
 	tristate
 	default n
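
To actually exercise the new checking, the depends line above implies a config
fragment along these lines (PROVE_RCU itself tracks PROVE_LOCKING via the
def_bool earlier in this file):

	CONFIG_PROVE_LOCKING=y
	CONFIG_RCU_EXPERT=y
	CONFIG_PROVE_RCU_LIST=y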

diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c

@@ -61,9 +61,15 @@ module_param(rcu_normal_after_boot, int, 0);
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 /**
- * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
+ * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
+ * @ret:	Best guess answer if lockdep cannot be relied on
  *
- * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
+ * Returns true if lockdep must be ignored, in which case *ret contains
+ * the best guess described below.  Otherwise returns false, in which
+ * case *ret tells the caller nothing and the caller should instead
+ * consult lockdep.
+ *
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, set *ret to nonzero iff in an
  * RCU-sched read-side critical section.  In absence of
  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
  * critical section unless it can prove otherwise.  Note that disabling
@@ -75,30 +81,44 @@ module_param(rcu_normal_after_boot, int, 0);
  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
  *
- * Note that if the CPU is in the idle loop from an RCU point of
- * view (ie: that we are in the section between rcu_idle_enter() and
- * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
- * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
- * that are in such a section, considering these as in extended quiescent
- * state, so such a CPU is effectively never in an RCU read-side critical
- * section regardless of what RCU primitives it invokes.  This state of
- * affairs is required --- we need to keep an RCU-free window in idle
- * where the CPU may possibly enter into low power mode. This way we can
- * notice an extended quiescent state to other CPUs that started a grace
- * period. Otherwise we would delay any grace period as long as we run in
- * the idle task.
+ * Note that if the CPU is in the idle loop from an RCU point of view (ie:
+ * that we are in the section between rcu_idle_enter() and rcu_idle_exit())
+ * then rcu_read_lock_held() sets *ret to false even if the CPU did an
+ * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
+ * in such a section, considering these as in extended quiescent state,
+ * so such a CPU is effectively never in an RCU read-side critical section
+ * regardless of what RCU primitives it invokes.  This state of affairs is
+ * required --- we need to keep an RCU-free window in idle where the CPU may
+ * possibly enter into low power mode. This way we can notice an extended
+ * quiescent state to other CPUs that started a grace period. Otherwise
+ * we would delay any grace period as long as we run in the idle task.
  *
- * Similarly, we avoid claiming an SRCU read lock held if the current
+ * Similarly, we avoid claiming an RCU read lock held if the current
  * CPU is offline.
  */
+static bool rcu_read_lock_held_common(bool *ret)
+{
+	if (!debug_lockdep_rcu_enabled()) {
+		*ret = 1;
+		return true;
+	}
+	if (!rcu_is_watching()) {
+		*ret = 0;
+		return true;
+	}
+	if (!rcu_lockdep_current_cpu_online()) {
+		*ret = 0;
+		return true;
+	}
+	return false;
+}
+
 int rcu_read_lock_sched_held(void)
 {
-	if (!debug_lockdep_rcu_enabled())
-		return 1;
-	if (!rcu_is_watching())
-		return 0;
-	if (!rcu_lockdep_current_cpu_online())
-		return 0;
+	bool ret;
+
+	if (rcu_read_lock_held_common(&ret))
+		return ret;
 	return lock_is_held(&rcu_sched_lock_map) || !preemptible();
 }
 EXPORT_SYMBOL(rcu_read_lock_sched_held);
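
The conversion pattern above repeats verbatim for each flavor below: trust
rcu_read_lock_held_common() when it says lockdep cannot be used, otherwise
fall through to the flavor-specific lock_is_held() test.  A hypothetical
additional flavor (not in this patch) would take the same shape:

	int rcu_read_lock_example_held(void)	/* invented name */
	{
		bool ret;

		if (rcu_read_lock_held_common(&ret))
			return ret;
		return lock_is_held(&rcu_lock_map);	/* flavor-specific */
	}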
@@ -257,12 +277,10 @@ NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
  */
 int rcu_read_lock_held(void)
 {
-	if (!debug_lockdep_rcu_enabled())
-		return 1;
-	if (!rcu_is_watching())
-		return 0;
-	if (!rcu_lockdep_current_cpu_online())
-		return 0;
+	bool ret;
+
+	if (rcu_read_lock_held_common(&ret))
+		return ret;
 	return lock_is_held(&rcu_lock_map);
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_held);
@@ -284,16 +302,28 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_held);
  */
 int rcu_read_lock_bh_held(void)
 {
-	if (!debug_lockdep_rcu_enabled())
-		return 1;
-	if (!rcu_is_watching())
-		return 0;
-	if (!rcu_lockdep_current_cpu_online())
-		return 0;
+	bool ret;
+
+	if (rcu_read_lock_held_common(&ret))
+		return ret;
 	return in_softirq() || irqs_disabled();
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
 
+int rcu_read_lock_any_held(void)
+{
+	bool ret;
+
+	if (rcu_read_lock_held_common(&ret))
+		return ret;
+	if (lock_is_held(&rcu_lock_map) ||
+	    lock_is_held(&rcu_bh_lock_map) ||
+	    lock_is_held(&rcu_sched_lock_map))
+		return 1;
+	return !preemptible();
+}
+EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);
+
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 /**
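
Putting the pieces together, a sketch of the misuse this machinery is meant to
catch; buggy_walk(), do_something(), struct foo, and mylist are invented names:

	/*
	 * With CONFIG_PROVE_RCU_LIST=y, this traversal holds neither an RCU
	 * read lock nor a lock named via a cond argument, so __list_check_rcu()
	 * sees rcu_read_lock_any_held() return false and RCU_LOCKDEP_WARN()
	 * splats "RCU-list traversed in non-reader section!".
	 */
	static void buggy_walk(void)
	{
		struct foo *f;

		list_for_each_entry_rcu(f, &mylist, node)	/* no reader, no cond */
			do_something(f);
	}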