rcu: Make rcu_read_lock_sched_held() take boot time into account
Before the scheduler starts, all tasks are non-preemptible by definition.
So, during that time, rcu_read_lock_sched_held() needs to always return
"true".  This patch makes that be so.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267135607-7056-2-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
		
							parent
							
								
									056ba4a9be
								
							
						
					
					
						commit
						d9f1bb6ad7
					
				| @ -62,6 +62,8 @@ extern int sched_expedited_torture_stats(char *page); | ||||
| 
 | ||||
| /* Internal to kernel */ | ||||
| extern void rcu_init(void); | ||||
| extern int rcu_scheduler_active; | ||||
| extern void rcu_scheduler_starting(void); | ||||
| 
 | ||||
| #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | ||||
| #include <linux/rcutree.h> | ||||
| @ -140,7 +142,7 @@ static inline int rcu_read_lock_sched_held(void) | ||||
| 
 | ||||
| 	if (debug_locks) | ||||
| 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map); | ||||
| 	return lockdep_opinion || preempt_count() != 0; | ||||
| 	return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active; | ||||
| } | ||||
| 
 | ||||
| #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||||
|  | ||||
| @ -105,10 +105,6 @@ static inline void rcu_exit_nohz(void) | ||||
| 
 | ||||
| #endif /* #else #ifdef CONFIG_NO_HZ */ | ||||
| 
 | ||||
| static inline void rcu_scheduler_starting(void) | ||||
| { | ||||
| } | ||||
| 
 | ||||
| static inline void exit_rcu(void) | ||||
| { | ||||
| } | ||||
|  | ||||
| @ -35,7 +35,6 @@ struct notifier_block; | ||||
| extern void rcu_sched_qs(int cpu); | ||||
| extern void rcu_bh_qs(int cpu); | ||||
| extern int rcu_needs_cpu(int cpu); | ||||
| extern void rcu_scheduler_starting(void); | ||||
| extern int rcu_expedited_torture_stats(char *page); | ||||
| 
 | ||||
| #ifdef CONFIG_TREE_PREEMPT_RCU | ||||
|  | ||||
| @ -44,6 +44,7 @@ | ||||
| #include <linux/cpu.h> | ||||
| #include <linux/mutex.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/kernel_stat.h> | ||||
| 
 | ||||
| #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||||
| static struct lock_class_key rcu_lock_key; | ||||
| @ -62,6 +63,23 @@ struct lockdep_map rcu_sched_lock_map = | ||||
| EXPORT_SYMBOL_GPL(rcu_sched_lock_map); | ||||
| #endif | ||||
| 
 | ||||
| int rcu_scheduler_active __read_mostly; | ||||
| 
 | ||||
| /*
 | ||||
|  * This function is invoked towards the end of the scheduler's initialization | ||||
|  * process.  Before this is called, the idle task might contain | ||||
|  * RCU read-side critical sections (during which time, this idle | ||||
|  * task is booting the system).  After this function is called, the | ||||
|  * idle tasks are prohibited from containing RCU read-side critical | ||||
|  * sections. | ||||
|  */ | ||||
| void rcu_scheduler_starting(void) | ||||
| { | ||||
| 	WARN_ON(num_online_cpus() != 1); | ||||
| 	WARN_ON(nr_context_switches() > 0); | ||||
| 	rcu_scheduler_active = 1; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Awaken the corresponding synchronize_rcu() instance now that a | ||||
|  * grace period has elapsed. | ||||
|  | ||||
| @ -46,7 +46,6 @@ | ||||
| #include <linux/cpu.h> | ||||
| #include <linux/mutex.h> | ||||
| #include <linux/time.h> | ||||
| #include <linux/kernel_stat.h> | ||||
| 
 | ||||
| #include "rcutree.h" | ||||
| 
 | ||||
| @ -81,9 +80,6 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); | ||||
| struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | ||||
| DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | ||||
| 
 | ||||
| static int rcu_scheduler_active __read_mostly; | ||||
| 
 | ||||
| 
 | ||||
| /*
 | ||||
|  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s | ||||
|  * permit this function to be invoked without holding the root rcu_node | ||||
| @ -1565,21 +1561,6 @@ static int rcu_needs_cpu_quick_check(int cpu) | ||||
| 	       rcu_preempt_needs_cpu(cpu); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * This function is invoked towards the end of the scheduler's initialization | ||||
|  * process.  Before this is called, the idle task might contain | ||||
|  * RCU read-side critical sections (during which time, this idle | ||||
|  * task is booting the system).  After this function is called, the | ||||
|  * idle tasks are prohibited from containing RCU read-side critical | ||||
|  * sections. | ||||
|  */ | ||||
| void rcu_scheduler_starting(void) | ||||
| { | ||||
| 	WARN_ON(num_online_cpus() != 1); | ||||
| 	WARN_ON(nr_context_switches() > 0); | ||||
| 	rcu_scheduler_active = 1; | ||||
| } | ||||
| 
 | ||||
| static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; | ||||
| static atomic_t rcu_barrier_cpu_count; | ||||
| static DEFINE_MUTEX(rcu_barrier_mutex); | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user