rcu: Consolidate RCU-sched update-side function definitions
This commit saves a few lines by consolidating the RCU-sched function
definitions at the end of include/linux/rcupdate.h.  This consolidation
also makes it easier to remove them all when the time comes.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
commit a8bb74acd8
parent 4c7e9c1434
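For context, here is a minimal caller-side sketch (hypothetical code, not part of this commit; the struct and function names are invented) showing why the consolidation is transparent to existing callers: the RCU-sched entry points removed from the per-flavor headers survive as static inline wrappers at the end of include/linux/rcupdate.h, so a call_rcu_sched() user now simply queues onto the consolidated RCU flavor.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int val;
	struct rcu_head rh;
};

/* Reclaim callback, invoked once a grace period has elapsed. */
static void foo_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo, rh));
}

/* Retire an element.  After this commit, call_rcu_sched() is a
 * static inline wrapper that forwards to call_rcu(), so this code
 * keeps compiling and behaves the same under TINY and TREE RCU. */
static void foo_retire(struct foo *fp)
{
	call_rcu_sched(&fp->rh, foo_free_cb);
}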
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -48,12 +48,6 @@
 #define ulong2long(a)		(*(long *)(&(a)))
 
 /* Exported common interfaces */
-
-#ifndef CONFIG_TINY_RCU
-void synchronize_sched(void);
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
-#endif
-
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
 void rcu_barrier_tasks(void);
 void synchronize_rcu(void);
@@ -170,7 +164,7 @@ void exit_tasks_rcu_finish(void);
 #define rcu_tasks_qs(t)	do { } while (0)
 #define rcu_note_voluntary_context_switch(t)		rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
-#define synchronize_rcu_tasks synchronize_sched
+#define synchronize_rcu_tasks synchronize_rcu
 static inline void exit_tasks_rcu_start(void) { }
 static inline void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
@@ -892,4 +886,34 @@ static inline void rcu_barrier_bh(void)
 	rcu_barrier();
 }
 
+static inline void synchronize_sched(void)
+{
+	synchronize_rcu();
+}
+
+static inline void synchronize_sched_expedited(void)
+{
+	synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
+{
+	call_rcu(head, func);
+}
+
+static inline void rcu_barrier_sched(void)
+{
+	rcu_barrier();
+}
+
+static inline unsigned long get_state_synchronize_sched(void)
+{
+	return get_state_synchronize_rcu();
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+	cond_synchronize_rcu(oldstate);
+}
+
 #endif /* __LINUX_RCUPDATE_H */
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -36,11 +36,6 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
 /* Never flag non-existent other CPUs! */
 static inline bool rcu_eqs_special_set(int cpu) { return false; }
 
-static inline void synchronize_sched(void)
-{
-	synchronize_rcu();
-}
-
 static inline unsigned long get_state_synchronize_rcu(void)
 {
 	return 0;
@@ -51,36 +46,11 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
 	might_sleep();
 }
 
-static inline unsigned long get_state_synchronize_sched(void)
-{
-	return 0;
-}
-
-static inline void cond_synchronize_sched(unsigned long oldstate)
-{
-	might_sleep();
-}
-
 extern void rcu_barrier(void);
 
-static inline void rcu_barrier_sched(void)
-{
-	rcu_barrier();  /* Only one CPU, so only one list of callbacks! */
-}
-
 static inline void synchronize_rcu_expedited(void)
 {
-	synchronize_sched();
-}
-
-static inline void synchronize_sched_expedited(void)
-{
-	synchronize_sched();
-}
-
-static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
-{
-	call_rcu(head, func);
+	synchronize_rcu();
 }
 
 static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -46,21 +46,12 @@ static inline void rcu_virt_note_context_switch(int cpu)
 }
 
 void synchronize_rcu_expedited(void);
-
-static inline void synchronize_sched_expedited(void)
-{
-	synchronize_rcu_expedited();
-}
-
 void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
 
 void rcu_barrier(void);
-void rcu_barrier_sched(void);
 bool rcu_eqs_special_set(int cpu);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
-unsigned long get_state_synchronize_sched(void);
-void cond_synchronize_sched(unsigned long oldstate);
 
 void rcu_idle_enter(void);
 void rcu_idle_exit(void);
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2950,19 +2950,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
-/**
- * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * This is transitional.
- */
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
-{
-	call_rcu(head, func);
-}
-EXPORT_SYMBOL_GPL(call_rcu_sched);
-
 /*
  * Queue an RCU callback for lazy invocation after a grace period.
  * This will likely be later named something like "call_rcu_lazy()",
@@ -2976,17 +2963,6 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(kfree_call_rcu);
 
-/**
- * synchronize_sched - wait until an rcu-sched grace period has elapsed.
- *
- * This is transitional.
- */
-void synchronize_sched(void)
-{
-	synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched);
-
 /**
  * get_state_synchronize_rcu - Snapshot current RCU state
  *
@@ -3028,29 +3004,6 @@ void cond_synchronize_rcu(unsigned long oldstate)
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
 
-/**
- * get_state_synchronize_sched - Snapshot current RCU-sched state
- *
- * This is transitional, and only used by rcutorture.
- */
-unsigned long get_state_synchronize_sched(void)
-{
-	return get_state_synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
-
-/**
- * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
- * @oldstate: return value from earlier call to get_state_synchronize_sched()
- *
- * This is transitional and only used by rcutorture.
- */
-void cond_synchronize_sched(unsigned long oldstate)
-{
-	cond_synchronize_rcu(oldstate);
-}
-EXPORT_SYMBOL_GPL(cond_synchronize_sched);
-
 /*
  * Check to see if there is any immediate RCU-related work to be done by
  * the current CPU, for the specified type of RCU, returning 1 if so and
@@ -3266,17 +3219,6 @@ void rcu_barrier(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
-/**
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
- *
- * This is transitional.
- */
-void rcu_barrier_sched(void)
-{
-	rcu_barrier();
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
-
 /*
  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
  * first CPU in a given leaf rcu_node structure coming online.  The caller
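A design note on the consolidation: previously rcutiny.h and rcutree.h each carried their own RCU-sched shims, whereas the common header now gives both builds a single definition of each transitional interface. As an illustration, a hedged sketch of the snapshot/conditional-wait pattern that get_state_synchronize_sched() and cond_synchronize_sched() preserve (the surrounding function is invented; per the removed kerneldoc, these two are transitional and used only by rcutorture):

/* Hypothetical updater, for illustration only. */
static void example_updater(void)
{
	unsigned long oldstate;

	/* Snapshot the current grace-period state. */
	oldstate = get_state_synchronize_sched();

	/* ... do other work while a grace period may elapse ... */

	/* Wait only if a full grace period has not already elapsed
	 * since the snapshot.  After this commit, both calls forward
	 * directly to get_state_synchronize_rcu() and
	 * cond_synchronize_rcu(). */
	cond_synchronize_sched(oldstate);
}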