rcu/sync: Kill rcu_sync_type/gp_type
Now that the RCU flavors have been consolidated, rcu_sync_type makes no
sense because none of the internal update functions aside from .held()
depend on gp_type.  This commit therefore removes this field and
consolidates the relevant code.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
[ paulmck: Added RCU and RCU-bh checks to rcu_sync_is_idle(). ]
[ paulmck: And applied subsequent feedback from Oleg Nesterov. ]
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
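For illustration only (not part of this patch): a minimal caller-side sketch of the interface change described above. The instance name my_rss and the function my_setup() are hypothetical; only the two rcu_sync_init() forms come from the patch itself.

#include <linux/rcu_sync.h>

static struct rcu_sync my_rss;		/* hypothetical example instance */

static void my_setup(void)		/* hypothetical initialization hook */
{
	/* Before this patch, every user had to name an RCU flavor: */
	/* rcu_sync_init(&my_rss, RCU_SCHED_SYNC); */

	/* After flavor consolidation there is only one flavor to wait for: */
	rcu_sync_init(&my_rss);
}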
parent a188339ca5
commit 95bf33b55f
include/linux/percpu-rwsem.h
@@ -20,7 +20,7 @@ struct percpu_rw_semaphore {
 #define DEFINE_STATIC_PERCPU_RWSEM(name)				\
 static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
 static struct percpu_rw_semaphore name = {				\
-	.rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),	\
+	.rss = __RCU_SYNC_INITIALIZER(name.rss),			\
 	.read_count = &__percpu_rwsem_rc_##name,			\
 	.rw_sem = __RWSEM_INITIALIZER(name.rw_sem),			\
 	.writer = __RCUWAIT_INITIALIZER(name.writer),			\

include/linux/rcu_sync.h
@@ -13,8 +13,6 @@
 #include <linux/wait.h>
 #include <linux/rcupdate.h>
 
-enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC };
-
 /* Structure to mediate between updaters and fastpath-using readers.  */
 struct rcu_sync {
 	int			gp_state;
@@ -23,52 +21,38 @@ struct rcu_sync {
 
 	int			cb_state;
 	struct rcu_head		cb_head;
-
-	enum rcu_sync_type	gp_type;
 };
 
-extern void rcu_sync_lockdep_assert(struct rcu_sync *);
-
 /**
  * rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
  * @rsp: Pointer to rcu_sync structure to use for synchronization
  *
- * Returns true if readers are permitted to use their fastpaths.
- * Must be invoked within an RCU read-side critical section whose
- * flavor matches that of the rcu_sync struture.
+ * Returns true if readers are permitted to use their fastpaths.  Must be
+ * invoked within some flavor of RCU read-side critical section.
  */
 static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
 {
-#ifdef CONFIG_PROVE_RCU
-	rcu_sync_lockdep_assert(rsp);
-#endif
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+			 !rcu_read_lock_bh_held() &&
+			 !rcu_read_lock_sched_held(),
+			 "suspicious rcu_sync_is_idle() usage");
 	return !rsp->gp_state; /* GP_IDLE */
 }
 
-extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
+extern void rcu_sync_init(struct rcu_sync *);
 extern void rcu_sync_enter_start(struct rcu_sync *);
 extern void rcu_sync_enter(struct rcu_sync *);
 extern void rcu_sync_exit(struct rcu_sync *);
 extern void rcu_sync_dtor(struct rcu_sync *);
 
-#define __RCU_SYNC_INITIALIZER(name, type) {				\
+#define __RCU_SYNC_INITIALIZER(name) {					\
 		.gp_state = 0,						\
 		.gp_count = 0,						\
 		.gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait),	\
 		.cb_state = 0,						\
-		.gp_type = type,					\
 	}
 
-#define	__DEFINE_RCU_SYNC(name, type)	\
-	struct rcu_sync_struct name = __RCU_SYNC_INITIALIZER(name, type)
-
-#define DEFINE_RCU_SYNC(name)		\
-	__DEFINE_RCU_SYNC(name, RCU_SYNC)
-
-#define DEFINE_RCU_SCHED_SYNC(name)	\
-	__DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC)
-
-#define DEFINE_RCU_BH_SYNC(name)	\
-	__DEFINE_RCU_SYNC(name, RCU_BH_SYNC)
+#define	DEFINE_RCU_SYNC(name)	\
+	struct rcu_sync name = __RCU_SYNC_INITIALIZER(name)
 
 #endif /* _LINUX_RCU_SYNC_H_ */

kernel/locking/percpu-rwsem.c
@@ -17,7 +17,7 @@ int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
 		return -ENOMEM;
 
 	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
-	rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
+	rcu_sync_init(&sem->rss);
 	__init_rwsem(&sem->rw_sem, name, rwsem_key);
 	rcuwait_init(&sem->writer);
 	sem->readers_block = 0;

kernel/rcu/sync.c
@@ -10,65 +10,20 @@
 #include <linux/rcu_sync.h>
 #include <linux/sched.h>
 
-#ifdef CONFIG_PROVE_RCU
-#define __INIT_HELD(func)	.held = func,
-#else
-#define __INIT_HELD(func)
-#endif
-
-static const struct {
-	void (*sync)(void);
-	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
-	void (*wait)(void);
-#ifdef CONFIG_PROVE_RCU
-	int  (*held)(void);
-#endif
-} gp_ops[] = {
-	[RCU_SYNC] = {
-		.sync = synchronize_rcu,
-		.call = call_rcu,
-		.wait = rcu_barrier,
-		__INIT_HELD(rcu_read_lock_held)
-	},
-	[RCU_SCHED_SYNC] = {
-		.sync = synchronize_rcu,
-		.call = call_rcu,
-		.wait = rcu_barrier,
-		__INIT_HELD(rcu_read_lock_sched_held)
-	},
-	[RCU_BH_SYNC] = {
-		.sync = synchronize_rcu,
-		.call = call_rcu,
-		.wait = rcu_barrier,
-		__INIT_HELD(rcu_read_lock_bh_held)
-	},
-};
-
 enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
 enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
 
 #define	rss_lock	gp_wait.lock
 
-#ifdef CONFIG_PROVE_RCU
-void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
-{
-	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
-			 "suspicious rcu_sync_is_idle() usage");
-}
-
-EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
-#endif
-
 /**
  * rcu_sync_init() - Initialize an rcu_sync structure
  * @rsp: Pointer to rcu_sync structure to be initialized
- * @type: Flavor of RCU with which to synchronize rcu_sync structure
  */
-void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
+void rcu_sync_init(struct rcu_sync *rsp)
 {
 	memset(rsp, 0, sizeof(*rsp));
 	init_waitqueue_head(&rsp->gp_wait);
-	rsp->gp_type = type;
 }
 
 /**
@@ -114,7 +69,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
 
 	WARN_ON_ONCE(need_wait && need_sync);
 	if (need_sync) {
-		gp_ops[rsp->gp_type].sync();
+		synchronize_rcu();
 		rsp->gp_state = GP_PASSED;
 		wake_up_all(&rsp->gp_wait);
 	} else if (need_wait) {
@@ -167,7 +122,7 @@ static void rcu_sync_func(struct rcu_head *rhp)
 		 * to catch a later GP.
 		 */
 		rsp->cb_state = CB_PENDING;
-		gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
+		call_rcu(&rsp->cb_head, rcu_sync_func);
 	} else {
 		/*
 		 * We're at least a GP after rcu_sync_exit(); eveybody will now
@@ -195,7 +150,7 @@ void rcu_sync_exit(struct rcu_sync *rsp)
 	if (!--rsp->gp_count) {
 		if (rsp->cb_state == CB_IDLE) {
 			rsp->cb_state = CB_PENDING;
-			gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
+			call_rcu(&rsp->cb_head, rcu_sync_func);
 		} else if (rsp->cb_state == CB_PENDING) {
 			rsp->cb_state = CB_REPLAY;
 		}
@@ -220,7 +175,7 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
 	spin_unlock_irq(&rsp->rss_lock);
 
 	if (cb_state != CB_IDLE) {
-		gp_ops[rsp->gp_type].wait();
+		rcu_barrier();
 		WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
 	}
 }

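For illustration only (not part of this patch): a sketch of the reader/writer pairing the consolidated API supports, loosely mirroring percpu-rwsem. The names my_rss, my_count, my_reader_fastpath(), my_writer_begin(), and my_writer_end() are hypothetical; the point is that readers may now sit in any flavor of RCU read-side critical section, while the update side always boils down to synchronize_rcu(), call_rcu(), and rcu_barrier().

#include <linux/percpu.h>
#include <linux/rcu_sync.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

static DEFINE_RCU_SYNC(my_rss);		/* hypothetical rcu_sync instance */
static DEFINE_PER_CPU(int, my_count);	/* hypothetical per-CPU fastpath data */

/* Reader: any RCU read-side critical section now satisfies rcu_sync_is_idle(). */
static bool my_reader_fastpath(void)
{
	bool done = false;

	rcu_read_lock();
	if (rcu_sync_is_idle(&my_rss)) {
		this_cpu_inc(my_count);		/* lock-free fast path */
		done = true;
	}
	rcu_read_unlock();
	return done;	/* false: caller must take its slow path */
}

/* Writer side: force readers off their fast paths around an update. */
static void my_writer_begin(void)
{
	rcu_sync_enter(&my_rss);	/* may block for a grace period */
}

static void my_writer_end(void)
{
	rcu_sync_exit(&my_rss);		/* fast path restored after a later GP */
}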