locking/arch: Rename set_mb() to smp_store_mb()
Since set_mb() is really about an smp_mb() -- not an IO/DMA barrier like mb() -- rename it to match the recent smp_load_acquire() and smp_store_release().

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

commit b92b8b35a2 (parent ab3f02fc23)
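For illustration, a minimal user-space model of the renamed primitive, with C11 atomics standing in for the kernel's WRITE_ONCE() and smp_mb() (a sketch of the semantics, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

/* User-space stand-ins: WRITE_ONCE() as a relaxed atomic store,
 * smp_mb() as a sequentially consistent fence. */
#define WRITE_ONCE(var, val) \
	atomic_store_explicit(&(var), (val), memory_order_relaxed)
#define smp_mb() \
	atomic_thread_fence(memory_order_seq_cst)

/* The renamed helper: assign the value, then a full memory barrier,
 * matching the generic definition in the diff below. */
#define smp_store_mb(var, value) \
	do { WRITE_ONCE(var, value); smp_mb(); } while (0)

int main(void)
{
	_Atomic int state = 0;

	smp_store_mb(state, 1);	/* later loads cannot pass this store */
	printf("state = %d\n",
	       atomic_load_explicit(&state, memory_order_relaxed));
	return 0;
}
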
Documentation/memory-barriers.txt
@@ -1662,7 +1662,7 @@ CPU from reordering them.
 
 There are some more advanced barrier functions:
 
- (*) set_mb(var, value)
+ (*) smp_store_mb(var, value)
 
      This assigns the value to the variable and then inserts a full memory
      barrier after it, depending on the function.  It isn't guaranteed to
@@ -1975,7 +1975,7 @@ after it has altered the task state:
 	CPU 1
 	===============================
 	set_current_state();
-	  set_mb();
+	  smp_store_mb();
 	    STORE current->state
 	    <general barrier>
 	LOAD event_indicated
@@ -2016,7 +2016,7 @@ between the STORE to indicate the event and the STORE to set TASK_RUNNING:
 	CPU 1				CPU 2
 	===============================	===============================
 	set_current_state();		STORE event_indicated
-	  set_mb();			wake_up();
+	  smp_store_mb();		wake_up();
 	    STORE current->state	  <write barrier>
 	    <general barrier>		  STORE current->state
 	LOAD event_indicated

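The diagrams above are the heart of the change: the sleeper's store to current->state must be ordered before its load of event_indicated, or a wake-up can be missed. Below is a user-space sketch of that pairing using pthreads and C11 atomics; task_state and event_indicated are illustrative stand-ins, not kernel symbols.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int task_state;		/* 1 = "sleeping", 0 = "running" */
static _Atomic int event_indicated;

static void *sleeper(void *arg)
{
	(void)arg;
	/* smp_store_mb(current->state, TASK_INTERRUPTIBLE): the fence keeps
	 * the load of event_indicated below from moving above the store. */
	atomic_store_explicit(&task_state, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* <general barrier> */

	if (!atomic_load_explicit(&event_indicated, memory_order_relaxed)) {
		/* a real sleeper would block here; this sketch just
		 * marks itself runnable again */
		atomic_store_explicit(&task_state, 0, memory_order_relaxed);
	}
	return NULL;
}

static void *waker(void *arg)
{
	(void)arg;
	atomic_store_explicit(&event_indicated, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* <write barrier> */
	atomic_store_explicit(&task_state, 0, memory_order_relaxed); /* wake */
	return NULL;
}

int main(void)
{
	pthread_t s, w;

	pthread_create(&s, NULL, sleeper, NULL);
	pthread_create(&w, NULL, waker, NULL);
	pthread_join(s, NULL);
	pthread_join(w, NULL);
	printf("task_state=%d event_indicated=%d\n",
	       atomic_load(&task_state), atomic_load(&event_indicated));
	return 0;
}
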
arch/arm/include/asm/barrier.h
@@ -81,7 +81,7 @@ do {									\
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
 
-#define set_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_mb__before_atomic()	smp_mb()
 #define smp_mb__after_atomic()	smp_mb()

arch/arm64/include/asm/barrier.h
@@ -114,7 +114,7 @@ do {									\
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
 
-#define set_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #define nop()		asm volatile("nop");
 
 #define smp_mb__before_atomic()	smp_mb()

arch/ia64/include/asm/barrier.h
@@ -77,12 +77,7 @@ do {									\
 	___p1;								\
 })
 
-/*
- * XXX check on this ---I suspect what Linus really wants here is
- * acquire vs release semantics but we can't discuss this stuff with
- * Linus just yet.  Grrr...
- */
-#define set_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
 
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure

arch/metag/include/asm/barrier.h
@@ -84,7 +84,7 @@ static inline void fence(void)
 #define read_barrier_depends()		do { } while (0)
 #define smp_read_barrier_depends()	do { } while (0)
 
-#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_store_release(p, v)						\
 do {									\

arch/mips/include/asm/barrier.h
@@ -112,7 +112,7 @@
 #define __WEAK_LLSC_MB		"		\n"
 #endif
 
-#define set_mb(var, value) \
+#define smp_store_mb(var, value) \
 	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")

arch/powerpc/include/asm/barrier.h
@@ -34,7 +34,7 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define set_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
 
 #ifdef __SUBARCH_HAS_LWSYNC
 #    define SMPWMB      LWSYNC

arch/s390/include/asm/barrier.h
@@ -36,7 +36,7 @@
 #define smp_mb__before_atomic()		smp_mb()
 #define smp_mb__after_atomic()		smp_mb()
 
-#define set_mb(var, value)		do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)		do { WRITE_ONCE(var, value); mb(); } while (0)
 
 #define smp_store_release(p, v)						\
 do {									\

arch/sh/include/asm/barrier.h
@@ -32,7 +32,7 @@
 #define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #endif
 
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #include <asm-generic/barrier.h>
 

arch/sparc/include/asm/barrier_64.h
@@ -40,7 +40,7 @@ do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
 #define dma_rmb()	rmb()
 #define dma_wmb()	wmb()
 
-#define set_mb(__var, __value) \
+#define smp_store_mb(__var, __value) \
 	do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
 
 #ifdef CONFIG_SMP

arch/x86/include/asm/barrier.h
@@ -35,12 +35,12 @@
 #define smp_mb()	mb()
 #define smp_rmb()	dma_rmb()
 #define smp_wmb()	barrier()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else /* !SMP */
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
-#define set_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
 #endif /* SMP */
 
 #define read_barrier_depends()		do { } while (0)

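Note that x86 (like sh above) folds the store and the barrier into a single xchg(): a locked exchange is itself a full memory barrier on x86, so no separate fence instruction is needed. A sketch of the equivalence using the GCC/Clang __atomic builtins (illustration only, not the kernel's implementation):

#include <stdio.h>

static int var;

/* Store, then an explicit full fence (mfence on x86). */
static void store_mb_fence(int value)
{
	__atomic_store_n(&var, value, __ATOMIC_RELAXED);	/* WRITE_ONCE() */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);		/* smp_mb() */
}

/* The same ordering from one instruction: a (locked) exchange. */
static void store_mb_xchg(int value)
{
	(void)__atomic_exchange_n(&var, value, __ATOMIC_SEQ_CST);
}

int main(void)
{
	store_mb_fence(1);
	store_mb_xchg(2);
	printf("var = %d\n", var);
	return 0;
}
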
arch/x86/um/asm/barrier.h
@@ -39,7 +39,8 @@
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
-#define set_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
+
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
 
 #define read_barrier_depends()		do { } while (0)
 #define smp_read_barrier_depends()	do { } while (0)

fs/select.c
@@ -189,7 +189,7 @@ static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	 * doesn't imply write barrier and the users expect write
 	 * barrier semantics on wakeup functions.  The following
 	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
-	 * and is paired with set_mb() in poll_schedule_timeout.
+	 * and is paired with smp_store_mb() in poll_schedule_timeout.
 	 */
 	smp_wmb();
 	pwq->triggered = 1;
@@ -244,7 +244,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 	/*
 	 * Prepare for the next iteration.
 	 *
-	 * The following set_mb() serves two purposes.  First, it's
+	 * The following smp_store_mb() serves two purposes.  First, it's
 	 * the counterpart rmb of the wmb in pollwake() such that data
 	 * written before wake up is always visible after wake up.
 	 * Second, the full barrier guarantees that triggered clearing
@@ -252,7 +252,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 	 * this problem doesn't exist for the first iteration as
 	 * add_wait_queue() has full barrier semantics.
 	 */
-	set_mb(pwq->triggered, 0);
+	smp_store_mb(pwq->triggered, 0);
 
 	return rc;
 }

include/asm-generic/barrier.h
@@ -66,8 +66,8 @@
 #define smp_read_barrier_depends()	do { } while (0)
 #endif
 
-#ifndef set_mb
-#define set_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
+#ifndef smp_store_mb
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic

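The generic definition is guarded by #ifndef, so an architecture header that defines smp_store_mb() before including asm-generic/barrier.h keeps its own version. A self-contained sketch of that fallback pattern; the WRITE_ONCE()/mb() stand-ins below use GCC/Clang builtins and are for illustration only:

#include <stdio.h>

/* Stand-ins for the kernel's WRITE_ONCE() and mb(). */
#define WRITE_ONCE(var, val)	__atomic_store_n(&(var), (val), __ATOMIC_RELAXED)
#define mb()			__atomic_thread_fence(__ATOMIC_SEQ_CST)

/* An "arch" header would go here; if it defined smp_store_mb() itself,
 * the generic fallback below would be skipped. */

/* Generic fallback, as in include/asm-generic/barrier.h. */
#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
#endif

int main(void)
{
	int flag = 0;

	smp_store_mb(flag, 1);	/* store, then full barrier */
	printf("flag = %d\n", flag);
	return 0;
}
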
include/linux/sched.h
@@ -252,7 +252,7 @@ extern char ___assert_task_state[1 - 2*!!(
 #define set_task_state(tsk, state_value)			\
 	do {							\
 		(tsk)->task_state_change = _THIS_IP_;		\
-		set_mb((tsk)->state, (state_value));		\
+		smp_store_mb((tsk)->state, (state_value));		\
 	} while (0)
 
 /*
@@ -274,7 +274,7 @@ extern char ___assert_task_state[1 - 2*!!(
 #define set_current_state(state_value)				\
 	do {							\
 		current->task_state_change = _THIS_IP_;		\
-		set_mb(current->state, (state_value));		\
+		smp_store_mb(current->state, (state_value));		\
 	} while (0)
 
 #else
@@ -282,7 +282,7 @@ extern char ___assert_task_state[1 - 2*!!(
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
 #define set_task_state(tsk, state_value)		\
-	set_mb((tsk)->state, (state_value))
+	smp_store_mb((tsk)->state, (state_value))
 
 /*
  * set_current_state() includes a barrier so that the write of current->state
@@ -298,7 +298,7 @@ extern char ___assert_task_state[1 - 2*!!(
 #define __set_current_state(state_value)		\
 	do { current->state = (state_value); } while (0)
 #define set_current_state(state_value)			\
-	set_mb(current->state, (state_value))
+	smp_store_mb(current->state, (state_value))
 
 #endif

kernel/futex.c
@@ -2055,7 +2055,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
 {
 	/*
 	 * The task state is guaranteed to be set before another task can
-	 * wake it. set_current_state() is implemented using set_mb() and
+	 * wake it. set_current_state() is implemented using smp_store_mb() and
 	 * queue_me() calls spin_unlock() upon completion, both serializing
 	 * access to the hash list and forcing another memory barrier.
 	 */

kernel/locking/qspinlock_paravirt.h
@@ -175,7 +175,7 @@ static void pv_wait_node(struct mcs_spinlock *node)
 		 *
 		 * Matches the xchg() from pv_kick_node().
 		 */
-		set_mb(pn->state, vcpu_halted);
+		smp_store_mb(pn->state, vcpu_halted);
 
 		if (!READ_ONCE(node->locked))
			pv_wait(&pn->state, vcpu_halted);

kernel/sched/wait.c
@@ -341,7 +341,7 @@ long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
 	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
 	 * an event.
 	 */
-	set_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
+	smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
 
 	return timeout;
 }
@@ -354,7 +354,7 @@ int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	 * doesn't imply write barrier and the users expects write
 	 * barrier semantics on wakeup functions.  The following
 	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
-	 * and is paired with set_mb() in wait_woken().
+	 * and is paired with smp_store_mb() in wait_woken().
 	 */
 	smp_wmb(); /* C */
 	wait->flags |= WQ_FLAG_WOKEN;

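wait_woken() shows the same pairing once more: the waiter clears WQ_FLAG_WOKEN with a store-plus-full-barrier (B), against the smp_wmb() (C) in woken_wake_function(), so the waiter either observes the condition or observes the flag. A condensed user-space model of just that flag protocol, with C11 atomics; the names mirror the kernel's for readability, and the non-atomic read-modify-write of flags mimics the kernel code:

#include <stdatomic.h>
#include <stdio.h>

#define WQ_FLAG_WOKEN	0x02	/* value as in the kernel's wait.h */

static _Atomic unsigned int flags;
static _Atomic int condition;

/* Waiter side, as in wait_woken(): clear WOKEN, then a full barrier (B),
 * so the condition check cannot be reordered before the clear. */
static int check_event(void)
{
	unsigned int f = atomic_load_explicit(&flags, memory_order_relaxed);

	atomic_store_explicit(&flags, f & ~WQ_FLAG_WOKEN,
			      memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);		/* B */
	return atomic_load_explicit(&condition, memory_order_relaxed);
}

/* Waker side, as in woken_wake_function(): publish the condition, issue
 * the write barrier (C), then set WOKEN. */
static void wake(void)
{
	atomic_store_explicit(&condition, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);		/* C */
	atomic_fetch_or_explicit(&flags, WQ_FLAG_WOKEN,
				 memory_order_relaxed);
}

int main(void)
{
	wake();
	int cond = check_event();

	printf("condition=%d flags=%#x\n", cond, atomic_load(&flags));
	return 0;
}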