x86: Generate cmpxchg build failures

Rework the x86 cmpxchg() implementation to generate build failures
when used on improper types.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <1254771187.21044.22.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
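In practice the new default: arms turn a silent mis-compile into a link
failure: __xchg_wrong_size() and __cmpxchg_wrong_size() are declared but
never defined, so any expansion that reaches them leaves an unresolved
symbol. A minimal sketch of the failure mode on a 32-bit build (the
variable and function names here are hypothetical, not part of the patch):

#include <linux/types.h>
/* xchg() comes from the x86 cmpxchg_*.h headers patched below. */

static u64 shared;	/* 8 bytes: there is no xchgq on 32-bit x86 */

static void hypothetical_caller(void)
{
	/*
	 * sizeof(*(&shared)) == 8 matches no case in the 32-bit
	 * __xchg() switch, so this expands to a call of the undefined
	 * __xchg_wrong_size() and the build fails at link time.
	 *
	 * The old inline-function __xchg() had no default: arm; an
	 * unhandled size just skipped the exchange and handed back
	 * the input value, truncated through unsigned long.
	 */
	u64 old = xchg(&shared, 42ULL);
	(void)old;
}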
This commit is contained in:
parent 63312b6a6f
commit f3834b9ef6
arch/x86/include/asm/cmpxchg_32.h

@@ -8,14 +8,50 @@
 *       you need to test for the feature in boot_cpu_data.
 */

#define xchg(ptr, v)							\
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
extern void __xchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	  but generally the primitive is invalid, *ptr is output argument. --ANK
 */

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

#define __xchg(x, ptr, size)						\
({									\
	__typeof(*(ptr)) __x = (x);					\
	switch (size) {							\
	case 1:								\
		asm volatile("xchgb %b0,%1"				\
			     : "=q" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	case 2:								\
		asm volatile("xchgw %w0,%1"				\
			     : "=r" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	case 4:								\
		asm volatile("xchgl %0,%1"				\
			     : "=r" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	default:							\
		__xchg_wrong_size();					\
	}								\
	__x;								\
})

#define xchg(ptr, v)							\
	__xchg((v), (ptr), sizeof(*ptr))

/*
 * The semantics of XCHGCMP8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
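A side effect of replacing the unsigned long inline function with a
statement-expression macro is that operands now stay in __typeof(*(ptr))
the whole way: there are no casts through unsigned long to hide a size
mismatch, and the right width is selected purely from sizeof(*(ptr)). A
hedged sketch with hypothetical variables, assuming the macro above:

#include <linux/types.h>

static u16 port_state;
static void *slot;

static void typed_users(void)
{
	/* __x is declared as u16 inside the macro, so no truncating
	 * casts are needed and the 2-byte xchgw case is picked at
	 * compile time. */
	u16 prev16 = xchg(&port_state, (u16)0xffff);

	/* Pointer-sized values also round-trip in their own type. */
	void *prev_ptr = xchg(&slot, NULL);

	(void)prev16;
	(void)prev_ptr;
}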
@@ -71,57 +107,63 @@ static inline void __set_64bit_var(unsigned long long *ptr,
		       (unsigned int)((value) >> 32))			\
	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	  but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}
extern void __cmpxchg_wrong_size(void);

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
		asm volatile(lock "cmpxchgb %b1,%2"			\
			     : "=a"(__ret)				\
			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	case 2:								\
		asm volatile(lock "cmpxchgw %w1,%2"			\
			     : "=a"(__ret)				\
			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	case 4:								\
		asm volatile(lock "cmpxchgl %1,%2"			\
			     : "=a"(__ret)				\
			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))
#define sync_cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
					    (unsigned long)(n),		\
					    sizeof(*(ptr))))
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))

#define cmpxchg(ptr, old, new)						\
	__cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif

#ifdef CONFIG_X86_CMPXCHG64
@@ -133,94 +175,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
					       (unsigned long long)(n)))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}

/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long old,
					   unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("lock; cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("lock; cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("lock; cmpxchgl %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("cmpxchgl %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}

static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
arch/x86/include/asm/cmpxchg_64.h

@@ -3,9 +3,6 @@

#include <asm/alternative.h> /* Provides LOCK_PREFIX */

#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
						 (ptr), sizeof(*(ptr))))

#define __xg(x) ((volatile long *)(x))

static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
@@ -15,167 +12,118 @@ static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)

#define _set_64bit set_64bit

extern void __xchg_wrong_size(void);
extern void __cmpxchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	  but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}
#define __xchg(x, ptr, size)						\
({									\
	__typeof(*(ptr)) __x = (x);					\
	switch (size) {							\
	case 1:								\
		asm volatile("xchgb %b0,%1"				\
			     : "=q" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	case 2:								\
		asm volatile("xchgw %w0,%1"				\
			     : "=r" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	case 4:								\
		asm volatile("xchgl %k0,%1"				\
			     : "=r" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	case 8:								\
		asm volatile("xchgq %0,%1"				\
			     : "=r" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	default:							\
		__xchg_wrong_size();					\
	}								\
	__x;								\
})

#define xchg(ptr, v)							\
	__xchg((v), (ptr), sizeof(*ptr))

#define __HAVE_ARCH_CMPXCHG 1

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
		asm volatile(lock "cmpxchgb %b1,%2"			\
			     : "=a"(__ret)				\
			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	case 2:								\
		asm volatile(lock "cmpxchgw %w1,%2"			\
			     : "=a"(__ret)				\
			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	case 4:								\
		asm volatile(lock "cmpxchgl %k1,%2"			\
			     : "=a"(__ret)				\
			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	case 8:								\
		asm volatile(lock "cmpxchgq %1,%2"			\
			     : "=a"(__ret)				\
			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __HAVE_ARCH_CMPXCHG 1
#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 8:
		asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}
#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long old,
					   unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("lock; cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("lock; cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("lock; cmpxchgl %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}
#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("cmpxchgl %k1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 8:
		asm volatile("cmpxchgq %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}
#define cmpxchg(ptr, old, new)						\
	__cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))
#define sync_cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
					    (unsigned long)(n),		\
					    sizeof(*(ptr))))

#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})