Use __always_inline for atomic fallback wrappers.

When building for size (CC_OPTIMIZE_FOR_SIZE), some compilers appear to
be less inclined to inline even relatively small static inline functions
that are assumed to be inlinable, such as atomic ops. This can cause
problems, for example in UACCESS regions.

While the fallback wrappers aren't pure wrappers, they are trivial
nonetheless, and the function they wrap should determine the final
inlining policy.

For x86 tinyconfig we observe:
 - vmlinux baseline:   1315988
 - vmlinux with patch: 1315928 (-60 bytes)

[ tglx: Cherry-picked from KCSAN ]

Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marco Elver <elver@google.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
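As background, __always_inline in the kernel expands to the compiler's
always_inline function attribute, which removes the compiler's discretion
entirely; a plain "static inline" is only a hint and may be ignored under
-Os. The snippet below is a simplified sketch, not copied verbatim from the
tree (the wrapper body is modeled on the acquire-ordering fallbacks), of the
pattern the message describes: the wrapper adds only trivial glue, so it is
forced inline and the operation it wraps determines the final inlining
policy.

/* What the annotation boils down to (see include/linux/compiler_types.h): */
#define __always_inline inline __attribute__((__always_inline__))

/*
 * Sketch of a trivial fallback wrapper: the acquire variant only adds
 * an ordering fence around the relaxed op, so forcing the wrapper
 * inline leaves the real inlining decision to the wrapped
 * atomic_add_return_relaxed().
 */
static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
	int ret = atomic_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}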
cat << EOF
/**
 * ${atomic}_fetch_add_unless - add unless the number is already a given value
 * @v: pointer of type ${atomic}_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns original value of @v
 */
static __always_inline ${int}
${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
	${int} c = ${atomic}_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!${atomic}_try_cmpxchg(v, &c, c + a));

	return c;
}
EOF
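For reference, the shell template above (it appears to be the
fetch_add_unless fallback template used by the atomic generation scripts)
emits one such wrapper per atomic type by substituting ${atomic} and ${int}.
The following is a sketch of the expansion for ${atomic}=atomic and
${int}=int, i.e. a mechanical substitution of the template rather than a
verbatim copy of the generated header:

/**
 * atomic_fetch_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns original value of @v
 */
static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!atomic_try_cmpxchg(v, &c, c + a));

	return c;
}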