mirror of https://github.com/torvalds/linux.git
commit 770345adc3
Some atomics can be implemented in several different ways, e.g. FULL/ACQUIRE/RELEASE ordered atomics can be implemented in terms of RELAXED atomics, and ACQUIRE/RELEASE/RELAXED can be implemented in terms of FULL ordered atomics. Other atomics are optional, and don't exist in some configurations (e.g. not all architectures implement the 128-bit cmpxchg ops).

Subsequent patches will require that architectures define a preprocessor symbol for any atomic (or ordering variant) which is optional. This will make the fallback ifdeffery more robust, and simplify future changes.

Add the required definitions to arch/sh.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-11-mark.rutland@arm.com
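A minimal sketch of the kind of fallback ifdeffery the message refers to, assuming the kernel's usual atomic_t and __always_inline definitions are in scope. The symbol and helper names below follow the arch_atomic_* convention used in this file, but the real fallbacks live in the generated include/linux/atomic/ headers and may be structured differently:

/*
 * Illustrative only: if the architecture did not define the preprocessor
 * symbol advertising an ACQUIRE variant, a generic layer can fall back to
 * the fully ordered op, which is at least as strong.
 */
#ifndef arch_atomic_add_return_acquire
static __always_inline int
arch_atomic_add_return_acquire(int i, atomic_t *v)
{
	return arch_atomic_add_return(i, v);	/* FULL ordering implies ACQUIRE */
}
#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
#endif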
98 lines
2.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "\n"			\
"	" #op "	%1, %0					\n"		\
"	movco.l	%0, @%2					\n"		\
"	bf	1b					\n"		\
	: "=&z" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long temp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "_return	\n"		\
"	" #op "	%1, %0					\n"		\
"	movco.l	%0, @%2					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long res, temp;					\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%3, %0		! atomic_fetch_" #op "	\n"		\
"	mov %0, %1					\n"		\
"	" #op "	%2, %0					\n"		\
"	movco.l	%0, @%3					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp), "=&r" (res)					\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return res;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* __ASM_SH_ATOMIC_LLSC_H */
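For illustration, ATOMIC_OPS(add) above generates three functions; written out by hand (adjacent string literals concatenated, whitespace simplified), the _return variant is roughly:

static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

The movli.l/movco.l pair forms the LL/SC loop (both operate on r0, hence the "=&z" output constraint), bf 1b retries if the conditional store failed (T bit clear, which is why "t" is in the clobber list), and the trailing synco provides the full ordering expected of the non-relaxed return/fetch ops.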