mirror of https://github.com/torvalds/linux.git
commit 770345adc3
Some atomics can be implemented in several different ways, e.g. FULL/ACQUIRE/RELEASE ordered atomics can be implemented in terms of RELAXED atomics, and ACQUIRE/RELEASE/RELAXED can be implemented in terms of FULL ordered atomics. Other atomics are optional, and don't exist in some configurations (e.g. not all architectures implement the 128-bit cmpxchg ops).

Subsequent patches will require that architectures define a preprocessor symbol for any atomic (or ordering variant) which is optional. This will make the fallback ifdeffery more robust, and simplify future changes.

Add the required definitions to arch/sh.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-11-mark.rutland@arm.com
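
For context (a sketch added in this rewrite, not part of the patch): defining each optional op's preprocessor symbol to itself lets the generic layer test for the op with #ifdef instead of carrying per-architecture knowledge. A minimal illustration of the pattern in C, with a simplified fallback for demonstration only (the kernel's real fallbacks are generated into include/linux/atomic/):

	#ifndef arch_atomic_fetch_add
	/*
	 * Illustrative fallback only: if the architecture did not provide
	 * arch_atomic_fetch_add(), derive it from arch_atomic_add_return(),
	 * which returns the *new* value; the old value is then new - i.
	 */
	static inline int arch_atomic_fetch_add(int i, atomic_t *v)
	{
		return arch_atomic_add_return(i, v) - i;
	}
	#define arch_atomic_fetch_add arch_atomic_fetch_add
	#endif
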
96 lines
3.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_ATOMIC_GRB_H
#define __ASM_SH_ATOMIC_GRB_H
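
/*
 * Annotation (not in the original file): these helpers use the
 * kernel-side gUSA ("g User Space Atomicity") convention, here in its
 * register-bank ("GRB") variant. r0 is pointed at the end of the
 * critical sequence, r1 saves the stack pointer, and r15 is loaded
 * with the negative length of the sequence ("LOGIN"). While r15 is
 * negative, the exception paths treat the sequence as atomic and,
 * roughly speaking, rewind an interrupted sequence to its start
 * (r0 + r15); restoring r15 ("LOGOUT") ends the region. The
 * load/modify/store below therefore behaves as a single atomic
 * operation. gUSA is a uniprocessor technique; SMP parts use the
 * LL/SC implementation instead.
 */
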
#define ATOMIC_OP(op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	int tmp; \
	\
	__asm__ __volatile__ ( \
		"   .align 2              \n\t" \
		"   mova    1f,   r0      \n\t" /* r0 = end point */ \
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */ \
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */ \
		"   mov.l  @%1,   %0      \n\t" /* load old value */ \
		" " #op "   %2,   %0      \n\t" /* $op */ \
		"   mov.l   %0,   @%1     \n\t" /* store new value */ \
		"1: mov     r1,   r15     \n\t" /* LOGOUT */ \
		: "=&r" (tmp), \
		  "+r"  (v) \
		: "r" (i) \
		: "memory" , "r0", "r1"); \
}
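
/*
 * Annotation (not in the original file): for reference, ATOMIC_OP(add)
 * expands to roughly:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		int tmp;
 *		__asm__ __volatile__ ( ...
 *			"   mov.l  @%1,   %0      \n\t"	// tmp = v->counter
 *			"   add     %2,   %0      \n\t"	// tmp += i
 *			"   mov.l   %0,   @%1     \n\t"	// v->counter = tmp
 *		... );
 *	}
 *
 * Each SH instruction is 2 bytes, so the three-instruction critical
 * sequence is 6 bytes, which is why LOGIN loads r15 with #-6.
 */
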
#define ATOMIC_OP_RETURN(op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
	int tmp; \
	\
	__asm__ __volatile__ ( \
		"   .align 2              \n\t" \
		"   mova    1f,   r0      \n\t" /* r0 = end point */ \
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */ \
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */ \
		"   mov.l  @%1,   %0      \n\t" /* load old value */ \
		" " #op "   %2,   %0      \n\t" /* $op */ \
		"   mov.l   %0,   @%1     \n\t" /* store new value */ \
		"1: mov     r1,   r15     \n\t" /* LOGOUT */ \
		: "=&r" (tmp), \
		  "+r"  (v) \
		: "r" (i) \
		: "memory" , "r0", "r1"); \
	\
	return tmp; \
}
#define ATOMIC_FETCH_OP(op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	int res, tmp; \
	\
	__asm__ __volatile__ ( \
		"   .align 2              \n\t" \
		"   mova    1f,   r0      \n\t" /* r0 = end point */ \
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */ \
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */ \
		"   mov.l  @%2,   %0      \n\t" /* load old value */ \
		"   mov     %0,   %1      \n\t" /* save old value */ \
		" " #op "   %3,   %0      \n\t" /* $op */ \
		"   mov.l   %0,   @%2     \n\t" /* store new value */ \
		"1: mov     r1,   r15     \n\t" /* LOGOUT */ \
		: "=&r" (tmp), "=&r" (res), "+r" (v) \
		: "r" (i) \
		: "memory" , "r0", "r1"); \
	\
	return res; \
}
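
/*
 * Annotation (not in the original file): the extra "res" output shifts
 * the operand numbering in ATOMIC_FETCH_OP: %0 = tmp, %1 = res, %2 = v,
 * %3 = i. That is why the fetch variant's asm uses @%2 and %3 where the
 * two simpler macros use @%1 and %2.
 */
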
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* __ASM_SH_ATOMIC_GRB_H */
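
Usage note (annotation, not part of the file): kernel code normally reaches these ops through the generic atomic_add()/atomic_add_return()/atomic_fetch_add() wrappers; the arch_ prefix marks the architecture backend. A minimal semantics sketch with a hypothetical caller:

	atomic_t v = ATOMIC_INIT(0);
	int new, old;

	arch_atomic_add(1, &v);              /* v.counter == 1; returns void */
	new = arch_atomic_add_return(2, &v); /* returns the new value: new == 3 */
	old = arch_atomic_fetch_add(4, &v);  /* returns the old value: old == 3; v.counter == 7 */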