commit 57a6566799
The current arm64 __cmpxchg_double{_mb} implementations carry out the
compare exchange by first comparing the old values passed in to the
values read from the pointer provided and by stashing the cumulative
bitwise difference in a 64-bit register.

By comparing the register content against 0, it is possible to detect if
the values read differ from the old values passed in, so that the compare
exchange knows whether it has to bail out or carry on completing the
operation with the exchange.

Given the current implementation, to report the cmpxchg operation
status, the __cmpxchg_double{_mb} functions should return the stashed
64-bit bitwise difference so that the caller can detect cmpxchg failure
by comparing the return value against 0.

The current implementation declares the return value as an int, so the
64-bit value stashing the bitwise difference is truncated before being
returned to the __cmpxchg_double{_mb} callers; any bitwise difference
confined to the top 32 bits therefore goes undetected, triggering false
positives and subsequent kernel failures.

This patch fixes the issue by declaring the arm64 __cmpxchg_double{_mb}
return values as a long, so that the bitwise difference is
properly propagated on failure, restoring the expected behaviour.
Fixes: e9a4b79565 ("arm64: cmpxchg_dbl: patch in lse instructions when supported by the CPU")
Cc: <stable@vger.kernel.org> # 4.3+
Cc: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
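
For context, a minimal userspace sketch of the failure mode the patch describes (plain C, not kernel code; the values old1/mem1 are made up for illustration): when the cumulative XOR of the old and observed values differs only in the upper 32 bits, narrowing the 64-bit result to an int makes it compare equal to zero, so a failed compare is misreported as a success.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t old1 = 0x0000000012345678ULL;
	/* The value observed in memory differs only in the upper 32 bits. */
	uint64_t mem1 = 0xdead000012345678ULL;

	uint64_t diff = old1 ^ mem1;	/* 0xdead000000000000: non-zero */
	int truncated = (int)diff;	/* keeps only the low 32 bits: 0 */

	printf("failure detected via 64-bit result: %d\n", diff != 0);      /* prints 1 */
	printf("failure detected via int result:    %d\n", truncated != 0); /* prints 0 */
	return 0;
}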
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value). This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */
#define ATOMIC_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))				\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "\n"				\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%w0, %2\n"						\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	stxr	%w1, %w0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(atomic_##op);

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE int							\
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "_return" #name "\n"		\
"	prfm	pstl1strm, %2\n"					\
"1:	ld" #acq "xr	%w0, %2\n"					\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	st" #rel "xr	%w1, %w0, %2\n"					\
"	cbnz	%w1, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: cl);								\
									\
	return result;							\
}									\
__LL_SC_EXPORT(atomic_##op##_return##name);

#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)

#define ATOMIC_OPS_RLX(...)						\
	ATOMIC_OPS(__VA_ARGS__)						\
	ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)
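/*
 * ATOMIC_OPS_RLX(op, asm_op) therefore generates atomic_##op() plus the
 * fully-ordered, _relaxed, _acquire and _release variants of
 * atomic_##op##_return().
 */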
ATOMIC_OPS_RLX(add, add)
ATOMIC_OPS_RLX(sub, sub)

ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)

#undef ATOMIC_OPS_RLX
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define ATOMIC64_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%0, %2\n"						\
"	" #asm_op "	%0, %0, %3\n"					\
"	stxr	%w1, %0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(atomic64_##op);

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))	\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return" #name "\n"		\
"	prfm	pstl1strm, %2\n"					\
"1:	ld" #acq "xr	%0, %2\n"					\
"	" #asm_op "	%0, %0, %3\n"					\
"	st" #rel "xr	%w1, %0, %2\n"					\
"	cbnz	%w1, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: cl);								\
									\
	return result;							\
}									\
__LL_SC_EXPORT(atomic64_##op##_return##name);

#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)

#define ATOMIC64_OPS_RLX(...)						\
	ATOMIC64_OPS(__VA_ARGS__)					\
	ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS_RLX(add, add)
ATOMIC64_OPS_RLX(sub, sub)

ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)

#undef ATOMIC64_OPS_RLX
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"	prfm	pstl1strm, %2\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.lt	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl)			\
__LL_SC_INLINE unsigned long						\
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
				     unsigned long old,			\
				     unsigned long new))		\
{									\
	unsigned long tmp, oldval;					\
									\
	asm volatile(							\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n"		\
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
	"	cbnz	%" #w "[tmp], 2f\n"				\
	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
	"2:"								\
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [old] "Lr" (old), [new] "r" (new)				\
	: cl);								\
									\
	return oldval;							\
}									\
__LL_SC_EXPORT(__cmpxchg_case_##name);
__CMPXCHG_CASE(w, b,     1,        ,  ,  ,         )
__CMPXCHG_CASE(w, h,     2,        ,  ,  ,         )
__CMPXCHG_CASE(w,  ,     4,        ,  ,  ,         )
__CMPXCHG_CASE( ,  ,     8,        ,  ,  ,         )
__CMPXCHG_CASE(w, b, acq_1,        , a,  , "memory")
__CMPXCHG_CASE(w, h, acq_2,        , a,  , "memory")
__CMPXCHG_CASE(w,  , acq_4,        , a,  , "memory")
__CMPXCHG_CASE( ,  , acq_8,        , a,  , "memory")
__CMPXCHG_CASE(w, b, rel_1,        ,  , l, "memory")
__CMPXCHG_CASE(w, h, rel_2,        ,  , l, "memory")
__CMPXCHG_CASE(w,  , rel_4,        ,  , l, "memory")
__CMPXCHG_CASE( ,  , rel_8,        ,  , l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w, h,  mb_2, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, dmb ish,  , l, "memory")
__CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory")

#undef __CMPXCHG_CASE
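/*
 * __cmpxchg_double{,_mb}() return the cumulative 64-bit bitwise difference
 * between the old values passed in and the values read back: zero means both
 * words matched and the new values were stored. The return type must
 * therefore be long, not int, or a mismatch confined to the top 32 bits
 * would be truncated away (see the patch description above).
 */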
#define __CMPXCHG_DBL(name, mb, rel, cl)				\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
				      unsigned long old2,		\
				      unsigned long new1,		\
				      unsigned long new2,		\
				      volatile void *ptr))		\
{									\
	unsigned long tmp, ret;						\
									\
	asm volatile("// __cmpxchg_double" #name "\n"			\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxp	%0, %1, %2\n"					\
	"	eor	%0, %0, %3\n"					\
	"	eor	%1, %1, %4\n"					\
	"	orr	%1, %0, %1\n"					\
	"	cbnz	%1, 2f\n"					\
	"	st" #rel "xp	%w0, %5, %6, %2\n"			\
	"	cbnz	%w0, 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
	: cl);								\
									\
	return ret;							\
}									\
__LL_SC_EXPORT(__cmpxchg_double##name);
__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LL_SC_H */
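
To make the return-value contract concrete, here is a hedged, freestanding C sketch. The function try_update_pair() is a hypothetical stand-in for __cmpxchg_double(), written with plain loads and stores (no atomicity), used only to show that a caller treats a zero result as success and any non-zero bits, including bits above bit 31, as a failed compare; an int return type would discard exactly those upper bits.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-in for __cmpxchg_double(): returns the cumulative
 * bitwise difference between the expected and observed values, and only
 * stores the new pair when that difference is zero. Not atomic; it merely
 * models the return-value contract of the real routine.
 */
static long try_update_pair(uint64_t *p, uint64_t old1, uint64_t old2,
			    uint64_t new1, uint64_t new2)
{
	uint64_t diff = (p[0] ^ old1) | (p[1] ^ old2);

	if (diff == 0) {
		p[0] = new1;
		p[1] = new2;
	}
	return (long)diff;	/* returned as long so the full 64-bit difference survives */
}

int main(void)
{
	uint64_t pair[2] = { 0xdead000012345678ULL, 0x1ULL };

	/* The expected first word is stale only in its upper 32 bits. */
	if (try_update_pair(pair, 0x0000000012345678ULL, 0x1ULL, 0x2ULL, 0x3ULL))
		printf("compare failed, pair left untouched\n");
	else
		printf("pair updated\n");
	return 0;
}

On arm64 (an LP64 target) a long is 64 bits wide, so returning long preserves the whole difference, which is the point of the patch above.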