linux/arch/arm64/include/asm/atomic.h
Will Deacon 3a0310eb36 arm64: atomics: fix grossly inconsistent asm constraints for exclusives
Our uses of inline asm constraints for atomic operations are fairly
wild and varied. We basically need to guarantee the following:

  1. Any instructions with barrier implications
     (load-acquire/store-release) have a "memory" clobber

  2. When performing exclusive accesses, the addressing mode is generated
     using the "Q" constraint

  3. Atomic blocks which use the condition flags have a "cc" clobber

This patch addresses these concerns; as well as fixing the semantics of
the code, it stops GCC complaining about impossible asm constraints.
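
As a rough illustration (using a hypothetical helper, not code taken
from the patch itself), an exclusive sequence that satisfies all three
rules looks like this:

  static inline void example_add(int i, int *p)
  {
  	unsigned long tmp;
  	int result;

  	asm volatile("// example_add\n"
  "1:	ldxr	%w0, %2\n"	/* address generated via "Q" (rule 2) */
  "	add	%w0, %w0, %w3\n"
  "	stxr	%w1, %w0, %2\n"
  "	cbnz	%w1, 1b"
  	: "=&r" (result), "=&r" (tmp), "+Q" (*p)
  	: "Ir" (i)
  	: "cc");		/* condition-flag clobber (rule 3) */
  }

An acquire/release variant (ldaxr/stlxr) would additionally list
"memory" in the clobber list (rule 1).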

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2013-02-11 18:16:41 +00:00


/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"		/* load-exclusive v->counter */
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"		/* store-exclusive; %w1 == 0 on success */
"	cbnz	%w1, 1b"		/* lost the exclusive monitor: retry */
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc");
}

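/*
 * As atomic_add, but built on ldaxr/stlxr so the update has acquire/release
 * (barrier) semantics, hence the "memory" clobber. Returns the new value.
 */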
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1:	ldaxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc", "memory");

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub_return\n"
"1:	ldaxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc", "memory");

	return result;
}

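/*
 * Atomically compare ptr->counter with 'old' and, if they match, store
 * 'new'. Returns the value that was read, which equals 'old' on success.
 */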
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	asm volatile("// atomic_cmpxchg\n"
"1:	ldaxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stlxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc", "memory");

	return oldval;
}

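/* Atomically clear the bits set in 'mask' from the word at 'addr'. */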
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	asm volatile("// atomic_clear_mask\n"
"1:	ldxr	%0, %2\n"
"	bic	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
	: "Ir" (mask)
	: "cc");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
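
/*
 * Add 'a' to v unless v currently holds 'u', retrying on contention via
 * atomic_cmpxchg. Returns the old value of v.
 */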
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }
#define atomic64_read(v) (*(volatile long long *)&(v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i))
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add\n"
"1:	ldxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc");
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add_return\n"
"1:	ldaxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc", "memory");

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub\n"
"1:	ldxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc");
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub_return\n"
"1:	ldaxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc", "memory");

	return result;
}

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldaxr	%1, %2\n"
"	cmp	%1, %3\n"
"	b.ne	2f\n"
"	stlxr	%w0, %4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc", "memory");

	return oldval;
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
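
/*
 * Decrement v->counter unless the result would be negative. Returns the
 * decremented value; a negative return means the counter was left
 * unchanged.
 */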
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldaxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}

static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}

#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
#endif
#endif