mirror of
https://github.com/torvalds/linux.git
synced 2024-12-16 08:02:17 +00:00
6dc9658fa1
The macrology in cmpxchg.h was designed to allow arbitrary pointer and integer values to be passed through the routines. To support cmpxchg() on 64-bit values on the 32-bit tilepro architecture, we used the idiom "(typeof(val))(typeof(val-val))". This way, in the "size 8" branch of the switch, when the underlying cmpxchg routine returns a 64-bit quantity, we cast it first to a typeof(val-val) quantity (i.e. size_t if "val" is a pointer) with no warnings about casting between pointers and integers of different sizes, then cast onwards to typeof(val), again with no warnings. If val is not a pointer type, the additional cast is a no-op. We can't replace the typeof(val-val) cast with (for example) unsigned long, since then if "val" is really a 64-bit type, we cast away the high bits. HOWEVER, this fails with current gcc (through 4.7 at least) if "val" is a pointer to an incomplete type. Unfortunately gcc isn't smart enough to realize that "val - val" will always be a size_t type even if it's an incomplete type pointer. Accordingly, I've reworked the way we handle the casting. We have given up the ability to use cmpxchg() on 64-bit values on tilepro, which is OK in the kernel since we should use cmpxchg64() explicitly on such values anyway. As a result, I can just use simple "unsigned long" casts internally. As I reworked it, I realized it would be cleaner to move the architecture-specific conditionals for cmpxchg and xchg out of the atomic.h headers and into cmpxchg.h, and then use the cmpxchg() and xchg() primitives directly in atomic.h and elsewhere. This allowed the cmpxchg.h header to stand on its own without relying on the implicit include of it that is performed by <asm/atomic.h>. It also allowed collapsing the atomic_xchg/atomic_cmpxchg routines from atomic_{32,64}.h into atomic.h. I improved the tests that guard the allowed size of the arguments to the routines to use a __compiletime_error() test.
(By avoiding the use of BUILD_BUG, I could include cmpxchg.h into bitops.h as well and use the macros there, which is otherwise impossible due to include order dependency issues.) The tilepro _atomic_xxx internal methods were previously set up to take atomic_t and atomic64_t arguments, which isn't as convenient with the new model, so I modified them to take int or u64 arguments, which is consistent with how they used the arguments internally anyway, so provided some nice simplification there too. Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
120 lines
3.4 KiB
C
120 lines
3.4 KiB
C
/*
|
|
* Copyright 2011 Tilera Corporation. All Rights Reserved.
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation, version 2.
|
|
*
|
|
* This program is distributed in the hope that it will be useful, but
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
* NON INFRINGEMENT. See the GNU General Public License for
|
|
* more details.
|
|
*
|
|
* Do not include directly; use <linux/atomic.h>.
|
|
*/
|
|
|
|
#ifndef _ASM_TILE_ATOMIC_64_H
|
|
#define _ASM_TILE_ATOMIC_64_H
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
#include <asm/barrier.h>
|
|
#include <arch/spr_def.h>
|
|
|
|
/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

/*
 * atomic_set() is a plain store to the counter; the macro itself
 * implies no memory barrier.
 */
#define atomic_set(v, i) ((v)->counter = (i))
|
|
|
|
/*
|
|
* The smp_mb() operations throughout are to support the fact that
|
|
* Linux requires memory barriers before and after the operation,
|
|
* on any routine which updates memory and returns a value.
|
|
*/
|
|
|
|
/*
 * Atomically add @i to @v->counter using the hardware fetch-and-add
 * instruction.  No value is returned, so no barriers are issued here.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	void *addr = (void *)&v->counter;

	__insn_fetchadd4(addr, i);
}
|
|
|
|
/*
 * Atomically add @i to @v->counter and return the resulting value.
 * A full barrier precedes the update; the data dependency of the
 * returned sum on the fetch-and-add result orders the tail end.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int sum;

	smp_mb(); /* barrier for proper semantics */
	sum = i + __insn_fetchadd4((void *)&v->counter, i);
	barrier(); /* the addition above will wait on memory */
	return sum;
}
|
|
|
|
/*
 * Atomically add @a to @v->counter unless the counter holds @u.
 * Returns the counter value observed before any successful update
 * (so a return of @u means no addition was performed).
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int seen = v->counter;

	for (;;) {
		int expect;

		if (seen == u)
			break;
		expect = seen;
		seen = cmpxchg(&v->counter, expect, expect + a);
		if (seen == expect)
			break;
	}
	return seen;
}
|
|
|
|
/* Now the true 64-bit operations. */

/* Static initializer: atomic64_t x = ATOMIC64_INIT(0); */
#define ATOMIC64_INIT(i)	{ (i) }

/* Plain load and plain store of the counter; no barriers implied. */
#define atomic64_read(v) ((v)->counter)
#define atomic64_set(v, i) ((v)->counter = (i))
|
|
|
|
/*
 * Atomically add @i to the 64-bit counter via the hardware
 * fetch-and-add instruction.  No value is returned, so no barriers.
 */
static inline void atomic64_add(long i, atomic64_t *v)
{
	void *addr = (void *)&v->counter;

	__insn_fetchadd(addr, i);
}
|
|
|
|
/*
 * Atomically add @i to the 64-bit counter and return the new value.
 *
 * BUG FIX: the accumulator was declared "int", which truncated the
 * 64-bit result of __insn_fetchadd() + i to 32 bits before it was
 * sign-extended back to the "long" return type.  It must be "long"
 * so the full 64-bit sum is returned.
 */
static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;

	smp_mb(); /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier(); /* the "+ i" above will wait on memory */
	return val;
}
|
|
|
|
/*
 * Atomically add @a to the 64-bit counter unless it holds @u.
 * Returns nonzero if the addition was performed (i.e. the counter
 * was observed to differ from @u), zero otherwise.
 */
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long seen = v->counter;

	for (;;) {
		long expect;

		if (seen == u)
			break;
		expect = seen;
		seen = cmpxchg(&v->counter, expect, expect + a);
		if (seen == expect)
			break;
	}
	return seen != u;
}
|
|
|
|
/* Subtraction and inc/dec are expressed in terms of the add primitives. */
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))

/*
 * Predicates built on the value-returning forms, and so inherit
 * their smp_mb() ordering.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)

/* Increment unless zero; nonzero return means the increment happened. */
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
|
|
|
|
/* Atomic dec and inc don't implement barrier, so provide them if needed. */
/* Each of these expands to a full smp_mb(). */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()

/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG
|
|
|
|
#endif /* !__ASSEMBLY__ */
|
|
|
|
#endif /* _ASM_TILE_ATOMIC_64_H */
|