commit 18aecc2b64

This support was partially present in the existing code (look for
"__tilegx__" ifdefs), but with this change you can build a working
kernel using the TILE-Gx toolchain and ARCH=tilegx.  Most of these
files are new, generally adding a foo_64.c file where previously
there was just a foo_32.c file.  The ARCH=tilegx directive redirects
to arch/tile, not arch/tilegx, using the existing SRCARCH mechanism
in the top-level Makefile.

Changes to existing files:

- <asm/bitops.h> and <asm/bitops_32.h> changed to factor the include
  of <asm-generic/bitops/non-atomic.h> into the common header (see the
  sketch below).

- <asm/compat.h> and arch/tile/kernel/compat.c changed to remove the
  "const" markers I had put on compat_sys_execve() when trying to
  match some recent similar changes to the non-compat execve.  It
  turns out the compat version wasn't "upgraded" to use const.

- <asm/opcode-tile_64.h> and <asm/opcode_constants_64.h> were
  previously included accidentally, with the 32-bit contents.  Now
  they have the proper 64-bit contents.

Finally, I had to hack the existing hacky drivers/input/input-compat.h
to add yet another "#ifdef" for INPUT_COMPAT_TEST (same as x86_64).

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Dmitry Torokhov <dmitry.torokhov@gmail.com> [drivers/input]
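For context on the bitops item above, a minimal sketch of the factoring, assuming the usual arch/tile header layout; the __tilegx__/else branching and header names other than <asm-generic/bitops/non-atomic.h> are assumptions, not the literal diff:

    /* arch/tile/include/asm/bitops.h -- sketch, not the literal diff */
    #ifdef __tilegx__
    #include <asm/bitops_64.h>          /* assumed: new 64-bit variant */
    #else
    #include <asm/bitops_32.h>
    #endif

    /* previously pulled in only by the width-specific header */
    #include <asm-generic/bitops/non-atomic.h>

The point of the change is simply that the generic non-atomic bitops include now lives once in the common header rather than in the 32-bit variant.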
170 lines · 4.9 KiB · C
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <asm/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) ((v)->counter = (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */
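
/*
 * On TILE-Gx the compare value for the cmpexch instructions is taken
 * from the SPR_CMPEXCH_VALUE special-purpose register, which is why
 * each cmpxchg routine below writes that SPR before issuing
 * cmpexch4/cmpexch.
 */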
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
        int val;
        __insn_mtspr(SPR_CMPEXCH_VALUE, o);
        smp_mb();  /* barrier for proper semantics */
        val = __insn_cmpexch4((void *)&v->counter, n);
        smp_mb();  /* barrier for proper semantics */
        return val;
}

static inline int atomic_xchg(atomic_t *v, int n)
{
        int val;
        smp_mb();  /* barrier for proper semantics */
        val = __insn_exch4((void *)&v->counter, n);
        smp_mb();  /* barrier for proper semantics */
        return val;
}
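
/*
 * Routines like atomic_add() below do not return a value, so under the
 * Linux rules above they are not required to act as barriers and can
 * use a bare fetchadd with no smp_mb().
 */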
static inline void atomic_add(int i, atomic_t *v)
{
        __insn_fetchadd4((void *)&v->counter, i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        int val;
        smp_mb();  /* barrier for proper semantics */
        val = __insn_fetchadd4((void *)&v->counter, i) + i;
        barrier();  /* the "+ i" above will wait on memory */
        return val;
}
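
/*
 * atomic_add_unless(): atomically add @a to @v unless @v is already @u;
 * returns non-zero if the add was performed.  The cmpxchg loop retries
 * until it either installs the new value or observes @u.
 */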
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int guess, oldval = v->counter;
        do {
                if (oldval == u)
                        break;
                guess = oldval;
                oldval = atomic_cmpxchg(v, guess, guess + a);
        } while (guess != oldval);
        return oldval != u;
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v) ((v)->counter)
#define atomic64_set(v, i) ((v)->counter = (i))

static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
{
        long val;
        smp_mb();  /* barrier for proper semantics */
        __insn_mtspr(SPR_CMPEXCH_VALUE, o);
        val = __insn_cmpexch((void *)&v->counter, n);
        smp_mb();  /* barrier for proper semantics */
        return val;
}

static inline long atomic64_xchg(atomic64_t *v, long n)
{
        long val;
        smp_mb();  /* barrier for proper semantics */
        val = __insn_exch((void *)&v->counter, n);
        smp_mb();  /* barrier for proper semantics */
        return val;
}

static inline void atomic64_add(long i, atomic64_t *v)
{
        __insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
        long val;       /* must be long: the 64-bit fetchadd result would be truncated by an int */
        smp_mb();  /* barrier for proper semantics */
        val = __insn_fetchadd((void *)&v->counter, i) + i;
        barrier();  /* the "+ i" above will wait on memory */
        return val;
}

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long guess, oldval = v->counter;
        do {
                if (oldval == u)
                        break;
                guess = oldval;
                oldval = atomic64_cmpxchg(v, guess, guess + a);
        } while (guess != oldval);
        return oldval != u;
}
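
/*
 * The remaining 64-bit operations are derived from the add primitives;
 * the fetchadd addend is signed, so subtraction is just addition of
 * -(i).
 */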
#define atomic64_sub_return(i, v)   atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)          atomic64_add(-(i), (v))
#define atomic64_inc_return(v)      atomic64_add_return(1, (v))
#define atomic64_dec_return(v)      atomic64_sub_return(1, (v))
#define atomic64_inc(v)             atomic64_add(1, (v))
#define atomic64_dec(v)             atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)    (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)    (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)    atomic64_add_unless((v), 1, 0)

/* Atomic dec and inc don't implement barrier, so provide them if needed. */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec()  smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc()  smp_mb()
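
/*
 * Generic xchg()/cmpxchg() dispatch on operand size: 4-byte objects go
 * through the 32-bit atomic_* routines above, 8-byte objects through
 * atomic_long_*, and any other size fails the build via the
 * __*_called_with_bad_pointer() stubs.
 */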
#define xchg(ptr, x)                                                    \
        ((typeof(*(ptr)))                                               \
         ((sizeof(*(ptr)) == sizeof(atomic_t)) ?                        \
          atomic_xchg((atomic_t *)(ptr), (long)(x)) :                   \
          (sizeof(*(ptr)) == sizeof(atomic_long_t)) ?                   \
          atomic_long_xchg((atomic_long_t *)(ptr), (long)(x)) :         \
          __xchg_called_with_bad_pointer()))

#define cmpxchg(ptr, o, n)                                              \
        ((typeof(*(ptr)))                                               \
         ((sizeof(*(ptr)) == sizeof(atomic_t)) ?                        \
          atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) :     \
          (sizeof(*(ptr)) == sizeof(atomic_long_t)) ?                   \
          atomic_long_cmpxchg((atomic_long_t *)(ptr), (long)(o), (long)(n)) : \
          __cmpxchg_called_with_bad_pointer()))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */