mirror of https://github.com/torvalds/linux.git
b445e26cbf
In particular, avoid membar instructions in the delay slot of a jmpl instruction.

UltraSPARC-I, II, IIi, and IIe have a bug, documented in the UltraSPARC-IIi User's Manual, Appendix K, Erratum 51.

The long and short of it is that if the IMU unit misses on a branch or jmpl, and there is a store buffer synchronizing membar in the delay slot, the chip can stop fetching instructions. If interrupts are enabled or some other trap is enabled, the chip will unwedge itself, but performance will suffer.

We already had a workaround for this bug in a few spots, but it's better to have the entire tree sanitized for this rule.

Signed-off-by: David S. Miller <davem@davemloft.net>
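To make the erratum concrete, here is a minimal sketch, not taken from the file below (the membar mask is borrowed from its ATOMIC_POST_BARRIER macro and the "99" label is invented for illustration), of the pattern being avoided and the workaround the file uses:

	/* Risky on UltraSPARC-I/II/IIi/IIe (Erratum 51): a store buffer
	 * synchronizing membar sits in the delay slot of the return
	 * jmpl (retl).
	 */
	retl
	 membar	#StoreLoad | #StoreStore

	/* Workaround: put the membar in the delay slot of an
	 * always-taken branch to a bare "retl; nop" stub instead,
	 * so no membar ever lands in a jmpl delay slot.
	 */
	ba,pt	%xcc, 99f
	 membar	#StoreLoad | #StoreStore
99:	retl
	 nop

In atomic.S below the stub is the shared "80:" label defined next to the barrier macros, so ATOMIC_POST_BARRIER branches backward to it with "80b".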
150 lines
3.0 KiB
SPARC assembly
/* $Id: atomic.S,v 1.4 2001/11/18 00:12:56 davem Exp $
 * atomic.S: These things are too big to do inline.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <asm/asi.h>

	.text

	/* Two versions of the atomic routines, one that
	 * does not return a value and does not perform
	 * memory barriers, and a second which returns
	 * a value and does the barriers.
	 */
	.globl	atomic_add
	.type	atomic_add,#function
atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
1:	lduw	[%o1], %g1
	add	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 1b
	 nop
	retl
	 nop
	.size	atomic_add, .-atomic_add

	.globl	atomic_sub
	.type	atomic_sub,#function
atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
1:	lduw	[%o1], %g1
	sub	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 1b
	 nop
	retl
	 nop
	.size	atomic_sub, .-atomic_sub

	/* On SMP we need to use memory barriers to ensure
	 * correct memory operation ordering, nop these out
	 * for uniprocessor.
	 */
#ifdef CONFIG_SMP

#define ATOMIC_PRE_BARRIER	membar #StoreLoad | #LoadLoad;
#define ATOMIC_POST_BARRIER	\
	ba,pt %xcc, 80b;	\
	membar #StoreLoad | #StoreStore

80:	retl
	 nop
#else
#define ATOMIC_PRE_BARRIER
#define ATOMIC_POST_BARRIER
#endif
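
/* How the barrier macros above are used: each *_ret routine below
 * wraps its cas loop with ATOMIC_PRE_BARRIER / ATOMIC_POST_BARRIER.
 * On SMP the post-barrier is an always-taken "ba,pt %xcc, 80b" with
 * the membar in the branch's delay slot, so the routine returns via
 * the shared "80: retl; nop" stub and the membar never sits in the
 * delay slot of a retl (jmpl), per Erratum 51.  On UP both macros
 * expand to nothing and the routine falls through to its own retl.
 */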

	.globl	atomic_add_ret
	.type	atomic_add_ret,#function
atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
	ATOMIC_PRE_BARRIER
1:	lduw	[%o1], %g1
	add	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 1b
	 add	%g7, %o0, %g7
	sra	%g7, 0, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop
	.size	atomic_add_ret, .-atomic_add_ret

	.globl	atomic_sub_ret
	.type	atomic_sub_ret,#function
atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
	ATOMIC_PRE_BARRIER
1:	lduw	[%o1], %g1
	sub	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 1b
	 sub	%g7, %o0, %g7
	sra	%g7, 0, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop
	.size	atomic_sub_ret, .-atomic_sub_ret

	.globl	atomic64_add
	.type	atomic64_add,#function
atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
1:	ldx	[%o1], %g1
	add	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 1b
	 nop
	retl
	 nop
	.size	atomic64_add, .-atomic64_add

	.globl	atomic64_sub
	.type	atomic64_sub,#function
atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
1:	ldx	[%o1], %g1
	sub	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 1b
	 nop
	retl
	 nop
	.size	atomic64_sub, .-atomic64_sub

	.globl	atomic64_add_ret
	.type	atomic64_add_ret,#function
atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
	ATOMIC_PRE_BARRIER
1:	ldx	[%o1], %g1
	add	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 1b
	 add	%g7, %o0, %g7
	mov	%g7, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop
	.size	atomic64_add_ret, .-atomic64_add_ret

	.globl	atomic64_sub_ret
	.type	atomic64_sub_ret,#function
atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
	ATOMIC_PRE_BARRIER
1:	ldx	[%o1], %g1
	sub	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 1b
	 sub	%g7, %o0, %g7
	mov	%g7, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop
	.size	atomic64_sub_ret, .-atomic64_sub_ret