/* atomic.S: These things are too big to do inline.
 *
 * Copyright (C) 1999, 2007, 2012 David S. Miller (davem@davemloft.net)
 */

#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
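
/* A note on the helpers used below (see <asm/backoff.h>): on SMP
 * kernels, BACKOFF_SETUP/BACKOFF_SPIN implement an exponential delay
 * between retries of a contended cas, and BACKOFF_LABEL(2f, 1b)
 * branches to the spin code at 2:; on UP builds the loop simply
 * retries at 1: directly.
 */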

	.text

	/* Two versions of the atomic routines, one that
	 * does not return a value and does not perform
	 * memory barriers, and a second which returns
	 * a value and does the barriers.
	 */
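
	/* Every routine below is built on the same compare-and-swap
	 * retry loop.  As a rough C sketch (backoff and barrier details
	 * omitted, OP standing for add or sub):
	 *
	 *	do {
	 *		old = *ptr;
	 *		new = old OP increment;
	 *	} while (cas(ptr, old, new) != old);
	 *
	 * where cas() stands for the cas/casx instruction: it stores new
	 * only if *ptr still equals old, and returns the value it found.
	 */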

ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
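	/* 32-bit variant: lduw and cas operate on a 32-bit word, so the
	 * retry check uses the 32-bit condition codes (%icc).  cas leaves
	 * the value it actually found in memory in %g7.
	 */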
1:	lduw	[%o1], %g1
	add	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
	 nop
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic_add)

ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
1:	lduw	[%o1], %g1
	sub	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
	 nop
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic_sub)

ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
1:	lduw	[%o1], %g1
	add	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
	 add	%g1, %o0, %g1
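	/* %g1 now holds the updated 32-bit value (recomputed in the
	 * branch delay slot above); sra by 0 sign-extends it into the
	 * 64-bit %o0 return register.
	 */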
	retl
	 sra	%g1, 0, %o0
2:	BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic_add_ret)

ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
1:	lduw	[%o1], %g1
	sub	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
	 sub	%g1, %o0, %g1
	retl
	 sra	%g1, 0, %o0
2:	BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic_sub_ret)

ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
1:	ldx	[%o1], %g1
	add	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
	 nop
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_add)

ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
1:	ldx	[%o1], %g1
	sub	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
	 nop
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_sub)

ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
1:	ldx	[%o1], %g1
	add	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
	 nop
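	/* 64-bit variant: the full result is returned in %o0, so no
	 * sign extension is needed; the sum is recomputed directly in
	 * the retl delay slot.
	 */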
	retl
	 add	%g1, %o0, %o0
2:	BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_add_ret)

ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
1:	ldx	[%o1], %g1
	sub	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
	 nop
	retl
	 sub	%g1, %o0, %o0
2:	BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_sub_ret)

ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
	BACKOFF_SETUP(%o2)
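	/* Decrement the counter only while it is still positive: if the
	 * loaded value is <= 0 the casx is skipped, but value - 1 is
	 * still returned so the caller can test the sign of the result.
	 */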
1:	ldx	[%o0], %g1
	brlez,pn %g1, 3f
	 sub	%g1, 1, %g7
	casx	[%o0], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
	 nop
3:	retl
	 sub	%g1, 1, %o0
2:	BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_dec_if_positive)