#ifndef __SPARC64_BARRIER_H
#define __SPARC64_BARRIER_H

/* These are here in an effort to more fully work around Spitfire Errata
 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur. Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)
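
/* For illustration only (not additional code): membar_safe("#StoreLoad")
 * expands to the instruction sequence
 *
 *	ba,pt	%xcc, 1f
 *	 membar	#StoreLoad
 *	1:
 *
 * i.e. the barrier sits in the delay slot of an always-taken,
 * predicted-taken branch, which avoids the mispredicted-branch
 * window described in the errata note above.
 */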

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")

#define read_barrier_depends()		do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar_safe("#StoreLoad"); } while(0)
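
/* A minimal usage sketch (illustrative, not part of this header):
 * store a value and then issue a full barrier, so that later accesses
 * by this CPU cannot be reordered ahead of the store:
 *
 *	set_mb(current->state, TASK_RUNNING);
 */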

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#endif

#define smp_read_barrier_depends()	do { } while(0)

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
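
/* A minimal pairing sketch (hypothetical variables, assuming the TSO
 * model noted above): a writer publishes data before setting a flag
 * with release semantics, and a reader that observes the flag with
 * acquire semantics is guaranteed to also observe the data:
 *
 *	writer				reader
 *	------				------
 *	data = compute();		while (!smp_load_acquire(&flag))
 *	smp_store_release(&flag, 1);		cpu_relax();
 *					consume(data);
 */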

#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()
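
/* An illustrative sketch (hypothetical structure, not from this file):
 * because atomic RMW operations already provide sufficient ordering
 * under TSO, a compiler barrier is enough on either side of a
 * non-value-returning atomic:
 *
 *	obj->done = 1;
 *	smp_mb__before_atomic();
 *	atomic_inc(&obj->refcount);
 */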

#endif /* !(__SPARC64_BARRIER_H) */