forked from Minki/linux
144b9c135b
eieio provides only store–store ordering. When used to order an unlock operation, loads may leak out of the critical region. This is potentially buggy; one example is if a user wants to atomically read a couple of values. We can solve this with an lwsync, which orders everything except store–load. I removed the (now unused) EIEIO_ON_SMP macros and the C versions isync_on_smp and eieio_on_smp, now that we don't use them. I also removed some old comments that were used to identify inline spinlocks in assembly; they don't make sense now that our locks are out of line. Another interesting thing was that read_unlock was using an eieio even though the rest of the spinlock code had already been converted to use lwsync. Signed-off-by: Anton Blanchard <anton@samba.org> Signed-off-by: Paul Mackerras <paulus@samba.org>
37 lines
640 B
C
37 lines
640 B
C
#ifndef _ASM_POWERPC_SYNCH_H
#define _ASM_POWERPC_SYNCH_H
#ifdef __KERNEL__

#include <linux/stringify.h>

/*
 * lwsync is only implemented on 64-bit PowerPC; 32-bit builds fall back
 * to a full sync (see the LWSYNC definition below).
 */
#ifdef __powerpc64__
#define __SUBARCH_HAS_LWSYNC
#endif
/*
 * LWSYNC: lightweight sync where the subarch provides it — orders
 * everything except store->load, which is sufficient for lock/unlock
 * ordering.  Where unavailable, fall back to the full sync barrier.
 */
#ifdef __SUBARCH_HAS_LWSYNC
# define LWSYNC lwsync
#else
# define LWSYNC sync
#endif
/*
 * Barrier fragments for use inside inline-asm templates.  On UP kernels
 * they expand to nothing: no other CPU can observe the ordering, so the
 * barriers are pure overhead.
 */
#ifdef CONFIG_SMP
#define ISYNC_ON_SMP "\n\tisync"
#define LWSYNC_ON_SMP __stringify(LWSYNC) "\n"
#else
#define ISYNC_ON_SMP
#define LWSYNC_ON_SMP
#endif
/*
 * eieio - Enforce In-order Execution of I/O.
 *
 * Provides store->store ordering only; loads are not ordered and may
 * leak past it (which is why it must not be used to order an unlock —
 * see the commit message).  The "memory" clobber stops the compiler
 * reordering memory accesses across the barrier.
 */
static inline void eieio(void)
{
	__asm__ __volatile__ ("eieio" : : : "memory");
}
/*
 * isync - instruction synchronization barrier.
 *
 * NOTE(review): presumably a context-synchronizing instruction that
 * prevents subsequent instructions executing before prior ones complete
 * — confirm against the PowerPC ISA.  The "memory" clobber keeps the
 * compiler from moving memory accesses across it.
 */
static inline void isync(void)
{
	__asm__ __volatile__ ("isync" : : : "memory");
}
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYNCH_H */