forked from Minki/linux
cd4d09ec6f
Move them to a separate header and have the following dependency: x86/cpufeatures.h <- x86/processor.h <- x86/cpufeature.h This makes it easier to use the header in asm code and not include the whole cpufeature.h and add guards for asm. Suggested-by: H. Peter Anvin <hpa@zytor.com> Signed-off-by: Borislav Petkov <bp@suse.de> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1453842730-28463-5-git-send-email-bp@alien8.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
42 lines
1.0 KiB
C
#ifndef _ASM_UM_BARRIER_H_
#define _ASM_UM_BARRIER_H_

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeatures.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32

/*
 * 32-bit: the SSE fence instructions are not guaranteed to exist, so
 * patch in a locked add on the stack (a full barrier on all x86) and
 * let alternative() replace it with the fence when the CPU feature
 * (XMM2 for mfence/lfence, XMM for sfence) is present.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)

#else /* CONFIG_X86_32 */

/* 64-bit: SSE2 is architectural, so the fence instructions are always safe. */
#define mb() asm volatile("mfence" : : : "memory")
#define rmb() asm volatile("lfence" : : : "memory")
#define wmb() asm volatile("sfence" : : : "memory")

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_PPRO_FENCE
/* Pentium Pro errata workaround: DMA reads need a real read barrier. */
#define dma_rmb() rmb()
#else /* CONFIG_X86_PPRO_FENCE */
/* x86 loads are ordered against loads; a compiler barrier suffices. */
#define dma_rmb() barrier()
#endif /* CONFIG_X86_PPRO_FENCE */
/* x86 stores are ordered against stores; a compiler barrier suffices. */
#define dma_wmb() barrier()

/* Pull in generic definitions for anything not overridden above. */
#include <asm-generic/barrier.h>

#endif /* _ASM_UM_BARRIER_H_ */