f99a250cb6

NOP sequences tend to get used for padding out alternative sections and
uarch-specific pipeline flushes in errata workarounds. This patch adds
macros for generating these sequences, both as inline asm blocks and as
strings suitable for embedding directly in other asm blocks.

Signed-off-by: Will Deacon <will.deacon@arm.com>
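A minimal sketch of the two forms (illustrative only, not part of the
patch; the function name is hypothetical):

/* Sketch of both macro forms; example_flush() is a made-up name. */
static inline void example_flush(void)
{
	/* Inline form: emits four NOP instructions. */
	nops(4);

	/* String form: embeds the NOPs inside a larger asm block. */
	asm volatile("isb\n" __nops(2));
}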
115 lines · 3.0 KiB · C
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

#define __nops(n)	".rept	" #n "\nnop\n.endr\n"
#define nops(n)		asm volatile(__nops(n))

/* Hint instructions: send event, wait for event, wait for interrupt. */
#define sev()		asm volatile("sev" : : : "memory")
#define wfe()		asm volatile("wfe" : : : "memory")
#define wfi()		asm volatile("wfi" : : : "memory")

#define isb()		asm volatile("isb" : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

#define mb()		dsb(sy)
#define rmb()		dsb(ld)
#define wmb()		dsb(st)

#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)

#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)

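/*
 * stlr (store-release) and ldar (load-acquire) below are one-way
 * barriers: a store-release orders all earlier accesses before the
 * store, and a load-acquire orders the load before all later accesses.
 */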
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("stlrb %w1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	case 2:								\
		asm volatile ("stlrh %w1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	case 4:								\
		asm volatile ("stlr %w1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	case 8:								\
		asm volatile ("stlr %1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	}								\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	union { typeof(*p) __val; char __c[1]; } __u;			\
	compiletime_assert_atomic_type(*p);				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("ldarb %w0, %1"				\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*p) : "memory");				\
		break;							\
	case 2:								\
		asm volatile ("ldarh %w0, %1"				\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*p) : "memory");				\
		break;							\
	case 4:								\
		asm volatile ("ldar %w0, %1"				\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*p) : "memory");				\
		break;							\
	case 8:								\
		asm volatile ("ldar %0, %1"				\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*p) : "memory");				\
		break;							\
	}								\
	__u.__val;							\
})

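/*
 * Illustrative pairing (hypothetical "flag" and "data" variables, not
 * part of this file): once the consumer's load-acquire of flag reads 1,
 * the producer's write to data is guaranteed to be visible.
 *
 *	producer:	WRITE_ONCE(data, 42);
 *			smp_store_release(&flag, 1);
 *	consumer:	while (!smp_load_acquire(&flag))
 *				cpu_relax();
 *			r = READ_ONCE(data);
 */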
#define smp_cond_load_acquire(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	typeof(*ptr) VAL;						\
	for (;;) {							\
		VAL = smp_load_acquire(__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	VAL;								\
})

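/*
 * Hypothetical usage sketch (not from this file): wait for a lock word
 * to drop to zero; __cmpwait_relaxed lets the CPU idle (e.g. in wfe)
 * until the location is observed to change, instead of busy-polling.
 *
 *	val = smp_cond_load_acquire(&lock->val, VAL == 0);
 */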
#include <asm-generic/barrier.h>

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_BARRIER_H */