commit 4b3dc9679c

Nobody seems to be producing !SMP systems anymore, so this is just becoming a source of kernel bugs, particularly if people want to use coherent DMA with non-shared pages.

This patch forces CONFIG_SMP=y for arm64, removing a modest amount of code in the process.

Signed-off-by: Will Deacon <will.deacon@arm.com>
102 lines · 2.8 KiB · C
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

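/*
 * Thin wrappers around the corresponding AArch64 instructions: SEV
 * (send event), WFE (wait for event), WFI (wait for interrupt), ISB
 * (instruction synchronization barrier), and the DMB/DSB memory
 * barriers, which take a shareability-domain/access-type qualifier
 * such as sy, ish or oshld.
 */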
#define sev()		asm volatile("sev" : : : "memory")
#define wfe()		asm volatile("wfe" : : : "memory")
#define wfi()		asm volatile("wfi" : : : "memory")

#define isb()		asm volatile("isb" : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

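/*
 * Mandatory barriers backing the generic mb()/rmb()/wmb() API: full
 * DSBs, strong enough to order accesses to device memory as well as
 * normal memory.
 */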
#define mb()		dsb(sy)
#define rmb()		dsb(ld)
#define wmb()		dsb(st)

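/*
 * Barriers for coherent DMA: DMBs restricted to the outer-shareable
 * domain, ordering accesses against observers outside the CPU cluster
 * (e.g. a DMA-capable device) without the cost of a full DSB.
 */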
#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)

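/*
 * SMP barriers: DMBs restricted to the inner-shareable domain, which
 * is sufficient to order accesses between CPUs.
 */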
#define smp_mb()	dmb(ish)
#define smp_rmb()	dmb(ishld)
#define smp_wmb()	dmb(ishst)

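/*
 * smp_store_release() stores v to *p with release semantics, using the
 * size-appropriate AArch64 STLR{B,H} instruction: all prior memory
 * accesses are observed before the store itself.
 */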
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("stlrb %w1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	case 2:								\
		asm volatile ("stlrh %w1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	case 4:								\
		asm volatile ("stlr %w1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	case 8:								\
		asm volatile ("stlr %1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	}								\
} while (0)

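/*
 * smp_load_acquire() loads from *p with acquire semantics, using the
 * size-appropriate AArch64 LDAR{B,H} instruction: the load is observed
 * before all subsequent memory accesses.
 */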
#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1;						\
	compiletime_assert_atomic_type(*p);				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("ldarb %w0, %1"				\
			: "=r" (___p1) : "Q" (*p) : "memory");		\
		break;							\
	case 2:								\
		asm volatile ("ldarh %w0, %1"				\
			: "=r" (___p1) : "Q" (*p) : "memory");		\
		break;							\
	case 4:								\
		asm volatile ("ldar %w0, %1"				\
			: "=r" (___p1) : "Q" (*p) : "memory");		\
		break;							\
	case 8:								\
		asm volatile ("ldar %0, %1"				\
			: "=r" (___p1) : "Q" (*p) : "memory");		\
		break;							\
	}								\
	___p1;								\
})

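/*
 * Illustrative sketch (not part of this file): acquire/release pairs
 * are typically used for flag-guarded publication; the variable names
 * below are hypothetical.
 *
 *	// CPU 0				// CPU 1
 *	data = 42;				while (!smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);			cpu_relax();
 *						assert(data == 42);
 *
 * The release on CPU 0 guarantees the write to data is visible before
 * the write to ready; the acquire on CPU 1 guarantees data is read
 * only after ready has been observed.
 */

/*
 * The architecture orders address-dependent loads, so the
 * read-dependency barriers are no-ops on arm64.
 */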
#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

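/*
 * smp_store_mb() performs the store and then a full SMP barrier, as
 * the generic memory model requires for this primitive.
 */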
#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
#define nop()		asm volatile("nop");

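/*
 * Hooks for ordering around atomics that do not themselves imply a
 * barrier; both expand to a full SMP barrier here.
 */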
#define smp_mb__before_atomic()	smp_mb()
#define smp_mb__after_atomic()	smp_mb()

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_BARRIER_H */