47933ad41a
A number of situations currently require the heavyweight smp_mb(), even though there is no need to order prior stores against later loads. Many architectures have much cheaper ways to handle these situations, but the Linux kernel currently has no portable way to make use of them.

This commit therefore supplies smp_load_acquire() and smp_store_release() to remedy this situation. The new smp_load_acquire() primitive orders the specified load against any subsequent reads or writes, while the new smp_store_release() primitive orders the specified store against any prior reads or writes. These primitives allow array-based circular FIFOs to be implemented without an smp_mb(), and also allow a theoretical hole in rcu_assign_pointer() to be closed at no additional expense on most architectures.

In addition, the RCU experience transitioning from explicit smp_read_barrier_depends() and smp_wmb() to rcu_dereference() and rcu_assign_pointer(), respectively, resulted in substantial improvements in readability. It therefore seems likely that replacing other explicit barriers with smp_load_acquire() and smp_store_release() will provide similar benefits. It appears that roughly half of the explicit barriers in core kernel code might be so replaced.

[Changelog by PaulMck]

Reviewed-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Michael Ellerman <michael@ellerman.id.au>
Cc: Michael Neuling <mikey@neuling.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Victor Kaplansky <VICTORK@il.ibm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Link: http://lkml.kernel.org/r/20131213150640.908486364@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
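The changelog mentions that array-based circular FIFOs can now be built without an explicit smp_mb(). The following is a rough sketch of that idea, not code from this patch: a single-producer/single-consumer ring in which each side publishes its index with smp_store_release() and reads the other side's index with smp_load_acquire(). The names struct ring, ring_put(), ring_get() and RING_SIZE are invented for illustration; the sketch assumes kernel context with <linux/types.h> and <asm/barrier.h> available.

#include <linux/types.h>
#include <asm/barrier.h>

#define RING_SIZE 16			/* must be a power of two */

struct ring {
	unsigned int head;		/* written only by the producer */
	unsigned int tail;		/* written only by the consumer */
	void *buf[RING_SIZE];
};

/* Producer: publish one item, or return false if the ring is full. */
static bool ring_put(struct ring *r, void *item)
{
	unsigned int head = r->head;
	/* Pairs with the consumer's release of ->tail: the slot is free. */
	unsigned int tail = smp_load_acquire(&r->tail);

	if (head - tail >= RING_SIZE)
		return false;		/* full */
	r->buf[head & (RING_SIZE - 1)] = item;
	/* Order the buffer write before making the new slot visible. */
	smp_store_release(&r->head, head + 1);
	return true;
}

/* Consumer: take one item, or return NULL if the ring is empty. */
static void *ring_get(struct ring *r)
{
	unsigned int tail = r->tail;
	/* Pairs with the producer's release of ->head: the item is there. */
	unsigned int head = smp_load_acquire(&r->head);
	void *item;

	if (tail == head)
		return NULL;		/* empty */
	item = r->buf[tail & (RING_SIZE - 1)];
	/* Order the buffer read before handing the slot back for reuse. */
	smp_store_release(&r->tail, tail + 1);
	return item;
}

Each acquire load pairs with the release store on the other CPU, so neither side needs a full memory barrier.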
82 lines
1.9 KiB
C
/*
 * Generic barrier definitions, originally based on MN10300 definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but it serves more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef read_barrier_depends
#define read_barrier_depends()		do { } while (0)
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

#ifndef set_mb
#define set_mb(var, value)  do { (var) = (value); mb(); } while (0)
#endif

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	___p1;								\
})

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
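For reference, a minimal usage sketch (hypothetical, not part of this patch) is the classic flag/data handoff. The producer's smp_store_release() pairs with the consumer's smp_load_acquire(), so neither side needs an explicit smp_mb(); the function and variable names below are invented for illustration.

#include <asm/barrier.h>

static int payload;
static int ready;

static void producer(void)
{
	payload = 42;			/* plain store ... */
	smp_store_release(&ready, 1);	/* ... ordered before the flag */
}

static int consumer(void)
{
	if (smp_load_acquire(&ready))	/* flag load ordered before ... */
		return payload;		/* ... the payload read */
	return -1;			/* hypothetical "not ready" value */
}

On architectures with native acquire/release instructions these macros can later be overridden more cheaply than the generic smp_mb()-based fallback shown in the header above.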