linux/arch/x86/include/asm/spinlock.h
Linus Torvalds 60815cf2e0 kernel: Provide READ_ONCE and ASSIGN_ONCE

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux

Pull ACCESS_ONCE cleanup preparation from Christian Borntraeger:
 "kernel: Provide READ_ONCE and ASSIGN_ONCE

  As discussed on LKML http://marc.info/?i=54611D86.4040306%40de.ibm.com
  ACCESS_ONCE might fail with specific compilers for non-scalar
  accesses.

  Here is a set of patches to tackle that problem.

   The first patch introduces READ_ONCE and ASSIGN_ONCE.  If the data
   structure is larger than the machine word size, memcpy is used and a
   warning is emitted.  The next patches fix up several in-tree users of
   ACCESS_ONCE on non-scalar types.

   This does not yet contain a patch that forces ACCESS_ONCE to work only
   on scalar types.  This is targeted for the next merge window, as
   linux-next already contains new offenders regarding ACCESS_ONCE vs.
   non-scalar types"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux:
  s390/kvm: Replace barrier fixup with READ_ONCE
  arm/spinlock: Replace ACCESS_ONCE with READ_ONCE
  arm64/spinlock: Replace ACCESS_ONCE with READ_ONCE
  mips/gup: Replace ACCESS_ONCE with READ_ONCE
  x86/gup: Replace ACCESS_ONCE with READ_ONCE
  x86/spinlock: Replace ACCESS_ONCE with READ_ONCE
  mm: replace ACCESS_ONCE with READ_ONCE or barriers
  kernel: Provide READ_ONCE and ASSIGN_ONCE
2014-12-20 16:48:59 -08:00
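
For reference, the mechanism the message describes looks roughly like the
sketch below: a condensed, illustrative version of the generic READ_ONCE
implementation, not the exact in-tree code (the compile-time-warning plumbing
for oversized accesses is elided, and barrier()/__u8 etc. are assumed to come
from the usual kernel headers):

    #include <linux/types.h>
    #include <linux/compiler.h>

    static __always_inline void __read_once_size(volatile void *p, void *res, int size)
    {
            switch (size) {
            case 1: *(__u8  *)res = *(volatile __u8  *)p; break;
            case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
            case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
            case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
            default:
                    /* Non-scalar or oversized type: fall back to memcpy. */
                    barrier();
                    __builtin_memcpy((void *)res, (const void *)p, size);
                    barrier();
            }
    }

    #define READ_ONCE(x) \
            ({ typeof(x) __val; __read_once_size(&(x), &__val, sizeof(__val)); __val; })

Used as in the spinlock code below, e.g.
"struct __raw_tickets tmp = READ_ONCE(lock->tickets);", this guarantees a
single, tear-free load for machine-word-sized types, something ACCESS_ONCE's
volatile cast could not guarantee for aggregate types with every compiler.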


#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H
#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif
#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
/*
* On PPro SMP, we use a locked operation to unlock
* (PPro errata 66, 92)
*/
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif
/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD (1 << 15)
extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);
#ifdef CONFIG_PARAVIRT_SPINLOCKS
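/*
 * Record that a waiter has entered the slowpath by setting the slowpath
 * flag (bit 0 of the tail); the unlocker checks this flag to decide
 * whether a halted waiter needs to be kicked.
 */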
static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
        set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
}
#else  /* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
                                                   __ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
                                        __ticket_t ticket)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.tickets.head == lock.tickets.tail;
}
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
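/*
 * An illustrative worked example (assuming 16-bit tickets and
 * TICKET_LOCK_INC == 1, i.e. no paravirt): head_tail == 0x00020002 means
 * head == 2, tail == 2, i.e. unlocked.  xadd'ing { .tail = 1 } returns
 * { head = 2, tail = 2 } to us and leaves head_tail == 0x00030002.  Since
 * inc.head == inc.tail we own the lock; the next contender gets
 * { head = 2, tail = 3 } back and spins until head reaches 3.
 */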
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
        register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

        inc = xadd(&lock->tickets, inc);
        if (likely(inc.head == inc.tail))
                goto out;

        inc.tail &= ~TICKET_SLOWPATH_FLAG;
        for (;;) {
                unsigned count = SPIN_THRESHOLD;

                do {
                        if (READ_ONCE(lock->tickets.head) == inc.tail)
                                goto out;
                        cpu_relax();
                } while (--count);
                __ticket_lock_spinning(lock, inc.tail);
        }
out:
        barrier();      /* make sure nothing creeps before the lock is taken */
}
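/*
 * Single-shot acquire: only attempt the cmpxchg when the lock looks free
 * (head == tail, ignoring the slowpath flag); bumping the tail in the
 * high half of head_tail takes the ticket.
 */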
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        arch_spinlock_t old, new;

        old.tickets = READ_ONCE(lock->tickets);
        if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
                return 0;

        new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);

        /* cmpxchg is a full barrier, so nothing can move before it */
        return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
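/*
 * Paravirt slow-path unlock: "old" is the lock value sampled in
 * arch_spin_unlock() below, before the unlocking increment, so the
 * post-unlock value can be recomputed and cmpxchg'd against the
 * current contents of the lock.
 */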
static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
                                            arch_spinlock_t old)
{
        arch_spinlock_t new;

        BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

        /* Perform the unlock on the "before" copy */
        old.tickets.head += TICKET_LOCK_INC;

        /* Clear the slowpath flag */
        new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);

        /*
         * If the lock is uncontended, clear the flag - use cmpxchg in
         * case it changes behind our back though.
         */
        if (new.tickets.head != new.tickets.tail ||
            cmpxchg(&lock->head_tail, old.head_tail,
                    new.head_tail) != old.head_tail) {
                /*
                 * Lock still has someone queued for it, so wake up an
                 * appropriate waiter.
                 */
                __ticket_unlock_kick(lock, old.tickets.head);
        }
}
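/*
 * Unlock by advancing head.  With paravirt ticketlocks enabled, the lock
 * is snapshotted before the increment: if a waiter has set the slowpath
 * flag, the slow path above uses the snapshot to clear it and kick the
 * waiter.  Otherwise a plain (possibly locked, see UNLOCK_LOCK_PREFIX)
 * add suffices.
 */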
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        if (TICKET_SLOWPATH_FLAG &&
            static_key_false(&paravirt_ticketlocks_enabled)) {
                arch_spinlock_t prev;

                prev = *lock;
                add_smp(&lock->tickets.head, TICKET_LOCK_INC);

                /* add_smp() is a full mb() */

                if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
                        __ticket_unlock_slowpath(lock, prev);
        } else
                __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        struct __raw_tickets tmp = READ_ONCE(lock->tickets);

        return tmp.tail != tmp.head;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        struct __raw_tickets tmp = READ_ONCE(lock->tickets);

        return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended arch_spin_is_contended
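/*
 * The flags argument is ignored on x86: taking a ticket lock does not
 * depend on the saved IRQ state, so this simply falls through to
 * arch_spin_lock().
 */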
static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                                 unsigned long flags)
{
        arch_spin_lock(lock);
}
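/*
 * Wait until the lock has been released at least once: either it is
 * observed unlocked, or head has moved on from its initial value
 * (i.e. the holder we first saw has released and the lock changed
 * hands), whichever we observe first.
 */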
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        __ticket_t head = ACCESS_ONCE(lock->tickets.head);

        for (;;) {
                struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
                /*
                 * We need to check "unlocked" in a loop, tmp.head == head
                 * can be false positive because of overflow.
                 */
                if (tmp.head == (tmp.tail & ~TICKET_SLOWPATH_FLAG) ||
                    tmp.head != head)
                        break;

                cpu_relax();
        }
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks using the generic qrwlock with
 * x86 specific optimization.
 */
#include <asm/qrwlock.h>
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_X86_SPINLOCK_H */