commit a8ad07e524

    The qspinlock implementation depends on having well-behaved mixed-size
    atomics. This is true on the more widely-used platforms, but these
    requirements are somewhat subtle and may not be satisfied by all the
    platforms that qspinlock is used on. Document these requirements, so
    ports that use qspinlock can more easily determine whether they meet
    them.

    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Acked-by: Waiman Long <longman@redhat.com>
    Reviewed-by: Arnd Bergmann <arnd@arndb.de>
    Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * A 'generic' spinlock implementation that is based on MCS locks. For an
 * architecture that's looking for a 'generic' spinlock, please first consider
 * ticket-lock.h and only come looking here when you've considered all the
 * constraints below and can show your hardware does actually perform better
 * with qspinlock.
 *
 * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
 * weaker than RCtso if you're Power), where regular code only expects atomic_t
 * to be RCpc.
 *
 * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
 * of atomic operations to behave well together; please audit them carefully to
 * ensure they all have forward progress. Many atomic operations may default to
 * cmpxchg() loops, which will not have good forward progress properties on
 * LL/SC architectures.
 *
 * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
 * do. Carefully read the patches that introduced
 * queued_fetch_set_pending_acquire().
 *
 * qspinlock also heavily relies on mixed-size atomic operations; specifically,
 * it requires architectures to have xchg16, something which many LL/SC
 * architectures need to implement as a 32-bit and+or in order to satisfy the
 * forward progress guarantees mentioned above.
 *
 * Further reading on mixed-size atomics that might be relevant:
 *
 *   http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
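To make the xchg16 / forward-progress point above concrete, here is a minimal user-space sketch in C11 atomics. The helper name and interface are invented for illustration and no architecture is claimed to implement it this way; it simply shows a 16-bit exchange emulated with a 32-bit compare-and-swap loop. Such a loop can in principle fail indefinitely while the other half of the word keeps changing, which is exactly the forward-progress hazard the comment warns about.

#include <stdatomic.h>
#include <stdint.h>

/*
 * Illustration only (hypothetical helper): emulate a 16-bit exchange on the
 * low or high half of a 32-bit word using a compare-and-swap loop, the way an
 * LL/SC machine without a native xchg16 might. The loop keeps retrying while
 * the *other* half of the word changes underneath it.
 */
static uint16_t xchg16_via_cmpxchg32(_Atomic uint32_t *word, int high_half,
                                     uint16_t newval)
{
        const int shift = high_half ? 16 : 0;
        const uint32_t mask = (uint32_t)0xffff << shift;
        uint32_t old = atomic_load_explicit(word, memory_order_relaxed);

        for (;;) {
                uint32_t repl = (old & ~mask) | ((uint32_t)newval << shift);

                if (atomic_compare_exchange_weak_explicit(word, &old, repl,
                                                          memory_order_acquire,
                                                          memory_order_relaxed))
                        return (uint16_t)((old & mask) >> shift);
                /* CAS failed: 'old' now holds the current value; retry. */
        }
}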
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
        /*
         * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
         * isn't immediately observable.
         */
        return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to keep the lockref code from stealing the
 * lock and changing things underneath it. This also allows some
 * optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
        return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
        return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
        int val = atomic_read(&lock->val);

        if (unlikely(val))
                return 0;

        return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
        int val = 0;

        if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
                return;

        queued_spin_lock_slowpath(lock, val);
}
#endif

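As a caller-side illustration (none of this appears in this header, and the names below are hypothetical): generic kernel code does not call queued_spin_lock()/queued_spin_trylock() directly; it uses the spin_lock()/spin_trylock() wrappers, which reach these functions through the arch_spin_*() remaps at the bottom of this header. A minimal sketch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);           /* hypothetical lock */
static int example_count;                       /* data protected by example_lock */

/* Unconditional acquire: fast path is the try_cmpxchg in queued_spin_lock(). */
static void example_add(void)
{
        spin_lock(&example_lock);
        example_count++;
        spin_unlock(&example_lock);
}

/* Opportunistic acquire: ends up in queued_spin_trylock() above. */
static bool example_add_if_uncontended(void)
{
        if (!spin_trylock(&example_lock))
                return false;                   /* contended; caller backs off */
        example_count++;
        spin_unlock(&example_lock);
        return true;
}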
#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
        /*
         * unlock() needs release semantics:
         */
        smp_store_release(&lock->locked, 0);
}
#endif

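Why a plain byte store is enough to unlock: the lock-word layout defined in asm-generic/qspinlock_types.h keeps the locked byte in the low 8 bits of @val, so clearing only ->locked leaves the pending/tail state untouched. Below is a condensed, little-endian-only sketch of that layout, assuming the usual kernel types from <linux/types.h>; the struct name is invented, and the real header also carries a big-endian variant and a combined locked_pending field.

struct qspinlock_layout_sketch {        /* hypothetical name; see qspinlock_types.h */
        union {
                atomic_t val;           /* the whole 32-bit lock word */
                struct {
                        u8  locked;     /* 0 or _Q_LOCKED_VAL; what unlock clears */
                        u8  pending;    /* claimed by the first waiter */
                        u16 tail;       /* MCS queue tail: CPU + node index */
                };
        };
};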
#ifndef virt_spin_lock
/*
 * Hook for hypervisor-aware spinning; architectures may override this (x86
 * does) to avoid queueing when running as a guest. Returning false means
 * "not handled here", so the regular qspinlock slowpath is taken.
 */
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
        return false;
}
#endif

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)          queued_spin_is_locked(l)
#define arch_spin_is_contended(l)       queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)     queued_spin_value_unlocked(l)
#define arch_spin_lock(l)               queued_spin_lock(l)
#define arch_spin_trylock(l)            queued_spin_trylock(l)
#define arch_spin_unlock(l)             queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
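For context on how the remaps above get consumed (hedged: the details differ per architecture and are not part of this header), a port that has verified the constraints documented at the top typically selects ARCH_USE_QUEUED_SPINLOCKS in its Kconfig and pulls this file in through its own headers, roughly along these lines; every name below is illustrative.

/* hypothetical arch/myarch/include/asm/spinlock.h -- illustration only */
#ifndef _ASM_MYARCH_SPINLOCK_H
#define _ASM_MYARCH_SPINLOCK_H

#include <asm/qspinlock.h>      /* arch wrapper; typically ends by including asm-generic/qspinlock.h */
#include <asm/qrwlock.h>        /* queued rwlocks usually accompany qspinlock */

#endif /* _ASM_MYARCH_SPINLOCK_H */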