ARM: 7632/1: spinlock: avoid exclusive accesses on unlock() path
When unlocking a spinlock, all we need to do is increment the owner
field of the lock. Since only one CPU can be performing an unlock()
operation for a given lock, this doesn't need to be exclusive.

This patch simplifies arch_spin_unlock to use non-exclusive accesses
when updating the owner field of the lock.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
commit 20e260b6f4
parent 0a301110b7
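The reasoning in the commit message is the standard ticket-lock argument: lock() atomically hands out tickets from next, and only the one CPU that currently holds the lock ever advances owner, so the unlock-side increment has exactly one writer. A minimal user-space sketch of that protocol, assuming C11 atomics (ticket_lock and the function names here are illustrative, not the kernel's API):

#include <stdatomic.h>

struct ticket_lock {
	atomic_ushort next;	/* next ticket to hand out */
	atomic_ushort owner;	/* ticket now being served */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
	/* Handing out tickets races with other lockers, so this must
	 * be an atomic read-modify-write. */
	unsigned short me = atomic_fetch_add(&l->next, 1);

	/* Spin until the owner field reaches our ticket. */
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
		/* spin */;
}

static void ticket_lock_release(struct ticket_lock *l)
{
	/* Only the current lock holder ever writes owner, so a plain
	 * load, add and store suffice -- no exclusive access needed.
	 * This is the same argument the patch applies to the ARM code. */
	unsigned short served = atomic_load_explicit(&l->owner,
						     memory_order_relaxed);
	atomic_store_explicit(&l->owner, (unsigned short)(served + 1),
			      memory_order_release);
}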
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
-	u32 slock;
-
 	smp_mb();
-
-	__asm__ __volatile__(
-"	mov	%1, #1\n"
-"1:	ldrex	%0, [%2]\n"
-"	uadd16	%0, %0, %1\n"
-"	strex	%1, %0, [%2]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (slock), "=&r" (tmp)
-	: "r" (&lock->slock)
-	: "cc");
-
+	lock->tickets.owner++;
 	dsb_sev();
 }
 
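The plain lock->tickets.owner++ relies on owner being an ordinary halfword that only the lock holder writes. For reference, a sketch of the arch_spinlock_t layout as defined in arch/arm/include/asm/spinlock_types.h in kernels of this era (see that header for the authoritative version):

typedef struct {
	union {
		u32 slock;		/* both tickets viewed as one word */
		struct __raw_tickets {
#ifdef __ARMEB__
			u16 next;
			u16 owner;
#else
			u16 owner;
			u16 next;
#endif
		} tickets;		/* ordered so owner is always the
					 * low halfword of slock */
	};
} arch_spinlock_t;

Keeping owner in the low halfword on either endianness is what let the old code's uadd16 add an operand of 1 to owner without carrying into next; the surrounding ldrex/strex retry loop guarded against concurrent writers, which the unlock path never has by construction.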