x86, ticketlock: Make __ticket_spin_trylock common

Make trylock code common regardless of ticket size.

(Also, rename arch_spinlock.slock to head_tail.)

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

commit 229855d6f3 (parent 2994488fe5)
Jeremy Fitzhardinge, 2010-07-13 15:14:26 -07:00; committed by H. Peter Anvin
2 changed files with 15 additions and 40 deletions

arch/x86/include/asm/spinlock.h

@@ -69,60 +69,33 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 	barrier();		/* make sure nothing creeps before the lock is taken */
 }
 
-#if (NR_CPUS < 256)
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	unsigned int tmp, new;
-
-	asm volatile("movzwl %2, %0\n\t"
-		     "cmpb %h0,%b0\n\t"
-		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
-		     "jne 1f\n\t"
-		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
-		     "1:"
-		     "sete %b1\n\t"
-		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
-		     :
-		     : "memory", "cc");
+	arch_spinlock_t old, new;
+
+	old.tickets = ACCESS_ONCE(lock->tickets);
+	if (old.tickets.head != old.tickets.tail)
+		return 0;
 
-	return tmp;
+	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
+
+	/* cmpxchg is a full barrier, so nothing can move before it */
+	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
+#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-		     : "+m" (lock->slock)
+		     : "+m" (lock->head_tail)
 		     :
 		     : "memory", "cc");
 }
 #else
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned tmp;
-	unsigned new;
-
-	asm volatile("movl %2,%0\n\t"
-		     "movl %0,%1\n\t"
-		     "roll $16, %0\n\t"
-		     "cmpl %0,%1\n\t"
-		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
-		     "jne 1f\n\t"
-		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
-		     "1:"
-		     "sete %b1\n\t"
-		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
-		     :
-		     : "memory", "cc");
-
-	return tmp;
-}
-
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-		     : "+m" (lock->slock)
+		     : "+m" (lock->head_tail)
 		     :
 		     : "memory", "cc");
 }
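
Stripped of kernel scaffolding, the new trylock is ordinary compare-and-swap logic: snapshot both tickets at once, give up if the lock is held, otherwise try to bump the tail with a single cmpxchg of the whole pair. Below is a minimal userspace sketch of the same idea. It assumes the NR_CPUS < 256 sizes, stands in for the kernel's cmpxchg() and ACCESS_ONCE() with GCC's __atomic builtins, and the spinlock_t/spin_trylock/spin_unlock names and main() harness are this sketch's own, not the kernel's:

/* Userspace sketch of the now-common trylock, assuming 8-bit tickets.
 * Builds with gcc/clang; little-endian layout assumed, as on x86. */
#include <stdint.h>
#include <stdio.h>

#define TICKET_SHIFT 8			/* bits per ticket field */

typedef struct {
	union {
		uint16_t head_tail;	/* both tickets as one word */
		struct {
			uint8_t head;	/* ticket now being served */
			uint8_t tail;	/* next ticket to hand out */
		} tickets;
	};
} spinlock_t;

static int spin_trylock(spinlock_t *lock)
{
	spinlock_t old, new;

	/* One atomic snapshot of both fields (ACCESS_ONCE() analogue). */
	old.head_tail = __atomic_load_n(&lock->head_tail, __ATOMIC_RELAXED);
	if (old.tickets.head != old.tickets.tail)
		return 0;		/* lock is held; don't spin */

	/* Bump the tail, which sits in the high bits on little-endian. */
	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);

	/* cmpxchg analogue; seq_cst acts as the full barrier here. */
	return __atomic_compare_exchange_n(&lock->head_tail, &old.head_tail,
					   new.head_tail, 0,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

static void spin_unlock(spinlock_t *lock)
{
	/* Serve the next ticket; the kernel's incb does the same. */
	__atomic_fetch_add(&lock->tickets.head, 1, __ATOMIC_RELEASE);
}

int main(void)
{
	spinlock_t lock = { { 0 } };

	printf("%d\n", spin_trylock(&lock));	/* 1: got the lock */
	printf("%d\n", spin_trylock(&lock));	/* 0: already held */
	spin_unlock(&lock);
	printf("%d\n", spin_trylock(&lock));	/* 1: free again */
	return 0;
}

Because the ticket width only shows up through TICKET_SHIFT and the pair type, the same C works for both the u8 and u16 configurations, which is exactly why the two asm variants could be collapsed into one function.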

arch/x86/include/asm/spinlock_types.h

@@ -9,8 +9,10 @@
 
 #if (CONFIG_NR_CPUS < 256)
 typedef u8  __ticket_t;
+typedef u16 __ticketpair_t;
 #else
 typedef u16 __ticket_t;
+typedef u32 __ticketpair_t;
 #endif
 
 #define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
@@ -18,14 +20,14 @@ typedef u16 __ticket_t;
 
 typedef struct arch_spinlock {
 	union {
-		unsigned int slock;
+		__ticketpair_t head_tail;
 		struct __raw_tickets {
			__ticket_t head, tail;
 		} tickets;
 	};
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ { .slock = 0 } }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
 
 #include <asm/rwlock.h>
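
The union is what lets the trylock treat both tickets as a single word: head_tail aliases the two __ticket_t fields, so on little-endian x86 the tail lands in the high bits and adding 1 << TICKET_SHIFT bumps only the tail. A quick userspace check of that layout, mirroring the kernel's type names (the uint8_t/uint16_t spellings and the main() harness are this sketch's own):

/* Layout check for the head_tail/tickets union; little-endian assumed,
 * as on x86. Mirrors the kernel types but is not kernel code. */
#include <stdint.h>
#include <stdio.h>

typedef uint8_t  __ticket_t;		/* CONFIG_NR_CPUS < 256 case */
typedef uint16_t __ticketpair_t;

#define TICKET_SHIFT (sizeof(__ticket_t) * 8)

typedef struct arch_spinlock {
	union {
		__ticketpair_t head_tail;
		struct __raw_tickets {
			__ticket_t head, tail;
		} tickets;
	};
} arch_spinlock_t;

int main(void)
{
	arch_spinlock_t lock = { { 0 } };	/* __ARCH_SPIN_LOCK_UNLOCKED */

	/* Take a ticket through the combined word, as trylock does. */
	lock.head_tail += 1 << TICKET_SHIFT;
	printf("head=%d tail=%d\n", lock.tickets.head, lock.tickets.tail);
	/* -> head=0 tail=1: the lock is now held */

	/* Release by serving the next ticket, as the unlock incb does. */
	lock.tickets.head++;
	printf("locked=%d\n", lock.tickets.head != lock.tickets.tail);
	/* -> locked=0: head == tail means free */
	return 0;
}

Dropping the old unsigned int slock in favor of __ticketpair_t also means the unlock asm now names head_tail directly, so the same "+m" constraint works for both the incb and incw variants.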