include/asm-x86/spinlock.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Joe Perches 2008-03-23 01:03:31 -07:00 committed by Ingo Molnar
parent ceb7ce1052
commit d3bf60a6e4

View File

@@ -82,7 +82,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
-	__asm__ __volatile__ (
+	asm volatile (
 		LOCK_PREFIX "xaddw %w0, %1\n"
 		"1:\t"
 		"cmpb %h0, %b0\n\t"
@@ -92,9 +92,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		/* don't need lfence here, because loads are in-order */
 		"jmp 1b\n"
 		"2:"
-		:"+Q" (inc), "+m" (lock->slock)
+		: "+Q" (inc), "+m" (lock->slock)
 		:
-		:"memory", "cc");
+		: "memory", "cc");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -104,30 +104,28 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	int tmp;
 	short new;
 
-	asm volatile(
-		"movw %2,%w0\n\t"
-		"cmpb %h0,%b0\n\t"
-		"jne 1f\n\t"
-		"movw %w0,%w1\n\t"
-		"incb %h1\n\t"
-		"lock ; cmpxchgw %w1,%2\n\t"
-		"1:"
-		"sete %b1\n\t"
-		"movzbl %b1,%0\n\t"
-		:"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
-		:
-		: "memory", "cc");
+	asm volatile("movw %2,%w0\n\t"
+		     "cmpb %h0,%b0\n\t"
+		     "jne 1f\n\t"
+		     "movw %w0,%w1\n\t"
+		     "incb %h1\n\t"
+		     "lock ; cmpxchgw %w1,%2\n\t"
+		     "1:"
+		     "sete %b1\n\t"
+		     "movzbl %b1,%0\n\t"
+		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 
 	return tmp;
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		UNLOCK_LOCK_PREFIX "incb %0"
-		:"+m" (lock->slock)
-		:
-		:"memory", "cc");
+	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
+		     : "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 }
 
 #else
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
@@ -149,21 +147,20 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	int inc = 0x00010000;
 	int tmp;
 
-	__asm__ __volatile__ (
-		"lock ; xaddl %0, %1\n"
-		"movzwl %w0, %2\n\t"
-		"shrl $16, %0\n\t"
-		"1:\t"
-		"cmpl %0, %2\n\t"
-		"je 2f\n\t"
-		"rep ; nop\n\t"
-		"movzwl %1, %2\n\t"
-		/* don't need lfence here, because loads are in-order */
-		"jmp 1b\n"
-		"2:"
-		:"+Q" (inc), "+m" (lock->slock), "=r" (tmp)
-		:
-		:"memory", "cc");
+	asm volatile("lock ; xaddl %0, %1\n"
+		     "movzwl %w0, %2\n\t"
+		     "shrl $16, %0\n\t"
+		     "1:\t"
+		     "cmpl %0, %2\n\t"
+		     "je 2f\n\t"
+		     "rep ; nop\n\t"
+		     "movzwl %1, %2\n\t"
+		     /* don't need lfence here, because loads are in-order */
+		     "jmp 1b\n"
+		     "2:"
+		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+		     :
+		     : "memory", "cc");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -173,31 +170,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	int tmp;
 	int new;
 
-	asm volatile(
-		"movl %2,%0\n\t"
-		"movl %0,%1\n\t"
-		"roll $16, %0\n\t"
-		"cmpl %0,%1\n\t"
-		"jne 1f\n\t"
-		"addl $0x00010000, %1\n\t"
-		"lock ; cmpxchgl %1,%2\n\t"
-		"1:"
-		"sete %b1\n\t"
-		"movzbl %b1,%0\n\t"
-		:"=&a" (tmp), "=r" (new), "+m" (lock->slock)
-		:
-		: "memory", "cc");
+	asm volatile("movl %2,%0\n\t"
+		     "movl %0,%1\n\t"
+		     "roll $16, %0\n\t"
+		     "cmpl %0,%1\n\t"
+		     "jne 1f\n\t"
+		     "addl $0x00010000, %1\n\t"
+		     "lock ; cmpxchgl %1,%2\n\t"
+		     "1:"
+		     "sete %b1\n\t"
+		     "movzbl %b1,%0\n\t"
+		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 
 	return tmp;
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		UNLOCK_LOCK_PREFIX "incw %0"
-		:"+m" (lock->slock)
-		:
-		:"memory", "cc");
+	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
+		     : "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 }
 
 #endif