Merge branch 'upstream/ticketlock-cleanup' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen into x86/asm
commit 01acc26908
arch/x86/include/asm/cmpxchg.h
@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
 	__compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
 	__compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+	__compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size is set to
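The __*_wrong_size() declarations are deliberately left undefined and tagged with __compiletime_error(), which expands to GCC's error function attribute. When the sizeof() switch in one of the macros below matches a real case, the optimizer proves the default: branch dead and the call disappears; when a caller passes an operand of an unsupported size, the build fails with the quoted message instead of an obscure link-time error. A minimal standalone sketch of the same trick (not the kernel header; it relies on dead-code elimination, so compile with optimization):

extern void __bad_size(void) __attribute__((error("Bad argument size")));

#define load_any(ptr)							\
({									\
	unsigned long long __v;						\
	switch (sizeof(*(ptr))) {					\
	case 1: __v = *(volatile unsigned char *)(ptr); break;		\
	case 4: __v = *(volatile unsigned int *)(ptr); break;		\
	case 8: __v = *(volatile unsigned long long *)(ptr); break;	\
	default: __bad_size(); __v = 0; break;	/* e.g. 2-byte loads rejected */ \
	}								\
	__v;								\
})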
@@ -31,60 +33,47 @@ extern void __xadd_wrong_size(void)
 #define __X86_CASE_Q	-1		/* sizeof will never return -1 */
 #endif
 
+/*
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns the old value.
+ */
+#define __xchg_op(ptr, arg, op, lock)					\
+	({								\
+		__typeof__ (*(ptr)) __ret = (arg);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock #op "b %b0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock #op "w %w0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock #op "l %0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock #op "q %q0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		default:						\
+			__ ## op ## _wrong_size();			\
+		}							\
+		__ret;							\
+	})
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
  * Since this is generally used to protect other memory information, we
  * use "asm volatile" and "memory" clobbers to prevent gcc from moving
  * information around.
  */
-#define __xchg(x, ptr, size)						\
-({									\
-	__typeof(*(ptr)) __x = (x);					\
-	switch (size) {							\
-	case __X86_CASE_B:						\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile("xchgb %0,%1"				\
-			     : "=q" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_W:						\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile("xchgw %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_L:						\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile("xchgl %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_Q:						\
-	{								\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
-		asm volatile("xchgq %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__xchg_wrong_size();					\
-	}								\
-	__x;								\
-})
-
-#define xchg(ptr, v)							\
-	__xchg((v), (ptr), sizeof(*ptr))
+#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange. Compare OLD with MEM, if identical,
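Net effect of this hunk: four near-identical per-size xchg blocks collapse into a single table-driven __xchg_op() macro, and xchg() itself becomes a one-liner. Hand-expanding xchg(&val, 5) for a 4-byte operand shows roughly what the compiler sees after preprocessing (a sketch; val and old are illustrative names, and xchg passes "" for lock because the instruction carries an implicit lock):

unsigned int val = 0;
unsigned int old;

old = ({
	unsigned int __ret = 5;		/* __typeof__ (*(ptr)) __ret = (arg); */
	asm volatile ("xchgl %0, %1\n"	/* "" #op "l"  ->  "xchgl" */
		      : "+r" (__ret), "+m" (val)
		      : : "memory", "cc");
	__ret;				/* the value val held before the swap */
});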
@@ -165,36 +154,6 @@ extern void __xadd_wrong_size(void)
 	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
-#define __xadd(ptr, inc, lock)						\
-	({								\
-		__typeof__ (*(ptr)) __ret = (inc);			\
-		switch (sizeof(*(ptr))) {				\
-		case __X86_CASE_B:					\
-			asm volatile (lock "xaddb %b0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_W:					\
-			asm volatile (lock "xaddw %w0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_L:					\
-			asm volatile (lock "xaddl %0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_Q:					\
-			asm volatile (lock "xaddq %q0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		default:						\
-			__xadd_wrong_size();				\
-		}							\
-		__ret;							\
-	})
-
 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous
  * value of "*ptr".
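The surviving comment defines xadd() as fetch-and-add: it returns the value *ptr held before the increment. Portable C11 spells the same primitive atomic_fetch_add_explicit(); a self-contained comparison program (not kernel code):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	_Atomic unsigned int counter = 40;

	/* Equivalent of xadd(&counter, 2): returns the pre-increment
	 * contents; the stored value becomes 42. */
	unsigned int before = atomic_fetch_add_explicit(&counter, 2,
							memory_order_seq_cst);

	printf("before=%u after=%u\n", before, (unsigned int)counter);
	return 0;
}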
@@ -203,8 +162,49 @@ extern void __xadd_wrong_size(void)
  * xadd_sync() is always locked
  * xadd_local() is never locked
  */
+#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
 #define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
 #define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
+
+#define __add(ptr, inc, lock)						\
+	({								\
+		__typeof__ (*(ptr)) __ret = (inc);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock "addb %b1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock "addw %w1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock "addl %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock "addq %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		default:						\
+			__add_wrong_size();				\
+		}							\
+		__ret;							\
+	})
+
+/*
+ * add_*() adds "inc" to "*ptr"
+ *
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
+ */
+#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
 
 #endif /* ASM_X86_CMPXCHG_H */
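These xadd/add families exist largely for the ticket lock this branch cleans up: one locked xadd hands out a ticket and returns the previous tail in a single instruction. A simplified lock-side sketch using GCC __atomic builtins; the struct layout and names are illustrative, not the kernel's arch_spinlock_t:

struct ticketlock {
	unsigned char head;	/* ticket currently being served */
	unsigned char tail;	/* next ticket to hand out */
};

static void ticket_lock(struct ticketlock *lock)
{
	/* One atomic fetch-and-add plays the role of xadd(): it grabs
	 * our ticket and reads the old tail in the same operation. */
	unsigned char me = __atomic_fetch_add(&lock->tail, 1,
					      __ATOMIC_ACQUIRE);

	/* Spin until the served ticket reaches ours; kernel code
	 * would put cpu_relax() in this loop. */
	while (__atomic_load_n(&lock->head, __ATOMIC_ACQUIRE) != me)
		;
}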
arch/x86/include/asm/spinlock.h
@@ -79,23 +79,10 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
+	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
 }
-#else
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
-{
-	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
-}
-#endif
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
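With __add() available, the unlock path no longer needs the NR_CPUS-dependent incb/incw pair: adding 1 to lock->tickets.head lets the compiler pick the operand width from the field's type. Continuing the illustrative sketch above, the matching release side is a plain add rather than xadd, because unlock never needs the old value:

static void ticket_unlock(struct ticketlock *lock)
{
	/* Only the lock holder stores to head while the lock is held,
	 * so one atomic add with release ordering completes unlock. */
	__atomic_fetch_add(&lock->head, 1, __ATOMIC_RELEASE);
}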