x86, pvticketlock: When paravirtualizing ticket locks, increment by 2

Increment the ticket head/tail by 2 rather than 1, leaving the LSB
free to store an "is in slowpath state" bit.  This halves the number
of CPUs a given ticket size can represent, but that shouldn't matter
in practice - kernels built for 32k+ CPU systems are probably built
specially for the hardware rather than as generic distro kernels.
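
To illustrate (a sketch, not part of this patch): with an increment
of 2, every ticket value is even, so bit 0 of the tail is free to act
as a flag.  A later patch in this series puts that bit to use; the
names TICKET_SLOWPATH_FLAG and __ticket_in_slowpath() below are
assumed here for illustration only:

	/* Sketch: TICKET_LOCK_INC == 2 keeps tickets even, freeing bit 0. */
	#define TICKET_SLOWPATH_FLAG	((__ticket_t)1)

	static inline bool __ticket_in_slowpath(arch_spinlock_t *lock)
	{
		/* Bit 0 of tail never carries ticket data, only the flag. */
		return ACCESS_ONCE(lock->tickets.tail) & TICKET_SLOWPATH_FLAG;
	}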

Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Link: http://lkml.kernel.org/r/1376058122-8248-9-git-send-email-raghavendra.kt@linux.vnet.ibm.com
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Attilio Rao <attilio.rao@citrix.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
---
 arch/x86/include/asm/spinlock.h       | 10 +++++-----
 arch/x86/include/asm/spinlock_types.h | 10 ++++++++-
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -78,7 +78,7 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
  */
 static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
-	register struct __raw_tickets inc = { .tail = 1 };
+	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
 
 	inc = xadd(&lock->tickets, inc);
 
@@ -104,7 +104,7 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 	if (old.tickets.head != old.tickets.tail)
 		return 0;
 
-	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
+	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
 
 	/* cmpxchg is a full barrier, so nothing can move before it */
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
@@ -112,9 +112,9 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	__ticket_t next = lock->tickets.head + 1;
+	__ticket_t next = lock->tickets.head + TICKET_LOCK_INC;
 
-	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
+	__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
 	__ticket_unlock_kick(lock, next);
 }
 
@@ -129,7 +129,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-	return (__ticket_t)(tmp.tail - tmp.head) > 1;
+	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
 
 #define arch_spin_is_contended	arch_spin_is_contended

diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -3,7 +3,13 @@
 
 #include <linux/types.h>
 
-#if (CONFIG_NR_CPUS < 256)
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define __TICKET_LOCK_INC	2
+#else
+#define __TICKET_LOCK_INC	1
+#endif
+
+#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
 typedef u8  __ticket_t;
 typedef u16 __ticketpair_t;
 #else
@@ -11,6 +17,8 @@ typedef u16 __ticket_t;
 typedef u32 __ticketpair_t;
 #endif
 
+#define TICKET_LOCK_INC	((__ticket_t)__TICKET_LOCK_INC)
+
 #define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
 
 typedef struct arch_spinlock {