#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif
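
/*
 * Paravirtualized ticket locks (CONFIG_PARAVIRT_SPINLOCKS) hook only the
 * slow paths: a CPU that has spun for SPIN_THRESHOLD iterations calls
 * __ticket_lock_spinning() so the host can block the vcpu, and the unlocker
 * calls __ticket_unlock_kick() to wake the next ticket holder (roughly the
 * pv-ticketlock scheme described when these hooks were introduced).  With
 * paravirt spinlocks disabled, the stubs below compile away to nothing.
 */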
/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
						   __ticket_t ticket)
{
}

static __always_inline void ____ticket_unlock_kick(struct arch_spinlock *lock,
						    __ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

/*
 * If a spinlock has someone waiting on it, then kick the appropriate
 * waiting cpu.
 */
static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
						 __ticket_t next)
{
	if (unlikely(lock->tickets.tail != next))
		____ticket_unlock_kick(lock, next);
}

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

	inc = xadd(&lock->tickets, inc);

	for (;;) {
		unsigned count = SPIN_THRESHOLD;

		do {
			if (inc.head == inc.tail)
				goto out;
			cpu_relax();
			inc.head = ACCESS_ONCE(lock->tickets.head);
		} while (--count);
		__ticket_lock_spinning(lock, inc.tail);
	}
out:	barrier();	/* make sure nothing creeps before the lock is taken */
}
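
/*
 * Trylock only makes an attempt while the lock looks free (head == tail).
 * A single cmpxchg on the combined head_tail word then both re-checks that
 * nobody raced in and takes the next ticket: adding
 * TICKET_LOCK_INC << TICKET_SHIFT bumps only the tail (high) half.
 */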
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != old.tickets.tail)
		return 0;

	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
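
/*
 * Unlock advances head by TICKET_LOCK_INC, handing the lock to the next
 * ticket in line (UNLOCK_LOCK_PREFIX covers the PPro errata above), then
 * kicks the new owner in case it blocked in the paravirt slowpath.  "next"
 * is sampled before the add so the kick targets the right ticket.
 */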
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_t next = lock->tickets.head + TICKET_LOCK_INC;

	__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
	__ticket_unlock_kick(lock, next);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}
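
/*
 * tail - head counts the tickets outstanding; anything above
 * TICKET_LOCK_INC means at least one CPU is queued behind the owner.
 */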
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
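
/*
 * Roughly, using the helpers from <asm/rwlock.h>: the count starts at
 * RW_LOCK_BIAS.  A reader decrements it by one and backs off if the result
 * goes negative (a writer is in); a writer subtracts the whole bias and only
 * proceeds if the result is exactly zero (no readers, no other writer).
 * The contended cases fall back to __read_lock_failed/__write_lock_failed,
 * which undo the change and retry.
 */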

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}
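
/*
 * The lock fast paths below are a single locked RMW plus a conditional
 * branch; only the contended cases call out to the __read_lock_failed /
 * __write_lock_failed slow paths, which back the change out and retry.
 */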
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}
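
/*
 * The trylocks mirror the asm fast paths with atomic helpers: make the same
 * change, and if the lock turns out to be unavailable, undo it and return 0
 * rather than spinning.
 */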
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */