forked from Minki/linux
a41b56efa7
This will allow me to call functions that have multiple arguments if fastpath fails. This is required to support ticket mutexes, because they need to be able to pass an extra argument to the fail function. Originally I duplicated the functions, by adding __mutex_fastpath_lock_retval_arg. This ended up being just a duplication of the existing function, so a way to test if fastpath was called ended up being better. This also cleaned up the reservation mutex patch some by being able to call an atomic_set instead of atomic_xchg, and making it easier to detect if the wrong unlock function was previously used. Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: dri-devel@lists.freedesktop.org Cc: linaro-mm-sig@lists.linaro.org Cc: robclark@gmail.com Cc: rostedt@goodmis.org Cc: daniel@ffwll.ch Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/20130620113105.4001.83929.stgit@patser Signed-off-by: Ingo Molnar <mingo@kernel.org>
98 lines
2.7 KiB
C
/*
|
|
* Assembly implementation of the mutex fastpath, based on atomic
|
|
* decrement/increment.
|
|
*
|
|
* started by Ingo Molnar:
|
|
*
|
|
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
|
|
*/
|
|
#ifndef _ASM_X86_MUTEX_64_H
|
|
#define _ASM_X86_MUTEX_64_H
|
|
|
|
/**
 * __mutex_fastpath_lock - decrement and call function if negative
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is negative
 *
 * Atomically decrements @v and calls <fail_fn> if the result is negative.
 *
 * Implementation notes:
 *  - @v is pinned into %rdi via the "D" constraint, so a failed fastpath
 *    falls straight through to "call fail_fn" with the count pointer
 *    already in place as the first (System V AMD64 ABI) argument.
 *  - Because a real C function may be called, every caller-saved register
 *    (rax, rsi, rdx, rcx, r8-r11) is listed as clobbered, along with
 *    "memory" since the lock protects data the compiler must not cache
 *    across this point.
 *  - "dummy" only exists to express that %rdi is clobbered ("=D" output);
 *    its value is never used.
 *  - typecheck()/typecheck_fn() reject calls with a mistyped count
 *    pointer or fail function at compile time.
 */
#define __mutex_fastpath_lock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX " decl (%%rdi)\n"		\
		     " jns 1f \n"				\
		     " call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
|
|
|
|
/**
|
|
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
|
|
* from 1 to a 0 value
|
|
* @count: pointer of type atomic_t
|
|
*
|
|
* Change the count from 1 to a value lower than 1. This function returns 0
|
|
* if the fastpath succeeds, or -1 otherwise.
|
|
*/
|
|
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
|
|
{
|
|
if (unlikely(atomic_dec_return(count) < 0))
|
|
return -1;
|
|
else
|
|
return 0;
|
|
}
|
|
|
|
/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 *
 * Mirror image of __mutex_fastpath_lock: "jg 1f" skips the slowpath only
 * when the incremented count is strictly positive (i.e. no waiters).
 * Otherwise control falls through to "call fail_fn" with @v still in
 * %rdi as the first ABI argument. The caller-saved registers and
 * "memory" are clobbered for the same reason: a C function may run, and
 * protected data must be flushed/reloaded around the unlock.
 */
#define __mutex_fastpath_unlock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX " incl (%%rdi)\n"		\
		     " jg 1f\n"					\
		     " call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
|
|
|
|
/*
 * Nonzero: the generic slowpath is expected to perform the actual unlock
 * (release the count) itself on this architecture.
 * NOTE(review): semantics inferred from the generic mutex code's use of
 * this predicate — confirm against kernel/mutex.c.
 */
#define __mutex_slowpath_needs_to_unlock() 1
|
|
|
|
/**
|
|
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
|
|
*
|
|
* @count: pointer of type atomic_t
|
|
* @fail_fn: fallback function
|
|
*
|
|
* Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
|
|
* if it wasn't 1 originally. [the fallback function is never used on
|
|
* x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
|
|
*/
|
|
static inline int __mutex_fastpath_trylock(atomic_t *count,
|
|
int (*fail_fn)(atomic_t *))
|
|
{
|
|
if (likely(atomic_cmpxchg(count, 1, 0) == 1))
|
|
return 1;
|
|
else
|
|
return 0;
|
|
}
|
|
|
|
#endif /* _ASM_X86_MUTEX_64_H */
|