mirror of
https://github.com/torvalds/linux.git
synced 2024-11-01 17:51:43 +00:00
cc189d2513
Linus suggested that probably all the supported architectures can allow a
negative mutex count without incorrect behavior, so we can then back out the
architecture specific change and allow the mutex count to go to any negative
number. That should further reduce contention for non-x86 architectures.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Chandramouleeswaran Aswin <aswin@hp.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Norton Scott J <scott.norton@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Clark Williams <williams@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1366226594-5506-5-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
643 lines
17 KiB
C
/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        lock->spin_mlock = NULL;
#endif

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

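/*
 * A purely illustrative trace of the count protocol used below (1 = unlocked,
 * 0 = locked with no waiters, <0 = locked with possible waiters); the task
 * names are hypothetical and only meant to show the state transitions:
 *
 *   T0: mutex_lock()     fastpath takes the 1->0 transition
 *   T1: mutex_lock()     fastpath fails; slowpath xchg()es count to -1,
 *                        queues itself on wait_list and sleeps
 *   T0: mutex_unlock()   slowpath sets count back to 1 and wakes T1
 *   T1: wakes up, xchg()es count 1 -> -1 and takes the lock; wait_list is
 *                        now empty, so count is set back to 0
 */
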
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

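/*
 * A minimal usage sketch of the API above, never compiled here.
 * DEFINE_MUTEX() and LIST_HEAD() come from <linux/mutex.h> and
 * <linux/list.h>; the cache name and helper below are hypothetical and only
 * illustrate the lock/unlock pairing (a dynamically allocated mutex would be
 * set up with mutex_init() instead of DEFINE_MUTEX()):
 */
#if 0
static DEFINE_MUTEX(example_cache_lock);	/* statically initialized mutex */
static LIST_HEAD(example_cache);

static void example_cache_add(struct list_head *entry)
{
        mutex_lock(&example_cache_lock);	/* may sleep: process context only */
        list_add_tail(entry, &example_cache);
        mutex_unlock(&example_cache_lock);	/* must be done by the same task */
}
#endif
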
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners from acquiring the mutex
 * more or less simultaneously, the spinners need to acquire an MCS lock
 * before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
        struct mspin_node *next;
        int               locked;	/* 1 if lock acquired */
};
#define MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
        struct mspin_node *prev;

        /* Init node */
        node->locked = 0;
        node->next   = NULL;

        prev = xchg(lock, node);
        if (likely(prev == NULL)) {
                /* Lock acquired */
                node->locked = 1;
                return;
        }
        ACCESS_ONCE(prev->next) = node;
        smp_wmb();
        /* Wait until the lock holder passes the lock down */
        while (!ACCESS_ONCE(node->locked))
                arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
        struct mspin_node *next = ACCESS_ONCE(node->next);

        if (likely(!next)) {
                /*
                 * Release the lock by setting it to NULL
                 */
                if (cmpxchg(lock, node, NULL) == node)
                        return;
                /* Wait until the next pointer is set */
                while (!(next = ACCESS_ONCE(node->next)))
                        arch_mutex_cpu_relax();
        }
        ACCESS_ONCE(next->locked) = 1;
        smp_wmb();
}

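/*
 * A minimal sketch of how the MCS queue above is meant to be used: each
 * contender spins on a node that lives on its own stack, so the cacheline
 * being spun on stays local to the waiter. The helper name is hypothetical
 * and the block is never compiled; the real caller is __mutex_lock_common()
 * further down.
 */
#if 0
static void example_mcs_queue(struct mutex *lock)
{
        struct mspin_node node;			/* per-waiter, on-stack queue node */

        mspin_lock(MLOCK(lock), &node);		/* enqueue, spin until we reach the head */
        /* ... only one spinner at a time gets here ... */
        mspin_unlock(MLOCK(lock), &node);	/* hand the queue head to the next waiter */
}
#endif
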
/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
        if (lock->owner != owner)
                return false;

        /*
         * Ensure we emit the owner->on_cpu dereference _after_ checking that
         * lock->owner still matches owner. If that fails, owner might
         * point to free()d memory; if it still matches, the rcu_read_lock()
         * ensures the memory stays valid.
         */
        barrier();

        return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        rcu_read_lock();
        while (owner_running(lock, owner)) {
                if (need_resched())
                        break;

                arch_mutex_cpu_relax();
        }
        rcu_read_unlock();

        /*
         * We break out of the loop above on need_resched() and when the
         * owner changed, which is a sign for heavy contention. Return
         * success only when lock->owner is NULL.
         */
        return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
        int retval = 1;

        rcu_read_lock();
        if (lock->owner)
                retval = lock->owner->on_cpu;
        rcu_read_unlock();
        /*
         * If lock->owner is not set, the owner may have just acquired the
         * mutex and not yet set the owner field, or the mutex has been
         * released.
         */
        return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner before time,
         * the slow path will always be taken, and that clears the owner field
         * after verifying that it was indeed current.
         */
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        /*
         * Optimistic spinning.
         *
         * We try to spin for acquisition when we find that there are no
         * pending waiters and the lock owner is currently running on a
         * (different) CPU.
         *
         * The rationale is that if the lock owner is running, it is likely to
         * release the lock soon.
         *
         * Since this needs the lock owner, and this mutex implementation
         * doesn't track the owner atomically in the lock field, we need to
         * track it non-atomically.
         *
         * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
         * to serialize everything.
         *
         * The mutex spinners are queued up using the MCS lock so that only one
         * spinner can compete for the mutex. However, if mutex spinning isn't
         * going to happen, there is no point in going through the lock/unlock
         * overhead.
         */
        if (!mutex_can_spin_on_owner(lock))
                goto slowpath;

        for (;;) {
                struct task_struct *owner;
                struct mspin_node  node;

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                mspin_lock(MLOCK(lock), &node);
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner)) {
                        mspin_unlock(MLOCK(lock), &node);
                        break;
                }

                if ((atomic_read(&lock->count) == 1) &&
                    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
                        lock_acquired(&lock->dep_map, ip);
                        mutex_set_owner(lock);
                        mspin_unlock(MLOCK(lock), &node);
                        preempt_enable();
                        return 0;
                }
                mspin_unlock(MLOCK(lock), &node);

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task, that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                arch_mutex_cpu_relax();
        }
slowpath:
#endif
        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
                goto done;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                if (MUTEX_SHOW_NO_WAITER(lock) &&
                    (atomic_xchg(&lock->count, -1) == 1))
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        mutex_remove_waiter(lock, &waiter,
                                            task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        preempt_enable();
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
        lock_acquired(&lock->dep_map, ip);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        mutex_set_owner(lock);

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);
        preempt_enable();

        return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

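/*
 * A minimal, never-compiled sketch of the _nested() variants above: when two
 * mutexes of the same lock class must be held at once (here ordered by
 * pointer value to avoid ABBA deadlocks), the inner acquisition carries a
 * lockdep subclass such as SINGLE_DEPTH_NESTING from <linux/lockdep.h>.
 * The struct and function names are hypothetical.
 */
#if 0
struct example_obj {
        struct mutex lock;
};

static void example_lock_pair(struct example_obj *a, struct example_obj *b)
{
        if (a > b)
                swap(a, b);		/* impose a stable locking order */
        mutex_lock(&a->lock);
        mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
        /* ... both objects are now stable ... */
        mutex_unlock(&b->lock);
        mutex_unlock(&a->lock);
}
#endif
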
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);

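/*
 * A minimal, never-compiled sketch of the interruptible variant above, e.g.
 * from a syscall or ioctl path where a task waiting for the lock should stay
 * responsive to signals. The device structure and function names are
 * hypothetical; the point of interest is handling the -EINTR return
 * (commonly translated to -ERESTARTSYS). mutex_lock_killable() below is used
 * the same way, except that only fatal signals abort the wait.
 */
#if 0
struct example_dev {
        struct mutex lock;
};

static long example_dev_op(struct example_dev *dev)
{
        if (mutex_lock_interruptible(&dev->lock))
                return -ERESTARTSYS;	/* a signal arrived before we got the lock */
        /* ... operate on dev ... */
        mutex_unlock(&dev->lock);
        return 0;
}
#endif
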
int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_killable_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);

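/*
 * A minimal, never-compiled sketch of the trylock convention described
 * above: 1 means the lock was taken, 0 means somebody else holds it, so the
 * caller falls back instead of sleeping. It reuses the hypothetical
 * example_dev from the earlier sketch; the helper name is hypothetical too.
 */
#if 0
static bool example_try_flush(struct example_dev *dev)
{
        if (!mutex_trylock(&dev->lock))
                return false;		/* contended: skip this round, retry later */
        /* ... do work that is optional while the lock is busy ... */
        mutex_unlock(&dev->lock);
        return true;
}
#endif
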
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
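
/*
 * A minimal, never-compiled sketch of the helper above for a refcounted
 * object whose teardown must run under a list mutex: only the caller that
 * drops the last reference ends up holding the lock. All names below are
 * hypothetical.
 */
#if 0
struct example_refobj {
        atomic_t         refcnt;
        struct list_head node;
};
static DEFINE_MUTEX(example_obj_list_lock);

static void example_obj_put(struct example_refobj *obj)
{
        if (!atomic_dec_and_mutex_lock(&obj->refcnt, &example_obj_list_lock))
                return;			/* not the last reference: nothing to do */

        /* last reference dropped and the list mutex is held */
        list_del(&obj->node);
        mutex_unlock(&example_obj_list_lock);
        kfree(obj);
}
#endif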