locking/mutex: Introduce __mutex_trylock_or_handoff()
Yanfei reported that it is possible to lose HANDOFF when we race with
mutex_unlock() and end up setting HANDOFF on an unlocked mutex. At that
point anybody can steal it, losing HANDOFF in the process.

If this happens often enough, we can in fact starve the top waiter.

Solve this by folding the 'set HANDOFF' operation into the trylock
operation, such that we either acquire the lock or it gets HANDOFF set.
This avoids having HANDOFF set on an unlocked mutex.

Reported-by: Yanfei Xu <yanfei.xu@windriver.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Waiman Long <longman@redhat.com>
Reviewed-by: Yanfei Xu <yanfei.xu@windriver.com>
Link: https://lore.kernel.org/r/20210630154114.958507900@infradead.org
commit ad90880dc9
parent 048661a1f9
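
For context, the lost-HANDOFF race can be modeled in a few lines of
userspace C11. This is a sketch only: the atomic owner word and the
helper names are stand-ins, not the kernel's implementation (in the
kernel, __mutex_set_flag() performs the equivalent blind
atomic_long_or() on lock->owner).

    #include <stdatomic.h>
    #include <stdbool.h>

    #define MUTEX_FLAG_HANDOFF	0x02UL
    #define MUTEX_FLAGS		0x07UL

    /*
     * Old scheme, modeled: the top waiter sets HANDOFF with a blind OR
     * *after* its trylock failed. If the holder unlocks in between, the
     * OR lands on an unlocked word (0 | HANDOFF).
     */
    static void waiter_set_handoff(atomic_ulong *owner)
    {
    	atomic_fetch_or_explicit(owner, MUTEX_FLAG_HANDOFF,
    				 memory_order_relaxed);
    }

    /*
     * A stealing acquirer treats any word without owner task bits as
     * unlocked; it installs itself with HANDOFF cleared (the bit must
     * not survive an acquisition), so the handoff request is lost.
     */
    static bool steal(atomic_ulong *owner, unsigned long curr)
    {
    	unsigned long old = atomic_load_explicit(owner, memory_order_relaxed);

    	while (!(old & ~MUTEX_FLAGS)) {	/* no task bits: unlocked */
    		if (atomic_compare_exchange_weak_explicit(
    				owner, &old,
    				curr | (old & ~MUTEX_FLAG_HANDOFF),
    				memory_order_acquire, memory_order_relaxed))
    			return true;	/* acquired; HANDOFF dropped */
    	}
    	return false;
    }

Repeat this often enough and the top waiter never sees its handoff,
i.e. it starves, which is exactly the failure mode described above.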
@@ -91,10 +91,7 @@ static inline unsigned long __owner_flags(unsigned long owner)
 	return owner & MUTEX_FLAGS;
 }
 
-/*
- * Trylock variant that returns the owning task on failure.
- */
-static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
+static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
 {
 	unsigned long owner, curr = (unsigned long)current;
 
@@ -104,39 +101,48 @@ static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 		unsigned long task = owner & ~MUTEX_FLAGS;
 
 		if (task) {
-			if (likely(task != curr))
+			if (flags & MUTEX_FLAG_PICKUP) {
+				if (task != curr)
+					break;
+				flags &= ~MUTEX_FLAG_PICKUP;
+			} else if (handoff) {
+				if (flags & MUTEX_FLAG_HANDOFF)
+					break;
+				flags |= MUTEX_FLAG_HANDOFF;
+			} else {
 				break;
-
-			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
-				break;
-
-			flags &= ~MUTEX_FLAG_PICKUP;
+			}
 		} else {
 #ifdef CONFIG_DEBUG_MUTEXES
-			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
+			DEBUG_LOCKS_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
 #endif
+			task = curr;
 		}
 
-		/*
-		 * We set the HANDOFF bit, we must make sure it doesn't live
-		 * past the point where we acquire it. This would be possible
-		 * if we (accidentally) set the bit on an unlocked mutex.
-		 */
-		flags &= ~MUTEX_FLAG_HANDOFF;
-
-		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, curr | flags))
-			return NULL;
+		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
+			if (task == curr)
+				return NULL;
+			break;
+		}
 	}
 
 	return __owner_task(owner);
 }
 
+/*
+ * Trylock or set HANDOFF
+ */
+static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
+{
+	return !__mutex_trylock_common(lock, handoff);
+}
+
 /*
  * Actual trylock that will work on any unlocked state.
  */
 static inline bool __mutex_trylock(struct mutex *lock)
 {
-	return !__mutex_trylock_or_owner(lock);
+	return !__mutex_trylock_common(lock, false);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
@@ -479,6 +485,14 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 
+/*
+ * Trylock variant that returns the owning task on failure.
+ */
+static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
+{
+	return __mutex_trylock_common(lock, false);
+}
+
 static inline
 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 			    struct mutex_waiter *waiter)
@@ -1018,8 +1032,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		schedule_preempt_disabled();
 
 		first = __mutex_waiter_is_first(lock, &waiter);
-		if (first)
-			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
 
 		set_current_state(state);
 		/*
@@ -1027,7 +1039,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * state back to RUNNING and fall through the next schedule(),
 		 * or we must see its unlock and acquire.
 		 */
-		if (__mutex_trylock(lock) ||
+		if (__mutex_trylock_or_handoff(lock, first) ||
 		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
 			break;
 
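The shape of the fix, as a self-contained sketch of the folded
operation (hypothetical names, C11 atomics, PICKUP handling omitted;
see __mutex_trylock_common() above for the real thing): the
compare-and-exchange only succeeds against a snapshot in which the
lock is still held, so HANDOFF can never be installed on an unlocked
word; an unlock in between simply fails the exchange and the loop
re-evaluates the fresh value.

    #include <stdatomic.h>
    #include <stdbool.h>

    #define MUTEX_FLAG_HANDOFF	0x02UL
    #define MUTEX_FLAGS		0x07UL

    /*
     * One cmpxchg loop that either acquires the lock or sets HANDOFF,
     * never both, and never sets HANDOFF on an unlocked word.
     */
    static bool trylock_or_handoff(atomic_ulong *owner_word,
    			       unsigned long curr, bool handoff)
    {
    	unsigned long owner = atomic_load_explicit(owner_word,
    						   memory_order_relaxed);

    	for (;;) {
    		unsigned long flags = owner & MUTEX_FLAGS;
    		unsigned long task  = owner & ~MUTEX_FLAGS;
    		unsigned long new;

    		if (task) {
    			/* Held: request handoff or give up. */
    			if (!handoff || (flags & MUTEX_FLAG_HANDOFF))
    				return false;
    			new = task | flags | MUTEX_FLAG_HANDOFF;
    		} else {
    			/* Unlocked: take it ourselves. */
    			new = curr | flags;
    		}

    		/*
    		 * The exchange succeeds only if the word still matches
    		 * 'owner', so HANDOFF is only ever written while 'task'
    		 * still holds the lock; an unlock in between fails the
    		 * exchange and we retry against the new value.
    		 */
    		if (atomic_compare_exchange_weak_explicit(
    				owner_word, &owner, new,
    				memory_order_acquire, memory_order_relaxed))
    			return task == 0;	/* true iff acquired */
    	}
    }

Either outcome is atomic with respect to unlock: the caller holds the
lock, or the holder is guaranteed to observe HANDOFF when it unlocks.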