Merge tag 'locking_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Borislav Petkov:

 - Prevent the leaking of a debug timer in futex_waitv()

 - A preempt-RT mutex locking fix, adding the proper acquire semantics

* tag 'locking_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  futex: Fix futex_waitv() hrtimer debug object leak on kcalloc error
  rtmutex: Add acquire semantics for rtmutex lock acquisition slow path
This commit is contained in:
commit 95d248d16f
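For context on the first fix: futex_waitv() arms an on-stack hrtimer (with a debug object when timer debug objects are enabled) before allocating the waiter array, so an early return on allocation failure leaked the armed timer's debug object. Below is a minimal userspace sketch of the corrected control flow; it is an illustration only, and setup_timer_obj()/destroy_timer_obj() are hypothetical stand-ins for futex_setup_timer() and the hrtimer teardown, not kernel APIs.

/* Illustration only: hypothetical userspace analog of the futex_waitv() fix. */
#include <stdio.h>
#include <stdlib.h>

struct timer_obj { int armed; };

/* Stand-in for futex_setup_timer(): arms the on-stack timer object. */
static void setup_timer_obj(struct timer_obj *t) { t->armed = 1; }

/* Stand-in for hrtimer_cancel() + destroy_hrtimer_on_stack(). */
static void destroy_timer_obj(struct timer_obj *t) { t->armed = 0; }

static int waitv_like(unsigned int nr_futexes, int timeout)
{
	struct timer_obj to = { 0 };
	int *futexv;
	int ret = 0;

	if (timeout)
		setup_timer_obj(&to);

	futexv = calloc(nr_futexes, sizeof(*futexv));
	if (!futexv) {
		/*
		 * The buggy version did "return -ENOMEM;" here and never
		 * reached the teardown below, leaking the armed timer.
		 */
		ret = -1;
		goto destroy_timer;
	}

	/* ... parse the waiters and wait on the futexes ... */
	free(futexv);

destroy_timer:
	if (timeout)
		destroy_timer_obj(&to);
	return ret;
}

int main(void)
{
	printf("waitv_like() -> %d\n", waitv_like(8, 1));
	return 0;
}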
--- a/kernel/futex/syscalls.c
+++ b/kernel/futex/syscalls.c
@@ -286,19 +286,22 @@ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,
 	}
 
 	futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
-	if (!futexv)
-		return -ENOMEM;
+	if (!futexv) {
+		ret = -ENOMEM;
+		goto destroy_timer;
+	}
 
 	ret = futex_parse_waitv(futexv, waiters, nr_futexes);
 	if (!ret)
 		ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);
 
+	kfree(futexv);
+
+destroy_timer:
 	if (timeout) {
 		hrtimer_cancel(&to.timer);
 		destroy_hrtimer_on_stack(&to.timer);
 	}
 
-	kfree(futexv);
 	return ret;
 }
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -89,15 +89,31 @@ static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
  * set this bit before looking at the lock.
  */
 
-static __always_inline void
-rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+static __always_inline struct task_struct *
+rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
 {
 	unsigned long val = (unsigned long)owner;
 
 	if (rt_mutex_has_waiters(lock))
 		val |= RT_MUTEX_HAS_WAITERS;
 
-	WRITE_ONCE(lock->owner, (struct task_struct *)val);
+	return (struct task_struct *)val;
+}
+
+static __always_inline void
+rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+{
+	/*
+	 * lock->wait_lock is held but explicit acquire semantics are needed
+	 * for a new lock owner so WRITE_ONCE is insufficient.
+	 */
+	xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
+}
+
+static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
+{
+	/* lock->wait_lock is held so the unlock provides release semantics. */
+	WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
 }
 
 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
@@ -106,7 +122,8 @@ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
 		((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
+static __always_inline void
+fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
 {
 	unsigned long owner, *p = (unsigned long *) &lock->owner;
 
@@ -172,8 +189,21 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
 	 * still set.
 	 */
 	owner = READ_ONCE(*p);
-	if (owner & RT_MUTEX_HAS_WAITERS)
-		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	if (owner & RT_MUTEX_HAS_WAITERS) {
+		/*
+		 * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
+		 * why xchg_acquire() is used for updating owner for
+		 * locking and WRITE_ONCE() for unlocking.
+		 *
+		 * WRITE_ONCE() would work for the acquire case too, but
+		 * in case that the lock acquisition failed it might
+		 * force other lockers into the slow path unnecessarily.
+		 */
+		if (acquire_lock)
+			xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
+		else
+			WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	}
 }
 
 /*
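The two hunks above are the heart of the acquire-semantics fix: taking ownership now goes through xchg_acquire() rather than a plain WRITE_ONCE(). A rough C11-atomics analog of that distinction is sketched below; it is illustrative only, and take_ownership()/release_ownership() plus the bare owner word are invented for the sketch, not the kernel's data structures.

/* Illustration only: C11 analog of acquire-on-lock vs. plain-store-on-unlock. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uintptr_t owner;

static void take_ownership(uintptr_t me)
{
	/*
	 * Analogous to xchg_acquire(&lock->owner, ...): the ACQUIRE
	 * ordering keeps the critical section's memory accesses from
	 * being reordered before the point of acquisition.
	 */
	atomic_exchange_explicit(&owner, me, memory_order_acquire);
}

static void release_ownership(void)
{
	/*
	 * Analogous to the WRITE_ONCE() on the unlock side: in the patch,
	 * the surrounding wait_lock already provides release semantics,
	 * so a plain store of the new owner word is enough there.
	 */
	atomic_store_explicit(&owner, 0, memory_order_relaxed);
}

int main(void)
{
	take_ownership((uintptr_t)1);
	release_ownership();
	printf("owner=%lu\n", (unsigned long)atomic_load(&owner));
	return 0;
}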
@@ -208,6 +238,13 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
 		owner = *p;
 	} while (cmpxchg_relaxed(p, owner,
 				 owner | RT_MUTEX_HAS_WAITERS) != owner);
+
+	/*
+	 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
+	 * operations in the event of contention. Ensure the successful
+	 * cmpxchg is visible.
+	 */
+	smp_mb__after_atomic();
 }
 
 /*
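mark_rt_mutex_waiters() above pairs a relaxed cmpxchg loop with a single barrier after success instead of paying ACQUIRE ordering on every retry. A hypothetical C11 rendering of that idiom follows; mark_waiters() and HAS_WAITERS are made-up names, and atomic_thread_fence() stands in for smp_mb__after_atomic().

/* Illustration only: relaxed CAS loop, single fence after success. */
#include <stdatomic.h>
#include <stdint.h>

#define HAS_WAITERS 1UL

static void mark_waiters(_Atomic uintptr_t *owner)
{
	uintptr_t old = atomic_load_explicit(owner, memory_order_relaxed);

	/*
	 * Relaxed CAS: contended retries stay cheap because no ordering
	 * is paid per iteration. On failure, 'old' is refreshed with the
	 * current value, so the loop simply tries again.
	 */
	while (!atomic_compare_exchange_weak_explicit(owner, &old,
						      old | HAS_WAITERS,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;

	/* One full barrier makes the successful update ordered and visible. */
	atomic_thread_fence(memory_order_seq_cst);
}

int main(void)
{
	_Atomic uintptr_t owner = 8;	/* some owner pointer bits */

	mark_waiters(&owner);
	return (int)(atomic_load(&owner) & HAS_WAITERS) - 1;	/* 0 on success */
}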
@@ -1243,7 +1280,7 @@ static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
 	 * try_to_take_rt_mutex() sets the lock waiters bit
 	 * unconditionally. Clean this up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	return ret;
 }
@@ -1604,7 +1641,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit
 	 * unconditionally. We might have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	trace_contention_end(lock, ret);
 
@@ -1719,7 +1756,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
 	 * We might have to fix that up:
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 	debug_rt_mutex_free_waiter(&waiter);
 
 	trace_contention_end(lock, 0);
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -267,7 +267,7 @@ void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
 void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
 {
 	debug_rt_mutex_proxy_unlock(lock);
-	rt_mutex_set_owner(lock, NULL);
+	rt_mutex_clear_owner(lock);
 }
 
 /**
@@ -382,7 +382,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
@@ -438,7 +438,7 @@ bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, false);
 
 	raw_spin_unlock_irq(&lock->wait_lock);
 