Locking changes for v6.11:

Merge tag 'locking-core-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - Jump label fixes, including a perf events fix that originally
   manifested as jump label failures, but was a serialization bug at
   the usage site

 - Mark down_write*() helpers as __always_inline, to improve WCHAN
   debuggability

 - Misc cleanups and fixes

* tag 'locking-core-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rwsem: Add __always_inline annotation to __down_write_common() and inlined callers
  jump_label: Simplify and clarify static_key_fast_inc_cpus_locked()
  jump_label: Clarify condition in static_key_fast_inc_not_disabled()
  jump_label: Fix concurrency issues in static_key_slow_dec()
  perf/x86: Serialize set_attr_rdpmc()
  cleanup: Standardize the header guard define's name

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 151647ab58
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2547,6 +2547,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
 {
+       static DEFINE_MUTEX(rdpmc_mutex);
        unsigned long val;
        ssize_t ret;
 
@@ -2560,6 +2561,8 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
        if (x86_pmu.attr_rdpmc_broken)
                return -ENOTSUPP;
 
+       guard(mutex)(&rdpmc_mutex);
+
        if (val != x86_pmu.attr_rdpmc) {
                /*
                 * Changing into or out of never available or always available,
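set_attr_rdpmc() has several return paths, and the fix serializes all of them with guard(mutex)() from the kernel's <linux/cleanup.h>: the mutex is acquired at the declaration and released automatically when the scope is left, so no return path can leak the lock. A rough userspace sketch of how such a guard can be built on the compiler's cleanup attribute; my_guard(), unlock_cleanup() and the pthread plumbing are illustrative stand-ins, not the kernel's actual implementation:

#include <pthread.h>

/* Called automatically when the guarded variable goes out of scope. */
static void unlock_cleanup(pthread_mutex_t **m)
{
        pthread_mutex_unlock(*m);
}

/* Lock now; arrange for the unlock to run at scope exit. */
#define my_guard(m)                                     \
        pthread_mutex_t *__scope_guard                  \
        __attribute__((cleanup(unlock_cleanup))) =      \
                (pthread_mutex_lock(m), (m))

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared;

int set_value(int v)
{
        my_guard(&lock);        /* locked from here on */
        if (v < 0)
                return -1;      /* unlocked on the early return ... */
        shared = v;
        return 0;               /* ... and on the normal one */
}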
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_GUARDS_H
-#define __LINUX_GUARDS_H
+#ifndef _LINUX_CLEANUP_H
+#define _LINUX_CLEANUP_H
 
 #include <linux/compiler.h>
 
@@ -250,4 +250,4 @@ __DEFINE_LOCK_GUARD_0(_name, _lock)
 { return class_##_name##_lock_ptr(_T); }
 
 
-#endif /* __LINUX_GUARDS_H */
+#endif /* _LINUX_CLEANUP_H */
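The guard macro now matches the header's actual name (cleanup.h, hence _LINUX_CLEANUP_H); __LINUX_GUARDS_H was presumably left over from an earlier name of the file. The usual kernel convention derives the guard from the header's file name, shown here for a hypothetical foo.h:

/* include/linux/foo.h -- hypothetical example of the guard naming
 * convention: _LINUX_<FILE>_H, derived from the file name. */
#ifndef _LINUX_FOO_H
#define _LINUX_FOO_H

/* declarations ... */

#endif /* _LINUX_FOO_H */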
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -131,13 +131,16 @@ bool static_key_fast_inc_not_disabled(struct static_key *key)
        STATIC_KEY_CHECK_USE(key);
        /*
         * Negative key->enabled has a special meaning: it sends
-        * static_key_slow_inc() down the slow path, and it is non-zero
-        * so it counts as "enabled" in jump_label_update().  Note that
-        * atomic_inc_unless_negative() checks >= 0, so roll our own.
+        * static_key_slow_inc/dec() down the slow path, and it is non-zero
+        * so it counts as "enabled" in jump_label_update().
+        *
+        * The INT_MAX overflow condition is either used by the networking
+        * code to reset or detected in the slow path of
+        * static_key_slow_inc_cpuslocked().
         */
        v = atomic_read(&key->enabled);
        do {
-               if (v <= 0 || (v + 1) < 0)
+               if (v <= 0 || v == INT_MAX)
                        return false;
        } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
 
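Two things change here: the overflow test is now spelled `v == INT_MAX` instead of relying on `(v + 1) < 0` signed wraparound, and the comment documents why. The surrounding loop is the standard try_cmpxchg read-modify-write pattern: read once, test, and let a failed compare-and-exchange refresh the snapshot. A portable C11 rendering of the same shape (illustrative, not the kernel's atomic API):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Increment *counter unless it is disabled (<= 0) or saturated
 * (== INT_MAX); the same loop shape as the kernel's
 * atomic_try_cmpxchg() usage, written with C11 atomics. */
static bool inc_not_disabled(atomic_int *counter)
{
        int v = atomic_load(counter);

        do {
                if (v <= 0 || v == INT_MAX)
                        return false;
                /* On failure the CAS writes the current value back
                 * into v and the loop re-checks the condition. */
        } while (!atomic_compare_exchange_weak(counter, &v, v + 1));

        return true;
}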
@@ -150,7 +153,7 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
        lockdep_assert_cpus_held();
 
        /*
-        * Careful if we get concurrent static_key_slow_inc() calls;
+        * Careful if we get concurrent static_key_slow_inc/dec() calls;
         * later calls must wait for the first one to _finish_ the
         * jump_label_update() process.  At the same time, however,
         * the jump_label_update() call below wants to see
@@ -159,22 +162,24 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
        if (static_key_fast_inc_not_disabled(key))
                return true;
 
-       jump_label_lock();
-       if (atomic_read(&key->enabled) == 0) {
-               atomic_set(&key->enabled, -1);
+       guard(mutex)(&jump_label_mutex);
+       /* Try to mark it as 'enabling in progress. */
+       if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
                jump_label_update(key);
                /*
-                * Ensure that if the above cmpxchg loop observes our positive
-                * value, it must also observe all the text changes.
+                * Ensure that when static_key_fast_inc_not_disabled() or
+                * static_key_slow_try_dec() observe the positive value,
+                * they must also observe all the text changes.
                 */
                atomic_set_release(&key->enabled, 1);
        } else {
-               if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) {
-                       jump_label_unlock();
+               /*
+                * While holding the mutex this should never observe
+                * anything else than a value >= 1 and succeed
+                */
+               if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
                        return false;
-               }
        }
-       jump_label_unlock();
        return true;
 }
 
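The slow path relies on a small state protocol in key->enabled: 0 means disabled, a transient -1 marks "first enable in progress", and positive values are the reference count. The first enabler moves 0 to -1 with cmpxchg(), patches the code, then publishes 1 with a release store; fast-path increments refuse non-positive values, which funnels every racing caller into the mutex-serialized slow path. A compact C11 model of that protocol, with patch_code() standing in for jump_label_update() (illustrative, not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative model, not kernel code:
 *    0  disabled
 *   -1  first enable in progress (code patching under way)
 *   >0  enabled; the value is a reference count
 */
static atomic_int enabled;
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

static void patch_code(void) { /* stand-in for jump_label_update() */ }

static bool fast_inc(void)
{
        int v = atomic_load(&enabled);

        do {
                if (v <= 0)             /* disabled or mid-transition */
                        return false;
        } while (!atomic_compare_exchange_weak(&enabled, &v, v + 1));

        return true;
}

static void slow_inc(void)
{
        int zero = 0;

        if (fast_inc())
                return;

        pthread_mutex_lock(&update_lock);
        if (atomic_compare_exchange_strong(&enabled, &zero, -1)) {
                patch_code();
                /* Release: anyone who sees 1 also sees the patched code. */
                atomic_store_explicit(&enabled, 1, memory_order_release);
        } else {
                /* Someone else enabled it; under the lock this succeeds. */
                fast_inc();
        }
        pthread_mutex_unlock(&update_lock);
}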
@@ -247,20 +252,32 @@ EXPORT_SYMBOL_GPL(static_key_disable);
 
 static bool static_key_slow_try_dec(struct static_key *key)
 {
-       int val;
-
-       val = atomic_fetch_add_unless(&key->enabled, -1, 1);
-       if (val == 1)
-               return false;
+       int v;
 
        /*
-        * The negative count check is valid even when a negative
-        * key->enabled is in use by static_key_slow_inc(); a
-        * __static_key_slow_dec() before the first static_key_slow_inc()
-        * returns is unbalanced, because all other static_key_slow_inc()
-        * instances block while the update is in progress.
+        * Go into the slow path if key::enabled is less than or equal than
+        * one. One is valid to shut down the key, anything less than one
+        * is an imbalance, which is handled at the call site.
+        *
+        * That includes the special case of '-1' which is set in
+        * static_key_slow_inc_cpuslocked(), but that's harmless as it is
+        * fully serialized in the slow path below. By the time this task
+        * acquires the jump label lock the value is back to one and the
+        * retry under the lock must succeed.
         */
-       WARN(val < 0, "jump label: negative count!\n");
+       v = atomic_read(&key->enabled);
+       do {
+               /*
+                * Warn about the '-1' case though; since that means a
+                * decrement is concurrent with a first (0->1) increment. IOW
+                * people are trying to disable something that wasn't yet fully
+                * enabled. This suggests an ordering problem on the user side.
+                */
+               WARN_ON_ONCE(v < 0);
+               if (v <= 1)
+                       return false;
+       } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
+
        return true;
 }
 
@@ -271,10 +288,11 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
        if (static_key_slow_try_dec(key))
                return;
 
-       jump_label_lock();
-       if (atomic_dec_and_test(&key->enabled))
+       guard(mutex)(&jump_label_mutex);
+       if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
                jump_label_update(key);
-       jump_label_unlock();
+       else
+               WARN_ON_ONCE(!static_key_slow_try_dec(key));
 }
 
 static void __static_key_slow_dec(struct static_key *key)
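On the disable side the same discipline applies: the lockless path only decrements counts that are strictly greater than one, and the mutex-protected path is the only place the key can make the 1 -> 0 transition and unpatch the code. If the cmpxchg() finds anything other than 1, a fast-path increment raced in, and the retried lockless decrement must then succeed. A C11 model of the dec side, matching the inc-side sketch above (illustrative, not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int enabled;      /* same meaning as in the inc-side sketch */
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

static void unpatch_code(void) { /* stand-in for jump_label_update() */ }

/* Decrement only while the count stays above one; anything <= 1,
 * including the transient -1, is left to the locked slow path. */
static bool try_dec(void)
{
        int v = atomic_load(&enabled);

        do {
                if (v <= 1)
                        return false;
        } while (!atomic_compare_exchange_weak(&enabled, &v, v - 1));

        return true;
}

static void slow_dec(void)
{
        int one = 1;

        if (try_dec())
                return;

        pthread_mutex_lock(&update_lock);
        /* Only the exact 1 -> 0 transition disables the key ... */
        if (atomic_compare_exchange_strong(&enabled, &one, 0))
                unpatch_code();
        else    /* ... otherwise a concurrent inc raced us; retry. */
                try_dec();
        pthread_mutex_unlock(&update_lock);
}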
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1297,7 +1297,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline int __down_write_common(struct rw_semaphore *sem, int state)
+static __always_inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
        int ret = 0;
 
@@ -1310,12 +1310,12 @@ static inline int __down_write_common(struct rw_semaphore *sem, int state)
        return ret;
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static __always_inline void __down_write(struct rw_semaphore *sem)
 {
        __down_write_common(sem, TASK_UNINTERRUPTIBLE);
 }
 
-static inline int __down_write_killable(struct rw_semaphore *sem)
+static __always_inline int __down_write_killable(struct rw_semaphore *sem)
 {
        return __down_write_common(sem, TASK_KILLABLE);
 }
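The WCHAN connection: /proc/<pid>/wchan reports the function in which a blocked task sleeps, and if the compiler emits __down_write_common() out of line, every blocked writer is reported as sitting in that common helper rather than in down_write() or down_write_killable(). Forcing inlining keeps the wchan symbol meaningful. Roughly how the kernel spells the attribute (see include/linux/compiler_attributes.h for the exact definition):

/* Approximation of the kernel's definition; the real one lives in
 * include/linux/compiler_attributes.h. */
#define __always_inline inline __attribute__((__always_inline__))

/* A plain 'static inline' is only a hint and may still be emitted
 * out of line; __always_inline removes that freedom, so a task
 * blocked in down_write() shows up in wchan as down_write(), not
 * as the shared helper. */
static __always_inline int common_helper(int state)
{
        /* ... sleeping slow path ... */
        return state;
}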