Merge tag 'locking-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "Lockdep:
   - Enable PROVE_RAW_LOCK_NESTING with PROVE_LOCKING (Sebastian Andrzej
     Siewior)
   - Add lockdep_cleanup_dead_cpu() (David Woodhouse)

  futexes:
   - Use atomic64_inc_return() in get_inode_sequence_number() (Uros
     Bizjak)
   - Use atomic64_try_cmpxchg_relaxed() in get_inode_sequence_number()
     (Uros Bizjak)

  RT locking:
   - Add sparse annotations for PREEMPT_RT's locking (Sebastian Andrzej
     Siewior)

  spinlocks:
   - Use atomic_try_cmpxchg_release() in osq_unlock() (Uros Bizjak)

  atomics:
   - x86: Use ALT_OUTPUT_SP() for __alternative_atomic64() (Uros Bizjak)
   - x86: Use ALT_OUTPUT_SP() for __arch_{,try_}cmpxchg64_emu() (Uros
     Bizjak)

  KCSAN, seqlocks:
   - Support seqcount_latch_t (Marco Elver)

  <linux/cleanup.h>:
   - Add if_not_guard() conditional guard helper (David Lechner)
   - Adjust scoped_guard() macros to avoid potential warning (Przemek
     Kitszel)
   - Remove address space of returned pointer (Uros Bizjak)

  WW mutexes:
   - locking/ww_mutex: Adjust to lockdep nest_lock requirements (Thomas
     Hellström)

  Rust integration:
   - Fix raw_spin_lock initialization on PREEMPT_RT (Eder Zulian)

  Misc cleanups & fixes:
   - lockdep: Fix wait-type check related warnings (Ahmed Ehab)
   - lockdep: Use info level for initial info messages (Jiri Slaby)
   - spinlocks: Make __raw_* lock ops static (Geert Uytterhoeven)
   - pvqspinlock: Convert fields of 'enum vcpu_state' to uppercase
     (Qiuxu Zhuo)
   - iio: magnetometer: Fix if () scoped_guard() formatting (Stephen
     Rothwell)
   - rtmutex: Fix misleading comment (Peter Zijlstra)
   - percpu-rw-semaphores: Fix grammar in percpu-rw-semaphore.rst (Xiu
     Jianfeng)"

* tag 'locking-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (29 commits)
  locking/Documentation: Fix grammar in percpu-rw-semaphore.rst
  iio: magnetometer: fix if () scoped_guard() formatting
  rust: helpers: Avoid raw_spin_lock initialization for PREEMPT_RT
  kcsan, seqlock: Fix incorrect assumption in read_seqbegin()
  seqlock, treewide: Switch to non-raw seqcount_latch interface
  kcsan, seqlock: Support seqcount_latch_t
  time/sched_clock: Broaden sched_clock()'s instrumentation coverage
  time/sched_clock: Swap update_clock_read_data() latch writes
  locking/atomic/x86: Use ALT_OUTPUT_SP() for __arch_{,try_}cmpxchg64_emu()
  locking/atomic/x86: Use ALT_OUTPUT_SP() for __alternative_atomic64()
  cleanup: Add conditional guard helper
  cleanup: Adjust scoped_guard() macros to avoid potential warning
  locking/osq_lock: Use atomic_try_cmpxchg_release() in osq_unlock()
  cleanup: Remove address space of returned pointer
  locking/rtmutex: Fix misleading comment
  locking/rt: Annotate unlock followed by lock for sparse.
  locking/rt: Add sparse annotation for RCU.
  locking/rt: Remove one __cond_lock() in RT's spin_trylock_irqsave()
  locking/rt: Add sparse annotation PREEMPT_RT's sleeping locks.
  locking/pvqspinlock: Convert fields of 'enum vcpu_state' to uppercase
  ...
Committed by Linus Torvalds on 2024-11-19 12:43:11 -08:00 in commit 364eeb79a2.
30 changed files with 354 additions and 160 deletions.

@@ -16,8 +16,8 @@ writing is very expensive, it calls synchronize_rcu() that can take
hundreds of milliseconds.
The lock is declared with "struct percpu_rw_semaphore" type.
The lock is initialized percpu_init_rwsem, it returns 0 on success and
-ENOMEM on allocation failure.
The lock is initialized with percpu_init_rwsem, it returns 0 on success
and -ENOMEM on allocation failure.
The lock must be freed with percpu_free_rwsem to avoid memory leak.
The lock is locked for read with percpu_down_read, percpu_up_read and


@@ -153,7 +153,7 @@ Use seqcount_latch_t when the write side sections cannot be protected
from interruption by readers. This is typically the case when the read
side can be invoked from NMI handlers.
Check `raw_write_seqcount_latch()` for more information.
Check `write_seqcount_latch()` for more information.
.. _seqlock_t:


@@ -51,7 +51,8 @@ static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
asm volatile("call %c[func]" \
: out : [func] "i" (atomic64_##g##_cx8), ## in)
: ALT_OUTPUT_SP(out) \
: [func] "i" (atomic64_##g##_cx8), ## in)
#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else


@@ -94,7 +94,7 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
asm volatile(ALTERNATIVE(_lock_loc \
"call cmpxchg8b_emu", \
_lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
: "+a" (o.low), "+d" (o.high) \
: ALT_OUTPUT_SP("+a" (o.low), "+d" (o.high)) \
: "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
: "memory"); \
\
@@ -123,8 +123,8 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
"call cmpxchg8b_emu", \
_lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
CC_SET(e) \
: CC_OUT(e) (ret), \
"+a" (o.low), "+d" (o.high) \
: ALT_OUTPUT_SP(CC_OUT(e) (ret), \
"+a" (o.low), "+d" (o.high)) \
: "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
: "memory"); \
\


@@ -174,10 +174,11 @@ static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long ts
c2n = per_cpu_ptr(&cyc2ns, cpu);
raw_write_seqcount_latch(&c2n->seq);
write_seqcount_latch_begin(&c2n->seq);
c2n->data[0] = data;
raw_write_seqcount_latch(&c2n->seq);
write_seqcount_latch(&c2n->seq);
c2n->data[1] = data;
write_seqcount_latch_end(&c2n->seq);
}
static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)


@@ -312,10 +312,11 @@ static int af8133j_set_scale(struct af8133j_data *data,
* When suspended, just store the new range to data->range to be
* applied later during power up.
*/
if (!pm_runtime_status_suspended(dev))
if (!pm_runtime_status_suspended(dev)) {
scoped_guard(mutex, &data->mutex)
ret = regmap_write(data->regmap,
AF8133J_REG_RANGE, range);
}
pm_runtime_enable(dev);


@@ -273,6 +273,12 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
* an anonymous instance of the (guard) class, not recommended for
* conditional locks.
*
* if_not_guard(name, args...) { <error handling> }:
* convenience macro for conditional guards that calls the statement that
* follows only if the lock was not acquired (typically an error return).
*
* Only for conditional locks.
*
* scoped_guard (name, args...) { }:
* similar to CLASS(name, scope)(args), except the variable (with the
explicit name 'scope') is declared in a for-loop such that its scope is
@@ -285,14 +291,20 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
* similar to scoped_guard(), except it does fail when the lock
* acquire fails.
*
* Only for conditional locks.
*/
#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \
static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
{ return *_T; }
{ return (void *)(__force unsigned long)*_T; }
#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
EXTEND_CLASS(_name, _ext, \
({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
class_##_name##_t _T) \
@@ -303,16 +315,48 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
CLASS(_name, __UNIQUE_ID(guard))
#define __guard_ptr(_name) class_##_name##_lock_ptr
#define __is_cond_ptr(_name) class_##_name##_is_conditional
/*
* Helper macro for scoped_guard().
*
* Note that the "!__is_cond_ptr(_name)" part of the condition ensures that
* compiler would be sure that for the unconditional locks the body of the
* loop (caller-provided code glued to the else clause) could not be skipped.
* It is needed because the other part - "__guard_ptr(_name)(&scope)" - is too
* hard to deduce (even if could be proven true for unconditional locks).
*/
#define __scoped_guard(_name, _label, args...) \
for (CLASS(_name, scope)(args); \
__guard_ptr(_name)(&scope) || !__is_cond_ptr(_name); \
({ goto _label; })) \
if (0) { \
_label: \
break; \
} else
#define scoped_guard(_name, args...) \
for (CLASS(_name, scope)(args), \
*done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
__scoped_guard(_name, __UNIQUE_ID(label), args)
#define __scoped_cond_guard(_name, _fail, _label, args...) \
for (CLASS(_name, scope)(args); true; ({ goto _label; })) \
if (!__guard_ptr(_name)(&scope)) { \
BUILD_BUG_ON(!__is_cond_ptr(_name)); \
_fail; \
_label: \
break; \
} else
#define scoped_cond_guard(_name, _fail, args...) \
for (CLASS(_name, scope)(args), \
*done = NULL; !done; done = (void *)1) \
if (!__guard_ptr(_name)(&scope)) _fail; \
else
__scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
#define __if_not_guard(_name, _id, args...) \
BUILD_BUG_ON(!__is_cond_ptr(_name)); \
CLASS(_name, _id)(args); \
if (!__guard_ptr(_name)(&_id))
#define if_not_guard(_name, args...) \
__if_not_guard(_name, __UNIQUE_ID(guard), args)
/*
* Additional helper macros for generating lock guards with types, either for
@@ -347,7 +391,7 @@ static inline void class_##_name##_destructor(class_##_name##_t *_T) \
\
static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
{ \
return _T->lock; \
return (void *)(__force unsigned long)_T->lock; \
}
@@ -369,14 +413,17 @@ static inline class_##_name##_t class_##_name##_constructor(void) \
}
#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \
__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
__DEFINE_LOCK_GUARD_0(_name, _lock)
#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
EXTEND_CLASS(_name, _ext, \
({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
if (_T->lock && !(_condlock)) _T->lock = NULL; \
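
A minimal usage sketch of the new helpers, assuming the stock mutex guard
classes from <linux/mutex.h> (demo_update(), demo_read(), demo_lock and
demo_value are hypothetical names used for illustration only):

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);             /* hypothetical lock */
    static int demo_value;                      /* hypothetical shared data */

    /*
     * Conditional acquire: if_not_guard() executes the statement that
     * follows only when the lock was NOT taken; otherwise the mutex is
     * held until the end of the enclosing scope.
     */
    static int demo_update(int v)
    {
            if_not_guard(mutex_intr, &demo_lock)
                    return -EINTR;

            demo_value = v;
            return 0;                           /* demo_lock dropped here */
    }

    /*
     * Unconditional guard: the reworked scoped_guard() still bounds the
     * lock to the compound statement that follows it.
     */
    static int demo_read(void)
    {
            int v;

            scoped_guard(mutex, &demo_lock)
                    v = demo_value;

            return v;
    }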


@@ -18,6 +18,8 @@
#include <asm/irqflags.h>
#include <asm/percpu.h>
struct task_struct;
/* Currently lockdep_softirqs_on/off is used only by lockdep */
#ifdef CONFIG_PROVE_LOCKING
extern void lockdep_softirqs_on(unsigned long ip);
@@ -25,12 +27,16 @@
extern void lockdep_hardirqs_on_prepare(void);
extern void lockdep_hardirqs_on(unsigned long ip);
extern void lockdep_hardirqs_off(unsigned long ip);
extern void lockdep_cleanup_dead_cpu(unsigned int cpu,
struct task_struct *idle);
#else
static inline void lockdep_softirqs_on(unsigned long ip) { }
static inline void lockdep_softirqs_off(unsigned long ip) { }
static inline void lockdep_hardirqs_on_prepare(void) { }
static inline void lockdep_hardirqs_on(unsigned long ip) { }
static inline void lockdep_hardirqs_off(unsigned long ip) { }
static inline void lockdep_cleanup_dead_cpu(unsigned int cpu,
struct task_struct *idle) {}
#endif
#ifdef CONFIG_TRACE_IRQFLAGS


@@ -173,7 +173,7 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
(lock)->dep_map.lock_type)
#define lockdep_set_subclass(lock, sub) \
lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\
(lock)->dep_map.wait_type_inner, \
(lock)->dep_map.wait_type_outer, \
(lock)->dep_map.lock_type)


@@ -14,7 +14,7 @@
*
* If we need to allow unconditional lookups (say as required for NMI context
* usage) we need a more complex setup; this data structure provides this by
* employing the latch technique -- see @raw_write_seqcount_latch -- to
* employing the latch technique -- see @write_seqcount_latch_begin -- to
* implement a latched RB-tree which does allow for unconditional lookups by
* virtue of always having (at least) one stable copy of the tree.
*
@@ -132,7 +132,7 @@ __lt_find(void *key, struct latch_tree_root *ltr, int idx,
* @ops: operators defining the node order
*
* It inserts @node into @root in an ordered fashion such that we can always
* observe one complete tree. See the comment for raw_write_seqcount_latch().
* observe one complete tree. See the comment for write_seqcount_latch_begin().
*
* The inserts use rcu_assign_pointer() to publish the element such that the
* tree structure is stored before we can observe the new @node.
@@ -145,10 +145,11 @@ latch_tree_insert(struct latch_tree_node *node,
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
raw_write_seqcount_latch(&root->seq);
write_seqcount_latch_begin(&root->seq);
__lt_insert(node, root, 0, ops->less);
raw_write_seqcount_latch(&root->seq);
write_seqcount_latch(&root->seq);
__lt_insert(node, root, 1, ops->less);
write_seqcount_latch_end(&root->seq);
}
/**
@@ -159,7 +160,7 @@ latch_tree_insert(struct latch_tree_node *node,
*
* Removes @node from the trees @root in an ordered fashion such that we can
* always observe one complete tree. See the comment for
* raw_write_seqcount_latch().
* write_seqcount_latch_begin().
*
* It is assumed that @node will observe one RCU quiescent state before being
* reused or freed.
@@ -172,10 +173,11 @@ latch_tree_erase(struct latch_tree_node *node,
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
raw_write_seqcount_latch(&root->seq);
write_seqcount_latch_begin(&root->seq);
__lt_erase(node, root, 0);
raw_write_seqcount_latch(&root->seq);
write_seqcount_latch(&root->seq);
__lt_erase(node, root, 1);
write_seqcount_latch_end(&root->seq);
}
/**
@@ -204,9 +206,9 @@ latch_tree_find(void *key, struct latch_tree_root *root,
unsigned int seq;
do {
seq = raw_read_seqcount_latch(&root->seq);
seq = read_seqcount_latch(&root->seq);
node = __lt_find(key, root, seq & 1, ops->comp);
} while (raw_read_seqcount_latch_retry(&root->seq, seq));
} while (read_seqcount_latch_retry(&root->seq, seq));
return node;
}


@@ -24,13 +24,13 @@ do { \
__rt_rwlock_init(rwl, #rwl, &__key); \
} while (0)
extern void rt_read_lock(rwlock_t *rwlock);
extern void rt_read_lock(rwlock_t *rwlock) __acquires(rwlock);
extern int rt_read_trylock(rwlock_t *rwlock);
extern void rt_read_unlock(rwlock_t *rwlock);
extern void rt_write_lock(rwlock_t *rwlock);
extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass);
extern void rt_read_unlock(rwlock_t *rwlock) __releases(rwlock);
extern void rt_write_lock(rwlock_t *rwlock) __acquires(rwlock);
extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(rwlock);
extern int rt_write_trylock(rwlock_t *rwlock);
extern void rt_write_unlock(rwlock_t *rwlock);
extern void rt_write_unlock(rwlock_t *rwlock) __releases(rwlock);
static __always_inline void read_lock(rwlock_t *rwlock)
{
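
(Background: under sparse's __CHECKER__, __acquires() and __releases()
expand to context-tracking attributes, roughly as sketched below from
<linux/compiler_types.h>, which lets "make C=1" flag code paths that
acquire a lock without releasing it, or vice versa:)

    #define __acquires(x)   __attribute__((context(x, 0, 1)))
    #define __releases(x)   __attribute__((context(x, 1, 0)))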


@@ -621,6 +621,23 @@ static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *
return READ_ONCE(s->seqcount.sequence);
}
/**
* read_seqcount_latch() - pick even/odd latch data copy
* @s: Pointer to seqcount_latch_t
*
* See write_seqcount_latch() for details and a full reader/writer usage
* example.
*
* Return: sequence counter raw value. Use the lowest bit as an index for
* picking which data copy to read. The full counter must then be checked
* with read_seqcount_latch_retry().
*/
static __always_inline unsigned read_seqcount_latch(const seqcount_latch_t *s)
{
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return raw_read_seqcount_latch(s);
}
/**
* raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
* @s: Pointer to seqcount_latch_t
@@ -635,9 +652,34 @@
return unlikely(READ_ONCE(s->seqcount.sequence) != start);
}
/**
* read_seqcount_latch_retry() - end a seqcount_latch_t read section
* @s: Pointer to seqcount_latch_t
* @start: count, from read_seqcount_latch()
*
* Return: true if a read section retry is required, else false
*/
static __always_inline int
read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
kcsan_atomic_next(0);
return raw_read_seqcount_latch_retry(s, start);
}
/**
* raw_write_seqcount_latch() - redirect latch readers to even/odd copy
* @s: Pointer to seqcount_latch_t
*/
static __always_inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
smp_wmb(); /* prior stores before incrementing "sequence" */
s->seqcount.sequence++;
smp_wmb(); /* increment "sequence" before following stores */
}
/**
* write_seqcount_latch_begin() - redirect latch readers to odd copy
* @s: Pointer to seqcount_latch_t
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
@@ -665,17 +707,11 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
*
* void latch_modify(struct latch_struct *latch, ...)
* {
* smp_wmb(); // Ensure that the last data[1] update is visible
* latch->seq.sequence++;
* smp_wmb(); // Ensure that the seqcount update is visible
*
* write_seqcount_latch_begin(&latch->seq);
* modify(latch->data[0], ...);
*
* smp_wmb(); // Ensure that the data[0] update is visible
* latch->seq.sequence++;
* smp_wmb(); // Ensure that the seqcount update is visible
*
* write_seqcount_latch(&latch->seq);
* modify(latch->data[1], ...);
* write_seqcount_latch_end(&latch->seq);
* }
*
* The query will have a form like::
@@ -686,13 +722,13 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
* unsigned seq, idx;
*
* do {
* seq = raw_read_seqcount_latch(&latch->seq);
* seq = read_seqcount_latch(&latch->seq);
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
*
* // This includes needed smp_rmb()
* } while (raw_read_seqcount_latch_retry(&latch->seq, seq));
* } while (read_seqcount_latch_retry(&latch->seq, seq));
*
* return entry;
* }
@@ -716,11 +752,31 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
* When data is a dynamic data structure; one should use regular RCU
* patterns to manage the lifetimes of the objects within.
*/
static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
static __always_inline void write_seqcount_latch_begin(seqcount_latch_t *s)
{
smp_wmb(); /* prior stores before incrementing "sequence" */
s->seqcount.sequence++;
smp_wmb(); /* increment "sequence" before following stores */
kcsan_nestable_atomic_begin();
raw_write_seqcount_latch(s);
}
/**
* write_seqcount_latch() - redirect latch readers to even copy
* @s: Pointer to seqcount_latch_t
*/
static __always_inline void write_seqcount_latch(seqcount_latch_t *s)
{
raw_write_seqcount_latch(s);
}
/**
* write_seqcount_latch_end() - end a seqcount_latch_t write section
* @s: Pointer to seqcount_latch_t
*
* Marks the end of a seqcount_latch_t writer section, after all copies of the
* latch-protected data have been updated.
*/
static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
{
kcsan_nestable_atomic_end();
}
#define __SEQLOCK_UNLOCKED(lockname) \
@@ -754,11 +810,7 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
unsigned ret = read_seqcount_begin(&sl->seqcount);
kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
kcsan_flat_atomic_begin();
return ret;
return read_seqcount_begin(&sl->seqcount);
}
/**
@@ -774,12 +826,6 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
*/
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
/*
* Assume not nested: read_seqretry() may be called multiple times when
* completing read critical section.
*/
kcsan_flat_atomic_end();
return read_seqcount_retry(&sl->seqcount, start);
}
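
For orientation, a compact sketch of the renamed non-raw latch API defined
above (struct latch_data and its fields are hypothetical; the real in-tree
users are converted in the diffs below):

    #include <linux/seqlock.h>

    struct latch_data {
            seqcount_latch_t seq;
            int copy[2];                    /* two versions, picked by seq & 1 */
    };

    static void latch_write(struct latch_data *ld, int val)
    {
            write_seqcount_latch_begin(&ld->seq);   /* readers -> copy[1] */
            ld->copy[0] = val;
            write_seqcount_latch(&ld->seq);         /* readers -> copy[0] */
            ld->copy[1] = val;
            write_seqcount_latch_end(&ld->seq);     /* close KCSAN write section */
    }

    static int latch_read(struct latch_data *ld)
    {
            unsigned int seq;
            int val;

            do {
                    seq = read_seqcount_latch(&ld->seq);
                    val = READ_ONCE(ld->copy[seq & 1]);
            } while (read_seqcount_latch_retry(&ld->seq, seq));

            return val;
    }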


@@ -16,26 +16,25 @@ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
}
#endif
#define spin_lock_init(slock) \
#define __spin_lock_init(slock, name, key, percpu) \
do { \
static struct lock_class_key __key; \
\
rt_mutex_base_init(&(slock)->lock); \
__rt_spin_lock_init(slock, #slock, &__key, false); \
__rt_spin_lock_init(slock, name, key, percpu); \
} while (0)
#define local_spin_lock_init(slock) \
#define _spin_lock_init(slock, percpu) \
do { \
static struct lock_class_key __key; \
\
rt_mutex_base_init(&(slock)->lock); \
__rt_spin_lock_init(slock, #slock, &__key, true); \
__spin_lock_init(slock, #slock, &__key, percpu); \
} while (0)
extern void rt_spin_lock(spinlock_t *lock);
extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
extern void rt_spin_unlock(spinlock_t *lock);
#define spin_lock_init(slock) _spin_lock_init(slock, false)
#define local_spin_lock_init(slock) _spin_lock_init(slock, true)
extern void rt_spin_lock(spinlock_t *lock) __acquires(lock);
extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock);
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
extern void rt_spin_unlock(spinlock_t *lock) __releases(lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
extern int rt_spin_trylock_bh(spinlock_t *lock);
extern int rt_spin_trylock(spinlock_t *lock);
@@ -132,7 +131,7 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
#define spin_trylock_irq(lock) \
__cond_lock(lock, rt_spin_trylock(lock))
#define __spin_trylock_irqsave(lock, flags) \
#define spin_trylock_irqsave(lock, flags) \
({ \
int __locked; \
\
@@ -142,9 +141,6 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
__locked; \
})
#define spin_trylock_irqsave(lock, flags) \
__cond_lock(lock, __spin_trylock_irqsave(lock, flags))
#define spin_is_contended(lock) (((void)(lock), 0))
static inline int spin_is_locked(spinlock_t *lock)


@@ -65,6 +65,16 @@ struct ww_acquire_ctx {
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
/**
* @first_lock_dep_map: fake lockdep_map for first locked ww_mutex.
*
* lockdep requires the lockdep_map for the first locked ww_mutex
* in a ww transaction to remain in memory until all ww_mutexes of
* the transaction have been unlocked. Ensure this by keeping a
* fake locked ww_mutex lockdep map between ww_acquire_init() and
* ww_acquire_fini().
*/
struct lockdep_map first_lock_dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
unsigned int deadlock_inject_interval;
@@ -146,7 +156,10 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
&ww_class->acquire_key, 0);
lockdep_init_map(&ctx->first_lock_dep_map, ww_class->mutex_name,
&ww_class->mutex_key, 0);
mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
mutex_acquire_nest(&ctx->first_lock_dep_map, 0, 0, &ctx->dep_map, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
ctx->deadlock_inject_interval = 1;
@@ -185,6 +198,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
mutex_release(&ctx->first_lock_dep_map, _THIS_IP_);
mutex_release(&ctx->dep_map, _THIS_IP_);
#endif
#ifdef DEBUG_WW_MUTEXES
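
For context, the fake lockdep map added above simply brackets the normal ww
transaction lifecycle, roughly as in this sketch (my_class, mtx_a and mtx_b
are hypothetical; the -EDEADLK back-off path is omitted for brevity):

    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(my_class);
    static struct ww_mutex mtx_a, mtx_b;        /* ww_mutex_init() elsewhere */

    static int lock_pair(void)
    {
            struct ww_acquire_ctx ctx;
            int ret;

            ww_acquire_init(&ctx, &my_class);   /* acquires first_lock_dep_map */

            ret = ww_mutex_lock(&mtx_a, &ctx);
            if (ret)
                    goto fini;
            ret = ww_mutex_lock(&mtx_b, &ctx);
            if (ret) {
                    ww_mutex_unlock(&mtx_a);
                    goto fini;
            }

            /* ... both locks held ... */

            ww_mutex_unlock(&mtx_b);
            ww_mutex_unlock(&mtx_a);
    fini:
            ww_acquire_fini(&ctx);              /* after all unlocks */
            return ret;
    }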


@@ -1338,6 +1338,7 @@ static int takedown_cpu(unsigned int cpu)
cpuhp_bp_sync_dead(cpu);
lockdep_cleanup_dead_cpu(cpu, idle_thread_get(cpu));
tick_cleanup_dead_cpu(cpu);
/*


@@ -181,12 +181,12 @@ static u64 get_inode_sequence_number(struct inode *inode)
return old;
for (;;) {
u64 new = atomic64_add_return(1, &i_seq);
u64 new = atomic64_inc_return(&i_seq);
if (WARN_ON_ONCE(!new))
continue;
old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
if (old)
old = 0;
if (!atomic64_try_cmpxchg_relaxed(&inode->i_sequence, &old, new))
return old;
return new;
}
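
The rewritten loop leans on the try_cmpxchg() calling convention, in which a
failed exchange writes the observed value back through the 'old' pointer. A
minimal sketch of the idiom (install_once() is an illustrative name, not a
kernel API):

    #include <linux/atomic.h>

    /* Install 'new' only if *v is still zero; return whichever value won. */
    static u64 install_once(atomic64_t *v, u64 new)
    {
            u64 old = 0;

            if (!atomic64_try_cmpxchg_relaxed(v, &old, new))
                    return old;     /* raced: 'old' holds the winner's value */
            return new;             /* we installed it */
    }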


@@ -4586,6 +4586,30 @@ void lockdep_softirqs_off(unsigned long ip)
debug_atomic_inc(redundant_softirqs_off);
}
/**
* lockdep_cleanup_dead_cpu - Ensure CPU lockdep state is cleanly stopped
*
* @cpu: index of offlined CPU
* @idle: task pointer for offlined CPU's idle thread
*
* Invoked after the CPU is dead. Ensures that the tracing infrastructure
* is left in a suitable state for the CPU to be subsequently brought
* online again.
*/
void lockdep_cleanup_dead_cpu(unsigned int cpu, struct task_struct *idle)
{
if (unlikely(!debug_locks))
return;
if (unlikely(per_cpu(hardirqs_enabled, cpu))) {
pr_warn("CPU %u left hardirqs enabled!", cpu);
if (idle)
print_irqtrace_events(idle);
/* Clean it up for when the CPU comes online again. */
per_cpu(hardirqs_enabled, cpu) = 0;
}
}
static int
mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
@@ -6576,17 +6600,17 @@ EXPORT_SYMBOL_GPL(lockdep_unregister_key);
void __init lockdep_init(void)
{
printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
pr_info("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
pr_info("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
pr_info("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
pr_info("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
pr_info("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
pr_info("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
pr_info("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
pr_info("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
printk(" memory used by lock dependency info: %zu kB\n",
pr_info(" memory used by lock dependency info: %zu kB\n",
(sizeof(lock_classes) +
sizeof(lock_classes_in_use) +
sizeof(classhash_table) +
@@ -6604,12 +6628,12 @@ void __init lockdep_init(void)
);
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
printk(" memory used for stack traces: %zu kB\n",
pr_info(" memory used for stack traces: %zu kB\n",
(sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024
);
#endif
printk(" per task-struct memory footprint: %zu bytes\n",
pr_info(" per task-struct memory footprint: %zu bytes\n",
sizeof(((struct task_struct *)NULL)->held_locks));
}


@@ -215,8 +215,7 @@ void osq_unlock(struct optimistic_spin_queue *lock)
/*
* Fast path for the uncontended case.
*/
if (likely(atomic_cmpxchg_release(&lock->tail, curr,
OSQ_UNLOCKED_VAL) == curr))
if (atomic_try_cmpxchg_release(&lock->tail, &curr, OSQ_UNLOCKED_VAL))
return;
/*


@@ -38,13 +38,13 @@
#define PV_PREV_CHECK_MASK 0xff
/*
* Queue node uses: vcpu_running & vcpu_halted.
* Queue head uses: vcpu_running & vcpu_hashed.
* Queue node uses: VCPU_RUNNING & VCPU_HALTED.
* Queue head uses: VCPU_RUNNING & VCPU_HASHED.
*/
enum vcpu_state {
vcpu_running = 0,
vcpu_halted, /* Used only in pv_wait_node */
vcpu_hashed, /* = pv_hash'ed + vcpu_halted */
VCPU_RUNNING = 0,
VCPU_HALTED, /* Used only in pv_wait_node */
VCPU_HASHED, /* = pv_hash'ed + VCPU_HALTED */
};
struct pv_node {
@@ -266,7 +266,7 @@ pv_wait_early(struct pv_node *prev, int loop)
if ((loop & PV_PREV_CHECK_MASK) != 0)
return false;
return READ_ONCE(prev->state) != vcpu_running;
return READ_ONCE(prev->state) != VCPU_RUNNING;
}
/*
@@ -279,7 +279,7 @@ static void pv_init_node(struct mcs_spinlock *node)
BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));
pn->cpu = smp_processor_id();
pn->state = vcpu_running;
pn->state = VCPU_RUNNING;
}
/*
@@ -308,26 +308,26 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
/*
* Order pn->state vs pn->locked thusly:
*
* [S] pn->state = vcpu_halted [S] next->locked = 1
* [S] pn->state = VCPU_HALTED [S] next->locked = 1
* MB MB
* [L] pn->locked [RmW] pn->state = vcpu_hashed
* [L] pn->locked [RmW] pn->state = VCPU_HASHED
*
* Matches the cmpxchg() from pv_kick_node().
*/
smp_store_mb(pn->state, vcpu_halted);
smp_store_mb(pn->state, VCPU_HALTED);
if (!READ_ONCE(node->locked)) {
lockevent_inc(pv_wait_node);
lockevent_cond_inc(pv_wait_early, wait_early);
pv_wait(&pn->state, vcpu_halted);
pv_wait(&pn->state, VCPU_HALTED);
}
/*
* If pv_kick_node() changed us to vcpu_hashed, retain that
* If pv_kick_node() changed us to VCPU_HASHED, retain that
* value so that pv_wait_head_or_lock() knows to not also try
* to hash this lock.
*/
cmpxchg(&pn->state, vcpu_halted, vcpu_running);
cmpxchg(&pn->state, VCPU_HALTED, VCPU_RUNNING);
/*
* If the locked flag is still not set after wakeup, it is a
@@ -357,7 +357,7 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
struct pv_node *pn = (struct pv_node *)node;
u8 old = vcpu_halted;
u8 old = VCPU_HALTED;
/*
* If the vCPU is indeed halted, advance its state to match that of
* pv_wait_node(). If OTOH this fails, the vCPU was running and will
@@ -374,7 +374,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
* subsequent writes.
*/
smp_mb__before_atomic();
if (!try_cmpxchg_relaxed(&pn->state, &old, vcpu_hashed))
if (!try_cmpxchg_relaxed(&pn->state, &old, VCPU_HASHED))
return;
/*
@@ -407,7 +407,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
* If pv_kick_node() already advanced our state, we don't need to
* insert ourselves into the hash table anymore.
*/
if (READ_ONCE(pn->state) == vcpu_hashed)
if (READ_ONCE(pn->state) == VCPU_HASHED)
lp = (struct qspinlock **)1;
/*
@@ -420,7 +420,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
* Set correct vCPU state to be used by queue node wait-early
* mechanism.
*/
WRITE_ONCE(pn->state, vcpu_running);
WRITE_ONCE(pn->state, VCPU_RUNNING);
/*
* Set the pending bit in the active lock spinning loop to
@@ -460,7 +460,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
goto gotlock;
}
}
WRITE_ONCE(pn->state, vcpu_hashed);
WRITE_ONCE(pn->state, VCPU_HASHED);
lockevent_inc(pv_wait_head);
lockevent_cond_inc(pv_wait_again, waitcnt);
pv_wait(&lock->locked, _Q_SLOW_VAL);


@@ -1601,6 +1601,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
unsigned int state,
struct hrtimer_sleeper *timeout,
struct rt_mutex_waiter *waiter)
__releases(&lock->wait_lock) __acquires(&lock->wait_lock)
{
struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
struct task_struct *owner;
@@ -1805,6 +1806,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
* @lock: The underlying RT mutex
*/
static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
__releases(&lock->wait_lock) __acquires(&lock->wait_lock)
{
struct rt_mutex_waiter waiter;
struct task_struct *owner;


@@ -175,10 +175,10 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
}
/*
* We've already deboosted, mark_wakeup_next_waiter() will
* retain preempt_disabled when we drop the wait_lock, to
* avoid inversion prior to the wakeup. preempt_disable()
* therein pairs with rt_mutex_postunlock().
* mark_wakeup_next_waiter() deboosts and retains preemption
* disabled when dropping the wait_lock, to avoid inversion prior
* to the wakeup. preempt_disable() therein pairs with the
* preempt_enable() in rt_mutex_postunlock().
*/
mark_wakeup_next_waiter(wqh, lock);


@@ -65,7 +65,7 @@ EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
* towards that other CPU that it should break the lock ASAP.
*/
#define BUILD_LOCK_OPS(op, locktype) \
void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
static void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
{ \
for (;;) { \
preempt_disable(); \
@@ -77,7 +77,7 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
} \
} \
\
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
static unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -95,12 +95,12 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
return flags; \
} \
\
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
static void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
{ \
_raw_##op##_lock_irqsave(lock); \
} \
\
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
static void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
{ \
unsigned long flags; \
\


@@ -51,7 +51,7 @@ static __always_inline void __rt_spin_lock(spinlock_t *lock)
migrate_disable();
}
void __sched rt_spin_lock(spinlock_t *lock)
void __sched rt_spin_lock(spinlock_t *lock) __acquires(RCU)
{
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
__rt_spin_lock(lock);
@@ -75,7 +75,7 @@ void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif
void __sched rt_spin_unlock(spinlock_t *lock)
void __sched rt_spin_unlock(spinlock_t *lock) __releases(RCU)
{
spin_release(&lock->dep_map, _RET_IP_);
migrate_enable();
@@ -225,7 +225,7 @@ int __sched rt_write_trylock(rwlock_t *rwlock)
}
EXPORT_SYMBOL(rt_write_trylock);
void __sched rt_read_lock(rwlock_t *rwlock)
void __sched rt_read_lock(rwlock_t *rwlock) __acquires(RCU)
{
rtlock_might_resched();
rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
@@ -235,7 +235,7 @@ void __sched rt_read_lock(rwlock_t *rwlock)
}
EXPORT_SYMBOL(rt_read_lock);
void __sched rt_write_lock(rwlock_t *rwlock)
void __sched rt_write_lock(rwlock_t *rwlock) __acquires(RCU)
{
rtlock_might_resched();
rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
@@ -246,7 +246,7 @@ void __sched rt_write_lock(rwlock_t *rwlock)
EXPORT_SYMBOL(rt_write_lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(RCU)
{
rtlock_might_resched();
rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
@@ -257,7 +257,7 @@ void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
EXPORT_SYMBOL(rt_write_lock_nested);
#endif
void __sched rt_read_unlock(rwlock_t *rwlock)
void __sched rt_read_unlock(rwlock_t *rwlock) __releases(RCU)
{
rwlock_release(&rwlock->dep_map, _RET_IP_);
migrate_enable();
@@ -266,7 +266,7 @@ void __sched rt_read_unlock(rwlock_t *rwlock)
}
EXPORT_SYMBOL(rt_read_unlock);
void __sched rt_write_unlock(rwlock_t *rwlock)
void __sched rt_write_unlock(rwlock_t *rwlock) __releases(RCU)
{
rwlock_release(&rwlock->dep_map, _RET_IP_);
rcu_read_unlock();


@@ -62,6 +62,7 @@ static int __test_mutex(unsigned int flags)
int ret;
ww_mutex_init(&mtx.mutex, &ww_class);
if (flags & TEST_MTX_CTX)
ww_acquire_init(&ctx, &ww_class);
INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
@@ -90,6 +91,7 @@ static int __test_mutex(unsigned int flags)
ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
}
ww_mutex_unlock(&mtx.mutex);
if (flags & TEST_MTX_CTX)
ww_acquire_fini(&ctx);
if (ret) {
@@ -679,7 +681,7 @@ static int __init test_ww_mutex_init(void)
if (ret)
return ret;
ret = stress(2047, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
ret = stress(2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
if (ret)
return ret;


@@ -560,10 +560,11 @@ bool printk_percpu_data_ready(void)
/* Must be called under syslog_lock. */
static void latched_seq_write(struct latched_seq *ls, u64 val)
{
raw_write_seqcount_latch(&ls->latch);
write_seqcount_latch_begin(&ls->latch);
ls->val[0] = val;
raw_write_seqcount_latch(&ls->latch);
write_seqcount_latch(&ls->latch);
ls->val[1] = val;
write_seqcount_latch_end(&ls->latch);
}
/* Can be called from any context. */
@@ -574,10 +575,10 @@ static u64 latched_seq_read_nolock(struct latched_seq *ls)
u64 val;
do {
seq = raw_read_seqcount_latch(&ls->latch);
seq = read_seqcount_latch(&ls->latch);
idx = seq & 0x1;
val = ls->val[idx];
} while (raw_read_seqcount_latch_retry(&ls->latch, seq));
} while (read_seqcount_latch_retry(&ls->latch, seq));
return val;
}


@@ -71,16 +71,16 @@ static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
*seq = raw_read_seqcount_latch(&cd.seq);
*seq = read_seqcount_latch(&cd.seq);
return cd.read_data + (*seq & 1);
}
notrace int sched_clock_read_retry(unsigned int seq)
{
return raw_read_seqcount_latch_retry(&cd.seq, seq);
return read_seqcount_latch_retry(&cd.seq, seq);
}
unsigned long long noinstr sched_clock_noinstr(void)
static __always_inline unsigned long long __sched_clock(void)
{
struct clock_read_data *rd;
unsigned int seq;
@@ -98,11 +98,23 @@ unsigned long long noinstr sched_clock_noinstr(void)
return res;
}
unsigned long long noinstr sched_clock_noinstr(void)
{
return __sched_clock();
}
unsigned long long notrace sched_clock(void)
{
unsigned long long ns;
preempt_disable_notrace();
ns = sched_clock_noinstr();
/*
* All of __sched_clock() is a seqcount_latch reader critical section,
* but relies on the raw helpers which are uninstrumented. For KCSAN,
* mark all accesses in __sched_clock() as atomic.
*/
kcsan_nestable_atomic_begin();
ns = __sched_clock();
kcsan_nestable_atomic_end();
preempt_enable_notrace();
return ns;
}
@@ -119,17 +131,19 @@ unsigned long long notrace sched_clock(void)
*/
static void update_clock_read_data(struct clock_read_data *rd)
{
/* update the backup (odd) copy with the new data */
cd.read_data[1] = *rd;
/* steer readers towards the odd copy */
raw_write_seqcount_latch(&cd.seq);
write_seqcount_latch_begin(&cd.seq);
/* now it's safe for us to update the normal (even) copy */
cd.read_data[0] = *rd;
/* switch readers back to the even copy */
raw_write_seqcount_latch(&cd.seq);
write_seqcount_latch(&cd.seq);
/* update the backup (odd) copy with the new data */
cd.read_data[1] = *rd;
write_seqcount_latch_end(&cd.seq);
}
/*
@@ -267,7 +281,7 @@ void __init generic_sched_clock_init(void)
*/
static u64 notrace suspended_sched_clock_read(void)
{
unsigned int seq = raw_read_seqcount_latch(&cd.seq);
unsigned int seq = read_seqcount_latch(&cd.seq);
return cd.read_data[seq & 1].epoch_cyc;
}


@@ -428,7 +428,7 @@ static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
* We want to use this from any context including NMI and tracing /
* instrumenting the timekeeping code itself.
*
* Employ the latch technique; see @raw_write_seqcount_latch.
* Employ the latch technique; see @write_seqcount_latch.
*
* So if a NMI hits the update of base[0] then it will use base[1]
* which is still consistent. In the worst case this can result in a
@@ -441,16 +441,18 @@ static void update_fast_timekeeper(const struct tk_read_base *tkr,
struct tk_read_base *base = tkf->base;
/* Force readers off to base[1] */
raw_write_seqcount_latch(&tkf->seq);
write_seqcount_latch_begin(&tkf->seq);
/* Update base[0] */
memcpy(base, tkr, sizeof(*base));
/* Force readers back to base[0] */
raw_write_seqcount_latch(&tkf->seq);
write_seqcount_latch(&tkf->seq);
/* Update base[1] */
memcpy(base + 1, base, sizeof(*base));
write_seqcount_latch_end(&tkf->seq);
}
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
@@ -460,11 +462,11 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
u64 now;
do {
seq = raw_read_seqcount_latch(&tkf->seq);
seq = read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
now = ktime_to_ns(tkr->base);
now += __timekeeping_get_ns(tkr);
} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
} while (read_seqcount_latch_retry(&tkf->seq, seq));
return now;
}


@@ -1409,22 +1409,14 @@ config PROVE_LOCKING
For more details, see Documentation/locking/lockdep-design.rst.
config PROVE_RAW_LOCK_NESTING
bool "Enable raw_spinlock - spinlock nesting checks"
bool
depends on PROVE_LOCKING
default n
default y
help
Enable the raw_spinlock vs. spinlock nesting checks which ensure
that the lock nesting rules for PREEMPT_RT enabled kernels are
not violated.
NOTE: There are known nesting problems. So if you enable this
option expect lockdep splats until these problems have been fully
addressed which is work in progress. This config switch allows to
identify and analyze these problems. It will be removed and the
check permanently enabled once the main issues have been fixed.
If unsure, select N.
config LOCK_STAT
bool "Lock usage statistics"
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT


@@ -2710,6 +2710,43 @@ static void local_lock_3B(void)
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline const char *rw_semaphore_lockdep_name(struct rw_semaphore *rwsem)
{
return rwsem->dep_map.name;
}
#else
static inline const char *rw_semaphore_lockdep_name(struct rw_semaphore *rwsem)
{
return NULL;
}
#endif
static void test_lockdep_set_subclass_name(void)
{
const char *name_before = rw_semaphore_lockdep_name(&rwsem_X1);
const char *name_after;
lockdep_set_subclass(&rwsem_X1, 1);
name_after = rw_semaphore_lockdep_name(&rwsem_X1);
DEBUG_LOCKS_WARN_ON(name_before != name_after);
}
/*
* lockdep_set_subclass() should reuse the existing lock class name instead
* of creating a new one.
*/
static void lockdep_set_subclass_name_test(void)
{
printk(" --------------------------------------------------------------------------\n");
printk(" | lockdep_set_subclass() name test|\n");
printk(" -----------------------------------\n");
print_testname("compare name before and after");
dotest(test_lockdep_set_subclass_name, SUCCESS, LOCKTYPE_RWSEM);
pr_cont("\n");
}
static void local_lock_tests(void)
{
printk(" --------------------------------------------------------------------------\n");
@@ -2920,6 +2957,8 @@ void locking_selftest(void)
dotest(hardirq_deadlock_softirq_not_deadlock, FAILURE, LOCKTYPE_SPECIAL);
pr_cont("\n");
lockdep_set_subclass_name_test();
if (unexpected_testcase_failures) {
printk("-----------------------------------------------------------------\n");
debug_locks = 0;


@@ -7,10 +7,14 @@ void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_SPINLOCK
# if defined(CONFIG_PREEMPT_RT)
__spin_lock_init(lock, name, key, false);
# else /*!CONFIG_PREEMPT_RT */
__raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
#else
# endif /* CONFIG_PREEMPT_RT */
#else /* !CONFIG_DEBUG_SPINLOCK */
spin_lock_init(lock);
#endif
#endif /* CONFIG_DEBUG_SPINLOCK */
}
void rust_helper_spin_lock(spinlock_t *lock)