mirror of https://github.com/torvalds/linux.git
commit 69562e4983
Add support for modeling a subset of weak memory, which will enable detection of a subset of data races due to missing memory barriers.

KCSAN's approach to detecting missing memory barriers is based on modeling access reordering, and is enabled if `CONFIG_KCSAN_WEAK_MEMORY=y`, which depends on `CONFIG_KCSAN_STRICT=y`. The feature can be enabled or disabled at boot and runtime via the `kcsan.weak_memory` boot parameter.

Each memory access for which a watchpoint is set up is also selected for simulated reordering within the scope of its function (at most 1 in-flight access).

We are limited to modeling the effects of "buffering" (delaying the access), since the runtime cannot "prefetch" accesses (therefore no acquire modeling). Once an access has been selected for reordering, it is checked alongside every other access until the end of the function scope. If an appropriate memory barrier is encountered, the access is no longer considered for simulated reordering.

When the result of a memory operation should be ordered by a barrier, KCSAN can then detect data races where the conflict only occurs because a missing barrier allowed the access to be reordered.

Suggested-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
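To make the mechanism concrete, here is a minimal sketch (a hypothetical producer/consumer pair, not taken from the patch) of a race that only exists because of a missing write barrier:

	int data;	/* plain accesses */
	int flag;	/* marked accesses */

	void producer(void)
	{
		data = 1;	/* plain write; may be simulated as delayed */
		/* BUG: missing smp_wmb(); nothing orders the write to
		 * data before the flag update below. */
		WRITE_ONCE(flag, 1);
	}

	int consumer(void)
	{
		while (!READ_ONCE(flag))
			cpu_relax();
		smp_rmb();	/* reader side is correctly ordered */
		return data;	/* races with the delayed write above */
	}

With `kcsan.weak_memory` enabled, the plain write to data can remain "in flight" past the WRITE_ONCE() (no barrier orders it within the function scope), so KCSAN reports a data race against the consumer's plain read; inserting smp_wmb() before the WRITE_ONCE() makes the report go away.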
76 lines · 2.2 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. Public interface and
 * data structures to set up runtime. See kcsan-checks.h for explicit checks and
 * modifiers. For more info please see Documentation/dev-tools/kcsan.rst.
 *
 * Copyright (C) 2019, Google LLC.
 */

#ifndef _LINUX_KCSAN_H
#define _LINUX_KCSAN_H

#include <linux/kcsan-checks.h>
#include <linux/types.h>

#ifdef CONFIG_KCSAN

/*
 * Context for each thread of execution: for tasks, this is stored in
 * task_struct, and interrupts access internal per-CPU storage.
 */
struct kcsan_ctx {
	int disable_count;	/* disable counter */
	int disable_scoped;	/* disable scoped access counter */
	int atomic_next;	/* number of following atomic ops */

	/*
	 * We distinguish between: (a) nestable atomic regions that may contain
	 * other nestable regions; and (b) flat atomic regions that do not keep
	 * track of nesting. Both (a) and (b) are entirely independent of each
	 * other, and a flat region may be started in a nestable region or
	 * vice-versa.
	 *
	 * This is required because, for example, in the annotations for
	 * seqlocks, we declare seqlock writer critical sections as (a) nestable
	 * atomic regions, but reader critical sections as (b) flat atomic
	 * regions, but have encountered cases where seqlock reader critical
	 * sections are contained within writer critical sections (the opposite
	 * may be possible, too).
	 *
	 * To support these cases, we independently track the depth of nesting
	 * for (a), and whether the leaf level is flat for (b).
	 */
	int atomic_nest_count;
	bool in_flat_atomic;

	/*
	 * Access mask for all accesses if non-zero.
	 */
	unsigned long access_mask;

	/* List of scoped accesses; likely to be empty. */
	struct list_head scoped_accesses;

#ifdef CONFIG_KCSAN_WEAK_MEMORY
	/*
	 * Scoped access for modeling access reordering to detect missing memory
	 * barriers; only keep 1 to keep fast-path complexity manageable.
	 */
	struct kcsan_scoped_access reorder_access;
#endif
};
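
/*
 * Illustration (editorial sketch, not in the original header): the two
 * nesting fields above are driven by the begin/end helpers declared in
 * kcsan-checks.h, and evolve independently of each other:
 *
 *	kcsan_nestable_atomic_begin();	// atomic_nest_count: 0 -> 1
 *	kcsan_flat_atomic_begin();	// in_flat_atomic = true
 *	kcsan_nestable_atomic_begin();	// atomic_nest_count: 1 -> 2
 *	kcsan_nestable_atomic_end();	// atomic_nest_count: 2 -> 1
 *	kcsan_flat_atomic_end();	// in_flat_atomic = false
 *	kcsan_nestable_atomic_end();	// atomic_nest_count: back to 0
 */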

/**
 * kcsan_init - initialize KCSAN runtime
 */
void kcsan_init(void);
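
/*
 * Usage note (editorial): in the kernel proper, kcsan_init() is called
 * once during early boot, from start_kernel() in init/main.c.
 */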

#else /* CONFIG_KCSAN */

static inline void kcsan_init(void) { }

#endif /* CONFIG_KCSAN */

#endif /* _LINUX_KCSAN_H */
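A usage sketch (editorial; kcsan_example_skip() is a made-up name): the disable_count member of struct kcsan_ctx above is incremented and decremented by the kcsan_disable_current()/kcsan_enable_current() pair from kcsan-checks.h, which gives the standard idiom for shielding a region of code from analysis:

	#include <linux/kcsan-checks.h>

	static void kcsan_example_skip(void)
	{
		kcsan_disable_current();	/* ++ctx->disable_count */
		/* accesses here produce no KCSAN reports */
		kcsan_enable_current();		/* --ctx->disable_count */
	}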