Merge branch 'kcsan.2020.01.07a' into locking/kcsan

Pull KCSAN updates from Paul E. McKenney:

 - UBSAN fixes
 - inlining updates
 - documentation updates

Signed-off-by: Ingo Molnar <mingo@kernel.org>

commit 7add7875a8
@@ -101,18 +101,28 @@ instrumentation or e.g. DMA accesses.
 Selective analysis
 ~~~~~~~~~~~~~~~~~~
 
-To disable KCSAN data race detection for an entire subsystem, add to the
-respective ``Makefile``::
+It may be desirable to disable data race detection for specific accesses,
+functions, compilation units, or entire subsystems. For static blacklisting,
+the below options are available:
 
-    KCSAN_SANITIZE := n
+* KCSAN understands the ``data_race(expr)`` annotation, which tells KCSAN that
+  any data races due to accesses in ``expr`` should be ignored and resulting
+  behaviour when encountering a data race is deemed safe.
 
-To disable KCSAN on a per-file basis, add to the ``Makefile``::
+* Disabling data race detection for entire functions can be accomplished by
+  using the function attribute ``__no_kcsan`` (or ``__no_kcsan_or_inline`` for
+  ``__always_inline`` functions). To dynamically control for which functions
+  data races are reported, see the `debugfs`_ blacklist/whitelist feature.
 
-    KCSAN_SANITIZE_file.o := n
+* To disable data race detection for a particular compilation unit, add to the
+  ``Makefile``::
 
-KCSAN also understands the ``data_race(expr)`` annotation, which tells KCSAN
-that any data races due to accesses in ``expr`` should be ignored and resulting
-behaviour when encountering a data race is deemed safe.
+    KCSAN_SANITIZE_file.o := n
+
+* To disable data race detection for all compilation units listed in a
+  ``Makefile``, add to the respective ``Makefile``::
+
+    KCSAN_SANITIZE := n
 
 debugfs
 ~~~~~~~
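As a minimal sketch of how the annotations documented above are used from C
code (the variable and functions here are hypothetical, not part of this
commit):

    /* Assumes <linux/compiler.h> provides data_race() and __no_kcsan. */
    static unsigned long touch_ts;      /* written and read concurrently */

    /* The racy read is intentional; data_race() tells KCSAN to ignore it. */
    static bool watchdog_is_stale(unsigned long now)
    {
            return data_race(touch_ts) + 30 < now;
    }

    /* Or suppress reports for every access in an entire function. */
    static __no_kcsan void watchdog_touch(unsigned long now)
    {
            touch_ts = now;
    }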
(3 file diffs suppressed because they are too large)
@@ -146,8 +146,7 @@
 #endif
 
 #if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__)
-#define __no_sanitize_thread \
-        __attribute__((__noinline__)) __attribute__((no_sanitize_thread))
+#define __no_sanitize_thread __attribute__((no_sanitize_thread))
 #else
 #define __no_sanitize_thread
 #endif
@@ -207,12 +207,15 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 # define __no_kasan_or_inline __always_inline
 #endif
 
+#define __no_kcsan __no_sanitize_thread
 #ifdef __SANITIZE_THREAD__
 /*
  * Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, to avoid not inlining in
- * compilation units where instrumentation is disabled.
+ * compilation units where instrumentation is disabled. The attribute 'noinline'
+ * is required for older compilers, where implicit inlining of very small
+ * functions renders __no_sanitize_thread ineffective.
  */
-# define __no_kcsan_or_inline __no_sanitize_thread notrace __maybe_unused
+# define __no_kcsan_or_inline __no_kcsan noinline notrace __maybe_unused
 # define __no_sanitize_or_inline __no_kcsan_or_inline
 #else
 # define __no_kcsan_or_inline __always_inline
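As a hedged illustration of the rewritten macros (the helper below is
hypothetical, not from this commit): under KCSAN, __no_kcsan_or_inline now
expands to __no_kcsan noinline notrace __maybe_unused, keeping small helpers
out of line so the no_sanitize attribute reliably applies even on older
compilers; without KCSAN it is simply __always_inline and costs nothing.

    static __no_kcsan_or_inline int peek_flag(const int *flag)
    {
            return *flag;   /* plain racy read; KCSAN reports suppressed */
    }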
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 KCSAN_SANITIZE := n
 KCOV_INSTRUMENT := n
+UBSAN_SANITIZE := n
 
 CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE)
 
@@ -282,6 +282,7 @@ obj-$(CONFIG_UBSAN) += ubsan.o
 
 UBSAN_SANITIZE_ubsan.o := n
 KASAN_SANITIZE_ubsan.o := n
+KCSAN_SANITIZE_ubsan.o := n
 CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
 
 obj-$(CONFIG_SBITMAP) += sbitmap.o
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}${name}${sfx}_acquire(${params})
 {
         ${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
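For reference, an assumed instantiation of this acquire template (with
${atomic}=atomic, ${pfx}=fetch_, ${name}=add, ${sfx} empty; the fence and
return come from the unchanged tail of the template) would now generate:

    static __always_inline int
    atomic_fetch_add_acquire(int i, atomic_t *v)
    {
            int ret = atomic_fetch_add_relaxed(i, v);
            __atomic_acquire_fence();
            return ret;
    }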
@@ -8,7 +8,7 @@ cat <<EOF
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_add_negative(${int} i, ${atomic}_t *v)
 {
         return ${atomic}_add_return(i, v) < 0;
@@ -8,7 +8,7 @@ cat << EOF
  * Atomically adds @a to @v, if @v was not already @u.
  * Returns true if the addition was done.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
         return ${atomic}_fetch_add_unless(v, a, u) != u;
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
 {
         ${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
 {
         ${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);
@@ -7,7 +7,7 @@ cat <<EOF
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_dec_and_test(${atomic}_t *v)
 {
         return ${atomic}_dec_return(v) == 0;
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_dec_if_positive(${atomic}_t *v)
 {
         ${int} dec, c = ${atomic}_read(v);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline bool
+static __always_inline bool
 ${atomic}_dec_unless_positive(${atomic}_t *v)
 {
         ${int} c = ${atomic}_read(v);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}${name}${sfx}(${params})
 {
         ${ret} ret;
@@ -8,7 +8,7 @@ cat << EOF
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns original value of @v
  */
-static inline ${int}
+static __always_inline ${int}
 ${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
         ${int} c = ${atomic}_read(v);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
 {
         ${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v);
@@ -7,7 +7,7 @@ cat <<EOF
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_inc_and_test(${atomic}_t *v)
 {
         return ${atomic}_inc_return(v) == 0;
@@ -6,7 +6,7 @@ cat <<EOF
  * Atomically increments @v by 1, if @v is non-zero.
  * Returns true if the increment was done.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_inc_not_zero(${atomic}_t *v)
 {
         return ${atomic}_add_unless(v, 1, 0);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline bool
+static __always_inline bool
 ${atomic}_inc_unless_negative(${atomic}_t *v)
 {
         ${int} c = ${atomic}_read(v);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_read_acquire(const ${atomic}_t *v)
 {
         return smp_load_acquire(&(v)->counter);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}${name}${sfx}_release(${params})
 {
         __atomic_release_fence();
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline void
+static __always_inline void
 ${atomic}_set_release(${atomic}_t *v, ${int} i)
 {
         smp_store_release(&(v)->counter, i);
@@ -8,7 +8,7 @@ cat <<EOF
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
 {
         return ${atomic}_sub_return(i, v) == 0;
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline bool
+static __always_inline bool
 ${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
 {
         ${int} r, o = *old;
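An assumed instantiation of the try_cmpxchg template (${atomic}=atomic,
${order} empty for the fully ordered variant; everything after the first
statement is the unchanged tail of the template) would now generate:

    static __always_inline bool
    atomic_try_cmpxchg(atomic_t *v, int *old, int new)
    {
            int r, o = *old;
            r = atomic_cmpxchg(v, o, new);
            if (unlikely(r != o))
                    *old = r;
            return likely(r == o);
    }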
@@ -149,6 +149,8 @@ cat << EOF
 #ifndef _LINUX_ATOMIC_FALLBACK_H
 #define _LINUX_ATOMIC_FALLBACK_H
 
+#include <linux/compiler.h>
+
 EOF
 
 for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
@@ -84,7 +84,7 @@ gen_proto_order_variant()
         [ ! -z "${guard}" ] && printf "#if ${guard}\n"
 
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomicname}(${params})
 {
         ${checks}
@@ -147,16 +147,17 @@ cat << EOF
 #define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
 
 #include <linux/build_bug.h>
+#include <linux/compiler.h>
 #include <linux/kasan-checks.h>
 #include <linux/kcsan-checks.h>
 
-static inline void __atomic_check_read(const volatile void *v, size_t size)
+static __always_inline void __atomic_check_read(const volatile void *v, size_t size)
 {
         kasan_check_read(v, size);
         kcsan_check_atomic_read(v, size);
 }
 
-static inline void __atomic_check_write(const volatile void *v, size_t size)
+static __always_inline void __atomic_check_write(const volatile void *v, size_t size)
 {
         kasan_check_write(v, size);
         kcsan_check_atomic_write(v, size);
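With the check helpers above forced inline, a generated instrumented wrapper
collapses to the checks plus the arch call, leaving no uninstrumented frame
on the stack. An assumed example of the generator's output:

    static __always_inline int
    atomic_read(const atomic_t *v)
    {
            __atomic_check_read(v, sizeof(*v));
            return arch_atomic_read(v);
    }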
@@ -46,7 +46,7 @@ gen_proto_order_variant()
         local retstmt="$(gen_ret_stmt "${meta}")"
 
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 atomic_long_${name}(${params})
 {
         ${retstmt}${atomic}_${name}(${argscast});
@@ -64,6 +64,7 @@ cat << EOF
 #ifndef _ASM_GENERIC_ATOMIC_LONG_H
 #define _ASM_GENERIC_ATOMIC_LONG_H
 
+#include <linux/compiler.h>
 #include <asm/types.h>
 
 #ifdef CONFIG_64BIT
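An assumed instantiation of the atomic_long template above (for ${name}=inc
on a CONFIG_64BIT kernel, where atomic_long_t maps to atomic64_t) would now
generate:

    static __always_inline void
    atomic_long_inc(atomic_long_t *v)
    {
            atomic64_inc(v);
    }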