Merge branch 'kcsan.2020.01.07a' into locking/kcsan

Pull KCSAN updates from Paul E. McKenney:

 - UBSAN fixes
 - inlining updates
 - documentation updates

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 7add7875a8 by Ingo Molnar, 2020-01-24 09:33:52 +01:00
30 changed files with 557 additions and 535 deletions


@@ -101,18 +101,28 @@ instrumentation or e.g. DMA accesses.
 Selective analysis
 ~~~~~~~~~~~~~~~~~~
 
-To disable KCSAN data race detection for an entire subsystem, add to the
-respective ``Makefile``::
+It may be desirable to disable data race detection for specific accesses,
+functions, compilation units, or entire subsystems. For static blacklisting,
+the below options are available:
 
-    KCSAN_SANITIZE := n
+* KCSAN understands the ``data_race(expr)`` annotation, which tells KCSAN that
+  any data races due to accesses in ``expr`` should be ignored and resulting
+  behaviour when encountering a data race is deemed safe.
 
-To disable KCSAN on a per-file basis, add to the ``Makefile``::
+* Disabling data race detection for entire functions can be accomplished by
+  using the function attribute ``__no_kcsan`` (or ``__no_kcsan_or_inline`` for
+  ``__always_inline`` functions). To dynamically control for which functions
+  data races are reported, see the `debugfs`_ blacklist/whitelist feature.
+
+* To disable data race detection for a particular compilation unit, add to the
+  ``Makefile``::
 
     KCSAN_SANITIZE_file.o := n
 
-KCSAN also understands the ``data_race(expr)`` annotation, which tells KCSAN
-that any data races due to accesses in ``expr`` should be ignored and resulting
-behaviour when encountering a data race is deemed safe.
+* To disable data race detection for all compilation units listed in a
+  ``Makefile``, add to the respective ``Makefile``::
+
+    KCSAN_SANITIZE := n
 
 debugfs
 ~~~~~~~
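
A rough usage sketch of the annotations described above (not part of this
diff; the helper names and the flag parameter below are made up for
illustration):

    /* Data races on this plain read are intentionally ignored. */
    static bool example_peek(bool *flag)
    {
            return data_race(*flag);
    }

    /* KCSAN does not instrument this function at all. */
    static __no_kcsan void example_set(bool *flag)
    {
            *flag = true;
    }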

[3 file diffs suppressed because they are too large]


@@ -146,8 +146,7 @@
 #endif
 
 #if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__)
-#define __no_sanitize_thread \
-	__attribute__((__noinline__)) __attribute__((no_sanitize_thread))
+#define __no_sanitize_thread __attribute__((no_sanitize_thread))
 #else
 #define __no_sanitize_thread
 #endif


@@ -207,12 +207,15 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 # define __no_kasan_or_inline __always_inline
 #endif
 
+#define __no_kcsan __no_sanitize_thread
 #ifdef __SANITIZE_THREAD__
 /*
  * Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, to avoid not inlining in
- * compilation units where instrumentation is disabled.
+ * compilation units where instrumentation is disabled. The attribute 'noinline'
+ * is required for older compilers, where implicit inlining of very small
+ * functions renders __no_sanitize_thread ineffective.
  */
-# define __no_kcsan_or_inline __no_sanitize_thread notrace __maybe_unused
+# define __no_kcsan_or_inline __no_kcsan noinline notrace __maybe_unused
 # define __no_sanitize_or_inline __no_kcsan_or_inline
 #else
 # define __no_kcsan_or_inline __always_inline
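
A minimal sketch (not from this series; 'example_flag_set' is a made-up
helper) of how __no_kcsan_or_inline is intended to be used: the accessor stays
out of line and uninstrumented when KCSAN is enabled, and collapses to
__always_inline otherwise:

    static __no_kcsan_or_inline bool example_flag_set(const bool *flag)
    {
            return *flag;   /* plain read; races here are deliberately accepted */
    }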


@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 KCSAN_SANITIZE := n
 KCOV_INSTRUMENT := n
+UBSAN_SANITIZE := n
 
 CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE)


@@ -282,6 +282,7 @@ obj-$(CONFIG_UBSAN) += ubsan.o
 
 UBSAN_SANITIZE_ubsan.o := n
 KASAN_SANITIZE_ubsan.o := n
+KCSAN_SANITIZE_ubsan.o := n
 CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
 
 obj-$(CONFIG_SBITMAP) += sbitmap.o


@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}${name}${sfx}_acquire(${params})
 {
 	${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
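
For context, with this change the acquire template shown above generates
fallbacks of roughly the following shape (illustrated here for
atomic_fetch_add_acquire; the exact output comes from the surrounding script,
not from this hunk alone):

    static __always_inline int
    atomic_fetch_add_acquire(int i, atomic_t *v)
    {
            int ret = atomic_fetch_add_relaxed(i, v);
            __atomic_acquire_fence();
            return ret;
    }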


@@ -8,7 +8,7 @@ cat <<EOF
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_add_negative(${int} i, ${atomic}_t *v)
 {
 	return ${atomic}_add_return(i, v) < 0;


@@ -8,7 +8,7 @@ cat << EOF
  * Atomically adds @a to @v, if @v was not already @u.
  * Returns true if the addition was done.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
 	return ${atomic}_fetch_add_unless(v, a, u) != u;


@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
 {
 	${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);


@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
 {
 	${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);


@@ -7,7 +7,7 @@ cat <<EOF
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_dec_and_test(${atomic}_t *v)
 {
 	return ${atomic}_dec_return(v) == 0;


@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_dec_if_positive(${atomic}_t *v)
 {
 	${int} dec, c = ${atomic}_read(v);


@@ -1,5 +1,5 @@
 cat <<EOF
-static inline bool
+static __always_inline bool
 ${atomic}_dec_unless_positive(${atomic}_t *v)
 {
 	${int} c = ${atomic}_read(v);


@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}${name}${sfx}(${params})
 {
 	${ret} ret;


@@ -8,7 +8,7 @@ cat << EOF
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns original value of @v
  */
-static inline ${int}
+static __always_inline ${int}
 ${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
 	${int} c = ${atomic}_read(v);


@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
 {
 	${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v);


@@ -7,7 +7,7 @@ cat <<EOF
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_inc_and_test(${atomic}_t *v)
 {
 	return ${atomic}_inc_return(v) == 0;


@@ -6,7 +6,7 @@ cat <<EOF
  * Atomically increments @v by 1, if @v is non-zero.
  * Returns true if the increment was done.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_inc_not_zero(${atomic}_t *v)
 {
 	return ${atomic}_add_unless(v, 1, 0);


@@ -1,5 +1,5 @@
 cat <<EOF
-static inline bool
+static __always_inline bool
 ${atomic}_inc_unless_negative(${atomic}_t *v)
 {
 	${int} c = ${atomic}_read(v);


@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_read_acquire(const ${atomic}_t *v)
 {
 	return smp_load_acquire(&(v)->counter);


@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}${name}${sfx}_release(${params})
 {
 	__atomic_release_fence();


@@ -1,5 +1,5 @@
 cat <<EOF
-static inline void
+static __always_inline void
 ${atomic}_set_release(${atomic}_t *v, ${int} i)
 {
 	smp_store_release(&(v)->counter, i);


@@ -8,7 +8,7 @@ cat <<EOF
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
 {
 	return ${atomic}_sub_return(i, v) == 0;


@@ -1,5 +1,5 @@
 cat <<EOF
-static inline bool
+static __always_inline bool
 ${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
 {
 	${int} r, o = *old;


@@ -149,6 +149,8 @@ cat << EOF
 #ifndef _LINUX_ATOMIC_FALLBACK_H
 #define _LINUX_ATOMIC_FALLBACK_H
 
+#include <linux/compiler.h>
+
 EOF
 
 for xchg in "xchg" "cmpxchg" "cmpxchg64"; do


@@ -84,7 +84,7 @@ gen_proto_order_variant()
 	[ ! -z "${guard}" ] && printf "#if ${guard}\n"
 
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomicname}(${params})
 {
 ${checks}
@@ -147,16 +147,17 @@
 #define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
 
 #include <linux/build_bug.h>
+#include <linux/compiler.h>
 #include <linux/kasan-checks.h>
 #include <linux/kcsan-checks.h>
 
-static inline void __atomic_check_read(const volatile void *v, size_t size)
+static __always_inline void __atomic_check_read(const volatile void *v, size_t size)
 {
 	kasan_check_read(v, size);
 	kcsan_check_atomic_read(v, size);
 }
 
-static inline void __atomic_check_write(const volatile void *v, size_t size)
+static __always_inline void __atomic_check_write(const volatile void *v, size_t size)
 {
 	kasan_check_write(v, size);
 	kcsan_check_atomic_write(v, size);
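
Roughly what the instrumented wrappers generated by this script look like once
the helpers above are __always_inline (illustrative, shown for atomic_read):

    static __always_inline int
    atomic_read(const atomic_t *v)
    {
            __atomic_check_read(v, sizeof(*v));
            return arch_atomic_read(v);
    }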


@@ -46,7 +46,7 @@ gen_proto_order_variant()
 	local retstmt="$(gen_ret_stmt "${meta}")"
 
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 atomic_long_${name}(${params})
 {
 	${retstmt}${atomic}_${name}(${argscast});
@@ -64,6 +64,7 @@ cat << EOF
 #ifndef _ASM_GENERIC_ATOMIC_LONG_H
 #define _ASM_GENERIC_ATOMIC_LONG_H
 
+#include <linux/compiler.h>
 #include <asm/types.h>
 
 #ifdef CONFIG_64BIT
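
Roughly what the generated atomic_long wrappers look like with the
__always_inline change (illustrative, 64-bit case where atomic_long_t maps to
atomic64_t):

    static __always_inline long
    atomic_long_read(const atomic_long_t *v)
    {
            return atomic64_read(v);
    }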