linux/arch/x86/include/asm/refcount.h
Kees Cook 564c9cc84e locking/refcounts, x86/asm: Use unique .text section for refcount exceptions
Using .text.unlikely for refcount exceptions isn't safe because gcc may
move entire functions into .text.unlikely (e.g. in6_dev_get()). When that
happens, the exception code pushed to .text.unlikely by a protected
refcount_t operation ends up inline with the function itself, on the
fall-through path of the flag check, triggering the protection
unconditionally:

        .section        .text.unlikely,"ax",@progbits
        .type   in6_dev_get, @function
in6_dev_getx:
.LFB4673:
        .loc 2 4128 0
        .cfi_startproc
...
        lock; incl 480(%rbx)
        js 111f
        .pushsection .text.unlikely
111:    lea 480(%rbx), %rcx
112:    .byte 0x0f, 0xff
.popsection
113:

This creates a unique .text..refcount section for the exception trigger,
and adds an additional test to the exception handler to WARN when none of
OF, SF, or ZF is set, so that cases like the one above are easier to spot
in the future.
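
For reference, the extra check added to ex_handler_refcount() in
arch/x86/mm/extable.c is roughly the following (a paraphrased sketch of
the change, not the verbatim hunk; the WARN message text here is only
illustrative):

        /*
         * None of OF, SF or ZF is set: we reached the exception without a
         * refcount boundary condition, e.g. via a section-placement problem
         * like the one shown above.
         */
        if (!(regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_SF | X86_EFLAGS_ZF)))
                WARN_ONCE(1, "refcount exception without boundary flags set");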

The double dot in the section name keeps it out of the TEXT_MAIN macro
namespace, avoiding collisions, and allows the section to be placed at
the end, next to .text.unlikely, to keep the cold code together.

See commit:

  cb87481ee8 ("kbuild: linker script do not match C names unless LD_DEAD_CODE_DATA_ELIMINATION is configured")

... which matches C names: [a-zA-Z0-9_] but not ".".
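
In other words, with CONFIG_LD_DEAD_CODE_DATA_ELIMINATION enabled the
TEXT_MAIN wildcard only spans C-identifier characters, so a double-dot
name is never swallowed by it and must be listed explicitly. Roughly
(paraphrasing include/asm-generic/vmlinux.lds.h, not quoting the exact
hunks):

        #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
        #define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
        #else
        #define TEXT_MAIN .text
        #endif

        /* TEXT_TEXT then lists the new section right after the cold text: */
        *(.text.hot TEXT_MAIN .text.fixup .text.unlikely)
        *(.text..refcount)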

Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Elena <elena.reshetova@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch <linux-arch@vger.kernel.org>
Fixes: 7a46ec0e2f ("locking/refcounts, x86/asm: Implement fast refcount overflow protection")
Link: http://lkml.kernel.org/r/1504382986-49301-2-git-send-email-keescook@chromium.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-09-28 09:45:05 +02:00

#ifndef __ASM_X86_REFCOUNT_H
#define __ASM_X86_REFCOUNT_H
/*
 * x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from
 * PaX/grsecurity.
 */
#include <linux/refcount.h>

/*
 * This is the first portion of the refcount error handling, which lives in
 * .text.unlikely, and is jumped to from the CPU flag check (in the
 * following macros). This saves the refcount value location into CX for
 * the exception handler to use (in mm/extable.c), and then triggers the
 * central refcount exception. The fixup address for the exception points
 * back to the regular execution flow in .text.
 */
#define _REFCOUNT_EXCEPTION				\
	".pushsection .text..refcount\n"		\
	"111:\tlea %[counter], %%" _ASM_CX "\n"		\
	"112:\t" ASM_UD0 "\n"				\
	ASM_UNREACHABLE					\
	".popsection\n"					\
	"113:\n"					\
	_ASM_EXTABLE_REFCOUNT(112b, 113b)

/* Trigger refcount exception if refcount result is negative. */
#define REFCOUNT_CHECK_LT_ZERO				\
	"js 111f\n\t"					\
	_REFCOUNT_EXCEPTION

/* Trigger refcount exception if refcount result is zero or negative. */
#define REFCOUNT_CHECK_LE_ZERO				\
	"jz 111f\n\t"					\
	REFCOUNT_CHECK_LT_ZERO

/* Trigger refcount exception unconditionally. */
#define REFCOUNT_ERROR					\
	"jmp 111f\n\t"					\
	_REFCOUNT_EXCEPTION

static __always_inline void refcount_add(unsigned int i, refcount_t *r)
{
	asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
		REFCOUNT_CHECK_LT_ZERO
		: [counter] "+m" (r->refs.counter)
		: "ir" (i)
		: "cc", "cx");
}

static __always_inline void refcount_inc(refcount_t *r)
{
	asm volatile(LOCK_PREFIX "incl %0\n\t"
		REFCOUNT_CHECK_LT_ZERO
		: [counter] "+m" (r->refs.counter)
		: : "cc", "cx");
}

static __always_inline void refcount_dec(refcount_t *r)
{
	asm volatile(LOCK_PREFIX "decl %0\n\t"
		REFCOUNT_CHECK_LE_ZERO
		: [counter] "+m" (r->refs.counter)
		: : "cc", "cx");
}

static __always_inline __must_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
				  r->refs.counter, "er", i, "%0", e);
}

static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
				 r->refs.counter, "%0", e);
}

static __always_inline __must_check
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	int c, result;

	c = atomic_read(&(r->refs));
	do {
		if (unlikely(c == 0))
			return false;

		result = c + i;

		/* Did we try to increment from/to an undesirable state? */
		if (unlikely(c < 0 || c == INT_MAX || result < c)) {
			asm volatile(REFCOUNT_ERROR
				     : : [counter] "m" (r->refs.counter)
				     : "cc", "cx");
			break;
		}

	} while (!atomic_try_cmpxchg(&(r->refs), &c, result));

	return c != 0;
}

static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return refcount_add_not_zero(1, r);
}

#endif
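
For completeness, a hypothetical caller (not part of this patch; struct
session, session_get() and session_put() are made up for illustration)
showing how these helpers are used, with the checks emitted inline at
each call site:

        #include <linux/refcount.h>
        #include <linux/slab.h>

        struct session {
                refcount_t users;
        };

        static struct session *session_get(struct session *s)
        {
                /* expands to "lock incl" plus a "js" into .text..refcount */
                refcount_inc(&s->users);
                return s;
        }

        static void session_put(struct session *s)
        {
                /* on hitting zero, free; under/overflow would trap instead */
                if (refcount_dec_and_test(&s->users))
                        kfree(s);
        }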