mirror of https://github.com/torvalds/linux.git
6974f0c455
This adds support for compiling with a rough equivalent of the glibc _FORTIFY_SOURCE=1 feature, providing compile-time and runtime buffer overflow checks for string.h functions when the compiler can determine the size of the source or destination buffer at compile time. Unlike glibc, it covers buffer reads in addition to writes.

GNU C __builtin_*_chk intrinsics are avoided because they would force a much more complex implementation. They aren't designed to detect read overflows and offer no real benefit when using an implementation based on inline checks. Inline checks don't add up to much code size and allow full use of the regular string intrinsics while avoiding the need for a bunch of _chk functions and per-arch assembly to avoid wrapper overhead.

This detects various overflows at compile-time in various drivers and some non-x86 core kernel code. There will likely be issues caught in regular use at runtime too.

Future improvements are left out of the initial implementation for simplicity, as it's all quite optional and can be done incrementally:

* Some of the fortified string functions (strncpy, strcat) don't yet place a limit on reads from the source based on __builtin_object_size of the source buffer.

* Extending coverage to more string functions like strlcat.

* It should be possible to optionally use __builtin_object_size(x, 1) for some functions (C strings) to detect intra-object overflows (like glibc's _FORTIFY_SOURCE=2), but for now this takes the conservative approach to avoid likely compatibility issues.

* The compile-time checks should be made available via a separate config option which can be enabled by default (or always enabled) once enough time has passed to get the issues it catches fixed.

Kees said: "This is great to have. While it was out-of-tree code, it would have blocked at least CVE-2016-3858 from being exploitable (improper size argument to strlcpy()). I've sent a number of fixes for out-of-bounds-reads that this detected upstream already."

[arnd@arndb.de: x86: fix fortified memcpy]
  Link: http://lkml.kernel.org/r/20170627150047.660360-1-arnd@arndb.de
[keescook@chromium.org: avoid panic() in favor of BUG()]
  Link: http://lkml.kernel.org/r/20170626235122.GA25261@beast
[keescook@chromium.org: move from -mm, add ARCH_HAS_FORTIFY_SOURCE, tweak Kconfig help]
  Link: http://lkml.kernel.org/r/20170526095404.20439-1-danielmicay@gmail.com
Link: http://lkml.kernel.org/r/1497903987-21002-8-git-send-email-keescook@chromium.org
Signed-off-by: Daniel Micay <danielmicay@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Daniel Axtens <dja@axtens.net>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
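The core pattern behind this is compact enough to sketch standalone. The following is a minimal illustration of the technique using only the GCC/Clang __builtin_object_size() builtin; it is not the kernel's actual include/linux/string.h code, and fortified_memcpy() and report_overflow() are hypothetical names. It needs -O2 so the object sizes survive inlining:

/*
 * Minimal standalone sketch of the FORTIFY approach described above,
 * NOT the kernel's implementation: fortified_memcpy() and
 * report_overflow() are illustrative names. Compile with "gcc -O2" so
 * __builtin_object_size() can see through the inlined wrapper.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void report_overflow(const char *func)
{
	fprintf(stderr, "detected buffer overflow in %s\n", func);
	abort();	/* the kernel version BUGs instead of aborting */
}

static inline void *fortified_memcpy(void *p, const void *q, size_t size)
{
	/* (size_t)-1 means "size unknown", so the checks become no-ops */
	size_t p_size = __builtin_object_size(p, 0);	/* destination */
	size_t q_size = __builtin_object_size(q, 0);	/* source */

	if (p_size < size)	/* write overflow on the destination */
		report_overflow(__func__);
	if (q_size < size)	/* read overflow on the source */
		report_overflow(__func__);
	return memcpy(p, q, size);
}

int main(void)
{
	char dst[8];
	const char src[16] = "0123456789abcde";

	fortified_memcpy(dst, src, sizeof(dst));  /* ok: fits in dst */
	fortified_memcpy(dst, src, 16);           /* caught: dst is only 8 bytes */
	return 0;
}

When the length is a compile-time constant, the comparison folds to a constant, so the check costs nothing at runtime in the non-overflow case; this is why inline checks can cover reads as well as writes without any glibc-style _chk entry points.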
209 lines
3.8 KiB
C
#include <linux/string.h>
#include <linux/export.h>

#undef memcpy
#undef memset

__visible void *memcpy(void *to, const void *from, size_t n)
{
#if defined(CONFIG_X86_USE_3DNOW) && !defined(CONFIG_FORTIFY_SOURCE)
	return __memcpy3d(to, from, n);
#else
	return __memcpy(to, from, n);
#endif
}
EXPORT_SYMBOL(memcpy);

__visible void *memset(void *s, int c, size_t count)
{
	return __memset(s, c, count);
}
EXPORT_SYMBOL(memset);

__visible void *memmove(void *dest, const void *src, size_t n)
{
	int d0, d1, d2, d3, d4, d5;
	char *ret = dest;

	__asm__ __volatile__(
		/* Handle sizes of 16 bytes and up in the loop below */
		"cmp $0x10, %0\n\t"
		"jb 1f\n\t"

		/* Decide forward/backward copy mode */
		"cmp %2, %1\n\t"
		"jb 2f\n\t"

		/*
		 * The movs instruction has a high startup latency, so
		 * small sizes are copied via general registers instead.
		 */
		"cmp $680, %0\n\t"
		"jb 3f\n\t"
		/*
		 * The movs instruction is only good for the aligned case.
		 */
		"mov %1, %3\n\t"
		"xor %2, %3\n\t"
		"and $0xff, %3\n\t"
		"jz 4f\n\t"
		/*
		 * The local label "3" is deliberately defined twice: this
		 * first copy only biases the count by an extra 16 bytes so
		 * that the "jae 3b" test below leaves the loop once fewer
		 * than 16 bytes remain (the "add $0x10" after the loop
		 * undoes the bias).
		 */
		"3:\n\t"
		"sub $0x10, %0\n\t"

		/*
		 * We gobble 16 bytes forward in each loop.
		 */
		"3:\n\t"
		"sub $0x10, %0\n\t"
		"mov 0*4(%1), %3\n\t"
		"mov 1*4(%1), %4\n\t"
		"mov %3, 0*4(%2)\n\t"
		"mov %4, 1*4(%2)\n\t"
		"mov 2*4(%1), %3\n\t"
		"mov 3*4(%1), %4\n\t"
		"mov %3, 2*4(%2)\n\t"
		"mov %4, 3*4(%2)\n\t"
		"lea 0x10(%1), %1\n\t"
		"lea 0x10(%2), %2\n\t"
		"jae 3b\n\t"
		"add $0x10, %0\n\t"
		"jmp 1f\n\t"

		/*
		 * Handle data forward by movs.
		 */
		".p2align 4\n\t"
		"4:\n\t"
		"mov -4(%1, %0), %3\n\t"
		"lea -4(%2, %0), %4\n\t"
		"shr $2, %0\n\t"
		"rep movsl\n\t"
		"mov %3, (%4)\n\t"
		"jmp 11f\n\t"
		/*
		 * Handle data backward by movs.
		 */
		".p2align 4\n\t"
		"6:\n\t"
		"mov (%1), %3\n\t"
		"mov %2, %4\n\t"
		"lea -4(%1, %0), %1\n\t"
		"lea -4(%2, %0), %2\n\t"
		"shr $2, %0\n\t"
		"std\n\t"
		"rep movsl\n\t"
		"mov %3, (%4)\n\t"
		"cld\n\t"
		"jmp 11f\n\t"

		/*
		 * Start to prepare for backward copy.
		 */
		".p2align 4\n\t"
		"2:\n\t"
		"cmp $680, %0\n\t"
		"jb 5f\n\t"
		"mov %1, %3\n\t"
		"xor %2, %3\n\t"
		"and $0xff, %3\n\t"
		"jz 6b\n\t"

		/*
		 * Calculate copy position to tail.
		 */
		"5:\n\t"
		"add %0, %1\n\t"
		"add %0, %2\n\t"
		"sub $0x10, %0\n\t"

		/*
		 * We gobble 16 bytes backward in each loop.
		 */
"7:\n\t"
|
|
"sub $0x10, %0\n\t"
|
|
|
|
"mov -1*4(%1), %3\n\t"
|
|
"mov -2*4(%1), %4\n\t"
|
|
"mov %3, -1*4(%2)\n\t"
|
|
"mov %4, -2*4(%2)\n\t"
|
|
"mov -3*4(%1), %3\n\t"
|
|
"mov -4*4(%1), %4\n\t"
|
|
"mov %3, -3*4(%2)\n\t"
|
|
"mov %4, -4*4(%2)\n\t"
|
|
"lea -0x10(%1), %1\n\t"
|
|
"lea -0x10(%2), %2\n\t"
|
|
"jae 7b\n\t"
|
|
/*
|
|
* Calculate copy position to head.
|
|
*/
|
|
"add $0x10, %0\n\t"
|
|
"sub %0, %1\n\t"
|
|
"sub %0, %2\n\t"
|
|
|
|
		/*
		 * Move data from 8 bytes to 15 bytes.
		 */
		".p2align 4\n\t"
		"1:\n\t"
		"cmp $8, %0\n\t"
		"jb 8f\n\t"
		"mov 0*4(%1), %3\n\t"
		"mov 1*4(%1), %4\n\t"
		"mov -2*4(%1, %0), %5\n\t"
		"mov -1*4(%1, %0), %1\n\t"

		"mov %3, 0*4(%2)\n\t"
		"mov %4, 1*4(%2)\n\t"
		"mov %5, -2*4(%2, %0)\n\t"
		"mov %1, -1*4(%2, %0)\n\t"
		"jmp 11f\n\t"

		/*
		 * Move data from 4 bytes to 7 bytes.
		 */
		".p2align 4\n\t"
		"8:\n\t"
		"cmp $4, %0\n\t"
		"jb 9f\n\t"
		"mov 0*4(%1), %3\n\t"
		"mov -1*4(%1, %0), %4\n\t"
		"mov %3, 0*4(%2)\n\t"
		"mov %4, -1*4(%2, %0)\n\t"
		"jmp 11f\n\t"

		/*
		 * Move data from 2 bytes to 3 bytes.
		 */
		".p2align 4\n\t"
		"9:\n\t"
		"cmp $2, %0\n\t"
		"jb 10f\n\t"
		"movw 0*2(%1), %%dx\n\t"
		"movw -1*2(%1, %0), %%bx\n\t"
		"movw %%dx, 0*2(%2)\n\t"
		"movw %%bx, -1*2(%2, %0)\n\t"
		"jmp 11f\n\t"

		/*
		 * Move data for 1 byte.
		 */
		".p2align 4\n\t"
		"10:\n\t"
		"cmp $1, %0\n\t"
		"jb 11f\n\t"
		"movb (%1), %%cl\n\t"
		"movb %%cl, (%2)\n\t"
		".p2align 4\n\t"
		"11:"
		: "=&c" (d0), "=&S" (d1), "=&D" (d2),
		  "=r" (d3), "=r" (d4), "=r" (d5)
		: "0" (n),
		  "1" (src),
		  "2" (dest)
		: "memory");

	return ret;
}
EXPORT_SYMBOL(memmove);