kasan: test: bypass __alloc_size checks
Intentional overflows, as performed by the KASAN tests, are detected at
compile time[1] (instead of only at run-time) with the addition of
__alloc_size. Fix this by forcing the compiler into not being able to
trust the size used following the kmalloc()s.

[1] https://lore.kernel.org/lkml/20211005184717.65c6d8eb39350395e387b71f@linux-foundation.org

Link: https://lkml.kernel.org/r/20211006181544.1670992-1-keescook@chromium.org
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8772716f96
commit d73dad4eb5
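Background for the hunks below: once kmalloc() carries an __alloc_size annotation, the compiler can see the allocation size at each call site and diagnose the tests' deliberate out-of-bounds writes at build time instead of leaving them for KASAN. A minimal user-space sketch of that effect, assuming a GCC or Clang toolchain that honours the alloc_size attribute (demo_alloc and oob_memset_demo are illustrative names, not kernel code):

#include <stdlib.h>
#include <string.h>

/* Stand-in for a kmalloc()-style allocator annotated like __alloc_size(1). */
static void *demo_alloc(size_t n) __attribute__((malloc, alloc_size(1)));
static void *demo_alloc(size_t n)
{
	return malloc(n);
}

void oob_memset_demo(void)
{
	size_t size = 128;
	char *ptr = demo_alloc(size);

	if (!ptr)
		return;

	/* With alloc_size() visible, the compiler can fold the object size to
	 * 128 and may warn (e.g. -Wstringop-overflow) about this 2-byte write
	 * that starts at the last valid byte, before KASAN ever runs. */
	memset(ptr + size - 1, 0, 2);

	free(ptr);
}

The patch defeats exactly this kind of compile-time reasoning so the intentional overflow still reaches the run-time checker.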
@@ -440,6 +440,7 @@ static void kmalloc_oob_memset_2(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
 	kfree(ptr);
 }
@@ -452,6 +453,7 @@ static void kmalloc_oob_memset_4(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
 	kfree(ptr);
 }
@@ -464,6 +466,7 @@ static void kmalloc_oob_memset_8(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
 	kfree(ptr);
 }
@@ -476,6 +479,7 @@ static void kmalloc_oob_memset_16(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
 	kfree(ptr);
 }
@@ -488,6 +492,7 @@ static void kmalloc_oob_in_memset(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
 	kfree(ptr);
@@ -497,7 +502,7 @@ static void kmalloc_memmove_negative_size(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 64;
-	volatile size_t invalid_size = -2;
+	size_t invalid_size = -2;
 
 	/*
 	 * Hardware tag-based mode doesn't check memmove for negative size.
@@ -510,6 +515,7 @@ static void kmalloc_memmove_negative_size(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	memset((char *)ptr, 0, 64);
+	OPTIMIZER_HIDE_VAR(invalid_size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 			memmove((char *)ptr, (char *)ptr + 4, invalid_size));
 	kfree(ptr);

@@ -35,6 +35,8 @@ static noinline void __init copy_user_test(void)
 		return;
 	}
 
+	OPTIMIZER_HIDE_VAR(size);
+
 	pr_info("out-of-bounds in copy_from_user()\n");
 	unused = copy_from_user(kmem, usermem, size + 1);
 
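The one-line additions above all follow the same pattern: launder the size through OPTIMIZER_HIDE_VAR() so its value is no longer a compile-time constant at the point of the intentional overflow. A rough user-space equivalent, where HIDE_VAR is a simplified stand-in rather than the kernel's exact definition:

#include <stddef.h>
#include <string.h>

/* Simplified stand-in for OPTIMIZER_HIDE_VAR(): the empty asm takes the
 * variable as a read/write register operand, so the optimizer has to treat
 * its value as unknown from here on. */
#define HIDE_VAR(var) __asm__ ("" : "+r" (var))

void hidden_oob_demo(void)
{
	char buf[128];
	size_t size = sizeof(buf);

	/* Up to this point the compiler knows size == 128 and could flag the
	 * write below at build time (-Warray-bounds / -Wstringop-overflow). */
	HIDE_VAR(size);

	/* After the barrier, size is an opaque run-time value, so the 1-byte
	 * overflow survives compilation and is left for a run-time checker
	 * such as KASAN (or ASan in user space) to catch. */
	memset(buf + size - 1, 0, 2);
}

Compared with the volatile qualifier the memmove test used before, the asm barrier hides the value once at a single point rather than forcing a memory access on every subsequent use, which keeps the generated code closer to what the test means to exercise.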