usercopy: remove page-spanning test for now
A custom allocator without __GFP_COMP that copies to userspace has been
found in vmw_execbuf_process[1], so this disables the page-span checker
by placing it behind a CONFIG for future work where such things can be
tracked down later.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1373326
Reported-by: Vinson Lee <vlee@freedesktop.org>
Fixes: f5509cc18d ("mm: Hardened usercopy")
Signed-off-by: Kees Cook <keescook@chromium.org>
commit 8e1f74ea02 (parent a85d6b8242)
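For context, the pattern that trips the checker looks roughly like the sketch below. This is a hypothetical minimal reproduction, not the vmw_execbuf_process code itself; the function names and the order-1 size are illustrative only:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

/*
 * Hypothetical driver-style allocation: two pages requested without
 * __GFP_COMP, so they are not linked into a compound page and
 * virt_to_head_page() on the second page does not point back to the
 * first.
 */
static void *buf;

static int example_alloc(void)
{
	buf = (void *)__get_free_pages(GFP_KERNEL, 1); /* order-1: 2 pages */
	return buf ? 0 : -ENOMEM;
}

static long example_copy_out(void __user *uptr)
{
	/*
	 * This copy spans both pages. With the page-span check enabled,
	 * hardened usercopy rejects it ("<spans multiple pages>") even
	 * though the allocation itself is perfectly valid.
	 */
	if (copy_to_user(uptr, buf, 2 * PAGE_SIZE))
		return -EFAULT;
	return 0;
}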
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -134,30 +134,15 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 	return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-					    bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+					  struct page *page, bool to_user)
 {
-	struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
+	struct page *endpage;
 	bool is_reserved, is_cma;
 
-	/*
-	 * Some architectures (arm64) return true for virt_addr_valid() on
-	 * vmalloced addresses. Work around this by checking for vmalloc
-	 * first.
-	 */
-	if (is_vmalloc_addr(ptr))
-		return NULL;
-
-	if (!virt_addr_valid(ptr))
-		return NULL;
-
-	page = virt_to_head_page(ptr);
-
-	/* Check slab allocator for flags and size. */
-	if (PageSlab(page))
-		return __check_heap_object(ptr, n, page);
-
 	/*
 	 * Sometimes the kernel data regions are not marked Reserved (see
 	 * check below). And sometimes [_sdata,_edata) does not cover
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	    ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return NULL;
 
-	/* Allow if start and end are inside the same compound page. */
+	/* Allow if fully inside the same compound (__GFP_COMP) page. */
 	endpage = virt_to_head_page(end);
 	if (likely(endpage == page))
 		return NULL;
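The reworded comment above points at why the endpage == page test is sufficient for well-formed allocations: with __GFP_COMP, every tail page points back to the head page, so any address inside the allocation resolves to the same head page. A minimal sketch of that property (the order value is an arbitrary example):

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * With __GFP_COMP the whole allocation is one compound page, so the
 * head page of the first and last byte compare equal and the copy
 * would be allowed.
 */
static bool span_check_passes(unsigned int order)
{
	void *ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	void *end;
	bool same;

	if (!ptr)
		return false;

	end = ptr + (PAGE_SIZE << order) - 1;
	same = virt_to_head_page(ptr) == virt_to_head_page(end);
	free_pages((unsigned long)ptr, order);
	return same;
}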
@@ -199,20 +184,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	is_reserved = PageReserved(page);
 	is_cma = is_migrate_cma_page(page);
 	if (!is_reserved && !is_cma)
-		goto reject;
+		return "<spans multiple pages>";
 
 	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
 		page = virt_to_head_page(ptr);
 		if (is_reserved && !PageReserved(page))
-			goto reject;
+			return "<spans Reserved and non-Reserved pages>";
 		if (is_cma && !is_migrate_cma_page(page))
-			goto reject;
+			return "<spans CMA and non-CMA pages>";
 	}
+#endif
 
 	return NULL;
+}
 
-reject:
-	return "<spans multiple pages>";
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+					    bool to_user)
+{
+	struct page *page;
+
+	/*
+	 * Some architectures (arm64) return true for virt_addr_valid() on
+	 * vmalloced addresses. Work around this by checking for vmalloc
+	 * first.
+	 */
+	if (is_vmalloc_addr(ptr))
+		return NULL;
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	/* Check slab allocator for flags and size. */
+	if (PageSlab(page))
+		return __check_heap_object(ptr, n, page);
+
+	/* Verify object does not incorrectly span multiple pages. */
+	return check_page_span(ptr, n, page, to_user);
 }
 
 /*
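Note the structure of the refactor: the vmalloc and slab checks stay unconditional in check_heap_object(), while everything between the new #ifdef/#endif pair compiles away when the option is off, so check_page_span() effectively reduces to:

/*
 * Effective body with CONFIG_HARDENED_USERCOPY_PAGESPAN=n: the #ifdef
 * block drops out and only the accept path remains.
 */
static inline const char *check_page_span(const void *ptr, unsigned long n,
					  struct page *page, bool to_user)
{
	return NULL;	/* never rejects */
}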
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -147,6 +147,17 @@ config HARDENED_USERCOPY
 	  or are part of the kernel text. This kills entire classes
 	  of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_PAGESPAN
+	bool "Refuse to copy allocations that span multiple pages"
+	depends on HARDENED_USERCOPY
+	depends on !COMPILE_TEST
+	help
+	  When a multi-page allocation is done without __GFP_COMP,
+	  hardened usercopy will reject attempts to copy it. There are,
+	  however, several cases of this in the kernel that have not all
+	  been removed. This config is intended to be used only while
+	  trying to find such users.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
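Anyone hunting for the remaining non-__GFP_COMP allocators can still opt back in; a .config fragment for such a debugging build would be:

CONFIG_HARDENED_USERCOPY=y
CONFIG_HARDENED_USERCOPY_PAGESPAN=y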