mm: enable page poisoning early at boot

On SPARSEMEM systems page poisoning is enabled after buddy is up,
because of the dependency on page extension init.  This causes the pages
released by free_all_bootmem not to be poisoned.  This either delays or
misses the identification of some issues because the pages have to
undergo another cycle of alloc-free-alloc for any corruption to be
detected.

Enable page poisoning early by getting rid of the PAGE_EXT_DEBUG_POISON
flag.  Since all the free pages will now be poisoned, the flag need not
be verified before checking the poison during an alloc.

[vinmenon@codeaurora.org: fix Kconfig]
  Link: http://lkml.kernel.org/r/1490878002-14423-1-git-send-email-vinmenon@codeaurora.org
Link: http://lkml.kernel.org/r/1490358246-11001-1-git-send-email-vinmenon@codeaurora.org
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Acked-by: Laura Abbott <labbott@redhat.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Vinayak Menon 2017-05-03 14:54:42 -07:00 committed by Linus Torvalds
parent 2872bb2d0a
commit bd33ef3681
5 changed files with 17 additions and 88 deletions

View File

@ -2487,7 +2487,6 @@ extern long copy_huge_page_from_user(struct page *dst_page,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
extern struct page_ext_operations debug_guardpage_ops; extern struct page_ext_operations debug_guardpage_ops;
extern struct page_ext_operations page_poisoning_ops;
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder; extern unsigned int _debug_guardpage_minorder;

View File

@ -42,7 +42,6 @@ config DEBUG_PAGEALLOC_ENABLE_DEFAULT
config PAGE_POISONING config PAGE_POISONING
bool "Poison pages after freeing" bool "Poison pages after freeing"
select PAGE_EXTENSION
select PAGE_POISONING_NO_SANITY if HIBERNATION select PAGE_POISONING_NO_SANITY if HIBERNATION
---help--- ---help---
Fill the pages with poison patterns after free_pages() and verify Fill the pages with poison patterns after free_pages() and verify

View File

@ -1689,10 +1689,10 @@ static inline int check_new_page(struct page *page)
return 1; return 1;
} }
static inline bool free_pages_prezeroed(bool poisoned) static inline bool free_pages_prezeroed(void)
{ {
return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) && return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
page_poisoning_enabled() && poisoned; page_poisoning_enabled();
} }
#ifdef CONFIG_DEBUG_VM #ifdef CONFIG_DEBUG_VM
@ -1746,17 +1746,10 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
unsigned int alloc_flags) unsigned int alloc_flags)
{ {
int i; int i;
bool poisoned = true;
for (i = 0; i < (1 << order); i++) {
struct page *p = page + i;
if (poisoned)
poisoned &= page_is_poisoned(p);
}
post_alloc_hook(page, order, gfp_flags); post_alloc_hook(page, order, gfp_flags);
if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO)) if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
for (i = 0; i < (1 << order); i++) for (i = 0; i < (1 << order); i++)
clear_highpage(page + i); clear_highpage(page + i);

View File

@ -59,9 +59,6 @@
static struct page_ext_operations *page_ext_ops[] = { static struct page_ext_operations *page_ext_ops[] = {
&debug_guardpage_ops, &debug_guardpage_ops,
#ifdef CONFIG_PAGE_POISONING
&page_poisoning_ops,
#endif
#ifdef CONFIG_PAGE_OWNER #ifdef CONFIG_PAGE_OWNER
&page_owner_ops, &page_owner_ops,
#endif #endif
@ -127,15 +124,12 @@ struct page_ext *lookup_page_ext(struct page *page)
struct page_ext *base; struct page_ext *base;
base = NODE_DATA(page_to_nid(page))->node_page_ext; base = NODE_DATA(page_to_nid(page))->node_page_ext;
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING) #if defined(CONFIG_DEBUG_VM)
/* /*
* The sanity checks the page allocator does upon freeing a * The sanity checks the page allocator does upon freeing a
* page can reach here before the page_ext arrays are * page can reach here before the page_ext arrays are
* allocated when feeding a range of pages to the allocator * allocated when feeding a range of pages to the allocator
* for the first time during bootup or memory hotplug. * for the first time during bootup or memory hotplug.
*
* This check is also necessary for ensuring page poisoning
* works as expected when enabled
*/ */
if (unlikely(!base)) if (unlikely(!base))
return NULL; return NULL;
@ -204,15 +198,12 @@ struct page_ext *lookup_page_ext(struct page *page)
{ {
unsigned long pfn = page_to_pfn(page); unsigned long pfn = page_to_pfn(page);
struct mem_section *section = __pfn_to_section(pfn); struct mem_section *section = __pfn_to_section(pfn);
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING) #if defined(CONFIG_DEBUG_VM)
/* /*
* The sanity checks the page allocator does upon freeing a * The sanity checks the page allocator does upon freeing a
* page can reach here before the page_ext arrays are * page can reach here before the page_ext arrays are
* allocated when feeding a range of pages to the allocator * allocated when feeding a range of pages to the allocator
* for the first time during bootup or memory hotplug. * for the first time during bootup or memory hotplug.
*
* This check is also necessary for ensuring page poisoning
* works as expected when enabled
*/ */
if (!section->page_ext) if (!section->page_ext)
return NULL; return NULL;

View File

@ -6,7 +6,6 @@
#include <linux/poison.h> #include <linux/poison.h>
#include <linux/ratelimit.h> #include <linux/ratelimit.h>
static bool __page_poisoning_enabled __read_mostly;
static bool want_page_poisoning __read_mostly; static bool want_page_poisoning __read_mostly;
static int early_page_poison_param(char *buf) static int early_page_poison_param(char *buf)
@ -18,75 +17,22 @@ static int early_page_poison_param(char *buf)
early_param("page_poison", early_page_poison_param); early_param("page_poison", early_page_poison_param);
bool page_poisoning_enabled(void) bool page_poisoning_enabled(void)
{
return __page_poisoning_enabled;
}
static bool need_page_poisoning(void)
{
return want_page_poisoning;
}
static void init_page_poisoning(void)
{ {
/* /*
* page poisoning is debug page alloc for some arches. If either * Assumes that debug_pagealloc_enabled is set before
* of those options are enabled, enable poisoning * free_all_bootmem.
* Page poisoning is debug page alloc for some arches. If
* either of those options are enabled, enable poisoning.
*/ */
if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) { return (want_page_poisoning ||
if (!want_page_poisoning && !debug_pagealloc_enabled()) (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
return; debug_pagealloc_enabled()));
} else {
if (!want_page_poisoning)
return;
}
__page_poisoning_enabled = true;
}
struct page_ext_operations page_poisoning_ops = {
.need = need_page_poisoning,
.init = init_page_poisoning,
};
static inline void set_page_poison(struct page *page)
{
struct page_ext *page_ext;
page_ext = lookup_page_ext(page);
if (unlikely(!page_ext))
return;
__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}
static inline void clear_page_poison(struct page *page)
{
struct page_ext *page_ext;
page_ext = lookup_page_ext(page);
if (unlikely(!page_ext))
return;
__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}
bool page_is_poisoned(struct page *page)
{
struct page_ext *page_ext;
page_ext = lookup_page_ext(page);
if (unlikely(!page_ext))
return false;
return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
} }
static void poison_page(struct page *page) static void poison_page(struct page *page)
{ {
void *addr = kmap_atomic(page); void *addr = kmap_atomic(page);
set_page_poison(page);
memset(addr, PAGE_POISON, PAGE_SIZE); memset(addr, PAGE_POISON, PAGE_SIZE);
kunmap_atomic(addr); kunmap_atomic(addr);
} }
@ -140,12 +86,13 @@ static void unpoison_page(struct page *page)
{ {
void *addr; void *addr;
if (!page_is_poisoned(page))
return;
addr = kmap_atomic(page); addr = kmap_atomic(page);
/*
* Page poisoning when enabled poisons each and every page
* that is freed to buddy. Thus no extra check is done to
 * see if a page was poisoned.
*/
check_poison_mem(addr, PAGE_SIZE); check_poison_mem(addr, PAGE_SIZE);
clear_page_poison(page);
kunmap_atomic(addr); kunmap_atomic(addr);
} }