mm/page_alloc.c: allow error injection

Model the call chain after should_failslab(): likewise, we can now use a
kprobe to override the return value of should_fail_alloc_page() and inject
allocation failures into alloc_page*().
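
As a hedged illustration of what this enables (not part of this patch):
with CONFIG_BPF_KPROBE_OVERRIDE=y, a BPF program attached via kprobe can
flip the return value.  Using bpftrace's override() helper (which requires
--unsafe mode) and a hypothetical target pid of 1234:

  # bpftrace --unsafe -e 'kprobe:should_fail_alloc_page /pid == 1234/ { override(1); }'

While the probe is attached, page allocations made in the context of pid
1234 are forced to fail.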

This will allow injecting allocation failures using the BCC tools even
without building the kernel with CONFIG_FAIL_PAGE_ALLOC and booting it with
a fail_page_alloc= parameter, which incurs some overhead even when failures
are not being injected.  On the other hand, this patch adds an
unconditional call to should_fail_alloc_page() in the page allocation hot
path.  With CONFIG_FAIL_PAGE_ALLOC=n and no kprobe attached, though, that
overhead should be negligible.
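
For comparison, the built-in mechanism takes its parameters on the kernel
command line; the documented format (see
Documentation/fault-injection/fault-injection.txt) is:

  fail_page_alloc=<interval>,<probability>,<space>,<times>

so a hedged example like fail_page_alloc=1,10,0,-1 would make roughly 10%
of eligible page allocations fail, with no limit on the number of failures.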

[vbabka@suse.cz: changelog addition]
Link: http://lkml.kernel.org/r/20181214074330.18917-1-bpoirier@suse.com
Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Pavel Tatashin <pavel.tatashin@microsoft.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h

@@ -8,6 +8,7 @@ enum {
 	EI_ETYPE_NULL,		/* Return NULL if failure */
 	EI_ETYPE_ERRNO,		/* Return -ERRNO if failure */
 	EI_ETYPE_ERRNO_NULL,	/* Return -ERRNO or NULL if failure */
+	EI_ETYPE_TRUE,		/* Return true if failure */
 };
 
 struct error_injection_entry {
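
The new EI_ETYPE_TRUE entry tells the error-injection core which value to
substitute when an override fires: here, literally true.
ALLOW_ERROR_INJECTION() in this same header expands to roughly the
following (a sketch; consult the header for the exact definition):

  #define ALLOW_ERROR_INJECTION(fname, _etype)				\
  static struct error_injection_entry __used				\
  	__attribute__((__section__("_error_injection_whitelist")))	\
  	_eil_addr_##fname = {						\
  		.addr = (unsigned long)fname,				\
  		.etype = EI_ETYPE_##_etype,				\
  	}

i.e. each whitelisted function is recorded, together with its return-type
tag, in the _error_injection_whitelist section for the BPF core to consult.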

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c

@@ -3131,7 +3131,7 @@ static int __init setup_fail_page_alloc(char *str)
 }
 __setup("fail_page_alloc=", setup_fail_page_alloc);
 
-static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
 	if (order < fail_page_alloc.min_order)
 		return false;
@@ -3181,13 +3181,19 @@ late_initcall(fail_page_alloc_debugfs);
 
 #else /* CONFIG_FAIL_PAGE_ALLOC */
 
-static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
 	return false;
 }
 
 #endif /* CONFIG_FAIL_PAGE_ALLOC */
 
+static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+{
+	return __should_fail_alloc_page(gfp_mask, order);
+}
+ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
+
 /*
  * Return true if free base pages are above 'mark'. For high-order checks it
  * will return true of the order-0 watermark is reached and there is at least
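
Two notes on the wrapper's design: __should_fail_alloc_page() keeps the
existing CONFIG_FAIL_PAGE_ALLOC logic, while the new should_fail_alloc_page()
is marked noinline so it survives as a real symbol that a kprobe can attach
to under either configuration.  On a kernel built with
CONFIG_FUNCTION_ERROR_INJECTION=y, the whitelisting can then be confirmed
from debugfs (a hedged usage example):

  # grep should_fail_alloc_page /sys/kernel/debug/error_injection/list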