mirror of
https://github.com/torvalds/linux.git
synced 2024-11-25 05:32:00 +00:00
mm: page_alloc: split out DEBUG_PAGEALLOC
Move DEBUG_PAGEALLOC related functions into a single file to reduce a bit of page_alloc.c. Link: https://lkml.kernel.org/r/20230516063821.121844-9-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> Cc: David Hildenbrand <david@redhat.com> Cc: "Huang, Ying" <ying.huang@intel.com> Cc: Iurii Zaikin <yzaikin@google.com> Cc: Kees Cook <keescook@chromium.org> Cc: Len Brown <len.brown@intel.com> Cc: Luis Chamberlain <mcgrof@kernel.org> Cc: Mike Rapoport (IBM) <rppt@kernel.org> Cc: Oscar Salvador <osalvador@suse.de> Cc: Pavel Machek <pavel@ucw.cz> Cc: Rafael J. Wysocki <rafael@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
0866e82e40
commit
884c175f12
@ -3471,9 +3471,58 @@ static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
|
||||
if (debug_pagealloc_enabled_static())
|
||||
__kernel_map_pages(page, numpages, 0);
|
||||
}
|
||||
|
||||
extern unsigned int _debug_guardpage_minorder;
|
||||
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
|
||||
|
||||
static inline unsigned int debug_guardpage_minorder(void)
|
||||
{
|
||||
return _debug_guardpage_minorder;
|
||||
}
|
||||
|
||||
static inline bool debug_guardpage_enabled(void)
|
||||
{
|
||||
return static_branch_unlikely(&_debug_guardpage_enabled);
|
||||
}
|
||||
|
||||
/*
 * Report whether @page is currently a guard page.  Checks the static
 * key first so the PageGuard() test is skipped entirely when guard
 * pages are disabled.
 */
static inline bool page_is_guard(struct page *page)
{
	return debug_guardpage_enabled() && PageGuard(page);
}
|
||||
|
||||
bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
|
||||
int migratetype);
|
||||
static inline bool set_page_guard(struct zone *zone, struct page *page,
|
||||
unsigned int order, int migratetype)
|
||||
{
|
||||
if (!debug_guardpage_enabled())
|
||||
return false;
|
||||
return __set_page_guard(zone, page, order, migratetype);
|
||||
}
|
||||
|
||||
void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
		      int migratetype);
/*
 * Undo set_page_guard() on @page; a no-op when guard pages are disabled.
 */
static inline void clear_page_guard(struct zone *zone, struct page *page,
				    unsigned int order, int migratetype)
{
	if (debug_guardpage_enabled())
		__clear_page_guard(zone, page, order, migratetype);
}
|
||||
|
||||
#else /* CONFIG_DEBUG_PAGEALLOC */
|
||||
static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
|
||||
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
|
||||
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
|
||||
static inline bool debug_guardpage_enabled(void) { return false; }
|
||||
static inline bool page_is_guard(struct page *page) { return false; }
|
||||
static inline bool set_page_guard(struct zone *zone, struct page *page,
|
||||
unsigned int order, int migratetype) { return false; }
|
||||
static inline void clear_page_guard(struct zone *zone, struct page *page,
|
||||
unsigned int order, int migratetype) {}
|
||||
#endif /* CONFIG_DEBUG_PAGEALLOC */
|
||||
|
||||
#ifdef __HAVE_ARCH_GATE_AREA
|
||||
@ -3711,33 +3760,6 @@ static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
|
||||
|
||||
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
|
||||
|
||||
#ifdef CONFIG_DEBUG_PAGEALLOC
|
||||
extern unsigned int _debug_guardpage_minorder;
|
||||
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
|
||||
|
||||
static inline unsigned int debug_guardpage_minorder(void)
|
||||
{
|
||||
return _debug_guardpage_minorder;
|
||||
}
|
||||
|
||||
static inline bool debug_guardpage_enabled(void)
|
||||
{
|
||||
return static_branch_unlikely(&_debug_guardpage_enabled);
|
||||
}
|
||||
|
||||
static inline bool page_is_guard(struct page *page)
|
||||
{
|
||||
if (!debug_guardpage_enabled())
|
||||
return false;
|
||||
|
||||
return PageGuard(page);
|
||||
}
|
||||
#else
|
||||
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
|
||||
static inline bool debug_guardpage_enabled(void) { return false; }
|
||||
static inline bool page_is_guard(struct page *page) { return false; }
|
||||
#endif /* CONFIG_DEBUG_PAGEALLOC */
|
||||
|
||||
#if MAX_NUMNODES > 1
|
||||
void __init setup_nr_node_ids(void);
|
||||
#else
|
||||
|
@ -124,6 +124,7 @@ obj-$(CONFIG_SECRETMEM) += secretmem.o
|
||||
obj-$(CONFIG_CMA_SYSFS) += cma_sysfs.o
|
||||
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
|
||||
obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
|
||||
obj-$(CONFIG_DEBUG_PAGEALLOC) += debug_page_alloc.o
|
||||
obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
|
||||
obj-$(CONFIG_DAMON) += damon/
|
||||
obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
|
||||
|
59
mm/debug_page_alloc.c
Normal file
59
mm/debug_page_alloc.c
Normal file
@ -0,0 +1,59 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/mm.h>
|
||||
#include <linux/page-isolation.h>
|
||||
|
||||
/* Minimum order for guard pages; set via "debug_guardpage_minorder" below. */
unsigned int _debug_guardpage_minorder;

/* Early boolean form of the debug_pagealloc state, parsed from the
 * "debug_pagealloc" boot parameter; defaults to the Kconfig setting. */
bool _debug_pagealloc_enabled_early __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
/* Static-key form of the same state, for use once jump labels are up. */
DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
EXPORT_SYMBOL(_debug_pagealloc_enabled);

/* Static key gating the guard-page helpers below. */
DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
|
||||
|
||||
static int __init early_debug_pagealloc(char *buf)
|
||||
{
|
||||
return kstrtobool(buf, &_debug_pagealloc_enabled_early);
|
||||
}
|
||||
early_param("debug_pagealloc", early_debug_pagealloc);
|
||||
|
||||
static int __init debug_guardpage_minorder_setup(char *buf)
|
||||
{
|
||||
unsigned long res;
|
||||
|
||||
if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
|
||||
pr_err("Bad debug_guardpage_minorder value\n");
|
||||
return 0;
|
||||
}
|
||||
_debug_guardpage_minorder = res;
|
||||
pr_info("Setting debug_guardpage_minorder to %lu\n", res);
|
||||
return 0;
|
||||
}
|
||||
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
|
||||
|
||||
bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
|
||||
int migratetype)
|
||||
{
|
||||
if (order >= debug_guardpage_minorder())
|
||||
return false;
|
||||
|
||||
__SetPageGuard(page);
|
||||
INIT_LIST_HEAD(&page->buddy_list);
|
||||
set_page_private(page, order);
|
||||
/* Guard pages are not available for any usage */
|
||||
if (!is_migrate_isolate(migratetype))
|
||||
__mod_zone_freepage_state(zone, -(1 << order), migratetype);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
 * Reverse of __set_page_guard(): drop the guard marking on @page and
 * return the pages to the zone's free accounting (unless isolated).
 */
void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
		      int migratetype)
{
	__ClearPageGuard(page);
	set_page_private(page, 0);

	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
|
@ -664,75 +664,6 @@ void destroy_large_folio(struct folio *folio)
|
||||
compound_page_dtors[dtor](&folio->page);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_PAGEALLOC
|
||||
unsigned int _debug_guardpage_minorder;
|
||||
|
||||
bool _debug_pagealloc_enabled_early __read_mostly
|
||||
= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
|
||||
EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
|
||||
DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
|
||||
EXPORT_SYMBOL(_debug_pagealloc_enabled);
|
||||
|
||||
DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
|
||||
|
||||
static int __init early_debug_pagealloc(char *buf)
|
||||
{
|
||||
return kstrtobool(buf, &_debug_pagealloc_enabled_early);
|
||||
}
|
||||
early_param("debug_pagealloc", early_debug_pagealloc);
|
||||
|
||||
static int __init debug_guardpage_minorder_setup(char *buf)
|
||||
{
|
||||
unsigned long res;
|
||||
|
||||
if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
|
||||
pr_err("Bad debug_guardpage_minorder value\n");
|
||||
return 0;
|
||||
}
|
||||
_debug_guardpage_minorder = res;
|
||||
pr_info("Setting debug_guardpage_minorder to %lu\n", res);
|
||||
return 0;
|
||||
}
|
||||
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
|
||||
|
||||
static inline bool set_page_guard(struct zone *zone, struct page *page,
|
||||
unsigned int order, int migratetype)
|
||||
{
|
||||
if (!debug_guardpage_enabled())
|
||||
return false;
|
||||
|
||||
if (order >= debug_guardpage_minorder())
|
||||
return false;
|
||||
|
||||
__SetPageGuard(page);
|
||||
INIT_LIST_HEAD(&page->buddy_list);
|
||||
set_page_private(page, order);
|
||||
/* Guard pages are not available for any usage */
|
||||
if (!is_migrate_isolate(migratetype))
|
||||
__mod_zone_freepage_state(zone, -(1 << order), migratetype);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline void clear_page_guard(struct zone *zone, struct page *page,
|
||||
unsigned int order, int migratetype)
|
||||
{
|
||||
if (!debug_guardpage_enabled())
|
||||
return;
|
||||
|
||||
__ClearPageGuard(page);
|
||||
|
||||
set_page_private(page, 0);
|
||||
if (!is_migrate_isolate(migratetype))
|
||||
__mod_zone_freepage_state(zone, (1 << order), migratetype);
|
||||
}
|
||||
#else
|
||||
static inline bool set_page_guard(struct zone *zone, struct page *page,
|
||||
unsigned int order, int migratetype) { return false; }
|
||||
static inline void clear_page_guard(struct zone *zone, struct page *page,
|
||||
unsigned int order, int migratetype) {}
|
||||
#endif
|
||||
|
||||
static inline void set_buddy_order(struct page *page, unsigned int order)
|
||||
{
|
||||
set_page_private(page, order);
|
||||
|
Loading…
Reference in New Issue
Block a user