mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE
Most of the VM_BUG_ON assertions are performed on a page. Usually, when one of these assertions fails we get a BUG with a call stack and the registers.

I've recently noticed, based on repeated requests to add a small piece of code that dumps the page at various VM_BUG_ON sites, that the page dump is quite useful to people debugging issues in mm.

This patch adds VM_BUG_ON_PAGE(cond, page) which, beyond doing what VM_BUG_ON() does, also dumps the page before executing the actual BUG_ON.

[akpm@linux-foundation.org: fix up includes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 309381feae
parent e3bba3c3c9
committed by Linus Torvalds
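The core of the change is the new VM_BUG_ON_PAGE() macro in include/linux/mmdebug.h (see its hunk below). Converting a call site is mechanical; as a minimal sketch, using the page_hstate() assertion from hugetlb.h as the example:

        /* Before: a failed assertion gives only registers and a call stack. */
        VM_BUG_ON(!PageHuge(page));

        /* After: the struct page is dumped via dump_page() before BUG() fires. */
        VM_BUG_ON_PAGE(!PageHuge(page), page);

With CONFIG_DEBUG_VM disabled, VM_BUG_ON_PAGE(cond, page) falls back to VM_BUG_ON(cond), so the condition is still compile-checked through BUILD_BUG_ON_INVALID() but emits no runtime code.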
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_GFP_H
 #define __LINUX_GFP_H
 
+#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/stddef.h>
 #include <linux/linkage.h>
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -2,6 +2,7 @@
 #define _LINUX_HUGETLB_H
 
 #include <linux/mm_types.h>
+#include <linux/mmdebug.h>
 #include <linux/fs.h>
 #include <linux/hugetlb_inline.h>
 #include <linux/cgroup.h>
@@ -354,7 +355,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 
 static inline struct hstate *page_hstate(struct page *page)
 {
-        VM_BUG_ON(!PageHuge(page));
+        VM_BUG_ON_PAGE(!PageHuge(page), page);
         return size_to_hstate(PAGE_SIZE << compound_order(page));
 }
 
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -15,6 +15,7 @@
 #ifndef _LINUX_HUGETLB_CGROUP_H
 #define _LINUX_HUGETLB_CGROUP_H
 
+#include <linux/mmdebug.h>
 #include <linux/res_counter.h>
 
 struct hugetlb_cgroup;
@@ -28,7 +29,7 @@ struct hugetlb_cgroup;
 
 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
 {
-        VM_BUG_ON(!PageHuge(page));
+        VM_BUG_ON_PAGE(!PageHuge(page), page);
 
         if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
                 return NULL;
@@ -38,7 +39,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
 static inline
 int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
 {
-        VM_BUG_ON(!PageHuge(page));
+        VM_BUG_ON_PAGE(!PageHuge(page), page);
 
         if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
                 return -1;
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -5,6 +5,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/mmdebug.h>
 #include <linux/gfp.h>
 #include <linux/bug.h>
 #include <linux/list.h>
@@ -303,7 +304,7 @@ static inline int get_freepage_migratetype(struct page *page)
  */
 static inline int put_page_testzero(struct page *page)
 {
-        VM_BUG_ON(atomic_read(&page->_count) == 0);
+        VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
         return atomic_dec_and_test(&page->_count);
 }
 
@@ -364,7 +365,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        VM_BUG_ON(PageSlab(page));
+        VM_BUG_ON_PAGE(PageSlab(page), page);
         bit_spin_lock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -372,7 +373,7 @@ static inline void compound_lock(struct page *page)
 static inline void compound_unlock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        VM_BUG_ON(PageSlab(page));
+        VM_BUG_ON_PAGE(PageSlab(page), page);
         bit_spin_unlock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -447,7 +448,7 @@ static inline bool __compound_tail_refcounted(struct page *page)
  */
 static inline bool compound_tail_refcounted(struct page *page)
 {
-        VM_BUG_ON(!PageHead(page));
+        VM_BUG_ON_PAGE(!PageHead(page), page);
         return __compound_tail_refcounted(page);
 }
 
@@ -456,9 +457,9 @@ static inline void get_huge_page_tail(struct page *page)
         /*
          * __split_huge_page_refcount() cannot run from under us.
          */
-        VM_BUG_ON(!PageTail(page));
-        VM_BUG_ON(page_mapcount(page) < 0);
-        VM_BUG_ON(atomic_read(&page->_count) != 0);
+        VM_BUG_ON_PAGE(!PageTail(page), page);
+        VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+        VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
         if (compound_tail_refcounted(page->first_page))
                 atomic_inc(&page->_mapcount);
 }
@@ -474,7 +475,7 @@ static inline void get_page(struct page *page)
          * Getting a normal page or the head of a compound page
          * requires to already have an elevated page->_count.
          */
-        VM_BUG_ON(atomic_read(&page->_count) <= 0);
+        VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
         atomic_inc(&page->_count);
 }
 
@@ -511,13 +512,13 @@ static inline int PageBuddy(struct page *page)
 
 static inline void __SetPageBuddy(struct page *page)
 {
-        VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
+        VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
         atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 }
 
 static inline void __ClearPageBuddy(struct page *page)
 {
-        VM_BUG_ON(!PageBuddy(page));
+        VM_BUG_ON_PAGE(!PageBuddy(page), page);
         atomic_set(&page->_mapcount, -1);
 }
 
@@ -1401,7 +1402,7 @@ static inline bool ptlock_init(struct page *page)
          * slab code uses page->slab_cache and page->first_page (for tail
          * pages), which share storage with page->ptl.
          */
-        VM_BUG_ON(*(unsigned long *)&page->ptl);
+        VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
         if (!ptlock_alloc(page))
                 return false;
         spin_lock_init(ptlock_ptr(page));
@@ -1492,7 +1493,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
 static inline void pgtable_pmd_page_dtor(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        VM_BUG_ON(page->pmd_huge_pte);
+        VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
 #endif
         ptlock_free(page);
 }
@@ -2029,10 +2030,6 @@ extern void shake_page(struct page *p, int access);
 extern atomic_long_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
-extern void dump_page(struct page *page, char *reason);
-extern void dump_page_badflags(struct page *page, char *reason,
-                               unsigned long badflags);
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 extern void clear_huge_page(struct page *page,
                             unsigned long addr,
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -1,10 +1,19 @@
 #ifndef LINUX_MM_DEBUG_H
 #define LINUX_MM_DEBUG_H 1
 
+struct page;
+
+extern void dump_page(struct page *page, char *reason);
+extern void dump_page_badflags(struct page *page, char *reason,
+                               unsigned long badflags);
+
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
+#define VM_BUG_ON_PAGE(cond, page) \
+        do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0)
 #else
 #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
 #endif
 
 #ifdef CONFIG_DEBUG_VIRTUAL
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -412,7 +412,7 @@ static inline void ClearPageCompound(struct page *page)
  */
 static inline int PageTransHuge(struct page *page)
 {
-        VM_BUG_ON(PageTail(page));
+        VM_BUG_ON_PAGE(PageTail(page), page);
         return PageHead(page);
 }
 
@@ -460,25 +460,25 @@ static inline int PageTransTail(struct page *page)
  */
 static inline int PageSlabPfmemalloc(struct page *page)
 {
-        VM_BUG_ON(!PageSlab(page));
+        VM_BUG_ON_PAGE(!PageSlab(page), page);
         return PageActive(page);
 }
 
 static inline void SetPageSlabPfmemalloc(struct page *page)
 {
-        VM_BUG_ON(!PageSlab(page));
+        VM_BUG_ON_PAGE(!PageSlab(page), page);
         SetPageActive(page);
 }
 
 static inline void __ClearPageSlabPfmemalloc(struct page *page)
 {
-        VM_BUG_ON(!PageSlab(page));
+        VM_BUG_ON_PAGE(!PageSlab(page), page);
         __ClearPageActive(page);
 }
 
 static inline void ClearPageSlabPfmemalloc(struct page *page)
 {
-        VM_BUG_ON(!PageSlab(page));
+        VM_BUG_ON_PAGE(!PageSlab(page), page);
         ClearPageActive(page);
 }
 
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -162,7 +162,7 @@ static inline int page_cache_get_speculative(struct page *page)
          * disabling preempt, and hence no need for the "speculative get" that
          * SMP requires.
          */
-        VM_BUG_ON(page_count(page) == 0);
+        VM_BUG_ON_PAGE(page_count(page) == 0, page);
         atomic_inc(&page->_count);
 
 #else
@@ -175,7 +175,7 @@ static inline int page_cache_get_speculative(struct page *page)
                 return 0;
         }
 #endif
-        VM_BUG_ON(PageTail(page));
+        VM_BUG_ON_PAGE(PageTail(page), page);
 
         return 1;
 }
@@ -191,14 +191,14 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 # ifdef CONFIG_PREEMPT_COUNT
         VM_BUG_ON(!in_atomic());
 # endif
-        VM_BUG_ON(page_count(page) == 0);
+        VM_BUG_ON_PAGE(page_count(page) == 0, page);
         atomic_add(count, &page->_count);
 
 #else
         if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
                 return 0;
 #endif
-        VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
 
         return 1;
 }
@@ -210,7 +210,7 @@ static inline int page_freeze_refs(struct page *page, int count)
 
 static inline void page_unfreeze_refs(struct page *page, int count)
 {
-        VM_BUG_ON(page_count(page) != 0);
+        VM_BUG_ON_PAGE(page_count(page) != 0, page);
         VM_BUG_ON(count == 0);
 
         atomic_set(&page->_count, count);
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_PERCPU_H
 #define __LINUX_PERCPU_H
 
+#include <linux/mmdebug.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
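Since mmdebug.h now forward-declares struct page and carries the dump_page()/dump_page_badflags() prototypes itself, the dump helper can also be called directly outside of an assertion. A minimal, hypothetical sketch (the helper name and reason string are made up for illustration; dump_page() and PageTail() are real and are pulled in via linux/mm.h):

        #include <linux/mm.h>

        /* Hypothetical debugging helper: report a suspect page without
         * taking the machine down the way VM_BUG_ON_PAGE() would. */
        static void report_suspect_page(struct page *page)
        {
                if (PageTail(page))
                        dump_page(page, "unexpected tail page");
        }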