mirror of https://github.com/torvalds/linux.git, synced 2024-11-30 08:01:59 +00:00
ed8ea81501
Add a CONFIG_DEBUG_VM_RB build option for the previously existing
DEBUG_MM_RB code. Now that Andi Kleen modified it to avoid using
recursive algorithms, we can expose it a bit more.

Also extend this code to validate_mm() after stack expansion, and to
check that the vma's start and last pgoffs have not changed since the
nodes were inserted on the anon vma interval tree (as it is important
that the nodes be reindexed after each such update).

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Daniel Santos <daniel.santos@pobox.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
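For context, the cached start/last values checked by the new verify helper live on the anon_vma_chain nodes themselves. A minimal sketch of the relevant part of struct anon_vma_chain, assuming the declaration in include/linux/rmap.h (field comments here are illustrative, not authoritative):

struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;
        struct rb_node rb;              /* node in anon_vma->rb_root */
        unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
        /* interval endpoints cached at insert time, for later verification */
        unsigned long cached_vma_start, cached_vma_last;
#endif
};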
113 lines
3.2 KiB
C
/*
 * mm/interval_tree.c - interval tree for mapping->i_mmap
 *
 * Copyright (C) 2012, Michel Lespinasse <walken@google.com>
 *
 * This file is released under the GPL v2.
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rmap.h>
#include <linux/interval_tree_generic.h>

static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
        return v->vm_pgoff;
}

static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
        return v->vm_pgoff + ((v->vm_end - v->vm_start) >> PAGE_SHIFT) - 1;
}

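/*
 * Instantiate an augmented rbtree keyed by [vma_start_pgoff; vma_last_pgoff]:
 * this generates vma_interval_tree_insert(), _remove(), _iter_first(),
 * _iter_next() and the vma_interval_tree_augment callbacks used below,
 * threaded through vma->shared.linear.rb.
 */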
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.linear.rb,
                     unsigned long, shared.linear.rb_subtree_last,
                     vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)

/* Insert node immediately after prev in the interval tree */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
                                    struct vm_area_struct *prev,
                                    struct rb_root *root)
{
        struct rb_node **link;
        struct vm_area_struct *parent;
        unsigned long last = vma_last_pgoff(node);

        VM_BUG_ON(vma_start_pgoff(node) != vma_start_pgoff(prev));

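        /*
         * If prev has no right child, node simply becomes that right child.
         * Otherwise prev's in-order successor is the leftmost node of prev's
         * right subtree: walk down to it, propagating node's last pgoff into
         * rb_subtree_last along the way, and link node as its left child.
         */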
        if (!prev->shared.linear.rb.rb_right) {
                parent = prev;
                link = &prev->shared.linear.rb.rb_right;
        } else {
                parent = rb_entry(prev->shared.linear.rb.rb_right,
                                  struct vm_area_struct, shared.linear.rb);
                if (parent->shared.linear.rb_subtree_last < last)
                        parent->shared.linear.rb_subtree_last = last;
                while (parent->shared.linear.rb.rb_left) {
                        parent = rb_entry(parent->shared.linear.rb.rb_left,
                                struct vm_area_struct, shared.linear.rb);
                        if (parent->shared.linear.rb_subtree_last < last)
                                parent->shared.linear.rb_subtree_last = last;
                }
                link = &parent->shared.linear.rb.rb_left;
        }

        node->shared.linear.rb_subtree_last = last;
        rb_link_node(&node->shared.linear.rb, &parent->shared.linear.rb, link);
        rb_insert_augmented(&node->shared.linear.rb, root,
                            &vma_interval_tree_augment);
}

static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
        return vma_start_pgoff(avc->vma);
}

static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
        return vma_last_pgoff(avc->vma);
}

INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
                     avc_start_pgoff, avc_last_pgoff,
                     static inline, __anon_vma_interval_tree)

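/*
 * The anon vma interval tree helpers are generated static inline under a
 * double-underscore prefix; the wrappers below are the out-of-line entry
 * points used by rmap.  The insert wrapper records the interval the node
 * was indexed under so that anon_vma_interval_tree_verify() can later
 * detect nodes that were not reindexed after their vma's pgoff changed
 * (CONFIG_DEBUG_VM_RB only).
 */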
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
                                   struct rb_root *root)
{
#ifdef CONFIG_DEBUG_VM_RB
        node->cached_vma_start = avc_start_pgoff(node);
        node->cached_vma_last = avc_last_pgoff(node);
#endif
        __anon_vma_interval_tree_insert(node, root);
}

void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
                                   struct rb_root *root)
{
        __anon_vma_interval_tree_remove(node, root);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root *root,
                                  unsigned long first, unsigned long last)
{
        return __anon_vma_interval_tree_iter_first(root, first, last);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
                                 unsigned long first, unsigned long last)
{
        return __anon_vma_interval_tree_iter_next(node, first, last);
}

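/*
 * Sanity check: the node's interval endpoints must still match what was
 * cached at insert time.  A mismatch means the vma was moved or resized
 * without its anon vma interval tree nodes being removed and reinserted.
 */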
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
{
        WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
        WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
}
#endif
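For reference, a minimal sketch of how callers typically walk the file-backed interval tree defined above, assuming the vma_interval_tree_foreach() iterator macro from include/linux/mm.h; the function name, pgoff range and pr_info() reporting are illustrative, and i_mmap locking (mapping->i_mmap_mutex in this era) is left to the caller:

#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative only: visit every vma that maps pages [first, last] of a file */
static void example_walk_i_mmap(struct address_space *mapping,
                                pgoff_t first, pgoff_t last)
{
        struct vm_area_struct *vma;

        /* caller is assumed to hold mapping->i_mmap_mutex */
        vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last)
                pr_info("vma %p starts at pgoff %lu\n", vma, vma->vm_pgoff);
}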