rbtree: avoid generating code twice for the cached versions (tools copy)

As was already noted in rbtree.h, the logic to cache rb_first (or
rb_last) can easily be implemented externally to the core rbtree API
(see the sketch below).

This commit takes the changes applied to the include/linux/ and lib/
rbtree files in 9f973cb380 ("lib/rbtree: avoid generating code twice
for the cached versions") and applies them to the tools/include/linux/
and tools/lib/ copies as well, keeping the two in sync.
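
To illustrate, the cached helpers can live in the header as trivial
static-inline wrappers around the uncached entry points. A minimal
sketch, modeled on the kernel-side change in 9f973cb380 and shown for
illustration rather than as the exact tools diff:

	static inline void rb_insert_color_cached(struct rb_node *node,
						  struct rb_root_cached *root,
						  bool leftmost)
	{
		/* Caller tells us whether the new node became the leftmost. */
		if (leftmost)
			root->rb_leftmost = node;
		rb_insert_color(node, &root->rb_root);
	}

	static inline void rb_erase_cached(struct rb_node *node,
					   struct rb_root_cached *root)
	{
		/* Advance the cache before the node leaves the tree. */
		if (root->rb_leftmost == node)
			root->rb_leftmost = rb_next(node);
		rb_erase(node, &root->rb_root);
	}

	static inline void rb_replace_node_cached(struct rb_node *victim,
						  struct rb_node *new,
						  struct rb_root_cached *root)
	{
		if (root->rb_leftmost == victim)
			root->rb_leftmost = new;
		rb_replace_node(victim, new, &root->rb_root);
	}

With the caching handled there, the rebalancing code no longer needs
the newleft/leftmost plumbing, which is what the diff below removes.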

Link: http://lkml.kernel.org/r/20190703034812.53002-1-walken@google.com
Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Michel Lespinasse authored 2019-09-25 16:46:02 -07:00, committed by Linus Torvalds
commit c7d4f7eeb6
parent 0f74914071
3 changed files with 62 additions and 77 deletions

tools/lib/rbtree.c

@@ -83,14 +83,10 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
 
 static __always_inline void
 __rb_insert(struct rb_node *node, struct rb_root *root,
-	    bool newleft, struct rb_node **leftmost,
 	    void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
 	struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
 
-	if (newleft)
-		*leftmost = node;
-
 	while (true) {
 		/*
 		 * Loop invariant: node is red.
@@ -436,34 +432,17 @@ static const struct rb_augment_callbacks dummy_callbacks = {
 
 void rb_insert_color(struct rb_node *node, struct rb_root *root)
 {
-	__rb_insert(node, root, false, NULL, dummy_rotate);
+	__rb_insert(node, root, dummy_rotate);
 }
 
 void rb_erase(struct rb_node *node, struct rb_root *root)
 {
 	struct rb_node *rebalance;
-	rebalance = __rb_erase_augmented(node, root,
-					 NULL, &dummy_callbacks);
+	rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
 	if (rebalance)
 		____rb_erase_color(rebalance, root, dummy_rotate);
 }
 
-void rb_insert_color_cached(struct rb_node *node,
-			    struct rb_root_cached *root, bool leftmost)
-{
-	__rb_insert(node, &root->rb_root, leftmost,
-		    &root->rb_leftmost, dummy_rotate);
-}
-
-void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
-{
-	struct rb_node *rebalance;
-	rebalance = __rb_erase_augmented(node, &root->rb_root,
-					 &root->rb_leftmost, &dummy_callbacks);
-	if (rebalance)
-		____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
-}
-
 /*
  * Augmented rbtree manipulation functions.
  *
@@ -472,10 +451,9 @@ void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
  */
 
 void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
-			   bool newleft, struct rb_node **leftmost,
			   void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
-	__rb_insert(node, root, newleft, leftmost, augment_rotate);
+	__rb_insert(node, root, augment_rotate);
 }
 
 /*
@@ -580,15 +558,6 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
 	__rb_change_child(victim, new, parent, root);
 }
 
-void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
-			    struct rb_root_cached *root)
-{
-	rb_replace_node(victim, new, &root->rb_root);
-
-	if (root->rb_leftmost == victim)
-		root->rb_leftmost = new;
-}
-
 static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
 {
 	for (;;) {
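
Callers keep the usual rb_link_node()/rb_insert_color_cached()
pattern, tracking during the descent whether the new node became the
leftmost. A minimal, hypothetical example (struct item and
item_insert() are illustrative, not part of this commit):

	#include <linux/rbtree.h>

	struct item {
		struct rb_node node;
		unsigned long key;
	};

	/* Insert @new into @root, keeping root->rb_leftmost up to date. */
	static void item_insert(struct item *new, struct rb_root_cached *root)
	{
		struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
		bool leftmost = true;

		while (*link) {
			struct item *cur = rb_entry(*link, struct item, node);

			parent = *link;
			if (new->key < cur->key) {
				link = &cur->node.rb_left;
			} else {
				link = &cur->node.rb_right;
				leftmost = false;	/* went right at least once */
			}
		}

		rb_link_node(&new->node, parent, link);
		rb_insert_color_cached(&new->node, root, leftmost);
	}

After such an insert, rb_first_cached(root) returns the leftmost node
in O(1) from the cached pointer instead of walking down the tree.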