Merge tag 'perf-core-for-mingo-5.0-20190126' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

BPF:

  Song Liu:

  - Fix synthesized PERF_RECORD_KSYMBOL/BPF_EVENT

  Arnaldo Carvalho de Melo:

  - Add bpf_map() helper, to make BPF map declarations more compact and
    allow for BTF annotations to be made transparently, as illustrated
    below.
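
    For instance (from the augmented_raw_syscalls.c hunk below, with the
    macro itself defined in the bpf.h hunk), the open-coded declaration:

      struct bpf_map SEC("maps") syscalls = {
              .type        = BPF_MAP_TYPE_ARRAY,
              .key_size    = sizeof(int),
              .value_size  = sizeof(struct syscall),
              .max_entries = 512,
      };

    becomes:

      bpf_map(syscalls, ARRAY, int, struct syscall, 512);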

perf script python:

  Tony Jones:

  - Remove explicit shebangs.

  - Fix the PYTHON=python3 builds.

Core:

  Davidlohr Bueso:

  - Update rbtree implementation, getting it closer to the kernel one.

  - Use cached rbtrees.

  Arnaldo Carvalho de Melo:

  - Remove some needless headers from .c and .h files, fixing up the
    fallout, to reduce build time when changes are made to .h files.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Commit b844ff366f by Ingo Molnar, 2019-01-26 10:50:29 +01:00
73 changed files with 810 additions and 531 deletions

View File

@ -43,13 +43,28 @@ struct rb_root {
struct rb_node *rb_node;
};
/*
* Leftmost-cached rbtrees.
*
* We do not cache the rightmost node based on footprint
* size vs number of potential users that could benefit
* from O(1) rb_last(). Just not worth it, users that want
* this feature can always implement the logic explicitly.
* Furthermore, users that want to cache both pointers may
* find it a bit asymmetric, but that's ok.
*/
struct rb_root_cached {
struct rb_root rb_root;
struct rb_node *rb_leftmost;
};
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define RB_ROOT (struct rb_root) { NULL, }
#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
@ -68,6 +83,12 @@ extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
extern void rb_insert_color_cached(struct rb_node *,
struct rb_root_cached *, bool);
extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) (root)->rb_leftmost
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
@ -75,6 +96,8 @@ extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
struct rb_root_cached *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
@ -90,12 +113,29 @@ static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
____ptr ? rb_entry(____ptr, type, member) : NULL; \
})
/*
* Handy for checking that we are not deleting an entry that is
* already in a list, found in block/{blk-throttle,cfq-iosched}.c,
* probably should be moved to lib/rbtree.c...
/**
* rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
* given type allowing the backing memory of @pos to be invalidated
*
* @pos: the 'type *' to use as a loop cursor.
* @n: another 'type *' to use as temporary storage
* @root: 'rb_root *' of the rbtree.
* @field: the name of the rb_node field within 'type'.
*
* rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
* list_for_each_entry_safe() and allows the iteration to continue independent
* of changes to @pos by the body of the loop.
*
* Note, however, that it cannot handle other modifications that re-order the
* rbtree it is iterating over. This includes calling rb_erase() on @pos, as
* rb_erase() may rebalance the tree, causing us to miss some nodes.
*/
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
typeof(*pos), field); 1; }); \
pos = n)
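
A usage sketch (illustrative only, with a hypothetical 'struct thing' and
userspace free(); not part of this patch), showing the teardown pattern the
iterator is designed for: freeing every node after its children have been
visited, without ever calling rb_erase():

        struct thing {
                struct rb_node rb;
                /* payload */
        };

        static void things_free_all(struct rb_root *root)
        {
                struct thing *pos, *n;

                rbtree_postorder_for_each_entry_safe(pos, n, root, rb)
                        free(pos);      /* safe: 'n' was fetched before 'pos' died */

                *root = RB_ROOT;        /* all nodes are gone, reset the root */
        }
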
static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
rb_erase(n, root);
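
To illustrate the cached API added above (hypothetical 'struct thing' and
'thing_less()'; the same open-coded pattern appears in the builtin-sched.c
hunk further down): the caller tracks whether the new node only ever
descended left and passes that as the hint, keeping the leftmost cache valid
so rb_first_cached() stays O(1):

        static void thing_insert(struct rb_root_cached *root, struct thing *new)
        {
                struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
                bool leftmost = true;

                while (*link) {
                        parent = *link;
                        if (thing_less(new, rb_entry(parent, struct thing, rb)))
                                link = &parent->rb_left;
                        else {
                                link = &parent->rb_right;
                                leftmost = false;       /* went right at least once */
                        }
                }

                rb_link_node(&new->rb, parent, link);
                rb_insert_color_cached(&new->rb, root, leftmost);
        }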

View File

@ -44,7 +44,9 @@ struct rb_augment_callbacks {
void (*rotate)(struct rb_node *old, struct rb_node *new);
};
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
extern void __rb_insert_augmented(struct rb_node *node,
struct rb_root *root,
bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
/*
* Fixup the rbtree and update the augmented information when rebalancing.
@ -60,7 +62,16 @@ static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
__rb_insert_augmented(node, root, augment->rotate);
__rb_insert_augmented(node, root, false, NULL, augment->rotate);
}
static inline void
rb_insert_augmented_cached(struct rb_node *node,
struct rb_root_cached *root, bool newleft,
const struct rb_augment_callbacks *augment)
{
__rb_insert_augmented(node, &root->rb_root,
newleft, &root->rb_leftmost, augment->rotate);
}
#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
@ -93,7 +104,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
old->rbaugmented = rbcompute(old); \
} \
rbstatic const struct rb_augment_callbacks rbname = { \
rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
.propagate = rbname ## _propagate, \
.copy = rbname ## _copy, \
.rotate = rbname ## _rotate \
};
@ -126,11 +139,11 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
{
if (parent) {
if (parent->rb_left == old)
parent->rb_left = new;
WRITE_ONCE(parent->rb_left, new);
else
parent->rb_right = new;
WRITE_ONCE(parent->rb_right, new);
} else
root->rb_node = new;
WRITE_ONCE(root->rb_node, new);
}
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
@ -138,12 +151,17 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
struct rb_node **leftmost,
const struct rb_augment_callbacks *augment)
{
struct rb_node *child = node->rb_right, *tmp = node->rb_left;
struct rb_node *child = node->rb_right;
struct rb_node *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
unsigned long pc;
if (leftmost && node == *leftmost)
*leftmost = rb_next(node);
if (!tmp) {
/*
* Case 1: node to erase has no more than 1 child (easy!)
@ -170,6 +188,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
tmp = parent;
} else {
struct rb_node *successor = child, *child2;
tmp = child->rb_left;
if (!tmp) {
/*
@ -183,6 +202,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
*/
parent = successor;
child2 = successor->rb_right;
augment->copy(node, successor);
} else {
/*
@ -204,19 +224,23 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
successor = tmp;
tmp = tmp->rb_left;
} while (tmp);
parent->rb_left = child2 = successor->rb_right;
successor->rb_right = child;
child2 = successor->rb_right;
WRITE_ONCE(parent->rb_left, child2);
WRITE_ONCE(successor->rb_right, child);
rb_set_parent(child, successor);
augment->copy(node, successor);
augment->propagate(parent, successor);
}
successor->rb_left = tmp = node->rb_left;
tmp = node->rb_left;
WRITE_ONCE(successor->rb_left, tmp);
rb_set_parent(tmp, successor);
pc = node->__rb_parent_color;
tmp = __rb_parent(pc);
__rb_change_child(node, successor, tmp, root);
if (child2) {
successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
@ -237,9 +261,21 @@ static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
struct rb_node *rebalance = __rb_erase_augmented(node, root,
NULL, augment);
if (rebalance)
__rb_erase_color(rebalance, root, augment->rotate);
}
#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
static __always_inline void
rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
const struct rb_augment_callbacks *augment)
{
struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root,
&root->rb_leftmost,
augment);
if (rebalance)
__rb_erase_color(rebalance, &root->rb_root, augment->rotate);
}
#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */

View File

@ -22,6 +22,7 @@
*/
#include <linux/rbtree_augmented.h>
#include <linux/export.h>
/*
* red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
@ -43,6 +44,30 @@
* parentheses and have some accompanying text comment.
*/
/*
* Notes on lockless lookups:
*
* All stores to the tree structure (rb_left and rb_right) must be done using
* WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the
* tree structure as seen in program order.
*
* These two requirements will allow lockless iteration of the tree -- not
* correct iteration mind you, tree rotations are not atomic so a lookup might
* miss entire subtrees.
*
* But they do guarantee that any such traversal will only see valid elements
* and that it will indeed complete -- does not get stuck in a loop.
*
* It also guarantees that if the lookup returns an element it is the 'correct'
* one. But not returning an element does _NOT_ mean it's not present.
*
* NOTE:
*
* Stores to __rb_parent_color are not important for simple lookups so those
* are left undone as of now. Nor did I check for loops involving parent
* pointers.
*/
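
A sketch of the kind of lookup these rules enable (hypothetical 'struct
thing' keyed by a u64; not from this patch). The loads mirror the
WRITE_ONCE() stores with READ_ONCE(); a returned element is valid, but a
NULL result proves nothing:

        static struct thing *thing_lookup_lockless(struct rb_root *root, u64 key)
        {
                struct rb_node *node = READ_ONCE(root->rb_node);

                while (node) {
                        struct thing *t = rb_entry(node, struct thing, rb);

                        if (key < t->key)
                                node = READ_ONCE(node->rb_left);
                        else if (key > t->key)
                                node = READ_ONCE(node->rb_right);
                        else
                                return t;       /* hit: a valid, matching element */
                }

                return NULL;    /* miss: NOT authoritative under concurrent rotation */
        }
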
static inline void rb_set_black(struct rb_node *rb)
{
rb->__rb_parent_color |= RB_BLACK;
@ -70,22 +95,35 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
static __always_inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
if (newleft)
*leftmost = node;
while (true) {
/*
* Loop invariant: node is red
*
* If there is a black parent, we are done.
* Otherwise, take some corrective action as we don't
* want a red root or two consecutive red nodes.
* Loop invariant: node is red.
*/
if (!parent) {
if (unlikely(!parent)) {
/*
* The inserted node is root. Either this is the
* first node, or we recursed at Case 1 below and
* are no longer violating 4).
*/
rb_set_parent_color(node, NULL, RB_BLACK);
break;
} else if (rb_is_black(parent))
}
/*
* If there is a black parent, we are done.
* Otherwise, take some corrective action as,
* per 4), we don't want a red root or two
* consecutive red nodes.
*/
if(rb_is_black(parent))
break;
gparent = rb_red_parent(parent);
@ -94,7 +132,7 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
if (parent != tmp) { /* parent == gparent->rb_left */
if (tmp && rb_is_red(tmp)) {
/*
* Case 1 - color flips
* Case 1 - node's uncle is red (color flips).
*
* G g
* / \ / \
@ -117,7 +155,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
tmp = parent->rb_right;
if (node == tmp) {
/*
* Case 2 - left rotate at parent
* Case 2 - node's uncle is black and node is
* the parent's right child (left rotate at parent).
*
* G G
* / \ / \
@ -128,8 +167,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
* This still leaves us in violation of 4), the
* continuation into Case 3 will fix that.
*/
parent->rb_right = tmp = node->rb_left;
node->rb_left = parent;
tmp = node->rb_left;
WRITE_ONCE(parent->rb_right, tmp);
WRITE_ONCE(node->rb_left, parent);
if (tmp)
rb_set_parent_color(tmp, parent,
RB_BLACK);
@ -140,7 +180,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
}
/*
* Case 3 - right rotate at gparent
* Case 3 - node's uncle is black and node is
* the parent's left child (right rotate at gparent).
*
* G P
* / \ / \
@ -148,8 +189,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
* / \
* n U
*/
gparent->rb_left = tmp; /* == parent->rb_right */
parent->rb_right = gparent;
WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */
WRITE_ONCE(parent->rb_right, gparent);
if (tmp)
rb_set_parent_color(tmp, gparent, RB_BLACK);
__rb_rotate_set_parents(gparent, parent, root, RB_RED);
@ -170,8 +211,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
tmp = parent->rb_left;
if (node == tmp) {
/* Case 2 - right rotate at parent */
parent->rb_left = tmp = node->rb_right;
node->rb_right = parent;
tmp = node->rb_right;
WRITE_ONCE(parent->rb_left, tmp);
WRITE_ONCE(node->rb_right, parent);
if (tmp)
rb_set_parent_color(tmp, parent,
RB_BLACK);
@ -182,8 +224,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
}
/* Case 3 - left rotate at gparent */
gparent->rb_right = tmp; /* == parent->rb_left */
parent->rb_left = gparent;
WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */
WRITE_ONCE(parent->rb_left, gparent);
if (tmp)
rb_set_parent_color(tmp, gparent, RB_BLACK);
__rb_rotate_set_parents(gparent, parent, root, RB_RED);
@ -223,8 +265,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* Sl Sr N Sl
*/
parent->rb_right = tmp1 = sibling->rb_left;
sibling->rb_left = parent;
tmp1 = sibling->rb_left;
WRITE_ONCE(parent->rb_right, tmp1);
WRITE_ONCE(sibling->rb_left, parent);
rb_set_parent_color(tmp1, parent, RB_BLACK);
__rb_rotate_set_parents(parent, sibling, root,
RB_RED);
@ -268,15 +311,31 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
*
* (p) (p)
* / \ / \
* N S --> N Sl
* N S --> N sl
* / \ \
* sl Sr s
* sl Sr S
* \
* Sr
*
* Note: p might be red, and then both
* p and sl are red after rotation(which
* breaks property 4). This is fixed in
* Case 4 (in __rb_rotate_set_parents()
* which set sl the color of p
* and set p RB_BLACK)
*
* (p) (sl)
* / \ / \
* N sl --> P S
* \ / \
* S N Sr
* \
* Sr
*/
sibling->rb_left = tmp1 = tmp2->rb_right;
tmp2->rb_right = sibling;
parent->rb_right = tmp2;
tmp1 = tmp2->rb_right;
WRITE_ONCE(sibling->rb_left, tmp1);
WRITE_ONCE(tmp2->rb_right, sibling);
WRITE_ONCE(parent->rb_right, tmp2);
if (tmp1)
rb_set_parent_color(tmp1, sibling,
RB_BLACK);
@ -296,8 +355,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* (sl) sr N (sl)
*/
parent->rb_right = tmp2 = sibling->rb_left;
sibling->rb_left = parent;
tmp2 = sibling->rb_left;
WRITE_ONCE(parent->rb_right, tmp2);
WRITE_ONCE(sibling->rb_left, parent);
rb_set_parent_color(tmp1, sibling, RB_BLACK);
if (tmp2)
rb_set_parent(tmp2, parent);
@ -309,8 +369,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
sibling = parent->rb_left;
if (rb_is_red(sibling)) {
/* Case 1 - right rotate at parent */
parent->rb_left = tmp1 = sibling->rb_right;
sibling->rb_right = parent;
tmp1 = sibling->rb_right;
WRITE_ONCE(parent->rb_left, tmp1);
WRITE_ONCE(sibling->rb_right, parent);
rb_set_parent_color(tmp1, parent, RB_BLACK);
__rb_rotate_set_parents(parent, sibling, root,
RB_RED);
@ -334,10 +395,11 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
}
break;
}
/* Case 3 - right rotate at sibling */
sibling->rb_right = tmp1 = tmp2->rb_left;
tmp2->rb_left = sibling;
parent->rb_left = tmp2;
/* Case 3 - left rotate at sibling */
tmp1 = tmp2->rb_left;
WRITE_ONCE(sibling->rb_right, tmp1);
WRITE_ONCE(tmp2->rb_left, sibling);
WRITE_ONCE(parent->rb_left, tmp2);
if (tmp1)
rb_set_parent_color(tmp1, sibling,
RB_BLACK);
@ -345,9 +407,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
tmp1 = sibling;
sibling = tmp2;
}
/* Case 4 - left rotate at parent + color flips */
parent->rb_left = tmp2 = sibling->rb_right;
sibling->rb_right = parent;
/* Case 4 - right rotate at parent + color flips */
tmp2 = sibling->rb_right;
WRITE_ONCE(parent->rb_left, tmp2);
WRITE_ONCE(sibling->rb_right, parent);
rb_set_parent_color(tmp1, sibling, RB_BLACK);
if (tmp2)
rb_set_parent(tmp2, parent);
@ -378,22 +441,41 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
static const struct rb_augment_callbacks dummy_callbacks = {
dummy_propagate, dummy_copy, dummy_rotate
.propagate = dummy_propagate,
.copy = dummy_copy,
.rotate = dummy_rotate
};
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
__rb_insert(node, root, dummy_rotate);
__rb_insert(node, root, false, NULL, dummy_rotate);
}
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *rebalance;
rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
rebalance = __rb_erase_augmented(node, root,
NULL, &dummy_callbacks);
if (rebalance)
____rb_erase_color(rebalance, root, dummy_rotate);
}
void rb_insert_color_cached(struct rb_node *node,
struct rb_root_cached *root, bool leftmost)
{
__rb_insert(node, &root->rb_root, leftmost,
&root->rb_leftmost, dummy_rotate);
}
void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
{
struct rb_node *rebalance;
rebalance = __rb_erase_augmented(node, &root->rb_root,
&root->rb_leftmost, &dummy_callbacks);
if (rebalance)
____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
}
/*
* Augmented rbtree manipulation functions.
*
@ -402,9 +484,10 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
*/
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
__rb_insert(node, root, augment_rotate);
__rb_insert(node, root, newleft, leftmost, augment_rotate);
}
/*
@ -498,15 +581,24 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
{
struct rb_node *parent = rb_parent(victim);
/* Copy the pointers/colour from the victim to the replacement */
*new = *victim;
/* Set the surrounding nodes to point to the replacement */
__rb_change_child(victim, new, parent, root);
if (victim->rb_left)
rb_set_parent(victim->rb_left, new);
if (victim->rb_right)
rb_set_parent(victim->rb_right, new);
__rb_change_child(victim, new, parent, root);
}
/* Copy the pointers/colour from the victim to the replacement */
*new = *victim;
void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
struct rb_root_cached *root)
{
rb_replace_node(victim, new, &root->rb_root);
if (root->rb_leftmost == victim)
root->rb_leftmost = new;
}
static struct rb_node *rb_left_deepest_node(const struct rb_node *node)

View File

@ -863,8 +863,8 @@ ifndef NO_LIBPYTHON
$(call QUIET_INSTALL, python-scripts) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'; \
$(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
$(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
$(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
$(INSTALL) scripts/python/*.py -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
endif
$(call QUIET_INSTALL, perf_completion-script) \

View File

@ -227,7 +227,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
* the DSO?
*/
if (al->sym != NULL) {
rb_erase(&al->sym->rb_node,
rb_erase_cached(&al->sym->rb_node,
&al->map->dso->symbols);
symbol__delete(al->sym);
dso__reset_find_symbol_cache(al->map->dso);
@ -305,7 +305,7 @@ static void hists__find_annotations(struct hists *hists,
struct perf_evsel *evsel,
struct perf_annotate *ann)
{
struct rb_node *nd = rb_first(&hists->entries), *next;
struct rb_node *nd = rb_first_cached(&hists->entries), *next;
int key = K_RIGHT;
while (nd) {

View File

@ -2088,7 +2088,7 @@ static int resort_hitm_cb(struct hist_entry *he)
static int hists__iterate_cb(struct hists *hists, hists__resort_cb_t cb)
{
struct rb_node *next = rb_first(&hists->entries);
struct rb_node *next = rb_first_cached(&hists->entries);
int ret = 0;
while (next) {
@ -2215,7 +2215,7 @@ static void print_pareto(FILE *out)
if (WARN_ONCE(ret, "failed to setup sort entries\n"))
return;
nd = rb_first(&c2c.hists.hists.entries);
nd = rb_first_cached(&c2c.hists.hists.entries);
for (; nd; nd = rb_next(nd)) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
@ -2283,7 +2283,7 @@ static void perf_c2c__hists_fprintf(FILE *out, struct perf_session *session)
static void c2c_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
struct rb_node *nd = rb_first(&hb->hists->entries);
struct rb_node *nd = rb_first_cached(&hb->hists->entries);
while (nd) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);

View File

@ -429,7 +429,7 @@ get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt)
static void hists__baseline_only(struct hists *hists)
{
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *next;
if (hists__has(hists, need_collapse))
@ -437,13 +437,13 @@ static void hists__baseline_only(struct hists *hists)
else
root = hists->entries_in;
next = rb_first(root);
next = rb_first_cached(root);
while (next != NULL) {
struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
if (!hist_entry__next_pair(he)) {
rb_erase(&he->rb_node_in, root);
rb_erase_cached(&he->rb_node_in, root);
hist_entry__delete(he);
}
}
@ -451,7 +451,7 @@ static void hists__baseline_only(struct hists *hists)
static void hists__precompute(struct hists *hists)
{
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *next;
if (hists__has(hists, need_collapse))
@ -459,7 +459,7 @@ static void hists__precompute(struct hists *hists)
else
root = hists->entries_in;
next = rb_first(root);
next = rb_first_cached(root);
while (next != NULL) {
struct hist_entry *he, *pair;
struct data__file *d;

View File

@ -32,6 +32,7 @@
#include "perf.h"
#include "builtin.h"
#include "namespaces.h"
#include "util/util.h"
#include "util/strlist.h"
#include "util/strfilter.h"

View File

@ -753,7 +753,8 @@ static int tasks_print(struct report *rep, FILE *fp)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&threads->entries); nd;
nd = rb_next(nd)) {
task = tasks + itask++;
task->thread = rb_entry(nd, struct thread, rb_node);

View File

@ -213,7 +213,7 @@ struct perf_sched {
u64 all_runtime;
u64 all_count;
u64 cpu_last_switched[MAX_CPUS];
struct rb_root atom_root, sorted_atom_root, merged_atom_root;
struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
struct list_head sort_list, cmp_pid;
bool force;
bool skip_merge;
@ -271,7 +271,7 @@ struct evsel_runtime {
struct idle_thread_runtime {
struct thread_runtime tr;
struct thread *last_thread;
struct rb_root sorted_root;
struct rb_root_cached sorted_root;
struct callchain_root callchain;
struct callchain_cursor cursor;
};
@ -950,10 +950,10 @@ thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *
}
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
struct list_head *sort_list)
{
struct rb_node *node = root->rb_node;
struct rb_node *node = root->rb_root.rb_node;
struct work_atoms key = { .thread = thread };
while (node) {
@ -976,10 +976,11 @@ thread_atoms_search(struct rb_root *root, struct thread *thread,
}
static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
struct list_head *sort_list)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
bool leftmost = true;
while (*new) {
struct work_atoms *this;
@ -992,12 +993,14 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
if (cmp > 0)
new = &((*new)->rb_left);
else
else {
new = &((*new)->rb_right);
leftmost = false;
}
}
rb_link_node(&data->node, parent, new);
rb_insert_color(&data->node, root);
rb_insert_color_cached(&data->node, root, leftmost);
}
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
@ -1447,15 +1450,15 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
static void perf_sched__sort_lat(struct perf_sched *sched)
{
struct rb_node *node;
struct rb_root *root = &sched->atom_root;
struct rb_root_cached *root = &sched->atom_root;
again:
for (;;) {
struct work_atoms *data;
node = rb_first(root);
node = rb_first_cached(root);
if (!node)
break;
rb_erase(node, root);
rb_erase_cached(node, root);
data = rb_entry(node, struct work_atoms, node);
__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
}
@ -2762,12 +2765,12 @@ static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
return ret;
}
static size_t timehist_print_idlehist_callchain(struct rb_root *root)
static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
{
size_t ret = 0;
FILE *fp = stdout;
struct callchain_node *chain;
struct rb_node *rb_node = rb_first(root);
struct rb_node *rb_node = rb_first_cached(root);
printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
@ -2868,7 +2871,7 @@ static void timehist_print_summary(struct perf_sched *sched,
if (itr == NULL)
continue;
callchain_param.sort(&itr->sorted_root, &itr->callchain,
callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
0, &callchain_param);
printf(" CPU %2d:", i);
@ -3074,11 +3077,12 @@ static void print_bad_events(struct perf_sched *sched)
}
}
static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
struct work_atoms *this;
const char *comm = thread__comm_str(data->thread), *this_comm;
bool leftmost = true;
while (*new) {
int cmp;
@ -3092,6 +3096,7 @@ static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
new = &((*new)->rb_left);
} else if (cmp < 0) {
new = &((*new)->rb_right);
leftmost = false;
} else {
this->num_merged++;
this->total_runtime += data->total_runtime;
@ -3109,7 +3114,7 @@ static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
data->num_merged++;
rb_link_node(&data->node, parent, new);
rb_insert_color(&data->node, root);
rb_insert_color_cached(&data->node, root, leftmost);
}
static void perf_sched__merge_lat(struct perf_sched *sched)
@ -3120,8 +3125,8 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
if (sched->skip_merge)
return;
while ((node = rb_first(&sched->atom_root))) {
rb_erase(node, &sched->atom_root);
while ((node = rb_first_cached(&sched->atom_root))) {
rb_erase_cached(node, &sched->atom_root);
data = rb_entry(node, struct work_atoms, node);
__merge_work_atoms(&sched->merged_atom_root, data);
}
@ -3143,7 +3148,7 @@ static int perf_sched__lat(struct perf_sched *sched)
printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
printf(" -----------------------------------------------------------------------------------------------------------------\n");
next = rb_first(&sched->sorted_atom_root);
next = rb_first_cached(&sched->sorted_atom_root);
while (next) {
struct work_atoms *work_list;

View File

@ -367,7 +367,7 @@ static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
if (p)
*p = 0;
next = rb_first(&hists->entries);
next = rb_first_cached(&hists->entries);
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {

View File

@ -3854,7 +3854,8 @@ int cmd_trace(int argc, const char **argv)
goto init_augmented_syscall_tp;
}
if (strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_enter") == 0) {
if (trace.syscalls.events.augmented->priv == NULL &&
strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
struct perf_evsel *augmented = trace.syscalls.events.augmented;
if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
perf_evsel__init_augmented_syscall_tp_args(augmented))

View File

@ -18,23 +18,13 @@
#include <pid_filter.h>
/* bpf-output associated map */
struct bpf_map SEC("maps") __augmented_syscalls__ = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(u32),
.max_entries = __NR_CPUS__,
};
bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct syscall {
bool enabled;
};
struct bpf_map SEC("maps") syscalls = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(struct syscall),
.max_entries = 512,
};
bpf_map(syscalls, ARRAY, int, struct syscall, 512);
struct syscall_enter_args {
unsigned long long common_tp_fields;

View File

@ -19,12 +19,8 @@
#include <stdio.h>
#include <linux/socket.h>
struct bpf_map SEC("maps") __augmented_syscalls__ = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(u32),
.max_entries = __NR_CPUS__,
};
/* bpf-output associated map */
bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct syscall_exit_args {
unsigned long long common_tp_fields;

View File

@ -21,12 +21,8 @@
#include <stdio.h>
struct bpf_map SEC("maps") __augmented_syscalls__ = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(u32),
.max_entries = __NR_CPUS__,
};
/* bpf-output associated map */
bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct augmented_filename {
int size;

View File

@ -18,6 +18,14 @@ struct bpf_map {
unsigned int numa_node;
};
#define bpf_map(name, _type, type_key, type_val, _max_entries) \
struct bpf_map SEC("maps") name = { \
.type = BPF_MAP_TYPE_##_type, \
.key_size = sizeof(type_key), \
.value_size = sizeof(type_val), \
.max_entries = _max_entries, \
}
/*
* FIXME: this should receive .max_entries as a parameter, as careful
* tuning of these limits is needed to avoid hitting limits that
@ -26,13 +34,7 @@ struct bpf_map {
* For the current need, 'perf trace --filter-pids', 64 should
* be good enough, but this surely needs to be revisited.
*/
#define pid_map(name, value_type) \
struct bpf_map SEC("maps") name = { \
.type = BPF_MAP_TYPE_HASH, \
.key_size = sizeof(pid_t), \
.value_size = sizeof(value_type), \
.max_entries = 64, \
}
#define pid_map(name, value_type) bpf_map(name, HASH, pid_t, value_type, 64)
static int (*bpf_map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags) = (void *)BPF_FUNC_map_update_elem;
static void *(*bpf_map_lookup_elem)(struct bpf_map *map, void *key) = (void *)BPF_FUNC_map_lookup_elem;

View File

@ -1,4 +1,3 @@
#!/usr/bin/python2
# SPDX-License-Identifier: GPL-2.0
# exported-sql-viewer.py: view data from sql database
# Copyright (c) 2014-2018, Intel Corporation.

View File

@ -1,5 +1,3 @@
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>

View File

@ -1,4 +1,3 @@
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
data = {}

View File

@ -1,6 +1,7 @@
#! /usr/bin/python
# SPDX-License-Identifier: GPL-2.0
from __future__ import print_function
import os
import sys
import glob
@ -8,7 +9,11 @@ import optparse
import tempfile
import logging
import shutil
import ConfigParser
try:
import configparser
except ImportError:
import ConfigParser as configparser
def data_equal(a, b):
# Allow multiple values in assignment separated by '|'
@ -100,20 +105,20 @@ class Event(dict):
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
if t not in self or t not in other:
return False
if not data_equal(self[t], other[t]):
return False
return True
def optional(self):
if self.has_key('optional') and self['optional'] == '1':
if 'optional' in self and self['optional'] == '1':
return True
return False
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
if t not in self or t not in other:
continue
if not data_equal(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
@ -134,7 +139,7 @@ class Event(dict):
# - expected values assignments
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser = configparser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
@ -193,7 +198,7 @@ class Test(object):
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event = configparser.SafeConfigParser()
parser_event.read(path)
# The event record section header contains 'event' word,
@ -207,7 +212,7 @@ class Test(object):
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base = configparser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
@ -322,9 +327,9 @@ def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
except Unsup as obj:
log.warning("unsupp %s" % obj.getMsg())
except Notest, obj:
except Notest as obj:
log.warning("skipped %s" % obj.getMsg())
def setup_log(verbose):
@ -363,7 +368,7 @@ def main():
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
default=0, action="count", dest="verbose")
options, args = parser.parse_args()
if args:
@ -373,7 +378,7 @@ def main():
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
print('FAILED no -d option specified')
sys.exit(-1)
if not options.test:
@ -382,8 +387,8 @@ def main():
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
except Fail as obj:
print("FAILED %s" % obj.getMsg())
sys.exit(-1)
sys.exit(0)

View File

@ -161,7 +161,7 @@ out:
void print_hists_in(struct hists *hists)
{
int i = 0;
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@ -170,7 +170,7 @@ void print_hists_in(struct hists *hists)
root = hists->entries_in;
pr_info("----- %s --------\n", __func__);
node = rb_first(root);
node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
@ -191,13 +191,13 @@ void print_hists_in(struct hists *hists)
void print_hists_out(struct hists *hists)
{
int i = 0;
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *node;
root = &hists->entries;
pr_info("----- %s --------\n", __func__);
node = rb_first(root);
node = rb_first_cached(root);
while (node) {
struct hist_entry *he;

View File

@ -125,8 +125,8 @@ out:
static void del_hist_entries(struct hists *hists)
{
struct hist_entry *he;
struct rb_root *root_in;
struct rb_root *root_out;
struct rb_root_cached *root_in;
struct rb_root_cached *root_out;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@ -136,12 +136,12 @@ static void del_hist_entries(struct hists *hists)
root_out = &hists->entries;
while (!RB_EMPTY_ROOT(root_out)) {
node = rb_first(root_out);
while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
node = rb_first_cached(root_out);
he = rb_entry(node, struct hist_entry, rb_node);
rb_erase(node, root_out);
rb_erase(&he->rb_node_in, root_in);
rb_erase_cached(node, root_out);
rb_erase_cached(&he->rb_node_in, root_in);
hist_entry__delete(he);
}
}
@ -198,7 +198,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
print_hists_out(hists);
}
root = &hists->entries;
root = &hists->entries.rb_root;
for (node = rb_first(root), i = 0;
node && (he = rb_entry(node, struct hist_entry, rb_node));
node = rb_next(node), i++) {

View File

@ -142,7 +142,7 @@ static int find_sample(struct sample *samples, size_t nr_samples,
static int __validate_match(struct hists *hists)
{
size_t count = 0;
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *node;
/*
@ -153,7 +153,7 @@ static int __validate_match(struct hists *hists)
else
root = hists->entries_in;
node = rb_first(root);
node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
@ -192,7 +192,7 @@ static int __validate_link(struct hists *hists, int idx)
size_t count = 0;
size_t count_pair = 0;
size_t count_dummy = 0;
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *node;
/*
@ -205,7 +205,7 @@ static int __validate_link(struct hists *hists, int idx)
else
root = hists->entries_in;
node = rb_first(root);
node = rb_first_cached(root);
while (node) {
struct hist_entry *he;

View File

@ -91,8 +91,8 @@ out:
static void del_hist_entries(struct hists *hists)
{
struct hist_entry *he;
struct rb_root *root_in;
struct rb_root *root_out;
struct rb_root_cached *root_in;
struct rb_root_cached *root_out;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@ -102,12 +102,12 @@ static void del_hist_entries(struct hists *hists)
root_out = &hists->entries;
while (!RB_EMPTY_ROOT(root_out)) {
node = rb_first(root_out);
while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
node = rb_first_cached(root_out);
he = rb_entry(node, struct hist_entry, rb_node);
rb_erase(node, root_out);
rb_erase(&he->rb_node_in, root_in);
rb_erase_cached(node, root_out);
rb_erase_cached(&he->rb_node_in, root_in);
hist_entry__delete(he);
}
}
@ -126,7 +126,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *node;
field_order = NULL;
@ -162,7 +162,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
node = rb_first(root);
node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
@ -228,7 +228,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *node;
field_order = "overhead,cpu";
@ -262,7 +262,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
node = rb_first(root);
node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300);
@ -284,7 +284,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *node;
field_order = "comm,overhead,dso";
@ -316,7 +316,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
node = rb_first(root);
node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
@ -358,7 +358,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *node;
field_order = "dso,sym,comm,overhead,dso";
@ -394,7 +394,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
node = rb_first(root);
node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") &&
@ -460,7 +460,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *node;
field_order = "cpu,pid,comm,dso,sym";
@ -497,7 +497,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
node = rb_first(root);
node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",

View File

@ -49,7 +49,7 @@ static int hist_browser__get_folding(struct hist_browser *browser)
struct hists *hists = browser->hists;
int unfolded_rows = 0;
for (nd = rb_first(&hists->entries);
for (nd = rb_first_cached(&hists->entries);
(nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL;
nd = rb_hierarchy_next(nd)) {
struct hist_entry *he =
@ -267,7 +267,7 @@ static int hierarchy_count_rows(struct hist_browser *hb, struct hist_entry *he,
if (he->has_no_entry)
return 1;
node = rb_first(&he->hroot_out);
node = rb_first_cached(&he->hroot_out);
while (node) {
float percent;
@ -372,7 +372,7 @@ static void hist_entry__init_have_children(struct hist_entry *he)
he->has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
callchain__init_have_children(&he->sorted_chain);
} else {
he->has_children = !RB_EMPTY_ROOT(&he->hroot_out);
he->has_children = !RB_EMPTY_ROOT(&he->hroot_out.rb_root);
}
he->init_have_children = true;
@ -508,7 +508,7 @@ static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
struct hist_entry *child;
int n = 0;
for (nd = rb_first(&he->hroot_out); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&he->hroot_out); nd; nd = rb_next(nd)) {
child = rb_entry(nd, struct hist_entry, rb_node);
percent = hist_entry__get_percent_limit(child);
if (!child->filtered && percent >= hb->min_pcnt)
@ -566,7 +566,7 @@ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
struct rb_node *nd;
struct hist_entry *he;
nd = rb_first(&browser->hists->entries);
nd = rb_first_cached(&browser->hists->entries);
while (nd) {
he = rb_entry(nd, struct hist_entry, rb_node);
@ -1738,7 +1738,7 @@ static void ui_browser__hists_init_top(struct ui_browser *browser)
struct hist_browser *hb;
hb = container_of(browser, struct hist_browser, b);
browser->top = rb_first(&hb->hists->entries);
browser->top = rb_first_cached(&hb->hists->entries);
}
}
@ -2649,7 +2649,7 @@ add_socket_opt(struct hist_browser *browser, struct popup_action *act,
static void hist_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
struct rb_node *nd = rb_first(&hb->hists->entries);
struct rb_node *nd = rb_first_cached(&hb->hists->entries);
if (hb->min_pcnt == 0 && !symbol_conf.report_hierarchy) {
hb->nr_non_filtered_entries = hb->hists->nr_non_filtered_entries;
@ -2669,7 +2669,7 @@ static void hist_browser__update_percent_limit(struct hist_browser *hb,
double percent)
{
struct hist_entry *he;
struct rb_node *nd = rb_first(&hb->hists->entries);
struct rb_node *nd = rb_first_cached(&hb->hists->entries);
u64 total = hists__total_period(hb->hists);
u64 min_callchain_hits = total * (percent / 100);

View File

@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include "../../util/util.h"
#include "../../util/debug.h"
#include "../../util/map.h"
#include "../../util/symbol.h"
#include "../browser.h"
#include "../helpline.h"

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "gtk.h"
#include "util/sort.h"
#include "util/debug.h"
#include "util/annotate.h"
#include "util/evsel.h"

View File

@ -353,7 +353,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
g_object_unref(GTK_TREE_MODEL(store));
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
GtkTreeIter iter;
u64 total = hists__total_period(h->hists);
@ -401,7 +401,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
}
static void perf_gtk__add_hierarchy_entries(struct hists *hists,
struct rb_root *root,
struct rb_root_cached *root,
GtkTreeStore *store,
GtkTreeIter *parent,
struct perf_hpp *hpp,
@ -415,7 +415,7 @@ static void perf_gtk__add_hierarchy_entries(struct hists *hists,
u64 total = hists__total_period(hists);
int size;
for (node = rb_first(root); node; node = rb_next(node)) {
for (node = rb_first_cached(root); node; node = rb_next(node)) {
GtkTreeIter iter;
float percent;
char *bf;

View File

@ -788,7 +788,8 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
indent = hists__overhead_width(hists) + 4;
for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
for (nd = rb_first_cached(&hists->entries); nd;
nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
float percent;

View File

@ -9,6 +9,7 @@
#include <errno.h>
#include <inttypes.h>
#include <libgen.h>
#include "util.h"
#include "ui/ui.h"
#include "sort.h"

View File

@ -4,16 +4,24 @@
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <linux/types.h>
#include "symbol.h"
#include "hist.h"
#include "sort.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <pthread.h>
#include <asm/bug.h>
#include "symbol_conf.h"
struct hist_browser_timer;
struct hist_entry;
struct ins_ops;
struct map;
struct map_symbol;
struct addr_map_symbol;
struct option;
struct perf_sample;
struct perf_evsel;
struct symbol;
struct ins {
const char *name;

View File

@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "block-range.h"
#include "annotate.h"
#include <assert.h>
#include <stdlib.h>
struct {
struct rb_root root;

View File

@ -2,7 +2,11 @@
#ifndef __PERF_BLOCK_RANGE_H
#define __PERF_BLOCK_RANGE_H
#include "symbol.h"
#include <stdbool.h>
#include <linux/rbtree.h>
#include <linux/types.h>
struct symbol;
/*
* struct block_range - non-overlapping parts of basic blocks

View File

@ -7,6 +7,7 @@
#include "bpf-event.h"
#include "debug.h"
#include "symbol.h"
#include "machine.h"
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
@ -149,7 +150,7 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
*ksymbol_event = (struct ksymbol_event){
.header = {
.type = PERF_RECORD_KSYMBOL,
.size = sizeof(struct ksymbol_event),
.size = offsetof(struct ksymbol_event, name),
},
.addr = prog_addrs[i],
.len = prog_lens[i],
@ -178,6 +179,9 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
sizeof(u64));
memset((void *)event + event->header.size, 0, machine->id_hdr_size);
event->header.size += machine->id_hdr_size;
err = perf_tool__process_synth_event(tool, event,
machine, process);
}
@ -194,6 +198,8 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
.id = info.id,
};
memcpy(bpf_event->tag, prog_tags[i], BPF_TAG_SIZE);
memset((void *)event + event->header.size, 0, machine->id_hdr_size);
event->header.size += machine->id_hdr_size;
err = perf_tool__process_synth_event(tool, event,
machine, process);
}
@ -217,7 +223,7 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
int err;
int fd;
event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN);
event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN + machine->id_hdr_size);
if (!event)
return -1;
while (true) {

View File

@ -1,8 +1,31 @@
#ifndef _PERF_BRANCH_H
#define _PERF_BRANCH_H 1
#include <stdio.h>
#include <stdint.h>
#include "../perf.h"
#include <linux/perf_event.h>
#include <linux/types.h>
struct branch_flags {
u64 mispred:1;
u64 predicted:1;
u64 in_tx:1;
u64 abort:1;
u64 cycles:16;
u64 type:4;
u64 reserved:40;
};
struct branch_entry {
u64 from;
u64 to;
struct branch_flags flags;
};
struct branch_stack {
u64 nr;
struct branch_entry entries[0];
};
struct branch_type_stat {
bool branch_to;
@ -13,8 +36,6 @@ struct branch_type_stat {
u64 cross_2m;
};
struct branch_flags;
void branch_type_count(struct branch_type_stat *st, struct branch_flags *flags,
u64 from, u64 to);

View File

@ -15,6 +15,7 @@
#include <sys/types.h>
#include "build-id.h"
#include "event.h"
#include "namespaces.h"
#include "symbol.h"
#include "thread.h"
#include <linux/kernel.h>
@ -363,7 +364,8 @@ int perf_session__write_buildid_table(struct perf_session *session,
if (err)
return err;
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&session->machines.guests); nd;
nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__write_buildid_table(pos, fd);
if (err)
@ -396,7 +398,8 @@ int dsos__hit_all(struct perf_session *session)
if (err)
return err;
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&session->machines.guests); nd;
nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__hit_all_dsos(pos);
@ -849,7 +852,8 @@ int perf_session__cache_build_ids(struct perf_session *session)
ret = machine__cache_build_ids(&session->machines.host);
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&session->machines.guests); nd;
nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__cache_build_ids(pos);
}
@ -866,7 +870,8 @@ bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
struct rb_node *nd;
bool ret = machine__read_build_ids(&session->machines.host, with_hits);
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&session->machines.guests); nd;
nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__read_build_ids(pos, with_hits);
}

View File

@ -6,9 +6,10 @@
#define SBUILD_ID_SIZE (BUILD_ID_SIZE * 2 + 1)
#include "tool.h"
#include "namespaces.h"
#include <linux/types.h>
struct nsinfo;
extern struct perf_tool build_id__mark_dso_hit_ops;
struct dso;
struct feat_fd;

View File

@ -2,7 +2,6 @@
#ifndef __PERF_CALLCHAIN_H
#define __PERF_CALLCHAIN_H
#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include "event.h"

View File

@ -3,6 +3,7 @@
#define __PERF_COLOR_H
#include <stdio.h>
#include <stdarg.h>
/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */
#define COLOR_MAXLEN 24

View File

@ -6,6 +6,7 @@
#include <stdio.h>
#include <string.h>
#include <linux/refcount.h>
#include <linux/rbtree.h>
#include "rwsem.h"
struct comm_str {

View File

@ -2,9 +2,9 @@
#ifndef __PERF_COMM_H
#define __PERF_COMM_H
#include "../perf.h"
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/types.h>
#include <stdbool.h>
struct comm_str;

View File

@ -8,7 +8,9 @@
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include "compress.h"
#include "namespaces.h"
#include "path.h"
#include "symbol.h"
#include "srcline.h"
@ -1195,10 +1197,10 @@ struct dso *dso__new(const char *name)
strcpy(dso->name, name);
dso__set_long_name(dso, dso->name, false);
dso__set_short_name(dso, dso->name, false);
dso->symbols = dso->symbol_names = RB_ROOT;
dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
dso->data.cache = RB_ROOT;
dso->inlined_nodes = RB_ROOT;
dso->srclines = RB_ROOT;
dso->inlined_nodes = RB_ROOT_CACHED;
dso->srclines = RB_ROOT_CACHED;
dso->data.fd = -1;
dso->data.status = DSO_DATA_STATUS_UNKNOWN;
dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
@ -1467,7 +1469,7 @@ size_t dso__fprintf(struct dso *dso, FILE *fp)
ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
ret += dso__fprintf_buildid(dso, fp);
ret += fprintf(fp, ")\n");
for (nd = rb_first(&dso->symbols); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
ret += symbol__fprintf(pos, fp);
}

View File

@ -7,12 +7,14 @@
#include <linux/rbtree.h>
#include <sys/types.h>
#include <stdbool.h>
#include <stdio.h>
#include "rwsem.h"
#include <linux/bitops.h>
#include "map.h"
#include "namespaces.h"
#include "build-id.h"
struct machine;
struct map;
enum dso_binary_type {
DSO_BINARY_TYPE__KALLSYMS = 0,
DSO_BINARY_TYPE__GUEST_KALLSYMS,
@ -139,10 +141,10 @@ struct dso {
struct list_head node;
struct rb_node rb_node; /* rbtree node sorted by long name */
struct rb_root *root; /* root of rbtree that rb_node is in */
struct rb_root symbols;
struct rb_root symbol_names;
struct rb_root inlined_nodes;
struct rb_root srclines;
struct rb_root_cached symbols;
struct rb_root_cached symbol_names;
struct rb_root_cached inlined_nodes;
struct rb_root_cached srclines;
struct {
u64 addr;
struct symbol *symbol;
@ -234,7 +236,7 @@ bool dso__loaded(const struct dso *dso);
static inline bool dso__has_symbols(const struct dso *dso)
{
return !RB_EMPTY_ROOT(&dso->symbols);
return !RB_EMPTY_ROOT(&dso->symbols.rb_root);
}
bool dso__sorted_by_name(const struct dso *dso);

View File

@ -161,26 +161,7 @@ struct ip_callchain {
u64 ips[0];
};
struct branch_flags {
u64 mispred:1;
u64 predicted:1;
u64 in_tx:1;
u64 abort:1;
u64 cycles:16;
u64 type:4;
u64 reserved:40;
};
struct branch_entry {
u64 from;
u64 to;
struct branch_flags flags;
};
struct branch_stack {
u64 nr;
struct branch_entry entries[0];
};
struct branch_stack;
enum {
PERF_IP_FLAG_BRANCH = 1ULL << 0,

View File

@ -209,7 +209,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
struct rb_node *next = rb_first(&hists->entries);
struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
int row = 0;
@ -296,7 +296,7 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
if (!he->leaf) {
struct hist_entry *child;
struct rb_node *node = rb_first(&he->hroot_out);
struct rb_node *node = rb_first_cached(&he->hroot_out);
while (node) {
child = rb_entry(node, struct hist_entry, rb_node);
node = rb_next(node);
@ -311,8 +311,8 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
struct rb_root *root_in;
struct rb_root *root_out;
struct rb_root_cached *root_in;
struct rb_root_cached *root_out;
if (he->parent_he) {
root_in = &he->parent_he->hroot_in;
@ -325,8 +325,8 @@ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
root_out = &hists->entries;
}
rb_erase(&he->rb_node_in, root_in);
rb_erase(&he->rb_node, root_out);
rb_erase_cached(&he->rb_node_in, root_in);
rb_erase_cached(&he->rb_node, root_out);
--hists->nr_entries;
if (!he->filtered)
@ -337,7 +337,7 @@ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
struct rb_node *next = rb_first(&hists->entries);
struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
while (next) {
@ -353,7 +353,7 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
void hists__delete_entries(struct hists *hists)
{
struct rb_node *next = rb_first(&hists->entries);
struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
while (next) {
@ -435,8 +435,8 @@ static int hist_entry__init(struct hist_entry *he,
}
INIT_LIST_HEAD(&he->pairs.node);
thread__get(he->thread);
he->hroot_in = RB_ROOT;
he->hroot_out = RB_ROOT;
he->hroot_in = RB_ROOT_CACHED;
he->hroot_out = RB_ROOT_CACHED;
if (!symbol_conf.report_hierarchy)
he->leaf = true;
@ -513,8 +513,9 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
int64_t cmp;
u64 period = entry->stat.period;
u64 weight = entry->stat.weight;
bool leftmost = true;
p = &hists->entries_in->rb_node;
p = &hists->entries_in->rb_root.rb_node;
while (*p != NULL) {
parent = *p;
@ -557,8 +558,10 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
else
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
he = hist_entry__new(entry, sample_self);
@ -570,7 +573,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, hists->entries_in);
rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
if (sample_self)
he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
@ -1279,16 +1282,17 @@ static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
}
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
struct rb_root *root,
struct rb_root_cached *root,
struct hist_entry *he,
struct hist_entry *parent_he,
struct perf_hpp_list *hpp_list)
{
struct rb_node **p = &root->rb_node;
struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter, *new;
struct perf_hpp_fmt *fmt;
int64_t cmp;
bool leftmost = true;
while (*p != NULL) {
parent = *p;
@ -1308,8 +1312,10 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
if (cmp < 0)
p = &parent->rb_left;
else
else {
p = &parent->rb_right;
leftmost = false;
}
}
new = hist_entry__new(he, true);
@ -1343,12 +1349,12 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
}
rb_link_node(&new->rb_node_in, parent, p);
rb_insert_color(&new->rb_node_in, root);
rb_insert_color_cached(&new->rb_node_in, root, leftmost);
return new;
}
static int hists__hierarchy_insert_entry(struct hists *hists,
struct rb_root *root,
struct rb_root_cached *root,
struct hist_entry *he)
{
struct perf_hpp_list_node *node;
@ -1395,13 +1401,14 @@ static int hists__hierarchy_insert_entry(struct hists *hists,
}
static int hists__collapse_insert_entry(struct hists *hists,
struct rb_root *root,
struct rb_root_cached *root,
struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
int64_t cmp;
bool leftmost = true;
if (symbol_conf.report_hierarchy)
return hists__hierarchy_insert_entry(hists, root, he);
@ -1432,19 +1439,21 @@ static int hists__collapse_insert_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
else
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
rb_insert_color_cached(&he->rb_node_in, root, leftmost);
return 1;
}
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
struct rb_root *root;
struct rb_root_cached *root;
pthread_mutex_lock(&hists->lock);
@ -1467,7 +1476,7 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *next;
struct hist_entry *n;
int ret;
@ -1479,7 +1488,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
root = hists__get_rotate_entries_in(hists);
next = rb_first(root);
next = rb_first_cached(root);
while (next) {
if (session_done())
@ -1487,7 +1496,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
rb_erase(&n->rb_node_in, root);
rb_erase_cached(&n->rb_node_in, root);
ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
if (ret < 0)
return -1;
@ -1558,7 +1567,7 @@ static void hierarchy_recalc_total_periods(struct hists *hists)
struct rb_node *node;
struct hist_entry *he;
node = rb_first(&hists->entries);
node = rb_first_cached(&hists->entries);
hists->stats.total_period = 0;
hists->stats.total_non_filtered_period = 0;
@ -1578,13 +1587,14 @@ static void hierarchy_recalc_total_periods(struct hists *hists)
}
}
static void hierarchy_insert_output_entry(struct rb_root *root,
static void hierarchy_insert_output_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
bool leftmost = true;
while (*p != NULL) {
parent = *p;
@ -1592,12 +1602,14 @@ static void hierarchy_insert_output_entry(struct rb_root *root,
if (hist_entry__sort(he, iter) > 0)
p = &parent->rb_left;
else
else {
p = &parent->rb_right;
leftmost = false;
}
}
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, root);
rb_insert_color_cached(&he->rb_node, root, leftmost);
/* update column width of dynamic entry */
perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
@ -1608,16 +1620,16 @@ static void hierarchy_insert_output_entry(struct rb_root *root,
static void hists__hierarchy_output_resort(struct hists *hists,
struct ui_progress *prog,
struct rb_root *root_in,
struct rb_root *root_out,
struct rb_root_cached *root_in,
struct rb_root_cached *root_out,
u64 min_callchain_hits,
bool use_callchain)
{
struct rb_node *node;
struct hist_entry *he;
*root_out = RB_ROOT;
node = rb_first(root_in);
*root_out = RB_ROOT_CACHED;
node = rb_first_cached(root_in);
while (node) {
he = rb_entry(node, struct hist_entry, rb_node_in);
@ -1660,15 +1672,16 @@ static void hists__hierarchy_output_resort(struct hists *hists,
}
}
static void __hists__insert_output_entry(struct rb_root *entries,
static void __hists__insert_output_entry(struct rb_root_cached *entries,
struct hist_entry *he,
u64 min_callchain_hits,
bool use_callchain)
{
struct rb_node **p = &entries->rb_node;
struct rb_node **p = &entries->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
bool leftmost = true;
if (use_callchain) {
if (callchain_param.mode == CHAIN_GRAPH_REL) {
@ -1689,12 +1702,14 @@ static void __hists__insert_output_entry(struct rb_root *entries,
if (hist_entry__sort(he, iter) > 0)
p = &(*p)->rb_left;
else
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, entries);
rb_insert_color_cached(&he->rb_node, entries, leftmost);
perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
if (perf_hpp__is_dynamic_entry(fmt) &&
@ -1706,7 +1721,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
static void output_resort(struct hists *hists, struct ui_progress *prog,
bool use_callchain, hists__resort_cb_t cb)
{
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *next;
struct hist_entry *n;
u64 callchain_total;
@ -1736,8 +1751,8 @@ static void output_resort(struct hists *hists, struct ui_progress *prog,
else
root = hists->entries_in;
next = rb_first(root);
hists->entries = RB_ROOT;
next = rb_first_cached(root);
hists->entries = RB_ROOT_CACHED;
while (next) {
n = rb_entry(next, struct hist_entry, rb_node_in);
@ -1798,7 +1813,7 @@ struct rb_node *rb_hierarchy_last(struct rb_node *node)
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
while (can_goto_child(he, HMD_NORMAL)) {
node = rb_last(&he->hroot_out);
node = rb_last(&he->hroot_out.rb_root);
he = rb_entry(node, struct hist_entry, rb_node);
}
return node;
@ -1809,7 +1824,7 @@ struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_di
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
if (can_goto_child(he, hmd))
node = rb_first(&he->hroot_out);
node = rb_first_cached(&he->hroot_out);
else
node = rb_next(node);
@ -1847,7 +1862,7 @@ bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
if (he->leaf)
return false;
node = rb_first(&he->hroot_out);
node = rb_first_cached(&he->hroot_out);
child = rb_entry(node, struct hist_entry, rb_node);
while (node && child->filtered) {
@ -1965,7 +1980,7 @@ static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t fil
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (filter(hists, h))
@ -1975,13 +1990,15 @@ static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t fil
}
}
static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
static void resort_filtered_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct rb_root new_root = RB_ROOT;
struct rb_root_cached new_root = RB_ROOT_CACHED;
struct rb_node *nd;
bool leftmost = true;
while (*p != NULL) {
parent = *p;
@ -1989,22 +2006,24 @@ static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
if (hist_entry__sort(he, iter) > 0)
p = &(*p)->rb_left;
else
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, root);
rb_insert_color_cached(&he->rb_node, root, leftmost);
if (he->leaf || he->filtered)
return;
nd = rb_first(&he->hroot_out);
nd = rb_first_cached(&he->hroot_out);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd = rb_next(nd);
rb_erase(&h->rb_node, &he->hroot_out);
rb_erase_cached(&h->rb_node, &he->hroot_out);
resort_filtered_entry(&new_root, h);
}
@ -2015,14 +2034,14 @@ static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
struct rb_node *nd;
struct rb_root new_root = RB_ROOT;
struct rb_root_cached new_root = RB_ROOT_CACHED;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
nd = rb_first(&hists->entries);
nd = rb_first_cached(&hists->entries);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
int ret;
@ -2066,12 +2085,12 @@ static void hists__filter_hierarchy(struct hists *hists, int type, const void *a
* resort output after applying a new filter since a filter in a lower
* hierarchy can change periods in an upper hierarchy.
*/
nd = rb_first(&hists->entries);
nd = rb_first_cached(&hists->entries);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd = rb_next(nd);
rb_erase(&h->rb_node, &hists->entries);
rb_erase_cached(&h->rb_node, &hists->entries);
resort_filtered_entry(&new_root, h);
}
@ -2140,18 +2159,19 @@ void hists__inc_nr_samples(struct hists *hists, bool filtered)
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
struct hist_entry *pair)
{
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
int64_t cmp;
bool leftmost = true;
if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
p = &root->rb_node;
p = &root->rb_root.rb_node;
while (*p != NULL) {
parent = *p;
@ -2164,8 +2184,10 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
else
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
he = hist_entry__new(pair, true);
@ -2175,7 +2197,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
if (symbol_conf.cumulate_callchain)
memset(he->stat_acc, 0, sizeof(he->stat));
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
rb_insert_color_cached(&he->rb_node_in, root, leftmost);
hists__inc_stats(hists, he);
he->dummy = true;
}
@ -2184,15 +2206,16 @@ out:
}
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
struct rb_root *root,
struct rb_root_cached *root,
struct hist_entry *pair)
{
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
struct perf_hpp_fmt *fmt;
bool leftmost = true;
p = &root->rb_node;
p = &root->rb_root.rb_node;
while (*p != NULL) {
int64_t cmp = 0;
@ -2209,14 +2232,16 @@ static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
if (cmp < 0)
p = &parent->rb_left;
else
else {
p = &parent->rb_right;
leftmost = false;
}
}
he = hist_entry__new(pair, true);
if (he) {
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
rb_insert_color_cached(&he->rb_node_in, root, leftmost);
he->dummy = true;
he->hists = hists;
@ -2233,9 +2258,9 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
struct rb_node *n;
if (hists__has(hists, need_collapse))
n = hists->entries_collapsed.rb_node;
n = hists->entries_collapsed.rb_root.rb_node;
else
n = hists->entries_in->rb_node;
n = hists->entries_in->rb_root.rb_node;
while (n) {
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
@ -2252,10 +2277,10 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
return NULL;
}
static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
struct rb_node *n = root->rb_node;
struct rb_node *n = root->rb_root.rb_node;
while (n) {
struct hist_entry *iter;
@ -2280,13 +2305,13 @@ static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
return NULL;
}
static void hists__match_hierarchy(struct rb_root *leader_root,
struct rb_root *other_root)
static void hists__match_hierarchy(struct rb_root_cached *leader_root,
struct rb_root_cached *other_root)
{
struct rb_node *nd;
struct hist_entry *pos, *pair;
for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
pair = hists__find_hierarchy_entry(other_root, pos);
@ -2302,7 +2327,7 @@ static void hists__match_hierarchy(struct rb_root *leader_root,
*/
void hists__match(struct hists *leader, struct hists *other)
{
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos, *pair;
@ -2317,7 +2342,7 @@ void hists__match(struct hists *leader, struct hists *other)
else
root = leader->entries_in;
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
pair = hists__find_entry(other, pos);
@ -2328,13 +2353,13 @@ void hists__match(struct hists *leader, struct hists *other)
static int hists__link_hierarchy(struct hists *leader_hists,
struct hist_entry *parent,
struct rb_root *leader_root,
struct rb_root *other_root)
struct rb_root_cached *leader_root,
struct rb_root_cached *other_root)
{
struct rb_node *nd;
struct hist_entry *pos, *leader;
for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
if (hist_entry__has_pairs(pos)) {
@ -2377,7 +2402,7 @@ static int hists__link_hierarchy(struct hists *leader_hists,
*/
int hists__link(struct hists *leader, struct hists *other)
{
struct rb_root *root;
struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos, *pair;
@ -2393,7 +2418,7 @@ int hists__link(struct hists *leader, struct hists *other)
else
root = other->entries_in;
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
if (!hist_entry__has_pairs(pos)) {
@ -2566,10 +2591,10 @@ int perf_hist_config(const char *var, const char *value)
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
memset(hists, 0, sizeof(*hists));
hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
hists->entries_in = &hists->entries_in_array[0];
hists->entries_collapsed = RB_ROOT;
hists->entries = RB_ROOT;
hists->entries_collapsed = RB_ROOT_CACHED;
hists->entries = RB_ROOT_CACHED;
pthread_mutex_init(&hists->lock, NULL);
hists->socket_filter = -1;
hists->hpp_list = hpp_list;
@ -2577,14 +2602,14 @@ int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
return 0;
}
static void hists__delete_remaining_entries(struct rb_root *root)
static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
struct rb_node *node;
struct hist_entry *he;
while (!RB_EMPTY_ROOT(root)) {
node = rb_first(root);
rb_erase(node, root);
while (!RB_EMPTY_ROOT(&root->rb_root)) {
node = rb_first_cached(root);
rb_erase_cached(node, root);
he = rb_entry(node, struct hist_entry, rb_node_in);
hist_entry__delete(he);
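
All of the hists.c insertion hunks above apply one mechanical recipe: start the descent at root->rb_root.rb_node, track whether the walk ever went right, and pass that bit to rb_insert_color_cached() so rb_leftmost stays correct. A minimal sketch of the recipe, assuming a hypothetical u64-keyed struct knode (not a type from this patch); the later sketches below reuse it:

#include <stdbool.h>
#include <linux/types.h>
#include <linux/rbtree.h>

struct knode {
	struct rb_node rb_node;
	u64 key;
};

static void knode__insert(struct rb_root_cached *root, struct knode *kn)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p != NULL) {
		struct knode *pos;

		parent = *p;
		pos = rb_entry(parent, struct knode, rb_node);
		if (kn->key < pos->key)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;	/* went right at least once */
		}
	}

	rb_link_node(&kn->rb_node, parent, p);
	rb_insert_color_cached(&kn->rb_node, root, leftmost);
}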

View File

@ -70,10 +70,10 @@ struct thread;
struct dso;
struct hists {
struct rb_root entries_in_array[2];
struct rb_root *entries_in;
struct rb_root entries;
struct rb_root entries_collapsed;
struct rb_root_cached entries_in_array[2];
struct rb_root_cached *entries_in;
struct rb_root_cached entries;
struct rb_root_cached entries_collapsed;
u64 nr_entries;
u64 nr_non_filtered_entries;
u64 callchain_period;
@ -230,7 +230,7 @@ static __pure inline bool hists__has_callchains(struct hists *hists)
int hists__init(void);
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);
struct rb_root *hists__get_rotate_entries_in(struct hists *hists);
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists);
struct perf_hpp {
char *buf;

View File

@ -45,7 +45,7 @@ static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
/* For intlist iteration */
static inline struct int_node *intlist__first(struct intlist *ilist)
{
struct rb_node *rn = rb_first(&ilist->rblist.entries);
struct rb_node *rn = rb_first_cached(&ilist->rblist.entries);
return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
}
static inline struct int_node *intlist__next(struct int_node *in)

View File

@ -2,6 +2,7 @@
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <errno.h>
#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

View File

@ -42,7 +42,7 @@ static void machine__threads_init(struct machine *machine)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
threads->entries = RB_ROOT;
threads->entries = RB_ROOT_CACHED;
init_rwsem(&threads->lock);
threads->nr = 0;
INIT_LIST_HEAD(&threads->dead);
@ -180,7 +180,7 @@ void machine__delete_threads(struct machine *machine)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
down_write(&threads->lock);
nd = rb_first(&threads->entries);
nd = rb_first_cached(&threads->entries);
while (nd) {
struct thread *t = rb_entry(nd, struct thread, rb_node);
@ -223,7 +223,7 @@ void machine__delete(struct machine *machine)
void machines__init(struct machines *machines)
{
machine__init(&machines->host, "", HOST_KERNEL_ID);
machines->guests = RB_ROOT;
machines->guests = RB_ROOT_CACHED;
}
void machines__exit(struct machines *machines)
@ -235,9 +235,10 @@ void machines__exit(struct machines *machines)
struct machine *machines__add(struct machines *machines, pid_t pid,
const char *root_dir)
{
struct rb_node **p = &machines->guests.rb_node;
struct rb_node **p = &machines->guests.rb_root.rb_node;
struct rb_node *parent = NULL;
struct machine *pos, *machine = malloc(sizeof(*machine));
bool leftmost = true;
if (machine == NULL)
return NULL;
@ -252,12 +253,14 @@ struct machine *machines__add(struct machines *machines, pid_t pid,
pos = rb_entry(parent, struct machine, rb_node);
if (pid < pos->pid)
p = &(*p)->rb_left;
else
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&machine->rb_node, parent, p);
rb_insert_color(&machine->rb_node, &machines->guests);
rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
return machine;
}
@ -268,7 +271,7 @@ void machines__set_comm_exec(struct machines *machines, bool comm_exec)
machines->host.comm_exec = comm_exec;
for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *machine = rb_entry(nd, struct machine, rb_node);
machine->comm_exec = comm_exec;
@ -277,7 +280,7 @@ void machines__set_comm_exec(struct machines *machines, bool comm_exec)
struct machine *machines__find(struct machines *machines, pid_t pid)
{
struct rb_node **p = &machines->guests.rb_node;
struct rb_node **p = &machines->guests.rb_root.rb_node;
struct rb_node *parent = NULL;
struct machine *machine;
struct machine *default_machine = NULL;
@ -340,7 +343,7 @@ void machines__process_guests(struct machines *machines,
{
struct rb_node *nd;
for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
process(pos, data);
}
@ -353,7 +356,8 @@ void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
machines->host.id_hdr_size = id_hdr_size;
for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
for (node = rb_first_cached(&machines->guests); node;
node = rb_next(node)) {
machine = rb_entry(node, struct machine, rb_node);
machine->id_hdr_size = id_hdr_size;
}
@ -466,9 +470,10 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
pid_t pid, pid_t tid,
bool create)
{
struct rb_node **p = &threads->entries.rb_node;
struct rb_node **p = &threads->entries.rb_root.rb_node;
struct rb_node *parent = NULL;
struct thread *th;
bool leftmost = true;
th = threads__get_last_match(threads, machine, pid, tid);
if (th)
@ -486,8 +491,10 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
if (tid < th->tid)
p = &(*p)->rb_left;
else
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
if (!create)
@ -496,7 +503,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
th = thread__new(pid, tid);
if (th != NULL) {
rb_link_node(&th->rb_node, parent, p);
rb_insert_color(&th->rb_node, &threads->entries);
rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
/*
* We have to initialize map_groups separately
@ -507,7 +514,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
* leader and that would screw up the rb tree.
*/
if (thread__init_map_groups(th, machine)) {
rb_erase_init(&th->rb_node, &threads->entries);
rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
thread__put(th);
return NULL;
@ -798,7 +805,7 @@ size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
struct rb_node *nd;
size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += __dsos__fprintf(&pos->dsos.head, fp);
}
@ -818,7 +825,7 @@ size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
struct rb_node *nd;
size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
}
@ -858,7 +865,8 @@ size_t machine__fprintf(struct machine *machine, FILE *fp)
ret = fprintf(fp, "Threads: %u\n", threads->nr);
for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&threads->entries); nd;
nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
@ -1161,7 +1169,7 @@ failure:
void machines__destroy_kernel_maps(struct machines *machines)
{
struct rb_node *next = rb_first(&machines->guests);
struct rb_node *next = rb_first_cached(&machines->guests);
machine__destroy_kernel_maps(&machines->host);
@ -1169,7 +1177,7 @@ void machines__destroy_kernel_maps(struct machines *machines)
struct machine *pos = rb_entry(next, struct machine, rb_node);
next = rb_next(&pos->rb_node);
rb_erase(&pos->rb_node, &machines->guests);
rb_erase_cached(&pos->rb_node, &machines->guests);
machine__delete(pos);
}
}
@ -1734,7 +1742,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
BUG_ON(refcount_read(&th->refcnt) == 0);
if (lock)
down_write(&threads->lock);
rb_erase_init(&th->rb_node, &threads->entries);
rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
--threads->nr;
/*
@ -2511,7 +2519,8 @@ int machine__for_each_thread(struct machine *machine,
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
threads = &machine->threads[i];
for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&threads->entries); nd;
nd = rb_next(nd)) {
thread = rb_entry(nd, struct thread, rb_node);
rc = fn(thread, priv);
if (rc != 0)
@ -2538,7 +2547,7 @@ int machines__for_each_thread(struct machines *machines,
if (rc != 0)
return rc;
for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *machine = rb_entry(nd, struct machine, rb_node);
rc = machine__for_each_thread(machine, fn, priv);

View File

@ -29,11 +29,11 @@ struct vdso_info;
#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)
struct threads {
struct rb_root entries;
struct rw_semaphore lock;
unsigned int nr;
struct list_head dead;
struct thread *last_match;
struct rb_root_cached entries;
struct rw_semaphore lock;
unsigned int nr;
struct list_head dead;
struct thread *last_match;
};
struct machine {
@ -140,7 +140,7 @@ typedef void (*machine__process_t)(struct machine *machine, void *data);
struct machines {
struct machine host;
struct rb_root guests;
struct rb_root_cached guests;
};
void machines__init(struct machines *machines);

View File

@ -286,8 +286,8 @@ void map__put(struct map *map)
void map__fixup_start(struct map *map)
{
struct rb_root *symbols = &map->dso->symbols;
struct rb_node *nd = rb_first(symbols);
struct rb_root_cached *symbols = &map->dso->symbols;
struct rb_node *nd = rb_first_cached(symbols);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map->start = sym->start;
@ -296,8 +296,8 @@ void map__fixup_start(struct map *map)
void map__fixup_end(struct map *map)
{
struct rb_root *symbols = &map->dso->symbols;
struct rb_node *nd = rb_last(symbols);
struct rb_root_cached *symbols = &map->dso->symbols;
struct rb_node *nd = rb_last(&symbols->rb_root);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map->end = sym->end;
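
map__fixup_end() shows the asymmetry of the cached variant: only the leftmost node is cached, so a rightmost lookup still goes through the embedded rb_root with plain rb_last(). Sketch on the hypothetical knode tree:

static struct knode *knode__last(struct rb_root_cached *root)
{
	/* no cached rightmost pointer: O(log n) descent via the plain root */
	struct rb_node *nd = rb_last(&root->rb_root);

	return nd ? rb_entry(nd, struct knode, rb_node) : NULL;
}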

View File

@ -352,7 +352,7 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
else if (metrics && !raw)
printf("\nMetrics:\n\n");
for (node = rb_first(&groups.entries); node; node = next) {
for (node = rb_first_cached(&groups.entries); node; node = next) {
struct mep *me = container_of(node, struct mep, nd);
if (metricgroups)

View File

@ -35,6 +35,7 @@
#include "util.h"
#include "event.h"
#include "namespaces.h"
#include "strlist.h"
#include "strfilter.h"
#include "debug.h"
@ -3528,7 +3529,8 @@ int show_available_funcs(const char *target, struct nsinfo *nsi,
/* Show all (filtered) symbols */
setup_pager();
for (nd = rb_first(&map->dso->symbol_names); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&map->dso->symbol_names); nd;
nd = rb_next(nd)) {
struct symbol_name_rb_node *pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
if (strfilter__compare(_filter, pos->sym.name))

View File

@ -4,8 +4,9 @@
#include <linux/compiler.h>
#include <stdbool.h>
#include "intlist.h"
#include "namespaces.h"
struct intlist;
struct nsinfo;
/* Probe related configurations */
struct probe_conf {

View File

@ -20,6 +20,7 @@
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
#include "namespaces.h"
#include "util.h"
#include "event.h"
#include "strlist.h"

View File

@ -140,12 +140,12 @@ struct __name##_sorted *__name = __name##_sorted__new
/* For 'struct intlist' */
#define DECLARE_RESORT_RB_INTLIST(__name, __ilist) \
DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries, \
DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \
__ilist->rblist.nr_entries)
/* For 'struct machine->threads' */
#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries, \
__machine->threads[hash_bucket].nr)
#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries.rb_root, \
__machine->threads[hash_bucket].nr)
#endif /* _PERF_RESORT_RB_H_ */

View File

@ -13,8 +13,9 @@
int rblist__add_node(struct rblist *rblist, const void *new_entry)
{
struct rb_node **p = &rblist->entries.rb_node;
struct rb_node **p = &rblist->entries.rb_root.rb_node;
struct rb_node *parent = NULL, *new_node;
bool leftmost = true;
while (*p != NULL) {
int rc;
@ -24,8 +25,10 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
rc = rblist->node_cmp(parent, new_entry);
if (rc > 0)
p = &(*p)->rb_left;
else if (rc < 0)
else if (rc < 0) {
p = &(*p)->rb_right;
leftmost = false;
}
else
return -EEXIST;
}
@ -35,7 +38,7 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
return -ENOMEM;
rb_link_node(new_node, parent, p);
rb_insert_color(new_node, &rblist->entries);
rb_insert_color_cached(new_node, &rblist->entries, leftmost);
++rblist->nr_entries;
return 0;
@ -43,7 +46,7 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
{
rb_erase(rb_node, &rblist->entries);
rb_erase_cached(rb_node, &rblist->entries);
--rblist->nr_entries;
rblist->node_delete(rblist, rb_node);
}
@ -52,8 +55,9 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
const void *entry,
bool create)
{
struct rb_node **p = &rblist->entries.rb_node;
struct rb_node **p = &rblist->entries.rb_root.rb_node;
struct rb_node *parent = NULL, *new_node = NULL;
bool leftmost = true;
while (*p != NULL) {
int rc;
@ -63,8 +67,10 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
rc = rblist->node_cmp(parent, entry);
if (rc > 0)
p = &(*p)->rb_left;
else if (rc < 0)
else if (rc < 0) {
p = &(*p)->rb_right;
leftmost = false;
}
else
return parent;
}
@ -73,7 +79,8 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
new_node = rblist->node_new(rblist, entry);
if (new_node) {
rb_link_node(new_node, parent, p);
rb_insert_color(new_node, &rblist->entries);
rb_insert_color_cached(new_node,
&rblist->entries, leftmost);
++rblist->nr_entries;
}
}
@ -94,7 +101,7 @@ struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry)
void rblist__init(struct rblist *rblist)
{
if (rblist != NULL) {
rblist->entries = RB_ROOT;
rblist->entries = RB_ROOT_CACHED;
rblist->nr_entries = 0;
}
@ -103,7 +110,7 @@ void rblist__init(struct rblist *rblist)
void rblist__exit(struct rblist *rblist)
{
struct rb_node *pos, *next = rb_first(&rblist->entries);
struct rb_node *pos, *next = rb_first_cached(&rblist->entries);
while (next) {
pos = next;
@ -124,7 +131,8 @@ struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
{
struct rb_node *node;
for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {
for (node = rb_first_cached(&rblist->entries); node;
node = rb_next(node)) {
if (!idx--)
return node;
}
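
__rblist__findnew() above also shows that lookups are unchanged by the caching: the binary search simply starts at the embedded root node. An equivalent sketch for the hypothetical knode tree:

static struct knode *knode__find(struct rb_root_cached *root, u64 key)
{
	struct rb_node *n = root->rb_root.rb_node;	/* search is uncached */

	while (n) {
		struct knode *pos = rb_entry(n, struct knode, rb_node);

		if (key < pos->key)
			n = n->rb_left;
		else if (key > pos->key)
			n = n->rb_right;
		else
			return pos;
	}
	return NULL;
}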

View File

@ -20,7 +20,7 @@
*/
struct rblist {
struct rb_root entries;
struct rb_root_cached entries;
unsigned int nr_entries;
int (*node_cmp)(struct rb_node *rbn, const void *entry);

View File

@ -733,8 +733,7 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
Py_FatalError("couldn't create Python dictionary");
pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(perf_evsel__name(evsel)));
pydict_set_item_string_decref(dict, "attr", _PyUnicode_FromStringAndSize(
(const char *)&evsel->attr, sizeof(evsel->attr)));
pydict_set_item_string_decref(dict, "attr", _PyBytes_FromStringAndSize((const char *)&evsel->attr, sizeof(evsel->attr)));
pydict_set_item_string_decref(dict_sample, "pid",
_PyLong_FromLong(sample->pid));
@ -1494,34 +1493,40 @@ static void _free_command_line(wchar_t **command_line, int num)
static int python_start_script(const char *script, int argc, const char **argv)
{
struct tables *tables = &tables_global;
PyMODINIT_FUNC (*initfunc)(void);
#if PY_MAJOR_VERSION < 3
const char **command_line;
#else
wchar_t **command_line;
#endif
char buf[PATH_MAX];
/*
* Use a non-const name variable to cope with python 2.6's
* PyImport_AppendInittab prototype
*/
char buf[PATH_MAX], name[19] = "perf_trace_context";
int i, err = 0;
FILE *fp;
#if PY_MAJOR_VERSION < 3
initfunc = initperf_trace_context;
command_line = malloc((argc + 1) * sizeof(const char *));
command_line[0] = script;
for (i = 1; i < argc + 1; i++)
command_line[i] = argv[i - 1];
#else
initfunc = PyInit_perf_trace_context;
command_line = malloc((argc + 1) * sizeof(wchar_t *));
command_line[0] = Py_DecodeLocale(script, NULL);
for (i = 1; i < argc + 1; i++)
command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
#endif
PyImport_AppendInittab(name, initfunc);
Py_Initialize();
#if PY_MAJOR_VERSION < 3
initperf_trace_context();
PySys_SetArgv(argc + 1, (char **)command_line);
#else
PyInit_perf_trace_context();
PySys_SetArgv(argc + 1, command_line);
#endif

View File

@ -1,5 +1,3 @@
#!/usr/bin/python
from os import getenv
from subprocess import Popen, PIPE
from re import sub

View File

@ -145,8 +145,8 @@ struct hist_entry {
union {
/* this is for hierarchical entry structure */
struct {
struct rb_root hroot_in;
struct rb_root hroot_out;
struct rb_root_cached hroot_in;
struct rb_root_cached hroot_out;
}; /* non-leaf entries */
struct rb_root sorted_chain; /* leaf entry has callchains */
};

View File

@ -594,11 +594,12 @@ struct srcline_node {
struct rb_node rb_node;
};
void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline)
void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline)
{
struct rb_node **p = &tree->rb_node;
struct rb_node **p = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
struct srcline_node *i, *node;
bool leftmost = true;
node = zalloc(sizeof(struct srcline_node));
if (!node) {
@ -614,16 +615,18 @@ void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline)
i = rb_entry(parent, struct srcline_node, rb_node);
if (addr < i->addr)
p = &(*p)->rb_left;
else
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, tree);
rb_insert_color_cached(&node->rb_node, tree, leftmost);
}
char *srcline__tree_find(struct rb_root *tree, u64 addr)
char *srcline__tree_find(struct rb_root_cached *tree, u64 addr)
{
struct rb_node *n = tree->rb_node;
struct rb_node *n = tree->rb_root.rb_node;
while (n) {
struct srcline_node *i = rb_entry(n, struct srcline_node,
@ -640,15 +643,15 @@ char *srcline__tree_find(struct rb_root *tree, u64 addr)
return NULL;
}
void srcline__tree_delete(struct rb_root *tree)
void srcline__tree_delete(struct rb_root_cached *tree)
{
struct srcline_node *pos;
struct rb_node *next = rb_first(tree);
struct rb_node *next = rb_first_cached(tree);
while (next) {
pos = rb_entry(next, struct srcline_node, rb_node);
next = rb_next(&pos->rb_node);
rb_erase(&pos->rb_node, tree);
rb_erase_cached(&pos->rb_node, tree);
free_srcline(pos->srcline);
zfree(&pos);
}
@ -682,28 +685,32 @@ void inline_node__delete(struct inline_node *node)
free(node);
}
void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines)
void inlines__tree_insert(struct rb_root_cached *tree,
struct inline_node *inlines)
{
struct rb_node **p = &tree->rb_node;
struct rb_node **p = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
const u64 addr = inlines->addr;
struct inline_node *i;
bool leftmost = true;
while (*p != NULL) {
parent = *p;
i = rb_entry(parent, struct inline_node, rb_node);
if (addr < i->addr)
p = &(*p)->rb_left;
else
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&inlines->rb_node, parent, p);
rb_insert_color(&inlines->rb_node, tree);
rb_insert_color_cached(&inlines->rb_node, tree, leftmost);
}
struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr)
struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr)
{
struct rb_node *n = tree->rb_node;
struct rb_node *n = tree->rb_root.rb_node;
while (n) {
struct inline_node *i = rb_entry(n, struct inline_node,
@ -720,15 +727,15 @@ struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr)
return NULL;
}
void inlines__tree_delete(struct rb_root *tree)
void inlines__tree_delete(struct rb_root_cached *tree)
{
struct inline_node *pos;
struct rb_node *next = rb_first(tree);
struct rb_node *next = rb_first_cached(tree);
while (next) {
pos = rb_entry(next, struct inline_node, rb_node);
next = rb_next(&pos->rb_node);
rb_erase(&pos->rb_node, tree);
rb_erase_cached(&pos->rb_node, tree);
inline_node__delete(pos);
}
}
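
srcline__tree_delete() and inlines__tree_delete() share one drain loop: grab the first node, advance with rb_next() before erasing, and erase through rb_erase_cached() so rb_leftmost tracks the shrinking tree. A sketch, assuming the hypothetical knode objects are plain malloc()/free() allocations:

#include <stdlib.h>

static void knode__tree_delete(struct rb_root_cached *root)
{
	struct rb_node *next = rb_first_cached(root);

	while (next) {
		struct knode *pos = rb_entry(next, struct knode, rb_node);

		next = rb_next(&pos->rb_node);	/* advance before erasing */
		rb_erase_cached(&pos->rb_node, root);
		free(pos);
	}
}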

View File

@ -19,11 +19,11 @@ void free_srcline(char *srcline);
char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line);
/* insert the srcline into the DSO, which will take ownership */
void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline);
void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline);
/* find previously inserted srcline */
char *srcline__tree_find(struct rb_root *tree, u64 addr);
char *srcline__tree_find(struct rb_root_cached *tree, u64 addr);
/* delete all srclines within the tree */
void srcline__tree_delete(struct rb_root *tree);
void srcline__tree_delete(struct rb_root_cached *tree);
#define SRCLINE_UNKNOWN ((char *) "??:0")
@ -46,10 +46,11 @@ struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr,
void inline_node__delete(struct inline_node *node);
/* insert the inline node list into the DSO, which will take ownership */
void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines);
void inlines__tree_insert(struct rb_root_cached *tree,
struct inline_node *inlines);
/* find previously inserted inline node list */
struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr);
struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr);
/* delete all nodes within the tree of inline_node s */
void inlines__tree_delete(struct rb_root *tree);
void inlines__tree_delete(struct rb_root_cached *tree);
#endif /* PERF_SRCLINE_H */

View File

@ -2,6 +2,7 @@
#include <inttypes.h>
#include <linux/time64.h>
#include <math.h>
#include "color.h"
#include "evlist.h"
#include "evsel.h"
#include "stat.h"

View File

@ -168,7 +168,7 @@ static void reset_stat(struct runtime_stat *st)
struct rb_node *pos, *next;
rblist = &st->value_list;
next = rb_first(&rblist->entries);
next = rb_first_cached(&rblist->entries);
while (next) {
pos = next;
next = rb_next(pos);

View File

@ -57,7 +57,7 @@ static inline unsigned int strlist__nr_entries(const struct strlist *slist)
/* For strlist iteration */
static inline struct str_node *strlist__first(struct strlist *slist)
{
struct rb_node *rn = rb_first(&slist->rblist.entries);
struct rb_node *rn = rb_first_cached(&slist->rblist.entries);
return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
}
static inline struct str_node *strlist__next(struct str_node *sn)

View File

@ -3,6 +3,7 @@
#include "util.h"
#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>

View File

@ -163,7 +163,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
return arch__choose_best_symbol(syma, symb);
}
void symbols__fixup_duplicate(struct rb_root *symbols)
void symbols__fixup_duplicate(struct rb_root_cached *symbols)
{
struct rb_node *nd;
struct symbol *curr, *next;
@ -171,7 +171,7 @@ void symbols__fixup_duplicate(struct rb_root *symbols)
if (symbol_conf.allow_aliases)
return;
nd = rb_first(symbols);
nd = rb_first_cached(symbols);
while (nd) {
curr = rb_entry(nd, struct symbol, rb_node);
@ -186,20 +186,20 @@ again:
continue;
if (choose_best_symbol(curr, next) == SYMBOL_A) {
rb_erase(&next->rb_node, symbols);
rb_erase_cached(&next->rb_node, symbols);
symbol__delete(next);
goto again;
} else {
nd = rb_next(&curr->rb_node);
rb_erase(&curr->rb_node, symbols);
rb_erase_cached(&curr->rb_node, symbols);
symbol__delete(curr);
}
}
}
void symbols__fixup_end(struct rb_root *symbols)
void symbols__fixup_end(struct rb_root_cached *symbols)
{
struct rb_node *nd, *prevnd = rb_first(symbols);
struct rb_node *nd, *prevnd = rb_first_cached(symbols);
struct symbol *curr, *prev;
if (prevnd == NULL)
@ -282,25 +282,27 @@ void symbol__delete(struct symbol *sym)
free(((void *)sym) - symbol_conf.priv_size);
}
void symbols__delete(struct rb_root *symbols)
void symbols__delete(struct rb_root_cached *symbols)
{
struct symbol *pos;
struct rb_node *next = rb_first(symbols);
struct rb_node *next = rb_first_cached(symbols);
while (next) {
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
rb_erase(&pos->rb_node, symbols);
rb_erase_cached(&pos->rb_node, symbols);
symbol__delete(pos);
}
}
void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
void __symbols__insert(struct rb_root_cached *symbols,
struct symbol *sym, bool kernel)
{
struct rb_node **p = &symbols->rb_node;
struct rb_node **p = &symbols->rb_root.rb_node;
struct rb_node *parent = NULL;
const u64 ip = sym->start;
struct symbol *s;
bool leftmost = true;
if (kernel) {
const char *name = sym->name;
@ -318,26 +320,28 @@ void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
s = rb_entry(parent, struct symbol, rb_node);
if (ip < s->start)
p = &(*p)->rb_left;
else
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&sym->rb_node, parent, p);
rb_insert_color(&sym->rb_node, symbols);
rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}
void symbols__insert(struct rb_root *symbols, struct symbol *sym)
void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
__symbols__insert(symbols, sym, false);
}
static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
{
struct rb_node *n;
if (symbols == NULL)
return NULL;
n = symbols->rb_node;
n = symbols->rb_root.rb_node;
while (n) {
struct symbol *s = rb_entry(n, struct symbol, rb_node);
@ -353,9 +357,9 @@ static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
return NULL;
}
static struct symbol *symbols__first(struct rb_root *symbols)
static struct symbol *symbols__first(struct rb_root_cached *symbols)
{
struct rb_node *n = rb_first(symbols);
struct rb_node *n = rb_first_cached(symbols);
if (n)
return rb_entry(n, struct symbol, rb_node);
@ -363,9 +367,9 @@ static struct symbol *symbols__first(struct rb_root *symbols)
return NULL;
}
static struct symbol *symbols__last(struct rb_root *symbols)
static struct symbol *symbols__last(struct rb_root_cached *symbols)
{
struct rb_node *n = rb_last(symbols);
struct rb_node *n = rb_last(&symbols->rb_root);
if (n)
return rb_entry(n, struct symbol, rb_node);
@ -383,11 +387,12 @@ static struct symbol *symbols__next(struct symbol *sym)
return NULL;
}
static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
{
struct rb_node **p = &symbols->rb_node;
struct rb_node **p = &symbols->rb_root.rb_node;
struct rb_node *parent = NULL;
struct symbol_name_rb_node *symn, *s;
bool leftmost = true;
symn = container_of(sym, struct symbol_name_rb_node, sym);
@ -396,19 +401,21 @@ static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
if (strcmp(sym->name, s->sym.name) < 0)
p = &(*p)->rb_left;
else
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
rb_link_node(&symn->rb_node, parent, p);
rb_insert_color(&symn->rb_node, symbols);
rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
}
static void symbols__sort_by_name(struct rb_root *symbols,
struct rb_root *source)
static void symbols__sort_by_name(struct rb_root_cached *symbols,
struct rb_root_cached *source)
{
struct rb_node *nd;
for (nd = rb_first(source); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
symbols__insert_by_name(symbols, pos);
}
@ -431,7 +438,7 @@ int symbol__match_symbol_name(const char *name, const char *str,
return arch__compare_symbol_names(name, str);
}
static struct symbol *symbols__find_by_name(struct rb_root *symbols,
static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
const char *name,
enum symbol_tag_include includes)
{
@ -441,7 +448,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
if (symbols == NULL)
return NULL;
n = symbols->rb_node;
n = symbols->rb_root.rb_node;
while (n) {
int cmp;
@ -644,7 +651,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
{
struct symbol *sym;
struct dso *dso = arg;
struct rb_root *root = &dso->symbols;
struct rb_root_cached *root = &dso->symbols;
if (!symbol_type__filter(type))
return 0;
@ -681,14 +688,14 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
struct map *curr_map;
struct symbol *pos;
int count = 0;
struct rb_root old_root = dso->symbols;
struct rb_root *root = &dso->symbols;
struct rb_node *next = rb_first(root);
struct rb_root_cached old_root = dso->symbols;
struct rb_root_cached *root = &dso->symbols;
struct rb_node *next = rb_first_cached(root);
if (!kmaps)
return -1;
*root = RB_ROOT;
*root = RB_ROOT_CACHED;
while (next) {
char *module;
@ -696,8 +703,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
rb_erase_init(&pos->rb_node, &old_root);
rb_erase_cached(&pos->rb_node, &old_root);
RB_CLEAR_NODE(&pos->rb_node);
module = strchr(pos->name, '\t');
if (module)
*module = '\0';
@ -734,8 +741,8 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
struct map *curr_map = initial_map;
struct symbol *pos;
int count = 0, moved = 0;
struct rb_root *root = &dso->symbols;
struct rb_node *next = rb_first(root);
struct rb_root_cached *root = &dso->symbols;
struct rb_node *next = rb_first_cached(root);
int kernel_range = 0;
bool x86_64;
@ -849,7 +856,7 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
}
add_symbol:
if (curr_map != initial_map) {
rb_erase(&pos->rb_node, root);
rb_erase_cached(&pos->rb_node, root);
symbols__insert(&curr_map->dso->symbols, pos);
++moved;
} else
@ -857,7 +864,7 @@ add_symbol:
continue;
discard_symbol:
rb_erase(&pos->rb_node, root);
rb_erase_cached(&pos->rb_node, root);
symbol__delete(pos);
}
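
map_groups__split_kallsyms_for_kcore() above uses a rebuild idiom worth calling out: the whole cached root is copied by value into old_root, the live root is reset to RB_ROOT_CACHED, and nodes are drained out of the copy and re-inserted. A sketch on the hypothetical knode tree, reusing knode__insert() from the earlier sketch:

static void knode__tree_rebuild(struct rb_root_cached *root)
{
	struct rb_root_cached old_root = *root;	/* value copy keeps old tree walkable */
	struct rb_node *next = rb_first_cached(&old_root);

	*root = RB_ROOT_CACHED;			/* live root restarts empty */

	while (next) {
		struct knode *pos = rb_entry(next, struct knode, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, &old_root);
		knode__insert(root, pos);	/* re-insert under new placement rules */
	}
}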

View File

@ -5,16 +5,12 @@
#include <linux/types.h>
#include <stdbool.h>
#include <stdint.h>
#include "map.h"
#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <stdio.h>
#include <byteswap.h>
#include <libgen.h>
#include "build-id.h"
#include "event.h"
#include "branch.h"
#include "path.h"
#include "symbol_conf.h"
#ifdef HAVE_LIBELF_SUPPORT
#include <libelf.h>
@ -24,6 +20,10 @@
#include "dso.h"
struct map;
struct map_groups;
struct option;
/*
* libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
* for newer versions we can use mmap to reduce memory usage:
@ -68,7 +68,7 @@ struct symbol {
};
void symbol__delete(struct symbol *sym);
void symbols__delete(struct rb_root *symbols);
void symbols__delete(struct rb_root_cached *symbols);
/* symbols__for_each_entry - iterate over symbols (rb_root)
*
@ -77,7 +77,7 @@ void symbols__delete(struct rb_root *symbols);
* @nd: the 'struct rb_node *' to use as a temporary storage
*/
#define symbols__for_each_entry(symbols, pos, nd) \
for (nd = rb_first(symbols); \
for (nd = rb_first_cached(symbols); \
nd && (pos = rb_entry(nd, struct symbol, rb_node)); \
nd = rb_next(nd))
@ -89,69 +89,6 @@ static inline size_t symbol__size(const struct symbol *sym)
struct strlist;
struct intlist;
struct symbol_conf {
unsigned short priv_size;
bool try_vmlinux_path,
init_annotation,
force,
ignore_vmlinux,
ignore_vmlinux_buildid,
show_kernel_path,
use_modules,
allow_aliases,
sort_by_name,
show_nr_samples,
show_total_period,
use_callchain,
cumulate_callchain,
show_branchflag_count,
exclude_other,
show_cpu_utilization,
initialized,
kptr_restrict,
event_group,
demangle,
demangle_kernel,
filter_relative,
show_hist_headers,
branch_callstack,
has_filter,
show_ref_callgraph,
hide_unresolved,
raw_trace,
report_hierarchy,
inline_name;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,
*field_sep,
*graph_function;
const char *default_guest_vmlinux_name,
*default_guest_kallsyms,
*default_guest_modules;
const char *guestmount;
const char *dso_list_str,
*comm_list_str,
*pid_list_str,
*tid_list_str,
*sym_list_str,
*col_width_list_str,
*bt_stop_list_str;
struct strlist *dso_list,
*comm_list,
*sym_list,
*dso_from_list,
*dso_to_list,
*sym_from_list,
*sym_to_list,
*bt_stop_list;
struct intlist *pid_list,
*tid_list;
const char *symfs;
};
extern struct symbol_conf symbol_conf;
struct symbol_name_rb_node {
struct rb_node rb_node;
struct symbol sym;
@ -310,10 +247,11 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss);
char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name);
void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel);
void symbols__insert(struct rb_root *symbols, struct symbol *sym);
void symbols__fixup_duplicate(struct rb_root *symbols);
void symbols__fixup_end(struct rb_root *symbols);
void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
bool kernel);
void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
void symbols__fixup_duplicate(struct rb_root_cached *symbols);
void symbols__fixup_end(struct rb_root_cached *symbols);
void map_groups__fixup_end(struct map_groups *mg);
typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);

View File

@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_SYMBOL_CONF
#define __PERF_SYMBOL_CONF 1
#include <stdbool.h>
struct strlist;
struct intlist;
struct symbol_conf {
unsigned short priv_size;
bool try_vmlinux_path,
init_annotation,
force,
ignore_vmlinux,
ignore_vmlinux_buildid,
show_kernel_path,
use_modules,
allow_aliases,
sort_by_name,
show_nr_samples,
show_total_period,
use_callchain,
cumulate_callchain,
show_branchflag_count,
exclude_other,
show_cpu_utilization,
initialized,
kptr_restrict,
event_group,
demangle,
demangle_kernel,
filter_relative,
show_hist_headers,
branch_callstack,
has_filter,
show_ref_callgraph,
hide_unresolved,
raw_trace,
report_hierarchy,
inline_name;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,
*field_sep,
*graph_function;
const char *default_guest_vmlinux_name,
*default_guest_kallsyms,
*default_guest_modules;
const char *guestmount;
const char *dso_list_str,
*comm_list_str,
*pid_list_str,
*tid_list_str,
*sym_list_str,
*col_width_list_str,
*bt_stop_list_str;
struct strlist *dso_list,
*comm_list,
*sym_list,
*dso_from_list,
*dso_to_list,
*sym_from_list,
*sym_to_list,
*bt_stop_list;
struct intlist *pid_list,
*tid_list;
const char *symfs;
};
extern struct symbol_conf symbol_conf;
#endif // __PERF_SYMBOL_CONF

View File

@ -3,6 +3,7 @@
#include <inttypes.h>
#include <stdio.h>
#include "map.h"
#include "symbol.h"
size_t symbol__fprintf(struct symbol *sym, FILE *fp)
@ -64,7 +65,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
struct rb_node *nd;
struct symbol_name_rb_node *pos;
for (nd = rb_first(&dso->symbol_names); nd; nd = rb_next(nd)) {
for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
fprintf(fp, "%s\n", pos->sym.name);
}

View File

@ -13,6 +13,7 @@
#include <intlist.h>
#include "rwsem.h"
struct namespaces_event;
struct thread_stack;
struct unwind_libunwind_ops;

View File

@ -2,6 +2,7 @@
#include "../perf.h"
#include "util.h"
#include "debug.h"
#include "namespaces.h"
#include <api/fs/fs.h>
#include <sys/mman.h>
#include <sys/stat.h>