[cascardo/linux.git] tools/perf/util/hist.c
1 #include "util.h"
2 #include "build-id.h"
3 #include "hist.h"
4 #include "session.h"
5 #include "sort.h"
6 #include "evlist.h"
7 #include "evsel.h"
8 #include "annotate.h"
9 #include "ui/progress.h"
10 #include <math.h>
11
12 static bool hists__filter_entry_by_dso(struct hists *hists,
13                                        struct hist_entry *he);
14 static bool hists__filter_entry_by_thread(struct hists *hists,
15                                           struct hist_entry *he);
16 static bool hists__filter_entry_by_symbol(struct hists *hists,
17                                           struct hist_entry *he);
18 static bool hists__filter_entry_by_socket(struct hists *hists,
19                                           struct hist_entry *he);
20
21 u16 hists__col_len(struct hists *hists, enum hist_column col)
22 {
23         return hists->col_len[col];
24 }
25
26 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
27 {
28         hists->col_len[col] = len;
29 }
30
31 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
32 {
33         if (len > hists__col_len(hists, col)) {
34                 hists__set_col_len(hists, col, len);
35                 return true;
36         }
37         return false;
38 }
39
40 void hists__reset_col_len(struct hists *hists)
41 {
42         enum hist_column col;
43
44         for (col = 0; col < HISTC_NR_COLS; ++col)
45                 hists__set_col_len(hists, col, 0);
46 }
47
48 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
49 {
50         const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
51
52         if (hists__col_len(hists, dso) < unresolved_col_width &&
53             !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
54             !symbol_conf.dso_list)
55                 hists__set_col_len(hists, dso, unresolved_col_width);
56 }
57
58 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
59 {
60         const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
61         int symlen;
62         u16 len;
63
64         /*
65          * +4 accounts for '[x] ' priv level info
66          * +2 accounts for 0x prefix on raw addresses
67          * +3 accounts for ' y ' symtab origin info
68          */
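        /*
         * Editor's note (worked example, not part of the original source):
         * on a 64-bit build BITS_PER_LONG / 4 is 16 hex digits, so a resolved
         * symbol with namelen 20 needs 20 + 4 = 24 columns, or
         * 24 + 16 + 2 + 3 = 45 columns in verbose mode once the raw address,
         * its 0x prefix and the symtab origin marker are included.
         */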
69         if (h->ms.sym) {
70                 symlen = h->ms.sym->namelen + 4;
71                 if (verbose)
72                         symlen += BITS_PER_LONG / 4 + 2 + 3;
73                 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
74         } else {
75                 symlen = unresolved_col_width + 4 + 2;
76                 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
77                 hists__set_unres_dso_col_len(hists, HISTC_DSO);
78         }
79
80         len = thread__comm_len(h->thread);
81         if (hists__new_col_len(hists, HISTC_COMM, len))
82                 hists__set_col_len(hists, HISTC_THREAD, len + 6);
83
84         if (h->ms.map) {
85                 len = dso__name_len(h->ms.map->dso);
86                 hists__new_col_len(hists, HISTC_DSO, len);
87         }
88
89         if (h->parent)
90                 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
91
92         if (h->branch_info) {
93                 if (h->branch_info->from.sym) {
94                         symlen = (int)h->branch_info->from.sym->namelen + 4;
95                         if (verbose)
96                                 symlen += BITS_PER_LONG / 4 + 2 + 3;
97                         hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
98
99                         symlen = dso__name_len(h->branch_info->from.map->dso);
100                         hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
101                 } else {
102                         symlen = unresolved_col_width + 4 + 2;
103                         hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
104                         hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
105                 }
106
107                 if (h->branch_info->to.sym) {
108                         symlen = (int)h->branch_info->to.sym->namelen + 4;
109                         if (verbose)
110                                 symlen += BITS_PER_LONG / 4 + 2 + 3;
111                         hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
112
113                         symlen = dso__name_len(h->branch_info->to.map->dso);
114                         hists__new_col_len(hists, HISTC_DSO_TO, symlen);
115                 } else {
116                         symlen = unresolved_col_width + 4 + 2;
117                         hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
118                         hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
119                 }
120
121                 if (h->branch_info->srcline_from)
122                         hists__new_col_len(hists, HISTC_SRCLINE_FROM,
123                                         strlen(h->branch_info->srcline_from));
124                 if (h->branch_info->srcline_to)
125                         hists__new_col_len(hists, HISTC_SRCLINE_TO,
126                                         strlen(h->branch_info->srcline_to));
127         }
128
129         if (h->mem_info) {
130                 if (h->mem_info->daddr.sym) {
131                         symlen = (int)h->mem_info->daddr.sym->namelen + 4
132                                + unresolved_col_width + 2;
133                         hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
134                                            symlen);
135                         hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
136                                            symlen + 1);
137                 } else {
138                         symlen = unresolved_col_width + 4 + 2;
139                         hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
140                                            symlen);
141                         hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
142                                            symlen);
143                 }
144
145                 if (h->mem_info->iaddr.sym) {
146                         symlen = (int)h->mem_info->iaddr.sym->namelen + 4
147                                + unresolved_col_width + 2;
148                         hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
149                                            symlen);
150                 } else {
151                         symlen = unresolved_col_width + 4 + 2;
152                         hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
153                                            symlen);
154                 }
155
156                 if (h->mem_info->daddr.map) {
157                         symlen = dso__name_len(h->mem_info->daddr.map->dso);
158                         hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
159                                            symlen);
160                 } else {
161                         symlen = unresolved_col_width + 4 + 2;
162                         hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
163                 }
164         } else {
165                 symlen = unresolved_col_width + 4 + 2;
166                 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
167                 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
168                 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
169         }
170
171         hists__new_col_len(hists, HISTC_CPU, 3);
172         hists__new_col_len(hists, HISTC_SOCKET, 6);
173         hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
174         hists__new_col_len(hists, HISTC_MEM_TLB, 22);
175         hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
176         hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
177         hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
178         hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
179
180         if (h->srcline)
181                 hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));
182
183         if (h->srcfile)
184                 hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
185
186         if (h->transaction)
187                 hists__new_col_len(hists, HISTC_TRANSACTION,
188                                    hist_entry__transaction_len());
189
190         if (h->trace_output)
191                 hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
192 }
193
194 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
195 {
196         struct rb_node *next = rb_first(&hists->entries);
197         struct hist_entry *n;
198         int row = 0;
199
200         hists__reset_col_len(hists);
201
202         while (next && row++ < max_rows) {
203                 n = rb_entry(next, struct hist_entry, rb_node);
204                 if (!n->filtered)
205                         hists__calc_col_len(hists, n);
206                 next = rb_next(&n->rb_node);
207         }
208 }
209
210 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
211                                         unsigned int cpumode, u64 period)
212 {
213         switch (cpumode) {
214         case PERF_RECORD_MISC_KERNEL:
215                 he_stat->period_sys += period;
216                 break;
217         case PERF_RECORD_MISC_USER:
218                 he_stat->period_us += period;
219                 break;
220         case PERF_RECORD_MISC_GUEST_KERNEL:
221                 he_stat->period_guest_sys += period;
222                 break;
223         case PERF_RECORD_MISC_GUEST_USER:
224                 he_stat->period_guest_us += period;
225                 break;
226         default:
227                 break;
228         }
229 }
230
231 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
232                                 u64 weight)
233 {
234
235         he_stat->period         += period;
236         he_stat->weight         += weight;
237         he_stat->nr_events      += 1;
238 }
239
240 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
241 {
242         dest->period            += src->period;
243         dest->period_sys        += src->period_sys;
244         dest->period_us         += src->period_us;
245         dest->period_guest_sys  += src->period_guest_sys;
246         dest->period_guest_us   += src->period_guest_us;
247         dest->nr_events         += src->nr_events;
248         dest->weight            += src->weight;
249 }
250
251 static void he_stat__decay(struct he_stat *he_stat)
252 {
253         he_stat->period = (he_stat->period * 7) / 8;
254         he_stat->nr_events = (he_stat->nr_events * 7) / 8;
255         /* XXX need decay for weight too? */
256 }
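/*
 * Editor's note (illustrative arithmetic, not part of the original source):
 * each decay pass keeps 7/8 of what is left, so a period of 800 becomes 700,
 * then 612, 535, ... and eventually 0, at which point hists__decay_entry()
 * reports the entry as ready for deletion.
 */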
257
258 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
259
260 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
261 {
262         u64 prev_period = he->stat.period;
263         u64 diff;
264
265         if (prev_period == 0)
266                 return true;
267
268         he_stat__decay(&he->stat);
269         if (symbol_conf.cumulate_callchain)
270                 he_stat__decay(he->stat_acc);
271         decay_callchain(he->callchain);
272
273         diff = prev_period - he->stat.period;
274
275         if (!he->depth) {
276                 hists->stats.total_period -= diff;
277                 if (!he->filtered)
278                         hists->stats.total_non_filtered_period -= diff;
279         }
280
281         if (!he->leaf) {
282                 struct hist_entry *child;
283                 struct rb_node *node = rb_first(&he->hroot_out);
284                 while (node) {
285                         child = rb_entry(node, struct hist_entry, rb_node);
286                         node = rb_next(node);
287
288                         if (hists__decay_entry(hists, child))
289                                 hists__delete_entry(hists, child);
290                 }
291         }
292
293         return he->stat.period == 0;
294 }
295
296 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
297 {
298         struct rb_root *root_in;
299         struct rb_root *root_out;
300
301         if (he->parent_he) {
302                 root_in  = &he->parent_he->hroot_in;
303                 root_out = &he->parent_he->hroot_out;
304         } else {
305                 if (hists__has(hists, need_collapse))
306                         root_in = &hists->entries_collapsed;
307                 else
308                         root_in = hists->entries_in;
309                 root_out = &hists->entries;
310         }
311
312         rb_erase(&he->rb_node_in, root_in);
313         rb_erase(&he->rb_node, root_out);
314
315         --hists->nr_entries;
316         if (!he->filtered)
317                 --hists->nr_non_filtered_entries;
318
319         hist_entry__delete(he);
320 }
321
322 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
323 {
324         struct rb_node *next = rb_first(&hists->entries);
325         struct hist_entry *n;
326
327         while (next) {
328                 n = rb_entry(next, struct hist_entry, rb_node);
329                 next = rb_next(&n->rb_node);
330                 if (((zap_user && n->level == '.') ||
331                      (zap_kernel && n->level != '.') ||
332                      hists__decay_entry(hists, n))) {
333                         hists__delete_entry(hists, n);
334                 }
335         }
336 }
337
338 void hists__delete_entries(struct hists *hists)
339 {
340         struct rb_node *next = rb_first(&hists->entries);
341         struct hist_entry *n;
342
343         while (next) {
344                 n = rb_entry(next, struct hist_entry, rb_node);
345                 next = rb_next(&n->rb_node);
346
347                 hists__delete_entry(hists, n);
348         }
349 }
350
351 /*
352  * histogram, sorted on item, collects periods
353  */
354
355 static struct hist_entry *hist_entry__new(struct hist_entry *template,
356                                           bool sample_self)
357 {
358         size_t callchain_size = 0;
359         struct hist_entry *he;
360
361         if (symbol_conf.use_callchain)
362                 callchain_size = sizeof(struct callchain_root);
363
364         he = zalloc(sizeof(*he) + callchain_size);
365
366         if (he != NULL) {
367                 *he = *template;
368
369                 if (symbol_conf.cumulate_callchain) {
370                         he->stat_acc = malloc(sizeof(he->stat));
371                         if (he->stat_acc == NULL) {
372                                 free(he);
373                                 return NULL;
374                         }
375                         memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
376                         if (!sample_self)
377                                 memset(&he->stat, 0, sizeof(he->stat));
378                 }
379
380                 map__get(he->ms.map);
381
382                 if (he->branch_info) {
383                         /*
384                          * This branch info was (partially) allocated by
385                          * sample__resolve_bstack() and will be freed after
386                          * the new entries are added, so we need to save a copy.
387                          */
388                         he->branch_info = malloc(sizeof(*he->branch_info));
389                         if (he->branch_info == NULL) {
390                                 map__zput(he->ms.map);
391                                 free(he->stat_acc);
392                                 free(he);
393                                 return NULL;
394                         }
395
396                         memcpy(he->branch_info, template->branch_info,
397                                sizeof(*he->branch_info));
398
399                         map__get(he->branch_info->from.map);
400                         map__get(he->branch_info->to.map);
401                 }
402
403                 if (he->mem_info) {
404                         map__get(he->mem_info->iaddr.map);
405                         map__get(he->mem_info->daddr.map);
406                 }
407
408                 if (symbol_conf.use_callchain)
409                         callchain_init(he->callchain);
410
411                 if (he->raw_data) {
412                         he->raw_data = memdup(he->raw_data, he->raw_size);
413
414                         if (he->raw_data == NULL) {
415                                 map__put(he->ms.map);
416                                 if (he->branch_info) {
417                                         map__put(he->branch_info->from.map);
418                                         map__put(he->branch_info->to.map);
419                                         free(he->branch_info);
420                                 }
421                                 if (he->mem_info) {
422                                         map__put(he->mem_info->iaddr.map);
423                                         map__put(he->mem_info->daddr.map);
424                                 }
425                                 free(he->stat_acc);
426                                 free(he);
427                                 return NULL;
428                         }
429                 }
430                 INIT_LIST_HEAD(&he->pairs.node);
431                 thread__get(he->thread);
432
433                 if (!symbol_conf.report_hierarchy)
434                         he->leaf = true;
435         }
436
437         return he;
438 }
439
440 static u8 symbol__parent_filter(const struct symbol *parent)
441 {
442         if (symbol_conf.exclude_other && parent == NULL)
443                 return 1 << HIST_FILTER__PARENT;
444         return 0;
445 }
446
447 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
448 {
449         if (!symbol_conf.use_callchain)
450                 return;
451
452         he->hists->callchain_period += period;
453         if (!he->filtered)
454                 he->hists->callchain_non_filtered_period += period;
455 }
456
457 static struct hist_entry *hists__findnew_entry(struct hists *hists,
458                                                struct hist_entry *entry,
459                                                struct addr_location *al,
460                                                bool sample_self)
461 {
462         struct rb_node **p;
463         struct rb_node *parent = NULL;
464         struct hist_entry *he;
465         int64_t cmp;
466         u64 period = entry->stat.period;
467         u64 weight = entry->stat.weight;
468
469         p = &hists->entries_in->rb_node;
470
471         while (*p != NULL) {
472                 parent = *p;
473                 he = rb_entry(parent, struct hist_entry, rb_node_in);
474
475                 /*
476          * Make sure that it receives arguments in the same order as
477                  * hist_entry__collapse() so that we can use an appropriate
478                  * function when searching an entry regardless which sort
479                  * keys were used.
480                  */
481                 cmp = hist_entry__cmp(he, entry);
482
483                 if (!cmp) {
484                         if (sample_self) {
485                                 he_stat__add_period(&he->stat, period, weight);
486                                 hist_entry__add_callchain_period(he, period);
487                         }
488                         if (symbol_conf.cumulate_callchain)
489                                 he_stat__add_period(he->stat_acc, period, weight);
490
491                         /*
492                          * This mem info was allocated from sample__resolve_mem
493                          * and will not be used anymore.
494                          */
495                         zfree(&entry->mem_info);
496
497                         /* If the map of an existing hist_entry has
498                          * become out-of-date due to an exec() or
499                          * similar, update it.  Otherwise we will
500                          * mis-adjust symbol addresses when computing
501                          * the history counter to increment.
502                          */
503                         if (he->ms.map != entry->ms.map) {
504                                 map__put(he->ms.map);
505                                 he->ms.map = map__get(entry->ms.map);
506                         }
507                         goto out;
508                 }
509
510                 if (cmp < 0)
511                         p = &(*p)->rb_left;
512                 else
513                         p = &(*p)->rb_right;
514         }
515
516         he = hist_entry__new(entry, sample_self);
517         if (!he)
518                 return NULL;
519
520         if (sample_self)
521                 hist_entry__add_callchain_period(he, period);
522         hists->nr_entries++;
523
524         rb_link_node(&he->rb_node_in, parent, p);
525         rb_insert_color(&he->rb_node_in, hists->entries_in);
526 out:
527         if (sample_self)
528                 he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
529         if (symbol_conf.cumulate_callchain)
530                 he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
531         return he;
532 }
533
534 struct hist_entry *__hists__add_entry(struct hists *hists,
535                                       struct addr_location *al,
536                                       struct symbol *sym_parent,
537                                       struct branch_info *bi,
538                                       struct mem_info *mi,
539                                       struct perf_sample *sample,
540                                       bool sample_self)
541 {
542         struct hist_entry entry = {
543                 .thread = al->thread,
544                 .comm = thread__comm(al->thread),
545                 .ms = {
546                         .map    = al->map,
547                         .sym    = al->sym,
548                 },
549                 .socket  = al->socket,
550                 .cpu     = al->cpu,
551                 .cpumode = al->cpumode,
552                 .ip      = al->addr,
553                 .level   = al->level,
554                 .stat = {
555                         .nr_events = 1,
556                         .period = sample->period,
557                         .weight = sample->weight,
558                 },
559                 .parent = sym_parent,
560                 .filtered = symbol__parent_filter(sym_parent) | al->filtered,
561                 .hists  = hists,
562                 .branch_info = bi,
563                 .mem_info = mi,
564                 .transaction = sample->transaction,
565                 .raw_data = sample->raw_data,
566                 .raw_size = sample->raw_size,
567         };
568
569         return hists__findnew_entry(hists, &entry, al, sample_self);
570 }
571
572 static int
573 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
574                     struct addr_location *al __maybe_unused)
575 {
576         return 0;
577 }
578
579 static int
580 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
581                         struct addr_location *al __maybe_unused)
582 {
583         return 0;
584 }
585
586 static int
587 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
588 {
589         struct perf_sample *sample = iter->sample;
590         struct mem_info *mi;
591
592         mi = sample__resolve_mem(sample, al);
593         if (mi == NULL)
594                 return -ENOMEM;
595
596         iter->priv = mi;
597         return 0;
598 }
599
600 static int
601 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
602 {
603         u64 cost;
604         struct mem_info *mi = iter->priv;
605         struct hists *hists = evsel__hists(iter->evsel);
606         struct perf_sample *sample = iter->sample;
607         struct hist_entry *he;
608
609         if (mi == NULL)
610                 return -EINVAL;
611
612         cost = sample->weight;
613         if (!cost)
614                 cost = 1;
615
616         /*
617          * must pass period=weight in order to get the correct
618          * sorting from hists__collapse_resort() which is solely
619          * based on periods. We want sorting to be done on nr_events * weight
620          * and this is indirectly achieved by passing period=weight here
621          * and the he_stat__add_period() function.
622          */
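        /*
         * Editor's note (hypothetical numbers, not part of the original
         * source): if three samples hit the same data address with weights
         * 30, 50 and 20, passing period = weight makes he_stat__add_period()
         * accumulate period = 100, so the entry is sorted by its summed cost
         * rather than by its raw sample count of 3.
         */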
623         sample->period = cost;
624
625         he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
626                                 sample, true);
627         if (!he)
628                 return -ENOMEM;
629
630         iter->he = he;
631         return 0;
632 }
633
634 static int
635 iter_finish_mem_entry(struct hist_entry_iter *iter,
636                       struct addr_location *al __maybe_unused)
637 {
638         struct perf_evsel *evsel = iter->evsel;
639         struct hists *hists = evsel__hists(evsel);
640         struct hist_entry *he = iter->he;
641         int err = -EINVAL;
642
643         if (he == NULL)
644                 goto out;
645
646         hists__inc_nr_samples(hists, he->filtered);
647
648         err = hist_entry__append_callchain(he, iter->sample);
649
650 out:
651         /*
652          * We don't need to free iter->priv (mem_info) here since the mem info
653          * was either already freed in hists__findnew_entry() or passed to a
654          * new hist entry by hist_entry__new().
655          */
656         iter->priv = NULL;
657
658         iter->he = NULL;
659         return err;
660 }
661
662 static int
663 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
664 {
665         struct branch_info *bi;
666         struct perf_sample *sample = iter->sample;
667
668         bi = sample__resolve_bstack(sample, al);
669         if (!bi)
670                 return -ENOMEM;
671
672         iter->curr = 0;
673         iter->total = sample->branch_stack->nr;
674
675         iter->priv = bi;
676         return 0;
677 }
678
679 static int
680 iter_add_single_branch_entry(struct hist_entry_iter *iter,
681                              struct addr_location *al __maybe_unused)
682 {
683         /* to avoid calling callback function */
684         iter->he = NULL;
685
686         return 0;
687 }
688
689 static int
690 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
691 {
692         struct branch_info *bi = iter->priv;
693         int i = iter->curr;
694
695         if (bi == NULL)
696                 return 0;
697
698         if (iter->curr >= iter->total)
699                 return 0;
700
701         al->map = bi[i].to.map;
702         al->sym = bi[i].to.sym;
703         al->addr = bi[i].to.addr;
704         return 1;
705 }
706
707 static int
708 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
709 {
710         struct branch_info *bi;
711         struct perf_evsel *evsel = iter->evsel;
712         struct hists *hists = evsel__hists(evsel);
713         struct perf_sample *sample = iter->sample;
714         struct hist_entry *he = NULL;
715         int i = iter->curr;
716         int err = 0;
717
718         bi = iter->priv;
719
720         if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
721                 goto out;
722
723         /*
724          * The report shows the percentage of total branches captured
725          * and not events sampled. Thus we use a pseudo period of 1.
726          */
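        /*
         * Editor's note (hypothetical numbers, not part of the original
         * source): with the period fixed at 1, a branch pair seen 40 times
         * out of 200 captured branch records accumulates period 40 and shows
         * up as 20%, i.e. a share of branches rather than of sampled events.
         */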
727         sample->period = 1;
728         sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
729
730         he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
731                                 sample, true);
732         if (he == NULL)
733                 return -ENOMEM;
734
735         hists__inc_nr_samples(hists, he->filtered);
736
737 out:
738         iter->he = he;
739         iter->curr++;
740         return err;
741 }
742
743 static int
744 iter_finish_branch_entry(struct hist_entry_iter *iter,
745                          struct addr_location *al __maybe_unused)
746 {
747         zfree(&iter->priv);
748         iter->he = NULL;
749
750         return iter->curr >= iter->total ? 0 : -1;
751 }
752
753 static int
754 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
755                           struct addr_location *al __maybe_unused)
756 {
757         return 0;
758 }
759
760 static int
761 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
762 {
763         struct perf_evsel *evsel = iter->evsel;
764         struct perf_sample *sample = iter->sample;
765         struct hist_entry *he;
766
767         he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
768                                 sample, true);
769         if (he == NULL)
770                 return -ENOMEM;
771
772         iter->he = he;
773         return 0;
774 }
775
776 static int
777 iter_finish_normal_entry(struct hist_entry_iter *iter,
778                          struct addr_location *al __maybe_unused)
779 {
780         struct hist_entry *he = iter->he;
781         struct perf_evsel *evsel = iter->evsel;
782         struct perf_sample *sample = iter->sample;
783
784         if (he == NULL)
785                 return 0;
786
787         iter->he = NULL;
788
789         hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
790
791         return hist_entry__append_callchain(he, sample);
792 }
793
794 static int
795 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
796                               struct addr_location *al __maybe_unused)
797 {
798         struct hist_entry **he_cache;
799
800         callchain_cursor_commit(&callchain_cursor);
801
802         /*
803          * This is for detecting cycles or recursion so that entries are
804          * cumulated only once, preventing any entry from exceeding 100%
805          * overhead.
806          */
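        /*
         * Editor's note (illustrative case, not part of the original source):
         * for a recursive callchain such as main -> f -> f -> f, the lookup
         * against he_cache in iter_add_next_cumulative_entry() matches the
         * second and third occurrences of f, so f's cumulative period is
         * added only once rather than three times.
         */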
807         he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
808         if (he_cache == NULL)
809                 return -ENOMEM;
810
811         iter->priv = he_cache;
812         iter->curr = 0;
813
814         return 0;
815 }
816
817 static int
818 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
819                                  struct addr_location *al)
820 {
821         struct perf_evsel *evsel = iter->evsel;
822         struct hists *hists = evsel__hists(evsel);
823         struct perf_sample *sample = iter->sample;
824         struct hist_entry **he_cache = iter->priv;
825         struct hist_entry *he;
826         int err = 0;
827
828         he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
829                                 sample, true);
830         if (he == NULL)
831                 return -ENOMEM;
832
833         iter->he = he;
834         he_cache[iter->curr++] = he;
835
836         hist_entry__append_callchain(he, sample);
837
838         /*
839          * We need to re-initialize the cursor since callchain_append()
840          * advanced the cursor to the end.
841          */
842         callchain_cursor_commit(&callchain_cursor);
843
844         hists__inc_nr_samples(hists, he->filtered);
845
846         return err;
847 }
848
849 static int
850 iter_next_cumulative_entry(struct hist_entry_iter *iter,
851                            struct addr_location *al)
852 {
853         struct callchain_cursor_node *node;
854
855         node = callchain_cursor_current(&callchain_cursor);
856         if (node == NULL)
857                 return 0;
858
859         return fill_callchain_info(al, node, iter->hide_unresolved);
860 }
861
862 static int
863 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
864                                struct addr_location *al)
865 {
866         struct perf_evsel *evsel = iter->evsel;
867         struct perf_sample *sample = iter->sample;
868         struct hist_entry **he_cache = iter->priv;
869         struct hist_entry *he;
870         struct hist_entry he_tmp = {
871                 .hists = evsel__hists(evsel),
872                 .cpu = al->cpu,
873                 .thread = al->thread,
874                 .comm = thread__comm(al->thread),
875                 .ip = al->addr,
876                 .ms = {
877                         .map = al->map,
878                         .sym = al->sym,
879                 },
880                 .parent = iter->parent,
881                 .raw_data = sample->raw_data,
882                 .raw_size = sample->raw_size,
883         };
884         int i;
885         struct callchain_cursor cursor;
886
887         callchain_cursor_snapshot(&cursor, &callchain_cursor);
888
889         callchain_cursor_advance(&callchain_cursor);
890
891         /*
892          * Check if there are duplicate entries in the callchain.
893          * It's possible that it has cycles or recursive calls.
894          */
895         for (i = 0; i < iter->curr; i++) {
896                 if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
897                         /* to avoid calling callback function */
898                         iter->he = NULL;
899                         return 0;
900                 }
901         }
902
903         he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
904                                 sample, false);
905         if (he == NULL)
906                 return -ENOMEM;
907
908         iter->he = he;
909         he_cache[iter->curr++] = he;
910
911         if (symbol_conf.use_callchain)
912                 callchain_append(he->callchain, &cursor, sample->period);
913         return 0;
914 }
915
916 static int
917 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
918                              struct addr_location *al __maybe_unused)
919 {
920         zfree(&iter->priv);
921         iter->he = NULL;
922
923         return 0;
924 }
925
926 const struct hist_iter_ops hist_iter_mem = {
927         .prepare_entry          = iter_prepare_mem_entry,
928         .add_single_entry       = iter_add_single_mem_entry,
929         .next_entry             = iter_next_nop_entry,
930         .add_next_entry         = iter_add_next_nop_entry,
931         .finish_entry           = iter_finish_mem_entry,
932 };
933
934 const struct hist_iter_ops hist_iter_branch = {
935         .prepare_entry          = iter_prepare_branch_entry,
936         .add_single_entry       = iter_add_single_branch_entry,
937         .next_entry             = iter_next_branch_entry,
938         .add_next_entry         = iter_add_next_branch_entry,
939         .finish_entry           = iter_finish_branch_entry,
940 };
941
942 const struct hist_iter_ops hist_iter_normal = {
943         .prepare_entry          = iter_prepare_normal_entry,
944         .add_single_entry       = iter_add_single_normal_entry,
945         .next_entry             = iter_next_nop_entry,
946         .add_next_entry         = iter_add_next_nop_entry,
947         .finish_entry           = iter_finish_normal_entry,
948 };
949
950 const struct hist_iter_ops hist_iter_cumulative = {
951         .prepare_entry          = iter_prepare_cumulative_entry,
952         .add_single_entry       = iter_add_single_cumulative_entry,
953         .next_entry             = iter_next_cumulative_entry,
954         .add_next_entry         = iter_add_next_cumulative_entry,
955         .finish_entry           = iter_finish_cumulative_entry,
956 };
957
958 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
959                          int max_stack_depth, void *arg)
960 {
961         int err, err2;
962
963         err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
964                                         iter->evsel, al, max_stack_depth);
965         if (err)
966                 return err;
967
968         iter->max_stack = max_stack_depth;
969
970         err = iter->ops->prepare_entry(iter, al);
971         if (err)
972                 goto out;
973
974         err = iter->ops->add_single_entry(iter, al);
975         if (err)
976                 goto out;
977
978         if (iter->he && iter->add_entry_cb) {
979                 err = iter->add_entry_cb(iter, al, true, arg);
980                 if (err)
981                         goto out;
982         }
983
984         while (iter->ops->next_entry(iter, al)) {
985                 err = iter->ops->add_next_entry(iter, al);
986                 if (err)
987                         break;
988
989                 if (iter->he && iter->add_entry_cb) {
990                         err = iter->add_entry_cb(iter, al, false, arg);
991                         if (err)
992                                 goto out;
993                 }
994         }
995
996 out:
997         err2 = iter->ops->finish_entry(iter, al);
998         if (!err)
999                 err = err2;
1000
1001         return err;
1002 }
1003
1004 int64_t
1005 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1006 {
1007         struct hists *hists = left->hists;
1008         struct perf_hpp_fmt *fmt;
1009         int64_t cmp = 0;
1010
1011         hists__for_each_sort_list(hists, fmt) {
1012                 if (perf_hpp__is_dynamic_entry(fmt) &&
1013                     !perf_hpp__defined_dynamic_entry(fmt, hists))
1014                         continue;
1015
1016                 cmp = fmt->cmp(fmt, left, right);
1017                 if (cmp)
1018                         break;
1019         }
1020
1021         return cmp;
1022 }
1023
1024 int64_t
1025 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1026 {
1027         struct hists *hists = left->hists;
1028         struct perf_hpp_fmt *fmt;
1029         int64_t cmp = 0;
1030
1031         hists__for_each_sort_list(hists, fmt) {
1032                 if (perf_hpp__is_dynamic_entry(fmt) &&
1033                     !perf_hpp__defined_dynamic_entry(fmt, hists))
1034                         continue;
1035
1036                 cmp = fmt->collapse(fmt, left, right);
1037                 if (cmp)
1038                         break;
1039         }
1040
1041         return cmp;
1042 }
1043
1044 void hist_entry__delete(struct hist_entry *he)
1045 {
1046         thread__zput(he->thread);
1047         map__zput(he->ms.map);
1048
1049         if (he->branch_info) {
1050                 map__zput(he->branch_info->from.map);
1051                 map__zput(he->branch_info->to.map);
1052                 free_srcline(he->branch_info->srcline_from);
1053                 free_srcline(he->branch_info->srcline_to);
1054                 zfree(&he->branch_info);
1055         }
1056
1057         if (he->mem_info) {
1058                 map__zput(he->mem_info->iaddr.map);
1059                 map__zput(he->mem_info->daddr.map);
1060                 zfree(&he->mem_info);
1061         }
1062
1063         zfree(&he->stat_acc);
1064         free_srcline(he->srcline);
1065         if (he->srcfile && he->srcfile[0])
1066                 free(he->srcfile);
1067         free_callchain(he->callchain);
1068         free(he->trace_output);
1069         free(he->raw_data);
1070         free(he);
1071 }
1072
1073 /*
1074  * If this is not the last column, then we need to pad it according to the
1075  * pre-calculated max length for this column, otherwise don't bother adding
1076  * spaces because that would break viewing this with, for instance, 'less',
1077  * that would show tons of trailing spaces when a long C++ demangled method
1078  * name is sampled.
1079  */
1080 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1081                                    struct perf_hpp_fmt *fmt, int printed)
1082 {
1083         if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1084                 const int width = fmt->width(fmt, hpp, hists_to_evsel(he->hists));
1085                 if (printed < width) {
1086                         advance_hpp(hpp, printed);
1087                         printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1088                 }
1089         }
1090
1091         return printed;
1092 }
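/*
 * Editor's note (worked example, not part of the original source): if the
 * column's calculated width is 20 and the value just printed used 14
 * characters, the scnprintf() above emits 6 characters of padding so the next
 * column starts at a fixed offset; the last column is left unpadded for the
 * reasons given in the comment before the function.
 */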
1093
1094 /*
1095  * collapse the histogram
1096  */
1097
1098 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1099 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1100                                        enum hist_filter type);
1101
1102 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1103
1104 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1105 {
1106         return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1107 }
1108
1109 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1110                                                 enum hist_filter type,
1111                                                 fmt_chk_fn check)
1112 {
1113         struct perf_hpp_fmt *fmt;
1114         bool type_match = false;
1115         struct hist_entry *parent = he->parent_he;
1116
1117         switch (type) {
1118         case HIST_FILTER__THREAD:
1119                 if (symbol_conf.comm_list == NULL &&
1120                     symbol_conf.pid_list == NULL &&
1121                     symbol_conf.tid_list == NULL)
1122                         return;
1123                 break;
1124         case HIST_FILTER__DSO:
1125                 if (symbol_conf.dso_list == NULL)
1126                         return;
1127                 break;
1128         case HIST_FILTER__SYMBOL:
1129                 if (symbol_conf.sym_list == NULL)
1130                         return;
1131                 break;
1132         case HIST_FILTER__PARENT:
1133         case HIST_FILTER__GUEST:
1134         case HIST_FILTER__HOST:
1135         case HIST_FILTER__SOCKET:
1136         default:
1137                 return;
1138         }
1139
1140         /* if it's filtered by own fmt, it has to have filter bits */
1141         perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1142                 if (check(fmt)) {
1143                         type_match = true;
1144                         break;
1145                 }
1146         }
1147
1148         if (type_match) {
1149                 /*
1150                  * If the filter is for the current level entry, propagate
1151                  * filter marker to parents.  The marker bit was
1152                  * already set by default so it only needs to clear
1153                  * non-filtered entries.
1154                  */
1155                 if (!(he->filtered & (1 << type))) {
1156                         while (parent) {
1157                                 parent->filtered &= ~(1 << type);
1158                                 parent = parent->parent_he;
1159                         }
1160                 }
1161         } else {
1162                 /*
1163                  * If the current entry doesn't have matching formats, set
1164                  * the filter marker for upper level entries.  It will be
1165                  * cleared if its lower level entries are not filtered.
1166                  *
1167                  * For lower-level entries, it inherits parent's
1168                  * filter bit so that lower level entries of a
1169                  * non-filtered entry won't set the filter marker.
1170                  */
1171                 if (parent == NULL)
1172                         he->filtered |= (1 << type);
1173                 else
1174                         he->filtered |= (parent->filtered & (1 << type));
1175         }
1176 }
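/*
 * Editor's note (hypothetical scenario, not part of the original source):
 * with a comm,dso hierarchy and a DSO filter active, only the dso-level
 * entries have a matching format and keep or clear the HIST_FILTER__DSO bit
 * for themselves; a top-level comm entry starts out marked as filtered and is
 * unmarked by the loop over parent_he above as soon as one of its dso
 * children turns out not to be filtered.
 */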
1177
1178 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1179 {
1180         hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1181                                             check_thread_entry);
1182
1183         hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1184                                             perf_hpp__is_dso_entry);
1185
1186         hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1187                                             perf_hpp__is_sym_entry);
1188
1189         hists__apply_filters(he->hists, he);
1190 }
1191
1192 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1193                                                  struct rb_root *root,
1194                                                  struct hist_entry *he,
1195                                                  struct hist_entry *parent_he,
1196                                                  struct perf_hpp_list *hpp_list)
1197 {
1198         struct rb_node **p = &root->rb_node;
1199         struct rb_node *parent = NULL;
1200         struct hist_entry *iter, *new;
1201         struct perf_hpp_fmt *fmt;
1202         int64_t cmp;
1203
1204         while (*p != NULL) {
1205                 parent = *p;
1206                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1207
1208                 cmp = 0;
1209                 perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1210                         cmp = fmt->collapse(fmt, iter, he);
1211                         if (cmp)
1212                                 break;
1213                 }
1214
1215                 if (!cmp) {
1216                         he_stat__add_stat(&iter->stat, &he->stat);
1217                         return iter;
1218                 }
1219
1220                 if (cmp < 0)
1221                         p = &parent->rb_left;
1222                 else
1223                         p = &parent->rb_right;
1224         }
1225
1226         new = hist_entry__new(he, true);
1227         if (new == NULL)
1228                 return NULL;
1229
1230         hists->nr_entries++;
1231
1232         /* save related format list for output */
1233         new->hpp_list = hpp_list;
1234         new->parent_he = parent_he;
1235
1236         hist_entry__apply_hierarchy_filters(new);
1237
1238         /* some fields are now passed to 'new' */
1239         perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1240                 if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1241                         he->trace_output = NULL;
1242                 else
1243                         new->trace_output = NULL;
1244
1245                 if (perf_hpp__is_srcline_entry(fmt))
1246                         he->srcline = NULL;
1247                 else
1248                         new->srcline = NULL;
1249
1250                 if (perf_hpp__is_srcfile_entry(fmt))
1251                         he->srcfile = NULL;
1252                 else
1253                         new->srcfile = NULL;
1254         }
1255
1256         rb_link_node(&new->rb_node_in, parent, p);
1257         rb_insert_color(&new->rb_node_in, root);
1258         return new;
1259 }
1260
1261 static int hists__hierarchy_insert_entry(struct hists *hists,
1262                                          struct rb_root *root,
1263                                          struct hist_entry *he)
1264 {
1265         struct perf_hpp_list_node *node;
1266         struct hist_entry *new_he = NULL;
1267         struct hist_entry *parent = NULL;
1268         int depth = 0;
1269         int ret = 0;
1270
1271         list_for_each_entry(node, &hists->hpp_formats, list) {
1272                 /* skip period (overhead) and elided columns */
1273                 if (node->level == 0 || node->skip)
1274                         continue;
1275
1276                 /* insert copy of 'he' for each fmt into the hierarchy */
1277                 new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1278                 if (new_he == NULL) {
1279                         ret = -1;
1280                         break;
1281                 }
1282
1283                 root = &new_he->hroot_in;
1284                 new_he->depth = depth++;
1285                 parent = new_he;
1286         }
1287
1288         if (new_he) {
1289                 new_he->leaf = true;
1290
1291                 if (symbol_conf.use_callchain) {
1292                         callchain_cursor_reset(&callchain_cursor);
1293                         if (callchain_merge(&callchain_cursor,
1294                                             new_he->callchain,
1295                                             he->callchain) < 0)
1296                                 ret = -1;
1297                 }
1298         }
1299
1300         /* 'he' is no longer used */
1301         hist_entry__delete(he);
1302
1303         /* return 0 (or -1) since it already applied filters */
1304         return ret;
1305 }
1306
1307 static int hists__collapse_insert_entry(struct hists *hists,
1308                                         struct rb_root *root,
1309                                         struct hist_entry *he)
1310 {
1311         struct rb_node **p = &root->rb_node;
1312         struct rb_node *parent = NULL;
1313         struct hist_entry *iter;
1314         int64_t cmp;
1315
1316         if (symbol_conf.report_hierarchy)
1317                 return hists__hierarchy_insert_entry(hists, root, he);
1318
1319         while (*p != NULL) {
1320                 parent = *p;
1321                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1322
1323                 cmp = hist_entry__collapse(iter, he);
1324
1325                 if (!cmp) {
1326                         int ret = 0;
1327
1328                         he_stat__add_stat(&iter->stat, &he->stat);
1329                         if (symbol_conf.cumulate_callchain)
1330                                 he_stat__add_stat(iter->stat_acc, he->stat_acc);
1331
1332                         if (symbol_conf.use_callchain) {
1333                                 callchain_cursor_reset(&callchain_cursor);
1334                                 if (callchain_merge(&callchain_cursor,
1335                                                     iter->callchain,
1336                                                     he->callchain) < 0)
1337                                         ret = -1;
1338                         }
1339                         hist_entry__delete(he);
1340                         return ret;
1341                 }
1342
1343                 if (cmp < 0)
1344                         p = &(*p)->rb_left;
1345                 else
1346                         p = &(*p)->rb_right;
1347         }
1348         hists->nr_entries++;
1349
1350         rb_link_node(&he->rb_node_in, parent, p);
1351         rb_insert_color(&he->rb_node_in, root);
1352         return 1;
1353 }
1354
1355 struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
1356 {
1357         struct rb_root *root;
1358
1359         pthread_mutex_lock(&hists->lock);
1360
1361         root = hists->entries_in;
1362         if (++hists->entries_in > &hists->entries_in_array[1])
1363                 hists->entries_in = &hists->entries_in_array[0];
1364
1365         pthread_mutex_unlock(&hists->lock);
1366
1367         return root;
1368 }
1369
1370 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1371 {
1372         hists__filter_entry_by_dso(hists, he);
1373         hists__filter_entry_by_thread(hists, he);
1374         hists__filter_entry_by_symbol(hists, he);
1375         hists__filter_entry_by_socket(hists, he);
1376 }
1377
1378 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1379 {
1380         struct rb_root *root;
1381         struct rb_node *next;
1382         struct hist_entry *n;
1383         int ret;
1384
1385         if (!hists__has(hists, need_collapse))
1386                 return 0;
1387
1388         hists->nr_entries = 0;
1389
1390         root = hists__get_rotate_entries_in(hists);
1391
1392         next = rb_first(root);
1393
1394         while (next) {
1395                 if (session_done())
1396                         break;
1397                 n = rb_entry(next, struct hist_entry, rb_node_in);
1398                 next = rb_next(&n->rb_node_in);
1399
1400                 rb_erase(&n->rb_node_in, root);
1401                 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1402                 if (ret < 0)
1403                         return -1;
1404
1405                 if (ret) {
1406                         /*
1407                          * If it wasn't combined with one of the entries already
1408                          * collapsed, we need to apply the filters that may have
1409                          * been set by, say, the hist_browser.
1410                          */
1411                         hists__apply_filters(hists, n);
1412                 }
1413                 if (prog)
1414                         ui_progress__update(prog, 1);
1415         }
1416         return 0;
1417 }
1418
1419 static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1420 {
1421         struct hists *hists = a->hists;
1422         struct perf_hpp_fmt *fmt;
1423         int64_t cmp = 0;
1424
1425         hists__for_each_sort_list(hists, fmt) {
1426                 if (perf_hpp__should_skip(fmt, a->hists))
1427                         continue;
1428
1429                 cmp = fmt->sort(fmt, a, b);
1430                 if (cmp)
1431                         break;
1432         }
1433
1434         return cmp;
1435 }
1436
1437 static void hists__reset_filter_stats(struct hists *hists)
1438 {
1439         hists->nr_non_filtered_entries = 0;
1440         hists->stats.total_non_filtered_period = 0;
1441 }
1442
1443 void hists__reset_stats(struct hists *hists)
1444 {
1445         hists->nr_entries = 0;
1446         hists->stats.total_period = 0;
1447
1448         hists__reset_filter_stats(hists);
1449 }
1450
1451 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1452 {
1453         hists->nr_non_filtered_entries++;
1454         hists->stats.total_non_filtered_period += h->stat.period;
1455 }
1456
1457 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1458 {
1459         if (!h->filtered)
1460                 hists__inc_filter_stats(hists, h);
1461
1462         hists->nr_entries++;
1463         hists->stats.total_period += h->stat.period;
1464 }
1465
1466 static void hierarchy_recalc_total_periods(struct hists *hists)
1467 {
1468         struct rb_node *node;
1469         struct hist_entry *he;
1470
1471         node = rb_first(&hists->entries);
1472
1473         hists->stats.total_period = 0;
1474         hists->stats.total_non_filtered_period = 0;
1475
1476         /*
1477          * recalculate total period using top-level entries only
1478          * since lower level entries only see non-filtered entries
1479          * but upper level entries sum both filtered and non-filtered ones.
1480          */
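        /*
         * Editor's note (illustrative case, not part of the original source):
         * in a comm,dso hierarchy each top-level comm entry's period already
         * covers its dso children, so walking only rb_first(&hists->entries),
         * i.e. the top level, counts every sample once instead of once per
         * hierarchy level.
         */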
1481         while (node) {
1482                 he = rb_entry(node, struct hist_entry, rb_node);
1483                 node = rb_next(node);
1484
1485                 hists->stats.total_period += he->stat.period;
1486                 if (!he->filtered)
1487                         hists->stats.total_non_filtered_period += he->stat.period;
1488         }
1489 }
1490
1491 static void hierarchy_insert_output_entry(struct rb_root *root,
1492                                           struct hist_entry *he)
1493 {
1494         struct rb_node **p = &root->rb_node;
1495         struct rb_node *parent = NULL;
1496         struct hist_entry *iter;
1497         struct perf_hpp_fmt *fmt;
1498
1499         while (*p != NULL) {
1500                 parent = *p;
1501                 iter = rb_entry(parent, struct hist_entry, rb_node);
1502
1503                 if (hist_entry__sort(he, iter) > 0)
1504                         p = &parent->rb_left;
1505                 else
1506                         p = &parent->rb_right;
1507         }
1508
1509         rb_link_node(&he->rb_node, parent, p);
1510         rb_insert_color(&he->rb_node, root);
1511
1512         /* update column width of dynamic entry */
1513         perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1514                 if (perf_hpp__is_dynamic_entry(fmt))
1515                         fmt->sort(fmt, he, NULL);
1516         }
1517 }
1518
1519 static void hists__hierarchy_output_resort(struct hists *hists,
1520                                            struct ui_progress *prog,
1521                                            struct rb_root *root_in,
1522                                            struct rb_root *root_out,
1523                                            u64 min_callchain_hits,
1524                                            bool use_callchain)
1525 {
1526         struct rb_node *node;
1527         struct hist_entry *he;
1528
1529         *root_out = RB_ROOT;
1530         node = rb_first(root_in);
1531
1532         while (node) {
1533                 he = rb_entry(node, struct hist_entry, rb_node_in);
1534                 node = rb_next(node);
1535
1536                 hierarchy_insert_output_entry(root_out, he);
1537
1538                 if (prog)
1539                         ui_progress__update(prog, 1);
1540
1541                 if (!he->leaf) {
1542                         hists__hierarchy_output_resort(hists, prog,
1543                                                        &he->hroot_in,
1544                                                        &he->hroot_out,
1545                                                        min_callchain_hits,
1546                                                        use_callchain);
1547                         hists->nr_entries++;
1548                         if (!he->filtered) {
1549                                 hists->nr_non_filtered_entries++;
1550                                 hists__calc_col_len(hists, he);
1551                         }
1552
1553                         continue;
1554                 }
1555
1556                 if (!use_callchain)
1557                         continue;
1558
1559                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1560                         u64 total = he->stat.period;
1561
1562                         if (symbol_conf.cumulate_callchain)
1563                                 total = he->stat_acc->period;
1564
1565                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1566                 }
1567
1568                 callchain_param.sort(&he->sorted_chain, he->callchain,
1569                                      min_callchain_hits, &callchain_param);
1570         }
1571 }
1572
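     /*
      * Resort the callchain of @he if requested and link the entry into
      * the flat output rbtree in hist_entry__sort() order.
      */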
1573 static void __hists__insert_output_entry(struct rb_root *entries,
1574                                          struct hist_entry *he,
1575                                          u64 min_callchain_hits,
1576                                          bool use_callchain)
1577 {
1578         struct rb_node **p = &entries->rb_node;
1579         struct rb_node *parent = NULL;
1580         struct hist_entry *iter;
1581         struct perf_hpp_fmt *fmt;
1582
1583         if (use_callchain) {
1584                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1585                         u64 total = he->stat.period;
1586
1587                         if (symbol_conf.cumulate_callchain)
1588                                 total = he->stat_acc->period;
1589
1590                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1591                 }
1592                 callchain_param.sort(&he->sorted_chain, he->callchain,
1593                                       min_callchain_hits, &callchain_param);
1594         }
1595
1596         while (*p != NULL) {
1597                 parent = *p;
1598                 iter = rb_entry(parent, struct hist_entry, rb_node);
1599
1600                 if (hist_entry__sort(he, iter) > 0)
1601                         p = &(*p)->rb_left;
1602                 else
1603                         p = &(*p)->rb_right;
1604         }
1605
1606         rb_link_node(&he->rb_node, parent, p);
1607         rb_insert_color(&he->rb_node, entries);
1608
1609         perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1610                 if (perf_hpp__is_dynamic_entry(fmt) &&
1611                     perf_hpp__defined_dynamic_entry(fmt, he->hists))
1612                         fmt->sort(fmt, he, NULL);  /* update column width */
1613         }
1614 }
1615
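     /*
      * Rebuild hists->entries from the collapsed entries, recomputing the
      * stats and column widths.  Hierarchy mode takes a separate path as
      * its output tree is nested per level.
      */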
1616 static void output_resort(struct hists *hists, struct ui_progress *prog,
1617                           bool use_callchain)
1618 {
1619         struct rb_root *root;
1620         struct rb_node *next;
1621         struct hist_entry *n;
1622         u64 callchain_total;
1623         u64 min_callchain_hits;
1624
1625         callchain_total = hists->callchain_period;
1626         if (symbol_conf.filter_relative)
1627                 callchain_total = hists->callchain_non_filtered_period;
1628
1629         min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1630
1631         hists__reset_stats(hists);
1632         hists__reset_col_len(hists);
1633
1634         if (symbol_conf.report_hierarchy) {
1635                 hists__hierarchy_output_resort(hists, prog,
1636                                                &hists->entries_collapsed,
1637                                                &hists->entries,
1638                                                min_callchain_hits,
1639                                                use_callchain);
1640                 hierarchy_recalc_total_periods(hists);
1641                 return;
1642         }
1643
1644         if (hists__has(hists, need_collapse))
1645                 root = &hists->entries_collapsed;
1646         else
1647                 root = hists->entries_in;
1648
1649         next = rb_first(root);
1650         hists->entries = RB_ROOT;
1651
1652         while (next) {
1653                 n = rb_entry(next, struct hist_entry, rb_node_in);
1654                 next = rb_next(&n->rb_node_in);
1655
1656                 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1657                 hists__inc_stats(hists, n);
1658
1659                 if (!n->filtered)
1660                         hists__calc_col_len(hists, n);
1661
1662                 if (prog)
1663                         ui_progress__update(prog, 1);
1664         }
1665 }
1666
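     /*
      * Only honour callchain sorting if the event actually sampled
      * callchains, unless a reference callgraph is being shown.
      */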
1667 void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
1668 {
1669         bool use_callchain;
1670
1671         if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1672                 use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
1673         else
1674                 use_callchain = symbol_conf.use_callchain;
1675
1676         output_resort(evsel__hists(evsel), prog, use_callchain);
1677 }
1678
1679 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1680 {
1681         output_resort(hists, prog, symbol_conf.use_callchain);
1682 }
1683
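     /*
      * Helpers for walking the hierarchy output tree in display order:
      * descend into a child tree only when the entry is unfolded (or the
      * caller forces it), otherwise move to the next sibling or climb
      * back up to the parent.
      */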
1684 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1685 {
1686         if (he->leaf || hmd == HMD_FORCE_SIBLING)
1687                 return false;
1688
1689         if (he->unfolded || hmd == HMD_FORCE_CHILD)
1690                 return true;
1691
1692         return false;
1693 }
1694
1695 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1696 {
1697         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1698
1699         while (can_goto_child(he, HMD_NORMAL)) {
1700                 node = rb_last(&he->hroot_out);
1701                 he = rb_entry(node, struct hist_entry, rb_node);
1702         }
1703         return node;
1704 }
1705
1706 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1707 {
1708         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1709
1710         if (can_goto_child(he, hmd))
1711                 node = rb_first(&he->hroot_out);
1712         else
1713                 node = rb_next(node);
1714
1715         while (node == NULL) {
1716                 he = he->parent_he;
1717                 if (he == NULL)
1718                         break;
1719
1720                 node = rb_next(&he->rb_node);
1721         }
1722         return node;
1723 }
1724
1725 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
1726 {
1727         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1728
1729         node = rb_prev(node);
1730         if (node)
1731                 return rb_hierarchy_last(node);
1732
1733         he = he->parent_he;
1734         if (he == NULL)
1735                 return NULL;
1736
1737         return &he->rb_node;
1738 }
1739
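     /*
      * Check whether @he has at least one unfiltered child whose percent
      * value is at or above @limit.
      */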
1740 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
1741 {
1742         struct rb_node *node;
1743         struct hist_entry *child;
1744         float percent;
1745
1746         if (he->leaf)
1747                 return false;
1748
1749         node = rb_first(&he->hroot_out);
1750         child = rb_entry(node, struct hist_entry, rb_node);
1751
1752         while (node && child->filtered) {
1753                 node = rb_next(node);
1754                 child = rb_entry(node, struct hist_entry, rb_node);
1755         }
1756
1757         if (node)
1758                 percent = hist_entry__get_percent_limit(child);
1759         else
1760                 percent = 0;
1761
1762         return node && percent >= limit;
1763 }
1764
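     /*
      * Clear @filter from @h (and, in hierarchy mode, from its parents);
      * once an entry has no remaining filters, fold it and add it back
      * to the non-filtered stats and column widths.
      */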
1765 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
1766                                        enum hist_filter filter)
1767 {
1768         h->filtered &= ~(1 << filter);
1769
1770         if (symbol_conf.report_hierarchy) {
1771                 struct hist_entry *parent = h->parent_he;
1772
1773                 while (parent) {
1774                         he_stat__add_stat(&parent->stat, &h->stat);
1775
1776                         parent->filtered &= ~(1 << filter);
1777
1778                         if (parent->filtered)
1779                                 goto next;
1780
1781                         /* force fold unfiltered entry for simplicity */
1782                         parent->unfolded = false;
1783                         parent->has_no_entry = false;
1784                         parent->row_offset = 0;
1785                         parent->nr_rows = 0;
1786 next:
1787                         parent = parent->parent_he;
1788                 }
1789         }
1790
1791         if (h->filtered)
1792                 return;
1793
1794         /* force fold unfiltered entry for simplicity */
1795         h->unfolded = false;
1796         h->has_no_entry = false;
1797         h->row_offset = 0;
1798         h->nr_rows = 0;
1799
1800         hists->stats.nr_non_filtered_samples += h->stat.nr_events;
1801
1802         hists__inc_filter_stats(hists, h);
1803         hists__calc_col_len(hists, h);
1804 }
1805
1806
1807 static bool hists__filter_entry_by_dso(struct hists *hists,
1808                                        struct hist_entry *he)
1809 {
1810         if (hists->dso_filter != NULL &&
1811             (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1812                 he->filtered |= (1 << HIST_FILTER__DSO);
1813                 return true;
1814         }
1815
1816         return false;
1817 }
1818
1819 static bool hists__filter_entry_by_thread(struct hists *hists,
1820                                           struct hist_entry *he)
1821 {
1822         if (hists->thread_filter != NULL &&
1823             he->thread != hists->thread_filter) {
1824                 he->filtered |= (1 << HIST_FILTER__THREAD);
1825                 return true;
1826         }
1827
1828         return false;
1829 }
1830
1831 static bool hists__filter_entry_by_symbol(struct hists *hists,
1832                                           struct hist_entry *he)
1833 {
1834         if (hists->symbol_filter_str != NULL &&
1835             (!he->ms.sym || strstr(he->ms.sym->name,
1836                                    hists->symbol_filter_str) == NULL)) {
1837                 he->filtered |= (1 << HIST_FILTER__SYMBOL);
1838                 return true;
1839         }
1840
1841         return false;
1842 }
1843
1844 static bool hists__filter_entry_by_socket(struct hists *hists,
1845                                           struct hist_entry *he)
1846 {
1847         if ((hists->socket_filter > -1) &&
1848             (he->socket != hists->socket_filter)) {
1849                 he->filtered |= (1 << HIST_FILTER__SOCKET);
1850                 return true;
1851         }
1852
1853         return false;
1854 }
1855
1856 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
1857
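     /*
      * Walk the flat output tree and re-apply a filter of the given type:
      * entries rejected by the @filter callback keep the filter bit it
      * sets, the rest have it cleared and are counted again.
      */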
1858 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
1859 {
1860         struct rb_node *nd;
1861
1862         hists->stats.nr_non_filtered_samples = 0;
1863
1864         hists__reset_filter_stats(hists);
1865         hists__reset_col_len(hists);
1866
1867         for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1868                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1869
1870                 if (filter(hists, h))
1871                         continue;
1872
1873                 hists__remove_entry_filter(hists, h, type);
1874         }
1875 }
1876
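     /*
      * Re-insert @he into @root in the (possibly changed) sort order and,
      * unless it is a leaf or filtered entry, rebuild its child tree the
      * same way.
      */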
1877 static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
1878 {
1879         struct rb_node **p = &root->rb_node;
1880         struct rb_node *parent = NULL;
1881         struct hist_entry *iter;
1882         struct rb_root new_root = RB_ROOT;
1883         struct rb_node *nd;
1884
1885         while (*p != NULL) {
1886                 parent = *p;
1887                 iter = rb_entry(parent, struct hist_entry, rb_node);
1888
1889                 if (hist_entry__sort(he, iter) > 0)
1890                         p = &(*p)->rb_left;
1891                 else
1892                         p = &(*p)->rb_right;
1893         }
1894
1895         rb_link_node(&he->rb_node, parent, p);
1896         rb_insert_color(&he->rb_node, root);
1897
1898         if (he->leaf || he->filtered)
1899                 return;
1900
1901         nd = rb_first(&he->hroot_out);
1902         while (nd) {
1903                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1904
1905                 nd = rb_next(nd);
1906                 rb_erase(&h->rb_node, &he->hroot_out);
1907
1908                 resort_filtered_entry(&new_root, h);
1909         }
1910
1911         he->hroot_out = new_root;
1912 }
1913
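     /*
      * Apply a filter to the hierarchy output tree: hist_entry__filter()
      * decides per entry whether the filter does not apply at this level
      * (zero it out and descend to the children), matches (hide it) or
      * passes (keep it).  Totals are then recalculated and the tree is
      * resorted below.
      */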
1914 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
1915 {
1916         struct rb_node *nd;
1917         struct rb_root new_root = RB_ROOT;
1918
1919         hists->stats.nr_non_filtered_samples = 0;
1920
1921         hists__reset_filter_stats(hists);
1922         hists__reset_col_len(hists);
1923
1924         nd = rb_first(&hists->entries);
1925         while (nd) {
1926                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1927                 int ret;
1928
1929                 ret = hist_entry__filter(h, type, arg);
1930
1931                 /*
1932                  * case 1. non-matching type
1933                  * zero out the period, set filter marker and move to child
1934                  */
1935                 if (ret < 0) {
1936                         memset(&h->stat, 0, sizeof(h->stat));
1937                         h->filtered |= (1 << type);
1938
1939                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
1940                 }
1941                 /*
1942                  * case 2. matched type (filter out)
1943                  * set filter marker and move to next
1944                  */
1945                 else if (ret == 1) {
1946                         h->filtered |= (1 << type);
1947
1948                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
1949                 }
1950                 /*
1951                  * case 3. ok (not filtered)
1952                  * add period to hists and parents, erase the filter marker
1953                  * and move to next sibling
1954                  */
1955                 else {
1956                         hists__remove_entry_filter(hists, h, type);
1957
1958                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
1959                 }
1960         }
1961
1962         hierarchy_recalc_total_periods(hists);
1963
1964         /*
1965          * resort output after applying a new filter since filter in a lower
1966          * hierarchy can change periods in an upper hierarchy.
1967          */
1968         nd = rb_first(&hists->entries);
1969         while (nd) {
1970                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1971
1972                 nd = rb_next(nd);
1973                 rb_erase(&h->rb_node, &hists->entries);
1974
1975                 resort_filtered_entry(&new_root, h);
1976         }
1977
1978         hists->entries = new_root;
1979 }
1980
1981 void hists__filter_by_thread(struct hists *hists)
1982 {
1983         if (symbol_conf.report_hierarchy)
1984                 hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
1985                                         hists->thread_filter);
1986         else
1987                 hists__filter_by_type(hists, HIST_FILTER__THREAD,
1988                                       hists__filter_entry_by_thread);
1989 }
1990
1991 void hists__filter_by_dso(struct hists *hists)
1992 {
1993         if (symbol_conf.report_hierarchy)
1994                 hists__filter_hierarchy(hists, HIST_FILTER__DSO,
1995                                         hists->dso_filter);
1996         else
1997                 hists__filter_by_type(hists, HIST_FILTER__DSO,
1998                                       hists__filter_entry_by_dso);
1999 }
2000
2001 void hists__filter_by_symbol(struct hists *hists)
2002 {
2003         if (symbol_conf.report_hierarchy)
2004                 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2005                                         hists->symbol_filter_str);
2006         else
2007                 hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2008                                       hists__filter_entry_by_symbol);
2009 }
2010
2011 void hists__filter_by_socket(struct hists *hists)
2012 {
2013         if (symbol_conf.report_hierarchy)
2014                 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2015                                         &hists->socket_filter);
2016         else
2017                 hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2018                                       hists__filter_entry_by_socket);
2019 }
2020
2021 void events_stats__inc(struct events_stats *stats, u32 type)
2022 {
2023         ++stats->nr_events[0];
2024         ++stats->nr_events[type];
2025 }
2026
2027 void hists__inc_nr_events(struct hists *hists, u32 type)
2028 {
2029         events_stats__inc(&hists->stats, type);
2030 }
2031
2032 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2033 {
2034         events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2035         if (!filtered)
2036                 hists->stats.nr_non_filtered_samples++;
2037 }
2038
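     /*
      * Find an entry collapsing to @pair in @hists, or insert a new dummy
      * entry with zeroed stats for it if none exists yet.
      */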
2039 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2040                                                  struct hist_entry *pair)
2041 {
2042         struct rb_root *root;
2043         struct rb_node **p;
2044         struct rb_node *parent = NULL;
2045         struct hist_entry *he;
2046         int64_t cmp;
2047
2048         if (hists__has(hists, need_collapse))
2049                 root = &hists->entries_collapsed;
2050         else
2051                 root = hists->entries_in;
2052
2053         p = &root->rb_node;
2054
2055         while (*p != NULL) {
2056                 parent = *p;
2057                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2058
2059                 cmp = hist_entry__collapse(he, pair);
2060
2061                 if (!cmp)
2062                         goto out;
2063
2064                 if (cmp < 0)
2065                         p = &(*p)->rb_left;
2066                 else
2067                         p = &(*p)->rb_right;
2068         }
2069
2070         he = hist_entry__new(pair, true);
2071         if (he) {
2072                 memset(&he->stat, 0, sizeof(he->stat));
2073                 he->hists = hists;
2074                 if (symbol_conf.cumulate_callchain)
2075                         memset(he->stat_acc, 0, sizeof(*he->stat_acc));
2076                 rb_link_node(&he->rb_node_in, parent, p);
2077                 rb_insert_color(&he->rb_node_in, root);
2078                 hists__inc_stats(hists, he);
2079                 he->dummy = true;
2080         }
2081 out:
2082         return he;
2083 }
2084
2085 static struct hist_entry *hists__find_entry(struct hists *hists,
2086                                             struct hist_entry *he)
2087 {
2088         struct rb_node *n;
2089
2090         if (hists__has(hists, need_collapse))
2091                 n = hists->entries_collapsed.rb_node;
2092         else
2093                 n = hists->entries_in->rb_node;
2094
2095         while (n) {
2096                 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2097                 int64_t cmp = hist_entry__collapse(iter, he);
2098
2099                 if (cmp < 0)
2100                         n = n->rb_left;
2101                 else if (cmp > 0)
2102                         n = n->rb_right;
2103                 else
2104                         return iter;
2105         }
2106
2107         return NULL;
2108 }
2109
2110 /*
2111  * Look for pairs to link to the leader buckets (hist_entries):
2112  */
2113 void hists__match(struct hists *leader, struct hists *other)
2114 {
2115         struct rb_root *root;
2116         struct rb_node *nd;
2117         struct hist_entry *pos, *pair;
2118
2119         if (hists__has(leader, need_collapse))
2120                 root = &leader->entries_collapsed;
2121         else
2122                 root = leader->entries_in;
2123
2124         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2125                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2126                 pair = hists__find_entry(other, pos);
2127
2128                 if (pair)
2129                         hist_entry__add_pair(pair, pos);
2130         }
2131 }
2132
2133 /*
2134  * Look for entries in the other hists that are not present in the leader, if
2135  * we find them, just add a dummy entry on the leader hists, with period=0,
2136  * nr_events=0, to serve as the list header.
2137  */
2138 int hists__link(struct hists *leader, struct hists *other)
2139 {
2140         struct rb_root *root;
2141         struct rb_node *nd;
2142         struct hist_entry *pos, *pair;
2143
2144         if (hists__has(other, need_collapse))
2145                 root = &other->entries_collapsed;
2146         else
2147                 root = other->entries_in;
2148
2149         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2150                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2151
2152                 if (!hist_entry__has_pairs(pos)) {
2153                         pair = hists__add_dummy_entry(leader, pos);
2154                         if (pair == NULL)
2155                                 return -1;
2156                         hist_entry__add_pair(pos, pair);
2157                 }
2158         }
2159
2160         return 0;
2161 }
2162
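     /*
      * Feed the per-branch cycle counts of @bs into
      * addr_map_symbol__account_cycles(), walking the entries in program
      * order (perf records them newest first).
      */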
2163 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2164                           struct perf_sample *sample, bool nonany_branch_mode)
2165 {
2166         struct branch_info *bi;
2167
2168         /* If we have branch cycles, always annotate them. */
2169         if (bs && bs->nr && bs->entries[0].flags.cycles) {
2170                 int i;
2171
2172                 bi = sample__resolve_bstack(sample, al);
2173                 if (bi) {
2174                         struct addr_map_symbol *prev = NULL;
2175
2176                         /*
2177                          * Ignore errors, still want to process the
2178                          * other entries.
2179                          *
2180                          * For non-standard branch modes, always
2181                          * force no IPC (prev == NULL).
2182                          *
2183                          * Note that perf stores branches reversed from
2184                          * program order!
2185                          */
2186                         for (i = bs->nr - 1; i >= 0; i--) {
2187                                 addr_map_symbol__account_cycles(&bi[i].from,
2188                                         nonany_branch_mode ? NULL : prev,
2189                                         bi[i].flags.cycles);
2190                                 prev = &bi[i].to;
2191                         }
2192                         free(bi);
2193                 }
2194         }
2195 }
2196
2197 size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
2198 {
2199         struct perf_evsel *pos;
2200         size_t ret = 0;
2201
2202         evlist__for_each(evlist, pos) {
2203                 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
2204                 ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2205         }
2206
2207         return ret;
2208 }
2209
2210
2211 u64 hists__total_period(struct hists *hists)
2212 {
2213         return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2214                 hists->stats.total_period;
2215 }
2216
2217 int parse_filter_percentage(const struct option *opt __maybe_unused,
2218                             const char *arg, int unset __maybe_unused)
2219 {
2220         if (!strcmp(arg, "relative"))
2221                 symbol_conf.filter_relative = true;
2222         else if (!strcmp(arg, "absolute"))
2223                 symbol_conf.filter_relative = false;
2224         else
2225                 return -1;
2226
2227         return 0;
2228 }
2229
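     /* Handle the "hist.percentage" config key: "relative" or "absolute". */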
2230 int perf_hist_config(const char *var, const char *value)
2231 {
2232         if (!strcmp(var, "hist.percentage"))
2233                 return parse_filter_percentage(NULL, value, 0);
2234
2235         return 0;
2236 }
2237
2238 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2239 {
2240         memset(hists, 0, sizeof(*hists));
2241         hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
2242         hists->entries_in = &hists->entries_in_array[0];
2243         hists->entries_collapsed = RB_ROOT;
2244         hists->entries = RB_ROOT;
2245         pthread_mutex_init(&hists->lock, NULL);
2246         hists->socket_filter = -1;
2247         hists->hpp_list = hpp_list;
2248         INIT_LIST_HEAD(&hists->hpp_formats);
2249         return 0;
2250 }
2251
2252 static void hists__delete_remaining_entries(struct rb_root *root)
2253 {
2254         struct rb_node *node;
2255         struct hist_entry *he;
2256
2257         while (!RB_EMPTY_ROOT(root)) {
2258                 node = rb_first(root);
2259                 rb_erase(node, root);
2260
2261                 he = rb_entry(node, struct hist_entry, rb_node_in);
2262                 hist_entry__delete(he);
2263         }
2264 }
2265
2266 static void hists__delete_all_entries(struct hists *hists)
2267 {
2268         hists__delete_entries(hists);
2269         hists__delete_remaining_entries(&hists->entries_in_array[0]);
2270         hists__delete_remaining_entries(&hists->entries_in_array[1]);
2271         hists__delete_remaining_entries(&hists->entries_collapsed);
2272 }
2273
2274 static void hists_evsel__exit(struct perf_evsel *evsel)
2275 {
2276         struct hists *hists = evsel__hists(evsel);
2277         struct perf_hpp_fmt *fmt, *pos;
2278         struct perf_hpp_list_node *node, *tmp;
2279
2280         hists__delete_all_entries(hists);
2281
2282         list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2283                 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2284                         list_del(&fmt->list);
2285                         free(fmt);
2286                 }
2287                 list_del(&node->list);
2288                 free(node);
2289         }
2290 }
2291
2292 static int hists_evsel__init(struct perf_evsel *evsel)
2293 {
2294         struct hists *hists = evsel__hists(evsel);
2295
2296         __hists__init(hists, &perf_hpp_list);
2297         return 0;
2298 }
2299
2300 /*
2301  * The hist_entries stored in the rbtrees are freed by hists_evsel__exit()
2302  * above, via hists__delete_all_entries().
2303  */
2304
2305 int hists__init(void)
2306 {
2307         int err = perf_evsel__object_config(sizeof(struct hists_evsel),
2308                                             hists_evsel__init,
2309                                             hists_evsel__exit);
2310         if (err)
2311                 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2312
2313         return err;
2314 }
2315
2316 void perf_hpp_list__init(struct perf_hpp_list *list)
2317 {
2318         INIT_LIST_HEAD(&list->fields);
2319         INIT_LIST_HEAD(&list->sorts);
2320 }