perf stat: Handle scaled == -1 case for counters
[cascardo/linux.git] / tools / perf / builtin-stat.c
1 /*
2  * builtin-stat.c
3  *
4  * Builtin stat command: Give a precise performance counters summary
5  * overview about any workload, CPU or specific PID.
6  *
7  * Sample output:
8
9    $ perf stat ./hackbench 10
10
11   Time: 0.118
12
13   Performance counter stats for './hackbench 10':
14
15        1708.761321 task-clock                #   11.037 CPUs utilized
16             41,190 context-switches          #    0.024 M/sec
17              6,735 CPU-migrations            #    0.004 M/sec
18             17,318 page-faults               #    0.010 M/sec
19      5,205,202,243 cycles                    #    3.046 GHz
20      3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
21      1,600,790,871 stalled-cycles-backend    #   30.75% backend  cycles idle
22      2,603,501,247 instructions              #    0.50  insns per cycle
23                                              #    1.48  stalled cycles per insn
24        484,357,498 branches                  #  283.455 M/sec
25          6,388,934 branch-misses             #    1.32% of all branches
26
27         0.154822978  seconds time elapsed
28
29  *
30  * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
31  *
32  * Improvements and fixes by:
33  *
34  *   Arjan van de Ven <arjan@linux.intel.com>
35  *   Yanmin Zhang <yanmin.zhang@intel.com>
36  *   Wu Fengguang <fengguang.wu@intel.com>
37  *   Mike Galbraith <efault@gmx.de>
38  *   Paul Mackerras <paulus@samba.org>
39  *   Jaswinder Singh Rajput <jaswinder@kernel.org>
40  *
41  * Released under the GPL v2. (and only v2, not any later version)
42  */
43
44 #include "perf.h"
45 #include "builtin.h"
46 #include "util/cgroup.h"
47 #include "util/util.h"
48 #include <subcmd/parse-options.h>
49 #include "util/parse-events.h"
50 #include "util/pmu.h"
51 #include "util/event.h"
52 #include "util/evlist.h"
53 #include "util/evsel.h"
54 #include "util/debug.h"
55 #include "util/color.h"
56 #include "util/stat.h"
57 #include "util/header.h"
58 #include "util/cpumap.h"
59 #include "util/thread.h"
60 #include "util/thread_map.h"
61 #include "util/counts.h"
62 #include "util/session.h"
63 #include "util/tool.h"
64 #include "asm/bug.h"
65
66 #include <stdlib.h>
67 #include <sys/prctl.h>
68 #include <locale.h>
69
70 #define DEFAULT_SEPARATOR       " "
71 #define CNTR_NOT_SUPPORTED      "<not supported>"
72 #define CNTR_NOT_COUNTED        "<not counted>"
73
74 static void print_counters(struct timespec *ts, int argc, const char **argv);
75
/*
 * Default events used for perf stat -T: a task clock plus one group of
 * instruction/cycle counters and Intel TSX transaction events.
 */
static const char *transaction_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/,"
	"cpu/el-start/,"
	"cpu/cycles-ct/"
	"}"
};
88
/*
 * More limited version when the CPU does not have all events:
 * drops the elision-start and in-transaction-cycle counters.
 */
static const char * transaction_limited_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/"
	"}"
};
99
100 static struct perf_evlist       *evsel_list;
101
102 static struct target target = {
103         .uid    = UINT_MAX,
104 };
105
106 typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
107
108 static int                      run_count                       =  1;
109 static bool                     no_inherit                      = false;
110 static volatile pid_t           child_pid                       = -1;
111 static bool                     null_run                        =  false;
112 static int                      detailed_run                    =  0;
113 static bool                     transaction_run;
114 static bool                     big_num                         =  true;
115 static int                      big_num_opt                     =  -1;
116 static const char               *csv_sep                        = NULL;
117 static bool                     csv_output                      = false;
118 static bool                     group                           = false;
119 static const char               *pre_cmd                        = NULL;
120 static const char               *post_cmd                       = NULL;
121 static bool                     sync_run                        = false;
122 static unsigned int             initial_delay                   = 0;
123 static unsigned int             unit_width                      = 4; /* strlen("unit") */
124 static bool                     forever                         = false;
125 static struct timespec          ref_time;
126 static struct cpu_map           *aggr_map;
127 static aggr_get_id_t            aggr_get_id;
128 static bool                     append_file;
129 static const char               *output_name;
130 static int                      output_fd;
131
/* Session-wide state used by 'perf stat record' (and its report side). */
struct perf_stat {
	bool                     record;	/* true when recording to perf.data */
	struct perf_data_file    file;		/* output file (or pipe) */
	struct perf_session     *session;
	u64                      bytes_written;	/* running total written to file */
	struct perf_tool         tool;
	bool                     maps_allocated;
	struct cpu_map          *cpus;
	struct thread_map       *threads;
	enum aggr_mode           aggr_mode;
};
143
144 static struct perf_stat         perf_stat;
145 #define STAT_RECORD             perf_stat.record
146
147 static volatile int done = 0;
148
149 static struct perf_stat_config stat_config = {
150         .aggr_mode      = AGGR_GLOBAL,
151         .scale          = true,
152 };
153
/*
 * Store in @r the difference @a - @b, normalizing tv_nsec into
 * [0, 1e9) by borrowing one second when needed. Assumes a >= b.
 */
static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	long nsec_delta = a->tv_nsec - b->tv_nsec;

	r->tv_sec = a->tv_sec - b->tv_sec;
	if (nsec_delta < 0) {
		/* Borrow a second from the seconds field. */
		r->tv_nsec = nsec_delta + 1000000000L;
		r->tv_sec--;
	} else {
		r->tv_nsec = nsec_delta;
	}
}
165
/*
 * Reset all accumulated counter statistics (per-evsel counts and the
 * shadow-metric state) before another run of the workload (-r / forever).
 */
static void perf_stat__reset_stats(void)
{
	perf_evlist__reset_stats(evsel_list);
	perf_stat__reset_shadow_stats();
}
171
/*
 * Configure @evsel's perf_event_attr for counting (not sampling) and
 * open it for the current target.
 *
 * Returns the result of the per-cpu or per-thread open; < 0 on error
 * (errno describes the failure).
 */
static int create_perf_stat_counter(struct perf_evsel *evsel)
{
	struct perf_event_attr *attr = &evsel->attr;

	/* Ask the kernel for enabled/running times so counts can be scaled. */
	if (stat_config.scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	attr->inherit = !no_inherit;

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear it up for counting.
	 */
	attr->sample_period = 0;

	/*
	 * But set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
	 * while avoiding that older tools show confusing messages.
	 *
	 * However for pipe sessions we need to keep it zero,
	 * because script's perf_evsel__check_attr is triggered
	 * by attr->sample_type != 0, and we can't run it on
	 * stat sessions.
	 */
	if (!(STAT_RECORD && perf_stat.file.is_pipe))
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	/*
	 * Disabling all counters initially, they will be enabled
	 * either manually by us or by kernel via enable_on_exec
	 * set later.
	 */
	if (perf_evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay we enable tracee
		 * events manually.
		 */
		if (target__none(&target) && !initial_delay)
			attr->enable_on_exec = 1;
	}

	/* CPU-oriented targets open per cpu; otherwise per thread. */
	if (target__has_cpu(&target))
		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));

	return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}
221
222 /*
223  * Does the counter have nsecs as a unit?
224  */
225 static inline int nsec_counter(struct perf_evsel *evsel)
226 {
227         if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
228             perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
229                 return 1;
230
231         return 0;
232 }
233
234 static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
235                                      union perf_event *event,
236                                      struct perf_sample *sample __maybe_unused,
237                                      struct machine *machine __maybe_unused)
238 {
239         if (perf_data_file__write(&perf_stat.file, event, event->header.size) < 0) {
240                 pr_err("failed to write perf data, error: %m\n");
241                 return -1;
242         }
243
244         perf_stat.bytes_written += event->header.size;
245         return 0;
246 }
247
/*
 * Synthesize a PERF_RECORD_STAT_ROUND event marking the end of one
 * measurement round (@tm nsecs into the run, @type interval or final).
 */
static int write_stat_round_event(u64 tm, u64 type)
{
	return perf_event__synthesize_stat_round(NULL, tm, type,
						 process_synthesized_event,
						 NULL);
}
254
255 #define WRITE_STAT_ROUND_EVENT(time, interval) \
256         write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
257
258 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
259
/*
 * Synthesize a PERF_RECORD_STAT event carrying one (cpu, thread)
 * reading of @counter, keyed by the sample id registered earlier by
 * store_counter_ids().
 */
static int
perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
			     struct perf_counts_values *count)
{
	struct perf_sample_id *sid = SID(counter, cpu, thread);

	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
					   process_synthesized_event, NULL);
}
269
/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 *
 * Fills every (cpu, thread) cell of counter->counts and, under
 * 'perf stat record', also writes one stat event per reading.
 *
 * Returns 0 on success, -ENOENT if the counter is unsupported,
 * -1 on a read or event-write failure.
 */
static int read_counter(struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(evsel_list->threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (!counter->supported)
		return -ENOENT;

	/* System-wide counters keep a single value at thread index 0. */
	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			struct perf_counts_values *count;

			count = perf_counts(counter->counts, cpu, thread);
			if (perf_evsel__read(counter, cpu, thread, count))
				return -1;

			if (STAT_RECORD) {
				if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
					pr_err("failed to write stat event\n");
					return -1;
				}
			}
		}
	}

	return 0;
}
305
/*
 * Read and post-process every counter in the evlist. Per-counter
 * failures are reported but do not stop the remaining counters from
 * being read.
 *
 * @close_counters: true on the final read so the counter fds are
 * released afterwards; false for interval reads.
 */
static void read_counters(bool close_counters)
{
	struct perf_evsel *counter;

	evlist__for_each(evsel_list, counter) {
		if (read_counter(counter))
			pr_debug("failed to read counter %s\n", counter->name);

		if (perf_stat_process_counter(&stat_config, counter))
			pr_warning("failed to process counter %s\n", counter->name);

		if (close_counters) {
			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
					     thread_map__nr(evsel_list->threads));
		}
	}
}
323
/*
 * Service one --interval tick: snapshot all counters (without closing
 * them), compute the offset from the run's reference time, record a
 * round event when in 'stat record' mode, and print the interval.
 */
static void process_interval(void)
{
	struct timespec ts, rs;

	read_counters(false);

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);

	if (STAT_RECORD) {
		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSECS_PER_SEC + rs.tv_nsec, INTERVAL))
			pr_err("failed to write stat round event\n");
	}

	print_counters(&rs, 0, NULL);
}
340
/*
 * Start the counters ticking, honouring --delay (initial_delay, in
 * msecs) first. When a forked tracee exists and no delay was asked
 * for, the kernel enables the counters via enable_on_exec instead.
 */
static void enable_counters(void)
{
	if (initial_delay)
		usleep(initial_delay * 1000);

	/*
	 * We need to enable counters only if:
	 * - we don't have tracee (attaching to task or cpu)
	 * - we have initial delay configured
	 */
	if (!target__none(&target) || initial_delay)
		perf_evlist__enable(evsel_list);
}
354
355 static volatile int workload_exec_errno;
356
/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* The child's errno travels in the queued signal's value payload. */
	workload_exec_errno = info->si_value.sival_int;
}
367
368 static bool has_unit(struct perf_evsel *counter)
369 {
370         return counter->unit && *counter->unit;
371 }
372
373 static bool has_scale(struct perf_evsel *counter)
374 {
375         return counter->scale != 1;
376 }
377
378 static int perf_stat_synthesize_config(bool is_pipe)
379 {
380         struct perf_evsel *counter;
381         int err;
382
383         if (is_pipe) {
384                 err = perf_event__synthesize_attrs(NULL, perf_stat.session,
385                                                    process_synthesized_event);
386                 if (err < 0) {
387                         pr_err("Couldn't synthesize attrs.\n");
388                         return err;
389                 }
390         }
391
392         /*
393          * Synthesize other events stuff not carried within
394          * attr event - unit, scale, name
395          */
396         evlist__for_each(evsel_list, counter) {
397                 if (!counter->supported)
398                         continue;
399
400                 /*
401                  * Synthesize unit and scale only if it's defined.
402                  */
403                 if (has_unit(counter)) {
404                         err = perf_event__synthesize_event_update_unit(NULL, counter, process_synthesized_event);
405                         if (err < 0) {
406                                 pr_err("Couldn't synthesize evsel unit.\n");
407                                 return err;
408                         }
409                 }
410
411                 if (has_scale(counter)) {
412                         err = perf_event__synthesize_event_update_scale(NULL, counter, process_synthesized_event);
413                         if (err < 0) {
414                                 pr_err("Couldn't synthesize evsel scale.\n");
415                                 return err;
416                         }
417                 }
418
419                 if (counter->own_cpus) {
420                         err = perf_event__synthesize_event_update_cpus(NULL, counter, process_synthesized_event);
421                         if (err < 0) {
422                                 pr_err("Couldn't synthesize evsel scale.\n");
423                                 return err;
424                         }
425                 }
426
427                 /*
428                  * Name is needed only for pipe output,
429                  * perf.data carries event names.
430                  */
431                 if (is_pipe) {
432                         err = perf_event__synthesize_event_update_name(NULL, counter, process_synthesized_event);
433                         if (err < 0) {
434                                 pr_err("Couldn't synthesize evsel name.\n");
435                                 return err;
436                         }
437                 }
438         }
439
440         err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads,
441                                                 process_synthesized_event,
442                                                 NULL);
443         if (err < 0) {
444                 pr_err("Couldn't synthesize thread map.\n");
445                 return err;
446         }
447
448         err = perf_event__synthesize_cpu_map(NULL, evsel_list->cpus,
449                                              process_synthesized_event, NULL);
450         if (err < 0) {
451                 pr_err("Couldn't synthesize thread map.\n");
452                 return err;
453         }
454
455         err = perf_event__synthesize_stat_config(NULL, &stat_config,
456                                                  process_synthesized_event, NULL);
457         if (err < 0) {
458                 pr_err("Couldn't synthesize config.\n");
459                 return err;
460         }
461
462         return 0;
463 }
464
465 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
466
/*
 * Walk every (cpu, thread) fd of @counter and register its kernel
 * sample id with the evlist, so later stat events can be matched back
 * to this counter. Returns 0 on success, -1 if an id cannot be added.
 */
static int __store_counter_ids(struct perf_evsel *counter,
			       struct cpu_map *cpus,
			       struct thread_map *threads)
{
	int cpu, thread;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd = FD(counter, cpu, thread);

			if (perf_evlist__id_add_fd(evsel_list, counter,
						   cpu, thread, fd) < 0)
				return -1;
		}
	}

	return 0;
}
485
/*
 * Allocate @counter's sample-id arrays and record the kernel ids of
 * all of its open fds. Only needed for 'perf stat record'.
 * Returns 0 on success, -ENOMEM/-1 on failure.
 */
static int store_counter_ids(struct perf_evsel *counter)
{
	struct cpu_map *cpus = counter->cpus;
	struct thread_map *threads = counter->threads;

	if (perf_evsel__alloc_id(counter, cpus->nr, threads->nr))
		return -ENOMEM;

	return __store_counter_ids(counter, cpus, threads);
}
496
/*
 * One full measurement: open all counters, launch (or attach to) the
 * workload, service --interval ticks, and collect the final counts.
 *
 * Returns the workload's exit status on success, a negative value on
 * setup or collection failure.
 */
static int __run_perf_stat(int argc, const char **argv)
{
	int interval = stat_config.interval;
	char msg[512];
	unsigned long long t0, t1;
	struct perf_evsel *counter;
	struct timespec ts;
	size_t l;
	int status = 0;
	const bool forks = (argc > 0);
	bool is_pipe = STAT_RECORD ? perf_stat.file.is_pipe : false;

	/* Sleep period: -I interval (msecs) or a 1s poll of 'done'. */
	if (interval) {
		ts.tv_sec  = interval / 1000;
		ts.tv_nsec = (interval % 1000) * 1000000;
	} else {
		ts.tv_sec  = 1;
		ts.tv_nsec = 0;
	}

	/* Fork the workload stopped; it execs at start_workload time. */
	if (forks) {
		if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
						  workload_exec_failed_signal) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (group)
		perf_evlist__set_leader(evsel_list);

	evlist__for_each(evsel_list, counter) {
		if (create_perf_stat_counter(counter) < 0) {
			/*
			 * PPC returns ENXIO for HW counters until 2.6.37
			 * (behavior changed with commit b0a873e).
			 */
			if (errno == EINVAL || errno == ENOSYS ||
			    errno == ENOENT || errno == EOPNOTSUPP ||
			    errno == ENXIO) {
				if (verbose)
					ui__warning("%s event is not supported by the kernel.\n",
						    perf_evsel__name(counter));
				counter->supported = false;

				/*
				 * Tolerate the failure quietly unless the
				 * unsupported event leads a real group.
				 */
				if ((counter->leader != counter) ||
				    !(counter->leader->nr_members > 1))
					continue;
			}

			perf_evsel__open_strerror(counter, &target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);

			if (child_pid != -1)
				kill(child_pid, SIGTERM);

			return -1;
		}
		counter->supported = true;

		/* Track the widest unit string for column alignment. */
		l = strlen(counter->unit);
		if (l > unit_width)
			unit_width = l;

		if (STAT_RECORD && store_counter_ids(counter))
			return -1;
	}

	if (perf_evlist__apply_filters(evsel_list, &counter)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			counter->filter, perf_evsel__name(counter), errno,
			strerror_r(errno, msg, sizeof(msg)));
		return -1;
	}

	/* 'stat record': emit the file/pipe header and config metadata. */
	if (STAT_RECORD) {
		int err, fd = perf_data_file__fd(&perf_stat.file);

		if (is_pipe) {
			err = perf_header__write_pipe(perf_data_file__fd(&perf_stat.file));
		} else {
			err = perf_session__write_header(perf_stat.session, evsel_list,
							 fd, false);
		}

		if (err < 0)
			return err;

		err = perf_stat_synthesize_config(is_pipe);
		if (err < 0)
			return err;
	}

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();
	clock_gettime(CLOCK_MONOTONIC, &ref_time);

	if (forks) {
		perf_evlist__start_workload(evsel_list);
		enable_counters();

		/* Poll the child between interval prints until it exits. */
		if (interval) {
			while (!waitpid(child_pid, &status, WNOHANG)) {
				nanosleep(&ts, NULL);
				process_interval();
			}
		}
		wait(&status);

		if (workload_exec_errno) {
			const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
			pr_err("Workload failed: %s\n", emsg);
			return -1;
		}

		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		/* Attach mode: run until a signal handler sets 'done'. */
		enable_counters();
		while (!done) {
			nanosleep(&ts, NULL);
			if (interval)
				process_interval();
		}
	}

	t1 = rdclock();

	update_stats(&walltime_nsecs_stats, t1 - t0);

	/* Final snapshot; also closes all counter fds. */
	read_counters(true);

	return WEXITSTATUS(status);
}
635
636 static int run_perf_stat(int argc, const char **argv)
637 {
638         int ret;
639
640         if (pre_cmd) {
641                 ret = system(pre_cmd);
642                 if (ret)
643                         return ret;
644         }
645
646         if (sync_run)
647                 sync();
648
649         ret = __run_perf_stat(argc, argv);
650         if (ret)
651                 return ret;
652
653         if (post_cmd) {
654                 ret = system(post_cmd);
655                 if (ret)
656                         return ret;
657         }
658
659         return ret;
660 }
661
662 static void print_running(u64 run, u64 ena)
663 {
664         if (csv_output) {
665                 fprintf(stat_config.output, "%s%" PRIu64 "%s%.2f",
666                                         csv_sep,
667                                         run,
668                                         csv_sep,
669                                         ena ? 100.0 * run / ena : 100.0);
670         } else if (run != ena) {
671                 fprintf(stat_config.output, "  (%.2f%%)", 100.0 * run / ena);
672         }
673 }
674
675 static void print_noise_pct(double total, double avg)
676 {
677         double pct = rel_stddev_stats(total, avg);
678
679         if (csv_output)
680                 fprintf(stat_config.output, "%s%.2f%%", csv_sep, pct);
681         else if (pct)
682                 fprintf(stat_config.output, "  ( +-%6.2f%% )", pct);
683 }
684
/*
 * Print run-to-run variation for @evsel when the workload was repeated
 * (-r); a single run has no noise to report.
 */
static void print_noise(struct perf_evsel *evsel, double avg)
{
	struct perf_stat_evsel *ps;

	if (run_count == 1)
		return;

	ps = evsel->priv;
	print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
}
695
/*
 * Print the leading aggregation-unit column for a counter line:
 * socket+core (-aggr core), socket (--per-socket), cpu (-A),
 * comm-pid (--per-thread), or nothing for global aggregation.
 * Field widths collapse to 0 in CSV mode.
 */
static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
{
	switch (stat_config.aggr_mode) {
	case AGGR_CORE:
		fprintf(stat_config.output, "S%d-C%*d%s%*d%s",
			cpu_map__id_to_socket(id),
			csv_output ? 0 : -8,
			cpu_map__id_to_cpu(id),
			csv_sep,
			csv_output ? 0 : 4,
			nr,
			csv_sep);
		break;
	case AGGR_SOCKET:
		fprintf(stat_config.output, "S%*d%s%*d%s",
			csv_output ? 0 : -5,
			id,
			csv_sep,
			csv_output ? 0 : 4,
			nr,
			csv_sep);
			break;
	case AGGR_NONE:
		fprintf(stat_config.output, "CPU%*d%s",
			csv_output ? 0 : -4,
			perf_evsel__cpus(evsel)->map[id], csv_sep);
		break;
	case AGGR_THREAD:
		fprintf(stat_config.output, "%*s-%*d%s",
			csv_output ? 0 : 16,
			thread_map__comm(evsel->threads, id),
			csv_output ? 0 : -8,
			thread_map__pid(evsel->threads, id),
			csv_sep);
		break;
	case AGGR_GLOBAL:
	case AGGR_UNSET:
	default:
		/* Global aggregation has no per-unit column. */
		break;
	}
}
737
/* State shared by the stdio metric-printing callbacks. */
struct outstate {
	FILE *fh;		/* destination stream */
	bool newline;		/* line break pending before the next metric */
	const char *prefix;	/* interval prefix, re-emitted after breaks */
};
743
744 #define METRIC_LEN  35
745
/*
 * perf_stat_output_ctx new-line callback: only flag that a line break
 * is pending; print_metric_std() emits it lazily before the next metric.
 */
static void new_line_std(void *ctx)
{
	struct outstate *os = ctx;

	os->newline = true;
}
752
753 static void do_new_line_std(struct outstate *os)
754 {
755         fputc('\n', os->fh);
756         fputs(os->prefix, os->fh);
757         if (stat_config.aggr_mode == AGGR_NONE)
758                 fprintf(os->fh, "        ");
759         if (stat_config.aggr_mode == AGGR_CORE)
760                 fprintf(os->fh, "                  ");
761         if (stat_config.aggr_mode == AGGR_SOCKET)
762                 fprintf(os->fh, "            ");
763         fprintf(os->fh, "                                                 ");
764 }
765
/*
 * perf_stat_output_ctx metric callback for standard output: print
 * " # <value> <unit>" padded to METRIC_LEN, honouring an optional
 * color and a pending line break. A NULL unit/fmt means "no metric" -
 * just pad the column.
 */
static void print_metric_std(void *ctx, const char *color, const char *fmt,
			     const char *unit, double val)
{
	struct outstate *os = ctx;
	FILE *out = os->fh;
	int n;
	/* Consume the pending-newline flag before any early return. */
	bool newline = os->newline;

	os->newline = false;

	if (unit == NULL || fmt == NULL) {
		fprintf(out, "%-*s", METRIC_LEN, "");
		return;
	}

	if (newline)
		do_new_line_std(os);

	n = fprintf(out, " # ");
	if (color)
		n += color_fprintf(out, color, fmt, val);
	else
		n += fprintf(out, fmt, val);
	/* Pad the unit so all metric columns line up. */
	fprintf(out, " %-*s", METRIC_LEN - n - 1, unit);
}
791
/*
 * Print a nanosecond-unit counter (cpu-clock/task-clock) as
 * milliseconds, with the aggregation column, unit, event name and
 * optional cgroup appended.
 */
static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
{
	FILE *output = stat_config.output;
	/* Counter value is in nsecs; display msecs. */
	double msecs = avg / 1e6;
	const char *fmt_v, *fmt_n;
	char name[25];

	fmt_v = csv_output ? "%.6f%s" : "%18.6f%s";
	fmt_n = csv_output ? "%s" : "%-25s";

	aggr_printout(evsel, id, nr);

	scnprintf(name, sizeof(name), "%s%s",
		  perf_evsel__name(evsel), csv_output ? "" : " (msec)");

	fprintf(output, fmt_v, msecs, csv_sep);

	if (csv_output)
		fprintf(output, "%s%s", evsel->unit, csv_sep);
	else
		fprintf(output, "%-*s%s", unit_width, evsel->unit, csv_sep);

	fprintf(output, fmt_n, name);

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
}
819
/*
 * Print a plain-count counter value: two decimals when a scale factor
 * applies, integer otherwise, with thousands grouping (') unless
 * --no-big-num or CSV. Follows with unit, name and optional cgroup.
 */
static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
{
	FILE *output = stat_config.output;
	double sc =  evsel->scale;
	const char *fmt;

	if (csv_output) {
		fmt = sc != 1.0 ?  "%.2f%s" : "%.0f%s";
	} else {
		if (big_num)
			fmt = sc != 1.0 ? "%'18.2f%s" : "%'18.0f%s";
		else
			fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s";
	}

	aggr_printout(evsel, id, nr);

	fprintf(output, fmt, avg, csv_sep);

	if (evsel->unit)
		fprintf(output, "%-*s%s",
			csv_output ? 0 : unit_width,
			evsel->unit, csv_sep);

	fprintf(output, "%-*s", csv_output ? 0 : 25, perf_evsel__name(evsel));

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
}
849
/*
 * Print one full output line for a counter: aggregation column, the
 * value (or a <not counted>/<not supported> placeholder), unit, name,
 * shadow metrics, noise and running percentage.
 *
 * @uval:      scaled, aggregated value to print
 * @run, @ena: running/enabled times backing the "(xx%)" multiplex tag
 * @noise:     value handed to print_noise() for -r runs
 */
static void printout(int id, int nr, struct perf_evsel *counter, double uval,
		     char *prefix, u64 run, u64 ena, double noise)
{
	struct perf_stat_output_ctx out;
	struct outstate os = {
		.fh = stat_config.output,
		.prefix = prefix ? prefix : ""
	};
	print_metric_t pm = print_metric_std;
	void (*nl)(void *);

	nl = new_line_std;

	/*
	 * Counter never ran, or reading it failed (scaled == -1):
	 * print a placeholder instead of a bogus value.
	 */
	if (run == 0 || ena == 0 || counter->counts->scaled == -1) {
		aggr_printout(counter, id, nr);

		fprintf(stat_config.output, "%*s%s",
			csv_output ? 0 : 18,
			counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
			csv_sep);

		fprintf(stat_config.output, "%-*s%s",
			csv_output ? 0 : unit_width,
			counter->unit, csv_sep);

		fprintf(stat_config.output, "%*s",
			csv_output ? 0 : -25,
			perf_evsel__name(counter));

		if (counter->cgrp)
			fprintf(stat_config.output, "%s%s",
				csv_sep, counter->cgrp->name);

		print_running(run, ena);
		return;
	}

	if (nsec_counter(counter))
		nsec_printout(id, nr, counter, uval);
	else
		abs_printout(id, nr, counter, uval);

	out.print_metric = pm;
	out.new_line = nl;
	out.ctx = &os;

	/* Shadow metrics (IPC, GHz, ...) only appear in human output. */
	if (!csv_output)
		perf_stat__print_shadow_stats(counter, uval,
				stat_config.aggr_mode == AGGR_GLOBAL ? 0 :
				cpu_map__id_to_cpu(id),
				&out);

	print_noise(counter, noise);
	print_running(run, ena);
}
905
/*
 * Print counts aggregated per socket or per core: for every aggregation
 * id, sum val/ena/run across all cpus that map to that id, then emit
 * one line per counter.
 */
static void print_aggr(char *prefix)
{
	FILE *output = stat_config.output;
	struct perf_evsel *counter;
	int cpu, s, s2, id, nr;
	double uval;
	u64 ena, run, val;

	/*
	 * NOTE(review): this only bails when BOTH are unset; if aggr_get_id
	 * were set without aggr_map we would dereference NULL below.  In
	 * practice they are initialized together in perf_stat_init_aggr_mode.
	 */
	if (!(aggr_map || aggr_get_id))
		return;

	for (s = 0; s < aggr_map->nr; s++) {
		id = aggr_map->map[s];
		evlist__for_each(evsel_list, counter) {
			val = ena = run = 0;
			nr = 0;	/* number of cpus contributing to this id */
			for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
				s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
				if (s2 != id)
					continue;
				val += perf_counts(counter->counts, cpu, 0)->val;
				ena += perf_counts(counter->counts, cpu, 0)->ena;
				run += perf_counts(counter->counts, cpu, 0)->run;
				nr++;
			}
			if (prefix)
				fprintf(output, "%s", prefix);

			uval = val * counter->scale;
			printout(id, nr, counter, uval, prefix, run, ena, 1.0);
			fputc('\n', output);
		}
	}
}
940
941 static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
942 {
943         FILE *output = stat_config.output;
944         int nthreads = thread_map__nr(counter->threads);
945         int ncpus = cpu_map__nr(counter->cpus);
946         int cpu, thread;
947         double uval;
948
949         for (thread = 0; thread < nthreads; thread++) {
950                 u64 ena = 0, run = 0, val = 0;
951
952                 for (cpu = 0; cpu < ncpus; cpu++) {
953                         val += perf_counts(counter->counts, cpu, thread)->val;
954                         ena += perf_counts(counter->counts, cpu, thread)->ena;
955                         run += perf_counts(counter->counts, cpu, thread)->run;
956                 }
957
958                 if (prefix)
959                         fprintf(output, "%s", prefix);
960
961                 uval = val * counter->scale;
962                 printout(thread, 0, counter, uval, prefix, run, ena, 1.0);
963                 fputc('\n', output);
964         }
965 }
966
967 /*
968  * Print out the results of a single counter:
969  * aggregated counts in system-wide mode
970  */
971 static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
972 {
973         FILE *output = stat_config.output;
974         struct perf_stat_evsel *ps = counter->priv;
975         double avg = avg_stats(&ps->res_stats[0]);
976         double uval;
977         double avg_enabled, avg_running;
978
979         avg_enabled = avg_stats(&ps->res_stats[1]);
980         avg_running = avg_stats(&ps->res_stats[2]);
981
982         if (prefix)
983                 fprintf(output, "%s", prefix);
984
985         uval = avg * counter->scale;
986         printout(-1, 0, counter, uval, prefix, avg_running, avg_enabled, avg);
987         fprintf(output, "\n");
988 }
989
990 /*
991  * Print out the results of a single counter:
992  * does not use aggregated count in system-wide
993  */
994 static void print_counter(struct perf_evsel *counter, char *prefix)
995 {
996         FILE *output = stat_config.output;
997         u64 ena, run, val;
998         double uval;
999         int cpu;
1000
1001         for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
1002                 val = perf_counts(counter->counts, cpu, 0)->val;
1003                 ena = perf_counts(counter->counts, cpu, 0)->ena;
1004                 run = perf_counts(counter->counts, cpu, 0)->run;
1005
1006                 if (prefix)
1007                         fprintf(output, "%s", prefix);
1008
1009                 uval = val * counter->scale;
1010                 printout(cpu, 0, counter, uval, prefix, run, ena, 1.0);
1011
1012                 fputc('\n', output);
1013         }
1014 }
1015
/*
 * Format the interval timestamp into @prefix (prepended to every line of
 * this round) and, every 25 rounds in non-CSV mode, reprint the column
 * header matching the current aggregation mode.
 */
static void print_interval(char *prefix, struct timespec *ts)
{
	FILE *output = stat_config.output;
	static int num_print_interval;

	sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);

	if (num_print_interval == 0 && !csv_output) {
		switch (stat_config.aggr_mode) {
		case AGGR_SOCKET:
			fprintf(output, "#           time socket cpus             counts %*s events\n", unit_width, "unit");
			break;
		case AGGR_CORE:
			fprintf(output, "#           time core         cpus             counts %*s events\n", unit_width, "unit");
			break;
		case AGGR_NONE:
			fprintf(output, "#           time CPU                counts %*s events\n", unit_width, "unit");
			break;
		case AGGR_THREAD:
			fprintf(output, "#           time             comm-pid                  counts %*s events\n", unit_width, "unit");
			break;
		case AGGR_GLOBAL:
		default:
			fprintf(output, "#           time             counts %*s events\n", unit_width, "unit");
			/* fallthrough into AGGR_UNSET's break is intentional */
		case AGGR_UNSET:
			break;
		}
	}

	/* Repeat the header every 25 printed intervals. */
	if (++num_print_interval == 25)
		num_print_interval = 0;
}
1048
1049 static void print_header(int argc, const char **argv)
1050 {
1051         FILE *output = stat_config.output;
1052         int i;
1053
1054         fflush(stdout);
1055
1056         if (!csv_output) {
1057                 fprintf(output, "\n");
1058                 fprintf(output, " Performance counter stats for ");
1059                 if (target.system_wide)
1060                         fprintf(output, "\'system wide");
1061                 else if (target.cpu_list)
1062                         fprintf(output, "\'CPU(s) %s", target.cpu_list);
1063                 else if (!target__has_task(&target)) {
1064                         fprintf(output, "\'%s", argv ? argv[0] : "pipe");
1065                         for (i = 1; argv && (i < argc); i++)
1066                                 fprintf(output, " %s", argv[i]);
1067                 } else if (target.pid)
1068                         fprintf(output, "process id \'%s", target.pid);
1069                 else
1070                         fprintf(output, "thread id \'%s", target.tid);
1071
1072                 fprintf(output, "\'");
1073                 if (run_count > 1)
1074                         fprintf(output, " (%d runs)", run_count);
1075                 fprintf(output, ":\n\n");
1076         }
1077 }
1078
1079 static void print_footer(void)
1080 {
1081         FILE *output = stat_config.output;
1082
1083         if (!null_run)
1084                 fprintf(output, "\n");
1085         fprintf(output, " %17.9f seconds time elapsed",
1086                         avg_stats(&walltime_nsecs_stats)/1e9);
1087         if (run_count > 1) {
1088                 fprintf(output, "                                        ");
1089                 print_noise_pct(stddev_stats(&walltime_nsecs_stats),
1090                                 avg_stats(&walltime_nsecs_stats));
1091         }
1092         fprintf(output, "\n\n");
1093 }
1094
1095 static void print_counters(struct timespec *ts, int argc, const char **argv)
1096 {
1097         int interval = stat_config.interval;
1098         struct perf_evsel *counter;
1099         char buf[64], *prefix = NULL;
1100
1101         /* Do not print anything if we record to the pipe. */
1102         if (STAT_RECORD && perf_stat.file.is_pipe)
1103                 return;
1104
1105         if (interval)
1106                 print_interval(prefix = buf, ts);
1107         else
1108                 print_header(argc, argv);
1109
1110         switch (stat_config.aggr_mode) {
1111         case AGGR_CORE:
1112         case AGGR_SOCKET:
1113                 print_aggr(prefix);
1114                 break;
1115         case AGGR_THREAD:
1116                 evlist__for_each(evsel_list, counter)
1117                         print_aggr_thread(counter, prefix);
1118                 break;
1119         case AGGR_GLOBAL:
1120                 evlist__for_each(evsel_list, counter)
1121                         print_counter_aggr(counter, prefix);
1122                 break;
1123         case AGGR_NONE:
1124                 evlist__for_each(evsel_list, counter)
1125                         print_counter(counter, prefix);
1126                 break;
1127         case AGGR_UNSET:
1128         default:
1129                 break;
1130         }
1131
1132         if (!interval && !csv_output)
1133                 print_footer();
1134
1135         fflush(stat_config.output);
1136 }
1137
1138 static volatile int signr = -1;
1139
/*
 * Signal handler: mark the run as done when there is no child workload
 * to wait for (attach/system-wide mode) or in interval mode, and stash
 * the signal number so sig_atexit() can re-raise it.
 */
static void skip_signal(int signo)
{
	if ((child_pid == -1) || stat_config.interval)
		done = 1;

	signr = signo;
	/*
	 * render child_pid harmless
	 * won't send SIGTERM to a random
	 * process in case of race condition
	 * and fast PID recycling
	 */
	child_pid = -1;
}
1154
/*
 * atexit handler: terminate the child workload (if any) with SIGCHLD
 * blocked to avoid racing skip_signal(), then re-raise the deadly
 * signal with default disposition so our exit status reflects it.
 */
static void sig_atexit(void)
{
	sigset_t set, oset;

	/*
	 * avoid race condition with SIGCHLD handler
	 * in skip_signal() which is modifying child_pid
	 * goal is to avoid send SIGTERM to a random
	 * process
	 */
	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, &oset);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	sigprocmask(SIG_SETMASK, &oset, NULL);

	if (signr == -1)
		return;

	/* Die by the same signal we caught, with default handling. */
	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
1180
1181 static int stat__set_big_num(const struct option *opt __maybe_unused,
1182                              const char *s __maybe_unused, int unset)
1183 {
1184         big_num_opt = unset ? 0 : 1;
1185         return 0;
1186 }
1187
/* Command line options shared by 'perf stat' and 'perf stat record'. */
static const struct option stat_options[] = {
	OPT_BOOLEAN('T', "transaction", &transaction_run,
		    "hardware transaction statistics"),
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100, forever: 0)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - dont start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		    "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands\' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		    "list of cpus to monitor in system-wide"),
	OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
		    "disable CPU count aggregation", AGGR_NONE),
	OPT_STRING('x', "field-separator", &csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only", parse_cgroups),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_STRING(0, "pre", &pre_cmd, "command",
			"command to run prior to the measured command"),
	OPT_STRING(0, "post", &post_cmd, "command",
			"command to run after to the measured command"),
	OPT_UINTEGER('I', "interval-print", &stat_config.interval,
		    "print counts at regular interval in ms (>= 10)"),
	OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
		     "aggregate counts per thread", AGGR_THREAD),
	OPT_UINTEGER('D', "delay", &initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_END()
};
1248
/*
 * Thin adapters over the generic cpu_map topology helpers, passing NULL
 * for the unused environment argument (live-mode lookups use the
 * running machine's topology).
 */
static int perf_stat__get_socket(struct cpu_map *map, int cpu)
{
	return cpu_map__get_socket(map, cpu, NULL);
}

static int perf_stat__get_core(struct cpu_map *map, int cpu)
{
	return cpu_map__get_core(map, cpu, NULL);
}
1258
1259 static int cpu_map__get_max(struct cpu_map *map)
1260 {
1261         int i, max = -1;
1262
1263         for (i = 0; i < map->nr; i++) {
1264                 if (map->map[i] > max)
1265                         max = map->map[i];
1266         }
1267
1268         return max;
1269 }
1270
/* Cache of cpu number -> aggregation id, filled lazily; -1 = not yet. */
static struct cpu_map *cpus_aggr_map;

/*
 * Return the aggregation id for the cpu at @idx in @map, computing it
 * via @get_id on first use and caching it per cpu thereafter.
 */
static int perf_stat__get_aggr(aggr_get_id_t get_id, struct cpu_map *map, int idx)
{
	int cpu;

	if (idx >= map->nr)
		return -1;

	cpu = map->map[idx];

	if (cpus_aggr_map->map[cpu] == -1)
		cpus_aggr_map->map[cpu] = get_id(map, idx);

	return cpus_aggr_map->map[cpu];
}
1287
/* Caching wrappers used as aggr_get_id in live mode. */
static int perf_stat__get_socket_cached(struct cpu_map *map, int idx)
{
	return perf_stat__get_aggr(perf_stat__get_socket, map, idx);
}

static int perf_stat__get_core_cached(struct cpu_map *map, int idx)
{
	return perf_stat__get_aggr(perf_stat__get_core, map, idx);
}
1297
/*
 * Set up aggr_map/aggr_get_id for the selected aggregation mode (live
 * mode: topology comes from the running machine) and allocate the
 * cpu -> aggregation-id cache.  Returns 0 or a negative error.
 */
static int perf_stat_init_aggr_mode(void)
{
	int nr;

	switch (stat_config.aggr_mode) {
	case AGGR_SOCKET:
		if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
			perror("cannot build socket map");
			return -1;
		}
		aggr_get_id = perf_stat__get_socket_cached;
		break;
	case AGGR_CORE:
		if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) {
			perror("cannot build core map");
			return -1;
		}
		aggr_get_id = perf_stat__get_core_cached;
		break;
	case AGGR_NONE:
	case AGGR_GLOBAL:
	case AGGR_THREAD:
	case AGGR_UNSET:
	default:
		/* These modes do not aggregate by topology. */
		break;
	}

	/*
	 * The evsel_list->cpus is the base we operate on,
	 * taking the highest cpu number to be the size of
	 * the aggregation translate cpumap.
	 */
	nr = cpu_map__get_max(evsel_list->cpus);
	cpus_aggr_map = cpu_map__empty_new(nr + 1);
	return cpus_aggr_map ? 0 : -ENOMEM;
}
1334
1335 static void perf_stat__exit_aggr_mode(void)
1336 {
1337         cpu_map__put(aggr_map);
1338         cpu_map__put(cpus_aggr_map);
1339         aggr_map = NULL;
1340         cpus_aggr_map = NULL;
1341 }
1342
1343 static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, int idx)
1344 {
1345         int cpu;
1346
1347         if (idx > map->nr)
1348                 return -1;
1349
1350         cpu = map->map[idx];
1351
1352         if (cpu >= env->nr_cpus_online)
1353                 return -1;
1354
1355         return cpu;
1356 }
1357
1358 static int perf_env__get_socket(struct cpu_map *map, int idx, void *data)
1359 {
1360         struct perf_env *env = data;
1361         int cpu = perf_env__get_cpu(env, map, idx);
1362
1363         return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
1364 }
1365
1366 static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
1367 {
1368         struct perf_env *env = data;
1369         int core = -1, cpu = perf_env__get_cpu(env, map, idx);
1370
1371         if (cpu != -1) {
1372                 int socket_id = env->cpu[cpu].socket_id;
1373
1374                 /*
1375                  * Encode socket in upper 16 bits
1376                  * core_id is relative to socket, and
1377                  * we need a global id. So we combine
1378                  * socket + core id.
1379                  */
1380                 core = (socket_id << 16) | (env->cpu[cpu].core_id & 0xffff);
1381         }
1382
1383         return core;
1384 }
1385
/*
 * Bind the generic topology helpers to the perf_env saved in the data
 * file's header, so aggregation of recorded data uses the topology of
 * the machine the data was recorded on, not the current one.
 */
static int perf_env__build_socket_map(struct perf_env *env, struct cpu_map *cpus,
				      struct cpu_map **sockp)
{
	return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
}

static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus,
				    struct cpu_map **corep)
{
	return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
}

static int perf_stat__get_socket_file(struct cpu_map *map, int idx)
{
	return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
}

static int perf_stat__get_core_file(struct cpu_map *map, int idx)
{
	return perf_env__get_core(map, idx, &perf_stat.session->header.env);
}
1407
/*
 * File-mode counterpart of perf_stat_init_aggr_mode(): set up
 * aggr_map/aggr_get_id using the topology recorded in the session
 * header rather than the live machine.
 */
static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
	struct perf_env *env = &st->session->header.env;

	switch (stat_config.aggr_mode) {
	case AGGR_SOCKET:
		if (perf_env__build_socket_map(env, evsel_list->cpus, &aggr_map)) {
			perror("cannot build socket map");
			return -1;
		}
		aggr_get_id = perf_stat__get_socket_file;
		break;
	case AGGR_CORE:
		if (perf_env__build_core_map(env, evsel_list->cpus, &aggr_map)) {
			perror("cannot build core map");
			return -1;
		}
		aggr_get_id = perf_stat__get_core_file;
		break;
	case AGGR_NONE:
	case AGGR_GLOBAL:
	case AGGR_THREAD:
	case AGGR_UNSET:
	default:
		/* These modes do not aggregate by topology. */
		break;
	}

	return 0;
}
1437
1438 /*
1439  * Add default attributes, if there were no attributes specified or
1440  * if -d/--detailed, -d -d or -d -d -d is used:
1441  */
/*
 * Populate evsel_list when the user gave no -e events: transaction
 * events for -T, otherwise the standard default set, with extra cache
 * and TLB events appended for each -d level (up to -d -d -d).
 * Returns 0 on success, negative on error.
 */
static int add_default_attributes(void)
{
	struct perf_event_attr default_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS		},

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES		},

};

/*
 * Detailed stats (-d), covering the L1 and last level data caches:
 */
	struct perf_event_attr detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_LL			<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_LL			<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
};

/*
 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
 */
	struct perf_event_attr very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1I		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1I		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_DTLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_DTLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_ITLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_ITLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

};

/*
 * Very, very detailed stats (-d -d -d), adding prefetch events:
 */
	struct perf_event_attr very_very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_PREFETCH	<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_PREFETCH	<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
};

	/* Set attrs if no event is selected and !null_run: */
	if (null_run)
		return 0;

	if (transaction_run) {
		int err;
		/* Prefer the full transaction set if the PMU supports it. */
		if (pmu_have_event("cpu", "cycles-ct") &&
		    pmu_have_event("cpu", "el-start"))
			err = parse_events(evsel_list, transaction_attrs, NULL);
		else
			err = parse_events(evsel_list, transaction_limited_attrs, NULL);
		if (err) {
			fprintf(stderr, "Cannot set up transaction events\n");
			return -1;
		}
		return 0;
	}

	if (!evsel_list->nr_entries) {
		if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
			return -1;
	}

	/* Detailed events get appended to the event list: */

	if (detailed_run <  1)
		return 0;

	/* Append detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
		return -1;

	if (detailed_run < 2)
		return 0;

	/* Append very detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
		return -1;

	if (detailed_run < 3)
		return 0;

	/* Append very, very detailed run extra attributes: */
	return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
1596
/* Usage string for 'perf stat record'. */
static const char * const stat_record_usage[] = {
	"perf stat record [<options>]",
	NULL,
};
1601
1602 static void init_features(struct perf_session *session)
1603 {
1604         int feat;
1605
1606         for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1607                 perf_header__set_feat(&session->header, feat);
1608
1609         perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1610         perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1611         perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
1612         perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
1613 }
1614
/*
 * Set up 'perf stat record': parse record-specific options, create the
 * output session and flag record mode.  Returns the remaining argc
 * (the workload command, if any) or -1 on error.
 */
static int __cmd_record(int argc, const char **argv)
{
	struct perf_session *session;
	struct perf_data_file *file = &perf_stat.file;

	argc = parse_options(argc, argv, stat_options, stat_record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	if (output_name)
		file->path = output_name;

	/* Repeated runs cannot be represented in a single stat data file. */
	if (run_count != 1 || forever) {
		pr_err("Cannot use -r option with perf stat record.\n");
		return -1;
	}

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	init_features(session);

	session->evlist   = evsel_list;
	perf_stat.session = session;
	perf_stat.record  = true;
	return argc;
}
1644
/*
 * Handle a STAT_ROUND record from a stat data file: fold the counts
 * gathered so far into each counter's stats and print one round of
 * output.  FINAL rounds carry the total wall-clock time.
 */
static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
				    union perf_event *event,
				    struct perf_session *session)
{
	struct stat_round_event *round = &event->stat_round;
	struct perf_evsel *counter;
	struct timespec tsh, *ts = NULL;
	const char **argv = session->header.env.cmdline_argv;
	int argc = session->header.env.nr_cmdline;

	evlist__for_each(evsel_list, counter)
		perf_stat_process_counter(&stat_config, counter);

	if (round->type == PERF_STAT_ROUND_TYPE__FINAL)
		update_stats(&walltime_nsecs_stats, round->time);

	/* In interval mode the round timestamp becomes the line prefix. */
	if (stat_config.interval && round->time) {
		tsh.tv_sec  = round->time / NSECS_PER_SEC;
		tsh.tv_nsec = round->time % NSECS_PER_SEC;
		ts = &tsh;
	}

	print_counters(ts, argc, argv);
	return 0;
}
1670
1671 static
1672 int process_stat_config_event(struct perf_tool *tool __maybe_unused,
1673                               union perf_event *event,
1674                               struct perf_session *session __maybe_unused)
1675 {
1676         struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1677
1678         perf_event__read_stat_config(&stat_config, &event->stat_config);
1679
1680         if (cpu_map__empty(st->cpus)) {
1681                 if (st->aggr_mode != AGGR_UNSET)
1682                         pr_warning("warning: processing task data, aggregation mode not set\n");
1683                 return 0;
1684         }
1685
1686         if (st->aggr_mode != AGGR_UNSET)
1687                 stat_config.aggr_mode = st->aggr_mode;
1688
1689         if (perf_stat.file.is_pipe)
1690                 perf_stat_init_aggr_mode();
1691         else
1692                 perf_stat_init_aggr_mode_file(st);
1693
1694         return 0;
1695 }
1696
1697 static int set_maps(struct perf_stat *st)
1698 {
1699         if (!st->cpus || !st->threads)
1700                 return 0;
1701
1702         if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
1703                 return -EINVAL;
1704
1705         perf_evlist__set_maps(evsel_list, st->cpus, st->threads);
1706
1707         if (perf_evlist__alloc_stats(evsel_list, true))
1708                 return -ENOMEM;
1709
1710         st->maps_allocated = true;
1711         return 0;
1712 }
1713
1714 static
1715 int process_thread_map_event(struct perf_tool *tool __maybe_unused,
1716                              union perf_event *event,
1717                              struct perf_session *session __maybe_unused)
1718 {
1719         struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1720
1721         if (st->threads) {
1722                 pr_warning("Extra thread map event, ignoring.\n");
1723                 return 0;
1724         }
1725
1726         st->threads = thread_map__new_event(&event->thread_map);
1727         if (!st->threads)
1728                 return -ENOMEM;
1729
1730         return set_maps(st);
1731 }
1732
1733 static
1734 int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
1735                           union perf_event *event,
1736                           struct perf_session *session __maybe_unused)
1737 {
1738         struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1739         struct cpu_map *cpus;
1740
1741         if (st->cpus) {
1742                 pr_warning("Extra cpu map event, ignoring.\n");
1743                 return 0;
1744         }
1745
1746         cpus = cpu_map__new_data(&event->cpu_map.data);
1747         if (!cpus)
1748                 return -ENOMEM;
1749
1750         st->cpus = cpus;
1751         return set_maps(st);
1752 }
1753
/* Usage string shown by 'perf stat report' option parsing. */
static const char * const stat_report_usage[] = {
	"perf stat report [<options>]",
	NULL,
};
1758
/*
 * Global state for 'perf stat record/report'.  The embedded tool wires the
 * stat-related PERF_RECORD_* event types to the handlers defined above;
 * aggr_mode starts as AGGR_UNSET so a recorded mode is used unless the
 * report command line overrides it.
 */
static struct perf_stat perf_stat = {
	.tool = {
		.attr           = perf_event__process_attr,
		.event_update   = perf_event__process_event_update,
		.thread_map     = process_thread_map_event,
		.cpu_map        = process_cpu_map_event,
		.stat_config    = process_stat_config_event,
		.stat           = perf_event__process_stat_event,
		.stat_round     = process_stat_round_event,
	},
	.aggr_mode = AGGR_UNSET,
};
1771
1772 static int __cmd_report(int argc, const char **argv)
1773 {
1774         struct perf_session *session;
1775         const struct option options[] = {
1776         OPT_STRING('i', "input", &input_name, "file", "input file name"),
1777         OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
1778                      "aggregate counts per processor socket", AGGR_SOCKET),
1779         OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
1780                      "aggregate counts per physical processor core", AGGR_CORE),
1781         OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
1782                      "disable CPU count aggregation", AGGR_NONE),
1783         OPT_END()
1784         };
1785         struct stat st;
1786         int ret;
1787
1788         argc = parse_options(argc, argv, options, stat_report_usage, 0);
1789
1790         if (!input_name || !strlen(input_name)) {
1791                 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
1792                         input_name = "-";
1793                 else
1794                         input_name = "perf.data";
1795         }
1796
1797         perf_stat.file.path = input_name;
1798         perf_stat.file.mode = PERF_DATA_MODE_READ;
1799
1800         session = perf_session__new(&perf_stat.file, false, &perf_stat.tool);
1801         if (session == NULL)
1802                 return -1;
1803
1804         perf_stat.session  = session;
1805         stat_config.output = stderr;
1806         evsel_list         = session->evlist;
1807
1808         ret = perf_session__process_events(session);
1809         if (ret)
1810                 return ret;
1811
1812         perf_session__delete(session);
1813         return 0;
1814 }
1815
/*
 * Main entry point for 'perf stat': dispatch to the record/report
 * subcommands, validate and normalize the command line, run the workload
 * run_count times (or forever with -r 0), and print/record the results.
 *
 * Returns 0 on success, a negative errno-style value on error, or the
 * status of the last run.
 */
int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -EINVAL, run_idx;
	const char *mode;
	FILE *output = stderr;
	unsigned int interval;
	const char * const stat_subcommands[] = { "record", "report" };

	setlocale(LC_ALL, "");

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
					(const char **) stat_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);

	/*
	 * Any -x separator enables CSV output; "\\t" is the user's spelling
	 * of a literal tab on the command line.
	 */
	if (csv_sep) {
		csv_output = true;
		if (!strcmp(csv_sep, "\\t"))
			csv_sep = "\t";
	} else
		csv_sep = DEFAULT_SEPARATOR;

	/* Subcommand dispatch: 'record' falls through to run, 'report' returns. */
	if (argc && !strncmp(argv[0], "rec", 3)) {
		argc = __cmd_record(argc, argv);
		if (argc < 0)
			return -1;
	} else if (argc && !strncmp(argv[0], "rep", 3))
		return __cmd_report(argc, argv);

	interval = stat_config.interval;

	/*
	 * For record command the -o is already taken care of.
	 */
	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
		output = NULL;	/* signals "open output_name below" */

	/* --output and --log-fd are mutually exclusive. */
	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		goto out;
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be a > 0\n");
		parse_options_usage(stat_usage, stat_options, "log-fd", 0);
		goto out;
	}

	/* Open the requested output: a named file, or a user-supplied fd. */
	if (!output) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	stat_config.output = output;

	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, stat_options, "B", 1);
			parse_options_usage(NULL, stat_options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		big_num = false;

	/* Nothing to measure: no workload and no -a/-p/-t/-C target. */
	if (!argc && target__none(&target))
		usage_with_options(stat_usage, stat_options);

	/* -r 0 means "repeat forever"; negative counts are rejected. */
	if (run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		goto out;
	} else if (run_count == 0) {
		forever = true;
		run_count = 1;
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) && !target__has_task(&target)) {
		fprintf(stderr, "The --per-thread option is only available "
			"when monitoring via -p -t options.\n");
		parse_options_usage(NULL, stat_options, "p", 1);
		parse_options_usage(NULL, stat_options, "t", 1);
		goto out;
	}

	/*
	 * no_aggr, cgroup are for system-wide only
	 * --per-thread is aggregated per thread, we dont mix it with cpu mode
	 */
	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
	      stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		goto out;
	}

	if (add_default_attributes())
		goto out;

	target__validate(&target);

	if (perf_evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads of monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
		}
		goto out;
	}

	/*
	 * Initialize thread_map with comm names,
	 * so we could print it out on output.
	 */
	if (stat_config.aggr_mode == AGGR_THREAD)
		thread_map__read_comms(evsel_list->threads);

	/* -I below 10ms is rejected; 10-100ms works but is warned about. */
	if (interval && interval < 100) {
		if (interval < 10) {
			pr_err("print interval must be >= 10ms\n");
			parse_options_usage(stat_usage, stat_options, "I", 1);
			goto out;
		} else
			pr_warning("print interval < 100ms. "
				   "The overhead percentage could be high in some cases. "
				   "Please proceed with caution.\n");
	}

	if (perf_evlist__alloc_stats(evsel_list, interval))
		goto out;

	if (perf_stat_init_aggr_mode())
		goto out;

	/*
	 * We dont want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	if (!forever)
		signal(SIGINT,  skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	status = 0;
	for (run_idx = 0; forever || run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		status = run_perf_stat(argc, argv);
		/* In forever mode print after each run, then reset for the next. */
		if (forever && status != -1) {
			print_counters(NULL, argc, argv);
			perf_stat__reset_stats();
		}
	}

	/* Interval mode already printed per-interval results during the run. */
	if (!forever && status != -1 && !interval)
		print_counters(NULL, argc, argv);

	if (STAT_RECORD) {
		/*
		 * We synthesize the kernel mmap record just so that older tools
		 * don't emit warnings about not being able to resolve symbols
		 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
		 * a saner message about no samples being in the perf.data file.
		 *
		 * This also serves to suppress a warning about f_header.data.size == 0
		 * in header.c at the moment 'perf stat record' gets introduced, which
		 * is not really needed once we start adding the stat specific PERF_RECORD_
		 * records, but the need to suppress the kptr_restrict messages in older
		 * tools remain  -acme
		 */
		int fd = perf_data_file__fd(&perf_stat.file);
		int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
							     process_synthesized_event,
							     &perf_stat.session->machines.host);
		if (err) {
			pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
				   "older tools may produce warnings about this file\n.");
		}

		/* Without -I, emit a single FINAL round covering the whole run. */
		if (!interval) {
			if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
				pr_err("failed to write stat round event\n");
		}

		if (!perf_stat.file.is_pipe) {
			perf_stat.session->header.data_size += perf_stat.bytes_written;
			perf_session__write_header(perf_stat.session, evsel_list, fd, true);
		}

		perf_session__delete(perf_stat.session);
	}

	perf_stat__exit_aggr_mode();
	perf_evlist__free_stats(evsel_list);
out:
	perf_evlist__delete(evsel_list);
	return status;
}