/* tools/perf/util/evlist.c */
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads)
{
        int i;

        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
        INIT_LIST_HEAD(&evlist->entries);
        perf_evlist__set_maps(evlist, cpus, threads);
        evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        if (evlist != NULL)
                perf_evlist__init(evlist, NULL, NULL);

        return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
        struct perf_evlist *evlist = perf_evlist__new();

        if (evlist && perf_evlist__add_default(evlist)) {
                perf_evlist__delete(evlist);
                evlist = NULL;
        }

        return evlist;
}
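
/*
 * Example usage (an illustrative sketch, not compiled): a typical
 * caller creates the default "cycles" evlist and frees it when done.
 * The surrounding error-handling style is an assumption about the
 * caller, not something this file mandates.
 */
#if 0
static int example_new_default(void)
{
        struct perf_evlist *evlist = perf_evlist__new_default();

        if (evlist == NULL)
                return -ENOMEM; /* perf_evlist__new() or add_default failed */

        /* ... perf_evlist__open(), perf_evlist__mmap(), read events ... */

        perf_evlist__delete(evlist);
        return 0;
}
#endif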

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);

        evlist->id_pos = first->id_pos;
        evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node)
                perf_evsel__calc_id_pos(evsel);

        perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *n;

        list_for_each_entry_safe(pos, n, &evlist->entries, node) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);
        }

        evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
        free(evlist->mmap);
        free(evlist->pollfd);
        evlist->mmap = NULL;
        evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
        free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
        list_add_tail(&entry->node, &evlist->entries);
        entry->idx = evlist->nr_entries;

        if (!evlist->nr_entries++)
                perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list,
                                   int nr_entries)
{
        bool set_id_pos = !evlist->nr_entries;

        list_splice_tail(list, &evlist->entries);
        evlist->nr_entries += nr_entries;
        if (set_id_pos)
                perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
        struct perf_evsel *evsel, *leader;

        leader = list_entry(list->next, struct perf_evsel, node);
        evsel = list_entry(list->prev, struct perf_evsel, node);

        leader->nr_members = evsel->idx - leader->idx + 1;

        list_for_each_entry(evsel, list, node) {
                evsel->leader = leader;
        }
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
        if (evlist->nr_entries) {
                evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
                __perf_evlist__set_leader(&evlist->entries);
        }
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
        struct perf_event_attr attr = {
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_evsel *evsel;

        event_attr_init(&attr);

        evsel = perf_evsel__new(&attr);
        if (evsel == NULL)
                goto error;

        /* use strdup() because free(evsel) assumes name is allocated */
        evsel->name = strdup("cycles");
        if (!evsel->name)
                goto error_free;

        perf_evlist__add(evlist, evsel);
        return 0;
error_free:
        perf_evsel__delete(evsel);
error:
        return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
                                  struct perf_event_attr *attrs, size_t nr_attrs)
{
        struct perf_evsel *evsel, *n;
        LIST_HEAD(head);
        size_t i;

        for (i = 0; i < nr_attrs; i++) {
                evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
                if (evsel == NULL)
                        goto out_delete_partial_list;
                list_add_tail(&evsel->node, &head);
        }

        perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

        return 0;

out_delete_partial_list:
        list_for_each_entry_safe(evsel, n, &head, node)
                perf_evsel__delete(evsel);
        return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs)
{
        size_t i;

        for (i = 0; i < nr_attrs; i++)
                event_attr_init(attrs + i);

        return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
                    (int)evsel->attr.config == id)
                        return evsel;
        }

        return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
                                     const char *name)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
                    (strcmp(evsel->name, name) == 0))
                        return evsel;
        }

        return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
                           const char *sys, const char *name, void *handler)
{
        struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

        if (evsel == NULL)
                return -1;

        evsel->handler = handler;
        perf_evlist__add(evlist, evsel);
        return 0;
}
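
/*
 * Example (sketch): trace-style tools wire up a tracepoint plus
 * handler this way.  The sys/name pair and the handler symbol below
 * are hypothetical, chosen only for illustration.
 */
#if 0
static int example_add_newtp(struct perf_evlist *evlist)
{
        /*
         * The handler is only stashed in evsel->handler here; it is the
         * caller's event loop that later looks it up and invokes it.
         */
        return perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter",
                                      example_sys_enter_handler);
}
#endif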

void perf_evlist__disable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                                continue;
                        for (thread = 0; thread < nr_threads; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_DISABLE, 0);
                }
        }
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                                continue;
                        for (thread = 0; thread < nr_threads; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_ENABLE, 0);
                }
        }
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        int cpu, thread, err;

        if (!evsel->fd)
                return 0;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        err = ioctl(FD(evsel, cpu, thread),
                                    PERF_EVENT_IOC_DISABLE, 0);
                        if (err)
                                return err;
                }
        }
        return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
                              struct perf_evsel *evsel)
{
        int cpu, thread, err;

        if (!evsel->fd)
                return -EINVAL;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        err = ioctl(FD(evsel, cpu, thread),
                                    PERF_EVENT_IOC_ENABLE, 0);
                        if (err)
                                return err;
                }
        }
        return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);
        int nfds = nr_cpus * nr_threads * evlist->nr_entries;
        evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
        return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
        fcntl(fd, F_SETFL, O_NONBLOCK);
        evlist->pollfd[evlist->nr_fds].fd = fd;
        evlist->pollfd[evlist->nr_fds].events = POLLIN;
        evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
{
        int hash;
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->id = id;
        sid->evsel = evsel;
        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
{
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel,
                                  int cpu, int thread, int fd)
{
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */
        u64 id;
        int ret;

        ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
        if (!ret)
                goto add;

        if (errno != ENOTTY)
                return -1;
        /* Legacy way to get the event id. All hail to old kernels! */

        /*
         * This way does not work with group format read, so bail
         * out in that case.
         */
        if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
                return -1;

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)
                return -1;

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;

        id = read_data[id_idx];

 add:
        perf_evlist__id_add(evlist, evsel, cpu, thread, id);
        return 0;
}
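
/*
 * Worked example for the legacy path above, assuming read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID: read() fills read_data as
 *
 *      read_data[0] = counter value
 *      read_data[1] = time_enabled
 *      read_data[2] = time_running
 *      read_data[3] = id
 *
 * so id_idx starts at 1 (skipping the value) and is bumped once per
 * TIME_* bit, landing on index 3, where the id lives.
 */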

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
        struct hlist_head *head;
        struct perf_sample_id *sid;
        int hash;

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, head, node)
                if (sid->id == id)
                        return sid;

        return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
        struct perf_sample_id *sid;

        if (evlist->nr_entries == 1)
                return perf_evlist__first(evlist);

        sid = perf_evlist__id2sid(evlist, id);
        if (sid)
                return sid->evsel;

        if (!perf_evlist__sample_id_all(evlist))
                return perf_evlist__first(evlist);

        return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
                                 union perf_event *event, u64 *id)
{
        const u64 *array = event->sample.array;
        ssize_t n;

        n = (event->header.size - sizeof(event->header)) >> 3;

        if (event->header.type == PERF_RECORD_SAMPLE) {
                if (evlist->id_pos >= n)
                        return -1;
                *id = array[evlist->id_pos];
        } else {
                if (evlist->is_pos > n)
                        return -1;
                n -= evlist->is_pos;
                *id = array[n];
        }
        return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
                                                   union perf_event *event)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct hlist_head *head;
        struct perf_sample_id *sid;
        int hash;
        u64 id;

        if (evlist->nr_entries == 1)
                return first;

        if (!first->attr.sample_id_all &&
            event->header.type != PERF_RECORD_SAMPLE)
                return first;

        if (perf_evlist__event2id(evlist, event, &id))
                return NULL;

        /* Synthesized events have an id of zero */
        if (!id)
                return first;

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, head, node) {
                if (sid->id == id)
                        return sid->evsel;
        }
        return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
        struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;

        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the head, we got messed up.
                 *
                 * In either case, truncate and restart at head.
                 */
                int diff = head - old;
                if (diff > md->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * head points to a known good entry, start there.
                         */
                        old = head;
                }
        }

        if (old != head) {
                size_t size;

                event = (union perf_event *)&data[old & md->mask];
                size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = md->event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = (union perf_event *) md->event_copy;
                }

                old += size;
        }

        md->prev = old;

        return event;
}
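
/*
 * Example consumer loop (sketch): drain one ring buffer and
 * acknowledge each event.  In non-overwrite mode the consume call is
 * what moves the tail pointer; without it the kernel eventually sees
 * the buffer as full and stops writing.
 */
#if 0
static void example_drain(struct perf_evlist *evlist, int idx)
{
        union perf_event *event;

        while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
                /* ... e.g. perf_evlist__parse_sample(evlist, event, &sample) ... */
                perf_evlist__mmap_consume(evlist, idx);
        }
}
#endif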

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
        if (!evlist->overwrite) {
                struct perf_mmap *md = &evlist->mmap[idx];
                unsigned int old = md->prev;

                perf_mmap__write_tail(md, old);
        }
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
        if (evlist->mmap[idx].base != NULL) {
                munmap(evlist->mmap[idx].base, evlist->mmap_len);
                evlist->mmap[idx].base = NULL;
        }
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
        int i;

        for (i = 0; i < evlist->nr_mmaps; i++)
                __perf_evlist__munmap(evlist, i);

        free(evlist->mmap);
        evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
        evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
        if (cpu_map__empty(evlist->cpus))
                evlist->nr_mmaps = thread_map__nr(evlist->threads);
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
                               int idx, int prot, int mask, int fd)
{
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
        if (evlist->mmap[idx].base == MAP_FAILED) {
                pr_debug2("failed to mmap perf event ring buffer, error %d\n",
                          errno);
                evlist->mmap[idx].base = NULL;
                return -1;
        }

        perf_evlist__add_pollfd(evlist, fd);
        return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
                                       int prot, int mask, int cpu, int thread,
                                       int *output)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                int fd = FD(evsel, cpu, thread);

                if (*output == -1) {
                        *output = fd;
                        if (__perf_evlist__mmap(evlist, idx, prot, mask,
                                                *output) < 0)
                                return -1;
                } else {
                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
                                return -1;
                }

                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
                        return -1;
        }

        return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
                                     int mask)
{
        int cpu, thread;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        pr_debug2("perf event ring buffer mmapped per cpu\n");
        for (cpu = 0; cpu < nr_cpus; cpu++) {
                int output = -1;

                for (thread = 0; thread < nr_threads; thread++) {
                        if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
                                                        cpu, thread, &output))
                                goto out_unmap;
                }
        }

        return 0;

out_unmap:
        for (cpu = 0; cpu < nr_cpus; cpu++)
                __perf_evlist__munmap(evlist, cpu);
        return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
                                        int mask)
{
        int thread;
        int nr_threads = thread_map__nr(evlist->threads);

        pr_debug2("perf event ring buffer mmapped per thread\n");
        for (thread = 0; thread < nr_threads; thread++) {
                int output = -1;

                if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
                                                thread, &output))
                        goto out_unmap;
        }

        return 0;

out_unmap:
        for (thread = 0; thread < nr_threads; thread++)
                __perf_evlist__munmap(evlist, thread);
        return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
        /* 512 kiB: default amount of unprivileged mlocked memory */
        if (pages == UINT_MAX)
                pages = (512 * 1024) / page_size;
        else if (!is_power_of_2(pages))
                return 0;

        return (pages + 1) * page_size;
}
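
/*
 * Worked example: with 4 kiB pages, the UINT_MAX default above becomes
 * (512 * 1024) / 4096 = 128 data pages, and the returned length is
 * (128 + 1) * 4096 bytes -- the "+ 1" is the extra leading page that
 * holds the struct perf_event_mmap_page control header.
 */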

static long parse_pages_arg(const char *str, unsigned long min,
                            unsigned long max)
{
        unsigned long pages, val;
        static struct parse_tag tags[] = {
                { .tag  = 'B', .mult = 1       },
                { .tag  = 'K', .mult = 1 << 10 },
                { .tag  = 'M', .mult = 1 << 20 },
                { .tag  = 'G', .mult = 1 << 30 },
                { .tag  = 0 },
        };

        if (str == NULL)
                return -EINVAL;

        val = parse_tag_value(str, tags);
        if (val != (unsigned long) -1) {
                /* we got a file size value */
                pages = PERF_ALIGN(val, page_size) / page_size;
        } else {
                /* we got a page count value */
                char *eptr;
                pages = strtoul(str, &eptr, 10);
                if (*eptr != '\0')
                        return -EINVAL;
        }

        if (pages == 0 && min == 0) {
                /* leave number of pages at 0 */
        } else if (pages < (1UL << 31) && !is_power_of_2(pages)) {
                /* round pages up to next power of 2 */
                pages = next_pow2(pages);
                pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
                        pages * page_size, pages);
        }

        if (pages > max)
                return -EINVAL;

        return pages;
}
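
/*
 * Examples (assuming a 4 kiB page size): "512K" parses via the tag
 * table as a byte size and becomes 128 pages; "100" parses as a page
 * count and is rounded up to the next power of two, also 128 pages.
 */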

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
                                  int unset __maybe_unused)
{
        unsigned int *mmap_pages = opt->value;
        unsigned long max = UINT_MAX;
        long pages;

        if (max > SIZE_MAX / page_size)
                max = SIZE_MAX / page_size;

        pages = parse_pages_arg(str, 1, max);
        if (pages < 0) {
                pr_err("Invalid argument for --mmap_pages/-m\n");
                return -1;
        }

        *mmap_pages = pages;
        return 0;
}

/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_consume() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite)
{
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;

        if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;

        evlist->overwrite = overwrite;
        evlist->mmap_len = perf_evlist__mmap_size(pages);
        pr_debug("mmap size %zuB\n", evlist->mmap_len);
        mask = evlist->mmap_len - page_size - 1;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
                        return -ENOMEM;
        }

        if (cpu_map__empty(cpus))
                return perf_evlist__mmap_per_thread(evlist, prot, mask);

        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
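
/*
 * Example setup sequence (sketch): the usual record-style flow builds
 * the maps, opens the counters, then mmaps the ring buffers.  The page
 * count and error handling are assumptions for illustration.
 */
#if 0
static int example_setup(struct perf_evlist *evlist, struct target *target)
{
        if (perf_evlist__create_maps(evlist, target) < 0)
                return -1;

        if (perf_evlist__open(evlist) < 0)
                return -1;

        /* 128 pages, non-overwrite: the consumer must advance the tail */
        return perf_evlist__mmap(evlist, 128, false);
}
#endif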

int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
        evlist->threads = thread_map__new_str(target->pid, target->tid,
                                              target->uid);

        if (evlist->threads == NULL)
                return -1;

        if (target__uses_dummy_map(target))
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(target->cpu_list);

        if (evlist->cpus == NULL)
                goto out_delete_threads;

        return 0;

out_delete_threads:
        thread_map__delete(evlist->threads);
        return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
        cpu_map__delete(evlist->cpus);
        thread_map__delete(evlist->threads);
        evlist->cpus    = NULL;
        evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->filter == NULL)
                        continue;

                err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
                if (err)
                        break;
        }

        return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
                if (err)
                        break;
        }

        return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *pos;

        if (evlist->nr_entries == 1)
                return true;

        if (evlist->id_pos < 0 || evlist->is_pos < 0)
                return false;

        list_for_each_entry(pos, &evlist->entries, node) {
                if (pos->id_pos != evlist->id_pos ||
                    pos->is_pos != evlist->is_pos)
                        return false;
        }

        return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        if (evlist->combined_sample_type)
                return evlist->combined_sample_type;

        list_for_each_entry(evsel, &evlist->entries, node)
                evlist->combined_sample_type |= evsel->attr.sample_type;

        return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
        evlist->combined_sample_type = 0;
        return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
        u64 read_format = first->attr.read_format;
        u64 sample_type = first->attr.sample_type;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (read_format != pos->attr.read_format)
                        return false;
        }
        /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
        if ((sample_type & PERF_SAMPLE_READ) &&
            !(read_format & PERF_FORMAT_ID)) {
                return false;
        }

        return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct perf_sample *data;
        u64 sample_type;
        u16 size = 0;

        if (!first->attr.sample_id_all)
                goto out;

        sample_type = first->attr.sample_type;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;

        if (sample_type & PERF_SAMPLE_IDENTIFIER)
                size += sizeof(data->id);
out:
        return size;
}
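
/*
 * Worked example: with sample_id_all set and sample_type =
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID, the trailer
 * appended to non-sample events is pid/tid (2 * 4 bytes) + time
 * (8 bytes) + id (8 bytes) = 24 bytes.
 */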

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)
                        return false;
        }

        return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int ncpus = cpu_map__nr(evlist->cpus);
        int nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry_reverse(evsel, &evlist->entries, node)
                perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err;

        perf_evlist__update_id_pos(evlist);

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
                if (err < 0)
                        goto out_err;
        }

        return 0;
out_err:
        perf_evlist__close(evlist);
        errno = -err;
        return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
                                  const char *argv[], bool pipe_output,
                                  bool want_signal)
{
        int child_ready_pipe[2], go_pipe[2];
        char bf;

        if (pipe(child_ready_pipe) < 0) {
                perror("failed to create 'ready' pipe");
                return -1;
        }

        if (pipe(go_pipe) < 0) {
                perror("failed to create 'go' pipe");
                goto out_close_ready_pipe;
        }

        evlist->workload.pid = fork();
        if (evlist->workload.pid < 0) {
                perror("failed to fork");
                goto out_close_pipes;
        }

        if (!evlist->workload.pid) {
                if (pipe_output)
                        dup2(2, 1);

                signal(SIGTERM, SIG_DFL);

                close(child_ready_pipe[0]);
                close(go_pipe[1]);
                fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

                /*
                 * Tell the parent we're ready to go
                 */
                close(child_ready_pipe[1]);

                /*
                 * Wait until the parent tells us to go.
                 */
                if (read(go_pipe[0], &bf, 1) == -1)
                        perror("unable to read pipe");

                execvp(argv[0], (char **)argv);

                perror(argv[0]);
                if (want_signal)
                        kill(getppid(), SIGUSR1);
                exit(-1);
        }

        if (target__none(target))
                evlist->threads->map[0] = evlist->workload.pid;

        close(child_ready_pipe[1]);
        close(go_pipe[0]);
        /*
         * wait for child to settle
         */
        if (read(child_ready_pipe[0], &bf, 1) == -1) {
                perror("unable to read pipe");
                goto out_close_pipes;
        }

        fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
        evlist->workload.cork_fd = go_pipe[1];
        close(child_ready_pipe[0]);
        return 0;

out_close_pipes:
        close(go_pipe[0]);
        close(go_pipe[1]);
out_close_ready_pipe:
        close(child_ready_pipe[0]);
        close(child_ready_pipe[1]);
        return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
        if (evlist->workload.cork_fd > 0) {
                char bf = 0;
                int ret;
                /*
                 * Remove the cork, let it rip!
                 */
                ret = write(evlist->workload.cork_fd, &bf, 1);
                if (ret < 0)
                        perror("unable to write to pipe");

                close(evlist->workload.cork_fd);
                return ret;
        }

        return 0;
}
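
/*
 * Example (sketch): how the two workload helpers pair up in a
 * record-style tool -- fork the corked child first, set up counters
 * while it waits, then pop the cork so the workload starts with events
 * already enabled.  The flag values below are illustrative assumptions.
 */
#if 0
static int example_run_workload(struct perf_evlist *evlist,
                                struct target *target, const char *argv[])
{
        if (perf_evlist__prepare_workload(evlist, target, argv,
                                          false, false) < 0)
                return -1;

        /* ... perf_evlist__open(), perf_evlist__mmap(), enable ... */

        return perf_evlist__start_workload(evlist);
}
#endif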

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                              struct perf_sample *sample)
{
        struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

        if (!evsel)
                return -EFAULT;
        return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *evsel;
        size_t printed = 0;

        list_for_each_entry(evsel, &evlist->entries, node) {
                printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
                                   perf_evsel__name(evsel));
        }

        return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
                             int err, char *buf, size_t size)
{
        char sbuf[128];

        switch (err) {
        case ENOENT:
                scnprintf(buf, size, "%s",
                          "Error:\tUnable to find debugfs\n"
                          "Hint:\tWas your kernel compiled with debugfs support?\n"
                          "Hint:\tIs the debugfs filesystem mounted?\n"
                          "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
                break;
        case EACCES:
                scnprintf(buf, size,
                          "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
                          "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
                          debugfs_mountpoint, debugfs_mountpoint);
                break;
        default:
                scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
                break;
        }

        return 0;
}

int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
                               int err, char *buf, size_t size)
{
        int printed, value;
        char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

        switch (err) {
        case EACCES:
        case EPERM:
                printed = scnprintf(buf, size,
                                    "Error:\t%s.\n"
                                    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

                if (filename__read_int("/proc/sys/kernel/perf_event_paranoid", &value))
                        break;

                printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

                if (value >= 2) {
                        printed += scnprintf(buf + printed, size - printed,
                                             "For your workloads it needs to be <= 1\nHint:\t");
                }
                printed += scnprintf(buf + printed, size - printed,
                                     "For system wide tracing it needs to be set to -1");

                printed += scnprintf(buf + printed, size - printed,
                                     ".\nHint:\tThe current value is %d.", value);
                break;
        default:
                scnprintf(buf, size, "%s", emsg);
                break;
        }

        return 0;
}