perf evlist: Introduce set_filter() method
tools/perf/util/evlist.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads)
{
        int i;

        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
        INIT_LIST_HEAD(&evlist->entries);
        perf_evlist__set_maps(evlist, cpus, threads);
        evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
                                     struct thread_map *threads)
{
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        if (evlist != NULL)
                perf_evlist__init(evlist, cpus, threads);

        return evlist;
}
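
/*
 * A minimal construction sketch (the cpu/thread maps may also be set up
 * later via perf_evlist__create_maps(); error handling elided):
 *
 *      struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
 *
 *      if (evlist != NULL) {
 *              perf_evlist__add_default(evlist);       // counts "cycles"
 *              ...
 *              perf_evlist__delete(evlist);
 *      }
 */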

void perf_evlist__config_attrs(struct perf_evlist *evlist,
                               struct perf_record_opts *opts)
{
        struct perf_evsel *evsel, *first;

        if (evlist->cpus->map[0] < 0)
                opts->no_inherit = true;

        first = perf_evlist__first(evlist);

        list_for_each_entry(evsel, &evlist->entries, node) {
                perf_evsel__config(evsel, opts, first);

                if (evlist->nr_entries > 1)
                        evsel->attr.sample_type |= PERF_SAMPLE_ID;
        }
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *n;

        list_for_each_entry_safe(pos, n, &evlist->entries, node) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);
        }

        evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
        free(evlist->mmap);
        free(evlist->pollfd);
        evlist->mmap = NULL;
        evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
        free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
        list_add_tail(&entry->node, &evlist->entries);
        ++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list,
                                   int nr_entries)
{
        list_splice_tail(list, &evlist->entries);
        evlist->nr_entries += nr_entries;
}

void __perf_evlist__set_leader(struct list_head *list)
{
        struct perf_evsel *evsel, *leader;

        leader = list_entry(list->next, struct perf_evsel, node);
        leader->leader = NULL;

        list_for_each_entry(evsel, list, node) {
                if (evsel != leader)
                        evsel->leader = leader;
        }
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
        if (evlist->nr_entries)
                __perf_evlist__set_leader(&evlist->entries);
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
        struct perf_event_attr attr = {
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_evsel *evsel;

        event_attr_init(&attr);

        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL)
                goto error;

        /* use strdup() because free(evsel) assumes name is allocated */
        evsel->name = strdup("cycles");
        if (!evsel->name)
                goto error_free;

        perf_evlist__add(evlist, evsel);
        return 0;
error_free:
        perf_evsel__delete(evsel);
error:
        return -ENOMEM;
}

int perf_evlist__add_attrs(struct perf_evlist *evlist,
                           struct perf_event_attr *attrs, size_t nr_attrs)
{
        struct perf_evsel *evsel, *n;
        LIST_HEAD(head);
        size_t i;

        for (i = 0; i < nr_attrs; i++) {
                evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
                if (evsel == NULL)
                        goto out_delete_partial_list;
                list_add_tail(&evsel->node, &head);
        }

        perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

        return 0;

out_delete_partial_list:
        list_for_each_entry_safe(evsel, n, &head, node)
                perf_evsel__delete(evsel);
        return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs)
{
        size_t i;

        for (i = 0; i < nr_attrs; i++)
                event_attr_init(attrs + i);

        return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

static int trace_event__id(const char *evname)
{
        char *filename, *colon;
        int err = -1, fd;

        if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
                return -1;

        colon = strrchr(filename, ':');
        if (colon != NULL)
                *colon = '/';

        fd = open(filename, O_RDONLY);
        if (fd >= 0) {
                char id[16];
                if (read(fd, id, sizeof(id)) > 0)
                        err = atoi(id);
                close(fd);
        }

        free(filename);
        return err;
}

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
                                 const char *tracepoints[],
                                 size_t nr_tracepoints)
{
        int err;
        size_t i;
        struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

        if (attrs == NULL)
                return -1;

        for (i = 0; i < nr_tracepoints; i++) {
                err = trace_event__id(tracepoints[i]);

                if (err < 0)
                        goto out_free_attrs;

                attrs[i].type          = PERF_TYPE_TRACEPOINT;
                attrs[i].config        = err;
                attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                                          PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
                attrs[i].sample_period = 1;
        }

        err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
        free(attrs);
        return err;
}
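
/*
 * E.g. (a sketch): "sched:sched_switch" is looked up as
 * <tracing_events_path>/sched/sched_switch/id, since trace_event__id()
 * rewrites the ':' to a '/':
 *
 *      const char *tps[] = { "sched:sched_switch" };
 *
 *      if (perf_evlist__add_tracepoints(evlist, tps, ARRAY_SIZE(tps)) < 0)
 *              ...
 */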

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
                    (int)evsel->attr.config == id)
                        return evsel;
        }

        return NULL;
}

int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
                                          const struct perf_evsel_str_handler *assocs,
                                          size_t nr_assocs)
{
        struct perf_evsel *evsel;
        int err;
        size_t i;

        for (i = 0; i < nr_assocs; i++) {
                err = trace_event__id(assocs[i].name);
                if (err < 0)
                        goto out;

                evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
                if (evsel == NULL)
                        continue;

                err = -EEXIST;
                if (evsel->handler.func != NULL)
                        goto out;
                evsel->handler.func = assocs[i].handler;
        }

        err = 0;
out:
        return err;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_DISABLE, 0);
                }
        }
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;

        for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_ENABLE, 0);
                }
        }
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
        int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
        evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
        return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
        fcntl(fd, F_SETFL, O_NONBLOCK);
        evlist->pollfd[evlist->nr_fds].fd = fd;
        evlist->pollfd[evlist->nr_fds].events = POLLIN;
        evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
{
        int hash;
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->id = id;
        sid->evsel = evsel;
        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
{
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel,
                                  int cpu, int thread, int fd)
{
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)
                return -1;

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;

        perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
        return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct perf_sample_id *sid;
        int hash;

        if (evlist->nr_entries == 1)
                return perf_evlist__first(evlist);

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, pos, head, node)
                if (sid->id == id)
                        return sid->evsel;

        if (!perf_evlist__sample_id_all(evlist))
                return perf_evlist__first(evlist);

        return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
        /* XXX Move this to perf.c, making it generally available */
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;

        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the head, we got messed up.
                 *
                 * In either case, truncate and restart at head.
                 */
                int diff = head - old;
                if (diff > md->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * head points to a known good entry, start there.
                         */
                        old = head;
                }
        }

        if (old != head) {
                size_t size;

                event = (union perf_event *)&data[old & md->mask];
                size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = &evlist->event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = &evlist->event_copy;
                }

                old += size;
        }

        md->prev = old;

        if (!evlist->overwrite)
                perf_mmap__write_tail(md, old);

        return event;
}
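
/*
 * Note that in the !overwrite case the tail is advanced above, so a
 * caller draining one map just loops (a sketch):
 *
 *      union perf_event *event;
 *
 *      while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL)
 *              ... process event, valid only until the next call ...
 */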

void perf_evlist__munmap(struct perf_evlist *evlist)
{
        int i;

        for (i = 0; i < evlist->nr_mmaps; i++) {
                if (evlist->mmap[i].base != NULL) {
                        munmap(evlist->mmap[i].base, evlist->mmap_len);
                        evlist->mmap[i].base = NULL;
                }
        }

        free(evlist->mmap);
        evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
        evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
        if (cpu_map__all(evlist->cpus))
                evlist->nr_mmaps = evlist->threads->nr;
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
                               int idx, int prot, int mask, int fd)
{
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
        if (evlist->mmap[idx].base == MAP_FAILED) {
                evlist->mmap[idx].base = NULL;
                return -1;
        }

        perf_evlist__add_pollfd(evlist, fd);
        return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int cpu, thread;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                int output = -1;

                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        list_for_each_entry(evsel, &evlist->entries, node) {
                                int fd = FD(evsel, cpu, thread);

                                if (output == -1) {
                                        output = fd;
                                        if (__perf_evlist__mmap(evlist, cpu,
                                                                prot, mask, output) < 0)
                                                goto out_unmap;
                                } else {
                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                                goto out_unmap;
                                }

                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
                                        goto out_unmap;
                        }
                }
        }

        return 0;

out_unmap:
        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                if (evlist->mmap[cpu].base != NULL) {
                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
                        evlist->mmap[cpu].base = NULL;
                }
        }
        return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int thread;

        for (thread = 0; thread < evlist->threads->nr; thread++) {
                int output = -1;

                list_for_each_entry(evsel, &evlist->entries, node) {
                        int fd = FD(evsel, 0, thread);

                        if (output == -1) {
                                output = fd;
                                if (__perf_evlist__mmap(evlist, thread,
                                                        prot, mask, output) < 0)
                                        goto out_unmap;
                        } else {
                                if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                        goto out_unmap;
                        }

                        if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                            perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
                                goto out_unmap;
                }
        }

        return 0;

out_unmap:
        for (thread = 0; thread < evlist->threads->nr; thread++) {
                if (evlist->mmap[thread].base != NULL) {
                        munmap(evlist->mmap[thread].base, evlist->mmap_len);
                        evlist->mmap[thread].base = NULL;
                }
        }
        return -1;
}
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *      struct perf_mmap *m = &evlist->mmap[cpu];
 *      unsigned int head = perf_mmap__read_head(m);
 *
 *      perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite)
{
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

        /* 512 kiB: default amount of unprivileged mlocked memory */
        if (pages == UINT_MAX)
                pages = (512 * 1024) / page_size;
        else if (!is_power_of_2(pages))
                return -EINVAL;

        mask = pages * page_size - 1;

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;

        if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;

        evlist->overwrite = overwrite;
        evlist->mmap_len = (pages + 1) * page_size;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
                        return -ENOMEM;
        }

        if (cpu_map__all(cpus))
                return perf_evlist__mmap_per_thread(evlist, prot, mask);

        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
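
/*
 * Putting the pieces together (a sketch, assuming "target" was parsed
 * from the command line; error handling elided):
 *
 *      perf_evlist__create_maps(evlist, &target);
 *      perf_evlist__open(evlist);
 *      perf_evlist__mmap(evlist, UINT_MAX, false);     // 512 kiB default
 *
 *      while (poll(evlist->pollfd, evlist->nr_fds, -1) > 0) {
 *              for (i = 0; i < evlist->nr_mmaps; i++)
 *                      while ((event = perf_evlist__mmap_read(evlist, i)))
 *                              ... deliver event ...
 *      }
 */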

int perf_evlist__create_maps(struct perf_evlist *evlist,
                             struct perf_target *target)
{
        evlist->threads = thread_map__new_str(target->pid, target->tid,
                                              target->uid);

        if (evlist->threads == NULL)
                return -1;

        if (perf_target__has_task(target))
                evlist->cpus = cpu_map__dummy_new();
        else if (!perf_target__has_cpu(target) && !target->uses_mmap)
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(target->cpu_list);

        if (evlist->cpus == NULL)
                goto out_delete_threads;

        return 0;

out_delete_threads:
        thread_map__delete(evlist->threads);
        return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
        cpu_map__delete(evlist->cpus);
        thread_map__delete(evlist->threads);
        evlist->cpus    = NULL;
        evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = evlist->threads->nr;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->filter == NULL)
                        continue;

                err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
                if (err)
                        break;
        }

        return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = evlist->threads->nr;

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
                if (err)
                        break;
        }

        return err;
}
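
/*
 * E.g. restricting every event in the evlist to one pid (a sketch; the
 * per-fd ioctl means this must come after perf_evlist__open(), and the
 * filter syntax is the tracepoint one, so the events should be
 * tracepoints):
 *
 *      if (perf_evlist__set_filter(evlist, "common_pid != 1234") < 0)
 *              ...
 */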

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_type != pos->attr.sample_type)
                        return false;
        }

        return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct perf_sample *data;
        u64 sample_type;
        u16 size = 0;

        if (!first->attr.sample_id_all)
                goto out;

        sample_type = first->attr.sample_type;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;
out:
        return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)
                        return false;
        }

        return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err, ncpus, nthreads;

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
                if (err < 0)
                        goto out_err;
        }

        return 0;
out_err:
        ncpus = evlist->cpus ? evlist->cpus->nr : 1;
        nthreads = evlist->threads ? evlist->threads->nr : 1;

        list_for_each_entry_reverse(evsel, &evlist->entries, node)
                perf_evsel__close(evsel, ncpus, nthreads);

        errno = -err;
        return err;
}
798
799 int perf_evlist__prepare_workload(struct perf_evlist *evlist,
800                                   struct perf_record_opts *opts,
801                                   const char *argv[])
802 {
803         int child_ready_pipe[2], go_pipe[2];
804         char bf;
805
806         if (pipe(child_ready_pipe) < 0) {
807                 perror("failed to create 'ready' pipe");
808                 return -1;
809         }
810
811         if (pipe(go_pipe) < 0) {
812                 perror("failed to create 'go' pipe");
813                 goto out_close_ready_pipe;
814         }
815
816         evlist->workload.pid = fork();
817         if (evlist->workload.pid < 0) {
818                 perror("failed to fork");
819                 goto out_close_pipes;
820         }
821
822         if (!evlist->workload.pid) {
823                 if (opts->pipe_output)
824                         dup2(2, 1);
825
826                 close(child_ready_pipe[0]);
827                 close(go_pipe[1]);
828                 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
829
830                 /*
831                  * Do a dummy execvp to get the PLT entry resolved,
832                  * so we avoid the resolver overhead on the real
833                  * execvp call.
834                  */
835                 execvp("", (char **)argv);
836
837                 /*
838                  * Tell the parent we're ready to go
839                  */
840                 close(child_ready_pipe[1]);
841
842                 /*
843                  * Wait until the parent tells us to go.
844                  */
845                 if (read(go_pipe[0], &bf, 1) == -1)
846                         perror("unable to read pipe");
847
848                 execvp(argv[0], (char **)argv);
849
850                 perror(argv[0]);
851                 kill(getppid(), SIGUSR1);
852                 exit(-1);
853         }
854
855         if (perf_target__none(&opts->target))
856                 evlist->threads->map[0] = evlist->workload.pid;
857
858         close(child_ready_pipe[1]);
859         close(go_pipe[0]);
860         /*
861          * wait for child to settle
862          */
863         if (read(child_ready_pipe[0], &bf, 1) == -1) {
864                 perror("unable to read pipe");
865                 goto out_close_pipes;
866         }
867
868         evlist->workload.cork_fd = go_pipe[1];
869         close(child_ready_pipe[0]);
870         return 0;
871
872 out_close_pipes:
873         close(go_pipe[0]);
874         close(go_pipe[1]);
875 out_close_ready_pipe:
876         close(child_ready_pipe[0]);
877         close(child_ready_pipe[1]);
878         return -1;
879 }
880
881 int perf_evlist__start_workload(struct perf_evlist *evlist)
882 {
883         if (evlist->workload.cork_fd > 0) {
884                 /*
885                  * Remove the cork, let it rip!
886                  */
887                 return close(evlist->workload.cork_fd);
888         }
889
890         return 0;
891 }
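
/*
 * The intended handshake (a sketch, assuming opts carries the usual
 * record options): fork the workload corked on the "go" pipe, set up
 * and enable the counters, then uncork so nothing runs unmonitored:
 *
 *      perf_evlist__prepare_workload(evlist, &opts, argv);
 *      perf_evlist__open(evlist);
 *      perf_evlist__mmap(evlist, opts.mmap_pages, false);
 *      perf_evlist__enable(evlist);
 *      perf_evlist__start_workload(evlist);
 */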

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                              struct perf_sample *sample)
{
        struct perf_evsel *evsel = perf_evlist__first(evlist);
        return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *evsel;
        size_t printed = 0;

        list_for_each_entry(evsel, &evlist->entries, node) {
                printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
                                   perf_evsel__name(evsel));
        }

        return printed + fprintf(fp, "\n");
}