/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>

static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

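/*
 * Illustrative sketch (not part of the original file): the typical lifecycle
 * of an evlist as tools use it.  The bare-bones error handling and the
 * UINT_MAX "pick a default mmap size" value are assumptions for brevity.
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (perf_evlist__open(evlist) < 0)
 *		goto out_delete;
 *	if (perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
 *		goto out_delete;
 *	...consume events, see perf_evlist__mmap_read() below...
 * out_delete:
 *	perf_evlist__delete(evlist);
 */
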
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}

void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}

	evlist->enabled = false;
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread, err;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
{
	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].idx = idx;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, -1);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
{
	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);

	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered);
}

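/*
 * Illustrative sketch (not part of the original file): a polling loop that
 * prunes hung-up fds so the mmaps backing them can be released once their
 * last events are consumed; "done" and "timeout" are assumed in the caller.
 *
 *	while (!done) {
 *		if (perf_evlist__poll(evlist, timeout) < 0)
 *			break;
 *		...drain the mmaps with perf_evlist__mmap_read()...
 *		if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
 *			break;	// every fd hung up, nothing left to poll
 *	}
 */
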
int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id... All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

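/*
 * Worked example (illustrative): with PERF_SAMPLE_IDENTIFIER the id is the
 * first u64 of a sample's payload (id_pos == 0), while on non-sample events
 * the sample_id trailer puts it at the very end of the record, so is_pos
 * counts from the end and the "n -= evlist->is_pos" indexing above lands on
 * the last u64.
 */
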
static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	u64 head;
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}

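/*
 * Illustrative sketch (not part of the original file): the canonical
 * consumer loop pairing perf_evlist__mmap_read() with
 * perf_evlist__mmap_consume(); "evlist" and the event handling are assumed
 * to exist in the caller.
 *
 *	union perf_event *event;
 *	struct perf_sample sample;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
 *			if (perf_evlist__parse_sample(evlist, event, &sample) == 0)
 *				...handle the event...
 *			perf_evlist__mmap_consume(evlist, i);
 *		}
 *	}
 *
 * Consuming advances the tail pointer (when not in overwrite mode) and
 * drops the final reference on an mmap whose fds already got POLLHUP'ed.
 */
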
static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}

static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
{
	atomic_inc(&evlist->mmap[idx].refcnt);
}

static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
{
	BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0);

	if (atomic_dec_and_test(&evlist->mmap[idx].refcnt))
		__perf_evlist__munmap(evlist, idx);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	if (!evlist->overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_evlist__mmap_put(evlist, idx);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
		atomic_set(&evlist->mmap[idx].refcnt, 0);
	}
	auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap == NULL)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	zfree(&evlist->mmap);
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

struct mmap_params {
	int prot;
	int mask;
	struct auxtrace_mmap_params auxtrace_mp;
};

static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	atomic_set(&evlist->mmap[idx].refcnt, 2);
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mp->mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
		return -1;

	return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *output)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		int fd;

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;
			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_evlist__mmap_get(evlist, idx);
		}

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
			perf_evlist__mmap_put(evlist, idx);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX) {
		int max;

		if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
			/*
			 * Pick a once upon a time good value, i.e. things look
			 * strange since we can't read a sysctl value, but let's not
			 * die yet and fall back to something reasonable.
			 */
			max = 512;
		} else {
			max -= (page_size / 1024);
		}

		pages = (max * 1024) / page_size;
		if (!is_power_of_2(pages))
			pages = rounddown_pow_of_two(pages);
	} else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

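/*
 * Worked example (assumed values): with 4 KiB pages and
 * kernel/perf_event_mlock_kb = 516, max becomes 516 - 4 = 512 kB, i.e.
 * 128 pages, already a power of 2.  The result is then
 * (128 + 1) * 4096 bytes: the extra page is the kernel's control page
 * holding the ring buffer's head/tail pointers.
 */
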
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;

		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

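/*
 * Examples of accepted arguments (illustrative, assuming 4 KiB pages):
 * "129" is a plain page count and gets rounded up to 256 pages; "512K"
 * carries a size suffix, so it is aligned to the page size and becomes
 * 128 pages.  A value exceeding @max, or a string with trailing garbage,
 * yields -EINVAL.
 */
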
int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

/**
 * perf_evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 bool overwrite, unsigned int auxtrace_pages,
			 bool auxtrace_overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
}

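/*
 * Illustrative sketch (not part of the original file): a record-style setup
 * combining the pieces above; the "opts" structure and its fields are
 * assumptions for the example.
 *
 *	err = perf_evlist__create_maps(evlist, &opts->target);
 *	if (err < 0)
 *		goto out;
 *	err = perf_evlist__open(evlist);
 *	if (err < 0)
 *		goto out;
 *	err = perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *	if (err < 0)
 *		goto out;
 *	perf_evlist__enable(evlist);
 */
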
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	struct cpu_map *cpus;
	struct thread_map *threads;

	threads = thread_map__new_str(target->pid, target->tid, target->uid);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = cpu_map__dummy_new();
	else
		cpus = cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(evlist, cpus, threads);

	return 0;

out_delete_threads:
	thread_map__put(threads);
	return -1;
}

void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		cpu_map__put(evlist->cpus);
		evlist->cpus = cpus;
	}

	if (threads != evlist->threads) {
		thread_map__put(evlist->threads);
		evlist->threads = threads;
	}

	perf_evlist__propagate_maps(evlist);
}

int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters only work for tracepoint events, which don't have a
		 * cpu limit, so the evlist's and the evsel's maps should
		 * always be the same.
		 */
		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter;
	int ret = -1;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return -1;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free_filter;

			free(filter);
			filter = tmp;
		}
	}

	ret = perf_evlist__set_filter(evlist, filter);
out_free_filter:
	free(filter);
	return ret;
}

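/*
 * For example (illustrative): passing pids {1, 2} builds the tracepoint
 * filter string "common_pid != 1 && common_pid != 2" and applies it to
 * every evsel in the list, which is how a tool like perf trace keeps its
 * own pid out of the trace.
 */
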
int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
{
	return perf_evlist__set_filter_pids(evlist, 1, &pid);
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each(evlist, evsel)
		branch_type |= evsel->attr.branch_sample_type;
	return branch_type;
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

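/*
 * Worked example (assumed sample_type): with sample_id_all set and
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID, the sample_id
 * trailer appended to non-sample events is 8 (pid + tid) + 8 (time) +
 * 8 (id) = 24 bytes.
 */
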
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);
	int n;

	evlist__for_each_reverse(evlist, evsel) {
		n = evsel->cpus ? evsel->cpus->nr : ncpus;
		perf_evsel__close(evsel, n, nthreads);
	}
}

static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
	struct cpu_map *cpus;
	struct thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(evlist, cpus, threads);

	err = 0;
out:
	return err;
out_put:
	cpu_map__put(cpus);
	goto out;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

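/*
 * Illustrative sketch (not part of the original file): the cork/uncork
 * sequence as record/stat-style tools use it; argv handling and "opts" are
 * assumptions.
 *
 *	if (perf_evlist__prepare_workload(evlist, &opts->target, argv,
 *					  false, NULL) < 0)
 *		return -1;
 *	...open and mmap the events first, so nothing is lost...
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);	// writes the single byte
 *	...only now does the forked child execvp() the workload...
 */
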
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}

void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel)
{
	struct perf_evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}