/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

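/*
 * FD() yields the file descriptor and SID() the struct perf_sample_id entry
 * for a given (cpu, thread) slot of an evsel, looked up in its ->fd and
 * ->sample_id xyarrays.
 */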
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

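/*
 * Group handling: the first evsel on the list becomes the group leader and
 * its nr_members spans every entry up to and including the last one, based
 * on the evsel indexes assigned at creation time.
 */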
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

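	/*
	 * Without PERF_FORMAT_GROUP, read() returns the counter value,
	 * then time_enabled and/or time_running if requested, then the
	 * id.  id_idx is bumped below to skip the optional time fields.
	 */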
	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

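/*
 * id_pos is the offset (in u64s) of PERF_SAMPLE_ID/PERF_SAMPLE_IDENTIFIER
 * from the start of a PERF_RECORD_SAMPLE, while is_pos is the offset of
 * the id counted from the end of the sample_id block carried by the other
 * record types.
 */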
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

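/*
 * Return the next event in the ring buffer at mmap index idx, i.e. the
 * events between the last position consumed (md->prev) and the current
 * kernel head.  An event that wraps around the end of the buffer is
 * copied into md->event_copy so callers always see a contiguous record.
 */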
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}

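/*
 * Tell the kernel how far we have read: when not in overwrite mode the
 * tail pointer is advanced to md->prev so the space occupied by consumed
 * events can be reused by the writer.
 */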
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	if (!evlist->overwrite) {
		struct perf_mmap *md = &evlist->mmap[idx];
		unsigned int old = md->prev;

		perf_mmap__write_tail(md, old);
	}
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

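/*
 * One ring buffer is created per cpu: the first event opened on a cpu is
 * mmapped and every other event on that cpu is redirected into the same
 * buffer with PERF_EVENT_IOC_SET_OUTPUT, so a single mmap carries all of
 * that cpu's events.
 */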
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

/**
 * perf_evlist__mmap - Create per cpu maps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is false the user needs to signal event consumption, e.g.:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head);
 *
 * perf_evlist__mmap_consume() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus	= NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

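/*
 * Size of the sample_id block appended to non-sample records when
 * sample_id_all is set, derived from the first evsel's sample_type.
 */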
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

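/*
 * Fork the workload but keep it "corked": the child signals readiness on
 * child_ready_pipe, then blocks on a read of go_pipe and only exec's argv
 * once perf_evlist__start_workload() writes to the other end.
 */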
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}