/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"

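/* Per-event file descriptors live in a 2D (cpu x thread) xyarray. */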
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

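/*
 * Each bit set in the fixed-layout part of sample_type (PERF_SAMPLE_MASK)
 * contributes exactly one u64 to a PERF_RECORD_SAMPLE, so the static part
 * of the sample size is the popcount of those bits times sizeof(u64).
 */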
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->attr	   = *attr;
	INIT_LIST_HEAD(&evsel->node);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

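/*
 * Allocate the fd matrix and mark every slot as unopened (-1), so readers
 * and perf_evsel__close_fd() can tell which counters actually exist.
 */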
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++)
				FD(evsel, cpu, thread) = -1;
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

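/*
 * sample_id is a (cpu x thread) matrix of struct perf_sample_id and id is
 * a flat array of kernel-assigned event IDs; together they let a sample be
 * mapped back to the evsel it came from.
 */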
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}

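/*
 * Read one counter for a single cpu/thread. With scale, the kernel also
 * returns time_enabled and time_running (the fd must have been opened with
 * PERF_FORMAT_TOTAL_TIME_ENABLED|RUNNING in attr.read_format), and the
 * value is extrapolated by ena/run to compensate for multiplexing.
 */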
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;	/* value [+ time_enabled, time_running] */

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

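/*
 * Sum one counter over all cpus and threads into counts->aggr. scaled is
 * set to -1 if the counter never ran (value forced to 0) and to 1 if it
 * ran for less time than it was enabled and the value was extrapolated.
 */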
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

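/*
 * Open one fd per (cpu, thread) pair via sys_perf_event_open(). When a
 * cgroup is attached, its fd is passed in place of a pid together with
 * PERF_FLAG_PID_CGROUP. With group, the first counter opened on each cpu
 * becomes the group leader for the remaining threads on that cpu.
 */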
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group)
{
/* ANDROID_CHANGE_BEGIN */
#ifndef __APPLE__
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	/* Unwind: close every fd opened so far, walking back from the failure. */
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
#else
	return -1;
#endif
/* ANDROID_CHANGE_END */
}

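/*
 * Placeholder maps used when the caller passes NULL: the single -1 entry
 * is handed straight to sys_perf_event_open(), where pid == -1 means all
 * threads and cpu == -1 means any cpu.
 */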
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
}

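/*
 * For non-sample records with sample_id_all, the selected PERF_SAMPLE_*
 * fields are appended at the tail of the event, so they are parsed
 * backwards starting from the last u64 of the record.
 */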
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}

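/* Return true if reading size bytes at offset would run past the event. */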
static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

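/*
 * Parse a PERF_RECORD_SAMPLE front to back in PERF_SAMPLE_* bit order;
 * for other record types fall back to the trailing sample_id_all data.
 * Variable-size fields (callchain, raw) are bounds-checked against the
 * event size before being dereferenced.
 */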
int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data)
{
	const u64 *array;

	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = *p;
		p++;

		if (sample_overlap(event, p, data->raw_size))
			return -EFAULT;

		data->raw_data = p;
	}

	return 0;
}