opd_sfile.c revision cc2ee177dbb3befca43e36cfc56778b006c3d050
/**
 * @file daemon/opd_sfile.c
 * Management of sample files
 *
 * @remark Copyright 2002, 2005 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 */

#include "opd_sfile.h"

#include "opd_trans.h"
#include "opd_kernel.h"
#include "opd_mangling.h"
#include "opd_anon.h"
#include "opd_printf.h"
#include "opd_stats.h"
#include "oprofiled.h"

#include "op_libiberty.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* HASH_SIZE must stay a power of two: HASH_BITS is used as a bit mask. */
#define HASH_SIZE 2048
#define HASH_BITS (HASH_SIZE - 1)

/** All sfiles are hashed into these lists */
static struct list_head hashes[HASH_SIZE];

/** All sfiles are on this list. */
static LIST_HEAD(lru_list);


/* FIXME: can undoubtedly improve this hashing */
/** Hash the transient parameters for lookup. */
static unsigned long
sfile_hash(struct transient const * trans, struct kernel_image * ki)
{
	unsigned long val = 0;

	if (separate_thread) {
		val ^= trans->tid << 2;
		val ^= trans->tgid << 2;
	}

	if (separate_kernel || ((trans->anon || separate_lib) && !ki))
		val ^= trans->app_cookie >> (DCOOKIE_SHIFT + 3);

	if (separate_cpu)
		val ^= trans->cpu;

	/* cookie meaningless for kernel, shouldn't hash */
	if (trans->in_kernel) {
		val ^= ki->start >> 14;
		val ^= ki->end >> 7;
		return val & HASH_BITS;
	}

	if (trans->cookie != NO_COOKIE) {
		val ^= trans->cookie >> DCOOKIE_SHIFT;
		return val & HASH_BITS;
	}

	if (!separate_thread)
		val ^= trans->tgid << 2;

	if (trans->anon) {
		val ^= trans->anon->start >> VMA_SHIFT;
		val ^= trans->anon->end >> (VMA_SHIFT + 1);
	}

	return val & HASH_BITS;
}


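/**
 * Return non-zero if @sf matches the identifying parameters passed in.
 * Which fields are compared depends on the separation options in effect
 * (separate_thread, separate_cpu, separate_kernel, separate_lib); for a
 * kernel sfile the cookie is ignored, since the kernel image and
 * (optionally) the app_cookie are enough to identify it.
 */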
static int
do_match(struct sfile const * sf, cookie_t cookie, cookie_t app_cookie,
         struct kernel_image const * ki, struct anon_mapping const * anon,
         pid_t tgid, pid_t tid, unsigned int cpu)
{
	/* this is a simplified check for "is a kernel image" AND
	 * "is the right kernel image". Also handles no-vmlinux
	 * correctly.
	 */
	if (sf->kernel != ki)
		return 0;

	if (separate_thread) {
		if (sf->tid != tid || sf->tgid != tgid)
			return 0;
	}

	if (separate_cpu) {
		if (sf->cpu != cpu)
			return 0;
	}

	if (separate_kernel || ((anon || separate_lib) && !ki)) {
		if (sf->app_cookie != app_cookie)
			return 0;
	}

	/* ignore the cached trans->cookie for kernel images,
	 * it's meaningless and we checked all others already
	 */
	if (ki)
		return 1;

	if (sf->anon != anon)
		return 0;

	return sf->cookie == cookie;
}


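/**
 * trans_match() matches an sfile against the live transient state;
 * sfile_equal() compares two sfiles using the same criteria as do_match().
 */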
static int
trans_match(struct transient const * trans, struct sfile const * sfile,
            struct kernel_image const * ki)
{
	return do_match(sfile, trans->cookie, trans->app_cookie, ki,
	                trans->anon, trans->tgid, trans->tid, trans->cpu);
}


static int
sfile_equal(struct sfile const * sf, struct sfile const * sf2)
{
	return do_match(sf, sf2->cookie, sf2->app_cookie, sf2->kernel,
	                sf2->anon, sf2->tgid, sf2->tid, sf2->cpu);
}


static int
is_sf_ignored(struct sfile const * sf)
{
	if (sf->kernel) {
		if (!is_image_ignored(sf->kernel->name))
			return 0;

		/* Let a dependent kernel image redeem the sf if we're
		 * executing on behalf of an application.
		 */
		return is_cookie_ignored(sf->app_cookie);
	}

	/* Anon regions are always dependent on the application.
	 * Otherwise, let a dependent image redeem the sf.
	 */
	if (sf->anon || is_cookie_ignored(sf->cookie))
		return is_cookie_ignored(sf->app_cookie);

	return 0;
}


/** create a new sfile matching the current transient parameters */
static struct sfile *
create_sfile(unsigned long hash, struct transient const * trans,
             struct kernel_image * ki)
{
	size_t i;
	struct sfile * sf;

	sf = xmalloc(sizeof(struct sfile));

	sf->hashval = hash;

	/* The logic here: if we're in the kernel, the cached cookie is
	 * meaningless (though not the app_cookie if separate_kernel)
	 */
	sf->cookie = trans->in_kernel ? INVALID_COOKIE : trans->cookie;
	sf->app_cookie = INVALID_COOKIE;
	sf->tid = (pid_t)-1;
	sf->tgid = (pid_t)-1;
	sf->cpu = 0;
	sf->kernel = ki;
	sf->anon = trans->anon;

	for (i = 0 ; i < op_nr_counters ; ++i)
		odb_init(&sf->files[i]);

	for (i = 0; i < CG_HASH_SIZE; ++i)
		list_init(&sf->cg_hash[i]);

	if (separate_thread)
		sf->tid = trans->tid;
	if (separate_thread || trans->cookie == NO_COOKIE)
		sf->tgid = trans->tgid;

	if (separate_cpu)
		sf->cpu = trans->cpu;

	if (separate_kernel || ((trans->anon || separate_lib) && !ki))
		sf->app_cookie = trans->app_cookie;

	sf->ignored = is_sf_ignored(sf);

	return sf;
}


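/**
 * Find the sfile for the current transient parameters, creating one if
 * no matching sfile is hashed yet.  Returns NULL (and bumps the relevant
 * opd_stats counter) when the sample cannot be attributed: unknown
 * kernel/user context, a kernel PC outside any known kernel image, or a
 * user PC with neither a cookie nor an anon mapping.  The returned sfile
 * is moved to the tail of the LRU list.
 */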
struct sfile * sfile_find(struct transient const * trans)
{
	struct sfile * sf;
	struct list_head * pos;
	struct kernel_image * ki = NULL;
	unsigned long hash;

	if (trans->tracing != TRACING_ON) {
		opd_stats[OPD_SAMPLES]++;
		opd_stats[trans->in_kernel == 1 ? OPD_KERNEL : OPD_PROCESS]++;
	}

	/* There is a small race where this *can* happen, see
	 * caller of cpu_buffer_reset() in the kernel
	 */
	if (trans->in_kernel == -1) {
		verbprintf(vsamples, "Losing sample at 0x%llx of unknown provenance.\n",
		           trans->pc);
		opd_stats[OPD_NO_CTX]++;
		return NULL;
	}

	/* we might need a kernel image start/end to hash on */
	if (trans->in_kernel) {
		ki = find_kernel_image(trans);
		if (!ki) {
			verbprintf(vsamples, "Lost kernel sample %llx\n", trans->pc);
			opd_stats[OPD_LOST_KERNEL]++;
			return NULL;
		}
	} else if (trans->cookie == NO_COOKIE && !trans->anon) {
		if (vsamples) {
			char const * app = verbose_cookie(trans->app_cookie);
			printf("No anon map for pc %llx, app %s.\n",
			       trans->pc, app);
		}
		opd_stats[OPD_LOST_NO_MAPPING]++;
		return NULL;
	}

	hash = sfile_hash(trans, ki);
	list_for_each(pos, &hashes[hash]) {
		sf = list_entry(pos, struct sfile, hash);
		if (trans_match(trans, sf, ki)) {
			sfile_get(sf);
			goto lru;
		}
	}

	sf = create_sfile(hash, trans, ki);
	list_add(&sf->hash, &hashes[hash]);

lru:
	sfile_put(sf);
	return sf;
}


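/**
 * Copy the identifying fields of @from into @to, but give the copy its
 * own freshly initialised odb files and empty list heads; used to seed
 * the 'to' side of a call-graph entry.
 */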
static void sfile_dup(struct sfile * to, struct sfile * from)
{
	size_t i;

	memcpy(to, from, sizeof (struct sfile));

	for (i = 0 ; i < op_nr_counters ; ++i)
		odb_init(&to->files[i]);

	for (i = 0; i < CG_HASH_SIZE; ++i)
		list_init(&to->cg_hash[i]);

	list_init(&to->hash);
	list_init(&to->lru);
}


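/**
 * Return the odb file into which the current sample (or arc) should be
 * inserted, opening it on first use.  For a call-graph sample (is_cg),
 * the file lives in a cg_entry matching trans->last, created on demand
 * and hashed off trans->current.  Returns NULL if the sample file could
 * not be opened.
 */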
static odb_t * get_file(struct transient const * trans, int is_cg)
{
	struct sfile * sf = trans->current;
	struct sfile * last = trans->last;
	struct cg_entry * cg;
	struct list_head * pos;
	unsigned long hash;
	odb_t * file;

	if (trans->event >= op_nr_counters) {
		fprintf(stderr, "%s: Invalid counter %lu\n", __FUNCTION__,
			trans->event);
		abort();
	}

	file = &sf->files[trans->event];

	if (!is_cg)
		goto open;

	hash = last->hashval & (CG_HASH_SIZE - 1);

	/* Need to look for the right 'to'. Since we're looking for
	 * 'last', we use its hash.
	 */
	list_for_each(pos, &sf->cg_hash[hash]) {
		cg = list_entry(pos, struct cg_entry, hash);
		if (sfile_equal(last, &cg->to)) {
			file = &cg->to.files[trans->event];
			goto open;
		}
	}

	cg = xmalloc(sizeof(struct cg_entry));
	sfile_dup(&cg->to, last);
	list_add(&cg->hash, &sf->cg_hash[hash]);
	file = &cg->to.files[trans->event];

open:
	if (!odb_open_count(file))
		opd_open_sample_file(file, last, sf, trans->event, is_cg);

	/* Error is logged by opd_open_sample_file */
	if (!odb_open_count(file))
		return NULL;

	return file;
}


static void verbose_print_sample(struct sfile * sf, vma_t pc, uint counter)
{
	char const * app = verbose_cookie(sf->app_cookie);
	printf("0x%llx(%u): ", pc, counter);
	if (sf->anon) {
		printf("anon (tgid %u, 0x%llx-0x%llx), ",
		       (unsigned int)sf->anon->tgid,
		       sf->anon->start, sf->anon->end);
	} else if (sf->kernel) {
		printf("kern (name %s, 0x%llx-0x%llx), ", sf->kernel->name,
		       sf->kernel->start, sf->kernel->end);
	} else {
		printf("%s(%llx), ", verbose_cookie(sf->cookie), sf->cookie);
	}
	printf("app %s(%llx)", app, sf->app_cookie);
}


static void verbose_sample(struct transient const * trans, vma_t pc)
{
	printf("Sample ");
	verbose_print_sample(trans->current, pc, trans->event);
	printf("\n");
}


static void
verbose_arc(struct transient const * trans, vma_t from, vma_t to)
{
	printf("Arc ");
	verbose_print_sample(trans->current, from, trans->event);
	printf(" -> 0x%llx", to);
	printf("\n");
}


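/**
 * Record one call-graph arc (trans->pc -> trans->last_pc) in the
 * call-graph sample file.  Both addresses are first converted from
 * absolute VMAs to image offsets, then packed into a single 64-bit key
 * with the 'from' offset in the high 32 bits and the 'to' offset in the
 * low 32 bits.
 */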
static void sfile_log_arc(struct transient const * trans)
{
	int err;
	vma_t from = trans->pc;
	vma_t to = trans->last_pc;
	uint64_t key;
	odb_t * file;

	file = get_file(trans, 1);

	/* absolute value -> offset */
	if (trans->current->kernel)
		from -= trans->current->kernel->start;

	if (trans->last->kernel)
		to -= trans->last->kernel->start;

	if (trans->current->anon)
		from -= trans->current->anon->start;

	if (trans->last->anon)
		to -= trans->last->anon->start;

	if (varcs)
		verbose_arc(trans, from, to);

	if (!file) {
		opd_stats[OPD_LOST_SAMPLEFILE]++;
		return;
	}

	/* Possible narrowings to 32-bit value only. */
	key = to & (0xffffffff);
	key |= ((uint64_t)from) << 32;

	err = odb_insert(file, key, 1);
	if (err) {
		fprintf(stderr, "%s: %s\n", __FUNCTION__, strerror(err));
		abort();
	}
}


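/**
 * Log one sample against trans->current.  When call-graph tracing is on,
 * the sample is logged as an arc from the previous sample instead (if
 * there was one).  The PC is converted from an absolute VMA to an offset
 * within the kernel image or anon mapping before insertion.
 */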
void sfile_log_sample(struct transient const * trans)
{
	int err;
	vma_t pc = trans->pc;
	odb_t * file;

	if (trans->tracing == TRACING_ON) {
		/* can happen if kernel sample falls through the cracks,
		 * see opd_put_sample() */
		if (trans->last)
			sfile_log_arc(trans);
		return;
	}

	file = get_file(trans, 0);

	/* absolute value -> offset */
	if (trans->current->kernel)
		pc -= trans->current->kernel->start;

	if (trans->current->anon)
		pc -= trans->current->anon->start;

	if (vsamples)
		verbose_sample(trans, pc);

	if (!file) {
		opd_stats[OPD_LOST_SAMPLEFILE]++;
		return;
	}

	err = odb_insert(file, (uint64_t)pc, 1);
	if (err) {
		fprintf(stderr, "%s: %s\n", __FUNCTION__, strerror(err));
		abort();
	}
}


static int close_sfile(struct sfile * sf)
{
	size_t i;

	/* it's OK to close a non-open odb file */
	for (i = 0; i < op_nr_counters; ++i)
		odb_close(&sf->files[i]);

	return 0;
}


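/**
 * Close an sfile's sample files and unlink it from the hash and LRU
 * lists.  The sfile itself is not freed here; for_one_sfile() does that.
 */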
static void kill_sfile(struct sfile * sf)
{
	close_sfile(sf);
	list_del(&sf->hash);
	list_del(&sf->lru);
}


static int sync_sfile(struct sfile * sf)
{
	size_t i;

	for (i = 0; i < op_nr_counters; ++i)
		odb_sync(&sf->files[i]);

	return 0;
}


static int is_sfile_kernel(struct sfile * sf)
{
	return !!sf->kernel;
}


static int is_sfile_anon(struct sfile * sf)
{
	return !!sf->anon;
}


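/**
 * Apply @func to @sf and to every call-graph 'to' sfile hanging off it.
 * A non-zero return from @func means "free this sfile": matching cg
 * entries are killed and freed, and if @func returned non-zero for @sf
 * itself, all of its cg entries go with it before @sf is freed.
 */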
static void for_one_sfile(struct sfile * sf, int (*func)(struct sfile *))
{
	size_t i;
	int free_sf = func(sf);

	for (i = 0; i < CG_HASH_SIZE; ++i) {
		struct list_head * pos;
		struct list_head * pos2;
		list_for_each_safe(pos, pos2, &sf->cg_hash[i]) {
			struct cg_entry * cg =
				list_entry(pos, struct cg_entry, hash);
			if (free_sf || func(&cg->to)) {
				kill_sfile(&cg->to);
				list_del(&cg->hash);
				free(cg);
			}
		}
	}

	if (free_sf) {
		kill_sfile(sf);
		free(sf);
	}
}


static void for_each_sfile(int (*func)(struct sfile *))
{
	struct list_head * pos;
	struct list_head * pos2;

	list_for_each_safe(pos, pos2, &lru_list) {
		struct sfile * sf = list_entry(pos, struct sfile, lru);
		for_one_sfile(sf, func);
	}
}


void sfile_clear_kernel(void)
{
	for_each_sfile(is_sfile_kernel);
}


void sfile_clear_anon(void)
{
	for_each_sfile(is_sfile_anon);
}


void sfile_sync_files(void)
{
	for_each_sfile(sync_sfile);
}


void sfile_close_files(void)
{
	for_each_sfile(close_sfile);
}


/* Used with for_one_sfile() to free an sfile unconditionally. */
static int always_true(struct sfile * sf)
{
	(void)sf;
	return 1;
}


#define LRU_AMOUNT 256

/*
 * Clear out older sfiles. Note the current sfiles we're using
 * will not be present in this list, due to sfile_get/put() pairs
 * around the caller of this.
 */
int sfile_lru_clear(void)
{
	struct list_head * pos;
	struct list_head * pos2;
	int amount = LRU_AMOUNT;

	if (list_empty(&lru_list))
		return 1;

	list_for_each_safe(pos, pos2, &lru_list) {
		struct sfile * sf;
		if (!--amount)
			break;
		sf = list_entry(pos, struct sfile, lru);
		for_one_sfile(sf, always_true);
	}

	return 0;
}


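/**
 * sfile_get()/sfile_put() pin and unpin an sfile against LRU clearing:
 * get removes it from the LRU list while it is in use, put re-adds it at
 * the tail (most recently used) end.
 */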
void sfile_get(struct sfile * sf)
{
	if (sf)
		list_del(&sf->lru);
}


void sfile_put(struct sfile * sf)
{
	if (sf)
		list_add_tail(&sf->lru, &lru_list);
}


void sfile_init(void)
{
	size_t i = 0;

	for (; i < HASH_SIZE; ++i)
		list_init(&hashes[i]);
}
609