/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include "internal.h"

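/*
 * One perf_callchain_entry per recursion context for every possible
 * CPU, reached through the cpu_entries[] array below.  The whole
 * structure is freed via RCU so that lockless readers (including NMI
 * handlers) can safely dereference it.
 */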
struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[];
};

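/*
 * Per-CPU recursion counters, one slot per activation context
 * (PERF_NR_CONTEXTS covers task, softirq, hardirq and NMI), used to
 * detect and reject recursive callchain captures on the same CPU.
 */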
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

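/*
 * Weak default implementations; architectures with callchain support
 * override these with their own stack walkers.  Without arch support
 * the captured callchain simply stays empty.
 */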
__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)
{
}

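/*
 * RCU callback: once this runs, no reader that could have observed the
 * old callchain_cpus_entries pointer is still in a read-side critical
 * section, so the per-CPU buffers can be freed.
 */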
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

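/*
 * Unpublish the buffers, then defer the actual freeing to an RCU grace
 * period via release_callchain_buffers_rcu().  Called with
 * callchain_mutex held.
 */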
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

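/*
 * Allocate one set of buffers for every possible CPU: the container is
 * sized with offsetof() so cpu_entries[] holds nr_cpu_ids pointers, and
 * each per-CPU buffer is allocated on that CPU's local node.  On
 * failure everything is unwound; kfree(NULL) is a no-op, so freeing the
 * slots that were never filled (kzalloc() zeroed them) is fine.
 */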
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

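/*
 * Take a reference on the shared callchain buffers, allocating them on
 * the first user.  A sketch of the intended pairing from the event
 * setup and teardown paths:
 *
 *	err = get_callchain_buffers();		// on event creation
 *	...
 *	put_callchain_buffers();		// on event destruction
 */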
int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		goto exit;
	}

	err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

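/*
 * Drop a reference; the last user (the 1 -> 0 transition, taken under
 * callchain_mutex) releases the buffers.
 */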
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

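/*
 * Claim this CPU's entry for the current recursion context.  On return
 * *rctx holds the context index that must be handed back through
 * put_callchain_entry().  Callers run with preemption disabled (this
 * can be entered from NMI), which is what makes smp_processor_id() and
 * the rcu_dereference() below safe.
 */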
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}

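/*
 * Release the recursion context claimed by get_callchain_entry().
 */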
static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

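/*
 * Build the callchain for @event at the point described by @regs:
 * kernel and user frames are captured according to the event's
 * exclude_callchain_* attributes, each section prefixed with a
 * PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER marker.  A sketch of the
 * intended call site, simplified from the sample-preparation path:
 *
 *	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
 *		data->callchain = perf_callchain(event, regs);
 */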
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	int rctx;
	struct perf_callchain_entry *entry;

	int kernel = !event->attr.exclude_callchain_kernel;
	int user   = !event->attr.exclude_callchain_user;

	if (!kernel && !user)
		return NULL;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	entry->nr = 0;

	if (kernel && !user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			/*
			 * We interrupted the kernel: fall back to the
			 * task's saved user registers, provided there
			 * is a user space to unwind at all.
			 */
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			/*
			 * Disallow cross-task user callchains.
			 */
			if (event->ctx->task && event->ctx->task != current)
				goto exit_put;

			perf_callchain_store(entry, PERF_CONTEXT_USER);
			perf_callchain_user(entry, regs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}