#ifndef _PERF_PERF_H
#define _PERF_PERF_H

#include <asm/unistd.h>

#if defined(__i386__)
#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC	"model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#endif

#if defined(__x86_64__)
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC	"model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define rmb()		asm volatile ("sync" ::: "memory")
#define cpu_relax()	asm volatile ("" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __s390__
#define rmb()		asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define rmb()		asm volatile("synco" ::: "memory")
#else
# define rmb()		asm volatile("" ::: "memory")
#endif
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu type"
#endif

#ifdef __hppa__
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __sparc__
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __alpha__
#define rmb()		asm volatile("mb" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu model"
#endif

#ifdef __ia64__
#define rmb()		asm volatile ("mf" ::: "memory")
#define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC	"model name"
#endif

#ifdef __arm__
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb()		((void(*)(void))0xffff0fa0)()
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"Processor"
#endif

#ifdef __aarch64__
#define rmb()		asm volatile("dmb ld" ::: "memory")
#define cpu_relax()	asm volatile("yield" ::: "memory")
#endif

#ifdef __mips__
#define rmb()		asm volatile(					\
				".set	mips2\n\t"			\
				"sync\n\t"				\
				".set	mips0"				\
				: /* no output */			\
				: /* no input */			\
				: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu model"
#endif

#ifdef __arc__
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	rmb()
#define CPUINFO_PROC	"Processor"
#endif

#ifdef __metag__
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"CPU"
#endif
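
/*
 * rmb() pairs with the kernel's writes to the perf mmap ring buffer:
 * the data_head load must complete before the records behind it are
 * read. A minimal consumer sketch, assuming "base" points at the
 * mmap'ed region and consume_one_record() is a stand-in for actual
 * record parsing (both names are illustrative, not defined here):
 *
 *	struct perf_event_mmap_page *pc = base;
 *	u64 head = pc->data_head;
 *	rmb();
 *	while (tail < head)
 *		tail += consume_one_record(base, tail);
 *	pc->data_tail = tail;
 *
 * cpu_relax() is for the busy-wait side of such loops: on x86 it is
 * the "pause" hint ("rep; nop"), elsewhere a plain compiler barrier.
 */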

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <linux/perf_event.h>
#include "util/types.h"
#include <stdbool.h>

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE   31
#define PR_TASK_PERF_EVENTS_ENABLE    32
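
/*
 * Usage is a bare prctl() pair around the region whose events should
 * not be counted (a sketch; these commands take no extra arguments):
 *
 *	prctl(PR_TASK_PERF_EVENTS_DISABLE);
 *	... uncounted code ...
 *	prctl(PR_TASK_PERF_EVENTS_ENABLE);
 */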

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC			1000000000ULL
#endif
#ifndef NSEC_PER_USEC
# define NSEC_PER_USEC			1000ULL
#endif

static inline unsigned long long rdclock(void)
{
#ifdef __APPLE__
	perror("no clock_gettime");
	return 0;
#else
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
#endif
}
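
/*
 * Since rdclock() returns monotonic nanoseconds, interval timing is a
 * plain subtraction, e.g.:
 *
 *	u64 t0 = rdclock();
 *	... work ...
 *	u64 delta_ns = rdclock() - t0;
 */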

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x)	__builtin_expect(!!(x), 0)
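
/*
 * min() evaluates each argument exactly once; the otherwise pointless
 * comparison (void) (&_min1 == &_min2) exists only so the compiler
 * warns when x and y have incompatible types, matching the kernel's
 * version of this macro.
 */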
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

extern bool test_attr__enabled;
void test_attr__init(void);
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
		     int fd, int group_fd, unsigned long flags);

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		      pid_t pid, int cpu, int group_fd,
		      unsigned long flags)
{
	int fd;

	fd = syscall(__NR_perf_event_open, attr, pid, cpu,
		     group_fd, flags);

	if (unlikely(test_attr__enabled))
		test_attr__open(attr, pid, cpu, fd, group_fd, flags);

	return fd;
}
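
/*
 * A minimal usage sketch (not part of this header): count CPU cycles
 * in the calling task on any CPU, all other attr fields left zeroed.
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 *
 * A negative return means the syscall failed and errno is set;
 * pid == 0 selects the current task, cpu == -1 any CPU, and
 * group_fd == -1 starts a new event group.
 */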

#define MAX_COUNTERS			256
#define MAX_NR_CPUS			256

struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

struct branch_flags {
	u64 mispred:1;
	u64 predicted:1;
	u64 reserved:62;
};

struct branch_entry {
	u64				from;
	u64				to;
	struct branch_flags flags;
};

struct branch_stack {
	u64				nr;
	struct branch_entry	entries[0];
};
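
/*
 * These mirror the kernel's variable-length sample payloads:
 * ip_callchain carries PERF_SAMPLE_CALLCHAIN data and branch_stack
 * carries PERF_SAMPLE_BRANCH_STACK data, so both are walked with an
 * nr-bounded loop, e.g. with a hypothetical symbolize() helper:
 *
 *	for (i = 0; i < chain->nr; i++)
 *		symbolize(chain->ips[i]);
 */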

extern const char *input_name;
extern bool perf_host, perf_guest;
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#include "util/target.h"

enum perf_call_graph_mode {
	CALLCHAIN_NONE,
	CALLCHAIN_FP,
	CALLCHAIN_DWARF
};

struct perf_record_opts {
	struct perf_target target;
	int	     call_graph;
	bool	     group;
	bool	     inherit_stat;
	bool	     no_delay;
	bool	     no_inherit;
	bool	     no_samples;
	bool	     pipe_output;
	bool	     raw_samples;
	bool	     sample_address;
	bool	     sample_weight;
	bool	     sample_time;
	bool	     period;
	unsigned int freq;
	unsigned int mmap_pages;
	unsigned int user_freq;
	u64          branch_stack;
	u64	     default_interval;
	u64	     user_interval;
	u16	     stack_dump_size;
};

#endif