#ifndef _PERF_PERF_H
#define _PERF_PERF_H

struct winsize;

void get_term_dimensions(struct winsize *ws);

#if defined(__i386__)
/* ANDROID_CHANGE_BEGIN */
#if 0
#include "../../arch/x86/include/asm/unistd.h"
#elif !defined(__APPLE__)
#include <asm/unistd.h>
#endif
/* ANDROID_CHANGE_END */
#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#endif

#if defined(__x86_64__)
/* ANDROID_CHANGE_BEGIN */
#if 0
#include "../../arch/x86/include/asm/unistd.h"
#elif !defined(__APPLE__)
#include <asm/unistd.h>
#endif
/* ANDROID_CHANGE_END */
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/asm/unistd.h"
#define rmb()		asm volatile ("sync" ::: "memory")
#define cpu_relax()	asm volatile ("" ::: "memory")
#endif

#ifdef __s390__
#include "../../arch/s390/include/asm/unistd.h"
#define rmb()		asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __sh__
#include "../../arch/sh/include/asm/unistd.h"
#if defined(__SH4A__) || defined(__SH5__)
# define rmb()		asm volatile("synco" ::: "memory")
#else
# define rmb()		asm volatile("" ::: "memory")
#endif
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __hppa__
#include "../../arch/parisc/include/asm/unistd.h"
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __sparc__
#include "../../arch/sparc/include/asm/unistd.h"
#define rmb()		asm volatile("":::"memory")
#define cpu_relax()	asm volatile("":::"memory")
#endif

#ifdef __alpha__
#include "../../arch/alpha/include/asm/unistd.h"
#define rmb()		asm volatile("mb" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __ia64__
#include "../../arch/ia64/include/asm/unistd.h"
#define rmb()		asm volatile ("mf" ::: "memory")
#define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
#endif

#ifdef __arm__
/* ANDROID_CHANGE_BEGIN */
#if 0
#include "../../arch/arm/include/asm/unistd.h"
#else
#include <asm/unistd.h>
#endif
/* ANDROID_CHANGE_END */
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb()		((void(*)(void))0xffff0fa0)()
#define cpu_relax()	asm volatile("":::"memory")
#endif

#ifdef __mips__
/* ANDROID_CHANGE_BEGIN */
#if 0
#include "../../arch/mips/include/asm/unistd.h"
#elif !defined(__APPLE__)
#include <asm/unistd.h>
#endif
/* ANDROID_CHANGE_END */
#define rmb()		asm volatile(					\
				".set	mips2\n\t"			\
				"sync\n\t"				\
				".set	mips0"				\
				: /* no output */			\
				: /* no input */			\
				: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#include <time.h>
#include <unistd.h>
#include <sys/types.h>

#include <sys/syscall.h>

/* ANDROID_CHANGE_BEGIN */
#if 0
#include "../../include/linux/perf_event.h"
#else
#include "util/include/linux/added/perf_event.h"
#endif
/* ANDROID_CHANGE_END */
#include "util/types.h"
#include <stdbool.h>

struct perf_mmap {
	void			*base;	/* start of the mapping (metadata page first) */
	int			mask;	/* data-area size minus one, for wrapping offsets */
	unsigned int		prev;	/* last head position consumed so far */
};
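
/*
 * Illustrative sketch (not part of this header): one way to fill in a
 * struct perf_mmap from a perf event fd.  Assumes <sys/mman.h> and an
 * `fd` obtained from sys_perf_event_open() below; the data area must be
 * a power-of-two number of pages, preceded by one metadata page:
 *
 *	size_t page_size = sysconf(_SC_PAGE_SIZE);
 *	size_t pages = 8;
 *	struct perf_mmap md;
 *
 *	md.base = mmap(NULL, (pages + 1) * page_size,
 *		       PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	md.mask = pages * page_size - 1;
 *	md.prev = 0;
 */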

static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->base;
	unsigned int head = pc->data_head;
	rmb();	/* ensure the head is read before any of the ring data */
	return head;
}

static inline void perf_mmap__write_tail(struct perf_mmap *md,
					 unsigned long tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	/* mb(); */
	pc->data_tail = tail;
}
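
/*
 * Illustrative sketch (not part of this header): a simplified consumer
 * loop over the ring buffer.  Assumes `md` and `page_size` were set up
 * as in the mapping sketch above and that `process_event` is a
 * hypothetical callback; it ignores events that wrap around the end of
 * the data area:
 *
 *	unsigned int head = perf_mmap__read_head(md);
 *	unsigned int old = md->prev;
 *
 *	while (old != head) {
 *		struct perf_event_header *ev = (struct perf_event_header *)
 *			((char *)md->base + page_size + (old & md->mask));
 *		process_event(ev);
 *		old += ev->size;
 *	}
 *	md->prev = old;
 *	perf_mmap__write_tail(md, old);
 */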

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE   31
#define PR_TASK_PERF_EVENTS_ENABLE    32
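
/*
 * Illustrative sketch: excluding a region from all of the task's
 * counters (assumes <sys/prctl.h> for prctl(); `critical_work` is a
 * hypothetical function):
 *
 *	prctl(PR_TASK_PERF_EVENTS_DISABLE);
 *	critical_work();
 *	prctl(PR_TASK_PERF_EVENTS_ENABLE);
 */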

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC			1000000000ULL
#endif

/* ANDROID_CHANGE_BEGIN */
#ifndef __APPLE__
static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
#endif
/* ANDROID_CHANGE_END */
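
/*
 * Illustrative sketch: timing a stretch of code with rdclock()
 * (assumes <stdio.h>; `do_work` is a hypothetical function):
 *
 *	unsigned long long t0 = rdclock();
 *	do_work();
 *	printf("%llu ns\n", rdclock() - t0);
 */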

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x)	__builtin_expect(!!(x), 0)
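
/*
 * Kernel-style min(): the dummy pointer comparison triggers a compiler
 * warning if x and y have different types, and the statement expression
 * evaluates each argument exactly once.
 */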
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

/* ANDROID_CHANGE_BEGIN */
#ifndef __APPLE__
static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	attr->size = sizeof(*attr);
	return syscall(__NR_perf_event_open, attr, pid, cpu,
		       group_fd, flags);
}
#endif
/* ANDROID_CHANGE_END */
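
/*
 * Illustrative sketch: opening a CPU-cycle counter for the calling
 * thread on any CPU (pid = 0, cpu = -1, no group, no flags); assumes
 * <string.h> for memset() and <stdio.h> for perror():
 *
 *	struct perf_event_attr attr;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *
 *	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 *	if (fd < 0)
 *		perror("perf_event_open");
 */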

#define MAX_COUNTERS			256
#define MAX_NR_CPUS			256

struct ip_callchain {
	u64 nr;		/* number of entries in ips[] */
	u64 ips[0];	/* nr instruction pointers follow the header */
};
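
/*
 * Illustrative sketch: walking a callchain record (assumes <stdio.h>;
 * `chain` points at a struct ip_callchain inside sample data):
 *
 *	u64 i;
 *
 *	for (i = 0; i < chain->nr; i++)
 *		printf("%#llx\n", (unsigned long long)chain->ips[i]);
 */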

extern bool perf_host, perf_guest;

#endif /* _PERF_PERF_H */