/* perf.h revision d92d446933f5c6ba6f2fa7fdcc2363ce8ef060b6 */
1#ifndef _PERF_PERF_H 2#define _PERF_PERF_H 3 4struct winsize; 5 6void get_term_dimensions(struct winsize *ws); 7 8#if defined(__i386__) 9/* ANDROID_CHANGE_BEGIN */ 10#if 0 11#include "../../arch/x86/include/asm/unistd.h" 12#elif !defined(__APPLE__) 13#include <asm/unistd.h> 14#endif 15/* ANDROID_CHANGE_END */ 16#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") 17#define cpu_relax() asm volatile("rep; nop" ::: "memory"); 18#endif 19 20#if defined(__x86_64__) 21#include "../../arch/x86/include/asm/unistd.h" 22#define rmb() asm volatile("lfence" ::: "memory") 23#define cpu_relax() asm volatile("rep; nop" ::: "memory"); 24#endif 25 26#ifdef __powerpc__ 27#include "../../arch/powerpc/include/asm/unistd.h" 28#define rmb() asm volatile ("sync" ::: "memory") 29#define cpu_relax() asm volatile ("" ::: "memory"); 30#endif 31 32#ifdef __s390__ 33#include "../../arch/s390/include/asm/unistd.h" 34#define rmb() asm volatile("bcr 15,0" ::: "memory") 35#define cpu_relax() asm volatile("" ::: "memory"); 36#endif 37 38#ifdef __sh__ 39#include "../../arch/sh/include/asm/unistd.h" 40#if defined(__SH4A__) || defined(__SH5__) 41# define rmb() asm volatile("synco" ::: "memory") 42#else 43# define rmb() asm volatile("" ::: "memory") 44#endif 45#define cpu_relax() asm volatile("" ::: "memory") 46#endif 47 48#ifdef __hppa__ 49#include "../../arch/parisc/include/asm/unistd.h" 50#define rmb() asm volatile("" ::: "memory") 51#define cpu_relax() asm volatile("" ::: "memory"); 52#endif 53 54#ifdef __sparc__ 55#include "../../arch/sparc/include/asm/unistd.h" 56#define rmb() asm volatile("":::"memory") 57#define cpu_relax() asm volatile("":::"memory") 58#endif 59 60#ifdef __alpha__ 61#include "../../arch/alpha/include/asm/unistd.h" 62#define rmb() asm volatile("mb" ::: "memory") 63#define cpu_relax() asm volatile("" ::: "memory") 64#endif 65 66#ifdef __ia64__ 67#include "../../arch/ia64/include/asm/unistd.h" 68#define rmb() asm volatile ("mf" ::: "memory") 69#define cpu_relax() 
asm volatile ("hint @pause" ::: "memory") 70#endif 71 72#ifdef __arm__ 73/* ANDROID_CHANGE_BEGIN */ 74#if 0 75#include "../../arch/arm/include/asm/unistd.h" 76#else 77#include <asm/unistd.h> 78#endif 79/* ANDROID_CHANGE_END */ 80/* 81 * Use the __kuser_memory_barrier helper in the CPU helper page. See 82 * arch/arm/kernel/entry-armv.S in the kernel source for details. 83 */ 84#define rmb() ((void(*)(void))0xffff0fa0)() 85#define cpu_relax() asm volatile("":::"memory") 86#endif 87 88#ifdef __mips__ 89/* ANDROID_CHANGE_BEGIN */ 90#if 0 91#include "../../arch/mips/include/asm/unistd.h" 92#elif !defined(__APPLE__) 93#include <asm/unistd.h> 94#endif 95/* ANDROID_CHANGE_END */ 96#define rmb() asm volatile( \ 97 ".set mips2\n\t" \ 98 "sync\n\t" \ 99 ".set mips0" \ 100 : /* no output */ \ 101 : /* no input */ \ 102 : "memory") 103#define cpu_relax() asm volatile("" ::: "memory") 104#endif 105 106#include <time.h> 107#include <unistd.h> 108#include <sys/types.h> 109 110#include <sys/syscall.h> 111 112/* ANDROID_CHANGE_BEGIN */ 113#if 0 114#include "../../include/linux/perf_event.h" 115#else 116#include "util/include/linux/added/perf_event.h" 117#endif 118/* ANDROID_CHANGE_END */ 119#include "util/types.h" 120#include <stdbool.h> 121 122struct perf_mmap { 123 void *base; 124 int mask; 125 unsigned int prev; 126}; 127 128static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm) 129{ 130 struct perf_event_mmap_page *pc = mm->base; 131 int head = pc->data_head; 132 rmb(); 133 return head; 134} 135 136static inline void perf_mmap__write_tail(struct perf_mmap *md, 137 unsigned long tail) 138{ 139 struct perf_event_mmap_page *pc = md->base; 140 141 /* 142 * ensure all reads are done before we write the tail out. 143 */ 144 /* mb(); */ 145 pc->data_tail = tail; 146} 147 148/* 149 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all 150 * counters in the current task. 
151 */ 152#define PR_TASK_PERF_EVENTS_DISABLE 31 153#define PR_TASK_PERF_EVENTS_ENABLE 32 154 155#ifndef NSEC_PER_SEC 156# define NSEC_PER_SEC 1000000000ULL 157#endif 158 159/* ANDROID_CHANGE_BEGIN */ 160#ifndef __APPLE__ 161static inline unsigned long long rdclock(void) 162{ 163 struct timespec ts; 164 165 clock_gettime(CLOCK_MONOTONIC, &ts); 166 return ts.tv_sec * 1000000000ULL + ts.tv_nsec; 167} 168#endif 169/* ANDROID_CHANGE_END */ 170 171/* 172 * Pick up some kernel type conventions: 173 */ 174#define __user 175#define asmlinkage 176 177#define unlikely(x) __builtin_expect(!!(x), 0) 178#define min(x, y) ({ \ 179 typeof(x) _min1 = (x); \ 180 typeof(y) _min2 = (y); \ 181 (void) (&_min1 == &_min2); \ 182 _min1 < _min2 ? _min1 : _min2; }) 183 184/* ANDROID_CHANGE_BEGIN */ 185#ifndef __APPLE__ 186static inline int 187sys_perf_event_open(struct perf_event_attr *attr, 188 pid_t pid, int cpu, int group_fd, 189 unsigned long flags) 190{ 191 attr->size = sizeof(*attr); 192 return syscall(__NR_perf_event_open, attr, pid, cpu, 193 group_fd, flags); 194} 195#endif 196/* ANDROID_CHANGE_END */ 197 198#define MAX_COUNTERS 256 199#define MAX_NR_CPUS 256 200 201struct ip_callchain { 202 u64 nr; 203 u64 ips[0]; 204}; 205 206extern bool perf_host, perf_guest; 207 208#endif 209