op_hw_specific.h revision b415faba7482dd7ee3335f0f1518333554e3da0d
1/*
2 * @file architecture specific interfaces
3 * @remark Copyright 2008 Intel Corporation
4 * @remark Read the file COPYING
5 * @author Andi Kleen
6 */
7
8#if (defined(__i386__) || defined(__x86_64__)) && !defined(ANDROID_HOST)
9
10/* Assume we run on the same host as the profilee */
11
/* Bitmask with the low x bits set, e.g. num_to_mask(3) == 0x7.
   NOTE(review): callers must pass x < 32 — shifting 1U by 32 is
   undefined behavior; CPUID counter counts fit in 8 bits, so OK. */
#define num_to_mask(x) ((1U << (x)) - 1)
13
14static inline int cpuid_vendor(char *vnd)
15{
16	union {
17		struct {
18			unsigned b,d,c;
19		};
20		char v[12];
21	} v;
22	unsigned eax;
23#ifdef __PIC__
24        __asm__ __volatile__(
25            "pushl %%ebx\n"      /* must be preserved due to PIC code */
26            "cpuid\n"
27            "mov %%ebx, 0(%%edi)\n"
28            "mov %%ecx, 4(%%edi)\n"
29            "mov %%edx, 8(%%edi)\n"
30            "popl %%ebx\n"
31            : "=a" (eax)
32            : "a"(0), "D"(v.v)
33            : "%ecx", "%edx"
34        );
35#else
36	asm("cpuid" : "=a" (eax), "=b" (v.b), "=c" (v.c), "=d" (v.d) : "0" (0));
37#endif
38	return !strncmp(v.v, vnd, 12);
39}
40
41static inline unsigned arch_cpuid_1(int code)
42{
43    unsigned val;
44#ifdef __PIC__
45        __asm__ __volatile__ (
46            "pushl %%ebx\n"
47            "cpuid\n"
48            "popl %%ebx\n"
49            : "=a" (val)
50            : "a" (code)
51            : "ecx", "edx"
52        );
53#else
54        asm("cpuid" : "=a" (v.eax) : "a" (code) : "ecx","ebx","edx");
55#endif
56        return val;
57}
58
59
60/* Work around Nehalem spec update AAJ79: CPUID incorrectly indicates
61   unhalted reference cycle architectural event is supported. We assume
62   steppings after C0 report correct data in CPUID. */
/* Work around Nehalem spec update AAJ79: CPUID incorrectly indicates
   unhalted reference cycle architectural event is supported. We assume
   steppings after C0 report correct data in CPUID. */
static inline void workaround_nehalem_aaj79(unsigned *ebx)
{
	union {
		unsigned eax;
		/* Bit layout of CPUID leaf 1 EAX (family/model/stepping). */
		struct {
			unsigned stepping : 4;
			unsigned model : 4;
			unsigned family : 4;
			unsigned type : 2;
			unsigned res : 2;
			unsigned ext_model : 4;
			unsigned ext_family : 8;
			unsigned res2 : 4;
		};
	} v;
	unsigned model;

	if (!cpuid_vendor("GenuineIntel"))
		return;
	/* Fix: the CPUID result was discarded and v.eax read uninitialized
	   (undefined behavior); store the leaf 1 EAX value before decoding. */
	v.eax = arch_cpuid_1(1);
	model = (v.ext_model << 4) + v.model;
	if (v.family != 6 || model != 26 || v.stepping > 4)
		return;
	/* Setting the CPUID.0AH EBX bit marks the event as unavailable. */
	*ebx |= (1 << 2);	/* disable unsupported event */
}
88
/*
 * Return the architectural perfmon event filter mask from CPUID leaf 0xa:
 * EBX availability bits masked to the width given in EAX bits 31:24,
 * with the Nehalem AAJ79 erratum applied.  Returns -1U for non-arch-perfmon
 * CPU types.
 */
static inline unsigned arch_get_filter(op_cpu cpu_type)
{
	if (cpu_type == CPU_ARCH_PERFMON) {
		unsigned ebx, eax;
#ifdef __PIC__
                /* EBX is the PIC register: save it, move the CPUID EBX
                   result out through ECX, then restore it. */
                __asm__ __volatile__ (
                    "pushl %%ebx\n"
                    "cpuid\n"
                    "mov %%ebx, %%ecx\n"
                    "popl %%ebx"
                    : "=a" (eax), "=c" (ebx)
                    : "a" (0xa)
                    : "edx"
                );
#else
		asm("cpuid" : "=a" (eax), "=b" (ebx) : "0" (0xa) : "ecx","edx");
#endif
		workaround_nehalem_aaj79(&ebx);
		/* EAX bits 31:24 = number of valid EBX availability bits. */
		return ebx & num_to_mask(eax >> 24);
	}
	return -1U;
}
111
112static inline int arch_num_counters(op_cpu cpu_type)
113{
114	if (cpu_type == CPU_ARCH_PERFMON) {
115		unsigned v = arch_cpuid_1(0xa);
116		return (v >> 8) & 0xff;
117	}
118	return -1;
119}
120
/*
 * Bitmask with one bit set per available performance counter,
 * derived from the counter count in CPUID leaf 0xa EAX bits 15:8.
 */
static inline unsigned arch_get_counter_mask(void)
{
	unsigned eax = arch_cpuid_1(0xa);
	return num_to_mask((eax >> 8) & 0xff);
}
126
127#else
128
129static inline unsigned arch_get_filter(op_cpu cpu_type)
130{
131	/* Do something with passed arg to shut up the compiler warning */
132	if (cpu_type != CPU_NO_GOOD)
133		return 0;
134	return 0;
135}
136
137static inline int arch_num_counters(op_cpu cpu_type)
138{
139	/* Do something with passed arg to shut up the compiler warning */
140	if (cpu_type != CPU_NO_GOOD)
141		return -1;
142	return -1;
143}
144
/* Fallback for non-x86 (or host-tool) builds: no counters available. */
static inline unsigned arch_get_counter_mask(void)
{
	return 0;
}
149
150#endif
151