/* perf_event.h revision ed8777fc132e589d48a0ba854fdbb5d8203b58e5 */
1#ifndef _ASM_X86_PERF_EVENT_H
2#define _ASM_X86_PERF_EVENT_H
3
4/*
5 * Performance event hw details:
6 */
7
/* Maximum number of generic and fixed-purpose counters this code supports: */
#define X86_PMC_MAX_GENERIC					8
#define X86_PMC_MAX_FIXED					3

/*
 * Global counter index space: generic counters occupy indices starting
 * at 0, fixed-purpose counters start at 32, 64 indices total:
 */
#define X86_PMC_IDX_GENERIC				        0
#define X86_PMC_IDX_FIXED				       32
#define X86_PMC_IDX_MAX					       64

/* MSR addresses of the first two generic counter / event-select registers: */
#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

/*
 * EVENTSEL control bits (architectural perfmon, see the Intel SDM):
 * bit 22 enables the counter, bit 20 enables the overflow interrupt,
 * bits 17/16 select kernel-mode/user-mode counting respectively.
 */
#define ARCH_PERFMON_EVENTSEL0_ENABLE			  (1 << 22)
#define ARCH_PERFMON_EVENTSEL_INT			  (1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS			  (1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR			  (1 << 16)
25
/*
 * EVENTSEL field masks. INTEL_ARCH_EVENT_MASK combines the event select
 * (bits 0-7) and the unit mask (bits 8-15), i.e. everything that
 * identifies *which* event is counted, as opposed to the filter bits:
 */

#define INTEL_ARCH_EVTSEL_MASK		0x000000FFULL	/* event select, bits 0-7 */
#define INTEL_ARCH_UNIT_MASK		0x0000FF00ULL	/* unit mask, bits 8-15 */
#define INTEL_ARCH_EDGE_MASK		0x00040000ULL	/* edge detect, bit 18 */
#define INTEL_ARCH_INV_MASK		0x00800000ULL	/* invert cmask, bit 23 */
#define INTEL_ARCH_CNT_MASK		0xFF000000ULL	/* counter mask, bits 24-31 */
#define INTEL_ARCH_EVENT_MASK	(INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)

/*
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define INTEL_ARCH_FIXED_MASK \
	(INTEL_ARCH_CNT_MASK| \
	 INTEL_ARCH_INV_MASK| \
	 INTEL_ARCH_EDGE_MASK|\
	 INTEL_ARCH_UNIT_MASK|\
	 INTEL_ARCH_EVTSEL_MASK)
53
/*
 * Pre-defined architectural event "UnHalted Core Cycles":
 * event select 0x3c, unit mask 0x00. It is architectural event #0,
 * so its availability bit in the CPUID.0AH:EBX vector is bit 0
 * (NOTE(review): per the SDM a *set* EBX bit means the event is
 * NOT available — confirm against the caller's usage).
 */
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		      0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX			 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

/* Position of "Branch Misses Retired" in the architectural event list: */
#define ARCH_PERFMON_BRANCH_MISSES_RETIRED			 6
61
/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details.
 *
 * Layout of CPUID.0AH:EAX (field order matches the register bit layout,
 * low bits first — do not reorder):
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;	/* architectural perfmon version */
		unsigned int num_events:8;	/* number of general-purpose counters */
		unsigned int bit_width:8;	/* bit width of the counters */
		unsigned int mask_length:8;	/* length of the EBX event-availability vector */
	} split;
	unsigned int full;			/* raw EAX value */
};
75
/*
 * Layout of CPUID.0AH:EDX (low bits first — do not reorder):
 */
union cpuid10_edx {
	struct {
		unsigned int num_events_fixed:4;	/* number of fixed-purpose counters */
		unsigned int reserved:28;
	} split;
	unsigned int full;				/* raw EDX value */
};
83
84
/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL			0x38d

/*
 * The counts are available in three separate MSRs, each paired here
 * with its index in the global counter index space (base 32):
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0			0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS			(X86_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1			0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES			(X86_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2			0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES			(X86_PMC_IDX_FIXED + 2)

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)
118
119
#ifdef CONFIG_PERF_EVENTS
/* Boot-time PMU setup and local-APIC perf setup; defined in the perf code. */
extern void init_hw_perf_events(void);
extern void perf_events_lapic_init(void);

/*
 * Offset added when reporting a counter index to userspace; 0 on x86
 * (presumably counters are numbered from 0 as-is — confirm at callers).
 */
#define PERF_EVENT_INDEX_OFFSET			0

#else
/* No-op stubs so callers need no #ifdefs when perf events are disabled: */
static inline void init_hw_perf_events(void)		{ }
static inline void perf_events_lapic_init(void)	{ }
#endif
130
131#endif /* _ASM_X86_PERF_EVENT_H */
132