#ifndef __ASM_X86_MSR_H_
#define __ASM_X86_MSR_H_

#include <asm/msr-index.h>

#ifndef __ASSEMBLY__
# include <linux/types.h>
#endif

#ifdef __i386__


#else   /* __i386__ */

#ifndef __ASSEMBLY__
#include <linux/errno.h>
/*
 * Access to model-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), which allows gcc to optimize better.
 */

#define rdmsr(msr,val1,val2) \
       __asm__ __volatile__("rdmsr" \
			    : "=a" (val1), "=d" (val2) \
			    : "c" (msr))
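
/*
 * Usage sketch (illustrative only, not part of the original header):
 * read a 64-bit MSR as two 32-bit halves.  MSR_EFER is assumed to come
 * from <asm/msr-index.h>.
 *
 *	unsigned int lo, hi;
 *	rdmsr(MSR_EFER, lo, hi);
 */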


#define rdmsrl(msr,val) do { unsigned long a__,b__; \
       __asm__ __volatile__("rdmsr" \
			    : "=a" (a__), "=d" (b__) \
			    : "c" (msr)); \
       val = a__ | (b__<<32); \
} while(0)
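
/*
 * Usage sketch (illustrative only): rdmsrl() reads the same MSR as a
 * single 64-bit value, which is usually more convenient on x86-64.
 *
 *	unsigned long efer;
 *	rdmsrl(MSR_EFER, efer);
 */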

#define wrmsr(msr,val1,val2) \
     __asm__ __volatile__("wrmsr" \
			  : /* no outputs */ \
			  : "c" (msr), "a" (val1), "d" (val2))

#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
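
/*
 * Usage sketch (illustrative only): a read-modify-write of an MSR.
 * EFER_SCE (the syscall-enable bit) is assumed to come from
 * <asm/msr-index.h>.
 *
 *	unsigned long efer;
 *	rdmsrl(MSR_EFER, efer);
 *	wrmsrl(MSR_EFER, efer | EFER_SCE);
 */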

#define rdtsc(low,high) \
     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#define rdtscl(low) \
     __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")

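/*
 * 0x0f 0x01 0xf9 is the opcode of RDTSCP, spelled out in .byte form
 * for assemblers too old to know the mnemonic.
 */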
#define rdtscp(low,high,aux) \
     __asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))

#define rdtscll(val) do { \
     unsigned int __a,__d; \
     __asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)
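
/*
 * Usage sketch (illustrative only): bracket a code region with TSC
 * reads to measure elapsed cycles.  do_something() is a placeholder.
 *
 *	unsigned long t0, t1, cycles;
 *	rdtscll(t0);
 *	do_something();
 *	rdtscll(t1);
 *	cycles = t1 - t0;
 */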

#define rdtscpll(val, aux) do { \
     unsigned long __a, __d; \
     __asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
     (val) = (__d << 32) | __a; \
} while (0)
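
/*
 * Usage sketch (illustrative only): rdtscpll() also returns the
 * contents of the TSC_AUX MSR, so the caller can tell which CPU the
 * timestamp was taken on.
 *
 *	unsigned long tsc;
 *	unsigned int aux;
 *	rdtscpll(tsc, aux);
 */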

/* 0x10 is MSR_IA32_TSC and 0xc0000103 is MSR_TSC_AUX in <asm/msr-index.h> */
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)

#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
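
/*
 * Usage sketch (illustrative only): a per-CPU value is written into
 * TSC_AUX so that rdtscp()/rdtscpll() callers can recover the CPU
 * number along with the timestamp.
 *
 *	write_rdtscp_aux(smp_processor_id());
 */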

#define rdpmc(counter,low,high) \
     __asm__ __volatile__("rdpmc" \
			  : "=a" (low), "=d" (high) \
			  : "c" (counter))
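
/*
 * Usage sketch (illustrative only): read performance-monitoring
 * counter 0.  Which counters exist, and whether RDPMC is permitted
 * outside ring 0 (CR4.PCE), is CPU- and configuration-dependent.
 *
 *	unsigned int lo, hi;
 *	rdpmc(0, lo, hi);
 */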


static __inline__ void cpuid(int op, unsigned int *eax, unsigned int *ebx,
			     unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op));
}
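
/*
 * Usage sketch (illustrative only): CPUID leaf 0 returns the maximum
 * standard leaf in eax and the vendor string in ebx, edx, ecx.
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	char vendor[13];
 *	cpuid(0, &eax, &ebx, &ecx, &edx);
 *	memcpy(vendor + 0, &ebx, 4);
 *	memcpy(vendor + 4, &edx, 4);
 *	memcpy(vendor + 8, &ecx, 4);
 *	vendor[12] = '\0';
 */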

/* Some CPUID calls want 'count' to be placed in ecx */
static __inline__ void cpuid_count(int op, int count, int *eax, int *ebx,
				   int *ecx, int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op), "c" (count));
}
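
/*
 * Usage sketch (illustrative only): leaf 4 (deterministic cache
 * parameters) is one of the CPUID calls with sub-leaves; the count
 * placed in ecx selects which cache level to describe.
 *
 *	int eax, ebx, ecx, edx;
 *	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 */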

/*
 * CPUID functions returning a single datum
 */
static __inline__ unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax;

	__asm__("cpuid"
		: "=a" (eax)
		: "0" (op)
		: "bx", "cx", "dx");
	return eax;
}

static __inline__ unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx;

	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx)
		: "0" (op)
		: "cx", "dx");
	return ebx;
}

static __inline__ unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;

	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op)
		: "bx", "dx");
	return ecx;
}

static __inline__ unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;

	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op)
		: "bx", "cx");
	return edx;
}
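
/*
 * Usage sketch (illustrative only): leaf 0x80000000 returns the
 * highest supported extended CPUID leaf in eax.
 *
 *	unsigned int max_ext = cpuid_eax(0x80000000);
 */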

#endif  /* __ASSEMBLY__ */

#endif  /* !__i386__ */

#endif  /* __ASM_X86_MSR_H_ */