/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_
#define ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_

#include "dex/compiler_internals.h"

namespace art {

/*
 * Runtime register conventions. We consider x86, x86-64 and x32 (x86-64 with 32-bit pointers).
 * These ABIs have different conventions and we capture those here. Changing something that is
 * callee save and making it caller save places a burden on up-calls to save/restore the callee
 * save register; however, there are few registers that are callee save in the ABI. Changing
 * something that is caller save and making it callee save places a burden on down-calls to
 * save/restore the callee save register. For these reasons we aim to match native conventions for
 * caller and callee save. On x86 only the first 4 registers can be used for byte operations, so
 * they are preferred as temporary scratch registers.
 *
 * General Purpose Register:
 *  Native: x86    | x86-64 / x32 | ART x86                                         | ART x86-64
 *  r0/eax: caller | caller       | caller, Method*, scratch, return value          | caller, scratch, return value
 *  r1/ecx: caller | caller, arg4 | caller, arg1, scratch                           | caller, arg3, scratch
 *  r2/edx: caller | caller, arg3 | caller, arg2, scratch, high half of long return | caller, arg2, scratch
 *  r3/ebx: callEE | callEE       | callER, arg3, scratch                           | callee, promotable
 *  r4/esp: stack pointer
 *  r5/ebp: callee | callee       | callee, promotable                              | callee, promotable
 *  r6/esi: callEE | callER, arg2 | callee, promotable                              | caller, arg1, scratch
 *  r7/edi: callEE | callER, arg1 | callee, promotable                              | caller, Method*, scratch
 *  ---  x86-64/x32 registers
 *  Native: x86-64 / x32      | ART
 *  r8:     caller save, arg5 | caller, arg4, scratch
 *  r9:     caller save, arg6 | caller, arg5, scratch
 *  r10:    caller save       | caller, scratch
 *  r11:    caller save       | caller, scratch
 *  r12:    callee save       | callee, available for register promotion (promotable)
 *  r13:    callee save       | callee, available for register promotion (promotable)
 *  r14:    callee save       | callee, available for register promotion (promotable)
 *  r15:    callee save       | callee, available for register promotion (promotable)
 *
 * There is no rSELF; instead, on x86 the fs: segment is based at Thread::Current(), whereas on
 * x86-64/x32 gs: holds it.
 *
 * For floating point we don't support CPUs without SSE2 support (i.e. we require CPUs newer than the PIII):
 *  Native: x86  | x86-64 / x32 | ART x86                    | ART x86-64
 *  XMM0: caller | caller, arg1 | caller, float return value | caller, arg1, float return value
 *  XMM1: caller | caller, arg2 | caller, scratch            | caller, arg2, scratch
 *  XMM2: caller | caller, arg3 | caller, scratch            | caller, arg3, scratch
 *  XMM3: caller | caller, arg4 | caller, scratch            | caller, arg4, scratch
 *  XMM4: caller | caller, arg5 | caller, scratch            | caller, arg5, scratch
 *  XMM5: caller | caller, arg6 | caller, scratch            | caller, arg6, scratch
 *  XMM6: caller | caller, arg7 | caller, scratch            | caller, arg7, scratch
 *  XMM7: caller | caller, arg8 | caller, scratch            | caller, arg8, scratch
 *  ---  x86-64/x32 registers
 *  XMM8 .. 11: caller save, available as scratch registers for ART.
 *  XMM12 .. 15: callee save, available as promoted registers for ART.
 *  Treating XMM12 .. 15 as callee save applies to QCG only; elsewhere they remain caller save.
 *
 * X87 is a necessary evil outside of ART code for x86:
 *  ST0:  x86 float/double native return value, caller save
 *  ST1 .. ST7: caller save
 *
 *  Stack frame diagram (stack grows down, higher addresses at top):
 *
 * +------------------------+
 * | IN[ins-1]              |  {Note: resides in caller's frame}
 * |       .                |
 * | IN[0]                  |
 * | caller's Method*       |
 * +========================+  {Note: start of callee's frame}
 * | return address         |  {pushed by call}
 * | spill region           |  {variable sized}
 * +------------------------+
 * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
 * +------------------------+
 * | V[locals-1]            |
 * | V[locals-2]            |
 * |      .                 |
 * |      .                 |
 * | V[1]                   |
 * | V[0]                   |
 * +------------------------+
 * |  0 to 3 words padding  |
 * +------------------------+
 * | OUT[outs-1]            |
 * | OUT[outs-2]            |
 * |       .                |
 * | OUT[0]                 |
 * | cur_method*            | <<== sp w/ 16-byte alignment
 * +========================+
 */

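/*
 * As a concrete restatement of the tables above (illustration only, no new conventions): for a
 * managed-to-managed call on ART x86-64 the callee's Method* travels in RDI, the first three
 * arguments in RSI, RDX and RCX, further arguments in R8 and R9 and then in the OUT area of the
 * stack frame, with float arguments in XMM0 .. XMM7; on ART x86 the Method* is in EAX and the
 * first three arguments in ECX, EDX and EBX, the rest on the stack.
 */
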
enum X86ResourceEncodingPos {
  kX86GPReg0   = 0,
  kX86RegSP    = 4,
  kX86FPReg0   = 16,  // xmm0 .. xmm7/xmm15.
  kX86FPRegEnd = 32,
  kX86FPStack  = 33,
  kX86RegEnd   = kX86FPStack,
};

// FIXME: for 64-bit, perhaps add an X86_64NativeRegisterPool enum?
enum X86NativeRegisterPool {
  r0             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
  r0q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0,
  rAX            = r0,
  r1             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
  r1q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 1,
  rCX            = r1,
  r2             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
  r2q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 2,
  rDX            = r2,
  r3             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
  r3q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 3,
  rBX            = r3,
  r4sp_32        = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
  rX86_SP_32     = r4sp_32,
  r4sp_64        = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 4,
  rX86_SP_64     = r4sp_64,
  r5             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
  r5q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 5,
  rBP            = r5,
  r5sib_no_base  = r5,
  r6             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
  r6q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 6,
  rSI            = r6,
  r7             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
  r7q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 7,
  rDI            = r7,
  r8             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
  r8q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 8,
  r9             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
  r9q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 9,
  r10            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
  r10q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
  r11            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
  r11q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
  r12            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
  r12q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
  r13            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
  r13q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
  r14            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
  r14q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
  r15            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
  r15q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
  // fake return address register for core spill mask.
  rRET           = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,

  // xmm registers, single precision view.
  fr0  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 0,
  fr1  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 1,
  fr2  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 2,
  fr3  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 3,
  fr4  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 4,
  fr5  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 5,
  fr6  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 6,
  fr7  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 7,
  fr8  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 8,
  fr9  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 9,
  fr10 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
  fr11 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
  fr12 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
  fr13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
  fr14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
  fr15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,

  // xmm registers, double precision aliases.
  dr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
  dr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
  dr2  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
  dr3  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
  dr4  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
  dr5  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
  dr6  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
  dr7  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
  dr8  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
  dr9  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
  dr10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
  dr11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
  dr12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
  dr13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
  dr14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
  dr15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,

  // xmm registers, quad precision aliases
  xr0  = RegStorage::k128BitSolo | 0,
  xr1  = RegStorage::k128BitSolo | 1,
  xr2  = RegStorage::k128BitSolo | 2,
  xr3  = RegStorage::k128BitSolo | 3,
  xr4  = RegStorage::k128BitSolo | 4,
  xr5  = RegStorage::k128BitSolo | 5,
  xr6  = RegStorage::k128BitSolo | 6,
  xr7  = RegStorage::k128BitSolo | 7,
  xr8  = RegStorage::k128BitSolo | 8,
  xr9  = RegStorage::k128BitSolo | 9,
  xr10 = RegStorage::k128BitSolo | 10,
  xr11 = RegStorage::k128BitSolo | 11,
  xr12 = RegStorage::k128BitSolo | 12,
  xr13 = RegStorage::k128BitSolo | 13,
  xr14 = RegStorage::k128BitSolo | 14,
  xr15 = RegStorage::k128BitSolo | 15,

  // TODO: as needed, add 256, 512 and 1024-bit xmm views.
};
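
// Worked example of the encoding above (annotation only): r8q is RegStorage::k64BitSolo |
// RegStorage::kCoreRegister | 8, i.e. native R8 viewed as a 64-bit solo core register, while r8
// is the 32-bit view of the same physical register; likewise fr0/dr0/xr0 are the 32/64/128-bit
// views of XMM0. The rs_ constants below simply OR in RegStorage::kValid so the values can be
// handed around as ready-made RegStorage objects.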

constexpr RegStorage rs_r0(RegStorage::kValid | r0);
constexpr RegStorage rs_r0q(RegStorage::kValid | r0q);
constexpr RegStorage rs_rAX = rs_r0;
constexpr RegStorage rs_r1(RegStorage::kValid | r1);
constexpr RegStorage rs_r1q(RegStorage::kValid | r1q);
constexpr RegStorage rs_rCX = rs_r1;
constexpr RegStorage rs_r2(RegStorage::kValid | r2);
constexpr RegStorage rs_r2q(RegStorage::kValid | r2q);
constexpr RegStorage rs_rDX = rs_r2;
constexpr RegStorage rs_r3(RegStorage::kValid | r3);
constexpr RegStorage rs_r3q(RegStorage::kValid | r3q);
constexpr RegStorage rs_rBX = rs_r3;
constexpr RegStorage rs_rX86_SP_64(RegStorage::kValid | r4sp_64);
constexpr RegStorage rs_rX86_SP_32(RegStorage::kValid | r4sp_32);
extern RegStorage rs_rX86_SP;
constexpr RegStorage rs_r5(RegStorage::kValid | r5);
constexpr RegStorage rs_r5q(RegStorage::kValid | r5q);
constexpr RegStorage rs_rBP = rs_r5;
constexpr RegStorage rs_r6(RegStorage::kValid | r6);
constexpr RegStorage rs_r6q(RegStorage::kValid | r6q);
constexpr RegStorage rs_rSI = rs_r6;
constexpr RegStorage rs_r7(RegStorage::kValid | r7);
constexpr RegStorage rs_r7q(RegStorage::kValid | r7q);
constexpr RegStorage rs_rDI = rs_r7;
constexpr RegStorage rs_rRET(RegStorage::kValid | rRET);
constexpr RegStorage rs_r8(RegStorage::kValid | r8);
constexpr RegStorage rs_r8q(RegStorage::kValid | r8q);
constexpr RegStorage rs_r9(RegStorage::kValid | r9);
constexpr RegStorage rs_r9q(RegStorage::kValid | r9q);
constexpr RegStorage rs_r10(RegStorage::kValid | r10);
constexpr RegStorage rs_r10q(RegStorage::kValid | r10q);
constexpr RegStorage rs_r11(RegStorage::kValid | r11);
constexpr RegStorage rs_r11q(RegStorage::kValid | r11q);
constexpr RegStorage rs_r12(RegStorage::kValid | r12);
constexpr RegStorage rs_r12q(RegStorage::kValid | r12q);
constexpr RegStorage rs_r13(RegStorage::kValid | r13);
constexpr RegStorage rs_r13q(RegStorage::kValid | r13q);
constexpr RegStorage rs_r14(RegStorage::kValid | r14);
constexpr RegStorage rs_r14q(RegStorage::kValid | r14q);
constexpr RegStorage rs_r15(RegStorage::kValid | r15);
constexpr RegStorage rs_r15q(RegStorage::kValid | r15q);

constexpr RegStorage rs_fr0(RegStorage::kValid | fr0);
constexpr RegStorage rs_fr1(RegStorage::kValid | fr1);
constexpr RegStorage rs_fr2(RegStorage::kValid | fr2);
constexpr RegStorage rs_fr3(RegStorage::kValid | fr3);
constexpr RegStorage rs_fr4(RegStorage::kValid | fr4);
constexpr RegStorage rs_fr5(RegStorage::kValid | fr5);
constexpr RegStorage rs_fr6(RegStorage::kValid | fr6);
constexpr RegStorage rs_fr7(RegStorage::kValid | fr7);
constexpr RegStorage rs_fr8(RegStorage::kValid | fr8);
constexpr RegStorage rs_fr9(RegStorage::kValid | fr9);
constexpr RegStorage rs_fr10(RegStorage::kValid | fr10);
constexpr RegStorage rs_fr11(RegStorage::kValid | fr11);
constexpr RegStorage rs_fr12(RegStorage::kValid | fr12);
constexpr RegStorage rs_fr13(RegStorage::kValid | fr13);
constexpr RegStorage rs_fr14(RegStorage::kValid | fr14);
constexpr RegStorage rs_fr15(RegStorage::kValid | fr15);

constexpr RegStorage rs_dr0(RegStorage::kValid | dr0);
constexpr RegStorage rs_dr1(RegStorage::kValid | dr1);
constexpr RegStorage rs_dr2(RegStorage::kValid | dr2);
constexpr RegStorage rs_dr3(RegStorage::kValid | dr3);
constexpr RegStorage rs_dr4(RegStorage::kValid | dr4);
constexpr RegStorage rs_dr5(RegStorage::kValid | dr5);
constexpr RegStorage rs_dr6(RegStorage::kValid | dr6);
constexpr RegStorage rs_dr7(RegStorage::kValid | dr7);
constexpr RegStorage rs_dr8(RegStorage::kValid | dr8);
constexpr RegStorage rs_dr9(RegStorage::kValid | dr9);
constexpr RegStorage rs_dr10(RegStorage::kValid | dr10);
constexpr RegStorage rs_dr11(RegStorage::kValid | dr11);
constexpr RegStorage rs_dr12(RegStorage::kValid | dr12);
constexpr RegStorage rs_dr13(RegStorage::kValid | dr13);
constexpr RegStorage rs_dr14(RegStorage::kValid | dr14);
constexpr RegStorage rs_dr15(RegStorage::kValid | dr15);

constexpr RegStorage rs_xr0(RegStorage::kValid | xr0);
constexpr RegStorage rs_xr1(RegStorage::kValid | xr1);
constexpr RegStorage rs_xr2(RegStorage::kValid | xr2);
constexpr RegStorage rs_xr3(RegStorage::kValid | xr3);
constexpr RegStorage rs_xr4(RegStorage::kValid | xr4);
constexpr RegStorage rs_xr5(RegStorage::kValid | xr5);
constexpr RegStorage rs_xr6(RegStorage::kValid | xr6);
constexpr RegStorage rs_xr7(RegStorage::kValid | xr7);
constexpr RegStorage rs_xr8(RegStorage::kValid | xr8);
constexpr RegStorage rs_xr9(RegStorage::kValid | xr9);
constexpr RegStorage rs_xr10(RegStorage::kValid | xr10);
constexpr RegStorage rs_xr11(RegStorage::kValid | xr11);
constexpr RegStorage rs_xr12(RegStorage::kValid | xr12);
constexpr RegStorage rs_xr13(RegStorage::kValid | xr13);
constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);

extern X86NativeRegisterPool rX86_ARG0;
extern X86NativeRegisterPool rX86_ARG1;
extern X86NativeRegisterPool rX86_ARG2;
extern X86NativeRegisterPool rX86_ARG3;
extern X86NativeRegisterPool rX86_ARG4;
extern X86NativeRegisterPool rX86_ARG5;
extern X86NativeRegisterPool rX86_FARG0;
extern X86NativeRegisterPool rX86_FARG1;
extern X86NativeRegisterPool rX86_FARG2;
extern X86NativeRegisterPool rX86_FARG3;
extern X86NativeRegisterPool rX86_FARG4;
extern X86NativeRegisterPool rX86_FARG5;
extern X86NativeRegisterPool rX86_FARG6;
extern X86NativeRegisterPool rX86_FARG7;
extern X86NativeRegisterPool rX86_RET0;
extern X86NativeRegisterPool rX86_RET1;
extern X86NativeRegisterPool rX86_INVOKE_TGT;
extern X86NativeRegisterPool rX86_COUNT;

extern RegStorage rs_rX86_ARG0;
extern RegStorage rs_rX86_ARG1;
extern RegStorage rs_rX86_ARG2;
extern RegStorage rs_rX86_ARG3;
extern RegStorage rs_rX86_ARG4;
extern RegStorage rs_rX86_ARG5;
extern RegStorage rs_rX86_FARG0;
extern RegStorage rs_rX86_FARG1;
extern RegStorage rs_rX86_FARG2;
extern RegStorage rs_rX86_FARG3;
extern RegStorage rs_rX86_FARG4;
extern RegStorage rs_rX86_FARG5;
extern RegStorage rs_rX86_FARG6;
extern RegStorage rs_rX86_FARG7;
extern RegStorage rs_rX86_RET0;
extern RegStorage rs_rX86_RET1;
extern RegStorage rs_rX86_INVOKE_TGT;
extern RegStorage rs_rX86_COUNT;

// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
const RegLocation x86_loc_c_return
    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
     RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_wide
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
     RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_ref
    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
     RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_64_loc_c_return_ref
    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
     RegStorage(RegStorage::k64BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_64_loc_c_return_wide
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
     RegStorage(RegStorage::k64BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_float
    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
     RegStorage(RegStorage::k32BitSolo, fr0), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_double
    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
     RegStorage(RegStorage::k64BitSolo, dr0), INVALID_SREG, INVALID_SREG};

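// Note on the initializer layout above (annotation added here; it assumes the RegLocation field
// order location, wide, defined, is_const, fp, core, ref, high_word, home, reg, s_reg_low,
// orig_sreg from compiler_internals.h): the 1 in the second slot of the *_wide templates flags a
// wide value, the 1 in the fifth slot of the float/double templates flags a floating-point value,
// and the 1 in the seventh slot of the *_return_ref templates flags a reference.
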
/*
 * The following enum defines the X86 instructions supported by the assembler. Their
 * corresponding EncodingMap entries are defined in Assemble.cc.
 */
enum X86OpCode {
  kX86First = 0,
  kX8632BitData = kX86First,  // data [31..0].
  kX86Bkpt,
  kX86Nop,
  // Define groups of binary operations.
  // MR - Memory Register  - opcode [base + disp], reg
  //             - lir operands - 0: base, 1: disp, 2: reg
  // AR - Array Register   - opcode [base + index * scale + disp], reg
  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
  // TR - Thread Register  - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
  //             - lir operands - 0: disp, 1: reg
  // RR - Register Register  - opcode reg1, reg2
  //             - lir operands - 0: reg1, 1: reg2
  // RM - Register Memory  - opcode reg, [base + disp]
  //             - lir operands - 0: reg, 1: base, 2: disp
  // RA - Register Array   - opcode reg, [base + index * scale + disp]
  //             - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
  // RT - Register Thread  - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
  //             - lir operands - 0: reg, 1: disp
  // RI - Register Immediate - opcode reg, #immediate
  //             - lir operands - 0: reg, 1: immediate
  // MI - Memory Immediate   - opcode [base + disp], #immediate
  //             - lir operands - 0: base, 1: disp, 2: immediate
  // AI - Array Immediate  - opcode [base + index * scale + disp], #immediate
  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: immediate
  // TI - Thread Immediate  - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
  //             - lir operands - 0: disp, 1: imm
#define BinaryOpCode(opcode) \
  opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
  opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, opcode ## 8TI, \
  opcode ## 16MR, opcode ## 16AR, opcode ## 16TR, \
  opcode ## 16RR, opcode ## 16RM, opcode ## 16RA, opcode ## 16RT, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, opcode ## 16TI, \
  opcode ## 16RI8, opcode ## 16MI8, opcode ## 16AI8, opcode ## 16TI8, \
  opcode ## 32MR, opcode ## 32AR, opcode ## 32TR,  \
  opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
  opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8, \
  opcode ## 64MR, opcode ## 64AR, opcode ## 64TR,  \
  opcode ## 64RR, opcode ## 64RM, opcode ## 64RA, opcode ## 64RT, \
  opcode ## 64RI, opcode ## 64MI, opcode ## 64AI, opcode ## 64TI, \
  opcode ## 64RI8, opcode ## 64MI8, opcode ## 64AI8, opcode ## 64TI8
  BinaryOpCode(kX86Add),
  BinaryOpCode(kX86Or),
  BinaryOpCode(kX86Adc),
  BinaryOpCode(kX86Sbb),
  BinaryOpCode(kX86And),
  BinaryOpCode(kX86Sub),
  BinaryOpCode(kX86Xor),
  BinaryOpCode(kX86Cmp),
#undef BinaryOpCode
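  // Illustration of the macro above: BinaryOpCode(kX86Add) expands to kX86Add8MR, kX86Add8AR,
  // ..., kX86Add64TI8 - one enumerator per operand width (8/16/32/64 bits) and addressing form
  // (MR, AR, TR, RR, RM, RA, RT, RI, MI, AI, TI, plus the 8-bit-immediate RI8/MI8/AI8/TI8
  // variants for the 16/32/64-bit widths), matching the operand layouts documented above.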
  kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
  kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
  kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
  kX86Imul64RRI, kX86Imul64RMI, kX86Imul64RAI,
  kX86Imul64RRI8, kX86Imul64RMI8, kX86Imul64RAI8,
  kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
  kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
  kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
  kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
  kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
  kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
  kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
  kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
  kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
  kX86Lea32RM,
  kX86Lea32RA,
  kX86Mov64MR, kX86Mov64AR, kX86Mov64TR,
  kX86Mov64RR, kX86Mov64RM, kX86Mov64RA, kX86Mov64RT,
  kX86Mov64RI32, kX86Mov64RI64, kX86Mov64MI, kX86Mov64AI, kX86Mov64TI,
  kX86Lea64RM,
  kX86Lea64RA,
  // RRC - Register Register ConditionCode - cond_opcode reg1, reg2
  //             - lir operands - 0: reg1, 1: reg2, 2: CC
  kX86Cmov32RRC,
  kX86Cmov64RRC,
  // RMC - Register Memory ConditionCode - cond_opcode reg1, [base + disp]
  //             - lir operands - 0: reg1, 1: base, 2: disp, 3: CC
  kX86Cmov32RMC,
  kX86Cmov64RMC,

  // RC - Register CL - opcode reg, CL
  //          - lir operands - 0: reg, 1: CL
  // MC - Memory CL   - opcode [base + disp], CL
  //          - lir operands - 0: base, 1: disp, 2: CL
  // AC - Array CL  - opcode [base + index * scale + disp], CL
  //          - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
#define BinaryShiftOpCode(opcode) \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
  opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
  opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
  opcode ## 32RC, opcode ## 32MC, opcode ## 32AC, \
  opcode ## 64RI, opcode ## 64MI, opcode ## 64AI, \
  opcode ## 64RC, opcode ## 64MC, opcode ## 64AC
  BinaryShiftOpCode(kX86Rol),
  BinaryShiftOpCode(kX86Ror),
  BinaryShiftOpCode(kX86Rcl),
  BinaryShiftOpCode(kX86Rcr),
  BinaryShiftOpCode(kX86Sal),
  BinaryShiftOpCode(kX86Shr),
  BinaryShiftOpCode(kX86Sar),
#undef BinaryShiftOpCode
  kX86Cmc,
  kX86Shld32RRI,
  kX86Shld32MRI,
  kX86Shrd32RRI,
  kX86Shrd32MRI,
  kX86Shld64RRI,
  kX86Shld64MRI,
  kX86Shrd64RRI,
  kX86Shrd64MRI,
#define UnaryOpcode(opcode, reg, mem, array) \
  opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
  opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
  opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array, \
  opcode ## 64 ## reg, opcode ## 64 ## mem, opcode ## 64 ## array
  UnaryOpcode(kX86Test, RI, MI, AI),
  kX86Test32RR,
  kX86Test64RR,
  kX86Test32RM,
  UnaryOpcode(kX86Not, R, M, A),
  UnaryOpcode(kX86Neg, R, M, A),
  UnaryOpcode(kX86Mul,  DaR, DaM, DaA),
  UnaryOpcode(kX86Imul, DaR, DaM, DaA),
  UnaryOpcode(kX86Divmod,  DaR, DaM, DaA),
  UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
  kx86Cdq32Da,
  kx86Cqo64Da,
  kX86Bswap32R,
  kX86Bswap64R,
  kX86Push32R, kX86Pop32R,
#undef UnaryOpcode
#define Binary0fOpCode(opcode) \
  opcode ## RR, opcode ## RM, opcode ## RA
  Binary0fOpCode(kX86Movsd),
  kX86MovsdMR,
  kX86MovsdAR,
  Binary0fOpCode(kX86Movss),
  kX86MovssMR,
  kX86MovssAR,
  Binary0fOpCode(kX86Cvtsi2sd),  // int to double
  Binary0fOpCode(kX86Cvtsi2ss),  // int to float
  Binary0fOpCode(kX86Cvtsqi2sd),  // long to double
  Binary0fOpCode(kX86Cvtsqi2ss),  // long to float
  Binary0fOpCode(kX86Cvttsd2si),  // truncating double to int
  Binary0fOpCode(kX86Cvttss2si),  // truncating float to int
  Binary0fOpCode(kX86Cvttsd2sqi),  // truncating double to long
  Binary0fOpCode(kX86Cvttss2sqi),  // truncating float to long
  Binary0fOpCode(kX86Cvtsd2si),  // rounding double to int
  Binary0fOpCode(kX86Cvtss2si),  // rounding float to int
  Binary0fOpCode(kX86Ucomisd),  // unordered double compare
  Binary0fOpCode(kX86Ucomiss),  // unordered float compare
  Binary0fOpCode(kX86Comisd),   // double compare
  Binary0fOpCode(kX86Comiss),   // float compare
  Binary0fOpCode(kX86Orpd),     // double logical OR
  Binary0fOpCode(kX86Orps),     // float logical OR
  Binary0fOpCode(kX86Andpd),    // double logical AND
  Binary0fOpCode(kX86Andps),    // float logical AND
  Binary0fOpCode(kX86Xorpd),    // double logical XOR
  Binary0fOpCode(kX86Xorps),    // float logical XOR
  Binary0fOpCode(kX86Addsd),    // double ADD
  Binary0fOpCode(kX86Addss),    // float ADD
  Binary0fOpCode(kX86Mulsd),    // double multiply
  Binary0fOpCode(kX86Mulss),    // float multiply
  Binary0fOpCode(kX86Cvtsd2ss),  // double to float
  Binary0fOpCode(kX86Cvtss2sd),  // float to double
  Binary0fOpCode(kX86Subsd),    // double subtract
  Binary0fOpCode(kX86Subss),    // float subtract
  Binary0fOpCode(kX86Divsd),    // double divide
  Binary0fOpCode(kX86Divss),    // float divide
  Binary0fOpCode(kX86Punpckldq),  // Interleave low-order double words
  Binary0fOpCode(kX86Sqrtsd),   // square root
  Binary0fOpCode(kX86Pmulld),   // parallel integer multiply 32 bits x 4
  Binary0fOpCode(kX86Pmullw),   // parallel integer multiply 16 bits x 8
  Binary0fOpCode(kX86Mulps),    // parallel FP multiply 32 bits x 4
  Binary0fOpCode(kX86Mulpd),    // parallel FP multiply 64 bits x 2
  Binary0fOpCode(kX86Paddb),    // parallel integer addition 8 bits x 16
  Binary0fOpCode(kX86Paddw),    // parallel integer addition 16 bits x 8
  Binary0fOpCode(kX86Paddd),    // parallel integer addition 32 bits x 4
  Binary0fOpCode(kX86Addps),    // parallel FP addition 32 bits x 4
  Binary0fOpCode(kX86Addpd),    // parallel FP addition 64 bits x 2
  Binary0fOpCode(kX86Psubb),    // parallel integer subtraction 8 bits x 16
  Binary0fOpCode(kX86Psubw),    // parallel integer subtraction 16 bits x 8
  Binary0fOpCode(kX86Psubd),    // parallel integer subtraction 32 bits x 4
  Binary0fOpCode(kX86Subps),    // parallel FP subtraction 32 bits x 4
  Binary0fOpCode(kX86Subpd),    // parallel FP subtraction 64 bits x 2
  Binary0fOpCode(kX86Pand),     // parallel AND 128 bits x 1
  Binary0fOpCode(kX86Por),      // parallel OR 128 bits x 1
  Binary0fOpCode(kX86Pxor),     // parallel XOR 128 bits x 1
  Binary0fOpCode(kX86Phaddw),   // parallel horizontal addition 16 bits x 8
  Binary0fOpCode(kX86Phaddd),   // parallel horizontal addition 32 bits x 4
  Binary0fOpCode(kX86Haddpd),   // parallel FP horizontal addition 64 bits x 2
  Binary0fOpCode(kX86Haddps),   // parallel FP horizontal addition 32 bits x 4
  kX86PextrbRRI,                // Extract 8 bits from XMM into GPR
  kX86PextrwRRI,                // Extract 16 bits from XMM into GPR
  kX86PextrdRRI,                // Extract 32 bits from XMM into GPR
  kX86PextrbMRI,                // Extract 8 bits from XMM into memory
  kX86PextrwMRI,                // Extract 16 bits from XMM into memory
  kX86PextrdMRI,                // Extract 32 bits from XMM into memory
  kX86PshuflwRRI,               // Shuffle 16 bits in lower 64 bits of XMM.
  kX86PshufdRRI,                // Shuffle 32 bits in XMM.
  kX86ShufpsRRI,                // FP Shuffle 32 bits in XMM.
  kX86ShufpdRRI,                // FP Shuffle 64 bits in XMM.
  kX86PsrawRI,                  // signed right shift of floating point registers 16 bits x 8
  kX86PsradRI,                  // signed right shift of floating point registers 32 bits x 4
  kX86PsrlwRI,                  // logical right shift of floating point registers 16 bits x 8
  kX86PsrldRI,                  // logical right shift of floating point registers 32 bits x 4
  kX86PsrlqRI,                  // logical right shift of floating point registers 64 bits x 2
  kX86PsllwRI,                  // left shift of floating point registers 16 bits x 8
  kX86PslldRI,                  // left shift of floating point registers 32 bits x 4
  kX86PsllqRI,                  // left shift of floating point registers 64 bits x 2
  kX86Fild32M,                  // push 32-bit integer on x87 stack
  kX86Fild64M,                  // push 64-bit integer on x87 stack
  kX86Fld32M,                   // push float on x87 stack
  kX86Fld64M,                   // push double on x87 stack
  kX86Fstp32M,                  // pop top x87 fp stack and do 32-bit store
  kX86Fstp64M,                  // pop top x87 fp stack and do 64-bit store
  kX86Fst32M,                   // do 32-bit store
  kX86Fst64M,                   // do 64-bit store
  kX86Fprem,                    // remainder from dividing of two floating point values
  kX86Fucompp,                  // compare floating point values and pop x87 fp stack twice
  kX86Fstsw16R,                 // store FPU status word
  Binary0fOpCode(kX86Mova128),  // move 128 bits aligned
  kX86Mova128MR, kX86Mova128AR,  // store 128 bit aligned from xmm1 to m128
  Binary0fOpCode(kX86Movups),   // load unaligned packed single FP values from xmm2/m128 to xmm1
  kX86MovupsMR, kX86MovupsAR,   // store unaligned packed single FP values from xmm1 to m128
  Binary0fOpCode(kX86Movaps),   // load aligned packed single FP values from xmm2/m128 to xmm1
  kX86MovapsMR, kX86MovapsAR,   // store aligned packed single FP values from xmm1 to m128
  kX86MovlpsRM, kX86MovlpsRA,   // load packed single FP values from m64 to low quadword of xmm
  kX86MovlpsMR, kX86MovlpsAR,   // store packed single FP values from low quadword of xmm to m64
  kX86MovhpsRM, kX86MovhpsRA,   // load packed single FP values from m64 to high quadword of xmm
  kX86MovhpsMR, kX86MovhpsAR,   // store packed single FP values from high quadword of xmm to m64
  Binary0fOpCode(kX86Movdxr),   // move into xmm from gpr
  Binary0fOpCode(kX86Movqxr),   // move into xmm from 64 bit gpr
  kX86MovqrxRR, kX86MovqrxMR, kX86MovqrxAR,  // move into 64 bit reg from xmm
  kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR,  // move into reg from xmm
  kX86MovsxdRR, kX86MovsxdRM, kX86MovsxdRA,  // move 32 bit to 64 bit with sign extension
  kX86Set8R, kX86Set8M, kX86Set8A,  // set byte depending on condition operand
  kX86Mfence,                   // memory barrier
  Binary0fOpCode(kX86Imul16),   // 16bit multiply
  Binary0fOpCode(kX86Imul32),   // 32bit multiply
  Binary0fOpCode(kX86Imul64),   // 64bit multiply
  kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR,  // compare and exchange
  kX86LockCmpxchgMR, kX86LockCmpxchgAR, kX86LockCmpxchg64AR,  // locked compare and exchange
  kX86LockCmpxchg64M, kX86LockCmpxchg64A,  // locked compare and exchange
  kX86XchgMR,  // exchange memory with register (automatically locked)
  Binary0fOpCode(kX86Movzx8),   // zero-extend 8-bit value
  Binary0fOpCode(kX86Movzx16),  // zero-extend 16-bit value
  Binary0fOpCode(kX86Movsx8),   // sign-extend 8-bit value
  Binary0fOpCode(kX86Movsx16),  // sign-extend 16-bit value
  Binary0fOpCode(kX86Movzx8q),   // zero-extend 8-bit value to quad word
  Binary0fOpCode(kX86Movzx16q),  // zero-extend 16-bit value to quad word
  Binary0fOpCode(kX86Movsx8q),   // sign-extend 8-bit value to quad word
  Binary0fOpCode(kX86Movsx16q),  // sign-extend 16-bit value to quad word
#undef Binary0fOpCode
  kX86Jcc8, kX86Jcc32,  // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
  kX86Jmp8, kX86Jmp32,  // jmp rel8/32; lir operands - 0: rel, target assigned
  kX86JmpR,             // jmp reg; lir operands - 0: reg
  kX86Jecxz8,           // jecxz rel8; jump relative if ECX is zero.
  kX86JmpT,             // jmp fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp

  kX86CallR,            // call reg; lir operands - 0: reg
  kX86CallM,            // call [base + disp]; lir operands - 0: base, 1: disp
  kX86CallA,            // call [base + index * scale + disp]
                        // lir operands - 0: base, 1: index, 2: scale, 3: disp
  kX86CallT,            // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
  kX86CallI,            // call <relative> - 0: disp; Used for core.oat linking only
  kX86Ret,              // ret; no lir operands
  kX86StartOfMethod,    // call 0; pop reg; sub reg, # - generate start of method into reg
                        // lir operands - 0: reg
  kX86PcRelLoadRA,      // mov reg, [base + index * scale + PC relative displacement]
                        // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
  kX86PcRelAdr,         // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
  kX86RepneScasw,       // repne scasw
  kX86Last
};

/* Instruction assembly field_loc kind */
enum X86EncodingKind {
  kData,                                    // Special case for raw data.
  kNop,                                     // Special case for variable length nop.
  kNullary,                                 // Opcode that takes no arguments.
  kRegOpcode,                               // Shorter form of R instruction kind (opcode+rd)
  kReg, kMem, kArray,                       // R, M and A instruction kinds.
  kMemReg, kArrayReg, kThreadReg,           // MR, AR and TR instruction kinds.
  kRegReg, kRegMem, kRegArray, kRegThread,  // RR, RM, RA and RT instruction kinds.
  kRegRegStore,                             // RR following the store modrm reg-reg encoding rather than the load.
  kRegImm, kMemImm, kArrayImm, kThreadImm,  // RI, MI, AI and TI instruction kinds.
  kRegRegImm, kRegMemImm, kRegArrayImm,     // RRI, RMI and RAI instruction kinds.
  kMovRegImm,                               // Shorter form move RI.
  kMovRegQuadImm,                           // 64 bit move RI
  kRegRegImmStore,                          // RRI following the store modrm reg-reg encoding rather than the load.
  kMemRegImm,                               // MRI instruction kinds.
  kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
  kShiftRegCl, kShiftMemCl, kShiftArrayCl,     // Shift opcode with register CL.
  // kRegRegReg, kRegRegMem, kRegRegArray,    // RRR, RRM, RRA instruction kinds.
  kRegCond, kMemCond, kArrayCond,          // R, M, A instruction kinds followed by a condition.
  kRegRegCond,                             // RR instruction kind followed by a condition.
  kRegMemCond,                             // RM instruction kind followed by a condition.
  kJmp, kJcc, kCall,                       // Branch instruction kinds.
  kPcRel,                                  // Operation with displacement that is PC relative
  kMacro,                                  // An instruction composing multiple others
  kUnimplemented                           // Encoding used when an instruction isn't yet implemented.
};

/* Struct used to define the EncodingMap positions for each X86 opcode */
struct X86EncodingMap {
  X86OpCode opcode;      // e.g. kX86Add32RI.
  // The broad category the instruction conforms to, such as kRegReg. Identifies which LIR operands
  // hold meaning for the opcode.
  X86EncodingKind kind;
  uint64_t flags;
  struct {
    uint8_t prefix1;         // Non-zero => a prefix byte.
    uint8_t prefix2;         // Non-zero => a second prefix byte.
    uint8_t opcode;          // 1 byte opcode.
    uint8_t extra_opcode1;   // Possible extra opcode byte.
    uint8_t extra_opcode2;   // Possible second extra opcode byte.
    // 3-bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
    // encoding kind.
    uint8_t modrm_opcode;
    uint8_t ax_opcode;       // Non-zero => shorter encoding for AX as a destination.
    uint8_t immediate_bytes;  // Number of bytes of immediate.
    // Does the instruction address a byte register? In 32-bit mode the registers ah, bh, ch and dh
    // are not used. In 64-bit mode the REX prefix is used to normalize and allow any byte register
    // to be addressed.
    bool r8_form;
  } skeleton;
  const char* name;
  const char* fmt;
};
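
// Purely illustrative sketch of one table entry (added here; the authoritative entries, including
// the exact flags and format strings, live in Assemble.cc): kX86Add32RI, being a kRegImm
// instruction, would roughly pair
//   { 0, 0, 0x81, 0, 0, 0x0, 0x05, 4, false }
// as its skeleton - no prefixes, the x86 ADD r/m32, imm32 opcode 0x81 with modrm_opcode /0, the
// shorter 0x05 encoding when EAX is the destination, a 4-byte immediate and no byte-register
// form - together with its def/use flags, a name such as "Add32RI" and a disassembly format
// string.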


// FIXME: mem barrier type - what do we do for x86?
#define kSY 0
#define kST 0

// Offsets of high and low halves of a 64bit value.
#define LOWORD_OFFSET 0
#define HIWORD_OFFSET 4

// Segment override instruction prefix used for quick TLS access to Thread::Current().
#define THREAD_PREFIX 0x64
#define THREAD_PREFIX_GS 0x65
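// (Annotation: 0x64 and 0x65 are the x86 fs: and gs: segment-override prefix bytes, matching the
// fs:/gs: Thread::Current() bases described at the top of this file and used by the thread (T)
// addressing forms of the opcodes above.)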

// 64 Bit Operand Size
#define REX_W 0x48
// Extension of the ModR/M reg field
#define REX_R 0x44
// Extension of the SIB index field
#define REX_X 0x42
// Extension of the ModR/M r/m field, SIB base field, or Opcode reg field
#define REX_B 0x41
// An empty REX prefix used to normalize the byte operations so that they apply to R4 through R15
#define REX 0x40
// Mask extracting the least 3 bits of r0..r15
#define kRegNumMask32 0x07
// Value indicating that base or reg is not used
#define NO_REG 0
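
// Illustrative sanity checks (added; not part of the upstream header): REX prefixes are composed
// by OR-ing the individual extension bits onto the 0x40 base.
static_assert((REX | REX_W) == REX_W, "REX.W already contains the 0x40 base");
static_assert((REX_W | REX_B) == 0x49, "64-bit operand size plus an extended r/m register");
static_assert((REX_W | REX_R | REX_X | REX_B) == 0x4F, "all REX extension bits set");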

#define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))
#define IS_SIMM32(v) ((INT64_C(-2147483648) <= (v)) && ((v) <= INT64_C(2147483647)))
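
// Illustrative examples (added) of the signed-immediate range checks, e.g. as used when deciding
// whether a value fits the short 8-bit-immediate (...RI8/...MI8/...AI8) instruction forms above.
static_assert(IS_SIMM8(127) && IS_SIMM8(-128) && !IS_SIMM8(128) && !IS_SIMM8(-129),
              "simm8 covers exactly [-128, 127]");
static_assert(IS_SIMM16(-32768) && IS_SIMM16(32767) && !IS_SIMM16(32768),
              "simm16 covers exactly [-32768, 32767]");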

extern X86EncodingMap EncodingMap[kX86Last];
extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_