X86InstrInfo.cpp revision eea4a9b1e6aef4b2a03b4faf0efc10c3f7a8d800
//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <limits>

#define GET_INSTRINFO_CTOR
#include "X86GenInstrInfo.inc"

using namespace llvm;

static cl::opt<bool>
NoFusing("disable-spill-fusing",
         cl::desc("Disable fusing of spill code into instructions"));
static cl::opt<bool>
PrintFailedFusing("print-failed-fuse-candidates",
                  cl::desc("Print instructions that the allocator wants to"
                           " fuse, but the X86 backend currently can't"),
                  cl::Hidden);
static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
                 cl::desc("Re-materialize load from stub in PIC mode"),
                 cl::init(false), cl::Hidden);

enum {
  // Select which memory operand is being unfolded.
  // (stored in bits 0 - 7)
  TB_INDEX_0    = 0,
  TB_INDEX_1    = 1,
  TB_INDEX_2    = 2,
  TB_INDEX_3    = 3,
  TB_INDEX_MASK = 0xff,

  // Minimum alignment required for load/store.
  // Used for RegOp->MemOp conversion.
  // (stored in bits 8 - 15)
  TB_ALIGN_SHIFT = 8,
  TB_ALIGN_NONE  =    0 << TB_ALIGN_SHIFT,
  TB_ALIGN_16    =   16 << TB_ALIGN_SHIFT,
  TB_ALIGN_32    =   32 << TB_ALIGN_SHIFT,
  TB_ALIGN_MASK  = 0xff << TB_ALIGN_SHIFT,

  // Do not insert the reverse map (MemOp -> RegOp) into the table.
  // This may be needed because there is a many -> one mapping.
  TB_NO_REVERSE   = 1 << 16,

  // Do not insert the forward map (RegOp -> MemOp) into the table.
  // This is needed for Native Client, which prohibits branch
  // instructions from using a memory operand.
  TB_NO_FORWARD   = 1 << 17,

  TB_FOLDED_LOAD  = 1 << 18,
  TB_FOLDED_STORE = 1 << 19
};
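
// Illustrative decoding sketch (these helpers are hypothetical, not part of
// the original table code): the fields above pack into each table entry's
// single 32-bit Flags word and decode with the masks defined above.
static inline unsigned getFoldIndex(uint32_t Flags) {
  return Flags & TB_INDEX_MASK;                     // folded operand index, 0-3
}
static inline unsigned getFoldMinAlignment(uint32_t Flags) {
  return (Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT; // e.g. 16 for TB_ALIGN_16
}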

struct X86OpTblEntry {
  uint16_t RegOp;
  uint16_t MemOp;
  uint32_t Flags;
};

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : X86GenInstrInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
                     ? X86::ADJCALLSTACKDOWN64
                     : X86::ADJCALLSTACKDOWN32),
                    (tm.getSubtarget<X86Subtarget>().is64Bit()
                     ? X86::ADJCALLSTACKUP64
                     : X86::ADJCALLSTACKUP32)),
    TM(tm), RI(tm, *this) {

  static const X86OpTblEntry OpTbl2Addr[] = {
    { X86::ADC32ri,     X86::ADC32mi,    0 },
    { X86::ADC32ri8,    X86::ADC32mi8,   0 },
    { X86::ADC32rr,     X86::ADC32mr,    0 },
    { X86::ADC64ri32,   X86::ADC64mi32,  0 },
    { X86::ADC64ri8,    X86::ADC64mi8,   0 },
    { X86::ADC64rr,     X86::ADC64mr,    0 },
    { X86::ADD16ri,     X86::ADD16mi,    0 },
    { X86::ADD16ri8,    X86::ADD16mi8,   0 },
    { X86::ADD16ri_DB,  X86::ADD16mi,    TB_NO_REVERSE },
    { X86::ADD16ri8_DB, X86::ADD16mi8,   TB_NO_REVERSE },
    { X86::ADD16rr,     X86::ADD16mr,    0 },
    { X86::ADD16rr_DB,  X86::ADD16mr,    TB_NO_REVERSE },
    { X86::ADD32ri,     X86::ADD32mi,    0 },
    { X86::ADD32ri8,    X86::ADD32mi8,   0 },
    { X86::ADD32ri_DB,  X86::ADD32mi,    TB_NO_REVERSE },
    { X86::ADD32ri8_DB, X86::ADD32mi8,   TB_NO_REVERSE },
    { X86::ADD32rr,     X86::ADD32mr,    0 },
    { X86::ADD32rr_DB,  X86::ADD32mr,    TB_NO_REVERSE },
    { X86::ADD64ri32,   X86::ADD64mi32,  0 },
    { X86::ADD64ri8,    X86::ADD64mi8,   0 },
    { X86::ADD64ri32_DB,X86::ADD64mi32,  TB_NO_REVERSE },
    { X86::ADD64ri8_DB, X86::ADD64mi8,   TB_NO_REVERSE },
    { X86::ADD64rr,     X86::ADD64mr,    0 },
    { X86::ADD64rr_DB,  X86::ADD64mr,    TB_NO_REVERSE },
    { X86::ADD8ri,      X86::ADD8mi,     0 },
    { X86::ADD8rr,      X86::ADD8mr,     0 },
    { X86::AND16ri,     X86::AND16mi,    0 },
    { X86::AND16ri8,    X86::AND16mi8,   0 },
    { X86::AND16rr,     X86::AND16mr,    0 },
    { X86::AND32ri,     X86::AND32mi,    0 },
    { X86::AND32ri8,    X86::AND32mi8,   0 },
    { X86::AND32rr,     X86::AND32mr,    0 },
    { X86::AND64ri32,   X86::AND64mi32,  0 },
    { X86::AND64ri8,    X86::AND64mi8,   0 },
    { X86::AND64rr,     X86::AND64mr,    0 },
    { X86::AND8ri,      X86::AND8mi,     0 },
    { X86::AND8rr,      X86::AND8mr,     0 },
    { X86::DEC16r,      X86::DEC16m,     0 },
    { X86::DEC32r,      X86::DEC32m,     0 },
    { X86::DEC64_16r,   X86::DEC64_16m,  0 },
    { X86::DEC64_32r,   X86::DEC64_32m,  0 },
    { X86::DEC64r,      X86::DEC64m,     0 },
    { X86::DEC8r,       X86::DEC8m,      0 },
    { X86::INC16r,      X86::INC16m,     0 },
    { X86::INC32r,      X86::INC32m,     0 },
    { X86::INC64_16r,   X86::INC64_16m,  0 },
    { X86::INC64_32r,   X86::INC64_32m,  0 },
    { X86::INC64r,      X86::INC64m,     0 },
    { X86::INC8r,       X86::INC8m,      0 },
    { X86::NEG16r,      X86::NEG16m,     0 },
    { X86::NEG32r,      X86::NEG32m,     0 },
    { X86::NEG64r,      X86::NEG64m,     0 },
    { X86::NEG8r,       X86::NEG8m,      0 },
    { X86::NOT16r,      X86::NOT16m,     0 },
    { X86::NOT32r,      X86::NOT32m,     0 },
    { X86::NOT64r,      X86::NOT64m,     0 },
    { X86::NOT8r,       X86::NOT8m,      0 },
    { X86::OR16ri,      X86::OR16mi,     0 },
    { X86::OR16ri8,     X86::OR16mi8,    0 },
    { X86::OR16rr,      X86::OR16mr,     0 },
    { X86::OR32ri,      X86::OR32mi,     0 },
    { X86::OR32ri8,     X86::OR32mi8,    0 },
    { X86::OR32rr,      X86::OR32mr,     0 },
    { X86::OR64ri32,    X86::OR64mi32,   0 },
    { X86::OR64ri8,     X86::OR64mi8,    0 },
    { X86::OR64rr,      X86::OR64mr,     0 },
    { X86::OR8ri,       X86::OR8mi,      0 },
    { X86::OR8rr,       X86::OR8mr,      0 },
    { X86::ROL16r1,     X86::ROL16m1,    0 },
    { X86::ROL16rCL,    X86::ROL16mCL,   0 },
    { X86::ROL16ri,     X86::ROL16mi,    0 },
    { X86::ROL32r1,     X86::ROL32m1,    0 },
    { X86::ROL32rCL,    X86::ROL32mCL,   0 },
    { X86::ROL32ri,     X86::ROL32mi,    0 },
    { X86::ROL64r1,     X86::ROL64m1,    0 },
    { X86::ROL64rCL,    X86::ROL64mCL,   0 },
    { X86::ROL64ri,     X86::ROL64mi,    0 },
    { X86::ROL8r1,      X86::ROL8m1,     0 },
    { X86::ROL8rCL,     X86::ROL8mCL,    0 },
    { X86::ROL8ri,      X86::ROL8mi,     0 },
    { X86::ROR16r1,     X86::ROR16m1,    0 },
    { X86::ROR16rCL,    X86::ROR16mCL,   0 },
    { X86::ROR16ri,     X86::ROR16mi,    0 },
    { X86::ROR32r1,     X86::ROR32m1,    0 },
    { X86::ROR32rCL,    X86::ROR32mCL,   0 },
    { X86::ROR32ri,     X86::ROR32mi,    0 },
    { X86::ROR64r1,     X86::ROR64m1,    0 },
    { X86::ROR64rCL,    X86::ROR64mCL,   0 },
    { X86::ROR64ri,     X86::ROR64mi,    0 },
    { X86::ROR8r1,      X86::ROR8m1,     0 },
    { X86::ROR8rCL,     X86::ROR8mCL,    0 },
    { X86::ROR8ri,      X86::ROR8mi,     0 },
    { X86::SAR16r1,     X86::SAR16m1,    0 },
    { X86::SAR16rCL,    X86::SAR16mCL,   0 },
    { X86::SAR16ri,     X86::SAR16mi,    0 },
    { X86::SAR32r1,     X86::SAR32m1,    0 },
    { X86::SAR32rCL,    X86::SAR32mCL,   0 },
    { X86::SAR32ri,     X86::SAR32mi,    0 },
    { X86::SAR64r1,     X86::SAR64m1,    0 },
    { X86::SAR64rCL,    X86::SAR64mCL,   0 },
    { X86::SAR64ri,     X86::SAR64mi,    0 },
    { X86::SAR8r1,      X86::SAR8m1,     0 },
    { X86::SAR8rCL,     X86::SAR8mCL,    0 },
    { X86::SAR8ri,      X86::SAR8mi,     0 },
    { X86::SBB32ri,     X86::SBB32mi,    0 },
    { X86::SBB32ri8,    X86::SBB32mi8,   0 },
    { X86::SBB32rr,     X86::SBB32mr,    0 },
    { X86::SBB64ri32,   X86::SBB64mi32,  0 },
    { X86::SBB64ri8,    X86::SBB64mi8,   0 },
    { X86::SBB64rr,     X86::SBB64mr,    0 },
    { X86::SHL16rCL,    X86::SHL16mCL,   0 },
    { X86::SHL16ri,     X86::SHL16mi,    0 },
    { X86::SHL32rCL,    X86::SHL32mCL,   0 },
    { X86::SHL32ri,     X86::SHL32mi,    0 },
    { X86::SHL64rCL,    X86::SHL64mCL,   0 },
    { X86::SHL64ri,     X86::SHL64mi,    0 },
    { X86::SHL8rCL,     X86::SHL8mCL,    0 },
    { X86::SHL8ri,      X86::SHL8mi,     0 },
    { X86::SHLD16rrCL,  X86::SHLD16mrCL, 0 },
    { X86::SHLD16rri8,  X86::SHLD16mri8, 0 },
    { X86::SHLD32rrCL,  X86::SHLD32mrCL, 0 },
    { X86::SHLD32rri8,  X86::SHLD32mri8, 0 },
    { X86::SHLD64rrCL,  X86::SHLD64mrCL, 0 },
    { X86::SHLD64rri8,  X86::SHLD64mri8, 0 },
    { X86::SHR16r1,     X86::SHR16m1,    0 },
    { X86::SHR16rCL,    X86::SHR16mCL,   0 },
    { X86::SHR16ri,     X86::SHR16mi,    0 },
    { X86::SHR32r1,     X86::SHR32m1,    0 },
    { X86::SHR32rCL,    X86::SHR32mCL,   0 },
    { X86::SHR32ri,     X86::SHR32mi,    0 },
    { X86::SHR64r1,     X86::SHR64m1,    0 },
    { X86::SHR64rCL,    X86::SHR64mCL,   0 },
    { X86::SHR64ri,     X86::SHR64mi,    0 },
    { X86::SHR8r1,      X86::SHR8m1,     0 },
    { X86::SHR8rCL,     X86::SHR8mCL,    0 },
    { X86::SHR8ri,      X86::SHR8mi,     0 },
    { X86::SHRD16rrCL,  X86::SHRD16mrCL, 0 },
    { X86::SHRD16rri8,  X86::SHRD16mri8, 0 },
    { X86::SHRD32rrCL,  X86::SHRD32mrCL, 0 },
    { X86::SHRD32rri8,  X86::SHRD32mri8, 0 },
    { X86::SHRD64rrCL,  X86::SHRD64mrCL, 0 },
    { X86::SHRD64rri8,  X86::SHRD64mri8, 0 },
    { X86::SUB16ri,     X86::SUB16mi,    0 },
    { X86::SUB16ri8,    X86::SUB16mi8,   0 },
    { X86::SUB16rr,     X86::SUB16mr,    0 },
    { X86::SUB32ri,     X86::SUB32mi,    0 },
    { X86::SUB32ri8,    X86::SUB32mi8,   0 },
    { X86::SUB32rr,     X86::SUB32mr,    0 },
    { X86::SUB64ri32,   X86::SUB64mi32,  0 },
    { X86::SUB64ri8,    X86::SUB64mi8,   0 },
    { X86::SUB64rr,     X86::SUB64mr,    0 },
    { X86::SUB8ri,      X86::SUB8mi,     0 },
    { X86::SUB8rr,      X86::SUB8mr,     0 },
    { X86::XOR16ri,     X86::XOR16mi,    0 },
    { X86::XOR16ri8,    X86::XOR16mi8,   0 },
    { X86::XOR16rr,     X86::XOR16mr,    0 },
    { X86::XOR32ri,     X86::XOR32mi,    0 },
    { X86::XOR32ri8,    X86::XOR32mi8,   0 },
    { X86::XOR32rr,     X86::XOR32mr,    0 },
    { X86::XOR64ri32,   X86::XOR64mi32,  0 },
    { X86::XOR64ri8,    X86::XOR64mi8,   0 },
    { X86::XOR64rr,     X86::XOR64mr,    0 },
    { X86::XOR8ri,      X86::XOR8mi,     0 },
    { X86::XOR8rr,      X86::XOR8mr,     0 }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
    unsigned RegOp = OpTbl2Addr[i].RegOp;
    unsigned MemOp = OpTbl2Addr[i].MemOp;
    unsigned Flags = OpTbl2Addr[i].Flags;
    AddTableEntry(RegOp2MemOpTable2Addr, MemOp2RegOpTable,
                  RegOp, MemOp,
                  // Index 0, folded load and store, no alignment requirement.
                  Flags | TB_INDEX_0 | TB_FOLDED_LOAD | TB_FOLDED_STORE);
  }
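
  // Illustrative example of what a two-address fold means: using the table
  // above,
  //   %reg = ADD32rr %reg, %src
  // can be rewritten as the load-modify-store form
  //   ADD32mr <mem>, %src
  // which is why every OpTbl2Addr entry is registered with both
  // TB_FOLDED_LOAD and TB_FOLDED_STORE.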

  static const X86OpTblEntry OpTbl0[] = {
    { X86::BT16ri8,     X86::BT16mi8,       TB_FOLDED_LOAD },
    { X86::BT32ri8,     X86::BT32mi8,       TB_FOLDED_LOAD },
    { X86::BT64ri8,     X86::BT64mi8,       TB_FOLDED_LOAD },
    { X86::CALL32r,     X86::CALL32m,       TB_FOLDED_LOAD },
    { X86::CALL64r,     X86::CALL64m,       TB_FOLDED_LOAD },
    { X86::CMP16ri,     X86::CMP16mi,       TB_FOLDED_LOAD },
    { X86::CMP16ri8,    X86::CMP16mi8,      TB_FOLDED_LOAD },
    { X86::CMP16rr,     X86::CMP16mr,       TB_FOLDED_LOAD },
    { X86::CMP32ri,     X86::CMP32mi,       TB_FOLDED_LOAD },
    { X86::CMP32ri8,    X86::CMP32mi8,      TB_FOLDED_LOAD },
    { X86::CMP32rr,     X86::CMP32mr,       TB_FOLDED_LOAD },
    { X86::CMP64ri32,   X86::CMP64mi32,     TB_FOLDED_LOAD },
    { X86::CMP64ri8,    X86::CMP64mi8,      TB_FOLDED_LOAD },
    { X86::CMP64rr,     X86::CMP64mr,       TB_FOLDED_LOAD },
    { X86::CMP8ri,      X86::CMP8mi,        TB_FOLDED_LOAD },
    { X86::CMP8rr,      X86::CMP8mr,        TB_FOLDED_LOAD },
    { X86::DIV16r,      X86::DIV16m,        TB_FOLDED_LOAD },
    { X86::DIV32r,      X86::DIV32m,        TB_FOLDED_LOAD },
    { X86::DIV64r,      X86::DIV64m,        TB_FOLDED_LOAD },
    { X86::DIV8r,       X86::DIV8m,         TB_FOLDED_LOAD },
    { X86::EXTRACTPSrr, X86::EXTRACTPSmr,   TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::FsMOVAPDrr,  X86::MOVSDmr,       TB_FOLDED_STORE | TB_NO_REVERSE },
    { X86::FsMOVAPSrr,  X86::MOVSSmr,       TB_FOLDED_STORE | TB_NO_REVERSE },
    { X86::IDIV16r,     X86::IDIV16m,       TB_FOLDED_LOAD },
    { X86::IDIV32r,     X86::IDIV32m,       TB_FOLDED_LOAD },
    { X86::IDIV64r,     X86::IDIV64m,       TB_FOLDED_LOAD },
    { X86::IDIV8r,      X86::IDIV8m,        TB_FOLDED_LOAD },
    { X86::IMUL16r,     X86::IMUL16m,       TB_FOLDED_LOAD },
    { X86::IMUL32r,     X86::IMUL32m,       TB_FOLDED_LOAD },
    { X86::IMUL64r,     X86::IMUL64m,       TB_FOLDED_LOAD },
    { X86::IMUL8r,      X86::IMUL8m,        TB_FOLDED_LOAD },
    { X86::JMP32r,      X86::JMP32m,        TB_FOLDED_LOAD },
    { X86::JMP64r,      X86::JMP64m,        TB_FOLDED_LOAD },
    { X86::MOV16ri,     X86::MOV16mi,       TB_FOLDED_STORE },
    { X86::MOV16rr,     X86::MOV16mr,       TB_FOLDED_STORE },
    { X86::MOV32ri,     X86::MOV32mi,       TB_FOLDED_STORE },
    { X86::MOV32rr,     X86::MOV32mr,       TB_FOLDED_STORE },
    { X86::MOV64ri32,   X86::MOV64mi32,     TB_FOLDED_STORE },
    { X86::MOV64rr,     X86::MOV64mr,       TB_FOLDED_STORE },
    { X86::MOV8ri,      X86::MOV8mi,        TB_FOLDED_STORE },
    { X86::MOV8rr,      X86::MOV8mr,        TB_FOLDED_STORE },
    { X86::MOV8rr_NOREX, X86::MOV8mr_NOREX, TB_FOLDED_STORE },
    { X86::MOVAPDrr,    X86::MOVAPDmr,      TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::MOVAPSrr,    X86::MOVAPSmr,      TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::MOVDQArr,    X86::MOVDQAmr,      TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::MOVPDI2DIrr, X86::MOVPDI2DImr,   TB_FOLDED_STORE },
    { X86::MOVPQIto64rr,X86::MOVPQI2QImr,   TB_FOLDED_STORE },
    { X86::MOVSDto64rr, X86::MOVSDto64mr,   TB_FOLDED_STORE },
    { X86::MOVSS2DIrr,  X86::MOVSS2DImr,    TB_FOLDED_STORE },
    { X86::MOVUPDrr,    X86::MOVUPDmr,      TB_FOLDED_STORE },
    { X86::MOVUPSrr,    X86::MOVUPSmr,      TB_FOLDED_STORE },
    { X86::MUL16r,      X86::MUL16m,        TB_FOLDED_LOAD },
    { X86::MUL32r,      X86::MUL32m,        TB_FOLDED_LOAD },
    { X86::MUL64r,      X86::MUL64m,        TB_FOLDED_LOAD },
    { X86::MUL8r,       X86::MUL8m,         TB_FOLDED_LOAD },
    { X86::SETAEr,      X86::SETAEm,        TB_FOLDED_STORE },
    { X86::SETAr,       X86::SETAm,         TB_FOLDED_STORE },
    { X86::SETBEr,      X86::SETBEm,        TB_FOLDED_STORE },
    { X86::SETBr,       X86::SETBm,         TB_FOLDED_STORE },
    { X86::SETEr,       X86::SETEm,         TB_FOLDED_STORE },
    { X86::SETGEr,      X86::SETGEm,        TB_FOLDED_STORE },
    { X86::SETGr,       X86::SETGm,         TB_FOLDED_STORE },
    { X86::SETLEr,      X86::SETLEm,        TB_FOLDED_STORE },
    { X86::SETLr,       X86::SETLm,         TB_FOLDED_STORE },
    { X86::SETNEr,      X86::SETNEm,        TB_FOLDED_STORE },
    { X86::SETNOr,      X86::SETNOm,        TB_FOLDED_STORE },
    { X86::SETNPr,      X86::SETNPm,        TB_FOLDED_STORE },
    { X86::SETNSr,      X86::SETNSm,        TB_FOLDED_STORE },
    { X86::SETOr,       X86::SETOm,         TB_FOLDED_STORE },
    { X86::SETPr,       X86::SETPm,         TB_FOLDED_STORE },
    { X86::SETSr,       X86::SETSm,         TB_FOLDED_STORE },
    { X86::TAILJMPr,    X86::TAILJMPm,      TB_FOLDED_LOAD },
    { X86::TAILJMPr64,  X86::TAILJMPm64,    TB_FOLDED_LOAD },
    { X86::TEST16ri,    X86::TEST16mi,      TB_FOLDED_LOAD },
    { X86::TEST32ri,    X86::TEST32mi,      TB_FOLDED_LOAD },
    { X86::TEST64ri32,  X86::TEST64mi32,    TB_FOLDED_LOAD },
    { X86::TEST8ri,     X86::TEST8mi,       TB_FOLDED_LOAD },
    // AVX 128-bit versions of foldable instructions
    { X86::VEXTRACTPSrr,X86::VEXTRACTPSmr,  TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::FsVMOVAPDrr, X86::VMOVSDmr,      TB_FOLDED_STORE | TB_NO_REVERSE },
    { X86::FsVMOVAPSrr, X86::VMOVSSmr,      TB_FOLDED_STORE | TB_NO_REVERSE },
    { X86::VEXTRACTF128rr, X86::VEXTRACTF128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVAPDrr,   X86::VMOVAPDmr,     TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVAPSrr,   X86::VMOVAPSmr,     TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVDQArr,   X86::VMOVDQAmr,     TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVPDI2DIrr,X86::VMOVPDI2DImr,  TB_FOLDED_STORE },
    { X86::VMOVPQIto64rr, X86::VMOVPQI2QImr,TB_FOLDED_STORE },
    { X86::VMOVSDto64rr,X86::VMOVSDto64mr,  TB_FOLDED_STORE },
    { X86::VMOVSS2DIrr, X86::VMOVSS2DImr,   TB_FOLDED_STORE },
    { X86::VMOVUPDrr,   X86::VMOVUPDmr,     TB_FOLDED_STORE },
    { X86::VMOVUPSrr,   X86::VMOVUPSmr,     TB_FOLDED_STORE },
    // AVX 256-bit foldable instructions
    { X86::VEXTRACTI128rr, X86::VEXTRACTI128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVAPDYrr,  X86::VMOVAPDYmr,    TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVAPSYrr,  X86::VMOVAPSYmr,    TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVDQAYrr,  X86::VMOVDQAYmr,    TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVUPDYrr,  X86::VMOVUPDYmr,    TB_FOLDED_STORE },
    { X86::VMOVUPSYrr,  X86::VMOVUPSYmr,    TB_FOLDED_STORE }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
    unsigned RegOp      = OpTbl0[i].RegOp;
    unsigned MemOp      = OpTbl0[i].MemOp;
    unsigned Flags      = OpTbl0[i].Flags;
    AddTableEntry(RegOp2MemOpTable0, MemOp2RegOpTable,
                  RegOp, MemOp, TB_INDEX_0 | Flags);
  }
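
  // How OpTbl0 entries are used (an informal sketch): TB_INDEX_0 marks
  // operand 0 itself as the one replaced by a memory operand, and each
  // entry's TB_FOLDED_LOAD or TB_FOLDED_STORE flag records whether the memory
  // form reads (e.g. CALL32r -> CALL32m) or writes (e.g. MOV32rr -> MOV32mr)
  // that location, so spill code picks the right direction.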

  static const X86OpTblEntry OpTbl1[] = {
    { X86::CMP16rr,         X86::CMP16rm,             0 },
    { X86::CMP32rr,         X86::CMP32rm,             0 },
    { X86::CMP64rr,         X86::CMP64rm,             0 },
    { X86::CMP8rr,          X86::CMP8rm,              0 },
    { X86::CVTSD2SSrr,      X86::CVTSD2SSrm,          0 },
    { X86::CVTSI2SD64rr,    X86::CVTSI2SD64rm,        0 },
    { X86::CVTSI2SDrr,      X86::CVTSI2SDrm,          0 },
    { X86::CVTSI2SS64rr,    X86::CVTSI2SS64rm,        0 },
    { X86::CVTSI2SSrr,      X86::CVTSI2SSrm,          0 },
    { X86::CVTSS2SDrr,      X86::CVTSS2SDrm,          0 },
    { X86::CVTTSD2SI64rr,   X86::CVTTSD2SI64rm,       0 },
    { X86::CVTTSD2SIrr,     X86::CVTTSD2SIrm,         0 },
    { X86::CVTTSS2SI64rr,   X86::CVTTSS2SI64rm,       0 },
    { X86::CVTTSS2SIrr,     X86::CVTTSS2SIrm,         0 },
    { X86::FsMOVAPDrr,      X86::MOVSDrm,             TB_NO_REVERSE },
    { X86::FsMOVAPSrr,      X86::MOVSSrm,             TB_NO_REVERSE },
    { X86::IMUL16rri,       X86::IMUL16rmi,           0 },
    { X86::IMUL16rri8,      X86::IMUL16rmi8,          0 },
    { X86::IMUL32rri,       X86::IMUL32rmi,           0 },
    { X86::IMUL32rri8,      X86::IMUL32rmi8,          0 },
    { X86::IMUL64rri32,     X86::IMUL64rmi32,         0 },
    { X86::IMUL64rri8,      X86::IMUL64rmi8,          0 },
    { X86::Int_COMISDrr,    X86::Int_COMISDrm,        0 },
    { X86::Int_COMISSrr,    X86::Int_COMISSrm,        0 },
    { X86::Int_CVTDQ2PSrr,  X86::Int_CVTDQ2PSrm,      TB_ALIGN_16 },
    { X86::Int_CVTPD2DQrr,  X86::Int_CVTPD2DQrm,      TB_ALIGN_16 },
    { X86::Int_CVTPD2PSrr,  X86::Int_CVTPD2PSrm,      TB_ALIGN_16 },
    { X86::Int_CVTPS2DQrr,  X86::Int_CVTPS2DQrm,      TB_ALIGN_16 },
    { X86::Int_CVTPS2PDrr,  X86::Int_CVTPS2PDrm,      0 },
    { X86::CVTSD2SI64rr,    X86::CVTSD2SI64rm,        0 },
    { X86::CVTSD2SIrr,      X86::CVTSD2SIrm,          0 },
    { X86::CVTSS2SI64rr,    X86::CVTSS2SI64rm,        0 },
    { X86::CVTSS2SIrr,      X86::CVTSS2SIrm,          0 },
    { X86::Int_CVTSD2SSrr,  X86::Int_CVTSD2SSrm,      0 },
    { X86::Int_CVTSI2SD64rr,X86::Int_CVTSI2SD64rm,    0 },
    { X86::Int_CVTSI2SDrr,  X86::Int_CVTSI2SDrm,      0 },
    { X86::Int_CVTSI2SS64rr,X86::Int_CVTSI2SS64rm,    0 },
    { X86::Int_CVTSI2SSrr,  X86::Int_CVTSI2SSrm,      0 },
    { X86::Int_CVTSS2SDrr,  X86::Int_CVTSS2SDrm,      0 },
    { X86::CVTTPD2DQrr,     X86::CVTTPD2DQrm,         TB_ALIGN_16 },
    { X86::CVTTPS2DQrr,     X86::CVTTPS2DQrm,         TB_ALIGN_16 },
    { X86::Int_CVTTSD2SI64rr,X86::Int_CVTTSD2SI64rm,  0 },
    { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm,     0 },
    { X86::Int_CVTTSS2SI64rr,X86::Int_CVTTSS2SI64rm,  0 },
    { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm,     0 },
    { X86::Int_UCOMISDrr,   X86::Int_UCOMISDrm,       0 },
    { X86::Int_UCOMISSrr,   X86::Int_UCOMISSrm,       0 },
    { X86::MOV16rr,         X86::MOV16rm,             0 },
    { X86::MOV32rr,         X86::MOV32rm,             0 },
    { X86::MOV64rr,         X86::MOV64rm,             0 },
    { X86::MOV64toPQIrr,    X86::MOVQI2PQIrm,         0 },
    { X86::MOV64toSDrr,     X86::MOV64toSDrm,         0 },
    { X86::MOV8rr,          X86::MOV8rm,              0 },
    { X86::MOVAPDrr,        X86::MOVAPDrm,            TB_ALIGN_16 },
    { X86::MOVAPSrr,        X86::MOVAPSrm,            TB_ALIGN_16 },
    { X86::MOVDDUPrr,       X86::MOVDDUPrm,           0 },
    { X86::MOVDI2PDIrr,     X86::MOVDI2PDIrm,         0 },
    { X86::MOVDI2SSrr,      X86::MOVDI2SSrm,          0 },
    { X86::MOVDQArr,        X86::MOVDQArm,            TB_ALIGN_16 },
    { X86::MOVSHDUPrr,      X86::MOVSHDUPrm,          TB_ALIGN_16 },
    { X86::MOVSLDUPrr,      X86::MOVSLDUPrm,          TB_ALIGN_16 },
    { X86::MOVSX16rr8,      X86::MOVSX16rm8,          0 },
    { X86::MOVSX32rr16,     X86::MOVSX32rm16,         0 },
    { X86::MOVSX32rr8,      X86::MOVSX32rm8,          0 },
    { X86::MOVSX64rr16,     X86::MOVSX64rm16,         0 },
    { X86::MOVSX64rr32,     X86::MOVSX64rm32,         0 },
    { X86::MOVSX64rr8,      X86::MOVSX64rm8,          0 },
    { X86::MOVUPDrr,        X86::MOVUPDrm,            TB_ALIGN_16 },
    { X86::MOVUPSrr,        X86::MOVUPSrm,            0 },
    { X86::MOVZDI2PDIrr,    X86::MOVZDI2PDIrm,        0 },
    { X86::MOVZQI2PQIrr,    X86::MOVZQI2PQIrm,        0 },
    { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm,     TB_ALIGN_16 },
    { X86::MOVZX16rr8,      X86::MOVZX16rm8,          0 },
    { X86::MOVZX32rr16,     X86::MOVZX32rm16,         0 },
    { X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8,   0 },
    { X86::MOVZX32rr8,      X86::MOVZX32rm8,          0 },
    { X86::MOVZX64rr16,     X86::MOVZX64rm16,         0 },
    { X86::MOVZX64rr32,     X86::MOVZX64rm32,         0 },
    { X86::MOVZX64rr8,      X86::MOVZX64rm8,          0 },
    { X86::PABSBrr128,      X86::PABSBrm128,          TB_ALIGN_16 },
    { X86::PABSDrr128,      X86::PABSDrm128,          TB_ALIGN_16 },
    { X86::PABSWrr128,      X86::PABSWrm128,          TB_ALIGN_16 },
    { X86::PSHUFDri,        X86::PSHUFDmi,            TB_ALIGN_16 },
    { X86::PSHUFHWri,       X86::PSHUFHWmi,           TB_ALIGN_16 },
    { X86::PSHUFLWri,       X86::PSHUFLWmi,           TB_ALIGN_16 },
    { X86::RCPPSr,          X86::RCPPSm,              TB_ALIGN_16 },
    { X86::RCPPSr_Int,      X86::RCPPSm_Int,          TB_ALIGN_16 },
    { X86::RSQRTPSr,        X86::RSQRTPSm,            TB_ALIGN_16 },
    { X86::RSQRTPSr_Int,    X86::RSQRTPSm_Int,        TB_ALIGN_16 },
    { X86::RSQRTSSr,        X86::RSQRTSSm,            0 },
    { X86::RSQRTSSr_Int,    X86::RSQRTSSm_Int,        0 },
    { X86::SQRTPDr,         X86::SQRTPDm,             TB_ALIGN_16 },
    { X86::SQRTPDr_Int,     X86::SQRTPDm_Int,         TB_ALIGN_16 },
    { X86::SQRTPSr,         X86::SQRTPSm,             TB_ALIGN_16 },
    { X86::SQRTPSr_Int,     X86::SQRTPSm_Int,         TB_ALIGN_16 },
    { X86::SQRTSDr,         X86::SQRTSDm,             0 },
    { X86::SQRTSDr_Int,     X86::SQRTSDm_Int,         0 },
    { X86::SQRTSSr,         X86::SQRTSSm,             0 },
    { X86::SQRTSSr_Int,     X86::SQRTSSm_Int,         0 },
    { X86::TEST16rr,        X86::TEST16rm,            0 },
    { X86::TEST32rr,        X86::TEST32rm,            0 },
    { X86::TEST64rr,        X86::TEST64rm,            0 },
    { X86::TEST8rr,         X86::TEST8rm,             0 },
    // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
    { X86::UCOMISDrr,       X86::UCOMISDrm,           0 },
    { X86::UCOMISSrr,       X86::UCOMISSrm,           0 },
    // AVX 128-bit versions of foldable instructions
    { X86::Int_VCOMISDrr,   X86::Int_VCOMISDrm,       0 },
    { X86::Int_VCOMISSrr,   X86::Int_VCOMISSrm,       0 },
    { X86::Int_VCVTDQ2PSrr, X86::Int_VCVTDQ2PSrm,     TB_ALIGN_16 },
    { X86::Int_VCVTPD2DQrr, X86::Int_VCVTPD2DQrm,     TB_ALIGN_16 },
    { X86::Int_VCVTPD2PSrr, X86::Int_VCVTPD2PSrm,     TB_ALIGN_16 },
    { X86::Int_VCVTPS2DQrr, X86::Int_VCVTPS2DQrm,     TB_ALIGN_16 },
    { X86::Int_VCVTPS2PDrr, X86::Int_VCVTPS2PDrm,     0 },
    { X86::Int_VUCOMISDrr,  X86::Int_VUCOMISDrm,      0 },
    { X86::Int_VUCOMISSrr,  X86::Int_VUCOMISSrm,      0 },
    { X86::VCVTTSD2SI64rr,  X86::VCVTTSD2SI64rm,      0 },
    { X86::Int_VCVTTSD2SI64rr,X86::Int_VCVTTSD2SI64rm,0 },
    { X86::VCVTTSD2SIrr,    X86::VCVTTSD2SIrm,        0 },
    { X86::Int_VCVTTSD2SIrr,X86::Int_VCVTTSD2SIrm,    0 },
    { X86::VCVTTSS2SI64rr,  X86::VCVTTSS2SI64rm,      0 },
    { X86::Int_VCVTTSS2SI64rr,X86::Int_VCVTTSS2SI64rm,0 },
    { X86::VCVTTSS2SIrr,    X86::VCVTTSS2SIrm,        0 },
    { X86::Int_VCVTTSS2SIrr,X86::Int_VCVTTSS2SIrm,    0 },
    { X86::VCVTSD2SI64rr,   X86::VCVTSD2SI64rm,       0 },
    { X86::VCVTSD2SIrr,     X86::VCVTSD2SIrm,         0 },
    { X86::VCVTSS2SI64rr,   X86::VCVTSS2SI64rm,       0 },
    { X86::VCVTSS2SIrr,     X86::VCVTSS2SIrm,         0 },
    { X86::FsVMOVAPDrr,     X86::VMOVSDrm,            TB_NO_REVERSE },
    { X86::FsVMOVAPSrr,     X86::VMOVSSrm,            TB_NO_REVERSE },
    { X86::VMOV64toPQIrr,   X86::VMOVQI2PQIrm,        0 },
    { X86::VMOV64toSDrr,    X86::VMOV64toSDrm,        0 },
    { X86::VMOVAPDrr,       X86::VMOVAPDrm,           TB_ALIGN_16 },
    { X86::VMOVAPSrr,       X86::VMOVAPSrm,           TB_ALIGN_16 },
    { X86::VMOVDDUPrr,      X86::VMOVDDUPrm,          0 },
    { X86::VMOVDI2PDIrr,    X86::VMOVDI2PDIrm,        0 },
    { X86::VMOVDI2SSrr,     X86::VMOVDI2SSrm,         0 },
    { X86::VMOVDQArr,       X86::VMOVDQArm,           TB_ALIGN_16 },
    { X86::VMOVSLDUPrr,     X86::VMOVSLDUPrm,         TB_ALIGN_16 },
    { X86::VMOVSHDUPrr,     X86::VMOVSHDUPrm,         TB_ALIGN_16 },
    { X86::VMOVUPDrr,       X86::VMOVUPDrm,           TB_ALIGN_16 },
    { X86::VMOVUPSrr,       X86::VMOVUPSrm,           0 },
    { X86::VMOVZDI2PDIrr,   X86::VMOVZDI2PDIrm,       0 },
    { X86::VMOVZQI2PQIrr,   X86::VMOVZQI2PQIrm,       0 },
    { X86::VMOVZPQILo2PQIrr,X86::VMOVZPQILo2PQIrm,    TB_ALIGN_16 },
    { X86::VPABSBrr128,     X86::VPABSBrm128,         TB_ALIGN_16 },
    { X86::VPABSDrr128,     X86::VPABSDrm128,         TB_ALIGN_16 },
    { X86::VPABSWrr128,     X86::VPABSWrm128,         TB_ALIGN_16 },
    { X86::VPERMILPDri,     X86::VPERMILPDmi,         TB_ALIGN_16 },
    { X86::VPERMILPSri,     X86::VPERMILPSmi,         TB_ALIGN_16 },
    { X86::VPSHUFDri,       X86::VPSHUFDmi,           TB_ALIGN_16 },
    { X86::VPSHUFHWri,      X86::VPSHUFHWmi,          TB_ALIGN_16 },
    { X86::VPSHUFLWri,      X86::VPSHUFLWmi,          TB_ALIGN_16 },
    { X86::VRCPPSr,         X86::VRCPPSm,             TB_ALIGN_16 },
    { X86::VRCPPSr_Int,     X86::VRCPPSm_Int,         TB_ALIGN_16 },
    { X86::VRSQRTPSr,       X86::VRSQRTPSm,           TB_ALIGN_16 },
    { X86::VRSQRTPSr_Int,   X86::VRSQRTPSm_Int,       TB_ALIGN_16 },
    { X86::VSQRTPDr,        X86::VSQRTPDm,            TB_ALIGN_16 },
    { X86::VSQRTPDr_Int,    X86::VSQRTPDm_Int,        TB_ALIGN_16 },
    { X86::VSQRTPSr,        X86::VSQRTPSm,            TB_ALIGN_16 },
    { X86::VSQRTPSr_Int,    X86::VSQRTPSm_Int,        TB_ALIGN_16 },
    { X86::VUCOMISDrr,      X86::VUCOMISDrm,          0 },
    { X86::VUCOMISSrr,      X86::VUCOMISSrm,          0 },
    // AVX 256-bit foldable instructions
    { X86::VMOVAPDYrr,      X86::VMOVAPDYrm,          TB_ALIGN_32 },
    { X86::VMOVAPSYrr,      X86::VMOVAPSYrm,          TB_ALIGN_32 },
    { X86::VMOVDQAYrr,      X86::VMOVDQAYrm,          TB_ALIGN_32 },
    { X86::VMOVUPDYrr,      X86::VMOVUPDYrm,          0 },
    { X86::VMOVUPSYrr,      X86::VMOVUPSYrm,          0 },
    { X86::VPERMILPDYri,    X86::VPERMILPDYmi,        TB_ALIGN_32 },
    { X86::VPERMILPSYri,    X86::VPERMILPSYmi,        TB_ALIGN_32 },
    // AVX2 foldable instructions
    { X86::VPABSBrr256,     X86::VPABSBrm256,         TB_ALIGN_32 },
    { X86::VPABSDrr256,     X86::VPABSDrm256,         TB_ALIGN_32 },
    { X86::VPABSWrr256,     X86::VPABSWrm256,         TB_ALIGN_32 },
    { X86::VPSHUFDYri,      X86::VPSHUFDYmi,          TB_ALIGN_32 },
    { X86::VPSHUFHWYri,     X86::VPSHUFHWYmi,         TB_ALIGN_32 },
    { X86::VPSHUFLWYri,     X86::VPSHUFLWYmi,         TB_ALIGN_32 },
    { X86::VRCPPSYr,        X86::VRCPPSYm,            TB_ALIGN_32 },
    { X86::VRCPPSYr_Int,    X86::VRCPPSYm_Int,        TB_ALIGN_32 },
    { X86::VRSQRTPSYr,      X86::VRSQRTPSYm,          TB_ALIGN_32 },
    { X86::VRSQRTPSYr_Int,  X86::VRSQRTPSYm_Int,      TB_ALIGN_32 },
    { X86::VSQRTPDYr,       X86::VSQRTPDYm,           TB_ALIGN_32 },
    { X86::VSQRTPDYr_Int,   X86::VSQRTPDYm_Int,       TB_ALIGN_32 },
    { X86::VSQRTPSYr,       X86::VSQRTPSYm,           TB_ALIGN_32 },
    { X86::VSQRTPSYr_Int,   X86::VSQRTPSYm_Int,       TB_ALIGN_32 },
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
    unsigned RegOp = OpTbl1[i].RegOp;
    unsigned MemOp = OpTbl1[i].MemOp;
    unsigned Flags = OpTbl1[i].Flags;
    AddTableEntry(RegOp2MemOpTable1, MemOp2RegOpTable,
                  RegOp, MemOp,
                  // Index 1, folded load
                  Flags | TB_INDEX_1 | TB_FOLDED_LOAD);
  }
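
  // Sketch of an OpTbl1 fold (illustrative): TB_INDEX_1 | TB_FOLDED_LOAD means
  // the single source operand becomes a load, e.g.
  //   %dst = MOV32rr %src   --->   %dst = MOV32rm <mem>
  // so a reload feeding such an instruction collapses into one instruction.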

  static const X86OpTblEntry OpTbl2[] = {
    { X86::ADC32rr,         X86::ADC32rm,       0 },
    { X86::ADC64rr,         X86::ADC64rm,       0 },
    { X86::ADD16rr,         X86::ADD16rm,       0 },
    { X86::ADD16rr_DB,      X86::ADD16rm,       TB_NO_REVERSE },
    { X86::ADD32rr,         X86::ADD32rm,       0 },
    { X86::ADD32rr_DB,      X86::ADD32rm,       TB_NO_REVERSE },
    { X86::ADD64rr,         X86::ADD64rm,       0 },
    { X86::ADD64rr_DB,      X86::ADD64rm,       TB_NO_REVERSE },
    { X86::ADD8rr,          X86::ADD8rm,        0 },
    { X86::ADDPDrr,         X86::ADDPDrm,       TB_ALIGN_16 },
    { X86::ADDPSrr,         X86::ADDPSrm,       TB_ALIGN_16 },
    { X86::ADDSDrr,         X86::ADDSDrm,       0 },
    { X86::ADDSSrr,         X86::ADDSSrm,       0 },
    { X86::ADDSUBPDrr,      X86::ADDSUBPDrm,    TB_ALIGN_16 },
    { X86::ADDSUBPSrr,      X86::ADDSUBPSrm,    TB_ALIGN_16 },
    { X86::AND16rr,         X86::AND16rm,       0 },
    { X86::AND32rr,         X86::AND32rm,       0 },
    { X86::AND64rr,         X86::AND64rm,       0 },
    { X86::AND8rr,          X86::AND8rm,        0 },
    { X86::ANDNPDrr,        X86::ANDNPDrm,      TB_ALIGN_16 },
    { X86::ANDNPSrr,        X86::ANDNPSrm,      TB_ALIGN_16 },
    { X86::ANDPDrr,         X86::ANDPDrm,       TB_ALIGN_16 },
    { X86::ANDPSrr,         X86::ANDPSrm,       TB_ALIGN_16 },
    { X86::BLENDPDrri,      X86::BLENDPDrmi,    TB_ALIGN_16 },
    { X86::BLENDPSrri,      X86::BLENDPSrmi,    TB_ALIGN_16 },
    { X86::BLENDVPDrr0,     X86::BLENDVPDrm0,   TB_ALIGN_16 },
    { X86::BLENDVPSrr0,     X86::BLENDVPSrm0,   TB_ALIGN_16 },
    { X86::CMOVA16rr,       X86::CMOVA16rm,     0 },
    { X86::CMOVA32rr,       X86::CMOVA32rm,     0 },
    { X86::CMOVA64rr,       X86::CMOVA64rm,     0 },
    { X86::CMOVAE16rr,      X86::CMOVAE16rm,    0 },
    { X86::CMOVAE32rr,      X86::CMOVAE32rm,    0 },
    { X86::CMOVAE64rr,      X86::CMOVAE64rm,    0 },
    { X86::CMOVB16rr,       X86::CMOVB16rm,     0 },
    { X86::CMOVB32rr,       X86::CMOVB32rm,     0 },
    { X86::CMOVB64rr,       X86::CMOVB64rm,     0 },
    { X86::CMOVBE16rr,      X86::CMOVBE16rm,    0 },
    { X86::CMOVBE32rr,      X86::CMOVBE32rm,    0 },
    { X86::CMOVBE64rr,      X86::CMOVBE64rm,    0 },
    { X86::CMOVE16rr,       X86::CMOVE16rm,     0 },
    { X86::CMOVE32rr,       X86::CMOVE32rm,     0 },
    { X86::CMOVE64rr,       X86::CMOVE64rm,     0 },
    { X86::CMOVG16rr,       X86::CMOVG16rm,     0 },
    { X86::CMOVG32rr,       X86::CMOVG32rm,     0 },
    { X86::CMOVG64rr,       X86::CMOVG64rm,     0 },
    { X86::CMOVGE16rr,      X86::CMOVGE16rm,    0 },
    { X86::CMOVGE32rr,      X86::CMOVGE32rm,    0 },
    { X86::CMOVGE64rr,      X86::CMOVGE64rm,    0 },
    { X86::CMOVL16rr,       X86::CMOVL16rm,     0 },
    { X86::CMOVL32rr,       X86::CMOVL32rm,     0 },
    { X86::CMOVL64rr,       X86::CMOVL64rm,     0 },
    { X86::CMOVLE16rr,      X86::CMOVLE16rm,    0 },
    { X86::CMOVLE32rr,      X86::CMOVLE32rm,    0 },
    { X86::CMOVLE64rr,      X86::CMOVLE64rm,    0 },
    { X86::CMOVNE16rr,      X86::CMOVNE16rm,    0 },
    { X86::CMOVNE32rr,      X86::CMOVNE32rm,    0 },
    { X86::CMOVNE64rr,      X86::CMOVNE64rm,    0 },
    { X86::CMOVNO16rr,      X86::CMOVNO16rm,    0 },
    { X86::CMOVNO32rr,      X86::CMOVNO32rm,    0 },
    { X86::CMOVNO64rr,      X86::CMOVNO64rm,    0 },
    { X86::CMOVNP16rr,      X86::CMOVNP16rm,    0 },
    { X86::CMOVNP32rr,      X86::CMOVNP32rm,    0 },
    { X86::CMOVNP64rr,      X86::CMOVNP64rm,    0 },
    { X86::CMOVNS16rr,      X86::CMOVNS16rm,    0 },
    { X86::CMOVNS32rr,      X86::CMOVNS32rm,    0 },
    { X86::CMOVNS64rr,      X86::CMOVNS64rm,    0 },
    { X86::CMOVO16rr,       X86::CMOVO16rm,     0 },
    { X86::CMOVO32rr,       X86::CMOVO32rm,     0 },
    { X86::CMOVO64rr,       X86::CMOVO64rm,     0 },
    { X86::CMOVP16rr,       X86::CMOVP16rm,     0 },
    { X86::CMOVP32rr,       X86::CMOVP32rm,     0 },
    { X86::CMOVP64rr,       X86::CMOVP64rm,     0 },
    { X86::CMOVS16rr,       X86::CMOVS16rm,     0 },
    { X86::CMOVS32rr,       X86::CMOVS32rm,     0 },
    { X86::CMOVS64rr,       X86::CMOVS64rm,     0 },
    { X86::CMPPDrri,        X86::CMPPDrmi,      TB_ALIGN_16 },
    { X86::CMPPSrri,        X86::CMPPSrmi,      TB_ALIGN_16 },
    { X86::CMPSDrr,         X86::CMPSDrm,       0 },
    { X86::CMPSSrr,         X86::CMPSSrm,       0 },
    { X86::DIVPDrr,         X86::DIVPDrm,       TB_ALIGN_16 },
    { X86::DIVPSrr,         X86::DIVPSrm,       TB_ALIGN_16 },
    { X86::DIVSDrr,         X86::DIVSDrm,       0 },
    { X86::DIVSSrr,         X86::DIVSSrm,       0 },
    { X86::FsANDNPDrr,      X86::FsANDNPDrm,    TB_ALIGN_16 },
    { X86::FsANDNPSrr,      X86::FsANDNPSrm,    TB_ALIGN_16 },
    { X86::FsANDPDrr,       X86::FsANDPDrm,     TB_ALIGN_16 },
    { X86::FsANDPSrr,       X86::FsANDPSrm,     TB_ALIGN_16 },
    { X86::FsORPDrr,        X86::FsORPDrm,      TB_ALIGN_16 },
    { X86::FsORPSrr,        X86::FsORPSrm,      TB_ALIGN_16 },
    { X86::FsXORPDrr,       X86::FsXORPDrm,     TB_ALIGN_16 },
    { X86::FsXORPSrr,       X86::FsXORPSrm,     TB_ALIGN_16 },
    { X86::HADDPDrr,        X86::HADDPDrm,      TB_ALIGN_16 },
    { X86::HADDPSrr,        X86::HADDPSrm,      TB_ALIGN_16 },
    { X86::HSUBPDrr,        X86::HSUBPDrm,      TB_ALIGN_16 },
    { X86::HSUBPSrr,        X86::HSUBPSrm,      TB_ALIGN_16 },
    { X86::IMUL16rr,        X86::IMUL16rm,      0 },
    { X86::IMUL32rr,        X86::IMUL32rm,      0 },
    { X86::IMUL64rr,        X86::IMUL64rm,      0 },
    { X86::Int_CMPSDrr,     X86::Int_CMPSDrm,   0 },
    { X86::Int_CMPSSrr,     X86::Int_CMPSSrm,   0 },
    { X86::MAXPDrr,         X86::MAXPDrm,       TB_ALIGN_16 },
    { X86::MAXPDrr_Int,     X86::MAXPDrm_Int,   TB_ALIGN_16 },
    { X86::MAXPSrr,         X86::MAXPSrm,       TB_ALIGN_16 },
    { X86::MAXPSrr_Int,     X86::MAXPSrm_Int,   TB_ALIGN_16 },
    { X86::MAXSDrr,         X86::MAXSDrm,       0 },
    { X86::MAXSDrr_Int,     X86::MAXSDrm_Int,   0 },
    { X86::MAXSSrr,         X86::MAXSSrm,       0 },
    { X86::MAXSSrr_Int,     X86::MAXSSrm_Int,   0 },
    { X86::MINPDrr,         X86::MINPDrm,       TB_ALIGN_16 },
    { X86::MINPDrr_Int,     X86::MINPDrm_Int,   TB_ALIGN_16 },
    { X86::MINPSrr,         X86::MINPSrm,       TB_ALIGN_16 },
    { X86::MINPSrr_Int,     X86::MINPSrm_Int,   TB_ALIGN_16 },
    { X86::MINSDrr,         X86::MINSDrm,       0 },
    { X86::MINSDrr_Int,     X86::MINSDrm_Int,   0 },
    { X86::MINSSrr,         X86::MINSSrm,       0 },
    { X86::MINSSrr_Int,     X86::MINSSrm_Int,   0 },
    { X86::MPSADBWrri,      X86::MPSADBWrmi,    TB_ALIGN_16 },
    { X86::MULPDrr,         X86::MULPDrm,       TB_ALIGN_16 },
    { X86::MULPSrr,         X86::MULPSrm,       TB_ALIGN_16 },
    { X86::MULSDrr,         X86::MULSDrm,       0 },
    { X86::MULSSrr,         X86::MULSSrm,       0 },
    { X86::OR16rr,          X86::OR16rm,        0 },
    { X86::OR32rr,          X86::OR32rm,        0 },
    { X86::OR64rr,          X86::OR64rm,        0 },
    { X86::OR8rr,           X86::OR8rm,         0 },
    { X86::ORPDrr,          X86::ORPDrm,        TB_ALIGN_16 },
    { X86::ORPSrr,          X86::ORPSrm,        TB_ALIGN_16 },
    { X86::PACKSSDWrr,      X86::PACKSSDWrm,    TB_ALIGN_16 },
    { X86::PACKSSWBrr,      X86::PACKSSWBrm,    TB_ALIGN_16 },
    { X86::PACKUSDWrr,      X86::PACKUSDWrm,    TB_ALIGN_16 },
    { X86::PACKUSWBrr,      X86::PACKUSWBrm,    TB_ALIGN_16 },
    { X86::PADDBrr,         X86::PADDBrm,       TB_ALIGN_16 },
    { X86::PADDDrr,         X86::PADDDrm,       TB_ALIGN_16 },
    { X86::PADDQrr,         X86::PADDQrm,       TB_ALIGN_16 },
    { X86::PADDSBrr,        X86::PADDSBrm,      TB_ALIGN_16 },
    { X86::PADDSWrr,        X86::PADDSWrm,      TB_ALIGN_16 },
    { X86::PADDUSBrr,       X86::PADDUSBrm,     TB_ALIGN_16 },
    { X86::PADDUSWrr,       X86::PADDUSWrm,     TB_ALIGN_16 },
    { X86::PADDWrr,         X86::PADDWrm,       TB_ALIGN_16 },
    { X86::PALIGNR128rr,    X86::PALIGNR128rm,  TB_ALIGN_16 },
    { X86::PANDNrr,         X86::PANDNrm,       TB_ALIGN_16 },
    { X86::PANDrr,          X86::PANDrm,        TB_ALIGN_16 },
    { X86::PAVGBrr,         X86::PAVGBrm,       TB_ALIGN_16 },
    { X86::PAVGWrr,         X86::PAVGWrm,       TB_ALIGN_16 },
    { X86::PBLENDWrri,      X86::PBLENDWrmi,    TB_ALIGN_16 },
    { X86::PCMPEQBrr,       X86::PCMPEQBrm,     TB_ALIGN_16 },
    { X86::PCMPEQDrr,       X86::PCMPEQDrm,     TB_ALIGN_16 },
    { X86::PCMPEQQrr,       X86::PCMPEQQrm,     TB_ALIGN_16 },
    { X86::PCMPEQWrr,       X86::PCMPEQWrm,     TB_ALIGN_16 },
    { X86::PCMPGTBrr,       X86::PCMPGTBrm,     TB_ALIGN_16 },
    { X86::PCMPGTDrr,       X86::PCMPGTDrm,     TB_ALIGN_16 },
    { X86::PCMPGTQrr,       X86::PCMPGTQrm,     TB_ALIGN_16 },
    { X86::PCMPGTWrr,       X86::PCMPGTWrm,     TB_ALIGN_16 },
    { X86::PHADDDrr,        X86::PHADDDrm,      TB_ALIGN_16 },
    { X86::PHADDWrr,        X86::PHADDWrm,      TB_ALIGN_16 },
    { X86::PHADDSWrr128,    X86::PHADDSWrm128,  TB_ALIGN_16 },
    { X86::PHSUBDrr,        X86::PHSUBDrm,      TB_ALIGN_16 },
    { X86::PHSUBSWrr128,    X86::PHSUBSWrm128,  TB_ALIGN_16 },
    { X86::PHSUBWrr,        X86::PHSUBWrm,      TB_ALIGN_16 },
    { X86::PINSRWrri,       X86::PINSRWrmi,     TB_ALIGN_16 },
    { X86::PMADDUBSWrr128,  X86::PMADDUBSWrm128, TB_ALIGN_16 },
    { X86::PMADDWDrr,       X86::PMADDWDrm,     TB_ALIGN_16 },
    { X86::PMAXSWrr,        X86::PMAXSWrm,      TB_ALIGN_16 },
    { X86::PMAXUBrr,        X86::PMAXUBrm,      TB_ALIGN_16 },
    { X86::PMINSWrr,        X86::PMINSWrm,      TB_ALIGN_16 },
    { X86::PMINUBrr,        X86::PMINUBrm,      TB_ALIGN_16 },
    { X86::PMULDQrr,        X86::PMULDQrm,      TB_ALIGN_16 },
    { X86::PMULHRSWrr128,   X86::PMULHRSWrm128, TB_ALIGN_16 },
    { X86::PMULHUWrr,       X86::PMULHUWrm,     TB_ALIGN_16 },
    { X86::PMULHWrr,        X86::PMULHWrm,      TB_ALIGN_16 },
    { X86::PMULLDrr,        X86::PMULLDrm,      TB_ALIGN_16 },
    { X86::PMULLWrr,        X86::PMULLWrm,      TB_ALIGN_16 },
    { X86::PMULUDQrr,       X86::PMULUDQrm,     TB_ALIGN_16 },
    { X86::PORrr,           X86::PORrm,         TB_ALIGN_16 },
    { X86::PSADBWrr,        X86::PSADBWrm,      TB_ALIGN_16 },
    { X86::PSHUFBrr,        X86::PSHUFBrm,      TB_ALIGN_16 },
    { X86::PSIGNBrr,        X86::PSIGNBrm,      TB_ALIGN_16 },
    { X86::PSIGNWrr,        X86::PSIGNWrm,      TB_ALIGN_16 },
    { X86::PSIGNDrr,        X86::PSIGNDrm,      TB_ALIGN_16 },
    { X86::PSLLDrr,         X86::PSLLDrm,       TB_ALIGN_16 },
    { X86::PSLLQrr,         X86::PSLLQrm,       TB_ALIGN_16 },
    { X86::PSLLWrr,         X86::PSLLWrm,       TB_ALIGN_16 },
    { X86::PSRADrr,         X86::PSRADrm,       TB_ALIGN_16 },
    { X86::PSRAWrr,         X86::PSRAWrm,       TB_ALIGN_16 },
    { X86::PSRLDrr,         X86::PSRLDrm,       TB_ALIGN_16 },
    { X86::PSRLQrr,         X86::PSRLQrm,       TB_ALIGN_16 },
    { X86::PSRLWrr,         X86::PSRLWrm,       TB_ALIGN_16 },
    { X86::PSUBBrr,         X86::PSUBBrm,       TB_ALIGN_16 },
    { X86::PSUBDrr,         X86::PSUBDrm,       TB_ALIGN_16 },
    { X86::PSUBSBrr,        X86::PSUBSBrm,      TB_ALIGN_16 },
    { X86::PSUBSWrr,        X86::PSUBSWrm,      TB_ALIGN_16 },
    { X86::PSUBWrr,         X86::PSUBWrm,       TB_ALIGN_16 },
    { X86::PUNPCKHBWrr,     X86::PUNPCKHBWrm,   TB_ALIGN_16 },
    { X86::PUNPCKHDQrr,     X86::PUNPCKHDQrm,   TB_ALIGN_16 },
    { X86::PUNPCKHQDQrr,    X86::PUNPCKHQDQrm,  TB_ALIGN_16 },
    { X86::PUNPCKHWDrr,     X86::PUNPCKHWDrm,   TB_ALIGN_16 },
    { X86::PUNPCKLBWrr,     X86::PUNPCKLBWrm,   TB_ALIGN_16 },
    { X86::PUNPCKLDQrr,     X86::PUNPCKLDQrm,   TB_ALIGN_16 },
    { X86::PUNPCKLQDQrr,    X86::PUNPCKLQDQrm,  TB_ALIGN_16 },
    { X86::PUNPCKLWDrr,     X86::PUNPCKLWDrm,   TB_ALIGN_16 },
    { X86::PXORrr,          X86::PXORrm,        TB_ALIGN_16 },
    { X86::SBB32rr,         X86::SBB32rm,       0 },
    { X86::SBB64rr,         X86::SBB64rm,       0 },
    { X86::SHUFPDrri,       X86::SHUFPDrmi,     TB_ALIGN_16 },
    { X86::SHUFPSrri,       X86::SHUFPSrmi,     TB_ALIGN_16 },
    { X86::SUB16rr,         X86::SUB16rm,       0 },
    { X86::SUB32rr,         X86::SUB32rm,       0 },
    { X86::SUB64rr,         X86::SUB64rm,       0 },
    { X86::SUB8rr,          X86::SUB8rm,        0 },
    { X86::SUBPDrr,         X86::SUBPDrm,       TB_ALIGN_16 },
    { X86::SUBPSrr,         X86::SUBPSrm,       TB_ALIGN_16 },
    { X86::SUBSDrr,         X86::SUBSDrm,       0 },
    { X86::SUBSSrr,         X86::SUBSSrm,       0 },
    // FIXME: TEST*rr -> swapped operand of TEST*mr.
    { X86::UNPCKHPDrr,      X86::UNPCKHPDrm,    TB_ALIGN_16 },
    { X86::UNPCKHPSrr,      X86::UNPCKHPSrm,    TB_ALIGN_16 },
    { X86::UNPCKLPDrr,      X86::UNPCKLPDrm,    TB_ALIGN_16 },
    { X86::UNPCKLPSrr,      X86::UNPCKLPSrm,    TB_ALIGN_16 },
    { X86::XOR16rr,         X86::XOR16rm,       0 },
    { X86::XOR32rr,         X86::XOR32rm,       0 },
    { X86::XOR64rr,         X86::XOR64rm,       0 },
    { X86::XOR8rr,          X86::XOR8rm,        0 },
    { X86::XORPDrr,         X86::XORPDrm,       TB_ALIGN_16 },
    { X86::XORPSrr,         X86::XORPSrm,       TB_ALIGN_16 },
    // AVX 128-bit versions of foldable instructions
    { X86::VCVTSD2SSrr,       X86::VCVTSD2SSrm,        0 },
    { X86::Int_VCVTSD2SSrr,   X86::Int_VCVTSD2SSrm,    0 },
    { X86::VCVTSI2SD64rr,     X86::VCVTSI2SD64rm,      0 },
    { X86::Int_VCVTSI2SD64rr, X86::Int_VCVTSI2SD64rm,  0 },
    { X86::VCVTSI2SDrr,       X86::VCVTSI2SDrm,        0 },
    { X86::Int_VCVTSI2SDrr,   X86::Int_VCVTSI2SDrm,    0 },
    { X86::VCVTSI2SS64rr,     X86::VCVTSI2SS64rm,      0 },
    { X86::Int_VCVTSI2SS64rr, X86::Int_VCVTSI2SS64rm,  0 },
    { X86::VCVTSI2SSrr,       X86::VCVTSI2SSrm,        0 },
    { X86::Int_VCVTSI2SSrr,   X86::Int_VCVTSI2SSrm,    0 },
    { X86::VCVTSS2SDrr,       X86::VCVTSS2SDrm,        0 },
    { X86::Int_VCVTSS2SDrr,   X86::Int_VCVTSS2SDrm,    0 },
    { X86::VCVTTPD2DQrr,      X86::VCVTTPD2DQrm,       TB_ALIGN_16 },
    { X86::VCVTTPS2DQrr,      X86::VCVTTPS2DQrm,       TB_ALIGN_16 },
    { X86::VRSQRTSSr,         X86::VRSQRTSSm,          0 },
    { X86::VSQRTSDr,          X86::VSQRTSDm,           0 },
    { X86::VSQRTSSr,          X86::VSQRTSSm,           0 },
    { X86::VADDPDrr,          X86::VADDPDrm,           TB_ALIGN_16 },
    { X86::VADDPSrr,          X86::VADDPSrm,           TB_ALIGN_16 },
    { X86::VADDSDrr,          X86::VADDSDrm,           0 },
    { X86::VADDSSrr,          X86::VADDSSrm,           0 },
    { X86::VADDSUBPDrr,       X86::VADDSUBPDrm,        TB_ALIGN_16 },
    { X86::VADDSUBPSrr,       X86::VADDSUBPSrm,        TB_ALIGN_16 },
    { X86::VANDNPDrr,         X86::VANDNPDrm,          TB_ALIGN_16 },
    { X86::VANDNPSrr,         X86::VANDNPSrm,          TB_ALIGN_16 },
    { X86::VANDPDrr,          X86::VANDPDrm,           TB_ALIGN_16 },
    { X86::VANDPSrr,          X86::VANDPSrm,           TB_ALIGN_16 },
    { X86::VBLENDPDrri,       X86::VBLENDPDrmi,        TB_ALIGN_16 },
    { X86::VBLENDPSrri,       X86::VBLENDPSrmi,        TB_ALIGN_16 },
    { X86::VBLENDVPDrr,       X86::VBLENDVPDrm,        TB_ALIGN_16 },
    { X86::VBLENDVPSrr,       X86::VBLENDVPSrm,        TB_ALIGN_16 },
    { X86::VCMPPDrri,         X86::VCMPPDrmi,          TB_ALIGN_16 },
    { X86::VCMPPSrri,         X86::VCMPPSrmi,          TB_ALIGN_16 },
    { X86::VCMPSDrr,          X86::VCMPSDrm,           0 },
    { X86::VCMPSSrr,          X86::VCMPSSrm,           0 },
    { X86::VDIVPDrr,          X86::VDIVPDrm,           TB_ALIGN_16 },
    { X86::VDIVPSrr,          X86::VDIVPSrm,           TB_ALIGN_16 },
    { X86::VDIVSDrr,          X86::VDIVSDrm,           0 },
    { X86::VDIVSSrr,          X86::VDIVSSrm,           0 },
    { X86::VFsANDNPDrr,       X86::VFsANDNPDrm,        TB_ALIGN_16 },
    { X86::VFsANDNPSrr,       X86::VFsANDNPSrm,        TB_ALIGN_16 },
    { X86::VFsANDPDrr,        X86::VFsANDPDrm,         TB_ALIGN_16 },
    { X86::VFsANDPSrr,        X86::VFsANDPSrm,         TB_ALIGN_16 },
    { X86::VFsORPDrr,         X86::VFsORPDrm,          TB_ALIGN_16 },
    { X86::VFsORPSrr,         X86::VFsORPSrm,          TB_ALIGN_16 },
    { X86::VFsXORPDrr,        X86::VFsXORPDrm,         TB_ALIGN_16 },
    { X86::VFsXORPSrr,        X86::VFsXORPSrm,         TB_ALIGN_16 },
    { X86::VHADDPDrr,         X86::VHADDPDrm,          TB_ALIGN_16 },
    { X86::VHADDPSrr,         X86::VHADDPSrm,          TB_ALIGN_16 },
    { X86::VHSUBPDrr,         X86::VHSUBPDrm,          TB_ALIGN_16 },
    { X86::VHSUBPSrr,         X86::VHSUBPSrm,          TB_ALIGN_16 },
    { X86::Int_VCMPSDrr,      X86::Int_VCMPSDrm,       0 },
    { X86::Int_VCMPSSrr,      X86::Int_VCMPSSrm,       0 },
    { X86::VMAXPDrr,          X86::VMAXPDrm,           TB_ALIGN_16 },
    { X86::VMAXPDrr_Int,      X86::VMAXPDrm_Int,       TB_ALIGN_16 },
    { X86::VMAXPSrr,          X86::VMAXPSrm,           TB_ALIGN_16 },
    { X86::VMAXPSrr_Int,      X86::VMAXPSrm_Int,       TB_ALIGN_16 },
    { X86::VMAXSDrr,          X86::VMAXSDrm,           0 },
    { X86::VMAXSDrr_Int,      X86::VMAXSDrm_Int,       0 },
    { X86::VMAXSSrr,          X86::VMAXSSrm,           0 },
    { X86::VMAXSSrr_Int,      X86::VMAXSSrm_Int,       0 },
    { X86::VMINPDrr,          X86::VMINPDrm,           TB_ALIGN_16 },
    { X86::VMINPDrr_Int,      X86::VMINPDrm_Int,       TB_ALIGN_16 },
    { X86::VMINPSrr,          X86::VMINPSrm,           TB_ALIGN_16 },
    { X86::VMINPSrr_Int,      X86::VMINPSrm_Int,       TB_ALIGN_16 },
    { X86::VMINSDrr,          X86::VMINSDrm,           0 },
    { X86::VMINSDrr_Int,      X86::VMINSDrm_Int,       0 },
    { X86::VMINSSrr,          X86::VMINSSrm,           0 },
    { X86::VMINSSrr_Int,      X86::VMINSSrm_Int,       0 },
    { X86::VMPSADBWrri,       X86::VMPSADBWrmi,        TB_ALIGN_16 },
    { X86::VMULPDrr,          X86::VMULPDrm,           TB_ALIGN_16 },
    { X86::VMULPSrr,          X86::VMULPSrm,           TB_ALIGN_16 },
    { X86::VMULSDrr,          X86::VMULSDrm,           0 },
    { X86::VMULSSrr,          X86::VMULSSrm,           0 },
    { X86::VORPDrr,           X86::VORPDrm,            TB_ALIGN_16 },
    { X86::VORPSrr,           X86::VORPSrm,            TB_ALIGN_16 },
    { X86::VPACKSSDWrr,       X86::VPACKSSDWrm,        TB_ALIGN_16 },
    { X86::VPACKSSWBrr,       X86::VPACKSSWBrm,        TB_ALIGN_16 },
    { X86::VPACKUSDWrr,       X86::VPACKUSDWrm,        TB_ALIGN_16 },
    { X86::VPACKUSWBrr,       X86::VPACKUSWBrm,        TB_ALIGN_16 },
    { X86::VPADDBrr,          X86::VPADDBrm,           TB_ALIGN_16 },
    { X86::VPADDDrr,          X86::VPADDDrm,           TB_ALIGN_16 },
    { X86::VPADDQrr,          X86::VPADDQrm,           TB_ALIGN_16 },
    { X86::VPADDSBrr,         X86::VPADDSBrm,          TB_ALIGN_16 },
    { X86::VPADDSWrr,         X86::VPADDSWrm,          TB_ALIGN_16 },
    { X86::VPADDUSBrr,        X86::VPADDUSBrm,         TB_ALIGN_16 },
    { X86::VPADDUSWrr,        X86::VPADDUSWrm,         TB_ALIGN_16 },
    { X86::VPADDWrr,          X86::VPADDWrm,           TB_ALIGN_16 },
    { X86::VPALIGNR128rr,     X86::VPALIGNR128rm,      TB_ALIGN_16 },
    { X86::VPANDNrr,          X86::VPANDNrm,           TB_ALIGN_16 },
    { X86::VPANDrr,           X86::VPANDrm,            TB_ALIGN_16 },
    { X86::VPAVGBrr,          X86::VPAVGBrm,           TB_ALIGN_16 },
    { X86::VPAVGWrr,          X86::VPAVGWrm,           TB_ALIGN_16 },
    { X86::VPBLENDWrri,       X86::VPBLENDWrmi,        TB_ALIGN_16 },
    { X86::VPCMPEQBrr,        X86::VPCMPEQBrm,         TB_ALIGN_16 },
    { X86::VPCMPEQDrr,        X86::VPCMPEQDrm,         TB_ALIGN_16 },
    { X86::VPCMPEQQrr,        X86::VPCMPEQQrm,         TB_ALIGN_16 },
    { X86::VPCMPEQWrr,        X86::VPCMPEQWrm,         TB_ALIGN_16 },
    { X86::VPCMPGTBrr,        X86::VPCMPGTBrm,         TB_ALIGN_16 },
    { X86::VPCMPGTDrr,        X86::VPCMPGTDrm,         TB_ALIGN_16 },
    { X86::VPCMPGTQrr,        X86::VPCMPGTQrm,         TB_ALIGN_16 },
    { X86::VPCMPGTWrr,        X86::VPCMPGTWrm,         TB_ALIGN_16 },
    { X86::VPHADDDrr,         X86::VPHADDDrm,          TB_ALIGN_16 },
    { X86::VPHADDSWrr128,     X86::VPHADDSWrm128,      TB_ALIGN_16 },
    { X86::VPHADDWrr,         X86::VPHADDWrm,          TB_ALIGN_16 },
    { X86::VPHSUBDrr,         X86::VPHSUBDrm,          TB_ALIGN_16 },
    { X86::VPHSUBSWrr128,     X86::VPHSUBSWrm128,      TB_ALIGN_16 },
    { X86::VPHSUBWrr,         X86::VPHSUBWrm,          TB_ALIGN_16 },
    { X86::VPERMILPDrr,       X86::VPERMILPDrm,        TB_ALIGN_16 },
    { X86::VPERMILPSrr,       X86::VPERMILPSrm,        TB_ALIGN_16 },
    { X86::VPINSRWrri,        X86::VPINSRWrmi,         TB_ALIGN_16 },
    { X86::VPMADDUBSWrr128,   X86::VPMADDUBSWrm128,    TB_ALIGN_16 },
    { X86::VPMADDWDrr,        X86::VPMADDWDrm,         TB_ALIGN_16 },
    { X86::VPMAXSWrr,         X86::VPMAXSWrm,          TB_ALIGN_16 },
    { X86::VPMAXUBrr,         X86::VPMAXUBrm,          TB_ALIGN_16 },
    { X86::VPMINSWrr,         X86::VPMINSWrm,          TB_ALIGN_16 },
    { X86::VPMINUBrr,         X86::VPMINUBrm,          TB_ALIGN_16 },
    { X86::VPMULDQrr,         X86::VPMULDQrm,          TB_ALIGN_16 },
    { X86::VPMULHRSWrr128,    X86::VPMULHRSWrm128,     TB_ALIGN_16 },
    { X86::VPMULHUWrr,        X86::VPMULHUWrm,         TB_ALIGN_16 },
    { X86::VPMULHWrr,         X86::VPMULHWrm,          TB_ALIGN_16 },
    { X86::VPMULLDrr,         X86::VPMULLDrm,          TB_ALIGN_16 },
    { X86::VPMULLWrr,         X86::VPMULLWrm,          TB_ALIGN_16 },
    { X86::VPMULUDQrr,        X86::VPMULUDQrm,         TB_ALIGN_16 },
    { X86::VPORrr,            X86::VPORrm,             TB_ALIGN_16 },
    { X86::VPSADBWrr,         X86::VPSADBWrm,          TB_ALIGN_16 },
    { X86::VPSHUFBrr,         X86::VPSHUFBrm,          TB_ALIGN_16 },
    { X86::VPSIGNBrr,         X86::VPSIGNBrm,          TB_ALIGN_16 },
    { X86::VPSIGNWrr,         X86::VPSIGNWrm,          TB_ALIGN_16 },
    { X86::VPSIGNDrr,         X86::VPSIGNDrm,          TB_ALIGN_16 },
    { X86::VPSLLDrr,          X86::VPSLLDrm,           TB_ALIGN_16 },
    { X86::VPSLLQrr,          X86::VPSLLQrm,           TB_ALIGN_16 },
    { X86::VPSLLWrr,          X86::VPSLLWrm,           TB_ALIGN_16 },
    { X86::VPSRADrr,          X86::VPSRADrm,           TB_ALIGN_16 },
    { X86::VPSRAWrr,          X86::VPSRAWrm,           TB_ALIGN_16 },
    { X86::VPSRLDrr,          X86::VPSRLDrm,           TB_ALIGN_16 },
    { X86::VPSRLQrr,          X86::VPSRLQrm,           TB_ALIGN_16 },
    { X86::VPSRLWrr,          X86::VPSRLWrm,           TB_ALIGN_16 },
    { X86::VPSUBBrr,          X86::VPSUBBrm,           TB_ALIGN_16 },
    { X86::VPSUBDrr,          X86::VPSUBDrm,           TB_ALIGN_16 },
    { X86::VPSUBSBrr,         X86::VPSUBSBrm,          TB_ALIGN_16 },
    { X86::VPSUBSWrr,         X86::VPSUBSWrm,          TB_ALIGN_16 },
    { X86::VPSUBWrr,          X86::VPSUBWrm,           TB_ALIGN_16 },
    { X86::VPUNPCKHBWrr,      X86::VPUNPCKHBWrm,       TB_ALIGN_16 },
    { X86::VPUNPCKHDQrr,      X86::VPUNPCKHDQrm,       TB_ALIGN_16 },
    { X86::VPUNPCKHQDQrr,     X86::VPUNPCKHQDQrm,      TB_ALIGN_16 },
    { X86::VPUNPCKHWDrr,      X86::VPUNPCKHWDrm,       TB_ALIGN_16 },
    { X86::VPUNPCKLBWrr,      X86::VPUNPCKLBWrm,       TB_ALIGN_16 },
    { X86::VPUNPCKLDQrr,      X86::VPUNPCKLDQrm,       TB_ALIGN_16 },
    { X86::VPUNPCKLQDQrr,     X86::VPUNPCKLQDQrm,      TB_ALIGN_16 },
    { X86::VPUNPCKLWDrr,      X86::VPUNPCKLWDrm,       TB_ALIGN_16 },
    { X86::VPXORrr,           X86::VPXORrm,            TB_ALIGN_16 },
    { X86::VSHUFPDrri,        X86::VSHUFPDrmi,         TB_ALIGN_16 },
    { X86::VSHUFPSrri,        X86::VSHUFPSrmi,         TB_ALIGN_16 },
    { X86::VSUBPDrr,          X86::VSUBPDrm,           TB_ALIGN_16 },
    { X86::VSUBPSrr,          X86::VSUBPSrm,           TB_ALIGN_16 },
    { X86::VSUBSDrr,          X86::VSUBSDrm,           0 },
    { X86::VSUBSSrr,          X86::VSUBSSrm,           0 },
    { X86::VUNPCKHPDrr,       X86::VUNPCKHPDrm,        TB_ALIGN_16 },
    { X86::VUNPCKHPSrr,       X86::VUNPCKHPSrm,        TB_ALIGN_16 },
    { X86::VUNPCKLPDrr,       X86::VUNPCKLPDrm,        TB_ALIGN_16 },
    { X86::VUNPCKLPSrr,       X86::VUNPCKLPSrm,        TB_ALIGN_16 },
    { X86::VXORPDrr,          X86::VXORPDrm,           TB_ALIGN_16 },
    { X86::VXORPSrr,          X86::VXORPSrm,           TB_ALIGN_16 },
    // AVX 256-bit foldable instructions
    { X86::VADDPDYrr,         X86::VADDPDYrm,          TB_ALIGN_32 },
    { X86::VADDPSYrr,         X86::VADDPSYrm,          TB_ALIGN_32 },
    { X86::VADDSUBPDYrr,      X86::VADDSUBPDYrm,       TB_ALIGN_32 },
    { X86::VADDSUBPSYrr,      X86::VADDSUBPSYrm,       TB_ALIGN_32 },
    { X86::VANDNPDYrr,        X86::VANDNPDYrm,         TB_ALIGN_32 },
    { X86::VANDNPSYrr,        X86::VANDNPSYrm,         TB_ALIGN_32 },
    { X86::VANDPDYrr,         X86::VANDPDYrm,          TB_ALIGN_32 },
    { X86::VANDPSYrr,         X86::VANDPSYrm,          TB_ALIGN_32 },
    { X86::VBLENDPDYrri,      X86::VBLENDPDYrmi,       TB_ALIGN_32 },
    { X86::VBLENDPSYrri,      X86::VBLENDPSYrmi,       TB_ALIGN_32 },
    { X86::VBLENDVPDYrr,      X86::VBLENDVPDYrm,       TB_ALIGN_32 },
    { X86::VBLENDVPSYrr,      X86::VBLENDVPSYrm,       TB_ALIGN_32 },
    { X86::VCMPPDYrri,        X86::VCMPPDYrmi,         TB_ALIGN_32 },
    { X86::VCMPPSYrri,        X86::VCMPPSYrmi,         TB_ALIGN_32 },
    { X86::VDIVPDYrr,         X86::VDIVPDYrm,          TB_ALIGN_32 },
    { X86::VDIVPSYrr,         X86::VDIVPSYrm,          TB_ALIGN_32 },
    { X86::VHADDPDYrr,        X86::VHADDPDYrm,         TB_ALIGN_32 },
    { X86::VHADDPSYrr,        X86::VHADDPSYrm,         TB_ALIGN_32 },
    { X86::VHSUBPDYrr,        X86::VHSUBPDYrm,         TB_ALIGN_32 },
    { X86::VHSUBPSYrr,        X86::VHSUBPSYrm,         TB_ALIGN_32 },
    { X86::VINSERTF128rr,     X86::VINSERTF128rm,      TB_ALIGN_32 },
    { X86::VMAXPDYrr,         X86::VMAXPDYrm,          TB_ALIGN_32 },
    { X86::VMAXPDYrr_Int,     X86::VMAXPDYrm_Int,      TB_ALIGN_32 },
    { X86::VMAXPSYrr,         X86::VMAXPSYrm,          TB_ALIGN_32 },
    { X86::VMAXPSYrr_Int,     X86::VMAXPSYrm_Int,      TB_ALIGN_32 },
    { X86::VMINPDYrr,         X86::VMINPDYrm,          TB_ALIGN_32 },
    { X86::VMINPDYrr_Int,     X86::VMINPDYrm_Int,      TB_ALIGN_32 },
    { X86::VMINPSYrr,         X86::VMINPSYrm,          TB_ALIGN_32 },
    { X86::VMINPSYrr_Int,     X86::VMINPSYrm_Int,      TB_ALIGN_32 },
    { X86::VMULPDYrr,         X86::VMULPDYrm,          TB_ALIGN_32 },
    { X86::VMULPSYrr,         X86::VMULPSYrm,          TB_ALIGN_32 },
    { X86::VORPDYrr,          X86::VORPDYrm,           TB_ALIGN_32 },
    { X86::VORPSYrr,          X86::VORPSYrm,           TB_ALIGN_32 },
    { X86::VPERM2F128rr,      X86::VPERM2F128rm,       TB_ALIGN_32 },
    { X86::VPERMILPDYrr,      X86::VPERMILPDYrm,       TB_ALIGN_32 },
    { X86::VPERMILPSYrr,      X86::VPERMILPSYrm,       TB_ALIGN_32 },
    { X86::VSHUFPDYrri,       X86::VSHUFPDYrmi,        TB_ALIGN_32 },
    { X86::VSHUFPSYrri,       X86::VSHUFPSYrmi,        TB_ALIGN_32 },
    { X86::VSUBPDYrr,         X86::VSUBPDYrm,          TB_ALIGN_32 },
    { X86::VSUBPSYrr,         X86::VSUBPSYrm,          TB_ALIGN_32 },
1018    { X86::VUNPCKHPDYrr,      X86::VUNPCKHPDYrm,       TB_ALIGN_32 },
1019    { X86::VUNPCKHPSYrr,      X86::VUNPCKHPSYrm,       TB_ALIGN_32 },
1020    { X86::VUNPCKLPDYrr,      X86::VUNPCKLPDYrm,       TB_ALIGN_32 },
1021    { X86::VUNPCKLPSYrr,      X86::VUNPCKLPSYrm,       TB_ALIGN_32 },
1022    { X86::VXORPDYrr,         X86::VXORPDYrm,          TB_ALIGN_32 },
1023    { X86::VXORPSYrr,         X86::VXORPSYrm,          TB_ALIGN_32 },
1024    // AVX2 foldable instructions
1025    { X86::VINSERTI128rr,     X86::VINSERTI128rm,      TB_ALIGN_16 },
1026    { X86::VPACKSSDWYrr,      X86::VPACKSSDWYrm,       TB_ALIGN_32 },
1027    { X86::VPACKSSWBYrr,      X86::VPACKSSWBYrm,       TB_ALIGN_32 },
1028    { X86::VPACKUSDWYrr,      X86::VPACKUSDWYrm,       TB_ALIGN_32 },
1029    { X86::VPACKUSWBYrr,      X86::VPACKUSWBYrm,       TB_ALIGN_32 },
1030    { X86::VPADDBYrr,         X86::VPADDBYrm,          TB_ALIGN_32 },
1031    { X86::VPADDDYrr,         X86::VPADDDYrm,          TB_ALIGN_32 },
1032    { X86::VPADDQYrr,         X86::VPADDQYrm,          TB_ALIGN_32 },
1033    { X86::VPADDSBYrr,        X86::VPADDSBYrm,         TB_ALIGN_32 },
1034    { X86::VPADDSWYrr,        X86::VPADDSWYrm,         TB_ALIGN_32 },
1035    { X86::VPADDUSBYrr,       X86::VPADDUSBYrm,        TB_ALIGN_32 },
1036    { X86::VPADDUSWYrr,       X86::VPADDUSWYrm,        TB_ALIGN_32 },
1037    { X86::VPADDWYrr,         X86::VPADDWYrm,          TB_ALIGN_32 },
1038    { X86::VPALIGNR256rr,     X86::VPALIGNR256rm,      TB_ALIGN_32 },
1039    { X86::VPANDNYrr,         X86::VPANDNYrm,          TB_ALIGN_32 },
1040    { X86::VPANDYrr,          X86::VPANDYrm,           TB_ALIGN_32 },
1041    { X86::VPAVGBYrr,         X86::VPAVGBYrm,          TB_ALIGN_32 },
1042    { X86::VPAVGWYrr,         X86::VPAVGWYrm,          TB_ALIGN_32 },
1043    { X86::VPBLENDDrri,       X86::VPBLENDDrmi,        TB_ALIGN_32 },
1044    { X86::VPBLENDDYrri,      X86::VPBLENDDYrmi,       TB_ALIGN_32 },
1045    { X86::VPBLENDWYrri,      X86::VPBLENDWYrmi,       TB_ALIGN_32 },
1046    { X86::VPCMPEQBYrr,       X86::VPCMPEQBYrm,        TB_ALIGN_32 },
1047    { X86::VPCMPEQDYrr,       X86::VPCMPEQDYrm,        TB_ALIGN_32 },
1048    { X86::VPCMPEQQYrr,       X86::VPCMPEQQYrm,        TB_ALIGN_32 },
1049    { X86::VPCMPEQWYrr,       X86::VPCMPEQWYrm,        TB_ALIGN_32 },
1050    { X86::VPCMPGTBYrr,       X86::VPCMPGTBYrm,        TB_ALIGN_32 },
1051    { X86::VPCMPGTDYrr,       X86::VPCMPGTDYrm,        TB_ALIGN_32 },
1052    { X86::VPCMPGTQYrr,       X86::VPCMPGTQYrm,        TB_ALIGN_32 },
1053    { X86::VPCMPGTWYrr,       X86::VPCMPGTWYrm,        TB_ALIGN_32 },
1054    { X86::VPERM2I128rr,      X86::VPERM2I128rm,       TB_ALIGN_32 },
1055    { X86::VPERMDYrr,         X86::VPERMDYrm,          TB_ALIGN_32 },
1056    { X86::VPERMPDYri,        X86::VPERMPDYmi,         TB_ALIGN_32 },
1057    { X86::VPERMPSYrr,        X86::VPERMPSYrm,         TB_ALIGN_32 },
1058    { X86::VPERMQYri,         X86::VPERMQYmi,          TB_ALIGN_32 },
1059    { X86::VPHADDDYrr,        X86::VPHADDDYrm,         TB_ALIGN_32 },
1060    { X86::VPHADDSWrr256,     X86::VPHADDSWrm256,      TB_ALIGN_32 },
1061    { X86::VPHADDWYrr,        X86::VPHADDWYrm,         TB_ALIGN_32 },
1062    { X86::VPHSUBDYrr,        X86::VPHSUBDYrm,         TB_ALIGN_32 },
1063    { X86::VPHSUBSWrr256,     X86::VPHSUBSWrm256,      TB_ALIGN_32 },
1064    { X86::VPHSUBWYrr,        X86::VPHSUBWYrm,         TB_ALIGN_32 },
1065    { X86::VPMADDUBSWrr256,   X86::VPMADDUBSWrm256,    TB_ALIGN_32 },
1066    { X86::VPMADDWDYrr,       X86::VPMADDWDYrm,        TB_ALIGN_32 },
1067    { X86::VPMAXSWYrr,        X86::VPMAXSWYrm,         TB_ALIGN_32 },
1068    { X86::VPMAXUBYrr,        X86::VPMAXUBYrm,         TB_ALIGN_32 },
1069    { X86::VPMINSWYrr,        X86::VPMINSWYrm,         TB_ALIGN_32 },
1070    { X86::VPMINUBYrr,        X86::VPMINUBYrm,         TB_ALIGN_32 },
1071    { X86::VMPSADBWYrri,      X86::VMPSADBWYrmi,       TB_ALIGN_32 },
1072    { X86::VPMULDQYrr,        X86::VPMULDQYrm,         TB_ALIGN_32 },
1073    { X86::VPMULHRSWrr256,    X86::VPMULHRSWrm256,     TB_ALIGN_32 },
1074    { X86::VPMULHUWYrr,       X86::VPMULHUWYrm,        TB_ALIGN_32 },
1075    { X86::VPMULHWYrr,        X86::VPMULHWYrm,         TB_ALIGN_32 },
1076    { X86::VPMULLDYrr,        X86::VPMULLDYrm,         TB_ALIGN_32 },
1077    { X86::VPMULLWYrr,        X86::VPMULLWYrm,         TB_ALIGN_32 },
1078    { X86::VPMULUDQYrr,       X86::VPMULUDQYrm,        TB_ALIGN_32 },
1079    { X86::VPORYrr,           X86::VPORYrm,            TB_ALIGN_32 },
1080    { X86::VPSADBWYrr,        X86::VPSADBWYrm,         TB_ALIGN_32 },
1081    { X86::VPSHUFBYrr,        X86::VPSHUFBYrm,         TB_ALIGN_32 },
1082    { X86::VPSIGNBYrr,        X86::VPSIGNBYrm,         TB_ALIGN_32 },
1083    { X86::VPSIGNWYrr,        X86::VPSIGNWYrm,         TB_ALIGN_32 },
1084    { X86::VPSIGNDYrr,        X86::VPSIGNDYrm,         TB_ALIGN_32 },
1085    { X86::VPSLLDYrr,         X86::VPSLLDYrm,          TB_ALIGN_16 },
1086    { X86::VPSLLQYrr,         X86::VPSLLQYrm,          TB_ALIGN_16 },
1087    { X86::VPSLLWYrr,         X86::VPSLLWYrm,          TB_ALIGN_16 },
1088    { X86::VPSLLVDrr,         X86::VPSLLVDrm,          TB_ALIGN_16 },
1089    { X86::VPSLLVDYrr,        X86::VPSLLVDYrm,         TB_ALIGN_32 },
1090    { X86::VPSLLVQrr,         X86::VPSLLVQrm,          TB_ALIGN_16 },
1091    { X86::VPSLLVQYrr,        X86::VPSLLVQYrm,         TB_ALIGN_32 },
1092    { X86::VPSRADYrr,         X86::VPSRADYrm,          TB_ALIGN_16 },
1093    { X86::VPSRAWYrr,         X86::VPSRAWYrm,          TB_ALIGN_16 },
1094    { X86::VPSRAVDrr,         X86::VPSRAVDrm,          TB_ALIGN_16 },
1095    { X86::VPSRAVDYrr,        X86::VPSRAVDYrm,         TB_ALIGN_32 },
1096    { X86::VPSRLDYrr,         X86::VPSRLDYrm,          TB_ALIGN_16 },
1097    { X86::VPSRLQYrr,         X86::VPSRLQYrm,          TB_ALIGN_16 },
1098    { X86::VPSRLWYrr,         X86::VPSRLWYrm,          TB_ALIGN_16 },
1099    { X86::VPSRLVDrr,         X86::VPSRLVDrm,          TB_ALIGN_16 },
1100    { X86::VPSRLVDYrr,        X86::VPSRLVDYrm,         TB_ALIGN_32 },
1101    { X86::VPSRLVQrr,         X86::VPSRLVQrm,          TB_ALIGN_16 },
1102    { X86::VPSRLVQYrr,        X86::VPSRLVQYrm,         TB_ALIGN_32 },
1103    { X86::VPSUBBYrr,         X86::VPSUBBYrm,          TB_ALIGN_32 },
1104    { X86::VPSUBDYrr,         X86::VPSUBDYrm,          TB_ALIGN_32 },
1105    { X86::VPSUBSBYrr,        X86::VPSUBSBYrm,         TB_ALIGN_32 },
1106    { X86::VPSUBSWYrr,        X86::VPSUBSWYrm,         TB_ALIGN_32 },
1107    { X86::VPSUBWYrr,         X86::VPSUBWYrm,          TB_ALIGN_32 },
1108    { X86::VPUNPCKHBWYrr,     X86::VPUNPCKHBWYrm,      TB_ALIGN_32 },
1109    { X86::VPUNPCKHDQYrr,     X86::VPUNPCKHDQYrm,      TB_ALIGN_32 },
1110    { X86::VPUNPCKHQDQYrr,    X86::VPUNPCKHQDQYrm,     TB_ALIGN_32 },
1111    { X86::VPUNPCKHWDYrr,     X86::VPUNPCKHWDYrm,      TB_ALIGN_32 },
1112    { X86::VPUNPCKLBWYrr,     X86::VPUNPCKLBWYrm,      TB_ALIGN_32 },
1113    { X86::VPUNPCKLDQYrr,     X86::VPUNPCKLDQYrm,      TB_ALIGN_32 },
1114    { X86::VPUNPCKLQDQYrr,    X86::VPUNPCKLQDQYrm,     TB_ALIGN_32 },
1115    { X86::VPUNPCKLWDYrr,     X86::VPUNPCKLWDYrm,      TB_ALIGN_32 },
1116    { X86::VPXORYrr,          X86::VPXORYrm,           TB_ALIGN_32 },
1117    // FIXME: add AVX 256-bit foldable instructions
1118  };
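  // Note (added, illustrative): each entry above pairs a register-form opcode
  // with its memory-form twin so the register allocator can fold a reload of
  // a spilled operand directly into the instruction. For example,
  //   vpaddd %xmm1, %xmm0, %xmm0
  // with %xmm1 spilled can be rewritten as
  //   vpaddd 16(%rsp), %xmm0, %xmm0
  // provided the slot meets the entry's TB_ALIGN_* requirement.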
1119
1120  for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
1121    unsigned RegOp = OpTbl2[i].RegOp;
1122    unsigned MemOp = OpTbl2[i].MemOp;
1123    unsigned Flags = OpTbl2[i].Flags;
1124    AddTableEntry(RegOp2MemOpTable2, MemOp2RegOpTable,
1125                  RegOp, MemOp,
1126                  // Index 2, folded load
1127                  Flags | TB_INDEX_2 | TB_FOLDED_LOAD);
1128  }
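  // Worked example (added): for { X86::VADDPDYrr, X86::VADDPDYrm, TB_ALIGN_32 }
  // the stored flags word is
  //   TB_ALIGN_32 (32 << 8 = 0x2000) | TB_INDEX_2 (0x2) |
  //   TB_FOLDED_LOAD (1 << 18 = 0x40000) == 0x42002.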
1129
1130  static const X86OpTblEntry OpTbl3[] = {
1131    // FMA foldable instructions
1132    { X86::VFMADDSSr231r,         X86::VFMADDSSr231m,         0 },
1133    { X86::VFMADDSDr231r,         X86::VFMADDSDr231m,         0 },
1134    { X86::VFMADDSSr132r,         X86::VFMADDSSr132m,         0 },
1135    { X86::VFMADDSDr132r,         X86::VFMADDSDr132m,         0 },
1136    { X86::VFMADDSSr213r,         X86::VFMADDSSr213m,         0 },
1137    { X86::VFMADDSDr213r,         X86::VFMADDSDr213m,         0 },
1138    { X86::VFMADDSSr132r_Int,     X86::VFMADDSSr132m_Int,     0 },
1139    { X86::VFMADDSDr132r_Int,     X86::VFMADDSDr132m_Int,     0 },
1140
1141    { X86::VFMADDPSr231r,         X86::VFMADDPSr231m,         TB_ALIGN_16 },
1142    { X86::VFMADDPDr231r,         X86::VFMADDPDr231m,         TB_ALIGN_16 },
1143    { X86::VFMADDPSr132r,         X86::VFMADDPSr132m,         TB_ALIGN_16 },
1144    { X86::VFMADDPDr132r,         X86::VFMADDPDr132m,         TB_ALIGN_16 },
1145    { X86::VFMADDPSr213r,         X86::VFMADDPSr213m,         TB_ALIGN_16 },
1146    { X86::VFMADDPDr213r,         X86::VFMADDPDr213m,         TB_ALIGN_16 },
1147    { X86::VFMADDPSr231rY,        X86::VFMADDPSr231mY,        TB_ALIGN_32 },
1148    { X86::VFMADDPDr231rY,        X86::VFMADDPDr231mY,        TB_ALIGN_32 },
1149    { X86::VFMADDPSr132rY,        X86::VFMADDPSr132mY,        TB_ALIGN_32 },
1150    { X86::VFMADDPDr132rY,        X86::VFMADDPDr132mY,        TB_ALIGN_32 },
1151    { X86::VFMADDPSr213rY,        X86::VFMADDPSr213mY,        TB_ALIGN_32 },
1152    { X86::VFMADDPDr213rY,        X86::VFMADDPDr213mY,        TB_ALIGN_32 },
1153    { X86::VFMADDPSr132r_Int,     X86::VFMADDPSr132m_Int,     TB_ALIGN_16 },
1154    { X86::VFMADDPDr132r_Int,     X86::VFMADDPDr132m_Int,     TB_ALIGN_16 },
1155    { X86::VFMADDPSr132rY_Int,    X86::VFMADDPSr132mY_Int,    TB_ALIGN_32 },
1156    { X86::VFMADDPDr132rY_Int,    X86::VFMADDPDr132mY_Int,    TB_ALIGN_32 },
1157
1158    { X86::VFNMADDSSr231r,        X86::VFNMADDSSr231m,        0 },
1159    { X86::VFNMADDSDr231r,        X86::VFNMADDSDr231m,        0 },
1160    { X86::VFNMADDSSr132r,        X86::VFNMADDSSr132m,        0 },
1161    { X86::VFNMADDSDr132r,        X86::VFNMADDSDr132m,        0 },
1162    { X86::VFNMADDSSr213r,        X86::VFNMADDSSr213m,        0 },
1163    { X86::VFNMADDSDr213r,        X86::VFNMADDSDr213m,        0 },
1164    { X86::VFNMADDSSr132r_Int,    X86::VFNMADDSSr132m_Int,    0 },
1165    { X86::VFNMADDSDr132r_Int,    X86::VFNMADDSDr132m_Int,    0 },
1166
1167    { X86::VFNMADDPSr231r,        X86::VFNMADDPSr231m,        TB_ALIGN_16 },
1168    { X86::VFNMADDPDr231r,        X86::VFNMADDPDr231m,        TB_ALIGN_16 },
1169    { X86::VFNMADDPSr132r,        X86::VFNMADDPSr132m,        TB_ALIGN_16 },
1170    { X86::VFNMADDPDr132r,        X86::VFNMADDPDr132m,        TB_ALIGN_16 },
1171    { X86::VFNMADDPSr213r,        X86::VFNMADDPSr213m,        TB_ALIGN_16 },
1172    { X86::VFNMADDPDr213r,        X86::VFNMADDPDr213m,        TB_ALIGN_16 },
1173    { X86::VFNMADDPSr231rY,       X86::VFNMADDPSr231mY,       TB_ALIGN_32 },
1174    { X86::VFNMADDPDr231rY,       X86::VFNMADDPDr231mY,       TB_ALIGN_32 },
1175    { X86::VFNMADDPSr132rY,       X86::VFNMADDPSr132mY,       TB_ALIGN_32 },
1176    { X86::VFNMADDPDr132rY,       X86::VFNMADDPDr132mY,       TB_ALIGN_32 },
1177    { X86::VFNMADDPSr213rY,       X86::VFNMADDPSr213mY,       TB_ALIGN_32 },
1178    { X86::VFNMADDPDr213rY,       X86::VFNMADDPDr213mY,       TB_ALIGN_32 },
1179    { X86::VFNMADDPSr132r_Int,    X86::VFNMADDPSr132m_Int,    TB_ALIGN_16 },
1180    { X86::VFNMADDPDr132r_Int,    X86::VFNMADDPDr132m_Int,    TB_ALIGN_16 },
1181    { X86::VFNMADDPSr132rY_Int,   X86::VFNMADDPSr132mY_Int,   TB_ALIGN_32 },
1182    { X86::VFNMADDPDr132rY_Int,   X86::VFNMADDPDr132mY_Int,   TB_ALIGN_32 },
1183
1184    { X86::VFMSUBSSr231r,         X86::VFMSUBSSr231m,         0 },
1185    { X86::VFMSUBSDr231r,         X86::VFMSUBSDr231m,         0 },
1186    { X86::VFMSUBSSr132r,         X86::VFMSUBSSr132m,         0 },
1187    { X86::VFMSUBSDr132r,         X86::VFMSUBSDr132m,         0 },
1188    { X86::VFMSUBSSr213r,         X86::VFMSUBSSr213m,         0 },
1189    { X86::VFMSUBSDr213r,         X86::VFMSUBSDr213m,         0 },
1190    { X86::VFMSUBSSr132r_Int,     X86::VFMSUBSSr132m_Int,     0 },
1191    { X86::VFMSUBSDr132r_Int,     X86::VFMSUBSDr132m_Int,     0 },
1192
1193    { X86::VFMSUBPSr231r,         X86::VFMSUBPSr231m,         TB_ALIGN_16 },
1194    { X86::VFMSUBPDr231r,         X86::VFMSUBPDr231m,         TB_ALIGN_16 },
1195    { X86::VFMSUBPSr132r,         X86::VFMSUBPSr132m,         TB_ALIGN_16 },
1196    { X86::VFMSUBPDr132r,         X86::VFMSUBPDr132m,         TB_ALIGN_16 },
1197    { X86::VFMSUBPSr213r,         X86::VFMSUBPSr213m,         TB_ALIGN_16 },
1198    { X86::VFMSUBPDr213r,         X86::VFMSUBPDr213m,         TB_ALIGN_16 },
1199    { X86::VFMSUBPSr231rY,        X86::VFMSUBPSr231mY,        TB_ALIGN_32 },
1200    { X86::VFMSUBPDr231rY,        X86::VFMSUBPDr231mY,        TB_ALIGN_32 },
1201    { X86::VFMSUBPSr132rY,        X86::VFMSUBPSr132mY,        TB_ALIGN_32 },
1202    { X86::VFMSUBPDr132rY,        X86::VFMSUBPDr132mY,        TB_ALIGN_32 },
1203    { X86::VFMSUBPSr213rY,        X86::VFMSUBPSr213mY,        TB_ALIGN_32 },
1204    { X86::VFMSUBPDr213rY,        X86::VFMSUBPDr213mY,        TB_ALIGN_32 },
1205    { X86::VFMSUBPSr132r_Int,     X86::VFMSUBPSr132m_Int,     TB_ALIGN_16 },
1206    { X86::VFMSUBPDr132r_Int,     X86::VFMSUBPDr132m_Int,     TB_ALIGN_16 },
1207    { X86::VFMSUBPSr132rY_Int,    X86::VFMSUBPSr132mY_Int,    TB_ALIGN_32 },
1208    { X86::VFMSUBPDr132rY_Int,    X86::VFMSUBPDr132mY_Int,    TB_ALIGN_32 },
1209
1210    { X86::VFNMSUBSSr231r,        X86::VFNMSUBSSr231m,        0 },
1211    { X86::VFNMSUBSDr231r,        X86::VFNMSUBSDr231m,        0 },
1212    { X86::VFNMSUBSSr132r,        X86::VFNMSUBSSr132m,        0 },
1213    { X86::VFNMSUBSDr132r,        X86::VFNMSUBSDr132m,        0 },
1214    { X86::VFNMSUBSSr213r,        X86::VFNMSUBSSr213m,        0 },
1215    { X86::VFNMSUBSDr213r,        X86::VFNMSUBSDr213m,        0 },
1216    { X86::VFNMSUBSSr132r_Int,    X86::VFNMSUBSSr132m_Int,    0 },
1217    { X86::VFNMSUBSDr132r_Int,    X86::VFNMSUBSDr132m_Int,    0 },
1218
1219    { X86::VFNMSUBPSr231r,        X86::VFNMSUBPSr231m,        TB_ALIGN_16 },
1220    { X86::VFNMSUBPDr231r,        X86::VFNMSUBPDr231m,        TB_ALIGN_16 },
1221    { X86::VFNMSUBPSr132r,        X86::VFNMSUBPSr132m,        TB_ALIGN_16 },
1222    { X86::VFNMSUBPDr132r,        X86::VFNMSUBPDr132m,        TB_ALIGN_16 },
1223    { X86::VFNMSUBPSr213r,        X86::VFNMSUBPSr213m,        TB_ALIGN_16 },
1224    { X86::VFNMSUBPDr213r,        X86::VFNMSUBPDr213m,        TB_ALIGN_16 },
1225    { X86::VFNMSUBPSr231rY,       X86::VFNMSUBPSr231mY,       TB_ALIGN_32 },
1226    { X86::VFNMSUBPDr231rY,       X86::VFNMSUBPDr231mY,       TB_ALIGN_32 },
1227    { X86::VFNMSUBPSr132rY,       X86::VFNMSUBPSr132mY,       TB_ALIGN_32 },
1228    { X86::VFNMSUBPDr132rY,       X86::VFNMSUBPDr132mY,       TB_ALIGN_32 },
1229    { X86::VFNMSUBPSr213rY,       X86::VFNMSUBPSr213mY,       TB_ALIGN_32 },
1230    { X86::VFNMSUBPDr213rY,       X86::VFNMSUBPDr213mY,       TB_ALIGN_32 },
1231    { X86::VFNMSUBPSr132r_Int,    X86::VFNMSUBPSr132m_Int,    TB_ALIGN_16 },
1232    { X86::VFNMSUBPDr132r_Int,    X86::VFNMSUBPDr132m_Int,    TB_ALIGN_16 },
1233    { X86::VFNMSUBPSr132rY_Int,   X86::VFNMSUBPSr132mY_Int,   TB_ALIGN_32 },
1234    { X86::VFNMSUBPDr132rY_Int,   X86::VFNMSUBPDr132mY_Int,   TB_ALIGN_32 },
1235
1236    { X86::VFMADDSUBPSr231r,      X86::VFMADDSUBPSr231m,      TB_ALIGN_16 },
1237    { X86::VFMADDSUBPDr231r,      X86::VFMADDSUBPDr231m,      TB_ALIGN_16 },
1238    { X86::VFMADDSUBPSr132r,      X86::VFMADDSUBPSr132m,      TB_ALIGN_16 },
1239    { X86::VFMADDSUBPDr132r,      X86::VFMADDSUBPDr132m,      TB_ALIGN_16 },
1240    { X86::VFMADDSUBPSr213r,      X86::VFMADDSUBPSr213m,      TB_ALIGN_16 },
1241    { X86::VFMADDSUBPDr213r,      X86::VFMADDSUBPDr213m,      TB_ALIGN_16 },
1242    { X86::VFMADDSUBPSr231rY,     X86::VFMADDSUBPSr231mY,     TB_ALIGN_32 },
1243    { X86::VFMADDSUBPDr231rY,     X86::VFMADDSUBPDr231mY,     TB_ALIGN_32 },
1244    { X86::VFMADDSUBPSr132rY,     X86::VFMADDSUBPSr132mY,     TB_ALIGN_32 },
1245    { X86::VFMADDSUBPDr132rY,     X86::VFMADDSUBPDr132mY,     TB_ALIGN_32 },
1246    { X86::VFMADDSUBPSr213rY,     X86::VFMADDSUBPSr213mY,     TB_ALIGN_32 },
1247    { X86::VFMADDSUBPDr213rY,     X86::VFMADDSUBPDr213mY,     TB_ALIGN_32 },
1248    { X86::VFMADDSUBPSr132r_Int,  X86::VFMADDSUBPSr132m_Int,  TB_ALIGN_16 },
1249    { X86::VFMADDSUBPDr132r_Int,  X86::VFMADDSUBPDr132m_Int,  TB_ALIGN_16 },
1250    { X86::VFMADDSUBPSr132rY_Int, X86::VFMADDSUBPSr132mY_Int, TB_ALIGN_32 },
1251    { X86::VFMADDSUBPDr132rY_Int, X86::VFMADDSUBPDr132mY_Int, TB_ALIGN_32 },
1252
1253    { X86::VFMSUBADDPSr231r,      X86::VFMSUBADDPSr231m,      TB_ALIGN_16 },
1254    { X86::VFMSUBADDPDr231r,      X86::VFMSUBADDPDr231m,      TB_ALIGN_16 },
1255    { X86::VFMSUBADDPSr132r,      X86::VFMSUBADDPSr132m,      TB_ALIGN_16 },
1256    { X86::VFMSUBADDPDr132r,      X86::VFMSUBADDPDr132m,      TB_ALIGN_16 },
1257    { X86::VFMSUBADDPSr213r,      X86::VFMSUBADDPSr213m,      TB_ALIGN_16 },
1258    { X86::VFMSUBADDPDr213r,      X86::VFMSUBADDPDr213m,      TB_ALIGN_16 },
1259    { X86::VFMSUBADDPSr231rY,     X86::VFMSUBADDPSr231mY,     TB_ALIGN_32 },
1260    { X86::VFMSUBADDPDr231rY,     X86::VFMSUBADDPDr231mY,     TB_ALIGN_32 },
1261    { X86::VFMSUBADDPSr132rY,     X86::VFMSUBADDPSr132mY,     TB_ALIGN_32 },
1262    { X86::VFMSUBADDPDr132rY,     X86::VFMSUBADDPDr132mY,     TB_ALIGN_32 },
1263    { X86::VFMSUBADDPSr213rY,     X86::VFMSUBADDPSr213mY,     TB_ALIGN_32 },
1264    { X86::VFMSUBADDPDr213rY,     X86::VFMSUBADDPDr213mY,     TB_ALIGN_32 },
1265    { X86::VFMSUBADDPSr132r_Int,  X86::VFMSUBADDPSr132m_Int,  TB_ALIGN_16 },
1266    { X86::VFMSUBADDPDr132r_Int,  X86::VFMSUBADDPDr132m_Int,  TB_ALIGN_16 },
1267    { X86::VFMSUBADDPSr132rY_Int, X86::VFMSUBADDPSr132mY_Int, TB_ALIGN_32 },
1268    { X86::VFMSUBADDPDr132rY_Int, X86::VFMSUBADDPDr132mY_Int, TB_ALIGN_32 },
1269  };
1270
1271  for (unsigned i = 0, e = array_lengthof(OpTbl3); i != e; ++i) {
1272    unsigned RegOp = OpTbl3[i].RegOp;
1273    unsigned MemOp = OpTbl3[i].MemOp;
1274    unsigned Flags = OpTbl3[i].Flags;
1275    AddTableEntry(RegOp2MemOpTable3, MemOp2RegOpTable,
1276                  RegOp, MemOp,
1277                  // Index 3, folded load
1278                  Flags | TB_INDEX_3 | TB_FOLDED_LOAD);
1279  }
1280
1281}
1282
1283void
1284X86InstrInfo::AddTableEntry(RegOp2MemOpTableType &R2MTable,
1285                            MemOp2RegOpTableType &M2RTable,
1286                            unsigned RegOp, unsigned MemOp, unsigned Flags) {
1287    if ((Flags & TB_NO_FORWARD) == 0) {
1288      assert(!R2MTable.count(RegOp) && "Duplicate entry!");
1289      R2MTable[RegOp] = std::make_pair(MemOp, Flags);
1290    }
1291    if ((Flags & TB_NO_REVERSE) == 0) {
1292      assert(!M2RTable.count(MemOp) &&
1293           "Duplicated entries in unfolding maps?");
1294      M2RTable[MemOp] = std::make_pair(RegOp, Flags);
1295    }
1296}
1297
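// Illustrative example (added comment): for "movzwl %ax, %ecx" (MOVZX32rr16)
// this returns SrcReg = AX, DstReg = ECX and SubIdx = X86::sub_16bit, i.e. the
// extension can be coalesced into a copy to the low 16 bits of %ecx.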
1298bool
1299X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
1300                                    unsigned &SrcReg, unsigned &DstReg,
1301                                    unsigned &SubIdx) const {
1302  switch (MI.getOpcode()) {
1303  default: break;
1304  case X86::MOVSX16rr8:
1305  case X86::MOVZX16rr8:
1306  case X86::MOVSX32rr8:
1307  case X86::MOVZX32rr8:
1308  case X86::MOVSX64rr8:
1309  case X86::MOVZX64rr8:
1310    if (!TM.getSubtarget<X86Subtarget>().is64Bit())
1311      // It's not always legal to reference the low 8 bits of the larger
1312      // register in 32-bit mode.
1313      return false;
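      // Otherwise fall through to the common handling below.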
1314  case X86::MOVSX32rr16:
1315  case X86::MOVZX32rr16:
1316  case X86::MOVSX64rr16:
1317  case X86::MOVZX64rr16:
1318  case X86::MOVSX64rr32:
1319  case X86::MOVZX64rr32: {
1320    if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
1321      // Be conservative.
1322      return false;
1323    SrcReg = MI.getOperand(1).getReg();
1324    DstReg = MI.getOperand(0).getReg();
1325    switch (MI.getOpcode()) {
1326    default:
1327      llvm_unreachable(0);
1328    case X86::MOVSX16rr8:
1329    case X86::MOVZX16rr8:
1330    case X86::MOVSX32rr8:
1331    case X86::MOVZX32rr8:
1332    case X86::MOVSX64rr8:
1333    case X86::MOVZX64rr8:
1334      SubIdx = X86::sub_8bit;
1335      break;
1336    case X86::MOVSX32rr16:
1337    case X86::MOVZX32rr16:
1338    case X86::MOVSX64rr16:
1339    case X86::MOVZX64rr16:
1340      SubIdx = X86::sub_16bit;
1341      break;
1342    case X86::MOVSX64rr32:
1343    case X86::MOVZX64rr32:
1344      SubIdx = X86::sub_32bit;
1345      break;
1346    }
1347    return true;
1348  }
1349  }
1350  return false;
1351}
1352
1353/// isFrameOperand - Return true and the FrameIndex if the specified
1354/// operand and following operands form a reference to the stack frame.
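/// An X86 memory reference consists of five operands: base, scale, index,
/// displacement and segment. A plain frame access therefore looks like
/// (illustrative) "FrameIndex<fi#0>, 1, %noreg, 0", i.e. [fi#0 + 1*%noreg + 0],
/// which is exactly the pattern matched below; the segment operand is not
/// inspected.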
1355bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
1356                                  int &FrameIndex) const {
1357  if (MI->getOperand(Op).isFI() && MI->getOperand(Op+1).isImm() &&
1358      MI->getOperand(Op+2).isReg() && MI->getOperand(Op+3).isImm() &&
1359      MI->getOperand(Op+1).getImm() == 1 &&
1360      MI->getOperand(Op+2).getReg() == 0 &&
1361      MI->getOperand(Op+3).getImm() == 0) {
1362    FrameIndex = MI->getOperand(Op).getIndex();
1363    return true;
1364  }
1365  return false;
1366}
1367
1368static bool isFrameLoadOpcode(int Opcode) {
1369  switch (Opcode) {
1370  default:
1371    return false;
1372  case X86::MOV8rm:
1373  case X86::MOV16rm:
1374  case X86::MOV32rm:
1375  case X86::MOV64rm:
1376  case X86::LD_Fp64m:
1377  case X86::MOVSSrm:
1378  case X86::MOVSDrm:
1379  case X86::MOVAPSrm:
1380  case X86::MOVAPDrm:
1381  case X86::MOVDQArm:
1382  case X86::VMOVSSrm:
1383  case X86::VMOVSDrm:
1384  case X86::VMOVAPSrm:
1385  case X86::VMOVAPDrm:
1386  case X86::VMOVDQArm:
1387  case X86::VMOVAPSYrm:
1388  case X86::VMOVAPDYrm:
1389  case X86::VMOVDQAYrm:
1390  case X86::MMX_MOVD64rm:
1391  case X86::MMX_MOVQ64rm:
1392    return true;
1393  }
1394}
1395
1396static bool isFrameStoreOpcode(int Opcode) {
1397  switch (Opcode) {
1398  default: break;
1399  case X86::MOV8mr:
1400  case X86::MOV16mr:
1401  case X86::MOV32mr:
1402  case X86::MOV64mr:
1403  case X86::ST_FpP64m:
1404  case X86::MOVSSmr:
1405  case X86::MOVSDmr:
1406  case X86::MOVAPSmr:
1407  case X86::MOVAPDmr:
1408  case X86::MOVDQAmr:
1409  case X86::VMOVSSmr:
1410  case X86::VMOVSDmr:
1411  case X86::VMOVAPSmr:
1412  case X86::VMOVAPDmr:
1413  case X86::VMOVDQAmr:
1414  case X86::VMOVAPSYmr:
1415  case X86::VMOVAPDYmr:
1416  case X86::VMOVDQAYmr:
1417  case X86::MMX_MOVD64mr:
1418  case X86::MMX_MOVQ64mr:
1419  case X86::MMX_MOVNTQmr:
1420    return true;
1421  }
1422  return false;
1423}
1424
1425unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
1426                                           int &FrameIndex) const {
1427  if (isFrameLoadOpcode(MI->getOpcode()))
1428    if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
1429      return MI->getOperand(0).getReg();
1430  return 0;
1431}
1432
1433unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
1434                                                 int &FrameIndex) const {
1435  if (isFrameLoadOpcode(MI->getOpcode())) {
1436    unsigned Reg;
1437    if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
1438      return Reg;
1439    // Check for post-frame index elimination operations
1440    const MachineMemOperand *Dummy;
1441    return hasLoadFromStackSlot(MI, Dummy, FrameIndex);
1442  }
1443  return 0;
1444}
1445
1446unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
1447                                          int &FrameIndex) const {
1448  if (isFrameStoreOpcode(MI->getOpcode()))
1449    if (MI->getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
1450        isFrameOperand(MI, 0, FrameIndex))
1451      return MI->getOperand(X86::AddrNumOperands).getReg();
1452  return 0;
1453}
1454
1455unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
1456                                                int &FrameIndex) const {
1457  if (isFrameStoreOpcode(MI->getOpcode())) {
1458    unsigned Reg;
1459    if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
1460      return Reg;
1461    // Check for post-frame index elimination operations
1462    const MachineMemOperand *Dummy;
1463    return hasStoreToStackSlot(MI, Dummy, FrameIndex);
1464  }
1465  return 0;
1466}
1467
1468/// regIsPICBase - Return true if register is a PIC base (i.e. defined by
1469/// X86::MOVPC32r).
1470static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
1471  bool isPICBase = false;
1472  for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
1473         E = MRI.def_end(); I != E; ++I) {
1474    MachineInstr *DefMI = I.getOperand().getParent();
1475    if (DefMI->getOpcode() != X86::MOVPC32r)
1476      return false;
1477    assert(!isPICBase && "More than one PIC base?");
1478    isPICBase = true;
1479  }
1480  return isPICBase;
1481}
1482
1483bool
1484X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
1485                                                AliasAnalysis *AA) const {
1486  switch (MI->getOpcode()) {
1487  default: break;
1488    case X86::MOV8rm:
1489    case X86::MOV16rm:
1490    case X86::MOV32rm:
1491    case X86::MOV64rm:
1492    case X86::LD_Fp64m:
1493    case X86::MOVSSrm:
1494    case X86::MOVSDrm:
1495    case X86::MOVAPSrm:
1496    case X86::MOVUPSrm:
1497    case X86::MOVAPDrm:
1498    case X86::MOVDQArm:
1499    case X86::VMOVSSrm:
1500    case X86::VMOVSDrm:
1501    case X86::VMOVAPSrm:
1502    case X86::VMOVUPSrm:
1503    case X86::VMOVAPDrm:
1504    case X86::VMOVDQArm:
1505    case X86::VMOVAPSYrm:
1506    case X86::VMOVUPSYrm:
1507    case X86::VMOVAPDYrm:
1508    case X86::VMOVDQAYrm:
1509    case X86::MMX_MOVD64rm:
1510    case X86::MMX_MOVQ64rm:
1511    case X86::FsVMOVAPSrm:
1512    case X86::FsVMOVAPDrm:
1513    case X86::FsMOVAPSrm:
1514    case X86::FsMOVAPDrm: {
1515      // Loads from constant pools are trivially rematerializable.
1516      if (MI->getOperand(1).isReg() &&
1517          MI->getOperand(2).isImm() &&
1518          MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
1519          MI->isInvariantLoad(AA)) {
1520        unsigned BaseReg = MI->getOperand(1).getReg();
1521        if (BaseReg == 0 || BaseReg == X86::RIP)
1522          return true;
1523        // Allow re-materialization of PIC load.
1524        if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
1525          return false;
1526        const MachineFunction &MF = *MI->getParent()->getParent();
1527        const MachineRegisterInfo &MRI = MF.getRegInfo();
1528        return regIsPICBase(BaseReg, MRI);
1538      }
1539      return false;
1540    }
1541
1542     case X86::LEA32r:
1543     case X86::LEA64r: {
1544       if (MI->getOperand(2).isImm() &&
1545           MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
1546           !MI->getOperand(4).isReg()) {
1547         // lea fi#, lea GV, etc. are all rematerializable.
1548         if (!MI->getOperand(1).isReg())
1549           return true;
1550         unsigned BaseReg = MI->getOperand(1).getReg();
1551         if (BaseReg == 0)
1552           return true;
1553         // Allow re-materialization of lea PICBase + x.
1554         const MachineFunction &MF = *MI->getParent()->getParent();
1555         const MachineRegisterInfo &MRI = MF.getRegInfo();
1556         return regIsPICBase(BaseReg, MRI);
1557       }
1558       return false;
1559     }
1560  }
1561
1562  // All other instructions marked M_REMATERIALIZABLE are always trivially
1563  // rematerializable.
1564  return true;
1565}
1566
1567/// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
1568/// that would clobber the EFLAGS condition register. Note the result may be
1569/// conservative: if it cannot definitely determine the safety after visiting
1570/// a few instructions in each direction, it assumes it's not safe.
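/// For instance (illustrative), inserting a flag-clobbering "xorl" between
///   cmpl %eax, %ebx
///   jne  LBB0_2
/// is unsafe: the forward scan sees the branch's EFLAGS use and returns false.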
1571static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
1572                                  MachineBasicBlock::iterator I) {
1573  MachineBasicBlock::iterator E = MBB.end();
1574
1575  // To limit compile time, if we are not able to determine the safety
1576  // after visiting 4 instructions in each direction, we will assume
1577  // it's not safe.
1578  MachineBasicBlock::iterator Iter = I;
1579  for (unsigned i = 0; Iter != E && i < 4; ++i) {
1580    bool SeenDef = false;
1581    for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
1582      MachineOperand &MO = Iter->getOperand(j);
1583      if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
1584        SeenDef = true;
1585      if (!MO.isReg())
1586        continue;
1587      if (MO.getReg() == X86::EFLAGS) {
1588        if (MO.isUse())
1589          return false;
1590        SeenDef = true;
1591      }
1592    }
1593
1594    if (SeenDef)
1595      // This instruction defines EFLAGS, no need to look any further.
1596      return true;
1597    ++Iter;
1598    // Skip over DBG_VALUE.
1599    while (Iter != E && Iter->isDebugValue())
1600      ++Iter;
1601  }
1602
1603  // It is safe to clobber EFLAGS at the end of a block if no successor has
1604  // it live in.
1605  if (Iter == E) {
1606    for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
1607           SE = MBB.succ_end(); SI != SE; ++SI)
1608      if ((*SI)->isLiveIn(X86::EFLAGS))
1609        return false;
1610    return true;
1611  }
1612
1613  MachineBasicBlock::iterator B = MBB.begin();
1614  Iter = I;
1615  for (unsigned i = 0; i < 4; ++i) {
1616    // If we make it to the beginning of the block, it's safe to clobber
1617    // EFLAGS iff EFLAGS is not live-in.
1618    if (Iter == B)
1619      return !MBB.isLiveIn(X86::EFLAGS);
1620
1621    --Iter;
1622    // Skip over DBG_VALUE.
1623    while (Iter != B && Iter->isDebugValue())
1624      --Iter;
1625
1626    bool SawKill = false;
1627    for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
1628      MachineOperand &MO = Iter->getOperand(j);
1629      // A register mask may clobber EFLAGS, but we should still look for a
1630      // live EFLAGS def.
1631      if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
1632        SawKill = true;
1633      if (MO.isReg() && MO.getReg() == X86::EFLAGS) {
1634        if (MO.isDef()) return MO.isDead();
1635        if (MO.isKill()) SawKill = true;
1636      }
1637    }
1638
1639    if (SawKill)
1640      // This instruction kills EFLAGS and doesn't redefine it, so
1641      // there's no need to look further.
1642      return true;
1643  }
1644
1645  // Conservative answer.
1646  return false;
1647}
1648
1649void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
1650                                 MachineBasicBlock::iterator I,
1651                                 unsigned DestReg, unsigned SubIdx,
1652                                 const MachineInstr *Orig,
1653                                 const TargetRegisterInfo &TRI) const {
1654  DebugLoc DL = Orig->getDebugLoc();
1655
1656  // MOV32r0 etc. are implemented with xor, which clobbers the condition
1657  // codes. Re-materialize them as movri instructions to avoid side effects.
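  // E.g. (illustrative): a zero rematerialized between a compare and its
  // branch must be "movl $0, %eax" rather than "xorl %eax, %eax", since the
  // xor would overwrite the EFLAGS value the branch consumes.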
1658  bool Clone = true;
1659  unsigned Opc = Orig->getOpcode();
1660  switch (Opc) {
1661  default: break;
1662  case X86::MOV8r0:
1663  case X86::MOV16r0:
1664  case X86::MOV32r0:
1665  case X86::MOV64r0: {
1666    if (!isSafeToClobberEFLAGS(MBB, I)) {
1667      switch (Opc) {
1668      default: break;
1669      case X86::MOV8r0:  Opc = X86::MOV8ri;  break;
1670      case X86::MOV16r0: Opc = X86::MOV16ri; break;
1671      case X86::MOV32r0: Opc = X86::MOV32ri; break;
1672      case X86::MOV64r0: Opc = X86::MOV64ri64i32; break;
1673      }
1674      Clone = false;
1675    }
1676    break;
1677  }
1678  }
1679
1680  if (Clone) {
1681    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
1682    MBB.insert(I, MI);
1683  } else {
1684    BuildMI(MBB, I, DL, get(Opc)).addOperand(Orig->getOperand(0)).addImm(0);
1685  }
1686
1687  MachineInstr *NewMI = prior(I);
1688  NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
1689}
1690
1691/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
1692/// is not marked dead.
1693static bool hasLiveCondCodeDef(MachineInstr *MI) {
1694  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1695    MachineOperand &MO = MI->getOperand(i);
1696    if (MO.isReg() && MO.isDef() &&
1697        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
1698      return true;
1699    }
1700  }
1701  return false;
1702}
1703
1704/// convertToThreeAddressWithLEA - Helper for convertToThreeAddress when
1705/// 16-bit LEA is disabled: use a 32-bit LEA to form 3-address code by
1706/// promoting to a 32-bit superregister and then truncating back down to a
1707/// 16-bit subregister.
1708MachineInstr *
1709X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
1710                                           MachineFunction::iterator &MFI,
1711                                           MachineBasicBlock::iterator &MBBI,
1712                                           LiveVariables *LV) const {
1713  MachineInstr *MI = MBBI;
1714  unsigned Dest = MI->getOperand(0).getReg();
1715  unsigned Src = MI->getOperand(1).getReg();
1716  bool isDead = MI->getOperand(0).isDead();
1717  bool isKill = MI->getOperand(1).isKill();
1718
1719  unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
1720    ? X86::LEA64_32r : X86::LEA32r;
1721  MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
1722  unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
1723  unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
1724
1725  // Build and insert into an implicit UNDEF value. This is OK because
1726  // we'll be shifting and then extracting the lower 16-bits.
1727  // This has the potential to cause a partial register stall, e.g.:
1728  //   movw    (%rbp,%rcx,2), %dx
1729  //   leal    -65(%rdx), %esi
1730  // But testing has shown this *does* help performance in 64-bit mode (at
1731  // least on modern x86 machines).
1732  BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
1733  MachineInstr *InsMI =
1734    BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
1735    .addReg(leaInReg, RegState::Define, X86::sub_16bit)
1736    .addReg(Src, getKillRegState(isKill));
1737
1738  MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
1739                                    get(Opc), leaOutReg);
1740  switch (MIOpc) {
1741  default:
1742    llvm_unreachable(0);
1743  case X86::SHL16ri: {
1744    unsigned ShAmt = MI->getOperand(2).getImm();
1745    MIB.addReg(0).addImm(1 << ShAmt)
1746       .addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
1747    break;
1748  }
1749  case X86::INC16r:
1750  case X86::INC64_16r:
1751    addRegOffset(MIB, leaInReg, true, 1);
1752    break;
1753  case X86::DEC16r:
1754  case X86::DEC64_16r:
1755    addRegOffset(MIB, leaInReg, true, -1);
1756    break;
1757  case X86::ADD16ri:
1758  case X86::ADD16ri8:
1759  case X86::ADD16ri_DB:
1760  case X86::ADD16ri8_DB:
1761    addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
1762    break;
1763  case X86::ADD16rr:
1764  case X86::ADD16rr_DB: {
1765    unsigned Src2 = MI->getOperand(2).getReg();
1766    bool isKill2 = MI->getOperand(2).isKill();
1767    unsigned leaInReg2 = 0;
1768    MachineInstr *InsMI2 = 0;
1769    if (Src == Src2) {
1770      // ADD16rr %reg1028<kill>, %reg1028: the two sources are the same
1771      // register, so just a single insert_subreg suffices.
1772      addRegReg(MIB, leaInReg, true, leaInReg, false);
1773    } else {
1774      leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
1775      // Build and insert into an implicit UNDEF value. This is OK because
1776      // we'll be shifting and then extracting the lower 16-bits.
1777      BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF),leaInReg2);
1778      InsMI2 =
1779        BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
1780        .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
1781        .addReg(Src2, getKillRegState(isKill2));
1782      addRegReg(MIB, leaInReg, true, leaInReg2, true);
1783    }
1784    if (LV && isKill2 && InsMI2)
1785      LV->replaceKillInstruction(Src2, MI, InsMI2);
1786    break;
1787  }
1788  }
1789
1790  MachineInstr *NewMI = MIB;
1791  MachineInstr *ExtMI =
1792    BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
1793    .addReg(Dest, RegState::Define | getDeadRegState(isDead))
1794    .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);
1795
1796  if (LV) {
1797    // Update live variables
1798    LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
1799    LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
1800    if (isKill)
1801      LV->replaceKillInstruction(Src, MI, InsMI);
1802    if (isDead)
1803      LV->replaceKillInstruction(Dest, MI, ExtMI);
1804  }
1805
1806  return ExtMI;
1807}
1808
1809/// convertToThreeAddress - This method must be implemented by targets that
1810/// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
1811/// may be able to convert a two-address instruction into a true
1812/// three-address instruction on demand.  This allows the X86 target (for
1813/// example) to convert ADD and SHL instructions into LEA instructions if they
1814/// would require register copies due to two-addressness.
1815///
1816/// This method returns a null pointer if the transformation cannot be
1817/// performed, otherwise it returns the new instruction.
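///
/// For example (illustrative), the two-address "addl %esi, %edi" can become
/// the three-address "leal (%esi,%edi), %eax", letting the result land in a
/// register other than either source.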
1818///
1819MachineInstr *
1820X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
1821                                    MachineBasicBlock::iterator &MBBI,
1822                                    LiveVariables *LV) const {
1823  MachineInstr *MI = MBBI;
1824  MachineFunction &MF = *MI->getParent()->getParent();
1825  // All input instructions are two-address instructions.  Get the known operands.
1826  unsigned Dest = MI->getOperand(0).getReg();
1827  unsigned Src = MI->getOperand(1).getReg();
1828  bool isDead = MI->getOperand(0).isDead();
1829  bool isKill = MI->getOperand(1).isKill();
1830
1831  MachineInstr *NewMI = NULL;
1832  // FIXME: 16-bit LEAs are really slow on Athlons, but not bad on P4s.  When
1833  // we have better subtarget support, enable the 16-bit LEA generation here.
1834  // 16-bit LEA is also slow on Core2.
1835  bool DisableLEA16 = true;
1836  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
1837
1838  unsigned MIOpc = MI->getOpcode();
1839  switch (MIOpc) {
1840  case X86::SHUFPSrri: {
1841    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
1842    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;
1843
1844    unsigned B = MI->getOperand(1).getReg();
1845    unsigned C = MI->getOperand(2).getReg();
1846    if (B != C) return 0;
1847    unsigned A = MI->getOperand(0).getReg();
1848    unsigned M = MI->getOperand(3).getImm();
1849    NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri))
1850      .addReg(A, RegState::Define | getDeadRegState(isDead))
1851      .addReg(B, getKillRegState(isKill)).addImm(M);
1852    break;
1853  }
1854  case X86::SHUFPDrri: {
1855    assert(MI->getNumOperands() == 4 && "Unknown shufpd instruction!");
1856    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;
1857
1858    unsigned B = MI->getOperand(1).getReg();
1859    unsigned C = MI->getOperand(2).getReg();
1860    if (B != C) return 0;
1861    unsigned A = MI->getOperand(0).getReg();
1862    unsigned M = MI->getOperand(3).getImm();
1863
1864    // Convert to PSHUFD mask.
1865    M = ((M & 1) << 1) | ((M & 1) << 3) | ((M & 2) << 4) | ((M & 2) << 6)| 0x44;
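    // Worked example (added): for the SHUFPD immediate M = 1 this yields
    // 0x02 | 0x08 | 0x44 = 0x4E, the PSHUFD dword selector <2,3,0,1>, which
    // swaps the two 64-bit halves just as "shufpd $1" does when both sources
    // are the same register.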
1866
1867    NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri))
1868      .addReg(A, RegState::Define | getDeadRegState(isDead))
1869      .addReg(B, getKillRegState(isKill)).addImm(M);
1870    break;
1871  }
1872  case X86::SHL64ri: {
1873    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
1874    // NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't yet
1875    // use the flags produced by a shift, so this is safe.
1876    unsigned ShAmt = MI->getOperand(2).getImm();
1877    if (ShAmt == 0 || ShAmt >= 4) return 0;
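    // LEA scales are limited to 1, 2, 4 and 8, so only 1-3 bit shifts can be
    // expressed; e.g. (illustrative) "shlq $3, %rdi" becomes
    // "leaq (,%rdi,8), %rdi".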
1878
1879    // LEA can't handle RSP.
1880    if (TargetRegisterInfo::isVirtualRegister(Src) &&
1881        !MF.getRegInfo().constrainRegClass(Src, &X86::GR64_NOSPRegClass))
1882      return 0;
1883
1884    NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
1885      .addReg(Dest, RegState::Define | getDeadRegState(isDead))
1886      .addReg(0).addImm(1 << ShAmt)
1887      .addReg(Src, getKillRegState(isKill))
1888      .addImm(0).addReg(0);
1889    break;
1890  }
1891  case X86::SHL32ri: {
1892    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
1893    // NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't yet
1894    // use the flags produced by a shift, so this is safe.
1895    unsigned ShAmt = MI->getOperand(2).getImm();
1896    if (ShAmt == 0 || ShAmt >= 4) return 0;
1897
1898    // LEA can't handle ESP.
1899    if (TargetRegisterInfo::isVirtualRegister(Src) &&
1900        !MF.getRegInfo().constrainRegClass(Src, &X86::GR32_NOSPRegClass))
1901      return 0;
1902
1903    unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
1904    NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
1905      .addReg(Dest, RegState::Define | getDeadRegState(isDead))
1906      .addReg(0).addImm(1 << ShAmt)
1907      .addReg(Src, getKillRegState(isKill)).addImm(0).addReg(0);
1908    break;
1909  }
1910  case X86::SHL16ri: {
1911    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
1912    // NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't yet
1913    // use the flags produced by a shift, so this is safe.
1914    unsigned ShAmt = MI->getOperand(2).getImm();
1915    if (ShAmt == 0 || ShAmt >= 4) return 0;
1916
1917    if (DisableLEA16)
1918      return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
1919    NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
1920      .addReg(Dest, RegState::Define | getDeadRegState(isDead))
1921      .addReg(0).addImm(1 << ShAmt)
1922      .addReg(Src, getKillRegState(isKill))
1923      .addImm(0).addReg(0);
1924    break;
1925  }
1926  default: {
1927    // The following opcodes also set the condition code register(s). Only
1928    // convert them to an equivalent LEA if the condition code register defs
1929    // are dead!
1930    if (hasLiveCondCodeDef(MI))
1931      return 0;
1932
1933    switch (MIOpc) {
1934    default: return 0;
1935    case X86::INC64r:
1936    case X86::INC32r:
1937    case X86::INC64_32r: {
1938      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
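      // E.g. (illustrative) "incq %rdi" becomes "leaq 1(%rdi), %rdi", which
      // leaves EFLAGS untouched.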
1939      unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
1940        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
1941      const TargetRegisterClass *RC = MIOpc == X86::INC64r ?
1942        (const TargetRegisterClass*)&X86::GR64_NOSPRegClass :
1943        (const TargetRegisterClass*)&X86::GR32_NOSPRegClass;
1944
1945      // LEA can't handle RSP.
1946      if (TargetRegisterInfo::isVirtualRegister(Src) &&
1947          !MF.getRegInfo().constrainRegClass(Src, RC))
1948        return 0;
1949
1950      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
1951                              .addReg(Dest, RegState::Define |
1952                                      getDeadRegState(isDead)),
1953                              Src, isKill, 1);
1954      break;
1955    }
1956    case X86::INC16r:
1957    case X86::INC64_16r:
1958      if (DisableLEA16)
1959        return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
1960      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
1961      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
1962                           .addReg(Dest, RegState::Define |
1963                                   getDeadRegState(isDead)),
1964                           Src, isKill, 1);
1965      break;
1966    case X86::DEC64r:
1967    case X86::DEC32r:
1968    case X86::DEC64_32r: {
1969      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
1970      unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
1971        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
1972      const TargetRegisterClass *RC = MIOpc == X86::DEC64r ?
1973        (const TargetRegisterClass*)&X86::GR64_NOSPRegClass :
1974        (const TargetRegisterClass*)&X86::GR32_NOSPRegClass;
1975      // LEA can't handle RSP.
1976      if (TargetRegisterInfo::isVirtualRegister(Src) &&
1977          !MF.getRegInfo().constrainRegClass(Src, RC))
1978        return 0;
1979
1980      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
1981                              .addReg(Dest, RegState::Define |
1982                                      getDeadRegState(isDead)),
1983                              Src, isKill, -1);
1984      break;
1985    }
1986    case X86::DEC16r:
1987    case X86::DEC64_16r:
1988      if (DisableLEA16)
1989        return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
1990      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
1991      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
1992                           .addReg(Dest, RegState::Define |
1993                                   getDeadRegState(isDead)),
1994                           Src, isKill, -1);
1995      break;
1996    case X86::ADD64rr:
1997    case X86::ADD64rr_DB:
1998    case X86::ADD32rr:
1999    case X86::ADD32rr_DB: {
2000      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
2001      unsigned Opc;
2002      const TargetRegisterClass *RC;
2003      if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) {
2004        Opc = X86::LEA64r;
2005        RC = &X86::GR64_NOSPRegClass;
2006      } else {
2007        Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
2008        RC = &X86::GR32_NOSPRegClass;
2009      }
2010
2012      unsigned Src2 = MI->getOperand(2).getReg();
2013      bool isKill2 = MI->getOperand(2).isKill();
2014
2015      // LEA can't handle RSP.
2016      if (TargetRegisterInfo::isVirtualRegister(Src2) &&
2017          !MF.getRegInfo().constrainRegClass(Src2, RC))
2018        return 0;
2019
2020      NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(Opc))
2021                        .addReg(Dest, RegState::Define |
2022                                getDeadRegState(isDead)),
2023                        Src, isKill, Src2, isKill2);
2024      if (LV && isKill2)
2025        LV->replaceKillInstruction(Src2, MI, NewMI);
2026      break;
2027    }
2028    case X86::ADD16rr:
2029    case X86::ADD16rr_DB: {
2030      if (DisableLEA16)
2031        return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
2032      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
2033      unsigned Src2 = MI->getOperand(2).getReg();
2034      bool isKill2 = MI->getOperand(2).isKill();
2035      NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
2036                        .addReg(Dest, RegState::Define |
2037                                getDeadRegState(isDead)),
2038                        Src, isKill, Src2, isKill2);
2039      if (LV && isKill2)
2040        LV->replaceKillInstruction(Src2, MI, NewMI);
2041      break;
2042    }
2043    case X86::ADD64ri32:
2044    case X86::ADD64ri8:
2045    case X86::ADD64ri32_DB:
2046    case X86::ADD64ri8_DB:
2047      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
2048      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
2049                              .addReg(Dest, RegState::Define |
2050                                      getDeadRegState(isDead)),
2051                              Src, isKill, MI->getOperand(2).getImm());
2052      break;
2053    case X86::ADD32ri:
2054    case X86::ADD32ri8:
2055    case X86::ADD32ri_DB:
2056    case X86::ADD32ri8_DB: {
2057      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
2058      unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
2059      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
2060                              .addReg(Dest, RegState::Define |
2061                                      getDeadRegState(isDead)),
2062                                Src, isKill, MI->getOperand(2).getImm());
2063      break;
2064    }
2065    case X86::ADD16ri:
2066    case X86::ADD16ri8:
2067    case X86::ADD16ri_DB:
2068    case X86::ADD16ri8_DB:
2069      if (DisableLEA16)
2070        return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
2071      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
2072      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
2073                              .addReg(Dest, RegState::Define |
2074                                      getDeadRegState(isDead)),
2075                              Src, isKill, MI->getOperand(2).getImm());
2076      break;
2077    }
2078  }
2079  }
2080
2081  if (!NewMI) return 0;
2082
2083  if (LV) {  // Update live variables
2084    if (isKill)
2085      LV->replaceKillInstruction(Src, MI, NewMI);
2086    if (isDead)
2087      LV->replaceKillInstruction(Dest, MI, NewMI);
2088  }
2089
2090  MFI->insert(MBBI, NewMI);          // Insert the new inst
2091  return NewMI;
2092}
2093
2094/// commuteInstruction - We have a few instructions that must be hacked on to
2095/// commute them.
2096///
2097MachineInstr *
2098X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
2099  switch (MI->getOpcode()) {
2100  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
2101  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
2102  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
2103  case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
2104  case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
2105  case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
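    // Why this is sound (added note): SHRD B, C, I extracts bits [I+Size-1:I]
    // of the concatenation C:B, and SHLD C, B, (Size-I) extracts the same bit
    // range; e.g. for Size = 16 and I = 4 both select bits [19:4] of C:B.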
2106    unsigned Opc;
2107    unsigned Size;
2108    switch (MI->getOpcode()) {
2109    default: llvm_unreachable("Unreachable!");
2110    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
2111    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
2112    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
2113    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
2114    case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
2115    case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
2116    }
2117    unsigned Amt = MI->getOperand(3).getImm();
2118    if (NewMI) {
2119      MachineFunction &MF = *MI->getParent()->getParent();
2120      MI = MF.CloneMachineInstr(MI);
2121      NewMI = false;
2122    }
2123    MI->setDesc(get(Opc));
2124    MI->getOperand(3).setImm(Size-Amt);
2125    return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
2126  }
2127  case X86::CMOVB16rr:
2128  case X86::CMOVB32rr:
2129  case X86::CMOVB64rr:
2130  case X86::CMOVAE16rr:
2131  case X86::CMOVAE32rr:
2132  case X86::CMOVAE64rr:
2133  case X86::CMOVE16rr:
2134  case X86::CMOVE32rr:
2135  case X86::CMOVE64rr:
2136  case X86::CMOVNE16rr:
2137  case X86::CMOVNE32rr:
2138  case X86::CMOVNE64rr:
2139  case X86::CMOVBE16rr:
2140  case X86::CMOVBE32rr:
2141  case X86::CMOVBE64rr:
2142  case X86::CMOVA16rr:
2143  case X86::CMOVA32rr:
2144  case X86::CMOVA64rr:
2145  case X86::CMOVL16rr:
2146  case X86::CMOVL32rr:
2147  case X86::CMOVL64rr:
2148  case X86::CMOVGE16rr:
2149  case X86::CMOVGE32rr:
2150  case X86::CMOVGE64rr:
2151  case X86::CMOVLE16rr:
2152  case X86::CMOVLE32rr:
2153  case X86::CMOVLE64rr:
2154  case X86::CMOVG16rr:
2155  case X86::CMOVG32rr:
2156  case X86::CMOVG64rr:
2157  case X86::CMOVS16rr:
2158  case X86::CMOVS32rr:
2159  case X86::CMOVS64rr:
2160  case X86::CMOVNS16rr:
2161  case X86::CMOVNS32rr:
2162  case X86::CMOVNS64rr:
2163  case X86::CMOVP16rr:
2164  case X86::CMOVP32rr:
2165  case X86::CMOVP64rr:
2166  case X86::CMOVNP16rr:
2167  case X86::CMOVNP32rr:
2168  case X86::CMOVNP64rr:
2169  case X86::CMOVO16rr:
2170  case X86::CMOVO32rr:
2171  case X86::CMOVO64rr:
2172  case X86::CMOVNO16rr:
2173  case X86::CMOVNO32rr:
2174  case X86::CMOVNO64rr: {
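    // Commuting swaps the two register sources, so the condition must be
    // inverted to preserve the result: cond ? a : b == !cond ? b : a
    // (added explanatory note).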
2175    unsigned Opc = 0;
2176    switch (MI->getOpcode()) {
2177    default: llvm_unreachable("Unreachable!");
2178    case X86::CMOVB16rr:  Opc = X86::CMOVAE16rr; break;
2179    case X86::CMOVB32rr:  Opc = X86::CMOVAE32rr; break;
2180    case X86::CMOVB64rr:  Opc = X86::CMOVAE64rr; break;
2181    case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
2182    case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
2183    case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
2184    case X86::CMOVE16rr:  Opc = X86::CMOVNE16rr; break;
2185    case X86::CMOVE32rr:  Opc = X86::CMOVNE32rr; break;
2186    case X86::CMOVE64rr:  Opc = X86::CMOVNE64rr; break;
2187    case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
2188    case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
2189    case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
2190    case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
2191    case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
2192    case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
2193    case X86::CMOVA16rr:  Opc = X86::CMOVBE16rr; break;
2194    case X86::CMOVA32rr:  Opc = X86::CMOVBE32rr; break;
2195    case X86::CMOVA64rr:  Opc = X86::CMOVBE64rr; break;
2196    case X86::CMOVL16rr:  Opc = X86::CMOVGE16rr; break;
2197    case X86::CMOVL32rr:  Opc = X86::CMOVGE32rr; break;
2198    case X86::CMOVL64rr:  Opc = X86::CMOVGE64rr; break;
2199    case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
2200    case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
2201    case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
2202    case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
2203    case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
2204    case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
2205    case X86::CMOVG16rr:  Opc = X86::CMOVLE16rr; break;
2206    case X86::CMOVG32rr:  Opc = X86::CMOVLE32rr; break;
2207    case X86::CMOVG64rr:  Opc = X86::CMOVLE64rr; break;
2208    case X86::CMOVS16rr:  Opc = X86::CMOVNS16rr; break;
2209    case X86::CMOVS32rr:  Opc = X86::CMOVNS32rr; break;
2210    case X86::CMOVS64rr:  Opc = X86::CMOVNS64rr; break;
2211    case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
2212    case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
2213    case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
2214    case X86::CMOVP16rr:  Opc = X86::CMOVNP16rr; break;
2215    case X86::CMOVP32rr:  Opc = X86::CMOVNP32rr; break;
2216    case X86::CMOVP64rr:  Opc = X86::CMOVNP64rr; break;
2217    case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
2218    case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
2219    case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
2220    case X86::CMOVO16rr:  Opc = X86::CMOVNO16rr; break;
2221    case X86::CMOVO32rr:  Opc = X86::CMOVNO32rr; break;
2222    case X86::CMOVO64rr:  Opc = X86::CMOVNO64rr; break;
2223    case X86::CMOVNO16rr: Opc = X86::CMOVO16rr; break;
2224    case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break;
2225    case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break;
2226    }
2227    if (NewMI) {
2228      MachineFunction &MF = *MI->getParent()->getParent();
2229      MI = MF.CloneMachineInstr(MI);
2230      NewMI = false;
2231    }
2232    MI->setDesc(get(Opc));
2233    // Fall through to the default case, which performs the actual commute.
2234  }
2235  default:
2236    return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
2237  }
2238}
2239
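/// GetCondFromBranchOpc - Return the condition code corresponding to the given
/// conditional branch opcode, or X86::COND_INVALID if it is not a recognized
/// conditional branch.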
2240static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
2241  switch (BrOpc) {
2242  default: return X86::COND_INVALID;
2243  case X86::JE_4:  return X86::COND_E;
2244  case X86::JNE_4: return X86::COND_NE;
2245  case X86::JL_4:  return X86::COND_L;
2246  case X86::JLE_4: return X86::COND_LE;
2247  case X86::JG_4:  return X86::COND_G;
2248  case X86::JGE_4: return X86::COND_GE;
2249  case X86::JB_4:  return X86::COND_B;
2250  case X86::JBE_4: return X86::COND_BE;
2251  case X86::JA_4:  return X86::COND_A;
2252  case X86::JAE_4: return X86::COND_AE;
2253  case X86::JS_4:  return X86::COND_S;
2254  case X86::JNS_4: return X86::COND_NS;
2255  case X86::JP_4:  return X86::COND_P;
2256  case X86::JNP_4: return X86::COND_NP;
2257  case X86::JO_4:  return X86::COND_O;
2258  case X86::JNO_4: return X86::COND_NO;
2259  }
2260}
2261
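/// GetCondBranchFromCond - Return the conditional branch opcode (in its long,
/// 4-byte-displacement form) that implements the given condition code.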
2262unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
2263  switch (CC) {
2264  default: llvm_unreachable("Illegal condition code!");
2265  case X86::COND_E:  return X86::JE_4;
2266  case X86::COND_NE: return X86::JNE_4;
2267  case X86::COND_L:  return X86::JL_4;
2268  case X86::COND_LE: return X86::JLE_4;
2269  case X86::COND_G:  return X86::JG_4;
2270  case X86::COND_GE: return X86::JGE_4;
2271  case X86::COND_B:  return X86::JB_4;
2272  case X86::COND_BE: return X86::JBE_4;
2273  case X86::COND_A:  return X86::JA_4;
2274  case X86::COND_AE: return X86::JAE_4;
2275  case X86::COND_S:  return X86::JS_4;
2276  case X86::COND_NS: return X86::JNS_4;
2277  case X86::COND_P:  return X86::JP_4;
2278  case X86::COND_NP: return X86::JNP_4;
2279  case X86::COND_O:  return X86::JO_4;
2280  case X86::COND_NO: return X86::JNO_4;
2281  }
2282}
2283
2284/// GetOppositeBranchCondition - Return the inverse of the specified condition,
2285/// e.g. turning COND_E to COND_NE.
2286X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
2287  switch (CC) {
2288  default: llvm_unreachable("Illegal condition code!");
2289  case X86::COND_E:  return X86::COND_NE;
2290  case X86::COND_NE: return X86::COND_E;
2291  case X86::COND_L:  return X86::COND_GE;
2292  case X86::COND_LE: return X86::COND_G;
2293  case X86::COND_G:  return X86::COND_LE;
2294  case X86::COND_GE: return X86::COND_L;
2295  case X86::COND_B:  return X86::COND_AE;
2296  case X86::COND_BE: return X86::COND_A;
2297  case X86::COND_A:  return X86::COND_BE;
2298  case X86::COND_AE: return X86::COND_B;
2299  case X86::COND_S:  return X86::COND_NS;
2300  case X86::COND_NS: return X86::COND_S;
2301  case X86::COND_P:  return X86::COND_NP;
2302  case X86::COND_NP: return X86::COND_P;
2303  case X86::COND_O:  return X86::COND_NO;
2304  case X86::COND_NO: return X86::COND_O;
2305  }
2306}
2307
2308bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
2309  if (!MI->isTerminator()) return false;
2310
2311  // Conditional branch is a special case.
2312  if (MI->isBranch() && !MI->isBarrier())
2313    return true;
2314  if (!MI->isPredicable())
2315    return true;
2316  return !isPredicated(MI);
2317}
2318
2319bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
2320                                 MachineBasicBlock *&TBB,
2321                                 MachineBasicBlock *&FBB,
2322                                 SmallVectorImpl<MachineOperand> &Cond,
2323                                 bool AllowModify) const {
2324  // Start from the bottom of the block and work up, examining the
2325  // terminator instructions.
2326  MachineBasicBlock::iterator I = MBB.end();
2327  MachineBasicBlock::iterator UnCondBrIter = MBB.end();
2328  while (I != MBB.begin()) {
2329    --I;
2330    if (I->isDebugValue())
2331      continue;
2332
2333    // Working from the bottom, when we see a non-terminator instruction, we're
2334    // done.
2335    if (!isUnpredicatedTerminator(I))
2336      break;
2337
2338    // A terminator that isn't a branch can't easily be handled by this
2339    // analysis.
2340    if (!I->isBranch())
2341      return true;
2342
2343    // Handle unconditional branches.
2344    if (I->getOpcode() == X86::JMP_4) {
2345      UnCondBrIter = I;
2346
2347      if (!AllowModify) {
2348        TBB = I->getOperand(0).getMBB();
2349        continue;
2350      }
2351
2352      // If the block has any instructions after a JMP, delete them.
2353      while (llvm::next(I) != MBB.end())
2354        llvm::next(I)->eraseFromParent();
2355
2356      Cond.clear();
2357      FBB = 0;
2358
2359      // Delete the JMP if it's equivalent to a fall-through.
2360      if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
2361        TBB = 0;
2362        I->eraseFromParent();
2363        I = MBB.end();
2364        UnCondBrIter = MBB.end();
2365        continue;
2366      }
2367
2368      // TBB is used to indicate the unconditional destination.
2369      TBB = I->getOperand(0).getMBB();
2370      continue;
2371    }
2372
2373    // Handle conditional branches.
2374    X86::CondCode BranchCode = GetCondFromBranchOpc(I->getOpcode());
2375    if (BranchCode == X86::COND_INVALID)
2376      return true;  // Can't handle indirect branch.
2377
2378    // Working from the bottom, handle the first conditional branch.
2379    if (Cond.empty()) {
2380      MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
2381      if (AllowModify && UnCondBrIter != MBB.end() &&
2382          MBB.isLayoutSuccessor(TargetBB)) {
2383        // If we can modify the code and it ends in something like:
2384        //
2385        //     jCC L1
2386        //     jmp L2
2387        //   L1:
2388        //     ...
2389        //   L2:
2390        //
2391        // Then we can change this to:
2392        //
2393        //     jnCC L2
2394        //   L1:
2395        //     ...
2396        //   L2:
2397        //
2398        // Which is a bit more efficient.
2399        // We conditionally jump to the fall-through block.
2400        BranchCode = GetOppositeBranchCondition(BranchCode);
2401        unsigned JNCC = GetCondBranchFromCond(BranchCode);
2402        MachineBasicBlock::iterator OldInst = I;
2403
2404        BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC))
2405          .addMBB(UnCondBrIter->getOperand(0).getMBB());
2406        BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_4))
2407          .addMBB(TargetBB);
2408
2409        OldInst->eraseFromParent();
2410        UnCondBrIter->eraseFromParent();
2411
2412        // Restart the analysis.
2413        UnCondBrIter = MBB.end();
2414        I = MBB.end();
2415        continue;
2416      }
2417
2418      FBB = TBB;
2419      TBB = I->getOperand(0).getMBB();
2420      Cond.push_back(MachineOperand::CreateImm(BranchCode));
2421      continue;
2422    }
2423
2424    // Handle subsequent conditional branches. Only handle the case where all
2425    // conditional branches branch to the same destination and their condition
2426    // opcodes fit one of the special multi-branch idioms.
2427    assert(Cond.size() == 1);
2428    assert(TBB);
2429
2430    // Bail out if this conditional branch targets a different destination;
2431    // the idioms below require a common target.
2432    if (TBB != I->getOperand(0).getMBB())
2433      return true;
2434
2435    // If the conditions are the same, we can leave them alone.
2436    X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
2437    if (OldBranchCode == BranchCode)
2438      continue;
2439
2440    // If they differ, see if they fit one of the known patterns. Theoretically,
2441    // we could handle more patterns here, but we shouldn't expect to see them
2442    // if instruction selection has done a reasonable job.
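    // For example, "jnp/je" or "jne/jp" pairs branching to a single target are
    // folded into the pseudo conditions COND_NP_OR_E / COND_NE_OR_P, which
    // InsertBranch below re-expands into the same two-branch sequence.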
2443    if ((OldBranchCode == X86::COND_NP &&
2444         BranchCode == X86::COND_E) ||
2445        (OldBranchCode == X86::COND_E &&
2446         BranchCode == X86::COND_NP))
2447      BranchCode = X86::COND_NP_OR_E;
2448    else if ((OldBranchCode == X86::COND_P &&
2449              BranchCode == X86::COND_NE) ||
2450             (OldBranchCode == X86::COND_NE &&
2451              BranchCode == X86::COND_P))
2452      BranchCode = X86::COND_NE_OR_P;
2453    else
2454      return true;
2455
2456    // Update the MachineOperand.
2457    Cond[0].setImm(BranchCode);
2458  }
2459
2460  return false;
2461}
2462
2463unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
2464  MachineBasicBlock::iterator I = MBB.end();
2465  unsigned Count = 0;
2466
2467  while (I != MBB.begin()) {
2468    --I;
2469    if (I->isDebugValue())
2470      continue;
2471    if (I->getOpcode() != X86::JMP_4 &&
2472        GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
2473      break;
2474    // Remove the branch.
2475    I->eraseFromParent();
2476    I = MBB.end();
2477    ++Count;
2478  }
2479
2480  return Count;
2481}
2482
2483unsigned
2484X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
2485                           MachineBasicBlock *FBB,
2486                           const SmallVectorImpl<MachineOperand> &Cond,
2487                           DebugLoc DL) const {
2488  // Shouldn't be a fall through.
2489  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
2490  assert((Cond.size() == 1 || Cond.size() == 0) &&
2491         "X86 branch conditions have one component!");
2492
2493  if (Cond.empty()) {
2494    // Unconditional branch?
2495    assert(!FBB && "Unconditional branch with multiple successors!");
2496    BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(TBB);
2497    return 1;
2498  }
2499
2500  // Conditional branch.
2501  unsigned Count = 0;
2502  X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
2503  switch (CC) {
2504  case X86::COND_NP_OR_E:
2505    // Synthesize NP_OR_E with two branches.
2506    BuildMI(&MBB, DL, get(X86::JNP_4)).addMBB(TBB);
2507    ++Count;
2508    BuildMI(&MBB, DL, get(X86::JE_4)).addMBB(TBB);
2509    ++Count;
2510    break;
2511  case X86::COND_NE_OR_P:
2512    // Synthesize NE_OR_P with two branches.
2513    BuildMI(&MBB, DL, get(X86::JNE_4)).addMBB(TBB);
2514    ++Count;
2515    BuildMI(&MBB, DL, get(X86::JP_4)).addMBB(TBB);
2516    ++Count;
2517    break;
2518  default: {
2519    unsigned Opc = GetCondBranchFromCond(CC);
2520    BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
2521    ++Count;
2522  }
2523  }
2524  if (FBB) {
2525    // Two-way conditional branch. Insert the second branch.
2526    BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(FBB);
2527    ++Count;
2528  }
2529  return Count;
2530}
2531
2532/// isHReg - Test if the given register is a physical h register.
2533static bool isHReg(unsigned Reg) {
2534  return X86::GR8_ABCD_HRegClass.contains(Reg);
2535}
2536
2537// Return the opcode for a VR128/VR64 <-> GR64 copy, or 0 if there is none.
2538static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
2539                                        bool HasAVX) {
2540  // SrcReg(VR128) -> DestReg(GR64)
2541  // SrcReg(VR64)  -> DestReg(GR64)
2542  // SrcReg(GR64)  -> DestReg(VR128)
2543  // SrcReg(GR64)  -> DestReg(VR64)
2544
2545  if (X86::GR64RegClass.contains(DestReg)) {
2546    if (X86::VR128RegClass.contains(SrcReg)) {
2547      // Copy from a VR128 register to a GR64 register.
2548      return HasAVX ? X86::VMOVPQIto64rr : X86::MOVPQIto64rr;
2549    } else if (X86::VR64RegClass.contains(SrcReg)) {
2550      // Copy from a VR64 register to a GR64 register.
2551      return X86::MOVSDto64rr;
2552    }
2553  } else if (X86::GR64RegClass.contains(SrcReg)) {
2554    // Copy from a GR64 register to a VR128 register.
2555    if (X86::VR128RegClass.contains(DestReg))
2556      return HasAVX ? X86::VMOV64toPQIrr : X86::MOV64toPQIrr;
2557    // Copy from a GR64 register to a VR64 register.
2558    else if (X86::VR64RegClass.contains(DestReg))
2559      return X86::MOV64toSDrr;
2560  }
2561
2562  // SrcReg(FR32) -> DestReg(GR32)
2563  // SrcReg(GR32) -> DestReg(FR32)
2564
2565  if (X86::GR32RegClass.contains(DestReg) && X86::FR32RegClass.contains(SrcReg))
2566      // Copy from a FR32 register to a GR32 register.
2567      return HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr;
2568
2569  if (X86::FR32RegClass.contains(DestReg) && X86::GR32RegClass.contains(SrcReg))
2570      // Copy from a GR32 register to a FR32 register.
2571      return HasAVX ? X86::VMOVDI2SSrr : X86::MOVDI2SSrr;
2572
2573  return 0;
2574}
2575
2576void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
2577                               MachineBasicBlock::iterator MI, DebugLoc DL,
2578                               unsigned DestReg, unsigned SrcReg,
2579                               bool KillSrc) const {
2580  // First deal with the normal symmetric copies.
2581  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
2582  unsigned Opc = 0;
2583  if (X86::GR64RegClass.contains(DestReg, SrcReg))
2584    Opc = X86::MOV64rr;
2585  else if (X86::GR32RegClass.contains(DestReg, SrcReg))
2586    Opc = X86::MOV32rr;
2587  else if (X86::GR16RegClass.contains(DestReg, SrcReg))
2588    Opc = X86::MOV16rr;
2589  else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
2590    // Copying to or from a physical H register on x86-64 requires a NOREX
2591    // move.  Otherwise use a normal move.
2592    if ((isHReg(DestReg) || isHReg(SrcReg)) &&
2593        TM.getSubtarget<X86Subtarget>().is64Bit()) {
2594      Opc = X86::MOV8rr_NOREX;
2595      // Both operands must be encodable without a REX prefix.
2596      assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
2597             "8-bit H register can not be copied outside GR8_NOREX");
2598    } else
2599      Opc = X86::MOV8rr;
2600  } else if (X86::VR128RegClass.contains(DestReg, SrcReg))
2601    Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
2602  else if (X86::VR256RegClass.contains(DestReg, SrcReg))
2603    Opc = X86::VMOVAPSYrr;
2604  else if (X86::VR64RegClass.contains(DestReg, SrcReg))
2605    Opc = X86::MMX_MOVQ64rr;
2606  else
2607    Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, HasAVX);
2608
2609  if (Opc) {
2610    BuildMI(MBB, MI, DL, get(Opc), DestReg)
2611      .addReg(SrcReg, getKillRegState(KillSrc));
2612    return;
2613  }
2614
2615  // Moving EFLAGS to / from another register requires a push and a pop.
2616  if (SrcReg == X86::EFLAGS) {
2617    if (X86::GR64RegClass.contains(DestReg)) {
2618      BuildMI(MBB, MI, DL, get(X86::PUSHF64));
2619      BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg);
2620      return;
2621    } else if (X86::GR32RegClass.contains(DestReg)) {
2622      BuildMI(MBB, MI, DL, get(X86::PUSHF32));
2623      BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg);
2624      return;
2625    }
2626  }
2627  if (DestReg == X86::EFLAGS) {
2628    if (X86::GR64RegClass.contains(SrcReg)) {
2629      BuildMI(MBB, MI, DL, get(X86::PUSH64r))
2630        .addReg(SrcReg, getKillRegState(KillSrc));
2631      BuildMI(MBB, MI, DL, get(X86::POPF64));
2632      return;
2633    } else if (X86::GR32RegClass.contains(SrcReg)) {
2634      BuildMI(MBB, MI, DL, get(X86::PUSH32r))
2635        .addReg(SrcReg, getKillRegState(KillSrc));
2636      BuildMI(MBB, MI, DL, get(X86::POPF32));
2637      return;
2638    }
2639  }
2640
2641  DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg)
2642               << " to " << RI.getName(DestReg) << '\n');
2643  llvm_unreachable("Cannot emit physreg copy instruction");
2644}
2645
2646static unsigned getLoadStoreRegOpcode(unsigned Reg,
2647                                      const TargetRegisterClass *RC,
2648                                      bool isStackAligned,
2649                                      const TargetMachine &TM,
2650                                      bool load) {
2651  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
2652  switch (RC->getSize()) {
2653  default:
2654    llvm_unreachable("Unknown spill size");
2655  case 1:
2656    assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
2657    if (TM.getSubtarget<X86Subtarget>().is64Bit())
2658      // Copying to or from a physical H register on x86-64 requires a NOREX
2659      // move.  Otherwise use a normal move.
2660      if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
2661        return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
2662    return load ? X86::MOV8rm : X86::MOV8mr;
2663  case 2:
2664    assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
2665    return load ? X86::MOV16rm : X86::MOV16mr;
2666  case 4:
2667    if (X86::GR32RegClass.hasSubClassEq(RC))
2668      return load ? X86::MOV32rm : X86::MOV32mr;
2669    if (X86::FR32RegClass.hasSubClassEq(RC))
2670      return load ?
2671        (HasAVX ? X86::VMOVSSrm : X86::MOVSSrm) :
2672        (HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
2673    if (X86::RFP32RegClass.hasSubClassEq(RC))
2674      return load ? X86::LD_Fp32m : X86::ST_Fp32m;
2675    llvm_unreachable("Unknown 4-byte regclass");
2676  case 8:
2677    if (X86::GR64RegClass.hasSubClassEq(RC))
2678      return load ? X86::MOV64rm : X86::MOV64mr;
2679    if (X86::FR64RegClass.hasSubClassEq(RC))
2680      return load ?
2681        (HasAVX ? X86::VMOVSDrm : X86::MOVSDrm) :
2682        (HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
2683    if (X86::VR64RegClass.hasSubClassEq(RC))
2684      return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
2685    if (X86::RFP64RegClass.hasSubClassEq(RC))
2686      return load ? X86::LD_Fp64m : X86::ST_Fp64m;
2687    llvm_unreachable("Unknown 8-byte regclass");
2688  case 10:
2689    assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
2690    return load ? X86::LD_Fp80m : X86::ST_FpP80m;
2691  case 16: {
2692    assert(X86::VR128RegClass.hasSubClassEq(RC) && "Unknown 16-byte regclass");
2693    // If the stack is realigned we can use aligned loads and stores.
2694    if (isStackAligned)
2695      return load ?
2696        (HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm) :
2697        (HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
2698    else
2699      return load ?
2700        (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm) :
2701        (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
2702  }
2703  case 32:
2704    assert(X86::VR256RegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
2705    // If the stack is realigned we can use aligned loads and stores.
2706    if (isStackAligned)
2707      return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr;
2708    else
2709      return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr;
2710  }
2711}
2712
2713static unsigned getStoreRegOpcode(unsigned SrcReg,
2714                                  const TargetRegisterClass *RC,
2715                                  bool isStackAligned,
2716                                  const TargetMachine &TM) {
2717  return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, TM, false);
2718}
2719
2720
2721static unsigned getLoadRegOpcode(unsigned DestReg,
2722                                 const TargetRegisterClass *RC,
2723                                 bool isStackAligned,
2724                                 const TargetMachine &TM) {
2725  return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, TM, true);
2726}
2727
2728void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
2729                                       MachineBasicBlock::iterator MI,
2730                                       unsigned SrcReg, bool isKill, int FrameIdx,
2731                                       const TargetRegisterClass *RC,
2732                                       const TargetRegisterInfo *TRI) const {
2733  const MachineFunction &MF = *MBB.getParent();
2734  assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() &&
2735         "Stack slot too small for store");
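  // Spills of 32-byte register classes (YMM) need 32-byte alignment; all
  // smaller classes are spilled with 16-byte alignment.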
2736  unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
2737  bool isAligned = (TM.getFrameLowering()->getStackAlignment() >= Alignment) ||
2738    RI.canRealignStack(MF);
2739  unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
2740  DebugLoc DL = MBB.findDebugLoc(MI);
2741  addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx)
2742    .addReg(SrcReg, getKillRegState(isKill));
2743}
2744
2745void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
2746                                  bool isKill,
2747                                  SmallVectorImpl<MachineOperand> &Addr,
2748                                  const TargetRegisterClass *RC,
2749                                  MachineInstr::mmo_iterator MMOBegin,
2750                                  MachineInstr::mmo_iterator MMOEnd,
2751                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
2752  unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
2753  bool isAligned = MMOBegin != MMOEnd &&
2754                   (*MMOBegin)->getAlignment() >= Alignment;
2755  unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
2756  DebugLoc DL;
2757  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
2758  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
2759    MIB.addOperand(Addr[i]);
2760  MIB.addReg(SrcReg, getKillRegState(isKill));
2761  (*MIB).setMemRefs(MMOBegin, MMOEnd);
2762  NewMIs.push_back(MIB);
2763}
2764
2765
2766void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
2767                                        MachineBasicBlock::iterator MI,
2768                                        unsigned DestReg, int FrameIdx,
2769                                        const TargetRegisterClass *RC,
2770                                        const TargetRegisterInfo *TRI) const {
2771  const MachineFunction &MF = *MBB.getParent();
2772  unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
2773  bool isAligned = (TM.getFrameLowering()->getStackAlignment() >= Alignment) ||
2774    RI.canRealignStack(MF);
2775  unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
2776  DebugLoc DL = MBB.findDebugLoc(MI);
2777  addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
2778}
2779
2780void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
2781                                 SmallVectorImpl<MachineOperand> &Addr,
2782                                 const TargetRegisterClass *RC,
2783                                 MachineInstr::mmo_iterator MMOBegin,
2784                                 MachineInstr::mmo_iterator MMOEnd,
2785                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
2786  unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
2787  bool isAligned = MMOBegin != MMOEnd &&
2788                   (*MMOBegin)->getAlignment() >= Alignment;
2789  unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
2790  DebugLoc DL;
2791  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
2792  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
2793    MIB.addOperand(Addr[i]);
2794  (*MIB).setMemRefs(MMOBegin, MMOEnd);
2795  NewMIs.push_back(MIB);
2796}
2797
2798/// Expand2AddrUndef - Expand a single-def pseudo instruction to a two-addr
2799/// instruction with two undef reads of the register being defined.  This is
2800/// used for mapping:
2801///   %xmm4 = V_SET0
2802/// to:
2803///   %xmm4 = PXORrr %xmm4<undef>, %xmm4<undef>
2804///
2805static bool Expand2AddrUndef(MachineInstr *MI, const MCInstrDesc &Desc) {
2806  assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
2807  unsigned Reg = MI->getOperand(0).getReg();
2808  MI->setDesc(Desc);
2809
2810  // MachineInstr::addOperand() will insert explicit operands before any
2811  // implicit operands.
2812  MachineInstrBuilder(MI).addReg(Reg, RegState::Undef)
2813                         .addReg(Reg, RegState::Undef);
2814  // But we don't trust that.
2815  assert(MI->getOperand(1).getReg() == Reg &&
2816         MI->getOperand(2).getReg() == Reg && "Misplaced operand");
2817  return true;
2818}
2819
2820bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
2821  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
2822  switch (MI->getOpcode()) {
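  // V_SET0 and the FsFLD0 pseudos all materialize an all-zero bit pattern
  // (+0.0 for the scalar FP cases), so a self-xor of the register works for
  // each of them.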
2823  case X86::V_SET0:
2824  case X86::FsFLD0SS:
2825  case X86::FsFLD0SD:
2826    return Expand2AddrUndef(MI, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
2827  case X86::TEST8ri_NOREX:
2828    MI->setDesc(get(X86::TEST8ri));
2829    return true;
2830  }
2831  return false;
2832}
2833
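/// emitFrameIndexDebugValue - Build a DBG_VALUE instruction whose location is
/// the given frame index, with the given offset and variable metadata (MDPtr).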
2834MachineInstr*
2835X86InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
2836                                       int FrameIx, uint64_t Offset,
2837                                       const MDNode *MDPtr,
2838                                       DebugLoc DL) const {
2839  X86AddressMode AM;
2840  AM.BaseType = X86AddressMode::FrameIndexBase;
2841  AM.Base.FrameIndex = FrameIx;
2842  MachineInstrBuilder MIB = BuildMI(MF, DL, get(X86::DBG_VALUE));
2843  addFullAddress(MIB, AM).addImm(Offset).addMetadata(MDPtr);
2844  return &*MIB;
2845}
2846
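/// FuseTwoAddrInst - Build a new instruction with the given memory-form
/// Opcode, replacing the tied def/use register pair of the two-address
/// instruction MI with the address described by MOs.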
2847static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
2848                                     const SmallVectorImpl<MachineOperand> &MOs,
2849                                     MachineInstr *MI,
2850                                     const TargetInstrInfo &TII) {
2851  // Create the base instruction with the memory operand as the first part.
2852  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
2853                                              MI->getDebugLoc(), true);
2854  MachineInstrBuilder MIB(NewMI);
2855  unsigned NumAddrOps = MOs.size();
2856  for (unsigned i = 0; i != NumAddrOps; ++i)
2857    MIB.addOperand(MOs[i]);
2858  if (NumAddrOps < 4)  // FrameIndex only
2859    addOffset(MIB, 0);
2860
2861  // Loop over the rest of the ri operands, converting them over.
2862  unsigned NumOps = MI->getDesc().getNumOperands()-2;
2863  for (unsigned i = 0; i != NumOps; ++i) {
2864    MachineOperand &MO = MI->getOperand(i+2);
2865    MIB.addOperand(MO);
2866  }
2867  for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
2868    MachineOperand &MO = MI->getOperand(i);
2869    MIB.addOperand(MO);
2870  }
2871  return MIB;
2872}
2873
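/// FuseInst - Build a new instruction with the given memory-form Opcode,
/// replacing register operand OpNo of MI with the address described by MOs.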
2874static MachineInstr *FuseInst(MachineFunction &MF,
2875                              unsigned Opcode, unsigned OpNo,
2876                              const SmallVectorImpl<MachineOperand> &MOs,
2877                              MachineInstr *MI, const TargetInstrInfo &TII) {
2878  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
2879                                              MI->getDebugLoc(), true);
2880  MachineInstrBuilder MIB(NewMI);
2881
2882  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2883    MachineOperand &MO = MI->getOperand(i);
2884    if (i == OpNo) {
2885      assert(MO.isReg() && "Expected to fold into reg operand!");
2886      unsigned NumAddrOps = MOs.size();
2887      for (unsigned i = 0; i != NumAddrOps; ++i)
2888        MIB.addOperand(MOs[i]);
2889      if (NumAddrOps < 4)  // FrameIndex only
2890        addOffset(MIB, 0);
2891    } else {
2892      MIB.addOperand(MO);
2893    }
2894  }
2895  return MIB;
2896}
2897
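/// MakeM0Inst - Build an instruction with the given Opcode that stores an
/// immediate zero to the address described by MOs. This is used to fold the
/// MOVr0 pseudos directly into stores.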
2898static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
2899                                const SmallVectorImpl<MachineOperand> &MOs,
2900                                MachineInstr *MI) {
2901  MachineFunction &MF = *MI->getParent()->getParent();
2902  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode));
2903
2904  unsigned NumAddrOps = MOs.size();
2905  for (unsigned i = 0; i != NumAddrOps; ++i)
2906    MIB.addOperand(MOs[i]);
2907  if (NumAddrOps < 4)  // FrameIndex only
2908    addOffset(MIB, 0);
2909  return MIB.addImm(0);
2910}
2911
2912MachineInstr*
2913X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
2914                                    MachineInstr *MI, unsigned i,
2915                                    const SmallVectorImpl<MachineOperand> &MOs,
2916                                    unsigned Size, unsigned Align) const {
2917  const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0;
2918  bool isTwoAddrFold = false;
2919  unsigned NumOps = MI->getDesc().getNumOperands();
2920  bool isTwoAddr = NumOps > 1 &&
2921    MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
2922
2923  // FIXME: AsmPrinter doesn't know how to handle
2924  // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
2925  if (MI->getOpcode() == X86::ADD32ri &&
2926      MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
2927    return NULL;
2928
2929  MachineInstr *NewMI = NULL;
2930  // Folding a memory location into the two-address part of a two-address
2931  // instruction is different from folding it elsewhere.  It requires
2932  // replacing the *two* registers with the memory location.
2933  if (isTwoAddr && NumOps >= 2 && i < 2 &&
2934      MI->getOperand(0).isReg() &&
2935      MI->getOperand(1).isReg() &&
2936      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
2937    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
2938    isTwoAddrFold = true;
2939  } else if (i == 0) { // If operand 0
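    // A MOVr0 pseudo whose register is being folded becomes a store of
    // immediate zero, e.g. %reg = MOV32r0 turns into MOV32mi <mem>, 0.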
2940    if (MI->getOpcode() == X86::MOV64r0)
2941      NewMI = MakeM0Inst(*this, X86::MOV64mi32, MOs, MI);
2942    else if (MI->getOpcode() == X86::MOV32r0)
2943      NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
2944    else if (MI->getOpcode() == X86::MOV16r0)
2945      NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI);
2946    else if (MI->getOpcode() == X86::MOV8r0)
2947      NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
2948    if (NewMI)
2949      return NewMI;
2950
2951    OpcodeTablePtr = &RegOp2MemOpTable0;
2952  } else if (i == 1) {
2953    OpcodeTablePtr = &RegOp2MemOpTable1;
2954  } else if (i == 2) {
2955    OpcodeTablePtr = &RegOp2MemOpTable2;
2956  }
2957
2958  // If table selected...
2959  if (OpcodeTablePtr) {
2960    // Find the Opcode to fuse
2961    DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
2962      OpcodeTablePtr->find(MI->getOpcode());
2963    if (I != OpcodeTablePtr->end()) {
2964      unsigned Opcode = I->second.first;
2965      unsigned MinAlign = (I->second.second & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT;
2966      if (Align < MinAlign)
2967        return NULL;
2968      bool NarrowToMOV32rm = false;
2969      if (Size) {
2970        unsigned RCSize = getRegClass(MI->getDesc(), i, &RI, MF)->getSize();
2971        if (Size < RCSize) {
2972          // Check if it's safe to fold the load. If the size of the object is
2973          // narrower than the load width, then it's not.
2974          if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
2975            return NULL;
2976          // If this is a 64-bit load but the spill slot is only 32 bits, we
2977          // can do a 32-bit load which is implicitly zero-extended. This is
2978          // likely due to LiveIntervals rematerializing a load from a stack slot.
2979          if (MI->getOperand(0).getSubReg() || MI->getOperand(1).getSubReg())
2980            return NULL;
2981          Opcode = X86::MOV32rm;
2982          NarrowToMOV32rm = true;
2983        }
2984      }
2985
2986      if (isTwoAddrFold)
2987        NewMI = FuseTwoAddrInst(MF, Opcode, MOs, MI, *this);
2988      else
2989        NewMI = FuseInst(MF, Opcode, i, MOs, MI, *this);
2990
2991      if (NarrowToMOV32rm) {
2992        // This is the special case where we use a MOV32rm to load a 32-bit
2993        // value and zero-extend the top bits; change the destination
2994        // register to a 32-bit one.
2995        unsigned DstReg = NewMI->getOperand(0).getReg();
2996        if (TargetRegisterInfo::isPhysicalRegister(DstReg))
2997          NewMI->getOperand(0).setReg(RI.getSubReg(DstReg,
2998                                                   X86::sub_32bit));
2999        else
3000          NewMI->getOperand(0).setSubReg(X86::sub_32bit);
3001      }
3002      return NewMI;
3003    }
3004  }
3005
3006  // No fusion
3007  if (PrintFailedFusing && !MI->isCopy())
3008    dbgs() << "We failed to fuse operand " << i << " in " << *MI;
3009  return NULL;
3010}
3011
3012/// hasPartialRegUpdate - Return true for all instructions that only update
3013/// the first 32 or 64 bits of the destination register and leave the rest
3014/// unmodified. This can be used to avoid folding loads if the instructions
3015/// only update part of the destination register, and the non-updated part is
3016/// not needed, e.g. cvtss2sd, sqrtss. Unfolding the load from these
3017/// instructions breaks the partial register dependency, which can improve
3018/// performance, e.g.:
3019///
3020///   movss (%rdi), %xmm0
3021///   cvtss2sd %xmm0, %xmm0
3022///
3023/// Instead of
3024///   cvtss2sd (%rdi), %xmm0
3025///
3026/// FIXME: This should be turned into a TSFlags.
3027///
3028static bool hasPartialRegUpdate(unsigned Opcode) {
3029  switch (Opcode) {
3030  case X86::CVTSI2SSrr:
3031  case X86::CVTSI2SS64rr:
3032  case X86::CVTSI2SDrr:
3033  case X86::CVTSI2SD64rr:
3034  case X86::CVTSD2SSrr:
3035  case X86::Int_CVTSD2SSrr:
3036  case X86::CVTSS2SDrr:
3037  case X86::Int_CVTSS2SDrr:
3038  case X86::RCPSSr:
3039  case X86::RCPSSr_Int:
3040  case X86::ROUNDSDr:
3041  case X86::ROUNDSDr_Int:
3042  case X86::ROUNDSSr:
3043  case X86::ROUNDSSr_Int:
3044  case X86::RSQRTSSr:
3045  case X86::RSQRTSSr_Int:
3046  case X86::SQRTSSr:
3047  case X86::SQRTSSr_Int:
3048  // AVX encoded versions
3049  case X86::VCVTSD2SSrr:
3050  case X86::Int_VCVTSD2SSrr:
3051  case X86::VCVTSS2SDrr:
3052  case X86::Int_VCVTSS2SDrr:
3053  case X86::VRCPSSr:
3054  case X86::VROUNDSDr:
3055  case X86::VROUNDSDr_Int:
3056  case X86::VROUNDSSr:
3057  case X86::VROUNDSSr_Int:
3058  case X86::VRSQRTSSr:
3059  case X86::VSQRTSSr:
3060    return true;
3061  }
3062
3063  return false;
3064}
3065
3066/// getPartialRegUpdateClearance - Inform the ExeDepsFix pass how many idle
3067/// instructions we would like before a partial register update.
3068unsigned X86InstrInfo::
3069getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
3070                             const TargetRegisterInfo *TRI) const {
3071  if (OpNum != 0 || !hasPartialRegUpdate(MI->getOpcode()))
3072    return 0;
3073
3074  // If MI is marked as reading Reg, the partial register update is wanted.
3075  const MachineOperand &MO = MI->getOperand(0);
3076  unsigned Reg = MO.getReg();
3077  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
3078    if (MO.readsReg() || MI->readsVirtualRegister(Reg))
3079      return 0;
3080  } else {
3081    if (MI->readsRegister(Reg, TRI))
3082      return 0;
3083  }
3084
3085  // If any of the preceding 16 instructions are reading Reg, insert a
3086  // dependency-breaking instruction.  The magic number is based on a few
3087  // Nehalem experiments.
3088  return 16;
3089}
3090
3091void X86InstrInfo::
3092breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
3093                          const TargetRegisterInfo *TRI) const {
3094  unsigned Reg = MI->getOperand(OpNum).getReg();
3095  if (X86::VR128RegClass.contains(Reg)) {
3096    // These instructions are all floating point domain, so xorps is the best
3097    // choice.
3098    bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
3099    unsigned Opc = HasAVX ? X86::VXORPSrr : X86::XORPSrr;
3100    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(Opc), Reg)
3101      .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
3102  } else if (X86::VR256RegClass.contains(Reg)) {
3103    // Use vxorps to clear the full ymm register.
3104    // It wants to read and write the xmm sub-register.
3105    unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm);
3106    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(X86::VXORPSrr), XReg)
3107      .addReg(XReg, RegState::Undef).addReg(XReg, RegState::Undef)
3108      .addReg(Reg, RegState::ImplicitDefine);
3109  } else
3110    return;
3111  MI->addRegisterKilled(Reg, TRI, true);
3112}
3113
3114MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
3115                                                  MachineInstr *MI,
3116                                           const SmallVectorImpl<unsigned> &Ops,
3117                                                  int FrameIndex) const {
3118  // Check switch flag
3119  if (NoFusing) return NULL;
3120
3121  // Unless optimizing for size, don't fold, in order to avoid partial
3122  // register update stalls.
3123  if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize) &&
3124      hasPartialRegUpdate(MI->getOpcode()))
3125    return NULL;
3126
3127  const MachineFrameInfo *MFI = MF.getFrameInfo();
3128  unsigned Size = MFI->getObjectSize(FrameIndex);
3129  unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
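  // TESTrr reads the same register through both of its operands; when both
  // are being folded, first rewrite it as CMPri reg, 0 so that only a single
  // operand needs the memory form.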
3130  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
3131    unsigned NewOpc = 0;
3132    unsigned RCSize = 0;
3133    switch (MI->getOpcode()) {
3134    default: return NULL;
3135    case X86::TEST8rr:  NewOpc = X86::CMP8ri; RCSize = 1; break;
3136    case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
3137    case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
3138    case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
3139    }
3140    // Check if it's safe to fold the load. If the size of the object is
3141    // narrower than the load width, then it's not.
3142    if (Size < RCSize)
3143      return NULL;
3144    // Change to CMPXXri r, 0 first.
3145    MI->setDesc(get(NewOpc));
3146    MI->getOperand(1).ChangeToImmediate(0);
3147  } else if (Ops.size() != 1)
3148    return NULL;
3149
3150  SmallVector<MachineOperand,4> MOs;
3151  MOs.push_back(MachineOperand::CreateFI(FrameIndex));
3152  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, Size, Alignment);
3153}
3154
3155MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
3156                                                  MachineInstr *MI,
3157                                           const SmallVectorImpl<unsigned> &Ops,
3158                                                  MachineInstr *LoadMI) const {
3159  // Check switch flag
3160  if (NoFusing) return NULL;
3161
3162  // Unless optimizing for size, don't fold, in order to avoid partial
3163  // register update stalls.
3164  if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize) &&
3165      hasPartialRegUpdate(MI->getOpcode()))
3166    return NULL;
3167
3168  // Determine the alignment of the load.
3169  unsigned Alignment = 0;
3170  if (LoadMI->hasOneMemOperand())
3171    Alignment = (*LoadMI->memoperands_begin())->getAlignment();
3172  else
3173    switch (LoadMI->getOpcode()) {
3174    case X86::AVX_SET0PSY:
3175    case X86::AVX_SET0PDY:
3176    case X86::AVX2_SETALLONES:
3177    case X86::AVX2_SET0:
3178      Alignment = 32;
3179      break;
3180    case X86::V_SET0:
3181    case X86::V_SETALLONES:
3182    case X86::AVX_SETALLONES:
3183      Alignment = 16;
3184      break;
3185    case X86::FsFLD0SD:
3186      Alignment = 8;
3187      break;
3188    case X86::FsFLD0SS:
3189      Alignment = 4;
3190      break;
3191    default:
3192      return NULL;
3193    }
3194  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
3195    unsigned NewOpc = 0;
3196    switch (MI->getOpcode()) {
3197    default: return NULL;
3198    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
3199    case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
3200    case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
3201    case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
3202    }
3203    // Change to CMPXXri r, 0 first.
3204    MI->setDesc(get(NewOpc));
3205    MI->getOperand(1).ChangeToImmediate(0);
3206  } else if (Ops.size() != 1)
3207    return NULL;
3208
3209  // Make sure the subregisters match.
3210  // Otherwise we risk changing the size of the load.
3211  if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg())
3212    return NULL;
3213
3214  SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
3215  switch (LoadMI->getOpcode()) {
3216  case X86::V_SET0:
3217  case X86::V_SETALLONES:
3218  case X86::AVX_SET0PSY:
3219  case X86::AVX_SET0PDY:
3220  case X86::AVX_SETALLONES:
3221  case X86::AVX2_SETALLONES:
3222  case X86::AVX2_SET0:
3223  case X86::FsFLD0SD:
3224  case X86::FsFLD0SS: {
3225    // Fold a V_SET0 or V_SETALLONES as a load to ease register pressure.
3226    // Create a constant-pool entry and operands to load from it.
3227
3228    // The medium and large code models can't fold loads this way.
3229    if (TM.getCodeModel() != CodeModel::Small &&
3230        TM.getCodeModel() != CodeModel::Kernel)
3231      return NULL;
3232
3233    // x86-32 PIC requires a PIC base register for constant pools.
3234    unsigned PICBase = 0;
3235    if (TM.getRelocationModel() == Reloc::PIC_) {
3236      if (TM.getSubtarget<X86Subtarget>().is64Bit())
3237        PICBase = X86::RIP;
3238      else
3239        // FIXME: PICBase = getGlobalBaseReg(&MF);
3240        // This doesn't work for several reasons.
3241        // 1. GlobalBaseReg may have been spilled.
3242        // 2. It may not be live at MI.
3243        return NULL;
3244    }
3245
3246    // Create a constant-pool entry.
3247    MachineConstantPool &MCP = *MF.getConstantPool();
3248    Type *Ty;
3249    unsigned Opc = LoadMI->getOpcode();
3250    if (Opc == X86::FsFLD0SS)
3251      Ty = Type::getFloatTy(MF.getFunction()->getContext());
3252    else if (Opc == X86::FsFLD0SD)
3253      Ty = Type::getDoubleTy(MF.getFunction()->getContext());
3254    else if (Opc == X86::AVX_SET0PSY || Opc == X86::AVX_SET0PDY)
3255      Ty = VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 8);
3256    else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX2_SET0)
3257      Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8);
3258    else
3259      Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
3260
3261    bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX_SETALLONES ||
3262                      Opc == X86::AVX2_SETALLONES);
3263    const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
3264                                    Constant::getNullValue(Ty);
3265    unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
3266
3267    // Create operands to load from the constant pool entry.
3268    MOs.push_back(MachineOperand::CreateReg(PICBase, false));
3269    MOs.push_back(MachineOperand::CreateImm(1));
3270    MOs.push_back(MachineOperand::CreateReg(0, false));
3271    MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
3272    MOs.push_back(MachineOperand::CreateReg(0, false));
3273    break;
3274  }
3275  default: {
3276    // Folding a normal load. Just copy the load's address operands.
3277    unsigned NumOps = LoadMI->getDesc().getNumOperands();
3278    for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
3279      MOs.push_back(LoadMI->getOperand(i));
3280    break;
3281  }
3282  }
3283  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, 0, Alignment);
3284}
3285
3286
3287bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
3288                                  const SmallVectorImpl<unsigned> &Ops) const {
3289  // Check switch flag
3290  if (NoFusing) return false;
3291
3292  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
3293    switch (MI->getOpcode()) {
3294    default: return false;
3295    case X86::TEST8rr:
3296    case X86::TEST16rr:
3297    case X86::TEST32rr:
3298    case X86::TEST64rr:
3299      return true;
3300    case X86::ADD32ri:
3301      // FIXME: AsmPrinter doesn't know how to handle
3302      // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
3303      if (MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
3304        return false;
3305      break;
3306    }
3307  }
3308
3309  if (Ops.size() != 1)
3310    return false;
3311
3312  unsigned OpNum = Ops[0];
3313  unsigned Opc = MI->getOpcode();
3314  unsigned NumOps = MI->getDesc().getNumOperands();
3315  bool isTwoAddr = NumOps > 1 &&
3316    MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
3317
3318  // Folding a memory location into the two-address part of a two-address
3319  // instruction is different from folding it elsewhere.  It requires
3320  // replacing the *two* registers with the memory location.
3321  const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0;
3322  if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
3323    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
3324  } else if (OpNum == 0) { // If operand 0
3325    switch (Opc) {
3326    case X86::MOV8r0:
3327    case X86::MOV16r0:
3328    case X86::MOV32r0:
3329    case X86::MOV64r0: return true;
3330    default: break;
3331    }
3332    OpcodeTablePtr = &RegOp2MemOpTable0;
3333  } else if (OpNum == 1) {
3334    OpcodeTablePtr = &RegOp2MemOpTable1;
3335  } else if (OpNum == 2) {
3336    OpcodeTablePtr = &RegOp2MemOpTable2;
3337  }
3338
3339  if (OpcodeTablePtr && OpcodeTablePtr->count(Opc))
3340    return true;
3341  return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
3342}
3343
3344bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
3345                                unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
3346                                SmallVectorImpl<MachineInstr*> &NewMIs) const {
3347  DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
3348    MemOp2RegOpTable.find(MI->getOpcode());
3349  if (I == MemOp2RegOpTable.end())
3350    return false;
3351  unsigned Opc = I->second.first;
3352  unsigned Index = I->second.second & TB_INDEX_MASK;
3353  bool FoldedLoad = I->second.second & TB_FOLDED_LOAD;
3354  bool FoldedStore = I->second.second & TB_FOLDED_STORE;
3355  if (UnfoldLoad && !FoldedLoad)
3356    return false;
3357  UnfoldLoad &= FoldedLoad;
3358  if (UnfoldStore && !FoldedStore)
3359    return false;
3360  UnfoldStore &= FoldedStore;
3361
3362  const MCInstrDesc &MCID = get(Opc);
3363  const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
3364  if (!MI->hasOneMemOperand() &&
3365      RC == &X86::VR128RegClass &&
3366      !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
3367    // Without memoperands, loadRegFromAddr and storeRegToStackSlot will
3368    // conservatively assume the address is unaligned. That's bad for
3369    // performance.
3370    return false;
3371  SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
3372  SmallVector<MachineOperand,2> BeforeOps;
3373  SmallVector<MachineOperand,2> AfterOps;
3374  SmallVector<MachineOperand,4> ImpOps;
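  // Partition the operands of the folded instruction into the address being
  // unfolded, the implicit register operands, and the explicit operands
  // before and after the memory reference.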
3375  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
3376    MachineOperand &Op = MI->getOperand(i);
3377    if (i >= Index && i < Index + X86::AddrNumOperands)
3378      AddrOps.push_back(Op);
3379    else if (Op.isReg() && Op.isImplicit())
3380      ImpOps.push_back(Op);
3381    else if (i < Index)
3382      BeforeOps.push_back(Op);
3383    else if (i > Index)
3384      AfterOps.push_back(Op);
3385  }
3386
3387  // Emit the load instruction.
3388  if (UnfoldLoad) {
3389    std::pair<MachineInstr::mmo_iterator,
3390              MachineInstr::mmo_iterator> MMOs =
3391      MF.extractLoadMemRefs(MI->memoperands_begin(),
3392                            MI->memoperands_end());
3393    loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
3394    if (UnfoldStore) {
3395      // Address operands cannot be marked isKill.
3396      for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
3397        MachineOperand &MO = NewMIs[0]->getOperand(i);
3398        if (MO.isReg())
3399          MO.setIsKill(false);
3400      }
3401    }
3402  }
3403
3404  // Emit the data processing instruction.
3405  MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI->getDebugLoc(), true);
3406  MachineInstrBuilder MIB(DataMI);
3407
3408  if (FoldedStore)
3409    MIB.addReg(Reg, RegState::Define);
3410  for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
3411    MIB.addOperand(BeforeOps[i]);
3412  if (FoldedLoad)
3413    MIB.addReg(Reg);
3414  for (unsigned i = 0, e = AfterOps.size(); i != e; ++i)
3415    MIB.addOperand(AfterOps[i]);
3416  for (unsigned i = 0, e = ImpOps.size(); i != e; ++i) {
3417    MachineOperand &MO = ImpOps[i];
3418    MIB.addReg(MO.getReg(),
3419               getDefRegState(MO.isDef()) |
3420               RegState::Implicit |
3421               getKillRegState(MO.isKill()) |
3422               getDeadRegState(MO.isDead()) |
3423               getUndefRegState(MO.isUndef()));
3424  }
3425  // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
3426  unsigned NewOpc = 0;
3427  switch (DataMI->getOpcode()) {
3428  default: break;
3429  case X86::CMP64ri32:
3430  case X86::CMP64ri8:
3431  case X86::CMP32ri:
3432  case X86::CMP32ri8:
3433  case X86::CMP16ri:
3434  case X86::CMP16ri8:
3435  case X86::CMP8ri: {
3436    MachineOperand &MO0 = DataMI->getOperand(0);
3437    MachineOperand &MO1 = DataMI->getOperand(1);
3438    if (MO1.getImm() == 0) {
3439      switch (DataMI->getOpcode()) {
3440      default: break;
3441      case X86::CMP64ri8:
3442      case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
3443      case X86::CMP32ri8:
3444      case X86::CMP32ri:   NewOpc = X86::TEST32rr; break;
3445      case X86::CMP16ri8:
3446      case X86::CMP16ri:   NewOpc = X86::TEST16rr; break;
3447      case X86::CMP8ri:    NewOpc = X86::TEST8rr; break;
3448      }
3449      DataMI->setDesc(get(NewOpc));
3450      MO1.ChangeToRegister(MO0.getReg(), false);
3451    }
3452  }
3453  }
3454  NewMIs.push_back(DataMI);
3455
3456  // Emit the store instruction.
3457  if (UnfoldStore) {
3458    const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
3459    std::pair<MachineInstr::mmo_iterator,
3460              MachineInstr::mmo_iterator> MMOs =
3461      MF.extractStoreMemRefs(MI->memoperands_begin(),
3462                             MI->memoperands_end());
3463    storeRegToAddr(MF, Reg, true, AddrOps, DstRC,
                       MMOs.first, MMOs.second, NewMIs);
3464  }
3465
3466  return true;
3467}
3468
3469bool
3470X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
3471                                  SmallVectorImpl<SDNode*> &NewNodes) const {
3472  if (!N->isMachineOpcode())
3473    return false;
3474
3475  DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
3476    MemOp2RegOpTable.find(N->getMachineOpcode());
3477  if (I == MemOp2RegOpTable.end())
3478    return false;
3479  unsigned Opc = I->second.first;
3480  unsigned Index = I->second.second & TB_INDEX_MASK;
3481  bool FoldedLoad = I->second.second & TB_FOLDED_LOAD;
3482  bool FoldedStore = I->second.second & TB_FOLDED_STORE;
3483  const MCInstrDesc &MCID = get(Opc);
3484  MachineFunction &MF = DAG.getMachineFunction();
3485  const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
3486  unsigned NumDefs = MCID.NumDefs;
3487  std::vector<SDValue> AddrOps;
3488  std::vector<SDValue> BeforeOps;
3489  std::vector<SDValue> AfterOps;
3490  DebugLoc dl = N->getDebugLoc();
3491  unsigned NumOps = N->getNumOperands();
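  // Note that SDNode operand lists do not include the results (defs), so the
  // memory operand index from the table is adjusted by NumDefs below.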
3492  for (unsigned i = 0; i != NumOps-1; ++i) {
3493    SDValue Op = N->getOperand(i);
3494    if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
3495      AddrOps.push_back(Op);
3496    else if (i < Index-NumDefs)
3497      BeforeOps.push_back(Op);
3498    else if (i > Index-NumDefs)
3499      AfterOps.push_back(Op);
3500  }
3501  SDValue Chain = N->getOperand(NumOps-1);
3502  AddrOps.push_back(Chain);
3503
3504  // Emit the load instruction.
3505  SDNode *Load = 0;
3506  if (FoldedLoad) {
3507    EVT VT = *RC->vt_begin();
3508    std::pair<MachineInstr::mmo_iterator,
3509              MachineInstr::mmo_iterator> MMOs =
3510      MF.extractLoadMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
3511                            cast<MachineSDNode>(N)->memoperands_end());
3512    if (!(*MMOs.first) &&
3513        RC == &X86::VR128RegClass &&
3514        !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
3515      // Do not introduce a slow unaligned load.
3516      return false;
3517    unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
3518    bool isAligned = (*MMOs.first) &&
3519                     (*MMOs.first)->getAlignment() >= Alignment;
3520    Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, TM), dl,
3521                              VT, MVT::Other, &AddrOps[0], AddrOps.size());
3522    NewNodes.push_back(Load);
3523
3524    // Preserve memory reference information.
3525    cast<MachineSDNode>(Load)->setMemRefs(MMOs.first, MMOs.second);
3526  }
3527
3528  // Emit the data processing instruction.
3529  std::vector<EVT> VTs;
3530  const TargetRegisterClass *DstRC = 0;
3531  if (MCID.getNumDefs() > 0) {
3532    DstRC = getRegClass(MCID, 0, &RI, MF);
3533    VTs.push_back(*DstRC->vt_begin());
3534  }
3535  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
3536    EVT VT = N->getValueType(i);
3537    if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
3538      VTs.push_back(VT);
3539  }
3540  if (Load)
3541    BeforeOps.push_back(SDValue(Load, 0));
3542  std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps));
3543  SDNode *NewNode = DAG.getMachineNode(Opc, dl, VTs, &BeforeOps[0],
3544                                      BeforeOps.size());
3545  NewNodes.push_back(NewNode);
3546
3547  // Emit the store instruction.
3548  if (FoldedStore) {
3549    AddrOps.pop_back();
3550    AddrOps.push_back(SDValue(NewNode, 0));
3551    AddrOps.push_back(Chain);
3552    std::pair<MachineInstr::mmo_iterator,
3553              MachineInstr::mmo_iterator> MMOs =
3554      MF.extractStoreMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
3555                             cast<MachineSDNode>(N)->memoperands_end());
3556    if (!(*MMOs.first) &&
3557        RC == &X86::VR128RegClass &&
3558        !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
3559      // Do not introduce a slow unaligned store.
3560      return false;
3561    unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
3562    bool isAligned = (*MMOs.first) &&
3563                     (*MMOs.first)->getAlignment() >= Alignment;
3564    SDNode *Store = DAG.getMachineNode(getStoreRegOpcode(0, DstRC,
3565                                                         isAligned, TM),
3566                                       dl, MVT::Other,
3567                                       &AddrOps[0], AddrOps.size());
3568    NewNodes.push_back(Store);
3569
3570    // Preserve memory reference information.
3571    cast<MachineSDNode>(Load)->setMemRefs(MMOs.first, MMOs.second);
3572  }
3573
3574  return true;
3575}

unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                      bool UnfoldLoad, bool UnfoldStore,
                                      unsigned *LoadRegIndex) const {
  DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
    MemOp2RegOpTable.find(Opc);
  if (I == MemOp2RegOpTable.end())
    return 0;
  bool FoldedLoad = I->second.second & TB_FOLDED_LOAD;
  bool FoldedStore = I->second.second & TB_FOLDED_STORE;
  if (UnfoldLoad && !FoldedLoad)
    return 0;
  if (UnfoldStore && !FoldedStore)
    return 0;
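  // Tell the caller which operand will hold the value produced by the
  // unfolded load.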
  if (LoadRegIndex)
    *LoadRegIndex = I->second.second & TB_INDEX_MASK;
  return I->second.first;
}

bool
X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                     int64_t &Offset1, int64_t &Offset2) const {
  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;
  unsigned Opc1 = Load1->getMachineOpcode();
  unsigned Opc2 = Load2->getMachineOpcode();
  switch (Opc1) {
  default: return false;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp32m:
  case X86::LD_Fp64m:
  case X86::LD_Fp80m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::FsMOVAPSrm:
  case X86::FsMOVAPDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  // AVX load instructions
  case X86::VMOVSSrm:
  case X86::VMOVSDrm:
  case X86::FsVMOVAPSrm:
  case X86::FsVMOVAPDrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
    break;
  }
  switch (Opc2) {
  default: return false;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp32m:
  case X86::LD_Fp64m:
  case X86::LD_Fp80m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::FsMOVAPSrm:
  case X86::FsMOVAPDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  // AVX load instructions
  case X86::VMOVSSrm:
  case X86::VMOVSDrm:
  case X86::FsVMOVAPSrm:
  case X86::FsVMOVAPDrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
    break;
  }

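  // A machine load node's operands are the five address components (base,
  // scale, index, displacement, segment) followed by the chain.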
  // Check if chain operands and base addresses match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(5) != Load2->getOperand(5))
    return false;
  // Segment operands should match as well.
  if (Load1->getOperand(4) != Load2->getOperand(4))
    return false;
  // Scale should be 1, Index should be Reg0.
  if (Load1->getOperand(1) == Load2->getOperand(1) &&
      Load1->getOperand(2) == Load2->getOperand(2)) {
    if (cast<ConstantSDNode>(Load1->getOperand(1))->getZExtValue() != 1)
      return false;

    // Now let's examine the displacements.
    if (isa<ConstantSDNode>(Load1->getOperand(3)) &&
        isa<ConstantSDNode>(Load2->getOperand(3))) {
      Offset1 = cast<ConstantSDNode>(Load1->getOperand(3))->getSExtValue();
      Offset2 = cast<ConstantSDNode>(Load2->getOperand(3))->getSExtValue();
      return true;
    }
  }
  return false;
}

bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                           int64_t Offset1, int64_t Offset2,
                                           unsigned NumLoads) const {
  assert(Offset2 > Offset1);
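  // Don't try to cluster loads whose displacements are far apart:
  // (Offset2 - Offset1) / 8 must not exceed 64.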
  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  unsigned Opc1 = Load1->getMachineOpcode();
  unsigned Opc2 = Load2->getMachineOpcode();
  if (Opc1 != Opc2)
    return false;  // FIXME: overly conservative?

  switch (Opc1) {
  default: break;
  case X86::LD_Fp32m:
  case X86::LD_Fp64m:
  case X86::LD_Fp80m:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    return false;
  }

  EVT VT = Load1->getValueType(0);
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // XMM registers. In 64-bit mode we can be a bit more aggressive since we
    // have 16 of them to play with.
    if (TM.getSubtargetImpl()->is64Bit()) {
      if (NumLoads >= 3)
        return false;
    } else if (NumLoads) {
      return false;
    }
    break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f32:
  case MVT::f64:
    if (NumLoads)
      return false;
    break;
  }

  return true;
}


bool X86InstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
  if (CC == X86::COND_NE_OR_P || CC == X86::COND_NP_OR_E)
    return true;
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

bool X86InstrInfo::
isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // FIXME: Return false for x87 stack register classes for now. We can't
  // allow any loads of these registers before FpGet_ST0_80.
  return !(RC == &X86::CCRRegClass || RC == &X86::RFP32RegClass ||
           RC == &X86::RFP64RegClass || RC == &X86::RFP80RegClass);
}

/// getGlobalBaseReg - Return a virtual register initialized with the global
/// base register value. The instructions required to initialize the register
/// are inserted into the function entry block, if necessary.
///
/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
///
unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  assert(!TM.getSubtarget<X86Subtarget>().is64Bit() &&
         "X86-64 PIC uses RIP relative addressing");

  X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
  unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
  if (GlobalBaseReg != 0)
    return GlobalBaseReg;

  // Create the register. The code to initialize it is inserted
  // later, by the CGBR pass (below).
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  GlobalBaseReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
  X86FI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}

// These are the replaceable SSE instructions. Some of these have Int variants
// that we don't include here. We don't want to replace instructions selected
// by intrinsics.
static const uint16_t ReplaceableInstrs[][3] = {
  //PackedSingle     PackedDouble    PackedInt
  { X86::MOVAPSmr,   X86::MOVAPDmr,  X86::MOVDQAmr  },
  { X86::MOVAPSrm,   X86::MOVAPDrm,  X86::MOVDQArm  },
  { X86::MOVAPSrr,   X86::MOVAPDrr,  X86::MOVDQArr  },
  { X86::MOVUPSmr,   X86::MOVUPDmr,  X86::MOVDQUmr  },
  { X86::MOVUPSrm,   X86::MOVUPDrm,  X86::MOVDQUrm  },
  { X86::MOVNTPSmr,  X86::MOVNTPDmr, X86::MOVNTDQmr },
  { X86::ANDNPSrm,   X86::ANDNPDrm,  X86::PANDNrm   },
  { X86::ANDNPSrr,   X86::ANDNPDrr,  X86::PANDNrr   },
  { X86::ANDPSrm,    X86::ANDPDrm,   X86::PANDrm    },
  { X86::ANDPSrr,    X86::ANDPDrr,   X86::PANDrr    },
  { X86::ORPSrm,     X86::ORPDrm,    X86::PORrm     },
  { X86::ORPSrr,     X86::ORPDrr,    X86::PORrr     },
  { X86::XORPSrm,    X86::XORPDrm,   X86::PXORrm    },
  { X86::XORPSrr,    X86::XORPDrr,   X86::PXORrr    },
  // AVX 128-bit support
  { X86::VMOVAPSmr,  X86::VMOVAPDmr,  X86::VMOVDQAmr  },
  { X86::VMOVAPSrm,  X86::VMOVAPDrm,  X86::VMOVDQArm  },
  { X86::VMOVAPSrr,  X86::VMOVAPDrr,  X86::VMOVDQArr  },
  { X86::VMOVUPSmr,  X86::VMOVUPDmr,  X86::VMOVDQUmr  },
  { X86::VMOVUPSrm,  X86::VMOVUPDrm,  X86::VMOVDQUrm  },
  { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
  { X86::VANDNPSrm,  X86::VANDNPDrm,  X86::VPANDNrm   },
  { X86::VANDNPSrr,  X86::VANDNPDrr,  X86::VPANDNrr   },
  { X86::VANDPSrm,   X86::VANDPDrm,   X86::VPANDrm    },
  { X86::VANDPSrr,   X86::VANDPDrr,   X86::VPANDrr    },
  { X86::VORPSrm,    X86::VORPDrm,    X86::VPORrm     },
  { X86::VORPSrr,    X86::VORPDrr,    X86::VPORrr     },
  { X86::VXORPSrm,   X86::VXORPDrm,   X86::VPXORrm    },
  { X86::VXORPSrr,   X86::VXORPDrr,   X86::VPXORrr    },
  // AVX 256-bit support
  { X86::VMOVAPSYmr,   X86::VMOVAPDYmr,   X86::VMOVDQAYmr  },
  { X86::VMOVAPSYrm,   X86::VMOVAPDYrm,   X86::VMOVDQAYrm  },
  { X86::VMOVAPSYrr,   X86::VMOVAPDYrr,   X86::VMOVDQAYrr  },
  { X86::VMOVUPSYmr,   X86::VMOVUPDYmr,   X86::VMOVDQUYmr  },
  { X86::VMOVUPSYrm,   X86::VMOVUPDYrm,   X86::VMOVDQUYrm  },
  { X86::VMOVNTPSYmr,  X86::VMOVNTPDYmr,  X86::VMOVNTDQYmr }
};

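// The PackedInt forms of these 256-bit instructions require AVX2, so they get
// their own table. Rows that repeat an opcode in the PackedSingle and
// PackedDouble columns have only a single floating point form.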
static const uint16_t ReplaceableInstrsAVX2[][3] = {
  //PackedSingle       PackedDouble       PackedInt
  { X86::VANDNPSYrm,   X86::VANDNPDYrm,   X86::VPANDNYrm   },
  { X86::VANDNPSYrr,   X86::VANDNPDYrr,   X86::VPANDNYrr   },
  { X86::VANDPSYrm,    X86::VANDPDYrm,    X86::VPANDYrm    },
  { X86::VANDPSYrr,    X86::VANDPDYrr,    X86::VPANDYrr    },
  { X86::VORPSYrm,     X86::VORPDYrm,     X86::VPORYrm     },
  { X86::VORPSYrr,     X86::VORPDYrr,     X86::VPORYrr     },
  { X86::VXORPSYrm,    X86::VXORPDYrm,    X86::VPXORYrm    },
  { X86::VXORPSYrr,    X86::VXORPDYrr,    X86::VPXORYrr    },
  { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr },
  { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr },
  { X86::VINSERTF128rm,  X86::VINSERTF128rm,  X86::VINSERTI128rm },
  { X86::VINSERTF128rr,  X86::VINSERTF128rr,  X86::VINSERTI128rr },
  { X86::VPERM2F128rm,   X86::VPERM2F128rm,   X86::VPERM2I128rm },
  { X86::VPERM2F128rr,   X86::VPERM2F128rr,   X86::VPERM2I128rr }
};

// FIXME: Some shuffle and unpack instructions have equivalents in different
// domains, but they require a bit more work than just switching opcodes.

static const uint16_t *lookup(unsigned opcode, unsigned domain) {
  for (unsigned i = 0, e = array_lengthof(ReplaceableInstrs); i != e; ++i)
    if (ReplaceableInstrs[i][domain-1] == opcode)
      return ReplaceableInstrs[i];
  return 0;
}

static const uint16_t *lookupAVX2(unsigned opcode, unsigned domain) {
  for (unsigned i = 0, e = array_lengthof(ReplaceableInstrsAVX2); i != e; ++i)
    if (ReplaceableInstrsAVX2[i][domain-1] == opcode)
      return ReplaceableInstrsAVX2[i];
  return 0;
}

std::pair<uint16_t, uint16_t>
X86InstrInfo::getExecutionDomain(const MachineInstr *MI) const {
  uint16_t domain = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  bool hasAVX2 = TM.getSubtarget<X86Subtarget>().hasAVX2();
  uint16_t validDomains = 0;
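  // validDomains is a bit mask with bit N set if domain N (1-3) is legal for
  // this opcode: 0xe enables all three domains, 0x6 just the two FP domains.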
  if (domain && lookup(MI->getOpcode(), domain))
    validDomains = 0xe;
  else if (domain && lookupAVX2(MI->getOpcode(), domain))
    validDomains = hasAVX2 ? 0xe : 0x6;
  return std::make_pair(domain, validDomains);
}

void X86InstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
  assert(Domain>0 && Domain<4 && "Invalid execution domain");
  uint16_t dom = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  assert(dom && "Not an SSE instruction");
  const uint16_t *table = lookup(MI->getOpcode(), dom);
  if (!table) { // try the other table
    assert((TM.getSubtarget<X86Subtarget>().hasAVX2() || Domain < 3) &&
           "256-bit vector operations only available in AVX2");
    table = lookupAVX2(MI->getOpcode(), dom);
  }
  assert(table && "Cannot change domain");
  MI->setDesc(get(table[Domain-1]));
}

/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(X86::NOOP);
}

bool X86InstrInfo::isHighLatencyDef(int opc) const {
  switch (opc) {
  default: return false;
  case X86::DIVSDrm:
  case X86::DIVSDrm_Int:
  case X86::DIVSDrr:
  case X86::DIVSDrr_Int:
  case X86::DIVSSrm:
  case X86::DIVSSrm_Int:
  case X86::DIVSSrr:
  case X86::DIVSSrr_Int:
  case X86::SQRTPDm:
  case X86::SQRTPDm_Int:
  case X86::SQRTPDr:
  case X86::SQRTPDr_Int:
  case X86::SQRTPSm:
  case X86::SQRTPSm_Int:
  case X86::SQRTPSr:
  case X86::SQRTPSr_Int:
  case X86::SQRTSDm:
  case X86::SQRTSDm_Int:
  case X86::SQRTSDr:
  case X86::SQRTSDr_Int:
  case X86::SQRTSSm:
  case X86::SQRTSSm_Int:
  case X86::SQRTSSr:
  case X86::SQRTSSr_Int:
  // AVX instructions with high latency
  case X86::VDIVSDrm:
  case X86::VDIVSDrm_Int:
  case X86::VDIVSDrr:
  case X86::VDIVSDrr_Int:
  case X86::VDIVSSrm:
  case X86::VDIVSSrm_Int:
  case X86::VDIVSSrr:
  case X86::VDIVSSrr_Int:
  case X86::VSQRTPDm:
  case X86::VSQRTPDm_Int:
  case X86::VSQRTPDr:
  case X86::VSQRTPDr_Int:
  case X86::VSQRTPSm:
  case X86::VSQRTPSm_Int:
  case X86::VSQRTPSr:
  case X86::VSQRTPSr_Int:
  case X86::VSQRTSDm:
  case X86::VSQRTSDm_Int:
  case X86::VSQRTSDr:
  case X86::VSQRTSSm:
  case X86::VSQRTSSm_Int:
  case X86::VSQRTSSr:
    return true;
  }
}

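/// hasHighOperandLatency - On x86 this is decided purely from the defining
/// instruction's opcode; the itinerary data and the use are not consulted.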
bool X86InstrInfo::
hasHighOperandLatency(const InstrItineraryData *ItinData,
                      const MachineRegisterInfo *MRI,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {
  return isHighLatencyDef(DefMI->getOpcode());
}

namespace {
  /// CGBR - Create Global Base Reg pass. This initializes the PIC
  /// global base register for x86-32.
  struct CGBR : public MachineFunctionPass {
    static char ID;
    CGBR() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());

      assert(!TM->getSubtarget<X86Subtarget>().is64Bit() &&
             "X86-64 PIC uses RIP relative addressing");

      // Only emit a global base reg in PIC mode.
      if (TM->getRelocationModel() != Reloc::PIC_)
        return false;

      X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
      unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();

      // If we didn't need a GlobalBaseReg, don't insert code.
      if (GlobalBaseReg == 0)
        return false;

      // Insert the code that sets GlobalBaseReg into the first MBB of the
      // function.
      MachineBasicBlock &FirstMBB = MF.front();
      MachineBasicBlock::iterator MBBI = FirstMBB.begin();
      DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      const X86InstrInfo *TII = TM->getInstrInfo();

      unsigned PC;
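      // For GOT-style PIC, materialize the PC into a scratch register;
      // GlobalBaseReg itself is then computed by the ADD32ri emitted below.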
      if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT())
        PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
      else
        PC = GlobalBaseReg;

      // The operand of MovePCtoStack is completely ignored by the asm
      // printer. It's only used in JIT code emission as a displacement to
      // the pc.
      BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);

      // If we're using vanilla 'GOT' PIC style, we should use relative
      // addressing not to the pc, but to the _GLOBAL_OFFSET_TABLE_ external
      // symbol.
      if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) {
        // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
        BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
          .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
                                        X86II::MO_GOT_ABSOLUTE_ADDRESS);
      }

      return true;
    }

    virtual const char *getPassName() const {
      return "X86 PIC Global Base Reg Initialization";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };
}

char CGBR::ID = 0;
FunctionPass*
llvm::createGlobalBaseRegPass() { return new CGBR(); }

namespace {
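  /// LDTLSCleanup - Combine multiple accesses to local-dynamic TLS variables
  /// so that the TLS base address for the module is only fetched once per
  /// execution path through the function.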
  struct LDTLSCleanup : public MachineFunctionPass {
    static char ID;
    LDTLSCleanup() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      X86MachineFunctionInfo* MFI = MF.getInfo<X86MachineFunctionInfo>();
      if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
        // No point in folding accesses if there aren't at least two.
        return false;
      }

      MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
      return VisitNode(DT->getRootNode(), 0);
    }

    // Visit the dominator subtree rooted at Node in pre-order.
    // If TLSBaseAddrReg is non-zero, then use that to replace any
    // TLS_base_addr instructions. Otherwise, create the register
    // when the first such instruction is seen, and then use it
    // as we encounter more instructions.
    bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
      MachineBasicBlock *BB = Node->getBlock();
      bool Changed = false;

      // Traverse the current block.
      for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
           ++I) {
        switch (I->getOpcode()) {
          case X86::TLS_base_addr32:
          case X86::TLS_base_addr64:
            if (TLSBaseAddrReg)
              I = ReplaceTLSBaseAddrCall(I, TLSBaseAddrReg);
            else
              I = SetRegister(I, &TLSBaseAddrReg);
            Changed = true;
            break;
          default:
            break;
        }
      }

      // Visit the children of this block in the dominator tree.
      for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end();
           I != E; ++I) {
        Changed |= VisitNode(*I, TLSBaseAddrReg);
      }

      return Changed;
    }

    // Replace the TLS_base_addr instruction I with a copy from
    // TLSBaseAddrReg, returning the new instruction.
    MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
                                         unsigned TLSBaseAddrReg) {
      MachineFunction *MF = I->getParent()->getParent();
      const X86TargetMachine *TM =
          static_cast<const X86TargetMachine *>(&MF->getTarget());
      const bool is64Bit = TM->getSubtarget<X86Subtarget>().is64Bit();
      const X86InstrInfo *TII = TM->getInstrInfo();

      // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
      MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY),
                                   is64Bit ? X86::RAX : X86::EAX)
                                   .addReg(TLSBaseAddrReg);

      // Erase the TLS_base_addr instruction.
      I->eraseFromParent();

      return Copy;
    }

    // Create a virtual register in *TLSBaseAddrReg, and populate it by
    // inserting a copy instruction after I. Returns the new instruction.
    MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
      MachineFunction *MF = I->getParent()->getParent();
      const X86TargetMachine *TM =
          static_cast<const X86TargetMachine *>(&MF->getTarget());
      const bool is64Bit = TM->getSubtarget<X86Subtarget>().is64Bit();
      const X86InstrInfo *TII = TM->getInstrInfo();

      // Create a virtual register for the TLS base address.
      MachineRegisterInfo &RegInfo = MF->getRegInfo();
      *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
                                                      ? &X86::GR64RegClass
                                                      : &X86::GR32RegClass);

      // Insert a copy from RAX/EAX to TLSBaseAddrReg.
      MachineInstr *Next = I->getNextNode();
      MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY),
                                   *TLSBaseAddrReg)
                                   .addReg(is64Bit ? X86::RAX : X86::EAX);

      return Copy;
    }

    virtual const char *getPassName() const {
      return "Local Dynamic TLS Access Clean-up";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };
}

char LDTLSCleanup::ID = 0;
FunctionPass*
llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }
