X86InstrInfo.cpp revision d94b6a16fec7d5021e3922b0e34f9ddb268d54b1
//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfoImpl(X86Insts, array_lengthof(X86Insts)),
    TM(tm), RI(tm, *this) {
}

bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
                               unsigned& sourceReg,
                               unsigned& destReg) const {
  MachineOpCode oc = MI.getOpcode();
  if (oc == X86::MOV8rr || oc == X86::MOV16rr ||
      oc == X86::MOV32rr || oc == X86::MOV64rr ||
      oc == X86::MOV16to16_ || oc == X86::MOV32to32_ ||
      oc == X86::MOV_Fp3232  || oc == X86::MOVSSrr || oc == X86::MOVSDrr ||
      oc == X86::MOV_Fp3264 || oc == X86::MOV_Fp6432 || oc == X86::MOV_Fp6464 ||
      oc == X86::FsMOVAPSrr || oc == X86::FsMOVAPDrr ||
      oc == X86::MOVAPSrr || oc == X86::MOVAPDrr ||
      oc == X86::MOVSS2PSrr || oc == X86::MOVSD2PDrr ||
      oc == X86::MOVPS2SSrr || oc == X86::MOVPD2SDrr ||
      oc == X86::MMX_MOVD64rr || oc == X86::MMX_MOVQ64rr) {
      assert(MI.getNumOperands() >= 2 &&
             MI.getOperand(0).isRegister() &&
             MI.getOperand(1).isRegister() &&
             "invalid register-register move instruction");
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
  }
  return false;
}

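// The operand checks below assume the X86 memory reference layout of this
// revision: four consecutive operands encoding base, scale, index and
// displacement.  A stack-slot access then looks like
//   MOV32rm %dst, <fi#N>, 1, %noreg, 0     ; load from frame index N
//   MOV32mr <fi#N>, 1, %noreg, 0, %src     ; store to frame index N
// which is the exact pattern matched here (a descriptive sketch, not
// authoritative).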
unsigned X86InstrInfo::isLoadFromStackSlot(MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(4).isImm() &&
        MI->getOperand(2).getImm() == 1 &&
        MI->getOperand(3).getReg() == 0 &&
        MI->getOperand(4).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlot(MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV16_mr:
  case X86::MOV32mr:
  case X86::MOV32_mr:
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
    if (MI->getOperand(0).isFI() && MI->getOperand(1).isImm() &&
        MI->getOperand(2).isReg() && MI->getOperand(3).isImm() &&
        MI->getOperand(1).getImm() == 1 &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(4).getReg();
    }
    break;
  }
  return 0;
}


bool X86InstrInfo::isReallyTriviallyReMaterializable(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    // Loads from constant pools are trivially rematerializable.
    return MI->getOperand(1).isRegister() && MI->getOperand(2).isImmediate() &&
           MI->getOperand(3).isRegister() && MI->getOperand(4).isConstantPoolIndex() &&
           MI->getOperand(1).getReg() == 0 &&
           MI->getOperand(2).getImm() == 1 &&
           MI->getOperand(3).getReg() == 0;
  }
  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

/// isReallySideEffectFree - If the M_MAY_HAVE_SIDE_EFFECTS flag is set, this
/// method is called to determine if the specific instance of this instruction
/// has side effects. This is useful for instructions which, like loads, are
/// generally assumed to have side effects. A load from a constant pool has no
/// side effects, though, so we need to distinguish it from the general case.
bool X86InstrInfo::isReallySideEffectFree(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV32rm:
    if (MI->getOperand(1).isRegister()) {
      unsigned Reg = MI->getOperand(1).getReg();

      // Loads from global addresses which aren't redefined in the function are
      // side effect free.
      if (Reg != 0 && MRegisterInfo::isVirtualRegister(Reg) &&
          MI->getOperand(2).isImmediate() &&
          MI->getOperand(3).isRegister() &&
          MI->getOperand(4).isGlobalAddress() &&
          MI->getOperand(2).getImm() == 1 &&
          MI->getOperand(3).getReg() == 0)
        return true;
    }
    // FALLTHROUGH
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    // Loads from constant pools have no side effects
    return MI->getOperand(1).isRegister() &&
           MI->getOperand(2).isImmediate() &&
           MI->getOperand(3).isRegister() &&
           MI->getOperand(4).isConstantPoolIndex() &&
           MI->getOperand(1).getReg() == 0 &&
           MI->getOperand(2).getImm() == 1 &&
           MI->getOperand(3).getReg() == 0;
  }

  // All other instances of these instructions are presumed to have side
  // effects.
  return false;
}

/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
/// is not marked dead.
static bool hasLiveCondCodeDef(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isRegister() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}

/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand.  This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
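/// For example (an illustrative sketch; the virtual register numbers are made
/// up), the two-address add
///   %reg1026 = ADD32rr %reg1024, %reg1025
/// which would otherwise force %reg1026 and %reg1024 into the same register,
/// can become
///   %reg1026 = LEA32r %reg1024, 1, %reg1025, 0
/// i.e. "leal (%base,%index), %dst", whose destination is independent of both
/// sources.
///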
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables &LV) const {
  MachineInstr *MI = MBBI;
  // All instructions input are two-addr instructions.  Get the known operands.
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();

  MachineInstr *NewMI = NULL;
  // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's.  When
  // we have better subtarget support, enable the 16-bit LEA generation here.
  bool DisableLEA16 = true;

  unsigned MIOpc = MI->getOpcode();
  switch (MIOpc) {
  case X86::SHUFPSrri: {
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;

    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    unsigned M = MI->getOperand(3).getImm();
    if (B != C) return 0;
    NewMI = BuildMI(get(X86::PSHUFDri), A).addReg(B).addImm(M);
    break;
  }
  case X86::SHL64ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't
    // currently use the flags produced by a shift, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

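    // The shift is mapped onto LEA's scaled-index addressing (a sketch of the
    // intent): e.g.
    //   %dst = SHL64ri %src, 3   becomes   %dst = LEA64r %noreg, 8, %src, 0
    // i.e. "leaq (,%src,8), %dst".  The scale field only encodes 1, 2, 4 or 8,
    // hence the ShAmt < 4 check above.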
    NewMI = BuildMI(get(X86::LEA64r), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't
    // currently use the flags produced by a shift, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit() ?
      X86::LEA64_32r : X86::LEA32r;
    NewMI = BuildMI(get(Opc), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL16ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't
    // currently use the flags produced by a shift, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    if (DisableLEA16) {
      // If 16-bit LEA is disabled, use 32-bit LEA via subregisters.
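      // Sketch of the sequence built below (assuming subreg index 2 denotes
      // the low 16 bits at this revision):
      //   %lea_in  = INSERT_SUBREG  %src, 2       ; widen the 16-bit source
      //   %lea_out = LEA32r %noreg, 1<<ShAmt, %lea_in, 0
      //   %dst     = EXTRACT_SUBREG %lea_out, 2   ; take the low 16 bits back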
      MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
      unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
        ? X86::LEA64_32r : X86::LEA32r;
      unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
      unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);

      MachineInstr *Ins =
        BuildMI(get(X86::INSERT_SUBREG), leaInReg).addReg(Src).addImm(2);
      Ins->copyKillDeadInfo(MI);

      NewMI = BuildMI(get(Opc), leaOutReg)
        .addReg(0).addImm(1 << ShAmt).addReg(leaInReg).addImm(0);

      MachineInstr *Ext =
        BuildMI(get(X86::EXTRACT_SUBREG), Dest).addReg(leaOutReg).addImm(2);
      Ext->copyKillDeadInfo(MI);

      MFI->insert(MBBI, Ins);            // Insert the insert_subreg
      LV.instructionChanged(MI, NewMI);  // Update live variables
      LV.addVirtualRegisterKilled(leaInReg, NewMI);
      MFI->insert(MBBI, NewMI);          // Insert the new inst
      LV.addVirtualRegisterKilled(leaOutReg, Ext);
      MFI->insert(MBBI, Ext);            // Insert the extract_subreg
      return Ext;
    } else {
      NewMI = BuildMI(get(X86::LEA16r), Dest)
        .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    }
    break;
  }
  default: {
    // The following opcodes also set the condition code register(s). Only
    // convert them to an equivalent LEA if the condition code register defs
    // are dead!
    if (hasLiveCondCodeDef(MI))
      return 0;

    bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
    switch (MIOpc) {
    default: return 0;
    case X86::INC64r:
    case X86::INC32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src, 1);
      break;
    }
    case X86::INC16r:
    case X86::INC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, 1);
      break;
    case X86::DEC64r:
    case X86::DEC32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src, -1);
      break;
    }
    case X86::DEC16r:
    case X86::DEC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, -1);
      break;
    case X86::ADD64rr:
    case X86::ADD32rr: {
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Opc = MIOpc == X86::ADD64rr ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegReg(BuildMI(get(Opc), Dest), Src,
                        MI->getOperand(2).getReg());
      break;
    }
    case X86::ADD16rr:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      NewMI = addRegReg(BuildMI(get(X86::LEA16r), Dest), Src,
                        MI->getOperand(2).getReg());
      break;
    case X86::ADD64ri32:
    case X86::ADD64ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate())
        NewMI = addRegOffset(BuildMI(get(X86::LEA64r), Dest), Src,
                             MI->getOperand(2).getImm());
      break;
    case X86::ADD32ri:
    case X86::ADD32ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate()) {
        unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
        NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src,
                             MI->getOperand(2).getImm());
      }
      break;
    case X86::ADD16ri:
    case X86::ADD16ri8:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate())
        NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src,
                             MI->getOperand(2).getImm());
      break;
    case X86::SHL16ri:
      if (DisableLEA16) return 0;
    case X86::SHL32ri:
    case X86::SHL64ri: {
      assert(MI->getNumOperands() >= 3 && MI->getOperand(2).isImmediate() &&
             "Unknown shl instruction!");
      unsigned ShAmt = MI->getOperand(2).getImm();
      if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
        X86AddressMode AM;
        AM.Scale = 1 << ShAmt;
        AM.IndexReg = Src;
        unsigned Opc = MIOpc == X86::SHL64ri ? X86::LEA64r
          : (MIOpc == X86::SHL32ri
             ? (is64Bit ? X86::LEA64_32r : X86::LEA32r) : X86::LEA16r);
        NewMI = addFullAddress(BuildMI(get(Opc), Dest), AM);
      }
      break;
    }
    }
  }
  }

  if (!NewMI) return 0;

  NewMI->copyKillDeadInfo(MI);
  LV.instructionChanged(MI, NewMI);  // Update live variables
  MFI->insert(MBBI, NewMI);          // Insert the new inst
  return NewMI;
}

/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
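/// For the double-shift cases the rewrite relies on the identity (an informal
/// sketch, not a formal proof):
///   SHRD B, C, I  ==  (B >> I) | (C << (Size - I))  ==  SHLD C, B, (Size - I)
/// so swapping the two register operands and replacing I with Size-I leaves
/// the computed value unchanged.
///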
MachineInstr *X86InstrInfo::commuteInstruction(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
  case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
  case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
    unsigned Opc;
    unsigned Size;
    switch (MI->getOpcode()) {
    default: assert(0 && "Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
    case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
    }
    unsigned Amt = MI->getOperand(3).getImm();
    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    bool BisKill = MI->getOperand(1).isKill();
    bool CisKill = MI->getOperand(2).isKill();
    return BuildMI(get(Opc), A).addReg(C, false, false, CisKill)
      .addReg(B, false, false, BisKill).addImm(Size-Amt);
  }
  case X86::CMOVB16rr:
  case X86::CMOVB32rr:
  case X86::CMOVB64rr:
  case X86::CMOVAE16rr:
  case X86::CMOVAE32rr:
  case X86::CMOVAE64rr:
  case X86::CMOVE16rr:
  case X86::CMOVE32rr:
  case X86::CMOVE64rr:
  case X86::CMOVNE16rr:
  case X86::CMOVNE32rr:
  case X86::CMOVNE64rr:
  case X86::CMOVBE16rr:
  case X86::CMOVBE32rr:
  case X86::CMOVBE64rr:
  case X86::CMOVA16rr:
  case X86::CMOVA32rr:
  case X86::CMOVA64rr:
  case X86::CMOVL16rr:
  case X86::CMOVL32rr:
  case X86::CMOVL64rr:
  case X86::CMOVGE16rr:
  case X86::CMOVGE32rr:
  case X86::CMOVGE64rr:
  case X86::CMOVLE16rr:
  case X86::CMOVLE32rr:
  case X86::CMOVLE64rr:
  case X86::CMOVG16rr:
  case X86::CMOVG32rr:
  case X86::CMOVG64rr:
  case X86::CMOVS16rr:
  case X86::CMOVS32rr:
  case X86::CMOVS64rr:
  case X86::CMOVNS16rr:
  case X86::CMOVNS32rr:
  case X86::CMOVNS64rr:
  case X86::CMOVP16rr:
  case X86::CMOVP32rr:
  case X86::CMOVP64rr:
  case X86::CMOVNP16rr:
  case X86::CMOVNP32rr:
  case X86::CMOVNP64rr: {
    unsigned Opc = 0;
    switch (MI->getOpcode()) {
    default: break;
    case X86::CMOVB16rr:  Opc = X86::CMOVAE16rr; break;
    case X86::CMOVB32rr:  Opc = X86::CMOVAE32rr; break;
    case X86::CMOVB64rr:  Opc = X86::CMOVAE64rr; break;
    case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
    case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
    case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
    case X86::CMOVE16rr:  Opc = X86::CMOVNE16rr; break;
    case X86::CMOVE32rr:  Opc = X86::CMOVNE32rr; break;
    case X86::CMOVE64rr:  Opc = X86::CMOVNE64rr; break;
    case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
    case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
    case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
    case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
    case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
    case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
    case X86::CMOVA16rr:  Opc = X86::CMOVBE16rr; break;
    case X86::CMOVA32rr:  Opc = X86::CMOVBE32rr; break;
    case X86::CMOVA64rr:  Opc = X86::CMOVBE64rr; break;
    case X86::CMOVL16rr:  Opc = X86::CMOVGE16rr; break;
    case X86::CMOVL32rr:  Opc = X86::CMOVGE32rr; break;
    case X86::CMOVL64rr:  Opc = X86::CMOVGE64rr; break;
    case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
    case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
    case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
    case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
    case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
    case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
    case X86::CMOVG16rr:  Opc = X86::CMOVLE16rr; break;
    case X86::CMOVG32rr:  Opc = X86::CMOVLE32rr; break;
    case X86::CMOVG64rr:  Opc = X86::CMOVLE64rr; break;
    case X86::CMOVS16rr:  Opc = X86::CMOVNS16rr; break;
    case X86::CMOVS32rr:  Opc = X86::CMOVNS32rr; break;
    case X86::CMOVS64rr:  Opc = X86::CMOVNS64rr; break;
    case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
    case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
    case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
    case X86::CMOVP16rr:  Opc = X86::CMOVNP16rr; break;
    case X86::CMOVP32rr:  Opc = X86::CMOVNP32rr; break;
    case X86::CMOVP64rr:  Opc = X86::CMOVNP64rr; break;
    case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
    case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
    case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
    }

    MI->setInstrDescriptor(get(Opc));
    // Fallthrough intended.
  }
  default:
    return TargetInstrInfoImpl::commuteInstruction(MI);
  }
}

static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
  switch (BrOpc) {
  default: return X86::COND_INVALID;
  case X86::JE:  return X86::COND_E;
  case X86::JNE: return X86::COND_NE;
  case X86::JL:  return X86::COND_L;
  case X86::JLE: return X86::COND_LE;
  case X86::JG:  return X86::COND_G;
  case X86::JGE: return X86::COND_GE;
  case X86::JB:  return X86::COND_B;
  case X86::JBE: return X86::COND_BE;
  case X86::JA:  return X86::COND_A;
  case X86::JAE: return X86::COND_AE;
  case X86::JS:  return X86::COND_S;
  case X86::JNS: return X86::COND_NS;
  case X86::JP:  return X86::COND_P;
  case X86::JNP: return X86::COND_NP;
  case X86::JO:  return X86::COND_O;
  case X86::JNO: return X86::COND_NO;
  }
}

unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E:  return X86::JE;
  case X86::COND_NE: return X86::JNE;
  case X86::COND_L:  return X86::JL;
  case X86::COND_LE: return X86::JLE;
  case X86::COND_G:  return X86::JG;
  case X86::COND_GE: return X86::JGE;
  case X86::COND_B:  return X86::JB;
  case X86::COND_BE: return X86::JBE;
  case X86::COND_A:  return X86::JA;
  case X86::COND_AE: return X86::JAE;
  case X86::COND_S:  return X86::JS;
  case X86::COND_NS: return X86::JNS;
  case X86::COND_P:  return X86::JP;
  case X86::COND_NP: return X86::JNP;
  case X86::COND_O:  return X86::JO;
  case X86::COND_NO: return X86::JNO;
  }
}

/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E:  return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L:  return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G:  return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B:  return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A:  return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S:  return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P:  return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O:  return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  }
}

bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  if (TID->Flags & M_TERMINATOR_FLAG) {
    // Conditional branch is a special case.
    if ((TID->Flags & M_BRANCH_FLAG) != 0 && (TID->Flags & M_BARRIER_FLAG) == 0)
      return true;
    if ((TID->Flags & M_PREDICABLE) == 0)
      return true;
    return !isPredicated(MI);
  }
  return false;
}

// For purposes of branch analysis do not count FP_REG_KILL as a terminator.
static bool isBrAnalysisUnpredicatedTerminator(const MachineInstr *MI,
                                               const X86InstrInfo &TII) {
  if (MI->getOpcode() == X86::FP_REG_KILL)
    return false;
  return TII.isUnpredicatedTerminator(MI);
}

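// AnalyzeBranch handles (as a summary of the cases below): a fall-through
// block, a single unconditional JMP, a single conditional branch that falls
// through on the false edge, e.g.
//   JNE <target>            ; TBB = target, Cond = { COND_NE }
// and a conditional branch followed by an unconditional JMP (TBB and FBB both
// set).  Any other terminator shape makes it return true ("cannot analyze").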
bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 std::vector<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this)) {
    if (!isBranch(LastInst->getOpcode()))
      return true;

    // If the block ends with a branch, there are three possibilities:
    // it's an unconditional, conditional, or indirect branch.

    if (LastInst->getOpcode() == X86::JMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    X86::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode());
    if (BranchCode == X86::COND_INVALID)
      return true;  // Can't handle indirect branch.

    // Otherwise, block ends with fall-through condbranch.
    TBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    return false;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isBrAnalysisUnpredicatedTerminator(--I, *this))
    return true;

  // If the block ends with X86::JMP and a conditional branch, handle it.
  X86::CondCode BranchCode = GetCondFromBranchOpc(SecondLastInst->getOpcode());
  if (BranchCode != X86::COND_INVALID && LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two X86::JMPs, handle it.  The second one is not
  // executed, so remove it.
  if (SecondLastInst->getOpcode() == X86::JMP &&
      LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != X86::JMP &&
      GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
                                                     MachineOperand &MO) {
  if (MO.isRegister())
    MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
                     false, false, MO.getSubReg());
  else if (MO.isImmediate())
    MIB = MIB.addImm(MO.getImm());
  else if (MO.isFrameIndex())
    MIB = MIB.addFrameIndex(MO.getIndex());
  else if (MO.isGlobalAddress())
    MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
  else if (MO.isConstantPoolIndex())
    MIB = MIB.addConstantPoolIndex(MO.getIndex(), MO.getOffset());
  else if (MO.isJumpTableIndex())
    MIB = MIB.addJumpTableIndex(MO.getIndex());
  else if (MO.isExternalSymbol())
    MIB = MIB.addExternalSymbol(MO.getSymbolName());
  else
    assert(0 && "Unknown operand for X86InstrAddOperand!");

  return MIB;
}

unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const std::vector<MachineOperand> &Cond) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");

  if (FBB == 0) { // One way branch.
    if (Cond.empty()) {
      // Unconditional branch?
      BuildMI(&MBB, get(X86::JMP)).addMBB(TBB);
    } else {
      // Conditional branch.
      unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
      BuildMI(&MBB, get(Opc)).addMBB(TBB);
    }
    return 1;
  }

  // Two-way Conditional branch.
  unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
  BuildMI(&MBB, get(Opc)).addMBB(TBB);
  BuildMI(&MBB, get(X86::JMP)).addMBB(FBB);
  return 2;
}

void X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   unsigned DestReg, unsigned SrcReg,
                                   const TargetRegisterClass *DestRC,
                                   const TargetRegisterClass *SrcRC) const {
  if (DestRC != SrcRC) {
    // Moving EFLAGS to / from another register requires a push and a pop.
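    // Roughly (an illustrative sketch of the sequences built below):
    //   EFLAGS -> GR32:   PUSHFD ; POP32r %dst      (pushf / pop %dst)
    //   GR32 -> EFLAGS:   PUSH32r %src ; POPFD      (push %src / popf)
    // with PUSHFQ/POP64r and PUSH64r/POPFQ used for the GR64 variants.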
    if (SrcRC == &X86::CCRRegClass) {
      assert(SrcReg == X86::EFLAGS);
      if (DestRC == &X86::GR64RegClass) {
        BuildMI(MBB, MI, get(X86::PUSHFQ));
        BuildMI(MBB, MI, get(X86::POP64r), DestReg);
        return;
      } else if (DestRC == &X86::GR32RegClass) {
        BuildMI(MBB, MI, get(X86::PUSHFD));
        BuildMI(MBB, MI, get(X86::POP32r), DestReg);
        return;
      }
    } else if (DestRC == &X86::CCRRegClass) {
      assert(DestReg == X86::EFLAGS);
      if (SrcRC == &X86::GR64RegClass) {
        BuildMI(MBB, MI, get(X86::PUSH64r)).addReg(SrcReg);
        BuildMI(MBB, MI, get(X86::POPFQ));
        return;
      } else if (SrcRC == &X86::GR32RegClass) {
        BuildMI(MBB, MI, get(X86::PUSH32r)).addReg(SrcReg);
        BuildMI(MBB, MI, get(X86::POPFD));
        return;
      }
    }
    cerr << "Not yet supported!";
    abort();
  }

  unsigned Opc;
  if (DestRC == &X86::GR64RegClass) {
    Opc = X86::MOV64rr;
  } else if (DestRC == &X86::GR32RegClass) {
    Opc = X86::MOV32rr;
  } else if (DestRC == &X86::GR16RegClass) {
    Opc = X86::MOV16rr;
  } else if (DestRC == &X86::GR8RegClass) {
    Opc = X86::MOV8rr;
  } else if (DestRC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rr;
  } else if (DestRC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rr;
  } else if (DestRC == &X86::RFP32RegClass) {
    Opc = X86::MOV_Fp3232;
  } else if (DestRC == &X86::RFP64RegClass || DestRC == &X86::RSTRegClass) {
    Opc = X86::MOV_Fp6464;
  } else if (DestRC == &X86::RFP80RegClass) {
    Opc = X86::MOV_Fp8080;
  } else if (DestRC == &X86::FR32RegClass) {
    Opc = X86::FsMOVAPSrr;
  } else if (DestRC == &X86::FR64RegClass) {
    Opc = X86::FsMOVAPDrr;
  } else if (DestRC == &X86::VR128RegClass) {
    Opc = X86::MOVAPSrr;
  } else if (DestRC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }
  BuildMI(MBB, MI, get(Opc), DestReg).addReg(SrcReg);
}

static unsigned getStoreRegOpcode(const TargetRegisterClass *RC,
                                  unsigned StackAlign) {
  unsigned Opc = 0;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64mr;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32mr;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16mr;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8mr;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_mr;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_mr;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::ST_FpP80m;   // pops
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::ST_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::ST_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSmr;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDmr;
  } else if (RC == &X86::VR128RegClass) {
    // FIXME: Use movaps once we are capable of selectively
    // aligning functions that spill SSE registers on 16-byte boundaries.
    Opc = StackAlign >= 16 ? X86::MOVAPSmr : X86::MOVUPSmr;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64mr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }

  return Opc;
}

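// Note: addFrameReference (from X86InstrBuilder.h) is assumed to append the
// standard frame-index address mode -- base = <fi#FrameIdx>, scale = 1,
// index = %noreg, displacement = 0 -- the same shape isLoadFromStackSlot and
// isStoreToStackSlot match above.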
void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned SrcReg, bool isKill, int FrameIdx,
                                       const TargetRegisterClass *RC) const {
  unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
  addFrameReference(BuildMI(MBB, MI, get(Opc)), FrameIdx)
    .addReg(SrcReg, false, false, isKill);
}

void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
  MachineInstrBuilder MIB = BuildMI(get(Opc));
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, Addr[i]);
  MIB.addReg(SrcReg, false, false, isKill);
  NewMIs.push_back(MIB);
}

static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
                                 unsigned StackAlign) {
  unsigned Opc = 0;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64rm;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32rm;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16rm;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8rm;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rm;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rm;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::LD_Fp80m;
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::LD_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::LD_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSrm;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDrm;
  } else if (RC == &X86::VR128RegClass) {
    // FIXME: Use movaps once we are capable of selectively
    // aligning functions that spill SSE registers on 16-byte boundaries.
    Opc = StackAlign >= 16 ? X86::MOVAPSrm : X86::MOVUPSrm;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rm;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }

  return Opc;
}

void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MI,
                                           unsigned DestReg, int FrameIdx,
                                           const TargetRegisterClass *RC) const{
  unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
  addFrameReference(BuildMI(MBB, MI, get(Opc), DestReg), FrameIdx);
}

void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                      SmallVectorImpl<MachineOperand> &Addr,
                                      const TargetRegisterClass *RC,
                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
  MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, Addr[i]);
  NewMIs.push_back(MIB);
}

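// Callee-saved registers are saved with plain pushes; the loop below walks
// CSI in reverse so that restoreCalleeSavedRegisters, which pops while
// walking CSI forward, is assumed to unwind them in matching LIFO order.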
bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                                MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
  unsigned SlotSize = is64Bit ? 8 : 4;

  MachineFunction &MF = *MBB.getParent();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  X86FI->setCalleeSavedFrameSize(CSI.size() * SlotSize);

  unsigned Opc = is64Bit ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    BuildMI(MBB, MI, get(Opc)).addReg(Reg);
  }
  return true;
}

bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                                 MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();

  unsigned Opc = is64Bit ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    BuildMI(MBB, MI, get(Opc), Reg);
  }
  return true;
}

bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case X86::TCRETURNri:
  case X86::TCRETURNdi:
  case X86::RET:     // Return.
  case X86::RETI:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::JMP:     // Uncond branch.
  case X86::JMP32r:  // Indirect branch.
  case X86::JMP64r:  // Indirect branch (64-bit).
  case X86::JMP32m:  // Indirect branch through mem.
  case X86::JMP64m:  // Indirect branch through mem (64-bit).
    return true;
  default: return false;
  }
}

bool X86InstrInfo::
ReverseBranchCondition(std::vector<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  Cond[0].setImm(GetOppositeBranchCondition((X86::CondCode)Cond[0].getImm()));
  return false;
}

const TargetRegisterClass *X86InstrInfo::getPointerRegClass() const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  if (Subtarget->is64Bit())
    return &X86::GR64RegClass;
  else
    return &X86::GR32RegClass;
}
1041