X86ISelDAGToDAG.cpp revision 317848f4a11f7fe55afdd6d90ded8444069b56fb
1//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file was developed by Evan Cheng and is distributed under
6// the University of Illinois Open Source License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines a DAG pattern matching instruction selector for X86,
11// converting from a legalized dag to a X86 dag.
12//
13//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "x86-isel"
16#include "X86.h"
17#include "X86InstrBuilder.h"
18#include "X86ISelLowering.h"
19#include "X86RegisterInfo.h"
20#include "X86Subtarget.h"
21#include "X86TargetMachine.h"
22#include "llvm/GlobalValue.h"
23#include "llvm/Instructions.h"
24#include "llvm/Intrinsics.h"
25#include "llvm/Support/CFG.h"
26#include "llvm/CodeGen/MachineConstantPool.h"
27#include "llvm/CodeGen/MachineFunction.h"
28#include "llvm/CodeGen/MachineFrameInfo.h"
29#include "llvm/CodeGen/MachineInstrBuilder.h"
30#include "llvm/CodeGen/SSARegMap.h"
31#include "llvm/CodeGen/SelectionDAGISel.h"
32#include "llvm/Target/TargetMachine.h"
33#include "llvm/Support/Compiler.h"
34#include "llvm/Support/Debug.h"
35#include "llvm/Support/MathExtras.h"
36#include "llvm/ADT/Statistic.h"
37#include <queue>
38#include <set>
39using namespace llvm;
40
41STATISTIC(NumFPKill   , "Number of FP_REG_KILL instructions added");
42STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
43
44
45//===----------------------------------------------------------------------===//
46//                      Pattern Matcher Implementation
47//===----------------------------------------------------------------------===//
48
49namespace {
50  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
51  /// SDOperands instead of register numbers for the leaves of the matched
52  /// tree.
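  ///
  /// An x86 memory operand decomposes as Base + Scale*Index + Disp, with at
  /// most one of GV/CP/ES/JT supplying a symbolic displacement. For example,
  /// "movl 4(%esi,%ecx,8), %eax" uses Base.Reg = %esi, Scale = 8,
  /// IndexReg = %ecx, Disp = 4.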
53  struct X86ISelAddressMode {
54    enum {
55      RegBase,
56      FrameIndexBase
57    } BaseType;
58
59    struct {            // This is really a union, discriminated by BaseType!
60      SDOperand Reg;
61      int FrameIndex;
62    } Base;
63
64    bool isRIPRel;     // RIP relative?
65    unsigned Scale;
66    SDOperand IndexReg;
67    unsigned Disp;
68    GlobalValue *GV;
69    Constant *CP;
70    const char *ES;
71    int JT;
72    unsigned Align;    // CP alignment.
73
74    X86ISelAddressMode()
75      : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
76        GV(0), CP(0), ES(0), JT(-1), Align(0) {
77    }
78  };
79}
80
81namespace {
82  //===--------------------------------------------------------------------===//
83  /// ISel - X86 specific code to select X86 machine instructions for
84  /// SelectionDAG operations.
85  ///
86  class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
87    /// ContainsFPCode - Every instruction we select that uses or defines a FP
88    /// register should set this to true.
89    bool ContainsFPCode;
90
91    /// FastISel - Enable fast(er) instruction selection.
92    ///
93    bool FastISel;
94
95    /// TM - Keep a reference to X86TargetMachine.
96    ///
97    X86TargetMachine &TM;
98
99    /// X86Lowering - This object fully describes how to lower LLVM code to an
100    /// X86-specific SelectionDAG.
101    X86TargetLowering X86Lowering;
102
103    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
104    /// make the right decision when generating code for different targets.
105    const X86Subtarget *Subtarget;
106
107    /// GlobalBaseReg - keeps track of the virtual register mapped onto the
108    /// global base register.
109    unsigned GlobalBaseReg;
110
111  public:
112    X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
113      : SelectionDAGISel(X86Lowering),
114        ContainsFPCode(false), FastISel(fast), TM(tm),
115        X86Lowering(*TM.getTargetLowering()),
116        Subtarget(&TM.getSubtarget<X86Subtarget>()) {}
117
118    virtual bool runOnFunction(Function &Fn) {
119      // Make sure we re-emit a set of the global base reg if necessary
120      GlobalBaseReg = 0;
121      return SelectionDAGISel::runOnFunction(Fn);
122    }
123
124    virtual const char *getPassName() const {
125      return "X86 DAG->DAG Instruction Selection";
126    }
127
128    /// InstructionSelectBasicBlock - This callback is invoked by
129    /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
130    virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
131
132    virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);
133
134    virtual bool CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root);
135
136// Include the pieces autogenerated from the target description.
137#include "X86GenDAGISel.inc"
138
139  private:
140    SDNode *Select(SDOperand N);
141
142    bool MatchAddress(SDOperand N, X86ISelAddressMode &AM, bool isRoot = true);
143    bool SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base,
144                    SDOperand &Scale, SDOperand &Index, SDOperand &Disp);
145    bool SelectLEAAddr(SDOperand Op, SDOperand N, SDOperand &Base,
146                       SDOperand &Scale, SDOperand &Index, SDOperand &Disp);
147    bool SelectScalarSSELoad(SDOperand Op, SDOperand Pred,
148                             SDOperand N, SDOperand &Base, SDOperand &Scale,
149                             SDOperand &Index, SDOperand &Disp,
150                             SDOperand &InChain, SDOperand &OutChain);
151    bool TryFoldLoad(SDOperand P, SDOperand N,
152                     SDOperand &Base, SDOperand &Scale,
153                     SDOperand &Index, SDOperand &Disp);
154    void InstructionSelectPreprocess(SelectionDAG &DAG);
155
156    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
157    /// inline asm expressions.
158    virtual bool SelectInlineAsmMemoryOperand(const SDOperand &Op,
159                                              char ConstraintCode,
160                                              std::vector<SDOperand> &OutOps,
161                                              SelectionDAG &DAG);
162
163    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
164
165    inline void getAddressOperands(X86ISelAddressMode &AM, SDOperand &Base,
166                                   SDOperand &Scale, SDOperand &Index,
167                                   SDOperand &Disp) {
168      Base  = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
169        CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
170        AM.Base.Reg;
171      Scale = getI8Imm(AM.Scale);
172      Index = AM.IndexReg;
173      // These are 32-bit even in 64-bit mode since RIP relative offset
174      // is 32-bit.
175      if (AM.GV)
176        Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
177      else if (AM.CP)
178        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Align, AM.Disp);
179      else if (AM.ES)
180        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
181      else if (AM.JT != -1)
182        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
183      else
184        Disp = getI32Imm(AM.Disp);
185    }
186
187    /// getI8Imm - Return a target constant with the specified value, of type
188    /// i8.
189    inline SDOperand getI8Imm(unsigned Imm) {
190      return CurDAG->getTargetConstant(Imm, MVT::i8);
191    }
192
193    /// getI16Imm - Return a target constant with the specified value, of type
194    /// i16.
195    inline SDOperand getI16Imm(unsigned Imm) {
196      return CurDAG->getTargetConstant(Imm, MVT::i16);
197    }
198
199    /// getI32Imm - Return a target constant with the specified value, of type
200    /// i32.
201    inline SDOperand getI32Imm(unsigned Imm) {
202      return CurDAG->getTargetConstant(Imm, MVT::i32);
203    }
204
205    /// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
206    /// base register.  Return the virtual register that holds this value.
207    SDNode *getGlobalBaseReg();
208
209#ifndef NDEBUG
210    unsigned Indent;
211#endif
212  };
213}
214
215static SDNode *findFlagUse(SDNode *N) {
216  unsigned FlagResNo = N->getNumValues()-1;
217  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
218    SDNode *User = *I;
219    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
220      SDOperand Op = User->getOperand(i);
221      if (Op.Val == N && Op.ResNo == FlagResNo)
222        return User;
223    }
224  }
225  return NULL;
226}
227
228static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
229                          SDNode *Root, SDNode *Skip, bool &found,
230                          std::set<SDNode *> &Visited) {
231  if (found ||
232      Use->getNodeId() > Def->getNodeId() ||
233      !Visited.insert(Use).second)
234    return;
235
236  for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
237    SDNode *N = Use->getOperand(i).Val;
238    if (N == Skip)
239      continue;
240    if (N == Def) {
241      if (Use == ImmedUse)
242        continue; // Immediate use is ok.
243      if (Use == Root) {
244        assert(Use->getOpcode() == ISD::STORE ||
245               Use->getOpcode() == X86ISD::CMP);
246        continue;
247      }
248      found = true;
249      break;
250    }
251    findNonImmUse(N, Def, ImmedUse, Root, Skip, found, Visited);
252  }
253}
254
255/// isNonImmUse - Start searching from Root up the DAG to check if Def can
256/// be reached. Return true if that's the case. However, ignore direct uses
257/// by ImmedUse (which would be U in the example illustrated in
258/// CanBeFoldedBy) and by Root (which can happen in the store case).
259/// FIXME: to be really generic, we should allow direct use by any node
260/// that is being folded. But realistically, since we only fold loads which
261/// have one non-chain use, we only need to watch out for load/op/store
262/// and load/op/cmp case where the root (store / cmp) may reach the load via
263/// its chain operand.
264static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
265                               SDNode *Skip = NULL) {
266  std::set<SDNode *> Visited;
267  bool found = false;
268  findNonImmUse(Root, Def, ImmedUse, Root, Skip, found, Visited);
269  return found;
270}
271
272
273bool X86DAGToDAGISel::CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) {
274  if (FastISel) return false;
275
276  // If U can somehow reach N through another path, then U can't fold N or
277  // it will create a cycle. E.g., in the following diagram, U can reach N
278  // through X. If N is folded into U, then X is both a predecessor and
279  // a successor of U.
280  //
281  //         [ N ]
282  //         ^  ^
283  //         |  |
284  //        /   \---
285  //      /        [X]
286  //      |         ^
287  //     [U]--------|
288
289  if (isNonImmUse(Root, N, U))
290    return false;
291
292  // If U produces a flag, then it gets (even more) interesting. Since it
293  // would have been "glued" together with its flag use, we need to check if
294  // it might reach N:
295  //
296  //       [ N ]
297  //        ^ ^
298  //        | |
299  //       [U] \--
300  //        ^   [TF]
301  //        |    ^
302  //        |    |
303  //         \  /
304  //          [FU]
305  //
306  // If FU (flag use) indirectly reaches N (the load), and U folds N (call it
307  // NU), then TF is a predecessor of FU and a successor of NU. But since
308  // NU and FU are flagged together, this effectively creates a cycle.
309  bool HasFlagUse = false;
310  MVT::ValueType VT = Root->getValueType(Root->getNumValues()-1);
311  while ((VT == MVT::Flag && !Root->use_empty())) {
312    SDNode *FU = findFlagUse(Root);
313    if (FU == NULL)
314      break;
315    else {
316      Root = FU;
317      HasFlagUse = true;
318    }
319    VT = Root->getValueType(Root->getNumValues()-1);
320  }
321
322  if (HasFlagUse)
323    return !isNonImmUse(Root, N, Root, U);
324  return true;
325}
326
327/// MoveBelowTokenFactor - Replace the TokenFactor's load operand with the load's
328/// chain operand, move the load below the TokenFactor, and replace the store's
329/// chain operand with the load's chain result.
330static void MoveBelowTokenFactor(SelectionDAG &DAG, SDOperand Load,
331                                 SDOperand Store, SDOperand TF) {
332  std::vector<SDOperand> Ops;
333  for (unsigned i = 0, e = TF.Val->getNumOperands(); i != e; ++i)
334    if (Load.Val == TF.Val->getOperand(i).Val)
335      Ops.push_back(Load.Val->getOperand(0));
336    else
337      Ops.push_back(TF.Val->getOperand(i));
338  DAG.UpdateNodeOperands(TF, &Ops[0], Ops.size());
339  DAG.UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
340  DAG.UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
341                         Store.getOperand(2), Store.getOperand(3));
342}
343
344/// InstructionSelectPreprocess - Preprocess the DAG to allow the instruction
345/// selector to pick more load-modify-store instructions. This is a common
346/// case:
347///
348///     [Load chain]
349///         ^
350///         |
351///       [Load]
352///       ^    ^
353///       |    |
354///      /      \-
355///     /         |
356/// [TokenFactor] [Op]
357///     ^          ^
358///     |          |
359///      \        /
360///       \      /
361///       [Store]
362///
363/// The fact that the store's chain operand != the load's chain will prevent the
364/// (store (op (load))) instruction from being selected. We can transform it to:
365///
366///     [Load chain]
367///         ^
368///         |
369///    [TokenFactor]
370///         ^
371///         |
372///       [Load]
373///       ^    ^
374///       |    |
375///       |     \-
376///       |       |
377///       |     [Op]
378///       |       ^
379///       |       |
380///       \      /
381///        \    /
382///       [Store]
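///
/// For example, for the IR sequence "t = load p; t2 = add t, x; store t2, p"
/// this lets the selector emit a single read-modify-write instruction such as
/// "addl %reg, (mem)" instead of separate load, add, and store instructions.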
383void X86DAGToDAGISel::InstructionSelectPreprocess(SelectionDAG &DAG) {
384  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
385         E = DAG.allnodes_end(); I != E; ++I) {
386    if (!ISD::isNON_TRUNCStore(I))
387      continue;
388    SDOperand Chain = I->getOperand(0);
389    if (Chain.Val->getOpcode() != ISD::TokenFactor)
390      continue;
391
392    SDOperand N1 = I->getOperand(1);
393    SDOperand N2 = I->getOperand(2);
394    if (MVT::isFloatingPoint(N1.getValueType()) ||
395        MVT::isVector(N1.getValueType()) ||
396        !N1.hasOneUse())
397      continue;
398
399    bool RModW = false;
400    SDOperand Load;
401    unsigned Opcode = N1.Val->getOpcode();
402    switch (Opcode) {
403      case ISD::ADD:
404      case ISD::MUL:
405      case ISD::AND:
406      case ISD::OR:
407      case ISD::XOR:
408      case ISD::ADDC:
409      case ISD::ADDE: {
410        SDOperand N10 = N1.getOperand(0);
411        SDOperand N11 = N1.getOperand(1);
412        if (ISD::isNON_EXTLoad(N10.Val))
413          RModW = true;
414        else if (ISD::isNON_EXTLoad(N11.Val)) {
415          RModW = true;
416          std::swap(N10, N11);
417        }
418        RModW = RModW && N10.Val->isOperand(Chain.Val) && N10.hasOneUse() &&
419          (N10.getOperand(1) == N2) &&
420          (N10.Val->getValueType(0) == N1.getValueType());
421        if (RModW)
422          Load = N10;
423        break;
424      }
425      case ISD::SUB:
426      case ISD::SHL:
427      case ISD::SRA:
428      case ISD::SRL:
429      case ISD::ROTL:
430      case ISD::ROTR:
431      case ISD::SUBC:
432      case ISD::SUBE:
433      case X86ISD::SHLD:
434      case X86ISD::SHRD: {
435        SDOperand N10 = N1.getOperand(0);
436        if (ISD::isNON_EXTLoad(N10.Val))
437          RModW = N10.Val->isOperand(Chain.Val) && N10.hasOneUse() &&
438            (N10.getOperand(1) == N2) &&
439            (N10.Val->getValueType(0) == N1.getValueType());
440        if (RModW)
441          Load = N10;
442        break;
443      }
444    }
445
446    if (RModW) {
447      MoveBelowTokenFactor(DAG, Load, SDOperand(I, 0), Chain);
448      ++NumLoadMoved;
449    }
450  }
451}
452
453/// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
454/// when it has created a SelectionDAG for us to codegen.
455void X86DAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
456  DEBUG(BB->dump());
457  MachineFunction::iterator FirstMBB = BB;
458
459  if (!FastISel)
460    InstructionSelectPreprocess(DAG);
461
462  // Codegen the basic block.
463#ifndef NDEBUG
464  DOUT << "===== Instruction selection begins:\n";
465  Indent = 0;
466#endif
467  DAG.setRoot(SelectRoot(DAG.getRoot()));
468#ifndef NDEBUG
469  DOUT << "===== Instruction selection ends:\n";
470#endif
471
472  DAG.RemoveDeadNodes();
473
474  // Emit machine code to BB.
475  ScheduleAndEmitDAG(DAG);
476
477  // If we are emitting FP stack code, scan the basic block to determine if this
478  // block defines any FP values.  If so, put an FP_REG_KILL instruction before
479  // the terminator of the block.
480  if (!Subtarget->hasSSE2()) {
481    // Note that FP stack instructions *are* used in SSE code when returning
482    // values, but these are not live out of the basic block, so we don't need
483    // an FP_REG_KILL in this case either.
484    bool ContainsFPCode = false;
485
486    // Scan all of the machine instructions in these MBBs, checking for FP
487    // stores.
488    MachineFunction::iterator MBBI = FirstMBB;
489    do {
490      for (MachineBasicBlock::iterator I = MBBI->begin(), E = MBBI->end();
491           !ContainsFPCode && I != E; ++I) {
492        for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
493          if (I->getOperand(op).isRegister() && I->getOperand(op).isDef() &&
494              MRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
495              RegMap->getRegClass(I->getOperand(op).getReg()) ==
496                X86::RFPRegisterClass) {
497            ContainsFPCode = true;
498            break;
499          }
500        }
501      }
502    } while (!ContainsFPCode && &*(MBBI++) != BB);
503
504    // Check PHI nodes in successor blocks.  These PHI's will be lowered to have
505    // a copy of the input value in this block.
506    if (!ContainsFPCode) {
507      // Final check, check LLVM BB's that are successors to the LLVM BB
508      // corresponding to BB for FP PHI nodes.
509      const BasicBlock *LLVMBB = BB->getBasicBlock();
510      const PHINode *PN;
511      for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
512           !ContainsFPCode && SI != E; ++SI) {
513        for (BasicBlock::const_iterator II = SI->begin();
514             (PN = dyn_cast<PHINode>(II)); ++II) {
515          if (PN->getType()->isFloatingPoint()) {
516            ContainsFPCode = true;
517            break;
518          }
519        }
520      }
521    }
522
523    // Finally, if we found any FP code, emit the FP_REG_KILL instruction.
524    if (ContainsFPCode) {
525      BuildMI(*BB, BB->getFirstTerminator(),
526              TM.getInstrInfo()->get(X86::FP_REG_KILL));
527      ++NumFPKill;
528    }
529  }
530}
531
532/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
533/// the main function.
534void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
535                                             MachineFrameInfo *MFI) {
536  const TargetInstrInfo *TII = TM.getInstrInfo();
537  if (Subtarget->isTargetCygMing())
538    BuildMI(BB, TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
539
540  // Switch the FPU to 64-bit precision mode for better compatibility and speed.
541  int CWFrameIdx = MFI->CreateStackObject(2, 2);
542  addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx);
543
544  // Set the high part to be 64-bit precision.
545  addFrameReference(BuildMI(BB, TII->get(X86::MOV8mi)),
546                    CWFrameIdx, 1).addImm(2);
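  // (Byte 1 of the saved control word holds FCW bits 8-15; writing 0x02 there
  //  sets the precision-control field for double-precision arithmetic and
  //  leaves rounding at round-to-nearest.)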
547
548  // Reload the modified control word now.
549  addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);
550}
551
552void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
553  // If this is main, emit special code for main.
554  MachineBasicBlock *BB = MF.begin();
555  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
556    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
557}
558
559/// MatchAddress - Add the specified node to the specified addressing mode,
560/// returning true if it cannot be done.  This just pattern matches for the
561/// addressing mode.
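///
/// For example, (add (shl %x, 3), (add %y, 40)) can be matched as
/// Base.Reg = %y, IndexReg = %x, Scale = 8, Disp = 40, i.e. the x86 address
/// 40(%y,%x,8).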
562bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
563                                   bool isRoot) {
564  // RIP relative addressing: %rip + 32-bit displacement!
565  if (AM.isRIPRel) {
566    if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
567      int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
568      if (isInt32(AM.Disp + Val)) {
569        AM.Disp += Val;
570        return false;
571      }
572    }
573    return true;
574  }
575
576  int id = N.Val->getNodeId();
577  bool Available = isSelected(id);
578
579  switch (N.getOpcode()) {
580  default: break;
581  case ISD::Constant: {
582    int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
583    if (isInt32(AM.Disp + Val)) {
584      AM.Disp += Val;
585      return false;
586    }
587    break;
588  }
589
590  case X86ISD::Wrapper: {
591    bool is64Bit = Subtarget->is64Bit();
592    // Under X86-64 non-small code model, GV (and friends) are 64-bits.
593    if (is64Bit && TM.getCodeModel() != CodeModel::Small)
594      break;
595    if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
596      break;
597    // If the value is not available in a register, or it is but both the base
598    // and index slots are already taken, fold the GlobalAddress or ConstantPool
599    // into the addressing mode as a displacement instead.
600    if (!Available || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
601      bool isStatic = TM.getRelocationModel() == Reloc::Static;
602      SDOperand N0 = N.getOperand(0);
603      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
604        GlobalValue *GV = G->getGlobal();
605        bool isAbs32 = !is64Bit || isStatic;
606        if (isAbs32 || isRoot) {
607          AM.GV = GV;
608          AM.Disp += G->getOffset();
609          AM.isRIPRel = !isAbs32;
610          return false;
611        }
612      } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
613        if (!is64Bit || isStatic || isRoot) {
614          AM.CP = CP->getConstVal();
615          AM.Align = CP->getAlignment();
616          AM.Disp += CP->getOffset();
617          AM.isRIPRel = !isStatic;
618          return false;
619        }
620      } else if (ExternalSymbolSDNode *S =dyn_cast<ExternalSymbolSDNode>(N0)) {
621        if (isStatic || isRoot) {
622          AM.ES = S->getSymbol();
623          AM.isRIPRel = !isStatic;
624          return false;
625        }
626      } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
627        if (isStatic || isRoot) {
628          AM.JT = J->getIndex();
629          AM.isRIPRel = !isStatic;
630          return false;
631        }
632      }
633    }
634    break;
635  }
636
637  case ISD::FrameIndex:
638    if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
639      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
640      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
641      return false;
642    }
643    break;
644
645  case ISD::SHL:
646    if (!Available && AM.IndexReg.Val == 0 && AM.Scale == 1)
647      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
648        unsigned Val = CN->getValue();
649        if (Val == 1 || Val == 2 || Val == 3) {
650          AM.Scale = 1 << Val;
651          SDOperand ShVal = N.Val->getOperand(0);
652
653          // Okay, we know that we have a scale by now.  However, if the scaled
654          // value is an add of something and a constant, we can fold the
655          // constant into the disp field here.
656          if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
657              isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
658            AM.IndexReg = ShVal.Val->getOperand(0);
659            ConstantSDNode *AddVal =
660              cast<ConstantSDNode>(ShVal.Val->getOperand(1));
661            uint64_t Disp = AM.Disp + (AddVal->getValue() << Val);
662            if (isInt32(Disp))
663              AM.Disp = Disp;
664            else
665              AM.IndexReg = ShVal;
666          } else {
667            AM.IndexReg = ShVal;
668          }
669          return false;
670        }
671      }
672    break;
673
674  case ISD::MUL:
675    // X*[3,5,9] -> X+X*[2,4,8]
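    // e.g. x*9 is matched as Base.Reg = x, IndexReg = x, Scale = 8, which can
    // later be emitted as "leal (x,x,8), dst".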
676    if (!Available &&
677        AM.BaseType == X86ISelAddressMode::RegBase &&
678        AM.Base.Reg.Val == 0 &&
679        AM.IndexReg.Val == 0)
680      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
681        if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
682          AM.Scale = unsigned(CN->getValue())-1;
683
684          SDOperand MulVal = N.Val->getOperand(0);
685          SDOperand Reg;
686
687          // Okay, we know that we have a scale by now.  However, if the scaled
688          // value is an add of something and a constant, we can fold the
689          // constant into the disp field here.
690          if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
691              isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
692            Reg = MulVal.Val->getOperand(0);
693            ConstantSDNode *AddVal =
694              cast<ConstantSDNode>(MulVal.Val->getOperand(1));
695            uint64_t Disp = AM.Disp + AddVal->getValue() * CN->getValue();
696            if (isInt32(Disp))
697              AM.Disp = Disp;
698            else
699              Reg = N.Val->getOperand(0);
700          } else {
701            Reg = N.Val->getOperand(0);
702          }
703
704          AM.IndexReg = AM.Base.Reg = Reg;
705          return false;
706        }
707    break;
708
709  case ISD::ADD: {
710    if (!Available) {
711      X86ISelAddressMode Backup = AM;
712      if (!MatchAddress(N.Val->getOperand(0), AM, false) &&
713          !MatchAddress(N.Val->getOperand(1), AM, false))
714        return false;
715      AM = Backup;
716      if (!MatchAddress(N.Val->getOperand(1), AM, false) &&
717          !MatchAddress(N.Val->getOperand(0), AM, false))
718        return false;
719      AM = Backup;
720    }
721    break;
722  }
723
724  case ISD::OR: {
725    if (!Available) {
726      X86ISelAddressMode Backup = AM;
727      // Look for (x << c1) | c2 where c2 < (1 << c1), so the OR behaves like an ADD
728      ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(0));
729      if (CN && !MatchAddress(N.Val->getOperand(1), AM, false)) {
730        if (AM.GV == NULL && AM.Disp == 0 && CN->getValue() < AM.Scale) {
731          AM.Disp = CN->getValue();
732          return false;
733        }
734      }
735      AM = Backup;
736      CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1));
737      if (CN && !MatchAddress(N.Val->getOperand(0), AM, false)) {
738        if (AM.GV == NULL && AM.Disp == 0 && CN->getValue() < AM.Scale) {
739          AM.Disp = CN->getValue();
740          return false;
741        }
742      }
743      AM = Backup;
744    }
745    break;
746  }
747  }
748
749  // Is the base register already occupied?
750  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
751    // If so, check to see if the scale index register is set.
752    if (AM.IndexReg.Val == 0) {
753      AM.IndexReg = N;
754      AM.Scale = 1;
755      return false;
756    }
757
758    // Otherwise, we cannot select it.
759    return true;
760  }
761
762  // Default, generate it as a register.
763  AM.BaseType = X86ISelAddressMode::RegBase;
764  AM.Base.Reg = N;
765  return false;
766}
767
768/// SelectAddr - Returns true if it is able to pattern match an addressing mode.
769/// The operands which make up the maximal addressing mode it can match are
770/// returned by reference.
771bool X86DAGToDAGISel::SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base,
772                                 SDOperand &Scale, SDOperand &Index,
773                                 SDOperand &Disp) {
774  X86ISelAddressMode AM;
775  if (MatchAddress(N, AM))
776    return false;
777
778  MVT::ValueType VT = N.getValueType();
779  if (AM.BaseType == X86ISelAddressMode::RegBase) {
780    if (!AM.Base.Reg.Val)
781      AM.Base.Reg = CurDAG->getRegister(0, VT);
782  }
783
784  if (!AM.IndexReg.Val)
785    AM.IndexReg = CurDAG->getRegister(0, VT);
786
787  getAddressOperands(AM, Base, Scale, Index, Disp);
788  return true;
789}
790
791/// isZeroNode - Returns true if Elt is a constant zero or a floating point
792/// constant +0.0.
793static inline bool isZeroNode(SDOperand Elt) {
794  return ((isa<ConstantSDNode>(Elt) &&
795           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
796          (isa<ConstantFPSDNode>(Elt) &&
797           cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
798}
799
800
801/// SelectScalarSSELoad - Match a scalar SSE load.  In particular, we want to
802/// match a load whose top elements are either undef or zeros.  The load flavor
803/// is derived from the type of N, which is either v4f32 or v2f64.
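///
/// For example, (v4f32 (scalar_to_vector (load addr))) can then be selected as
/// a single "movss addr, %xmm" style load, folding the memory operand into the
/// SSE instruction.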
804bool X86DAGToDAGISel::SelectScalarSSELoad(SDOperand Op, SDOperand Pred,
805                                          SDOperand N, SDOperand &Base,
806                                          SDOperand &Scale, SDOperand &Index,
807                                          SDOperand &Disp, SDOperand &InChain,
808                                          SDOperand &OutChain) {
809  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
810    InChain = N.getOperand(0).getValue(1);
811    if (ISD::isNON_EXTLoad(InChain.Val) &&
812        InChain.getValue(0).hasOneUse() &&
813        N.hasOneUse() &&
814        CanBeFoldedBy(N.Val, Pred.Val, Op.Val)) {
815      LoadSDNode *LD = cast<LoadSDNode>(InChain);
816      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
817        return false;
818      OutChain = LD->getChain();
819      return true;
820    }
821  }
822
823  // Also handle the case where we explicitly require zeros in the top
824  // elements.  This is a vector shuffle from the zero vector.
825  if (N.getOpcode() == ISD::VECTOR_SHUFFLE && N.Val->hasOneUse() &&
826      N.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
827      N.getOperand(1).getOpcode() == ISD::SCALAR_TO_VECTOR &&
828      N.getOperand(1).Val->hasOneUse() &&
829      ISD::isNON_EXTLoad(N.getOperand(1).getOperand(0).Val) &&
830      N.getOperand(1).getOperand(0).hasOneUse()) {
831    // Check to see if the BUILD_VECTOR is building a zero vector.
832    SDOperand BV = N.getOperand(0);
833    for (unsigned i = 0, e = BV.getNumOperands(); i != e; ++i)
834      if (!isZeroNode(BV.getOperand(i)) &&
835          BV.getOperand(i).getOpcode() != ISD::UNDEF)
836        return false;  // Not a zero/undef vector.
837    // Check to see if the shuffle mask is 4/L/L/L or 2/L, where L is something
838    // from the LHS.
839    unsigned VecWidth = BV.getNumOperands();
840    SDOperand ShufMask = N.getOperand(2);
841    assert(ShufMask.getOpcode() == ISD::BUILD_VECTOR && "Invalid shuf mask!");
842    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(ShufMask.getOperand(0))) {
843      if (C->getValue() == VecWidth) {
844        for (unsigned i = 1; i != VecWidth; ++i) {
845          if (ShufMask.getOperand(i).getOpcode() == ISD::UNDEF) {
846            // ok.
847          } else {
848            ConstantSDNode *C = cast<ConstantSDNode>(ShufMask.getOperand(i));
849            if (C->getValue() >= VecWidth) return false;
850          }
851        }
852      }
853
854      // Okay, this is a zero extending load.  Fold it.
855      LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(1).getOperand(0));
856      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
857        return false;
858      OutChain = LD->getChain();
859      InChain = SDOperand(LD, 1);
860      return true;
861    }
862  }
863  return false;
864}
865
866
867/// SelectLEAAddr - Determines whether the maximal addressing mode matched by
868/// MatchAddress can be cost-effectively emitted as an LEA instruction.
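/// For example, "lea 16(%ebx,%esi,4), %eax" computes %ebx + %esi*4 + 16 in a
/// single three-address instruction without clobbering the flags, which is
/// often cheaper than the equivalent shift/add sequence.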
869bool X86DAGToDAGISel::SelectLEAAddr(SDOperand Op, SDOperand N,
870                                    SDOperand &Base, SDOperand &Scale,
871                                    SDOperand &Index, SDOperand &Disp) {
872  X86ISelAddressMode AM;
873  if (MatchAddress(N, AM))
874    return false;
875
876  MVT::ValueType VT = N.getValueType();
877  unsigned Complexity = 0;
878  if (AM.BaseType == X86ISelAddressMode::RegBase)
879    if (AM.Base.Reg.Val)
880      Complexity = 1;
881    else
882      AM.Base.Reg = CurDAG->getRegister(0, VT);
883  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
884    Complexity = 4;
885
886  if (AM.IndexReg.Val)
887    Complexity++;
888  else
889    AM.IndexReg = CurDAG->getRegister(0, VT);
890
891  if (AM.Scale > 2)
892    Complexity += 2;
893  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg
894  else if (AM.Scale > 1)
895    Complexity++;
896
897  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
898  // to a LEA. This was determined with some experimentation but is by no means
899  // optimal (especially for code size considerations). LEA is nice because of
900  // its three-address nature. Tweak the cost function again when we can run
901  // convertToThreeAddress() at register allocation time.
902  if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
903    // For X86-64, we should always use lea to materialize RIP relative
904    // addresses.
905    if (Subtarget->is64Bit())
906      Complexity = 4;
907    else
908      Complexity += 2;
909  }
910
911  if (AM.Disp && (AM.Base.Reg.Val || AM.IndexReg.Val))
912    Complexity++;
913
914  if (Complexity > 2) {
915    getAddressOperands(AM, Base, Scale, Index, Disp);
916    return true;
917  }
918  return false;
919}
920
921bool X86DAGToDAGISel::TryFoldLoad(SDOperand P, SDOperand N,
922                                  SDOperand &Base, SDOperand &Scale,
923                                  SDOperand &Index, SDOperand &Disp) {
924  if (ISD::isNON_EXTLoad(N.Val) &&
925      N.hasOneUse() &&
926      CanBeFoldedBy(N.Val, P.Val, P.Val))
927    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp);
928  return false;
929}
930
931/// getGlobalBaseReg - Output the instructions required to put the
932/// base address to use for accessing globals into a register.
933///
934SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
935  assert(!Subtarget->is64Bit() && "X86-64 PIC uses RIP relative addressing");
936  if (!GlobalBaseReg) {
937    // Insert the set of GlobalBaseReg into the first MBB of the function
938    MachineBasicBlock &FirstMBB = BB->getParent()->front();
939    MachineBasicBlock::iterator MBBI = FirstMBB.begin();
940    SSARegMap *RegMap = BB->getParent()->getSSARegMap();
941    GlobalBaseReg = RegMap->createVirtualRegister(X86::GR32RegisterClass);
942    const TargetInstrInfo *TII = TM.getInstrInfo();
943    BuildMI(FirstMBB, MBBI, TII->get(X86::MovePCtoStack));
944    BuildMI(FirstMBB, MBBI, TII->get(X86::POP32r), GlobalBaseReg);
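    // Together these pseudos expand to the usual 32-bit PIC base sequence:
    //   call  .Lnext_pc
    // .Lnext_pc:
    //   popl  %basereg
    // leaving the address of .Lnext_pc (the PIC base) in GlobalBaseReg.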
945  }
946  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).Val;
947}
948
949static SDNode *FindCallStartFromCall(SDNode *Node) {
950  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
951  assert(Node->getOperand(0).getValueType() == MVT::Other &&
952         "Node doesn't have a token chain argument!");
953  return FindCallStartFromCall(Node->getOperand(0).Val);
954}
955
956SDNode *X86DAGToDAGISel::Select(SDOperand N) {
957  SDNode *Node = N.Val;
958  MVT::ValueType NVT = Node->getValueType(0);
959  unsigned Opc, MOpc;
960  unsigned Opcode = Node->getOpcode();
961
962#ifndef NDEBUG
963  DOUT << std::string(Indent, ' ') << "Selecting: ";
964  DEBUG(Node->dump(CurDAG));
965  DOUT << "\n";
966  Indent += 2;
967#endif
968
969  if (Opcode >= ISD::BUILTIN_OP_END && Opcode < X86ISD::FIRST_NUMBER) {
970#ifndef NDEBUG
971    DOUT << std::string(Indent-2, ' ') << "== ";
972    DEBUG(Node->dump(CurDAG));
973    DOUT << "\n";
974    Indent -= 2;
975#endif
976    return NULL;   // Already selected.
977  }
978
979  switch (Opcode) {
980    default: break;
981    case X86ISD::GlobalBaseReg:
982      return getGlobalBaseReg();
983
984    case ISD::ADD: {
985      // Turn ADD X, c to MOV32ri X+c. This cannot be done with tblgen'd
986      // code and is matched first to prevent it from being turned into
987      // LEA32r X+c.
988      // In 64-bit mode, use LEA to take advantage of RIP-relative addressing.
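      // For example, (add (X86Wrapper @g), 4) can be selected here as
      // "movl $g+4, %reg" rather than as an address computation plus an add.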
989      MVT::ValueType PtrVT = TLI.getPointerTy();
990      SDOperand N0 = N.getOperand(0);
991      SDOperand N1 = N.getOperand(1);
992      if (N.Val->getValueType(0) == PtrVT &&
993          N0.getOpcode() == X86ISD::Wrapper &&
994          N1.getOpcode() == ISD::Constant) {
995        unsigned Offset = (unsigned)cast<ConstantSDNode>(N1)->getValue();
996        SDOperand C(0, 0);
997        // TODO: handle ExternalSymbolSDNode.
998        if (GlobalAddressSDNode *G =
999            dyn_cast<GlobalAddressSDNode>(N0.getOperand(0))) {
1000          C = CurDAG->getTargetGlobalAddress(G->getGlobal(), PtrVT,
1001                                             G->getOffset() + Offset);
1002        } else if (ConstantPoolSDNode *CP =
1003                   dyn_cast<ConstantPoolSDNode>(N0.getOperand(0))) {
1004          C = CurDAG->getTargetConstantPool(CP->getConstVal(), PtrVT,
1005                                            CP->getAlignment(),
1006                                            CP->getOffset()+Offset);
1007        }
1008
1009        if (C.Val) {
1010          if (Subtarget->is64Bit()) {
1011            SDOperand Ops[] = { CurDAG->getRegister(0, PtrVT), getI8Imm(1),
1012                                CurDAG->getRegister(0, PtrVT), C };
1013            return CurDAG->SelectNodeTo(N.Val, X86::LEA64r, MVT::i64, Ops, 4);
1014          } else
1015            return CurDAG->SelectNodeTo(N.Val, X86::MOV32ri, PtrVT, C);
1016        }
1017      }
1018
1019      // Other cases are handled by auto-generated code.
1020      break;
1021    }
1022
1023    case ISD::MULHU:
1024    case ISD::MULHS: {
1025      if (Opcode == ISD::MULHU)
1026        switch (NVT) {
1027        default: assert(0 && "Unsupported VT!");
1028        case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
1029        case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
1030        case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
1031        case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
1032        }
1033      else
1034        switch (NVT) {
1035        default: assert(0 && "Unsupported VT!");
1036        case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
1037        case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
1038        case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
1039        case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
1040        }
1041
1042      unsigned LoReg, HiReg;
1043      switch (NVT) {
1044      default: assert(0 && "Unsupported VT!");
1045      case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
1046      case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
1047      case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
1048      case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
1049      }
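      // The one-operand MUL/IMUL implicitly multiplies by LoReg (AL/AX/EAX/RAX)
      // and writes the double-width product to AH / DX:AX / EDX:EAX / RDX:RAX.
      // MULHU/MULHS only needs the high half, which is copied out of HiReg below.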
1050
1051      SDOperand N0 = Node->getOperand(0);
1052      SDOperand N1 = Node->getOperand(1);
1053
1054      bool foldedLoad = false;
1055      SDOperand Tmp0, Tmp1, Tmp2, Tmp3;
1056      foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
1057      // MULHU and MULHS are commutative
1058      if (!foldedLoad) {
1059        foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3);
1060        if (foldedLoad) {
1061          N0 = Node->getOperand(1);
1062          N1 = Node->getOperand(0);
1063        }
1064      }
1065
1066      SDOperand Chain;
1067      if (foldedLoad) {
1068        Chain = N1.getOperand(0);
1069        AddToISelQueue(Chain);
1070      } else
1071        Chain = CurDAG->getEntryNode();
1072
1073      SDOperand InFlag(0, 0);
1074      AddToISelQueue(N0);
1075      Chain  = CurDAG->getCopyToReg(Chain, CurDAG->getRegister(LoReg, NVT),
1076                                    N0, InFlag);
1077      InFlag = Chain.getValue(1);
1078
1079      if (foldedLoad) {
1080        AddToISelQueue(Tmp0);
1081        AddToISelQueue(Tmp1);
1082        AddToISelQueue(Tmp2);
1083        AddToISelQueue(Tmp3);
1084        SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Chain, InFlag };
1085        SDNode *CNode =
1086          CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
1087        Chain  = SDOperand(CNode, 0);
1088        InFlag = SDOperand(CNode, 1);
1089      } else {
1090        AddToISelQueue(N1);
1091        InFlag =
1092          SDOperand(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
1093      }
1094
1095      SDOperand Result = CurDAG->getCopyFromReg(Chain, HiReg, NVT, InFlag);
1096      ReplaceUses(N.getValue(0), Result);
1097      if (foldedLoad)
1098        ReplaceUses(N1.getValue(1), Result.getValue(1));
1099
1100#ifndef NDEBUG
1101      DOUT << std::string(Indent-2, ' ') << "=> ";
1102      DEBUG(Result.Val->dump(CurDAG));
1103      DOUT << "\n";
1104      Indent -= 2;
1105#endif
1106      return NULL;
1107    }
1108
1109    case ISD::SDIV:
1110    case ISD::UDIV:
1111    case ISD::SREM:
1112    case ISD::UREM: {
1113      bool isSigned = Opcode == ISD::SDIV || Opcode == ISD::SREM;
1114      bool isDiv    = Opcode == ISD::SDIV || Opcode == ISD::UDIV;
1115      if (!isSigned)
1116        switch (NVT) {
1117        default: assert(0 && "Unsupported VT!");
1118        case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
1119        case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
1120        case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
1121        case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
1122        }
1123      else
1124        switch (NVT) {
1125        default: assert(0 && "Unsupported VT!");
1126        case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
1127        case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
1128        case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
1129        case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
1130        }
1131
1132      unsigned LoReg, HiReg;
1133      unsigned ClrOpcode, SExtOpcode;
1134      switch (NVT) {
1135      default: assert(0 && "Unsupported VT!");
1136      case MVT::i8:
1137        LoReg = X86::AL;  HiReg = X86::AH;
1138        ClrOpcode  = 0;
1139        SExtOpcode = X86::CBW;
1140        break;
1141      case MVT::i16:
1142        LoReg = X86::AX;  HiReg = X86::DX;
1143        ClrOpcode  = X86::MOV16r0;
1144        SExtOpcode = X86::CWD;
1145        break;
1146      case MVT::i32:
1147        LoReg = X86::EAX; HiReg = X86::EDX;
1148        ClrOpcode  = X86::MOV32r0;
1149        SExtOpcode = X86::CDQ;
1150        break;
1151      case MVT::i64:
1152        LoReg = X86::RAX; HiReg = X86::RDX;
1153        ClrOpcode  = X86::MOV64r0;
1154        SExtOpcode = X86::CQO;
1155        break;
1156      }
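      // DIV/IDIV divide the double-width value in HiReg:LoReg (e.g. EDX:EAX for
      // i32) by the operand, leaving the quotient in LoReg and the remainder in
      // HiReg, so both halves of the dividend are set up first.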
1157
1158      SDOperand N0 = Node->getOperand(0);
1159      SDOperand N1 = Node->getOperand(1);
1160      SDOperand InFlag(0, 0);
1161      if (NVT == MVT::i8 && !isSigned) {
1162        // Special case for div8, just use a move with zero extension to AX to
1163        // clear the upper 8 bits (AH).
1164        SDOperand Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain;
1165        if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) {
1166          SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) };
1167          AddToISelQueue(N0.getOperand(0));
1168          AddToISelQueue(Tmp0);
1169          AddToISelQueue(Tmp1);
1170          AddToISelQueue(Tmp2);
1171          AddToISelQueue(Tmp3);
1172          Move =
1173            SDOperand(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other,
1174                                            Ops, 5), 0);
1175          Chain = Move.getValue(1);
1176          ReplaceUses(N0.getValue(1), Chain);
1177        } else {
1178          AddToISelQueue(N0);
1179          Move =
1180            SDOperand(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0);
1181          Chain = CurDAG->getEntryNode();
1182        }
1183        Chain  = CurDAG->getCopyToReg(Chain, X86::AX, Move, InFlag);
1184        InFlag = Chain.getValue(1);
1185      } else {
1186        AddToISelQueue(N0);
1187        InFlag =
1188          CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg, N0,
1189                               InFlag).getValue(1);
1190        if (isSigned) {
1191          // Sign extend the low part into the high part.
1192          InFlag =
1193            SDOperand(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0);
1194        } else {
1195          // Zero out the high part, effectively zero extending the input.
1196          SDOperand ClrNode = SDOperand(CurDAG->getTargetNode(ClrOpcode, NVT), 0);
1197          InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), HiReg, ClrNode,
1198                                        InFlag).getValue(1);
1199        }
1200      }
1201
1202      SDOperand Tmp0, Tmp1, Tmp2, Tmp3, Chain;
1203      bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
1204      if (foldedLoad) {
1205        AddToISelQueue(N1.getOperand(0));
1206        AddToISelQueue(Tmp0);
1207        AddToISelQueue(Tmp1);
1208        AddToISelQueue(Tmp2);
1209        AddToISelQueue(Tmp3);
1210        SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
1211        SDNode *CNode =
1212          CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
1213        Chain  = SDOperand(CNode, 0);
1214        InFlag = SDOperand(CNode, 1);
1215      } else {
1216        AddToISelQueue(N1);
1217        Chain = CurDAG->getEntryNode();
1218        InFlag =
1219          SDOperand(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
1220      }
1221
1222      SDOperand Result =
1223        CurDAG->getCopyFromReg(Chain, isDiv ? LoReg : HiReg, NVT, InFlag);
1224      ReplaceUses(N.getValue(0), Result);
1225      if (foldedLoad)
1226        ReplaceUses(N1.getValue(1), Result.getValue(1));
1227
1228#ifndef NDEBUG
1229      DOUT << std::string(Indent-2, ' ') << "=> ";
1230      DEBUG(Result.Val->dump(CurDAG));
1231      DOUT << "\n";
1232      Indent -= 2;
1233#endif
1234
1235      return NULL;
1236    }
1237
1238    case ISD::TRUNCATE: {
1239      if (!Subtarget->is64Bit() && NVT == MVT::i8) {
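        // In 32-bit mode only EAX, EBX, ECX, and EDX have an addressable low
        // 8-bit subregister, so first copy the value into one of those register
        // classes (the MOV*to*_ pseudos below) before taking the 8-bit truncation.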
1240        unsigned Opc2;
1241        MVT::ValueType VT;
1242        switch (Node->getOperand(0).getValueType()) {
1243        default: assert(0 && "Unknown truncate!");
1244        case MVT::i16:
1245          Opc = X86::MOV16to16_;
1246          VT = MVT::i16;
1247          Opc2 = X86::TRUNC_16_to8;
1248          break;
1249        case MVT::i32:
1250          Opc = X86::MOV32to32_;
1251          VT = MVT::i32;
1252          Opc2 = X86::TRUNC_32_to8;
1253          break;
1254        }
1255
1256        AddToISelQueue(Node->getOperand(0));
1257        SDOperand Tmp =
1258          SDOperand(CurDAG->getTargetNode(Opc, VT, Node->getOperand(0)), 0);
1259        SDNode *ResNode = CurDAG->getTargetNode(Opc2, NVT, Tmp);
1260
1261#ifndef NDEBUG
1262        DOUT << std::string(Indent-2, ' ') << "=> ";
1263        DEBUG(ResNode->dump(CurDAG));
1264        DOUT << "\n";
1265        Indent -= 2;
1266#endif
1267        return ResNode;
1268      }
1269
1270      break;
1271    }
1272  }
1273
1274  SDNode *ResNode = SelectCode(N);
1275
1276#ifndef NDEBUG
1277  DOUT << std::string(Indent-2, ' ') << "=> ";
1278  if (ResNode == NULL || ResNode == N.Val)
1279    DEBUG(N.Val->dump(CurDAG));
1280  else
1281    DEBUG(ResNode->dump(CurDAG));
1282  DOUT << "\n";
1283  Indent -= 2;
1284#endif
1285
1286  return ResNode;
1287}
1288
1289bool X86DAGToDAGISel::
1290SelectInlineAsmMemoryOperand(const SDOperand &Op, char ConstraintCode,
1291                             std::vector<SDOperand> &OutOps, SelectionDAG &DAG){
1292  SDOperand Op0, Op1, Op2, Op3;
1293  switch (ConstraintCode) {
1294  case 'o':   // offsetable        ??
1295  case 'v':   // not offsetable    ??
1296  default: return true;
1297  case 'm':   // memory
1298    if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3))
1299      return true;
1300    break;
1301  }
1302
1303  OutOps.push_back(Op0);
1304  OutOps.push_back(Op1);
1305  OutOps.push_back(Op2);
1306  OutOps.push_back(Op3);
1307  AddToISelQueue(Op0);
1308  AddToISelQueue(Op1);
1309  AddToISelQueue(Op2);
1310  AddToISelQueue(Op3);
1311  return false;
1312}
1313
1314/// createX86ISelDag - This pass converts a legalized DAG into a
1315/// X86-specific DAG, ready for instruction scheduling.
1316///
1317FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
1318  return new X86DAGToDAGISel(TM, Fast);
1319}
1320