SelectionDAGISel.cpp revision 9b6fb5de49f30d03b3e3f2fcb99e777b3149b783
//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include <map>
#include <set>
#include <iostream>
using namespace llvm;

#ifndef NDEBUG
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
          cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
          cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewISelDAGs = 0;
static const bool ViewSchedDAGs = 0;
#endif

namespace {
  cl::opt<SchedHeuristics>
  ISHeuristic(
    "sched",
    cl::desc("Choose scheduling style"),
    cl::init(defaultScheduling),
    cl::values(
      clEnumValN(defaultScheduling, "default",
                 "Target preferred scheduling style"),
      clEnumValN(noScheduling, "none",
                 "No scheduling: breadth first sequencing"),
      clEnumValN(simpleScheduling, "simple",
                 "Simple two pass scheduling: minimize critical path "
                 "and maximize processor utilization"),
      clEnumValN(simpleNoItinScheduling, "simple-noitin",
                 "Simple two pass scheduling: Same as simple "
                 "except using generic latency"),
      clEnumValN(listSchedulingBURR, "list-burr",
                 "Bottom up register reduction list scheduling"),
      clEnumValEnd));
} // namespace

namespace {
  /// RegsForValue - This struct represents the physical registers that a
  /// particular value is assigned and the type information about the value.
  /// This is needed because values can be promoted into larger registers and
  /// expanded into multiple registers smaller than the value.
  struct RegsForValue {
    /// Regs - This list holds the register (for legal and promoted values)
    /// or register set (for expanded values) that the value should be assigned
    /// to.
    std::vector<unsigned> Regs;

    /// RegVT - The value type of each register.
    ///
    MVT::ValueType RegVT;

    /// ValueVT - The value type of the LLVM value, which may be promoted from
    /// RegVT or made from merging the two expanded parts.
    MVT::ValueType ValueVT;

    RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}

    RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
      : RegVT(regvt), ValueVT(valuevt) {
        Regs.push_back(Reg);
    }
    RegsForValue(const std::vector<unsigned> &regs,
                 MVT::ValueType regvt, MVT::ValueType valuevt)
      : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
    }

    /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copy from
    /// this value and returns the result as a ValueVT value.  This uses
    /// Chain/Flag as the input and updates them for the output Chain/Flag.
    SDOperand getCopyFromRegs(SelectionDAG &DAG,
                              SDOperand &Chain, SDOperand &Flag);
  };
}

namespace llvm {
  //===--------------------------------------------------------------------===//
  /// FunctionLoweringInfo - This contains information that is global to a
  /// function that is used when lowering a region of the function.
  class FunctionLoweringInfo {
  public:
    TargetLowering &TLI;
    Function &Fn;
    MachineFunction &MF;
    SSARegMap *RegMap;

    FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF);

    /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
    std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

    /// ValueMap - Since we emit code for the function a basic block at a time,
    /// we must remember which virtual registers hold the values for
    /// cross-basic-block values.
    std::map<const Value*, unsigned> ValueMap;

    /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
    /// the entry block.  This allows the allocas to be efficiently referenced
    /// anywhere in the function.
    std::map<const AllocaInst*, int> StaticAllocaMap;

    unsigned MakeReg(MVT::ValueType VT) {
      return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
    }

    unsigned CreateRegForValue(const Value *V) {
      MVT::ValueType VT = TLI.getValueType(V->getType());
      // The common case is that we will only create one register for this
      // value.  If we have that case, create and return the virtual register.
      unsigned NV = TLI.getNumElements(VT);
      if (NV == 1) {
        // If we are promoting this value, pick the next largest supported type.
        return MakeReg(TLI.getTypeToTransformTo(VT));
      }

      // If this value is represented with multiple target registers, make sure
      // to create enough consecutive registers of the right (smaller) type.
      unsigned NT = VT-1;  // Find the type to use.
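      // Note: this walks backwards down the MVT::ValueType enumeration until
      // it reaches a simple type that fits in a single target register; the
      // value's NV expanded parts then occupy consecutive vregs of that type.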
      while (TLI.getNumElements((MVT::ValueType)NT) != 1)
        --NT;

      unsigned R = MakeReg((MVT::ValueType)NT);
      for (unsigned i = 1; i != NV; ++i)
        MakeReg((MVT::ValueType)NT);
      return R;
    }

    unsigned InitializeRegForValue(const Value *V) {
      unsigned &R = ValueMap[V];
      assert(R == 0 && "Already initialized this value register!");
      return R = CreateRegForValue(V);
    }
  };
}

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true.
static bool isOnlyUsedInEntryBlock(Argument *A) {
  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry)
      return false;  // Use not in entry block.
  return true;
}

FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
    : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn.begin(), EB = Fn.end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
                   AI->getAlignment());

        // If the alignment of the value is smaller than the size of the value,
        // and if the size of the value is particularly small (<= 8 bytes),
        // round up to the size of the value for potentially better performance.
        //
        // FIXME: This could be made better with a preferred alignment hook in
        // TargetData.  It serves primarily to 8-byte align doubles for X86.
        if (Align < TySize && TySize <= 8) Align = TySize;
        TySize *= CUI->getValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        unsigned NumElements =
          TLI.getNumElements(TLI.getValueType(PN->getType()));
        unsigned PHIReg = ValueMap[PN];
        assert(PHIReg &&"PHI node does not have an assigned virtual register!");
        for (unsigned i = 0; i != NumElements; ++i)
          BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
      }
  }
}



//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
namespace llvm {
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

  /// PendingLoads - Loads are not emitted to the program immediately.  We bunch
  /// them up and then emit token factor nodes when possible.  This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
  std::vector<SDOperand> PendingLoads;

public:
  // TLI - This is information that describes the available target features we
  // need for lowering.  This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData &TD;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
      FuncInfo(funcinfo) {
  }

  /// getRoot - Return the current virtual root of the Selection DAG.
  ///
  SDOperand getRoot() {
    if (PendingLoads.empty())
      return DAG.getRoot();

    if (PendingLoads.size() == 1) {
      SDOperand Root = PendingLoads[0];
      DAG.setRoot(Root);
      PendingLoads.clear();
      return Root;
    }

    // Otherwise, we have to make a token factor node.
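    // A TokenFactor takes multiple chain operands and produces a single chain
    // that is "later" than all of them, without ordering them against each
    // other; this is what lets the pending loads stay unordered.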
    SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, PendingLoads);
    PendingLoads.clear();
    DAG.setRoot(Root);
    return Root;
  }

  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
    }
  }

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }


  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());
  }

  SDOperand getValue(const Value *V) {
    SDOperand &N = NodeMap[V];
    if (N.Val) return N;

    const Type *VTy = V->getType();
    MVT::ValueType VT = TLI.getValueType(VTy);
    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V)))
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
        visit(CE->getOpcode(), *CE);
        assert(N.Val && "visit didn't populate the ValueMap!");
        return N;
      } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
        return N = DAG.getGlobalAddress(GV, VT);
      } else if (isa<ConstantPointerNull>(C)) {
        return N = DAG.getConstant(0, TLI.getPointerTy());
      } else if (isa<UndefValue>(C)) {
        return N = DAG.getNode(ISD::UNDEF, VT);
      } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
        return N = DAG.getConstantFP(CFP->getValue(), VT);
      } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
        unsigned NumElements = PTy->getNumElements();
        MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
        MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

        // Now that we know the number and type of the elements, push a
        // Constant or ConstantFP node onto the ops list for each element of
        // the packed constant.
        std::vector<SDOperand> Ops;
        if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
          if (MVT::isFloatingPoint(PVT)) {
            for (unsigned i = 0; i != NumElements; ++i) {
              const ConstantFP *El = cast<ConstantFP>(CP->getOperand(i));
              Ops.push_back(DAG.getConstantFP(El->getValue(), PVT));
            }
          } else {
            for (unsigned i = 0; i != NumElements; ++i) {
              const ConstantIntegral *El =
                cast<ConstantIntegral>(CP->getOperand(i));
              Ops.push_back(DAG.getConstant(El->getRawValue(), PVT));
            }
          }
        } else {
          assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
          SDOperand Op;
          if (MVT::isFloatingPoint(PVT))
            Op = DAG.getConstantFP(0, PVT);
          else
            Op = DAG.getConstant(0, PVT);
          Ops.assign(NumElements, Op);
        }

        // Handle the case where we have a 1-element vector, in which
        // case we want to immediately turn it into a scalar constant.
        if (Ops.size() == 1) {
          return N = Ops[0];
        } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
          return N = DAG.getNode(ISD::ConstantVec, TVT, Ops);
        } else {
          // If the packed type isn't legal, then create a ConstantVec node with
          // generic Vector type instead.
          return N = DAG.getNode(ISD::ConstantVec, MVT::Vector, Ops);
        }
      } else {
        // Canonicalize all constant ints to be unsigned.
        return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
      }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
      std::map<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
    }

    std::map<const Value*, unsigned>::const_iterator VMI =
      FuncInfo.ValueMap.find(V);
    assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");

    unsigned InReg = VMI->second;

    // If this type is not legal, make it so now.
    MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);

    N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
    if (DestVT < VT) {
      // Source must be expanded.  This input value is actually coming from the
      // register pair VMI->second and VMI->second+1.
      N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
                      DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
    } else {
      if (DestVT > VT) { // Promotion case
        if (MVT::isFloatingPoint(VT))
          N = DAG.getNode(ISD::FP_ROUND, VT, N);
        else
          N = DAG.getNode(ISD::TRUNCATE, VT, N);
      }
    }

    return N;
  }

  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    return N = NewN;
  }

  RegsForValue GetRegistersForValue(const std::string &ConstrCode,
                                    MVT::ValueType VT,
                                    bool OutReg, bool InReg,
                                    std::set<unsigned> &OutputRegs,
                                    std::set<unsigned> &InputRegs);

  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // These all get lowered before this pass.
  void visitExtractElement(ExtractElementInst &I) { assert(0 && "TODO"); }
  void visitInsertElement(InsertElementInst &I) { assert(0 && "TODO"); }
  void visitSwitch(SwitchInst &I) { assert(0 && "TODO"); }
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  //
  void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
  void visitShift(User &I, unsigned Opcode);
  void visitAdd(User &I) {
    visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD);
  }
  void visitSub(User &I);
  void visitMul(User &I) {
    visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL);
  }
  void visitDiv(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV, 0);
  }
  void visitRem(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
  }
  void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, 0); }
  void visitOr (User &I) { visitBinary(I, ISD::OR,  0, 0); }
  void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, 0); }
  void visitShl(User &I) { visitShift(I, ISD::SHL); }
  void visitShr(User &I) {
    visitShift(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
  }

  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc);
  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); }
  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); }
  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); }
  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); }
  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }

  void visitGetElementPtr(User &I);
  void visitCast(User &I);
  void visitSelect(User &I);
  //

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);
  void visitInlineAsm(CallInst &I);
  const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);

  void visitVAStart(CallInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);

  void visitMemIntrinsic(CallInst &I, unsigned Op);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
    abort();
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
    abort();
  }
};
} // end namespace llvm

void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
    return;
  }
  std::vector<SDOperand> NewValues;
  NewValues.push_back(getRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SDOperand RetOp = getValue(I.getOperand(i));

    // If this is an integer return value, we need to promote it ourselves to
    // the full width of a register, since LegalizeOp will use ANY_EXTEND rather
    // than sign/zero.
    if (MVT::isInteger(RetOp.getValueType()) &&
        RetOp.getValueType() < MVT::i64) {
      MVT::ValueType TmpVT;
      if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
        TmpVT = TLI.getTypeToTransformTo(MVT::i32);
      else
        TmpVT = MVT::i32;

      if (I.getOperand(i)->getType()->isSigned())
        RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp);
      else
        RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp);
    }
    NewValues.push_back(RetOp);
  }
  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, NewValues));
}

void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
  } else {
    MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

    SDOperand Cond = getValue(I.getCondition());
    if (Succ1MBB == NextBlock) {
      // If the condition is false, fall through.  This means we should branch
      // if the condition is true to Succ #0.
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ0MBB)));
    } else if (Succ0MBB == NextBlock) {
      // If the condition is true, fall through.  This means we should branch if
      // the condition is false to Succ #1.  Invert the condition first.
      SDOperand True = DAG.getConstant(1, Cond.getValueType());
      Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ1MBB)));
    } else {
      std::vector<SDOperand> Ops;
      Ops.push_back(getRoot());
      // If the false case is the current basic block, then this is a self
      // loop. We do not want to emit "Loop: ... brcond Out; br Loop", as it
      // adds an extra instruction in the loop.  Instead, invert the
      // condition and emit "Loop: ... br!cond Loop; br Out".
      if (CurMBB == Succ1MBB) {
        std::swap(Succ0MBB, Succ1MBB);
        SDOperand True = DAG.getConstant(1, Cond.getValueType());
        Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
      }
      Ops.push_back(Cond);
      Ops.push_back(DAG.getBasicBlock(Succ0MBB));
      Ops.push_back(DAG.getBasicBlock(Succ1MBB));
      DAG.setRoot(DAG.getNode(ISD::BRCONDTWOWAY, MVT::Other, Ops));
    }
  }
}

void SelectionDAGLowering::visitSub(User &I) {
  // -0.0 - X --> fneg
  if (I.getType()->isFloatingPoint()) {
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
      if (CFP->isExactlyValue(-0.0)) {
        SDOperand Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
        return;
      }
  }
  visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB);
}

void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp,
                                       unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (Ty->isIntegral()) {
    setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
  } else if (Ty->isFloatingPoint()) {
    setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
  } else {
    const PackedType *PTy = cast<PackedType>(Ty);
    unsigned NumElements = PTy->getNumElements();
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

    // Immediately scalarize packed types containing only one element, so that
    // the Legalize pass does not have to deal with them.  Similarly, if the
    // abstract vector is going to turn into one that the target natively
    // supports, generate that type now so that Legalize doesn't have to deal
    // with that either.  These steps ensure that Legalize only has to handle
    // vector types in its Expand case.
    unsigned Opc = MVT::isFloatingPoint(PVT) ? FPOp : IntOp;
    if (NumElements == 1) {
      setValue(&I, DAG.getNode(Opc, PVT, Op1, Op2));
    } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
      setValue(&I, DAG.getNode(Opc, TVT, Op1, Op2));
    } else {
      SDOperand Num = DAG.getConstant(NumElements, MVT::i32);
      SDOperand Typ = DAG.getValueType(PVT);
      setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
    }
  }
}

void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);

  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}

void SelectionDAGLowering::visitSetCC(User &I,ISD::CondCode SignedOpcode,
                                      ISD::CondCode UnsignedOpcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = SignedOpcode;
  if (I.getOperand(0)->getType()->isUnsigned())
    Opcode = UnsignedOpcode;
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
}

void SelectionDAGLowering::visitSelect(User &I) {
  SDOperand Cond     = getValue(I.getOperand(0));
  SDOperand TrueVal  = getValue(I.getOperand(1));
  SDOperand FalseVal = getValue(I.getOperand(2));
  setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                           TrueVal, FalseVal));
}

void SelectionDAGLowering::visitCast(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcTy = TLI.getValueType(I.getOperand(0)->getType());
  MVT::ValueType DestTy = TLI.getValueType(I.getType());

  if (N.getValueType() == DestTy) {
    setValue(&I, N);  // noop cast.
  } else if (DestTy == MVT::i1) {
    // Cast to bool is a comparison against zero, not truncation to zero.
    SDOperand Zero = isInteger(SrcTy) ? DAG.getConstant(0, N.getValueType()) :
                                       DAG.getConstantFP(0.0, N.getValueType());
    setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
  } else if (isInteger(SrcTy)) {
    if (isInteger(DestTy)) {        // Int -> Int cast
      if (DestTy < SrcTy)   // Truncating cast?
        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestTy, N));
      else if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestTy, N));
    } else {                        // Int -> FP cast
      if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestTy, N));
    }
  } else {
    assert(isFloatingPoint(SrcTy) && "Unknown value type!");
    if (isFloatingPoint(DestTy)) {  // FP -> FP cast
      if (DestTy < SrcTy)   // Rounding cast?
        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestTy, N));
    } else {                        // FP -> Int cast.
      if (I.getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestTy, N));
    }
  }
}

void SelectionDAGLowering::visitGetElementPtr(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  const Type *Ty = I.getOperand(0)->getType();
  const Type *UIntPtrTy = TD.getIntPtrType();

  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        getIntPtrConstant(Offset));
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getRawValue() == 0) continue;

        uint64_t Offs;
        if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
          Offs = (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
        else
          Offs = TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeSize(Ty);
      SDOperand IdxN = getValue(Idx);

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
      if (IdxN.getValueType() < N.getValueType()) {
        if (Idx->getType()->isSigned())
          IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
        else
          IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
      } else if (IdxN.getValueType() > N.getValueType())
        IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);

      // If this is a multiply by a power of two, turn it into a shl
      // immediately.  This is a very common case.
      if (isPowerOf2_64(ElementSize)) {
        unsigned Amt = Log2_64(ElementSize);
        IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
                           DAG.getConstant(Amt, TLI.getShiftAmountTy()));
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
        continue;
      }

      SDOperand Scale = getIntPtrConstant(ElementSize);
      IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
      N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
    }
  }
  setValue(&I, N);
}

void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  const Type *Ty = I.getAllocatedType();
  uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
  unsigned Align = std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
                            I.getAlignment());

  SDOperand AllocSize = getValue(I.getArraySize());
  MVT::ValueType IntPtr = TLI.getPointerTy();
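  // Coerce the dynamic element count to pointer width before scaling it by
  // the element size below.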
  if (IntPtr < AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
  else if (IntPtr > AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);

  AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
                          getIntPtrConstant(TySize));

  // Handle alignment.  If the requested alignment is less than or equal to the
  // stack alignment, ignore it and round the size of the allocation up to the
  // stack alignment size.  If the size is greater than the stack alignment, we
  // note this in the DYNAMIC_STACKALLOC node.
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align <= StackAlign) {
    Align = 0;
    // Add SA-1 to the size.
    AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(StackAlign-1));
    // Mask out the low bits for alignment purposes.
    AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(~(uint64_t)(StackAlign-1)));
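    // Together, the add and mask round AllocSize up to the next multiple of
    // StackAlign: (Size + SA-1) & ~(SA-1).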
  }

  std::vector<MVT::ValueType> VTs;
  VTs.push_back(AllocSize.getValueType());
  VTs.push_back(MVT::Other);
  std::vector<SDOperand> Ops;
  Ops.push_back(getRoot());
  Ops.push_back(AllocSize);
  Ops.push_back(getIntPtrConstant(Align));
  SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, Ops);
  DAG.setRoot(setValue(&I, DSA).getValue(1));

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}

/// getStringValue - Turn an LLVM constant pointer that eventually points to a
/// global into a string value.  Return an empty string if we can't do it.
///
static std::string getStringValue(GlobalVariable *GV, unsigned Offset = 0) {
  if (GV->hasInitializer() && isa<ConstantArray>(GV->getInitializer())) {
    ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
    if (Init->isString()) {
      std::string Result = Init->getAsString();
      if (Offset < Result.size()) {
        // If we are pointing into the string, erase the beginning...
        Result.erase(Result.begin(), Result.begin()+Offset);
        return Result;
      }
    }
  }
  return "";
}

void SelectionDAGLowering::visitLoad(LoadInst &I) {
  SDOperand Ptr = getValue(I.getOperand(0));

  SDOperand Root;
  if (I.isVolatile())
    Root = getRoot();
  else {
    // Do not serialize non-volatile loads against each other.
    Root = DAG.getRoot();
  }

  const Type *Ty = I.getType();
  SDOperand L;

  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    unsigned NumElements = PTy->getNumElements();
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

    // Immediately scalarize packed types containing only one element, so that
    // the Legalize pass does not have to deal with them.
    if (NumElements == 1) {
      L = DAG.getLoad(PVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
    } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
      L = DAG.getLoad(TVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
    } else {
      L = DAG.getVecLoad(NumElements, PVT, Root, Ptr,
                         DAG.getSrcValue(I.getOperand(0)));
    }
  } else {
    L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr,
                    DAG.getSrcValue(I.getOperand(0)));
  }
  setValue(&I, L);

  if (I.isVolatile())
    DAG.setRoot(L.getValue(1));
  else
    PendingLoads.push_back(L.getValue(1));
}


void SelectionDAGLowering::visitStore(StoreInst &I) {
  Value *SrcV = I.getOperand(0);
  SDOperand Src = getValue(SrcV);
  SDOperand Ptr = getValue(I.getOperand(1));
  DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
                          DAG.getSrcValue(I.getOperand(1))));
}

/// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
/// we want to emit this as a call to a named external function, return the
/// name; otherwise lower it and return null.
const char *
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
  switch (Intrinsic) {
  case Intrinsic::vastart:  visitVAStart(I); return 0;
  case Intrinsic::vaend:    visitVAEnd(I); return 0;
  case Intrinsic::vacopy:   visitVACopy(I); return 0;
  case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
  case Intrinsic::frameaddress:  visitFrameReturnAddress(I, true); return 0;
  case Intrinsic::setjmp:
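    // Note: adding the boolean to the string literal is pointer arithmetic:
    // when the target does not use the underscored names, the +1 skips the
    // leading '_', yielding "setjmp" (and likewise "longjmp" below).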
934    return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
935    break;
936  case Intrinsic::longjmp:
937    return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
938    break;
939  case Intrinsic::memcpy:  visitMemIntrinsic(I, ISD::MEMCPY); return 0;
940  case Intrinsic::memset:  visitMemIntrinsic(I, ISD::MEMSET); return 0;
941  case Intrinsic::memmove: visitMemIntrinsic(I, ISD::MEMMOVE); return 0;
942
943  case Intrinsic::readport:
944  case Intrinsic::readio: {
945    std::vector<MVT::ValueType> VTs;
946    VTs.push_back(TLI.getValueType(I.getType()));
947    VTs.push_back(MVT::Other);
948    std::vector<SDOperand> Ops;
949    Ops.push_back(getRoot());
950    Ops.push_back(getValue(I.getOperand(1)));
951    SDOperand Tmp = DAG.getNode(Intrinsic == Intrinsic::readport ?
952                                ISD::READPORT : ISD::READIO, VTs, Ops);
953
954    setValue(&I, Tmp);
955    DAG.setRoot(Tmp.getValue(1));
956    return 0;
957  }
958  case Intrinsic::writeport:
959  case Intrinsic::writeio:
960    DAG.setRoot(DAG.getNode(Intrinsic == Intrinsic::writeport ?
961                            ISD::WRITEPORT : ISD::WRITEIO, MVT::Other,
962                            getRoot(), getValue(I.getOperand(1)),
963                            getValue(I.getOperand(2))));
964    return 0;
965
966  case Intrinsic::dbg_stoppoint: {
967    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
968      return "llvm_debugger_stop";
969
970    MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
971    if (DebugInfo &&  DebugInfo->Verify(I.getOperand(4))) {
972      std::vector<SDOperand> Ops;
973
974      // Input Chain
975      Ops.push_back(getRoot());
976
977      // line number
978      Ops.push_back(getValue(I.getOperand(2)));
979
980      // column
981      Ops.push_back(getValue(I.getOperand(3)));
982
983      DebugInfoDesc *DD = DebugInfo->getDescFor(I.getOperand(4));
984      assert(DD && "Not a debug information descriptor");
985      CompileUnitDesc *CompileUnit = dyn_cast<CompileUnitDesc>(DD);
986      assert(CompileUnit && "Not a compile unit");
987      Ops.push_back(DAG.getString(CompileUnit->getFileName()));
988      Ops.push_back(DAG.getString(CompileUnit->getDirectory()));
989
990      if (Ops.size() == 5)  // Found filename/workingdir.
991        DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
992    }
993
994    setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
995    return 0;
996  }
997  case Intrinsic::dbg_region_start:
998    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
999      return "llvm_dbg_region_start";
1000    if (I.getType() != Type::VoidTy)
1001      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
1002    return 0;
1003  case Intrinsic::dbg_region_end:
1004    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
1005      return "llvm_dbg_region_end";
1006    if (I.getType() != Type::VoidTy)
1007      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
1008    return 0;
1009  case Intrinsic::dbg_func_start:
1010    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
1011      return "llvm_dbg_subprogram";
1012    if (I.getType() != Type::VoidTy)
1013      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
1014    return 0;
1015  case Intrinsic::dbg_declare:
1016    if (I.getType() != Type::VoidTy)
1017      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
1018    return 0;
1019
1020  case Intrinsic::isunordered_f32:
1021  case Intrinsic::isunordered_f64:
1022    setValue(&I, DAG.getSetCC(MVT::i1,getValue(I.getOperand(1)),
1023                              getValue(I.getOperand(2)), ISD::SETUO));
1024    return 0;
1025
1026  case Intrinsic::sqrt_f32:
1027  case Intrinsic::sqrt_f64:
1028    setValue(&I, DAG.getNode(ISD::FSQRT,
1029                             getValue(I.getOperand(1)).getValueType(),
1030                             getValue(I.getOperand(1))));
1031    return 0;
1032  case Intrinsic::pcmarker: {
1033    SDOperand Tmp = getValue(I.getOperand(1));
1034    DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
1035    return 0;
1036  }
1037  case Intrinsic::readcyclecounter: {
1038    std::vector<MVT::ValueType> VTs;
1039    VTs.push_back(MVT::i64);
1040    VTs.push_back(MVT::Other);
1041    std::vector<SDOperand> Ops;
1042    Ops.push_back(getRoot());
1043    SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, VTs, Ops);
1044    setValue(&I, Tmp);
1045    DAG.setRoot(Tmp.getValue(1));
1046    return 0;
1047  }
1048  case Intrinsic::bswap_i16:
1049  case Intrinsic::bswap_i32:
1050  case Intrinsic::bswap_i64:
1051    setValue(&I, DAG.getNode(ISD::BSWAP,
1052                             getValue(I.getOperand(1)).getValueType(),
1053                             getValue(I.getOperand(1))));
1054    return 0;
1055  case Intrinsic::cttz_i8:
1056  case Intrinsic::cttz_i16:
1057  case Intrinsic::cttz_i32:
1058  case Intrinsic::cttz_i64:
1059    setValue(&I, DAG.getNode(ISD::CTTZ,
1060                             getValue(I.getOperand(1)).getValueType(),
1061                             getValue(I.getOperand(1))));
1062    return 0;
1063  case Intrinsic::ctlz_i8:
1064  case Intrinsic::ctlz_i16:
1065  case Intrinsic::ctlz_i32:
1066  case Intrinsic::ctlz_i64:
1067    setValue(&I, DAG.getNode(ISD::CTLZ,
1068                             getValue(I.getOperand(1)).getValueType(),
1069                             getValue(I.getOperand(1))));
1070    return 0;
1071  case Intrinsic::ctpop_i8:
1072  case Intrinsic::ctpop_i16:
1073  case Intrinsic::ctpop_i32:
1074  case Intrinsic::ctpop_i64:
1075    setValue(&I, DAG.getNode(ISD::CTPOP,
1076                             getValue(I.getOperand(1)).getValueType(),
1077                             getValue(I.getOperand(1))));
1078    return 0;
1079  case Intrinsic::stacksave: {
1080    std::vector<MVT::ValueType> VTs;
1081    VTs.push_back(TLI.getPointerTy());
1082    VTs.push_back(MVT::Other);
1083    std::vector<SDOperand> Ops;
1084    Ops.push_back(getRoot());
1085    SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, VTs, Ops);
1086    setValue(&I, Tmp);
1087    DAG.setRoot(Tmp.getValue(1));
1088    return 0;
1089  }
1090  case Intrinsic::stackrestore: {
1091    SDOperand Tmp = getValue(I.getOperand(1));
1092    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
1093    return 0;
1094  }
1095  case Intrinsic::prefetch:
1096    // FIXME: Currently discarding prefetches.
1097    return 0;
1098  default:
1099    std::cerr << I;
1100    assert(0 && "This intrinsic is not implemented yet!");
1101    return 0;
1102  }
1103}
1104
1105
1106void SelectionDAGLowering::visitCall(CallInst &I) {
1107  const char *RenameFn = 0;
1108  if (Function *F = I.getCalledFunction()) {
1109    if (F->isExternal())
1110      if (unsigned IID = F->getIntrinsicID()) {
1111        RenameFn = visitIntrinsicCall(I, IID);
1112        if (!RenameFn)
1113          return;
1114      } else {    // Not an LLVM intrinsic.
1115        const std::string &Name = F->getName();
1116        if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
1117          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
1118              I.getOperand(1)->getType()->isFloatingPoint() &&
1119              I.getType() == I.getOperand(1)->getType()) {
1120            SDOperand Tmp = getValue(I.getOperand(1));
1121            setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
1122            return;
1123          }
1124        } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
1125          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
1126              I.getOperand(1)->getType()->isFloatingPoint() &&
1127              I.getType() == I.getOperand(1)->getType()) {
1128            SDOperand Tmp = getValue(I.getOperand(1));
1129            setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
1130            return;
1131          }
1132        } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
1133          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
1134              I.getOperand(1)->getType()->isFloatingPoint() &&
1135              I.getType() == I.getOperand(1)->getType()) {
1136            SDOperand Tmp = getValue(I.getOperand(1));
1137            setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
1138            return;
1139          }
1140        }
1141      }
1142  } else if (isa<InlineAsm>(I.getOperand(0))) {
1143    visitInlineAsm(I);
1144    return;
1145  }
1146
1147  SDOperand Callee;
1148  if (!RenameFn)
1149    Callee = getValue(I.getOperand(0));
1150  else
1151    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
1152  std::vector<std::pair<SDOperand, const Type*> > Args;
1153  Args.reserve(I.getNumOperands());
1154  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1155    Value *Arg = I.getOperand(i);
1156    SDOperand ArgNode = getValue(Arg);
1157    Args.push_back(std::make_pair(ArgNode, Arg->getType()));
1158  }
1159
1160  const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
1161  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
1162
1163  std::pair<SDOperand,SDOperand> Result =
1164    TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
1165                    I.isTailCall(), Callee, Args, DAG);
1166  if (I.getType() != Type::VoidTy)
1167    setValue(&I, Result.first);
1168  DAG.setRoot(Result.second);
1169}
1170
1171SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
1172                                        SDOperand &Chain, SDOperand &Flag) {
1173  SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag);
1174  Chain = Val.getValue(1);
1175  Flag  = Val.getValue(2);
1176
1177  // If the result was expanded, copy from the top part.
1178  if (Regs.size() > 1) {
1179    assert(Regs.size() == 2 &&
1180           "Cannot expand to more than 2 elts yet!");
1181    SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag);
    Chain = Hi.getValue(1);
    Flag  = Hi.getValue(2);
    return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi);
  }

  // Otherwise, if the return value was promoted, truncate it to the
  // appropriate type.
  if (RegVT == ValueVT)
    return Val;

  if (MVT::isInteger(RegVT))
    return DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
  else
    return DAG.getNode(ISD::FP_ROUND, ValueVT, Val);
}



/// isAllocatableRegister - If the specified register is safe to allocate,
/// i.e. it isn't a stack pointer or some other special register, return the
/// register class for the register.  Otherwise, return null.
static const TargetRegisterClass *
isAllocatableRegister(unsigned Reg, MachineFunction &MF,
                      const TargetLowering &TLI, const MRegisterInfo *MRI) {
  for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
       E = MRI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;
    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    bool isLegal = false;
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (TLI.isTypeLegal(*I)) {
        isLegal = true;
        break;
      }
    }

    if (!isLegal) continue;

    // NOTE: This isn't ideal.  In particular, this might allocate the
    // frame pointer in functions that need it (due to them not being taken
    // out of allocation, because a variable sized allocation hasn't been seen
    // yet).  This is a slight code pessimization, but should still work.
    for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
         E = RC->allocation_order_end(MF); I != E; ++I)
      if (*I == Reg)
        return RC;
  }
  return 0;
}

RegsForValue SelectionDAGLowering::
GetRegistersForValue(const std::string &ConstrCode,
                     MVT::ValueType VT, bool isOutReg, bool isInReg,
                     std::set<unsigned> &OutputRegs,
                     std::set<unsigned> &InputRegs) {
  std::pair<unsigned, const TargetRegisterClass*> PhysReg =
    TLI.getRegForInlineAsmConstraint(ConstrCode, VT);
  std::vector<unsigned> Regs;

  unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1;
  MVT::ValueType RegVT;
  MVT::ValueType ValueVT = VT;

  if (PhysReg.first) {
    if (VT == MVT::Other)
      ValueVT = *PhysReg.second->vt_begin();
    RegVT = VT;

    // This is an explicit reference to a physical register.
    Regs.push_back(PhysReg.first);

    // If this is an expanded reference, add the rest of the regs to Regs.
    if (NumRegs != 1) {
      RegVT = *PhysReg.second->vt_begin();
      TargetRegisterClass::iterator I = PhysReg.second->begin();
      TargetRegisterClass::iterator E = PhysReg.second->end();
      for (; *I != PhysReg.first; ++I)
        assert(I != E && "Didn't find reg!");

      // Already added the first reg.
      --NumRegs; ++I;
      for (; NumRegs; --NumRegs, ++I) {
        assert(I != E && "Ran out of registers to allocate!");
        Regs.push_back(*I);
      }
    }
    return RegsForValue(Regs, RegVT, ValueVT);
  }

  // This is a reference to a register class.  Allocate NumRegs consecutive,
  // available, registers from the class.
  std::vector<unsigned> RegClassRegs =
    TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT);

  const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo();
  MachineFunction &MF = *CurMBB->getParent();
  unsigned NumAllocated = 0;
  for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
    unsigned Reg = RegClassRegs[i];
    // See if this register is available.
    if ((isOutReg && OutputRegs.count(Reg)) ||   // Already used.
        (isInReg  && InputRegs.count(Reg))) {    // Already used.
      // Make sure we find consecutive registers.
      NumAllocated = 0;
      continue;
    }

    // Check to see if this register is allocatable (i.e. don't give out the
    // stack pointer).
    const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
    if (!RC) {
      // Make sure we find consecutive registers.
      NumAllocated = 0;
      continue;
    }

    // Okay, this register is good, we can use it.
    ++NumAllocated;

    // If we allocated enough consecutive registers, we have the run we need.
    if (NumAllocated == NumRegs) {
      unsigned RegStart = (i-NumAllocated)+1;
      unsigned RegEnd   = i+1;
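      // RegClassRegs[RegStart..RegEnd) is the run of consecutive registers
      // we just finished allocating.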
      // Mark all of the allocated registers used.
      for (unsigned i = RegStart; i != RegEnd; ++i) {
        unsigned Reg = RegClassRegs[i];
        Regs.push_back(Reg);
        if (isOutReg) OutputRegs.insert(Reg);    // Mark reg used.
        if (isInReg)  InputRegs.insert(Reg);     // Mark reg used.
      }

      return RegsForValue(Regs, *RC->vt_begin(), VT);
    }
  }

  // Otherwise, we couldn't allocate enough registers for this.
  return RegsForValue();
}


/// visitInlineAsm - Handle a call to an InlineAsm object.
///
void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
  InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));

  SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
                                                 MVT::Other);

  // Note, we treat inline asms both with and without side-effects as the same.
  // If an inline asm doesn't have side effects and doesn't access memory, we
  // could choose not to chain it.
1335  bool hasSideEffects = IA->hasSideEffects();
1336
1337  std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
1338  std::vector<MVT::ValueType> ConstraintVTs;
1339
1340  /// AsmNodeOperands - A list of pairs.  The first element is a register, the
1341  /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
1342  /// if it is a def of that register.
1343  std::vector<SDOperand> AsmNodeOperands;
1344  AsmNodeOperands.push_back(SDOperand());  // reserve space for input chain
1345  AsmNodeOperands.push_back(AsmStr);
1346
1347  SDOperand Chain = getRoot();
1348  SDOperand Flag;
1349
1350  // We fully assign registers here at isel time.  This is not optimal, but
1351  // should work.  For register classes that correspond to LLVM classes, we
1352  // could let the LLVM RA do its thing, but we currently don't.  Do a prepass
1353  // over the constraints, collecting fixed registers that we know we can't use.
1354  std::set<unsigned> OutputRegs, InputRegs;
1355  unsigned OpNum = 1;
1356  for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1357    assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
1358    std::string &ConstraintCode = Constraints[i].Codes[0];
1359
1360    MVT::ValueType OpVT;
1361
1362    // Compute the value type for each operand and add it to ConstraintVTs.
1363    switch (Constraints[i].Type) {
1364    case InlineAsm::isOutput:
1365      if (!Constraints[i].isIndirectOutput) {
1366        assert(I.getType() != Type::VoidTy && "Bad inline asm!");
1367        OpVT = TLI.getValueType(I.getType());
1368      } else {
1369        Value *CallOperand = I.getOperand(OpNum);
1370        const Type *OpTy = CallOperand->getType();
1371        OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
1372        OpNum++;  // Consumes a call operand.
1373      }
1374      break;
1375    case InlineAsm::isInput:
1376      OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
1377      OpNum++;  // Consumes a call operand.
1378      break;
1379    case InlineAsm::isClobber:
1380      OpVT = MVT::Other;
1381      break;
1382    }
1383
1384    ConstraintVTs.push_back(OpVT);
1385
1386    if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
1387      continue;  // Not assigned a fixed reg.
1388
1389    // Build a list of regs that this operand uses.  This always has a single
1390    // element for promoted/expanded operands.
1391    RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT,
1392                                             false, false,
1393                                             OutputRegs, InputRegs);
1394
1395    switch (Constraints[i].Type) {
1396    case InlineAsm::isOutput:
1397      // We can't assign any other output to this register.
1398      OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
1399      // If this is an early-clobber output, it cannot be assigned to the same
1400      // register as any input.
1401      if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
1402        InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
1403      break;
1404    case InlineAsm::isInput:
1405      // We can't assign any other input to this register.
1406      InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
1407      break;
1408    case InlineAsm::isClobber:
1409      // Clobbered regs cannot be used as inputs or outputs.
1410      InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
1411      OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
1412      break;
1413    }
1414  }
1415
1416  // Loop over all of the inputs, copying the operand values into the
1417  // appropriate registers and processing the output regs.
1418  RegsForValue RetValRegs;
1419  std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
1420  OpNum = 1;
1421
1422  for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1423    assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
1424    std::string &ConstraintCode = Constraints[i].Codes[0];
1425
1426    switch (Constraints[i].Type) {
1427    case InlineAsm::isOutput: {
1428      // If this is an early-clobber output, or if there is an input
1429      // constraint that matches this, we need to reserve the input register
1430      // so no other inputs allocate to it.
1431      bool UsesInputRegister = false;
1432      if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
1433        UsesInputRegister = true;
1434
1435      // Copy the output from the appropriate register.  Find a register that
1436      // we can use.
1437      RegsForValue Regs =
1438        GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
1439                             true, UsesInputRegister,
1440                             OutputRegs, InputRegs);
1441      assert(!Regs.Regs.empty() && "Couldn't allocate output reg!");
1442
1443      if (!Constraints[i].isIndirectOutput) {
1444        assert(RetValRegs.Regs.empty() &&
1445               "Cannot have multiple output constraints yet!");
1446        assert(I.getType() != Type::VoidTy && "Bad inline asm!");
1447        RetValRegs = Regs;
1448      } else {
1449        Value *CallOperand = I.getOperand(OpNum);
1450        IndirectStoresToEmit.push_back(std::make_pair(Regs, CallOperand));
1451        OpNum++;  // Consumes a call operand.
1452      }
1453
1454      // Add information to the INLINEASM node to know that this register is
1455      // set.
1456
1457      // FIXME:
1458      // FIXME: Handle multiple regs here.
1459      // FIXME:
1460      unsigned DestReg = Regs.Regs[0];
1461      AsmNodeOperands.push_back(DAG.getRegister(DestReg, Regs.RegVT));
1462      AsmNodeOperands.push_back(DAG.getConstant(2, MVT::i32)); // ISDEF
1463      break;
1464    }
1465    case InlineAsm::isInput: {
1466      Value *CallOperand = I.getOperand(OpNum);
1467      OpNum++;  // Consumes a call operand.
1468
1469      SDOperand ResOp;
1470      unsigned ResOpType;
1471      SDOperand InOperandVal = getValue(CallOperand);
1472
1473      if (isdigit(ConstraintCode[0])) {    // Matching constraint?
1474        // If this is required to match an output register we have already set,
1475        // just use its register.
1476        unsigned OperandNo = atoi(ConstraintCode.c_str());
1477        unsigned SrcReg;
1478        SrcReg = cast<RegisterSDNode>(AsmNodeOperands[OperandNo*2+2])->getReg();
1479        ResOp = DAG.getRegister(SrcReg, ConstraintVTs[i]);
1480        ResOpType = 1;
1481
1482        Chain = DAG.getCopyToReg(Chain, SrcReg, InOperandVal, Flag);
1483        Flag = Chain.getValue(1);
1484      } else {
1485        TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
1486        if (ConstraintCode.size() == 1)   // not a physreg name.
1487          CTy = TLI.getConstraintType(ConstraintCode[0]);
1488
1489        switch (CTy) {
1490        default: assert(0 && "Unknown constraint type! FAIL!");
1491        case TargetLowering::C_RegisterClass: {
1492          // Copy the input into the appropriate registers.
1493          RegsForValue InRegs =
1494            GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
1495                                 false, true, OutputRegs, InputRegs);
1496          // FIXME: should be match fail.
1497          assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!");
1498
1499          if (InRegs.Regs.size() == 1) {
1500            // If there is a single register and the types differ, this must be
1501            // a promotion.
1502            if (InRegs.RegVT != InRegs.ValueVT) {
1503              if (MVT::isInteger(InRegs.RegVT))
1504                InOperandVal = DAG.getNode(ISD::ANY_EXTEND, InRegs.RegVT,
1505                                           InOperandVal);
1506              else
1507                InOperandVal = DAG.getNode(ISD::FP_EXTEND, InRegs.RegVT,
1508                                           InOperandVal);
1509            }
1510            Chain = DAG.getCopyToReg(Chain, InRegs.Regs[0], InOperandVal, Flag);
1511            Flag = Chain.getValue(1);
1512
1513            ResOp = DAG.getRegister(InRegs.Regs[0], InRegs.RegVT);
1514          } else {
1515            for (unsigned i = 0, e = InRegs.Regs.size(); i != e; ++i) {
1516              SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, InRegs.RegVT,
1517                                           InOperandVal,
1518                                           DAG.getConstant(i, MVT::i32));
1519              Chain = DAG.getCopyToReg(Chain, InRegs.Regs[i], Part, Flag);
1520              Flag = Chain.getValue(1);
1521            }
1522            ResOp = DAG.getRegister(InRegs.Regs[0], InRegs.RegVT);
1523          }
1524
1525          ResOpType = 1;
1526          break;
1527        }
1528        case TargetLowering::C_Other:
1529          if (!TLI.isOperandValidForConstraint(InOperandVal, ConstraintCode[0]))
1530            assert(0 && "MATCH FAIL!");
1531          ResOp = InOperandVal;
1532          ResOpType = 3;
1533          break;
1534        }
1535      }
1536
1537      // Add information to the INLINEASM node to know about this input.
1538      AsmNodeOperands.push_back(ResOp);
1539      AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
1540      break;
1541    }
1542    case InlineAsm::isClobber:
1543      // Nothing to do.
1544      break;
1545    }
1546  }
1547
1548  // Finish up input operands.
1549  AsmNodeOperands[0] = Chain;
1550  if (Flag.Val) AsmNodeOperands.push_back(Flag);
1551
1552  std::vector<MVT::ValueType> VTs;
1553  VTs.push_back(MVT::Other);
1554  VTs.push_back(MVT::Flag);
1555  Chain = DAG.getNode(ISD::INLINEASM, VTs, AsmNodeOperands);
1556  Flag = Chain.getValue(1);
1557
1558  // If this asm returns a register value, copy the result from that register
1559  // and set it as the value of the call.
1560  if (!RetValRegs.Regs.empty())
1561    setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag));
1562
1563  std::vector<std::pair<SDOperand, Value*> > StoresToEmit;
1564
1565  // Process indirect outputs: first emit all of the flagged copies out of
1566  // physregs.
1567  for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
1568    RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
1569    Value *Ptr = IndirectStoresToEmit[i].second;
1570    SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag);
1571    StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
1572  }
1573
1574  // Emit the non-flagged stores from the physregs.
1575  std::vector<SDOperand> OutChains;
1576  for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
1577    OutChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1578                                    StoresToEmit[i].first,
1579                                    getValue(StoresToEmit[i].second),
1580                                    DAG.getSrcValue(StoresToEmit[i].second)));
1581  if (!OutChains.empty())
1582    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains);
1583  DAG.setRoot(Chain);
1584}
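// End-to-end sketch (invented IR in the syntax of this era): for
//   %r = call int asm "foo $1, $0", "=r,r"(int %x)
// the code above allocates one output and one input register, emits a
// CopyToReg of %x into the input register, builds the INLINEASM node from
// AsmNodeOperands, and finally copies the output register back out as the
// value of %r via RetValRegs.getCopyFromRegs.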
1585
1586
1587void SelectionDAGLowering::visitMalloc(MallocInst &I) {
1588  SDOperand Src = getValue(I.getOperand(0));
1589
1590  MVT::ValueType IntPtr = TLI.getPointerTy();
1591
1592  if (IntPtr < Src.getValueType())
1593    Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
1594  else if (IntPtr > Src.getValueType())
1595    Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
1596
1597  // Scale the source by the type size.
1598  uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
1599  Src = DAG.getNode(ISD::MUL, Src.getValueType(),
1600                    Src, getIntPtrConstant(ElementSize));
1601
1602  std::vector<std::pair<SDOperand, const Type*> > Args;
1603  Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));
1604
1605  std::pair<SDOperand,SDOperand> Result =
1606    TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
1607                    DAG.getExternalSymbol("malloc", IntPtr),
1608                    Args, DAG);
1609  setValue(&I, Result.first);  // Pointers always fit in registers
1610  DAG.setRoot(Result.second);
1611}
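// Worked example (assuming a 32-bit target): for "%p = malloc int, uint %n"
// the element size is 4, so Src becomes n * 4 and the LowerCallTo above
// produces an ordinary C call "malloc(n * 4)" whose result becomes the value
// of the instruction.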
1612
1613void SelectionDAGLowering::visitFree(FreeInst &I) {
1614  std::vector<std::pair<SDOperand, const Type*> > Args;
1615  Args.push_back(std::make_pair(getValue(I.getOperand(0)),
1616                                TLI.getTargetData().getIntPtrType()));
1617  MVT::ValueType IntPtr = TLI.getPointerTy();
1618  std::pair<SDOperand,SDOperand> Result =
1619    TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
1620                    DAG.getExternalSymbol("free", IntPtr), Args, DAG);
1621  DAG.setRoot(Result.second);
1622}
1623
1624// InsertAtEndOfBasicBlock - This method should be implemented by targets that
1625// mark instructions with the 'usesCustomDAGSchedInserter' flag.  These
1626// instructions are special in various ways, which require special support to
1627// insert.  The specified MachineInstr is created but not inserted into any
1628// basic blocks, and the scheduler passes ownership of it to this method.
1629MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
1630                                                       MachineBasicBlock *MBB) {
1631  std::cerr << "If a target marks an instruction with "
1632               "'usesCustomDAGSchedInserter', it must implement "
1633               "TargetLowering::InsertAtEndOfBasicBlock!\n";
1634  abort();
1635  return 0;
1636}
1637
1638void SelectionDAGLowering::visitVAStart(CallInst &I) {
1639  DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
1640                          getValue(I.getOperand(1)),
1641                          DAG.getSrcValue(I.getOperand(1))));
1642}
1643
1644void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
1645  SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
1646                             getValue(I.getOperand(0)),
1647                             DAG.getSrcValue(I.getOperand(0)));
1648  setValue(&I, V);
1649  DAG.setRoot(V.getValue(1));
1650}
1651
1652void SelectionDAGLowering::visitVAEnd(CallInst &I) {
1653  DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
1654                          getValue(I.getOperand(1)),
1655                          DAG.getSrcValue(I.getOperand(1))));
1656}
1657
1658void SelectionDAGLowering::visitVACopy(CallInst &I) {
1659  DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
1660                          getValue(I.getOperand(1)),
1661                          getValue(I.getOperand(2)),
1662                          DAG.getSrcValue(I.getOperand(1)),
1663                          DAG.getSrcValue(I.getOperand(2))));
1664}
1665
1666// It is always conservatively correct for llvm.returnaddress and
1667// llvm.frameaddress to return 0.
1668std::pair<SDOperand, SDOperand>
1669TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
1670                                        unsigned Depth, SelectionDAG &DAG) {
1671  return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
1672}
1673
1674SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
1675  assert(0 && "LowerOperation not implemented for this target!");
1676  abort();
1677  return SDOperand();
1678}
1679
1680SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op,
1681                                                 SelectionDAG &DAG) {
1682  assert(0 && "CustomPromoteOperation not implemented for this target!");
1683  abort();
1684  return SDOperand();
1685}
1686
1687void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
1688  unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue();
1689  std::pair<SDOperand,SDOperand> Result =
1690    TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
1691  setValue(&I, Result.first);
1692  DAG.setRoot(Result.second);
1693}
1694
1695/// getMemsetValue - Build a VT-sized constant or node in which the byte-sized
1696/// memset value operand is replicated into every byte.
1697static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
1698                                SelectionDAG &DAG) {
1699  MVT::ValueType CurVT = VT;
1700  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
1701    uint64_t Val   = C->getValue() & 255;
1702    unsigned Shift = 8;
1703    while (CurVT != MVT::i8) {
1704      Val = (Val << Shift) | Val;
1705      Shift <<= 1;
1706      CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
1707    }
1708    return DAG.getConstant(Val, VT);
1709  } else {
1710    Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
1711    unsigned Shift = 8;
1712    while (CurVT != MVT::i8) {
1713      Value =
1714        DAG.getNode(ISD::OR, VT,
1715                    DAG.getNode(ISD::SHL, VT, Value,
1716                                DAG.getConstant(Shift, MVT::i8)), Value);
1717      Shift <<= 1;
1718      CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
1719    }
1720
1721    return Value;
1722  }
1723}
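// Worked example: for a constant memset value of 0xAB and VT == MVT::i32, the
// loop doubles the pattern twice (0xABAB, then 0xABABABAB) and returns the
// i32 constant 0xABABABAB.  The non-constant path builds the same replication
// dynamically out of SHL and OR nodes.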
1724
1725/// getMemsetStringVal - Similar to getMemsetValue, except this is only used
1726/// when a memcpy is turned into a memset because the source is a constant
1727/// string pointer.
1728static SDOperand getMemsetStringVal(MVT::ValueType VT,
1729                                    SelectionDAG &DAG, TargetLowering &TLI,
1730                                    std::string &Str, unsigned Offset) {
1731  MVT::ValueType CurVT = VT;
1732  uint64_t Val = 0;
1733  unsigned MSB = getSizeInBits(VT) / 8;
1734  if (TLI.isLittleEndian())
1735    Offset = Offset + MSB - 1;
1736  for (unsigned i = 0; i != MSB; ++i) {
1737    Val = (Val << 8) | Str[Offset];
1738    Offset += TLI.isLittleEndian() ? -1 : 1;
1739  }
1740  return DAG.getConstant(Val, VT);
1741}
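// Worked example: with Str == "abcd", Offset == 0, and VT == MVT::i32 on a
// little-endian target, MSB is 4 and Offset starts at 3, so the bytes are
// folded in as 'd', 'c', 'b', 'a', producing 0x64636261 -- exactly what a
// 4-byte little-endian load of "abcd" would yield.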
1742
1743/// getMemBasePlusOffset - Returns a node that computes Base plus Offset.
1744static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
1745                                      SelectionDAG &DAG, TargetLowering &TLI) {
1746  MVT::ValueType VT = Base.getValueType();
1747  return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
1748}
1749
1750/// MeetsMaxMemopRequirement - Determines if the number of memory ops required
1751/// to replace the memset / memcpy is below the threshold. It also returns the
1752/// types of the sequence of memory ops to perform memset / memcpy.
1753static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
1754                                     unsigned Limit, uint64_t Size,
1755                                     unsigned Align, TargetLowering &TLI) {
1756  MVT::ValueType VT;
1757
1758  if (TLI.allowsUnalignedMemoryAccesses()) {
1759    VT = MVT::i64;
1760  } else {
1761    switch (Align & 7) {
1762    case 0:
1763      VT = MVT::i64;
1764      break;
1765    case 4:
1766      VT = MVT::i32;
1767      break;
1768    case 2:
1769      VT = MVT::i16;
1770      break;
1771    default:
1772      VT = MVT::i8;
1773      break;
1774    }
1775  }
1776
1777  MVT::ValueType LVT = MVT::i64;
1778  while (!TLI.isTypeLegal(LVT))
1779    LVT = (MVT::ValueType)((unsigned)LVT - 1);
1780  assert(MVT::isInteger(LVT));
1781
1782  if (VT > LVT)
1783    VT = LVT;
1784
1785  unsigned NumMemOps = 0;
1786  while (Size != 0) {
1787    unsigned VTSize = getSizeInBits(VT) / 8;
1788    while (VTSize > Size) {
1789      VT = (MVT::ValueType)((unsigned)VT - 1);
1790      VTSize >>= 1;
1791    }
1792    assert(MVT::isInteger(VT));
1793
1794    if (++NumMemOps > Limit)
1795      return false;
1796    MemOps.push_back(VT);
1797    Size -= VTSize;
1798  }
1799
1800  return true;
1801}
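// Worked example (assuming i32 is the largest legal integer type): for
// Size == 11 and Align == 4, VT starts at i32 and the loop produces
// MemOps == {i32, i32, i16, i8}, i.e. four operations, so the expansion
// only happens if the target's Limit is at least 4.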
1802
1803void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
1804  SDOperand Op1 = getValue(I.getOperand(1));
1805  SDOperand Op2 = getValue(I.getOperand(2));
1806  SDOperand Op3 = getValue(I.getOperand(3));
1807  SDOperand Op4 = getValue(I.getOperand(4));
1808  unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue();
1809  if (Align == 0) Align = 1;
1810
1811  if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) {
1812    std::vector<MVT::ValueType> MemOps;
1813
1814    // Expand memset / memcpy to a series of load / store ops
1815    // if the size operand falls below a certain threshold.
1816    std::vector<SDOperand> OutChains;
1817    switch (Op) {
1818    default: break;  // Do nothing for now.
1819    case ISD::MEMSET: {
1820      if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(),
1821                                   Size->getValue(), Align, TLI)) {
1822        unsigned NumMemOps = MemOps.size();
1823        unsigned Offset = 0;
1824        for (unsigned i = 0; i < NumMemOps; i++) {
1825          MVT::ValueType VT = MemOps[i];
1826          unsigned VTSize = getSizeInBits(VT) / 8;
1827          SDOperand Value = getMemsetValue(Op2, VT, DAG);
1828          SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, getRoot(),
1829                                        Value,
1830                                    getMemBasePlusOffset(Op1, Offset, DAG, TLI),
1831                                      DAG.getSrcValue(I.getOperand(1), Offset));
1832          OutChains.push_back(Store);
1833          Offset += VTSize;
1834        }
1835      }
1836      break;
1837    }
1838    case ISD::MEMCPY: {
1839      if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(),
1840                                   Size->getValue(), Align, TLI)) {
1841        unsigned NumMemOps = MemOps.size();
1842        unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0;
1843        GlobalAddressSDNode *G = NULL;
1844        std::string Str;
1845        bool CopyFromStr = false;
1846
1847        if (Op2.getOpcode() == ISD::GlobalAddress)
1848          G = cast<GlobalAddressSDNode>(Op2);
1849        else if (Op2.getOpcode() == ISD::ADD &&
1850                 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress &&
1851                 Op2.getOperand(1).getOpcode() == ISD::Constant) {
1852          G = cast<GlobalAddressSDNode>(Op2.getOperand(0));
1853          SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue();
1854        }
1855        if (G) {
1856          GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
1857          if (GV) {
1858            Str = getStringValue(GV);
1859            if (!Str.empty()) {
1860              CopyFromStr = true;
1861              SrcOff += SrcDelta;
1862            }
1863          }
1864        }
1865
1866        for (unsigned i = 0; i < NumMemOps; i++) {
1867          MVT::ValueType VT = MemOps[i];
1868          unsigned VTSize = getSizeInBits(VT) / 8;
1869          SDOperand Value, Chain, Store;
1870
1871          if (CopyFromStr) {
1872            Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff);
1873            Chain = getRoot();
1874            Store =
1875              DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
1876                          getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
1877                          DAG.getSrcValue(I.getOperand(1), DstOff));
1878          } else {
1879            Value = DAG.getLoad(VT, getRoot(),
1880                        getMemBasePlusOffset(Op2, SrcOff, DAG, TLI),
1881                        DAG.getSrcValue(I.getOperand(2), SrcOff));
1882            Chain = Value.getValue(1);
1883            Store =
1884              DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
1885                          getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
1886                          DAG.getSrcValue(I.getOperand(1), DstOff));
1887          }
1888          OutChains.push_back(Store);
1889          SrcOff += VTSize;
1890          DstOff += VTSize;
1891        }
1892      }
1893      break;
1894    }
1895    }
1896
1897    if (!OutChains.empty()) {
1898      DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains));
1899      return;
1900    }
1901  }
1902
1903  std::vector<SDOperand> Ops;
1904  Ops.push_back(getRoot());
1905  Ops.push_back(Op1);
1906  Ops.push_back(Op2);
1907  Ops.push_back(Op3);
1908  Ops.push_back(Op4);
1909  DAG.setRoot(DAG.getNode(Op, MVT::Other, Ops));
1910}
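// For example (operands invented), "call void %llvm.memcpy(sbyte* %dst,
// sbyte* %src, uint 11, uint 4)" would be expanded by the MEMCPY case above
// into the loads and stores implied by MemOps, while a non-constant size
// falls through to the generic node built just above.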
1911
1912//===----------------------------------------------------------------------===//
1913// SelectionDAGISel code
1914//===----------------------------------------------------------------------===//
1915
1916unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
1917  return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
1918}
1919
1920void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
1921  // FIXME: we only modify the CFG to split critical edges.  This
1922  // updates dom and loop info.
1923}
1924
1925
1926/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
1927/// casting to the type of GEPI.
1928static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI,
1929                                   Value *Ptr, Value *PtrOffset) {
1930  if (V) return V;   // Already computed.
1931
1932  BasicBlock::iterator InsertPt;
1933  if (BB == GEPI->getParent()) {
1934    // If inserting into the GEP's block, insert right after the GEP.
1935    InsertPt = GEPI;
1936    ++InsertPt;
1937  } else {
1938    // Otherwise, insert at the top of BB, after any PHI nodes
1939    InsertPt = BB->begin();
1940    while (isa<PHINode>(InsertPt)) ++InsertPt;
1941  }
1942
1943  // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
1944  // BB so that there is only one value live across basic blocks (the cast
1945  // operand).
1946  if (CastInst *CI = dyn_cast<CastInst>(Ptr))
1947    if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
1948      Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
1949
1950  // Add the offset, cast it to the right type.
1951  Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
1952  Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
1953  return V = Ptr;
1954}
1955
1956
1957/// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
1958/// selection, we want to be a bit careful about some things.  In particular, if
1959/// we have a GEP instruction that is used in a different block than it is
1960/// defined, the addressing expression of the GEP cannot be folded into loads or
1961/// stores that use it.  In this case, decompose the GEP and move constant
1962/// indices into blocks that use it.
1963static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
1964                                  const TargetData &TD) {
1965  // If this GEP is only used inside the block it is defined in, there is no
1966  // need to rewrite it.
1967  bool isUsedOutsideDefBB = false;
1968  BasicBlock *DefBB = GEPI->getParent();
1969  for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
1970       UI != E; ++UI) {
1971    if (cast<Instruction>(*UI)->getParent() != DefBB) {
1972      isUsedOutsideDefBB = true;
1973      break;
1974    }
1975  }
1976  if (!isUsedOutsideDefBB) return;
1977
1978  // If this GEP has no non-zero constant indices, there is nothing we can do;
1979  // ignore it.
1980  bool hasConstantIndex = false;
1981  for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
1982       E = GEPI->op_end(); OI != E; ++OI) {
1983    if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI))
1984      if (CI->getRawValue()) {
1985        hasConstantIndex = true;
1986        break;
1987      }
1988  }
1989  // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
1990  if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) return;
1991
1992  // Otherwise, decompose the GEP instruction into multiplies and adds.  Sum the
1993  // constant offset (which we now know is non-zero) and deal with it later.
1994  uint64_t ConstantOffset = 0;
1995  const Type *UIntPtrTy = TD.getIntPtrType();
1996  Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
1997  const Type *Ty = GEPI->getOperand(0)->getType();
1998
1999  for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
2000       E = GEPI->op_end(); OI != E; ++OI) {
2001    Value *Idx = *OI;
2002    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2003      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
2004      if (Field)
2005        ConstantOffset += TD.getStructLayout(StTy)->MemberOffsets[Field];
2006      Ty = StTy->getElementType(Field);
2007    } else {
2008      Ty = cast<SequentialType>(Ty)->getElementType();
2009
2010      // Handle constant subscripts.
2011      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2012        if (CI->getRawValue() == 0) continue;
2013
2014        if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
2015          ConstantOffset += (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
2016        else
2017          ConstantOffset+=TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
2018        continue;
2019      }
2020
2021      // Ptr = Ptr + Idx * ElementSize;
2022
2023      // Cast Idx to UIntPtrTy if needed.
2024      Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
2025
2026      uint64_t ElementSize = TD.getTypeSize(Ty);
2027      // Mask off bits that should not be set.
2028      ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
2029      Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);
2030
2031      // Multiply by the element size and add to the base.
2032      Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
2033      Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
2034    }
2035  }
2036
2037  // Make sure that the offset fits in uintptr_t.
2038  ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
2039  Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset);
2040
2041  // Okay, we have now emitted all of the variable index parts to the BB that
2042  // the GEP is defined in.  Loop over all of the using instructions, inserting
2043  // an "add Ptr, ConstantOffset" into each block that uses it and updating each
2044  // instruction to use the newly computed value, making GEPI dead.  When the
2045  // user is a load or store instruction address, we emit the add into the user
2046  // block; otherwise we use a canonical version right next to the GEP (these
2047  // won't be foldable as addresses, so we might as well share the computation).
2048
2049  std::map<BasicBlock*,Value*> InsertedExprs;
2050  while (!GEPI->use_empty()) {
2051    Instruction *User = cast<Instruction>(GEPI->use_back());
2052
2053    // If this use is not foldable into the addressing mode, use a version
2054    // emitted in the GEP block.
2055    Value *NewVal;
2056    if (!isa<LoadInst>(User) &&
2057        (!isa<StoreInst>(User) || User->getOperand(0) == GEPI)) {
2058      NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
2059                                    Ptr, PtrOffset);
2060    } else {
2061      // Otherwise, insert the code in the User's block so it can be folded into
2062      // any users in that block.
2063      NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
2064                                    User->getParent(), GEPI,
2065                                    Ptr, PtrOffset);
2066    }
2067    User->replaceUsesOfWith(GEPI, NewVal);
2068  }
2069
2070  // Finally, the GEP is dead, remove it.
2071  GEPI->eraseFromParent();
2072}
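// Rewrite sketch (invented IR): a GEP such as
//   %p = getelementptr %struct* %base, int 0, uint 1, int %i
// that is used in another block gets its variable part emitted once in the
// defining block (cast %base to uint, then add %i * ElementSize), while the
// final "add <variable part>, <ConstantOffset>" plus a cast to %p's type is
// re-emitted next to each load/store user so it can fold into that user's
// addressing mode; non-foldable users share one copy in the defining block.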
2073
2074bool SelectionDAGISel::runOnFunction(Function &Fn) {
2075  MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
2076  RegMap = MF.getSSARegMap();
2077  DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
2078
2079  // First, split all critical edges for PHI nodes with incoming values that are
2080  // constants, so that the load of the constant into a vreg will not be placed
2081  // into MBBs that are used some other way.
2082  //
2083  // In this pass we also look for GEP instructions that are used across basic
2084  // blocks and rewrite them to improve basic-block-at-a-time selection.
2085  //
2086  for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
2087    PHINode *PN;
2088    BasicBlock::iterator BBI;
2089    for (BBI = BB->begin(); (PN = dyn_cast<PHINode>(BBI)); ++BBI)
2090      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
2091        if (isa<Constant>(PN->getIncomingValue(i)))
2092          SplitCriticalEdge(PN->getIncomingBlock(i), BB);
2093
2094    for (BasicBlock::iterator E = BB->end(); BBI != E; )
2095      if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(BBI++))
2096        OptimizeGEPExpression(GEPI, TLI.getTargetData());
2097  }
2098
2099  FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
2100
2101  for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
2102    SelectBasicBlock(I, MF, FuncInfo);
2103
2104  return true;
2105}
2106
2107
2108SDOperand SelectionDAGISel::
2109CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) {
2110  SDOperand Op = SDL.getValue(V);
2111  assert((Op.getOpcode() != ISD::CopyFromReg ||
2112          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
2113         "Copy from a reg to the same reg!");
2114
2115  // If this type is not legal, we must make sure to not create an invalid
2116  // register use.
2117  MVT::ValueType SrcVT = Op.getValueType();
2118  MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
2119  SelectionDAG &DAG = SDL.DAG;
2120  if (SrcVT == DestVT) {
2121    return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
2122  } else if (SrcVT < DestVT) {
2123    // The src value is promoted to the register.
2124    if (MVT::isFloatingPoint(SrcVT))
2125      Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
2126    else
2127      Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
2128    return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
2129  } else  {
2130    // The src value is expanded into multiple registers.
2131    SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
2132                               Op, DAG.getConstant(0, MVT::i32));
2133    SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
2134                               Op, DAG.getConstant(1, MVT::i32));
2135    Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo);
2136    return DAG.getCopyToReg(Op, Reg+1, Hi);
2137  }
2138}
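// Worked example (assuming a 32-bit target): copying an i64 value takes the
// expanded path above -- the low and high halves are extracted with
// EXTRACT_ELEMENT and copied into Reg and Reg+1, which presumes the value was
// assigned two consecutive virtual registers.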
2139
2140void SelectionDAGISel::
2141LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
2142               std::vector<SDOperand> &UnorderedChains) {
2143  // If this is the entry block, emit arguments.
2144  Function &F = *BB->getParent();
2145  FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
2146  SDOperand OldRoot = SDL.DAG.getRoot();
2147  std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
2148
2149  unsigned a = 0;
2150  for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
2151       AI != E; ++AI, ++a)
2152    if (!AI->use_empty()) {
2153      SDL.setValue(AI, Args[a]);
2154
2155      // If this argument is live outside of the entry block, insert a copy from
2156      // wherever we got it to the vreg that other BBs will use to reference it.
2157      if (FuncInfo.ValueMap.count(AI)) {
2158        SDOperand Copy =
2159          CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
2160        UnorderedChains.push_back(Copy);
2161      }
2162    }
2163
2164  // Next, if the function has live ins that need to be copied into vregs,
2165  // emit the copies now, into the top of the block.
2166  MachineFunction &MF = SDL.DAG.getMachineFunction();
2167  if (MF.livein_begin() != MF.livein_end()) {
2168    SSARegMap *RegMap = MF.getSSARegMap();
2169    const MRegisterInfo &MRI = *MF.getTarget().getRegisterInfo();
2170    for (MachineFunction::livein_iterator LI = MF.livein_begin(),
2171         E = MF.livein_end(); LI != E; ++LI)
2172      if (LI->second)
2173        MRI.copyRegToReg(*MF.begin(), MF.begin()->end(), LI->second,
2174                         LI->first, RegMap->getRegClass(LI->second));
2175  }
2176
2177  // Finally, if the target has anything special to do, allow it to do so.
2178  EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
2179}
2180
2181
2182void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
2183       std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
2184                                    FunctionLoweringInfo &FuncInfo) {
2185  SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
2186
2187  std::vector<SDOperand> UnorderedChains;
2188
2189  // Lower any arguments needed in this block if this is the entry block.
2190  if (LLVMBB == &LLVMBB->getParent()->front())
2191    LowerArguments(LLVMBB, SDL, UnorderedChains);
2192
2193  BB = FuncInfo.MBBMap[LLVMBB];
2194  SDL.setCurrentBasicBlock(BB);
2195
2196  // Lower all of the non-terminator instructions.
2197  for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
2198       I != E; ++I)
2199    SDL.visit(*I);
2200
2201  // Ensure that all instructions which are used outside of their defining
2202  // blocks are available as virtual registers.
2203  for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
2204    if (!I->use_empty() && !isa<PHINode>(I)) {
2205      std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I);
2206      if (VMI != FuncInfo.ValueMap.end())
2207        UnorderedChains.push_back(
2208                           CopyValueToVirtualRegister(SDL, I, VMI->second));
2209    }
2210
2211  // Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
2212  // ensure constants are generated when needed.  Remember the virtual registers
2213  // that need to be added to the Machine PHI nodes as input.  We cannot just
2214  // directly add them, because expansion might result in multiple MBBs for one
2215  // BB.  As such, the start of the BB might correspond to a different MBB than
2216  // the end.
2217  //
2218
2219  // Emit constants only once even if used by multiple PHI nodes.
2220  std::map<Constant*, unsigned> ConstantsOut;
2221
2222  // Check successor blocks' PHI nodes that expect a constant to be available
2223  // from this block.
2224  TerminatorInst *TI = LLVMBB->getTerminator();
2225  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2226    BasicBlock *SuccBB = TI->getSuccessor(succ);
2227    MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
2228    PHINode *PN;
2229
2230    // At this point we know that there is a 1-1 correspondence between LLVM PHI
2231    // nodes and Machine PHI nodes, but the incoming operands have not been
2232    // emitted yet.
2233    for (BasicBlock::iterator I = SuccBB->begin();
2234         (PN = dyn_cast<PHINode>(I)); ++I)
2235      if (!PN->use_empty()) {
2236        unsigned Reg;
2237        Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
2238        if (Constant *C = dyn_cast<Constant>(PHIOp)) {
2239          unsigned &RegOut = ConstantsOut[C];
2240          if (RegOut == 0) {
2241            RegOut = FuncInfo.CreateRegForValue(C);
2242            UnorderedChains.push_back(
2243                             CopyValueToVirtualRegister(SDL, C, RegOut));
2244          }
2245          Reg = RegOut;
2246        } else {
2247          Reg = FuncInfo.ValueMap[PHIOp];
2248          if (Reg == 0) {
2249            assert(isa<AllocaInst>(PHIOp) &&
2250                   FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
2251                   "Didn't codegen value into a register!??");
2252            Reg = FuncInfo.CreateRegForValue(PHIOp);
2253            UnorderedChains.push_back(
2254                             CopyValueToVirtualRegister(SDL, PHIOp, Reg));
2255          }
2256        }
2257
2258        // Remember that this register needs to be added to the machine PHI node
2259        // as the input for this MBB.
2260        unsigned NumElements =
2261          TLI.getNumElements(TLI.getValueType(PN->getType()));
2262        for (unsigned i = 0, e = NumElements; i != e; ++i)
2263          PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
2264      }
2265  }
2266  ConstantsOut.clear();
2267
2268  // Turn all of the unordered chains into one factored node.
2269  if (!UnorderedChains.empty()) {
2270    SDOperand Root = SDL.getRoot();
2271    if (Root.getOpcode() != ISD::EntryToken) {
2272      unsigned i = 0, e = UnorderedChains.size();
2273      for (; i != e; ++i) {
2274        assert(UnorderedChains[i].Val->getNumOperands() > 1);
2275        if (UnorderedChains[i].Val->getOperand(0) == Root)
2276          break;  // Don't add the root if we already indirectly depend on it.
2277      }
2278
2279      if (i == e)
2280        UnorderedChains.push_back(Root);
2281    }
2282    DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, UnorderedChains));
2283  }
2284
2285  // Lower the terminator after the copies are emitted.
2286  SDL.visit(*LLVMBB->getTerminator());
2287
2288  // Make sure the root of the DAG is up-to-date.
2289  DAG.setRoot(SDL.getRoot());
2290}
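// Note the ordering handled above: copies for values live across blocks and
// for PHI inputs (including materialized constants) are token-factored into
// the root before the terminator is visited, so a branch cannot be scheduled
// ahead of the copies it depends on.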
2291
2292void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
2293                                        FunctionLoweringInfo &FuncInfo) {
2294  SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
2295  CurDAG = &DAG;
2296  std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
2297
2298  // First step, lower LLVM code to some DAG.  This DAG may use operations and
2299  // types that are not supported by the target.
2300  BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
2301
2302  // Run the DAG combiner in pre-legalize mode.
2303  DAG.Combine(false);
2304
2305  DEBUG(std::cerr << "Lowered selection DAG:\n");
2306  DEBUG(DAG.dump());
2307
2308  // Second step, hack on the DAG until it only uses operations and types that
2309  // the target supports.
2310  DAG.Legalize();
2311
2312  DEBUG(std::cerr << "Legalized selection DAG:\n");
2313  DEBUG(DAG.dump());
2314
2315  // Run the DAG combiner in post-legalize mode.
2316  DAG.Combine(true);
2317
2318  if (ViewISelDAGs) DAG.viewGraph();
2319
2320  // Third, instruction select all of the operations to machine code, adding the
2321  // code to the MachineBasicBlock.
2322  InstructionSelectBasicBlock(DAG);
2323
2324  DEBUG(std::cerr << "Selected machine code:\n");
2325  DEBUG(BB->dump());
2326
2327  // Next, now that we know which MBB the LLVM BB expanded into last, update
2328  // PHI nodes in successors.
2329  for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
2330    MachineInstr *PHI = PHINodesToUpdate[i].first;
2331    assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
2332           "This is not a machine PHI node that we are updating!");
2333    PHI->addRegOperand(PHINodesToUpdate[i].second);
2334    PHI->addMachineBasicBlockOperand(BB);
2335  }
2336
2337  // Finally, add the CFG edges from the last selected MBB to the successor
2338  // MBBs.
2339  TerminatorInst *TI = LLVMBB->getTerminator();
2340  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
2341    MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[TI->getSuccessor(i)];
2342    BB->addSuccessor(Succ0MBB);
2343  }
2344}
2345
2346//===----------------------------------------------------------------------===//
2347/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
2348/// target node in the graph.
2349void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
2350  if (ViewSchedDAGs) DAG.viewGraph();
2351  ScheduleDAG *SL = NULL;
2352
2353  switch (ISHeuristic) {
2354  default: assert(0 && "Unrecognized scheduling heuristic");
2355  case defaultScheduling:
2356    if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency)
2357      SL = createSimpleDAGScheduler(noScheduling, DAG, BB);
2358    else /* TargetLowering::SchedulingForRegPressure */
2359      SL = createBURRListDAGScheduler(DAG, BB);
2360    break;
2361  case noScheduling:
2362  case simpleScheduling:
2363  case simpleNoItinScheduling:
2364    SL = createSimpleDAGScheduler(ISHeuristic, DAG, BB);
2365    break;
2366  case listSchedulingBURR:
2367    SL = createBURRListDAGScheduler(DAG, BB);
2368  }
2369  BB = SL->Run();
2370  delete SL;
2371}
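// For instance, a target whose getSchedulingPreference() returns
// SchedulingForLatency currently gets the breadth-first "noScheduling"
// sequencer by default, while all other targets default to the bottom-up
// register-reduction list scheduler; both defaults can be overridden through
// the ISHeuristic command-line option.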
2372