SelectionDAGISel.cpp revision 16ce0df92717cd1474029d87efe596d000dc2caa
1//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file was developed by the LLVM research group and is distributed under
6// the University of Illinois Open Source License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements the SelectionDAGISel class.
11//
12//===----------------------------------------------------------------------===//
13
14#define DEBUG_TYPE "isel"
15#include "llvm/CodeGen/SelectionDAGISel.h"
16#include "llvm/Constants.h"
17#include "llvm/DerivedTypes.h"
18#include "llvm/Function.h"
19#include "llvm/Instructions.h"
20#include "llvm/Intrinsics.h"
21#include "llvm/CodeGen/MachineFunction.h"
22#include "llvm/CodeGen/MachineFrameInfo.h"
23#include "llvm/CodeGen/MachineInstrBuilder.h"
24#include "llvm/CodeGen/SelectionDAG.h"
25#include "llvm/CodeGen/SSARegMap.h"
26#include "llvm/Target/TargetData.h"
27#include "llvm/Target/TargetFrameInfo.h"
28#include "llvm/Target/TargetInstrInfo.h"
29#include "llvm/Target/TargetLowering.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include <map>
34#include <iostream>
35using namespace llvm;
36
37#ifndef NDEBUG
38static cl::opt<bool>
39ViewDAGs("view-isel-dags", cl::Hidden,
40         cl::desc("Pop up a window to show isel dags as they are selected"));
41#else
42static const bool ViewDAGs = false;
43#endif
44
45namespace llvm {
46  //===--------------------------------------------------------------------===//
47  /// FunctionLoweringInfo - This contains information that is global to a
48  /// function that is used when lowering a region of the function.
49  class FunctionLoweringInfo {
50  public:
51    TargetLowering &TLI;
52    Function &Fn;
53    MachineFunction &MF;
54    SSARegMap *RegMap;
55
56    FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF);
57
58    /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
59    std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;
60
61    /// ValueMap - Since we emit code for the function a basic block at a time,
62    /// we must remember which virtual registers hold the values that are
63    /// used across basic blocks.
64    std::map<const Value*, unsigned> ValueMap;
65
66    /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
67    /// the entry block.  This allows the allocas to be efficiently referenced
68    /// anywhere in the function.
69    std::map<const AllocaInst*, int> StaticAllocaMap;
70
71    /// BlockLocalArguments - If any arguments are only used in a single basic
72    /// block, and if the target can access the arguments without side-effects,
73    /// avoid emitting CopyToReg nodes for those arguments.  This map keeps
74    /// track of which arguments are local to each BB.
75    std::multimap<BasicBlock*, std::pair<Argument*,
76                                         unsigned> > BlockLocalArguments;
77
78
79    unsigned MakeReg(MVT::ValueType VT) {
80      return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
81    }
82
83    unsigned CreateRegForValue(const Value *V) {
84      MVT::ValueType VT = TLI.getValueType(V->getType());
85      // The common case is that we will only create one register for this
86      // value.  If we have that case, create and return the virtual register.
87      unsigned NV = TLI.getNumElements(VT);
88      if (NV == 1) {
89        // If we are promoting this value, pick the next largest supported type.
90        return MakeReg(TLI.getTypeToTransformTo(VT));
91      }
92
93      // If this value is represented with multiple target registers, make sure
94      // to create enough consecutive registers of the right (smaller) type.
95      unsigned NT = VT-1;  // Find the type to use.
96      while (TLI.getNumElements((MVT::ValueType)NT) != 1)
97        --NT;
98
99      unsigned R = MakeReg((MVT::ValueType)NT);
100      for (unsigned i = 1; i != NV; ++i)
101        MakeReg((MVT::ValueType)NT);
102      return R;
103    }
104
105    unsigned InitializeRegForValue(const Value *V) {
106      unsigned &R = ValueMap[V];
107      assert(R == 0 && "Already initialized this value register!");
108      return R = CreateRegForValue(V);
109    }
110  };
111}
112
113/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
114/// PHI nodes or outside of the basic block that defines it.
115static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
116  if (isa<PHINode>(I)) return true;
117  BasicBlock *BB = I->getParent();
118  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
119    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
120      return true;
121  return false;
122}
123
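/// FunctionLoweringInfo - Compute the per-function lowering state: assign
/// virtual registers to all formal arguments and to every instruction whose
/// value is used outside its defining block, turn constant-sized allocas in the
/// entry block into fixed stack objects, and create a MachineBasicBlock (with
/// empty PHI skeletons) for each LLVM basic block.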
124FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
125                                           Function &fn, MachineFunction &mf)
126    : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {
127
128  // Initialize the mapping of values to registers.  This is set up for all
129  // function arguments and for instruction values that are used outside of
130  // the block that defines them.
131  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
132       AI != E; ++AI)
133    InitializeRegForValue(AI);
134
135  Function::iterator BB = Fn.begin(), E = Fn.end();
136  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
137    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
138      if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
139        const Type *Ty = AI->getAllocatedType();
140        uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
141        unsigned Align = TLI.getTargetData().getTypeAlignment(Ty);
142        TySize *= CUI->getValue();   // Get total allocated size.
143        StaticAllocaMap[AI] =
144          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
145      }
146
147  for (; BB != E; ++BB)
148    for (BasicBlock::iterator I = BB->begin(), e = BB->end(); I != e; ++I)
149      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
150        if (!isa<AllocaInst>(I) ||
151            !StaticAllocaMap.count(cast<AllocaInst>(I)))
152          InitializeRegForValue(I);
153
154  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
155  // also creates the initial PHI MachineInstrs, though none of the input
156  // operands are populated.
157  for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
158    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
159    MBBMap[BB] = MBB;
160    MF.getBasicBlockList().push_back(MBB);
161
162    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
163    // appropriate.
164    PHINode *PN;
165    for (BasicBlock::iterator I = BB->begin();
166         (PN = dyn_cast<PHINode>(I)); ++I)
167      if (!PN->use_empty()) {
168        unsigned NumElements =
169          TLI.getNumElements(TLI.getValueType(PN->getType()));
170        unsigned PHIReg = ValueMap[PN];
171        assert(PHIReg &&"PHI node does not have an assigned virtual register!");
172        for (unsigned i = 0; i != NumElements; ++i)
173          BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
174      }
175  }
176}
177
178
179
180//===----------------------------------------------------------------------===//
181/// SelectionDAGLowering - This is the common target-independent lowering
182/// implementation that is parameterized by a TargetLowering object.
183/// Also, targets can overload any lowering method.
184///
185namespace llvm {
186class SelectionDAGLowering {
187  MachineBasicBlock *CurMBB;
188
189  std::map<const Value*, SDOperand> NodeMap;
190
191  /// PendingLoads - Loads are not emitted to the program immediately.  We bunch
192  /// them up and then emit token factor nodes when possible.  This allows us to
193  /// get simple disambiguation between loads without worrying about alias
194  /// analysis.
195  std::vector<SDOperand> PendingLoads;
196
197public:
198  // TLI - This is information that describes the available target features we
199  // need for lowering.  This indicates when operations are unavailable,
200  // implemented with a libcall, etc.
201  TargetLowering &TLI;
202  SelectionDAG &DAG;
203  const TargetData &TD;
204
205  /// FuncInfo - Information about the function as a whole.
206  ///
207  FunctionLoweringInfo &FuncInfo;
208
209  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
210                       FunctionLoweringInfo &funcinfo)
211    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
212      FuncInfo(funcinfo) {
213  }
214
215  /// getRoot - Return the current virtual root of the Selection DAG.
216  ///
217  SDOperand getRoot() {
218    if (PendingLoads.empty())
219      return DAG.getRoot();
220
221    if (PendingLoads.size() == 1) {
222      SDOperand Root = PendingLoads[0];
223      DAG.setRoot(Root);
224      PendingLoads.clear();
225      return Root;
226    }
227
228    // Otherwise, we have to make a token factor node.
229    SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, PendingLoads);
230    PendingLoads.clear();
231    DAG.setRoot(Root);
232    return Root;
233  }
234
235  void visit(Instruction &I) { visit(I.getOpcode(), I); }
236
237  void visit(unsigned Opcode, User &I) {
238    switch (Opcode) {
239    default: assert(0 && "Unknown instruction type encountered!");
240             abort();
241      // Build the switch statement using the Instruction.def file.
242#define HANDLE_INST(NUM, OPCODE, CLASS) \
243    case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
244#include "llvm/Instruction.def"
245    }
246  }
247
248  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }
249
250
251  SDOperand getIntPtrConstant(uint64_t Val) {
252    return DAG.getConstant(Val, TLI.getPointerTy());
253  }
254
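  /// getValue - Return the SDOperand for V, constructing it if necessary.  The
  /// lookup order is: the per-block NodeMap cache, then constants (which are
  /// materialized as DAG nodes on demand), then fixed-size entry-block allocas
  /// (which become FrameIndex nodes), and finally cross-block values, which are
  /// read out of their assigned virtual register with CopyFromReg.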
255  SDOperand getValue(const Value *V) {
256    SDOperand &N = NodeMap[V];
257    if (N.Val) return N;
258
259    MVT::ValueType VT = TLI.getValueType(V->getType());
260    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V)))
261      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
262        visit(CE->getOpcode(), *CE);
263        assert(N.Val && "visit didn't populate the ValueMap!");
264        return N;
265      } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
266        return N = DAG.getGlobalAddress(GV, VT);
267      } else if (isa<ConstantPointerNull>(C)) {
268        return N = DAG.getConstant(0, TLI.getPointerTy());
269      } else if (isa<UndefValue>(C)) {
270        return N = DAG.getNode(ISD::UNDEF, VT);
271      } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
272        return N = DAG.getConstantFP(CFP->getValue(), VT);
273      } else {
274        // Canonicalize all constant ints to be unsigned.
275        return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
276      }
277
278    if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
279      std::map<const AllocaInst*, int>::iterator SI =
280        FuncInfo.StaticAllocaMap.find(AI);
281      if (SI != FuncInfo.StaticAllocaMap.end())
282        return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
283    }
284
285    std::map<const Value*, unsigned>::const_iterator VMI =
286      FuncInfo.ValueMap.find(V);
287    assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");
288
289    return N = DAG.getCopyFromReg(VMI->second, VT, DAG.getEntryNode());
290  }
291
292  const SDOperand &setValue(const Value *V, SDOperand NewN) {
293    SDOperand &N = NodeMap[V];
294    assert(N.Val == 0 && "Already set a value for this node!");
295    return N = NewN;
296  }
297
298  // Terminator instructions.
299  void visitRet(ReturnInst &I);
300  void visitBr(BranchInst &I);
301  void visitUnreachable(UnreachableInst &I) { /* noop */ }
302
303  // These all get lowered before this pass.
304  void visitSwitch(SwitchInst &I) { assert(0 && "TODO"); }
305  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
306  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }
307
308  //
309  void visitBinary(User &I, unsigned Opcode);
310  void visitAdd(User &I) { visitBinary(I, ISD::ADD); }
311  void visitSub(User &I);
312  void visitMul(User &I) { visitBinary(I, ISD::MUL); }
313  void visitDiv(User &I) {
314    visitBinary(I, I.getType()->isUnsigned() ? ISD::UDIV : ISD::SDIV);
315  }
316  void visitRem(User &I) {
317    visitBinary(I, I.getType()->isUnsigned() ? ISD::UREM : ISD::SREM);
318  }
319  void visitAnd(User &I) { visitBinary(I, ISD::AND); }
320  void visitOr (User &I) { visitBinary(I, ISD::OR); }
321  void visitXor(User &I) { visitBinary(I, ISD::XOR); }
322  void visitShl(User &I) { visitBinary(I, ISD::SHL); }
323  void visitShr(User &I) {
324    visitBinary(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
325  }
326
327  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc);
328  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); }
329  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); }
330  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); }
331  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); }
332  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
333  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }
334
335  void visitGetElementPtr(User &I);
336  void visitCast(User &I);
337  void visitSelect(User &I);
338  //
339
340  void visitMalloc(MallocInst &I);
341  void visitFree(FreeInst &I);
342  void visitAlloca(AllocaInst &I);
343  void visitLoad(LoadInst &I);
344  void visitStore(StoreInst &I);
345  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
346  void visitCall(CallInst &I);
347
348  void visitVAStart(CallInst &I);
349  void visitVANext(VANextInst &I);
350  void visitVAArg(VAArgInst &I);
351  void visitVAEnd(CallInst &I);
352  void visitVACopy(CallInst &I);
353  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);
354
355  void visitMemIntrinsic(CallInst &I, unsigned Op);
356
357  void visitUserOp1(Instruction &I) {
358    assert(0 && "UserOp1 should not exist at instruction selection time!");
359    abort();
360  }
361  void visitUserOp2(Instruction &I) {
362    assert(0 && "UserOp2 should not exist at instruction selection time!");
363    abort();
364  }
365};
366} // end namespace llvm
367
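/// visitRet - Lower a 'ret' instruction.  Small integer return values are
/// sign- or zero-extended to at least i32 (or to whatever i32 promotes to on
/// this target), and f32 values are extended to f64, before the ISD::RET node
/// is emitted.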
368void SelectionDAGLowering::visitRet(ReturnInst &I) {
369  if (I.getNumOperands() == 0) {
370    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
371    return;
372  }
373
374  SDOperand Op1 = getValue(I.getOperand(0));
375  MVT::ValueType TmpVT;
376
377  switch (Op1.getValueType()) {
378  default: assert(0 && "Unknown value type!");
379  case MVT::i1:
380  case MVT::i8:
381  case MVT::i16:
382  case MVT::i32:
383    // If this is a machine where 32-bits is legal or expanded, promote to
384    // 32-bits; otherwise, promote to 64-bits.
385    if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
386      TmpVT = TLI.getTypeToTransformTo(MVT::i32);
387    else
388      TmpVT = MVT::i32;
389
390    // Extend integer types to result type.
391    if (I.getOperand(0)->getType()->isSigned())
392      Op1 = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, Op1);
393    else
394      Op1 = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, Op1);
395    break;
396  case MVT::f32:
397    // Extend float to double.
398    Op1 = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Op1);
399    break;
400  case MVT::i64:
401  case MVT::f64:
402    break; // No extension needed!
403  }
404
405  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot(), Op1));
406}
407
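/// visitBr - Lower a branch.  An unconditional branch to the fall-through block
/// emits nothing; a conditional branch uses BRCOND when one successor is the
/// fall-through block (inverting the condition if necessary), and BRCONDTWOWAY
/// when neither successor falls through.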
408void SelectionDAGLowering::visitBr(BranchInst &I) {
409  // Update machine-CFG edges.
410  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
411
412  // Figure out which block is immediately after the current one.
413  MachineBasicBlock *NextBlock = 0;
414  MachineFunction::iterator BBI = CurMBB;
415  if (++BBI != CurMBB->getParent()->end())
416    NextBlock = BBI;
417
418  if (I.isUnconditional()) {
419    // If this is not a fall-through branch, emit the branch.
420    if (Succ0MBB != NextBlock)
421      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
422                              DAG.getBasicBlock(Succ0MBB)));
423  } else {
424    MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
425
426    SDOperand Cond = getValue(I.getCondition());
427    if (Succ1MBB == NextBlock) {
428      // If the condition is false, fall through.  This means we should branch
429      // to Succ #0 if the condition is true.
430      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
431                              Cond, DAG.getBasicBlock(Succ0MBB)));
432    } else if (Succ0MBB == NextBlock) {
433      // If the condition is true, fall through.  This means we should branch
434      // to Succ #1 if the condition is false.  Invert the condition first.
435      SDOperand True = DAG.getConstant(1, Cond.getValueType());
436      Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
437      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
438                              Cond, DAG.getBasicBlock(Succ1MBB)));
439    } else {
440      std::vector<SDOperand> Ops;
441      Ops.push_back(getRoot());
442      Ops.push_back(Cond);
443      Ops.push_back(DAG.getBasicBlock(Succ0MBB));
444      Ops.push_back(DAG.getBasicBlock(Succ1MBB));
445      DAG.setRoot(DAG.getNode(ISD::BRCONDTWOWAY, MVT::Other, Ops));
446    }
447  }
448}
449
450void SelectionDAGLowering::visitSub(User &I) {
451  // -0.0 - X --> fneg
452  if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
453    if (CFP->isExactlyValue(-0.0)) {
454      SDOperand Op2 = getValue(I.getOperand(1));
455      setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
456      return;
457    }
458
459  visitBinary(I, ISD::SUB);
460}
461
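/// visitBinary - Lower a binary operator to the given ISD opcode.  For shifts,
/// the shift amount is first zero-extended to the target's shift-amount type.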
462void SelectionDAGLowering::visitBinary(User &I, unsigned Opcode) {
463  SDOperand Op1 = getValue(I.getOperand(0));
464  SDOperand Op2 = getValue(I.getOperand(1));
465
466  if (isa<ShiftInst>(I))
467    Op2 = DAG.getNode(ISD::ZERO_EXTEND, TLI.getShiftAmountTy(), Op2);
468
469  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
470}
471
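/// visitSetCC - Lower a setcc instruction, choosing the signed or unsigned
/// condition code based on the signedness of the operand type.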
472void SelectionDAGLowering::visitSetCC(User &I,ISD::CondCode SignedOpcode,
473                                      ISD::CondCode UnsignedOpcode) {
474  SDOperand Op1 = getValue(I.getOperand(0));
475  SDOperand Op2 = getValue(I.getOperand(1));
476  ISD::CondCode Opcode = SignedOpcode;
477  if (I.getOperand(0)->getType()->isUnsigned())
478    Opcode = UnsignedOpcode;
479  setValue(&I, DAG.getSetCC(Opcode, MVT::i1, Op1, Op2));
480}
481
482void SelectionDAGLowering::visitSelect(User &I) {
483  SDOperand Cond     = getValue(I.getOperand(0));
484  SDOperand TrueVal  = getValue(I.getOperand(1));
485  SDOperand FalseVal = getValue(I.getOperand(2));
486  setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
487                           TrueVal, FalseVal));
488}
489
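/// visitCast - Lower a cast.  A cast to bool becomes a comparison against zero;
/// other casts map onto TRUNCATE/SIGN_EXTEND/ZERO_EXTEND, FP_ROUND/FP_EXTEND,
/// or the int<->FP conversion nodes, depending on the source and destination
/// types and their signedness.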
490void SelectionDAGLowering::visitCast(User &I) {
491  SDOperand N = getValue(I.getOperand(0));
492  MVT::ValueType SrcTy = TLI.getValueType(I.getOperand(0)->getType());
493  MVT::ValueType DestTy = TLI.getValueType(I.getType());
494
495  if (N.getValueType() == DestTy) {
496    setValue(&I, N);  // noop cast.
497  } else if (DestTy == MVT::i1) {
498    // Cast to bool is a comparison against zero, not truncation to zero.
499    SDOperand Zero = isInteger(SrcTy) ? DAG.getConstant(0, N.getValueType()) :
500                                       DAG.getConstantFP(0.0, N.getValueType());
501    setValue(&I, DAG.getSetCC(ISD::SETNE, MVT::i1, N, Zero));
502  } else if (isInteger(SrcTy)) {
503    if (isInteger(DestTy)) {        // Int -> Int cast
504      if (DestTy < SrcTy)   // Truncating cast?
505        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestTy, N));
506      else if (I.getOperand(0)->getType()->isSigned())
507        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestTy, N));
508      else
509        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestTy, N));
510    } else {                        // Int -> FP cast
511      if (I.getOperand(0)->getType()->isSigned())
512        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestTy, N));
513      else
514        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestTy, N));
515    }
516  } else {
517    assert(isFloatingPoint(SrcTy) && "Unknown value type!");
518    if (isFloatingPoint(DestTy)) {  // FP -> FP cast
519      if (DestTy < SrcTy)   // Rounding cast?
520        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestTy, N));
521      else
522        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestTy, N));
523    } else {                        // FP -> Int cast.
524      if (I.getType()->isSigned())
525        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestTy, N));
526      else
527        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestTy, N));
528    }
529  }
530}
531
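/// visitGetElementPtr - Lower a GEP by walking its indices: a struct field adds
/// the constant offset from the struct layout, while an array/pointer index is
/// extended or truncated to the pointer width and then scaled by the element
/// size before being added to the pointer.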
532void SelectionDAGLowering::visitGetElementPtr(User &I) {
533  SDOperand N = getValue(I.getOperand(0));
534  const Type *Ty = I.getOperand(0)->getType();
535  const Type *UIntPtrTy = TD.getIntPtrType();
536
537  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
538       OI != E; ++OI) {
539    Value *Idx = *OI;
540    if (const StructType *StTy = dyn_cast<StructType> (Ty)) {
541      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
542      if (Field) {
543        // N = N + Offset
544        uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
545        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
546                        getIntPtrConstant(Offset));
547      }
548      Ty = StTy->getElementType(Field);
549    } else {
550      Ty = cast<SequentialType>(Ty)->getElementType();
551      if (!isa<Constant>(Idx) || !cast<Constant>(Idx)->isNullValue()) {
552        // N = N + Idx * ElementSize;
553        uint64_t ElementSize = TD.getTypeSize(Ty);
554        SDOperand IdxN = getValue(Idx), Scale = getIntPtrConstant(ElementSize);
555
556        // If the index is smaller or larger than intptr_t, truncate or extend
557        // it.
558        if (IdxN.getValueType() < Scale.getValueType()) {
559          if (Idx->getType()->isSigned())
560            IdxN = DAG.getNode(ISD::SIGN_EXTEND, Scale.getValueType(), IdxN);
561          else
562            IdxN = DAG.getNode(ISD::ZERO_EXTEND, Scale.getValueType(), IdxN);
563        } else if (IdxN.getValueType() > Scale.getValueType())
564          IdxN = DAG.getNode(ISD::TRUNCATE, Scale.getValueType(), IdxN);
565
566        IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
567        N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
568      }
569    }
570  }
571  setValue(&I, N);
572}
573
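/// visitAlloca - Lower dynamically sized allocas (and fixed-size allocas that
/// are not in the entry block) to a DYNAMIC_STACKALLOC node.  The byte count is
/// rounded up to the stack alignment here unless a larger alignment was
/// requested, in which case the alignment is passed along in the node.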
574void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
575  // If this is a fixed sized alloca in the entry block of the function,
576  // allocate it statically on the stack.
577  if (FuncInfo.StaticAllocaMap.count(&I))
578    return;   // getValue will auto-populate this.
579
580  const Type *Ty = I.getAllocatedType();
581  uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
582  unsigned Align = TLI.getTargetData().getTypeAlignment(Ty);
583
584  SDOperand AllocSize = getValue(I.getArraySize());
585  MVT::ValueType IntPtr = TLI.getPointerTy();
586  if (IntPtr < AllocSize.getValueType())
587    AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
588  else if (IntPtr > AllocSize.getValueType())
589    AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);
590
591  AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
592                          getIntPtrConstant(TySize));
593
594  // Handle alignment.  If the requested alignment is less than or equal to the
595  // stack alignment, ignore it and round the size of the allocation up to the
596  // stack alignment size.  If the requested alignment is greater than the
597  // stack alignment, we note it in the DYNAMIC_STACKALLOC node.
598  unsigned StackAlign =
599    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
600  if (Align <= StackAlign) {
601    Align = 0;
602    // Add SA-1 to the size.
603    AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
604                            getIntPtrConstant(StackAlign-1));
605    // Mask out the low bits for alignment purposes.
606    AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
607                            getIntPtrConstant(~(uint64_t)(StackAlign-1)));
608  }
609
610  SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, AllocSize.getValueType(),
611                              getRoot(), AllocSize,
612                              getIntPtrConstant(Align));
613  DAG.setRoot(setValue(&I, DSA).getValue(1));
614
615  // Inform the Frame Information that we have just allocated a variable-sized
616  // object.
617  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
618}
619
620
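/// visitLoad - Lower a load.  A volatile load is chained on the current root
/// and becomes the new root; a non-volatile load is chained on the existing DAG
/// root and its output chain is collected in PendingLoads, so independent loads
/// are not artificially ordered against each other.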
621void SelectionDAGLowering::visitLoad(LoadInst &I) {
622  SDOperand Ptr = getValue(I.getOperand(0));
623
624  SDOperand Root;
625  if (I.isVolatile())
626    Root = getRoot();
627  else {
628    // Do not serialize non-volatile loads against each other.
629    Root = DAG.getRoot();
630  }
631
632  SDOperand L = DAG.getLoad(TLI.getValueType(I.getType()), Root, Ptr,
633                            DAG.getSrcValue(I.getOperand(0)));
634  setValue(&I, L);
635
636  if (I.isVolatile())
637    DAG.setRoot(L.getValue(1));
638  else
639    PendingLoads.push_back(L.getValue(1));
640}
641
642
643void SelectionDAGLowering::visitStore(StoreInst &I) {
644  Value *SrcV = I.getOperand(0);
645  SDOperand Src = getValue(SrcV);
646  SDOperand Ptr = getValue(I.getOperand(1));
647  DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
648                          DAG.getSrcValue(I.getOperand(1))));
649}
650
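/// visitCall - Lower a call.  Recognized libm calls (fabs/sin/cos and their
/// float variants) and most LLVM intrinsics are expanded directly into ISD
/// nodes, setjmp/longjmp are renamed to their libc entry points, and everything
/// else is handed to TLI.LowerCallTo along with its argument list.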
651void SelectionDAGLowering::visitCall(CallInst &I) {
652  const char *RenameFn = 0;
653  SDOperand Tmp;
654  if (Function *F = I.getCalledFunction())
655    if (F->isExternal())
656      switch (F->getIntrinsicID()) {
657      case 0:     // Not an LLVM intrinsic.
658        if (F->getName() == "fabs" || F->getName() == "fabsf") {
659          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
660              I.getOperand(1)->getType()->isFloatingPoint() &&
661              I.getType() == I.getOperand(1)->getType()) {
662            Tmp = getValue(I.getOperand(1));
663            setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
664            return;
665          }
666        }
667        else if (F->getName() == "sin" || F->getName() == "sinf") {
668          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
669              I.getOperand(1)->getType()->isFloatingPoint() &&
670              I.getType() == I.getOperand(1)->getType()) {
671            Tmp = getValue(I.getOperand(1));
672            setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
673            return;
674          }
675        }
676        else if (F->getName() == "cos" || F->getName() == "cosf") {
677          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
678              I.getOperand(1)->getType()->isFloatingPoint() &&
679              I.getType() == I.getOperand(1)->getType()) {
680            Tmp = getValue(I.getOperand(1));
681            setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
682            return;
683          }
684        }
685        break;
686      case Intrinsic::vastart:  visitVAStart(I); return;
687      case Intrinsic::vaend:    visitVAEnd(I); return;
688      case Intrinsic::vacopy:   visitVACopy(I); return;
689      case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return;
690      case Intrinsic::frameaddress:  visitFrameReturnAddress(I, true); return;
691
692      case Intrinsic::setjmp:  RenameFn = "setjmp"; break;
693      case Intrinsic::longjmp: RenameFn = "longjmp"; break;
694      case Intrinsic::memcpy:  visitMemIntrinsic(I, ISD::MEMCPY); return;
695      case Intrinsic::memset:  visitMemIntrinsic(I, ISD::MEMSET); return;
696      case Intrinsic::memmove: visitMemIntrinsic(I, ISD::MEMMOVE); return;
697
698      case Intrinsic::readport:
699      case Intrinsic::readio:
700        Tmp = DAG.getNode(F->getIntrinsicID() == Intrinsic::readport ?
701                          ISD::READPORT : ISD::READIO,
702                          TLI.getValueType(I.getType()), getRoot(),
703                          getValue(I.getOperand(1)));
704        setValue(&I, Tmp);
705        DAG.setRoot(Tmp.getValue(1));
706        return;
707      case Intrinsic::writeport:
708      case Intrinsic::writeio:
709        DAG.setRoot(DAG.getNode(F->getIntrinsicID() == Intrinsic::writeport ?
710                                ISD::WRITEPORT : ISD::WRITEIO, MVT::Other,
711                                getRoot(), getValue(I.getOperand(1)),
712                                getValue(I.getOperand(2))));
713        return;
714      case Intrinsic::dbg_stoppoint:
715      case Intrinsic::dbg_region_start:
716      case Intrinsic::dbg_region_end:
717      case Intrinsic::dbg_func_start:
718      case Intrinsic::dbg_declare:
719        if (I.getType() != Type::VoidTy)
720          setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
721        return;
722
723      case Intrinsic::isunordered:
724        setValue(&I, DAG.getSetCC(ISD::SETUO, MVT::i1,getValue(I.getOperand(1)),
725                                  getValue(I.getOperand(2))));
726        return;
727
728      case Intrinsic::sqrt:
729        setValue(&I, DAG.getNode(ISD::FSQRT,
730                                 getValue(I.getOperand(1)).getValueType(),
731                                 getValue(I.getOperand(1))));
732        return;
733
734      case Intrinsic::pcmarker:
735        Tmp = getValue(I.getOperand(1));
736        DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
737        return;
738      case Intrinsic::cttz:
739        setValue(&I, DAG.getNode(ISD::CTTZ,
740                                 getValue(I.getOperand(1)).getValueType(),
741                                 getValue(I.getOperand(1))));
742        return;
743      case Intrinsic::ctlz:
744        setValue(&I, DAG.getNode(ISD::CTLZ,
745                                 getValue(I.getOperand(1)).getValueType(),
746                                 getValue(I.getOperand(1))));
747        return;
748      case Intrinsic::ctpop:
749        setValue(&I, DAG.getNode(ISD::CTPOP,
750                                 getValue(I.getOperand(1)).getValueType(),
751                                 getValue(I.getOperand(1))));
752        return;
753      default:
754        std::cerr << I;
755        assert(0 && "This intrinsic is not implemented yet!");
756        return;
757      }
758
759  SDOperand Callee;
760  if (!RenameFn)
761    Callee = getValue(I.getOperand(0));
762  else
763    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
764  std::vector<std::pair<SDOperand, const Type*> > Args;
765
766  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
767    Value *Arg = I.getOperand(i);
768    SDOperand ArgNode = getValue(Arg);
769    Args.push_back(std::make_pair(ArgNode, Arg->getType()));
770  }
771
772  const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
773  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
774
775  std::pair<SDOperand,SDOperand> Result =
776    TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), Callee, Args, DAG);
777  if (I.getType() != Type::VoidTy)
778    setValue(&I, Result.first);
779  DAG.setRoot(Result.second);
780}
781
782void SelectionDAGLowering::visitMalloc(MallocInst &I) {
783  SDOperand Src = getValue(I.getOperand(0));
784
785  MVT::ValueType IntPtr = TLI.getPointerTy();
786
787  if (IntPtr < Src.getValueType())
788    Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
789  else if (IntPtr > Src.getValueType())
790    Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
791
792  // Scale the source by the type size.
793  uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
794  Src = DAG.getNode(ISD::MUL, Src.getValueType(),
795                    Src, getIntPtrConstant(ElementSize));
796
797  std::vector<std::pair<SDOperand, const Type*> > Args;
798  Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));
799
800  std::pair<SDOperand,SDOperand> Result =
801    TLI.LowerCallTo(getRoot(), I.getType(), false,
802                    DAG.getExternalSymbol("malloc", IntPtr),
803                    Args, DAG);
804  setValue(&I, Result.first);  // Pointers always fit in registers
805  DAG.setRoot(Result.second);
806}
807
808void SelectionDAGLowering::visitFree(FreeInst &I) {
809  std::vector<std::pair<SDOperand, const Type*> > Args;
810  Args.push_back(std::make_pair(getValue(I.getOperand(0)),
811                                TLI.getTargetData().getIntPtrType()));
812  MVT::ValueType IntPtr = TLI.getPointerTy();
813  std::pair<SDOperand,SDOperand> Result =
814    TLI.LowerCallTo(getRoot(), Type::VoidTy, false,
815                    DAG.getExternalSymbol("free", IntPtr), Args, DAG);
816  DAG.setRoot(Result.second);
817}
818
819std::pair<SDOperand, SDOperand>
820TargetLowering::LowerVAStart(SDOperand Chain, SelectionDAG &DAG) {
821  // We have no sane default behavior; just emit a useful error message and bail
822  // out.
823  std::cerr << "Variable arguments handling not implemented on this target!\n";
824  abort();
825  return std::make_pair(SDOperand(), SDOperand());
826}
827
828SDOperand TargetLowering::LowerVAEnd(SDOperand Chain, SDOperand L,
829                                     SelectionDAG &DAG) {
830  // Default to a noop.
831  return Chain;
832}
833
834std::pair<SDOperand,SDOperand>
835TargetLowering::LowerVACopy(SDOperand Chain, SDOperand L, SelectionDAG &DAG) {
836  // Default to returning the input list.
837  return std::make_pair(L, Chain);
838}
839
840std::pair<SDOperand,SDOperand>
841TargetLowering::LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
842                               const Type *ArgTy, SelectionDAG &DAG) {
843  // We have no sane default behavior; just emit a useful error message and bail
844  // out.
845  std::cerr << "Variable arguments handling not implemented on this target!\n";
846  abort();
847  return std::make_pair(SDOperand(), SDOperand());
848}
849
850
851void SelectionDAGLowering::visitVAStart(CallInst &I) {
852  std::pair<SDOperand,SDOperand> Result = TLI.LowerVAStart(getRoot(), DAG);
853  setValue(&I, Result.first);
854  DAG.setRoot(Result.second);
855}
856
857void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
858  std::pair<SDOperand,SDOperand> Result =
859    TLI.LowerVAArgNext(false, getRoot(), getValue(I.getOperand(0)),
860                       I.getType(), DAG);
861  setValue(&I, Result.first);
862  DAG.setRoot(Result.second);
863}
864
865void SelectionDAGLowering::visitVANext(VANextInst &I) {
866  std::pair<SDOperand,SDOperand> Result =
867    TLI.LowerVAArgNext(true, getRoot(), getValue(I.getOperand(0)),
868                       I.getArgType(), DAG);
869  setValue(&I, Result.first);
870  DAG.setRoot(Result.second);
871}
872
873void SelectionDAGLowering::visitVAEnd(CallInst &I) {
874  DAG.setRoot(TLI.LowerVAEnd(getRoot(), getValue(I.getOperand(1)), DAG));
875}
876
877void SelectionDAGLowering::visitVACopy(CallInst &I) {
878  std::pair<SDOperand,SDOperand> Result =
879    TLI.LowerVACopy(getRoot(), getValue(I.getOperand(1)), DAG);
880  setValue(&I, Result.first);
881  DAG.setRoot(Result.second);
882}
883
884
885// It is always conservatively correct for llvm.returnaddress and
886// llvm.frameaddress to return 0.
887std::pair<SDOperand, SDOperand>
888TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
889                                        unsigned Depth, SelectionDAG &DAG) {
890  return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
891}
892
893SDOperand TargetLowering::LowerOperation(SDOperand Op) {
894  assert(0 && "LowerOperation not implemented for this target!");
895  abort();
896  return SDOperand();
897}
898
899void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
900  unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue();
901  std::pair<SDOperand,SDOperand> Result =
902    TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
903  setValue(&I, Result.first);
904  DAG.setRoot(Result.second);
905}
906
907void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
908  std::vector<SDOperand> Ops;
909  Ops.push_back(getRoot());
910  Ops.push_back(getValue(I.getOperand(1)));
911  Ops.push_back(getValue(I.getOperand(2)));
912  Ops.push_back(getValue(I.getOperand(3)));
913  Ops.push_back(getValue(I.getOperand(4)));
914  DAG.setRoot(DAG.getNode(Op, MVT::Other, Ops));
915}
916
917//===----------------------------------------------------------------------===//
918// SelectionDAGISel code
919//===----------------------------------------------------------------------===//
920
921unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
922  return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
923}
924
925
926
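/// runOnFunction - Construct the MachineFunction, compute the per-function
/// lowering information, and then select each basic block in turn.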
927bool SelectionDAGISel::runOnFunction(Function &Fn) {
928  MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
929  RegMap = MF.getSSARegMap();
930  DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
931
932  FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
933
934  for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
935    SelectBasicBlock(I, MF, FuncInfo);
936
937  return true;
938}
939
940
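/// CopyValueToVirtualRegister - Emit a CopyToReg node that copies the DAG value
/// computed for V into the virtual register Reg, so that other blocks can read
/// it back with CopyFromReg.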
941SDOperand SelectionDAGISel::
942CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) {
943  SelectionDAG &DAG = SDL.DAG;
944  SDOperand Op = SDL.getValue(V);
945  assert((Op.getOpcode() != ISD::CopyFromReg ||
946          cast<RegSDNode>(Op)->getReg() != Reg) &&
947         "Copy from a reg to the same reg!");
948  return DAG.getCopyToReg(SDL.getRoot(), Op, Reg);
949}
950
951/// IsOnlyUsedInOneBasicBlock - If the specified argument is only used in a
952/// single basic block, return that block.  Otherwise, return a null pointer.
953static BasicBlock *IsOnlyUsedInOneBasicBlock(Argument *A) {
954  if (A->use_empty()) return 0;
955  BasicBlock *BB = cast<Instruction>(A->use_back())->getParent();
956  for (Argument::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E;
957       ++UI)
958    if (isa<PHINode>(*UI) || cast<Instruction>(*UI)->getParent() != BB)
959      return 0;  // Disagreement among the users?
960
961  // Okay, there is a single BB user.  Only permit this optimization if this is
962  // the entry block, otherwise, we might sink argument loads into loops and
963  // stuff.  Later, when we have global instruction selection, this clearly
964  // won't be an issue.
965  if (BB == BB->getParent()->begin())
966    return BB;
967  return 0;
968}
969
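/// LowerArguments - If BB is the entry block, lower the formal arguments and
/// either copy each one into its assigned virtual register or, when it is only
/// used in a single block, record it in BlockLocalArguments.  For every block,
/// also set up the values of any arguments recorded as local to that block.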
970void SelectionDAGISel::
971LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
972               std::vector<SDOperand> &UnorderedChains) {
973  // If this is the entry block, emit arguments.
974  Function &F = *BB->getParent();
975  FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
976
977  if (BB == &F.front()) {
978    SDOperand OldRoot = SDL.DAG.getRoot();
979
980    std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
981
982    // If there were side effects accessing the argument list, do not do
983    // anything special.
984    if (OldRoot != SDL.DAG.getRoot()) {
985      unsigned a = 0;
986      for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
987           AI != E; ++AI,++a)
988        if (!AI->use_empty()) {
989          SDL.setValue(AI, Args[a]);
990          SDOperand Copy =
991            CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
992          UnorderedChains.push_back(Copy);
993        }
994    } else {
995      // Otherwise, if any argument is only accessed in a single basic block,
996      // emit that argument only to that basic block.
997      unsigned a = 0;
998      for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
999           AI != E; ++AI,++a)
1000        if (!AI->use_empty()) {
1001          if (BasicBlock *BBU = IsOnlyUsedInOneBasicBlock(AI)) {
1002            FuncInfo.BlockLocalArguments.insert(std::make_pair(BBU,
1003                                                      std::make_pair(AI, a)));
1004          } else {
1005            SDL.setValue(AI, Args[a]);
1006            SDOperand Copy =
1007              CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
1008            UnorderedChains.push_back(Copy);
1009          }
1010        }
1011    }
1012  }
1013
1014  // See if there are any block-local arguments that need to be emitted in this
1015  // block.
1016
1017  if (!FuncInfo.BlockLocalArguments.empty()) {
1018    std::multimap<BasicBlock*, std::pair<Argument*, unsigned> >::iterator BLAI =
1019      FuncInfo.BlockLocalArguments.lower_bound(BB);
1020    if (BLAI != FuncInfo.BlockLocalArguments.end() && BLAI->first == BB) {
1021      // Lower the arguments into this block.
1022      std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
1023
1024      // Set up the value mapping for the local arguments.
1025      for (; BLAI != FuncInfo.BlockLocalArguments.end() && BLAI->first == BB;
1026           ++BLAI)
1027        SDL.setValue(BLAI->second.first, Args[BLAI->second.second]);
1028
1029      // Any dead arguments will just be ignored here.
1030    }
1031  }
1032}
1033
1034
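/// BuildSelectionDAG - Construct the SelectionDAG for basic block LLVMBB: lower
/// arguments and all non-terminator instructions, copy cross-block values into
/// their virtual registers, record the registers that successor PHI nodes will
/// need as inputs, and finally lower the terminator.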
1035void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
1036       std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
1037                                    FunctionLoweringInfo &FuncInfo) {
1038  SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
1039
1040  std::vector<SDOperand> UnorderedChains;
1041
1042  // Lower any arguments needed in this block.
1043  LowerArguments(LLVMBB, SDL, UnorderedChains);
1044
1045  BB = FuncInfo.MBBMap[LLVMBB];
1046  SDL.setCurrentBasicBlock(BB);
1047
1048  // Lower all of the non-terminator instructions.
1049  for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
1050       I != E; ++I)
1051    SDL.visit(*I);
1052
1053  // Ensure that all instructions which are used outside of their defining
1054  // blocks are available as virtual registers.
1055  for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
1056    if (!I->use_empty() && !isa<PHINode>(I)) {
1057      std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I);
1058      if (VMI != FuncInfo.ValueMap.end())
1059        UnorderedChains.push_back(
1060                           CopyValueToVirtualRegister(SDL, I, VMI->second));
1061    }
1062
1063  // Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
1064  // ensure constants are generated when needed.  Remember the virtual registers
1065  // that need to be added to the Machine PHI nodes as input.  We cannot just
1066  // directly add them, because expansion might result in multiple MBB's for one
1067  // BB.  As such, the start of the BB might correspond to a different MBB than
1068  // the end.
1069  //
1070
1071  // Emit constants only once even if used by multiple PHI nodes.
1072  std::map<Constant*, unsigned> ConstantsOut;
1073
1074  // Check successor nodes' PHI nodes that expect a constant to be available from
1075  // this block.
1076  TerminatorInst *TI = LLVMBB->getTerminator();
1077  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
1078    BasicBlock *SuccBB = TI->getSuccessor(succ);
1079    MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
1080    PHINode *PN;
1081
1082    // At this point we know that there is a 1-1 correspondence between LLVM PHI
1083    // nodes and Machine PHI nodes, but the incoming operands have not been
1084    // emitted yet.
1085    for (BasicBlock::iterator I = SuccBB->begin();
1086         (PN = dyn_cast<PHINode>(I)); ++I)
1087      if (!PN->use_empty()) {
1088        unsigned Reg;
1089        Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
1090        if (Constant *C = dyn_cast<Constant>(PHIOp)) {
1091          unsigned &RegOut = ConstantsOut[C];
1092          if (RegOut == 0) {
1093            RegOut = FuncInfo.CreateRegForValue(C);
1094            UnorderedChains.push_back(
1095                             CopyValueToVirtualRegister(SDL, C, RegOut));
1096          }
1097          Reg = RegOut;
1098        } else {
1099          Reg = FuncInfo.ValueMap[PHIOp];
1100          if (Reg == 0) {
1101            assert(isa<AllocaInst>(PHIOp) &&
1102                   FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
1103                   "Didn't codegen value into a register!??");
1104            Reg = FuncInfo.CreateRegForValue(PHIOp);
1105            UnorderedChains.push_back(
1106                             CopyValueToVirtualRegister(SDL, PHIOp, Reg));
1107          }
1108        }
1109
1110        // Remember that this register needs to be added to the machine PHI node as
1111        // the input for this MBB.
1112        unsigned NumElements =
1113          TLI.getNumElements(TLI.getValueType(PN->getType()));
1114        for (unsigned i = 0, e = NumElements; i != e; ++i)
1115          PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
1116      }
1117  }
1118  ConstantsOut.clear();
1119
1120  // Turn all of the unordered chains into one factored node.
1121  if (!UnorderedChains.empty()) {
1122    UnorderedChains.push_back(SDL.getRoot());
1123    DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, UnorderedChains));
1124  }
1125
1126  // Lower the terminator after the copies are emitted.
1127  SDL.visit(*LLVMBB->getTerminator());
1128
1129  // Make sure the root of the DAG is up-to-date.
1130  DAG.setRoot(SDL.getRoot());
1131}
1132
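/// SelectBasicBlock - Run the per-block pipeline: build the SelectionDAG for
/// the block, legalize it, instruction-select it into machine code, and then
/// patch up the successor PHI operands and machine CFG edges using the MBB that
/// was actually emitted last.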
1133void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
1134                                        FunctionLoweringInfo &FuncInfo) {
1135  SelectionDAG DAG(TLI, MF);
1136  CurDAG = &DAG;
1137  std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
1138
1139  // First step, lower LLVM code to some DAG.  This DAG may use operations and
1140  // types that are not supported by the target.
1141  BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
1142
1143  DEBUG(std::cerr << "Lowered selection DAG:\n");
1144  DEBUG(DAG.dump());
1145
1146  // Second step, hack on the DAG until it only uses operations and types that
1147  // the target supports.
1148  DAG.Legalize();
1149
1150  DEBUG(std::cerr << "Legalized selection DAG:\n");
1151  DEBUG(DAG.dump());
1152
1153  // Third, instruction select all of the operations to machine code, adding the
1154  // code to the MachineBasicBlock.
1155  InstructionSelectBasicBlock(DAG);
1156
1157  if (ViewDAGs) DAG.viewGraph();
1158
1159  DEBUG(std::cerr << "Selected machine code:\n");
1160  DEBUG(BB->dump());
1161
1162  // Next, now that we know which MBB is the last one the LLVM BB expanded into,
1163  // update the PHI nodes in the successors.
1164  for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
1165    MachineInstr *PHI = PHINodesToUpdate[i].first;
1166    assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
1167           "This is not a machine PHI node that we are updating!");
1168    PHI->addRegOperand(PHINodesToUpdate[i].second);
1169    PHI->addMachineBasicBlockOperand(BB);
1170  }
1171
1172  // Finally, add the CFG edges from the last selected MBB to the successor
1173  // MBBs.
1174  TerminatorInst *TI = LLVMBB->getTerminator();
1175  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
1176    MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[TI->getSuccessor(i)];
1177    BB->addSuccessor(Succ0MBB);
1178  }
1179}
1180