SelectionDAGBuilder.cpp revision 0777e927214c61c5d681e5b7dd5d00665c81133a
1//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements routines for translating from LLVM IR into SelectionDAG IR.
11//
12//===----------------------------------------------------------------------===//
13
14#define DEBUG_TYPE "isel"
15#include "SelectionDAGBuilder.h"
16#include "FunctionLoweringInfo.h"
17#include "llvm/ADT/BitVector.h"
18#include "llvm/ADT/SmallSet.h"
19#include "llvm/Analysis/AliasAnalysis.h"
20#include "llvm/Constants.h"
21#include "llvm/CallingConv.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/Function.h"
24#include "llvm/GlobalVariable.h"
25#include "llvm/InlineAsm.h"
26#include "llvm/Instructions.h"
27#include "llvm/Intrinsics.h"
28#include "llvm/IntrinsicInst.h"
29#include "llvm/LLVMContext.h"
30#include "llvm/Module.h"
31#include "llvm/CodeGen/FastISel.h"
32#include "llvm/CodeGen/GCStrategy.h"
33#include "llvm/CodeGen/GCMetadata.h"
34#include "llvm/CodeGen/MachineFunction.h"
35#include "llvm/CodeGen/MachineFrameInfo.h"
36#include "llvm/CodeGen/MachineInstrBuilder.h"
37#include "llvm/CodeGen/MachineJumpTableInfo.h"
38#include "llvm/CodeGen/MachineModuleInfo.h"
39#include "llvm/CodeGen/MachineRegisterInfo.h"
40#include "llvm/CodeGen/PseudoSourceValue.h"
41#include "llvm/CodeGen/SelectionDAG.h"
42#include "llvm/CodeGen/DwarfWriter.h"
43#include "llvm/Analysis/DebugInfo.h"
44#include "llvm/Target/TargetRegisterInfo.h"
45#include "llvm/Target/TargetData.h"
46#include "llvm/Target/TargetFrameInfo.h"
47#include "llvm/Target/TargetInstrInfo.h"
48#include "llvm/Target/TargetIntrinsicInfo.h"
49#include "llvm/Target/TargetLowering.h"
50#include "llvm/Target/TargetOptions.h"
51#include "llvm/Support/Compiler.h"
52#include "llvm/Support/CommandLine.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/MathExtras.h"
56#include "llvm/Support/raw_ostream.h"
57#include <algorithm>
58using namespace llvm;
59
60/// LimitFloatPrecision - Generate low-precision inline sequences for
61/// some float libcalls (6, 8 or 12 bits).
62static unsigned LimitFloatPrecision;
63
64static cl::opt<unsigned, true>
65LimitFPPrecision("limit-float-precision",
66                 cl::desc("Generate low-precision inline sequences "
67                          "for some float libcalls"),
68                 cl::location(LimitFloatPrecision),
69                 cl::init(0));
70
71namespace {
72  /// RegsForValue - This struct represents the registers (physical or virtual)
73  /// that a particular set of values is assigned, and the type information about
74  /// the value. The most common situation is to represent one value at a time,
75  /// but struct or array values are handled element-wise as multiple values.
76  /// The splitting of aggregates is performed recursively, so that we never
77  /// have aggregate-typed registers. The values at this point do not necessarily
78  /// have legal types, so each value may require one or more registers of some
79  /// legal type.
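  ///
  /// As an illustrative sketch (assuming a target whose only legal integer
  /// register type is i32): a single i64 value would be described here as
  ///   ValueVTs = { i64 }, RegVTs = { i32 }, Regs = { R, R+1 }
  /// i.e. one value type, one register type, and two consecutive registers.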
80  ///
81  struct RegsForValue {
82    /// TLI - The TargetLowering object.
83    ///
84    const TargetLowering *TLI;
85
86    /// ValueVTs - The value types of the values, which may not be legal, and
87    /// may need to be promoted or synthesized from one or more registers.
88    ///
89    SmallVector<EVT, 4> ValueVTs;
90
91    /// RegVTs - The value types of the registers. This is the same size as
92    /// ValueVTs and it records, for each value, what the type of the assigned
93    /// register or registers are. (Individual values are never synthesized
94    /// from more than one type of register.)
95    ///
96    /// With virtual registers, the contents of RegVTs are redundant with TLI's
97    /// getRegisterType member function; with physical registers, however,
98    /// it is necessary to have a separate record of the types.
99    ///
100    SmallVector<EVT, 4> RegVTs;
101
102    /// Regs - This list holds the registers assigned to the values.
103    /// Each legal or promoted value requires one register, and each
104    /// expanded value requires multiple registers.
105    ///
106    SmallVector<unsigned, 4> Regs;
107
108    RegsForValue() : TLI(0) {}
109
110    RegsForValue(const TargetLowering &tli,
111                 const SmallVector<unsigned, 4> &regs,
112                 EVT regvt, EVT valuevt)
113      : TLI(&tli),  ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
114    RegsForValue(const TargetLowering &tli,
115                 const SmallVector<unsigned, 4> &regs,
116                 const SmallVector<EVT, 4> &regvts,
117                 const SmallVector<EVT, 4> &valuevts)
118      : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
119    RegsForValue(LLVMContext &Context, const TargetLowering &tli,
120                 unsigned Reg, const Type *Ty) : TLI(&tli) {
121      ComputeValueVTs(tli, Ty, ValueVTs);
122
123      for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
124        EVT ValueVT = ValueVTs[Value];
125        unsigned NumRegs = TLI->getNumRegisters(Context, ValueVT);
126        EVT RegisterVT = TLI->getRegisterType(Context, ValueVT);
127        for (unsigned i = 0; i != NumRegs; ++i)
128          Regs.push_back(Reg + i);
129        RegVTs.push_back(RegisterVT);
130        Reg += NumRegs;
131      }
132    }
133
134    /// append - Add the specified values to this one.
135    void append(const RegsForValue &RHS) {
136      TLI = RHS.TLI;
137      ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
138      RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
139      Regs.append(RHS.Regs.begin(), RHS.Regs.end());
140    }
141
142
143    /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
144    /// this value and returns the result as a ValueVTs value.  This uses
145    /// Chain/Flag as the input and updates them for the output Chain/Flag.
146    /// If the Flag pointer is NULL, no flag is used.
147    SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
148                              SDValue &Chain, SDValue *Flag) const;
149
150    /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
151    /// specified value into the registers specified by this object.  This uses
152    /// Chain/Flag as the input and updates them for the output Chain/Flag.
153    /// If the Flag pointer is NULL, no flag is used.
154    void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
155                       SDValue &Chain, SDValue *Flag) const;
156
157    /// AddInlineAsmOperands - Add this value to the specified inlineasm node
158    /// operand list.  This adds the code marker, matching input operand index
159    /// (if applicable), and includes the number of values added into it.
160    void AddInlineAsmOperands(unsigned Code,
161                              bool HasMatching, unsigned MatchingIdx,
162                              SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
163  };
164}
165
166/// getCopyFromParts - Create a value that contains the specified legal parts
167/// combined into the value they represent.  If the parts combine to a type
168/// larger than ValueVT, then AssertOp can be used to specify whether the extra
169/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
170/// (ISD::AssertSext).
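///
/// For example (assuming a 32-bit little-endian target), an i64 value held in
/// two i32 parts is reassembled roughly as
///   Lo  = Parts[0]; Hi = Parts[1];
///   Val = BUILD_PAIR i64, Lo, Hi
/// If instead ValueVT is smaller than the single part (say an i1 held in an
/// i8 part), the optional AssertZext/AssertSext is emitted before the final
/// TRUNCATE.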
171static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
172                                const SDValue *Parts,
173                                unsigned NumParts, EVT PartVT, EVT ValueVT,
174                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
175  assert(NumParts > 0 && "No parts to assemble!");
176  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
177  SDValue Val = Parts[0];
178
179  if (NumParts > 1) {
180    // Assemble the value from multiple parts.
181    if (!ValueVT.isVector() && ValueVT.isInteger()) {
182      unsigned PartBits = PartVT.getSizeInBits();
183      unsigned ValueBits = ValueVT.getSizeInBits();
184
185      // Assemble the power of 2 part.
186      unsigned RoundParts = NumParts & (NumParts - 1) ?
187        1 << Log2_32(NumParts) : NumParts;
188      unsigned RoundBits = PartBits * RoundParts;
189      EVT RoundVT = RoundBits == ValueBits ?
190        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
191      SDValue Lo, Hi;
192
193      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
194
195      if (RoundParts > 2) {
196        Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT);
197        Hi = getCopyFromParts(DAG, dl, Parts+RoundParts/2, RoundParts/2,
198                              PartVT, HalfVT);
199      } else {
200        Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
201        Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
202      }
203      if (TLI.isBigEndian())
204        std::swap(Lo, Hi);
205      Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);
206
207      if (RoundParts < NumParts) {
208        // Assemble the trailing non-power-of-2 part.
209        unsigned OddParts = NumParts - RoundParts;
210        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
211        Hi = getCopyFromParts(DAG, dl,
212                              Parts+RoundParts, OddParts, PartVT, OddVT);
213
214        // Combine the round and odd parts.
215        Lo = Val;
216        if (TLI.isBigEndian())
217          std::swap(Lo, Hi);
218        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
219        Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
220        Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
221                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
222                                         TLI.getPointerTy()));
223        Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
224        Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
225      }
226    } else if (ValueVT.isVector()) {
227      // Handle a multi-element vector.
228      EVT IntermediateVT, RegisterVT;
229      unsigned NumIntermediates;
230      unsigned NumRegs =
231        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
232                                   NumIntermediates, RegisterVT);
233      assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
234      NumParts = NumRegs; // Silence a compiler warning.
235      assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
236      assert(RegisterVT == Parts[0].getValueType() &&
237             "Part type doesn't match part!");
238
239      // Assemble the parts into intermediate operands.
240      SmallVector<SDValue, 8> Ops(NumIntermediates);
241      if (NumIntermediates == NumParts) {
242        // If the register was not expanded, truncate or copy the value,
243        // as appropriate.
244        for (unsigned i = 0; i != NumParts; ++i)
245          Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1,
246                                    PartVT, IntermediateVT);
247      } else if (NumParts > 0) {
248        // If the intermediate type was expanded, build the intermediate operands
249        // from the parts.
250        assert(NumParts % NumIntermediates == 0 &&
251               "Must expand into a divisible number of parts!");
252        unsigned Factor = NumParts / NumIntermediates;
253        for (unsigned i = 0; i != NumIntermediates; ++i)
254          Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor,
255                                    PartVT, IntermediateVT);
256      }
257
258      // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
259      // operands.
260      Val = DAG.getNode(IntermediateVT.isVector() ?
261                        ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
262                        ValueVT, &Ops[0], NumIntermediates);
263    } else if (PartVT.isFloatingPoint()) {
264      // FP split into multiple FP parts (for ppcf128)
265      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
266             "Unexpected split");
267      SDValue Lo, Hi;
268      Lo = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[0]);
269      Hi = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[1]);
270      if (TLI.isBigEndian())
271        std::swap(Lo, Hi);
272      Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
273    } else {
274      // FP split into integer parts (soft fp)
275      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
276             !PartVT.isVector() && "Unexpected split");
277      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
278      Val = getCopyFromParts(DAG, dl, Parts, NumParts, PartVT, IntVT);
279    }
280  }
281
282  // There is now one part, held in Val.  Correct it to match ValueVT.
283  PartVT = Val.getValueType();
284
285  if (PartVT == ValueVT)
286    return Val;
287
288  if (PartVT.isVector()) {
289    assert(ValueVT.isVector() && "Unknown vector conversion!");
290    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
291  }
292
293  if (ValueVT.isVector()) {
294    assert(ValueVT.getVectorElementType() == PartVT &&
295           ValueVT.getVectorNumElements() == 1 &&
296           "Only trivial scalar-to-vector conversions should get here!");
297    return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
298  }
299
300  if (PartVT.isInteger() &&
301      ValueVT.isInteger()) {
302    if (ValueVT.bitsLT(PartVT)) {
303      // For a truncate, see if we have any information to
304      // indicate whether the truncated bits will always be
305      // zero or sign-extended.
306      if (AssertOp != ISD::DELETED_NODE)
307        Val = DAG.getNode(AssertOp, dl, PartVT, Val,
308                          DAG.getValueType(ValueVT));
309      return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
310    } else {
311      return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
312    }
313  }
314
315  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
316    if (ValueVT.bitsLT(Val.getValueType()))
317      // FP_ROUND's are always exact here.
318      return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
319                         DAG.getIntPtrConstant(1));
320    return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
321  }
322
323  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
324    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
325
326  llvm_unreachable("Unknown mismatch!");
327  return SDValue();
328}
329
330/// getCopyToParts - Create a series of nodes that contain the specified value
331/// split into legal parts.  If the parts contain more bits than Val, then, for
332/// integers, ExtendKind can be used to specify how to generate the extra bits.
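///
/// A rough sketch of the common case (again with 32-bit parts): an i64 value
/// copied into two i32 parts becomes
///   Parts[0] = EXTRACT_ELEMENT i32, Val, 0   (low half)
///   Parts[1] = EXTRACT_ELEMENT i32, Val, 1   (high half)
/// with the two parts swapped afterwards on big-endian targets.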
333static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
334                           SDValue *Parts, unsigned NumParts, EVT PartVT,
335                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
336  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
337  EVT PtrVT = TLI.getPointerTy();
338  EVT ValueVT = Val.getValueType();
339  unsigned PartBits = PartVT.getSizeInBits();
340  unsigned OrigNumParts = NumParts;
341  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
342
343  if (!NumParts)
344    return;
345
346  if (!ValueVT.isVector()) {
347    if (PartVT == ValueVT) {
348      assert(NumParts == 1 && "No-op copy with multiple parts!");
349      Parts[0] = Val;
350      return;
351    }
352
353    if (NumParts * PartBits > ValueVT.getSizeInBits()) {
354      // If the parts cover more bits than the value has, promote the value.
355      if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
356        assert(NumParts == 1 && "Do not know what to promote to!");
357        Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
358      } else if (PartVT.isInteger() && ValueVT.isInteger()) {
359        ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
360        Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
361      } else {
362        llvm_unreachable("Unknown mismatch!");
363      }
364    } else if (PartBits == ValueVT.getSizeInBits()) {
365      // Different types of the same size.
366      assert(NumParts == 1 && PartVT != ValueVT);
367      Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
368    } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
369      // If the parts cover fewer bits than the value has, truncate the value.
370      if (PartVT.isInteger() && ValueVT.isInteger()) {
371        ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
372        Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
373      } else {
374        llvm_unreachable("Unknown mismatch!");
375      }
376    }
377
378    // The value may have changed - recompute ValueVT.
379    ValueVT = Val.getValueType();
380    assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
381           "Failed to tile the value with PartVT!");
382
383    if (NumParts == 1) {
384      assert(PartVT == ValueVT && "Type conversion failed!");
385      Parts[0] = Val;
386      return;
387    }
388
389    // Expand the value into multiple parts.
390    if (NumParts & (NumParts - 1)) {
391      // The number of parts is not a power of 2.  Split off and copy the tail.
392      assert(PartVT.isInteger() && ValueVT.isInteger() &&
393             "Do not know what to expand to!");
394      unsigned RoundParts = 1 << Log2_32(NumParts);
395      unsigned RoundBits = RoundParts * PartBits;
396      unsigned OddParts = NumParts - RoundParts;
397      SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
398                                   DAG.getConstant(RoundBits,
399                                                   TLI.getPointerTy()));
400      getCopyToParts(DAG, dl, OddVal, Parts + RoundParts, OddParts, PartVT);
401      if (TLI.isBigEndian())
402        // The odd parts were reversed by getCopyToParts - unreverse them.
403        std::reverse(Parts + RoundParts, Parts + NumParts);
404      NumParts = RoundParts;
405      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
406      Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
407    }
408
409    // The number of parts is a power of 2.  Repeatedly bisect the value using
410    // EXTRACT_ELEMENT.
411    Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
412                           EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()),
413                           Val);
414    for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
415      for (unsigned i = 0; i < NumParts; i += StepSize) {
416        unsigned ThisBits = StepSize * PartBits / 2;
417        EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
418        SDValue &Part0 = Parts[i];
419        SDValue &Part1 = Parts[i+StepSize/2];
420
421        Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
422                            ThisVT, Part0,
423                            DAG.getConstant(1, PtrVT));
424        Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
425                            ThisVT, Part0,
426                            DAG.getConstant(0, PtrVT));
427
428        if (ThisBits == PartBits && ThisVT != PartVT) {
429          Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
430                                                PartVT, Part0);
431          Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
432                                                PartVT, Part1);
433        }
434      }
435    }
436
437    if (TLI.isBigEndian())
438      std::reverse(Parts, Parts + OrigNumParts);
439
440    return;
441  }
442
443  // Vector ValueVT.
444  if (NumParts == 1) {
445    if (PartVT != ValueVT) {
446      if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
447        Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
448      } else {
449        assert(ValueVT.getVectorElementType() == PartVT &&
450               ValueVT.getVectorNumElements() == 1 &&
451               "Only trivial vector-to-scalar conversions should get here!");
452        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
453                          PartVT, Val,
454                          DAG.getConstant(0, PtrVT));
455      }
456    }
457
458    Parts[0] = Val;
459    return;
460  }
461
462  // Handle a multi-element vector.
463  EVT IntermediateVT, RegisterVT;
464  unsigned NumIntermediates;
465  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
466                              IntermediateVT, NumIntermediates, RegisterVT);
467  unsigned NumElements = ValueVT.getVectorNumElements();
468
469  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
470  NumParts = NumRegs; // Silence a compiler warning.
471  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
472
473  // Split the vector into intermediate operands.
474  SmallVector<SDValue, 8> Ops(NumIntermediates);
475  for (unsigned i = 0; i != NumIntermediates; ++i)
476    if (IntermediateVT.isVector())
477      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
478                           IntermediateVT, Val,
479                           DAG.getConstant(i * (NumElements / NumIntermediates),
480                                           PtrVT));
481    else
482      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
483                           IntermediateVT, Val,
484                           DAG.getConstant(i, PtrVT));
485
486  // Split the intermediate operands into legal parts.
487  if (NumParts == NumIntermediates) {
488    // If the register was not expanded, promote or copy the value,
489    // as appropriate.
490    for (unsigned i = 0; i != NumParts; ++i)
491      getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT);
492  } else if (NumParts > 0) {
493    // If the intermediate type was expanded, split each value into
494    // legal parts.
495    assert(NumParts % NumIntermediates == 0 &&
496           "Must expand into a divisible number of parts!");
497    unsigned Factor = NumParts / NumIntermediates;
498    for (unsigned i = 0; i != NumIntermediates; ++i)
499      getCopyToParts(DAG, dl, Ops[i], &Parts[i * Factor], Factor, PartVT);
500  }
501}
502
503
504void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
505  AA = &aa;
506  GFI = gfi;
507  TD = DAG.getTarget().getTargetData();
508}
509
510/// clear - Clear out the current SelectionDAG and the associated
511/// state and prepare this SelectionDAGBuilder object to be used
512/// for a new block. This doesn't clear out information about
513/// additional blocks that are needed to complete switch lowering
514/// or PHI node updating; that information is cleared out as it is
515/// consumed.
516void SelectionDAGBuilder::clear() {
517  NodeMap.clear();
518  PendingLoads.clear();
519  PendingExports.clear();
520  EdgeMapping.clear();
521  DAG.clear();
522  CurDebugLoc = DebugLoc::getUnknownLoc();
523  HasTailCall = false;
524}
525
526/// getRoot - Return the current virtual root of the Selection DAG,
527/// flushing any PendingLoad items. This must be done before emitting
528/// a store or any other node that may need to be ordered after any
529/// prior load instructions.
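///
/// For example, if three loads are pending, their chains are merged into a
/// single TokenFactor node which becomes the new root, so a store emitted
/// afterwards is ordered after all three loads.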
530///
531SDValue SelectionDAGBuilder::getRoot() {
532  if (PendingLoads.empty())
533    return DAG.getRoot();
534
535  if (PendingLoads.size() == 1) {
536    SDValue Root = PendingLoads[0];
537    DAG.setRoot(Root);
538    PendingLoads.clear();
539    return Root;
540  }
541
542  // Otherwise, we have to make a token factor node.
543  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
544                               &PendingLoads[0], PendingLoads.size());
545  PendingLoads.clear();
546  DAG.setRoot(Root);
547  return Root;
548}
549
550/// getControlRoot - Similar to getRoot, but instead of flushing all the
551/// PendingLoad items, flush all the PendingExports items. It is necessary
552/// to do this before emitting a terminator instruction.
553///
554SDValue SelectionDAGBuilder::getControlRoot() {
555  SDValue Root = DAG.getRoot();
556
557  if (PendingExports.empty())
558    return Root;
559
560  // Turn all of the CopyToReg chains into one factored node.
561  if (Root.getOpcode() != ISD::EntryToken) {
562    unsigned i = 0, e = PendingExports.size();
563    for (; i != e; ++i) {
564      assert(PendingExports[i].getNode()->getNumOperands() > 1);
565      if (PendingExports[i].getNode()->getOperand(0) == Root)
566        break;  // Don't add the root if we already indirectly depend on it.
567    }
568
569    if (i == e)
570      PendingExports.push_back(Root);
571  }
572
573  Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
574                     &PendingExports[0],
575                     PendingExports.size());
576  PendingExports.clear();
577  DAG.setRoot(Root);
578  return Root;
579}
580
581void SelectionDAGBuilder::visit(Instruction &I) {
582  visit(I.getOpcode(), I);
583}
584
585void SelectionDAGBuilder::visit(unsigned Opcode, User &I) {
586  // We're processing a new instruction.
587  ++SDNodeOrder;
588
589  // Note: this doesn't use InstVisitor, because it has to work with
590  // ConstantExpr's in addition to instructions.
591  switch (Opcode) {
592  default: llvm_unreachable("Unknown instruction type encountered!");
593    // Build the switch statement using the Instruction.def file.
594#define HANDLE_INST(NUM, OPCODE, CLASS) \
595  case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
596#include "llvm/Instruction.def"
597  }
598}
599
600SDValue SelectionDAGBuilder::getValue(const Value *V) {
601  SDValue &N = NodeMap[V];
602  if (N.getNode()) return N;
603
604  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
605    EVT VT = TLI.getValueType(V->getType(), true);
606
607    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
608      return N = DAG.getConstant(*CI, VT);
609
610    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
611      return N = DAG.getGlobalAddress(GV, VT);
612
613    if (isa<ConstantPointerNull>(C))
614      return N = DAG.getConstant(0, TLI.getPointerTy());
615
616    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
617      return N = DAG.getConstantFP(*CFP, VT);
618
619    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
620      return N = DAG.getUNDEF(VT);
621
622    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
623      visit(CE->getOpcode(), *CE);
624      SDValue N1 = NodeMap[V];
625      assert(N1.getNode() && "visit didn't populate the ValueMap!");
626      return N1;
627    }
628
629    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
630      SmallVector<SDValue, 4> Constants;
631      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
632           OI != OE; ++OI) {
633        SDNode *Val = getValue(*OI).getNode();
634        // If the operand is an empty aggregate, there are no values.
635        if (!Val) continue;
636        // Add each leaf value from the operand to the Constants list
637        // to form a flattened list of all the values.
638        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
639          Constants.push_back(SDValue(Val, i));
640      }
641      return DAG.getMergeValues(&Constants[0], Constants.size(),
642                                getCurDebugLoc());
643    }
644
645    if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
646      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
647             "Unknown struct or array constant!");
648
649      SmallVector<EVT, 4> ValueVTs;
650      ComputeValueVTs(TLI, C->getType(), ValueVTs);
651      unsigned NumElts = ValueVTs.size();
652      if (NumElts == 0)
653        return SDValue(); // empty struct
654      SmallVector<SDValue, 4> Constants(NumElts);
655      for (unsigned i = 0; i != NumElts; ++i) {
656        EVT EltVT = ValueVTs[i];
657        if (isa<UndefValue>(C))
658          Constants[i] = DAG.getUNDEF(EltVT);
659        else if (EltVT.isFloatingPoint())
660          Constants[i] = DAG.getConstantFP(0, EltVT);
661        else
662          Constants[i] = DAG.getConstant(0, EltVT);
663      }
664      return DAG.getMergeValues(&Constants[0], NumElts, getCurDebugLoc());
665    }
666
667    if (BlockAddress *BA = dyn_cast<BlockAddress>(C))
668      return DAG.getBlockAddress(BA, VT);
669
670    const VectorType *VecTy = cast<VectorType>(V->getType());
671    unsigned NumElements = VecTy->getNumElements();
672
673    // Now that we know the number and type of the elements, get that number of
674    // elements into the Ops array based on what kind of constant it is.
675    SmallVector<SDValue, 16> Ops;
676    if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
677      for (unsigned i = 0; i != NumElements; ++i)
678        Ops.push_back(getValue(CP->getOperand(i)));
679    } else {
680      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
681      EVT EltVT = TLI.getValueType(VecTy->getElementType());
682
683      SDValue Op;
684      if (EltVT.isFloatingPoint())
685        Op = DAG.getConstantFP(0, EltVT);
686      else
687        Op = DAG.getConstant(0, EltVT);
688      Ops.assign(NumElements, Op);
689    }
690
691    // Create a BUILD_VECTOR node.
692    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
693                                    VT, &Ops[0], Ops.size());
694  }
695
696  // If this is a static alloca, generate it as the frameindex instead of
697  // as a computation.
698  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
699    DenseMap<const AllocaInst*, int>::iterator SI =
700      FuncInfo.StaticAllocaMap.find(AI);
701    if (SI != FuncInfo.StaticAllocaMap.end())
702      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
703  }
704
705  unsigned InReg = FuncInfo.ValueMap[V];
706  assert(InReg && "Value not in map!");
707
708  RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
709  SDValue Chain = DAG.getEntryNode();
710  return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
711}
712
713/// Get the EVTs and ArgFlags collections that represent the return type
714/// of the given function.  This does not require a DAG or a return value, and
715/// is suitable for use before any DAGs for the function are constructed.
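///
/// As an illustrative example: an i1 return value marked 'zeroext' on a
/// target with 32-bit registers is widened to the i32 MinVT below, producing
/// a single i32 entry in OutVTs whose flags have ZExt set.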
716static void getReturnInfo(const Type* ReturnType,
717                   Attributes attr, SmallVectorImpl<EVT> &OutVTs,
718                   SmallVectorImpl<ISD::ArgFlagsTy> &OutFlags,
719                   TargetLowering &TLI,
720                   SmallVectorImpl<uint64_t> *Offsets = 0) {
721  SmallVector<EVT, 4> ValueVTs;
722  ComputeValueVTs(TLI, ReturnType, ValueVTs, Offsets);
723  unsigned NumValues = ValueVTs.size();
724  if (NumValues == 0) return;
725
726  for (unsigned j = 0, f = NumValues; j != f; ++j) {
727    EVT VT = ValueVTs[j];
728    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
729
730    if (attr & Attribute::SExt)
731      ExtendKind = ISD::SIGN_EXTEND;
732    else if (attr & Attribute::ZExt)
733      ExtendKind = ISD::ZERO_EXTEND;
734
735    // FIXME: C calling convention requires the return type to be promoted to
736    // at least 32-bit. But this is not necessary for non-C calling
737    // conventions. The frontend should mark functions whose return values
738    // require promoting with signext or zeroext attributes.
739    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
740      EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
741      if (VT.bitsLT(MinVT))
742        VT = MinVT;
743    }
744
745    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
746    EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
747    // 'inreg' on function refers to return value
748    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
749    if (attr & Attribute::InReg)
750      Flags.setInReg();
751
752    // Propagate extension type if any
753    if (attr & Attribute::SExt)
754      Flags.setSExt();
755    else if (attr & Attribute::ZExt)
756      Flags.setZExt();
757
758    for (unsigned i = 0; i < NumParts; ++i) {
759      OutVTs.push_back(PartVT);
760      OutFlags.push_back(Flags);
761    }
762  }
763}
764
765void SelectionDAGBuilder::visitRet(ReturnInst &I) {
766  SDValue Chain = getControlRoot();
767  SmallVector<ISD::OutputArg, 8> Outs;
768  FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
769
770  if (!FLI.CanLowerReturn) {
771    unsigned DemoteReg = FLI.DemoteRegister;
772    const Function *F = I.getParent()->getParent();
773
774    // Emit a store of the return value through the virtual register.
775    // Leave Outs empty so that LowerReturn won't try to load return
776    // registers the usual way.
777    SmallVector<EVT, 1> PtrValueVTs;
778    ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()),
779                    PtrValueVTs);
780
781    SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
782    SDValue RetOp = getValue(I.getOperand(0));
783
784    SmallVector<EVT, 4> ValueVTs;
785    SmallVector<uint64_t, 4> Offsets;
786    ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
787    unsigned NumValues = ValueVTs.size();
788
789    SmallVector<SDValue, 4> Chains(NumValues);
790    EVT PtrVT = PtrValueVTs[0];
791    for (unsigned i = 0; i != NumValues; ++i)
792      Chains[i] = DAG.getStore(Chain, getCurDebugLoc(),
793                  SDValue(RetOp.getNode(), RetOp.getResNo() + i),
794                  DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, RetPtr,
795                  DAG.getConstant(Offsets[i], PtrVT)),
796                  NULL, Offsets[i], false, 0);
797    Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
798                        MVT::Other, &Chains[0], NumValues);
799  }
800  else {
801    for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
802      SmallVector<EVT, 4> ValueVTs;
803      ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
804      unsigned NumValues = ValueVTs.size();
805      if (NumValues == 0) continue;
806
807      SDValue RetOp = getValue(I.getOperand(i));
808      for (unsigned j = 0, f = NumValues; j != f; ++j) {
809        EVT VT = ValueVTs[j];
810
811        ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
812
813        const Function *F = I.getParent()->getParent();
814        if (F->paramHasAttr(0, Attribute::SExt))
815          ExtendKind = ISD::SIGN_EXTEND;
816        else if (F->paramHasAttr(0, Attribute::ZExt))
817          ExtendKind = ISD::ZERO_EXTEND;
818
819        // FIXME: C calling convention requires the return type to be promoted to
820        // at least 32-bit. But this is not necessary for non-C calling
821        // conventions. The frontend should mark functions whose return values
822        // require promoting with signext or zeroext attributes.
823        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
824          EVT MinVT = TLI.getRegisterType(*DAG.getContext(), MVT::i32);
825          if (VT.bitsLT(MinVT))
826            VT = MinVT;
827        }
828
829        unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT);
830        EVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT);
831        SmallVector<SDValue, 4> Parts(NumParts);
832        getCopyToParts(DAG, getCurDebugLoc(),
833                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
834                       &Parts[0], NumParts, PartVT, ExtendKind);
835
836        // 'inreg' on function refers to return value
837        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
838        if (F->paramHasAttr(0, Attribute::InReg))
839          Flags.setInReg();
840
841        // Propagate extension type if any
842        if (F->paramHasAttr(0, Attribute::SExt))
843          Flags.setSExt();
844        else if (F->paramHasAttr(0, Attribute::ZExt))
845          Flags.setZExt();
846
847        for (unsigned i = 0; i < NumParts; ++i)
848          Outs.push_back(ISD::OutputArg(Flags, Parts[i], /*isfixed=*/true));
849      }
850    }
851  }
852
853  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
854  CallingConv::ID CallConv =
855    DAG.getMachineFunction().getFunction()->getCallingConv();
856  Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
857                          Outs, getCurDebugLoc(), DAG);
858
859  // Verify that the target's LowerReturn behaved as expected.
860  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
861         "LowerReturn didn't return a valid chain!");
862
863  // Update the DAG with the new chain value resulting from return lowering.
864  DAG.setRoot(Chain);
865}
866
867/// CopyToExportRegsIfNeeded - If the given value has virtual registers
868/// created for it, emit nodes to copy the value into the virtual
869/// registers.
870void SelectionDAGBuilder::CopyToExportRegsIfNeeded(Value *V) {
871  if (!V->use_empty()) {
872    DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
873    if (VMI != FuncInfo.ValueMap.end())
874      CopyValueToVirtualRegister(V, VMI->second);
875  }
876}
877
878/// ExportFromCurrentBlock - If this condition isn't known to be exported from
879/// the current basic block, add it to ValueMap now so that we'll get a
880/// CopyTo/FromReg.
881void SelectionDAGBuilder::ExportFromCurrentBlock(Value *V) {
882  // No need to export constants.
883  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
884
885  // Already exported?
886  if (FuncInfo.isExportedInst(V)) return;
887
888  unsigned Reg = FuncInfo.InitializeRegForValue(V);
889  CopyValueToVirtualRegister(V, Reg);
890}
891
892bool SelectionDAGBuilder::isExportableFromCurrentBlock(Value *V,
893                                                     const BasicBlock *FromBB) {
894  // The operands of the setcc have to be in this block.  We don't know
895  // how to export them from some other block.
896  if (Instruction *VI = dyn_cast<Instruction>(V)) {
897    // Can export from current BB.
898    if (VI->getParent() == FromBB)
899      return true;
900
901    // Is already exported, noop.
902    return FuncInfo.isExportedInst(V);
903  }
904
905  // If this is an argument, we can export it if the BB is the entry block or
906  // if it is already exported.
907  if (isa<Argument>(V)) {
908    if (FromBB == &FromBB->getParent()->getEntryBlock())
909      return true;
910
911    // Otherwise, can only export this if it is already exported.
912    return FuncInfo.isExportedInst(V);
913  }
914
915  // Otherwise, constants can always be exported.
916  return true;
917}
918
919static bool InBlock(const Value *V, const BasicBlock *BB) {
920  if (const Instruction *I = dyn_cast<Instruction>(V))
921    return I->getParent() == BB;
922  return true;
923}
924
925/// getFCmpCondCode - Return the ISD condition code corresponding to
926/// the given LLVM IR floating-point condition code.  This includes
927/// consideration of global floating-point math flags.
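///
/// For example, FCMP_OLT normally maps to the ordered code ISD::SETOLT, but
/// when FiniteOnlyFPMath() is in effect NaNs are assumed absent and the plain
/// ISD::SETLT is returned instead.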
928///
929static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
930  ISD::CondCode FPC, FOC;
931  switch (Pred) {
932  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
933  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
934  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
935  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
936  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
937  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
938  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
939  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
940  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
941  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
942  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
943  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
944  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
945  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
946  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
947  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
948  default:
949    llvm_unreachable("Invalid FCmp predicate opcode!");
950    FOC = FPC = ISD::SETFALSE;
951    break;
952  }
953  if (FiniteOnlyFPMath())
954    return FOC;
955  else
956    return FPC;
957}
958
959/// getICmpCondCode - Return the ISD condition code corresponding to
960/// the given LLVM IR integer condition code.
961///
962static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
963  switch (Pred) {
964  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
965  case ICmpInst::ICMP_NE:  return ISD::SETNE;
966  case ICmpInst::ICMP_SLE: return ISD::SETLE;
967  case ICmpInst::ICMP_ULE: return ISD::SETULE;
968  case ICmpInst::ICMP_SGE: return ISD::SETGE;
969  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
970  case ICmpInst::ICMP_SLT: return ISD::SETLT;
971  case ICmpInst::ICMP_ULT: return ISD::SETULT;
972  case ICmpInst::ICMP_SGT: return ISD::SETGT;
973  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
974  default:
975    llvm_unreachable("Invalid ICmp predicate opcode!");
976    return ISD::SETNE;
977  }
978}
979
980/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
981/// This function emits a branch and is used at the leaves of an OR or an
982/// AND operator tree.
983///
984void
985SelectionDAGBuilder::EmitBranchForMergedCondition(Value *Cond,
986                                                  MachineBasicBlock *TBB,
987                                                  MachineBasicBlock *FBB,
988                                                  MachineBasicBlock *CurBB) {
989  const BasicBlock *BB = CurBB->getBasicBlock();
990
991  // If the leaf of the tree is a comparison, merge the condition into
992  // the caseblock.
993  if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
994    // The operands of the cmp have to be in this block.  We don't know
995    // how to export them from some other block.  If this is the first block
996    // of the sequence, no exporting is needed.
997    if (CurBB == CurMBB ||
998        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
999         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1000      ISD::CondCode Condition;
1001      if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1002        Condition = getICmpCondCode(IC->getPredicate());
1003      } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
1004        Condition = getFCmpCondCode(FC->getPredicate());
1005      } else {
1006        Condition = ISD::SETEQ; // silence warning.
1007        llvm_unreachable("Unknown compare instruction");
1008      }
1009
1010      CaseBlock CB(Condition, BOp->getOperand(0),
1011                   BOp->getOperand(1), NULL, TBB, FBB, CurBB);
1012      SwitchCases.push_back(CB);
1013      return;
1014    }
1015  }
1016
1017  // Create a CaseBlock record representing this branch.
1018  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
1019               NULL, TBB, FBB, CurBB);
1020  SwitchCases.push_back(CB);
1021}
1022
1023/// FindMergedConditions - If Cond is an And/Or expression matching Opc, lower it as a cascade of branches; otherwise emit a single branch for it.
1024void SelectionDAGBuilder::FindMergedConditions(Value *Cond,
1025                                               MachineBasicBlock *TBB,
1026                                               MachineBasicBlock *FBB,
1027                                               MachineBasicBlock *CurBB,
1028                                               unsigned Opc) {
1029  // If this node is not part of the or/and tree, emit it as a branch.
1030  Instruction *BOp = dyn_cast<Instruction>(Cond);
1031  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1032      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
1033      BOp->getParent() != CurBB->getBasicBlock() ||
1034      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1035      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1036    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
1037    return;
1038  }
1039
1040  //  Create TmpBB after CurBB.
1041  MachineFunction::iterator BBI = CurBB;
1042  MachineFunction &MF = DAG.getMachineFunction();
1043  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1044  CurBB->getParent()->insert(++BBI, TmpBB);
1045
1046  if (Opc == Instruction::Or) {
1047    // Codegen X | Y as:
1048    //   jmp_if_X TBB
1049    //   jmp TmpBB
1050    // TmpBB:
1051    //   jmp_if_Y TBB
1052    //   jmp FBB
1053    //
1054
1055    // Emit the LHS condition.
1056    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);
1057
1058    // Emit the RHS condition into TmpBB.
1059    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1060  } else {
1061    assert(Opc == Instruction::And && "Unknown merge op!");
1062    // Codegen X & Y as:
1063    //   jmp_if_X TmpBB
1064    //   jmp FBB
1065    // TmpBB:
1066    //   jmp_if_Y TBB
1067    //   jmp FBB
1068    //
1069    //  This requires creation of TmpBB after CurBB.
1070
1071    // Emit the LHS condition.
1072    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);
1073
1074    // Emit the RHS condition into TmpBB.
1075    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1076  }
1077}
1078
1079/// If the set of cases should be emitted as a series of branches, return true.
1080/// If we should emit this as a bunch of and/or'd together conditions, return
1081/// false.
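///
/// For example, "if (X == Y || Y == X)" produces two CaseBlocks comparing the
/// same pair of values (possibly swapped), so returning false here lets them
/// be folded into a single setcc instead of two separate branch blocks.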
1082bool
1083SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
1084  if (Cases.size() != 2) return true;
1085
1086  // If this is two comparisons of the same values or'd or and'd together, they
1087  // will get folded into a single comparison, so don't emit two blocks.
1088  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1089       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1090      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1091       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1092    return false;
1093  }
1094
1095  return true;
1096}
1097
1098void SelectionDAGBuilder::visitBr(BranchInst &I) {
1099  // Update machine-CFG edges.
1100  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1101
1102  // Figure out which block is immediately after the current one.
1103  MachineBasicBlock *NextBlock = 0;
1104  MachineFunction::iterator BBI = CurMBB;
1105  if (++BBI != FuncInfo.MF->end())
1106    NextBlock = BBI;
1107
1108  if (I.isUnconditional()) {
1109    // Update machine-CFG edges.
1110    CurMBB->addSuccessor(Succ0MBB);
1111
1112    // If this is not a fall-through branch, emit the branch.
1113    if (Succ0MBB != NextBlock) {
1114      SDValue V = DAG.getNode(ISD::BR, getCurDebugLoc(),
1115                              MVT::Other, getControlRoot(),
1116                              DAG.getBasicBlock(Succ0MBB));
1117      DAG.setRoot(V);
1118
1119      if (DisableScheduling)
1120        DAG.AssignOrdering(V.getNode(), SDNodeOrder);
1121    }
1122
1123    return;
1124  }
1125
1126  // If this condition is one of the special cases we handle, do special stuff
1127  // now.
1128  Value *CondVal = I.getCondition();
1129  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1130
1131  // If this is a series of conditions that are or'd or and'd together, emit
1132  // this as a sequence of branches instead of setcc's with and/or operations.
1133  // For example, instead of something like:
1134  //     cmp A, B
1135  //     C = seteq
1136  //     cmp D, E
1137  //     F = setle
1138  //     or C, F
1139  //     jnz foo
1140  // Emit:
1141  //     cmp A, B
1142  //     je foo
1143  //     cmp D, E
1144  //     jle foo
1145  //
1146  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1147    if (BOp->hasOneUse() &&
1148        (BOp->getOpcode() == Instruction::And ||
1149         BOp->getOpcode() == Instruction::Or)) {
1150      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
1151      // If the compares in later blocks need to use values not currently
1152      // exported from this block, export them now.  This block should always
1153      // be the first entry.
1154      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");
1155
1156      // Allow some cases to be rejected.
1157      if (ShouldEmitAsBranches(SwitchCases)) {
1158        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1159          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1160          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1161        }
1162
1163        // Emit the branch for this block.
1164        visitSwitchCase(SwitchCases[0]);
1165        SwitchCases.erase(SwitchCases.begin());
1166        return;
1167      }
1168
1169      // Okay, we decided not to do this, remove any inserted MBB's and clear
1170      // SwitchCases.
1171      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1172        FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1173
1174      SwitchCases.clear();
1175    }
1176  }
1177
1178  // Create a CaseBlock record representing this branch.
1179  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1180               NULL, Succ0MBB, Succ1MBB, CurMBB);
1181
1182  // Use visitSwitchCase to actually insert the fast branch sequence for this
1183  // cond branch.
1184  visitSwitchCase(CB);
1185}
1186
1187/// visitSwitchCase - Emits the necessary code to represent a single node in
1188/// the binary search tree resulting from lowering a switch instruction.
1189void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB) {
1190  SDValue Cond;
1191  SDValue CondLHS = getValue(CB.CmpLHS);
1192  DebugLoc dl = getCurDebugLoc();
1193
1194  // Build the setcc now.
1195  if (CB.CmpMHS == NULL) {
1196    // Fold "(X == true)" to X and "(X == false)" to !X to
1197    // handle common cases produced by branch lowering.
1198    if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1199        CB.CC == ISD::SETEQ)
1200      Cond = CondLHS;
1201    else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1202             CB.CC == ISD::SETEQ) {
1203      SDValue True = DAG.getConstant(1, CondLHS.getValueType());
1204      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1205    } else
1206      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1207  } else {
1208    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1209
1210    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1211    const APInt& High  = cast<ConstantInt>(CB.CmpRHS)->getValue();
1212
1213    SDValue CmpOp = getValue(CB.CmpMHS);
1214    EVT VT = CmpOp.getValueType();
1215
1216    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1217      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
1218                          ISD::SETLE);
1219    } else {
1220      SDValue SUB = DAG.getNode(ISD::SUB, dl,
1221                                VT, CmpOp, DAG.getConstant(Low, VT));
1222      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1223                          DAG.getConstant(High-Low, VT), ISD::SETULE);
1224    }
1225  }
1226
1227  // Update successor info
1228  CurMBB->addSuccessor(CB.TrueBB);
1229  CurMBB->addSuccessor(CB.FalseBB);
1230
1231  // Set NextBlock to be the MBB immediately after the current one, if any.
1232  // This is used to avoid emitting unnecessary branches to the next block.
1233  MachineBasicBlock *NextBlock = 0;
1234  MachineFunction::iterator BBI = CurMBB;
1235  if (++BBI != FuncInfo.MF->end())
1236    NextBlock = BBI;
1237
1238  // If the lhs block is the next block, invert the condition so that we can
1239  // fall through to the lhs instead of the rhs block.
1240  if (CB.TrueBB == NextBlock) {
1241    std::swap(CB.TrueBB, CB.FalseBB);
1242    SDValue True = DAG.getConstant(1, Cond.getValueType());
1243    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1244  }
1245
1246  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1247                               MVT::Other, getControlRoot(), Cond,
1248                               DAG.getBasicBlock(CB.TrueBB));
1249
1250  // If the branch was constant folded, fix up the CFG.
1251  if (BrCond.getOpcode() == ISD::BR) {
1252    CurMBB->removeSuccessor(CB.FalseBB);
1253  } else {
1254    // Otherwise, go ahead and insert the false branch.
1255    if (BrCond == getControlRoot())
1256      CurMBB->removeSuccessor(CB.TrueBB);
1257
1258    if (CB.FalseBB != NextBlock)
1259      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1260                           DAG.getBasicBlock(CB.FalseBB));
1261  }
1262
1263  DAG.setRoot(BrCond);
1264
1265  if (DisableScheduling)
1266    DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder);
1267}
1268
1269/// visitJumpTable - Emit JumpTable node in the current MBB
1270void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
1271  // Emit the code for the jump table
1272  assert(JT.Reg != -1U && "Should lower JT Header first!");
1273  EVT PTy = TLI.getPointerTy();
1274  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
1275                                     JT.Reg, PTy);
1276  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1277  SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
1278                                    MVT::Other, Index.getValue(1),
1279                                    Table, Index);
1280  DAG.setRoot(BrJumpTable);
1281
1282  if (DisableScheduling)
1283    DAG.AssignOrdering(BrJumpTable.getNode(), SDNodeOrder);
1284}
1285
1286/// visitJumpTableHeader - This function emits the code necessary to produce an
1287/// index into the JumpTable from the switch case value.
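///
/// For example, for a switch whose cases span 10..14 this header computes
/// Index = SValue - 10 and branches to the default block when the unsigned
/// comparison Index > 4 succeeds; otherwise control continues to the jump
/// table block.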
1288void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
1289                                               JumpTableHeader &JTH) {
1290  // Subtract the lowest switch case value from the value being switched on and
1291  // conditional branch to default mbb if the result is greater than the
1292  // difference between smallest and largest cases.
1293  SDValue SwitchOp = getValue(JTH.SValue);
1294  EVT VT = SwitchOp.getValueType();
1295  SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1296                            DAG.getConstant(JTH.First, VT));
1297
1298  // The SDNode we just created, which holds the value being switched on minus
1299  // the smallest case value, needs to be copied to a virtual register so it
1300  // can be used as an index into the jump table in a subsequent basic block.
1301  // This value may be smaller or larger than the target's pointer type, and
1302  // may therefore require extension or truncation.
1303  SwitchOp = DAG.getZExtOrTrunc(SUB, getCurDebugLoc(), TLI.getPointerTy());
1304
1305  unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
1306  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1307                                    JumpTableReg, SwitchOp);
1308  JT.Reg = JumpTableReg;
1309
1310  // Emit the range check for the jump table, and branch to the default block
1311  // for the switch statement if the value being switched on exceeds the largest
1312  // case in the switch.
1313  SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
1314                             TLI.getSetCCResultType(SUB.getValueType()), SUB,
1315                             DAG.getConstant(JTH.Last-JTH.First,VT),
1316                             ISD::SETUGT);
1317
1318  // Set NextBlock to be the MBB immediately after the current one, if any.
1319  // This is used to avoid emitting unnecessary branches to the next block.
1320  MachineBasicBlock *NextBlock = 0;
1321  MachineFunction::iterator BBI = CurMBB;
1322  if (++BBI != FuncInfo.MF->end())
1323    NextBlock = BBI;
1324
1325  SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1326                               MVT::Other, CopyTo, CMP,
1327                               DAG.getBasicBlock(JT.Default));
1328
1329  if (JT.MBB != NextBlock)
1330    BrCond = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
1331                         DAG.getBasicBlock(JT.MBB));
1332
1333  DAG.setRoot(BrCond);
1334
1335  if (DisableScheduling)
1336    DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder);
1337}
1338
1339/// visitBitTestHeader - This function emits the code necessary to produce a
1340/// value suitable for "bit tests".
1341void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B) {
1342  // Subtract the minimum value
1343  SDValue SwitchOp = getValue(B.SValue);
1344  EVT VT = SwitchOp.getValueType();
1345  SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1346                            DAG.getConstant(B.First, VT));
1347
1348  // Check range
1349  SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
1350                                  TLI.getSetCCResultType(SUB.getValueType()),
1351                                  SUB, DAG.getConstant(B.Range, VT),
1352                                  ISD::SETUGT);
1353
1354  SDValue ShiftOp = DAG.getZExtOrTrunc(SUB, getCurDebugLoc(), TLI.getPointerTy());
1355
1356  B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
1357  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1358                                    B.Reg, ShiftOp);
1359
1360  // Set NextBlock to be the MBB immediately after the current one, if any.
1361  // This is used to avoid emitting unnecessary branches to the next block.
1362  MachineBasicBlock *NextBlock = 0;
1363  MachineFunction::iterator BBI = CurMBB;
1364  if (++BBI != FuncInfo.MF->end())
1365    NextBlock = BBI;
1366
1367  MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1368
1369  CurMBB->addSuccessor(B.Default);
1370  CurMBB->addSuccessor(MBB);
1371
1372  SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1373                                MVT::Other, CopyTo, RangeCmp,
1374                                DAG.getBasicBlock(B.Default));
1375
1376  if (MBB != NextBlock)
1377    BrRange = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrRange,
1378                          DAG.getBasicBlock(MBB));
1379
1380  DAG.setRoot(BrRange);
1381
1382  if (DisableScheduling)
1383    DAG.AssignOrdering(BrRange.getNode(), SDNodeOrder);
1384}
1385
1386/// visitBitTestCase - This function produces one "bit test".
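/// The emitted test is ((1 << Reg) & Mask) != 0, where Reg holds the switch
/// value minus the lower bound of the range; if it succeeds, control branches
/// to the case's target block, otherwise to the next bit-test block.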
1387void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
1388                                           unsigned Reg,
1389                                           BitTestCase &B) {
1390  // Make desired shift
1391  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
1392                                       TLI.getPointerTy());
1393  SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
1394                                  TLI.getPointerTy(),
1395                                  DAG.getConstant(1, TLI.getPointerTy()),
1396                                  ShiftOp);
1397
1398  // Emit bit tests and jumps
1399  SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
1400                              TLI.getPointerTy(), SwitchVal,
1401                              DAG.getConstant(B.Mask, TLI.getPointerTy()));
1402  SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
1403                                TLI.getSetCCResultType(AndOp.getValueType()),
1404                                AndOp, DAG.getConstant(0, TLI.getPointerTy()),
1405                                ISD::SETNE);
1406
1407  CurMBB->addSuccessor(B.TargetBB);
1408  CurMBB->addSuccessor(NextMBB);
1409
1410  SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1411                              MVT::Other, getControlRoot(),
1412                              AndCmp, DAG.getBasicBlock(B.TargetBB));
1413
1414  // Set NextBlock to be the MBB immediately after the current one, if any.
1415  // This is used to avoid emitting unnecessary branches to the next block.
1416  MachineBasicBlock *NextBlock = 0;
1417  MachineFunction::iterator BBI = CurMBB;
1418  if (++BBI != FuncInfo.MF->end())
1419    NextBlock = BBI;
1420
1421  if (NextMBB != NextBlock)
1422    BrAnd = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
1423                        DAG.getBasicBlock(NextMBB));
1424
1425  DAG.setRoot(BrAnd);
1426
1427  if (DisableScheduling)
1428    DAG.AssignOrdering(BrAnd.getNode(), SDNodeOrder);
1429}
1430
1431void SelectionDAGBuilder::visitInvoke(InvokeInst &I) {
1432  // Retrieve successors.
1433  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
1434  MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
1435
1436  const Value *Callee(I.getCalledValue());
1437  if (isa<InlineAsm>(Callee))
1438    visitInlineAsm(&I);
1439  else
1440    LowerCallTo(&I, getValue(Callee), false, LandingPad);
1441
1442  // If the value of the invoke is used outside of its defining block, make it
1443  // available as a virtual register.
1444  CopyToExportRegsIfNeeded(&I);
1445
1446  // Update successor info
1447  CurMBB->addSuccessor(Return);
1448  CurMBB->addSuccessor(LandingPad);
1449
1450  // Drop into normal successor.
1451  SDValue Branch = DAG.getNode(ISD::BR, getCurDebugLoc(),
1452                               MVT::Other, getControlRoot(),
1453                               DAG.getBasicBlock(Return));
1454  DAG.setRoot(Branch);
1455
1456  if (DisableScheduling)
1457    DAG.AssignOrdering(Branch.getNode(), SDNodeOrder);
1458}
1459
1460void SelectionDAGBuilder::visitUnwind(UnwindInst &I) {
1461}
1462
1463/// handleSmallSwitchRange - Emit a series of specific tests (suitable for
1464/// small case ranges).
1465bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
1466                                                 CaseRecVector& WorkList,
1467                                                 Value* SV,
1468                                                 MachineBasicBlock* Default) {
1469  Case& BackCase  = *(CR.Range.second-1);
1470
1471  // Size is the number of Cases represented by this range.
1472  size_t Size = CR.Range.second - CR.Range.first;
1473  if (Size > 3)
1474    return false;
1475
1476  // Get the MachineFunction which holds the current MBB.  This is used when
1477  // inserting any additional MBBs necessary to represent the switch.
1478  MachineFunction *CurMF = FuncInfo.MF;
1479
1480  // Figure out which block is immediately after the current one.
1481  MachineBasicBlock *NextBlock = 0;
1482  MachineFunction::iterator BBI = CR.CaseBB;
1483
1484  if (++BBI != FuncInfo.MF->end())
1485    NextBlock = BBI;
1486
1487  // TODO: If any two of the cases have the same destination, and if one value
1488  // is the same as the other, but has one bit unset that the other has set,
1489  // use bit manipulation to do two compares at once.  For example:
1490  // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1491
1492  // Rearrange the case blocks so that the last one falls through if possible.
1493  if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
1494    // The last case block won't fall through into 'NextBlock' if we emit the
1495    // branches in this order.  See if rearranging a case value would help.
1496    for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
1497      if (I->BB == NextBlock) {
1498        std::swap(*I, BackCase);
1499        break;
1500      }
1501    }
1502  }
1503
1504  // Create a CaseBlock record representing a conditional branch to
1505  // the Case's target mbb if the value being switched on (SV) is equal
1506  // to the Case's value, or lies within its range.
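  // Conceptually, a small switch over the values 1, 2 and 5 lowers to
  //   if (SV == 1) goto BB1;
  //   else if (SV == 2) goto BB2;
  //   else if (SV == 5) goto BB5;
  //   else goto Default;
  // with each subsequent comparison emitted into a newly created fall-through
  // block (BB1, BB2 and BB5 here stand for the corresponding case blocks).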
1507  MachineBasicBlock *CurBlock = CR.CaseBB;
1508  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
1509    MachineBasicBlock *FallThrough;
1510    if (I != E-1) {
1511      FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
1512      CurMF->insert(BBI, FallThrough);
1513
1514      // Put SV in a virtual register to make it available from the new blocks.
1515      ExportFromCurrentBlock(SV);
1516    } else {
1517      // If the last case doesn't match, go to the default block.
1518      FallThrough = Default;
1519    }
1520
1521    Value *RHS, *LHS, *MHS;
1522    ISD::CondCode CC;
1523    if (I->High == I->Low) {
1524      // This is just a small case range containing exactly one case.
1525      CC = ISD::SETEQ;
1526      LHS = SV; RHS = I->High; MHS = NULL;
1527    } else {
1528      CC = ISD::SETLE;
1529      LHS = I->Low; MHS = SV; RHS = I->High;
1530    }
1531    CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
1532
1533    // If emitting the first comparison, just call visitSwitchCase to emit the
1534    // code into the current block.  Otherwise, push the CaseBlock onto the
1535    // vector to be later processed by SDISel, and insert the node's MBB
1536    // before the next MBB.
1537    if (CurBlock == CurMBB)
1538      visitSwitchCase(CB);
1539    else
1540      SwitchCases.push_back(CB);
1541
1542    CurBlock = FallThrough;
1543  }
1544
1545  return true;
1546}
1547
1548static inline bool areJTsAllowed(const TargetLowering &TLI) {
1549  return !DisableJumpTables &&
1550          (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1551           TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
1552}
1553
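/// ComputeRange - Return the number of integer values in the closed interval
/// [First, Last]. Both endpoints are sign-extended by one extra bit first so
/// that the computation cannot overflow; e.g. First = -2, Last = 3 yields 6.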
1554static APInt ComputeRange(const APInt &First, const APInt &Last) {
1555  APInt LastExt(Last), FirstExt(First);
1556  uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1557  LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
1558  return (LastExt - FirstExt + 1ULL);
1559}
1560
1561/// handleJTSwitchCase - Emit a jump table for the current switch case range.
1562bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec& CR,
1563                                             CaseRecVector& WorkList,
1564                                             Value* SV,
1565                                             MachineBasicBlock* Default) {
1566  Case& FrontCase = *CR.Range.first;
1567  Case& BackCase  = *(CR.Range.second-1);
1568
1569  const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
1570  const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
1571
1572  APInt TSize(First.getBitWidth(), 0);
1573  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1574       I!=E; ++I)
1575    TSize += I->size();
1576
1577  if (!areJTsAllowed(TLI) || TSize.ult(APInt(First.getBitWidth(), 4)))
1578    return false;
1579
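  // Only build a jump table if the case values are reasonably dense within
  // [First, Last]. For example, the cases {0, 1, 2, 3, 10} give TSize = 5 and
  // Range = 11, so Density ~= 0.45 and an 11-entry table is emitted (with the
  // unused slots pointing at the default block), whereas the cases
  // {0, 100, 200, 300} give Density ~= 0.013 and fall through to the other
  // lowering strategies.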
1580  APInt Range = ComputeRange(First, Last);
1581  double Density = TSize.roundToDouble() / Range.roundToDouble();
1582  if (Density < 0.4)
1583    return false;
1584
1585  DEBUG(errs() << "Lowering jump table\n"
1586               << "First entry: " << First << ". Last entry: " << Last << '\n'
1587               << "Range: " << Range << ". "
1588               << "Size: " << TSize << ". Density: " << Density << "\n\n");
1589
1590  // Get the MachineFunction which holds the current MBB.  This is used when
1591  // inserting any additional MBBs necessary to represent the switch.
1592  MachineFunction *CurMF = FuncInfo.MF;
1593
1594  // Figure out which block is immediately after the current one.
1595  MachineFunction::iterator BBI = CR.CaseBB;
1596  ++BBI;
1597
1598  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1599
1600  // Create a new basic block to hold the code for loading the address
1601  // of the jump table, and jumping to it.  Update successor information;
1602  // we will either branch to the default case for the switch, or the jump
1603  // table.
1604  MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1605  CurMF->insert(BBI, JumpTableBB);
1606  CR.CaseBB->addSuccessor(Default);
1607  CR.CaseBB->addSuccessor(JumpTableBB);
1608
1609  // Build a vector of destination BBs, corresponding to each target
1610  // of the jump table. If the value of the jump table slot corresponds to
1611  // a case statement, push the case's BB onto the vector, otherwise, push
1612  // the default BB.
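  // For example, the clustered cases [1,2] -> BB1 and [5,5] -> BB2 with
  // First = 1 and Last = 5 produce DestBBs = { BB1, BB1, Default, Default,
  // BB2 } (BB1 and BB2 standing for the corresponding case blocks).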
1613  std::vector<MachineBasicBlock*> DestBBs;
1614  APInt TEI = First;
1615  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1616    const APInt& Low = cast<ConstantInt>(I->Low)->getValue();
1617    const APInt& High = cast<ConstantInt>(I->High)->getValue();
1618
1619    if (Low.sle(TEI) && TEI.sle(High)) {
1620      DestBBs.push_back(I->BB);
1621      if (TEI==High)
1622        ++I;
1623    } else {
1624      DestBBs.push_back(Default);
1625    }
1626  }
1627
1628  // Update successor info. Add one edge to each unique successor.
1629  BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1630  for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1631         E = DestBBs.end(); I != E; ++I) {
1632    if (!SuccsHandled[(*I)->getNumber()]) {
1633      SuccsHandled[(*I)->getNumber()] = true;
1634      JumpTableBB->addSuccessor(*I);
1635    }
1636  }
1637
1638  // Create a jump table index for this jump table, or return an existing
1639  // one.
1640  unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1641
1642  // Set the jump table information so that we can codegen it as a second
1643  // MachineBasicBlock
1644  JumpTable JT(-1U, JTI, JumpTableBB, Default);
1645  JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1646  if (CR.CaseBB == CurMBB)
1647    visitJumpTableHeader(JT, JTH);
1648
1649  JTCases.push_back(JumpTableBlock(JTH, JT));
1650
1651  return true;
1652}
1653
1654/// handleBTSplitSwitchCase - Emit a comparison and split the binary search
1655/// tree into two subtrees.
1656bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
1657                                                  CaseRecVector& WorkList,
1658                                                  Value* SV,
1659                                                  MachineBasicBlock* Default) {
1660  // Get the MachineFunction which holds the current MBB.  This is used when
1661  // inserting any additional MBBs necessary to represent the switch.
1662  MachineFunction *CurMF = FuncInfo.MF;
1663
1664  // Figure out which block is immediately after the current one.
1665  MachineFunction::iterator BBI = CR.CaseBB;
1666  ++BBI;
1667
1668  Case& FrontCase = *CR.Range.first;
1669  Case& BackCase  = *(CR.Range.second-1);
1670  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1671
1672  // Size is the number of Cases represented by this range.
1673  unsigned Size = CR.Range.second - CR.Range.first;
1674
1675  const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
1676  const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
1677  double FMetric = 0;
1678  CaseItr Pivot = CR.Range.first + Size/2;
1679
1680  // Select the optimal pivot, maximizing the sum of the LHS and RHS densities.
1681  // This will (heuristically) allow us to emit jump tables later.
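  // For each candidate split point the metric computed below is
  //   log2(gap between the two halves) * (LHS density + RHS density),
  // so a wide gap between the halves and dense halves make a better split,
  // since each half is then more likely to qualify for a jump table.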
1682  APInt TSize(First.getBitWidth(), 0);
1683  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1684       I!=E; ++I)
1685    TSize += I->size();
1686
1687  APInt LSize = FrontCase.size();
1688  APInt RSize = TSize-LSize;
1689  DEBUG(errs() << "Selecting best pivot: \n"
1690               << "First: " << First << ", Last: " << Last <<'\n'
1691               << "LSize: " << LSize << ", RSize: " << RSize << '\n');
1692  for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1693       J!=E; ++I, ++J) {
1694    const APInt &LEnd = cast<ConstantInt>(I->High)->getValue();
1695    const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue();
1696    APInt Range = ComputeRange(LEnd, RBegin);
1697    assert((Range - 2ULL).isNonNegative() &&
1698           "Invalid case distance");
1699    double LDensity = LSize.roundToDouble() /
1700                           (LEnd - First + 1ULL).roundToDouble();
1701    double RDensity = RSize.roundToDouble() /
1702                           (Last - RBegin + 1ULL).roundToDouble();
1703    double Metric = Range.logBase2()*(LDensity+RDensity);
1704    // Should always split in some non-trivial place
1705    DEBUG(errs() <<"=>Step\n"
1706                 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1707                 << "LDensity: " << LDensity
1708                 << ", RDensity: " << RDensity << '\n'
1709                 << "Metric: " << Metric << '\n');
1710    if (FMetric < Metric) {
1711      Pivot = J;
1712      FMetric = Metric;
1713      DEBUG(errs() << "Current metric set to: " << FMetric << '\n');
1714    }
1715
1716    LSize += J->size();
1717    RSize -= J->size();
1718  }
1719  if (areJTsAllowed(TLI)) {
1720    // If our case is dense we *really* should handle it earlier!
1721    assert((FMetric > 0) && "Should handle dense range earlier!");
1722  } else {
1723    Pivot = CR.Range.first + Size/2;
1724  }
1725
1726  CaseRange LHSR(CR.Range.first, Pivot);
1727  CaseRange RHSR(Pivot, CR.Range.second);
1728  Constant *C = Pivot->Low;
1729  MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1730
1731  // We know that we branch to the LHS if the Value being switched on is
1732  // less than the Pivot value, C.  We use this to optimize our binary
1733  // tree a bit, by recognizing that if SV is greater than or equal to the
1734  // LHS's Case Value, and that Case Value is exactly one less than the
1735  // Pivot's Value, then we can branch directly to the LHS's Target,
1736  // rather than creating a leaf node for it.
1737  if ((LHSR.second - LHSR.first) == 1 &&
1738      LHSR.first->High == CR.GE &&
1739      cast<ConstantInt>(C)->getValue() ==
1740      (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
1741    TrueBB = LHSR.first->BB;
1742  } else {
1743    TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1744    CurMF->insert(BBI, TrueBB);
1745    WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1746
1747    // Put SV in a virtual register to make it available from the new blocks.
1748    ExportFromCurrentBlock(SV);
1749  }
1750
1751  // Similar to the optimization above, if the Value being switched on is
1752  // known to be less than the Constant CR.LT, and the current Case Value
1753  // is CR.LT - 1, then we can branch directly to the target block for
1754  // the current Case Value, rather than emitting a RHS leaf node for it.
1755  if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1756      cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1757      (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
1758    FalseBB = RHSR.first->BB;
1759  } else {
1760    FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1761    CurMF->insert(BBI, FalseBB);
1762    WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1763
1764    // Put SV in a virtual register to make it available from the new blocks.
1765    ExportFromCurrentBlock(SV);
1766  }
1767
1768  // Create a CaseBlock record representing a conditional branch to
1769  // the LHS node if the value being switched on SV is less than C.
1770  // Otherwise, branch to LHS.
1771  CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1772
1773  if (CR.CaseBB == CurMBB)
1774    visitSwitchCase(CB);
1775  else
1776    SwitchCases.push_back(CB);
1777
1778  return true;
1779}
1780
1781/// handleBitTestsSwitchCase - If the current case range has few destinations
1782/// and spans fewer values than the machine word bit width, encode the case
1783/// range into a series of masks and emit bit tests with these masks.
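/// For example, on a 32-bit target the cases {0, 3, 6} -> BB1 and {1, 4} -> BB2
/// span only seven values, so they can be encoded as the masks 0x49 (0b1001001)
/// and 0x12 (0b0010010) and lowered to two bit tests against (1 << X).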
1784bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
1785                                                   CaseRecVector& WorkList,
1786                                                   Value* SV,
1787                                                   MachineBasicBlock* Default){
1788  EVT PTy = TLI.getPointerTy();
1789  unsigned IntPtrBits = PTy.getSizeInBits();
1790
1791  Case& FrontCase = *CR.Range.first;
1792  Case& BackCase  = *(CR.Range.second-1);
1793
1794  // Get the MachineFunction which holds the current MBB.  This is used when
1795  // inserting any additional MBBs necessary to represent the switch.
1796  MachineFunction *CurMF = FuncInfo.MF;
1797
1798  // If the target does not have a legal shift-left operation, do not emit bit
1798  // tests at all.
1799  if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
1800    return false;
1801
1802  size_t numCmps = 0;
1803  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1804       I!=E; ++I) {
1805    // A single case counts as one comparison, a case range as two.
1806    numCmps += (I->Low == I->High ? 1 : 2);
1807  }
1808
1809  // Count unique destinations
1810  SmallSet<MachineBasicBlock*, 4> Dests;
1811  for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1812    Dests.insert(I->BB);
1813    if (Dests.size() > 3)
1814      // Don't bother with the code below if there are too many unique destinations
1815      return false;
1816  }
1817  DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n'
1818               << "Total number of comparisons: " << numCmps << '\n');
1819
1820  // Compute span of values.
1821  const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1822  const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1823  APInt cmpRange = maxValue - minValue;
1824
1825  DEBUG(errs() << "Compare range: " << cmpRange << '\n'
1826               << "Low bound: " << minValue << '\n'
1827               << "High bound: " << maxValue << '\n');
1828
1829  if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1830      (!(Dests.size() == 1 && numCmps >= 3) &&
1831       !(Dests.size() == 2 && numCmps >= 5) &&
1832       !(Dests.size() >= 3 && numCmps >= 6)))
1833    return false;
1834
1835  DEBUG(errs() << "Emitting bit tests\n");
1836  APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1837
1838  // If all the case values are non-negative and fit in a word, leave
1839  // lowBound at zero: the subtraction of the minimum value becomes a no-op
1840  // and the range check compares against maxValue directly.
1841  if (minValue.isNonNegative() &&
1842      maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
1843    cmpRange = maxValue;
1844  } else {
1845    lowBound = minValue;
1846  }
1847
1848  CaseBitsVector CasesBits;
1849  unsigned i, count = 0;
1850
1851  for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1852    MachineBasicBlock* Dest = I->BB;
1853    for (i = 0; i < count; ++i)
1854      if (Dest == CasesBits[i].BB)
1855        break;
1856
1857    if (i == count) {
1858      assert((count < 3) && "Too many destinations to test!");
1859      CasesBits.push_back(CaseBits(0, Dest, 0));
1860      count++;
1861    }
1862
1863    const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
1864    const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
1865
1866    uint64_t lo = (lowValue - lowBound).getZExtValue();
1867    uint64_t hi = (highValue - lowBound).getZExtValue();
1868
1869    for (uint64_t j = lo; j <= hi; j++) {
1870      CasesBits[i].Mask |=  1ULL << j;
1871      CasesBits[i].Bits++;
1872    }
1873
1874  }
1875  std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
1876
1877  BitTestInfo BTC;
1878
1879  // Figure out which block is immediately after the current one.
1880  MachineFunction::iterator BBI = CR.CaseBB;
1881  ++BBI;
1882
1883  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1884
1885  DEBUG(errs() << "Cases:\n");
1886  for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
1887    DEBUG(errs() << "Mask: " << CasesBits[i].Mask
1888                 << ", Bits: " << CasesBits[i].Bits
1889                 << ", BB: " << CasesBits[i].BB << '\n');
1890
1891    MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1892    CurMF->insert(BBI, CaseBB);
1893    BTC.push_back(BitTestCase(CasesBits[i].Mask,
1894                              CaseBB,
1895                              CasesBits[i].BB));
1896
1897    // Put SV in a virtual register to make it available from the new blocks.
1898    ExportFromCurrentBlock(SV);
1899  }
1900
1901  BitTestBlock BTB(lowBound, cmpRange, SV,
1902                   -1U, (CR.CaseBB == CurMBB),
1903                   CR.CaseBB, Default, BTC);
1904
1905  if (CR.CaseBB == CurMBB)
1906    visitBitTestHeader(BTB);
1907
1908  BitTestCases.push_back(BTB);
1909
1910  return true;
1911}
1912
1913
1914/// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
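/// Neighbouring cases with the same destination are merged; e.g. the cases
/// 1, 2, 3 and 5, all branching to the same block, become the two ranges
/// [1,3] and [5,5].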
1915size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
1916                                       const SwitchInst& SI) {
1917  size_t numCmps = 0;
1918
1919  // Start with "simple" cases
1920  for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
1921    MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
1922    Cases.push_back(Case(SI.getSuccessorValue(i),
1923                         SI.getSuccessorValue(i),
1924                         SMBB));
1925  }
1926  std::sort(Cases.begin(), Cases.end(), CaseCmp());
1927
1928  // Merge cases into clusters
1929  if (Cases.size() >= 2)
1930    // Must recompute end() each iteration because it may be
1931    // invalidated by erase if we hold on to it
1932    for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
1933      const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
1934      const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
1935      MachineBasicBlock* nextBB = J->BB;
1936      MachineBasicBlock* currentBB = I->BB;
1937
1938      // If the two neighboring cases go to the same destination, merge them
1939      // into a single case.
1940      if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
1941        I->High = J->High;
1942        J = Cases.erase(J);
1943      } else {
1944        I = J++;
1945      }
1946    }
1947
1948  for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
1949    if (I->Low != I->High)
1950      // A range counts double, since it requires two compares.
1951      ++numCmps;
1952  }
1953
1954  return numCmps;
1955}
1956
1957void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) {
1958  // Figure out which block is immediately after the current one.
1959  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != FuncInfo.MF->end())
    NextBlock = BBI;
1960
1961  MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
1962
1963  // If there is only the default destination, branch to it if it is not the
1964  // next basic block.  Otherwise, just fall through.
1965  if (SI.getNumOperands() == 2) {
1966    // Update machine-CFG edges.
1967
1968    // If this is not a fall-through branch, emit the branch.
1969    CurMBB->addSuccessor(Default);
1970    if (Default != NextBlock)
1971      DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
1972                              MVT::Other, getControlRoot(),
1973                              DAG.getBasicBlock(Default)));
1974    return;
1975  }
1976
1977  // If there are any non-default case statements, create a vector of Cases
1978  // representing each one, and sort the vector so that we can efficiently
1979  // create a binary search tree from them.
1980  CaseVector Cases;
1981  size_t numCmps = Clusterify(Cases, SI);
1982  DEBUG(errs() << "Clusterify finished. Total clusters: " << Cases.size()
1983               << ". Total compares: " << numCmps << '\n');
1984  numCmps = 0;
1985
1986  // Get the Value to be switched on and default basic blocks, which will be
1987  // inserted into CaseBlock records, representing basic blocks in the binary
1988  // search tree.
1989  Value *SV = SI.getOperand(0);
1990
1991  // Push the initial CaseRec onto the worklist
1992  CaseRecVector WorkList;
1993  WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
1994
1995  while (!WorkList.empty()) {
1996    // Grab a record representing a case range to process off the worklist
1997    CaseRec CR = WorkList.back();
1998    WorkList.pop_back();
1999
2000    if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2001      continue;
2002
2003    // If the range has few cases (three or fewer), emit a series of specific
2004    // tests.
2005    if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2006      continue;
2007
2008    // If the range covers at least four case values, is at least 40% dense, and the
2009    // target supports indirect branches, then emit a jump table rather than
2010    // lowering the switch to a binary tree of conditional branches.
2011    if (handleJTSwitchCase(CR, WorkList, SV, Default))
2012      continue;
2013
2014    // Emit a binary tree. We need to pick a pivot, and push left and right
2015    // ranges onto the worklist. Leaves are handled via handleSmallSwitchRange().
2016    handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2017  }
2018}
2019
2020void SelectionDAGBuilder::visitIndirectBr(IndirectBrInst &I) {
2021  // Update machine-CFG edges.
2022  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i)
2023    CurMBB->addSuccessor(FuncInfo.MBBMap[I.getSuccessor(i)]);
2024
2025  DAG.setRoot(DAG.getNode(ISD::BRIND, getCurDebugLoc(),
2026                          MVT::Other, getControlRoot(),
2027                          getValue(I.getAddress())));
2028}
2029
2030
2031void SelectionDAGBuilder::visitFSub(User &I) {
2032  // -0.0 - X --> fneg
2033  const Type *Ty = I.getType();
2034  if (isa<VectorType>(Ty)) {
2035    if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2036      const VectorType *DestTy = cast<VectorType>(I.getType());
2037      const Type *ElTy = DestTy->getElementType();
2038      unsigned VL = DestTy->getNumElements();
2039      std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2040      Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
2041      if (CV == CNZ) {
2042        SDValue Op2 = getValue(I.getOperand(1));
2043        setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2044                                 Op2.getValueType(), Op2));
2045        return;
2046      }
2047    }
2048  }
2049  if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2050    if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2051      SDValue Op2 = getValue(I.getOperand(1));
2052      setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2053                               Op2.getValueType(), Op2));
2054      return;
2055    }
2056
2057  visitBinary(I, ISD::FSUB);
2058}
2059
2060void SelectionDAGBuilder::visitBinary(User &I, unsigned OpCode) {
2061  SDValue Op1 = getValue(I.getOperand(0));
2062  SDValue Op2 = getValue(I.getOperand(1));
2063
2064  setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
2065                           Op1.getValueType(), Op1, Op2));
2066}
2067
2068void SelectionDAGBuilder::visitShift(User &I, unsigned Opcode) {
2069  SDValue Op1 = getValue(I.getOperand(0));
2070  SDValue Op2 = getValue(I.getOperand(1));
2071  if (!isa<VectorType>(I.getType()) &&
2072      Op2.getValueType() != TLI.getShiftAmountTy()) {
2073    // If the operand is smaller than the shift count type, promote it.
2074    EVT PTy = TLI.getPointerTy();
2075    EVT STy = TLI.getShiftAmountTy();
2076    if (STy.bitsGT(Op2.getValueType()))
2077      Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2078                        TLI.getShiftAmountTy(), Op2);
2079    // If the operand is larger than the shift count type but the shift
2080    // count type has enough bits to represent any shift value, truncate
2081    // it now. This is a common case and it exposes the truncate to
2082    // optimization early.
2083    else if (STy.getSizeInBits() >=
2084             Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2085      Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2086                        TLI.getShiftAmountTy(), Op2);
2087    // Otherwise we'll need to temporarily settle for some other
2088    // convenient type; type legalization will make adjustments as
2089    // needed.
2090    else if (PTy.bitsLT(Op2.getValueType()))
2091      Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2092                        TLI.getPointerTy(), Op2);
2093    else if (PTy.bitsGT(Op2.getValueType()))
2094      Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2095                        TLI.getPointerTy(), Op2);
2096  }
2097
2098  setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
2099                           Op1.getValueType(), Op1, Op2));
2100}
2101
2102void SelectionDAGBuilder::visitICmp(User &I) {
2103  ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2104  if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2105    predicate = IC->getPredicate();
2106  else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2107    predicate = ICmpInst::Predicate(IC->getPredicate());
2108  SDValue Op1 = getValue(I.getOperand(0));
2109  SDValue Op2 = getValue(I.getOperand(1));
2110  ISD::CondCode Opcode = getICmpCondCode(predicate);
2111
2112  EVT DestVT = TLI.getValueType(I.getType());
2113  setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode));
2114}
2115
2116void SelectionDAGBuilder::visitFCmp(User &I) {
2117  FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2118  if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2119    predicate = FC->getPredicate();
2120  else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2121    predicate = FCmpInst::Predicate(FC->getPredicate());
2122  SDValue Op1 = getValue(I.getOperand(0));
2123  SDValue Op2 = getValue(I.getOperand(1));
2124  ISD::CondCode Condition = getFCmpCondCode(predicate);
2125  EVT DestVT = TLI.getValueType(I.getType());
2126  setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
2127}
2128
2129void SelectionDAGBuilder::visitSelect(User &I) {
2130  SmallVector<EVT, 4> ValueVTs;
2131  ComputeValueVTs(TLI, I.getType(), ValueVTs);
2132  unsigned NumValues = ValueVTs.size();
2133  if (NumValues != 0) {
2134    SmallVector<SDValue, 4> Values(NumValues);
2135    SDValue Cond     = getValue(I.getOperand(0));
2136    SDValue TrueVal  = getValue(I.getOperand(1));
2137    SDValue FalseVal = getValue(I.getOperand(2));
2138
2139    for (unsigned i = 0; i != NumValues; ++i)
2140      Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2141                              TrueVal.getNode()->getValueType(i), Cond,
2142                              SDValue(TrueVal.getNode(), TrueVal.getResNo() + i),
2143                              SDValue(FalseVal.getNode(), FalseVal.getResNo() + i));
2144
2145    setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2146                             DAG.getVTList(&ValueVTs[0], NumValues),
2147                             &Values[0], NumValues));
2148  }
2149}
2150
2151
2152void SelectionDAGBuilder::visitTrunc(User &I) {
2153  // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2154  SDValue N = getValue(I.getOperand(0));
2155  EVT DestVT = TLI.getValueType(I.getType());
2156  setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2157}
2158
2159void SelectionDAGBuilder::visitZExt(User &I) {
2160  // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2161  // ZExt also can't be a cast to bool for the same reason, so there's not much to do.
2162  SDValue N = getValue(I.getOperand(0));
2163  EVT DestVT = TLI.getValueType(I.getType());
2164  setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
2165}
2166
2167void SelectionDAGBuilder::visitSExt(User &I) {
2168  // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2169  // SExt also can't be a cast to bool for the same reason, so there's not much to do.
2170  SDValue N = getValue(I.getOperand(0));
2171  EVT DestVT = TLI.getValueType(I.getType());
2172  setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
2173}
2174
2175void SelectionDAGBuilder::visitFPTrunc(User &I) {
2176  // FPTrunc is never a no-op cast, no need to check
2177  SDValue N = getValue(I.getOperand(0));
2178  EVT DestVT = TLI.getValueType(I.getType());
2179  setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2180                           DestVT, N, DAG.getIntPtrConstant(0)));
2181}
2182
2183void SelectionDAGBuilder::visitFPExt(User &I){
2184  // FPExt is never a no-op cast, no need to check
2185  SDValue N = getValue(I.getOperand(0));
2186  EVT DestVT = TLI.getValueType(I.getType());
2187  setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
2188}
2189
2190void SelectionDAGBuilder::visitFPToUI(User &I) {
2191  // FPToUI is never a no-op cast, no need to check
2192  SDValue N = getValue(I.getOperand(0));
2193  EVT DestVT = TLI.getValueType(I.getType());
2194  setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
2195}
2196
2197void SelectionDAGBuilder::visitFPToSI(User &I) {
2198  // FPToSI is never a no-op cast, no need to check
2199  SDValue N = getValue(I.getOperand(0));
2200  EVT DestVT = TLI.getValueType(I.getType());
2201  setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
2202}
2203
2204void SelectionDAGBuilder::visitUIToFP(User &I) {
2205  // UIToFP is never a no-op cast, no need to check
2206  SDValue N = getValue(I.getOperand(0));
2207  EVT DestVT = TLI.getValueType(I.getType());
2208  setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
2209}
2210
2211void SelectionDAGBuilder::visitSIToFP(User &I){
2212  // SIToFP is never a no-op cast, no need to check
2213  SDValue N = getValue(I.getOperand(0));
2214  EVT DestVT = TLI.getValueType(I.getType());
2215  setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
2216}
2217
2218void SelectionDAGBuilder::visitPtrToInt(User &I) {
2219  // What to do depends on the size of the integer and the size of the pointer.
2220  // We can either truncate, zero extend, or no-op, accordingly.
2221  SDValue N = getValue(I.getOperand(0));
2222  EVT SrcVT = N.getValueType();
2223  EVT DestVT = TLI.getValueType(I.getType());
2224  SDValue Result = DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT);
2225  setValue(&I, Result);
2226}
2227
2228void SelectionDAGBuilder::visitIntToPtr(User &I) {
2229  // What to do depends on the size of the integer and the size of the pointer.
2230  // We can either truncate, zero extend, or no-op, accordingly.
2231  SDValue N = getValue(I.getOperand(0));
2232  EVT SrcVT = N.getValueType();
2233  EVT DestVT = TLI.getValueType(I.getType());
2234  setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
2235}
2236
2237void SelectionDAGBuilder::visitBitCast(User &I) {
2238  SDValue N = getValue(I.getOperand(0));
2239  EVT DestVT = TLI.getValueType(I.getType());
2240
2241  // BitCast assures us that source and destination are the same size so this
2242  // is either a BIT_CONVERT or a no-op.
2243  if (DestVT != N.getValueType())
2244    setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2245                             DestVT, N)); // convert types
2246  else
2247    setValue(&I, N); // noop cast.
2248}
2249
2250void SelectionDAGBuilder::visitInsertElement(User &I) {
2251  SDValue InVec = getValue(I.getOperand(0));
2252  SDValue InVal = getValue(I.getOperand(1));
2253  SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2254                                TLI.getPointerTy(),
2255                                getValue(I.getOperand(2)));
2256
2257  setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2258                           TLI.getValueType(I.getType()),
2259                           InVec, InVal, InIdx));
2260}
2261
2262void SelectionDAGBuilder::visitExtractElement(User &I) {
2263  SDValue InVec = getValue(I.getOperand(0));
2264  SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2265                                TLI.getPointerTy(),
2266                                getValue(I.getOperand(1)));
2267  setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2268                           TLI.getValueType(I.getType()), InVec, InIdx));
2269}
2270
2271
2272// Utility for visitShuffleVector - Returns true if the mask is a sequential mask
2273// starting from SIndx and increasing to the element length (undefs are allowed).
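// For example, with SIndx == 0 the masks <0, 1, 2, 3> and <undef, 1, undef, 3>
// are sequential, while <0, 2, 1, 3> is not.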
2274static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
2275  unsigned MaskNumElts = Mask.size();
2276  for (unsigned i = 0; i != MaskNumElts; ++i)
2277    if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
2278      return false;
2279  return true;
2280}
2281
2282void SelectionDAGBuilder::visitShuffleVector(User &I) {
2283  SmallVector<int, 8> Mask;
2284  SDValue Src1 = getValue(I.getOperand(0));
2285  SDValue Src2 = getValue(I.getOperand(1));
2286
2287  // Convert the ConstantVector mask operand into an array of ints, with -1
2288  // representing undef values.
2289  SmallVector<Constant*, 8> MaskElts;
2290  cast<Constant>(I.getOperand(2))->getVectorElements(*DAG.getContext(),
2291                                                     MaskElts);
2292  unsigned MaskNumElts = MaskElts.size();
2293  for (unsigned i = 0; i != MaskNumElts; ++i) {
2294    if (isa<UndefValue>(MaskElts[i]))
2295      Mask.push_back(-1);
2296    else
2297      Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
2298  }
2299
2300  EVT VT = TLI.getValueType(I.getType());
2301  EVT SrcVT = Src1.getValueType();
2302  unsigned SrcNumElts = SrcVT.getVectorNumElements();
2303
2304  if (SrcNumElts == MaskNumElts) {
2305    setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2306                                      &Mask[0]));
2307    return;
2308  }
2309
2310  // Normalize the shuffle vector since mask and vector length don't match.
2311  if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2312    // Mask is longer than the source vectors and is a multiple of the source
2313    // vectors.  We can use concatenate vector to make the mask and vectors
2314    // lengths match.
2315    if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2316      // The shuffle is concatenating two vectors together.
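      // For example, a <4 x i32> shuffle of two <2 x i32> sources with the
      // mask <0, 1, 2, 3> is exactly the concatenation of Src1 and Src2.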
2317      setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2318                               VT, Src1, Src2));
2319      return;
2320    }
2321
2322    // Pad both vectors with undefs to make them the same length as the mask.
2323    unsigned NumConcat = MaskNumElts / SrcNumElts;
2324    bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2325    bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2326    SDValue UndefVal = DAG.getUNDEF(SrcVT);
2327
2328    SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
2329    SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
2330    MOps1[0] = Src1;
2331    MOps2[0] = Src2;
2332
2333    Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2334                                                  getCurDebugLoc(), VT,
2335                                                  &MOps1[0], NumConcat);
2336    Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2337                                                  getCurDebugLoc(), VT,
2338                                                  &MOps2[0], NumConcat);
2339
2340    // Readjust mask for new input vector length.
2341    SmallVector<int, 8> MappedOps;
2342    for (unsigned i = 0; i != MaskNumElts; ++i) {
2343      int Idx = Mask[i];
2344      if (Idx < (int)SrcNumElts)
2345        MappedOps.push_back(Idx);
2346      else
2347        MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
2348    }
2349    setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2350                                      &MappedOps[0]));
2351    return;
2352  }
2353
2354  if (SrcNumElts > MaskNumElts) {
2355    // Analyze the access pattern of the vector to see if we can extract
2356    // two subvectors and do the shuffle. The analysis is done by calculating
2357    // the range of elements the mask accesses on both vectors.
2358    int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2359    int MaxRange[2] = {-1, -1};
2360
2361    for (unsigned i = 0; i != MaskNumElts; ++i) {
2362      int Idx = Mask[i];
2363      int Input = 0;
2364      if (Idx < 0)
2365        continue;
2366
2367      if (Idx >= (int)SrcNumElts) {
2368        Input = 1;
2369        Idx -= SrcNumElts;
2370      }
2371      if (Idx > MaxRange[Input])
2372        MaxRange[Input] = Idx;
2373      if (Idx < MinRange[Input])
2374        MinRange[Input] = Idx;
2375    }
2376
2377    // Check if the access is smaller than the vector size and whether we can
2378    // find a reasonable extract index.
2379    int RangeUse[2] = { 2, 2 };  // 0 = Unused, 1 = Extract, 2 = Cannot extract.
2380    int StartIdx[2];  // StartIdx to extract from
2381    for (int Input=0; Input < 2; ++Input) {
2382      if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
2383        RangeUse[Input] = 0; // Unused
2384        StartIdx[Input] = 0;
2385      } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
2386        // Fits within range but we should see if we can find a good
2387        // start index that is a multiple of the mask length.
2388        if (MaxRange[Input] < (int)MaskNumElts) {
2389          RangeUse[Input] = 1; // Extract from beginning of the vector
2390          StartIdx[Input] = 0;
2391        } else {
2392          StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2393          if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2394              StartIdx[Input] + MaskNumElts < SrcNumElts)
2395            RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2396        }
2397      }
2398    }
2399
2400    if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2401      setValue(&I, DAG.getUNDEF(VT));  // Vectors are not used.
2402      return;
2403    }
2404    else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2405      // Extract appropriate subvector and generate a vector shuffle
2406      for (int Input=0; Input < 2; ++Input) {
2407        SDValue& Src = Input == 0 ? Src1 : Src2;
2408        if (RangeUse[Input] == 0) {
2409          Src = DAG.getUNDEF(VT);
2410        } else {
2411          Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
2412                            Src, DAG.getIntPtrConstant(StartIdx[Input]));
2413        }
2414      }
2415      // Calculate new mask.
2416      SmallVector<int, 8> MappedOps;
2417      for (unsigned i = 0; i != MaskNumElts; ++i) {
2418        int Idx = Mask[i];
2419        if (Idx < 0)
2420          MappedOps.push_back(Idx);
2421        else if (Idx < (int)SrcNumElts)
2422          MappedOps.push_back(Idx - StartIdx[0]);
2423        else
2424          MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
2425      }
2426      setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2427                                        &MappedOps[0]));
2428      return;
2429    }
2430  }
2431
2432  // We can't use either concat vectors or extract subvectors so fall back to
2433  // replacing the shuffle with extract and build vector.
2434  // to insert and build vector.
2435  EVT EltVT = VT.getVectorElementType();
2436  EVT PtrVT = TLI.getPointerTy();
2437  SmallVector<SDValue,8> Ops;
2438  for (unsigned i = 0; i != MaskNumElts; ++i) {
2439    if (Mask[i] < 0) {
2440      Ops.push_back(DAG.getUNDEF(EltVT));
2441    } else {
2442      int Idx = Mask[i];
2443      if (Idx < (int)SrcNumElts)
2444        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2445                                  EltVT, Src1, DAG.getConstant(Idx, PtrVT)));
2446      else
2447        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2448                                  EltVT, Src2,
2449                                  DAG.getConstant(Idx - SrcNumElts, PtrVT)));
2450    }
2451  }
2452  setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2453                           VT, &Ops[0], Ops.size()));
2454}
2455
2456void SelectionDAGBuilder::visitInsertValue(InsertValueInst &I) {
2457  const Value *Op0 = I.getOperand(0);
2458  const Value *Op1 = I.getOperand(1);
2459  const Type *AggTy = I.getType();
2460  const Type *ValTy = Op1->getType();
2461  bool IntoUndef = isa<UndefValue>(Op0);
2462  bool FromUndef = isa<UndefValue>(Op1);
2463
2464  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2465                                            I.idx_begin(), I.idx_end());
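  // ComputeLinearIndex flattens the aggregate into its leaf values; e.g. for
  // the type {i32, {float, double}} the leaves are numbered 0 (i32), 1 (float)
  // and 2 (double), so an insertion at indices {1, 1} has LinearIndex = 2.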
2466
2467  SmallVector<EVT, 4> AggValueVTs;
2468  ComputeValueVTs(TLI, AggTy, AggValueVTs);
2469  SmallVector<EVT, 4> ValValueVTs;
2470  ComputeValueVTs(TLI, ValTy, ValValueVTs);
2471
2472  unsigned NumAggValues = AggValueVTs.size();
2473  unsigned NumValValues = ValValueVTs.size();
2474  SmallVector<SDValue, 4> Values(NumAggValues);
2475
2476  SDValue Agg = getValue(Op0);
2477  SDValue Val = getValue(Op1);
2478  unsigned i = 0;
2479  // Copy the beginning value(s) from the original aggregate.
2480  for (; i != LinearIndex; ++i)
2481    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2482                SDValue(Agg.getNode(), Agg.getResNo() + i);
2483  // Copy values from the inserted value(s).
2484  for (; i != LinearIndex + NumValValues; ++i)
2485    Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2486                SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2487  // Copy remaining value(s) from the original aggregate.
2488  for (; i != NumAggValues; ++i)
2489    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2490                SDValue(Agg.getNode(), Agg.getResNo() + i);
2491
2492  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2493                           DAG.getVTList(&AggValueVTs[0], NumAggValues),
2494                           &Values[0], NumAggValues));
2495}
2496
2497void SelectionDAGBuilder::visitExtractValue(ExtractValueInst &I) {
2498  const Value *Op0 = I.getOperand(0);
2499  const Type *AggTy = Op0->getType();
2500  const Type *ValTy = I.getType();
2501  bool OutOfUndef = isa<UndefValue>(Op0);
2502
2503  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2504                                            I.idx_begin(), I.idx_end());
2505
2506  SmallVector<EVT, 4> ValValueVTs;
2507  ComputeValueVTs(TLI, ValTy, ValValueVTs);
2508
2509  unsigned NumValValues = ValValueVTs.size();
2510  SmallVector<SDValue, 4> Values(NumValValues);
2511
2512  SDValue Agg = getValue(Op0);
2513  // Copy out the selected value(s).
2514  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2515    Values[i - LinearIndex] =
2516      OutOfUndef ?
2517        DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2518        SDValue(Agg.getNode(), Agg.getResNo() + i);
2519
2520  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2521                           DAG.getVTList(&ValValueVTs[0], NumValValues),
2522                           &Values[0], NumValValues));
2523}
2524
2525
2526void SelectionDAGBuilder::visitGetElementPtr(User &I) {
2527  SDValue N = getValue(I.getOperand(0));
2528  const Type *Ty = I.getOperand(0)->getType();
2529
2530  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2531       OI != E; ++OI) {
2532    Value *Idx = *OI;
2533    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2534      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2535      if (Field) {
2536        // N = N + Offset
2537        uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2538        N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2539                        DAG.getIntPtrConstant(Offset));
2540      }
2541      Ty = StTy->getElementType(Field);
2542    } else {
2543      Ty = cast<SequentialType>(Ty)->getElementType();
2544
2545      // If this is a constant subscript, handle it quickly.
2546      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2547        if (CI->getZExtValue() == 0) continue;
2548        uint64_t Offs =
2549            TD->getTypeAllocSize(Ty)*CI->getSExtValue();
2550        SDValue OffsVal;
2551        EVT PTy = TLI.getPointerTy();
2552        unsigned PtrBits = PTy.getSizeInBits();
2553        if (PtrBits < 64) {
2554          OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2555                                TLI.getPointerTy(),
2556                                DAG.getConstant(Offs, MVT::i64));
2557        } else
2558          OffsVal = DAG.getIntPtrConstant(Offs);
2559        N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2560                        OffsVal);
2561        continue;
2562      }
2563
2564      // N = N + Idx * ElementSize;
2565      APInt ElementSize = APInt(TLI.getPointerTy().getSizeInBits(),
2566                                TD->getTypeAllocSize(Ty));
2567      SDValue IdxN = getValue(Idx);
2568
2569      // If the index is smaller or larger than intptr_t, truncate or extend
2570      // it.
2571      IdxN = DAG.getSExtOrTrunc(IdxN, getCurDebugLoc(), N.getValueType());
2572
2573      // If this is a multiply by a power of two, turn it into a shl
2574      // immediately.  This is a very common case.
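      // For example, indexing an array of i32 (ElementSize = 4) becomes
      // IdxN << 2.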
2575      if (ElementSize != 1) {
2576        if (ElementSize.isPowerOf2()) {
2577          unsigned Amt = ElementSize.logBase2();
2578          IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2579                             N.getValueType(), IdxN,
2580                             DAG.getConstant(Amt, TLI.getPointerTy()));
2581        } else {
2582          SDValue Scale = DAG.getConstant(ElementSize, TLI.getPointerTy());
2583          IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
2584                             N.getValueType(), IdxN, Scale);
2585        }
2586      }
2587
2588      N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2589                      N.getValueType(), N, IdxN);
2590    }
2591  }
2592  setValue(&I, N);
2593}
2594
2595void SelectionDAGBuilder::visitAlloca(AllocaInst &I) {
2596  // If this is a fixed sized alloca in the entry block of the function,
2597  // allocate it statically on the stack.
2598  if (FuncInfo.StaticAllocaMap.count(&I))
2599    return;   // getValue will auto-populate this.
2600
2601  const Type *Ty = I.getAllocatedType();
2602  uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
2603  unsigned Align =
2604    std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2605             I.getAlignment());
2606
2607  SDValue AllocSize = getValue(I.getArraySize());
2608
2609  AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
2610                          AllocSize,
2611                          DAG.getConstant(TySize, AllocSize.getValueType()));
2612
2615  EVT IntPtr = TLI.getPointerTy();
2616  AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
2617
2618  // Handle alignment.  If the requested alignment is less than or equal to
2619  // the stack alignment, ignore it.  If the size is greater than or equal to
2620  // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2621  unsigned StackAlign =
2622    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2623  if (Align <= StackAlign)
2624    Align = 0;
2625
2626  // Round the size of the allocation up to the stack alignment size
2627  // by adding StackAlign-1 to the size.
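  // For example, with a 16-byte stack alignment an 18-byte allocation becomes
  // (18 + 15) & ~15 = 32 bytes.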
2628  AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2629                          AllocSize.getValueType(), AllocSize,
2630                          DAG.getIntPtrConstant(StackAlign-1));
2631  // Mask out the low bits for alignment purposes.
2632  AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2633                          AllocSize.getValueType(), AllocSize,
2634                          DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
2635
2636  SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2637  SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2638  SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
2639                            VTs, Ops, 3);
2640  setValue(&I, DSA);
2641  DAG.setRoot(DSA.getValue(1));
2642
2643  // Inform the Frame Information that we have just allocated a variable-sized
2644  // object.
2645  FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject();
2646}
2647
2648void SelectionDAGBuilder::visitLoad(LoadInst &I) {
2649  const Value *SV = I.getOperand(0);
2650  SDValue Ptr = getValue(SV);
2651
2652  const Type *Ty = I.getType();
2653  bool isVolatile = I.isVolatile();
2654  unsigned Alignment = I.getAlignment();
2655
2656  SmallVector<EVT, 4> ValueVTs;
2657  SmallVector<uint64_t, 4> Offsets;
2658  ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
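  // A first-class aggregate load is split into one load per leaf value; for
  // example (assuming the usual layout), loading a {i32, double} produces two
  // loads at offsets 0 and 8 that are merged back together with MERGE_VALUES.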
2659  unsigned NumValues = ValueVTs.size();
2660  if (NumValues == 0)
2661    return;
2662
2663  SDValue Root;
2664  bool ConstantMemory = false;
2665  if (I.isVolatile())
2666    // Serialize volatile loads with other side effects.
2667    Root = getRoot();
2668  else if (AA->pointsToConstantMemory(SV)) {
2669    // Do not serialize (non-volatile) loads of constant memory with anything.
2670    Root = DAG.getEntryNode();
2671    ConstantMemory = true;
2672  } else {
2673    // Do not serialize non-volatile loads against each other.
2674    Root = DAG.getRoot();
2675  }
2676
2677  SmallVector<SDValue, 4> Values(NumValues);
2678  SmallVector<SDValue, 4> Chains(NumValues);
2679  EVT PtrVT = Ptr.getValueType();
2680  for (unsigned i = 0; i != NumValues; ++i) {
2681    SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
2682                            DAG.getNode(ISD::ADD, getCurDebugLoc(),
2683                                        PtrVT, Ptr,
2684                                        DAG.getConstant(Offsets[i], PtrVT)),
2685                            SV, Offsets[i], isVolatile, Alignment);
2686    Values[i] = L;
2687    Chains[i] = L.getValue(1);
2688  }
2689
2690  if (!ConstantMemory) {
2691    SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2692                                  MVT::Other,
2693                                  &Chains[0], NumValues);
2694    if (isVolatile)
2695      DAG.setRoot(Chain);
2696    else
2697      PendingLoads.push_back(Chain);
2698  }
2699
2700  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2701                           DAG.getVTList(&ValueVTs[0], NumValues),
2702                           &Values[0], NumValues));
2703}
2704
2705
2706void SelectionDAGBuilder::visitStore(StoreInst &I) {
2707  Value *SrcV = I.getOperand(0);
2708  Value *PtrV = I.getOperand(1);
2709
2710  SmallVector<EVT, 4> ValueVTs;
2711  SmallVector<uint64_t, 4> Offsets;
2712  ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
2713  unsigned NumValues = ValueVTs.size();
2714  if (NumValues == 0)
2715    return;
2716
2717  // Get the lowered operands. Note that we do this after
2718  // checking if NumValues is zero, because with no values
2719  // the operands won't have entries in the map.
2720  SDValue Src = getValue(SrcV);
2721  SDValue Ptr = getValue(PtrV);
2722
2723  SDValue Root = getRoot();
2724  SmallVector<SDValue, 4> Chains(NumValues);
2725  EVT PtrVT = Ptr.getValueType();
2726  bool isVolatile = I.isVolatile();
2727  unsigned Alignment = I.getAlignment();
2728  for (unsigned i = 0; i != NumValues; ++i)
2729    Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
2730                             SDValue(Src.getNode(), Src.getResNo() + i),
2731                             DAG.getNode(ISD::ADD, getCurDebugLoc(),
2732                                         PtrVT, Ptr,
2733                                         DAG.getConstant(Offsets[i], PtrVT)),
2734                             PtrV, Offsets[i], isVolatile, Alignment);
2735
2736  DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2737                          MVT::Other, &Chains[0], NumValues));
2738}
2739
2740/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
2741/// node.
2742void SelectionDAGBuilder::visitTargetIntrinsic(CallInst &I,
2743                                               unsigned Intrinsic) {
2744  bool HasChain = !I.doesNotAccessMemory();
2745  bool OnlyLoad = HasChain && I.onlyReadsMemory();
2746
2747  // Build the operand list.
2748  SmallVector<SDValue, 8> Ops;
2749  if (HasChain) {  // If this intrinsic has side-effects, chainify it.
2750    if (OnlyLoad) {
2751      // We don't need to serialize loads against other loads.
2752      Ops.push_back(DAG.getRoot());
2753    } else {
2754      Ops.push_back(getRoot());
2755    }
2756  }
2757
2758  // Info is set by getTgtMemIntrinsic.
2759  TargetLowering::IntrinsicInfo Info;
2760  bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
2761
2762  // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
2763  if (!IsTgtIntrinsic)
2764    Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
2765
2766  // Add all operands of the call to the operand list.
2767  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2768    SDValue Op = getValue(I.getOperand(i));
2769    assert(TLI.isTypeLegal(Op.getValueType()) &&
2770           "Intrinsic uses a non-legal type?");
2771    Ops.push_back(Op);
2772  }
2773
2774  SmallVector<EVT, 4> ValueVTs;
2775  ComputeValueVTs(TLI, I.getType(), ValueVTs);
2776#ifndef NDEBUG
2777  for (unsigned Val = 0, E = ValueVTs.size(); Val != E; ++Val) {
2778    assert(TLI.isTypeLegal(ValueVTs[Val]) &&
2779           "Intrinsic uses a non-legal type?");
2780  }
2781#endif // NDEBUG
2782  if (HasChain)
2783    ValueVTs.push_back(MVT::Other);
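  // If the intrinsic has a chain, it is always the last value of the node; it
  // is peeled back off after the node is created (see below).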
2784
2785  SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
2786
2787  // Create the node.
2788  SDValue Result;
2789  if (IsTgtIntrinsic) {
2790    // This is a target intrinsic that touches memory.
2791    Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
2792                                     VTs, &Ops[0], Ops.size(),
2793                                     Info.memVT, Info.ptrVal, Info.offset,
2794                                     Info.align, Info.vol,
2795                                     Info.readMem, Info.writeMem);
2796  }
2797  else if (!HasChain)
2798    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
2799                         VTs, &Ops[0], Ops.size());
2800  else if (I.getType() != Type::getVoidTy(*DAG.getContext()))
2801    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
2802                         VTs, &Ops[0], Ops.size());
2803  else
2804    Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
2805                         VTs, &Ops[0], Ops.size());
2806
2807  if (HasChain) {
2808    SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
2809    if (OnlyLoad)
2810      PendingLoads.push_back(Chain);
2811    else
2812      DAG.setRoot(Chain);
2813  }
2814  if (I.getType() != Type::getVoidTy(*DAG.getContext())) {
2815    if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
2816      EVT VT = TLI.getValueType(PTy);
2817      Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
2818    }
2819    setValue(&I, Result);
2820  }
2821}
2822
2823/// GetSignificand - Get the significand and build it into a floating-point
2824/// number in the range [1.0, 2.0):
2825///
2826///   Op = (Op & 0x007fffff) | 0x3f800000;
2827///
2828/// where Op is the i32 bit pattern of the floating-point value.
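/// For example, Op = 0x40490fdb (~3.14159274f) yields 0x3fc90fdb, i.e.
/// ~1.57079637f: the significand of the input scaled into [1.0, 2.0).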
2829static SDValue
2830GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
2831  SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
2832                           DAG.getConstant(0x007fffff, MVT::i32));
2833  SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
2834                           DAG.getConstant(0x3f800000, MVT::i32));
2835  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
2836}
2837
2838/// GetExponent - Get the unbiased exponent:
2839///
2840///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
2841///
2842/// where Op is the i32 bit pattern of the floating-point value.
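/// For example, Op = 0x40490fdb (~3.14159274f) has a biased exponent field of
/// 128, so the result is (float)(128 - 127) = 1.0f.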
2843static SDValue
2844GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
2845            DebugLoc dl) {
2846  SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
2847                           DAG.getConstant(0x7f800000, MVT::i32));
2848  SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
2849                           DAG.getConstant(23, TLI.getPointerTy()));
2850  SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
2851                           DAG.getConstant(127, MVT::i32));
2852  return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
2853}
2854
2855/// getF32Constant - Get a 32-bit floating-point constant with the given bits.
2856static SDValue
2857getF32Constant(SelectionDAG &DAG, unsigned Flt) {
2858  return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
2859}
2860
2861/// implVisitBinaryAtomic - Inlined utility function to implement binary-input
2862/// atomic intrinsics for visitIntrinsicCall: I is the call instruction and
2863/// Op is the ISD NodeType associated with I.
2864const char *
2865SelectionDAGBuilder::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
2866  SDValue Root = getRoot();
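  // Operand 1 of the call is the pointer and operand 2 the value to combine
  // with memory; the first result of the atomic node is the value previously
  // held in memory, the second is the output chain.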
2867  SDValue L =
2868    DAG.getAtomic(Op, getCurDebugLoc(),
2869                  getValue(I.getOperand(2)).getValueType().getSimpleVT(),
2870                  Root,
2871                  getValue(I.getOperand(1)),
2872                  getValue(I.getOperand(2)),
2873                  I.getOperand(1));
2874  setValue(&I, L);
2875  DAG.setRoot(L.getValue(1));
2876  return 0;
2877}
2878
2879// implVisitAluOverflow - Lower arithmetic-overflow intrinsics.
2880const char *
2881SelectionDAGBuilder::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
2882  SDValue Op1 = getValue(I.getOperand(1));
2883  SDValue Op2 = getValue(I.getOperand(2));
2884
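  // The *.with.overflow intrinsics return a {result, i1 overflow} pair, so
  // model them as a single node with two results.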
2885  SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
2886  SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2);
2887
2888  setValue(&I, Result);
2889  return 0;
2890}
2891
2892/// visitExp - Lower an exp intrinsic. Handles the special sequences for
2893/// limited-precision mode.
2894void
2895SelectionDAGBuilder::visitExp(CallInst &I) {
2896  SDValue result;
2897  DebugLoc dl = getCurDebugLoc();
2898
2899  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
2900      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
2901    SDValue Op = getValue(I.getOperand(1));
2902
2903    // Put the exponent in the right bit position for later addition to the
2904    // final result:
2905    //
2906    //   #define LOG2OFe 1.4426950f
2907    //   IntegerPartOfX = ((int32_t)(X * LOG2OFe));
2908    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
2909                             getF32Constant(DAG, 0x3fb8aa3b));
2910    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
2911
2912    //   FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
2913    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
2914    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
2915
2916    //   IntegerPartOfX <<= 23;
2917    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
2918                                 DAG.getConstant(23, TLI.getPointerTy()));
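    // Worked example (illustrative): for Op == 1.0f, t0 is ~1.4427, so
    // IntegerPartOfX == 1 and X is ~0.4427; the polynomial below approximates
    // 2^0.4427 (~1.3591), and adding 1 << 23 to its bit pattern doubles it,
    // giving ~2.7183 == exp(1.0f).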
2919
2920    if (LimitFloatPrecision <= 6) {
2921      // For floating-point precision of 6:
2922      //
2923      //   TwoToFractionalPartOfX =
2924      //     0.997535578f +
2925      //       (0.735607626f + 0.252464424f * x) * x;
2926      //
2927      // error 0.0144103317, which is 6 bits
2928      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
2929                               getF32Constant(DAG, 0x3e814304));
2930      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
2931                               getF32Constant(DAG, 0x3f3c50c8));
2932      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
2933      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
2934                               getF32Constant(DAG, 0x3f7f5e7e));
2935      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
2936
2937      // Add the exponent into the result in integer domain.
2938      SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
2939                               TwoToFracPartOfX, IntegerPartOfX);
2940
2941      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
2942    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
2943      // For floating-point precision of 12:
2944      //
2945      //   TwoToFractionalPartOfX =
2946      //     0.999892986f +
2947      //       (0.696457318f +
2948      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
2949      //
2950      // 0.000107046256 error, which is 13 to 14 bits
2951      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
2952                               getF32Constant(DAG, 0x3da235e3));
2953      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
2954                               getF32Constant(DAG, 0x3e65b8f3));
2955      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
2956      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
2957                               getF32Constant(DAG, 0x3f324b07));
2958      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
2959      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
2960                               getF32Constant(DAG, 0x3f7ff8fd));
2961      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
2962
2963      // Add the exponent into the result in integer domain.
2964      SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
2965                               TwoToFracPartOfX, IntegerPartOfX);
2966
2967      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
2968    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
2969      // For floating-point precision of 18:
2970      //
2971      //   TwoToFractionalPartOfX =
2972      //     0.999999982f +
2973      //       (0.693148872f +
2974      //         (0.240227044f +
2975      //           (0.554906021e-1f +
2976      //             (0.961591928e-2f +
2977      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
2978      //
2979      // error 2.47208000*10^(-7), which is better than 18 bits
2980      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
2981                               getF32Constant(DAG, 0x3924b03e));
2982      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
2983                               getF32Constant(DAG, 0x3ab24b87));
2984      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
2985      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
2986                               getF32Constant(DAG, 0x3c1d8c17));
2987      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
2988      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
2989                               getF32Constant(DAG, 0x3d634a1d));
2990      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
2991      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
2992                               getF32Constant(DAG, 0x3e75fe14));
2993      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
2994      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
2995                                getF32Constant(DAG, 0x3f317234));
2996      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
2997      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
2998                                getF32Constant(DAG, 0x3f800000));
2999      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
3000                                             MVT::i32, t13);
3001
3002      // Add the exponent into the result in integer domain.
3003      SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3004                                TwoToFracPartOfX, IntegerPartOfX);
3005
3006      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
3007    }
3008  } else {
3009    // No special expansion.
3010    result = DAG.getNode(ISD::FEXP, dl,
3011                         getValue(I.getOperand(1)).getValueType(),
3012                         getValue(I.getOperand(1)));
3013  }
3014
3015  setValue(&I, result);
3016}
3017
3018/// visitLog - Lower a log intrinsic. Handles the special sequences for
3019/// limited-precision mode.
3020void
3021SelectionDAGBuilder::visitLog(CallInst &I) {
3022  SDValue result;
3023  DebugLoc dl = getCurDebugLoc();
3024
3025  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3026      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3027    SDValue Op = getValue(I.getOperand(1));
3028    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3029
3030    // Scale the exponent by log(2) [0.69314718f].
3031    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3032    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3033                                        getF32Constant(DAG, 0x3f317218));
3034
3035    // Get the significand and build it into a floating-point number with
3036    // exponent of 1.
3037    SDValue X = GetSignificand(DAG, Op1, dl);
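    // Worked example (illustrative): for an input of 8.0f, Exp is 3.0f, so
    // LogOfExponent is ~2.0794 (3 * ln 2); X is 1.0f and the polynomials below
    // give a LogOfMantissa of ~0, so the result is ~2.0794 == log(8.0f).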
3038
3039    if (LimitFloatPrecision <= 6) {
3040      // For floating-point precision of 6:
3041      //
3042      //   LogofMantissa =
3043      //     -1.1609546f +
3044      //       (1.4034025f - 0.23903021f * x) * x;
3045      //
3046      // error 0.0034276066, which is better than 8 bits
3047      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3048                               getF32Constant(DAG, 0xbe74c456));
3049      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3050                               getF32Constant(DAG, 0x3fb3a2b1));
3051      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3052      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3053                                          getF32Constant(DAG, 0x3f949a29));
3054
3055      result = DAG.getNode(ISD::FADD, dl,
3056                           MVT::f32, LogOfExponent, LogOfMantissa);
3057    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3058      // For floating-point precision of 12:
3059      //
3060      //   LogOfMantissa =
3061      //     -1.7417939f +
3062      //       (2.8212026f +
3063      //         (-1.4699568f +
3064      //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3065      //
3066      // error 0.000061011436, which is 14 bits
3067      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3068                               getF32Constant(DAG, 0xbd67b6d6));
3069      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3070                               getF32Constant(DAG, 0x3ee4f4b8));
3071      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3072      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3073                               getF32Constant(DAG, 0x3fbc278b));
3074      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3075      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3076                               getF32Constant(DAG, 0x40348e95));
3077      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3078      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3079                                          getF32Constant(DAG, 0x3fdef31a));
3080
3081      result = DAG.getNode(ISD::FADD, dl,
3082                           MVT::f32, LogOfExponent, LogOfMantissa);
3083    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3084      // For floating-point precision of 18:
3085      //
3086      //   LogOfMantissa =
3087      //     -2.1072184f +
3088      //       (4.2372794f +
3089      //         (-3.7029485f +
3090      //           (2.2781945f +
3091      //             (-0.87823314f +
3092      //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3093      //
3094      // error 0.0000023660568, which is better than 18 bits
3095      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3096                               getF32Constant(DAG, 0xbc91e5ac));
3097      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3098                               getF32Constant(DAG, 0x3e4350aa));
3099      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3100      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3101                               getF32Constant(DAG, 0x3f60d3e3));
3102      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3103      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3104                               getF32Constant(DAG, 0x4011cdf0));
3105      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3106      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3107                               getF32Constant(DAG, 0x406cfd1c));
3108      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3109      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3110                               getF32Constant(DAG, 0x408797cb));
3111      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3112      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3113                                          getF32Constant(DAG, 0x4006dcab));
3114
3115      result = DAG.getNode(ISD::FADD, dl,
3116                           MVT::f32, LogOfExponent, LogOfMantissa);
3117    }
3118  } else {
3119    // No special expansion.
3120    result = DAG.getNode(ISD::FLOG, dl,
3121                         getValue(I.getOperand(1)).getValueType(),
3122                         getValue(I.getOperand(1)));
3123  }
3124
3125  setValue(&I, result);
3126}
3127
3128/// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3129/// limited-precision mode.
3130void
3131SelectionDAGBuilder::visitLog2(CallInst &I) {
3132  SDValue result;
3133  DebugLoc dl = getCurDebugLoc();
3134
3135  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3136      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3137    SDValue Op = getValue(I.getOperand(1));
3138    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3139
3140    // Get the exponent.
3141    SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
3142
3143    // Get the significand and build it into a floating-point number with
3144    // exponent of 1.
3145    SDValue X = GetSignificand(DAG, Op1, dl);
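    // Worked example (illustrative): for an input of 8.0f, LogOfExponent is
    // 3.0f and X is 1.0f; the polynomials below give a Log2ofMantissa of ~0,
    // so the result is ~3.0f == log2(8.0f).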
3146
3147    // Different possible minimax approximations of significand in
3148    // floating-point for various degrees of accuracy over [1,2].
3149    if (LimitFloatPrecision <= 6) {
3150      // For floating-point precision of 6:
3151      //
3152      //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3153      //
3154      // error 0.0049451742, which is more than 7 bits
3155      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3156                               getF32Constant(DAG, 0xbeb08fe0));
3157      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3158                               getF32Constant(DAG, 0x40019463));
3159      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3160      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3161                                           getF32Constant(DAG, 0x3fd6633d));
3162
3163      result = DAG.getNode(ISD::FADD, dl,
3164                           MVT::f32, LogOfExponent, Log2ofMantissa);
3165    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3166      // For floating-point precision of 12:
3167      //
3168      //   Log2ofMantissa =
3169      //     -2.51285454f +
3170      //       (4.07009056f +
3171      //         (-2.12067489f +
3172      //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3173      //
3174      // error 0.0000876136000, which is better than 13 bits
3175      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3176                               getF32Constant(DAG, 0xbda7262e));
3177      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3178                               getF32Constant(DAG, 0x3f25280b));
3179      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3180      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3181                               getF32Constant(DAG, 0x4007b923));
3182      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3183      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3184                               getF32Constant(DAG, 0x40823e2f));
3185      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3186      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3187                                           getF32Constant(DAG, 0x4020d29c));
3188
3189      result = DAG.getNode(ISD::FADD, dl,
3190                           MVT::f32, LogOfExponent, Log2ofMantissa);
3191    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3192      // For floating-point precision of 18:
3193      //
3194      //   Log2ofMantissa =
3195      //     -3.0400495f +
3196      //       (6.1129976f +
3197      //         (-5.3420409f +
3198      //           (3.2865683f +
3199      //             (-1.2669343f +
3200      //               (0.27515199f -
3201      //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3202      //
3203      // error 0.0000018516, which is better than 18 bits
3204      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3205                               getF32Constant(DAG, 0xbcd2769e));
3206      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3207                               getF32Constant(DAG, 0x3e8ce0b9));
3208      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3209      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3210                               getF32Constant(DAG, 0x3fa22ae7));
3211      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3212      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3213                               getF32Constant(DAG, 0x40525723));
3214      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3215      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3216                               getF32Constant(DAG, 0x40aaf200));
3217      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3218      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3219                               getF32Constant(DAG, 0x40c39dad));
3220      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3221      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3222                                           getF32Constant(DAG, 0x4042902c));
3223
3224      result = DAG.getNode(ISD::FADD, dl,
3225                           MVT::f32, LogOfExponent, Log2ofMantissa);
3226    }
3227  } else {
3228    // No special expansion.
3229    result = DAG.getNode(ISD::FLOG2, dl,
3230                         getValue(I.getOperand(1)).getValueType(),
3231                         getValue(I.getOperand(1)));
3232  }
3233
3234  setValue(&I, result);
3235}
3236
3237/// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3238/// limited-precision mode.
3239void
3240SelectionDAGBuilder::visitLog10(CallInst &I) {
3241  SDValue result;
3242  DebugLoc dl = getCurDebugLoc();
3243
3244  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3245      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3246    SDValue Op = getValue(I.getOperand(1));
3247    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3248
3249    // Scale the exponent by log10(2) [0.30102999f].
3250    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3251    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3252                                        getF32Constant(DAG, 0x3e9a209a));
3253
3254    // Get the significand and build it into a floating-point number with
3255    // exponent of 1.
3256    SDValue X = GetSignificand(DAG, Op1, dl);
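    // Worked example (illustrative): for an input of 100.0f (1.5625 * 2^6),
    // LogOfExponent is ~1.8062 (6 * log10(2)) and X is 1.5625f; the
    // polynomials below give a Log10ofMantissa of ~0.1938, so the result is
    // ~2.0f == log10(100.0f).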
3257
3258    if (LimitFloatPrecision <= 6) {
3259      // For floating-point precision of 6:
3260      //
3261      //   Log10ofMantissa =
3262      //     -0.50419619f +
3263      //       (0.60948995f - 0.10380950f * x) * x;
3264      //
3265      // error 0.0014886165, which is 6 bits
3266      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3267                               getF32Constant(DAG, 0xbdd49a13));
3268      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3269                               getF32Constant(DAG, 0x3f1c0789));
3270      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3271      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3272                                            getF32Constant(DAG, 0x3f011300));
3273
3274      result = DAG.getNode(ISD::FADD, dl,
3275                           MVT::f32, LogOfExponent, Log10ofMantissa);
3276    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3277      // For floating-point precision of 12:
3278      //
3279      //   Log10ofMantissa =
3280      //     -0.64831180f +
3281      //       (0.91751397f +
3282      //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3283      //
3284      // error 0.00019228036, which is better than 12 bits
3285      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3286                               getF32Constant(DAG, 0x3d431f31));
3287      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3288                               getF32Constant(DAG, 0x3ea21fb2));
3289      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3290      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3291                               getF32Constant(DAG, 0x3f6ae232));
3292      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3293      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3294                                            getF32Constant(DAG, 0x3f25f7c3));
3295
3296      result = DAG.getNode(ISD::FADD, dl,
3297                           MVT::f32, LogOfExponent, Log10ofMantissa);
3298    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3299      // For floating-point precision of 18:
3300      //
3301      //   Log10ofMantissa =
3302      //     -0.84299375f +
3303      //       (1.5327582f +
3304      //         (-1.0688956f +
3305      //           (0.49102474f +
3306      //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3307      //
3308      // error 0.0000037995730, which is better than 18 bits
3309      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3310                               getF32Constant(DAG, 0x3c5d51ce));
3311      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3312                               getF32Constant(DAG, 0x3e00685a));
3313      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3314      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3315                               getF32Constant(DAG, 0x3efb6798));
3316      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3317      SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3318                               getF32Constant(DAG, 0x3f88d192));
3319      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3320      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3321                               getF32Constant(DAG, 0x3fc4316c));
3322      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3323      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
3324                                            getF32Constant(DAG, 0x3f57ce70));
3325
3326      result = DAG.getNode(ISD::FADD, dl,
3327                           MVT::f32, LogOfExponent, Log10ofMantissa);
3328    }
3329  } else {
3330    // No special expansion.
3331    result = DAG.getNode(ISD::FLOG10, dl,
3332                         getValue(I.getOperand(1)).getValueType(),
3333                         getValue(I.getOperand(1)));
3334  }
3335
3336  setValue(&I, result);
3337}
3338
3339/// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3340/// limited-precision mode.
3341void
3342SelectionDAGBuilder::visitExp2(CallInst &I) {
3343  SDValue result;
3344  DebugLoc dl = getCurDebugLoc();
3345
3346  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3347      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3348    SDValue Op = getValue(I.getOperand(1));
3349
3350    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
3351
3352    //   FractionalPartOfX = x - (float)IntegerPartOfX;
3353    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3354    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
3355
3356    //   IntegerPartOfX <<= 23;
3357    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3358                                 DAG.getConstant(23, TLI.getPointerTy()));
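    // Worked example (illustrative): for an input of 2.5f, IntegerPartOfX is
    // 2 and X is 0.5f; the polynomials below approximate 2^0.5 (~1.4142), and
    // adding 2 << 23 to its bit pattern scales it by 2^2, giving ~5.657 ==
    // exp2(2.5f).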
3359
3360    if (LimitFloatPrecision <= 6) {
3361      // For floating-point precision of 6:
3362      //
3363      //   TwoToFractionalPartOfX =
3364      //     0.997535578f +
3365      //       (0.735607626f + 0.252464424f * x) * x;
3366      //
3367      // error 0.0144103317, which is 6 bits
3368      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3369                               getF32Constant(DAG, 0x3e814304));
3370      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3371                               getF32Constant(DAG, 0x3f3c50c8));
3372      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3373      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3374                               getF32Constant(DAG, 0x3f7f5e7e));
3375      SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3376      SDValue TwoToFractionalPartOfX =
3377        DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3378
3379      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3380                           MVT::f32, TwoToFractionalPartOfX);
3381    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3382      // For floating-point precision of 12:
3383      //
3384      //   TwoToFractionalPartOfX =
3385      //     0.999892986f +
3386      //       (0.696457318f +
3387      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3388      //
3389      // error 0.000107046256, which is 13 to 14 bits
3390      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3391                               getF32Constant(DAG, 0x3da235e3));
3392      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3393                               getF32Constant(DAG, 0x3e65b8f3));
3394      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3395      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3396                               getF32Constant(DAG, 0x3f324b07));
3397      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3398      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3399                               getF32Constant(DAG, 0x3f7ff8fd));
3400      SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3401      SDValue TwoToFractionalPartOfX =
3402        DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3403
3404      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3405                           MVT::f32, TwoToFractionalPartOfX);
3406    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3407      // For floating-point precision of 18:
3408      //
3409      //   TwoToFractionalPartOfX =
3410      //     0.999999982f +
3411      //       (0.693148872f +
3412      //         (0.240227044f +
3413      //           (0.554906021e-1f +
3414      //             (0.961591928e-2f +
3415      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3416      // error 2.47208000*10^(-7), which is better than 18 bits
3417      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3418                               getF32Constant(DAG, 0x3924b03e));
3419      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3420                               getF32Constant(DAG, 0x3ab24b87));
3421      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3422      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3423                               getF32Constant(DAG, 0x3c1d8c17));
3424      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3425      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3426                               getF32Constant(DAG, 0x3d634a1d));
3427      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3428      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3429                               getF32Constant(DAG, 0x3e75fe14));
3430      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3431      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3432                                getF32Constant(DAG, 0x3f317234));
3433      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3434      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3435                                getF32Constant(DAG, 0x3f800000));
3436      SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3437      SDValue TwoToFractionalPartOfX =
3438        DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3439
3440      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3441                           MVT::f32, TwoToFractionalPartOfX);
3442    }
3443  } else {
3444    // No special expansion.
3445    result = DAG.getNode(ISD::FEXP2, dl,
3446                         getValue(I.getOperand(1)).getValueType(),
3447                         getValue(I.getOperand(1)));
3448  }
3449
3450  setValue(&I, result);
3451}
3452
3453/// visitPow - Lower a pow intrinsic. Handles the special sequences for
3454/// limited-precision mode when the base is the constant 10.0f.
3455void
3456SelectionDAGBuilder::visitPow(CallInst &I) {
3457  SDValue result;
3458  Value *Val = I.getOperand(1);
3459  DebugLoc dl = getCurDebugLoc();
3460  bool IsExp10 = false;
3461
3462  if (getValue(Val).getValueType() == MVT::f32 &&
3463      getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
3464      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3465    if (Constant *C = dyn_cast<Constant>(Val)) {
3466      if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3467        APFloat Ten(10.0f);
3468        IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
3469      }
3470    }
3471  }
3472
3473  if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3474    SDValue Op = getValue(I.getOperand(2));
3475
3476    // Put the exponent in the right bit position for later addition to the
3477    // final result:
3478    //
3479    //   #define LOG2OF10 3.3219281f
3480    //   IntegerPartOfX = (int32_t)(x * LOG2OF10);
3481    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3482                             getF32Constant(DAG, 0x40549a78));
3483    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3484
3485    //   FractionalPartOfX = (x * LOG2OF10) - (float)IntegerPartOfX;
3486    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3487    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3488
3489    //   IntegerPartOfX <<= 23;
3490    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3491                                 DAG.getConstant(23, TLI.getPointerTy()));
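    // Worked example (illustrative): for an exponent of 2.0f, t0 is ~6.6439,
    // so IntegerPartOfX == 6 and X is ~0.6439; the polynomials below
    // approximate 2^0.6439 (~1.5625), and adding 6 << 23 to its bit pattern
    // scales it by 2^6, giving ~100.0 == pow(10.0f, 2.0f).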
3492
3493    if (LimitFloatPrecision <= 6) {
3494      // For floating-point precision of 6:
3495      //
3496      //   twoToFractionalPartOfX =
3497      //     0.997535578f +
3498      //       (0.735607626f + 0.252464424f * x) * x;
3499      //
3500      // error 0.0144103317, which is 6 bits
3501      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3502                               getF32Constant(DAG, 0x3e814304));
3503      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3504                               getF32Constant(DAG, 0x3f3c50c8));
3505      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3506      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3507                               getF32Constant(DAG, 0x3f7f5e7e));
3508      SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3509      SDValue TwoToFractionalPartOfX =
3510        DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3511
3512      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3513                           MVT::f32, TwoToFractionalPartOfX);
3514    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3515      // For floating-point precision of 12:
3516      //
3517      //   TwoToFractionalPartOfX =
3518      //     0.999892986f +
3519      //       (0.696457318f +
3520      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3521      //
3522      // error 0.000107046256, which is 13 to 14 bits
3523      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3524                               getF32Constant(DAG, 0x3da235e3));
3525      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3526                               getF32Constant(DAG, 0x3e65b8f3));
3527      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3528      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3529                               getF32Constant(DAG, 0x3f324b07));
3530      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3531      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3532                               getF32Constant(DAG, 0x3f7ff8fd));
3533      SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3534      SDValue TwoToFractionalPartOfX =
3535        DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3536
3537      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3538                           MVT::f32, TwoToFractionalPartOfX);
3539    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3540      // For floating-point precision of 18:
3541      //
3542      //   TwoToFractionalPartOfX =
3543      //     0.999999982f +
3544      //       (0.693148872f +
3545      //         (0.240227044f +
3546      //           (0.554906021e-1f +
3547      //             (0.961591928e-2f +
3548      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3549      // error 2.47208000*10^(-7), which is better than 18 bits
3550      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3551                               getF32Constant(DAG, 0x3924b03e));
3552      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3553                               getF32Constant(DAG, 0x3ab24b87));
3554      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3555      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3556                               getF32Constant(DAG, 0x3c1d8c17));
3557      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3558      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3559                               getF32Constant(DAG, 0x3d634a1d));
3560      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3561      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3562                               getF32Constant(DAG, 0x3e75fe14));
3563      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3564      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3565                                getF32Constant(DAG, 0x3f317234));
3566      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3567      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3568                                getF32Constant(DAG, 0x3f800000));
3569      SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3570      SDValue TwoToFractionalPartOfX =
3571        DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3572
3573      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3574                           MVT::f32, TwoToFractionalPartOfX);
3575    }
3576  } else {
3577    // No special expansion.
3578    result = DAG.getNode(ISD::FPOW, dl,
3579                         getValue(I.getOperand(1)).getValueType(),
3580                         getValue(I.getOperand(1)),
3581                         getValue(I.getOperand(2)));
3582  }
3583
3584  setValue(&I, result);
3585}
3586
3587/// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
3588/// we want to emit this as a call to a named external function, return the name;
3589/// otherwise, lower it and return null.
3590const char *
3591SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
3592  DebugLoc dl = getCurDebugLoc();
3593  switch (Intrinsic) {
3594  default:
3595    // By default, turn this into a target intrinsic node.
3596    visitTargetIntrinsic(I, Intrinsic);
3597    return 0;
3598  case Intrinsic::vastart:  visitVAStart(I); return 0;
3599  case Intrinsic::vaend:    visitVAEnd(I); return 0;
3600  case Intrinsic::vacopy:   visitVACopy(I); return 0;
3601  case Intrinsic::returnaddress:
3602    setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
3603                             getValue(I.getOperand(1))));
3604    return 0;
3605  case Intrinsic::frameaddress:
3606    setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
3607                             getValue(I.getOperand(1))));
3608    return 0;
3609  case Intrinsic::setjmp:
3610    // If the target does not want the underscore prefix, the +1 skips the
3611    // leading '_' in the literal.
3612    return "_setjmp"+!TLI.usesUnderscoreSetJmp();
3613  case Intrinsic::longjmp:
3614    return "_longjmp"+!TLI.usesUnderscoreLongJmp();
3615  case Intrinsic::memcpy: {
3616    SDValue Op1 = getValue(I.getOperand(1));
3617    SDValue Op2 = getValue(I.getOperand(2));
3618    SDValue Op3 = getValue(I.getOperand(3));
3619    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3620    DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3621                              I.getOperand(1), 0, I.getOperand(2), 0));
3622    return 0;
3623  }
3624  case Intrinsic::memset: {
3625    SDValue Op1 = getValue(I.getOperand(1));
3626    SDValue Op2 = getValue(I.getOperand(2));
3627    SDValue Op3 = getValue(I.getOperand(3));
3628    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3629    DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
3630                              I.getOperand(1), 0));
3631    return 0;
3632  }
3633  case Intrinsic::memmove: {
3634    SDValue Op1 = getValue(I.getOperand(1));
3635    SDValue Op2 = getValue(I.getOperand(2));
3636    SDValue Op3 = getValue(I.getOperand(3));
3637    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3638
3639    // If the source and destination are known not to alias, we can
3640    // lower memmove as memcpy.
3641    uint64_t Size = -1ULL;
3642    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
3643      Size = C->getZExtValue();
3644    if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
3645        AliasAnalysis::NoAlias) {
3646      DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3647                                I.getOperand(1), 0, I.getOperand(2), 0));
3648      return 0;
3649    }
3650
3651    DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
3652                               I.getOperand(1), 0, I.getOperand(2), 0));
3653    return 0;
3654  }
3655  case Intrinsic::dbg_stoppoint:
3656  case Intrinsic::dbg_region_start:
3657  case Intrinsic::dbg_region_end:
3658  case Intrinsic::dbg_func_start:
3659    // FIXME - Remove these intrinsics once the dust settles.
3660    return 0;
3661  case Intrinsic::dbg_declare: {
3662    if (OptLevel != CodeGenOpt::None)
3663      // FIXME: Variable debug info is not supported here.
3664      return 0;
3665    DwarfWriter *DW = DAG.getDwarfWriter();
3666    if (!DW)
3667      return 0;
3668    DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
3669    if (!isValidDebugInfoIntrinsic(DI, CodeGenOpt::None))
3670      return 0;
3671
3672    MDNode *Variable = DI.getVariable();
3673    Value *Address = DI.getAddress();
3674    if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
3675      Address = BCI->getOperand(0);
3676    AllocaInst *AI = dyn_cast<AllocaInst>(Address);
3677    // Don't handle byval struct arguments or VLAs, for example.
3678    if (!AI)
3679      return 0;
3680    DenseMap<const AllocaInst*, int>::iterator SI =
3681      FuncInfo.StaticAllocaMap.find(AI);
3682    if (SI == FuncInfo.StaticAllocaMap.end())
3683      return 0; // VLAs.
3684    int FI = SI->second;
3685
3686    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3687    if (MMI) {
3688      MetadataContext &TheMetadata =
3689        DI.getParent()->getContext().getMetadata();
3690      unsigned MDDbgKind = TheMetadata.getMDKind("dbg");
3691      MDNode *Dbg = TheMetadata.getMD(MDDbgKind, &DI);
3692      MMI->setVariableDbgInfo(Variable, FI, Dbg);
3693    }
3694    return 0;
3695  }
3696  case Intrinsic::eh_exception: {
3697    // Insert the EXCEPTIONADDR instruction.
3698    assert(CurMBB->isLandingPad() && "Call to eh.exception not in landing pad!");
3699    SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3700    SDValue Ops[1];
3701    Ops[0] = DAG.getRoot();
3702    SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
3703    setValue(&I, Op);
3704    DAG.setRoot(Op.getValue(1));
3705    return 0;
3706  }
3707
3708  case Intrinsic::eh_selector: {
3709    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3710
3711    if (CurMBB->isLandingPad())
3712      AddCatchInfo(I, MMI, CurMBB);
3713    else {
3714#ifndef NDEBUG
3715      FuncInfo.CatchInfoLost.insert(&I);
3716#endif
3717      // FIXME: Mark exception selector register as live in.  Hack for PR1508.
3718      unsigned Reg = TLI.getExceptionSelectorRegister();
3719      if (Reg) CurMBB->addLiveIn(Reg);
3720    }
3721
3722    // Insert the EHSELECTION instruction.
3723    SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3724    SDValue Ops[2];
3725    Ops[0] = getValue(I.getOperand(1));
3726    Ops[1] = getRoot();
3727    SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
3728
3729    DAG.setRoot(Op.getValue(1));
3730
3731    setValue(&I, DAG.getSExtOrTrunc(Op, dl, MVT::i32));
3732    return 0;
3733  }
3734
3735  case Intrinsic::eh_typeid_for: {
3736    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3737
3738    if (MMI) {
3739      // Find the type id for the given typeinfo.
3740      GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
3741
3742      unsigned TypeID = MMI->getTypeIDFor(GV);
3743      setValue(&I, DAG.getConstant(TypeID, MVT::i32));
3744    } else {
3745      // Return something different from eh.selector.
3746      setValue(&I, DAG.getConstant(1, MVT::i32));
3747    }
3748
3749    return 0;
3750  }
3751
3752  case Intrinsic::eh_return_i32:
3753  case Intrinsic::eh_return_i64:
3754    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
3755      MMI->setCallsEHReturn(true);
3756      DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
3757                              MVT::Other,
3758                              getControlRoot(),
3759                              getValue(I.getOperand(1)),
3760                              getValue(I.getOperand(2))));
3761    } else {
3762      setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
3763    }
3764
3765    return 0;
3766  case Intrinsic::eh_unwind_init:
3767    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
3768      MMI->setCallsUnwindInit(true);
3769    }
3770
3771    return 0;
3772
3773  case Intrinsic::eh_dwarf_cfa: {
3774    EVT VT = getValue(I.getOperand(1)).getValueType();
3775    SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), dl,
3776                                        TLI.getPointerTy());
3777
3778    SDValue Offset = DAG.getNode(ISD::ADD, dl,
3779                                 TLI.getPointerTy(),
3780                                 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
3781                                             TLI.getPointerTy()),
3782                                 CfaArg);
3783    setValue(&I, DAG.getNode(ISD::ADD, dl,
3784                             TLI.getPointerTy(),
3785                             DAG.getNode(ISD::FRAMEADDR, dl,
3786                                         TLI.getPointerTy(),
3787                                         DAG.getConstant(0,
3788                                                         TLI.getPointerTy())),
3789                             Offset));
3790    return 0;
3791  }
3792  case Intrinsic::convertff:
3793  case Intrinsic::convertfsi:
3794  case Intrinsic::convertfui:
3795  case Intrinsic::convertsif:
3796  case Intrinsic::convertuif:
3797  case Intrinsic::convertss:
3798  case Intrinsic::convertsu:
3799  case Intrinsic::convertus:
3800  case Intrinsic::convertuu: {
3801    ISD::CvtCode Code = ISD::CVT_INVALID;
3802    switch (Intrinsic) {
3803    case Intrinsic::convertff:  Code = ISD::CVT_FF; break;
3804    case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
3805    case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
3806    case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
3807    case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
3808    case Intrinsic::convertss:  Code = ISD::CVT_SS; break;
3809    case Intrinsic::convertsu:  Code = ISD::CVT_SU; break;
3810    case Intrinsic::convertus:  Code = ISD::CVT_US; break;
3811    case Intrinsic::convertuu:  Code = ISD::CVT_UU; break;
3812    }
3813    EVT DestVT = TLI.getValueType(I.getType());
3814    Value* Op1 = I.getOperand(1);
3815    setValue(&I, DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
3816                                DAG.getValueType(DestVT),
3817                                DAG.getValueType(getValue(Op1).getValueType()),
3818                                getValue(I.getOperand(2)),
3819                                getValue(I.getOperand(3)),
3820                                Code));
3821    return 0;
3822  }
3823
3824  case Intrinsic::sqrt:
3825    setValue(&I, DAG.getNode(ISD::FSQRT, dl,
3826                             getValue(I.getOperand(1)).getValueType(),
3827                             getValue(I.getOperand(1))));
3828    return 0;
3829  case Intrinsic::powi:
3830    setValue(&I, DAG.getNode(ISD::FPOWI, dl,
3831                             getValue(I.getOperand(1)).getValueType(),
3832                             getValue(I.getOperand(1)),
3833                             getValue(I.getOperand(2))));
3834    return 0;
3835  case Intrinsic::sin:
3836    setValue(&I, DAG.getNode(ISD::FSIN, dl,
3837                             getValue(I.getOperand(1)).getValueType(),
3838                             getValue(I.getOperand(1))));
3839    return 0;
3840  case Intrinsic::cos:
3841    setValue(&I, DAG.getNode(ISD::FCOS, dl,
3842                             getValue(I.getOperand(1)).getValueType(),
3843                             getValue(I.getOperand(1))));
3844    return 0;
3845  case Intrinsic::log:
3846    visitLog(I);
3847    return 0;
3848  case Intrinsic::log2:
3849    visitLog2(I);
3850    return 0;
3851  case Intrinsic::log10:
3852    visitLog10(I);
3853    return 0;
3854  case Intrinsic::exp:
3855    visitExp(I);
3856    return 0;
3857  case Intrinsic::exp2:
3858    visitExp2(I);
3859    return 0;
3860  case Intrinsic::pow:
3861    visitPow(I);
3862    return 0;
3863  case Intrinsic::pcmarker: {
3864    SDValue Tmp = getValue(I.getOperand(1));
3865    DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
3866    return 0;
3867  }
3868  case Intrinsic::readcyclecounter: {
3869    SDValue Op = getRoot();
3870    SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, dl,
3871                              DAG.getVTList(MVT::i64, MVT::Other),
3872                              &Op, 1);
3873    setValue(&I, Tmp);
3874    DAG.setRoot(Tmp.getValue(1));
3875    return 0;
3876  }
3877  case Intrinsic::bswap:
3878    setValue(&I, DAG.getNode(ISD::BSWAP, dl,
3879                             getValue(I.getOperand(1)).getValueType(),
3880                             getValue(I.getOperand(1))));
3881    return 0;
3882  case Intrinsic::cttz: {
3883    SDValue Arg = getValue(I.getOperand(1));
3884    EVT Ty = Arg.getValueType();
3885    SDValue result = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
3886    setValue(&I, result);
3887    return 0;
3888  }
3889  case Intrinsic::ctlz: {
3890    SDValue Arg = getValue(I.getOperand(1));
3891    EVT Ty = Arg.getValueType();
3892    SDValue result = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
3893    setValue(&I, result);
3894    return 0;
3895  }
3896  case Intrinsic::ctpop: {
3897    SDValue Arg = getValue(I.getOperand(1));
3898    EVT Ty = Arg.getValueType();
3899    SDValue result = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
3900    setValue(&I, result);
3901    return 0;
3902  }
3903  case Intrinsic::stacksave: {
3904    SDValue Op = getRoot();
3905    SDValue Tmp = DAG.getNode(ISD::STACKSAVE, dl,
3906              DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
3907    setValue(&I, Tmp);
3908    DAG.setRoot(Tmp.getValue(1));
3909    return 0;
3910  }
3911  case Intrinsic::stackrestore: {
3912    SDValue Tmp = getValue(I.getOperand(1));
3913    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Tmp));
3914    return 0;
3915  }
3916  case Intrinsic::stackprotector: {
3917    // Emit code into the DAG to store the stack guard onto the stack.
3918    MachineFunction &MF = DAG.getMachineFunction();
3919    MachineFrameInfo *MFI = MF.getFrameInfo();
3920    EVT PtrTy = TLI.getPointerTy();
3921
3922    SDValue Src = getValue(I.getOperand(1));   // The guard's value.
3923    AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
3924
3925    int FI = FuncInfo.StaticAllocaMap[Slot];
3926    MFI->setStackProtectorIndex(FI);
3927
3928    SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
3929
3930    // Store the stack protector onto the stack.
3931    SDValue Result = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
3932                                  PseudoSourceValue::getFixedStack(FI),
3933                                  0, true);
3934    setValue(&I, Result);
3935    DAG.setRoot(Result);
3936    return 0;
3937  }
3938  case Intrinsic::objectsize: {
3939    // If we don't know by now, we're never going to know.
3940    ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(2));
3941
3942    assert(CI && "Non-constant type in __builtin_object_size?");
3943
3944    SDValue Arg = getValue(I.getOperand(0));
3945    EVT Ty = Arg.getValueType();
3946
3947    if (CI->getZExtValue() < 2)
3948      setValue(&I, DAG.getConstant(-1ULL, Ty));
3949    else
3950      setValue(&I, DAG.getConstant(0, Ty));
3951    return 0;
3952  }
3953  case Intrinsic::var_annotation:
3954    // Discard annotate attributes
3955    return 0;
3956
3957  case Intrinsic::init_trampoline: {
3958    const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
3959
3960    SDValue Ops[6];
3961    Ops[0] = getRoot();
3962    Ops[1] = getValue(I.getOperand(1));
3963    Ops[2] = getValue(I.getOperand(2));
3964    Ops[3] = getValue(I.getOperand(3));
3965    Ops[4] = DAG.getSrcValue(I.getOperand(1));
3966    Ops[5] = DAG.getSrcValue(F);
3967
3968    SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, dl,
3969                              DAG.getVTList(TLI.getPointerTy(), MVT::Other),
3970                              Ops, 6);
3971
3972    setValue(&I, Tmp);
3973    DAG.setRoot(Tmp.getValue(1));
3974    return 0;
3975  }
3976
3977  case Intrinsic::gcroot:
3978    if (GFI) {
3979      Value *Alloca = I.getOperand(1);
3980      Constant *TypeMap = cast<Constant>(I.getOperand(2));
3981
3982      FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
3983      GFI->addStackRoot(FI->getIndex(), TypeMap);
3984    }
3985    return 0;
3986
3987  case Intrinsic::gcread:
3988  case Intrinsic::gcwrite:
3989    llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
3990    return 0;
3991
3992  case Intrinsic::flt_rounds: {
3993    setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
3994    return 0;
3995  }
3996
3997  case Intrinsic::trap: {
3998    DAG.setRoot(DAG.getNode(ISD::TRAP, dl,MVT::Other, getRoot()));
3999    return 0;
4000  }
4001
4002  case Intrinsic::uadd_with_overflow:
4003    return implVisitAluOverflow(I, ISD::UADDO);
4004  case Intrinsic::sadd_with_overflow:
4005    return implVisitAluOverflow(I, ISD::SADDO);
4006  case Intrinsic::usub_with_overflow:
4007    return implVisitAluOverflow(I, ISD::USUBO);
4008  case Intrinsic::ssub_with_overflow:
4009    return implVisitAluOverflow(I, ISD::SSUBO);
4010  case Intrinsic::umul_with_overflow:
4011    return implVisitAluOverflow(I, ISD::UMULO);
4012  case Intrinsic::smul_with_overflow:
4013    return implVisitAluOverflow(I, ISD::SMULO);
4014
4015  case Intrinsic::prefetch: {
4016    SDValue Ops[4];
4017    Ops[0] = getRoot();
4018    Ops[1] = getValue(I.getOperand(1));
4019    Ops[2] = getValue(I.getOperand(2));
4020    Ops[3] = getValue(I.getOperand(3));
4021    DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
4022    return 0;
4023  }
4024
4025  case Intrinsic::memory_barrier: {
4026    SDValue Ops[6];
4027    Ops[0] = getRoot();
4028    for (int x = 1; x < 6; ++x)
4029      Ops[x] = getValue(I.getOperand(x));
4030
4031    DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
4032    return 0;
4033  }
4034  case Intrinsic::atomic_cmp_swap: {
4035    SDValue Root = getRoot();
4036    SDValue L =
4037      DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
4038                    getValue(I.getOperand(2)).getValueType().getSimpleVT(),
4039                    Root,
4040                    getValue(I.getOperand(1)),
4041                    getValue(I.getOperand(2)),
4042                    getValue(I.getOperand(3)),
4043                    I.getOperand(1));
4044    setValue(&I, L);
4045    DAG.setRoot(L.getValue(1));
4046    return 0;
4047  }
4048  case Intrinsic::atomic_load_add:
4049    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
4050  case Intrinsic::atomic_load_sub:
4051    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
4052  case Intrinsic::atomic_load_or:
4053    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
4054  case Intrinsic::atomic_load_xor:
4055    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
4056  case Intrinsic::atomic_load_and:
4057    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
4058  case Intrinsic::atomic_load_nand:
4059    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
4060  case Intrinsic::atomic_load_max:
4061    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
4062  case Intrinsic::atomic_load_min:
4063    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
4064  case Intrinsic::atomic_load_umin:
4065    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
4066  case Intrinsic::atomic_load_umax:
4067    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
4068  case Intrinsic::atomic_swap:
4069    return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
4070
4071  case Intrinsic::invariant_start:
4072  case Intrinsic::lifetime_start:
4073    // Discard region information.
4074    setValue(&I, DAG.getUNDEF(TLI.getPointerTy()));
4075    return 0;
4076  case Intrinsic::invariant_end:
4077  case Intrinsic::lifetime_end:
4078    // Discard region information.
4079    return 0;
4080  }
4081}
4082
4083/// Test if the given instruction is in a position to be optimized
4084/// with a tail-call. This roughly means that it's in a block with
4085/// a return and there's nothing that needs to be scheduled
4086/// between it and the return.
4087///
4088/// This function only tests target-independent requirements.
4089/// For target-dependent requirements, a target should override
4090/// TargetLowering::IsEligibleForTailCallOptimization.
4091///
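/// For illustration (not part of the original source), a call such as the
/// following is in tail-call position: only the return follows it and the
/// return value is the unmodified call result (no-op truncates and pointer
/// bitcasts in between are also tolerated by the checks below):
///
///   %v = tail call i8* @callee(i8* %p)
///   ret i8* %v
///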
4092static bool
4093isInTailCallPosition(const Instruction *I, Attributes CalleeRetAttr,
4094                     const TargetLowering &TLI) {
4095  const BasicBlock *ExitBB = I->getParent();
4096  const TerminatorInst *Term = ExitBB->getTerminator();
4097  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
4098  const Function *F = ExitBB->getParent();
4099
4100  // The block must end in a return statement or an unreachable.
4101  if (!Ret && !isa<UnreachableInst>(Term)) return false;
4102
4103  // If I will have a chain, make sure no other instruction that will have a
4104  // chain interposes between I and the return.
4105  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
4106      !I->isSafeToSpeculativelyExecute())
4107    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
4108         --BBI) {
4109      if (&*BBI == I)
4110        break;
4111      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
4112          !BBI->isSafeToSpeculativelyExecute())
4113        return false;
4114    }
4115
4116  // If the block ends with a void return or unreachable, it doesn't matter
4117  // what the call's return type is.
4118  if (!Ret || Ret->getNumOperands() == 0) return true;
4119
4120  // If the return value is undef, it doesn't matter what the call's
4121  // return type is.
4122  if (isa<UndefValue>(Ret->getOperand(0))) return true;
4123
4124  // Conservatively require the attributes of the call to match those of
4125  // the return. Ignore noalias because it doesn't affect the call sequence.
4126  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
4127  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
4128    return false;
4129
4130  // Otherwise, make sure the unmodified return value of I is the return value.
4131  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
4132       U = dyn_cast<Instruction>(U->getOperand(0))) {
4133    if (!U)
4134      return false;
4135    if (!U->hasOneUse())
4136      return false;
4137    if (U == I)
4138      break;
4139    // Check for a truly no-op truncate.
4140    if (isa<TruncInst>(U) &&
4141        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
4142      continue;
4143    // Check for a truly no-op bitcast.
4144    if (isa<BitCastInst>(U) &&
4145        (U->getOperand(0)->getType() == U->getType() ||
4146         (isa<PointerType>(U->getOperand(0)->getType()) &&
4147          isa<PointerType>(U->getType()))))
4148      continue;
4149    // Otherwise it's not a true no-op.
4150    return false;
4151  }
4152
4153  return true;
4154}
4155
4156void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
4157                                      bool isTailCall,
4158                                      MachineBasicBlock *LandingPad) {
4159  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4160  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4161  const Type *RetTy = FTy->getReturnType();
4162  MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4163  unsigned BeginLabel = 0, EndLabel = 0;
4164
4165  TargetLowering::ArgListTy Args;
4166  TargetLowering::ArgListEntry Entry;
4167  Args.reserve(CS.arg_size());
4168
4169  // Check whether the function can return without sret-demotion.
4170  SmallVector<EVT, 4> OutVTs;
4171  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
4172  SmallVector<uint64_t, 4> Offsets;
4173  getReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
4174    OutVTs, OutsFlags, TLI, &Offsets);
4175
4176
4177  bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
4178                        FTy->isVarArg(), OutVTs, OutsFlags, DAG);
4179
4180  SDValue DemoteStackSlot;
4181
4182  if (!CanLowerReturn) {
4183    uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(
4184                      FTy->getReturnType());
4185    unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(
4186                      FTy->getReturnType());
4187    MachineFunction &MF = DAG.getMachineFunction();
4188    int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
4189    const Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
4190
4191    DemoteStackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
4192    Entry.Node = DemoteStackSlot;
4193    Entry.Ty = StackSlotPtrType;
4194    Entry.isSExt = false;
4195    Entry.isZExt = false;
4196    Entry.isInReg = false;
4197    Entry.isSRet = true;
4198    Entry.isNest = false;
4199    Entry.isByVal = false;
4200    Entry.Alignment = Align;
4201    Args.push_back(Entry);
4202    RetTy = Type::getVoidTy(FTy->getContext());
4203  }
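  // From here on the demoted call formally returns void; its real result is
  // reloaded from DemoteStackSlot after the call has been lowered (see below).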
4204
4205  for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4206       i != e; ++i) {
4207    SDValue ArgNode = getValue(*i);
4208    Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4209
4210    unsigned attrInd = i - CS.arg_begin() + 1;
4211    Entry.isSExt  = CS.paramHasAttr(attrInd, Attribute::SExt);
4212    Entry.isZExt  = CS.paramHasAttr(attrInd, Attribute::ZExt);
4213    Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4214    Entry.isSRet  = CS.paramHasAttr(attrInd, Attribute::StructRet);
4215    Entry.isNest  = CS.paramHasAttr(attrInd, Attribute::Nest);
4216    Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4217    Entry.Alignment = CS.getParamAlignment(attrInd);
4218    Args.push_back(Entry);
4219  }
4220
4221  if (LandingPad && MMI) {
4222    // Insert a label before the invoke call to mark the try range.  This can be
4223    // used to detect deletion of the invoke via the MachineModuleInfo.
4224    BeginLabel = MMI->NextLabelID();
4225
4226    // Both PendingLoads and PendingExports must be flushed here;
4227    // this call might not return.
4228    (void)getRoot();
4229    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4230                             getControlRoot(), BeginLabel));
4231  }
4232
4233  // Check if target-independent constraints permit a tail call here.
4234  // Target-dependent constraints are checked within TLI.LowerCallTo.
4235  if (isTailCall &&
4236      !isInTailCallPosition(CS.getInstruction(),
4237                            CS.getAttributes().getRetAttributes(),
4238                            TLI))
4239    isTailCall = false;
4240
4241  std::pair<SDValue,SDValue> Result =
4242    TLI.LowerCallTo(getRoot(), RetTy,
4243                    CS.paramHasAttr(0, Attribute::SExt),
4244                    CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
4245                    CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
4246                    CS.getCallingConv(),
4247                    isTailCall,
4248                    !CS.getInstruction()->use_empty(),
4249                    Callee, Args, DAG, getCurDebugLoc());
4250  assert((isTailCall || Result.second.getNode()) &&
4251         "Non-null chain expected with non-tail call!");
4252  assert((Result.second.getNode() || !Result.first.getNode()) &&
4253         "Null value expected with tail call!");
4254  if (Result.first.getNode())
4255    setValue(CS.getInstruction(), Result.first);
4256  else if (!CanLowerReturn && Result.second.getNode()) {
4257    // The instruction result is the result of loading from the
4258    // hidden sret parameter.
4259    SmallVector<EVT, 1> PVTs;
4260    const Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
4261
4262    ComputeValueVTs(TLI, PtrRetTy, PVTs);
4263    assert(PVTs.size() == 1 && "Pointers should fit in one register");
4264    EVT PtrVT = PVTs[0];
4265    unsigned NumValues = OutVTs.size();
4266    SmallVector<SDValue, 4> Values(NumValues);
4267    SmallVector<SDValue, 4> Chains(NumValues);
4268
4269    for (unsigned i = 0; i < NumValues; ++i) {
4270      SDValue L = DAG.getLoad(OutVTs[i], getCurDebugLoc(), Result.second,
4271        DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, DemoteStackSlot,
4272        DAG.getConstant(Offsets[i], PtrVT)),
4273        NULL, Offsets[i], false, 1);
4274      Values[i] = L;
4275      Chains[i] = L.getValue(1);
4276    }
4277    SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
4278                                MVT::Other, &Chains[0], NumValues);
4279    PendingLoads.push_back(Chain);
4280
4281    setValue(CS.getInstruction(), DAG.getNode(ISD::MERGE_VALUES,
4282             getCurDebugLoc(), DAG.getVTList(&OutVTs[0], NumValues),
4283             &Values[0], NumValues));
4284  }
4285  // As a special case, a null chain means that a tail call has
4286  // been emitted and the DAG root is already updated.
4287  if (Result.second.getNode())
4288    DAG.setRoot(Result.second);
4289  else
4290    HasTailCall = true;
4291
4292  if (LandingPad && MMI) {
4293    // Insert a label at the end of the invoke call to mark the try range.  This
4294    // can be used to detect deletion of the invoke via the MachineModuleInfo.
4295    EndLabel = MMI->NextLabelID();
4296    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4297                             getRoot(), EndLabel));
4298
4299    // Inform MachineModuleInfo of range.
4300    MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
4301  }
4302}
4303
4304
4305void SelectionDAGBuilder::visitCall(CallInst &I) {
4306  const char *RenameFn = 0;
4307  if (Function *F = I.getCalledFunction()) {
4308    if (F->isDeclaration()) {
4309      const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
4310      if (II) {
4311        if (unsigned IID = II->getIntrinsicID(F)) {
4312          RenameFn = visitIntrinsicCall(I, IID);
4313          if (!RenameFn)
4314            return;
4315        }
4316      }
4317      if (unsigned IID = F->getIntrinsicID()) {
4318        RenameFn = visitIntrinsicCall(I, IID);
4319        if (!RenameFn)
4320          return;
4321      }
4322    }
4323
4324    // Check for well-known libc/libm calls.  If the function is internal, it
4325    // can't be a library call.
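    // For example (illustrative), an external call to sin() whose argument and
    // result are both floating point and which only reads memory is turned
    // directly into an ISD::FSIN node below.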
4326    if (!F->hasLocalLinkage() && F->hasName()) {
4327      StringRef Name = F->getName();
4328      if (Name == "copysign" || Name == "copysignf") {
4329        if (I.getNumOperands() == 3 &&   // Basic sanity checks.
4330            I.getOperand(1)->getType()->isFloatingPoint() &&
4331            I.getType() == I.getOperand(1)->getType() &&
4332            I.getType() == I.getOperand(2)->getType()) {
4333          SDValue LHS = getValue(I.getOperand(1));
4334          SDValue RHS = getValue(I.getOperand(2));
4335          setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
4336                                   LHS.getValueType(), LHS, RHS));
4337          return;
4338        }
4339      } else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
4340        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
4341            I.getOperand(1)->getType()->isFloatingPoint() &&
4342            I.getType() == I.getOperand(1)->getType()) {
4343          SDValue Tmp = getValue(I.getOperand(1));
4344          setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
4345                                   Tmp.getValueType(), Tmp));
4346          return;
4347        }
4348      } else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
4349        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
4350            I.getOperand(1)->getType()->isFloatingPoint() &&
4351            I.getType() == I.getOperand(1)->getType() &&
4352            I.onlyReadsMemory()) {
4353          SDValue Tmp = getValue(I.getOperand(1));
4354          setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
4355                                   Tmp.getValueType(), Tmp));
4356          return;
4357        }
4358      } else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
4359        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
4360            I.getOperand(1)->getType()->isFloatingPoint() &&
4361            I.getType() == I.getOperand(1)->getType() &&
4362            I.onlyReadsMemory()) {
4363          SDValue Tmp = getValue(I.getOperand(1));
4364          setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
4365                                   Tmp.getValueType(), Tmp));
4366          return;
4367        }
4368      } else if (Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl") {
4369        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
4370            I.getOperand(1)->getType()->isFloatingPoint() &&
4371            I.getType() == I.getOperand(1)->getType() &&
4372            I.onlyReadsMemory()) {
4373          SDValue Tmp = getValue(I.getOperand(1));
4374          setValue(&I, DAG.getNode(ISD::FSQRT, getCurDebugLoc(),
4375                                   Tmp.getValueType(), Tmp));
4376          return;
4377        }
4378      }
4379    }
4380  } else if (isa<InlineAsm>(I.getOperand(0))) {
4381    visitInlineAsm(&I);
4382    return;
4383  }
4384
4385  SDValue Callee;
4386  if (!RenameFn)
4387    Callee = getValue(I.getOperand(0));
4388  else
4389    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
4390
4391  // Check if we can potentially perform a tail call. More detailed
4392  // checking will be done within LowerCallTo, after more information
4393  // about the call is known.
4394  bool isTailCall = PerformTailCallOpt && I.isTailCall();
4395
4396  LowerCallTo(&I, Callee, isTailCall);
4397}
4398
4399
4400/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
4401/// this value and returns the result as a ValueVT value.  This uses
4402/// Chain/Flag as the input and updates them for the output Chain/Flag.
4403/// If the Flag pointer is NULL, no flag is used.
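/// For example (illustrative), an i64 value that was split across two i32
/// registers on a 32-bit target is reassembled here via getCopyFromParts.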
4404SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
4405                                      SDValue &Chain,
4406                                      SDValue *Flag) const {
4407  // Assemble the legal parts into the final values.
4408  SmallVector<SDValue, 4> Values(ValueVTs.size());
4409  SmallVector<SDValue, 8> Parts;
4410  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4411    // Copy the legal parts from the registers.
4412    EVT ValueVT = ValueVTs[Value];
4413    unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
4414    EVT RegisterVT = RegVTs[Value];
4415
4416    Parts.resize(NumRegs);
4417    for (unsigned i = 0; i != NumRegs; ++i) {
4418      SDValue P;
4419      if (Flag == 0)
4420        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
4421      else {
4422        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
4423        *Flag = P.getValue(2);
4424      }
4425      Chain = P.getValue(1);
4426
4427      // If the source register was virtual and if we know something about it,
4428      // add an assert node.
4429      if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
4430          RegisterVT.isInteger() && !RegisterVT.isVector()) {
4431        unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
4432        FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
4433        if (FLI.LiveOutRegInfo.size() > SlotNo) {
4434          FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
4435
4436          unsigned RegSize = RegisterVT.getSizeInBits();
4437          unsigned NumSignBits = LOI.NumSignBits;
4438          unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
4439
4440          // FIXME: We capture more information than the dag can represent.  For
4441          // now, just use the tightest assertzext/assertsext possible.
4442          bool isSExt = true;
4443          EVT FromVT(MVT::Other);
4444          if (NumSignBits == RegSize)
4445            isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
4446          else if (NumZeroBits >= RegSize-1)
4447            isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
4448          else if (NumSignBits > RegSize-8)
4449            isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
4450          else if (NumZeroBits >= RegSize-8)
4451            isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
4452          else if (NumSignBits > RegSize-16)
4453            isSExt = true, FromVT = MVT::i16;  // ASSERT SEXT 16
4454          else if (NumZeroBits >= RegSize-16)
4455            isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
4456          else if (NumSignBits > RegSize-32)
4457            isSExt = true, FromVT = MVT::i32;  // ASSERT SEXT 32
4458          else if (NumZeroBits >= RegSize-32)
4459            isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
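          // For example (illustrative), a 32-bit virtual register known to
          // have 25 sign bits (and no stronger zero-bit information) is
          // tagged with an AssertSext to i8, the tightest width the chain
          // above can prove.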
4460
4461          if (FromVT != MVT::Other) {
4462            P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
4463                            RegisterVT, P, DAG.getValueType(FromVT));
4464
4465          }
4466        }
4467      }
4468
4469      Parts[i] = P;
4470    }
4471
4472    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
4473                                     NumRegs, RegisterVT, ValueVT);
4474    Part += NumRegs;
4475    Parts.clear();
4476  }
4477
4478  return DAG.getNode(ISD::MERGE_VALUES, dl,
4479                     DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
4480                     &Values[0], ValueVTs.size());
4481}
4482
4483/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
4484/// specified value into the registers specified by this object.  This uses
4485/// Chain/Flag as the input and updates them for the output Chain/Flag.
4486/// If the Flag pointer is NULL, no flag is used.
4487void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
4488                                 SDValue &Chain, SDValue *Flag) const {
4489  // Get the list of the value's legal parts.
4490  unsigned NumRegs = Regs.size();
4491  SmallVector<SDValue, 8> Parts(NumRegs);
4492  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4493    EVT ValueVT = ValueVTs[Value];
4494    unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
4495    EVT RegisterVT = RegVTs[Value];
4496
4497    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
4498                   &Parts[Part], NumParts, RegisterVT);
4499    Part += NumParts;
4500  }
4501
4502  // Copy the parts into the registers.
4503  SmallVector<SDValue, 8> Chains(NumRegs);
4504  for (unsigned i = 0; i != NumRegs; ++i) {
4505    SDValue Part;
4506    if (Flag == 0)
4507      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
4508    else {
4509      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
4510      *Flag = Part.getValue(1);
4511    }
4512    Chains[i] = Part.getValue(0);
4513  }
4514
4515  if (NumRegs == 1 || Flag)
4516    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
4517    // flagged to it. That is, the CopyToReg nodes and the user are considered
4518    // a single scheduling unit. If we create a TokenFactor and return it as
4519    // chain, then the TokenFactor is both a predecessor (operand) of the
4520    // user as well as a successor (the TF operands are flagged to the user).
4521    // c1, f1 = CopyToReg
4522    // c2, f2 = CopyToReg
4523    // c3     = TokenFactor c1, c2
4524    // ...
4525    //        = op c3, ..., f2
4526    Chain = Chains[NumRegs-1];
4527  else
4528    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
4529}
4530
4531/// AddInlineAsmOperands - Add this value to the specified inlineasm node
4532/// operand list.  This adds the code marker and includes the number of
4533/// values added into it.
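/// The flag word built below packs the operand code in the low 3 bits and the
/// register count in bits 3-15; when HasMatching is set, bit 31 is set and the
/// matched operand index is placed in bits 16 and up.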
4534void RegsForValue::AddInlineAsmOperands(unsigned Code,
4535                                        bool HasMatching,unsigned MatchingIdx,
4536                                        SelectionDAG &DAG,
4537                                        std::vector<SDValue> &Ops) const {
4538  EVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
4539  assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
4540  unsigned Flag = Code | (Regs.size() << 3);
4541  if (HasMatching)
4542    Flag |= 0x80000000 | (MatchingIdx << 16);
4543  Ops.push_back(DAG.getTargetConstant(Flag, IntPtrTy));
4544  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
4545    unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
4546    EVT RegisterVT = RegVTs[Value];
4547    for (unsigned i = 0; i != NumRegs; ++i) {
4548      assert(Reg < Regs.size() && "Mismatch in # registers expected");
4549      Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
4550    }
4551  }
4552}
4553
4554/// isAllocatableRegister - If the specified register is safe to allocate,
4555/// i.e. it isn't a stack pointer or some other special register, return the
4556/// register class for the register.  Otherwise, return null.
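/// For example (illustrative), a reserved register such as the stack pointer
/// typically does not appear in any class's allocation order and is therefore
/// rejected, while an ordinary general-purpose register is returned along with
/// its register class.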
4557static const TargetRegisterClass *
4558isAllocatableRegister(unsigned Reg, MachineFunction &MF,
4559                      const TargetLowering &TLI,
4560                      const TargetRegisterInfo *TRI) {
4561  EVT FoundVT = MVT::Other;
4562  const TargetRegisterClass *FoundRC = 0;
4563  for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
4564       E = TRI->regclass_end(); RCI != E; ++RCI) {
4565    EVT ThisVT = MVT::Other;
4566
4567    const TargetRegisterClass *RC = *RCI;
4568    // If none of the value types for this register class are valid, we
4569    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
4570    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
4571         I != E; ++I) {
4572      if (TLI.isTypeLegal(*I)) {
4573        // If we have already found this register in a different register class,
4574        // choose the one with the largest VT specified.  For example, on
4575        // PowerPC, we favor f64 register classes over f32.
4576        if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
4577          ThisVT = *I;
4578          break;
4579        }
4580      }
4581    }
4582
4583    if (ThisVT == MVT::Other) continue;
4584
4585    // NOTE: This isn't ideal.  In particular, this might allocate the
4586    // frame pointer in functions that need it (because it hasn't been taken
4587    // out of the allocation order yet, since a variable-sized allocation hasn't
4588    // been seen).  This is a slight code pessimization, but should still work.
4589    for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
4590         E = RC->allocation_order_end(MF); I != E; ++I)
4591      if (*I == Reg) {
4592        // We found a matching register class.  Keep looking at others in case
4593        // we find one with larger registers that this physreg is also in.
4594        FoundRC = RC;
4595        FoundVT = ThisVT;
4596        break;
4597      }
4598  }
4599  return FoundRC;
4600}
4601
4602
4603namespace llvm {
4604/// AsmOperandInfo - This contains information for each constraint that we are
4605/// lowering.
4606class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
4607    public TargetLowering::AsmOperandInfo {
4608public:
4609  /// CallOperand - If this is the result output operand or a clobber
4610  /// this is null, otherwise it is the incoming operand to the CallInst.
4611  /// This gets modified as the asm is processed.
4612  SDValue CallOperand;
4613
4614  /// AssignedRegs - If this is a register or register class operand, this
4615  /// contains the set of registers corresponding to the operand.
4616  RegsForValue AssignedRegs;
4617
4618  explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
4619    : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
4620  }
4621
4622  /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
4623  /// busy in OutputRegs/InputRegs.
4624  void MarkAllocatedRegs(bool isOutReg, bool isInReg,
4625                         std::set<unsigned> &OutputRegs,
4626                         std::set<unsigned> &InputRegs,
4627                         const TargetRegisterInfo &TRI) const {
4628    if (isOutReg) {
4629      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4630        MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
4631    }
4632    if (isInReg) {
4633      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4634        MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
4635    }
4636  }
4637
4638  /// getCallOperandValEVT - Return the EVT of the Value* that this operand
4639  /// corresponds to.  If there is no Value* for this operand, it returns
4640  /// MVT::Other.
4641  EVT getCallOperandValEVT(LLVMContext &Context,
4642                           const TargetLowering &TLI,
4643                           const TargetData *TD) const {
4644    if (CallOperandVal == 0) return MVT::Other;
4645
4646    if (isa<BasicBlock>(CallOperandVal))
4647      return TLI.getPointerTy();
4648
4649    const llvm::Type *OpTy = CallOperandVal->getType();
4650
4651    // If this is an indirect operand, the operand is a pointer to the
4652    // accessed type.
4653    if (isIndirect)
4654      OpTy = cast<PointerType>(OpTy)->getElementType();
4655
4656    // If OpTy is not a single value, it may be a struct/union that we
4657    // can tile with integers.
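    // For example (illustrative), an 8-byte struct operand is treated here as
    // a single i64 for the purpose of picking a register type.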
4658    if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4659      unsigned BitSize = TD->getTypeSizeInBits(OpTy);
4660      switch (BitSize) {
4661      default: break;
4662      case 1:
4663      case 8:
4664      case 16:
4665      case 32:
4666      case 64:
4667      case 128:
4668        OpTy = IntegerType::get(Context, BitSize);
4669        break;
4670      }
4671    }
4672
4673    return TLI.getValueType(OpTy, true);
4674  }
4675
4676private:
4677  /// MarkRegAndAliases - Mark the specified register and all aliases in the
4678  /// specified set.
4679  static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
4680                                const TargetRegisterInfo &TRI) {
4681    assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
4682    Regs.insert(Reg);
4683    if (const unsigned *Aliases = TRI.getAliasSet(Reg))
4684      for (; *Aliases; ++Aliases)
4685        Regs.insert(*Aliases);
4686  }
4687};
4688} // end llvm namespace.
4689
4690
4691/// GetRegistersForValue - Assign registers (virtual or physical) for the
4692/// specified operand.  We prefer to assign virtual registers, to allow the
4693/// register allocator to handle the assignment process.  However, if the asm
4694/// uses features that we can't model on machineinstrs, we have SDISel do the
4695/// allocation.  This produces generally horrible, but correct, code.
4696///
4697///   OpInfo describes the operand.
4698///   Input and OutputRegs are the set of already allocated physical registers.
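///   For example (illustrative), a constraint like "{r17}" names one specific
///   physical register, while a plain register-class constraint such as "r"
///   is normally satisfied below with freshly created virtual registers.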
4699///
4700void SelectionDAGBuilder::
4701GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
4702                     std::set<unsigned> &OutputRegs,
4703                     std::set<unsigned> &InputRegs) {
4704  LLVMContext &Context = FuncInfo.Fn->getContext();
4705
4706  // Compute whether this value requires an input register, an output register,
4707  // or both.
4708  bool isOutReg = false;
4709  bool isInReg = false;
4710  switch (OpInfo.Type) {
4711  case InlineAsm::isOutput:
4712    isOutReg = true;
4713
4714    // If there is an input constraint that matches this, we need to reserve
4715    // the input register so no other inputs allocate to it.
4716    isInReg = OpInfo.hasMatchingInput();
4717    break;
4718  case InlineAsm::isInput:
4719    isInReg = true;
4720    isOutReg = false;
4721    break;
4722  case InlineAsm::isClobber:
4723    isOutReg = true;
4724    isInReg = true;
4725    break;
4726  }
4727
4728
4729  MachineFunction &MF = DAG.getMachineFunction();
4730  SmallVector<unsigned, 4> Regs;
4731
4732  // If this is a constraint for a single physreg, or a constraint for a
4733  // register class, find it.
4734  std::pair<unsigned, const TargetRegisterClass*> PhysReg =
4735    TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
4736                                     OpInfo.ConstraintVT);
4737
4738  unsigned NumRegs = 1;
4739  if (OpInfo.ConstraintVT != MVT::Other) {
4740    // If this is an FP input in an integer register (or vice versa), insert a
4741    // bitcast of the input value.  More generally, handle any case where the
4742    // input value disagrees with the register class we plan to stick this in.
4743    if (OpInfo.Type == InlineAsm::isInput &&
4744        PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
4745      // Try to convert to the first EVT that the reg class contains.  If the
4746      // types are identical size, use a bitcast to convert (e.g. two differing
4747      // vector types).
4748      EVT RegVT = *PhysReg.second->vt_begin();
4749      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
4750        OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4751                                         RegVT, OpInfo.CallOperand);
4752        OpInfo.ConstraintVT = RegVT;
4753      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
4754        // If the input is a FP value and we want it in FP registers, do a
4755        // bitcast to the corresponding integer type.  This turns an f64 value
4756        // into i64, which can be passed with two i32 values on a 32-bit
4757        // machine.
4758        RegVT = EVT::getIntegerVT(Context,
4759                                  OpInfo.ConstraintVT.getSizeInBits());
4760        OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4761                                         RegVT, OpInfo.CallOperand);
4762        OpInfo.ConstraintVT = RegVT;
4763      }
4764    }
4765
4766    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
4767  }
4768
4769  EVT RegVT;
4770  EVT ValueVT = OpInfo.ConstraintVT;
4771
4772  // If this is a constraint for a specific physical register, like {r17},
4773  // assign it now.
4774  if (unsigned AssignedReg = PhysReg.first) {
4775    const TargetRegisterClass *RC = PhysReg.second;
4776    if (OpInfo.ConstraintVT == MVT::Other)
4777      ValueVT = *RC->vt_begin();
4778
4779    // Get the actual register value type.  This is important, because the user
4780    // may have asked for (e.g.) the AX register in i32 type.  We need to
4781    // remember that AX is actually i16 to get the right extension.
4782    RegVT = *RC->vt_begin();
4783
4784    // This is an explicit reference to a physical register.
4785    Regs.push_back(AssignedReg);
4786
4787    // If this is an expanded reference, add the rest of the regs to Regs.
4788    if (NumRegs != 1) {
4789      TargetRegisterClass::iterator I = RC->begin();
4790      for (; *I != AssignedReg; ++I)
4791        assert(I != RC->end() && "Didn't find reg!");
4792
4793      // Already added the first reg.
4794      --NumRegs; ++I;
4795      for (; NumRegs; --NumRegs, ++I) {
4796        assert(I != RC->end() && "Ran out of registers to allocate!");
4797        Regs.push_back(*I);
4798      }
4799    }
4800    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4801    const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4802    OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
4803    return;
4804  }
4805
4806  // Otherwise, if this was a reference to an LLVM register class, create vregs
4807  // for this reference.
4808  if (const TargetRegisterClass *RC = PhysReg.second) {
4809    RegVT = *RC->vt_begin();
4810    if (OpInfo.ConstraintVT == MVT::Other)
4811      ValueVT = RegVT;
4812
4813    // Create the appropriate number of virtual registers.
4814    MachineRegisterInfo &RegInfo = MF.getRegInfo();
4815    for (; NumRegs; --NumRegs)
4816      Regs.push_back(RegInfo.createVirtualRegister(RC));
4817
4818    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4819    return;
4820  }
4821
4822  // This is a reference to a register class that doesn't directly correspond
4823  // to an LLVM register class.  Allocate NumRegs consecutive, available,
4824  // registers from the class.
4825  std::vector<unsigned> RegClassRegs
4826    = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
4827                                            OpInfo.ConstraintVT);
4828
4829  const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4830  unsigned NumAllocated = 0;
4831  for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
4832    unsigned Reg = RegClassRegs[i];
4833    // See if this register is available.
4834    if ((isOutReg && OutputRegs.count(Reg)) ||   // Already used.
4835        (isInReg  && InputRegs.count(Reg))) {    // Already used.
4836      // Make sure we find consecutive registers.
4837      NumAllocated = 0;
4838      continue;
4839    }
4840
4841    // Check to see if this register is allocatable (i.e. don't give out the
4842    // stack pointer).
4843    const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
4844    if (!RC) {        // Couldn't allocate this register.
4845      // Reset NumAllocated to make sure we return consecutive registers.
4846      NumAllocated = 0;
4847      continue;
4848    }
4849
4850    // Okay, this register is good, we can use it.
4851    ++NumAllocated;
4852
4853    // If we allocated enough consecutive registers, succeed.
4854    if (NumAllocated == NumRegs) {
4855      unsigned RegStart = (i-NumAllocated)+1;
4856      unsigned RegEnd   = i+1;
4857      // Mark all of the allocated registers used.
4858      for (unsigned i = RegStart; i != RegEnd; ++i)
4859        Regs.push_back(RegClassRegs[i]);
4860
4861      OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
4862                                         OpInfo.ConstraintVT);
4863      OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
4864      return;
4865    }
4866  }
4867
4868  // Otherwise, we couldn't allocate enough registers for this.
4869}
4870
4871/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
4872/// processed uses a memory 'm' constraint.
4873static bool
4874hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
4875                          const TargetLowering &TLI) {
4876  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
4877    InlineAsm::ConstraintInfo &CI = CInfos[i];
4878    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
4879      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
4880      if (CType == TargetLowering::C_Memory)
4881        return true;
4882    }
4883
4884    // Indirect operand accesses access memory.
4885    if (CI.isIndirect)
4886      return true;
4887  }
4888
4889  return false;
4890}
4891
4892/// visitInlineAsm - Handle a call to an InlineAsm object.
4893///
4894void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
4895  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
4896
4897  /// ConstraintOperands - Information about all of the constraints.
4898  std::vector<SDISelAsmOperandInfo> ConstraintOperands;
4899
4900  std::set<unsigned> OutputRegs, InputRegs;
4901
4902  // Do a prepass over the constraints, canonicalizing them, and building up the
4903  // ConstraintOperands list.
4904  std::vector<InlineAsm::ConstraintInfo>
4905    ConstraintInfos = IA->ParseConstraints();
4906
4907  bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
4908
4909  SDValue Chain, Flag;
4910
4911  // We won't need to flush pending loads if this asm doesn't touch
4912  // memory and is nonvolatile.
4913  if (hasMemory || IA->hasSideEffects())
4914    Chain = getRoot();
4915  else
4916    Chain = DAG.getRoot();
4917
4918  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
4919  unsigned ResNo = 0;   // ResNo - The result number of the next output.
4920  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
4921    ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
4922    SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
4923
4924    EVT OpVT = MVT::Other;
4925
4926    // Compute the value type for each operand.
4927    switch (OpInfo.Type) {
4928    case InlineAsm::isOutput:
4929      // Indirect outputs just consume an argument.
4930      if (OpInfo.isIndirect) {
4931        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
4932        break;
4933      }
4934
4935      // The return value of the call is this value.  As such, there is no
4936      // corresponding argument.
4937      assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
4938             "Bad inline asm!");
4939      if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
4940        OpVT = TLI.getValueType(STy->getElementType(ResNo));
4941      } else {
4942        assert(ResNo == 0 && "Asm only has one result!");
4943        OpVT = TLI.getValueType(CS.getType());
4944      }
4945      ++ResNo;
4946      break;
4947    case InlineAsm::isInput:
4948      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
4949      break;
4950    case InlineAsm::isClobber:
4951      // Nothing to do.
4952      break;
4953    }
4954
4955    // If this is an input or an indirect output, process the call argument.
4956    // BasicBlocks are labels, currently appearing only in asm's.
4957    if (OpInfo.CallOperandVal) {
4958      // Strip bitcasts, if any.  This mostly comes up for functions.
4959      OpInfo.CallOperandVal = OpInfo.CallOperandVal->stripPointerCasts();
4960
4961      if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
4962        OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
4963      } else {
4964        OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
4965      }
4966
4967      OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, TD);
4968    }
4969
4970    OpInfo.ConstraintVT = OpVT;
4971  }
4972
4973  // Second pass over the constraints: compute which constraint option to use
4974  // and assign registers to constraints that want a specific physreg.
4975  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
4976    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
4977
4978    // If this is an output operand with a matching input operand, look up the
4979    // matching input. If their types mismatch, e.g. one is an integer, the
4980    // other is floating point, or their sizes are different, flag it as an
4981    // error.
4982    if (OpInfo.hasMatchingInput()) {
4983      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
4984      if (OpInfo.ConstraintVT != Input.ConstraintVT) {
4985        if ((OpInfo.ConstraintVT.isInteger() !=
4986             Input.ConstraintVT.isInteger()) ||
4987            (OpInfo.ConstraintVT.getSizeInBits() !=
4988             Input.ConstraintVT.getSizeInBits())) {
4989          llvm_report_error("Unsupported asm: input constraint"
4990                            " with a matching output constraint of incompatible"
4991                            " type!");
4992        }
4993        Input.ConstraintVT = OpInfo.ConstraintVT;
4994      }
4995    }
4996
4997    // Compute the constraint code and ConstraintType to use.
4998    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
4999
5000    // If this is a memory input, and if the operand is not indirect, do what we
5001    // need to provide an address for the memory input.
5002    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5003        !OpInfo.isIndirect) {
5004      assert(OpInfo.Type == InlineAsm::isInput &&
5005             "Can only indirectify direct input operands!");
5006
5007      // Memory operands really want the address of the value.  If we don't have
5008      // an indirect input, put it in the constpool if we can, otherwise spill
5009      // it to a stack slot.
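      // For example (illustrative), a floating-point or integer constant used
      // with an "m" constraint is given a constant-pool slot below, while any
      // other value is spilled to a fresh stack slot just before the asm.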
5010
5011      // If the operand is a float, integer, or vector constant, spill to a
5012      // constant pool entry to get its address.
5013      Value *OpVal = OpInfo.CallOperandVal;
5014      if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
5015          isa<ConstantVector>(OpVal)) {
5016        OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
5017                                                 TLI.getPointerTy());
5018      } else {
5019        // Otherwise, create a stack slot and emit a store to it before the
5020        // asm.
5021        const Type *Ty = OpVal->getType();
5022        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
5023        unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
5024        MachineFunction &MF = DAG.getMachineFunction();
5025        int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
5026        SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
5027        Chain = DAG.getStore(Chain, getCurDebugLoc(),
5028                             OpInfo.CallOperand, StackSlot, NULL, 0);
5029        OpInfo.CallOperand = StackSlot;
5030      }
5031
5032      // There is no longer a Value* corresponding to this operand.
5033      OpInfo.CallOperandVal = 0;
5034      // It is now an indirect operand.
5035      OpInfo.isIndirect = true;
5036    }
5037
5038    // If this constraint is for a specific register, allocate it before
5039    // anything else.
5040    if (OpInfo.ConstraintType == TargetLowering::C_Register)
5041      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5042  }
5043  ConstraintInfos.clear();
5044
5045
5046  // Third pass - Loop over all of the operands, assigning virtual or physregs
5047  // to register class operands.
5048  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5049    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5050
5051    // C_Register operands have already been allocated, Other/Memory don't need
5052    // to be.
5053    if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
5054      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5055  }
5056
5057  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
5058  std::vector<SDValue> AsmNodeOperands;
5059  AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
5060  AsmNodeOperands.push_back(
5061          DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));
5062
5063
5064  // Loop over all of the inputs, copying the operand values into the
5065  // appropriate registers and processing the output regs.
5066  RegsForValue RetValRegs;
5067
5068  // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
5069  std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
5070
5071  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5072    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5073
5074    switch (OpInfo.Type) {
5075    case InlineAsm::isOutput: {
5076      if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
5077          OpInfo.ConstraintType != TargetLowering::C_Register) {
5078        // Memory output, or 'other' output (e.g. 'X' constraint).
5079        assert(OpInfo.isIndirect && "Memory output must be indirect operand");
5080
5081        // Add information to the INLINEASM node to know about this output.
5082        unsigned ResOpType = 4/*MEM*/ | (1<<3);
5083        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5084                                                        TLI.getPointerTy()));
5085        AsmNodeOperands.push_back(OpInfo.CallOperand);
5086        break;
5087      }
5088
5089      // Otherwise, this is a register or register class output.
5090
5091      // Copy the output from the appropriate register.  Find a register that
5092      // we can use.
5093      if (OpInfo.AssignedRegs.Regs.empty()) {
5094        llvm_report_error("Couldn't allocate output reg for"
5095                          " constraint '" + OpInfo.ConstraintCode + "'!");
5096      }
5097
5098      // If this is an indirect operand, store through the pointer after the
5099      // asm.
5100      if (OpInfo.isIndirect) {
5101        IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
5102                                                      OpInfo.CallOperandVal));
5103      } else {
5104        // This is the result value of the call.
5105        assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
5106               "Bad inline asm!");
5107        // Concatenate this output onto the outputs list.
5108        RetValRegs.append(OpInfo.AssignedRegs);
5109      }
5110
5111      // Add information to the INLINEASM node to know that this register is
5112      // set.
5113      OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
5114                                               6 /* EARLYCLOBBER REGDEF */ :
5115                                               2 /* REGDEF */ ,
5116                                               false,
5117                                               0,
5118                                               DAG, AsmNodeOperands);
5119      break;
5120    }
5121    case InlineAsm::isInput: {
5122      SDValue InOperandVal = OpInfo.CallOperand;
5123
5124      if (OpInfo.isMatchingInputConstraint()) {   // Matching constraint?
5125        // If this is required to match an output register we have already set,
5126        // just use its register.
5127        unsigned OperandNo = OpInfo.getMatchedOperand();
5128
5129        // Scan until we find the definition we already emitted of this operand.
5130        // When we find it, create a RegsForValue operand.
5131        unsigned CurOp = 2;  // The first operand.
5132        for (; OperandNo; --OperandNo) {
5133          // Advance to the next operand.
5134          unsigned OpFlag =
5135            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5136          assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
5137                  (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
5138                  (OpFlag & 7) == 4 /*MEM*/) &&
5139                 "Skipped past definitions?");
5140          CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
5141        }
5142
5143        unsigned OpFlag =
5144          cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5145        if ((OpFlag & 7) == 2 /*REGDEF*/
5146            || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
5147          // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
5148          if (OpInfo.isIndirect) {
5149            llvm_report_error("Don't know how to handle tied indirect "
5150                              "register inputs yet!");
5151          }
5152          RegsForValue MatchedRegs;
5153          MatchedRegs.TLI = &TLI;
5154          MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
5155          EVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
5156          MatchedRegs.RegVTs.push_back(RegVT);
5157          MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
5158          for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
5159               i != e; ++i)
5160            MatchedRegs.Regs.
5161              push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
5162
5163          // Use the produced MatchedRegs object to emit the CopyToReg nodes.
5164          MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5165                                    Chain, &Flag);
5166          MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
5167                                           true, OpInfo.getMatchedOperand(),
5168                                           DAG, AsmNodeOperands);
5169          break;
5170        } else {
5171          assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
5172          assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
5173                 "Unexpected number of operands");
5174          // Add information to the INLINEASM node to know about this input.
5175          // See InlineAsm.h isUseOperandTiedToDef.
5176          OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
5177          AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
5178                                                          TLI.getPointerTy()));
5179          AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
5180          break;
5181        }
5182      }
5183
5184      if (OpInfo.ConstraintType == TargetLowering::C_Other) {
5185        assert(!OpInfo.isIndirect &&
5186               "Don't know how to handle indirect other inputs yet!");
5187
5188        std::vector<SDValue> Ops;
5189        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
5190                                         hasMemory, Ops, DAG);
5191        if (Ops.empty()) {
5192          llvm_report_error("Invalid operand for inline asm"
5193                            " constraint '" + OpInfo.ConstraintCode + "'!");
5194        }
5195
5196        // Add information to the INLINEASM node to know about this input.
5197        unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
5198        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5199                                                        TLI.getPointerTy()));
5200        AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
5201        break;
5202      } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
5203        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
5204        assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
5205               "Memory operands expect pointer values");
5206
5207        // Add information to the INLINEASM node to know about this input.
5208        unsigned ResOpType = 4/*MEM*/ | (1<<3);
5209        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5210                                                        TLI.getPointerTy()));
5211        AsmNodeOperands.push_back(InOperandVal);
5212        break;
5213      }
5214
5215      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
5216              OpInfo.ConstraintType == TargetLowering::C_Register) &&
5217             "Unknown constraint type!");
5218      assert(!OpInfo.isIndirect &&
5219             "Don't know how to handle indirect register inputs yet!");
5220
5221      // Copy the input into the appropriate registers.
5222      if (OpInfo.AssignedRegs.Regs.empty()) {
5223        llvm_report_error("Couldn't allocate input reg for"
5224                          " constraint '"+ OpInfo.ConstraintCode +"'!");
5225      }
5226
5227      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5228                                        Chain, &Flag);
5229
5230      OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
5231                                               DAG, AsmNodeOperands);
5232      break;
5233    }
5234    case InlineAsm::isClobber: {
5235      // Add the clobbered value to the operand list, so that the register
5236      // allocator is aware that the physreg got clobbered.
5237      if (!OpInfo.AssignedRegs.Regs.empty())
5238        OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
5239                                                 false, 0, DAG,AsmNodeOperands);
5240      break;
5241    }
5242    }
5243  }
5244
5245  // Finish up input operands.
5246  AsmNodeOperands[0] = Chain;
5247  if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
5248
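  // Emit the INLINEASM node.  It produces a chain and a flag; the flag glues
  // the copies of any output registers to the asm node.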
5249  Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
5250                      DAG.getVTList(MVT::Other, MVT::Flag),
5251                      &AsmNodeOperands[0], AsmNodeOperands.size());
5252  Flag = Chain.getValue(1);
5253
5254  // If this asm returns a register value, copy the result from that register
5255  // and set it as the value of the call.
5256  if (!RetValRegs.Regs.empty()) {
5257    SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5258                                             Chain, &Flag);
5259
5260    // FIXME: Why don't we do this for inline asms with MRVs?
5261    if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
5262      EVT ResultType = TLI.getValueType(CS.getType());
5263
5264      // If any of the results of the inline asm is a vector, it may have the
5265      // wrong width or number of elements.  This can happen for register
5266      // classes that can contain multiple different value types.  The physreg
5267      // or vreg allocated may not have the same VT as was expected, so convert
5268      // it to the right type with bit_convert.
5269      if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
5270        Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5271                          ResultType, Val);
5272
5273      } else if (ResultType != Val.getValueType() &&
5274                 ResultType.isInteger() && Val.getValueType().isInteger()) {
5275        // If a result value was tied to an input value, the computed result may
5276        // have a wider width than the expected result.  Extract the relevant
5277        // portion.
5278        Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
5279      }
5280
5281      assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
5282    }
5283
5284    setValue(CS.getInstruction(), Val);
5285    // Don't need to use this as a chain in this case.
5286    if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
5287      return;
5288  }
5289
5290  std::vector<std::pair<SDValue, Value*> > StoresToEmit;
5291
5292  // Process indirect outputs: first, emit all of the flagged copies out of
5293  // physregs.
5294  for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
5295    RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
5296    Value *Ptr = IndirectStoresToEmit[i].second;
5297    SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5298                                             Chain, &Flag);
5299    StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
5301  }
5302
5303  // Emit the non-flagged stores from the physregs.
5304  SmallVector<SDValue, 8> OutChains;
5305  for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
5306    OutChains.push_back(DAG.getStore(Chain, getCurDebugLoc(),
5307                                    StoresToEmit[i].first,
5308                                    getValue(StoresToEmit[i].second),
5309                                    StoresToEmit[i].second, 0));
5310  if (!OutChains.empty())
5311    Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
5312                        &OutChains[0], OutChains.size());
5313  DAG.setRoot(Chain);
5314}
5315
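// The va_* intrinsics and the va_arg instruction lower directly to their ISD
// counterparts.  Each node takes the current root chain, the va_list pointer
// operand(s), and a SrcValue recording the original pointer operand.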
5316void SelectionDAGBuilder::visitVAStart(CallInst &I) {
5317  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
5318                          MVT::Other, getRoot(),
5319                          getValue(I.getOperand(1)),
5320                          DAG.getSrcValue(I.getOperand(1))));
5321}
5322
5323void SelectionDAGBuilder::visitVAArg(VAArgInst &I) {
5324  SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
5325                           getRoot(), getValue(I.getOperand(0)),
5326                           DAG.getSrcValue(I.getOperand(0)));
5327  setValue(&I, V);
5328  DAG.setRoot(V.getValue(1));
5329}
5330
5331void SelectionDAGBuilder::visitVAEnd(CallInst &I) {
5332  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
5333                          MVT::Other, getRoot(),
5334                          getValue(I.getOperand(1)),
5335                          DAG.getSrcValue(I.getOperand(1))));
5336}
5337
5338void SelectionDAGBuilder::visitVACopy(CallInst &I) {
5339  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
5340                          MVT::Other, getRoot(),
5341                          getValue(I.getOperand(1)),
5342                          getValue(I.getOperand(2)),
5343                          DAG.getSrcValue(I.getOperand(1)),
5344                          DAG.getSrcValue(I.getOperand(2))));
5345}
5346
5347/// TargetLowering::LowerCallTo - This is the default LowerCallTo
5348/// implementation, which just calls LowerCall.
5349/// FIXME: When all targets are migrated to using LowerCall, this hook
5350/// should be integrated into SDISel.
5351std::pair<SDValue, SDValue>
5352TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
5353                            bool RetSExt, bool RetZExt, bool isVarArg,
5354                            bool isInreg, unsigned NumFixedArgs,
5355                            CallingConv::ID CallConv, bool isTailCall,
5356                            bool isReturnValueUsed,
5357                            SDValue Callee,
5358                            ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) {
5359
5360  assert((!isTailCall || PerformTailCallOpt) &&
5361         "isTailCall set when tail-call optimizations are disabled!");
5362
5363  // Handle all of the outgoing arguments.
5364  SmallVector<ISD::OutputArg, 32> Outs;
5365  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
5366    SmallVector<EVT, 4> ValueVTs;
5367    ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
5368    for (unsigned Value = 0, NumValues = ValueVTs.size();
5369         Value != NumValues; ++Value) {
5370      EVT VT = ValueVTs[Value];
5371      const Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
5372      SDValue Op = SDValue(Args[i].Node.getNode(),
5373                           Args[i].Node.getResNo() + Value);
5374      ISD::ArgFlagsTy Flags;
5375      unsigned OriginalAlignment =
5376        getTargetData()->getABITypeAlignment(ArgTy);
5377
5378      if (Args[i].isZExt)
5379        Flags.setZExt();
5380      if (Args[i].isSExt)
5381        Flags.setSExt();
5382      if (Args[i].isInReg)
5383        Flags.setInReg();
5384      if (Args[i].isSRet)
5385        Flags.setSRet();
5386      if (Args[i].isByVal) {
5387        Flags.setByVal();
5388        const PointerType *Ty = cast<PointerType>(Args[i].Ty);
5389        const Type *ElementTy = Ty->getElementType();
5390        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5391        unsigned FrameSize  = getTargetData()->getTypeAllocSize(ElementTy);
5392        // For ByVal, the alignment should come from the front end; the back
5393        // end will guess if it is missing but cannot always get it right.
5394        if (Args[i].Alignment)
5395          FrameAlign = Args[i].Alignment;
5396        Flags.setByValAlign(FrameAlign);
5397        Flags.setByValSize(FrameSize);
5398      }
5399      if (Args[i].isNest)
5400        Flags.setNest();
5401      Flags.setOrigAlign(OriginalAlignment);
5402
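      // Split the value into the target's legal register-sized parts,
      // extending as requested by the signext/zeroext attributes when
      // the parts are wider than the value.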
5403      EVT PartVT = getRegisterType(RetTy->getContext(), VT);
5404      unsigned NumParts = getNumRegisters(RetTy->getContext(), VT);
5405      SmallVector<SDValue, 4> Parts(NumParts);
5406      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
5407
5408      if (Args[i].isSExt)
5409        ExtendKind = ISD::SIGN_EXTEND;
5410      else if (Args[i].isZExt)
5411        ExtendKind = ISD::ZERO_EXTEND;
5412
5413      getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);
5414
5415      for (unsigned j = 0; j != NumParts; ++j) {
5416        // If this isn't the first piece, the alignment must be 1.
5417        ISD::OutputArg MyFlags(Flags, Parts[j], i < NumFixedArgs);
5418        if (NumParts > 1 && j == 0)
5419          MyFlags.Flags.setSplit();
5420        else if (j != 0)
5421          MyFlags.Flags.setOrigAlign(1);
5422
5423        Outs.push_back(MyFlags);
5424      }
5425    }
5426  }
5427
5428  // Handle the incoming return values from the call.
5429  SmallVector<ISD::InputArg, 32> Ins;
5430  SmallVector<EVT, 4> RetTys;
5431  ComputeValueVTs(*this, RetTy, RetTys);
5432  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5433    EVT VT = RetTys[I];
5434    EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
5435    unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
5436    for (unsigned i = 0; i != NumRegs; ++i) {
5437      ISD::InputArg MyFlags;
5438      MyFlags.VT = RegisterVT;
5439      MyFlags.Used = isReturnValueUsed;
5440      if (RetSExt)
5441        MyFlags.Flags.setSExt();
5442      if (RetZExt)
5443        MyFlags.Flags.setZExt();
5444      if (isInreg)
5445        MyFlags.Flags.setInReg();
5446      Ins.push_back(MyFlags);
5447    }
5448  }
5449
5450  // Check if target-dependent constraints permit a tail call here.
5451  // Target-independent constraints should be checked by the caller.
5452  if (isTailCall &&
5453      !IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, Ins, DAG))
5454    isTailCall = false;
5455
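  // Lower the call via the target hook.  InVals receives the flagged copies
  // of the call's results; for a tail call it is left empty.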
5456  SmallVector<SDValue, 4> InVals;
5457  Chain = LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
5458                    Outs, Ins, dl, DAG, InVals);
5459
5460  // Verify that the target's LowerCall behaved as expected.
5461  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
5462         "LowerCall didn't return a valid chain!");
5463  assert((!isTailCall || InVals.empty()) &&
5464         "LowerCall emitted a return value for a tail call!");
5465  assert((isTailCall || InVals.size() == Ins.size()) &&
5466         "LowerCall didn't emit the correct number of values!");
5467  DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5468          assert(InVals[i].getNode() &&
5469                 "LowerCall emitted a null value!");
5470          assert(Ins[i].VT == InVals[i].getValueType() &&
5471                 "LowerCall emitted a value with the wrong type!");
5472        });
5473
5474  // For a tail call, the return value is merely live-out and there aren't
5475  // any nodes in the DAG representing it. Return a special value to
5476  // indicate that a tail call has been emitted and no more Instructions
5477  // should be processed in the current block.
5478  if (isTailCall) {
5479    DAG.setRoot(Chain);
5480    return std::make_pair(SDValue(), SDValue());
5481  }
5482
5483  // Collect the legal value parts into potentially illegal values
5484  // that correspond to the original function's return values.
5485  ISD::NodeType AssertOp = ISD::DELETED_NODE;
5486  if (RetSExt)
5487    AssertOp = ISD::AssertSext;
5488  else if (RetZExt)
5489    AssertOp = ISD::AssertZext;
5490  SmallVector<SDValue, 4> ReturnValues;
5491  unsigned CurReg = 0;
5492  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5493    EVT VT = RetTys[I];
5494    EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
5495    unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
5496
5497    SDValue ReturnValue =
5498      getCopyFromParts(DAG, dl, &InVals[CurReg], NumRegs, RegisterVT, VT,
5499                       AssertOp);
5500    ReturnValues.push_back(ReturnValue);
5501    CurReg += NumRegs;
5502  }
5503
5504  // For a function returning void, there is no return value. We can't create
5505  // such a node, so we just return a null value in that case; nothing will
5506  // actually look at it.
5507  if (ReturnValues.empty())
5508    return std::make_pair(SDValue(), Chain);
5509
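  // Otherwise, merge the per-part results into a single MERGE_VALUES node
  // with one result for each flattened return value type.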
5510  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
5511                            DAG.getVTList(&RetTys[0], RetTys.size()),
5512                            &ReturnValues[0], ReturnValues.size());
5513
5514  return std::make_pair(Res, Chain);
5515}
5516
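/// LowerOperationWrapper - Forward to LowerOperation and, if the target
/// produced a replacement node, record it in Results.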
5517void TargetLowering::LowerOperationWrapper(SDNode *N,
5518                                           SmallVectorImpl<SDValue> &Results,
5519                                           SelectionDAG &DAG) {
5520  SDValue Res = LowerOperation(SDValue(N, 0), DAG);
5521  if (Res.getNode())
5522    Results.push_back(Res);
5523}
5524
5525SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
5526  llvm_unreachable("LowerOperation not implemented for this target!");
5527  return SDValue();
5528}
5529
5530
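/// CopyValueToVirtualRegister - Emit CopyToReg nodes that copy the value V
/// into the virtual registers starting at Reg.  The resulting chain is
/// queued in PendingExports rather than being made the DAG root immediately.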
5531void SelectionDAGBuilder::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
5532  SDValue Op = getValue(V);
5533  assert((Op.getOpcode() != ISD::CopyFromReg ||
5534          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
5535         "Copy from a reg to the same reg!");
5536  assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
5537
5538  RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
5539  SDValue Chain = DAG.getEntryNode();
5540  RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
5541  PendingExports.push_back(Chain);
5542}
5543
5544#include "llvm/CodeGen/SelectionDAGISel.h"
5545
5546void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
5547  // If this is the entry block, emit arguments.
5548  Function &F = *LLVMBB->getParent();
5549  SelectionDAG &DAG = SDB->DAG;
5550  SDValue OldRoot = DAG.getRoot();
5551  DebugLoc dl = SDB->getCurDebugLoc();
5552  const TargetData *TD = TLI.getTargetData();
5553  SmallVector<ISD::InputArg, 16> Ins;
5554
5555  // Check whether the function can return without sret-demotion.
5556  SmallVector<EVT, 4> OutVTs;
5557  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
5558  getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
5559                OutVTs, OutsFlags, TLI);
5560  FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
5561
5562  FLI.CanLowerReturn = TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(),
5563    OutVTs, OutsFlags, DAG);
5564  if (!FLI.CanLowerReturn) {
5565    // Put in an sret pointer parameter before all the other parameters.
5566    SmallVector<EVT, 1> ValueVTs;
5567    ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
5568
5569    // NOTE: Assuming that a pointer will never break down into more than one
5570    // VT or require more than one register.
5571    ISD::ArgFlagsTy Flags;
5572    Flags.setSRet();
5573    EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), ValueVTs[0]);
5574    ISD::InputArg RetArg(Flags, RegisterVT, true);
5575    Ins.push_back(RetArg);
5576  }
5577
5578  // Set up the incoming argument description vector.
5579  unsigned Idx = 1;
5580  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
5581       I != E; ++I, ++Idx) {
5582    SmallVector<EVT, 4> ValueVTs;
5583    ComputeValueVTs(TLI, I->getType(), ValueVTs);
5584    bool isArgValueUsed = !I->use_empty();
5585    for (unsigned Value = 0, NumValues = ValueVTs.size();
5586         Value != NumValues; ++Value) {
5587      EVT VT = ValueVTs[Value];
5588      const Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
5589      ISD::ArgFlagsTy Flags;
5590      unsigned OriginalAlignment =
5591        TD->getABITypeAlignment(ArgTy);
5592
5593      if (F.paramHasAttr(Idx, Attribute::ZExt))
5594        Flags.setZExt();
5595      if (F.paramHasAttr(Idx, Attribute::SExt))
5596        Flags.setSExt();
5597      if (F.paramHasAttr(Idx, Attribute::InReg))
5598        Flags.setInReg();
5599      if (F.paramHasAttr(Idx, Attribute::StructRet))
5600        Flags.setSRet();
5601      if (F.paramHasAttr(Idx, Attribute::ByVal)) {
5602        Flags.setByVal();
5603        const PointerType *Ty = cast<PointerType>(I->getType());
5604        const Type *ElementTy = Ty->getElementType();
5605        unsigned FrameAlign = TLI.getByValTypeAlignment(ElementTy);
5606        unsigned FrameSize  = TD->getTypeAllocSize(ElementTy);
5607        // For ByVal, the alignment should be passed from the front end; the
5608        // back end will guess if it is missing but cannot always get it right.
5609        if (F.getParamAlignment(Idx))
5610          FrameAlign = F.getParamAlignment(Idx);
5611        Flags.setByValAlign(FrameAlign);
5612        Flags.setByValSize(FrameSize);
5613      }
5614      if (F.paramHasAttr(Idx, Attribute::Nest))
5615        Flags.setNest();
5616      Flags.setOrigAlign(OriginalAlignment);
5617
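      // Each value may be split across several registers of the legal
      // register type; record one InputArg per register, marking the first
      // part of a split and forcing the alignment of later parts to 1.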
5618      EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5619      unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5620      for (unsigned i = 0; i != NumRegs; ++i) {
5621        ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed);
5622        if (NumRegs > 1 && i == 0)
5623          MyFlags.Flags.setSplit();
5624        // If it isn't the first piece, the alignment must be 1.
5625        else if (i > 0)
5626          MyFlags.Flags.setOrigAlign(1);
5627        Ins.push_back(MyFlags);
5628      }
5629    }
5630  }
5631
5632  // Call the target to set up the argument values.
5633  SmallVector<SDValue, 8> InVals;
5634  SDValue NewRoot = TLI.LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
5635                                             F.isVarArg(), Ins,
5636                                             dl, DAG, InVals);
5637
5638  // Verify that the target's LowerFormalArguments behaved as expected.
5639  assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
5640         "LowerFormalArguments didn't return a valid chain!");
5641  assert(InVals.size() == Ins.size() &&
5642         "LowerFormalArguments didn't emit the correct number of values!");
5643  DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5644          assert(InVals[i].getNode() &&
5645                 "LowerFormalArguments emitted a null value!");
5646          assert(Ins[i].VT == InVals[i].getValueType() &&
5647                 "LowerFormalArguments emitted a value with the wrong type!");
5648        });
5649
5650  // Update the DAG with the new chain value resulting from argument lowering.
5651  DAG.setRoot(NewRoot);
5652
5653  // Set up the argument values.
5654  unsigned i = 0;
5655  Idx = 1;
5656  if (!FLI.CanLowerReturn) {
5657    // Create a virtual register for the sret pointer, and emit a copy of
5658    // the incoming sret argument into it.
5659    SmallVector<EVT, 1> ValueVTs;
5660    ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
5661    EVT VT = ValueVTs[0];
5662    EVT RegVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5663    ISD::NodeType AssertOp = ISD::DELETED_NODE;
5664    SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT,
5665                                        VT, AssertOp);
5666
5667    MachineFunction& MF = SDB->DAG.getMachineFunction();
5668    MachineRegisterInfo& RegInfo = MF.getRegInfo();
5669    unsigned SRetReg = RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT));
5670    FLI.DemoteRegister = SRetReg;
5671    NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurDebugLoc(),
                                    SRetReg, ArgValue);
5672    DAG.setRoot(NewRoot);
5673
5674    // i indexes lowered arguments.  Bump it past the hidden sret argument.
5675    // Idx indexes LLVM arguments.  Don't touch it.
5676    ++i;
5677  }
5678  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
5679      ++I, ++Idx) {
5680    SmallVector<SDValue, 4> ArgValues;
5681    SmallVector<EVT, 4> ValueVTs;
5682    ComputeValueVTs(TLI, I->getType(), ValueVTs);
5683    unsigned NumValues = ValueVTs.size();
5684    for (unsigned Value = 0; Value != NumValues; ++Value) {
5685      EVT VT = ValueVTs[Value];
5686      EVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5687      unsigned NumParts = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5688
5689      if (!I->use_empty()) {
5690        ISD::NodeType AssertOp = ISD::DELETED_NODE;
5691        if (F.paramHasAttr(Idx, Attribute::SExt))
5692          AssertOp = ISD::AssertSext;
5693        else if (F.paramHasAttr(Idx, Attribute::ZExt))
5694          AssertOp = ISD::AssertZext;
5695
5696        ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
5697                                             PartVT, VT, AssertOp));
5698      }
5699      i += NumParts;
5700    }
5701    if (!I->use_empty()) {
5702      SDB->setValue(I, DAG.getMergeValues(&ArgValues[0], NumValues,
5703                                          SDB->getCurDebugLoc()));
5704      // If this argument is live outside of the entry block, insert a copy from
5705      // wherever we got it to the vreg that other BBs will reference it as.
5706      SDB->CopyToExportRegsIfNeeded(I);
5707    }
5708  }
5709  assert(i == InVals.size() && "Argument register count mismatch!");
5710
5711  // Finally, if the target has anything special to do, allow it to do so.
5712  // FIXME: this should insert code into the DAG!
5713  EmitFunctionEntryCode(F, SDB->DAG.getMachineFunction());
5714}
5715
5716/// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
5717/// ensure constants are generated when needed.  Remember the virtual registers
5718/// that need to be added to the Machine PHI nodes as input.  We cannot just
5719/// directly add them, because expansion might result in multiple MBB's for one
5720/// BB.  As such, the start of the BB might correspond to a different MBB than
5721/// the end.
5722///
5723void
5724SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
5725  TerminatorInst *TI = LLVMBB->getTerminator();
5726
5727  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5728
5729  // Check successor nodes' PHI nodes that expect a constant to be available
5730  // from this block.
5731  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5732    BasicBlock *SuccBB = TI->getSuccessor(succ);
5733    if (!isa<PHINode>(SuccBB->begin())) continue;
5734    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5735
5736    // If this terminator has multiple identical successors (common for
5737    // switches), only handle each succ once.
5738    if (!SuccsHandled.insert(SuccMBB)) continue;
5739
5740    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
5741    PHINode *PN;
5742
5743    // At this point we know that there is a 1-1 correspondence between LLVM PHI
5744    // nodes and Machine PHI nodes, but the incoming operands have not been
5745    // emitted yet.
5746    for (BasicBlock::iterator I = SuccBB->begin();
5747         (PN = dyn_cast<PHINode>(I)); ++I) {
5748      // Ignore dead PHIs.
5749      if (PN->use_empty()) continue;
5750
5751      unsigned Reg;
5752      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5753
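      // Constants are materialized into a virtual register once per block
      // and cached in ConstantsOut so multiple PHIs can share the copy.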
5754      if (Constant *C = dyn_cast<Constant>(PHIOp)) {
5755        unsigned &RegOut = SDB->ConstantsOut[C];
5756        if (RegOut == 0) {
5757          RegOut = FuncInfo->CreateRegForValue(C);
5758          SDB->CopyValueToVirtualRegister(C, RegOut);
5759        }
5760        Reg = RegOut;
5761      } else {
5762        Reg = FuncInfo->ValueMap[PHIOp];
5763        if (Reg == 0) {
5764          assert(isa<AllocaInst>(PHIOp) &&
5765                 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
5766                 "Didn't codegen value into a register!??");
5767          Reg = FuncInfo->CreateRegForValue(PHIOp);
5768          SDB->CopyValueToVirtualRegister(PHIOp, Reg);
5769        }
5770      }
5771
5772      // Remember that this register needs to be added to the machine PHI node
5773      // as the input for this MBB.
5774      SmallVector<EVT, 4> ValueVTs;
5775      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
5776      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
5777        EVT VT = ValueVTs[vti];
5778        unsigned NumRegisters = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5779        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
5780          SDB->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
5781        Reg += NumRegisters;
5782      }
5783    }
5784  }
5785  SDB->ConstantsOut.clear();
5786}
5787
5788/// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
5789/// supports legal types, and it emits MachineInstrs directly instead of
5790/// creating SelectionDAG nodes.
5791///
5792bool
5793SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
5794                                                      FastISel *F) {
5795  TerminatorInst *TI = LLVMBB->getTerminator();
5796
5797  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5798  unsigned OrigNumPHINodesToUpdate = SDB->PHINodesToUpdate.size();
5799
5800  // Check successor nodes' PHI nodes that expect a constant to be available
5801  // from this block.
5802  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5803    BasicBlock *SuccBB = TI->getSuccessor(succ);
5804    if (!isa<PHINode>(SuccBB->begin())) continue;
5805    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5806
5807    // If this terminator has multiple identical successors (common for
5808    // switches), only handle each succ once.
5809    if (!SuccsHandled.insert(SuccMBB)) continue;
5810
5811    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
5812    PHINode *PN;
5813
5814    // At this point we know that there is a 1-1 correspondence between LLVM PHI
5815    // nodes and Machine PHI nodes, but the incoming operands have not been
5816    // emitted yet.
5817    for (BasicBlock::iterator I = SuccBB->begin();
5818         (PN = dyn_cast<PHINode>(I)); ++I) {
5819      // Ignore dead PHIs.
5820      if (PN->use_empty()) continue;
5821
5822      // Only handle legal types. Two interesting things to note here. First,
5823      // by bailing out early, we may leave behind some dead instructions,
5824      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
5825      // own moves. Second, this check is necessary because FastISel doesn't
5826      // use CreateRegForValue to create registers, so it always creates
5827      // exactly one register for each non-void instruction.
5828      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
5829      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
5830        // Promote MVT::i1.
5831        if (VT == MVT::i1)
5832          VT = TLI.getTypeToTransformTo(*CurDAG->getContext(), VT);
5833        else {
5834          SDB->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
5835          return false;
5836        }
5837      }
5838
5839      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5840
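      // FastISel keeps its own value-to-register map.  If it cannot supply
      // a register for the incoming value, roll back the PHI updates made
      // here and report failure to the caller.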
5841      unsigned Reg = F->getRegForValue(PHIOp);
5842      if (Reg == 0) {
5843        SDB->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
5844        return false;
5845      }
5846      SDB->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
5847    }
5848  }
5849
5850  return true;
5851}
5852