SelectionDAGBuilder.cpp revision 71dac16402a187d9e3dbb52e2ae1fe5cbf559723
1//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements routines for translating from LLVM IR into SelectionDAG IR.
11//
12//===----------------------------------------------------------------------===//
13
14#define DEBUG_TYPE "isel"
15#include "SelectionDAGBuilder.h"
16#include "FunctionLoweringInfo.h"
17#include "llvm/ADT/BitVector.h"
18#include "llvm/ADT/SmallSet.h"
19#include "llvm/Analysis/AliasAnalysis.h"
20#include "llvm/Constants.h"
21#include "llvm/CallingConv.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/Function.h"
24#include "llvm/GlobalVariable.h"
25#include "llvm/InlineAsm.h"
26#include "llvm/Instructions.h"
27#include "llvm/Intrinsics.h"
28#include "llvm/IntrinsicInst.h"
29#include "llvm/LLVMContext.h"
30#include "llvm/Module.h"
31#include "llvm/CodeGen/FastISel.h"
32#include "llvm/CodeGen/GCStrategy.h"
33#include "llvm/CodeGen/GCMetadata.h"
34#include "llvm/CodeGen/MachineFunction.h"
35#include "llvm/CodeGen/MachineFrameInfo.h"
36#include "llvm/CodeGen/MachineInstrBuilder.h"
37#include "llvm/CodeGen/MachineJumpTableInfo.h"
38#include "llvm/CodeGen/MachineModuleInfo.h"
39#include "llvm/CodeGen/MachineRegisterInfo.h"
40#include "llvm/CodeGen/PseudoSourceValue.h"
41#include "llvm/CodeGen/SelectionDAG.h"
42#include "llvm/CodeGen/DwarfWriter.h"
43#include "llvm/Analysis/DebugInfo.h"
44#include "llvm/Target/TargetRegisterInfo.h"
45#include "llvm/Target/TargetData.h"
46#include "llvm/Target/TargetFrameInfo.h"
47#include "llvm/Target/TargetInstrInfo.h"
48#include "llvm/Target/TargetIntrinsicInfo.h"
49#include "llvm/Target/TargetLowering.h"
50#include "llvm/Target/TargetOptions.h"
51#include "llvm/Support/Compiler.h"
52#include "llvm/Support/CommandLine.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/MathExtras.h"
56#include "llvm/Support/raw_ostream.h"
57#include <algorithm>
58using namespace llvm;
59
60/// LimitFloatPrecision - Generate low-precision inline sequences for
61/// some float libcalls (6, 8 or 12 bits).
62static unsigned LimitFloatPrecision;
63
64static cl::opt<unsigned, true>
65LimitFPPrecision("limit-float-precision",
66                 cl::desc("Generate low-precision inline sequences "
67                          "for some float libcalls"),
68                 cl::location(LimitFloatPrecision),
69                 cl::init(0));
70
71namespace {
72  /// RegsForValue - This struct represents the registers (physical or virtual)
73  /// that a particular set of values is assigned to, and the type information
74  /// about those values. The most common situation is to represent one value at a time,
75  /// but struct or array values are handled element-wise as multiple values.
76  /// The splitting of aggregates is performed recursively, so that we never
77  /// have aggregate-typed registers. The values at this point do not necessarily
78  /// have legal types, so each value may require one or more registers of some
79  /// legal type.
80  ///
81  struct RegsForValue {
82    /// TLI - The TargetLowering object.
83    ///
84    const TargetLowering *TLI;
85
86    /// ValueVTs - The value types of the values, which may not be legal, and
87    /// may need to be promoted or synthesized from one or more registers.
88    ///
89    SmallVector<EVT, 4> ValueVTs;
90
91    /// RegVTs - The value types of the registers. This is the same size as
92    /// ValueVTs and it records, for each value, what the type of the assigned
93    /// register or registers is. (Individual values are never synthesized
94    /// from more than one type of register.)
95    ///
96    /// With virtual registers, the contents of RegVTs are redundant with TLI's
97    /// getRegisterType member function; with physical registers, however, it is
98    /// necessary to have a separate record of the types.
99    ///
100    SmallVector<EVT, 4> RegVTs;
101
102    /// Regs - This list holds the registers assigned to the values.
103    /// Each legal or promoted value requires one register, and each
104    /// expanded value requires multiple registers.
105    ///
106    SmallVector<unsigned, 4> Regs;
107
108    RegsForValue() : TLI(0) {}
109
110    RegsForValue(const TargetLowering &tli,
111                 const SmallVector<unsigned, 4> &regs,
112                 EVT regvt, EVT valuevt)
113      : TLI(&tli),  ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
114    RegsForValue(const TargetLowering &tli,
115                 const SmallVector<unsigned, 4> &regs,
116                 const SmallVector<EVT, 4> &regvts,
117                 const SmallVector<EVT, 4> &valuevts)
118      : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
119    RegsForValue(LLVMContext &Context, const TargetLowering &tli,
120                 unsigned Reg, const Type *Ty) : TLI(&tli) {
121      ComputeValueVTs(tli, Ty, ValueVTs);
122
123      for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
124        EVT ValueVT = ValueVTs[Value];
125        unsigned NumRegs = TLI->getNumRegisters(Context, ValueVT);
126        EVT RegisterVT = TLI->getRegisterType(Context, ValueVT);
127        for (unsigned i = 0; i != NumRegs; ++i)
128          Regs.push_back(Reg + i);
129        RegVTs.push_back(RegisterVT);
130        Reg += NumRegs;
131      }
132    }
133
134    /// append - Add the specified values to this one.
135    void append(const RegsForValue &RHS) {
136      TLI = RHS.TLI;
137      ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
138      RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
139      Regs.append(RHS.Regs.begin(), RHS.Regs.end());
140    }
141
142
143    /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
144    /// this value and returns the result as a value of the ValueVTs types.  This uses
145    /// Chain/Flag as the input and updates them for the output Chain/Flag.
146    /// If the Flag pointer is NULL, no flag is used.
147    SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
148                              SDValue &Chain, SDValue *Flag) const;
149
150    /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
151    /// specified value into the registers specified by this object.  This uses
152    /// Chain/Flag as the input and updates them for the output Chain/Flag.
153    /// If the Flag pointer is NULL, no flag is used.
154    void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
155                       SDValue &Chain, SDValue *Flag) const;
156
157    /// AddInlineAsmOperands - Add this value to the specified inlineasm node
158    /// operand list.  This adds the code marker, matching input operand index
159    /// (if applicable), and includes the number of values added into it.
160    void AddInlineAsmOperands(unsigned Code,
161                              bool HasMatching, unsigned MatchingIdx,
162                              SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
163  };
164}
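
// For illustration (a sketch with placeholder names, not code compiled here):
// on a target whose widest legal integer type is i32, constructing a
// RegsForValue for an i64 IR value starting at virtual register VReg gives
//
//   RegsForValue RFV(Context, TLI, VReg, Int64Ty);
//   // ValueVTs == { i64 }          -- the (possibly illegal) value type
//   // RegVTs   == { i32 }          -- the legal register type used to hold it
//   // Regs     == { VReg, VReg+1 } -- two consecutive i32 registers
//
// getCopyFromRegs and getCopyToRegs then use this mapping, together with the
// part-copying helpers below, to assemble or split the i64 value.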
165
166/// getCopyFromParts - Create a value that contains the specified legal parts
167/// combined into the value they represent.  If the parts combine to a type
168/// larger than ValueVT, AssertOp can be used to specify whether the extra
169/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
170/// (ISD::AssertSext).
171static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
172                                const SDValue *Parts,
173                                unsigned NumParts, EVT PartVT, EVT ValueVT,
174                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
175  assert(NumParts > 0 && "No parts to assemble!");
176  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
177  SDValue Val = Parts[0];
178
179  if (NumParts > 1) {
180    // Assemble the value from multiple parts.
181    if (!ValueVT.isVector() && ValueVT.isInteger()) {
182      unsigned PartBits = PartVT.getSizeInBits();
183      unsigned ValueBits = ValueVT.getSizeInBits();
184
185      // Assemble the power-of-2 part.
186      unsigned RoundParts = NumParts & (NumParts - 1) ?
187        1 << Log2_32(NumParts) : NumParts;
188      unsigned RoundBits = PartBits * RoundParts;
189      EVT RoundVT = RoundBits == ValueBits ?
190        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
191      SDValue Lo, Hi;
192
193      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
194
195      if (RoundParts > 2) {
196        Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT);
197        Hi = getCopyFromParts(DAG, dl, Parts+RoundParts/2, RoundParts/2,
198                              PartVT, HalfVT);
199      } else {
200        Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
201        Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
202      }
203      if (TLI.isBigEndian())
204        std::swap(Lo, Hi);
205      Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);
206
207      if (RoundParts < NumParts) {
208        // Assemble the trailing non-power-of-2 part.
209        unsigned OddParts = NumParts - RoundParts;
210        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
211        Hi = getCopyFromParts(DAG, dl,
212                              Parts+RoundParts, OddParts, PartVT, OddVT);
213
214        // Combine the round and odd parts.
215        Lo = Val;
216        if (TLI.isBigEndian())
217          std::swap(Lo, Hi);
218        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
219        Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
220        Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
221                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
222                                         TLI.getPointerTy()));
223        Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
224        Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
225      }
226    } else if (ValueVT.isVector()) {
227      // Handle a multi-element vector.
228      EVT IntermediateVT, RegisterVT;
229      unsigned NumIntermediates;
230      unsigned NumRegs =
231        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
232                                   NumIntermediates, RegisterVT);
233      assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
234      NumParts = NumRegs; // Silence a compiler warning.
235      assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
236      assert(RegisterVT == Parts[0].getValueType() &&
237             "Part type doesn't match part!");
238
239      // Assemble the parts into intermediate operands.
240      SmallVector<SDValue, 8> Ops(NumIntermediates);
241      if (NumIntermediates == NumParts) {
242        // If the register was not expanded, truncate or copy the value,
243        // as appropriate.
244        for (unsigned i = 0; i != NumParts; ++i)
245          Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1,
246                                    PartVT, IntermediateVT);
247      } else if (NumParts > 0) {
248        // If the intermediate type was expanded, build the intermediate operands
249        // from the parts.
250        assert(NumParts % NumIntermediates == 0 &&
251               "Must expand into a divisible number of parts!");
252        unsigned Factor = NumParts / NumIntermediates;
253        for (unsigned i = 0; i != NumIntermediates; ++i)
254          Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor,
255                                    PartVT, IntermediateVT);
256      }
257
258      // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
259      // operands.
260      Val = DAG.getNode(IntermediateVT.isVector() ?
261                        ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
262                        ValueVT, &Ops[0], NumIntermediates);
263    } else if (PartVT.isFloatingPoint()) {
264      // FP split into multiple FP parts (for ppcf128)
265      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
266             "Unexpected split");
267      SDValue Lo, Hi;
268      Lo = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[0]);
269      Hi = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[1]);
270      if (TLI.isBigEndian())
271        std::swap(Lo, Hi);
272      Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
273    } else {
274      // FP split into integer parts (soft fp)
275      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
276             !PartVT.isVector() && "Unexpected split");
277      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
278      Val = getCopyFromParts(DAG, dl, Parts, NumParts, PartVT, IntVT);
279    }
280  }
281
282  // There is now one part, held in Val.  Correct it to match ValueVT.
283  PartVT = Val.getValueType();
284
285  if (PartVT == ValueVT)
286    return Val;
287
288  if (PartVT.isVector()) {
289    assert(ValueVT.isVector() && "Unknown vector conversion!");
290    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
291  }
292
293  if (ValueVT.isVector()) {
294    assert(ValueVT.getVectorElementType() == PartVT &&
295           ValueVT.getVectorNumElements() == 1 &&
296           "Only trivial scalar-to-vector conversions should get here!");
297    return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
298  }
299
300  if (PartVT.isInteger() &&
301      ValueVT.isInteger()) {
302    if (ValueVT.bitsLT(PartVT)) {
303      // For a truncate, see if we have any information to
304      // indicate whether the truncated bits will always be
305      // zero or sign-extended.
306      if (AssertOp != ISD::DELETED_NODE)
307        Val = DAG.getNode(AssertOp, dl, PartVT, Val,
308                          DAG.getValueType(ValueVT));
309      return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
310    } else {
311      return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
312    }
313  }
314
315  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
316    if (ValueVT.bitsLT(Val.getValueType()))
317      // FP_ROUND's are always exact here.
318      return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
319                         DAG.getIntPtrConstant(1));
320    return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
321  }
322
323  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
324    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
325
326  llvm_unreachable("Unknown mismatch!");
327  return SDValue();
328}
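
// As a concrete sketch of the integer path above: assembling an i96 value
// from three i32 parts first builds the power-of-2 "round" portion from
// Parts[0..1] with BUILD_PAIR, then folds in the odd third part, roughly
//
//   Val = zext(i96, pair(Parts[0], Parts[1])) | (anyext(i96, Parts[2]) << 64)
//
// with the parts swapped first on big-endian targets.  This corresponds to
// the RoundParts / OddParts handling in getCopyFromParts.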
329
330/// getCopyToParts - Create a series of nodes that contain the specified value
331/// split into legal parts.  If the parts contain more bits than Val, then, for
332/// integers, ExtendKind can be used to specify how to generate the extra bits.
333static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
334                           SDValue *Parts, unsigned NumParts, EVT PartVT,
335                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
336  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
337  EVT PtrVT = TLI.getPointerTy();
338  EVT ValueVT = Val.getValueType();
339  unsigned PartBits = PartVT.getSizeInBits();
340  unsigned OrigNumParts = NumParts;
341  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
342
343  if (!NumParts)
344    return;
345
346  if (!ValueVT.isVector()) {
347    if (PartVT == ValueVT) {
348      assert(NumParts == 1 && "No-op copy with multiple parts!");
349      Parts[0] = Val;
350      return;
351    }
352
353    if (NumParts * PartBits > ValueVT.getSizeInBits()) {
354      // If the parts cover more bits than the value has, promote the value.
355      if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
356        assert(NumParts == 1 && "Do not know what to promote to!");
357        Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
358      } else if (PartVT.isInteger() && ValueVT.isInteger()) {
359        ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
360        Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
361      } else {
362        llvm_unreachable("Unknown mismatch!");
363      }
364    } else if (PartBits == ValueVT.getSizeInBits()) {
365      // Different types of the same size.
366      assert(NumParts == 1 && PartVT != ValueVT);
367      Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
368    } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
369      // If the parts cover fewer bits than the value has, truncate the value.
370      if (PartVT.isInteger() && ValueVT.isInteger()) {
371        ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
372        Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
373      } else {
374        llvm_unreachable("Unknown mismatch!");
375      }
376    }
377
378    // The value may have changed - recompute ValueVT.
379    ValueVT = Val.getValueType();
380    assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
381           "Failed to tile the value with PartVT!");
382
383    if (NumParts == 1) {
384      assert(PartVT == ValueVT && "Type conversion failed!");
385      Parts[0] = Val;
386      return;
387    }
388
389    // Expand the value into multiple parts.
390    if (NumParts & (NumParts - 1)) {
391      // The number of parts is not a power of 2.  Split off and copy the tail.
392      assert(PartVT.isInteger() && ValueVT.isInteger() &&
393             "Do not know what to expand to!");
394      unsigned RoundParts = 1 << Log2_32(NumParts);
395      unsigned RoundBits = RoundParts * PartBits;
396      unsigned OddParts = NumParts - RoundParts;
397      SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
398                                   DAG.getConstant(RoundBits,
399                                                   TLI.getPointerTy()));
400      getCopyToParts(DAG, dl, OddVal, Parts + RoundParts, OddParts, PartVT);
401      if (TLI.isBigEndian())
402        // The odd parts were reversed by getCopyToParts - unreverse them.
403        std::reverse(Parts + RoundParts, Parts + NumParts);
404      NumParts = RoundParts;
405      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
406      Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
407    }
408
409    // The number of parts is a power of 2.  Repeatedly bisect the value using
410    // EXTRACT_ELEMENT.
411    Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
412                           EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()),
413                           Val);
414    for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
415      for (unsigned i = 0; i < NumParts; i += StepSize) {
416        unsigned ThisBits = StepSize * PartBits / 2;
417        EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
418        SDValue &Part0 = Parts[i];
419        SDValue &Part1 = Parts[i+StepSize/2];
420
421        Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
422                            ThisVT, Part0,
423                            DAG.getConstant(1, PtrVT));
424        Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
425                            ThisVT, Part0,
426                            DAG.getConstant(0, PtrVT));
427
428        if (ThisBits == PartBits && ThisVT != PartVT) {
429          Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
430                              PartVT, Part0);
431          Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
432                              PartVT, Part1);
433        }
434      }
435    }
436
437    if (TLI.isBigEndian())
438      std::reverse(Parts, Parts + OrigNumParts);
439
440    return;
441  }
442
443  // Vector ValueVT.
444  if (NumParts == 1) {
445    if (PartVT != ValueVT) {
446      if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
447        Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
448      } else {
449        assert(ValueVT.getVectorElementType() == PartVT &&
450               ValueVT.getVectorNumElements() == 1 &&
451               "Only trivial vector-to-scalar conversions should get here!");
452        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
453                          PartVT, Val,
454                          DAG.getConstant(0, PtrVT));
455      }
456    }
457
458    Parts[0] = Val;
459    return;
460  }
461
462  // Handle a multi-element vector.
463  EVT IntermediateVT, RegisterVT;
464  unsigned NumIntermediates;
465  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
466                              IntermediateVT, NumIntermediates, RegisterVT);
467  unsigned NumElements = ValueVT.getVectorNumElements();
468
469  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
470  NumParts = NumRegs; // Silence a compiler warning.
471  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
472
473  // Split the vector into intermediate operands.
474  SmallVector<SDValue, 8> Ops(NumIntermediates);
475  for (unsigned i = 0; i != NumIntermediates; ++i)
476    if (IntermediateVT.isVector())
477      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
478                           IntermediateVT, Val,
479                           DAG.getConstant(i * (NumElements / NumIntermediates),
480                                           PtrVT));
481    else
482      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
483                           IntermediateVT, Val,
484                           DAG.getConstant(i, PtrVT));
485
486  // Split the intermediate operands into legal parts.
487  if (NumParts == NumIntermediates) {
488    // If the register was not expanded, promote or copy the value,
489    // as appropriate.
490    for (unsigned i = 0; i != NumParts; ++i)
491      getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT);
492  } else if (NumParts > 0) {
493    // If the intermediate type was expanded, split each value into
494    // legal parts.
495    assert(NumParts % NumIntermediates == 0 &&
496           "Must expand into a divisible number of parts!");
497    unsigned Factor = NumParts / NumIntermediates;
498    for (unsigned i = 0; i != NumIntermediates; ++i)
499      getCopyToParts(DAG, dl, Ops[i], &Parts[i * Factor], Factor, PartVT);
500  }
501}
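
// A minimal example of the splitting direction (a hypothetical call site, not
// code from this file): splitting an i64 value into two i32 parts on a 32-bit
// target would be written as
//
//   SDValue Parts[2];
//   getCopyToParts(DAG, dl, Val64, Parts, 2, MVT::i32);
//
// which peels the halves off with EXTRACT_ELEMENT so that Parts[0] holds bits
// 0-31 and Parts[1] holds bits 32-63, with the array reversed afterwards on
// big-endian targets.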
502
503
504void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
505  AA = &aa;
506  GFI = gfi;
507  TD = DAG.getTarget().getTargetData();
508}
509
510/// clear - Clear out the current SelectionDAG and the associated
511/// state and prepare this SelectionDAGBuilder object to be used
512/// for a new block. This doesn't clear out information about
513/// additional blocks that are needed to complete switch lowering
514/// or PHI node updating; that information is cleared out as it is
515/// consumed.
516void SelectionDAGBuilder::clear() {
517  NodeMap.clear();
518  PendingLoads.clear();
519  PendingExports.clear();
520  EdgeMapping.clear();
521  DAG.clear();
522  CurDebugLoc = DebugLoc::getUnknownLoc();
523  HasTailCall = false;
524}
525
526/// getRoot - Return the current virtual root of the Selection DAG,
527/// flushing any PendingLoad items. This must be done before emitting
528/// a store or any other node that may need to be ordered after any
529/// prior load instructions.
530///
531SDValue SelectionDAGBuilder::getRoot() {
532  if (PendingLoads.empty())
533    return DAG.getRoot();
534
535  if (PendingLoads.size() == 1) {
536    SDValue Root = PendingLoads[0];
537    DAG.setRoot(Root);
538    PendingLoads.clear();
539    return Root;
540  }
541
542  // Otherwise, we have to make a token factor node.
543  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
544                               &PendingLoads[0], PendingLoads.size());
545  PendingLoads.clear();
546  DAG.setRoot(Root);
547  return Root;
548}
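
// For example, with two independent loads already pending in this block, a
// subsequent store would use (placeholder operands, for illustration only):
//
//   SDValue Ch = getRoot();                 // TokenFactor of both load chains
//   DAG.getStore(Ch, dl, Val, Ptr, ...);    // store ordered after both loads
//
// The TokenFactor orders the store after both loads without imposing any
// ordering between the loads themselves.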
549
550/// getControlRoot - Similar to getRoot, but instead of flushing all the
551/// PendingLoad items, flush all the PendingExports items. It is necessary
552/// to do this before emitting a terminator instruction.
553///
554SDValue SelectionDAGBuilder::getControlRoot() {
555  SDValue Root = DAG.getRoot();
556
557  if (PendingExports.empty())
558    return Root;
559
560  // Turn all of the CopyToReg chains into one factored node.
561  if (Root.getOpcode() != ISD::EntryToken) {
562    unsigned i = 0, e = PendingExports.size();
563    for (; i != e; ++i) {
564      assert(PendingExports[i].getNode()->getNumOperands() > 1);
565      if (PendingExports[i].getNode()->getOperand(0) == Root)
566        break;  // Don't add the root if we already indirectly depend on it.
567    }
568
569    if (i == e)
570      PendingExports.push_back(Root);
571  }
572
573  Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
574                     &PendingExports[0],
575                     PendingExports.size());
576  PendingExports.clear();
577  DAG.setRoot(Root);
578  return Root;
579}
580
581void SelectionDAGBuilder::visit(Instruction &I) {
582  visit(I.getOpcode(), I);
583}
584
585void SelectionDAGBuilder::visit(unsigned Opcode, User &I) {
586  // We're processing a new instruction.
587  ++SDNodeOrder;
588
589  // Note: this doesn't use InstVisitor, because it has to work with
590  // ConstantExpr's in addition to instructions.
591  switch (Opcode) {
592  default: llvm_unreachable("Unknown instruction type encountered!");
593    // Build the switch statement using the Instruction.def file.
594#define HANDLE_INST(NUM, OPCODE, CLASS) \
595  case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
596#include "llvm/Instruction.def"
597  }
598}
599
600SDValue SelectionDAGBuilder::getValue(const Value *V) {
601  SDValue &N = NodeMap[V];
602  if (N.getNode()) return N;
603
604  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
605    EVT VT = TLI.getValueType(V->getType(), true);
606
607    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
608      return N = DAG.getConstant(*CI, VT);
609
610    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
611      return N = DAG.getGlobalAddress(GV, VT);
612
613    if (isa<ConstantPointerNull>(C))
614      return N = DAG.getConstant(0, TLI.getPointerTy());
615
616    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
617      return N = DAG.getConstantFP(*CFP, VT);
618
619    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
620      return N = DAG.getUNDEF(VT);
621
622    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
623      visit(CE->getOpcode(), *CE);
624      SDValue N1 = NodeMap[V];
625      assert(N1.getNode() && "visit didn't populate the ValueMap!");
626      return N1;
627    }
628
629    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
630      SmallVector<SDValue, 4> Constants;
631      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
632           OI != OE; ++OI) {
633        SDNode *Val = getValue(*OI).getNode();
634        // If the operand is an empty aggregate, there are no values.
635        if (!Val) continue;
636        // Add each leaf value from the operand to the Constants list
637        // to form a flattened list of all the values.
638        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
639          Constants.push_back(SDValue(Val, i));
640      }
641      return DAG.getMergeValues(&Constants[0], Constants.size(),
642                                getCurDebugLoc());
643    }
644
645    if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
646      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
647             "Unknown struct or array constant!");
648
649      SmallVector<EVT, 4> ValueVTs;
650      ComputeValueVTs(TLI, C->getType(), ValueVTs);
651      unsigned NumElts = ValueVTs.size();
652      if (NumElts == 0)
653        return SDValue(); // empty struct
654      SmallVector<SDValue, 4> Constants(NumElts);
655      for (unsigned i = 0; i != NumElts; ++i) {
656        EVT EltVT = ValueVTs[i];
657        if (isa<UndefValue>(C))
658          Constants[i] = DAG.getUNDEF(EltVT);
659        else if (EltVT.isFloatingPoint())
660          Constants[i] = DAG.getConstantFP(0, EltVT);
661        else
662          Constants[i] = DAG.getConstant(0, EltVT);
663      }
664      return DAG.getMergeValues(&Constants[0], NumElts, getCurDebugLoc());
665    }
666
667    if (BlockAddress *BA = dyn_cast<BlockAddress>(C))
668      return DAG.getBlockAddress(BA, VT);
669
670    const VectorType *VecTy = cast<VectorType>(V->getType());
671    unsigned NumElements = VecTy->getNumElements();
672
673    // Now that we know the number and type of the elements, get that number of
674    // elements into the Ops array based on what kind of constant it is.
675    SmallVector<SDValue, 16> Ops;
676    if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
677      for (unsigned i = 0; i != NumElements; ++i)
678        Ops.push_back(getValue(CP->getOperand(i)));
679    } else {
680      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
681      EVT EltVT = TLI.getValueType(VecTy->getElementType());
682
683      SDValue Op;
684      if (EltVT.isFloatingPoint())
685        Op = DAG.getConstantFP(0, EltVT);
686      else
687        Op = DAG.getConstant(0, EltVT);
688      Ops.assign(NumElements, Op);
689    }
690
691    // Create a BUILD_VECTOR node.
692    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
693                                    VT, &Ops[0], Ops.size());
694  }
695
696  // If this is a static alloca, generate it as the frameindex instead of
697  // computation.
698  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
699    DenseMap<const AllocaInst*, int>::iterator SI =
700      FuncInfo.StaticAllocaMap.find(AI);
701    if (SI != FuncInfo.StaticAllocaMap.end())
702      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
703  }
704
705  unsigned InReg = FuncInfo.ValueMap[V];
706  assert(InReg && "Value not in map!");
707
708  RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
709  SDValue Chain = DAG.getEntryNode();
710  return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
711}
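
// For instance, getValue on the constant struct "{ i32 1, float 2.0 }"
// flattens the aggregate into two leaf SDValues (an i32 Constant<1> and an
// f32 ConstantFP<2.0>) joined with getMergeValues, while getValue on a static
// alloca simply returns the corresponding FrameIndex node.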
712
713/// Get the EVTs and ArgFlags collections that represent the return type
714/// of the given function.  This does not require a DAG or a return value, and
715/// is suitable for use before any DAGs for the function are constructed.
716static void getReturnInfo(const Type* ReturnType,
717                   Attributes attr, SmallVectorImpl<EVT> &OutVTs,
718                   SmallVectorImpl<ISD::ArgFlagsTy> &OutFlags,
719                   TargetLowering &TLI,
720                   SmallVectorImpl<uint64_t> *Offsets = 0) {
721  SmallVector<EVT, 4> ValueVTs;
722  ComputeValueVTs(TLI, ReturnType, ValueVTs, Offsets);
723  unsigned NumValues = ValueVTs.size();
724  if (NumValues == 0) return;
725
726  for (unsigned j = 0, f = NumValues; j != f; ++j) {
727    EVT VT = ValueVTs[j];
728    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
729
730    if (attr & Attribute::SExt)
731      ExtendKind = ISD::SIGN_EXTEND;
732    else if (attr & Attribute::ZExt)
733      ExtendKind = ISD::ZERO_EXTEND;
734
735    // FIXME: C calling convention requires the return type to be promoted to
736    // at least 32-bit. But this is not necessary for non-C calling
737    // conventions. The frontend should mark functions whose return values
738    // require promoting with signext or zeroext attributes.
739    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
740      EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
741      if (VT.bitsLT(MinVT))
742        VT = MinVT;
743    }
744
745    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
746    EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
747    // 'inreg' on function refers to return value
748    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
749    if (attr & Attribute::InReg)
750      Flags.setInReg();
751
752    // Propagate extension type if any
753    if (attr & Attribute::SExt)
754      Flags.setSExt();
755    else if (attr & Attribute::ZExt)
756      Flags.setZExt();
757
758    for (unsigned i = 0; i < NumParts; ++i) {
759      OutVTs.push_back(PartVT);
760      OutFlags.push_back(Flags);
761    }
762  }
763}
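
// A brief illustration (assuming a target whose register type for i32 is i32,
// as on typical 32-bit targets): for a function returning "signext i8", the
// i8 value is widened to the MinVT of i32, producing a single i32 part whose
// flags have SExt set, so callers and the return lowering agree on how the
// narrow value was extended.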
764
765void SelectionDAGBuilder::visitRet(ReturnInst &I) {
766  SDValue Chain = getControlRoot();
767  SmallVector<ISD::OutputArg, 8> Outs;
768  FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
769
770  if (!FLI.CanLowerReturn) {
771    unsigned DemoteReg = FLI.DemoteRegister;
772    const Function *F = I.getParent()->getParent();
773
774    // Emit a store of the return value through the virtual register.
775    // Leave Outs empty so that LowerReturn won't try to load return
776    // registers the usual way.
777    SmallVector<EVT, 1> PtrValueVTs;
778    ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()),
779                    PtrValueVTs);
780
781    SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
782    SDValue RetOp = getValue(I.getOperand(0));
783
784    SmallVector<EVT, 4> ValueVTs;
785    SmallVector<uint64_t, 4> Offsets;
786    ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
787    unsigned NumValues = ValueVTs.size();
788
789    SmallVector<SDValue, 4> Chains(NumValues);
790    EVT PtrVT = PtrValueVTs[0];
791    for (unsigned i = 0; i != NumValues; ++i)
792      Chains[i] = DAG.getStore(Chain, getCurDebugLoc(),
793                  SDValue(RetOp.getNode(), RetOp.getResNo() + i),
794                  DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, RetPtr,
795                  DAG.getConstant(Offsets[i], PtrVT)),
796                  NULL, Offsets[i], false, 0);
797    Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
798                        MVT::Other, &Chains[0], NumValues);
799  }
800  else {
801    for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
802      SmallVector<EVT, 4> ValueVTs;
803      ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
804      unsigned NumValues = ValueVTs.size();
805      if (NumValues == 0) continue;
806
807      SDValue RetOp = getValue(I.getOperand(i));
808      for (unsigned j = 0, f = NumValues; j != f; ++j) {
809        EVT VT = ValueVTs[j];
810
811        ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
812
813        const Function *F = I.getParent()->getParent();
814        if (F->paramHasAttr(0, Attribute::SExt))
815          ExtendKind = ISD::SIGN_EXTEND;
816        else if (F->paramHasAttr(0, Attribute::ZExt))
817          ExtendKind = ISD::ZERO_EXTEND;
818
819        // FIXME: C calling convention requires the return type to be promoted to
820        // at least 32-bit. But this is not necessary for non-C calling
821        // conventions. The frontend should mark functions whose return values
822        // require promoting with signext or zeroext attributes.
823        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
824          EVT MinVT = TLI.getRegisterType(*DAG.getContext(), MVT::i32);
825          if (VT.bitsLT(MinVT))
826            VT = MinVT;
827        }
828
829        unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT);
830        EVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT);
831        SmallVector<SDValue, 4> Parts(NumParts);
832        getCopyToParts(DAG, getCurDebugLoc(),
833                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
834                       &Parts[0], NumParts, PartVT, ExtendKind);
835
836        // 'inreg' on function refers to return value
837        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
838        if (F->paramHasAttr(0, Attribute::InReg))
839          Flags.setInReg();
840
841        // Propagate extension type if any
842        if (F->paramHasAttr(0, Attribute::SExt))
843          Flags.setSExt();
844        else if (F->paramHasAttr(0, Attribute::ZExt))
845          Flags.setZExt();
846
847        for (unsigned i = 0; i < NumParts; ++i)
848          Outs.push_back(ISD::OutputArg(Flags, Parts[i], /*isfixed=*/true));
849      }
850    }
851  }
852
853  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
854  CallingConv::ID CallConv =
855    DAG.getMachineFunction().getFunction()->getCallingConv();
856  Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
857                          Outs, getCurDebugLoc(), DAG);
858
859  // Verify that the target's LowerReturn behaved as expected.
860  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
861         "LowerReturn didn't return a valid chain!");
862
863  // Update the DAG with the new chain value resulting from return lowering.
864  DAG.setRoot(Chain);
865}
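
// When the target reports that it cannot lower the return value in registers
// (CanLowerReturn is false), the path above stores the value through the
// sret-demotion pointer held in DemoteRegister and leaves Outs empty; for
// example, a function returning a large aggregate such as "{ [8 x i64] }"
// would typically take that path, while an ordinary scalar return goes
// through getCopyToParts and a set of ISD::OutputArg records.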
866
867/// CopyToExportRegsIfNeeded - If the given value has virtual registers
868/// created for it, emit nodes to copy the value into the virtual
869/// registers.
870void SelectionDAGBuilder::CopyToExportRegsIfNeeded(Value *V) {
871  if (!V->use_empty()) {
872    DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
873    if (VMI != FuncInfo.ValueMap.end())
874      CopyValueToVirtualRegister(V, VMI->second);
875  }
876}
877
878/// ExportFromCurrentBlock - If this condition isn't known to be exported from
879/// the current basic block, add it to ValueMap now so that we'll get a
880/// CopyTo/FromReg.
881void SelectionDAGBuilder::ExportFromCurrentBlock(Value *V) {
882  // No need to export constants.
883  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
884
885  // Already exported?
886  if (FuncInfo.isExportedInst(V)) return;
887
888  unsigned Reg = FuncInfo.InitializeRegForValue(V);
889  CopyValueToVirtualRegister(V, Reg);
890}
891
892bool SelectionDAGBuilder::isExportableFromCurrentBlock(Value *V,
893                                                     const BasicBlock *FromBB) {
894  // The operands of the setcc have to be in this block.  We don't know
895  // how to export them from some other block.
896  if (Instruction *VI = dyn_cast<Instruction>(V)) {
897    // Can export from current BB.
898    if (VI->getParent() == FromBB)
899      return true;
900
901    // It is already exported; nothing to do.
902    return FuncInfo.isExportedInst(V);
903  }
904
905  // If this is an argument, we can export it if the BB is the entry block or
906  // if it is already exported.
907  if (isa<Argument>(V)) {
908    if (FromBB == &FromBB->getParent()->getEntryBlock())
909      return true;
910
911    // Otherwise, can only export this if it is already exported.
912    return FuncInfo.isExportedInst(V);
913  }
914
915  // Otherwise, constants can always be exported.
916  return true;
917}
918
919static bool InBlock(const Value *V, const BasicBlock *BB) {
920  if (const Instruction *I = dyn_cast<Instruction>(V))
921    return I->getParent() == BB;
922  return true;
923}
924
925/// getFCmpCondCode - Return the ISD condition code corresponding to
926/// the given LLVM IR floating-point condition code.  This includes
927/// consideration of global floating-point math flags.
928///
929static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
930  ISD::CondCode FPC, FOC;
931  switch (Pred) {
932  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
933  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
934  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
935  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
936  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
937  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
938  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
939  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
940  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
941  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
942  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
943  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
944  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
945  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
946  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
947  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
948  default:
949    llvm_unreachable("Invalid FCmp predicate opcode!");
950    FOC = FPC = ISD::SETFALSE;
951    break;
952  }
953  if (FiniteOnlyFPMath())
954    return FOC;
955  else
956    return FPC;
957}
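
// For example, FCMP_OLT maps to the ordered SETOLT (false if either operand
// is NaN) while FCMP_ULT maps to the unordered SETULT (true if either operand
// is NaN); when FiniteOnlyFPMath() is in effect both collapse to the
// NaN-agnostic SETLT, which is what the FOC values above provide.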
958
959/// getICmpCondCode - Return the ISD condition code corresponding to
960/// the given LLVM IR integer condition code.
961///
962static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
963  switch (Pred) {
964  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
965  case ICmpInst::ICMP_NE:  return ISD::SETNE;
966  case ICmpInst::ICMP_SLE: return ISD::SETLE;
967  case ICmpInst::ICMP_ULE: return ISD::SETULE;
968  case ICmpInst::ICMP_SGE: return ISD::SETGE;
969  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
970  case ICmpInst::ICMP_SLT: return ISD::SETLT;
971  case ICmpInst::ICMP_ULT: return ISD::SETULT;
972  case ICmpInst::ICMP_SGT: return ISD::SETGT;
973  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
974  default:
975    llvm_unreachable("Invalid ICmp predicate opcode!");
976    return ISD::SETNE;
977  }
978}
979
980/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
981/// This function emits a branch and is used at the leaves of an OR or an
982/// AND operator tree.
983///
984void
985SelectionDAGBuilder::EmitBranchForMergedCondition(Value *Cond,
986                                                  MachineBasicBlock *TBB,
987                                                  MachineBasicBlock *FBB,
988                                                  MachineBasicBlock *CurBB) {
989  const BasicBlock *BB = CurBB->getBasicBlock();
990
991  // If the leaf of the tree is a comparison, merge the condition into
992  // the caseblock.
993  if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
994    // The operands of the cmp have to be in this block.  We don't know
995    // how to export them from some other block.  If this is the first block
996    // of the sequence, no exporting is needed.
997    if (CurBB == CurMBB ||
998        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
999         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1000      ISD::CondCode Condition;
1001      if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1002        Condition = getICmpCondCode(IC->getPredicate());
1003      } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
1004        Condition = getFCmpCondCode(FC->getPredicate());
1005      } else {
1006        Condition = ISD::SETEQ; // silence warning.
1007        llvm_unreachable("Unknown compare instruction");
1008      }
1009
1010      CaseBlock CB(Condition, BOp->getOperand(0),
1011                   BOp->getOperand(1), NULL, TBB, FBB, CurBB);
1012      SwitchCases.push_back(CB);
1013      return;
1014    }
1015  }
1016
1017  // Create a CaseBlock record representing this branch.
1018  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
1019               NULL, TBB, FBB, CurBB);
1020  SwitchCases.push_back(CB);
1021}
1022
1023/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y), emit it as a sequence of conditional branches.
1024void SelectionDAGBuilder::FindMergedConditions(Value *Cond,
1025                                               MachineBasicBlock *TBB,
1026                                               MachineBasicBlock *FBB,
1027                                               MachineBasicBlock *CurBB,
1028                                               unsigned Opc) {
1029  // If this node is not part of the or/and tree, emit it as a branch.
1030  Instruction *BOp = dyn_cast<Instruction>(Cond);
1031  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1032      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
1033      BOp->getParent() != CurBB->getBasicBlock() ||
1034      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1035      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1036    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
1037    return;
1038  }
1039
1040  //  Create TmpBB after CurBB.
1041  MachineFunction::iterator BBI = CurBB;
1042  MachineFunction &MF = DAG.getMachineFunction();
1043  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1044  CurBB->getParent()->insert(++BBI, TmpBB);
1045
1046  if (Opc == Instruction::Or) {
1047    // Codegen X | Y as:
1048    //   jmp_if_X TBB
1049    //   jmp TmpBB
1050    // TmpBB:
1051    //   jmp_if_Y TBB
1052    //   jmp FBB
1053    //
1054
1055    // Emit the LHS condition.
1056    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);
1057
1058    // Emit the RHS condition into TmpBB.
1059    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1060  } else {
1061    assert(Opc == Instruction::And && "Unknown merge op!");
1062    // Codegen X & Y as:
1063    //   jmp_if_X TmpBB
1064    //   jmp FBB
1065    // TmpBB:
1066    //   jmp_if_Y TBB
1067    //   jmp FBB
1068    //
1069    //  This requires creation of TmpBB after CurBB.
1070
1071    // Emit the LHS condition.
1072    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);
1073
1074    // Emit the RHS condition into TmpBB.
1075    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1076  }
1077}
1078
1079/// If the set of cases should be emitted as a series of branches, return true.
1080/// If we should emit this as a bunch of and/or'd together conditions, return
1081/// false.
1082bool
1083SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
1084  if (Cases.size() != 2) return true;
1085
1086  // If this is two comparisons of the same values or'd or and'd together, they
1087  // will get folded into a single comparison, so don't emit two blocks.
1088  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1089       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1090      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1091       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1092    return false;
1093  }
1094
1095  return true;
1096}
1097
1098void SelectionDAGBuilder::visitBr(BranchInst &I) {
1099  // Update machine-CFG edges.
1100  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1101
1102  // Figure out which block is immediately after the current one.
1103  MachineBasicBlock *NextBlock = 0;
1104  MachineFunction::iterator BBI = CurMBB;
1105  if (++BBI != FuncInfo.MF->end())
1106    NextBlock = BBI;
1107
1108  if (I.isUnconditional()) {
1109    // Update machine-CFG edges.
1110    CurMBB->addSuccessor(Succ0MBB);
1111
1112    // If this is not a fall-through branch, emit the branch.
1113    if (Succ0MBB != NextBlock) {
1114      SDValue V = DAG.getNode(ISD::BR, getCurDebugLoc(),
1115                              MVT::Other, getControlRoot(),
1116                              DAG.getBasicBlock(Succ0MBB));
1117      DAG.setRoot(V);
1118
1119      if (DisableScheduling)
1120        DAG.AssignOrdering(V.getNode(), SDNodeOrder);
1121    }
1122
1123    return;
1124  }
1125
1126  // If this condition is one of the special cases we handle, do special stuff
1127  // now.
1128  Value *CondVal = I.getCondition();
1129  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1130
1131  // If this is a series of conditions that are or'd or and'd together, emit
1132  // this as a sequence of branches instead of setcc's with and/or operations.
1133  // For example, instead of something like:
1134  //     cmp A, B
1135  //     C = seteq
1136  //     cmp D, E
1137  //     F = setle
1138  //     or C, F
1139  //     jnz foo
1140  // Emit:
1141  //     cmp A, B
1142  //     je foo
1143  //     cmp D, E
1144  //     jle foo
1145  //
1146  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1147    if (BOp->hasOneUse() &&
1148        (BOp->getOpcode() == Instruction::And ||
1149         BOp->getOpcode() == Instruction::Or)) {
1150      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
1151      // If the compares in later blocks need to use values not currently
1152      // exported from this block, export them now.  This block should always
1153      // be the first entry.
1154      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");
1155
1156      // Allow some cases to be rejected.
1157      if (ShouldEmitAsBranches(SwitchCases)) {
1158        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1159          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1160          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1161        }
1162
1163        // Emit the branch for this block.
1164        visitSwitchCase(SwitchCases[0]);
1165        SwitchCases.erase(SwitchCases.begin());
1166        return;
1167      }
1168
1169      // Okay, we decided not to do this, remove any inserted MBB's and clear
1170      // SwitchCases.
1171      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1172        FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1173
1174      SwitchCases.clear();
1175    }
1176  }
1177
1178  // Create a CaseBlock record representing this branch.
1179  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1180               NULL, Succ0MBB, Succ1MBB, CurMBB);
1181
1182  // Use visitSwitchCase to actually insert the fast branch sequence for this
1183  // cond branch.
1184  visitSwitchCase(CB);
1185}
1186
1187/// visitSwitchCase - Emits the necessary code to represent a single node in
1188/// the binary search tree resulting from lowering a switch instruction.
1189void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB) {
1190  SDValue Cond;
1191  SDValue CondLHS = getValue(CB.CmpLHS);
1192  DebugLoc dl = getCurDebugLoc();
1193
1194  // Build the setcc now.
1195  if (CB.CmpMHS == NULL) {
1196    // Fold "(X == true)" to X and "(X == false)" to !X to
1197    // handle common cases produced by branch lowering.
1198    if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1199        CB.CC == ISD::SETEQ)
1200      Cond = CondLHS;
1201    else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1202             CB.CC == ISD::SETEQ) {
1203      SDValue True = DAG.getConstant(1, CondLHS.getValueType());
1204      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1205    } else
1206      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1207  } else {
1208    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1209
1210    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1211    const APInt& High  = cast<ConstantInt>(CB.CmpRHS)->getValue();
1212
1213    SDValue CmpOp = getValue(CB.CmpMHS);
1214    EVT VT = CmpOp.getValueType();
1215
1216    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1217      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
1218                          ISD::SETLE);
1219    } else {
1220      SDValue SUB = DAG.getNode(ISD::SUB, dl,
1221                                VT, CmpOp, DAG.getConstant(Low, VT));
1222      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1223                          DAG.getConstant(High-Low, VT), ISD::SETULE);
1224    }
1225  }
1226
1227  // Update successor info
1228  CurMBB->addSuccessor(CB.TrueBB);
1229  CurMBB->addSuccessor(CB.FalseBB);
1230
1231  // Set NextBlock to be the MBB immediately after the current one, if any.
1232  // This is used to avoid emitting unnecessary branches to the next block.
1233  MachineBasicBlock *NextBlock = 0;
1234  MachineFunction::iterator BBI = CurMBB;
1235  if (++BBI != FuncInfo.MF->end())
1236    NextBlock = BBI;
1237
1238  // If the lhs block is the next block, invert the condition so that we can
1239  // fall through to the lhs instead of the rhs block.
1240  if (CB.TrueBB == NextBlock) {
1241    std::swap(CB.TrueBB, CB.FalseBB);
1242    SDValue True = DAG.getConstant(1, Cond.getValueType());
1243    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1244  }
1245
1246  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1247                               MVT::Other, getControlRoot(), Cond,
1248                               DAG.getBasicBlock(CB.TrueBB));
1249
1250  // If the branch was constant folded, fix up the CFG.
1251  if (BrCond.getOpcode() == ISD::BR) {
1252    CurMBB->removeSuccessor(CB.FalseBB);
1253  } else {
1254    // Otherwise, go ahead and insert the false branch.
1255    if (BrCond == getControlRoot())
1256      CurMBB->removeSuccessor(CB.TrueBB);
1257
1258    if (CB.FalseBB != NextBlock)
1259      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1260                           DAG.getBasicBlock(CB.FalseBB));
1261  }
1262
1263  DAG.setRoot(BrCond);
1264
1265  if (DisableScheduling)
1266    DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder);
1267}
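
// The range case above uses the standard unsigned-comparison trick: the check
// Low <= X <= High is lowered as the single comparison
//
//   (X - Low) <=u (High - Low)
//
// because the subtraction wraps any X below Low around to a large unsigned
// value, so one SETULE covers both bounds.  When Low is the minimum signed
// value of the type, the subtraction is skipped and a signed SETLE against
// High is used instead.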
1268
1269/// visitJumpTable - Emit JumpTable node in the current MBB
1270void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
1271  // Emit the code for the jump table
1272  assert(JT.Reg != -1U && "Should lower JT Header first!");
1273  EVT PTy = TLI.getPointerTy();
1274  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
1275                                     JT.Reg, PTy);
1276  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1277  SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
1278                                    MVT::Other, Index.getValue(1),
1279                                    Table, Index);
1280  DAG.setRoot(BrJumpTable);
1281
1282  if (DisableScheduling)
1283    DAG.AssignOrdering(BrJumpTable.getNode(), SDNodeOrder);
1284}
1285
1286/// visitJumpTableHeader - This function emits the code necessary to produce an
1287/// index into the jump table from the value being switched on.
1288void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
1289                                               JumpTableHeader &JTH) {
1290  // Subtract the lowest switch case value from the value being switched on and
1291  // conditional branch to default mbb if the result is greater than the
1292  // difference between smallest and largest cases.
1293  SDValue SwitchOp = getValue(JTH.SValue);
1294  EVT VT = SwitchOp.getValueType();
1295  SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1296                            DAG.getConstant(JTH.First, VT));
1297
1298  // The SDNode we just created, which holds the value being switched on minus
1299  // the smallest case value, needs to be copied to a virtual register so it
1300  // can be used as an index into the jump table in a subsequent basic block.
1301  // This value may be smaller or larger than the target's pointer type, and
1302  // may therefore require extension or truncation.
1303  SwitchOp = DAG.getZExtOrTrunc(SUB, getCurDebugLoc(), TLI.getPointerTy());
1304
1305  unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
1306  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1307                                    JumpTableReg, SwitchOp);
1308  JT.Reg = JumpTableReg;
1309
1310  // Emit the range check for the jump table, and branch to the default block
1311  // for the switch statement if the value being switched on exceeds the largest
1312  // case in the switch.
1313  SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
1314                             TLI.getSetCCResultType(SUB.getValueType()), SUB,
1315                             DAG.getConstant(JTH.Last-JTH.First,VT),
1316                             ISD::SETUGT);
1317
1318  // Set NextBlock to be the MBB immediately after the current one, if any.
1319  // This is used to avoid emitting unnecessary branches to the next block.
1320  MachineBasicBlock *NextBlock = 0;
1321  MachineFunction::iterator BBI = CurMBB;
1322  if (++BBI != FuncInfo.MF->end())
1323    NextBlock = BBI;
1324
1325  SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1326                               MVT::Other, CopyTo, CMP,
1327                               DAG.getBasicBlock(JT.Default));
1328
1329  if (JT.MBB != NextBlock)
1330    BrCond = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
1331                         DAG.getBasicBlock(JT.MBB));
1332
1333  DAG.setRoot(BrCond);
1334
1335  if (DisableScheduling)
1336    DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder);
1337}
1338
1339/// visitBitTestHeader - This function emits the code necessary to produce a
1340/// value suitable for "bit tests".
1341void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B) {
1342  // Subtract the minimum value
1343  SDValue SwitchOp = getValue(B.SValue);
1344  EVT VT = SwitchOp.getValueType();
1345  SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1346                            DAG.getConstant(B.First, VT));
1347
1348  // Check range
1349  SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
1350                                  TLI.getSetCCResultType(SUB.getValueType()),
1351                                  SUB, DAG.getConstant(B.Range, VT),
1352                                  ISD::SETUGT);
1353
  SDValue ShiftOp = DAG.getZExtOrTrunc(SUB, getCurDebugLoc(),
                                       TLI.getPointerTy());
1355
1356  B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
1357  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1358                                    B.Reg, ShiftOp);
1359
1360  // Set NextBlock to be the MBB immediately after the current one, if any.
1361  // This is used to avoid emitting unnecessary branches to the next block.
1362  MachineBasicBlock *NextBlock = 0;
1363  MachineFunction::iterator BBI = CurMBB;
1364  if (++BBI != FuncInfo.MF->end())
1365    NextBlock = BBI;
1366
1367  MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1368
1369  CurMBB->addSuccessor(B.Default);
1370  CurMBB->addSuccessor(MBB);
1371
1372  SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1373                                MVT::Other, CopyTo, RangeCmp,
1374                                DAG.getBasicBlock(B.Default));
1375
1376  if (MBB != NextBlock)
    BrRange = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrRange,
                          DAG.getBasicBlock(MBB));
1379
1380  DAG.setRoot(BrRange);
1381
1382  if (DisableScheduling)
1383    DAG.AssignOrdering(BrRange.getNode(), SDNodeOrder);
1384}
1385
/// visitBitTestCase - This function produces one "bit test".
1387void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
1388                                           unsigned Reg,
1389                                           BitTestCase &B) {
1390  // Make desired shift
1391  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
1392                                       TLI.getPointerTy());
1393  SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
1394                                  TLI.getPointerTy(),
1395                                  DAG.getConstant(1, TLI.getPointerTy()),
1396                                  ShiftOp);
1397
1398  // Emit bit tests and jumps
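  // The test performed is effectively ((1 << (SValue - First)) & Mask) != 0,
  // so a single AND and branch covers every case value whose bit is set in
  // Mask.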
1399  SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
1400                              TLI.getPointerTy(), SwitchVal,
1401                              DAG.getConstant(B.Mask, TLI.getPointerTy()));
1402  SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
1403                                TLI.getSetCCResultType(AndOp.getValueType()),
1404                                AndOp, DAG.getConstant(0, TLI.getPointerTy()),
1405                                ISD::SETNE);
1406
1407  CurMBB->addSuccessor(B.TargetBB);
1408  CurMBB->addSuccessor(NextMBB);
1409
1410  SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1411                              MVT::Other, getControlRoot(),
1412                              AndCmp, DAG.getBasicBlock(B.TargetBB));
1413
1414  // Set NextBlock to be the MBB immediately after the current one, if any.
1415  // This is used to avoid emitting unnecessary branches to the next block.
1416  MachineBasicBlock *NextBlock = 0;
1417  MachineFunction::iterator BBI = CurMBB;
1418  if (++BBI != FuncInfo.MF->end())
1419    NextBlock = BBI;
1420
1421  if (NextMBB != NextBlock)
1422    BrAnd = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
1423                        DAG.getBasicBlock(NextMBB));
1424
1425  DAG.setRoot(BrAnd);
1426
1427  if (DisableScheduling)
1428    DAG.AssignOrdering(BrAnd.getNode(), SDNodeOrder);
1429}
1430
1431void SelectionDAGBuilder::visitInvoke(InvokeInst &I) {
1432  // Retrieve successors.
1433  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
1434  MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
1435
1436  const Value *Callee(I.getCalledValue());
1437  if (isa<InlineAsm>(Callee))
1438    visitInlineAsm(&I);
1439  else
1440    LowerCallTo(&I, getValue(Callee), false, LandingPad);
1441
1442  // If the value of the invoke is used outside of its defining block, make it
1443  // available as a virtual register.
1444  CopyToExportRegsIfNeeded(&I);
1445
1446  // Update successor info
1447  CurMBB->addSuccessor(Return);
1448  CurMBB->addSuccessor(LandingPad);
1449
1450  // Drop into normal successor.
1451  SDValue Branch = DAG.getNode(ISD::BR, getCurDebugLoc(),
1452                               MVT::Other, getControlRoot(),
1453                               DAG.getBasicBlock(Return));
1454  DAG.setRoot(Branch);
1455
1456  if (DisableScheduling)
1457    DAG.AssignOrdering(Branch.getNode(), SDNodeOrder);
1458}
1459
1460void SelectionDAGBuilder::visitUnwind(UnwindInst &I) {
1461}
1462
/// handleSmallSwitchRange - Emit a series of specific tests (suitable for
/// small case ranges).
1465bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
1466                                                 CaseRecVector& WorkList,
1467                                                 Value* SV,
1468                                                 MachineBasicBlock* Default) {
1469  Case& BackCase  = *(CR.Range.second-1);
1470
1471  // Size is the number of Cases represented by this range.
1472  size_t Size = CR.Range.second - CR.Range.first;
1473  if (Size > 3)
1474    return false;
1475
1476  // Get the MachineFunction which holds the current MBB.  This is used when
1477  // inserting any additional MBBs necessary to represent the switch.
1478  MachineFunction *CurMF = FuncInfo.MF;
1479
1480  // Figure out which block is immediately after the current one.
1481  MachineBasicBlock *NextBlock = 0;
1482  MachineFunction::iterator BBI = CR.CaseBB;
1483
1484  if (++BBI != FuncInfo.MF->end())
1485    NextBlock = BBI;
1486
  // TODO: If any two of the cases have the same destination, and if one value
1488  // is the same as the other, but has one bit unset that the other has set,
1489  // use bit manipulation to do two compares at once.  For example:
1490  // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1491
1492  // Rearrange the case blocks so that the last one falls through if possible.
1493  if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
1494    // The last case block won't fall through into 'NextBlock' if we emit the
1495    // branches in this order.  See if rearranging a case value would help.
1496    for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
1497      if (I->BB == NextBlock) {
1498        std::swap(*I, BackCase);
1499        break;
1500      }
1501    }
1502  }
1503
1504  // Create a CaseBlock record representing a conditional branch to
1505  // the Case's target mbb if the value being switched on SV is equal
1506  // to C.
1507  MachineBasicBlock *CurBlock = CR.CaseBB;
1508  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
1509    MachineBasicBlock *FallThrough;
1510    if (I != E-1) {
1511      FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
1512      CurMF->insert(BBI, FallThrough);
1513
1514      // Put SV in a virtual register to make it available from the new blocks.
1515      ExportFromCurrentBlock(SV);
1516    } else {
1517      // If the last case doesn't match, go to the default block.
1518      FallThrough = Default;
1519    }
1520
1521    Value *RHS, *LHS, *MHS;
1522    ISD::CondCode CC;
1523    if (I->High == I->Low) {
      // This is just a small case range :) containing exactly one case
1525      CC = ISD::SETEQ;
1526      LHS = SV; RHS = I->High; MHS = NULL;
1527    } else {
1528      CC = ISD::SETLE;
1529      LHS = I->Low; MHS = SV; RHS = I->High;
1530    }
1531    CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
1532
1533    // If emitting the first comparison, just call visitSwitchCase to emit the
1534    // code into the current block.  Otherwise, push the CaseBlock onto the
1535    // vector to be later processed by SDISel, and insert the node's MBB
1536    // before the next MBB.
1537    if (CurBlock == CurMBB)
1538      visitSwitchCase(CB);
1539    else
1540      SwitchCases.push_back(CB);
1541
1542    CurBlock = FallThrough;
1543  }
1544
1545  return true;
1546}
1547
1548static inline bool areJTsAllowed(const TargetLowering &TLI) {
1549  return !DisableJumpTables &&
1550          (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1551           TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
1552}
1553
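/// ComputeRange - Return the number of case values in the inclusive range
/// [First, Last], sign-extending both endpoints by one bit so the subtraction
/// cannot overflow.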
1554static APInt ComputeRange(const APInt &First, const APInt &Last) {
1555  APInt LastExt(Last), FirstExt(First);
1556  uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1557  LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
1558  return (LastExt - FirstExt + 1ULL);
1559}
1560
/// handleJTSwitchCase - Emit a jump table for the current switch case range.
1562bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec& CR,
1563                                             CaseRecVector& WorkList,
1564                                             Value* SV,
1565                                             MachineBasicBlock* Default) {
1566  Case& FrontCase = *CR.Range.first;
1567  Case& BackCase  = *(CR.Range.second-1);
1568
1569  const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
1570  const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
1571
1572  APInt TSize(First.getBitWidth(), 0);
1573  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1574       I!=E; ++I)
1575    TSize += I->size();
1576
1577  if (!areJTsAllowed(TLI) || TSize.ult(APInt(First.getBitWidth(), 4)))
1578    return false;
1579
1580  APInt Range = ComputeRange(First, Last);
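  // The density is TSize / Range; e.g. cases {0, 1, 2, 100} cover four values
  // over a span of 101, a density of roughly 0.04, far below the 40% threshold
  // checked below.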
1581  double Density = TSize.roundToDouble() / Range.roundToDouble();
1582  if (Density < 0.4)
1583    return false;
1584
1585  DEBUG(errs() << "Lowering jump table\n"
1586               << "First entry: " << First << ". Last entry: " << Last << '\n'
1587               << "Range: " << Range
1588               << "Size: " << TSize << ". Density: " << Density << "\n\n");
1589
1590  // Get the MachineFunction which holds the current MBB.  This is used when
1591  // inserting any additional MBBs necessary to represent the switch.
1592  MachineFunction *CurMF = FuncInfo.MF;
1593
1594  // Figure out which block is immediately after the current one.
1595  MachineFunction::iterator BBI = CR.CaseBB;
1596  ++BBI;
1597
1598  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1599
1600  // Create a new basic block to hold the code for loading the address
1601  // of the jump table, and jumping to it.  Update successor information;
1602  // we will either branch to the default case for the switch, or the jump
1603  // table.
1604  MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1605  CurMF->insert(BBI, JumpTableBB);
1606  CR.CaseBB->addSuccessor(Default);
1607  CR.CaseBB->addSuccessor(JumpTableBB);
1608
  // Build a vector of destination BBs, corresponding to each target
  // of the jump table. If the value of the jump table slot corresponds to
  // a case statement, push the case's BB onto the vector; otherwise, push
  // the default BB.
1613  std::vector<MachineBasicBlock*> DestBBs;
1614  APInt TEI = First;
1615  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1616    const APInt& Low = cast<ConstantInt>(I->Low)->getValue();
1617    const APInt& High = cast<ConstantInt>(I->High)->getValue();
1618
1619    if (Low.sle(TEI) && TEI.sle(High)) {
1620      DestBBs.push_back(I->BB);
1621      if (TEI==High)
1622        ++I;
1623    } else {
1624      DestBBs.push_back(Default);
1625    }
1626  }
1627
1628  // Update successor info. Add one edge to each unique successor.
1629  BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1630  for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1631         E = DestBBs.end(); I != E; ++I) {
1632    if (!SuccsHandled[(*I)->getNumber()]) {
1633      SuccsHandled[(*I)->getNumber()] = true;
1634      JumpTableBB->addSuccessor(*I);
1635    }
1636  }
1637
1638  // Create a jump table index for this jump table, or return an existing
1639  // one.
1640  unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1641
1642  // Set the jump table information so that we can codegen it as a second
1643  // MachineBasicBlock
1644  JumpTable JT(-1U, JTI, JumpTableBB, Default);
1645  JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1646  if (CR.CaseBB == CurMBB)
1647    visitJumpTableHeader(JT, JTH);
1648
1649  JTCases.push_back(JumpTableBlock(JTH, JT));
1650
1651  return true;
1652}
1653
/// handleBTSplitSwitchCase - Emit a comparison and split the binary search
/// tree into two subtrees.
1656bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
1657                                                  CaseRecVector& WorkList,
1658                                                  Value* SV,
1659                                                  MachineBasicBlock* Default) {
1660  // Get the MachineFunction which holds the current MBB.  This is used when
1661  // inserting any additional MBBs necessary to represent the switch.
1662  MachineFunction *CurMF = FuncInfo.MF;
1663
1664  // Figure out which block is immediately after the current one.
1665  MachineFunction::iterator BBI = CR.CaseBB;
1666  ++BBI;
1667
1668  Case& FrontCase = *CR.Range.first;
1669  Case& BackCase  = *(CR.Range.second-1);
1670  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1671
1672  // Size is the number of Cases represented by this range.
1673  unsigned Size = CR.Range.second - CR.Range.first;
1674
1675  const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
1676  const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
1677  double FMetric = 0;
1678  CaseItr Pivot = CR.Range.first + Size/2;
1679
  // Select the optimal pivot, maximizing the summed density of the LHS and
  // RHS. This will (heuristically) allow us to emit jump tables later.
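  // Each candidate split point between neighboring cases is scored as
  // log2(gap between the halves) times the sum of the densities of the
  // resulting left and right halves; larger gaps and denser halves win.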
1682  APInt TSize(First.getBitWidth(), 0);
1683  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1684       I!=E; ++I)
1685    TSize += I->size();
1686
1687  APInt LSize = FrontCase.size();
1688  APInt RSize = TSize-LSize;
1689  DEBUG(errs() << "Selecting best pivot: \n"
1690               << "First: " << First << ", Last: " << Last <<'\n'
1691               << "LSize: " << LSize << ", RSize: " << RSize << '\n');
1692  for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1693       J!=E; ++I, ++J) {
1694    const APInt &LEnd = cast<ConstantInt>(I->High)->getValue();
1695    const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue();
1696    APInt Range = ComputeRange(LEnd, RBegin);
1697    assert((Range - 2ULL).isNonNegative() &&
1698           "Invalid case distance");
1699    double LDensity = (double)LSize.roundToDouble() /
1700                           (LEnd - First + 1ULL).roundToDouble();
1701    double RDensity = (double)RSize.roundToDouble() /
1702                           (Last - RBegin + 1ULL).roundToDouble();
1703    double Metric = Range.logBase2()*(LDensity+RDensity);
1704    // Should always split in some non-trivial place
1705    DEBUG(errs() <<"=>Step\n"
1706                 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1707                 << "LDensity: " << LDensity
1708                 << ", RDensity: " << RDensity << '\n'
1709                 << "Metric: " << Metric << '\n');
1710    if (FMetric < Metric) {
1711      Pivot = J;
1712      FMetric = Metric;
1713      DEBUG(errs() << "Current metric set to: " << FMetric << '\n');
1714    }
1715
1716    LSize += J->size();
1717    RSize -= J->size();
1718  }
1719  if (areJTsAllowed(TLI)) {
1720    // If our case is dense we *really* should handle it earlier!
1721    assert((FMetric > 0) && "Should handle dense range earlier!");
1722  } else {
1723    Pivot = CR.Range.first + Size/2;
1724  }
1725
1726  CaseRange LHSR(CR.Range.first, Pivot);
1727  CaseRange RHSR(Pivot, CR.Range.second);
1728  Constant *C = Pivot->Low;
1729  MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1730
1731  // We know that we branch to the LHS if the Value being switched on is
1732  // less than the Pivot value, C.  We use this to optimize our binary
1733  // tree a bit, by recognizing that if SV is greater than or equal to the
1734  // LHS's Case Value, and that Case Value is exactly one less than the
1735  // Pivot's Value, then we can branch directly to the LHS's Target,
1736  // rather than creating a leaf node for it.
1737  if ((LHSR.second - LHSR.first) == 1 &&
1738      LHSR.first->High == CR.GE &&
1739      cast<ConstantInt>(C)->getValue() ==
1740      (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
1741    TrueBB = LHSR.first->BB;
1742  } else {
1743    TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1744    CurMF->insert(BBI, TrueBB);
1745    WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1746
1747    // Put SV in a virtual register to make it available from the new blocks.
1748    ExportFromCurrentBlock(SV);
1749  }
1750
1751  // Similar to the optimization above, if the Value being switched on is
1752  // known to be less than the Constant CR.LT, and the current Case Value
1753  // is CR.LT - 1, then we can branch directly to the target block for
1754  // the current Case Value, rather than emitting a RHS leaf node for it.
1755  if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1756      cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1757      (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
1758    FalseBB = RHSR.first->BB;
1759  } else {
1760    FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1761    CurMF->insert(BBI, FalseBB);
1762    WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1763
1764    // Put SV in a virtual register to make it available from the new blocks.
1765    ExportFromCurrentBlock(SV);
1766  }
1767
  // Create a CaseBlock record representing a conditional branch to
  // the LHS node if the value being switched on SV is less than C.
  // Otherwise, branch to the RHS node.
1771  CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1772
1773  if (CR.CaseBB == CurMBB)
1774    visitSwitchCase(CB);
1775  else
1776    SwitchCases.push_back(CB);
1777
1778  return true;
1779}
1780
/// handleBitTestsSwitchCase - If the current case range has few destinations
/// and spans less than the machine word bitwidth, encode the case range into
/// a series of masks and emit bit tests against those masks.
1784bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
1785                                                   CaseRecVector& WorkList,
1786                                                   Value* SV,
1787                                                   MachineBasicBlock* Default){
1788  EVT PTy = TLI.getPointerTy();
1789  unsigned IntPtrBits = PTy.getSizeInBits();
1790
1791  Case& FrontCase = *CR.Range.first;
1792  Case& BackCase  = *(CR.Range.second-1);
1793
1794  // Get the MachineFunction which holds the current MBB.  This is used when
1795  // inserting any additional MBBs necessary to represent the switch.
1796  MachineFunction *CurMF = FuncInfo.MF;
1797
1798  // If target does not have legal shift left, do not emit bit tests at all.
1799  if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
1800    return false;
1801
1802  size_t numCmps = 0;
1803  for (CaseItr I = CR.Range.first, E = CR.Range.second;
1804       I!=E; ++I) {
    // A single case counts as one comparison, a case range as two.
1806    numCmps += (I->Low == I->High ? 1 : 2);
1807  }
1808
1809  // Count unique destinations
1810  SmallSet<MachineBasicBlock*, 4> Dests;
1811  for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1812    Dests.insert(I->BB);
1813    if (Dests.size() > 3)
      // Don't bother the code below if there are too many unique destinations
1815      return false;
1816  }
1817  DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n'
1818               << "Total number of comparisons: " << numCmps << '\n');
1819
1820  // Compute span of values.
1821  const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1822  const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1823  APInt cmpRange = maxValue - minValue;
1824
1825  DEBUG(errs() << "Compare range: " << cmpRange << '\n'
1826               << "Low bound: " << minValue << '\n'
1827               << "High bound: " << maxValue << '\n');
1828
1829  if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1830      (!(Dests.size() == 1 && numCmps >= 3) &&
1831       !(Dests.size() == 2 && numCmps >= 5) &&
1832       !(Dests.size() >= 3 && numCmps >= 6)))
1833    return false;
1834
1835  DEBUG(errs() << "Emitting bit tests\n");
1836  APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1837
  // Optimize the case where all the case values already fit in a machine word
  // as bit positions without subtracting minValue; the subtraction can then
  // be skipped entirely.
1841  if (minValue.isNonNegative() &&
1842      maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
1843    cmpRange = maxValue;
1844  } else {
1845    lowBound = minValue;
1846  }
1847
1848  CaseBitsVector CasesBits;
1849  unsigned i, count = 0;
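  // Build one mask per destination block. For example, with lowBound 0, cases
  // 1, 3 and 5 that share a destination become a single CaseBits entry with
  // Mask 0b101010 and Bits 3.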
1850
1851  for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1852    MachineBasicBlock* Dest = I->BB;
1853    for (i = 0; i < count; ++i)
1854      if (Dest == CasesBits[i].BB)
1855        break;
1856
1857    if (i == count) {
1858      assert((count < 3) && "Too much destinations to test!");
1859      CasesBits.push_back(CaseBits(0, Dest, 0));
1860      count++;
1861    }
1862
1863    const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
1864    const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
1865
1866    uint64_t lo = (lowValue - lowBound).getZExtValue();
1867    uint64_t hi = (highValue - lowBound).getZExtValue();
1868
1869    for (uint64_t j = lo; j <= hi; j++) {
1870      CasesBits[i].Mask |=  1ULL << j;
1871      CasesBits[i].Bits++;
1872    }
1873
1874  }
1875  std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
1876
1877  BitTestInfo BTC;
1878
1879  // Figure out which block is immediately after the current one.
1880  MachineFunction::iterator BBI = CR.CaseBB;
1881  ++BBI;
1882
1883  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1884
1885  DEBUG(errs() << "Cases:\n");
1886  for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
1887    DEBUG(errs() << "Mask: " << CasesBits[i].Mask
1888                 << ", Bits: " << CasesBits[i].Bits
1889                 << ", BB: " << CasesBits[i].BB << '\n');
1890
1891    MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1892    CurMF->insert(BBI, CaseBB);
1893    BTC.push_back(BitTestCase(CasesBits[i].Mask,
1894                              CaseBB,
1895                              CasesBits[i].BB));
1896
1897    // Put SV in a virtual register to make it available from the new blocks.
1898    ExportFromCurrentBlock(SV);
1899  }
1900
1901  BitTestBlock BTB(lowBound, cmpRange, SV,
1902                   -1U, (CR.CaseBB == CurMBB),
1903                   CR.CaseBB, Default, BTC);
1904
1905  if (CR.CaseBB == CurMBB)
1906    visitBitTestHeader(BTB);
1907
1908  BitTestCases.push_back(BTB);
1909
1910  return true;
1911}
1912
1913
/// Clusterify - Transform a simple list of Cases into a list of CaseRanges,
/// merging adjacent cases that branch to the same destination.
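/// For example, cases 1, 2 and 3 branching to one block and case 10 branching
/// to another become the clusters [1,3] and [10,10], which need three
/// compares in total (two for the range plus one for the singleton).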
1915size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
1916                                       const SwitchInst& SI) {
1917  size_t numCmps = 0;
1918
1919  // Start with "simple" cases
1920  for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
1921    MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
1922    Cases.push_back(Case(SI.getSuccessorValue(i),
1923                         SI.getSuccessorValue(i),
1924                         SMBB));
1925  }
1926  std::sort(Cases.begin(), Cases.end(), CaseCmp());
1927
  // Merge cases into clusters
1929  if (Cases.size() >= 2)
1930    // Must recompute end() each iteration because it may be
1931    // invalidated by erase if we hold on to it
1932    for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
1933      const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
1934      const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
1935      MachineBasicBlock* nextBB = J->BB;
1936      MachineBasicBlock* currentBB = I->BB;
1937
1938      // If the two neighboring cases go to the same destination, merge them
1939      // into a single case.
1940      if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
1941        I->High = J->High;
1942        J = Cases.erase(J);
1943      } else {
1944        I = J++;
1945      }
1946    }
1947
1948  for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
1949    if (I->Low != I->High)
1950      // A range counts double, since it requires two compares.
1951      ++numCmps;
1952  }
1953
1954  return numCmps;
1955}
1956
1957void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) {
  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != FuncInfo.MF->end())
    NextBlock = BBI;

  MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
1962
1963  // If there is only the default destination, branch to it if it is not the
1964  // next basic block.  Otherwise, just fall through.
1965  if (SI.getNumOperands() == 2) {
1966    // Update machine-CFG edges.
1967
1968    // If this is not a fall-through branch, emit the branch.
1969    CurMBB->addSuccessor(Default);
1970    if (Default != NextBlock) {
1971      SDValue Val = DAG.getNode(ISD::BR, getCurDebugLoc(),
1972                                MVT::Other, getControlRoot(),
1973                                DAG.getBasicBlock(Default));
1974      DAG.setRoot(Val);
1975
1976      if (DisableScheduling)
1977        DAG.AssignOrdering(Val.getNode(), SDNodeOrder);
1978    }
1979
1980    return;
1981  }
1982
1983  // If there are any non-default case statements, create a vector of Cases
1984  // representing each one, and sort the vector so that we can efficiently
1985  // create a binary search tree from them.
1986  CaseVector Cases;
1987  size_t numCmps = Clusterify(Cases, SI);
1988  DEBUG(errs() << "Clusterify finished. Total clusters: " << Cases.size()
1989               << ". Total compares: " << numCmps << '\n');
1990  numCmps = 0;
1991
1992  // Get the Value to be switched on and default basic blocks, which will be
1993  // inserted into CaseBlock records, representing basic blocks in the binary
1994  // search tree.
1995  Value *SV = SI.getOperand(0);
1996
1997  // Push the initial CaseRec onto the worklist
1998  CaseRecVector WorkList;
1999  WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2000
2001  while (!WorkList.empty()) {
2002    // Grab a record representing a case range to process off the worklist
2003    CaseRec CR = WorkList.back();
2004    WorkList.pop_back();
2005
2006    if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2007      continue;
2008
    // If the range has few cases (three or fewer), emit a series of specific
    // tests.
2011    if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2012      continue;
2013
    // If the switch has at least four cases, is at least 40% dense, and the
    // target supports jump tables, emit a jump table rather than lowering
    // the switch to a binary tree of conditional branches.
2017    if (handleJTSwitchCase(CR, WorkList, SV, Default))
2018      continue;
2019
    // Emit a binary tree. We need to pick a pivot, and push left and right
    // ranges onto the worklist. Leaves are handled via handleSmallSwitchRange().
2022    handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2023  }
2024}
2025
2026void SelectionDAGBuilder::visitIndirectBr(IndirectBrInst &I) {
2027  // Update machine-CFG edges.
2028  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i)
2029    CurMBB->addSuccessor(FuncInfo.MBBMap[I.getSuccessor(i)]);
2030
2031  SDValue Res = DAG.getNode(ISD::BRIND, getCurDebugLoc(),
2032                            MVT::Other, getControlRoot(),
2033                            getValue(I.getAddress()));
2034  DAG.setRoot(Res);
2035
2036  if (DisableScheduling)
2037    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2038}
2039
2040void SelectionDAGBuilder::visitFSub(User &I) {
2041  // -0.0 - X --> fneg
2042  const Type *Ty = I.getType();
2043  if (isa<VectorType>(Ty)) {
2044    if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2045      const VectorType *DestTy = cast<VectorType>(I.getType());
2046      const Type *ElTy = DestTy->getElementType();
2047      unsigned VL = DestTy->getNumElements();
2048      std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2049      Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
2050      if (CV == CNZ) {
2051        SDValue Op2 = getValue(I.getOperand(1));
2052        SDValue Res = DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2053                                  Op2.getValueType(), Op2);
2054        setValue(&I, Res);
2055
2056        if (DisableScheduling)
2057          DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2058
2059        return;
2060      }
2061    }
2062  }
2063
2064  if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2065    if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2066      SDValue Op2 = getValue(I.getOperand(1));
2067      SDValue Res = DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2068                                Op2.getValueType(), Op2);
2069      setValue(&I, Res);
2070
2071      if (DisableScheduling)
2072        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2073
2074      return;
2075    }
2076
2077  visitBinary(I, ISD::FSUB);
2078}
2079
2080void SelectionDAGBuilder::visitBinary(User &I, unsigned OpCode) {
2081  SDValue Op1 = getValue(I.getOperand(0));
2082  SDValue Op2 = getValue(I.getOperand(1));
2083  SDValue Res = DAG.getNode(OpCode, getCurDebugLoc(),
2084                            Op1.getValueType(), Op1, Op2);
2085  setValue(&I, Res);
2086
2087  if (DisableScheduling)
2088    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2089}
2090
2091void SelectionDAGBuilder::visitShift(User &I, unsigned Opcode) {
2092  SDValue Op1 = getValue(I.getOperand(0));
2093  SDValue Op2 = getValue(I.getOperand(1));
2094  if (!isa<VectorType>(I.getType()) &&
2095      Op2.getValueType() != TLI.getShiftAmountTy()) {
2096    // If the operand is smaller than the shift count type, promote it.
2097    EVT PTy = TLI.getPointerTy();
2098    EVT STy = TLI.getShiftAmountTy();
2099    if (STy.bitsGT(Op2.getValueType()))
2100      Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2101                        TLI.getShiftAmountTy(), Op2);
2102    // If the operand is larger than the shift count type but the shift
2103    // count type has enough bits to represent any shift value, truncate
2104    // it now. This is a common case and it exposes the truncate to
2105    // optimization early.
2106    else if (STy.getSizeInBits() >=
2107             Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2108      Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2109                        TLI.getShiftAmountTy(), Op2);
2110    // Otherwise we'll need to temporarily settle for some other
2111    // convenient type; type legalization will make adjustments as
2112    // needed.
2113    else if (PTy.bitsLT(Op2.getValueType()))
2114      Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2115                        TLI.getPointerTy(), Op2);
2116    else if (PTy.bitsGT(Op2.getValueType()))
2117      Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2118                        TLI.getPointerTy(), Op2);
2119  }
2120
2121  SDValue Res = DAG.getNode(Opcode, getCurDebugLoc(),
2122                            Op1.getValueType(), Op1, Op2);
2123  setValue(&I, Res);
2124
2125  if (DisableScheduling)
2126    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2127}
2128
2129void SelectionDAGBuilder::visitICmp(User &I) {
2130  ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2131  if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2132    predicate = IC->getPredicate();
2133  else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2134    predicate = ICmpInst::Predicate(IC->getPredicate());
2135  SDValue Op1 = getValue(I.getOperand(0));
2136  SDValue Op2 = getValue(I.getOperand(1));
2137  ISD::CondCode Opcode = getICmpCondCode(predicate);
2138
2139  EVT DestVT = TLI.getValueType(I.getType());
2140  SDValue Res = DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode);
2141  setValue(&I, Res);
2142
2143  if (DisableScheduling)
2144    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2145}
2146
2147void SelectionDAGBuilder::visitFCmp(User &I) {
2148  FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2149  if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2150    predicate = FC->getPredicate();
2151  else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2152    predicate = FCmpInst::Predicate(FC->getPredicate());
2153  SDValue Op1 = getValue(I.getOperand(0));
2154  SDValue Op2 = getValue(I.getOperand(1));
2155  ISD::CondCode Condition = getFCmpCondCode(predicate);
2156  EVT DestVT = TLI.getValueType(I.getType());
2157  SDValue Res = DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition);
2158  setValue(&I, Res);
2159
2160  if (DisableScheduling)
2161    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2162}
2163
2164void SelectionDAGBuilder::visitSelect(User &I) {
2165  SmallVector<EVT, 4> ValueVTs;
2166  ComputeValueVTs(TLI, I.getType(), ValueVTs);
2167  unsigned NumValues = ValueVTs.size();
2168  if (NumValues == 0) return;
2169
2170  SmallVector<SDValue, 4> Values(NumValues);
2171  SDValue Cond     = getValue(I.getOperand(0));
2172  SDValue TrueVal  = getValue(I.getOperand(1));
2173  SDValue FalseVal = getValue(I.getOperand(2));
2174
2175  for (unsigned i = 0; i != NumValues; ++i) {
2176    Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2177                            TrueVal.getNode()->getValueType(i), Cond,
2178                            SDValue(TrueVal.getNode(),
2179                                    TrueVal.getResNo() + i),
2180                            SDValue(FalseVal.getNode(),
2181                                    FalseVal.getResNo() + i));
2182
2183    if (DisableScheduling)
2184      DAG.AssignOrdering(Values[i].getNode(), SDNodeOrder);
2185  }
2186
2187  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2188                            DAG.getVTList(&ValueVTs[0], NumValues),
2189                            &Values[0], NumValues);
2190  setValue(&I, Res);
2191
2192  if (DisableScheduling)
2193    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2194}
2195
2196void SelectionDAGBuilder::visitTrunc(User &I) {
2197  // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2198  SDValue N = getValue(I.getOperand(0));
2199  EVT DestVT = TLI.getValueType(I.getType());
2200  SDValue Res = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N);
2201  setValue(&I, Res);
2202
2203  if (DisableScheduling)
2204    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2205}
2206
2207void SelectionDAGBuilder::visitZExt(User &I) {
2208  // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // ZExt also can't be a cast to bool for the same reason; nothing much to do.
2210  SDValue N = getValue(I.getOperand(0));
2211  EVT DestVT = TLI.getValueType(I.getType());
2212  SDValue Res = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N);
2213  setValue(&I, Res);
2214
2215  if (DisableScheduling)
2216    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2217}
2218
2219void SelectionDAGBuilder::visitSExt(User &I) {
2220  // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // SExt also can't be a cast to bool for the same reason; nothing much to do.
2222  SDValue N = getValue(I.getOperand(0));
2223  EVT DestVT = TLI.getValueType(I.getType());
2224  SDValue Res = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N);
2225  setValue(&I, Res);
2226
2227  if (DisableScheduling)
2228    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2229}
2230
2231void SelectionDAGBuilder::visitFPTrunc(User &I) {
2232  // FPTrunc is never a no-op cast, no need to check
2233  SDValue N = getValue(I.getOperand(0));
2234  EVT DestVT = TLI.getValueType(I.getType());
2235  SDValue Res = DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2236                            DestVT, N, DAG.getIntPtrConstant(0));
2237  setValue(&I, Res);
2238
2239  if (DisableScheduling)
2240    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2241}
2242
2243void SelectionDAGBuilder::visitFPExt(User &I){
  // FPExt is never a no-op cast, no need to check
2245  SDValue N = getValue(I.getOperand(0));
2246  EVT DestVT = TLI.getValueType(I.getType());
2247  SDValue Res = DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N);
2248  setValue(&I, Res);
2249
2250  if (DisableScheduling)
2251    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2252}
2253
2254void SelectionDAGBuilder::visitFPToUI(User &I) {
2255  // FPToUI is never a no-op cast, no need to check
2256  SDValue N = getValue(I.getOperand(0));
2257  EVT DestVT = TLI.getValueType(I.getType());
2258  SDValue Res = DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N);
2259  setValue(&I, Res);
2260
2261  if (DisableScheduling)
2262    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2263}
2264
2265void SelectionDAGBuilder::visitFPToSI(User &I) {
2266  // FPToSI is never a no-op cast, no need to check
2267  SDValue N = getValue(I.getOperand(0));
2268  EVT DestVT = TLI.getValueType(I.getType());
2269  SDValue Res = DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N);
2270  setValue(&I, Res);
2271
2272  if (DisableScheduling)
2273    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2274}
2275
2276void SelectionDAGBuilder::visitUIToFP(User &I) {
2277  // UIToFP is never a no-op cast, no need to check
2278  SDValue N = getValue(I.getOperand(0));
2279  EVT DestVT = TLI.getValueType(I.getType());
2280  SDValue Res = DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N);
2281  setValue(&I, Res);
2282
2283  if (DisableScheduling)
2284    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2285}
2286
2287void SelectionDAGBuilder::visitSIToFP(User &I){
2288  // SIToFP is never a no-op cast, no need to check
2289  SDValue N = getValue(I.getOperand(0));
2290  EVT DestVT = TLI.getValueType(I.getType());
2291  SDValue Res = DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N);
2292  setValue(&I, Res);
2293
2294  if (DisableScheduling)
2295    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2296}
2297
2298void SelectionDAGBuilder::visitPtrToInt(User &I) {
2299  // What to do depends on the size of the integer and the size of the pointer.
2300  // We can either truncate, zero extend, or no-op, accordingly.
2301  SDValue N = getValue(I.getOperand(0));
2302  EVT SrcVT = N.getValueType();
2303  EVT DestVT = TLI.getValueType(I.getType());
2304  SDValue Res = DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT);
2305  setValue(&I, Res);
2306
2307  if (DisableScheduling)
2308    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2309}
2310
2311void SelectionDAGBuilder::visitIntToPtr(User &I) {
2312  // What to do depends on the size of the integer and the size of the pointer.
2313  // We can either truncate, zero extend, or no-op, accordingly.
2314  SDValue N = getValue(I.getOperand(0));
2315  EVT SrcVT = N.getValueType();
2316  EVT DestVT = TLI.getValueType(I.getType());
2317  SDValue Res = DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT);
2318  setValue(&I, Res);
2319
2320  if (DisableScheduling)
2321    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2322}
2323
2324void SelectionDAGBuilder::visitBitCast(User &I) {
2325  SDValue N = getValue(I.getOperand(0));
2326  EVT DestVT = TLI.getValueType(I.getType());
2327
  // BitCast assures us that the source and destination are the same size, so
  // this is either a BIT_CONVERT or a no-op.
2330  if (DestVT != N.getValueType()) {
2331    SDValue Res = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2332                              DestVT, N); // convert types.
2333    setValue(&I, Res);
2334
2335    if (DisableScheduling)
2336      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2337  } else {
2338    setValue(&I, N);            // noop cast.
2339  }
2340}
2341
2342void SelectionDAGBuilder::visitInsertElement(User &I) {
2343  SDValue InVec = getValue(I.getOperand(0));
2344  SDValue InVal = getValue(I.getOperand(1));
2345  SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2346                                TLI.getPointerTy(),
2347                                getValue(I.getOperand(2)));
2348  SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2349                            TLI.getValueType(I.getType()),
2350                            InVec, InVal, InIdx);
2351  setValue(&I, Res);
2352
2353  if (DisableScheduling)
2354    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2355}
2356
2357void SelectionDAGBuilder::visitExtractElement(User &I) {
2358  SDValue InVec = getValue(I.getOperand(0));
2359  SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2360                                TLI.getPointerTy(),
2361                                getValue(I.getOperand(1)));
2362  SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2363                            TLI.getValueType(I.getType()), InVec, InIdx);
2364  setValue(&I, Res);
2365
2366  if (DisableScheduling)
2367    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2368}
2369
2370
// Utility for visitShuffleVector - Returns true if the mask is a sequential
// mask starting from SIndx and increasing to the element length (undefs are
// allowed).
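// For example, <0, 1, 2, 3> is sequential from index 0 and <4, -1, 6, 7> is
// sequential from index 4.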
2373static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
2374  unsigned MaskNumElts = Mask.size();
2375  for (unsigned i = 0; i != MaskNumElts; ++i)
2376    if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
2377      return false;
2378  return true;
2379}
2380
2381void SelectionDAGBuilder::visitShuffleVector(User &I) {
2382  SmallVector<int, 8> Mask;
2383  SDValue Src1 = getValue(I.getOperand(0));
2384  SDValue Src2 = getValue(I.getOperand(1));
2385
2386  // Convert the ConstantVector mask operand into an array of ints, with -1
2387  // representing undef values.
2388  SmallVector<Constant*, 8> MaskElts;
2389  cast<Constant>(I.getOperand(2))->getVectorElements(*DAG.getContext(),
2390                                                     MaskElts);
2391  unsigned MaskNumElts = MaskElts.size();
2392  for (unsigned i = 0; i != MaskNumElts; ++i) {
2393    if (isa<UndefValue>(MaskElts[i]))
2394      Mask.push_back(-1);
2395    else
2396      Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
2397  }
2398
2399  EVT VT = TLI.getValueType(I.getType());
2400  EVT SrcVT = Src1.getValueType();
2401  unsigned SrcNumElts = SrcVT.getVectorNumElements();
2402
2403  if (SrcNumElts == MaskNumElts) {
2404    SDValue Res = DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2405                                       &Mask[0]);
2406    setValue(&I, Res);
2407
2408    if (DisableScheduling)
2409      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2410
2411    return;
2412  }
2413
2414  // Normalize the shuffle vector since mask and vector length don't match.
2415  if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
    // The mask is longer than the source vectors, and its length is a multiple
    // of the source vector length.  We can use CONCAT_VECTORS to make the
    // source vectors and the mask the same length.
2419    if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2420      // The shuffle is concatenating two vectors together.
2421      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2422                                VT, Src1, Src2);
2423      setValue(&I, Res);
2424
2425      if (DisableScheduling)
2426        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2427
2428      return;
2429    }
2430
2431    // Pad both vectors with undefs to make them the same length as the mask.
2432    unsigned NumConcat = MaskNumElts / SrcNumElts;
2433    bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2434    bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2435    SDValue UndefVal = DAG.getUNDEF(SrcVT);
2436
2437    SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
2438    SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
2439    MOps1[0] = Src1;
2440    MOps2[0] = Src2;
2441
2442    Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2443                                                  getCurDebugLoc(), VT,
2444                                                  &MOps1[0], NumConcat);
2445    Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2446                                                  getCurDebugLoc(), VT,
2447                                                  &MOps2[0], NumConcat);
2448
2449    // Readjust mask for new input vector length.
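    // For example, with 4-element sources and an 8-element mask, index 5
    // (element 1 of Src2) becomes 5 + 8 - 4 = 9, element 1 of the widened Src2.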
2450    SmallVector<int, 8> MappedOps;
2451    for (unsigned i = 0; i != MaskNumElts; ++i) {
2452      int Idx = Mask[i];
2453      if (Idx < (int)SrcNumElts)
2454        MappedOps.push_back(Idx);
2455      else
2456        MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
2457    }
2458
2459    SDValue Res = DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2460                                       &MappedOps[0]);
2461    setValue(&I, Res);
2462
2463    if (DisableScheduling)
2464      DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2465
2466    return;
2467  }
2468
2469  if (SrcNumElts > MaskNumElts) {
    // Analyze the access pattern of the vector to see if we can extract
    // two subvectors and do the shuffle. The analysis is done by calculating
    // the range of elements the mask accesses in each source vector.
2473    int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2474    int MaxRange[2] = {-1, -1};
2475
2476    for (unsigned i = 0; i != MaskNumElts; ++i) {
2477      int Idx = Mask[i];
2478      int Input = 0;
2479      if (Idx < 0)
2480        continue;
2481
2482      if (Idx >= (int)SrcNumElts) {
2483        Input = 1;
2484        Idx -= SrcNumElts;
2485      }
2486      if (Idx > MaxRange[Input])
2487        MaxRange[Input] = Idx;
2488      if (Idx < MinRange[Input])
2489        MinRange[Input] = Idx;
2490    }
2491
    // Check whether the accessed range is smaller than the vector size and
    // whether we can find a reasonable extract index.
    int RangeUse[2] = { 2, 2 };  // 0 = Unused, 1 = Extract, 2 = Cannot extract.
2495    int StartIdx[2];  // StartIdx to extract from
2496    for (int Input=0; Input < 2; ++Input) {
2497      if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
2498        RangeUse[Input] = 0; // Unused
2499        StartIdx[Input] = 0;
2500      } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
2501        // Fits within range but we should see if we can find a good
2502        // start index that is a multiple of the mask length.
2503        if (MaxRange[Input] < (int)MaskNumElts) {
2504          RangeUse[Input] = 1; // Extract from beginning of the vector
2505          StartIdx[Input] = 0;
2506        } else {
2507          StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2508          if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2509              StartIdx[Input] + MaskNumElts < SrcNumElts)
2510            RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2511        }
2512      }
2513    }
2514
2515    if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2516      SDValue Res = DAG.getUNDEF(VT);
2517      setValue(&I, Res);  // Vectors are not used.
2518
2519      if (DisableScheduling)
2520        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2521
2522      return;
2523    }
2524    else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2525      // Extract appropriate subvector and generate a vector shuffle
2526      for (int Input=0; Input < 2; ++Input) {
2527        SDValue& Src = Input == 0 ? Src1 : Src2;
2528        if (RangeUse[Input] == 0)
2529          Src = DAG.getUNDEF(VT);
2530        else
2531          Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
2532                            Src, DAG.getIntPtrConstant(StartIdx[Input]));
2533
2534        if (DisableScheduling)
2535          DAG.AssignOrdering(Src.getNode(), SDNodeOrder);
2536      }
2537
2538      // Calculate new mask.
2539      SmallVector<int, 8> MappedOps;
2540      for (unsigned i = 0; i != MaskNumElts; ++i) {
2541        int Idx = Mask[i];
2542        if (Idx < 0)
2543          MappedOps.push_back(Idx);
2544        else if (Idx < (int)SrcNumElts)
2545          MappedOps.push_back(Idx - StartIdx[0]);
2546        else
2547          MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
2548      }
2549
2550      SDValue Res = DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2551                                         &MappedOps[0]);
2552      setValue(&I, Res);
2553
2554      if (DisableScheduling)
2555        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2556
2557      return;
2558    }
2559  }
2560
  // We can't use either concat vectors or extract subvectors, so fall back to
  // replacing the shuffle with individual element extracts and a build vector.
2564  EVT EltVT = VT.getVectorElementType();
2565  EVT PtrVT = TLI.getPointerTy();
2566  SmallVector<SDValue,8> Ops;
2567  for (unsigned i = 0; i != MaskNumElts; ++i) {
2568    if (Mask[i] < 0) {
2569      Ops.push_back(DAG.getUNDEF(EltVT));
2570    } else {
2571      int Idx = Mask[i];
2572      SDValue Res;
2573
2574      if (Idx < (int)SrcNumElts)
2575        Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2576                          EltVT, Src1, DAG.getConstant(Idx, PtrVT));
2577      else
2578        Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2579                          EltVT, Src2,
2580                          DAG.getConstant(Idx - SrcNumElts, PtrVT));
2581
2582      Ops.push_back(Res);
2583
2584      if (DisableScheduling)
2585        DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2586    }
2587  }
2588
2589  SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2590                            VT, &Ops[0], Ops.size());
2591  setValue(&I, Res);
2592
2593  if (DisableScheduling)
2594    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2595}
2596
2597void SelectionDAGBuilder::visitInsertValue(InsertValueInst &I) {
2598  const Value *Op0 = I.getOperand(0);
2599  const Value *Op1 = I.getOperand(1);
2600  const Type *AggTy = I.getType();
2601  const Type *ValTy = Op1->getType();
2602  bool IntoUndef = isa<UndefValue>(Op0);
2603  bool FromUndef = isa<UndefValue>(Op1);
2604
2605  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2606                                            I.idx_begin(), I.idx_end());
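  // LinearIndex is the position of the inserted value among the aggregate's
  // flattened leaf values; e.g. in {i32, {float, double}} the index path 1, 0
  // names the float, giving a LinearIndex of 1.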
2607
2608  SmallVector<EVT, 4> AggValueVTs;
2609  ComputeValueVTs(TLI, AggTy, AggValueVTs);
2610  SmallVector<EVT, 4> ValValueVTs;
2611  ComputeValueVTs(TLI, ValTy, ValValueVTs);
2612
2613  unsigned NumAggValues = AggValueVTs.size();
2614  unsigned NumValValues = ValValueVTs.size();
2615  SmallVector<SDValue, 4> Values(NumAggValues);
2616
2617  SDValue Agg = getValue(Op0);
2618  SDValue Val = getValue(Op1);
2619  unsigned i = 0;
2620  // Copy the beginning value(s) from the original aggregate.
2621  for (; i != LinearIndex; ++i)
2622    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2623                SDValue(Agg.getNode(), Agg.getResNo() + i);
2624  // Copy values from the inserted value(s).
2625  for (; i != LinearIndex + NumValValues; ++i)
2626    Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2627                SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2628  // Copy remaining value(s) from the original aggregate.
2629  for (; i != NumAggValues; ++i)
2630    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2631                SDValue(Agg.getNode(), Agg.getResNo() + i);
2632
2633  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2634                            DAG.getVTList(&AggValueVTs[0], NumAggValues),
2635                            &Values[0], NumAggValues);
2636  setValue(&I, Res);
2637
2638  if (DisableScheduling)
2639    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2640}
2641
2642void SelectionDAGBuilder::visitExtractValue(ExtractValueInst &I) {
2643  const Value *Op0 = I.getOperand(0);
2644  const Type *AggTy = Op0->getType();
2645  const Type *ValTy = I.getType();
2646  bool OutOfUndef = isa<UndefValue>(Op0);
2647
2648  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2649                                            I.idx_begin(), I.idx_end());
2650
2651  SmallVector<EVT, 4> ValValueVTs;
2652  ComputeValueVTs(TLI, ValTy, ValValueVTs);
2653
2654  unsigned NumValValues = ValValueVTs.size();
2655  SmallVector<SDValue, 4> Values(NumValValues);
2656
2657  SDValue Agg = getValue(Op0);
2658  // Copy out the selected value(s).
2659  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2660    Values[i - LinearIndex] =
2661      OutOfUndef ?
2662        DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2663        SDValue(Agg.getNode(), Agg.getResNo() + i);
2664
2665  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2666                            DAG.getVTList(&ValValueVTs[0], NumValValues),
2667                            &Values[0], NumValValues);
2668  setValue(&I, Res);
2669
2670  if (DisableScheduling)
2671    DAG.AssignOrdering(Res.getNode(), SDNodeOrder);
2672}
2673
2674
2675void SelectionDAGBuilder::visitGetElementPtr(User &I) {
2676  SDValue N = getValue(I.getOperand(0));
2677  const Type *Ty = I.getOperand(0)->getType();
2678
2679  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2680       OI != E; ++OI) {
2681    Value *Idx = *OI;
2682    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2683      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2684      if (Field) {
2685        // N = N + Offset
2686        uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2687        N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2688                        DAG.getIntPtrConstant(Offset));
2689      }
2690      Ty = StTy->getElementType(Field);
2691    } else {
2692      Ty = cast<SequentialType>(Ty)->getElementType();
2693
2694      // If this is a constant subscript, handle it quickly.
2695      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2696        if (CI->getZExtValue() == 0) continue;
2697        uint64_t Offs =
2698            TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
2699        SDValue OffsVal;
2700        EVT PTy = TLI.getPointerTy();
2701        unsigned PtrBits = PTy.getSizeInBits();
2702        if (PtrBits < 64) {
2703          OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2704                                TLI.getPointerTy(),
2705                                DAG.getConstant(Offs, MVT::i64));
2706        } else
2707          OffsVal = DAG.getIntPtrConstant(Offs);
2708        N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2709                        OffsVal);
2710        continue;
2711      }
2712
2713      // N = N + Idx * ElementSize;
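      // For example, indexing into an array of i32 scales the index by 4,
      // which the power-of-two path below turns into a shift left by 2.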
2714      APInt ElementSize = APInt(TLI.getPointerTy().getSizeInBits(),
2715                                TD->getTypeAllocSize(Ty));
2716      SDValue IdxN = getValue(Idx);
2717
2718      // If the index is smaller or larger than intptr_t, truncate or extend
2719      // it.
2720      IdxN = DAG.getSExtOrTrunc(IdxN, getCurDebugLoc(), N.getValueType());
2721
2722      // If this is a multiply by a power of two, turn it into a shl
2723      // immediately.  This is a very common case.
2724      if (ElementSize != 1) {
2725        if (ElementSize.isPowerOf2()) {
2726          unsigned Amt = ElementSize.logBase2();
2727          IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2728                             N.getValueType(), IdxN,
2729                             DAG.getConstant(Amt, TLI.getPointerTy()));
2730        } else {
2731          SDValue Scale = DAG.getConstant(ElementSize, TLI.getPointerTy());
2732          IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
2733                             N.getValueType(), IdxN, Scale);
2734        }
2735      }
2736
2737      N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2738                      N.getValueType(), N, IdxN);
2739    }
2740  }
2741  setValue(&I, N);
2742}
2743
2744void SelectionDAGBuilder::visitAlloca(AllocaInst &I) {
2745  // If this is a fixed sized alloca in the entry block of the function,
2746  // allocate it statically on the stack.
2747  if (FuncInfo.StaticAllocaMap.count(&I))
2748    return;   // getValue will auto-populate this.
2749
2750  const Type *Ty = I.getAllocatedType();
2751  uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
2752  unsigned Align =
2753    std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2754             I.getAlignment());
2755
2756  SDValue AllocSize = getValue(I.getArraySize());
2757
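  // The number of bytes to allocate is the array length times the
  // allocation size of a single element.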
2758  AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
2759                          AllocSize,
2760                          DAG.getConstant(TySize, AllocSize.getValueType()));
2761
2762
2763
2764  EVT IntPtr = TLI.getPointerTy();
2765  AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
2766
2767  // Handle alignment.  If the requested alignment is less than or equal to
2768  // the stack alignment, ignore it.  If it is greater than the stack
2769  // alignment, we note this in the DYNAMIC_STACKALLOC node.
2770  unsigned StackAlign =
2771    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2772  if (Align <= StackAlign)
2773    Align = 0;
2774
2775  // Round the size of the allocation up to the stack alignment size
2776  // Round the size of the allocation up to the stack alignment size
2777  // by adding StackAlign-1 to the size.
2778                          AllocSize.getValueType(), AllocSize,
2779                          DAG.getIntPtrConstant(StackAlign-1));
2780  // Mask out the low bits for alignment purposes.
2781  AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2782                          AllocSize.getValueType(), AllocSize,
2783                          DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
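  // E.g. with StackAlign == 16, a 20-byte request has now been rounded up to
  // (20 + 15) & ~15 == 32 bytes.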
2784
2785  SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2786  SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2787  SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
2788                            VTs, Ops, 3);
2789  setValue(&I, DSA);
2790  DAG.setRoot(DSA.getValue(1));
2791
2792  // Inform the Frame Information that we have just allocated a variable-sized
2793  // object.
2794  FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject();
2795}
2796
2797void SelectionDAGBuilder::visitLoad(LoadInst &I) {
2798  const Value *SV = I.getOperand(0);
2799  SDValue Ptr = getValue(SV);
2800
2801  const Type *Ty = I.getType();
2802  bool isVolatile = I.isVolatile();
2803  unsigned Alignment = I.getAlignment();
2804
2805  SmallVector<EVT, 4> ValueVTs;
2806  SmallVector<uint64_t, 4> Offsets;
2807  ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
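  // An aggregate-typed load is split into one scalar load per member;
  // Offsets holds each member's byte offset from the base pointer.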
2808  unsigned NumValues = ValueVTs.size();
2809  if (NumValues == 0)
2810    return;
2811
2812  SDValue Root;
2813  bool ConstantMemory = false;
2814  if (I.isVolatile())
2815    // Serialize volatile loads with other side effects.
2816    Root = getRoot();
2817  else if (AA->pointsToConstantMemory(SV)) {
2818    // Do not serialize (non-volatile) loads of constant memory with anything.
2819    Root = DAG.getEntryNode();
2820    ConstantMemory = true;
2821  } else {
2822    // Do not serialize non-volatile loads against each other.
2823    Root = DAG.getRoot();
2824  }
2825
2826  SmallVector<SDValue, 4> Values(NumValues);
2827  SmallVector<SDValue, 4> Chains(NumValues);
2828  EVT PtrVT = Ptr.getValueType();
2829  for (unsigned i = 0; i != NumValues; ++i) {
2830    SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
2831                            DAG.getNode(ISD::ADD, getCurDebugLoc(),
2832                                        PtrVT, Ptr,
2833                                        DAG.getConstant(Offsets[i], PtrVT)),
2834                            SV, Offsets[i], isVolatile, Alignment);
2835    Values[i] = L;
2836    Chains[i] = L.getValue(1);
2837  }
2838
2839  if (!ConstantMemory) {
2840    SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2841                                  MVT::Other,
2842                                  &Chains[0], NumValues);
2843    if (isVolatile)
2844      DAG.setRoot(Chain);
2845    else
2846      PendingLoads.push_back(Chain);
2847  }
2848
2849  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2850                           DAG.getVTList(&ValueVTs[0], NumValues),
2851                           &Values[0], NumValues));
2852}
2853
2854
2855void SelectionDAGBuilder::visitStore(StoreInst &I) {
2856  Value *SrcV = I.getOperand(0);
2857  Value *PtrV = I.getOperand(1);
2858
2859  SmallVector<EVT, 4> ValueVTs;
2860  SmallVector<uint64_t, 4> Offsets;
2861  ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
2862  unsigned NumValues = ValueVTs.size();
2863  if (NumValues == 0)
2864    return;
2865
2866  // Get the lowered operands. Note that we do this after
2867  // checking if NumValues is zero, because with zero values
2868  // the operands won't have values in the map.
2869  SDValue Src = getValue(SrcV);
2870  SDValue Ptr = getValue(PtrV);
2871
2872  SDValue Root = getRoot();
2873  SmallVector<SDValue, 4> Chains(NumValues);
2874  EVT PtrVT = Ptr.getValueType();
2875  bool isVolatile = I.isVolatile();
2876  unsigned Alignment = I.getAlignment();
2877  for (unsigned i = 0; i != NumValues; ++i)
2878    Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
2879                             SDValue(Src.getNode(), Src.getResNo() + i),
2880                             DAG.getNode(ISD::ADD, getCurDebugLoc(),
2881                                         PtrVT, Ptr,
2882                                         DAG.getConstant(Offsets[i], PtrVT)),
2883                             PtrV, Offsets[i], isVolatile, Alignment);
2884
2885  DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2886                          MVT::Other, &Chains[0], NumValues));
2887}
2888
2889/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
2890/// node.
2891void SelectionDAGBuilder::visitTargetIntrinsic(CallInst &I,
2892                                               unsigned Intrinsic) {
2893  bool HasChain = !I.doesNotAccessMemory();
2894  bool OnlyLoad = HasChain && I.onlyReadsMemory();
2895
2896  // Build the operand list.
2897  SmallVector<SDValue, 8> Ops;
2898  if (HasChain) {  // If this intrinsic has side-effects, chainify it.
2899    if (OnlyLoad) {
2900      // We don't need to serialize loads against other loads.
2901      Ops.push_back(DAG.getRoot());
2902    } else {
2903      Ops.push_back(getRoot());
2904    }
2905  }
2906
2907  // Info is set by getTgtMemIntrinsic.
2908  TargetLowering::IntrinsicInfo Info;
2909  bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
2910
2911  // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
2912  if (!IsTgtIntrinsic)
2913    Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
2914
2915  // Add all operands of the call to the operand list.
2916  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2917    SDValue Op = getValue(I.getOperand(i));
2918    assert(TLI.isTypeLegal(Op.getValueType()) &&
2919           "Intrinsic uses a non-legal type?");
2920    Ops.push_back(Op);
2921  }
2922
2923  SmallVector<EVT, 4> ValueVTs;
2924  ComputeValueVTs(TLI, I.getType(), ValueVTs);
2925#ifndef NDEBUG
2926  for (unsigned Val = 0, E = ValueVTs.size(); Val != E; ++Val) {
2927    assert(TLI.isTypeLegal(ValueVTs[Val]) &&
2928           "Intrinsic uses a non-legal type?");
2929  }
2930#endif // NDEBUG
2931  if (HasChain)
2932    ValueVTs.push_back(MVT::Other);
2933
2934  SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
2935
2936  // Create the node.
2937  SDValue Result;
2938  if (IsTgtIntrinsic) {
2939    // This is a target intrinsic that touches memory.
2940    Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
2941                                     VTs, &Ops[0], Ops.size(),
2942                                     Info.memVT, Info.ptrVal, Info.offset,
2943                                     Info.align, Info.vol,
2944                                     Info.readMem, Info.writeMem);
2945  }
2946  else if (!HasChain)
2947    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
2948                         VTs, &Ops[0], Ops.size());
2949  else if (I.getType() != Type::getVoidTy(*DAG.getContext()))
2950    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
2951                         VTs, &Ops[0], Ops.size());
2952  else
2953    Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
2954                         VTs, &Ops[0], Ops.size());
2955
2956  if (HasChain) {
2957    SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
2958    if (OnlyLoad)
2959      PendingLoads.push_back(Chain);
2960    else
2961      DAG.setRoot(Chain);
2962  }
2963  if (I.getType() != Type::getVoidTy(*DAG.getContext())) {
2964    if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
2965      EVT VT = TLI.getValueType(PTy);
2966      Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
2967    }
2968    setValue(&I, Result);
2969  }
2970}
2971
2972/// GetSignificand - Get the significand and build it into a floating-point
2973/// number with exponent of 1:
2974///
2975///   Op = (Op & 0x007fffff) | 0x3f800000;
2976///
2977/// where Op is the i32 bit pattern of the floating-point value.
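/// For example, for Op == 0x41400000 (the bits of 12.0f) this yields
/// 0x3fc00000, i.e. 1.5f, since 12.0 == 1.5 * 2^3.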
2978static SDValue
2979GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
2980  SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
2981                           DAG.getConstant(0x007fffff, MVT::i32));
2982  SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
2983                           DAG.getConstant(0x3f800000, MVT::i32));
2984  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
2985}
2986
2987/// GetExponent - Get the exponent:
2988///
2989///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
2990///
2991/// where Op is the i32 bit pattern of the floating-point value.
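/// For example, for Op == 0x41400000 (the bits of 12.0f) the biased exponent
/// field is 130, so the result is (float)(130 - 127) == 3.0f.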
2992static SDValue
2993GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
2994            DebugLoc dl) {
2995  SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
2996                           DAG.getConstant(0x7f800000, MVT::i32));
2997  SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
2998                           DAG.getConstant(23, TLI.getPointerTy()));
2999  SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3000                           DAG.getConstant(127, MVT::i32));
3001  return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
3002}
3003
3004/// getF32Constant - Get 32-bit floating point constant.
3005static SDValue
3006getF32Constant(SelectionDAG &DAG, unsigned Flt) {
3007  return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
3008}
3009
3010/// implVisitBinaryAtomic - Utility function to lower binary-input atomic
3011/// intrinsics for visitIntrinsicCall: I is the call instruction and
3012/// Op is the associated ISD::NodeType.
3013const char *
3014SelectionDAGBuilder::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
3015  SDValue Root = getRoot();
3016  SDValue L =
3017    DAG.getAtomic(Op, getCurDebugLoc(),
3018                  getValue(I.getOperand(2)).getValueType().getSimpleVT(),
3019                  Root,
3020                  getValue(I.getOperand(1)),
3021                  getValue(I.getOperand(2)),
3022                  I.getOperand(1));
3023  setValue(&I, L);
3024  DAG.setRoot(L.getValue(1));
3025  return 0;
3026}
3027
3028// implVisitAluOverflow - Lower arithmetic overflow intrinsics.
3029const char *
3030SelectionDAGBuilder::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
3031  SDValue Op1 = getValue(I.getOperand(1));
3032  SDValue Op2 = getValue(I.getOperand(2));
3033
3034  SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
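  // Result value 0 is the arithmetic result and result value 1 is the i1
  // overflow flag.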
3035  SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2);
3036
3037  setValue(&I, Result);
3038  return 0;
3039}
3040
3041/// visitExp - Lower an exp intrinsic. Handles the special sequences for
3042/// limited-precision mode.
3043void
3044SelectionDAGBuilder::visitExp(CallInst &I) {
3045  SDValue result;
3046  DebugLoc dl = getCurDebugLoc();
3047
3048  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3049      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3050    SDValue Op = getValue(I.getOperand(1));
3051
3052    // Put the exponent in the right bit position for later addition to the
3053    // final result:
3054    //
3055    //   #define LOG2OFe 1.4426950f
3056    //   IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3057    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3058                             getF32Constant(DAG, 0x3fb8aa3b));
3059    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3060
3061    //   FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3062    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3063    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3064
3065    //   IntegerPartOfX <<= 23;
3066    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3067                                 DAG.getConstant(23, TLI.getPointerTy()));
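    // Shifting the integer part left by 23 positions it in the exponent
    // field of an IEEE-754 single, so adding it to the bits of
    // 2^FractionalPartOfX below multiplies the result by 2^IntegerPartOfX.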
3068
3069    if (LimitFloatPrecision <= 6) {
3070      // For floating-point precision of 6:
3071      //
3072      //   TwoToFractionalPartOfX =
3073      //     0.997535578f +
3074      //       (0.735607626f + 0.252464424f * x) * x;
3075      //
3076      // error 0.0144103317, which is 6 bits
3077      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3078                               getF32Constant(DAG, 0x3e814304));
3079      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3080                               getF32Constant(DAG, 0x3f3c50c8));
3081      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3082      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3083                               getF32Constant(DAG, 0x3f7f5e7e));
3084      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
3085
3086      // Add the exponent into the result in integer domain.
3087      SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3088                               TwoToFracPartOfX, IntegerPartOfX);
3089
3090      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
3091    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3092      // For floating-point precision of 12:
3093      //
3094      //   TwoToFractionalPartOfX =
3095      //     0.999892986f +
3096      //       (0.696457318f +
3097      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3098      //
3099      // error 0.000107046256, which is 13 to 14 bits
3100      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3101                               getF32Constant(DAG, 0x3da235e3));
3102      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3103                               getF32Constant(DAG, 0x3e65b8f3));
3104      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3105      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3106                               getF32Constant(DAG, 0x3f324b07));
3107      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3108      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3109                               getF32Constant(DAG, 0x3f7ff8fd));
3110      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
3111
3112      // Add the exponent into the result in integer domain.
3113      SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3114                               TwoToFracPartOfX, IntegerPartOfX);
3115
3116      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
3117    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3118      // For floating-point precision of 18:
3119      //
3120      //   TwoToFractionalPartOfX =
3121      //     0.999999982f +
3122      //       (0.693148872f +
3123      //         (0.240227044f +
3124      //           (0.554906021e-1f +
3125      //             (0.961591928e-2f +
3126      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3127      //
3128      // error 2.47208000*10^(-7), which is better than 18 bits
3129      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3130                               getF32Constant(DAG, 0x3924b03e));
3131      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3132                               getF32Constant(DAG, 0x3ab24b87));
3133      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3134      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3135                               getF32Constant(DAG, 0x3c1d8c17));
3136      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3137      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3138                               getF32Constant(DAG, 0x3d634a1d));
3139      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3140      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3141                               getF32Constant(DAG, 0x3e75fe14));
3142      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3143      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3144                                getF32Constant(DAG, 0x3f317234));
3145      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3146      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3147                                getF32Constant(DAG, 0x3f800000));
3148      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
3149                                             MVT::i32, t13);
3150
3151      // Add the exponent into the result in integer domain.
3152      SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3153                                TwoToFracPartOfX, IntegerPartOfX);
3154
3155      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
3156    }
3157  } else {
3158    // No special expansion.
3159    result = DAG.getNode(ISD::FEXP, dl,
3160                         getValue(I.getOperand(1)).getValueType(),
3161                         getValue(I.getOperand(1)));
3162  }
3163
3164  setValue(&I, result);
3165}
3166
3167/// visitLog - Lower a log intrinsic. Handles the special sequences for
3168/// limited-precision mode.
3169void
3170SelectionDAGBuilder::visitLog(CallInst &I) {
3171  SDValue result;
3172  DebugLoc dl = getCurDebugLoc();
3173
3174  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3175      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3176    SDValue Op = getValue(I.getOperand(1));
3177    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3178
3179    // Scale the exponent by log(2) [0.69314718f].
3180    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3181    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3182                                        getF32Constant(DAG, 0x3f317218));
3183
3184    // Get the significand and build it into a floating-point number with
3185    // exponent of 1.
3186    SDValue X = GetSignificand(DAG, Op1, dl);
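    // For x == 2^e * m with 1 <= m < 2, log(x) == e*log(2) + log(m); the
    // polynomials below approximate log(m).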
3187
3188    if (LimitFloatPrecision <= 6) {
3189      // For floating-point precision of 6:
3190      //
3191      //   LogofMantissa =
3192      //     -1.1609546f +
3193      //       (1.4034025f - 0.23903021f * x) * x;
3194      //
3195      // error 0.0034276066, which is better than 8 bits
3196      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3197                               getF32Constant(DAG, 0xbe74c456));
3198      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3199                               getF32Constant(DAG, 0x3fb3a2b1));
3200      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3201      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3202                                          getF32Constant(DAG, 0x3f949a29));
3203
3204      result = DAG.getNode(ISD::FADD, dl,
3205                           MVT::f32, LogOfExponent, LogOfMantissa);
3206    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3207      // For floating-point precision of 12:
3208      //
3209      //   LogOfMantissa =
3210      //     -1.7417939f +
3211      //       (2.8212026f +
3212      //         (-1.4699568f +
3213      //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3214      //
3215      // error 0.000061011436, which is 14 bits
3216      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3217                               getF32Constant(DAG, 0xbd67b6d6));
3218      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3219                               getF32Constant(DAG, 0x3ee4f4b8));
3220      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3221      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3222                               getF32Constant(DAG, 0x3fbc278b));
3223      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3224      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3225                               getF32Constant(DAG, 0x40348e95));
3226      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3227      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3228                                          getF32Constant(DAG, 0x3fdef31a));
3229
3230      result = DAG.getNode(ISD::FADD, dl,
3231                           MVT::f32, LogOfExponent, LogOfMantissa);
3232    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3233      // For floating-point precision of 18:
3234      //
3235      //   LogOfMantissa =
3236      //     -2.1072184f +
3237      //       (4.2372794f +
3238      //         (-3.7029485f +
3239      //           (2.2781945f +
3240      //             (-0.87823314f +
3241      //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3242      //
3243      // error 0.0000023660568, which is better than 18 bits
3244      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3245                               getF32Constant(DAG, 0xbc91e5ac));
3246      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3247                               getF32Constant(DAG, 0x3e4350aa));
3248      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3249      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3250                               getF32Constant(DAG, 0x3f60d3e3));
3251      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3252      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3253                               getF32Constant(DAG, 0x4011cdf0));
3254      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3255      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3256                               getF32Constant(DAG, 0x406cfd1c));
3257      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3258      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3259                               getF32Constant(DAG, 0x408797cb));
3260      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3261      SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3262                                          getF32Constant(DAG, 0x4006dcab));
3263
3264      result = DAG.getNode(ISD::FADD, dl,
3265                           MVT::f32, LogOfExponent, LogOfMantissa);
3266    }
3267  } else {
3268    // No special expansion.
3269    result = DAG.getNode(ISD::FLOG, dl,
3270                         getValue(I.getOperand(1)).getValueType(),
3271                         getValue(I.getOperand(1)));
3272  }
3273
3274  setValue(&I, result);
3275}
3276
3277/// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3278/// limited-precision mode.
3279void
3280SelectionDAGBuilder::visitLog2(CallInst &I) {
3281  SDValue result;
3282  DebugLoc dl = getCurDebugLoc();
3283
3284  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3285      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3286    SDValue Op = getValue(I.getOperand(1));
3287    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3288
3289    // Get the exponent.
3290    SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
3291
3292    // Get the significand and build it into a floating-point number with
3293    // exponent of 1.
3294    SDValue X = GetSignificand(DAG, Op1, dl);
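    // For x == 2^e * m with 1 <= m < 2, log2(x) == e + log2(m), so the
    // exponent is used directly and only log2(m) needs to be approximated.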
3295
3296    // Different possible minimax approximations of significand in
3297    // floating-point for various degrees of accuracy over [1,2].
3298    if (LimitFloatPrecision <= 6) {
3299      // For floating-point precision of 6:
3300      //
3301      //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3302      //
3303      // error 0.0049451742, which is more than 7 bits
3304      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3305                               getF32Constant(DAG, 0xbeb08fe0));
3306      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3307                               getF32Constant(DAG, 0x40019463));
3308      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3309      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3310                                           getF32Constant(DAG, 0x3fd6633d));
3311
3312      result = DAG.getNode(ISD::FADD, dl,
3313                           MVT::f32, LogOfExponent, Log2ofMantissa);
3314    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3315      // For floating-point precision of 12:
3316      //
3317      //   Log2ofMantissa =
3318      //     -2.51285454f +
3319      //       (4.07009056f +
3320      //         (-2.12067489f +
3321      //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3322      //
3323      // error 0.0000876136000, which is better than 13 bits
3324      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3325                               getF32Constant(DAG, 0xbda7262e));
3326      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3327                               getF32Constant(DAG, 0x3f25280b));
3328      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3329      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3330                               getF32Constant(DAG, 0x4007b923));
3331      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3332      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3333                               getF32Constant(DAG, 0x40823e2f));
3334      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3335      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3336                                           getF32Constant(DAG, 0x4020d29c));
3337
3338      result = DAG.getNode(ISD::FADD, dl,
3339                           MVT::f32, LogOfExponent, Log2ofMantissa);
3340    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3341      // For floating-point precision of 18:
3342      //
3343      //   Log2ofMantissa =
3344      //     -3.0400495f +
3345      //       (6.1129976f +
3346      //         (-5.3420409f +
3347      //           (3.2865683f +
3348      //             (-1.2669343f +
3349      //               (0.27515199f -
3350      //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3351      //
3352      // error 0.0000018516, which is better than 18 bits
3353      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3354                               getF32Constant(DAG, 0xbcd2769e));
3355      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3356                               getF32Constant(DAG, 0x3e8ce0b9));
3357      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3358      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3359                               getF32Constant(DAG, 0x3fa22ae7));
3360      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3361      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3362                               getF32Constant(DAG, 0x40525723));
3363      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3364      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3365                               getF32Constant(DAG, 0x40aaf200));
3366      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3367      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3368                               getF32Constant(DAG, 0x40c39dad));
3369      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3370      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3371                                           getF32Constant(DAG, 0x4042902c));
3372
3373      result = DAG.getNode(ISD::FADD, dl,
3374                           MVT::f32, LogOfExponent, Log2ofMantissa);
3375    }
3376  } else {
3377    // No special expansion.
3378    result = DAG.getNode(ISD::FLOG2, dl,
3379                         getValue(I.getOperand(1)).getValueType(),
3380                         getValue(I.getOperand(1)));
3381  }
3382
3383  setValue(&I, result);
3384}
3385
3386/// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3387/// limited-precision mode.
3388void
3389SelectionDAGBuilder::visitLog10(CallInst &I) {
3390  SDValue result;
3391  DebugLoc dl = getCurDebugLoc();
3392
3393  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3394      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3395    SDValue Op = getValue(I.getOperand(1));
3396    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3397
3398    // Scale the exponent by log10(2) [0.30102999f].
3399    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3400    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3401                                        getF32Constant(DAG, 0x3e9a209a));
3402
3403    // Get the significand and build it into a floating-point number with
3404    // exponent of 1.
3405    SDValue X = GetSignificand(DAG, Op1, dl);
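    // For x == 2^e * m with 1 <= m < 2, log10(x) == e*log10(2) + log10(m).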
3406
3407    if (LimitFloatPrecision <= 6) {
3408      // For floating-point precision of 6:
3409      //
3410      //   Log10ofMantissa =
3411      //     -0.50419619f +
3412      //       (0.60948995f - 0.10380950f * x) * x;
3413      //
3414      // error 0.0014886165, which is 6 bits
3415      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3416                               getF32Constant(DAG, 0xbdd49a13));
3417      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3418                               getF32Constant(DAG, 0x3f1c0789));
3419      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3420      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3421                                            getF32Constant(DAG, 0x3f011300));
3422
3423      result = DAG.getNode(ISD::FADD, dl,
3424                           MVT::f32, LogOfExponent, Log10ofMantissa);
3425    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3426      // For floating-point precision of 12:
3427      //
3428      //   Log10ofMantissa =
3429      //     -0.64831180f +
3430      //       (0.91751397f +
3431      //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3432      //
3433      // error 0.00019228036, which is better than 12 bits
3434      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3435                               getF32Constant(DAG, 0x3d431f31));
3436      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3437                               getF32Constant(DAG, 0x3ea21fb2));
3438      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3439      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3440                               getF32Constant(DAG, 0x3f6ae232));
3441      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3442      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3443                                            getF32Constant(DAG, 0x3f25f7c3));
3444
3445      result = DAG.getNode(ISD::FADD, dl,
3446                           MVT::f32, LogOfExponent, Log10ofMantissa);
3447    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3448      // For floating-point precision of 18:
3449      //
3450      //   Log10ofMantissa =
3451      //     -0.84299375f +
3452      //       (1.5327582f +
3453      //         (-1.0688956f +
3454      //           (0.49102474f +
3455      //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3456      //
3457      // error 0.0000037995730, which is better than 18 bits
3458      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3459                               getF32Constant(DAG, 0x3c5d51ce));
3460      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3461                               getF32Constant(DAG, 0x3e00685a));
3462      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3463      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3464                               getF32Constant(DAG, 0x3efb6798));
3465      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3466      SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3467                               getF32Constant(DAG, 0x3f88d192));
3468      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3469      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3470                               getF32Constant(DAG, 0x3fc4316c));
3471      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3472      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
3473                                            getF32Constant(DAG, 0x3f57ce70));
3474
3475      result = DAG.getNode(ISD::FADD, dl,
3476                           MVT::f32, LogOfExponent, Log10ofMantissa);
3477    }
3478  } else {
3479    // No special expansion.
3480    result = DAG.getNode(ISD::FLOG10, dl,
3481                         getValue(I.getOperand(1)).getValueType(),
3482                         getValue(I.getOperand(1)));
3483  }
3484
3485  setValue(&I, result);
3486}
3487
3488/// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3489/// limited-precision mode.
3490void
3491SelectionDAGBuilder::visitExp2(CallInst &I) {
3492  SDValue result;
3493  DebugLoc dl = getCurDebugLoc();
3494
3495  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3496      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3497    SDValue Op = getValue(I.getOperand(1));
3498
3499    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
3500
3501    //   FractionalPartOfX = x - (float)IntegerPartOfX;
3502    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3503    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
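    // 2^x == 2^IntegerPartOfX * 2^FractionalPartOfX; the integer part is
    // folded into the exponent field and the polynomials below approximate
    // 2^FractionalPartOfX.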
3504
3505    //   IntegerPartOfX <<= 23;
3506    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3507                                 DAG.getConstant(23, TLI.getPointerTy()));
3508
3509    if (LimitFloatPrecision <= 6) {
3510      // For floating-point precision of 6:
3511      //
3512      //   TwoToFractionalPartOfX =
3513      //     0.997535578f +
3514      //       (0.735607626f + 0.252464424f * x) * x;
3515      //
3516      // error 0.0144103317, which is 6 bits
3517      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3518                               getF32Constant(DAG, 0x3e814304));
3519      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3520                               getF32Constant(DAG, 0x3f3c50c8));
3521      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3522      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3523                               getF32Constant(DAG, 0x3f7f5e7e));
3524      SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3525      SDValue TwoToFractionalPartOfX =
3526        DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3527
3528      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3529                           MVT::f32, TwoToFractionalPartOfX);
3530    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3531      // For floating-point precision of 12:
3532      //
3533      //   TwoToFractionalPartOfX =
3534      //     0.999892986f +
3535      //       (0.696457318f +
3536      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3537      //
3538      // error 0.000107046256, which is 13 to 14 bits
3539      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3540                               getF32Constant(DAG, 0x3da235e3));
3541      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3542                               getF32Constant(DAG, 0x3e65b8f3));
3543      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3544      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3545                               getF32Constant(DAG, 0x3f324b07));
3546      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3547      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3548                               getF32Constant(DAG, 0x3f7ff8fd));
3549      SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3550      SDValue TwoToFractionalPartOfX =
3551        DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3552
3553      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3554                           MVT::f32, TwoToFractionalPartOfX);
3555    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3556      // For floating-point precision of 18:
3557      //
3558      //   TwoToFractionalPartOfX =
3559      //     0.999999982f +
3560      //       (0.693148872f +
3561      //         (0.240227044f +
3562      //           (0.554906021e-1f +
3563      //             (0.961591928e-2f +
3564      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3565      // error 2.47208000*10^(-7), which is better than 18 bits
3566      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3567                               getF32Constant(DAG, 0x3924b03e));
3568      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3569                               getF32Constant(DAG, 0x3ab24b87));
3570      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3571      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3572                               getF32Constant(DAG, 0x3c1d8c17));
3573      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3574      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3575                               getF32Constant(DAG, 0x3d634a1d));
3576      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3577      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3578                               getF32Constant(DAG, 0x3e75fe14));
3579      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3580      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3581                                getF32Constant(DAG, 0x3f317234));
3582      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3583      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3584                                getF32Constant(DAG, 0x3f800000));
3585      SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3586      SDValue TwoToFractionalPartOfX =
3587        DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3588
3589      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3590                           MVT::f32, TwoToFractionalPartOfX);
3591    }
3592  } else {
3593    // No special expansion.
3594    result = DAG.getNode(ISD::FEXP2, dl,
3595                         getValue(I.getOperand(1)).getValueType(),
3596                         getValue(I.getOperand(1)));
3597  }
3598
3599  setValue(&I, result);
3600}
3601
3602/// visitPow - Lower a pow intrinsic. Handles the special sequences for
3603/// limited-precision mode when the base (first operand) is a constant 10.0f.
3604void
3605SelectionDAGBuilder::visitPow(CallInst &I) {
3606  SDValue result;
3607  Value *Val = I.getOperand(1);
3608  DebugLoc dl = getCurDebugLoc();
3609  bool IsExp10 = false;
3610
3611  if (getValue(Val).getValueType() == MVT::f32 &&
3612      getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
3613      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3614    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
3615      if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3616        APFloat Ten(10.0f);
3617        IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
3618      }
3619    }
3620  }
3621
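  // pow(10.0f, x) is lowered like exp2, using 10^x == 2^(x * log2(10)).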
3622  if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3623    SDValue Op = getValue(I.getOperand(2));
3624
3625    // Put the exponent in the right bit position for later addition to the
3626    // final result:
3627    //
3628    //   #define LOG2OF10 3.3219281f
3629    //   IntegerPartOfX = (int32_t)(x * LOG2OF10);
3630    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3631                             getF32Constant(DAG, 0x40549a78));
3632    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3633
3634    //   FractionalPartOfX = (x * LOG2OF10) - (float)IntegerPartOfX;
3635    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3636    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3637
3638    //   IntegerPartOfX <<= 23;
3639    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3640                                 DAG.getConstant(23, TLI.getPointerTy()));
3641
3642    if (LimitFloatPrecision <= 6) {
3643      // For floating-point precision of 6:
3644      //
3645      //   TwoToFractionalPartOfX =
3646      //     0.997535578f +
3647      //       (0.735607626f + 0.252464424f * x) * x;
3648      //
3649      // error 0.0144103317, which is 6 bits
3650      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3651                               getF32Constant(DAG, 0x3e814304));
3652      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3653                               getF32Constant(DAG, 0x3f3c50c8));
3654      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3655      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3656                               getF32Constant(DAG, 0x3f7f5e7e));
3657      SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3658      SDValue TwoToFractionalPartOfX =
3659        DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3660
3661      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3662                           MVT::f32, TwoToFractionalPartOfX);
3663    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3664      // For floating-point precision of 12:
3665      //
3666      //   TwoToFractionalPartOfX =
3667      //     0.999892986f +
3668      //       (0.696457318f +
3669      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3670      //
3671      // error 0.000107046256, which is 13 to 14 bits
3672      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3673                               getF32Constant(DAG, 0x3da235e3));
3674      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3675                               getF32Constant(DAG, 0x3e65b8f3));
3676      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3677      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3678                               getF32Constant(DAG, 0x3f324b07));
3679      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3680      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3681                               getF32Constant(DAG, 0x3f7ff8fd));
3682      SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3683      SDValue TwoToFractionalPartOfX =
3684        DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3685
3686      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3687                           MVT::f32, TwoToFractionalPartOfX);
3688    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3689      // For floating-point precision of 18:
3690      //
3691      //   TwoToFractionalPartOfX =
3692      //     0.999999982f +
3693      //       (0.693148872f +
3694      //         (0.240227044f +
3695      //           (0.554906021e-1f +
3696      //             (0.961591928e-2f +
3697      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3698      // error 2.47208000*10^(-7), which is better than 18 bits
3699      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3700                               getF32Constant(DAG, 0x3924b03e));
3701      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3702                               getF32Constant(DAG, 0x3ab24b87));
3703      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3704      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3705                               getF32Constant(DAG, 0x3c1d8c17));
3706      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3707      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3708                               getF32Constant(DAG, 0x3d634a1d));
3709      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3710      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3711                               getF32Constant(DAG, 0x3e75fe14));
3712      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3713      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3714                                getF32Constant(DAG, 0x3f317234));
3715      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3716      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3717                                getF32Constant(DAG, 0x3f800000));
3718      SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3719      SDValue TwoToFractionalPartOfX =
3720        DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3721
3722      result = DAG.getNode(ISD::BIT_CONVERT, dl,
3723                           MVT::f32, TwoToFractionalPartOfX);
3724    }
3725  } else {
3726    // No special expansion.
3727    result = DAG.getNode(ISD::FPOW, dl,
3728                         getValue(I.getOperand(1)).getValueType(),
3729                         getValue(I.getOperand(1)),
3730                         getValue(I.getOperand(2)));
3731  }
3732
3733  setValue(&I, result);
3734}
3735
3736/// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
3737/// we want to emit this as a call to a named external function, return the
3738/// name; otherwise, lower it and return null.
3739const char *
3740SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
3741  DebugLoc dl = getCurDebugLoc();
3742  switch (Intrinsic) {
3743  default:
3744    // By default, turn this into a target intrinsic node.
3745    visitTargetIntrinsic(I, Intrinsic);
3746    return 0;
3747  case Intrinsic::vastart:  visitVAStart(I); return 0;
3748  case Intrinsic::vaend:    visitVAEnd(I); return 0;
3749  case Intrinsic::vacopy:   visitVACopy(I); return 0;
3750  case Intrinsic::returnaddress:
3751    setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
3752                             getValue(I.getOperand(1))));
3753    return 0;
3754  case Intrinsic::frameaddress:
3755    setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
3756                             getValue(I.getOperand(1))));
3757    return 0;
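  // Adding !usesUnderscoreSetJmp()/!usesUnderscoreLongJmp() to the string
  // literal below skips the leading '_' when the target does not use it,
  // yielding "setjmp"/"longjmp" instead of "_setjmp"/"_longjmp".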
3758  case Intrinsic::setjmp:
3759    return "_setjmp"+!TLI.usesUnderscoreSetJmp();
3760    break;
3761  case Intrinsic::longjmp:
3762    return "_longjmp"+!TLI.usesUnderscoreLongJmp();
3763    break;
3764  case Intrinsic::memcpy: {
3765    SDValue Op1 = getValue(I.getOperand(1));
3766    SDValue Op2 = getValue(I.getOperand(2));
3767    SDValue Op3 = getValue(I.getOperand(3));
3768    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3769    DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3770                              I.getOperand(1), 0, I.getOperand(2), 0));
3771    return 0;
3772  }
3773  case Intrinsic::memset: {
3774    SDValue Op1 = getValue(I.getOperand(1));
3775    SDValue Op2 = getValue(I.getOperand(2));
3776    SDValue Op3 = getValue(I.getOperand(3));
3777    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3778    DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
3779                              I.getOperand(1), 0));
3780    return 0;
3781  }
3782  case Intrinsic::memmove: {
3783    SDValue Op1 = getValue(I.getOperand(1));
3784    SDValue Op2 = getValue(I.getOperand(2));
3785    SDValue Op3 = getValue(I.getOperand(3));
3786    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3787
3788    // If the source and destination are known to not be aliases, we can
3789    // lower memmove as memcpy.
3790    uint64_t Size = -1ULL;
3791    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
3792      Size = C->getZExtValue();
3793    if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
3794        AliasAnalysis::NoAlias) {
3795      DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3796                                I.getOperand(1), 0, I.getOperand(2), 0));
3797      return 0;
3798    }
3799
3800    DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
3801                               I.getOperand(1), 0, I.getOperand(2), 0));
3802    return 0;
3803  }
3804  case Intrinsic::dbg_stoppoint:
3805  case Intrinsic::dbg_region_start:
3806  case Intrinsic::dbg_region_end:
3807  case Intrinsic::dbg_func_start:
3808    // FIXME - Remove these intrinsics once the dust settles.
3809    return 0;
3810  case Intrinsic::dbg_declare: {
3811    if (OptLevel != CodeGenOpt::None)
3812      // FIXME: Variable debug info is not supported here.
3813      return 0;
3814    DwarfWriter *DW = DAG.getDwarfWriter();
3815    if (!DW)
3816      return 0;
3817    DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
3818    if (!isValidDebugInfoIntrinsic(DI, CodeGenOpt::None))
3819      return 0;
3820
3821    MDNode *Variable = DI.getVariable();
3822    Value *Address = DI.getAddress();
3823    if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
3824      Address = BCI->getOperand(0);
3825    AllocaInst *AI = dyn_cast<AllocaInst>(Address);
3826    // Don't handle byval struct arguments or VLAs, for example.
3827    if (!AI)
3828      return 0;
3829    DenseMap<const AllocaInst*, int>::iterator SI =
3830      FuncInfo.StaticAllocaMap.find(AI);
3831    if (SI == FuncInfo.StaticAllocaMap.end())
3832      return 0; // VLAs.
3833    int FI = SI->second;
3834
3835    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3836    if (MMI) {
3837      MetadataContext &TheMetadata =
3838        DI.getParent()->getContext().getMetadata();
3839      unsigned MDDbgKind = TheMetadata.getMDKind("dbg");
3840      MDNode *Dbg = TheMetadata.getMD(MDDbgKind, &DI);
3841      MMI->setVariableDbgInfo(Variable, FI, Dbg);
3842    }
3843    return 0;
3844  }
3845  case Intrinsic::eh_exception: {
3846    // Insert the EXCEPTIONADDR instruction.
3847    assert(CurMBB->isLandingPad() &&"Call to eh.exception not in landing pad!");
3848    SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3849    SDValue Ops[1];
3850    Ops[0] = DAG.getRoot();
3851    SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
3852    setValue(&I, Op);
3853    DAG.setRoot(Op.getValue(1));
3854    return 0;
3855  }
3856
3857  case Intrinsic::eh_selector: {
3858    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3859
3860    if (CurMBB->isLandingPad())
3861      AddCatchInfo(I, MMI, CurMBB);
3862    else {
3863#ifndef NDEBUG
3864      FuncInfo.CatchInfoLost.insert(&I);
3865#endif
3866      // FIXME: Mark exception selector register as live in.  Hack for PR1508.
3867      unsigned Reg = TLI.getExceptionSelectorRegister();
3868      if (Reg) CurMBB->addLiveIn(Reg);
3869    }
3870
3871    // Insert the EHSELECTION instruction.
3872    SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3873    SDValue Ops[2];
3874    Ops[0] = getValue(I.getOperand(1));
3875    Ops[1] = getRoot();
3876    SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
3877
3878    DAG.setRoot(Op.getValue(1));
3879
3880    setValue(&I, DAG.getSExtOrTrunc(Op, dl, MVT::i32));
3881    return 0;
3882  }
3883
3884  case Intrinsic::eh_typeid_for: {
3885    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3886
3887    if (MMI) {
3888      // Find the type id for the given typeinfo.
3889      GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
3890
3891      unsigned TypeID = MMI->getTypeIDFor(GV);
3892      setValue(&I, DAG.getConstant(TypeID, MVT::i32));
3893    } else {
3894      // Return something different from eh.selector.
3895      setValue(&I, DAG.getConstant(1, MVT::i32));
3896    }
3897
3898    return 0;
3899  }
3900
3901  case Intrinsic::eh_return_i32:
3902  case Intrinsic::eh_return_i64:
3903    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
3904      MMI->setCallsEHReturn(true);
3905      DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
3906                              MVT::Other,
3907                              getControlRoot(),
3908                              getValue(I.getOperand(1)),
3909                              getValue(I.getOperand(2))));
3910    } else {
3911      setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
3912    }
3913
3914    return 0;
3915  case Intrinsic::eh_unwind_init:
3916    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
3917      MMI->setCallsUnwindInit(true);
3918    }
3919
3920    return 0;
3921
3922  case Intrinsic::eh_dwarf_cfa: {
3923    EVT VT = getValue(I.getOperand(1)).getValueType();
3924    SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), dl,
3925                                        TLI.getPointerTy());
3926
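    // The canonical frame address is frameaddress(0) plus the target's
    // frame-to-args offset plus the requested offset.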
3927    SDValue Offset = DAG.getNode(ISD::ADD, dl,
3928                                 TLI.getPointerTy(),
3929                                 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
3930                                             TLI.getPointerTy()),
3931                                 CfaArg);
3932    setValue(&I, DAG.getNode(ISD::ADD, dl,
3933                             TLI.getPointerTy(),
3934                             DAG.getNode(ISD::FRAMEADDR, dl,
3935                                         TLI.getPointerTy(),
3936                                         DAG.getConstant(0,
3937                                                         TLI.getPointerTy())),
3938                             Offset));
3939    return 0;
3940  }
3941  case Intrinsic::convertff:
3942  case Intrinsic::convertfsi:
3943  case Intrinsic::convertfui:
3944  case Intrinsic::convertsif:
3945  case Intrinsic::convertuif:
3946  case Intrinsic::convertss:
3947  case Intrinsic::convertsu:
3948  case Intrinsic::convertus:
3949  case Intrinsic::convertuu: {
3950    ISD::CvtCode Code = ISD::CVT_INVALID;
3951    switch (Intrinsic) {
3952    case Intrinsic::convertff:  Code = ISD::CVT_FF; break;
3953    case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
3954    case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
3955    case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
3956    case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
3957    case Intrinsic::convertss:  Code = ISD::CVT_SS; break;
3958    case Intrinsic::convertsu:  Code = ISD::CVT_SU; break;
3959    case Intrinsic::convertus:  Code = ISD::CVT_US; break;
3960    case Intrinsic::convertuu:  Code = ISD::CVT_UU; break;
3961    }
3962    EVT DestVT = TLI.getValueType(I.getType());
3963    Value* Op1 = I.getOperand(1);
3964    setValue(&I, DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
3965                                DAG.getValueType(DestVT),
3966                                DAG.getValueType(getValue(Op1).getValueType()),
3967                                getValue(I.getOperand(2)),
3968                                getValue(I.getOperand(3)),
3969                                Code));
3970    return 0;
3971  }
3972
3973  case Intrinsic::sqrt:
3974    setValue(&I, DAG.getNode(ISD::FSQRT, dl,
3975                             getValue(I.getOperand(1)).getValueType(),
3976                             getValue(I.getOperand(1))));
3977    return 0;
3978  case Intrinsic::powi:
3979    setValue(&I, DAG.getNode(ISD::FPOWI, dl,
3980                             getValue(I.getOperand(1)).getValueType(),
3981                             getValue(I.getOperand(1)),
3982                             getValue(I.getOperand(2))));
3983    return 0;
3984  case Intrinsic::sin:
3985    setValue(&I, DAG.getNode(ISD::FSIN, dl,
3986                             getValue(I.getOperand(1)).getValueType(),
3987                             getValue(I.getOperand(1))));
3988    return 0;
3989  case Intrinsic::cos:
3990    setValue(&I, DAG.getNode(ISD::FCOS, dl,
3991                             getValue(I.getOperand(1)).getValueType(),
3992                             getValue(I.getOperand(1))));
3993    return 0;
3994  case Intrinsic::log:
3995    visitLog(I);
3996    return 0;
3997  case Intrinsic::log2:
3998    visitLog2(I);
3999    return 0;
4000  case Intrinsic::log10:
4001    visitLog10(I);
4002    return 0;
4003  case Intrinsic::exp:
4004    visitExp(I);
4005    return 0;
4006  case Intrinsic::exp2:
4007    visitExp2(I);
4008    return 0;
4009  case Intrinsic::pow:
4010    visitPow(I);
4011    return 0;
4012  case Intrinsic::pcmarker: {
4013    SDValue Tmp = getValue(I.getOperand(1));
4014    DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
4015    return 0;
4016  }
4017  case Intrinsic::readcyclecounter: {
4018    SDValue Op = getRoot();
4019    SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, dl,
4020                              DAG.getVTList(MVT::i64, MVT::Other),
4021                              &Op, 1);
4022    setValue(&I, Tmp);
4023    DAG.setRoot(Tmp.getValue(1));
4024    return 0;
4025  }
4026  case Intrinsic::bswap:
4027    setValue(&I, DAG.getNode(ISD::BSWAP, dl,
4028                             getValue(I.getOperand(1)).getValueType(),
4029                             getValue(I.getOperand(1))));
4030    return 0;
4031  case Intrinsic::cttz: {
4032    SDValue Arg = getValue(I.getOperand(1));
4033    EVT Ty = Arg.getValueType();
4034    SDValue result = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
4035    setValue(&I, result);
4036    return 0;
4037  }
4038  case Intrinsic::ctlz: {
4039    SDValue Arg = getValue(I.getOperand(1));
4040    EVT Ty = Arg.getValueType();
4041    SDValue result = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
4042    setValue(&I, result);
4043    return 0;
4044  }
4045  case Intrinsic::ctpop: {
4046    SDValue Arg = getValue(I.getOperand(1));
4047    EVT Ty = Arg.getValueType();
4048    SDValue result = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
4049    setValue(&I, result);
4050    return 0;
4051  }
4052  case Intrinsic::stacksave: {
4053    SDValue Op = getRoot();
4054    SDValue Tmp = DAG.getNode(ISD::STACKSAVE, dl,
4055              DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
4056    setValue(&I, Tmp);
4057    DAG.setRoot(Tmp.getValue(1));
4058    return 0;
4059  }
4060  case Intrinsic::stackrestore: {
4061    SDValue Tmp = getValue(I.getOperand(1));
4062    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Tmp));
4063    return 0;
4064  }
4065  case Intrinsic::stackprotector: {
4066    // Emit code into the DAG to store the stack guard onto the stack.
4067    MachineFunction &MF = DAG.getMachineFunction();
4068    MachineFrameInfo *MFI = MF.getFrameInfo();
4069    EVT PtrTy = TLI.getPointerTy();
4070
4071    SDValue Src = getValue(I.getOperand(1));   // The guard's value.
4072    AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
4073
4074    int FI = FuncInfo.StaticAllocaMap[Slot];
4075    MFI->setStackProtectorIndex(FI);
4076
4077    SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4078
4079    // Store the stack protector onto the stack.
4080    SDValue Result = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
4081                                  PseudoSourceValue::getFixedStack(FI),
4082                                  0, true);
4083    setValue(&I, Result);
4084    DAG.setRoot(Result);
4085    return 0;
4086  }
4087  case Intrinsic::objectsize: {
4088    // If we don't know by now, we're never going to know.
4089    ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(2));
4090
4091    assert(CI && "Non-constant type in __builtin_object_size?");
4092
4093    // The result must have the intrinsic's return type.
4094    EVT Ty = TLI.getValueType(I.getType());
4095
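    // Note (explanatory, following the __builtin_object_size convention): a
    // type argument of 0 or 1 asks for the maximum remaining size, so the
    // "don't know" answer is -1; 2 or 3 asks for the minimum, so it is 0.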
4096    if (CI->getZExtValue() < 2)
4097      setValue(&I, DAG.getConstant(-1ULL, Ty));
4098    else
4099      setValue(&I, DAG.getConstant(0, Ty));
4100    return 0;
4101  }
4102  case Intrinsic::var_annotation:
4103    // Discard annotate attributes
4104    return 0;
4105
4106  case Intrinsic::init_trampoline: {
4107    const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
4108
4109    SDValue Ops[6];
4110    Ops[0] = getRoot();
4111    Ops[1] = getValue(I.getOperand(1));
4112    Ops[2] = getValue(I.getOperand(2));
4113    Ops[3] = getValue(I.getOperand(3));
4114    Ops[4] = DAG.getSrcValue(I.getOperand(1));
4115    Ops[5] = DAG.getSrcValue(F);
4116
4117    SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, dl,
4118                              DAG.getVTList(TLI.getPointerTy(), MVT::Other),
4119                              Ops, 6);
4120
4121    setValue(&I, Tmp);
4122    DAG.setRoot(Tmp.getValue(1));
4123    return 0;
4124  }
4125
4126  case Intrinsic::gcroot:
4127    if (GFI) {
4128      Value *Alloca = I.getOperand(1);
4129      Constant *TypeMap = cast<Constant>(I.getOperand(2));
4130
4131      FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
4132      GFI->addStackRoot(FI->getIndex(), TypeMap);
4133    }
4134    return 0;
4135
4136  case Intrinsic::gcread:
4137  case Intrinsic::gcwrite:
4138    llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
4139    return 0;
4140
4141  case Intrinsic::flt_rounds: {
4142    setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
4143    return 0;
4144  }
4145
4146  case Intrinsic::trap: {
4147    DAG.setRoot(DAG.getNode(ISD::TRAP, dl, MVT::Other, getRoot()));
4148    return 0;
4149  }
4150
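  // Each *.with.overflow intrinsic below maps onto a single ISD node that
  // produces both the arithmetic result and an overflow flag, built by the
  // shared helper implVisitAluOverflow.  Roughly, llvm.uadd.with.overflow.i32
  // becomes an ISD::UADDO node, llvm.smul.with.overflow.i32 an ISD::SMULO
  // node, and so on.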
4151  case Intrinsic::uadd_with_overflow:
4152    return implVisitAluOverflow(I, ISD::UADDO);
4153  case Intrinsic::sadd_with_overflow:
4154    return implVisitAluOverflow(I, ISD::SADDO);
4155  case Intrinsic::usub_with_overflow:
4156    return implVisitAluOverflow(I, ISD::USUBO);
4157  case Intrinsic::ssub_with_overflow:
4158    return implVisitAluOverflow(I, ISD::SSUBO);
4159  case Intrinsic::umul_with_overflow:
4160    return implVisitAluOverflow(I, ISD::UMULO);
4161  case Intrinsic::smul_with_overflow:
4162    return implVisitAluOverflow(I, ISD::SMULO);
4163
4164  case Intrinsic::prefetch: {
4165    SDValue Ops[4];
4166    Ops[0] = getRoot();
4167    Ops[1] = getValue(I.getOperand(1));
4168    Ops[2] = getValue(I.getOperand(2));
4169    Ops[3] = getValue(I.getOperand(3));
4170    DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
4171    return 0;
4172  }
4173
4174  case Intrinsic::memory_barrier: {
4175    SDValue Ops[6];
4176    Ops[0] = getRoot();
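    // Operands 1-5 of llvm.memory.barrier are its five i1 ordering flags
    // (load-load, load-store, store-load, store-store, device); they are
    // forwarded to the MEMBARRIER node unchanged.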
4177    for (int x = 1; x < 6; ++x)
4178      Ops[x] = getValue(I.getOperand(x));
4179
4180    DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
4181    return 0;
4182  }
4183  case Intrinsic::atomic_cmp_swap: {
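    // Operand 1 is the pointer, operand 2 the compare value, and operand 3
    // the value to store on success; the node's first result is the value
    // loaded from memory and its second result is the output chain.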
4184    SDValue Root = getRoot();
4185    SDValue L =
4186      DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
4187                    getValue(I.getOperand(2)).getValueType().getSimpleVT(),
4188                    Root,
4189                    getValue(I.getOperand(1)),
4190                    getValue(I.getOperand(2)),
4191                    getValue(I.getOperand(3)),
4192                    I.getOperand(1));
4193    setValue(&I, L);
4194    DAG.setRoot(L.getValue(1));
4195    return 0;
4196  }
4197  case Intrinsic::atomic_load_add:
4198    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
4199  case Intrinsic::atomic_load_sub:
4200    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
4201  case Intrinsic::atomic_load_or:
4202    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
4203  case Intrinsic::atomic_load_xor:
4204    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
4205  case Intrinsic::atomic_load_and:
4206    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
4207  case Intrinsic::atomic_load_nand:
4208    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
4209  case Intrinsic::atomic_load_max:
4210    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
4211  case Intrinsic::atomic_load_min:
4212    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
4213  case Intrinsic::atomic_load_umin:
4214    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
4215  case Intrinsic::atomic_load_umax:
4216    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
4217  case Intrinsic::atomic_swap:
4218    return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
4219
4220  case Intrinsic::invariant_start:
4221  case Intrinsic::lifetime_start:
4222    // Discard region information.
4223    setValue(&I, DAG.getUNDEF(TLI.getPointerTy()));
4224    return 0;
4225  case Intrinsic::invariant_end:
4226  case Intrinsic::lifetime_end:
4227    // Discard region information.
4228    return 0;
4229  }
4230}
4231
4232/// Test if the given instruction is in a position to be optimized
4233/// with a tail-call. This roughly means that it's in a block with
4234/// a return and there's nothing that needs to be scheduled
4235/// between it and the return.
4236///
4237/// This function only tests target-independent requirements.
4238/// For target-dependent requirements, a target should override
4239/// TargetLowering::IsEligibleForTailCallOptimization.
4240///
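/// A rough illustration (identifiers are made up): given
///
///   %t = tail call i8* @callee()
///   %u = bitcast i8* %t to i32*
///   ret i32* %u
///
/// the call is in tail call position, since only a no-op pointer bitcast
/// stands between it and the return, and the returned value is the call's
/// own result.
///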
4241static bool
4242isInTailCallPosition(const Instruction *I, Attributes CalleeRetAttr,
4243                     const TargetLowering &TLI) {
4244  const BasicBlock *ExitBB = I->getParent();
4245  const TerminatorInst *Term = ExitBB->getTerminator();
4246  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
4247  const Function *F = ExitBB->getParent();
4248
4249  // The block must end in a return statement or an unreachable.
4250  if (!Ret && !isa<UnreachableInst>(Term)) return false;
4251
4252  // If I will have a chain, make sure no other instruction that will have a
4253  // chain interposes between I and the return.
4254  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
4255      !I->isSafeToSpeculativelyExecute())
4256    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
4257         --BBI) {
4258      if (&*BBI == I)
4259        break;
4260      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
4261          !BBI->isSafeToSpeculativelyExecute())
4262        return false;
4263    }
4264
4265  // If the block ends with a void return or unreachable, it doesn't matter
4266  // what the call's return type is.
4267  if (!Ret || Ret->getNumOperands() == 0) return true;
4268
4269  // If the return value is undef, it doesn't matter what the call's
4270  // return type is.
4271  if (isa<UndefValue>(Ret->getOperand(0))) return true;
4272
4273  // Conservatively require the attributes of the call to match those of
4274  // the return. Ignore noalias because it doesn't affect the call sequence.
4275  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
4276  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
4277    return false;
4278
4279  // Otherwise, make sure the unmodified return value of I is the return value.
4280  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
4281       U = dyn_cast<Instruction>(U->getOperand(0))) {
4282    if (!U)
4283      return false;
4284    if (!U->hasOneUse())
4285      return false;
4286    if (U == I)
4287      break;
4288    // Check for a truly no-op truncate.
4289    if (isa<TruncInst>(U) &&
4290        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
4291      continue;
4292    // Check for a truly no-op bitcast.
4293    if (isa<BitCastInst>(U) &&
4294        (U->getOperand(0)->getType() == U->getType() ||
4295         (isa<PointerType>(U->getOperand(0)->getType()) &&
4296          isa<PointerType>(U->getType()))))
4297      continue;
4298    // Otherwise it's not a true no-op.
4299    return false;
4300  }
4301
4302  return true;
4303}
4304
4305void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
4306                                      bool isTailCall,
4307                                      MachineBasicBlock *LandingPad) {
4308  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4309  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4310  const Type *RetTy = FTy->getReturnType();
4311  MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4312  unsigned BeginLabel = 0, EndLabel = 0;
4313
4314  TargetLowering::ArgListTy Args;
4315  TargetLowering::ArgListEntry Entry;
4316  Args.reserve(CS.arg_size());
4317
4318  // Check whether the function can return without sret-demotion.
4319  SmallVector<EVT, 4> OutVTs;
4320  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
4321  SmallVector<uint64_t, 4> Offsets;
4322  getReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
4323    OutVTs, OutsFlags, TLI, &Offsets);
4324
4325
4326  bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
4327                        FTy->isVarArg(), OutVTs, OutsFlags, DAG);
4328
4329  SDValue DemoteStackSlot;
4330
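  // If the target cannot lower this return value in registers, demote the
  // return: pass a pointer to a fresh stack temporary as a hidden sret
  // argument and reload the result from that slot after the call (below).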
4331  if (!CanLowerReturn) {
4332    uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(
4333                      FTy->getReturnType());
4334    unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(
4335                      FTy->getReturnType());
4336    MachineFunction &MF = DAG.getMachineFunction();
4337    int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
4338    const Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
4339
4340    DemoteStackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
4341    Entry.Node = DemoteStackSlot;
4342    Entry.Ty = StackSlotPtrType;
4343    Entry.isSExt = false;
4344    Entry.isZExt = false;
4345    Entry.isInReg = false;
4346    Entry.isSRet = true;
4347    Entry.isNest = false;
4348    Entry.isByVal = false;
4349    Entry.Alignment = Align;
4350    Args.push_back(Entry);
4351    RetTy = Type::getVoidTy(FTy->getContext());
4352  }
4353
4354  for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4355       i != e; ++i) {
4356    SDValue ArgNode = getValue(*i);
4357    Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4358
4359    unsigned attrInd = i - CS.arg_begin() + 1;
4360    Entry.isSExt  = CS.paramHasAttr(attrInd, Attribute::SExt);
4361    Entry.isZExt  = CS.paramHasAttr(attrInd, Attribute::ZExt);
4362    Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4363    Entry.isSRet  = CS.paramHasAttr(attrInd, Attribute::StructRet);
4364    Entry.isNest  = CS.paramHasAttr(attrInd, Attribute::Nest);
4365    Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4366    Entry.Alignment = CS.getParamAlignment(attrInd);
4367    Args.push_back(Entry);
4368  }
4369
4370  if (LandingPad && MMI) {
4371    // Insert a label before the invoke call to mark the try range.  This can be
4372    // used to detect deletion of the invoke via the MachineModuleInfo.
4373    BeginLabel = MMI->NextLabelID();
4374
4375    // Both PendingLoads and PendingExports must be flushed here;
4376    // this call might not return.
4377    (void)getRoot();
4378    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4379                             getControlRoot(), BeginLabel));
4380  }
4381
4382  // Check if target-independent constraints permit a tail call here.
4383  // Target-dependent constraints are checked within TLI.LowerCallTo.
4384  if (isTailCall &&
4385      !isInTailCallPosition(CS.getInstruction(),
4386                            CS.getAttributes().getRetAttributes(),
4387                            TLI))
4388    isTailCall = false;
4389
4390  std::pair<SDValue,SDValue> Result =
4391    TLI.LowerCallTo(getRoot(), RetTy,
4392                    CS.paramHasAttr(0, Attribute::SExt),
4393                    CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
4394                    CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
4395                    CS.getCallingConv(),
4396                    isTailCall,
4397                    !CS.getInstruction()->use_empty(),
4398                    Callee, Args, DAG, getCurDebugLoc());
4399  assert((isTailCall || Result.second.getNode()) &&
4400         "Non-null chain expected with non-tail call!");
4401  assert((Result.second.getNode() || !Result.first.getNode()) &&
4402         "Null value expected with tail call!");
4403  if (Result.first.getNode())
4404    setValue(CS.getInstruction(), Result.first);
4405  else if (!CanLowerReturn && Result.second.getNode()) {
4406    // The instruction result is the result of loading from the
4407    // hidden sret parameter.
4408    SmallVector<EVT, 1> PVTs;
4409    const Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
4410
4411    ComputeValueVTs(TLI, PtrRetTy, PVTs);
4412    assert(PVTs.size() == 1 && "Pointers should fit in one register");
4413    EVT PtrVT = PVTs[0];
4414    unsigned NumValues = OutVTs.size();
4415    SmallVector<SDValue, 4> Values(NumValues);
4416    SmallVector<SDValue, 4> Chains(NumValues);
4417
4418    for (unsigned i = 0; i < NumValues; ++i) {
4419      SDValue L = DAG.getLoad(OutVTs[i], getCurDebugLoc(), Result.second,
4420        DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, DemoteStackSlot,
4421        DAG.getConstant(Offsets[i], PtrVT)),
4422        NULL, Offsets[i], false, 1);
4423      Values[i] = L;
4424      Chains[i] = L.getValue(1);
4425    }
4426    SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
4427                                MVT::Other, &Chains[0], NumValues);
4428    PendingLoads.push_back(Chain);
4429
4430    setValue(CS.getInstruction(), DAG.getNode(ISD::MERGE_VALUES,
4431             getCurDebugLoc(), DAG.getVTList(&OutVTs[0], NumValues),
4432             &Values[0], NumValues));
4433  }
4434  // As a special case, a null chain means that a tail call has
4435  // been emitted and the DAG root is already updated.
4436  if (Result.second.getNode())
4437    DAG.setRoot(Result.second);
4438  else
4439    HasTailCall = true;
4440
4441  if (LandingPad && MMI) {
4442    // Insert a label at the end of the invoke call to mark the try range.  This
4443    // can be used to detect deletion of the invoke via the MachineModuleInfo.
4444    EndLabel = MMI->NextLabelID();
4445    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4446                             getRoot(), EndLabel));
4447
4448    // Inform MachineModuleInfo of range.
4449    MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
4450  }
4451}
4452
4453
4454void SelectionDAGBuilder::visitCall(CallInst &I) {
4455  const char *RenameFn = 0;
4456  if (Function *F = I.getCalledFunction()) {
4457    if (F->isDeclaration()) {
4458      const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
4459      if (II) {
4460        if (unsigned IID = II->getIntrinsicID(F)) {
4461          RenameFn = visitIntrinsicCall(I, IID);
4462          if (!RenameFn)
4463            return;
4464        }
4465      }
4466      if (unsigned IID = F->getIntrinsicID()) {
4467        RenameFn = visitIntrinsicCall(I, IID);
4468        if (!RenameFn)
4469          return;
4470      }
4471    }
4472
4473    // Check for well-known libc/libm calls.  If the function is internal, it
4474    // can't be a library call.
4475    if (!F->hasLocalLinkage() && F->hasName()) {
4476      StringRef Name = F->getName();
4477      if (Name == "copysign" || Name == "copysignf") {
4478        if (I.getNumOperands() == 3 &&   // Basic sanity checks.
4479            I.getOperand(1)->getType()->isFloatingPoint() &&
4480            I.getType() == I.getOperand(1)->getType() &&
4481            I.getType() == I.getOperand(2)->getType()) {
4482          SDValue LHS = getValue(I.getOperand(1));
4483          SDValue RHS = getValue(I.getOperand(2));
4484          setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
4485                                   LHS.getValueType(), LHS, RHS));
4486          return;
4487        }
4488      } else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
4489        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
4490            I.getOperand(1)->getType()->isFloatingPoint() &&
4491            I.getType() == I.getOperand(1)->getType()) {
4492          SDValue Tmp = getValue(I.getOperand(1));
4493          setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
4494                                   Tmp.getValueType(), Tmp));
4495          return;
4496        }
4497      } else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
4498        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
4499            I.getOperand(1)->getType()->isFloatingPoint() &&
4500            I.getType() == I.getOperand(1)->getType() &&
4501            I.onlyReadsMemory()) {
4502          SDValue Tmp = getValue(I.getOperand(1));
4503          setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
4504                                   Tmp.getValueType(), Tmp));
4505          return;
4506        }
4507      } else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
4508        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
4509            I.getOperand(1)->getType()->isFloatingPoint() &&
4510            I.getType() == I.getOperand(1)->getType() &&
4511            I.onlyReadsMemory()) {
4512          SDValue Tmp = getValue(I.getOperand(1));
4513          setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
4514                                   Tmp.getValueType(), Tmp));
4515          return;
4516        }
4517      } else if (Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl") {
4518        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
4519            I.getOperand(1)->getType()->isFloatingPoint() &&
4520            I.getType() == I.getOperand(1)->getType() &&
4521            I.onlyReadsMemory()) {
4522          SDValue Tmp = getValue(I.getOperand(1));
4523          setValue(&I, DAG.getNode(ISD::FSQRT, getCurDebugLoc(),
4524                                   Tmp.getValueType(), Tmp));
4525          return;
4526        }
4527      }
4528    }
4529  } else if (isa<InlineAsm>(I.getOperand(0))) {
4530    visitInlineAsm(&I);
4531    return;
4532  }
4533
4534  SDValue Callee;
4535  if (!RenameFn)
4536    Callee = getValue(I.getOperand(0));
4537  else
4538    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
4539
4540  // Check if we can potentially perform a tail call. More detailed
4541  // checking will be done within LowerCallTo, after more information
4542  // about the call is known.
4543  bool isTailCall = PerformTailCallOpt && I.isTailCall();
4544
4545  LowerCallTo(&I, Callee, isTailCall);
4546}
4547
4548
4549/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
4550/// this value and returns the result as a ValueVT value.  This uses
4551/// Chain/Flag as the input and updates them for the output Chain/Flag.
4552/// If the Flag pointer is NULL, no flag is used.
4553SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
4554                                      SDValue &Chain,
4555                                      SDValue *Flag) const {
4556  // Assemble the legal parts into the final values.
4557  SmallVector<SDValue, 4> Values(ValueVTs.size());
4558  SmallVector<SDValue, 8> Parts;
4559  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4560    // Copy the legal parts from the registers.
4561    EVT ValueVT = ValueVTs[Value];
4562    unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
4563    EVT RegisterVT = RegVTs[Value];
4564
4565    Parts.resize(NumRegs);
4566    for (unsigned i = 0; i != NumRegs; ++i) {
4567      SDValue P;
4568      if (Flag == 0)
4569        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
4570      else {
4571        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
4572        *Flag = P.getValue(2);
4573      }
4574      Chain = P.getValue(1);
4575
4576      // If the source register was virtual and if we know something about it,
4577      // add an assert node.
4578      if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
4579          RegisterVT.isInteger() && !RegisterVT.isVector()) {
4580        unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
4581        FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
4582        if (FLI.LiveOutRegInfo.size() > SlotNo) {
4583          FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
4584
4585          unsigned RegSize = RegisterVT.getSizeInBits();
4586          unsigned NumSignBits = LOI.NumSignBits;
4587          unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
4588
4589          // FIXME: We capture more information than the dag can represent.  For
4590          // now, just use the tightest assertzext/assertsext possible.
4591          bool isSExt = true;
4592          EVT FromVT(MVT::Other);
4593          if (NumSignBits == RegSize)
4594            isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
4595          else if (NumZeroBits >= RegSize-1)
4596            isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
4597          else if (NumSignBits > RegSize-8)
4598            isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
4599          else if (NumZeroBits >= RegSize-8)
4600            isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
4601          else if (NumSignBits > RegSize-16)
4602            isSExt = true, FromVT = MVT::i16;  // ASSERT SEXT 16
4603          else if (NumZeroBits >= RegSize-16)
4604            isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
4605          else if (NumSignBits > RegSize-32)
4606            isSExt = true, FromVT = MVT::i32;  // ASSERT SEXT 32
4607          else if (NumZeroBits >= RegSize-32)
4608            isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
4609
4610          if (FromVT != MVT::Other) {
4611            P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
4612                            RegisterVT, P, DAG.getValueType(FromVT));
4613
4614          }
4615        }
4616      }
4617
4618      Parts[i] = P;
4619    }
4620
4621    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
4622                                     NumRegs, RegisterVT, ValueVT);
4623    Part += NumRegs;
4624    Parts.clear();
4625  }
4626
4627  return DAG.getNode(ISD::MERGE_VALUES, dl,
4628                     DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
4629                     &Values[0], ValueVTs.size());
4630}
4631
4632/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
4633/// specified value into the registers specified by this object.  This uses
4634/// Chain/Flag as the input and updates them for the output Chain/Flag.
4635/// If the Flag pointer is NULL, no flag is used.
4636void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
4637                                 SDValue &Chain, SDValue *Flag) const {
4638  // Get the list of the value's legal parts.
4639  unsigned NumRegs = Regs.size();
4640  SmallVector<SDValue, 8> Parts(NumRegs);
4641  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4642    EVT ValueVT = ValueVTs[Value];
4643    unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
4644    EVT RegisterVT = RegVTs[Value];
4645
4646    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
4647                   &Parts[Part], NumParts, RegisterVT);
4648    Part += NumParts;
4649  }
4650
4651  // Copy the parts into the registers.
4652  SmallVector<SDValue, 8> Chains(NumRegs);
4653  for (unsigned i = 0; i != NumRegs; ++i) {
4654    SDValue Part;
4655    if (Flag == 0)
4656      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
4657    else {
4658      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
4659      *Flag = Part.getValue(1);
4660    }
4661    Chains[i] = Part.getValue(0);
4662  }
4663
4664  if (NumRegs == 1 || Flag)
4665    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
4666    // flagged to it. That is, the CopyToReg nodes and the user are considered
4667    // a single scheduling unit. If we create a TokenFactor and return it as
4668    // chain, then the TokenFactor is both a predecessor (operand) of the
4669    // user as well as a successor (the TF operands are flagged to the user).
4670    // c1, f1 = CopyToReg
4671    // c2, f2 = CopyToReg
4672    // c3     = TokenFactor c1, c2
4673    // ...
4674    //        = op c3, ..., f2
4675    Chain = Chains[NumRegs-1];
4676  else
4677    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
4678}
4679
4680/// AddInlineAsmOperands - Add this value to the specified inlineasm node
4681/// operand list.  This adds the code marker and includes the number of
4682/// values added into it.
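/// As encoded below, the flag word carries the operand code in bits 0-2 and
/// the register count in bits 3-15; for a matched operand, bit 31 is set and
/// the index of the operand it matches is placed starting at bit 16.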
4683void RegsForValue::AddInlineAsmOperands(unsigned Code,
4684                                        bool HasMatching,unsigned MatchingIdx,
4685                                        SelectionDAG &DAG,
4686                                        std::vector<SDValue> &Ops) const {
4687  EVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
4688  assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
4689  unsigned Flag = Code | (Regs.size() << 3);
4690  if (HasMatching)
4691    Flag |= 0x80000000 | (MatchingIdx << 16);
4692  Ops.push_back(DAG.getTargetConstant(Flag, IntPtrTy));
4693  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
4694    unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
4695    EVT RegisterVT = RegVTs[Value];
4696    for (unsigned i = 0; i != NumRegs; ++i) {
4697      assert(Reg < Regs.size() && "Mismatch in # registers expected");
4698      Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
4699    }
4700  }
4701}
4702
4703/// isAllocatableRegister - If the specified register is safe to allocate,
4704/// i.e. it isn't a stack pointer or some other special register, return the
4705/// register class for the register.  Otherwise, return null.
4706static const TargetRegisterClass *
4707isAllocatableRegister(unsigned Reg, MachineFunction &MF,
4708                      const TargetLowering &TLI,
4709                      const TargetRegisterInfo *TRI) {
4710  EVT FoundVT = MVT::Other;
4711  const TargetRegisterClass *FoundRC = 0;
4712  for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
4713       E = TRI->regclass_end(); RCI != E; ++RCI) {
4714    EVT ThisVT = MVT::Other;
4715
4716    const TargetRegisterClass *RC = *RCI;
4717    // If none of the value types for this register class are valid, we
4718    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
4719    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
4720         I != E; ++I) {
4721      if (TLI.isTypeLegal(*I)) {
4722        // If we have already found this register in a different register class,
4723        // choose the one with the largest VT specified.  For example, on
4724        // PowerPC, we favor f64 register classes over f32.
4725        if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
4726          ThisVT = *I;
4727          break;
4728        }
4729      }
4730    }
4731
4732    if (ThisVT == MVT::Other) continue;
4733
4734    // NOTE: This isn't ideal.  In particular, this might allocate the
4735    // frame pointer in functions that need it (due to them not being taken
4736    // out of allocation, because a variable sized allocation hasn't been seen
4737    // yet).  This is a slight code pessimization, but should still work.
4738    for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
4739         E = RC->allocation_order_end(MF); I != E; ++I)
4740      if (*I == Reg) {
4741        // We found a matching register class.  Keep looking at others in case
4742        // we find one with larger registers that this physreg is also in.
4743        FoundRC = RC;
4744        FoundVT = ThisVT;
4745        break;
4746      }
4747  }
4748  return FoundRC;
4749}
4750
4751
4752namespace llvm {
4753/// AsmOperandInfo - This contains information for each constraint that we are
4754/// lowering.
4755class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
4756    public TargetLowering::AsmOperandInfo {
4757public:
4758  /// CallOperand - If this is the result output operand or a clobber
4759  /// this is null, otherwise it is the incoming operand to the CallInst.
4760  /// This gets modified as the asm is processed.
4761  SDValue CallOperand;
4762
4763  /// AssignedRegs - If this is a register or register class operand, this
4764  /// contains the set of registers corresponding to the operand.
4765  RegsForValue AssignedRegs;
4766
4767  explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
4768    : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
4769  }
4770
4771  /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
4772  /// busy in OutputRegs/InputRegs.
4773  void MarkAllocatedRegs(bool isOutReg, bool isInReg,
4774                         std::set<unsigned> &OutputRegs,
4775                         std::set<unsigned> &InputRegs,
4776                         const TargetRegisterInfo &TRI) const {
4777    if (isOutReg) {
4778      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4779        MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
4780    }
4781    if (isInReg) {
4782      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4783        MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
4784    }
4785  }
4786
4787  /// getCallOperandValEVT - Return the EVT of the Value* that this operand
4788  /// corresponds to.  If there is no Value* for this operand, it returns
4789  /// MVT::Other.
4790  EVT getCallOperandValEVT(LLVMContext &Context,
4791                           const TargetLowering &TLI,
4792                           const TargetData *TD) const {
4793    if (CallOperandVal == 0) return MVT::Other;
4794
4795    if (isa<BasicBlock>(CallOperandVal))
4796      return TLI.getPointerTy();
4797
4798    const llvm::Type *OpTy = CallOperandVal->getType();
4799
4800    // If this is an indirect operand, the operand is a pointer to the
4801    // accessed type.
4802    if (isIndirect)
4803      OpTy = cast<PointerType>(OpTy)->getElementType();
4804
4805    // If OpTy is not a single value, it may be a struct/union that we
4806    // can tile with integers.
4807    if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4808      unsigned BitSize = TD->getTypeSizeInBits(OpTy);
4809      switch (BitSize) {
4810      default: break;
4811      case 1:
4812      case 8:
4813      case 16:
4814      case 32:
4815      case 64:
4816      case 128:
4817        OpTy = IntegerType::get(Context, BitSize);
4818        break;
4819      }
4820    }
4821
4822    return TLI.getValueType(OpTy, true);
4823  }
4824
4825private:
4826  /// MarkRegAndAliases - Mark the specified register and all aliases in the
4827  /// specified set.
4828  static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
4829                                const TargetRegisterInfo &TRI) {
4830    assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
4831    Regs.insert(Reg);
4832    if (const unsigned *Aliases = TRI.getAliasSet(Reg))
4833      for (; *Aliases; ++Aliases)
4834        Regs.insert(*Aliases);
4835  }
4836};
4837} // end llvm namespace.
4838
4839
4840/// GetRegistersForValue - Assign registers (virtual or physical) for the
4841/// specified operand.  We prefer to assign virtual registers, to allow the
4842/// register allocator to handle the assignment process.  However, if the asm
4843/// uses features that we can't model on machineinstrs, we have SDISel do the
4844/// allocation.  This produces generally horrible, but correct, code.
4845///
4846///   OpInfo describes the operand.
4847///   Input and OutputRegs are the set of already allocated physical registers.
4848///
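/// Roughly three cases are handled below: a constraint naming a specific
/// physical register (e.g. {r17}) takes that register plus any additional
/// consecutive registers needed, a register-class constraint gets freshly
/// created virtual registers, and any remaining constraint falls back to
/// scanning getRegClassForInlineAsmConstraint's list for enough free
/// consecutive physical registers.
///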
4849void SelectionDAGBuilder::
4850GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
4851                     std::set<unsigned> &OutputRegs,
4852                     std::set<unsigned> &InputRegs) {
4853  LLVMContext &Context = FuncInfo.Fn->getContext();
4854
4855  // Compute whether this value requires an input register, an output register,
4856  // or both.
4857  bool isOutReg = false;
4858  bool isInReg = false;
4859  switch (OpInfo.Type) {
4860  case InlineAsm::isOutput:
4861    isOutReg = true;
4862
4863    // If there is an input constraint that matches this, we need to reserve
4864    // the input register so no other inputs allocate to it.
4865    isInReg = OpInfo.hasMatchingInput();
4866    break;
4867  case InlineAsm::isInput:
4868    isInReg = true;
4869    isOutReg = false;
4870    break;
4871  case InlineAsm::isClobber:
4872    isOutReg = true;
4873    isInReg = true;
4874    break;
4875  }
4876
4877
4878  MachineFunction &MF = DAG.getMachineFunction();
4879  SmallVector<unsigned, 4> Regs;
4880
4881  // If this is a constraint for a single physreg, or a constraint for a
4882  // register class, find it.
4883  std::pair<unsigned, const TargetRegisterClass*> PhysReg =
4884    TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
4885                                     OpInfo.ConstraintVT);
4886
4887  unsigned NumRegs = 1;
4888  if (OpInfo.ConstraintVT != MVT::Other) {
4889    // If this is an FP input in an integer register (or vice versa), insert a bit
4890    // cast of the input value.  More generally, handle any case where the input
4891    // value disagrees with the register class we plan to stick this in.
4892    if (OpInfo.Type == InlineAsm::isInput &&
4893        PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
4894      // Try to convert to the first EVT that the reg class contains.  If the
4895      // types are of identical size, use a bitcast to convert (e.g. two differing
4896      // vector types).
4897      EVT RegVT = *PhysReg.second->vt_begin();
4898      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
4899        OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4900                                         RegVT, OpInfo.CallOperand);
4901        OpInfo.ConstraintVT = RegVT;
4902      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
4903        // If the input is a FP value and we want it in FP registers, do a
4904        // bitcast to the corresponding integer type.  This turns an f64 value
4905        // into i64, which can be passed with two i32 values on a 32-bit
4906        // machine.
4907        RegVT = EVT::getIntegerVT(Context,
4908                                  OpInfo.ConstraintVT.getSizeInBits());
4909        OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4910                                         RegVT, OpInfo.CallOperand);
4911        OpInfo.ConstraintVT = RegVT;
4912      }
4913    }
4914
4915    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
4916  }
4917
4918  EVT RegVT;
4919  EVT ValueVT = OpInfo.ConstraintVT;
4920
4921  // If this is a constraint for a specific physical register, like {r17},
4922  // assign it now.
4923  if (unsigned AssignedReg = PhysReg.first) {
4924    const TargetRegisterClass *RC = PhysReg.second;
4925    if (OpInfo.ConstraintVT == MVT::Other)
4926      ValueVT = *RC->vt_begin();
4927
4928    // Get the actual register value type.  This is important, because the user
4929    // may have asked for (e.g.) the AX register in i32 type.  We need to
4930    // remember that AX is actually i16 to get the right extension.
4931    RegVT = *RC->vt_begin();
4932
4933    // This is an explicit reference to a physical register.
4934    Regs.push_back(AssignedReg);
4935
4936    // If this is an expanded reference, add the rest of the regs to Regs.
4937    if (NumRegs != 1) {
4938      TargetRegisterClass::iterator I = RC->begin();
4939      for (; *I != AssignedReg; ++I)
4940        assert(I != RC->end() && "Didn't find reg!");
4941
4942      // Already added the first reg.
4943      --NumRegs; ++I;
4944      for (; NumRegs; --NumRegs, ++I) {
4945        assert(I != RC->end() && "Ran out of registers to allocate!");
4946        Regs.push_back(*I);
4947      }
4948    }
4949    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4950    const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4951    OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
4952    return;
4953  }
4954
4955  // Otherwise, if this was a reference to an LLVM register class, create vregs
4956  // for this reference.
4957  if (const TargetRegisterClass *RC = PhysReg.second) {
4958    RegVT = *RC->vt_begin();
4959    if (OpInfo.ConstraintVT == MVT::Other)
4960      ValueVT = RegVT;
4961
4962    // Create the appropriate number of virtual registers.
4963    MachineRegisterInfo &RegInfo = MF.getRegInfo();
4964    for (; NumRegs; --NumRegs)
4965      Regs.push_back(RegInfo.createVirtualRegister(RC));
4966
4967    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4968    return;
4969  }
4970
4971  // This is a reference to a register class that doesn't directly correspond
4972  // to an LLVM register class.  Allocate NumRegs consecutive, available,
4973  // registers from the class.
4974  std::vector<unsigned> RegClassRegs
4975    = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
4976                                            OpInfo.ConstraintVT);
4977
4978  const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4979  unsigned NumAllocated = 0;
4980  for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
4981    unsigned Reg = RegClassRegs[i];
4982    // See if this register is available.
4983    if ((isOutReg && OutputRegs.count(Reg)) ||   // Already used.
4984        (isInReg  && InputRegs.count(Reg))) {    // Already used.
4985      // Make sure we find consecutive registers.
4986      NumAllocated = 0;
4987      continue;
4988    }
4989
4990    // Check to see if this register is allocatable (i.e. don't give out the
4991    // stack pointer).
4992    const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
4993    if (!RC) {        // Couldn't allocate this register.
4994      // Reset NumAllocated to make sure we return consecutive registers.
4995      NumAllocated = 0;
4996      continue;
4997    }
4998
4999    // Okay, this register is good, we can use it.
5000    ++NumAllocated;
5001
5002    // If we allocated enough consecutive registers, succeed.
5003    if (NumAllocated == NumRegs) {
5004      unsigned RegStart = (i-NumAllocated)+1;
5005      unsigned RegEnd   = i+1;
5006      // Mark all of the allocated registers used.
5007      for (unsigned i = RegStart; i != RegEnd; ++i)
5008        Regs.push_back(RegClassRegs[i]);
5009
5010      OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
5011                                         OpInfo.ConstraintVT);
5012      OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5013      return;
5014    }
5015  }
5016
5017  // Otherwise, we couldn't allocate enough registers for this.
5018}
5019
5020/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
5021/// processed uses a memory 'm' constraint.
5022static bool
5023hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
5024                          const TargetLowering &TLI) {
5025  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
5026    InlineAsm::ConstraintInfo &CI = CInfos[i];
5027    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
5028      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
5029      if (CType == TargetLowering::C_Memory)
5030        return true;
5031    }
5032
5033    // Indirect operands access memory.
5034    if (CI.isIndirect)
5035      return true;
5036  }
5037
5038  return false;
5039}
5040
5041/// visitInlineAsm - Handle a call to an InlineAsm object.
5042///
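/// In outline: parse the constraint string and compute each operand's value
/// and type, choose a constraint kind per operand and grab any fixed
/// physical registers, assign registers to register-class operands, and then
/// assemble the operand list for the ISD::INLINEASM node (recording stores
/// to emit afterwards for indirect outputs).
///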
5043void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
5044  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
5045
5046  /// ConstraintOperands - Information about all of the constraints.
5047  std::vector<SDISelAsmOperandInfo> ConstraintOperands;
5048
5049  std::set<unsigned> OutputRegs, InputRegs;
5050
5051  // Do a prepass over the constraints, canonicalizing them, and building up the
5052  // ConstraintOperands list.
5053  std::vector<InlineAsm::ConstraintInfo>
5054    ConstraintInfos = IA->ParseConstraints();
5055
5056  bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
5057
5058  SDValue Chain, Flag;
5059
5060  // We won't need to flush pending loads if this asm doesn't touch
5061  // memory and is nonvolatile.
5062  if (hasMemory || IA->hasSideEffects())
5063    Chain = getRoot();
5064  else
5065    Chain = DAG.getRoot();
5066
5067  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
5068  unsigned ResNo = 0;   // ResNo - The result number of the next output.
5069  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5070    ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
5071    SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
5072
5073    EVT OpVT = MVT::Other;
5074
5075    // Compute the value type for each operand.
5076    switch (OpInfo.Type) {
5077    case InlineAsm::isOutput:
5078      // Indirect outputs just consume an argument.
5079      if (OpInfo.isIndirect) {
5080        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5081        break;
5082      }
5083
5084      // The return value of the call is this value.  As such, there is no
5085      // corresponding argument.
5086      assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
5087             "Bad inline asm!");
5088      if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
5089        OpVT = TLI.getValueType(STy->getElementType(ResNo));
5090      } else {
5091        assert(ResNo == 0 && "Asm only has one result!");
5092        OpVT = TLI.getValueType(CS.getType());
5093      }
5094      ++ResNo;
5095      break;
5096    case InlineAsm::isInput:
5097      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5098      break;
5099    case InlineAsm::isClobber:
5100      // Nothing to do.
5101      break;
5102    }
5103
5104    // If this is an input or an indirect output, process the call argument.
5105    // BasicBlocks are labels, currently appearing only in asm's.
5106    if (OpInfo.CallOperandVal) {
5107      // Strip bitcasts, if any.  This mostly comes up for functions.
5108      OpInfo.CallOperandVal = OpInfo.CallOperandVal->stripPointerCasts();
5109
5110      if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
5111        OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
5112      } else {
5113        OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
5114      }
5115
5116      OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, TD);
5117    }
5118
5119    OpInfo.ConstraintVT = OpVT;
5120  }
5121
5122  // Second pass over the constraints: compute which constraint option to use
5123  // and assign registers to constraints that want a specific physreg.
5124  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5125    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5126
5127    // If this is an output operand with a matching input operand, look up the
5128    // matching input. If their types mismatch, e.g. one is an integer, the
5129    // other is floating point, or their sizes are different, flag it as an
5130    // error.
5131    if (OpInfo.hasMatchingInput()) {
5132      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
5133      if (OpInfo.ConstraintVT != Input.ConstraintVT) {
5134        if ((OpInfo.ConstraintVT.isInteger() !=
5135             Input.ConstraintVT.isInteger()) ||
5136            (OpInfo.ConstraintVT.getSizeInBits() !=
5137             Input.ConstraintVT.getSizeInBits())) {
5138          llvm_report_error("Unsupported asm: input constraint"
5139                            " with a matching output constraint of incompatible"
5140                            " type!");
5141        }
5142        Input.ConstraintVT = OpInfo.ConstraintVT;
5143      }
5144    }
5145
5146    // Compute the constraint code and ConstraintType to use.
5147    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
5148
5149    // If this is a memory input, and if the operand is not indirect, do what we
5150    // need to provide an address for the memory input.
5151    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5152        !OpInfo.isIndirect) {
5153      assert(OpInfo.Type == InlineAsm::isInput &&
5154             "Can only indirectify direct input operands!");
5155
5156      // Memory operands really want the address of the value.  If we don't have
5157      // an indirect input, put it in the constpool if we can, otherwise spill
5158      // it to a stack slot.
5159
5160      // If the operand is a float, integer, or vector constant, spill to a
5161      // constant pool entry to get its address.
5162      Value *OpVal = OpInfo.CallOperandVal;
5163      if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
5164          isa<ConstantVector>(OpVal)) {
5165        OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
5166                                                 TLI.getPointerTy());
5167      } else {
5168        // Otherwise, create a stack slot and emit a store to it before the
5169        // asm.
5170        const Type *Ty = OpVal->getType();
5171        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
5172        unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
5173        MachineFunction &MF = DAG.getMachineFunction();
5174        int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
5175        SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
5176        Chain = DAG.getStore(Chain, getCurDebugLoc(),
5177                             OpInfo.CallOperand, StackSlot, NULL, 0);
5178        OpInfo.CallOperand = StackSlot;
5179      }
5180
5181      // There is no longer a Value* corresponding to this operand.
5182      OpInfo.CallOperandVal = 0;
5183      // It is now an indirect operand.
5184      OpInfo.isIndirect = true;
5185    }
5186
5187    // If this constraint is for a specific register, allocate it before
5188    // anything else.
5189    if (OpInfo.ConstraintType == TargetLowering::C_Register)
5190      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5191  }
5192  ConstraintInfos.clear();
5193
5194
5195  // Third pass - Loop over all of the operands, assigning virtual or physregs
5196  // to register class operands.
5197  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5198    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5199
5200    // C_Register operands have already been allocated, Other/Memory don't need
5201    // to be.
5202    if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
5203      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5204  }
5205
5206  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
5207  std::vector<SDValue> AsmNodeOperands;
5208  AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
5209  AsmNodeOperands.push_back(
5210          DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));
5211
5212
5213  // Loop over all of the inputs, copying the operand values into the
5214  // appropriate registers and processing the output regs.
5215  RegsForValue RetValRegs;
5216
5217  // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
5218  std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
5219
5220  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5221    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5222
5223    switch (OpInfo.Type) {
5224    case InlineAsm::isOutput: {
5225      if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
5226          OpInfo.ConstraintType != TargetLowering::C_Register) {
5227        // Memory output, or 'other' output (e.g. 'X' constraint).
5228        assert(OpInfo.isIndirect && "Memory output must be indirect operand");
5229
5230        // Add information to the INLINEASM node to know about this output.
5231        unsigned ResOpType = 4/*MEM*/ | (1<<3);
5232        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5233                                                        TLI.getPointerTy()));
5234        AsmNodeOperands.push_back(OpInfo.CallOperand);
5235        break;
5236      }
5237
5238      // Otherwise, this is a register or register class output.
5239
5240      // Copy the output from the appropriate register.  Find a register that
5241      // we can use.
5242      if (OpInfo.AssignedRegs.Regs.empty()) {
5243        llvm_report_error("Couldn't allocate output reg for"
5244                          " constraint '" + OpInfo.ConstraintCode + "'!");
5245      }
5246
5247      // If this is an indirect operand, store through the pointer after the
5248      // asm.
5249      if (OpInfo.isIndirect) {
5250        IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
5251                                                      OpInfo.CallOperandVal));
5252      } else {
5253        // This is the result value of the call.
5254        assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
5255               "Bad inline asm!");
5256        // Concatenate this output onto the outputs list.
5257        RetValRegs.append(OpInfo.AssignedRegs);
5258      }
5259
5260      // Add information to the INLINEASM node to know that this register is
5261      // set.
5262      OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
5263                                               6 /* EARLYCLOBBER REGDEF */ :
5264                                               2 /* REGDEF */ ,
5265                                               false,
5266                                               0,
5267                                               DAG, AsmNodeOperands);
5268      break;
5269    }
5270    case InlineAsm::isInput: {
5271      SDValue InOperandVal = OpInfo.CallOperand;
5272
5273      if (OpInfo.isMatchingInputConstraint()) {   // Matching constraint?
5274        // If this is required to match an output register we have already set,
5275        // just use its register.
5276        unsigned OperandNo = OpInfo.getMatchedOperand();
5277
5278        // Scan until we find the previously emitted definition of this operand.
5279        // When we find it, create a RegsForValue operand.
5280        unsigned CurOp = 2;  // The first operand.
5281        for (; OperandNo; --OperandNo) {
5282          // Advance to the next operand.
5283          unsigned OpFlag =
5284            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5285          assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
5286                  (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
5287                  (OpFlag & 7) == 4 /*MEM*/) &&
5288                 "Skipped past definitions?");
5289          CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
5290        }
5291
5292        unsigned OpFlag =
5293          cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5294        if ((OpFlag & 7) == 2 /*REGDEF*/
5295            || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
5296          // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
5297          if (OpInfo.isIndirect) {
5298            llvm_report_error("Don't know how to handle tied indirect "
5299                              "register inputs yet!");
5300          }
5301          RegsForValue MatchedRegs;
5302          MatchedRegs.TLI = &TLI;
5303          MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
5304          EVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
5305          MatchedRegs.RegVTs.push_back(RegVT);
5306          MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
5307          for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
5308               i != e; ++i)
5309            MatchedRegs.Regs.
5310              push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
5311
5312          // Use the produced MatchedRegs object to copy the input into the vregs.
5313          MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5314                                    Chain, &Flag);
5315          MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
5316                                           true, OpInfo.getMatchedOperand(),
5317                                           DAG, AsmNodeOperands);
5318          break;
5319        } else {
5320          assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
5321          assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
5322                 "Unexpected number of operands");
5323          // Add information to the INLINEASM node to know about this input.
5324          // See InlineAsm.h isUseOperandTiedToDef.
5325          OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
5326          AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
5327                                                          TLI.getPointerTy()));
5328          AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
5329          break;
5330        }
5331      }
5332
5333      if (OpInfo.ConstraintType == TargetLowering::C_Other) {
5334        assert(!OpInfo.isIndirect &&
5335               "Don't know how to handle indirect other inputs yet!");
5336
5337        std::vector<SDValue> Ops;
5338        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
5339                                         hasMemory, Ops, DAG);
5340        if (Ops.empty()) {
5341          llvm_report_error("Invalid operand for inline asm"
5342                            " constraint '" + OpInfo.ConstraintCode + "'!");
5343        }
5344
5345        // Add information to the INLINEASM node to know about this input.
5346        unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
5347        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5348                                                        TLI.getPointerTy()));
5349        AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
5350        break;
5351      } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
5352        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
5353        assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
5354               "Memory operands expect pointer values");
5355
5356        // Add information to the INLINEASM node to know about this input.
5357        unsigned ResOpType = 4/*MEM*/ | (1<<3);
5358        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5359                                                        TLI.getPointerTy()));
5360        AsmNodeOperands.push_back(InOperandVal);
5361        break;
5362      }
5363
5364      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
5365              OpInfo.ConstraintType == TargetLowering::C_Register) &&
5366             "Unknown constraint type!");
5367      assert(!OpInfo.isIndirect &&
5368             "Don't know how to handle indirect register inputs yet!");
5369
5370      // Copy the input into the appropriate registers.
5371      if (OpInfo.AssignedRegs.Regs.empty()) {
5372        llvm_report_error("Couldn't allocate input reg for"
5373                          " constraint '"+ OpInfo.ConstraintCode +"'!");
5374      }
5375
5376      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5377                                        Chain, &Flag);
5378
5379      OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
5380                                               DAG, AsmNodeOperands);
5381      break;
5382    }
5383    case InlineAsm::isClobber: {
5384      // Add the clobbered value to the operand list, so that the register
5385      // allocator is aware that the physreg got clobbered.
5386      if (!OpInfo.AssignedRegs.Regs.empty())
5387        OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
5388                                                 false, 0, DAG,AsmNodeOperands);
5389      break;
5390    }
5391    }
5392  }
5393
5394  // Finish up input operands.
5395  AsmNodeOperands[0] = Chain;
5396  if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
5397
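  // A rough sketch of how AsmNodeOperands is laid out at this point (not an
  // authoritative description of the INLINEASM node):
  //   [0]     the input chain
  //   [1]     the asm string (a target external-symbol node)
  //   [2..]   operand groups, each a flag word followed by the SDValues it
  //           describes (see the encoding sketch in the isInput case above)
  //   [last]  the glue/flag value, if any copies to registers were emitted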
5398  Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
5399                      DAG.getVTList(MVT::Other, MVT::Flag),
5400                      &AsmNodeOperands[0], AsmNodeOperands.size());
5401  Flag = Chain.getValue(1);
5402
5403  // If this asm returns a register value, copy the result from that register
5404  // and set it as the value of the call.
5405  if (!RetValRegs.Regs.empty()) {
5406    SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5407                                             Chain, &Flag);
5408
5409    // FIXME: Why don't we do this for inline asms with MRVs?
5410    if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
5411      EVT ResultType = TLI.getValueType(CS.getType());
5412
5413      // If any of the results of the inline asm is a vector, it may have the
5414      // wrong width or number of elements.  This can happen for register
5415      // classes that can contain multiple different value types: the physical
5416      // or virtual register allocated may not have the VT that was expected.
5417      // Convert it to the right type with bit_convert.
5418      if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
5419        Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5420                          ResultType, Val);
5421
5422      } else if (ResultType != Val.getValueType() &&
5423                 ResultType.isInteger() && Val.getValueType().isInteger()) {
5424        // If a result value was tied to an input value, the computed result may
5425        // have a wider width than the expected result.  Extract the relevant
5426        // portion.
5427        Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
5428      }
5429
5430      assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
5431    }
5432
5433    setValue(CS.getInstruction(), Val);
5434    // Don't need to use this as a chain in this case.
5435    if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
5436      return;
5437  }
5438
5439  std::vector<std::pair<SDValue, Value*> > StoresToEmit;
5440
5441  // Process indirect outputs: first emit all of the flagged copies out of the
5442  // physregs.
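  // Sketch of the indirect-output path (e.g. an output the front end marked
  // indirect, so OpInfo.isIndirect was set above): the asm's result is first
  // copied out of the assigned physical registers, and only then stored
  // through the original pointer operand, chained into the TokenFactor built
  // below.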
5443  for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
5444    RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
5445    Value *Ptr = IndirectStoresToEmit[i].second;
5446    SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5447                                             Chain, &Flag);
5448    StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
5449
5450  }
5451
5452  // Emit the non-flagged stores from the physregs.
5453  SmallVector<SDValue, 8> OutChains;
5454  for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
5455    OutChains.push_back(DAG.getStore(Chain, getCurDebugLoc(),
5456                                    StoresToEmit[i].first,
5457                                    getValue(StoresToEmit[i].second),
5458                                    StoresToEmit[i].second, 0));
5459  if (!OutChains.empty())
5460    Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
5461                        &OutChains[0], OutChains.size());
5462  DAG.setRoot(Chain);
5463}
5464
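// The visitors below lower the variable-argument intrinsics and the va_arg
// instruction to their ISD counterparts.  For example (illustrative IR, not
// taken from any particular test), "call void @llvm.va_start(i8* %ap)" maps
// to a VASTART node whose operands are the current root chain, the lowered
// pointer value, and a SrcValue for %ap, while "%v = va_arg i8** %ap2, i32"
// maps to a VAARG node producing both the loaded value and an updated chain.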
5465void SelectionDAGBuilder::visitVAStart(CallInst &I) {
5466  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
5467                          MVT::Other, getRoot(),
5468                          getValue(I.getOperand(1)),
5469                          DAG.getSrcValue(I.getOperand(1))));
5470}
5471
5472void SelectionDAGBuilder::visitVAArg(VAArgInst &I) {
5473  SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
5474                           getRoot(), getValue(I.getOperand(0)),
5475                           DAG.getSrcValue(I.getOperand(0)));
5476  setValue(&I, V);
5477  DAG.setRoot(V.getValue(1));
5478}
5479
5480void SelectionDAGBuilder::visitVAEnd(CallInst &I) {
5481  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
5482                          MVT::Other, getRoot(),
5483                          getValue(I.getOperand(1)),
5484                          DAG.getSrcValue(I.getOperand(1))));
5485}
5486
5487void SelectionDAGBuilder::visitVACopy(CallInst &I) {
5488  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
5489                          MVT::Other, getRoot(),
5490                          getValue(I.getOperand(1)),
5491                          getValue(I.getOperand(2)),
5492                          DAG.getSrcValue(I.getOperand(1)),
5493                          DAG.getSrcValue(I.getOperand(2))));
5494}
5495
5496/// TargetLowering::LowerCallTo - This is the default LowerCallTo
5497/// implementation, which just calls LowerCall.
5498/// FIXME: When all targets are
5499/// migrated to using LowerCall, this hook should be integrated into SDISel.
5500std::pair<SDValue, SDValue>
5501TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
5502                            bool RetSExt, bool RetZExt, bool isVarArg,
5503                            bool isInreg, unsigned NumFixedArgs,
5504                            CallingConv::ID CallConv, bool isTailCall,
5505                            bool isReturnValueUsed,
5506                            SDValue Callee,
5507                            ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) {
5508
5509  assert((!isTailCall || PerformTailCallOpt) &&
5510         "isTailCall set when tail-call optimizations are disabled!");
5511
5512  // Handle all of the outgoing arguments.
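  // Each argument is decomposed into its constituent EVTs and then into the
  // legal register-sized parts the target requires.  As a sketch (assuming a
  // hypothetical 32-bit target), an i64 argument becomes two i32 OutputArgs:
  // the first part is marked with setSplit() and the later parts have their
  // original alignment forced to 1.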
5513  SmallVector<ISD::OutputArg, 32> Outs;
5514  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
5515    SmallVector<EVT, 4> ValueVTs;
5516    ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
5517    for (unsigned Value = 0, NumValues = ValueVTs.size();
5518         Value != NumValues; ++Value) {
5519      EVT VT = ValueVTs[Value];
5520      const Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
5521      SDValue Op = SDValue(Args[i].Node.getNode(),
5522                           Args[i].Node.getResNo() + Value);
5523      ISD::ArgFlagsTy Flags;
5524      unsigned OriginalAlignment =
5525        getTargetData()->getABITypeAlignment(ArgTy);
5526
5527      if (Args[i].isZExt)
5528        Flags.setZExt();
5529      if (Args[i].isSExt)
5530        Flags.setSExt();
5531      if (Args[i].isInReg)
5532        Flags.setInReg();
5533      if (Args[i].isSRet)
5534        Flags.setSRet();
5535      if (Args[i].isByVal) {
5536        Flags.setByVal();
5537        const PointerType *Ty = cast<PointerType>(Args[i].Ty);
5538        const Type *ElementTy = Ty->getElementType();
5539        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5540        unsigned FrameSize  = getTargetData()->getTypeAllocSize(ElementTy);
5541        // For ByVal, alignment should come from the front end; the back end
5542        // guesses if it is missing, but there are cases it cannot get right.
5543        if (Args[i].Alignment)
5544          FrameAlign = Args[i].Alignment;
5545        Flags.setByValAlign(FrameAlign);
5546        Flags.setByValSize(FrameSize);
5547      }
5548      if (Args[i].isNest)
5549        Flags.setNest();
5550      Flags.setOrigAlign(OriginalAlignment);
5551
5552      EVT PartVT = getRegisterType(RetTy->getContext(), VT);
5553      unsigned NumParts = getNumRegisters(RetTy->getContext(), VT);
5554      SmallVector<SDValue, 4> Parts(NumParts);
5555      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
5556
5557      if (Args[i].isSExt)
5558        ExtendKind = ISD::SIGN_EXTEND;
5559      else if (Args[i].isZExt)
5560        ExtendKind = ISD::ZERO_EXTEND;
5561
5562      getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);
5563
5564      for (unsigned j = 0; j != NumParts; ++j) {
5565        // If this isn't the first piece, the original alignment must be 1.
5566        ISD::OutputArg MyFlags(Flags, Parts[j], i < NumFixedArgs);
5567        if (NumParts > 1 && j == 0)
5568          MyFlags.Flags.setSplit();
5569        else if (j != 0)
5570          MyFlags.Flags.setOrigAlign(1);
5571
5572        Outs.push_back(MyFlags);
5573      }
5574    }
5575  }
5576
5577  // Handle the incoming return values from the call.
5578  SmallVector<ISD::InputArg, 32> Ins;
5579  SmallVector<EVT, 4> RetTys;
5580  ComputeValueVTs(*this, RetTy, RetTys);
5581  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5582    EVT VT = RetTys[I];
5583    EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
5584    unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
5585    for (unsigned i = 0; i != NumRegs; ++i) {
5586      ISD::InputArg MyFlags;
5587      MyFlags.VT = RegisterVT;
5588      MyFlags.Used = isReturnValueUsed;
5589      if (RetSExt)
5590        MyFlags.Flags.setSExt();
5591      if (RetZExt)
5592        MyFlags.Flags.setZExt();
5593      if (isInreg)
5594        MyFlags.Flags.setInReg();
5595      Ins.push_back(MyFlags);
5596    }
5597  }
5598
5599  // Check if target-dependent constraints permit a tail call here.
5600  // Target-independent constraints should be checked by the caller.
5601  if (isTailCall &&
5602      !IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, Ins, DAG))
5603    isTailCall = false;
5604
5605  SmallVector<SDValue, 4> InVals;
5606  Chain = LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
5607                    Outs, Ins, dl, DAG, InVals);
5608
5609  // Verify that the target's LowerCall behaved as expected.
5610  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
5611         "LowerCall didn't return a valid chain!");
5612  assert((!isTailCall || InVals.empty()) &&
5613         "LowerCall emitted a return value for a tail call!");
5614  assert((isTailCall || InVals.size() == Ins.size()) &&
5615         "LowerCall didn't emit the correct number of values!");
5616  DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5617          assert(InVals[i].getNode() &&
5618                 "LowerCall emitted a null value!");
5619          assert(Ins[i].VT == InVals[i].getValueType() &&
5620                 "LowerCall emitted a value with the wrong type!");
5621        });
5622
5623  // For a tail call, the return value is merely live-out and there aren't
5624  // any nodes in the DAG representing it. Return a special value to
5625  // indicate that a tail call has been emitted and no more Instructions
5626  // should be processed in the current block.
5627  if (isTailCall) {
5628    DAG.setRoot(Chain);
5629    return std::make_pair(SDValue(), SDValue());
5630  }
5631
5632  // Collect the legal value parts into potentially illegal values
5633  // that correspond to the original function's return values.
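  // As a sketch (again assuming a hypothetical 32-bit target), an i64 return
  // value arrives here as two i32 entries in InVals; getCopyFromParts below
  // reassembles them into one i64, applying AssertSext/AssertZext when
  // RetSExt/RetZExt was set.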
5634  ISD::NodeType AssertOp = ISD::DELETED_NODE;
5635  if (RetSExt)
5636    AssertOp = ISD::AssertSext;
5637  else if (RetZExt)
5638    AssertOp = ISD::AssertZext;
5639  SmallVector<SDValue, 4> ReturnValues;
5640  unsigned CurReg = 0;
5641  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5642    EVT VT = RetTys[I];
5643    EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
5644    unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
5645
5646    SDValue ReturnValue =
5647      getCopyFromParts(DAG, dl, &InVals[CurReg], NumRegs, RegisterVT, VT,
5648                       AssertOp);
5649    ReturnValues.push_back(ReturnValue);
5650    CurReg += NumRegs;
5651  }
5652
5653  // For a function returning void, there is no return value. We can't create
5654  // such a node, so we just return a null return value in that case; nothing
5655  // will actually look at the value.
5656  if (ReturnValues.empty())
5657    return std::make_pair(SDValue(), Chain);
5658
5659  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
5660                            DAG.getVTList(&RetTys[0], RetTys.size()),
5661                            &ReturnValues[0], ReturnValues.size());
5662
5663  return std::make_pair(Res, Chain);
5664}
5665
5666void TargetLowering::LowerOperationWrapper(SDNode *N,
5667                                           SmallVectorImpl<SDValue> &Results,
5668                                           SelectionDAG &DAG) {
5669  SDValue Res = LowerOperation(SDValue(N, 0), DAG);
5670  if (Res.getNode())
5671    Results.push_back(Res);
5672}
5673
5674SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
5675  llvm_unreachable("LowerOperation not implemented for this target!");
5676  return SDValue();
5677}
5678
5679
5680void SelectionDAGBuilder::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
5681  SDValue Op = getValue(V);
5682  assert((Op.getOpcode() != ISD::CopyFromReg ||
5683          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
5684         "Copy from a reg to the same reg!");
5685  assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
5686
5687  RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
5688  SDValue Chain = DAG.getEntryNode();
5689  RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
5690  PendingExports.push_back(Chain);
5691}
5692
5693#include "llvm/CodeGen/SelectionDAGISel.h"
5694
5695void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
5696  // If this is the entry block, emit arguments.
5697  Function &F = *LLVMBB->getParent();
5698  SelectionDAG &DAG = SDB->DAG;
5699  SDValue OldRoot = DAG.getRoot();
5700  DebugLoc dl = SDB->getCurDebugLoc();
5701  const TargetData *TD = TLI.getTargetData();
5702  SmallVector<ISD::InputArg, 16> Ins;
5703
5704  // Check whether the function can return without sret-demotion.
5705  SmallVector<EVT, 4> OutVTs;
5706  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
5707  getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
5708                OutVTs, OutsFlags, TLI);
5709  FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
5710
5711  FLI.CanLowerReturn = TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(),
5712    OutVTs, OutsFlags, DAG);
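  // If the target cannot return this type in registers, the return is demoted
  // to an sret-style out-parameter: a hidden pointer argument is prepended
  // below, and the actual return values are later stored through it.  A large
  // first-class aggregate return (e.g. a struct of several i64s on a 32-bit
  // target) is the sort of case that typically takes this path.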
5713  if (!FLI.CanLowerReturn) {
5714    // Put in an sret pointer parameter before all the other parameters.
5715    SmallVector<EVT, 1> ValueVTs;
5716    ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
5717
5718    // NOTE: We assume a pointer will never break down into more than one VT,
5719    // and hence never into more than one register.
5720    ISD::ArgFlagsTy Flags;
5721    Flags.setSRet();
5722    EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), ValueVTs[0]);
5723    ISD::InputArg RetArg(Flags, RegisterVT, true);
5724    Ins.push_back(RetArg);
5725  }
5726
5727  // Set up the incoming argument description vector.
5728  unsigned Idx = 1;
5729  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
5730       I != E; ++I, ++Idx) {
5731    SmallVector<EVT, 4> ValueVTs;
5732    ComputeValueVTs(TLI, I->getType(), ValueVTs);
5733    bool isArgValueUsed = !I->use_empty();
5734    for (unsigned Value = 0, NumValues = ValueVTs.size();
5735         Value != NumValues; ++Value) {
5736      EVT VT = ValueVTs[Value];
5737      const Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
5738      ISD::ArgFlagsTy Flags;
5739      unsigned OriginalAlignment =
5740        TD->getABITypeAlignment(ArgTy);
5741
5742      if (F.paramHasAttr(Idx, Attribute::ZExt))
5743        Flags.setZExt();
5744      if (F.paramHasAttr(Idx, Attribute::SExt))
5745        Flags.setSExt();
5746      if (F.paramHasAttr(Idx, Attribute::InReg))
5747        Flags.setInReg();
5748      if (F.paramHasAttr(Idx, Attribute::StructRet))
5749        Flags.setSRet();
5750      if (F.paramHasAttr(Idx, Attribute::ByVal)) {
5751        Flags.setByVal();
5752        const PointerType *Ty = cast<PointerType>(I->getType());
5753        const Type *ElementTy = Ty->getElementType();
5754        unsigned FrameAlign = TLI.getByValTypeAlignment(ElementTy);
5755        unsigned FrameSize  = TD->getTypeAllocSize(ElementTy);
5756        // For ByVal, alignment should be passed from the front end; the back
5757        // end guesses if it is missing, but there are cases it cannot get right.
5758        if (F.getParamAlignment(Idx))
5759          FrameAlign = F.getParamAlignment(Idx);
5760        Flags.setByValAlign(FrameAlign);
5761        Flags.setByValSize(FrameSize);
5762      }
5763      if (F.paramHasAttr(Idx, Attribute::Nest))
5764        Flags.setNest();
5765      Flags.setOrigAlign(OriginalAlignment);
5766
5767      EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5768      unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5769      for (unsigned i = 0; i != NumRegs; ++i) {
5770        ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed);
5771        if (NumRegs > 1 && i == 0)
5772          MyFlags.Flags.setSplit();
5773        // If this isn't the first piece, the original alignment must be 1.
5774        else if (i > 0)
5775          MyFlags.Flags.setOrigAlign(1);
5776        Ins.push_back(MyFlags);
5777      }
5778    }
5779  }
5780
5781  // Call the target to set up the argument values.
5782  SmallVector<SDValue, 8> InVals;
5783  SDValue NewRoot = TLI.LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
5784                                             F.isVarArg(), Ins,
5785                                             dl, DAG, InVals);
5786
5787  // Verify that the target's LowerFormalArguments behaved as expected.
5788  assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
5789         "LowerFormalArguments didn't return a valid chain!");
5790  assert(InVals.size() == Ins.size() &&
5791         "LowerFormalArguments didn't emit the correct number of values!");
5792  DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5793          assert(InVals[i].getNode() &&
5794                 "LowerFormalArguments emitted a null value!");
5795          assert(Ins[i].VT == InVals[i].getValueType() &&
5796                 "LowerFormalArguments emitted a value with the wrong type!");
5797        });
5798
5799  // Update the DAG with the new chain value resulting from argument lowering.
5800  DAG.setRoot(NewRoot);
5801
5802  // Set up the argument values.
5803  unsigned i = 0;
5804  Idx = 1;
5805  if (!FLI.CanLowerReturn) {
5806    // Create a virtual register for the sret pointer, and put in a copy
5807    // from the sret argument into it.
5808    SmallVector<EVT, 1> ValueVTs;
5809    ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
5810    EVT VT = ValueVTs[0];
5811    EVT RegVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5812    ISD::NodeType AssertOp = ISD::DELETED_NODE;
5813    SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT,
5814                                        VT, AssertOp);
5815
5816    MachineFunction& MF = SDB->DAG.getMachineFunction();
5817    MachineRegisterInfo& RegInfo = MF.getRegInfo();
5818    unsigned SRetReg = RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT));
5819    FLI.DemoteRegister = SRetReg;
5820    NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurDebugLoc(), SRetReg, ArgValue);
5821    DAG.setRoot(NewRoot);
5822
5823    // i indexes lowered arguments.  Bump it past the hidden sret argument.
5824    // Idx indexes LLVM arguments.  Don't touch it.
5825    ++i;
5826  }
5827  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
5828      ++I, ++Idx) {
5829    SmallVector<SDValue, 4> ArgValues;
5830    SmallVector<EVT, 4> ValueVTs;
5831    ComputeValueVTs(TLI, I->getType(), ValueVTs);
5832    unsigned NumValues = ValueVTs.size();
5833    for (unsigned Value = 0; Value != NumValues; ++Value) {
5834      EVT VT = ValueVTs[Value];
5835      EVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5836      unsigned NumParts = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5837
5838      if (!I->use_empty()) {
5839        ISD::NodeType AssertOp = ISD::DELETED_NODE;
5840        if (F.paramHasAttr(Idx, Attribute::SExt))
5841          AssertOp = ISD::AssertSext;
5842        else if (F.paramHasAttr(Idx, Attribute::ZExt))
5843          AssertOp = ISD::AssertZext;
5844
5845        ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
5846                                             PartVT, VT, AssertOp));
5847      }
5848      i += NumParts;
5849    }
5850    if (!I->use_empty()) {
5851      SDB->setValue(I, DAG.getMergeValues(&ArgValues[0], NumValues,
5852                                          SDB->getCurDebugLoc()));
5853      // If this argument is live outside of the entry block, insert a copy from
5854      // wherever we got it to the vreg that other BBs will reference it as.
5855      SDB->CopyToExportRegsIfNeeded(I);
5856    }
5857  }
5858  assert(i == InVals.size() && "Argument register count mismatch!");
5859
5860  // Finally, if the target has anything special to do, allow it to do so.
5861  // FIXME: this should insert code into the DAG!
5862  EmitFunctionEntryCode(F, SDB->DAG.getMachineFunction());
5863}
5864
5865/// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
5866/// ensure constants are generated when needed.  Remember the virtual registers
5867/// that need to be added to the Machine PHI nodes as input.  We cannot just
5868/// directly add them, because expansion might result in multiple MBB's for one
5869/// BB.  As such, the start of the BB might correspond to a different MBB than
5870/// the end.
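/// For example, lowering a switch through a jump table or a tree of
/// compare-and-branch blocks introduces additional MBBs, so the block that
/// ultimately feeds a successor's PHI may not be the MBB this BB started in.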
5871///
5872void
5873SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
5874  TerminatorInst *TI = LLVMBB->getTerminator();
5875
5876  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5877
5878  // Check successor nodes' PHI nodes that expect a constant to be available
5879  // from this block.
5880  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5881    BasicBlock *SuccBB = TI->getSuccessor(succ);
5882    if (!isa<PHINode>(SuccBB->begin())) continue;
5883    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5884
5885    // If this terminator has multiple identical successors (common for
5886    // switches), only handle each succ once.
5887    if (!SuccsHandled.insert(SuccMBB)) continue;
5888
5889    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
5890    PHINode *PN;
5891
5892    // At this point we know that there is a 1-1 correspondence between LLVM PHI
5893    // nodes and Machine PHI nodes, but the incoming operands have not been
5894    // emitted yet.
5895    for (BasicBlock::iterator I = SuccBB->begin();
5896         (PN = dyn_cast<PHINode>(I)); ++I) {
5897      // Ignore dead phi's.
5898      // Ignore dead PHIs.
5899
5900      unsigned Reg;
5901      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5902
5903      if (Constant *C = dyn_cast<Constant>(PHIOp)) {
5904        unsigned &RegOut = SDB->ConstantsOut[C];
5905        if (RegOut == 0) {
5906          RegOut = FuncInfo->CreateRegForValue(C);
5907          SDB->CopyValueToVirtualRegister(C, RegOut);
5908        }
5909        Reg = RegOut;
5910      } else {
5911        Reg = FuncInfo->ValueMap[PHIOp];
5912        if (Reg == 0) {
5913          assert(isa<AllocaInst>(PHIOp) &&
5914                 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
5915                 "Didn't codegen value into a register!??");
5916          Reg = FuncInfo->CreateRegForValue(PHIOp);
5917          SDB->CopyValueToVirtualRegister(PHIOp, Reg);
5918        }
5919      }
5920
5921      // Remember that this register needs to be added to the machine PHI node
5922      // as the input for this MBB.
5923      SmallVector<EVT, 4> ValueVTs;
5924      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
5925      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
5926        EVT VT = ValueVTs[vti];
5927        unsigned NumRegisters = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5928        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
5929          SDB->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
5930        Reg += NumRegisters;
5931      }
5932    }
5933  }
5934  SDB->ConstantsOut.clear();
5935}
5936
5937/// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
5938/// supports legal types, and it emits MachineInstrs directly instead of
5939/// creating SelectionDAG nodes.
5940///
5941bool
5942SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
5943                                                      FastISel *F) {
5944  TerminatorInst *TI = LLVMBB->getTerminator();
5945
5946  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5947  unsigned OrigNumPHINodesToUpdate = SDB->PHINodesToUpdate.size();
5948
5949  // Check successor nodes' PHI nodes that expect a constant to be available
5950  // from this block.
5951  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5952    BasicBlock *SuccBB = TI->getSuccessor(succ);
5953    if (!isa<PHINode>(SuccBB->begin())) continue;
5954    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5955
5956    // If this terminator has multiple identical successors (common for
5957    // switches), only handle each succ once.
5958    if (!SuccsHandled.insert(SuccMBB)) continue;
5959
5960    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
5961    PHINode *PN;
5962
5963    // At this point we know that there is a 1-1 correspondence between LLVM PHI
5964    // nodes and Machine PHI nodes, but the incoming operands have not been
5965    // emitted yet.
5966    for (BasicBlock::iterator I = SuccBB->begin();
5967         (PN = dyn_cast<PHINode>(I)); ++I) {
5968      // Ignore dead phi's.
5969      // Ignore dead PHIs.
5970
5971      // Only handle legal types. Two interesting things to note here. First,
5972      // by bailing out early, we may leave behind some dead instructions,
5973      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
5974      // own moves. Second, this check is necessary because FastISel doesn't
5975      // use CreateRegForValue to create registers, so it always creates
5976      // exactly one register for each non-void instruction.
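      // As a sketch: an i64 PHI operand on a hypothetical 32-bit target would
      // need two registers, so it cannot be handled here and we fall back to
      // the SelectionDAG path above by returning false.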
5977      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
5978      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
5979        // Promote MVT::i1.
5980        if (VT == MVT::i1)
5981          VT = TLI.getTypeToTransformTo(*CurDAG->getContext(), VT);
5982        else {
5983          SDB->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
5984          return false;
5985        }
5986      }
5987
5988      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5989
5990      unsigned Reg = F->getRegForValue(PHIOp);
5991      if (Reg == 0) {
5992        SDB->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
5993        return false;
5994      }
5995      SDB->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
5996    }
5997  }
5998
5999  return true;
6000}
6001